Dataset schema (one row per column, with the type and the length/value statistics reported by the dataset viewer):

| Column | Type | Statistics |
|---|---|---|
| `id` | int64 | 0 to 3.29k |
| `file_name` | string | lengths 4 to 37 |
| `programming_language` | string | 2 classes |
| `method_name` | string | lengths 3 to 112 |
| `code_before` | string | lengths 701 to 809k |
| `code_after` | string | lengths 701 to 809k |
| `func_before` | string | lengths 40 to 60.4k |
| `func_after` | string | lengths 43 to 61.2k |
| `diff` | string | lengths 67 to 133k |
| `num_lines_added` | int64 | 1 to 1.49k |
| `num_lines_deleted` | int64 | 1 to 1.13k |
| `num_lines_in_file` | float64 | 23 to 18.6k |
| `num_tokens_in_file` | float64 | 129 to 172k |
| `num_lines_in_method` | int64 | 1 to 259 |
| `num_tokens_in_method` | int64 | 10 to 1.29k |
| `method_complexity` | int64 | 1 to 110 |
| `repo` | string | 267 classes |
| `cve_id` | string | lengths 13 to 16 |
| `cwe_id` | string | 8 classes |
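Given this schema, here is a minimal sketch of loading and inspecting the data with the Hugging Face `datasets` library. The dataset path `user/vuln-fix-pairs` and the `train` split name are placeholders, not the dataset's real identifiers:

```python
# Hypothetical loading example; substitute the actual dataset path.
from datasets import load_dataset

ds = load_dataset("user/vuln-fix-pairs", split="train")  # placeholder path/split

print(ds.column_names)  # id, file_name, programming_language, method_name, ...

row = ds[0]
print(row["file_name"], row["programming_language"], row["method_name"])
print(row["cve_id"], row["cwe_id"])
print(row["diff"][:400])  # first part of the unified diff for this fix
```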
2,815
eval.c
C
init_evalarg
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * eval.c: Expression evaluation. */ #define USING_FLOAT_STUFF #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) #ifdef VMS # include <float.h> #endif #define NAMESPACE_CHAR (char_u *)"abglstvw" /* * When recursively copying lists and dicts we need to remember which ones we * have done to avoid endless recursiveness. This unique ID is used for that. * The last bit is used for previous_funccal, ignored when comparing. */ static int current_copyID = 0; /* * Info used by a ":for" loop. */ typedef struct { int fi_semicolon; // TRUE if ending in '; var]' int fi_varcount; // nr of variables in the list int fi_break_count; // nr of line breaks encountered listwatch_T fi_lw; // keep an eye on the item used. list_T *fi_list; // list being used int fi_bi; // index of blob blob_T *fi_blob; // blob being used char_u *fi_string; // copy of string being used int fi_byte_idx; // byte index in fi_string } forinfo_T; static int eval2(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval3(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval4(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval5(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval6(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval7(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string); static int eval8(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string); static int eval9(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string); static int eval9_leader(typval_T *rettv, int numeric_only, char_u *start_leader, char_u **end_leaderp); static int free_unref_items(int copyID); static char_u *make_expanded_name(char_u *in_start, char_u *expr_start, char_u *expr_end, char_u *in_end); /* * Return "n1" divided by "n2", taking care of dividing by zero. * If "failed" is not NULL set it to TRUE when dividing by zero fails. */ varnumber_T num_divide(varnumber_T n1, varnumber_T n2, int *failed) { varnumber_T result; if (n2 == 0) { if (in_vim9script()) { emsg(_(e_divide_by_zero)); if (failed != NULL) *failed = TRUE; } if (n1 == 0) result = VARNUM_MIN; // similar to NaN else if (n1 < 0) result = -VARNUM_MAX; else result = VARNUM_MAX; } else result = n1 / n2; return result; } /* * Return "n1" modulus "n2", taking care of dividing by zero. * If "failed" is not NULL set it to TRUE when dividing by zero fails. */ varnumber_T num_modulus(varnumber_T n1, varnumber_T n2, int *failed) { if (n2 == 0 && in_vim9script()) { emsg(_(e_divide_by_zero)); if (failed != NULL) *failed = TRUE; } return (n2 == 0) ? 0 : (n1 % n2); } /* * Initialize the global and v: variables. */ void eval_init(void) { evalvars_init(); func_init(); } #if defined(EXITFREE) || defined(PROTO) void eval_clear(void) { evalvars_clear(); free_scriptnames(); // must come after evalvars_clear(). free_locales(); // autoloaded script names free_autoload_scriptnames(); // unreferenced lists and dicts (void)garbage_collect(FALSE); // functions not garbage collected free_all_functions(); } #endif void fill_evalarg_from_eap(evalarg_T *evalarg, exarg_T *eap, int skip) { init_evalarg(evalarg); evalarg->eval_flags = skip ? 
0 : EVAL_EVALUATE; if (eap != NULL) { evalarg->eval_cstack = eap->cstack; if (sourcing_a_script(eap) || eap->getline == get_list_line) { evalarg->eval_getline = eap->getline; evalarg->eval_cookie = eap->cookie; } } } /* * Top level evaluation function, returning a boolean. * Sets "error" to TRUE if there was an error. * Return TRUE or FALSE. */ int eval_to_bool( char_u *arg, int *error, exarg_T *eap, int skip) // only parse, don't execute { typval_T tv; varnumber_T retval = FALSE; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, skip); if (skip) ++emsg_skip; if (eval0(arg, &tv, eap, &evalarg) == FAIL) *error = TRUE; else { *error = FALSE; if (!skip) { if (in_vim9script()) retval = tv_get_bool_chk(&tv, error); else retval = (tv_get_number_chk(&tv, error) != 0); clear_tv(&tv); } } if (skip) --emsg_skip; clear_evalarg(&evalarg, eap); return (int)retval; } /* * Call eval1() and give an error message if not done at a lower level. */ static int eval1_emsg(char_u **arg, typval_T *rettv, exarg_T *eap) { char_u *start = *arg; int ret; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); ret = eval1(arg, rettv, &evalarg); if (ret == FAIL) { // Report the invalid expression unless the expression evaluation has // been cancelled due to an aborting error, an interrupt, or an // exception, or we already gave a more specific error. // Also check called_emsg for when using assert_fails(). if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before) semsg(_(e_invalid_expression_str), start); } clear_evalarg(&evalarg, eap); return ret; } /* * Return whether a typval is a valid expression to pass to eval_expr_typval() * or eval_expr_to_bool(). An empty string returns FALSE; */ int eval_expr_valid_arg(typval_T *tv) { return tv->v_type != VAR_UNKNOWN && (tv->v_type != VAR_STRING || (tv->vval.v_string != NULL && *tv->vval.v_string != NUL)); } /* * Evaluate an expression, which can be a function, partial or string. * Pass arguments "argv[argc]". * Return the result in "rettv" and OK or FAIL. */ int eval_expr_typval(typval_T *expr, typval_T *argv, int argc, typval_T *rettv) { char_u *s; char_u buf[NUMBUFLEN]; funcexe_T funcexe; if (expr->v_type == VAR_FUNC) { s = expr->vval.v_string; if (s == NULL || *s == NUL) return FAIL; CLEAR_FIELD(funcexe); funcexe.fe_evaluate = TRUE; if (call_func(s, -1, rettv, argc, argv, &funcexe) == FAIL) return FAIL; } else if (expr->v_type == VAR_PARTIAL) { partial_T *partial = expr->vval.v_partial; if (partial == NULL) return FAIL; if (partial->pt_func != NULL && partial->pt_func->uf_def_status != UF_NOT_COMPILED) { if (call_def_function(partial->pt_func, argc, argv, partial, rettv) == FAIL) return FAIL; } else { s = partial_name(partial); if (s == NULL || *s == NUL) return FAIL; CLEAR_FIELD(funcexe); funcexe.fe_evaluate = TRUE; funcexe.fe_partial = partial; if (call_func(s, -1, rettv, argc, argv, &funcexe) == FAIL) return FAIL; } } else if (expr->v_type == VAR_INSTR) { return exe_typval_instr(expr, rettv); } else { s = tv_get_string_buf_chk_strict(expr, buf, in_vim9script()); if (s == NULL) return FAIL; s = skipwhite(s); if (eval1_emsg(&s, rettv, NULL) == FAIL) return FAIL; if (*skipwhite(s) != NUL) // check for trailing chars after expr { clear_tv(rettv); semsg(_(e_invalid_expression_str), s); return FAIL; } } return OK; } /* * Like eval_to_bool() but using a typval_T instead of a string. * Works for string, funcref and partial. 
*/ int eval_expr_to_bool(typval_T *expr, int *error) { typval_T rettv; int res; if (eval_expr_typval(expr, NULL, 0, &rettv) == FAIL) { *error = TRUE; return FALSE; } res = (tv_get_bool_chk(&rettv, error) != 0); clear_tv(&rettv); return res; } /* * Top level evaluation function, returning a string. If "skip" is TRUE, * only parsing to "nextcmd" is done, without reporting errors. Return * pointer to allocated memory, or NULL for failure or when "skip" is TRUE. */ char_u * eval_to_string_skip( char_u *arg, exarg_T *eap, int skip) // only parse, don't execute { typval_T tv; char_u *retval; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, skip); if (skip) ++emsg_skip; if (eval0(arg, &tv, eap, &evalarg) == FAIL || skip) retval = NULL; else { retval = vim_strsave(tv_get_string(&tv)); clear_tv(&tv); } if (skip) --emsg_skip; clear_evalarg(&evalarg, eap); return retval; } /* * Skip over an expression at "*pp". * Return FAIL for an error, OK otherwise. */ int skip_expr(char_u **pp, evalarg_T *evalarg) { typval_T rettv; *pp = skipwhite(*pp); return eval1(pp, &rettv, evalarg); } /* * Skip over an expression at "*arg". * If in Vim9 script and line breaks are encountered, the lines are * concatenated. "evalarg->eval_tofree" will be set accordingly. * "arg" is advanced to just after the expression. * "start" is set to the start of the expression, "end" to just after the end. * Also when the expression is copied to allocated memory. * Return FAIL for an error, OK otherwise. */ int skip_expr_concatenate( char_u **arg, char_u **start, char_u **end, evalarg_T *evalarg) { typval_T rettv; int res; int vim9script = in_vim9script(); garray_T *gap = evalarg == NULL ? NULL : &evalarg->eval_ga; garray_T *freegap = evalarg == NULL ? NULL : &evalarg->eval_freega; int save_flags = evalarg == NULL ? 0 : evalarg->eval_flags; int evaluate = evalarg == NULL ? FALSE : (evalarg->eval_flags & EVAL_EVALUATE); if (vim9script && evaluate && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL)) { ga_init2(gap, sizeof(char_u *), 10); // leave room for "start" if (ga_grow(gap, 1) == OK) ++gap->ga_len; ga_init2(freegap, sizeof(char_u *), 10); } *start = *arg; // Don't evaluate the expression. if (evalarg != NULL) evalarg->eval_flags &= ~EVAL_EVALUATE; *arg = skipwhite(*arg); res = eval1(arg, &rettv, evalarg); *end = *arg; if (evalarg != NULL) evalarg->eval_flags = save_flags; if (vim9script && evaluate && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL)) { if (evalarg->eval_ga.ga_len == 1) { // just the one line, no need to concatenate ga_clear(gap); gap->ga_itemsize = 0; } else { char_u *p; size_t endoff = STRLEN(*arg); // Line breaks encountered, concatenate all the lines. *((char_u **)gap->ga_data) = *start; p = ga_concat_strings(gap, " "); // free the lines only when using getsourceline() if (evalarg->eval_cookie != NULL) { // Do not free the first line, the caller can still use it. *((char_u **)gap->ga_data) = NULL; // Do not free the last line, "arg" points into it, free it // later. vim_free(evalarg->eval_tofree); evalarg->eval_tofree = ((char_u **)gap->ga_data)[gap->ga_len - 1]; ((char_u **)gap->ga_data)[gap->ga_len - 1] = NULL; ga_clear_strings(gap); } else { ga_clear(gap); // free lines that were explicitly marked for freeing ga_clear_strings(freegap); } gap->ga_itemsize = 0; if (p == NULL) return FAIL; *start = p; vim_free(evalarg->eval_tofree_lambda); evalarg->eval_tofree_lambda = p; // Compute "end" relative to the end. 
*end = *start + STRLEN(*start) - endoff; } } return res; } /* * Convert "tv" to a string. * When "convert" is TRUE convert a List into a sequence of lines and convert * a Float to a String. * Returns an allocated string (NULL when out of memory). */ char_u * typval2string(typval_T *tv, int convert) { garray_T ga; char_u *retval; #ifdef FEAT_FLOAT char_u numbuf[NUMBUFLEN]; #endif if (convert && tv->v_type == VAR_LIST) { ga_init2(&ga, sizeof(char), 80); if (tv->vval.v_list != NULL) { list_join(&ga, tv->vval.v_list, (char_u *)"\n", TRUE, FALSE, 0); if (tv->vval.v_list->lv_len > 0) ga_append(&ga, NL); } ga_append(&ga, NUL); retval = (char_u *)ga.ga_data; } #ifdef FEAT_FLOAT else if (convert && tv->v_type == VAR_FLOAT) { vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float); retval = vim_strsave(numbuf); } #endif else retval = vim_strsave(tv_get_string(tv)); return retval; } /* * Top level evaluation function, returning a string. Does not handle line * breaks. * When "convert" is TRUE convert a List into a sequence of lines and convert * a Float to a String. * Return pointer to allocated memory, or NULL for failure. */ char_u * eval_to_string_eap( char_u *arg, int convert, exarg_T *eap) { typval_T tv; char_u *retval; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); if (eval0(arg, &tv, NULL, &evalarg) == FAIL) retval = NULL; else { retval = typval2string(&tv, convert); clear_tv(&tv); } clear_evalarg(&evalarg, NULL); return retval; } char_u * eval_to_string( char_u *arg, int convert) { return eval_to_string_eap(arg, convert, NULL); } /* * Call eval_to_string() without using current local variables and using * textlock. When "use_sandbox" is TRUE use the sandbox. * Use legacy Vim script syntax. */ char_u * eval_to_string_safe( char_u *arg, int use_sandbox, int keep_script_version) { char_u *retval; funccal_entry_T funccal_entry; int save_sc_version = current_sctx.sc_version; int save_garbage = may_garbage_collect; if (!keep_script_version) current_sctx.sc_version = 1; save_funccal(&funccal_entry); if (use_sandbox) ++sandbox; ++textlock; may_garbage_collect = FALSE; retval = eval_to_string(arg, FALSE); if (use_sandbox) --sandbox; --textlock; may_garbage_collect = save_garbage; restore_funccal(); current_sctx.sc_version = save_sc_version; return retval; } /* * Top level evaluation function, returning a number. * Evaluates "expr" silently. * Returns -1 for an error. */ varnumber_T eval_to_number(char_u *expr) { typval_T rettv; varnumber_T retval; char_u *p = skipwhite(expr); ++emsg_off; if (eval1(&p, &rettv, &EVALARG_EVALUATE) == FAIL) retval = -1; else { retval = tv_get_number_chk(&rettv, NULL); clear_tv(&rettv); } --emsg_off; return retval; } /* * Top level evaluation function. * Returns an allocated typval_T with the result. * Returns NULL when there is an error. */ typval_T * eval_expr(char_u *arg, exarg_T *eap) { typval_T *tv; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); tv = ALLOC_ONE(typval_T); if (tv != NULL && eval0(arg, tv, eap, &evalarg) == FAIL) VIM_CLEAR(tv); clear_evalarg(&evalarg, eap); return tv; } /* * "*arg" points to what can be a function name in the form of "import.Name" or * "Funcref". Return the name of the function. Set "tofree" to something that * was allocated. * If "verbose" is FALSE no errors are given. * Return NULL for any failure. 
*/ static char_u * deref_function_name( char_u **arg, char_u **tofree, evalarg_T *evalarg, int verbose) { typval_T ref; char_u *name = *arg; ref.v_type = VAR_UNKNOWN; if (eval9(arg, &ref, evalarg, FALSE) == FAIL) { dictitem_T *v; // If <SID>VarName was used it would not be found, try another way. v = find_var_also_in_script(name, NULL, FALSE); if (v == NULL) return NULL; copy_tv(&v->di_tv, &ref); } if (*skipwhite(*arg) != NUL) { if (verbose) semsg(_(e_trailing_characters_str), *arg); name = NULL; } else if (ref.v_type == VAR_FUNC && ref.vval.v_string != NULL) { name = ref.vval.v_string; ref.vval.v_string = NULL; *tofree = name; } else if (ref.v_type == VAR_PARTIAL && ref.vval.v_partial != NULL) { if (ref.vval.v_partial->pt_argc > 0 || ref.vval.v_partial->pt_dict != NULL) { if (verbose) emsg(_(e_cannot_use_partial_here)); name = NULL; } else { name = vim_strsave(partial_name(ref.vval.v_partial)); *tofree = name; } } else { if (verbose) semsg(_(e_not_callable_type_str), name); name = NULL; } clear_tv(&ref); return name; } /* * Call some Vim script function and return the result in "*rettv". * Uses argv[0] to argv[argc - 1] for the function arguments. argv[argc] * should have type VAR_UNKNOWN. * Returns OK or FAIL. */ int call_vim_function( char_u *func, int argc, typval_T *argv, typval_T *rettv) { int ret; funcexe_T funcexe; char_u *arg; char_u *name; char_u *tofree = NULL; int ignore_errors; rettv->v_type = VAR_UNKNOWN; // clear_tv() uses this CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = TRUE; // The name might be "import.Func" or "Funcref". We don't know, we need to // ignore errors for an undefined name. But we do want errors when an // autoload script has errors. Guess that when there is a dot in the name // showing errors is the right choice. ignore_errors = vim_strchr(func, '.') == NULL; arg = func; if (ignore_errors) ++emsg_off; name = deref_function_name(&arg, &tofree, &EVALARG_EVALUATE, FALSE); if (ignore_errors) --emsg_off; if (name == NULL) name = func; ret = call_func(name, -1, rettv, argc, argv, &funcexe); if (ret == FAIL) clear_tv(rettv); vim_free(tofree); return ret; } /* * Call Vim script function "func" and return the result as a string. * Uses "argv[0]" to "argv[argc - 1]" for the function arguments. "argv[argc]" * should have type VAR_UNKNOWN. * Returns NULL when calling the function fails. */ void * call_func_retstr( char_u *func, int argc, typval_T *argv) { typval_T rettv; char_u *retval; if (call_vim_function(func, argc, argv, &rettv) == FAIL) return NULL; retval = vim_strsave(tv_get_string(&rettv)); clear_tv(&rettv); return retval; } /* * Call Vim script function "func" and return the result as a List. * Uses "argv" and "argc" as call_func_retstr(). * Returns NULL when there is something wrong. */ void * call_func_retlist( char_u *func, int argc, typval_T *argv) { typval_T rettv; if (call_vim_function(func, argc, argv, &rettv) == FAIL) return NULL; if (rettv.v_type != VAR_LIST) { clear_tv(&rettv); return NULL; } return rettv.vval.v_list; } #if defined(FEAT_FOLDING) || defined(PROTO) /* * Evaluate "arg", which is 'foldexpr'. * Note: caller must set "curwin" to match "arg". * Returns the foldlevel, and any character preceding it in "*cp". Doesn't * give error messages. 
*/ int eval_foldexpr(win_T *wp, int *cp) { char_u *arg; typval_T tv; varnumber_T retval; char_u *s; sctx_T saved_sctx = current_sctx; int use_sandbox = was_set_insecurely((char_u *)"foldexpr", OPT_LOCAL); arg = wp->w_p_fde; current_sctx = wp->w_p_script_ctx[WV_FDE]; ++emsg_off; if (use_sandbox) ++sandbox; ++textlock; *cp = NUL; if (eval0(arg, &tv, NULL, &EVALARG_EVALUATE) == FAIL) retval = 0; else { // If the result is a number, just return the number. if (tv.v_type == VAR_NUMBER) retval = tv.vval.v_number; else if (tv.v_type != VAR_STRING || tv.vval.v_string == NULL) retval = 0; else { // If the result is a string, check if there is a non-digit before // the number. s = tv.vval.v_string; if (!VIM_ISDIGIT(*s) && *s != '-') *cp = *s++; retval = atol((char *)s); } clear_tv(&tv); } --emsg_off; if (use_sandbox) --sandbox; --textlock; clear_evalarg(&EVALARG_EVALUATE, NULL); current_sctx = saved_sctx; return (int)retval; } #endif /* * Get an lval: variable, Dict item or List item that can be assigned a value * to: "name", "na{me}", "name[expr]", "name[expr:expr]", "name[expr][expr]", * "name.key", "name.key[expr]" etc. * Indexing only works if "name" is an existing List or Dictionary. * "name" points to the start of the name. * If "rettv" is not NULL it points to the value to be assigned. * "unlet" is TRUE for ":unlet": slightly different behavior when something is * wrong; must end in space or cmd separator. * * flags: * GLV_QUIET: do not give error messages * GLV_READ_ONLY: will not change the variable * GLV_NO_AUTOLOAD: do not use script autoloading * * Returns a pointer to just after the name, including indexes. * When an evaluation error occurs "lp->ll_name" is NULL; * Returns NULL for a parsing error. Still need to free items in "lp"! */ char_u * get_lval( char_u *name, typval_T *rettv, lval_T *lp, int unlet, int skip, int flags, // GLV_ values int fne_flags) // flags for find_name_end() { char_u *p; char_u *expr_start, *expr_end; int cc; dictitem_T *v; typval_T var1; typval_T var2; int empty1 = FALSE; char_u *key = NULL; int len; hashtab_T *ht = NULL; int quiet = flags & GLV_QUIET; int writing; int vim9script = in_vim9script(); // Clear everything in "lp". CLEAR_POINTER(lp); if (skip || (flags & GLV_COMPILING)) { // When skipping or compiling just find the end of the name. lp->ll_name = name; lp->ll_name_end = find_name_end(name, NULL, NULL, FNE_INCL_BR | fne_flags); return lp->ll_name_end; } // Cannot use "s:var" at the Vim9 script level. "s: type" is OK. if (vim9script && at_script_level() && name[0] == 's' && name[1] == ':' && !VIM_ISWHITE(name[2])) { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), name); return NULL; } // Find the end of the name. p = find_name_end(name, &expr_start, &expr_end, fne_flags); lp->ll_name_end = p; if (expr_start != NULL) { // Don't expand the name when we already know there is an error. if (unlet && !VIM_ISWHITE(*p) && !ends_excmd(*p) && *p != '[' && *p != '.') { semsg(_(e_trailing_characters_str), p); return NULL; } lp->ll_exp_name = make_expanded_name(name, expr_start, expr_end, p); if (lp->ll_exp_name == NULL) { // Report an invalid expression in braces, unless the // expression evaluation has been cancelled due to an // aborting error, an interrupt, or an exception. if (!aborting() && !quiet) { emsg_severe = TRUE; semsg(_(e_invalid_argument_str), name); return NULL; } } lp->ll_name = lp->ll_exp_name; } else { lp->ll_name = name; if (vim9script) { // "a: type" is declaring variable "a" with a type, not "a:". 
// However, "g:[key]" is indexing a dictionary. if (p == name + 2 && p[-1] == ':' && *p != '[') { --p; lp->ll_name_end = p; } if (*p == ':') { char_u *tp = skipwhite(p + 1); if (tp == p + 1 && !quiet) { semsg(_(e_white_space_required_after_str_str), ":", p); return NULL; } if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) { semsg(_(e_using_type_not_in_script_context_str), p); return NULL; } // parse the type after the name lp->ll_type = parse_type(&tp, &SCRIPT_ITEM(current_sctx.sc_sid)->sn_type_list, !quiet); if (lp->ll_type == NULL && !quiet) return NULL; lp->ll_name_end = tp; } } } if (lp->ll_name == NULL) return p; if (*p == '.') { imported_T *import = find_imported(lp->ll_name, p - lp->ll_name, TRUE); if (import != NULL) { ufunc_T *ufunc; type_T *type; lp->ll_sid = import->imp_sid; lp->ll_name = skipwhite(p + 1); p = find_name_end(lp->ll_name, NULL, NULL, fne_flags); lp->ll_name_end = p; // check the item is exported cc = *p; *p = NUL; if (find_exported(import->imp_sid, lp->ll_name, &ufunc, &type, NULL, NULL, TRUE) == -1) { *p = cc; return NULL; } *p = cc; } } // Without [idx] or .key we are done. if ((*p != '[' && *p != '.')) return p; if (vim9script && lval_root != NULL) { // using local variable lp->ll_tv = lval_root; v = NULL; } else { cc = *p; *p = NUL; // When we would write to the variable pass &ht and prevent autoload. writing = !(flags & GLV_READ_ONLY); v = find_var(lp->ll_name, writing ? &ht : NULL, (flags & GLV_NO_AUTOLOAD) || writing); if (v == NULL && !quiet) semsg(_(e_undefined_variable_str), lp->ll_name); *p = cc; if (v == NULL) return NULL; lp->ll_tv = &v->di_tv; } if (vim9script && (flags & GLV_NO_DECL) == 0) { if (!quiet) semsg(_(e_variable_already_declared), lp->ll_name); return NULL; } /* * Loop until no more [idx] or .key is following. */ var1.v_type = VAR_UNKNOWN; var2.v_type = VAR_UNKNOWN; while (*p == '[' || (*p == '.' && p[1] != '=' && p[1] != '.')) { if (*p == '.' && lp->ll_tv->v_type != VAR_DICT) { if (!quiet) semsg(_(e_dot_can_only_be_used_on_dictionary_str), name); return NULL; } if (lp->ll_tv->v_type != VAR_LIST && lp->ll_tv->v_type != VAR_DICT && lp->ll_tv->v_type != VAR_BLOB) { if (!quiet) emsg(_(e_can_only_index_list_dictionary_or_blob)); return NULL; } // a NULL list/blob works like an empty list/blob, allocate one now. if (lp->ll_tv->v_type == VAR_LIST && lp->ll_tv->vval.v_list == NULL) rettv_list_alloc(lp->ll_tv); else if (lp->ll_tv->v_type == VAR_BLOB && lp->ll_tv->vval.v_blob == NULL) rettv_blob_alloc(lp->ll_tv); if (lp->ll_range) { if (!quiet) emsg(_(e_slice_must_come_last)); return NULL; } if (vim9script && lp->ll_valtype == NULL && v != NULL && lp->ll_tv == &v->di_tv && ht != NULL && ht == get_script_local_ht()) { svar_T *sv = find_typval_in_script(lp->ll_tv, 0, TRUE); // Vim9 script local variable: get the type if (sv != NULL) lp->ll_valtype = sv->sv_type; } len = -1; if (*p == '.') { key = p + 1; for (len = 0; ASCII_ISALNUM(key[len]) || key[len] == '_'; ++len) ; if (len == 0) { if (!quiet) emsg(_(e_cannot_use_empty_key_for_dictionary)); return NULL; } p = key + len; } else { // Get the index [expr] or the first index [expr: ]. p = skipwhite(p + 1); if (*p == ':') empty1 = TRUE; else { empty1 = FALSE; if (eval1(&p, &var1, &EVALARG_EVALUATE) == FAIL) // recursive! return NULL; if (tv_get_string_chk(&var1) == NULL) { // not a number or string clear_tv(&var1); return NULL; } p = skipwhite(p); } // Optionally get the second index [ :expr]. 
if (*p == ':') { if (lp->ll_tv->v_type == VAR_DICT) { if (!quiet) emsg(_(e_cannot_slice_dictionary)); clear_tv(&var1); return NULL; } if (rettv != NULL && !(rettv->v_type == VAR_LIST && rettv->vval.v_list != NULL) && !(rettv->v_type == VAR_BLOB && rettv->vval.v_blob != NULL)) { if (!quiet) emsg(_(e_slice_requires_list_or_blob_value)); clear_tv(&var1); return NULL; } p = skipwhite(p + 1); if (*p == ']') lp->ll_empty2 = TRUE; else { lp->ll_empty2 = FALSE; // recursive! if (eval1(&p, &var2, &EVALARG_EVALUATE) == FAIL) { clear_tv(&var1); return NULL; } if (tv_get_string_chk(&var2) == NULL) { // not a number or string clear_tv(&var1); clear_tv(&var2); return NULL; } } lp->ll_range = TRUE; } else lp->ll_range = FALSE; if (*p != ']') { if (!quiet) emsg(_(e_missing_closing_square_brace)); clear_tv(&var1); clear_tv(&var2); return NULL; } // Skip to past ']'. ++p; } if (lp->ll_tv->v_type == VAR_DICT) { if (len == -1) { // "[key]": get key from "var1" key = tv_get_string_chk(&var1); // is number or string if (key == NULL) { clear_tv(&var1); return NULL; } } lp->ll_list = NULL; // a NULL dict is equivalent with an empty dict if (lp->ll_tv->vval.v_dict == NULL) { lp->ll_tv->vval.v_dict = dict_alloc(); if (lp->ll_tv->vval.v_dict == NULL) { clear_tv(&var1); return NULL; } ++lp->ll_tv->vval.v_dict->dv_refcount; } lp->ll_dict = lp->ll_tv->vval.v_dict; lp->ll_di = dict_find(lp->ll_dict, key, len); // When assigning to a scope dictionary check that a function and // variable name is valid (only variable name unless it is l: or // g: dictionary). Disallow overwriting a builtin function. if (rettv != NULL && lp->ll_dict->dv_scope != 0) { int prevval; int wrong; if (len != -1) { prevval = key[len]; key[len] = NUL; } else prevval = 0; // avoid compiler warning wrong = (lp->ll_dict->dv_scope == VAR_DEF_SCOPE && rettv->v_type == VAR_FUNC && var_wrong_func_name(key, lp->ll_di == NULL)) || !valid_varname(key, -1, TRUE); if (len != -1) key[len] = prevval; if (wrong) { clear_tv(&var1); return NULL; } } if (lp->ll_valtype != NULL) // use the type of the member lp->ll_valtype = lp->ll_valtype->tt_member; if (lp->ll_di == NULL) { // Can't add "v:" or "a:" variable. if (lp->ll_dict == get_vimvar_dict() || &lp->ll_dict->dv_hashtab == get_funccal_args_ht()) { semsg(_(e_illegal_variable_name_str), name); clear_tv(&var1); return NULL; } // Key does not exist in dict: may need to add it. if (*p == '[' || *p == '.' || unlet) { if (!quiet) semsg(_(e_key_not_present_in_dictionary), key); clear_tv(&var1); return NULL; } if (len == -1) lp->ll_newkey = vim_strsave(key); else lp->ll_newkey = vim_strnsave(key, len); clear_tv(&var1); if (lp->ll_newkey == NULL) p = NULL; break; } // existing variable, need to check if it can be changed else if ((flags & GLV_READ_ONLY) == 0 && (var_check_ro(lp->ll_di->di_flags, name, FALSE) || var_check_lock(lp->ll_di->di_flags, name, FALSE))) { clear_tv(&var1); return NULL; } clear_tv(&var1); lp->ll_tv = &lp->ll_di->di_tv; } else if (lp->ll_tv->v_type == VAR_BLOB) { long bloblen = blob_len(lp->ll_tv->vval.v_blob); /* * Get the number and item for the only or first index of the List. 
*/ if (empty1) lp->ll_n1 = 0; else // is number or string lp->ll_n1 = (long)tv_get_number(&var1); clear_tv(&var1); if (check_blob_index(bloblen, lp->ll_n1, quiet) == FAIL) { clear_tv(&var2); return NULL; } if (lp->ll_range && !lp->ll_empty2) { lp->ll_n2 = (long)tv_get_number(&var2); clear_tv(&var2); if (check_blob_range(bloblen, lp->ll_n1, lp->ll_n2, quiet) == FAIL) return NULL; } lp->ll_blob = lp->ll_tv->vval.v_blob; lp->ll_tv = NULL; break; } else { /* * Get the number and item for the only or first index of the List. */ if (empty1) lp->ll_n1 = 0; else // is number or string lp->ll_n1 = (long)tv_get_number(&var1); clear_tv(&var1); lp->ll_dict = NULL; lp->ll_list = lp->ll_tv->vval.v_list; lp->ll_li = check_range_index_one(lp->ll_list, &lp->ll_n1, (flags & GLV_ASSIGN_WITH_OP) == 0, quiet); if (lp->ll_li == NULL) { clear_tv(&var2); return NULL; } if (lp->ll_valtype != NULL) // use the type of the member lp->ll_valtype = lp->ll_valtype->tt_member; /* * May need to find the item or absolute index for the second * index of a range. * When no index given: "lp->ll_empty2" is TRUE. * Otherwise "lp->ll_n2" is set to the second index. */ if (lp->ll_range && !lp->ll_empty2) { lp->ll_n2 = (long)tv_get_number(&var2); // is number or string clear_tv(&var2); if (check_range_index_two(lp->ll_list, &lp->ll_n1, lp->ll_li, &lp->ll_n2, quiet) == FAIL) return NULL; } lp->ll_tv = &lp->ll_li->li_tv; } } clear_tv(&var1); lp->ll_name_end = p; return p; } /* * Clear lval "lp" that was filled by get_lval(). */ void clear_lval(lval_T *lp) { vim_free(lp->ll_exp_name); vim_free(lp->ll_newkey); } /* * Set a variable that was parsed by get_lval() to "rettv". * "endp" points to just after the parsed name. * "op" is NULL, "+" for "+=", "-" for "-=", "*" for "*=", "/" for "/=", * "%" for "%=", "." for ".=" or "=" for "=". */ void set_var_lval( lval_T *lp, char_u *endp, typval_T *rettv, int copy, int flags, // ASSIGN_CONST, ASSIGN_NO_DECL char_u *op, int var_idx) // index for "let [a, b] = list" { int cc; dictitem_T *di; if (lp->ll_tv == NULL) { cc = *endp; *endp = NUL; if (in_vim9script() && check_reserved_name(lp->ll_name) == FAIL) return; if (lp->ll_blob != NULL) { int error = FALSE, val; if (op != NULL && *op != '=') { semsg(_(e_wrong_variable_type_for_str_equal), op); return; } if (value_check_lock(lp->ll_blob->bv_lock, lp->ll_name, FALSE)) return; if (lp->ll_range && rettv->v_type == VAR_BLOB) { if (lp->ll_empty2) lp->ll_n2 = blob_len(lp->ll_blob) - 1; if (blob_set_range(lp->ll_blob, lp->ll_n1, lp->ll_n2, rettv) == FAIL) return; } else { val = (int)tv_get_number_chk(rettv, &error); if (!error) blob_set_append(lp->ll_blob, lp->ll_n1, val); } } else if (op != NULL && *op != '=') { typval_T tv; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_modify_existing_variable)); *endp = cc; return; } // handle +=, -=, *=, /=, %= and .= di = NULL; if (eval_variable(lp->ll_name, (int)STRLEN(lp->ll_name), lp->ll_sid, &tv, &di, EVAL_VAR_VERBOSE) == OK) { if ((di == NULL || (!var_check_ro(di->di_flags, lp->ll_name, FALSE) && !tv_check_lock(&di->di_tv, lp->ll_name, FALSE))) && tv_op(&tv, rettv, op) == OK) set_var_const(lp->ll_name, lp->ll_sid, NULL, &tv, FALSE, ASSIGN_NO_DECL, 0); clear_tv(&tv); } } else { if (lp->ll_type != NULL && check_typval_arg_type(lp->ll_type, rettv, NULL, 0) == FAIL) return; set_var_const(lp->ll_name, lp->ll_sid, lp->ll_type, rettv, copy, flags, var_idx); } *endp = cc; } else if (value_check_lock(lp->ll_newkey == NULL ? 
lp->ll_tv->v_lock : lp->ll_tv->vval.v_dict->dv_lock, lp->ll_name, FALSE)) ; else if (lp->ll_range) { if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_range)); return; } (void)list_assign_range(lp->ll_list, rettv->vval.v_list, lp->ll_n1, lp->ll_n2, lp->ll_empty2, op, lp->ll_name); } else { /* * Assign to a List or Dictionary item. */ if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_list_or_dict)); return; } if (lp->ll_valtype != NULL && check_typval_arg_type(lp->ll_valtype, rettv, NULL, 0) == FAIL) return; if (lp->ll_newkey != NULL) { if (op != NULL && *op != '=') { semsg(_(e_key_not_present_in_dictionary), lp->ll_newkey); return; } if (dict_wrong_func_name(lp->ll_tv->vval.v_dict, rettv, lp->ll_newkey)) return; // Need to add an item to the Dictionary. di = dictitem_alloc(lp->ll_newkey); if (di == NULL) return; if (dict_add(lp->ll_tv->vval.v_dict, di) == FAIL) { vim_free(di); return; } lp->ll_tv = &di->di_tv; } else if (op != NULL && *op != '=') { tv_op(lp->ll_tv, rettv, op); return; } else clear_tv(lp->ll_tv); /* * Assign the value to the variable or list item. */ if (copy) copy_tv(rettv, lp->ll_tv); else { *lp->ll_tv = *rettv; lp->ll_tv->v_lock = 0; init_tv(rettv); } } } /* * Handle "tv1 += tv2", "tv1 -= tv2", "tv1 *= tv2", "tv1 /= tv2", "tv1 %= tv2" * and "tv1 .= tv2" * Returns OK or FAIL. */ int tv_op(typval_T *tv1, typval_T *tv2, char_u *op) { varnumber_T n; char_u numbuf[NUMBUFLEN]; char_u *s; int failed = FALSE; // Can't do anything with a Funcref or Dict on the right. // v:true and friends only work with "..=". if (tv2->v_type != VAR_FUNC && tv2->v_type != VAR_DICT && ((tv2->v_type != VAR_BOOL && tv2->v_type != VAR_SPECIAL) || *op == '.')) { switch (tv1->v_type) { case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: case VAR_DICT: case VAR_FUNC: case VAR_PARTIAL: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: break; case VAR_BLOB: if (*op != '+' || tv2->v_type != VAR_BLOB) break; // BLOB += BLOB if (tv1->vval.v_blob != NULL && tv2->vval.v_blob != NULL) { blob_T *b1 = tv1->vval.v_blob; blob_T *b2 = tv2->vval.v_blob; int i, len = blob_len(b2); for (i = 0; i < len; i++) ga_append(&b1->bv_ga, blob_get(b2, i)); } return OK; case VAR_LIST: if (*op != '+' || tv2->v_type != VAR_LIST) break; // List += List if (tv2->vval.v_list != NULL) { if (tv1->vval.v_list == NULL) { tv1->vval.v_list = tv2->vval.v_list; ++tv1->vval.v_list->lv_refcount; } else list_extend(tv1->vval.v_list, tv2->vval.v_list, NULL); } return OK; case VAR_NUMBER: case VAR_STRING: if (tv2->v_type == VAR_LIST) break; if (vim_strchr((char_u *)"+-*/%", *op) != NULL) { // nr += nr , nr -= nr , nr *=nr , nr /= nr , nr %= nr n = tv_get_number(tv1); #ifdef FEAT_FLOAT if (tv2->v_type == VAR_FLOAT) { float_T f = n; if (*op == '%') break; switch (*op) { case '+': f += tv2->vval.v_float; break; case '-': f -= tv2->vval.v_float; break; case '*': f *= tv2->vval.v_float; break; case '/': f /= tv2->vval.v_float; break; } clear_tv(tv1); tv1->v_type = VAR_FLOAT; tv1->vval.v_float = f; } else #endif { switch (*op) { case '+': n += tv_get_number(tv2); break; case '-': n -= tv_get_number(tv2); break; case '*': n *= tv_get_number(tv2); break; case '/': n = num_divide(n, tv_get_number(tv2), &failed); break; case '%': n = num_modulus(n, tv_get_number(tv2), &failed); break; } clear_tv(tv1); tv1->v_type = VAR_NUMBER; tv1->vval.v_number = n; } } else { if (tv2->v_type == VAR_FLOAT) break; // str .= str s = 
tv_get_string(tv1); s = concat_str(s, tv_get_string_buf(tv2, numbuf)); clear_tv(tv1); tv1->v_type = VAR_STRING; tv1->vval.v_string = s; } return failed ? FAIL : OK; case VAR_FLOAT: #ifdef FEAT_FLOAT { float_T f; if (*op == '%' || *op == '.' || (tv2->v_type != VAR_FLOAT && tv2->v_type != VAR_NUMBER && tv2->v_type != VAR_STRING)) break; if (tv2->v_type == VAR_FLOAT) f = tv2->vval.v_float; else f = tv_get_number(tv2); switch (*op) { case '+': tv1->vval.v_float += f; break; case '-': tv1->vval.v_float -= f; break; case '*': tv1->vval.v_float *= f; break; case '/': tv1->vval.v_float /= f; break; } } #endif return OK; } } semsg(_(e_wrong_variable_type_for_str_equal), op); return FAIL; } /* * Evaluate the expression used in a ":for var in expr" command. * "arg" points to "var". * Set "*errp" to TRUE for an error, FALSE otherwise; * Return a pointer that holds the info. Null when there is an error. */ void * eval_for_line( char_u *arg, int *errp, exarg_T *eap, evalarg_T *evalarg) { forinfo_T *fi; char_u *var_list_end; char_u *expr; typval_T tv; list_T *l; int skip = !(evalarg->eval_flags & EVAL_EVALUATE); *errp = TRUE; // default: there is an error fi = ALLOC_CLEAR_ONE(forinfo_T); if (fi == NULL) return NULL; var_list_end = skip_var_list(arg, TRUE, &fi->fi_varcount, &fi->fi_semicolon, FALSE); if (var_list_end == NULL) return fi; expr = skipwhite_and_linebreak(var_list_end, evalarg); if (expr[0] != 'i' || expr[1] != 'n' || !(expr[2] == NUL || VIM_ISWHITE(expr[2]))) { if (in_vim9script() && *expr == ':' && expr != var_list_end) semsg(_(e_no_white_space_allowed_before_colon_str), expr); else emsg(_(e_missing_in_after_for)); return fi; } if (skip) ++emsg_skip; expr = skipwhite_and_linebreak(expr + 2, evalarg); if (eval0(expr, &tv, eap, evalarg) == OK) { *errp = FALSE; if (!skip) { if (tv.v_type == VAR_LIST) { l = tv.vval.v_list; if (l == NULL) { // a null list is like an empty list: do nothing clear_tv(&tv); } else { // Need a real list here. CHECK_LIST_MATERIALIZE(l); // No need to increment the refcount, it's already set for // the list being used in "tv". fi->fi_list = l; list_add_watch(l, &fi->fi_lw); fi->fi_lw.lw_item = l->lv_first; } } else if (tv.v_type == VAR_BLOB) { fi->fi_bi = 0; if (tv.vval.v_blob != NULL) { typval_T btv; // Make a copy, so that the iteration still works when the // blob is changed. blob_copy(tv.vval.v_blob, &btv); fi->fi_blob = btv.vval.v_blob; } clear_tv(&tv); } else if (tv.v_type == VAR_STRING) { fi->fi_byte_idx = 0; fi->fi_string = tv.vval.v_string; tv.vval.v_string = NULL; if (fi->fi_string == NULL) fi->fi_string = vim_strsave((char_u *)""); } else { emsg(_(e_string_list_or_blob_required)); clear_tv(&tv); } } } if (skip) --emsg_skip; fi->fi_break_count = evalarg->eval_break_count; return fi; } /* * Used when looping over a :for line, skip the "in expr" part. */ void skip_for_lines(void *fi_void, evalarg_T *evalarg) { forinfo_T *fi = (forinfo_T *)fi_void; int i; for (i = 0; i < fi->fi_break_count; ++i) eval_next_line(NULL, evalarg); } /* * Use the first item in a ":for" list. Advance to the next. * Assign the values to the variable (list). "arg" points to the first one. * Return TRUE when a valid item was found, FALSE when at end of list or * something wrong. */ int next_for_item(void *fi_void, char_u *arg) { forinfo_T *fi = (forinfo_T *)fi_void; int result; int flag = ASSIGN_FOR_LOOP | (in_vim9script() ? (ASSIGN_FINAL // first round: error if variable exists | (fi->fi_bi == 0 ? 
0 : ASSIGN_DECL) | ASSIGN_NO_MEMBER_TYPE) : 0); listitem_T *item; int skip_assign = in_vim9script() && arg[0] == '_' && !eval_isnamec(arg[1]); if (fi->fi_blob != NULL) { typval_T tv; if (fi->fi_bi >= blob_len(fi->fi_blob)) return FALSE; tv.v_type = VAR_NUMBER; tv.v_lock = VAR_FIXED; tv.vval.v_number = blob_get(fi->fi_blob, fi->fi_bi); ++fi->fi_bi; if (skip_assign) return TRUE; return ex_let_vars(arg, &tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK; } if (fi->fi_string != NULL) { typval_T tv; int len; len = mb_ptr2len(fi->fi_string + fi->fi_byte_idx); if (len == 0) return FALSE; tv.v_type = VAR_STRING; tv.v_lock = VAR_FIXED; tv.vval.v_string = vim_strnsave(fi->fi_string + fi->fi_byte_idx, len); fi->fi_byte_idx += len; ++fi->fi_bi; if (skip_assign) result = TRUE; else result = ex_let_vars(arg, &tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK; vim_free(tv.vval.v_string); return result; } item = fi->fi_lw.lw_item; if (item == NULL) result = FALSE; else { fi->fi_lw.lw_item = item->li_next; ++fi->fi_bi; if (skip_assign) result = TRUE; else result = (ex_let_vars(arg, &item->li_tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK); } return result; } /* * Free the structure used to store info used by ":for". */ void free_for_info(void *fi_void) { forinfo_T *fi = (forinfo_T *)fi_void; if (fi == NULL) return; if (fi->fi_list != NULL) { list_rem_watch(fi->fi_list, &fi->fi_lw); list_unref(fi->fi_list); } else if (fi->fi_blob != NULL) blob_unref(fi->fi_blob); else vim_free(fi->fi_string); vim_free(fi); } void set_context_for_expression( expand_T *xp, char_u *arg, cmdidx_T cmdidx) { int has_expr = cmdidx != CMD_let && cmdidx != CMD_var; int c; char_u *p; if (cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_const || cmdidx == CMD_final) { xp->xp_context = EXPAND_USER_VARS; if (vim_strpbrk(arg, (char_u *)"\"'+-*/%.=!?~|&$([<>,#") == NULL) { // ":let var1 var2 ...": find last space. for (p = arg + STRLEN(arg); p >= arg; ) { xp->xp_pattern = p; MB_PTR_BACK(arg, p); if (VIM_ISWHITE(*p)) break; } return; } } else xp->xp_context = cmdidx == CMD_call ? EXPAND_FUNCTIONS : EXPAND_EXPRESSION; while ((xp->xp_pattern = vim_strpbrk(arg, (char_u *)"\"'+-*/%.=!?~|&$([<>,#")) != NULL) { c = *xp->xp_pattern; if (c == '&') { c = xp->xp_pattern[1]; if (c == '&') { ++xp->xp_pattern; xp->xp_context = has_expr ? EXPAND_EXPRESSION : EXPAND_NOTHING; } else if (c != ' ') { xp->xp_context = EXPAND_SETTINGS; if ((c == 'l' || c == 'g') && xp->xp_pattern[2] == ':') xp->xp_pattern += 2; } } else if (c == '$') { // environment variable xp->xp_context = EXPAND_ENV_VARS; } else if (c == '=') { has_expr = TRUE; xp->xp_context = EXPAND_EXPRESSION; } else if (c == '#' && xp->xp_context == EXPAND_EXPRESSION) { // Autoload function/variable contains '#'. break; } else if ((c == '<' || c == '#') && xp->xp_context == EXPAND_FUNCTIONS && vim_strchr(xp->xp_pattern, '(') == NULL) { // Function name can start with "<SNR>" and contain '#'. break; } else if (has_expr) { if (c == '"') // string { while ((c = *++xp->xp_pattern) != NUL && c != '"') if (c == '\\' && xp->xp_pattern[1] != NUL) ++xp->xp_pattern; xp->xp_context = EXPAND_NOTHING; } else if (c == '\'') // literal string { // Trick: '' is like stopping and starting a literal string. 
while ((c = *++xp->xp_pattern) != NUL && c != '\'') /* skip */ ; xp->xp_context = EXPAND_NOTHING; } else if (c == '|') { if (xp->xp_pattern[1] == '|') { ++xp->xp_pattern; xp->xp_context = EXPAND_EXPRESSION; } else xp->xp_context = EXPAND_COMMANDS; } else xp->xp_context = EXPAND_EXPRESSION; } else // Doesn't look like something valid, expand as an expression // anyway. xp->xp_context = EXPAND_EXPRESSION; arg = xp->xp_pattern; if (*arg != NUL) while ((c = *++arg) != NUL && (c == ' ' || c == '\t')) /* skip */ ; } // ":exe one two" completes "two" if ((cmdidx == CMD_execute || cmdidx == CMD_echo || cmdidx == CMD_echon || cmdidx == CMD_echomsg) && xp->xp_context == EXPAND_EXPRESSION) { for (;;) { char_u *n = skiptowhite(arg); if (n == arg || IS_WHITE_OR_NUL(*skipwhite(n))) break; arg = skipwhite(n); } } xp->xp_pattern = arg; } /* * Return TRUE if "pat" matches "text". * Does not use 'cpo' and always uses 'magic'. */ int pattern_match(char_u *pat, char_u *text, int ic) { int matches = FALSE; char_u *save_cpo; regmatch_T regmatch; // avoid 'l' flag in 'cpoptions' save_cpo = p_cpo; p_cpo = empty_option; regmatch.regprog = vim_regcomp(pat, RE_MAGIC + RE_STRING); if (regmatch.regprog != NULL) { regmatch.rm_ic = ic; matches = vim_regexec_nl(&regmatch, text, (colnr_T)0); vim_regfree(regmatch.regprog); } p_cpo = save_cpo; return matches; } /* * Handle a name followed by "(". Both for just "name(arg)" and for * "expr->name(arg)". * Returns OK or FAIL. */ static int eval_func( char_u **arg, // points to "(", will be advanced evalarg_T *evalarg, char_u *name, int name_len, typval_T *rettv, int flags, typval_T *basetv) // "expr" for "expr->name(arg)" { int evaluate = flags & EVAL_EVALUATE; char_u *s = name; int len = name_len; partial_T *partial; int ret = OK; type_T *type = NULL; int found_var = FALSE; if (!evaluate) check_vars(s, len); // If "s" is the name of a variable of type VAR_FUNC // use its contents. s = deref_func_name(s, &len, &partial, in_vim9script() ? &type : NULL, !evaluate, FALSE, &found_var); // Need to make a copy, in case evaluating the arguments makes // the name invalid. s = vim_strsave(s); if (s == NULL || (evaluate && (*s == NUL || (flags & EVAL_CONSTANT)))) ret = FAIL; else { funcexe_T funcexe; // Invoke the function. CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = evaluate; funcexe.fe_partial = partial; funcexe.fe_basetv = basetv; funcexe.fe_check_type = type; funcexe.fe_found_var = found_var; ret = get_func_tv(s, len, rettv, arg, evalarg, &funcexe); } vim_free(s); // If evaluate is FALSE rettv->v_type was not set in // get_func_tv, but it's needed in handle_subscript() to parse // what follows. So set it here. if (rettv->v_type == VAR_UNKNOWN && !evaluate && **arg == '(') { rettv->vval.v_string = NULL; rettv->v_type = VAR_FUNC; } // Stop the expression evaluation when immediately // aborting on error, or when an interrupt occurred or // an exception was thrown but not caught. if (evaluate && aborting()) { if (ret == OK) clear_tv(rettv); ret = FAIL; } return ret; } /* * After a NL, skip over empty lines and comment-only lines. */ static char_u * newline_skip_comments(char_u *arg) { char_u *p = arg + 1; for (;;) { p = skipwhite(p); if (*p == NUL) break; if (vim9_comment_start(p)) { char_u *nl = vim_strchr(p, NL); if (nl == NULL) break; p = nl; } if (*p != NL) break; ++p; // skip another NL } return p; } /* * Get the next line source line without advancing. But do skip over comment * lines. 
* Only called for Vim9 script. */ static char_u * getline_peek_skip_comments(evalarg_T *evalarg) { for (;;) { char_u *next = getline_peek(evalarg->eval_getline, evalarg->eval_cookie); char_u *p; if (next == NULL) break; p = skipwhite(next); if (*p != NUL && !vim9_comment_start(p)) return next; if (eval_next_line(NULL, evalarg) == NULL) break; } return NULL; } /* * If inside Vim9 script, "arg" points to the end of a line (ignoring a # * comment) and there is a next line, return the next line (skipping blanks) * and set "getnext". * Otherwise return the next non-white at or after "arg" and set "getnext" to * FALSE. * "arg" must point somewhere inside a line, not at the start. */ char_u * eval_next_non_blank(char_u *arg, evalarg_T *evalarg, int *getnext) { char_u *p = skipwhite(arg); *getnext = FALSE; if (in_vim9script() && evalarg != NULL && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL || *p == NL) && (*p == NUL || *p == NL || (vim9_comment_start(p) && VIM_ISWHITE(p[-1])))) { char_u *next; if (*p == NL) next = newline_skip_comments(p); else if (evalarg->eval_cookie != NULL) next = getline_peek_skip_comments(evalarg); else next = peek_next_line_from_context(evalarg->eval_cctx); if (next != NULL) { *getnext = TRUE; return skipwhite(next); } } return p; } /* * To be called after eval_next_non_blank() sets "getnext" to TRUE. * Only called for Vim9 script. */ char_u * eval_next_line(char_u *arg, evalarg_T *evalarg) { garray_T *gap = &evalarg->eval_ga; char_u *line; if (arg != NULL) { if (*arg == NL) return newline_skip_comments(arg); // Truncate before a trailing comment, so that concatenating the lines // won't turn the rest into a comment. if (*skipwhite(arg) == '#') *arg = NUL; } if (evalarg->eval_cookie != NULL) line = evalarg->eval_getline(0, evalarg->eval_cookie, 0, GETLINE_CONCAT_ALL); else line = next_line_from_context(evalarg->eval_cctx, TRUE); if (line == NULL) return NULL; ++evalarg->eval_break_count; if (gap->ga_itemsize > 0 && ga_grow(gap, 1) == OK) { char_u *p = skipwhite(line); // Going to concatenate the lines after parsing. For an empty or // comment line use an empty string. if (*p == NUL || vim9_comment_start(p)) { vim_free(line); line = vim_strsave((char_u *)""); } ((char_u **)gap->ga_data)[gap->ga_len] = line; ++gap->ga_len; } else if (evalarg->eval_cookie != NULL) { vim_free(evalarg->eval_tofree); evalarg->eval_tofree = line; } // Advanced to the next line, "arg" no longer points into the previous // line. evalarg->eval_using_cmdline = FALSE; return skipwhite(line); } /* * Call eval_next_non_blank() and get the next line if needed. */ char_u * skipwhite_and_linebreak(char_u *arg, evalarg_T *evalarg) { int getnext; char_u *p = skipwhite_and_nl(arg); if (evalarg == NULL) return skipwhite(arg); eval_next_non_blank(p, evalarg, &getnext); if (getnext) return eval_next_line(arg, evalarg); return p; } /* * Initialize "evalarg" for use. */ void init_evalarg(evalarg_T *evalarg) { CLEAR_POINTER(evalarg); ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20); } /* * After using "evalarg" filled from "eap": free the memory. */ void clear_evalarg(evalarg_T *evalarg, exarg_T *eap) { if (evalarg != NULL) { if (evalarg->eval_tofree != NULL) { if (eap != NULL) { // We may need to keep the original command line, e.g. for // ":let" it has the variable names. But we may also need the // new one, "nextcmd" points into it. Keep both. 
vim_free(eap->cmdline_tofree); eap->cmdline_tofree = *eap->cmdlinep; *eap->cmdlinep = evalarg->eval_tofree; } else vim_free(evalarg->eval_tofree); evalarg->eval_tofree = NULL; } ga_clear_strings(&evalarg->eval_tofree_ga); VIM_CLEAR(evalarg->eval_tofree_lambda); } } /* * The "evaluate" argument: When FALSE, the argument is only parsed but not * executed. The function may return OK, but the rettv will be of type * VAR_UNKNOWN. The function still returns FAIL for a syntax error. */ /* * Handle zero level expression. * This calls eval1() and handles error message and nextcmd. * Put the result in "rettv" when returning OK and "evaluate" is TRUE. * Note: "rettv.v_lock" is not set. * "evalarg" can be NULL, EVALARG_EVALUATE or a pointer. * Return OK or FAIL. */ int eval0( char_u *arg, typval_T *rettv, exarg_T *eap, evalarg_T *evalarg) { return eval0_retarg(arg, rettv, eap, evalarg, NULL); } /* * Like eval0() but when "retarg" is not NULL store the pointer to after the * expression and don't check what comes after the expression. */ int eval0_retarg( char_u *arg, typval_T *rettv, exarg_T *eap, evalarg_T *evalarg, char_u **retarg) { int ret; char_u *p; char_u *expr_end; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; int flags = evalarg == NULL ? 0 : evalarg->eval_flags; int check_for_end = retarg == NULL; int end_error = FALSE; p = skipwhite(arg); ret = eval1(&p, rettv, evalarg); if (ret != FAIL) { expr_end = p; p = skipwhite(p); // In Vim9 script a command block is not split at NL characters for // commands using an expression argument. Skip over a '#' comment to // check for a following NL. Require white space before the '#'. if (in_vim9script() && p > expr_end && retarg == NULL) while (*p == '#') { char_u *nl = vim_strchr(p, NL); if (nl == NULL) break; p = skipwhite(nl + 1); if (eap != NULL && *p != NUL) eap->nextcmd = p; check_for_end = FALSE; } if (check_for_end) end_error = !ends_excmd2(arg, p); } if (ret == FAIL || end_error) { if (ret != FAIL) clear_tv(rettv); /* * Report the invalid expression unless the expression evaluation has * been cancelled due to an aborting error, an interrupt, or an * exception, or we already gave a more specific error. * Also check called_emsg for when using assert_fails(). */ if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before && (flags & EVAL_CONSTANT) == 0 && (!in_vim9script() || !vim9_bad_comment(p))) { if (end_error) semsg(_(e_trailing_characters_str), p); else semsg(_(e_invalid_expression_str), arg); } // Some of the expression may not have been consumed. Do not check for // a next command to avoid more errors, unless "|" is following, which // could only be a command separator. if (eap != NULL && p != NULL && skipwhite(p)[0] == '|' && skipwhite(p)[1] != '|') eap->nextcmd = check_nextcmd(p); return FAIL; } if (retarg != NULL) *retarg = p; else if (check_for_end && eap != NULL) set_nextcmd(eap, p); return ret; } /* * Handle top level expression: * expr2 ? expr1 : expr1 * expr2 ?? expr1 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Note: "rettv.v_lock" is not set. * * Return OK or FAIL. */ int eval1(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; CLEAR_POINTER(rettv); /* * Get the first variable. 
*/ if (eval2(arg, rettv, evalarg) == FAIL) return FAIL; p = eval_next_non_blank(*arg, evalarg, &getnext); if (*p == '?') { int op_falsy = p[1] == '?'; int result; typval_T var2; evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int orig_flags; int evaluate; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = evalarg_used->eval_flags & EVAL_EVALUATE; if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, op_falsy ? 2 : 1); clear_tv(rettv); return FAIL; } *arg = p; } result = FALSE; if (evaluate) { int error = FALSE; if (op_falsy) result = tv2bool(rettv); else if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) != 0) result = TRUE; if (error || !op_falsy || !result) clear_tv(rettv); if (error) return FAIL; } /* * Get the second variable. Recursive! */ if (op_falsy) ++*arg; if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg - (op_falsy ? 1 : 0), op_falsy ? 2 : 1); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg_used); evalarg_used->eval_flags = (op_falsy ? !result : result) ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval1(arg, &var2, evalarg_used) == FAIL) { evalarg_used->eval_flags = orig_flags; return FAIL; } if (!op_falsy || !result) *rettv = var2; if (!op_falsy) { /* * Check for the ":". */ p = eval_next_non_blank(*arg, evalarg_used, &getnext); if (*p != ':') { emsg(_(e_missing_colon_after_questionmark)); if (evaluate && result) clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 1); clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } *arg = p; } /* * Get the third variable. Recursive! */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg, 1); clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg_used); evalarg_used->eval_flags = !result ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval1(arg, &var2, evalarg_used) == FAIL) { if (evaluate && result) clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } if (evaluate && !result) *rettv = var2; } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle first level expression: * expr2 || expr2 || expr2 logical OR * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval2(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; /* * Get the first expression. */ if (eval3(arg, rettv, evalarg) == FAIL) return FAIL; /* * Handle the "||" operator. 
*/ p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '|' && p[1] == '|') { evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int evaluate; int orig_flags; long result = FALSE; typval_T var2; int error = FALSE; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = orig_flags & EVAL_EVALUATE; if (evaluate) { if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) != 0) result = TRUE; clear_tv(rettv); if (error) return FAIL; } /* * Repeat until there is no following "||". */ while (p[0] == '|' && p[1] == '|') { if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = p; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[2])) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 2, evalarg_used); evalarg_used->eval_flags = !result ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval3(arg, &var2, evalarg_used) == FAIL) return FAIL; /* * Compute the result. */ if (evaluate && !result) { if (vim9script) result = tv_get_bool_chk(&var2, &error); else if (tv_get_number_chk(&var2, &error) != 0) result = TRUE; clear_tv(&var2); if (error) return FAIL; } if (evaluate) { if (vim9script) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = result ? VVAL_TRUE : VVAL_FALSE; } else { rettv->v_type = VAR_NUMBER; rettv->vval.v_number = result; } } p = eval_next_non_blank(*arg, evalarg_used, &getnext); } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle second level expression: * expr3 && expr3 && expr3 logical AND * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval3(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; /* * Get the first expression. */ if (eval4(arg, rettv, evalarg) == FAIL) return FAIL; /* * Handle the "&&" operator. */ p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '&' && p[1] == '&') { evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int orig_flags; int evaluate; long result = TRUE; typval_T var2; int error = FALSE; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = orig_flags & EVAL_EVALUATE; if (evaluate) { if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) == 0) result = FALSE; clear_tv(rettv); if (error) return FAIL; } /* * Repeat until there is no following "&&". */ while (p[0] == '&' && p[1] == '&') { if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = p; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[2])) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 2, evalarg_used); evalarg_used->eval_flags = result ? orig_flags : orig_flags & ~EVAL_EVALUATE; CLEAR_FIELD(var2); if (eval4(arg, &var2, evalarg_used) == FAIL) return FAIL; /* * Compute the result. 
*/ if (evaluate && result) { if (vim9script) result = tv_get_bool_chk(&var2, &error); else if (tv_get_number_chk(&var2, &error) == 0) result = FALSE; clear_tv(&var2); if (error) return FAIL; } if (evaluate) { if (vim9script) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = result ? VVAL_TRUE : VVAL_FALSE; } else { rettv->v_type = VAR_NUMBER; rettv->vval.v_number = result; } } p = eval_next_non_blank(*arg, evalarg_used, &getnext); } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle third level expression: * var1 == var2 * var1 =~ var2 * var1 != var2 * var1 !~ var2 * var1 > var2 * var1 >= var2 * var1 < var2 * var1 <= var2 * var1 is var2 * var1 isnot var2 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval4(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; exprtype_T type = EXPR_UNKNOWN; int len = 2; int type_is = FALSE; /* * Get the first expression. */ if (eval5(arg, rettv, evalarg) == FAIL) return FAIL; p = eval_next_non_blank(*arg, evalarg, &getnext); type = get_compare_type(p, &len, &type_is); /* * If there is a comparative operator, use it. */ if (type != EXPR_UNKNOWN) { typval_T var2; int ic; int vim9script = in_vim9script(); int evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); long comp_lnum = SOURCING_LNUM; if (getnext) { *arg = eval_next_line(*arg, evalarg); p = *arg; } else if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, len); clear_tv(rettv); return FAIL; } if (vim9script && type_is && (p[len] == '?' || p[len] == '#')) { semsg(_(e_invalid_expression_str), p); clear_tv(rettv); return FAIL; } // extra question mark appended: ignore case if (p[len] == '?') { ic = TRUE; ++len; } // extra '#' appended: match case else if (p[len] == '#') { ic = FALSE; ++len; } // nothing appended: use 'ignorecase' if not in Vim script else ic = vim9script ? FALSE : p_ic; /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL(p[len])) { error_white_both(p, len); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(p + len, evalarg); if (eval5(arg, &var2, evalarg) == FAIL) { clear_tv(rettv); return FAIL; } if (evaluate) { int ret; // use the line of the comparison for messages SOURCING_LNUM = comp_lnum; if (vim9script && check_compare_types(type, rettv, &var2) == FAIL) { ret = FAIL; clear_tv(rettv); } else ret = typval_compare(rettv, &var2, type, ic); clear_tv(&var2); return ret; } } return OK; } /* * Make a copy of blob "tv1" and append blob "tv2". */ void eval_addblob(typval_T *tv1, typval_T *tv2) { blob_T *b1 = tv1->vval.v_blob; blob_T *b2 = tv2->vval.v_blob; blob_T *b = blob_alloc(); int i; if (b != NULL) { for (i = 0; i < blob_len(b1); i++) ga_append(&b->bv_ga, blob_get(b1, i)); for (i = 0; i < blob_len(b2); i++) ga_append(&b->bv_ga, blob_get(b2, i)); clear_tv(tv1); rettv_blob_set(tv1, b); } } /* * Make a copy of list "tv1" and append list "tv2". */ int eval_addlist(typval_T *tv1, typval_T *tv2) { typval_T var3; // concatenate Lists if (list_concat(tv1->vval.v_list, tv2->vval.v_list, &var3) == FAIL) { clear_tv(tv1); clear_tv(tv2); return FAIL; } clear_tv(tv1); *tv1 = var3; return OK; } /* * Handle the bitwise left/right shift operator expression: * var1 << var2 * var1 >> var2 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. 
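 *
 * Both operands must be Numbers and the right operand must not be
 * negative.  Shifting by more than MAX_LSHIFT_BITS bits gives zero.
 * E.g. "1 << 4" evaluates to 16 and "256 >> 4" to 16.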
* * Return OK or FAIL. */ static int eval5(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { /* * Get the first expression. */ if (eval6(arg, rettv, evalarg) == FAIL) return FAIL; /* * Repeat computing, until no '<<' or '>>' is following. */ for (;;) { char_u *p; int getnext; exprtype_T type; int evaluate; typval_T var2; int vim9script; p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '<' && p[1] == '<') type = EXPR_LSHIFT; else if (p[0] == '>' && p[1] == '>') type = EXPR_RSHIFT; else return OK; // Handle a bitwise left or right shift operator if (rettv->v_type != VAR_NUMBER) { // left operand should be a number emsg(_(e_bitshift_ops_must_be_number)); clear_tv(rettv); return FAIL; } evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); vim9script = in_vim9script(); if (getnext) { *arg = eval_next_line(*arg, evalarg); p = *arg; } else if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL(p[2])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(p + 2, evalarg); if (eval6(arg, &var2, evalarg) == FAIL) { clear_tv(rettv); return FAIL; } if (var2.v_type != VAR_NUMBER || var2.vval.v_number < 0) { // right operand should be a positive number if (var2.v_type != VAR_NUMBER) emsg(_(e_bitshift_ops_must_be_number)); else emsg(_(e_bitshift_ops_must_be_postive)); clear_tv(rettv); clear_tv(&var2); return FAIL; } if (evaluate) { if (var2.vval.v_number > MAX_LSHIFT_BITS) // shifting more bits than we have always results in zero rettv->vval.v_number = 0; else if (type == EXPR_LSHIFT) rettv->vval.v_number = (uvarnumber_T)rettv->vval.v_number << var2.vval.v_number; else rettv->vval.v_number = (uvarnumber_T)rettv->vval.v_number >> var2.vval.v_number; } clear_tv(&var2); } return OK; } /* * Handle fifth level expression: * + number addition, concatenation of list or blob * - number subtraction * . string concatenation (if script version is 1) * .. string concatenation * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval6(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { /* * Get the first expression. */ if (eval7(arg, rettv, evalarg, FALSE) == FAIL) return FAIL; /* * Repeat computing, until no '+', '-' or '.' is following. */ for (;;) { int evaluate; int getnext; char_u *p; int op; int oplen; int concat; typval_T var2; int vim9script = in_vim9script(); // "." is only string concatenation when scriptversion is 1 // "+=", "-=" and "..=" are assignments // "++" and "--" on the next line are a separate command. p = eval_next_non_blank(*arg, evalarg, &getnext); op = *p; concat = op == '.' && (*(p + 1) == '.' || in_old_script(2)); if ((op != '+' && op != '-' && !concat) || p[1] == '=' || (p[1] == '.' && p[2] == '=')) break; if (getnext && (op == '+' || op == '-') && p[0] == p[1]) break; evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); oplen = (concat && p[1] == '.') ? 2 : 1; if (getnext) *arg = eval_next_line(*arg, evalarg); else { if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, oplen); clear_tv(rettv); return FAIL; } *arg = p; } if ((op != '+' || (rettv->v_type != VAR_LIST && rettv->v_type != VAR_BLOB)) #ifdef FEAT_FLOAT && (op == '.' 
|| rettv->v_type != VAR_FLOAT) #endif && evaluate) { int error = FALSE; // For "list + ...", an illegal use of the first operand as // a number cannot be determined before evaluating the 2nd // operand: if this is also a list, all is ok. // For "something . ...", "something - ..." or "non-list + ...", // we know that the first operand needs to be a string or number // without evaluating the 2nd operand. So check before to avoid // side effects after an error. if (op != '.') tv_get_number_chk(rettv, &error); if ((op == '.' && tv_get_string_chk(rettv) == NULL) || error) { clear_tv(rettv); return FAIL; } } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[oplen])) { error_white_both(*arg, oplen); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + oplen, evalarg); if (eval7(arg, &var2, evalarg, !vim9script && op == '.') == FAIL) { clear_tv(rettv); return FAIL; } if (evaluate) { /* * Compute the result. */ if (op == '.') { char_u buf1[NUMBUFLEN], buf2[NUMBUFLEN]; char_u *s1 = tv_get_string_buf(rettv, buf1); char_u *s2 = NULL; if (vim9script && (var2.v_type == VAR_VOID || var2.v_type == VAR_CHANNEL || var2.v_type == VAR_JOB)) semsg(_(e_using_invalid_value_as_string_str), vartype_name(var2.v_type)); #ifdef FEAT_FLOAT else if (vim9script && var2.v_type == VAR_FLOAT) { vim_snprintf((char *)buf2, NUMBUFLEN, "%g", var2.vval.v_float); s2 = buf2; } #endif else s2 = tv_get_string_buf_chk(&var2, buf2); if (s2 == NULL) // type error ? { clear_tv(rettv); clear_tv(&var2); return FAIL; } p = concat_str(s1, s2); clear_tv(rettv); rettv->v_type = VAR_STRING; rettv->vval.v_string = p; } else if (op == '+' && rettv->v_type == VAR_BLOB && var2.v_type == VAR_BLOB) eval_addblob(rettv, &var2); else if (op == '+' && rettv->v_type == VAR_LIST && var2.v_type == VAR_LIST) { if (eval_addlist(rettv, &var2) == FAIL) return FAIL; } else { int error = FALSE; varnumber_T n1, n2; #ifdef FEAT_FLOAT float_T f1 = 0, f2 = 0; if (rettv->v_type == VAR_FLOAT) { f1 = rettv->vval.v_float; n1 = 0; } else #endif { n1 = tv_get_number_chk(rettv, &error); if (error) { // This can only happen for "list + non-list" or // "blob + non-blob". For "non-list + ..." or // "something - ...", we returned before evaluating the // 2nd operand. clear_tv(rettv); clear_tv(&var2); return FAIL; } #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) f1 = n1; #endif } #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) { f2 = var2.vval.v_float; n2 = 0; } else #endif { n2 = tv_get_number_chk(&var2, &error); if (error) { clear_tv(rettv); clear_tv(&var2); return FAIL; } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) f2 = n2; #endif } clear_tv(rettv); #ifdef FEAT_FLOAT // If there is a float on either side the result is a float. if (rettv->v_type == VAR_FLOAT || var2.v_type == VAR_FLOAT) { if (op == '+') f1 = f1 + f2; else f1 = f1 - f2; rettv->v_type = VAR_FLOAT; rettv->vval.v_float = f1; } else #endif { if (op == '+') n1 = n1 + n2; else n1 = n1 - n2; rettv->v_type = VAR_NUMBER; rettv->vval.v_number = n1; } } clear_tv(&var2); } } return OK; } /* * Handle sixth level expression: * * number multiplication * / number division * % number modulo * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval7( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { #ifdef FEAT_FLOAT int use_float = FALSE; #endif /* * Get the first expression. 
*/ if (eval8(arg, rettv, evalarg, want_string) == FAIL) return FAIL; /* * Repeat computing, until no '*', '/' or '%' is following. */ for (;;) { int evaluate; int getnext; typval_T var2; char_u *p; int op; varnumber_T n1, n2; #ifdef FEAT_FLOAT float_T f1, f2; #endif int error; // "*=", "/=" and "%=" are assignments p = eval_next_non_blank(*arg, evalarg, &getnext); op = *p; if ((op != '*' && op != '/' && op != '%') || p[1] == '=') break; evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); if (getnext) *arg = eval_next_line(*arg, evalarg); else { if (evaluate && in_vim9script() && !VIM_ISWHITE(**arg)) { error_white_both(*arg, 1); clear_tv(rettv); return FAIL; } *arg = p; } #ifdef FEAT_FLOAT f1 = 0; f2 = 0; #endif error = FALSE; if (evaluate) { #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { f1 = rettv->vval.v_float; use_float = TRUE; n1 = 0; } else #endif n1 = tv_get_number_chk(rettv, &error); clear_tv(rettv); if (error) return FAIL; } else n1 = 0; /* * Get the second variable. */ if (evaluate && in_vim9script() && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg, 1); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg); if (eval8(arg, &var2, evalarg, FALSE) == FAIL) return FAIL; if (evaluate) { #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) { if (!use_float) { f1 = n1; use_float = TRUE; } f2 = var2.vval.v_float; n2 = 0; } else #endif { n2 = tv_get_number_chk(&var2, &error); clear_tv(&var2); if (error) return FAIL; #ifdef FEAT_FLOAT if (use_float) f2 = n2; #endif } /* * Compute the result. * When either side is a float the result is a float. */ #ifdef FEAT_FLOAT if (use_float) { if (op == '*') f1 = f1 * f2; else if (op == '/') { # ifdef VMS // VMS crashes on divide by zero, work around it if (f2 == 0.0) { if (f1 == 0) f1 = -1 * __F_FLT_MAX - 1L; // similar to NaN else if (f1 < 0) f1 = -1 * __F_FLT_MAX; else f1 = __F_FLT_MAX; } else f1 = f1 / f2; # else // We rely on the floating point library to handle divide // by zero to result in "inf" and not a crash. f1 = f1 / f2; # endif } else { emsg(_(e_cannot_use_percent_with_float)); return FAIL; } rettv->v_type = VAR_FLOAT; rettv->vval.v_float = f1; } else #endif { int failed = FALSE; if (op == '*') n1 = n1 * n2; else if (op == '/') n1 = num_divide(n1, n2, &failed); else n1 = num_modulus(n1, n2, &failed); if (failed) return FAIL; rettv->v_type = VAR_NUMBER; rettv->vval.v_number = n1; } } } return OK; } /* * Handle a type cast before a base level expression. * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * Return OK or FAIL. */ static int eval8( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { type_T *want_type = NULL; garray_T type_list; // list of pointers to allocated types int res; int evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); // Recognize <type> in Vim9 script only. 
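    // E.g. in "var x = <number>g:val" the "<number>" cast is parsed here,
    // while "<SNR>123_Func()" is a script-local function call, not a cast.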
if (in_vim9script() && **arg == '<' && eval_isnamec1((*arg)[1]) && STRNCMP(*arg, "<SNR>", 5) != 0) { ++*arg; ga_init2(&type_list, sizeof(type_T *), 10); want_type = parse_type(arg, &type_list, TRUE); if (want_type == NULL && (evaluate || **arg != '>')) { clear_type_list(&type_list); return FAIL; } if (**arg != '>') { if (*skipwhite(*arg) == '>') semsg(_(e_no_white_space_allowed_before_str_str), ">", *arg); else emsg(_(e_missing_gt)); clear_type_list(&type_list); return FAIL; } ++*arg; *arg = skipwhite_and_linebreak(*arg, evalarg); } res = eval9(arg, rettv, evalarg, want_string); if (want_type != NULL && evaluate) { if (res == OK) { type_T *actual = typval2type(rettv, get_copyID(), &type_list, TVTT_DO_MEMBER); if (!equal_type(want_type, actual, 0)) { if (want_type == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { int n = tv2bool(rettv); // can use "0" and "1" for boolean in some places clear_tv(rettv); rettv->v_type = VAR_BOOL; rettv->vval.v_number = n ? VVAL_TRUE : VVAL_FALSE; } else { where_T where = WHERE_INIT; where.wt_variable = TRUE; res = check_type(want_type, actual, TRUE, where); } } } clear_type_list(&type_list); } return res; } int eval_leader(char_u **arg, int vim9) { char_u *s = *arg; char_u *p = *arg; while (*p == '!' || *p == '-' || *p == '+') { char_u *n = skipwhite(p + 1); // ++, --, -+ and +- are not accepted in Vim9 script if (vim9 && (*p == '-' || *p == '+') && (*n == '-' || *n == '+')) { semsg(_(e_invalid_expression_str), s); return FAIL; } p = n; } *arg = p; return OK; } /* * Check for a predefined value "true", "false" and "null.*". * Return OK when recognized. */ int handle_predefined(char_u *s, int len, typval_T *rettv) { switch (len) { case 4: if (STRNCMP(s, "true", 4) == 0) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = VVAL_TRUE; return OK; } if (STRNCMP(s, "null", 4) == 0) { rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; return OK; } break; case 5: if (STRNCMP(s, "false", 5) == 0) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = VVAL_FALSE; return OK; } break; case 8: if (STRNCMP(s, "null_job", 8) == 0) { #ifdef FEAT_JOB_CHANNEL rettv->v_type = VAR_JOB; rettv->vval.v_job = NULL; #else rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; #endif return OK; } break; case 9: if (STRNCMP(s, "null_", 5) != 0) break; if (STRNCMP(s + 5, "list", 4) == 0) { rettv->v_type = VAR_LIST; rettv->vval.v_list = NULL; return OK; } if (STRNCMP(s + 5, "dict", 4) == 0) { rettv->v_type = VAR_DICT; rettv->vval.v_dict = NULL; return OK; } if (STRNCMP(s + 5, "blob", 4) == 0) { rettv->v_type = VAR_BLOB; rettv->vval.v_blob = NULL; return OK; } break; case 11: if (STRNCMP(s, "null_string", 11) == 0) { rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; return OK; } break; case 12: if (STRNCMP(s, "null_channel", 12) == 0) { #ifdef FEAT_JOB_CHANNEL rettv->v_type = VAR_CHANNEL; rettv->vval.v_channel = NULL; #else rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; #endif return OK; } if (STRNCMP(s, "null_partial", 12) == 0) { rettv->v_type = VAR_PARTIAL; rettv->vval.v_partial = NULL; return OK; } break; case 13: if (STRNCMP(s, "null_function", 13) == 0) { rettv->v_type = VAR_FUNC; rettv->vval.v_string = NULL; return OK; } break; } return FAIL; } /* * Handle sixth level expression: * number number constant * 0zFFFFFFFF Blob constant * "string" string constant * 'string' literal string constant * &option-name option value * @r register contents * identifier variable value * function() function call * $VAR environment 
variable * (expression) nested expression * [expr, expr] List * {arg, arg -> expr} Lambda * {key: val, key: val} Dictionary * #{key: val, key: val} Dictionary with literal keys * * Also handle: * ! in front logical NOT * - in front unary minus * + in front unary plus (ignored) * trailing [] subscript in String or List * trailing .name entry in Dictionary * trailing ->name() method call * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval9( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int len; char_u *s; char_u *name_start = NULL; char_u *start_leader, *end_leader; int ret = OK; char_u *alias; static int recurse = 0; int vim9script = in_vim9script(); /* * Initialise variable so that clear_tv() can't mistake this for a * string and free a string that isn't there. */ rettv->v_type = VAR_UNKNOWN; /* * Skip '!', '-' and '+' characters. They are handled later. */ start_leader = *arg; if (eval_leader(arg, vim9script) == FAIL) return FAIL; end_leader = *arg; if (**arg == '.' && (!isdigit(*(*arg + 1)) #ifdef FEAT_FLOAT || in_old_script(2) #endif )) { semsg(_(e_invalid_expression_str), *arg); ++*arg; return FAIL; } // Limit recursion to 1000 levels. At least at 10000 we run out of stack // and crash. With MSVC the stack is smaller. if (recurse == #ifdef _MSC_VER 300 #else 1000 #endif ) { semsg(_(e_expression_too_recursive_str), *arg); return FAIL; } ++recurse; switch (**arg) { /* * Number constant. */ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '.': ret = eval_number(arg, rettv, evaluate, want_string); // Apply prefixed "-" and "+" now. Matters especially when // "->" follows. if (ret == OK && evaluate && end_leader > start_leader && rettv->v_type != VAR_BLOB) ret = eval9_leader(rettv, TRUE, start_leader, &end_leader); break; /* * String constant: "string". */ case '"': ret = eval_string(arg, rettv, evaluate, FALSE); break; /* * Literal string constant: 'str''ing'. */ case '\'': ret = eval_lit_string(arg, rettv, evaluate, FALSE); break; /* * List: [expr, expr] */ case '[': ret = eval_list(arg, rettv, evalarg, TRUE); break; /* * Dictionary: #{key: val, key: val} */ case '#': if (vim9script) { ret = vim9_bad_comment(*arg) ? FAIL : NOTDONE; } else if ((*arg)[1] == '{') { ++*arg; ret = eval_dict(arg, rettv, evalarg, TRUE); } else ret = NOTDONE; break; /* * Lambda: {arg, arg -> expr} * Dictionary: {'key': val, 'key': val} */ case '{': if (vim9script) ret = NOTDONE; else ret = get_lambda_tv(arg, rettv, vim9script, evalarg); if (ret == NOTDONE) ret = eval_dict(arg, rettv, evalarg, FALSE); break; /* * Option value: &name */ case '&': ret = eval_option(arg, rettv, evaluate); break; /* * Environment variable: $VAR. * Interpolated string: $"string" or $'string'. */ case '$': if ((*arg)[1] == '"' || (*arg)[1] == '\'') ret = eval_interp_string(arg, rettv, evaluate); else ret = eval_env_var(arg, rettv, evaluate); break; /* * Register contents: @r. */ case '@': ++*arg; if (evaluate) { if (vim9script && IS_WHITE_OR_NUL(**arg)) semsg(_(e_syntax_error_at_str), *arg); else if (vim9script && !valid_yank_reg(**arg, FALSE)) emsg_invreg(**arg); else { rettv->v_type = VAR_STRING; rettv->vval.v_string = get_reg_contents(**arg, GREG_EXPR_SRC); } } if (**arg != NUL) ++*arg; break; /* * nested expression: (expression). 
* or lambda: (arg) => expr */ case '(': ret = NOTDONE; if (vim9script) { ret = get_lambda_tv(arg, rettv, TRUE, evalarg); if (ret == OK && evaluate) { ufunc_T *ufunc = rettv->vval.v_partial->pt_func; // Compile it here to get the return type. The return // type is optional, when it's missing use t_unknown. // This is recognized in compile_return(). if (ufunc->uf_ret_type->tt_type == VAR_VOID) ufunc->uf_ret_type = &t_unknown; if (compile_def_function(ufunc, FALSE, get_compile_type(ufunc), NULL) == FAIL) { clear_tv(rettv); ret = FAIL; } } } if (ret == NOTDONE) { *arg = skipwhite_and_linebreak(*arg + 1, evalarg); ret = eval1(arg, rettv, evalarg); // recursive! *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ')') ++*arg; else if (ret == OK) { emsg(_(e_missing_closing_paren)); clear_tv(rettv); ret = FAIL; } } break; default: ret = NOTDONE; break; } if (ret == NOTDONE) { /* * Must be a variable or function name. * Can also be a curly-braces kind of name: {expr}. */ s = *arg; len = get_name_len(arg, &alias, evaluate, TRUE); if (alias != NULL) s = alias; if (len <= 0) ret = FAIL; else { int flags = evalarg == NULL ? 0 : evalarg->eval_flags; if (evaluate && vim9script && len == 1 && *s == '_') { emsg(_(e_cannot_use_underscore_here)); ret = FAIL; } else if (evaluate && vim9script && len > 2 && s[0] == 's' && s[1] == ':') { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), s); ret = FAIL; } else if ((vim9script ? **arg : *skipwhite(*arg)) == '(') { // "name(..." recursive! *arg = skipwhite(*arg); ret = eval_func(arg, evalarg, s, len, rettv, flags, NULL); } else if (flags & EVAL_CONSTANT) ret = FAIL; else if (evaluate) { // get the value of "true", "false", etc. or a variable ret = FAIL; if (vim9script) ret = handle_predefined(s, len, rettv); if (ret == FAIL) { name_start = s; ret = eval_variable(s, len, 0, rettv, NULL, EVAL_VAR_VERBOSE + EVAL_VAR_IMPORT); } } else { // skip the name check_vars(s, len); ret = OK; } } vim_free(alias); } // Handle following '[', '(' and '.' for expr[expr], expr.name, // expr(expr), expr->name(expr) if (ret == OK) ret = handle_subscript(arg, name_start, rettv, evalarg, TRUE); /* * Apply logical NOT and unary '-', from right to left, ignore '+'. */ if (ret == OK && evaluate && end_leader > start_leader) ret = eval9_leader(rettv, FALSE, start_leader, &end_leader); --recurse; return ret; } /* * Apply the leading "!" and "-" before an eval9 expression to "rettv". * When "numeric_only" is TRUE only handle "+" and "-". * Adjusts "end_leaderp" until it is at "start_leader". */ static int eval9_leader( typval_T *rettv, int numeric_only, char_u *start_leader, char_u **end_leaderp) { char_u *end_leader = *end_leaderp; int ret = OK; int error = FALSE; varnumber_T val = 0; vartype_T type = rettv->v_type; int vim9script = in_vim9script(); #ifdef FEAT_FLOAT float_T f = 0.0; if (rettv->v_type == VAR_FLOAT) f = rettv->vval.v_float; else #endif { while (VIM_ISWHITE(end_leader[-1])) --end_leader; if (vim9script && end_leader[-1] == '!') val = tv2bool(rettv); else val = tv_get_number_chk(rettv, &error); } if (error) { clear_tv(rettv); ret = FAIL; } else { while (end_leader > start_leader) { --end_leader; if (*end_leader == '!') { if (numeric_only) { ++end_leader; break; } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { if (vim9script) { rettv->v_type = VAR_BOOL; val = f == 0.0 ? 
VVAL_TRUE : VVAL_FALSE; } else f = !f; } else #endif { val = !val; type = VAR_BOOL; } } else if (*end_leader == '-') { #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) f = -f; else #endif { val = -val; type = VAR_NUMBER; } } } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { clear_tv(rettv); rettv->vval.v_float = f; } else #endif { clear_tv(rettv); if (vim9script) rettv->v_type = type; else rettv->v_type = VAR_NUMBER; rettv->vval.v_number = val; } } *end_leaderp = end_leader; return ret; } /* * Call the function referred to in "rettv". */ static int call_func_rettv( char_u **arg, evalarg_T *evalarg, typval_T *rettv, int evaluate, dict_T *selfdict, typval_T *basetv) { partial_T *pt = NULL; funcexe_T funcexe; typval_T functv; char_u *s; int ret; // need to copy the funcref so that we can clear rettv if (evaluate) { functv = *rettv; rettv->v_type = VAR_UNKNOWN; // Invoke the function. Recursive! if (functv.v_type == VAR_PARTIAL) { pt = functv.vval.v_partial; s = partial_name(pt); } else { s = functv.vval.v_string; if (s == NULL || *s == NUL) { emsg(_(e_empty_function_name)); ret = FAIL; goto theend; } } } else s = (char_u *)""; CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = evaluate; funcexe.fe_partial = pt; funcexe.fe_selfdict = selfdict; funcexe.fe_basetv = basetv; ret = get_func_tv(s, -1, rettv, arg, evalarg, &funcexe); theend: // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&functv); return ret; } /* * Evaluate "->method()". * "*arg" points to "method". * Returns FAIL or OK. "*arg" is advanced to after the ')'. */ static int eval_lambda( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); typval_T base = *rettv; int ret; rettv->v_type = VAR_UNKNOWN; if (**arg == '{') { // ->{lambda}() ret = get_lambda_tv(arg, rettv, FALSE, evalarg); } else { // ->(lambda)() ++*arg; ret = eval1(arg, rettv, evalarg); *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg != ')') { emsg(_(e_missing_closing_paren)); return FAIL; } if (rettv->v_type != VAR_STRING && rettv->v_type != VAR_FUNC && rettv->v_type != VAR_PARTIAL) { emsg(_(e_string_or_function_required_for_arrow_parens_expr)); return FAIL; } ++*arg; } if (ret != OK) return FAIL; if (**arg != '(') { if (verbose) { if (*skipwhite(*arg) == '(') emsg(_(e_no_white_space_allowed_before_parenthesis)); else semsg(_(e_missing_parenthesis_str), "lambda"); } clear_tv(rettv); ret = FAIL; } else ret = call_func_rettv(arg, evalarg, rettv, evaluate, NULL, &base); // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&base); return ret; } /* * Evaluate "->method()". * "*arg" points to "method". * Returns FAIL or OK. "*arg" is advanced to after the ')'. 
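 * E.g. for "mylist->add(3)" this is entered with "*arg" at "add(3)" and
 * the base value (mylist) becomes the first argument of add().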
*/ static int eval_method( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { char_u *name; long len; char_u *alias; char_u *tofree = NULL; typval_T base = *rettv; int ret = OK; int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); rettv->v_type = VAR_UNKNOWN; name = *arg; len = get_name_len(arg, &alias, evaluate, TRUE); if (alias != NULL) name = alias; if (len <= 0) { if (verbose) emsg(_(e_missing_name_after_method)); ret = FAIL; } else { char_u *paren; // If there is no "(" immediately following, but there is further on, // it can be "import.Func()", "dict.Func()", "list[nr]", etc. // Does not handle anything where "(" is part of the expression. *arg = skipwhite(*arg); if (**arg != '(' && alias == NULL && (paren = vim_strchr(*arg, '(')) != NULL) { char_u *deref; *arg = name; *paren = NUL; deref = deref_function_name(arg, &tofree, evalarg, verbose); if (deref == NULL) { *arg = name + len; ret = FAIL; } else { name = deref; len = (long)STRLEN(name); } *paren = '('; } if (ret == OK) { *arg = skipwhite(*arg); if (**arg != '(') { if (verbose) semsg(_(e_missing_parenthesis_str), name); ret = FAIL; } else if (VIM_ISWHITE((*arg)[-1])) { if (verbose) emsg(_(e_no_white_space_allowed_before_parenthesis)); ret = FAIL; } else ret = eval_func(arg, evalarg, name, len, rettv, evaluate ? EVAL_EVALUATE : 0, &base); } } // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&base); vim_free(tofree); return ret; } /* * Evaluate an "[expr]" or "[expr:expr]" index. Also "dict.key". * "*arg" points to the '[' or '.'. * Returns FAIL or OK. "*arg" is advanced to after the ']'. */ static int eval_index( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int empty1 = FALSE, empty2 = FALSE; typval_T var1, var2; int range = FALSE; char_u *key = NULL; int keylen = -1; int vim9script = in_vim9script(); if (check_can_index(rettv, evaluate, verbose) == FAIL) return FAIL; init_tv(&var1); init_tv(&var2); if (**arg == '.') { /* * dict.name */ key = *arg + 1; for (keylen = 0; eval_isdictc(key[keylen]); ++keylen) ; if (keylen == 0) return FAIL; *arg = key + keylen; } else { /* * something[idx] * * Get the (first) variable from inside the []. */ *arg = skipwhite_and_linebreak(*arg + 1, evalarg); if (**arg == ':') empty1 = TRUE; else if (eval1(arg, &var1, evalarg) == FAIL) // recursive! return FAIL; else if (vim9script && **arg == ':') { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", *arg); clear_tv(&var1); return FAIL; } else if (evaluate) { int error = FALSE; #ifdef FEAT_FLOAT // allow for indexing with float if (vim9script && rettv->v_type == VAR_DICT && var1.v_type == VAR_FLOAT) { var1.vval.v_string = typval_tostring(&var1, TRUE); var1.v_type = VAR_STRING; } #endif if (vim9script && rettv->v_type == VAR_LIST) tv_get_number_chk(&var1, &error); else error = tv_get_string_chk(&var1) == NULL; if (error) { // not a number or string clear_tv(&var1); return FAIL; } } /* * Get the second variable from inside the [:]. 
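	 * E.g. for "l[1:3]" this evaluates the "3"; in "l[1:]" it is
	 * omitted and the slice extends to the end.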
*/ *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ':') { range = TRUE; ++*arg; if (vim9script && !IS_WHITE_OR_NUL(**arg) && **arg != ']') { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", *arg - 1); if (!empty1) clear_tv(&var1); return FAIL; } *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ']') empty2 = TRUE; else if (eval1(arg, &var2, evalarg) == FAIL) // recursive! { if (!empty1) clear_tv(&var1); return FAIL; } else if (evaluate && tv_get_string_chk(&var2) == NULL) { // not a number or string if (!empty1) clear_tv(&var1); clear_tv(&var2); return FAIL; } } // Check for the ']'. *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg != ']') { if (verbose) emsg(_(e_missing_closing_square_brace)); clear_tv(&var1); if (range) clear_tv(&var2); return FAIL; } *arg = *arg + 1; // skip over the ']' } if (evaluate) { int res = eval_index_inner(rettv, range, empty1 ? NULL : &var1, empty2 ? NULL : &var2, FALSE, key, keylen, verbose); if (!empty1) clear_tv(&var1); if (range) clear_tv(&var2); return res; } return OK; } /* * Check if "rettv" can have an [index] or [sli:ce] */ int check_can_index(typval_T *rettv, int evaluate, int verbose) { switch (rettv->v_type) { case VAR_FUNC: case VAR_PARTIAL: if (verbose) emsg(_(e_cannot_index_a_funcref)); return FAIL; case VAR_FLOAT: #ifdef FEAT_FLOAT if (verbose) emsg(_(e_using_float_as_string)); return FAIL; #endif case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: if (verbose) emsg(_(e_cannot_index_special_variable)); return FAIL; case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: if (evaluate) { emsg(_(e_cannot_index_special_variable)); return FAIL; } // FALLTHROUGH case VAR_STRING: case VAR_LIST: case VAR_DICT: case VAR_BLOB: break; case VAR_NUMBER: if (in_vim9script()) emsg(_(e_cannot_index_number)); break; } return OK; } /* * slice() function */ void f_slice(typval_T *argvars, typval_T *rettv) { if (in_vim9script() && ((argvars[0].v_type != VAR_STRING && argvars[0].v_type != VAR_LIST && argvars[0].v_type != VAR_BLOB && check_for_list_arg(argvars, 0) == FAIL) || check_for_number_arg(argvars, 1) == FAIL || check_for_opt_number_arg(argvars, 2) == FAIL)) return; if (check_can_index(argvars, TRUE, FALSE) == OK) { copy_tv(argvars, rettv); eval_index_inner(rettv, TRUE, argvars + 1, argvars[2].v_type == VAR_UNKNOWN ? NULL : argvars + 2, TRUE, NULL, 0, FALSE); } } /* * Apply index or range to "rettv". * "var1" is the first index, NULL for [:expr]. * "var2" is the second index, NULL for [expr] and [expr: ] * "exclusive" is TRUE for slice(): second index is exclusive, use character * index for string. * Alternatively, "key" is not NULL, then key[keylen] is the dict index. 
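 * Examples: "mystr[1]" passes only "var1"; "mylist[1:2]" passes "var1"
 * and "var2"; "mydict.name" passes "key" with "keylen" 4.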
*/ int eval_index_inner( typval_T *rettv, int is_range, typval_T *var1, typval_T *var2, int exclusive, char_u *key, int keylen, int verbose) { varnumber_T n1, n2 = 0; long len; n1 = 0; if (var1 != NULL && rettv->v_type != VAR_DICT) n1 = tv_get_number(var1); if (is_range) { if (rettv->v_type == VAR_DICT) { if (verbose) emsg(_(e_cannot_slice_dictionary)); return FAIL; } if (var2 != NULL) n2 = tv_get_number(var2); else n2 = VARNUM_MAX; } switch (rettv->v_type) { case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: case VAR_FUNC: case VAR_PARTIAL: case VAR_FLOAT: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: break; // not evaluating, skipping over subscript case VAR_NUMBER: case VAR_STRING: { char_u *s = tv_get_string(rettv); len = (long)STRLEN(s); if (in_vim9script() || exclusive) { if (is_range) s = string_slice(s, n1, n2, exclusive); else s = char_from_string(s, n1); } else if (is_range) { // The resulting variable is a substring. If the indexes // are out of range the result is empty. if (n1 < 0) { n1 = len + n1; if (n1 < 0) n1 = 0; } if (n2 < 0) n2 = len + n2; else if (n2 >= len) n2 = len; if (n1 >= len || n2 < 0 || n1 > n2) s = NULL; else s = vim_strnsave(s + n1, n2 - n1 + 1); } else { // The resulting variable is a string of a single // character. If the index is too big or negative the // result is empty. if (n1 >= len || n1 < 0) s = NULL; else s = vim_strnsave(s + n1, 1); } clear_tv(rettv); rettv->v_type = VAR_STRING; rettv->vval.v_string = s; } break; case VAR_BLOB: blob_slice_or_index(rettv->vval.v_blob, is_range, n1, n2, exclusive, rettv); break; case VAR_LIST: if (var1 == NULL) n1 = 0; if (var2 == NULL) n2 = VARNUM_MAX; if (list_slice_or_index(rettv->vval.v_list, is_range, n1, n2, exclusive, rettv, verbose) == FAIL) return FAIL; break; case VAR_DICT: { dictitem_T *item; typval_T tmp; if (key == NULL) { key = tv_get_string_chk(var1); if (key == NULL) return FAIL; } item = dict_find(rettv->vval.v_dict, key, keylen); if (item == NULL) { if (verbose) { if (keylen > 0) key[keylen] = NUL; semsg(_(e_key_not_present_in_dictionary), key); } return FAIL; } copy_tv(&item->di_tv, &tmp); clear_tv(rettv); *rettv = tmp; } break; } return OK; } /* * Return the function name of partial "pt". */ char_u * partial_name(partial_T *pt) { if (pt != NULL) { if (pt->pt_name != NULL) return pt->pt_name; if (pt->pt_func != NULL) return pt->pt_func->uf_name; } return (char_u *)""; } static void partial_free(partial_T *pt) { int i; for (i = 0; i < pt->pt_argc; ++i) clear_tv(&pt->pt_argv[i]); vim_free(pt->pt_argv); dict_unref(pt->pt_dict); if (pt->pt_name != NULL) { func_unref(pt->pt_name); vim_free(pt->pt_name); } else func_ptr_unref(pt->pt_func); // "out_up" is no longer used, decrement refcount on partial that owns it. partial_unref(pt->pt_outer.out_up_partial); // Using pt_outer from another partial. partial_unref(pt->pt_outer_partial); // Decrease the reference count for the context of a closure. If down // to the minimum it may be time to free it. if (pt->pt_funcstack != NULL) { --pt->pt_funcstack->fs_refcount; funcstack_check_refcount(pt->pt_funcstack); } vim_free(pt); } /* * Unreference a closure: decrement the reference count and free it when it * becomes zero. */ void partial_unref(partial_T *pt) { if (pt != NULL) { if (--pt->pt_refcount <= 0) partial_free(pt); // If the reference count goes down to one, the funcstack may be the // only reference and can be freed if no other partials reference it. 
else if (pt->pt_refcount == 1 && pt->pt_funcstack != NULL) funcstack_check_refcount(pt->pt_funcstack); } } /* * Return the next (unique) copy ID. * Used for serializing nested structures. */ int get_copyID(void) { current_copyID += COPYID_INC; return current_copyID; } /* * Garbage collection for lists and dictionaries. * * We use reference counts to be able to free most items right away when they * are no longer used. But for composite items it's possible that it becomes * unused while the reference count is > 0: When there is a recursive * reference. Example: * :let l = [1, 2, 3] * :let d = {9: l} * :let l[1] = d * * Since this is quite unusual we handle this with garbage collection: every * once in a while find out which lists and dicts are not referenced from any * variable. * * Here is a good reference text about garbage collection (refers to Python * but it applies to all reference-counting mechanisms): * http://python.ca/nas/python/gc/ */ /* * Do garbage collection for lists and dicts. * When "testing" is TRUE this is called from test_garbagecollect_now(). * Return TRUE if some memory was freed. */ int garbage_collect(int testing) { int copyID; int abort = FALSE; buf_T *buf; win_T *wp; int did_free = FALSE; tabpage_T *tp; if (!testing) { // Only do this once. want_garbage_collect = FALSE; may_garbage_collect = FALSE; garbage_collect_at_exit = FALSE; } // The execution stack can grow big, limit the size. if (exestack.ga_maxlen - exestack.ga_len > 500) { size_t new_len; char_u *pp; int n; // Keep 150% of the current size, with a minimum of the growth size. n = exestack.ga_len / 2; if (n < exestack.ga_growsize) n = exestack.ga_growsize; // Don't make it bigger though. if (exestack.ga_len + n < exestack.ga_maxlen) { new_len = (size_t)exestack.ga_itemsize * (exestack.ga_len + n); pp = vim_realloc(exestack.ga_data, new_len); if (pp == NULL) return FAIL; exestack.ga_maxlen = exestack.ga_len + n; exestack.ga_data = pp; } } // We advance by two because we add one for items referenced through // previous_funccal. copyID = get_copyID(); /* * 1. Go through all accessible variables and mark all lists and dicts * with copyID. */ // Don't free variables in the previous_funccal list unless they are only // referenced through previous_funccal. This must be first, because if // the item is referenced elsewhere the funccal must not be freed. abort = abort || set_ref_in_previous_funccal(copyID); // script-local variables abort = abort || garbage_collect_scriptvars(copyID); // buffer-local variables FOR_ALL_BUFFERS(buf) abort = abort || set_ref_in_item(&buf->b_bufvar.di_tv, copyID, NULL, NULL); // window-local variables FOR_ALL_TAB_WINDOWS(tp, wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); if (aucmd_win != NULL) abort = abort || set_ref_in_item(&aucmd_win->w_winvar.di_tv, copyID, NULL, NULL); #ifdef FEAT_PROP_POPUP FOR_ALL_POPUPWINS(wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); FOR_ALL_TABPAGES(tp) FOR_ALL_POPUPWINS_IN_TAB(tp, wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); #endif // tabpage-local variables FOR_ALL_TABPAGES(tp) abort = abort || set_ref_in_item(&tp->tp_winvar.di_tv, copyID, NULL, NULL); // global variables abort = abort || garbage_collect_globvars(copyID); // function-local variables abort = abort || set_ref_in_call_stack(copyID); // named functions (matters for closures) abort = abort || set_ref_in_functions(copyID); // function call arguments, if v:testing is set. 
abort = abort || set_ref_in_func_args(copyID); // funcstacks keep variables for closures abort = abort || set_ref_in_funcstacks(copyID); // v: vars abort = abort || garbage_collect_vimvars(copyID); // callbacks in buffers abort = abort || set_ref_in_buffers(copyID); // 'completefunc', 'omnifunc' and 'thesaurusfunc' callbacks abort = abort || set_ref_in_insexpand_funcs(copyID); // 'operatorfunc' callback abort = abort || set_ref_in_opfunc(copyID); // 'tagfunc' callback abort = abort || set_ref_in_tagfunc(copyID); // 'imactivatefunc' and 'imstatusfunc' callbacks abort = abort || set_ref_in_im_funcs(copyID); #ifdef FEAT_LUA abort = abort || set_ref_in_lua(copyID); #endif #ifdef FEAT_PYTHON abort = abort || set_ref_in_python(copyID); #endif #ifdef FEAT_PYTHON3 abort = abort || set_ref_in_python3(copyID); #endif #ifdef FEAT_JOB_CHANNEL abort = abort || set_ref_in_channel(copyID); abort = abort || set_ref_in_job(copyID); #endif #ifdef FEAT_NETBEANS_INTG abort = abort || set_ref_in_nb_channel(copyID); #endif #ifdef FEAT_TIMERS abort = abort || set_ref_in_timer(copyID); #endif #ifdef FEAT_QUICKFIX abort = abort || set_ref_in_quickfix(copyID); #endif #ifdef FEAT_TERMINAL abort = abort || set_ref_in_term(copyID); #endif #ifdef FEAT_PROP_POPUP abort = abort || set_ref_in_popups(copyID); #endif if (!abort) { /* * 2. Free lists and dictionaries that are not referenced. */ did_free = free_unref_items(copyID); /* * 3. Check if any funccal can be freed now. * This may call us back recursively. */ free_unref_funccal(copyID, testing); } else if (p_verbose > 0) { verb_msg(_("Not enough memory to set references, garbage collection aborted!")); } return did_free; } /* * Free lists, dictionaries, channels and jobs that are no longer referenced. */ static int free_unref_items(int copyID) { int did_free = FALSE; // Let all "free" functions know that we are here. This means no // dictionaries, lists, channels or jobs are to be freed, because we will // do that here. in_free_unref_items = TRUE; /* * PASS 1: free the contents of the items. We don't free the items * themselves yet, so that it is possible to decrement refcount counters */ // Go through the list of dicts and free items without the copyID. did_free |= dict_free_nonref(copyID); // Go through the list of lists and free items without the copyID. did_free |= list_free_nonref(copyID); #ifdef FEAT_JOB_CHANNEL // Go through the list of jobs and free items without the copyID. This // must happen before doing channels, because jobs refer to channels, but // the reference from the channel to the job isn't tracked. did_free |= free_unused_jobs_contents(copyID, COPYID_MASK); // Go through the list of channels and free items without the copyID. did_free |= free_unused_channels_contents(copyID, COPYID_MASK); #endif /* * PASS 2: free the items themselves. */ dict_free_items(copyID); list_free_items(copyID); #ifdef FEAT_JOB_CHANNEL // Go through the list of jobs and free items without the copyID. This // must happen before doing channels, because jobs refer to channels, but // the reference from the channel to the job isn't tracked. free_unused_jobs(copyID, COPYID_MASK); // Go through the list of channels and free items without the copyID. free_unused_channels(copyID, COPYID_MASK); #endif in_free_unref_items = FALSE; return did_free; } /* * Mark all lists and dicts referenced through hashtab "ht" with "copyID". * "list_stack" is used to add lists to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. 
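 * Uses an explicit stack of hashtabs still to be marked instead of
 * recursion, so deeply nested Dictionaries do not exhaust the C stack.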
*/ int set_ref_in_ht(hashtab_T *ht, int copyID, list_stack_T **list_stack) { int todo; int abort = FALSE; hashitem_T *hi; hashtab_T *cur_ht; ht_stack_T *ht_stack = NULL; ht_stack_T *tempitem; cur_ht = ht; for (;;) { if (!abort) { // Mark each item in the hashtab. If the item contains a hashtab // it is added to ht_stack, if it contains a list it is added to // list_stack. todo = (int)cur_ht->ht_used; for (hi = cur_ht->ht_array; todo > 0; ++hi) if (!HASHITEM_EMPTY(hi)) { --todo; abort = abort || set_ref_in_item(&HI2DI(hi)->di_tv, copyID, &ht_stack, list_stack); } } if (ht_stack == NULL) break; // take an item from the stack cur_ht = ht_stack->ht; tempitem = ht_stack; ht_stack = ht_stack->prev; free(tempitem); } return abort; } #if defined(FEAT_LUA) || defined(FEAT_PYTHON) || defined(FEAT_PYTHON3) \ || defined(PROTO) /* * Mark a dict and its items with "copyID". * Returns TRUE if setting references failed somehow. */ int set_ref_in_dict(dict_T *d, int copyID) { if (d != NULL && d->dv_copyID != copyID) { d->dv_copyID = copyID; return set_ref_in_ht(&d->dv_hashtab, copyID, NULL); } return FALSE; } #endif /* * Mark a list and its items with "copyID". * Returns TRUE if setting references failed somehow. */ int set_ref_in_list(list_T *ll, int copyID) { if (ll != NULL && ll->lv_copyID != copyID) { ll->lv_copyID = copyID; return set_ref_in_list_items(ll, copyID, NULL); } return FALSE; } /* * Mark all lists and dicts referenced through list "l" with "copyID". * "ht_stack" is used to add hashtabs to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_list_items(list_T *l, int copyID, ht_stack_T **ht_stack) { listitem_T *li; int abort = FALSE; list_T *cur_l; list_stack_T *list_stack = NULL; list_stack_T *tempitem; cur_l = l; for (;;) { if (!abort && cur_l->lv_first != &range_list_item) // Mark each item in the list. If the item contains a hashtab // it is added to ht_stack, if it contains a list it is added to // list_stack. for (li = cur_l->lv_first; !abort && li != NULL; li = li->li_next) abort = abort || set_ref_in_item(&li->li_tv, copyID, ht_stack, &list_stack); if (list_stack == NULL) break; // take an item from the stack cur_l = list_stack->list; tempitem = list_stack; list_stack = list_stack->prev; free(tempitem); } return abort; } /* * Mark the partial in callback 'cb' with "copyID". */ int set_ref_in_callback(callback_T *cb, int copyID) { typval_T tv; if (cb->cb_name == NULL || *cb->cb_name == NUL || cb->cb_partial == NULL) return FALSE; tv.v_type = VAR_PARTIAL; tv.vval.v_partial = cb->cb_partial; return set_ref_in_item(&tv, copyID, NULL, NULL); } /* * Mark all lists and dicts referenced through typval "tv" with "copyID". * "list_stack" is used to add lists to be marked. Can be NULL. * "ht_stack" is used to add hashtabs to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_item( typval_T *tv, int copyID, ht_stack_T **ht_stack, list_stack_T **list_stack) { int abort = FALSE; if (tv->v_type == VAR_DICT) { dict_T *dd = tv->vval.v_dict; if (dd != NULL && dd->dv_copyID != copyID) { // Didn't see this dict yet. 
dd->dv_copyID = copyID; if (ht_stack == NULL) { abort = set_ref_in_ht(&dd->dv_hashtab, copyID, list_stack); } else { ht_stack_T *newitem = ALLOC_ONE(ht_stack_T); if (newitem == NULL) abort = TRUE; else { newitem->ht = &dd->dv_hashtab; newitem->prev = *ht_stack; *ht_stack = newitem; } } } } else if (tv->v_type == VAR_LIST) { list_T *ll = tv->vval.v_list; if (ll != NULL && ll->lv_copyID != copyID) { // Didn't see this list yet. ll->lv_copyID = copyID; if (list_stack == NULL) { abort = set_ref_in_list_items(ll, copyID, ht_stack); } else { list_stack_T *newitem = ALLOC_ONE(list_stack_T); if (newitem == NULL) abort = TRUE; else { newitem->list = ll; newitem->prev = *list_stack; *list_stack = newitem; } } } } else if (tv->v_type == VAR_FUNC) { abort = set_ref_in_func(tv->vval.v_string, NULL, copyID); } else if (tv->v_type == VAR_PARTIAL) { partial_T *pt = tv->vval.v_partial; int i; if (pt != NULL && pt->pt_copyID != copyID) { // Didn't see this partial yet. pt->pt_copyID = copyID; abort = set_ref_in_func(pt->pt_name, pt->pt_func, copyID); if (pt->pt_dict != NULL) { typval_T dtv; dtv.v_type = VAR_DICT; dtv.vval.v_dict = pt->pt_dict; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } for (i = 0; i < pt->pt_argc; ++i) abort = abort || set_ref_in_item(&pt->pt_argv[i], copyID, ht_stack, list_stack); // pt_funcstack is handled in set_ref_in_funcstacks() } } #ifdef FEAT_JOB_CHANNEL else if (tv->v_type == VAR_JOB) { job_T *job = tv->vval.v_job; typval_T dtv; if (job != NULL && job->jv_copyID != copyID) { job->jv_copyID = copyID; if (job->jv_channel != NULL) { dtv.v_type = VAR_CHANNEL; dtv.vval.v_channel = job->jv_channel; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } if (job->jv_exit_cb.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = job->jv_exit_cb.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } } } else if (tv->v_type == VAR_CHANNEL) { channel_T *ch =tv->vval.v_channel; ch_part_T part; typval_T dtv; jsonq_T *jq; cbq_T *cq; if (ch != NULL && ch->ch_copyID != copyID) { ch->ch_copyID = copyID; for (part = PART_SOCK; part < PART_COUNT; ++part) { for (jq = ch->ch_part[part].ch_json_head.jq_next; jq != NULL; jq = jq->jq_next) set_ref_in_item(jq->jq_value, copyID, ht_stack, list_stack); for (cq = ch->ch_part[part].ch_cb_head.cq_next; cq != NULL; cq = cq->cq_next) if (cq->cq_callback.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = cq->cq_callback.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } if (ch->ch_part[part].ch_callback.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = ch->ch_part[part].ch_callback.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } } if (ch->ch_callback.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = ch->ch_callback.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } if (ch->ch_close_cb.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = ch->ch_close_cb.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } } } #endif return abort; } /* * Return a string with the string representation of a variable. * If the memory is allocated "tofree" is set to it, otherwise NULL. * "numbuf" is used for a number. * When "copyID" is not NULL replace recursive lists and dicts with "...". * When both "echo_style" and "composite_val" are FALSE, put quotes around * strings as "string()", otherwise does not put quotes around strings, as * ":echo" displays values. 
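 * E.g. the string abc becomes 'abc' in string() style and plain abc in
 * ":echo" style.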
* When "restore_copyID" is FALSE, repeated items in dictionaries and lists * are replaced with "...". * May return NULL. */ char_u * echo_string_core( typval_T *tv, char_u **tofree, char_u *numbuf, int copyID, int echo_style, int restore_copyID, int composite_val) { static int recurse = 0; char_u *r = NULL; if (recurse >= DICT_MAXNEST) { if (!did_echo_string_emsg) { // Only give this message once for a recursive call to avoid // flooding the user with errors. And stop iterating over lists // and dicts. did_echo_string_emsg = TRUE; emsg(_(e_variable_nested_too_deep_for_displaying)); } *tofree = NULL; return (char_u *)"{E724}"; } ++recurse; switch (tv->v_type) { case VAR_STRING: if (echo_style && !composite_val) { *tofree = NULL; r = tv->vval.v_string; if (r == NULL) r = (char_u *)""; } else { *tofree = string_quote(tv->vval.v_string, FALSE); r = *tofree; } break; case VAR_FUNC: { char_u buf[MAX_FUNC_NAME_LEN]; if (echo_style) { r = tv->vval.v_string == NULL ? (char_u *)"function()" : make_ufunc_name_readable(tv->vval.v_string, buf, MAX_FUNC_NAME_LEN); if (r == buf) { r = vim_strsave(buf); *tofree = r; } else *tofree = NULL; } else { *tofree = string_quote(tv->vval.v_string == NULL ? NULL : make_ufunc_name_readable( tv->vval.v_string, buf, MAX_FUNC_NAME_LEN), TRUE); r = *tofree; } } break; case VAR_PARTIAL: { partial_T *pt = tv->vval.v_partial; char_u *fname = string_quote(pt == NULL ? NULL : partial_name(pt), FALSE); garray_T ga; int i; char_u *tf; ga_init2(&ga, 1, 100); ga_concat(&ga, (char_u *)"function("); if (fname != NULL) { // When using uf_name prepend "g:" for a global function. if (pt != NULL && pt->pt_name == NULL && fname[0] == '\'' && vim_isupper(fname[1])) { ga_concat(&ga, (char_u *)"'g:"); ga_concat(&ga, fname + 1); } else ga_concat(&ga, fname); vim_free(fname); } if (pt != NULL && pt->pt_argc > 0) { ga_concat(&ga, (char_u *)", ["); for (i = 0; i < pt->pt_argc; ++i) { if (i > 0) ga_concat(&ga, (char_u *)", "); ga_concat(&ga, tv2string(&pt->pt_argv[i], &tf, numbuf, copyID)); vim_free(tf); } ga_concat(&ga, (char_u *)"]"); } if (pt != NULL && pt->pt_dict != NULL) { typval_T dtv; ga_concat(&ga, (char_u *)", "); dtv.v_type = VAR_DICT; dtv.vval.v_dict = pt->pt_dict; ga_concat(&ga, tv2string(&dtv, &tf, numbuf, copyID)); vim_free(tf); } // terminate with ')' and a NUL ga_concat_len(&ga, (char_u *)")", 2); *tofree = ga.ga_data; r = *tofree; break; } case VAR_BLOB: r = blob2string(tv->vval.v_blob, tofree, numbuf); break; case VAR_LIST: if (tv->vval.v_list == NULL) { // NULL list is equivalent to empty list. *tofree = NULL; r = (char_u *)"[]"; } else if (copyID != 0 && tv->vval.v_list->lv_copyID == copyID && tv->vval.v_list->lv_len > 0) { *tofree = NULL; r = (char_u *)"[...]"; } else { int old_copyID = tv->vval.v_list->lv_copyID; tv->vval.v_list->lv_copyID = copyID; *tofree = list2string(tv, copyID, restore_copyID); if (restore_copyID) tv->vval.v_list->lv_copyID = old_copyID; r = *tofree; } break; case VAR_DICT: if (tv->vval.v_dict == NULL) { // NULL dict is equivalent to empty dict. 
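	    // Both are displayed as "{}".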
*tofree = NULL; r = (char_u *)"{}"; } else if (copyID != 0 && tv->vval.v_dict->dv_copyID == copyID && tv->vval.v_dict->dv_hashtab.ht_used != 0) { *tofree = NULL; r = (char_u *)"{...}"; } else { int old_copyID = tv->vval.v_dict->dv_copyID; tv->vval.v_dict->dv_copyID = copyID; *tofree = dict2string(tv, copyID, restore_copyID); if (restore_copyID) tv->vval.v_dict->dv_copyID = old_copyID; r = *tofree; } break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: *tofree = NULL; r = tv_get_string_buf(tv, numbuf); break; case VAR_JOB: case VAR_CHANNEL: #ifdef FEAT_JOB_CHANNEL *tofree = NULL; r = tv->v_type == VAR_JOB ? job_to_string_buf(tv, numbuf) : channel_to_string_buf(tv, numbuf); if (composite_val) { *tofree = string_quote(r, FALSE); r = *tofree; } #endif break; case VAR_INSTR: *tofree = NULL; r = (char_u *)"instructions"; break; case VAR_FLOAT: #ifdef FEAT_FLOAT *tofree = NULL; vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float); r = numbuf; break; #endif case VAR_BOOL: case VAR_SPECIAL: *tofree = NULL; r = (char_u *)get_var_special_name(tv->vval.v_number); break; } if (--recurse == 0) did_echo_string_emsg = FALSE; return r; } /* * Return a string with the string representation of a variable. * If the memory is allocated "tofree" is set to it, otherwise NULL. * "numbuf" is used for a number. * Does not put quotes around strings, as ":echo" displays values. * When "copyID" is not NULL replace recursive lists and dicts with "...". * May return NULL. */ char_u * echo_string( typval_T *tv, char_u **tofree, char_u *numbuf, int copyID) { return echo_string_core(tv, tofree, numbuf, copyID, TRUE, FALSE, FALSE); } /* * Convert the specified byte index of line 'lnum' in buffer 'buf' to a * character index. Works only for loaded buffers. Returns -1 on failure. * The index of the first byte and the first character is zero. */ int buf_byteidx_to_charidx(buf_T *buf, int lnum, int byteidx) { char_u *str; char_u *t; int count; if (buf == NULL || buf->b_ml.ml_mfp == NULL) return -1; if (lnum > buf->b_ml.ml_line_count) lnum = buf->b_ml.ml_line_count; str = ml_get_buf(buf, lnum, FALSE); if (str == NULL) return -1; if (*str == NUL) return 0; // count the number of characters t = str; for (count = 0; *t != NUL && t <= str + byteidx; count++) t += mb_ptr2len(t); // In insert mode, when the cursor is at the end of a non-empty line, // byteidx points to the NUL character immediately past the end of the // string. In this case, add one to the character count. if (*t == NUL && byteidx != 0 && t == str + byteidx) count++; return count - 1; } /* * Convert the specified character index of line 'lnum' in buffer 'buf' to a * byte index. Works only for loaded buffers. Returns -1 on failure. * The index of the first byte and the first character is zero. */ int buf_charidx_to_byteidx(buf_T *buf, int lnum, int charidx) { char_u *str; char_u *t; if (buf == NULL || buf->b_ml.ml_mfp == NULL) return -1; if (lnum > buf->b_ml.ml_line_count) lnum = buf->b_ml.ml_line_count; str = ml_get_buf(buf, lnum, FALSE); if (str == NULL) return -1; // Convert the character offset to a byte offset t = str; while (*t != NUL && --charidx > 0) t += mb_ptr2len(t); return t - str; } /* * Translate a String variable into a position. * Returns NULL when there is an error. */ pos_T * var2fpos( typval_T *varp, int dollar_lnum, // TRUE when $ is last line int *fnum, // set to fnum for '0, 'A, etc. int charcol) // return character column { char_u *name; static pos_T pos; pos_T *pp; // Argument can be [lnum, col, coladd]. 
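    // Such a List is used e.g. by col() and virtcol(), which accept
    // [lnum, col] as well as a mark like "'t".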
if (varp->v_type == VAR_LIST) { list_T *l; int len; int error = FALSE; listitem_T *li; l = varp->vval.v_list; if (l == NULL) return NULL; // Get the line number pos.lnum = list_find_nr(l, 0L, &error); if (error || pos.lnum <= 0 || pos.lnum > curbuf->b_ml.ml_line_count) return NULL; // invalid line number if (charcol) len = (long)mb_charlen(ml_get(pos.lnum)); else len = (long)STRLEN(ml_get(pos.lnum)); // Get the column number // We accept "$" for the column number: last column. li = list_find(l, 1L); if (li != NULL && li->li_tv.v_type == VAR_STRING && li->li_tv.vval.v_string != NULL && STRCMP(li->li_tv.vval.v_string, "$") == 0) { pos.col = len + 1; } else { pos.col = list_find_nr(l, 1L, &error); if (error) return NULL; } // Accept a position up to the NUL after the line. if (pos.col == 0 || (int)pos.col > len + 1) return NULL; // invalid column number --pos.col; // Get the virtual offset. Defaults to zero. pos.coladd = list_find_nr(l, 2L, &error); if (error) pos.coladd = 0; return &pos; } if (in_vim9script() && check_for_string_arg(varp, 0) == FAIL) return NULL; name = tv_get_string_chk(varp); if (name == NULL) return NULL; pos.lnum = 0; if (name[0] == '.' && (!in_vim9script() || name[1] == NUL)) { // cursor pos = curwin->w_cursor; } else if (name[0] == 'v' && name[1] == NUL) { // Visual start if (VIsual_active) pos = VIsual; else pos = curwin->w_cursor; } else if (name[0] == '\'' && (!in_vim9script() || (name[1] != NUL && name[2] == NUL))) { // mark pp = getmark_buf_fnum(curbuf, name[1], FALSE, fnum); if (pp == NULL || pp == (pos_T *)-1 || pp->lnum <= 0) return NULL; pos = *pp; } if (pos.lnum != 0) { if (charcol) pos.col = buf_byteidx_to_charidx(curbuf, pos.lnum, pos.col); return &pos; } pos.coladd = 0; if (name[0] == 'w' && dollar_lnum) { pos.col = 0; if (name[1] == '0') // "w0": first visible line { update_topline(); // In silent Ex mode topline is zero, but that's not a valid line // number; use one instead. pos.lnum = curwin->w_topline > 0 ? curwin->w_topline : 1; return &pos; } else if (name[1] == '$') // "w$": last visible line { validate_botline(); // In silent Ex mode botline is zero, return zero then. pos.lnum = curwin->w_botline > 0 ? curwin->w_botline - 1 : 0; return &pos; } } else if (name[0] == '$') // last column or line { if (dollar_lnum) { pos.lnum = curbuf->b_ml.ml_line_count; pos.col = 0; } else { pos.lnum = curwin->w_cursor.lnum; if (charcol) pos.col = (colnr_T)mb_charlen(ml_get_curline()); else pos.col = (colnr_T)STRLEN(ml_get_curline()); } return &pos; } if (in_vim9script()) semsg(_(e_invalid_value_for_line_number_str), name); return NULL; } /* * Convert list in "arg" into a position and optional file number. * When "fnump" is NULL there is no file number, only 3 items. * Note that the column is passed on as-is, the caller may want to decrement * it to use 1 for the first column. * Return FAIL when conversion is not possible, doesn't check the position for * validity. */ int list2fpos( typval_T *arg, pos_T *posp, int *fnump, colnr_T *curswantp, int charcol) { list_T *l = arg->vval.v_list; long i = 0; long n; // List must be: [fnum, lnum, col, coladd, curswant], where "fnum" is only // there when "fnump" isn't NULL; "coladd" and "curswant" are optional. if (arg->v_type != VAR_LIST || l == NULL || l->lv_len < (fnump == NULL ? 2 : 3) || l->lv_len > (fnump == NULL ? 
4 : 5)) return FAIL; if (fnump != NULL) { n = list_find_nr(l, i++, NULL); // fnum if (n < 0) return FAIL; if (n == 0) n = curbuf->b_fnum; // current buffer *fnump = n; } n = list_find_nr(l, i++, NULL); // lnum if (n < 0) return FAIL; posp->lnum = n; n = list_find_nr(l, i++, NULL); // col if (n < 0) return FAIL; // If character position is specified, then convert to byte position if (charcol) { buf_T *buf; // Get the text for the specified line in a loaded buffer buf = buflist_findnr(fnump == NULL ? curbuf->b_fnum : *fnump); if (buf == NULL || buf->b_ml.ml_mfp == NULL) return FAIL; n = buf_charidx_to_byteidx(buf, posp->lnum, n) + 1; } posp->col = n; n = list_find_nr(l, i, NULL); // off if (n < 0) posp->coladd = 0; else posp->coladd = n; if (curswantp != NULL) *curswantp = list_find_nr(l, i + 1, NULL); // curswant return OK; } /* * Get the length of an environment variable name. * Advance "arg" to the first character after the name. * Return 0 for error. */ int get_env_len(char_u **arg) { char_u *p; int len; for (p = *arg; vim_isIDc(*p); ++p) ; if (p == *arg) // no name found return 0; len = (int)(p - *arg); *arg = p; return len; } /* * Get the length of the name of a function or internal variable. * "arg" is advanced to after the name. * Return 0 if something is wrong. */ int get_id_len(char_u **arg) { char_u *p; int len; // Find the end of the name. for (p = *arg; eval_isnamec(*p); ++p) { if (*p == ':') { // "s:" is start of "s:var", but "n:" is not and can be used in // slice "[n:]". Also "xx:" is not a namespace. len = (int)(p - *arg); if ((len == 1 && vim_strchr(NAMESPACE_CHAR, **arg) == NULL) || len > 1) break; } } if (p == *arg) // no name found return 0; len = (int)(p - *arg); *arg = p; return len; } /* * Get the length of the name of a variable or function. * Only the name is recognized, does not handle ".key" or "[idx]". * "arg" is advanced to the first non-white character after the name. * Return -1 if curly braces expansion failed. * Return 0 if something else is wrong. * If the name contains 'magic' {}'s, expand them and return the * expanded name in an allocated string via 'alias' - caller must free. */ int get_name_len( char_u **arg, char_u **alias, int evaluate, int verbose) { int len; char_u *p; char_u *expr_start; char_u *expr_end; *alias = NULL; // default to no alias if ((*arg)[0] == K_SPECIAL && (*arg)[1] == KS_EXTRA && (*arg)[2] == (int)KE_SNR) { // hard coded <SNR>, already translated *arg += 3; return get_id_len(arg) + 3; } len = eval_fname_script(*arg); if (len > 0) { // literal "<SID>", "s:" or "<SNR>" *arg += len; } /* * Find the end of the name; check for {} construction. */ p = find_name_end(*arg, &expr_start, &expr_end, len > 0 ? 0 : FNE_CHECK_START); if (expr_start != NULL) { char_u *temp_string; if (!evaluate) { len += (int)(p - *arg); *arg = skipwhite(p); return len; } /* * Include any <SID> etc in the expanded string: * Thus the -len here. */ temp_string = make_expanded_name(*arg - len, expr_start, expr_end, p); if (temp_string == NULL) return -1; *alias = temp_string; *arg = skipwhite(p); return (int)STRLEN(temp_string); } len += get_id_len(arg); // Only give an error when there is something, otherwise it will be // reported at a higher level. if (len == 0 && verbose && **arg != NUL) semsg(_(e_invalid_expression_str), *arg); return len; } /* * Find the end of a variable or function name, taking care of magic braces. * If "expr_start" is not NULL then "expr_start" and "expr_end" are set to the * start and end of the first magic braces item. 
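 * For example, in "foo{bar}baz" they are set to point to the '{' and the
 * matching '}'.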
* "flags" can have FNE_INCL_BR and FNE_CHECK_START. * Return a pointer to just after the name. Equal to "arg" if there is no * valid name. */ char_u * find_name_end( char_u *arg, char_u **expr_start, char_u **expr_end, int flags) { int mb_nest = 0; int br_nest = 0; char_u *p; int len; int vim9script = in_vim9script(); if (expr_start != NULL) { *expr_start = NULL; *expr_end = NULL; } // Quick check for valid starting character. if ((flags & FNE_CHECK_START) && !eval_isnamec1(*arg) && (*arg != '{' || vim9script)) return arg; for (p = arg; *p != NUL && (eval_isnamec(*p) || (*p == '{' && !vim9script) || ((flags & FNE_INCL_BR) && (*p == '[' || (*p == '.' && eval_isdictc(p[1])))) || mb_nest != 0 || br_nest != 0); MB_PTR_ADV(p)) { if (*p == '\'') { // skip over 'string' to avoid counting [ and ] inside it. for (p = p + 1; *p != NUL && *p != '\''; MB_PTR_ADV(p)) ; if (*p == NUL) break; } else if (*p == '"') { // skip over "str\"ing" to avoid counting [ and ] inside it. for (p = p + 1; *p != NUL && *p != '"'; MB_PTR_ADV(p)) if (*p == '\\' && p[1] != NUL) ++p; if (*p == NUL) break; } else if (br_nest == 0 && mb_nest == 0 && *p == ':') { // "s:" is start of "s:var", but "n:" is not and can be used in // slice "[n:]". Also "xx:" is not a namespace. But {ns}: is. len = (int)(p - arg); if ((len == 1 && vim_strchr(NAMESPACE_CHAR, *arg) == NULL) || (len > 1 && p[-1] != '}')) break; } if (mb_nest == 0) { if (*p == '[') ++br_nest; else if (*p == ']') --br_nest; } if (br_nest == 0 && !vim9script) { if (*p == '{') { mb_nest++; if (expr_start != NULL && *expr_start == NULL) *expr_start = p; } else if (*p == '}') { mb_nest--; if (expr_start != NULL && mb_nest == 0 && *expr_end == NULL) *expr_end = p; } } } return p; } /* * Expands out the 'magic' {}'s in a variable/function name. * Note that this can call itself recursively, to deal with * constructs like foo{bar}{baz}{bam} * The four pointer arguments point to "foo{expre}ss{ion}bar" * "in_start" ^ * "expr_start" ^ * "expr_end" ^ * "in_end" ^ * * Returns a new allocated string, which the caller must free. * Returns NULL for failure. */ static char_u * make_expanded_name( char_u *in_start, char_u *expr_start, char_u *expr_end, char_u *in_end) { char_u c1; char_u *retval = NULL; char_u *temp_result; if (expr_end == NULL || in_end == NULL) return NULL; *expr_start = NUL; *expr_end = NUL; c1 = *in_end; *in_end = NUL; temp_result = eval_to_string(expr_start + 1, FALSE); if (temp_result != NULL) { retval = alloc(STRLEN(temp_result) + (expr_start - in_start) + (in_end - expr_end) + 1); if (retval != NULL) { STRCPY(retval, in_start); STRCAT(retval, temp_result); STRCAT(retval, expr_end + 1); } } vim_free(temp_result); *in_end = c1; // put char back for error messages *expr_start = '{'; *expr_end = '}'; if (retval != NULL) { temp_result = find_name_end(retval, &expr_start, &expr_end, 0); if (expr_start != NULL) { // Further expansion! temp_result = make_expanded_name(retval, expr_start, expr_end, temp_result); vim_free(retval); retval = temp_result; } } return retval; } /* * Return TRUE if character "c" can be used in a variable or function name. * Does not include '{' or '}' for magic braces. */ int eval_isnamec(int c) { return ASCII_ISALNUM(c) || c == '_' || c == ':' || c == AUTOLOAD_CHAR; } /* * Return TRUE if character "c" can be used as the first character in a * variable or function name (excluding '{' and '}'). 
*/ int eval_isnamec1(int c) { return ASCII_ISALPHA(c) || c == '_'; } /* * Return TRUE if character "c" can be used as the first character of a * dictionary key. */ int eval_isdictc(int c) { return ASCII_ISALNUM(c) || c == '_'; } /* * Handle: * - expr[expr], expr[expr:expr] subscript * - ".name" lookup * - function call with Funcref variable: func(expr) * - method call: var->method() * * Can all be combined in any order: dict.func(expr)[idx]['func'](expr)->len() * "name_start" points to a variable before the subscript or is NULL. */ int handle_subscript( char_u **arg, char_u *name_start, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int ret = OK; dict_T *selfdict = NULL; int check_white = TRUE; int getnext; char_u *p; while (ret == OK) { // When at the end of the line and ".name" or "->{" or "->X" follows in // the next line then consume the line break. p = eval_next_non_blank(*arg, evalarg, &getnext); if (getnext && ((rettv->v_type == VAR_DICT && *p == '.' && eval_isdictc(p[1])) || (p[0] == '-' && p[1] == '>' && (p[2] == '{' || ASCII_ISALPHA(in_vim9script() ? *skipwhite(p + 2) : p[2]))))) { *arg = eval_next_line(*arg, evalarg); p = *arg; check_white = FALSE; } if (rettv->v_type == VAR_ANY) { char_u *exp_name; int cc; int idx; ufunc_T *ufunc; type_T *type; // Found script from "import {name} as name", script item name must // follow. "rettv->vval.v_number" has the script ID. if (**arg != '.') { if (verbose) semsg(_(e_expected_dot_after_name_str), name_start != NULL ? name_start: *arg); ret = FAIL; break; } ++*arg; if (IS_WHITE_OR_NUL(**arg)) { if (verbose) emsg(_(e_no_white_space_allowed_after_dot)); ret = FAIL; break; } // isolate the name exp_name = *arg; while (eval_isnamec(**arg)) ++*arg; cc = **arg; **arg = NUL; idx = find_exported(rettv->vval.v_number, exp_name, &ufunc, &type, evalarg->eval_cctx, evalarg->eval_cstack, verbose); **arg = cc; if (idx < 0 && ufunc == NULL) { ret = FAIL; break; } if (idx >= 0) { scriptitem_T *si = SCRIPT_ITEM(rettv->vval.v_number); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; copy_tv(sv->sv_tv, rettv); } else { rettv->v_type = VAR_FUNC; rettv->vval.v_string = vim_strsave(ufunc->uf_name); } continue; } if ((**arg == '(' && (!evaluate || rettv->v_type == VAR_FUNC || rettv->v_type == VAR_PARTIAL)) && (!check_white || !VIM_ISWHITE(*(*arg - 1)))) { ret = call_func_rettv(arg, evalarg, rettv, evaluate, selfdict, NULL); // Stop the expression evaluation when immediately aborting on // error, or when an interrupt occurred or an exception was thrown // but not caught. if (aborting()) { if (ret == OK) clear_tv(rettv); ret = FAIL; } dict_unref(selfdict); selfdict = NULL; } else if (p[0] == '-' && p[1] == '>') { if (in_vim9script()) *arg = skipwhite(p + 2); else *arg = p + 2; if (ret == OK) { if (VIM_ISWHITE(**arg)) { emsg(_(e_no_white_space_allowed_before_parenthesis)); ret = FAIL; } else if ((**arg == '{' && !in_vim9script()) || **arg == '(') // expr->{lambda}() or expr->(lambda)() ret = eval_lambda(arg, rettv, evalarg, verbose); else // expr->name() ret = eval_method(arg, rettv, evalarg, verbose); } } // "." is ".name" lookup when we found a dict or when evaluating and // scriptversion is at least 2, where string concatenation is "..". else if (**arg == '[' || (**arg == '.' && (rettv->v_type == VAR_DICT || (!evaluate && (*arg)[1] != '.' 
&& !in_old_script(2))))) { dict_unref(selfdict); if (rettv->v_type == VAR_DICT) { selfdict = rettv->vval.v_dict; if (selfdict != NULL) ++selfdict->dv_refcount; } else selfdict = NULL; if (eval_index(arg, rettv, evalarg, verbose) == FAIL) { clear_tv(rettv); ret = FAIL; } } else break; } // Turn "dict.Func" into a partial for "Func" bound to "dict". // Don't do this when "Func" is already a partial that was bound // explicitly (pt_auto is FALSE). if (selfdict != NULL && (rettv->v_type == VAR_FUNC || (rettv->v_type == VAR_PARTIAL && (rettv->vval.v_partial->pt_auto || rettv->vval.v_partial->pt_dict == NULL)))) selfdict = make_partial(selfdict, rettv); dict_unref(selfdict); return ret; } /* * Make a copy of an item. * Lists and Dictionaries are also copied. A deep copy if "deep" is set. * "top" is TRUE for the toplevel of copy(). * For deepcopy() "copyID" is zero for a full copy or the ID for when a * reference to an already copied list/dict can be used. * Returns FAIL or OK. */ int item_copy( typval_T *from, typval_T *to, int deep, int top, int copyID) { static int recurse = 0; int ret = OK; if (recurse >= DICT_MAXNEST) { emsg(_(e_variable_nested_too_deep_for_making_copy)); return FAIL; } ++recurse; switch (from->v_type) { case VAR_NUMBER: case VAR_FLOAT: case VAR_STRING: case VAR_FUNC: case VAR_PARTIAL: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: copy_tv(from, to); break; case VAR_LIST: to->v_type = VAR_LIST; to->v_lock = 0; if (from->vval.v_list == NULL) to->vval.v_list = NULL; else if (copyID != 0 && from->vval.v_list->lv_copyID == copyID) { // use the copy made earlier to->vval.v_list = from->vval.v_list->lv_copylist; ++to->vval.v_list->lv_refcount; } else to->vval.v_list = list_copy(from->vval.v_list, deep, top, copyID); if (to->vval.v_list == NULL) ret = FAIL; break; case VAR_BLOB: ret = blob_copy(from->vval.v_blob, to); break; case VAR_DICT: to->v_type = VAR_DICT; to->v_lock = 0; if (from->vval.v_dict == NULL) to->vval.v_dict = NULL; else if (copyID != 0 && from->vval.v_dict->dv_copyID == copyID) { // use the copy made earlier to->vval.v_dict = from->vval.v_dict->dv_copydict; ++to->vval.v_dict->dv_refcount; } else to->vval.v_dict = dict_copy(from->vval.v_dict, deep, top, copyID); if (to->vval.v_dict == NULL) ret = FAIL; break; case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: internal_error_no_abort("item_copy(UNKNOWN)"); ret = FAIL; } --recurse; return ret; } void echo_one(typval_T *rettv, int with_space, int *atstart, int *needclr) { char_u *tofree; char_u numbuf[NUMBUFLEN]; char_u *p = echo_string(rettv, &tofree, numbuf, get_copyID()); if (*atstart) { *atstart = FALSE; // Call msg_start() after eval1(), evaluating the expression // may cause a message to appear. if (with_space) { // Mark the saved text as finishing the line, so that what // follows is displayed on a new line when scrolling back // at the more prompt. msg_sb_eol(); msg_start(); } } else if (with_space) msg_puts_attr(" ", echo_attr); if (p != NULL) for ( ; *p != NUL && !got_int; ++p) { if (*p == '\n' || *p == '\r' || *p == TAB) { if (*p != TAB && *needclr) { // remove any text still there from the command msg_clr_eos(); *needclr = FALSE; } msg_putchar_attr(*p, echo_attr); } else { if (has_mbyte) { int i = (*mb_ptr2len)(p); (void)msg_outtrans_len_attr(p, i, echo_attr); p += i - 1; } else (void)msg_outtrans_len_attr(p, 1, echo_attr); } } vim_free(tofree); } /* * ":echo expr1 ..." print each argument separated with a space, add a * newline at the end. * ":echon expr1 ..." 
print each argument plain. */ void ex_echo(exarg_T *eap) { char_u *arg = eap->arg; typval_T rettv; char_u *arg_start; int needclr = TRUE; int atstart = TRUE; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap->skip); if (eap->skip) ++emsg_skip; while ((!ends_excmd2(eap->cmd, arg) || *arg == '"') && !got_int) { // If eval1() causes an error message the text from the command may // still need to be cleared. E.g., "echo 22,44". need_clr_eos = needclr; arg_start = arg; if (eval1(&arg, &rettv, &evalarg) == FAIL) { /* * Report the invalid expression unless the expression evaluation * has been cancelled due to an aborting error, an interrupt, or an * exception. */ if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before) semsg(_(e_invalid_expression_str), arg_start); need_clr_eos = FALSE; break; } need_clr_eos = FALSE; if (!eap->skip) { if (rettv.v_type == VAR_VOID) { semsg(_(e_expression_does_not_result_in_value_str), arg_start); break; } echo_one(&rettv, eap->cmdidx == CMD_echo, &atstart, &needclr); } clear_tv(&rettv); arg = skipwhite(arg); } set_nextcmd(eap, arg); clear_evalarg(&evalarg, eap); if (eap->skip) --emsg_skip; else { // remove text that may still be there from the command if (needclr) msg_clr_eos(); if (eap->cmdidx == CMD_echo) msg_end(); } } /* * ":echohl {name}". */ void ex_echohl(exarg_T *eap) { echo_attr = syn_name2attr(eap->arg); } /* * Returns the :echo attribute */ int get_echo_attr(void) { return echo_attr; } /* * ":execute expr1 ..." execute the result of an expression. * ":echomsg expr1 ..." Print a message * ":echoerr expr1 ..." Print an error * ":echoconsole expr1 ..." Print a message on stdout * Each gets spaces around each argument and a newline at the end for * echo commands */ void ex_execute(exarg_T *eap) { char_u *arg = eap->arg; typval_T rettv; int ret = OK; char_u *p; garray_T ga; int len; long start_lnum = SOURCING_LNUM; ga_init2(&ga, 1, 80); if (eap->skip) ++emsg_skip; while (!ends_excmd2(eap->cmd, arg) || *arg == '"') { ret = eval1_emsg(&arg, &rettv, eap); if (ret == FAIL) break; if (!eap->skip) { char_u buf[NUMBUFLEN]; if (eap->cmdidx == CMD_execute) { if (rettv.v_type == VAR_CHANNEL || rettv.v_type == VAR_JOB) { semsg(_(e_using_invalid_value_as_string_str), vartype_name(rettv.v_type)); p = NULL; } else p = tv_get_string_buf(&rettv, buf); } else p = tv_stringify(&rettv, buf); if (p == NULL) { clear_tv(&rettv); ret = FAIL; break; } len = (int)STRLEN(p); if (ga_grow(&ga, len + 2) == FAIL) { clear_tv(&rettv); ret = FAIL; break; } if (ga.ga_len) ((char_u *)(ga.ga_data))[ga.ga_len++] = ' '; STRCPY((char_u *)(ga.ga_data) + ga.ga_len, p); ga.ga_len += len; } clear_tv(&rettv); arg = skipwhite(arg); } if (ret != FAIL && ga.ga_data != NULL) { // use the first line of continuation lines for messages SOURCING_LNUM = start_lnum; if (eap->cmdidx == CMD_echomsg || eap->cmdidx == CMD_echoerr) { // Mark the already saved text as finishing the line, so that what // follows is displayed on a new line when scrolling back at the // more prompt. msg_sb_eol(); } if (eap->cmdidx == CMD_echomsg) { msg_attr(ga.ga_data, echo_attr); out_flush(); } else if (eap->cmdidx == CMD_echoconsole) { ui_write(ga.ga_data, (int)STRLEN(ga.ga_data), TRUE); ui_write((char_u *)"\r\n", 2, TRUE); } else if (eap->cmdidx == CMD_echoerr) { int save_did_emsg = did_emsg; // We don't want to abort following commands, restore did_emsg. 
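	    // Note: inside a try conditional the error is converted to an
	    // exception that can be caught (see ":help :echoerr").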
emsg(ga.ga_data); if (!force_abort) did_emsg = save_did_emsg; } else if (eap->cmdidx == CMD_execute) { int save_sticky_cmdmod_flags = sticky_cmdmod_flags; // "legacy exe cmd" and "vim9cmd exe cmd" applies to "cmd". sticky_cmdmod_flags = cmdmod.cmod_flags & (CMOD_LEGACY | CMOD_VIM9CMD); do_cmdline((char_u *)ga.ga_data, eap->getline, eap->cookie, DOCMD_NOWAIT|DOCMD_VERBOSE); sticky_cmdmod_flags = save_sticky_cmdmod_flags; } } ga_clear(&ga); if (eap->skip) --emsg_skip; set_nextcmd(eap, arg); } /* * Skip over the name of an option: "&option", "&g:option" or "&l:option". * "arg" points to the "&" or '+' when called, to "option" when returning. * Returns NULL when no option name found. Otherwise pointer to the char * after the option name. */ char_u * find_option_end(char_u **arg, int *scope) { char_u *p = *arg; ++p; if (*p == 'g' && p[1] == ':') { *scope = OPT_GLOBAL; p += 2; } else if (*p == 'l' && p[1] == ':') { *scope = OPT_LOCAL; p += 2; } else *scope = 0; if (!ASCII_ISALPHA(*p)) return NULL; *arg = p; if (p[0] == 't' && p[1] == '_' && p[2] != NUL && p[3] != NUL) p += 4; // termcap option else while (ASCII_ISALPHA(*p)) ++p; return p; } /* * Display script name where an item was last set. * Should only be invoked when 'verbose' is non-zero. */ void last_set_msg(sctx_T script_ctx) { char_u *p; if (script_ctx.sc_sid != 0) { p = home_replace_save(NULL, get_scriptname(script_ctx.sc_sid)); if (p != NULL) { verbose_enter(); msg_puts(_("\n\tLast set from ")); msg_puts((char *)p); if (script_ctx.sc_lnum > 0) { msg_puts(_(line_msg)); msg_outnum((long)script_ctx.sc_lnum); } verbose_leave(); vim_free(p); } } } #endif // FEAT_EVAL /* * Perform a substitution on "str" with pattern "pat" and substitute "sub". * When "sub" is NULL "expr" is used, must be a VAR_FUNC or VAR_PARTIAL. * "flags" can be "g" to do a global substitute. * Returns an allocated string, NULL for error. */ char_u * do_string_sub( char_u *str, char_u *pat, char_u *sub, typval_T *expr, char_u *flags) { int sublen; regmatch_T regmatch; int i; int do_all; char_u *tail; char_u *end; garray_T ga; char_u *ret; char_u *save_cpo; char_u *zero_width = NULL; // Make 'cpoptions' empty, so that the 'l' flag doesn't work here save_cpo = p_cpo; p_cpo = empty_option; ga_init2(&ga, 1, 200); do_all = (flags[0] == 'g'); regmatch.rm_ic = p_ic; regmatch.regprog = vim_regcomp(pat, RE_MAGIC + RE_STRING); if (regmatch.regprog != NULL) { tail = str; end = str + STRLEN(str); while (vim_regexec_nl(&regmatch, str, (colnr_T)(tail - str))) { // Skip empty match except for first match. if (regmatch.startp[0] == regmatch.endp[0]) { if (zero_width == regmatch.startp[0]) { // avoid getting stuck on a match with an empty string i = mb_ptr2len(tail); mch_memmove((char_u *)ga.ga_data + ga.ga_len, tail, (size_t)i); ga.ga_len += i; tail += i; continue; } zero_width = regmatch.startp[0]; } /* * Get some space for a temporary buffer to do the substitution * into. It will contain: * - The text up to where the match is. * - The substituted text. * - The text after the match. 
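	     * Note: "sublen" returned by vim_regsub() below includes the
	     * trailing NUL, hence the "- 1" when advancing "ga.ga_len".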
*/ sublen = vim_regsub(&regmatch, sub, expr, tail, 0, REGSUB_MAGIC); if (ga_grow(&ga, (int)((end - tail) + sublen - (regmatch.endp[0] - regmatch.startp[0]))) == FAIL) { ga_clear(&ga); break; } // copy the text up to where the match is i = (int)(regmatch.startp[0] - tail); mch_memmove((char_u *)ga.ga_data + ga.ga_len, tail, (size_t)i); // add the substituted text (void)vim_regsub(&regmatch, sub, expr, (char_u *)ga.ga_data + ga.ga_len + i, sublen, REGSUB_COPY | REGSUB_MAGIC); ga.ga_len += i + sublen - 1; tail = regmatch.endp[0]; if (*tail == NUL) break; if (!do_all) break; } if (ga.ga_data != NULL) STRCPY((char *)ga.ga_data + ga.ga_len, tail); vim_regfree(regmatch.regprog); } ret = vim_strsave(ga.ga_data == NULL ? str : (char_u *)ga.ga_data); ga_clear(&ga); if (p_cpo == empty_option) p_cpo = save_cpo; else { // Darn, evaluating {sub} expression or {expr} changed the value. // If it's still empty it was changed and restored, need to restore in // the complicated way. if (*p_cpo == NUL) set_option_value_give_err((char_u *)"cpo", 0L, save_cpo, 0); free_string_option(save_cpo); } return ret; }
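/*
 * Illustrative use of do_string_sub(); the input values are hypothetical:
 *
 *	char_u *res = do_string_sub((char_u *)"abc abc", (char_u *)"abc",
 *				    (char_u *)"x", NULL, (char_u *)"g");
 *	// res now holds "x x"; the caller must vim_free() it.
 */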
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * eval.c: Expression evaluation. */ #define USING_FLOAT_STUFF #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) #ifdef VMS # include <float.h> #endif #define NAMESPACE_CHAR (char_u *)"abglstvw" /* * When recursively copying lists and dicts we need to remember which ones we * have done to avoid endless recursiveness. This unique ID is used for that. * The last bit is used for previous_funccal, ignored when comparing. */ static int current_copyID = 0; /* * Info used by a ":for" loop. */ typedef struct { int fi_semicolon; // TRUE if ending in '; var]' int fi_varcount; // nr of variables in the list int fi_break_count; // nr of line breaks encountered listwatch_T fi_lw; // keep an eye on the item used. list_T *fi_list; // list being used int fi_bi; // index of blob blob_T *fi_blob; // blob being used char_u *fi_string; // copy of string being used int fi_byte_idx; // byte index in fi_string } forinfo_T; static int eval2(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval3(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval4(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval5(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval6(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval7(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string); static int eval8(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string); static int eval9(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string); static int eval9_leader(typval_T *rettv, int numeric_only, char_u *start_leader, char_u **end_leaderp); static int free_unref_items(int copyID); static char_u *make_expanded_name(char_u *in_start, char_u *expr_start, char_u *expr_end, char_u *in_end); /* * Return "n1" divided by "n2", taking care of dividing by zero. * If "failed" is not NULL set it to TRUE when dividing by zero fails. */ varnumber_T num_divide(varnumber_T n1, varnumber_T n2, int *failed) { varnumber_T result; if (n2 == 0) { if (in_vim9script()) { emsg(_(e_divide_by_zero)); if (failed != NULL) *failed = TRUE; } if (n1 == 0) result = VARNUM_MIN; // similar to NaN else if (n1 < 0) result = -VARNUM_MAX; else result = VARNUM_MAX; } else result = n1 / n2; return result; } /* * Return "n1" modulus "n2", taking care of dividing by zero. * If "failed" is not NULL set it to TRUE when dividing by zero fails. */ varnumber_T num_modulus(varnumber_T n1, varnumber_T n2, int *failed) { if (n2 == 0 && in_vim9script()) { emsg(_(e_divide_by_zero)); if (failed != NULL) *failed = TRUE; } return (n2 == 0) ? 0 : (n1 % n2); } /* * Initialize the global and v: variables. */ void eval_init(void) { evalvars_init(); func_init(); } #if defined(EXITFREE) || defined(PROTO) void eval_clear(void) { evalvars_clear(); free_scriptnames(); // must come after evalvars_clear(). free_locales(); // autoloaded script names free_autoload_scriptnames(); // unreferenced lists and dicts (void)garbage_collect(FALSE); // functions not garbage collected free_all_functions(); } #endif void fill_evalarg_from_eap(evalarg_T *evalarg, exarg_T *eap, int skip) { init_evalarg(evalarg); evalarg->eval_flags = skip ? 
0 : EVAL_EVALUATE; if (eap != NULL) { evalarg->eval_cstack = eap->cstack; if (sourcing_a_script(eap) || eap->getline == get_list_line) { evalarg->eval_getline = eap->getline; evalarg->eval_cookie = eap->cookie; } } } /* * Top level evaluation function, returning a boolean. * Sets "error" to TRUE if there was an error. * Return TRUE or FALSE. */ int eval_to_bool( char_u *arg, int *error, exarg_T *eap, int skip) // only parse, don't execute { typval_T tv; varnumber_T retval = FALSE; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, skip); if (skip) ++emsg_skip; if (eval0(arg, &tv, eap, &evalarg) == FAIL) *error = TRUE; else { *error = FALSE; if (!skip) { if (in_vim9script()) retval = tv_get_bool_chk(&tv, error); else retval = (tv_get_number_chk(&tv, error) != 0); clear_tv(&tv); } } if (skip) --emsg_skip; clear_evalarg(&evalarg, eap); return (int)retval; } /* * Call eval1() and give an error message if not done at a lower level. */ static int eval1_emsg(char_u **arg, typval_T *rettv, exarg_T *eap) { char_u *start = *arg; int ret; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); ret = eval1(arg, rettv, &evalarg); if (ret == FAIL) { // Report the invalid expression unless the expression evaluation has // been cancelled due to an aborting error, an interrupt, or an // exception, or we already gave a more specific error. // Also check called_emsg for when using assert_fails(). if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before) semsg(_(e_invalid_expression_str), start); } clear_evalarg(&evalarg, eap); return ret; } /* * Return whether a typval is a valid expression to pass to eval_expr_typval() * or eval_expr_to_bool(). An empty string returns FALSE; */ int eval_expr_valid_arg(typval_T *tv) { return tv->v_type != VAR_UNKNOWN && (tv->v_type != VAR_STRING || (tv->vval.v_string != NULL && *tv->vval.v_string != NUL)); } /* * Evaluate an expression, which can be a function, partial or string. * Pass arguments "argv[argc]". * Return the result in "rettv" and OK or FAIL. */ int eval_expr_typval(typval_T *expr, typval_T *argv, int argc, typval_T *rettv) { char_u *s; char_u buf[NUMBUFLEN]; funcexe_T funcexe; if (expr->v_type == VAR_FUNC) { s = expr->vval.v_string; if (s == NULL || *s == NUL) return FAIL; CLEAR_FIELD(funcexe); funcexe.fe_evaluate = TRUE; if (call_func(s, -1, rettv, argc, argv, &funcexe) == FAIL) return FAIL; } else if (expr->v_type == VAR_PARTIAL) { partial_T *partial = expr->vval.v_partial; if (partial == NULL) return FAIL; if (partial->pt_func != NULL && partial->pt_func->uf_def_status != UF_NOT_COMPILED) { if (call_def_function(partial->pt_func, argc, argv, partial, rettv) == FAIL) return FAIL; } else { s = partial_name(partial); if (s == NULL || *s == NUL) return FAIL; CLEAR_FIELD(funcexe); funcexe.fe_evaluate = TRUE; funcexe.fe_partial = partial; if (call_func(s, -1, rettv, argc, argv, &funcexe) == FAIL) return FAIL; } } else if (expr->v_type == VAR_INSTR) { return exe_typval_instr(expr, rettv); } else { s = tv_get_string_buf_chk_strict(expr, buf, in_vim9script()); if (s == NULL) return FAIL; s = skipwhite(s); if (eval1_emsg(&s, rettv, NULL) == FAIL) return FAIL; if (*skipwhite(s) != NUL) // check for trailing chars after expr { clear_tv(rettv); semsg(_(e_invalid_expression_str), s); return FAIL; } } return OK; } /* * Like eval_to_bool() but using a typval_T instead of a string. * Works for string, funcref and partial. 
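 * A string is evaluated as an expression; a funcref or partial is called
 * with no arguments and its result is used.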
*/ int eval_expr_to_bool(typval_T *expr, int *error) { typval_T rettv; int res; if (eval_expr_typval(expr, NULL, 0, &rettv) == FAIL) { *error = TRUE; return FALSE; } res = (tv_get_bool_chk(&rettv, error) != 0); clear_tv(&rettv); return res; } /* * Top level evaluation function, returning a string. If "skip" is TRUE, * only parsing to "nextcmd" is done, without reporting errors. Return * pointer to allocated memory, or NULL for failure or when "skip" is TRUE. */ char_u * eval_to_string_skip( char_u *arg, exarg_T *eap, int skip) // only parse, don't execute { typval_T tv; char_u *retval; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, skip); if (skip) ++emsg_skip; if (eval0(arg, &tv, eap, &evalarg) == FAIL || skip) retval = NULL; else { retval = vim_strsave(tv_get_string(&tv)); clear_tv(&tv); } if (skip) --emsg_skip; clear_evalarg(&evalarg, eap); return retval; } /* * Initialize "evalarg" for use. */ void init_evalarg(evalarg_T *evalarg) { CLEAR_POINTER(evalarg); ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20); } /* * If "evalarg->eval_tofree" is not NULL free it later. * Caller is expected to overwrite "evalarg->eval_tofree" next. */ static void free_eval_tofree_later(evalarg_T *evalarg) { if (evalarg->eval_tofree != NULL) { if (ga_grow(&evalarg->eval_tofree_ga, 1) == OK) ((char_u **)evalarg->eval_tofree_ga.ga_data) [evalarg->eval_tofree_ga.ga_len++] = evalarg->eval_tofree; else vim_free(evalarg->eval_tofree); } } /* * After using "evalarg" filled from "eap": free the memory. */ void clear_evalarg(evalarg_T *evalarg, exarg_T *eap) { if (evalarg != NULL) { if (evalarg->eval_tofree != NULL) { if (eap != NULL) { // We may need to keep the original command line, e.g. for // ":let" it has the variable names. But we may also need the // new one, "nextcmd" points into it. Keep both. vim_free(eap->cmdline_tofree); eap->cmdline_tofree = *eap->cmdlinep; *eap->cmdlinep = evalarg->eval_tofree; } else vim_free(evalarg->eval_tofree); evalarg->eval_tofree = NULL; } ga_clear_strings(&evalarg->eval_tofree_ga); VIM_CLEAR(evalarg->eval_tofree_lambda); } } /* * Skip over an expression at "*pp". * Return FAIL for an error, OK otherwise. */ int skip_expr(char_u **pp, evalarg_T *evalarg) { typval_T rettv; *pp = skipwhite(*pp); return eval1(pp, &rettv, evalarg); } /* * Skip over an expression at "*arg". * If in Vim9 script and line breaks are encountered, the lines are * concatenated. "evalarg->eval_tofree" will be set accordingly. * "arg" is advanced to just after the expression. * "start" is set to the start of the expression, "end" to just after the end. * Also when the expression is copied to allocated memory. * Return FAIL for an error, OK otherwise. */ int skip_expr_concatenate( char_u **arg, char_u **start, char_u **end, evalarg_T *evalarg) { typval_T rettv; int res; int vim9script = in_vim9script(); garray_T *gap = evalarg == NULL ? NULL : &evalarg->eval_ga; garray_T *freegap = evalarg == NULL ? NULL : &evalarg->eval_freega; int save_flags = evalarg == NULL ? 0 : evalarg->eval_flags; int evaluate = evalarg == NULL ? FALSE : (evalarg->eval_flags & EVAL_EVALUATE); if (vim9script && evaluate && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL)) { ga_init2(gap, sizeof(char_u *), 10); // leave room for "start" if (ga_grow(gap, 1) == OK) ++gap->ga_len; ga_init2(freegap, sizeof(char_u *), 10); } *start = *arg; // Don't evaluate the expression. 
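    // Only parsing is needed here: eval1() locates the end of the
    // expression, the saved flags are restored below.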
if (evalarg != NULL) evalarg->eval_flags &= ~EVAL_EVALUATE; *arg = skipwhite(*arg); res = eval1(arg, &rettv, evalarg); *end = *arg; if (evalarg != NULL) evalarg->eval_flags = save_flags; if (vim9script && evaluate && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL)) { if (evalarg->eval_ga.ga_len == 1) { // just the one line, no need to concatenate ga_clear(gap); gap->ga_itemsize = 0; } else { char_u *p; size_t endoff = STRLEN(*arg); // Line breaks encountered, concatenate all the lines. *((char_u **)gap->ga_data) = *start; p = ga_concat_strings(gap, " "); // free the lines only when using getsourceline() if (evalarg->eval_cookie != NULL) { // Do not free the first line, the caller can still use it. *((char_u **)gap->ga_data) = NULL; // Do not free the last line, "arg" points into it, free it // later. Also free "eval_tofree" later if needed. free_eval_tofree_later(evalarg); evalarg->eval_tofree = ((char_u **)gap->ga_data)[gap->ga_len - 1]; ((char_u **)gap->ga_data)[gap->ga_len - 1] = NULL; ga_clear_strings(gap); } else { ga_clear(gap); // free lines that were explicitly marked for freeing ga_clear_strings(freegap); } gap->ga_itemsize = 0; if (p == NULL) return FAIL; *start = p; vim_free(evalarg->eval_tofree_lambda); evalarg->eval_tofree_lambda = p; // Compute "end" relative to the end. *end = *start + STRLEN(*start) - endoff; } } return res; } /* * Convert "tv" to a string. * When "convert" is TRUE convert a List into a sequence of lines and convert * a Float to a String. * Returns an allocated string (NULL when out of memory). */ char_u * typval2string(typval_T *tv, int convert) { garray_T ga; char_u *retval; #ifdef FEAT_FLOAT char_u numbuf[NUMBUFLEN]; #endif if (convert && tv->v_type == VAR_LIST) { ga_init2(&ga, sizeof(char), 80); if (tv->vval.v_list != NULL) { list_join(&ga, tv->vval.v_list, (char_u *)"\n", TRUE, FALSE, 0); if (tv->vval.v_list->lv_len > 0) ga_append(&ga, NL); } ga_append(&ga, NUL); retval = (char_u *)ga.ga_data; } #ifdef FEAT_FLOAT else if (convert && tv->v_type == VAR_FLOAT) { vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float); retval = vim_strsave(numbuf); } #endif else retval = vim_strsave(tv_get_string(tv)); return retval; } /* * Top level evaluation function, returning a string. Does not handle line * breaks. * When "convert" is TRUE convert a List into a sequence of lines and convert * a Float to a String. * Return pointer to allocated memory, or NULL for failure. */ char_u * eval_to_string_eap( char_u *arg, int convert, exarg_T *eap) { typval_T tv; char_u *retval; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); if (eval0(arg, &tv, NULL, &evalarg) == FAIL) retval = NULL; else { retval = typval2string(&tv, convert); clear_tv(&tv); } clear_evalarg(&evalarg, NULL); return retval; } char_u * eval_to_string( char_u *arg, int convert) { return eval_to_string_eap(arg, convert, NULL); } /* * Call eval_to_string() without using current local variables and using * textlock. When "use_sandbox" is TRUE use the sandbox. * Use legacy Vim script syntax. 
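 * "use_sandbox" is typically the result of was_set_insecurely() for the
 * option being evaluated.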
*/ char_u * eval_to_string_safe( char_u *arg, int use_sandbox, int keep_script_version) { char_u *retval; funccal_entry_T funccal_entry; int save_sc_version = current_sctx.sc_version; int save_garbage = may_garbage_collect; if (!keep_script_version) current_sctx.sc_version = 1; save_funccal(&funccal_entry); if (use_sandbox) ++sandbox; ++textlock; may_garbage_collect = FALSE; retval = eval_to_string(arg, FALSE); if (use_sandbox) --sandbox; --textlock; may_garbage_collect = save_garbage; restore_funccal(); current_sctx.sc_version = save_sc_version; return retval; } /* * Top level evaluation function, returning a number. * Evaluates "expr" silently. * Returns -1 for an error. */ varnumber_T eval_to_number(char_u *expr) { typval_T rettv; varnumber_T retval; char_u *p = skipwhite(expr); ++emsg_off; if (eval1(&p, &rettv, &EVALARG_EVALUATE) == FAIL) retval = -1; else { retval = tv_get_number_chk(&rettv, NULL); clear_tv(&rettv); } --emsg_off; return retval; } /* * Top level evaluation function. * Returns an allocated typval_T with the result. * Returns NULL when there is an error. */ typval_T * eval_expr(char_u *arg, exarg_T *eap) { typval_T *tv; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); tv = ALLOC_ONE(typval_T); if (tv != NULL && eval0(arg, tv, eap, &evalarg) == FAIL) VIM_CLEAR(tv); clear_evalarg(&evalarg, eap); return tv; } /* * "*arg" points to what can be a function name in the form of "import.Name" or * "Funcref". Return the name of the function. Set "tofree" to something that * was allocated. * If "verbose" is FALSE no errors are given. * Return NULL for any failure. */ static char_u * deref_function_name( char_u **arg, char_u **tofree, evalarg_T *evalarg, int verbose) { typval_T ref; char_u *name = *arg; ref.v_type = VAR_UNKNOWN; if (eval9(arg, &ref, evalarg, FALSE) == FAIL) { dictitem_T *v; // If <SID>VarName was used it would not be found, try another way. v = find_var_also_in_script(name, NULL, FALSE); if (v == NULL) return NULL; copy_tv(&v->di_tv, &ref); } if (*skipwhite(*arg) != NUL) { if (verbose) semsg(_(e_trailing_characters_str), *arg); name = NULL; } else if (ref.v_type == VAR_FUNC && ref.vval.v_string != NULL) { name = ref.vval.v_string; ref.vval.v_string = NULL; *tofree = name; } else if (ref.v_type == VAR_PARTIAL && ref.vval.v_partial != NULL) { if (ref.vval.v_partial->pt_argc > 0 || ref.vval.v_partial->pt_dict != NULL) { if (verbose) emsg(_(e_cannot_use_partial_here)); name = NULL; } else { name = vim_strsave(partial_name(ref.vval.v_partial)); *tofree = name; } } else { if (verbose) semsg(_(e_not_callable_type_str), name); name = NULL; } clear_tv(&ref); return name; } /* * Call some Vim script function and return the result in "*rettv". * Uses argv[0] to argv[argc - 1] for the function arguments. argv[argc] * should have type VAR_UNKNOWN. * Returns OK or FAIL. */ int call_vim_function( char_u *func, int argc, typval_T *argv, typval_T *rettv) { int ret; funcexe_T funcexe; char_u *arg; char_u *name; char_u *tofree = NULL; int ignore_errors; rettv->v_type = VAR_UNKNOWN; // clear_tv() uses this CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = TRUE; // The name might be "import.Func" or "Funcref". We don't know, we need to // ignore errors for an undefined name. But we do want errors when an // autoload script has errors. Guess that when there is a dot in the name // showing errors is the right choice. 
ignore_errors = vim_strchr(func, '.') == NULL; arg = func; if (ignore_errors) ++emsg_off; name = deref_function_name(&arg, &tofree, &EVALARG_EVALUATE, FALSE); if (ignore_errors) --emsg_off; if (name == NULL) name = func; ret = call_func(name, -1, rettv, argc, argv, &funcexe); if (ret == FAIL) clear_tv(rettv); vim_free(tofree); return ret; } /* * Call Vim script function "func" and return the result as a string. * Uses "argv[0]" to "argv[argc - 1]" for the function arguments. "argv[argc]" * should have type VAR_UNKNOWN. * Returns NULL when calling the function fails. */ void * call_func_retstr( char_u *func, int argc, typval_T *argv) { typval_T rettv; char_u *retval; if (call_vim_function(func, argc, argv, &rettv) == FAIL) return NULL; retval = vim_strsave(tv_get_string(&rettv)); clear_tv(&rettv); return retval; } /* * Call Vim script function "func" and return the result as a List. * Uses "argv" and "argc" as call_func_retstr(). * Returns NULL when there is something wrong. */ void * call_func_retlist( char_u *func, int argc, typval_T *argv) { typval_T rettv; if (call_vim_function(func, argc, argv, &rettv) == FAIL) return NULL; if (rettv.v_type != VAR_LIST) { clear_tv(&rettv); return NULL; } return rettv.vval.v_list; } #if defined(FEAT_FOLDING) || defined(PROTO) /* * Evaluate "arg", which is 'foldexpr'. * Note: caller must set "curwin" to match "arg". * Returns the foldlevel, and any character preceding it in "*cp". Doesn't * give error messages. */ int eval_foldexpr(win_T *wp, int *cp) { char_u *arg; typval_T tv; varnumber_T retval; char_u *s; sctx_T saved_sctx = current_sctx; int use_sandbox = was_set_insecurely((char_u *)"foldexpr", OPT_LOCAL); arg = wp->w_p_fde; current_sctx = wp->w_p_script_ctx[WV_FDE]; ++emsg_off; if (use_sandbox) ++sandbox; ++textlock; *cp = NUL; if (eval0(arg, &tv, NULL, &EVALARG_EVALUATE) == FAIL) retval = 0; else { // If the result is a number, just return the number. if (tv.v_type == VAR_NUMBER) retval = tv.vval.v_number; else if (tv.v_type != VAR_STRING || tv.vval.v_string == NULL) retval = 0; else { // If the result is a string, check if there is a non-digit before // the number. s = tv.vval.v_string; if (!VIM_ISDIGIT(*s) && *s != '-') *cp = *s++; retval = atol((char *)s); } clear_tv(&tv); } --emsg_off; if (use_sandbox) --sandbox; --textlock; clear_evalarg(&EVALARG_EVALUATE, NULL); current_sctx = saved_sctx; return (int)retval; } #endif /* * Get an lval: variable, Dict item or List item that can be assigned a value * to: "name", "na{me}", "name[expr]", "name[expr:expr]", "name[expr][expr]", * "name.key", "name.key[expr]" etc. * Indexing only works if "name" is an existing List or Dictionary. * "name" points to the start of the name. * If "rettv" is not NULL it points to the value to be assigned. * "unlet" is TRUE for ":unlet": slightly different behavior when something is * wrong; must end in space or cmd separator. * * flags: * GLV_QUIET: do not give error messages * GLV_READ_ONLY: will not change the variable * GLV_NO_AUTOLOAD: do not use script autoloading * * Returns a pointer to just after the name, including indexes. * When an evaluation error occurs "lp->ll_name" is NULL; * Returns NULL for a parsing error. Still need to free items in "lp"! 
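 * Example: for "d.key[1]" with "d" an existing Dict of Lists, "lp->ll_tv"
 * ends up pointing to the typval of item 1 of that List.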
*/ char_u * get_lval( char_u *name, typval_T *rettv, lval_T *lp, int unlet, int skip, int flags, // GLV_ values int fne_flags) // flags for find_name_end() { char_u *p; char_u *expr_start, *expr_end; int cc; dictitem_T *v; typval_T var1; typval_T var2; int empty1 = FALSE; char_u *key = NULL; int len; hashtab_T *ht = NULL; int quiet = flags & GLV_QUIET; int writing; int vim9script = in_vim9script(); // Clear everything in "lp". CLEAR_POINTER(lp); if (skip || (flags & GLV_COMPILING)) { // When skipping or compiling just find the end of the name. lp->ll_name = name; lp->ll_name_end = find_name_end(name, NULL, NULL, FNE_INCL_BR | fne_flags); return lp->ll_name_end; } // Cannot use "s:var" at the Vim9 script level. "s: type" is OK. if (vim9script && at_script_level() && name[0] == 's' && name[1] == ':' && !VIM_ISWHITE(name[2])) { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), name); return NULL; } // Find the end of the name. p = find_name_end(name, &expr_start, &expr_end, fne_flags); lp->ll_name_end = p; if (expr_start != NULL) { // Don't expand the name when we already know there is an error. if (unlet && !VIM_ISWHITE(*p) && !ends_excmd(*p) && *p != '[' && *p != '.') { semsg(_(e_trailing_characters_str), p); return NULL; } lp->ll_exp_name = make_expanded_name(name, expr_start, expr_end, p); if (lp->ll_exp_name == NULL) { // Report an invalid expression in braces, unless the // expression evaluation has been cancelled due to an // aborting error, an interrupt, or an exception. if (!aborting() && !quiet) { emsg_severe = TRUE; semsg(_(e_invalid_argument_str), name); return NULL; } } lp->ll_name = lp->ll_exp_name; } else { lp->ll_name = name; if (vim9script) { // "a: type" is declaring variable "a" with a type, not "a:". // However, "g:[key]" is indexing a dictionary. if (p == name + 2 && p[-1] == ':' && *p != '[') { --p; lp->ll_name_end = p; } if (*p == ':') { char_u *tp = skipwhite(p + 1); if (tp == p + 1 && !quiet) { semsg(_(e_white_space_required_after_str_str), ":", p); return NULL; } if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) { semsg(_(e_using_type_not_in_script_context_str), p); return NULL; } // parse the type after the name lp->ll_type = parse_type(&tp, &SCRIPT_ITEM(current_sctx.sc_sid)->sn_type_list, !quiet); if (lp->ll_type == NULL && !quiet) return NULL; lp->ll_name_end = tp; } } } if (lp->ll_name == NULL) return p; if (*p == '.') { imported_T *import = find_imported(lp->ll_name, p - lp->ll_name, TRUE); if (import != NULL) { ufunc_T *ufunc; type_T *type; lp->ll_sid = import->imp_sid; lp->ll_name = skipwhite(p + 1); p = find_name_end(lp->ll_name, NULL, NULL, fne_flags); lp->ll_name_end = p; // check the item is exported cc = *p; *p = NUL; if (find_exported(import->imp_sid, lp->ll_name, &ufunc, &type, NULL, NULL, TRUE) == -1) { *p = cc; return NULL; } *p = cc; } } // Without [idx] or .key we are done. if ((*p != '[' && *p != '.')) return p; if (vim9script && lval_root != NULL) { // using local variable lp->ll_tv = lval_root; v = NULL; } else { cc = *p; *p = NUL; // When we would write to the variable pass &ht and prevent autoload. writing = !(flags & GLV_READ_ONLY); v = find_var(lp->ll_name, writing ? &ht : NULL, (flags & GLV_NO_AUTOLOAD) || writing); if (v == NULL && !quiet) semsg(_(e_undefined_variable_str), lp->ll_name); *p = cc; if (v == NULL) return NULL; lp->ll_tv = &v->di_tv; } if (vim9script && (flags & GLV_NO_DECL) == 0) { if (!quiet) semsg(_(e_variable_already_declared), lp->ll_name); return NULL; } /* * Loop until no more [idx] or .key is following. 
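     * Each round resolves one subscript level and advances "lp->ll_tv" to
     * the value found at that level.
     */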
*/ var1.v_type = VAR_UNKNOWN; var2.v_type = VAR_UNKNOWN; while (*p == '[' || (*p == '.' && p[1] != '=' && p[1] != '.')) { if (*p == '.' && lp->ll_tv->v_type != VAR_DICT) { if (!quiet) semsg(_(e_dot_can_only_be_used_on_dictionary_str), name); return NULL; } if (lp->ll_tv->v_type != VAR_LIST && lp->ll_tv->v_type != VAR_DICT && lp->ll_tv->v_type != VAR_BLOB) { if (!quiet) emsg(_(e_can_only_index_list_dictionary_or_blob)); return NULL; } // a NULL list/blob works like an empty list/blob, allocate one now. if (lp->ll_tv->v_type == VAR_LIST && lp->ll_tv->vval.v_list == NULL) rettv_list_alloc(lp->ll_tv); else if (lp->ll_tv->v_type == VAR_BLOB && lp->ll_tv->vval.v_blob == NULL) rettv_blob_alloc(lp->ll_tv); if (lp->ll_range) { if (!quiet) emsg(_(e_slice_must_come_last)); return NULL; } if (vim9script && lp->ll_valtype == NULL && v != NULL && lp->ll_tv == &v->di_tv && ht != NULL && ht == get_script_local_ht()) { svar_T *sv = find_typval_in_script(lp->ll_tv, 0, TRUE); // Vim9 script local variable: get the type if (sv != NULL) lp->ll_valtype = sv->sv_type; } len = -1; if (*p == '.') { key = p + 1; for (len = 0; ASCII_ISALNUM(key[len]) || key[len] == '_'; ++len) ; if (len == 0) { if (!quiet) emsg(_(e_cannot_use_empty_key_for_dictionary)); return NULL; } p = key + len; } else { // Get the index [expr] or the first index [expr: ]. p = skipwhite(p + 1); if (*p == ':') empty1 = TRUE; else { empty1 = FALSE; if (eval1(&p, &var1, &EVALARG_EVALUATE) == FAIL) // recursive! return NULL; if (tv_get_string_chk(&var1) == NULL) { // not a number or string clear_tv(&var1); return NULL; } p = skipwhite(p); } // Optionally get the second index [ :expr]. if (*p == ':') { if (lp->ll_tv->v_type == VAR_DICT) { if (!quiet) emsg(_(e_cannot_slice_dictionary)); clear_tv(&var1); return NULL; } if (rettv != NULL && !(rettv->v_type == VAR_LIST && rettv->vval.v_list != NULL) && !(rettv->v_type == VAR_BLOB && rettv->vval.v_blob != NULL)) { if (!quiet) emsg(_(e_slice_requires_list_or_blob_value)); clear_tv(&var1); return NULL; } p = skipwhite(p + 1); if (*p == ']') lp->ll_empty2 = TRUE; else { lp->ll_empty2 = FALSE; // recursive! if (eval1(&p, &var2, &EVALARG_EVALUATE) == FAIL) { clear_tv(&var1); return NULL; } if (tv_get_string_chk(&var2) == NULL) { // not a number or string clear_tv(&var1); clear_tv(&var2); return NULL; } } lp->ll_range = TRUE; } else lp->ll_range = FALSE; if (*p != ']') { if (!quiet) emsg(_(e_missing_closing_square_brace)); clear_tv(&var1); clear_tv(&var2); return NULL; } // Skip to past ']'. ++p; } if (lp->ll_tv->v_type == VAR_DICT) { if (len == -1) { // "[key]": get key from "var1" key = tv_get_string_chk(&var1); // is number or string if (key == NULL) { clear_tv(&var1); return NULL; } } lp->ll_list = NULL; // a NULL dict is equivalent with an empty dict if (lp->ll_tv->vval.v_dict == NULL) { lp->ll_tv->vval.v_dict = dict_alloc(); if (lp->ll_tv->vval.v_dict == NULL) { clear_tv(&var1); return NULL; } ++lp->ll_tv->vval.v_dict->dv_refcount; } lp->ll_dict = lp->ll_tv->vval.v_dict; lp->ll_di = dict_find(lp->ll_dict, key, len); // When assigning to a scope dictionary check that a function and // variable name is valid (only variable name unless it is l: or // g: dictionary). Disallow overwriting a builtin function. 
if (rettv != NULL && lp->ll_dict->dv_scope != 0) { int prevval; int wrong; if (len != -1) { prevval = key[len]; key[len] = NUL; } else prevval = 0; // avoid compiler warning wrong = (lp->ll_dict->dv_scope == VAR_DEF_SCOPE && rettv->v_type == VAR_FUNC && var_wrong_func_name(key, lp->ll_di == NULL)) || !valid_varname(key, -1, TRUE); if (len != -1) key[len] = prevval; if (wrong) { clear_tv(&var1); return NULL; } } if (lp->ll_valtype != NULL) // use the type of the member lp->ll_valtype = lp->ll_valtype->tt_member; if (lp->ll_di == NULL) { // Can't add "v:" or "a:" variable. if (lp->ll_dict == get_vimvar_dict() || &lp->ll_dict->dv_hashtab == get_funccal_args_ht()) { semsg(_(e_illegal_variable_name_str), name); clear_tv(&var1); return NULL; } // Key does not exist in dict: may need to add it. if (*p == '[' || *p == '.' || unlet) { if (!quiet) semsg(_(e_key_not_present_in_dictionary), key); clear_tv(&var1); return NULL; } if (len == -1) lp->ll_newkey = vim_strsave(key); else lp->ll_newkey = vim_strnsave(key, len); clear_tv(&var1); if (lp->ll_newkey == NULL) p = NULL; break; } // existing variable, need to check if it can be changed else if ((flags & GLV_READ_ONLY) == 0 && (var_check_ro(lp->ll_di->di_flags, name, FALSE) || var_check_lock(lp->ll_di->di_flags, name, FALSE))) { clear_tv(&var1); return NULL; } clear_tv(&var1); lp->ll_tv = &lp->ll_di->di_tv; } else if (lp->ll_tv->v_type == VAR_BLOB) { long bloblen = blob_len(lp->ll_tv->vval.v_blob); /* * Get the number and item for the only or first index of the List. */ if (empty1) lp->ll_n1 = 0; else // is number or string lp->ll_n1 = (long)tv_get_number(&var1); clear_tv(&var1); if (check_blob_index(bloblen, lp->ll_n1, quiet) == FAIL) { clear_tv(&var2); return NULL; } if (lp->ll_range && !lp->ll_empty2) { lp->ll_n2 = (long)tv_get_number(&var2); clear_tv(&var2); if (check_blob_range(bloblen, lp->ll_n1, lp->ll_n2, quiet) == FAIL) return NULL; } lp->ll_blob = lp->ll_tv->vval.v_blob; lp->ll_tv = NULL; break; } else { /* * Get the number and item for the only or first index of the List. */ if (empty1) lp->ll_n1 = 0; else // is number or string lp->ll_n1 = (long)tv_get_number(&var1); clear_tv(&var1); lp->ll_dict = NULL; lp->ll_list = lp->ll_tv->vval.v_list; lp->ll_li = check_range_index_one(lp->ll_list, &lp->ll_n1, (flags & GLV_ASSIGN_WITH_OP) == 0, quiet); if (lp->ll_li == NULL) { clear_tv(&var2); return NULL; } if (lp->ll_valtype != NULL) // use the type of the member lp->ll_valtype = lp->ll_valtype->tt_member; /* * May need to find the item or absolute index for the second * index of a range. * When no index given: "lp->ll_empty2" is TRUE. * Otherwise "lp->ll_n2" is set to the second index. */ if (lp->ll_range && !lp->ll_empty2) { lp->ll_n2 = (long)tv_get_number(&var2); // is number or string clear_tv(&var2); if (check_range_index_two(lp->ll_list, &lp->ll_n1, lp->ll_li, &lp->ll_n2, quiet) == FAIL) return NULL; } lp->ll_tv = &lp->ll_li->li_tv; } } clear_tv(&var1); lp->ll_name_end = p; return p; } /* * Clear lval "lp" that was filled by get_lval(). */ void clear_lval(lval_T *lp) { vim_free(lp->ll_exp_name); vim_free(lp->ll_newkey); } /* * Set a variable that was parsed by get_lval() to "rettv". * "endp" points to just after the parsed name. * "op" is NULL, "+" for "+=", "-" for "-=", "*" for "*=", "/" for "/=", * "%" for "%=", "." for ".=" or "=" for "=". 
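 * The byte at "endp" is temporarily overwritten with a NUL to isolate the
 * name.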
*/ void set_var_lval( lval_T *lp, char_u *endp, typval_T *rettv, int copy, int flags, // ASSIGN_CONST, ASSIGN_NO_DECL char_u *op, int var_idx) // index for "let [a, b] = list" { int cc; dictitem_T *di; if (lp->ll_tv == NULL) { cc = *endp; *endp = NUL; if (in_vim9script() && check_reserved_name(lp->ll_name) == FAIL) return; if (lp->ll_blob != NULL) { int error = FALSE, val; if (op != NULL && *op != '=') { semsg(_(e_wrong_variable_type_for_str_equal), op); return; } if (value_check_lock(lp->ll_blob->bv_lock, lp->ll_name, FALSE)) return; if (lp->ll_range && rettv->v_type == VAR_BLOB) { if (lp->ll_empty2) lp->ll_n2 = blob_len(lp->ll_blob) - 1; if (blob_set_range(lp->ll_blob, lp->ll_n1, lp->ll_n2, rettv) == FAIL) return; } else { val = (int)tv_get_number_chk(rettv, &error); if (!error) blob_set_append(lp->ll_blob, lp->ll_n1, val); } } else if (op != NULL && *op != '=') { typval_T tv; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_modify_existing_variable)); *endp = cc; return; } // handle +=, -=, *=, /=, %= and .= di = NULL; if (eval_variable(lp->ll_name, (int)STRLEN(lp->ll_name), lp->ll_sid, &tv, &di, EVAL_VAR_VERBOSE) == OK) { if ((di == NULL || (!var_check_ro(di->di_flags, lp->ll_name, FALSE) && !tv_check_lock(&di->di_tv, lp->ll_name, FALSE))) && tv_op(&tv, rettv, op) == OK) set_var_const(lp->ll_name, lp->ll_sid, NULL, &tv, FALSE, ASSIGN_NO_DECL, 0); clear_tv(&tv); } } else { if (lp->ll_type != NULL && check_typval_arg_type(lp->ll_type, rettv, NULL, 0) == FAIL) return; set_var_const(lp->ll_name, lp->ll_sid, lp->ll_type, rettv, copy, flags, var_idx); } *endp = cc; } else if (value_check_lock(lp->ll_newkey == NULL ? lp->ll_tv->v_lock : lp->ll_tv->vval.v_dict->dv_lock, lp->ll_name, FALSE)) ; else if (lp->ll_range) { if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_range)); return; } (void)list_assign_range(lp->ll_list, rettv->vval.v_list, lp->ll_n1, lp->ll_n2, lp->ll_empty2, op, lp->ll_name); } else { /* * Assign to a List or Dictionary item. */ if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_list_or_dict)); return; } if (lp->ll_valtype != NULL && check_typval_arg_type(lp->ll_valtype, rettv, NULL, 0) == FAIL) return; if (lp->ll_newkey != NULL) { if (op != NULL && *op != '=') { semsg(_(e_key_not_present_in_dictionary), lp->ll_newkey); return; } if (dict_wrong_func_name(lp->ll_tv->vval.v_dict, rettv, lp->ll_newkey)) return; // Need to add an item to the Dictionary. di = dictitem_alloc(lp->ll_newkey); if (di == NULL) return; if (dict_add(lp->ll_tv->vval.v_dict, di) == FAIL) { vim_free(di); return; } lp->ll_tv = &di->di_tv; } else if (op != NULL && *op != '=') { tv_op(lp->ll_tv, rettv, op); return; } else clear_tv(lp->ll_tv); /* * Assign the value to the variable or list item. */ if (copy) copy_tv(rettv, lp->ll_tv); else { *lp->ll_tv = *rettv; lp->ll_tv->v_lock = 0; init_tv(rettv); } } } /* * Handle "tv1 += tv2", "tv1 -= tv2", "tv1 *= tv2", "tv1 /= tv2", "tv1 %= tv2" * and "tv1 .= tv2" * Returns OK or FAIL. */ int tv_op(typval_T *tv1, typval_T *tv2, char_u *op) { varnumber_T n; char_u numbuf[NUMBUFLEN]; char_u *s; int failed = FALSE; // Can't do anything with a Funcref or Dict on the right. // v:true and friends only work with "..=". 
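    // Note: "*op" is '.' for both ".=" and "..=".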
if (tv2->v_type != VAR_FUNC && tv2->v_type != VAR_DICT && ((tv2->v_type != VAR_BOOL && tv2->v_type != VAR_SPECIAL) || *op == '.')) { switch (tv1->v_type) { case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: case VAR_DICT: case VAR_FUNC: case VAR_PARTIAL: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: break; case VAR_BLOB: if (*op != '+' || tv2->v_type != VAR_BLOB) break; // BLOB += BLOB if (tv1->vval.v_blob != NULL && tv2->vval.v_blob != NULL) { blob_T *b1 = tv1->vval.v_blob; blob_T *b2 = tv2->vval.v_blob; int i, len = blob_len(b2); for (i = 0; i < len; i++) ga_append(&b1->bv_ga, blob_get(b2, i)); } return OK; case VAR_LIST: if (*op != '+' || tv2->v_type != VAR_LIST) break; // List += List if (tv2->vval.v_list != NULL) { if (tv1->vval.v_list == NULL) { tv1->vval.v_list = tv2->vval.v_list; ++tv1->vval.v_list->lv_refcount; } else list_extend(tv1->vval.v_list, tv2->vval.v_list, NULL); } return OK; case VAR_NUMBER: case VAR_STRING: if (tv2->v_type == VAR_LIST) break; if (vim_strchr((char_u *)"+-*/%", *op) != NULL) { // nr += nr , nr -= nr , nr *=nr , nr /= nr , nr %= nr n = tv_get_number(tv1); #ifdef FEAT_FLOAT if (tv2->v_type == VAR_FLOAT) { float_T f = n; if (*op == '%') break; switch (*op) { case '+': f += tv2->vval.v_float; break; case '-': f -= tv2->vval.v_float; break; case '*': f *= tv2->vval.v_float; break; case '/': f /= tv2->vval.v_float; break; } clear_tv(tv1); tv1->v_type = VAR_FLOAT; tv1->vval.v_float = f; } else #endif { switch (*op) { case '+': n += tv_get_number(tv2); break; case '-': n -= tv_get_number(tv2); break; case '*': n *= tv_get_number(tv2); break; case '/': n = num_divide(n, tv_get_number(tv2), &failed); break; case '%': n = num_modulus(n, tv_get_number(tv2), &failed); break; } clear_tv(tv1); tv1->v_type = VAR_NUMBER; tv1->vval.v_number = n; } } else { if (tv2->v_type == VAR_FLOAT) break; // str .= str s = tv_get_string(tv1); s = concat_str(s, tv_get_string_buf(tv2, numbuf)); clear_tv(tv1); tv1->v_type = VAR_STRING; tv1->vval.v_string = s; } return failed ? FAIL : OK; case VAR_FLOAT: #ifdef FEAT_FLOAT { float_T f; if (*op == '%' || *op == '.' || (tv2->v_type != VAR_FLOAT && tv2->v_type != VAR_NUMBER && tv2->v_type != VAR_STRING)) break; if (tv2->v_type == VAR_FLOAT) f = tv2->vval.v_float; else f = tv_get_number(tv2); switch (*op) { case '+': tv1->vval.v_float += f; break; case '-': tv1->vval.v_float -= f; break; case '*': tv1->vval.v_float *= f; break; case '/': tv1->vval.v_float /= f; break; } } #endif return OK; } } semsg(_(e_wrong_variable_type_for_str_equal), op); return FAIL; } /* * Evaluate the expression used in a ":for var in expr" command. * "arg" points to "var". * Set "*errp" to TRUE for an error, FALSE otherwise; * Return a pointer that holds the info. Null when there is an error. 
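 * The returned pointer is a forinfo_T; it is handed to next_for_item() for
 * each iteration and must be freed with free_for_info().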
*/ void * eval_for_line( char_u *arg, int *errp, exarg_T *eap, evalarg_T *evalarg) { forinfo_T *fi; char_u *var_list_end; char_u *expr; typval_T tv; list_T *l; int skip = !(evalarg->eval_flags & EVAL_EVALUATE); *errp = TRUE; // default: there is an error fi = ALLOC_CLEAR_ONE(forinfo_T); if (fi == NULL) return NULL; var_list_end = skip_var_list(arg, TRUE, &fi->fi_varcount, &fi->fi_semicolon, FALSE); if (var_list_end == NULL) return fi; expr = skipwhite_and_linebreak(var_list_end, evalarg); if (expr[0] != 'i' || expr[1] != 'n' || !(expr[2] == NUL || VIM_ISWHITE(expr[2]))) { if (in_vim9script() && *expr == ':' && expr != var_list_end) semsg(_(e_no_white_space_allowed_before_colon_str), expr); else emsg(_(e_missing_in_after_for)); return fi; } if (skip) ++emsg_skip; expr = skipwhite_and_linebreak(expr + 2, evalarg); if (eval0(expr, &tv, eap, evalarg) == OK) { *errp = FALSE; if (!skip) { if (tv.v_type == VAR_LIST) { l = tv.vval.v_list; if (l == NULL) { // a null list is like an empty list: do nothing clear_tv(&tv); } else { // Need a real list here. CHECK_LIST_MATERIALIZE(l); // No need to increment the refcount, it's already set for // the list being used in "tv". fi->fi_list = l; list_add_watch(l, &fi->fi_lw); fi->fi_lw.lw_item = l->lv_first; } } else if (tv.v_type == VAR_BLOB) { fi->fi_bi = 0; if (tv.vval.v_blob != NULL) { typval_T btv; // Make a copy, so that the iteration still works when the // blob is changed. blob_copy(tv.vval.v_blob, &btv); fi->fi_blob = btv.vval.v_blob; } clear_tv(&tv); } else if (tv.v_type == VAR_STRING) { fi->fi_byte_idx = 0; fi->fi_string = tv.vval.v_string; tv.vval.v_string = NULL; if (fi->fi_string == NULL) fi->fi_string = vim_strsave((char_u *)""); } else { emsg(_(e_string_list_or_blob_required)); clear_tv(&tv); } } } if (skip) --emsg_skip; fi->fi_break_count = evalarg->eval_break_count; return fi; } /* * Used when looping over a :for line, skip the "in expr" part. */ void skip_for_lines(void *fi_void, evalarg_T *evalarg) { forinfo_T *fi = (forinfo_T *)fi_void; int i; for (i = 0; i < fi->fi_break_count; ++i) eval_next_line(NULL, evalarg); } /* * Use the first item in a ":for" list. Advance to the next. * Assign the values to the variable (list). "arg" points to the first one. * Return TRUE when a valid item was found, FALSE when at end of list or * something wrong. */ int next_for_item(void *fi_void, char_u *arg) { forinfo_T *fi = (forinfo_T *)fi_void; int result; int flag = ASSIGN_FOR_LOOP | (in_vim9script() ? (ASSIGN_FINAL // first round: error if variable exists | (fi->fi_bi == 0 ? 
0 : ASSIGN_DECL) | ASSIGN_NO_MEMBER_TYPE) : 0); listitem_T *item; int skip_assign = in_vim9script() && arg[0] == '_' && !eval_isnamec(arg[1]); if (fi->fi_blob != NULL) { typval_T tv; if (fi->fi_bi >= blob_len(fi->fi_blob)) return FALSE; tv.v_type = VAR_NUMBER; tv.v_lock = VAR_FIXED; tv.vval.v_number = blob_get(fi->fi_blob, fi->fi_bi); ++fi->fi_bi; if (skip_assign) return TRUE; return ex_let_vars(arg, &tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK; } if (fi->fi_string != NULL) { typval_T tv; int len; len = mb_ptr2len(fi->fi_string + fi->fi_byte_idx); if (len == 0) return FALSE; tv.v_type = VAR_STRING; tv.v_lock = VAR_FIXED; tv.vval.v_string = vim_strnsave(fi->fi_string + fi->fi_byte_idx, len); fi->fi_byte_idx += len; ++fi->fi_bi; if (skip_assign) result = TRUE; else result = ex_let_vars(arg, &tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK; vim_free(tv.vval.v_string); return result; } item = fi->fi_lw.lw_item; if (item == NULL) result = FALSE; else { fi->fi_lw.lw_item = item->li_next; ++fi->fi_bi; if (skip_assign) result = TRUE; else result = (ex_let_vars(arg, &item->li_tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK); } return result; } /* * Free the structure used to store info used by ":for". */ void free_for_info(void *fi_void) { forinfo_T *fi = (forinfo_T *)fi_void; if (fi == NULL) return; if (fi->fi_list != NULL) { list_rem_watch(fi->fi_list, &fi->fi_lw); list_unref(fi->fi_list); } else if (fi->fi_blob != NULL) blob_unref(fi->fi_blob); else vim_free(fi->fi_string); vim_free(fi); } void set_context_for_expression( expand_T *xp, char_u *arg, cmdidx_T cmdidx) { int has_expr = cmdidx != CMD_let && cmdidx != CMD_var; int c; char_u *p; if (cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_const || cmdidx == CMD_final) { xp->xp_context = EXPAND_USER_VARS; if (vim_strpbrk(arg, (char_u *)"\"'+-*/%.=!?~|&$([<>,#") == NULL) { // ":let var1 var2 ...": find last space. for (p = arg + STRLEN(arg); p >= arg; ) { xp->xp_pattern = p; MB_PTR_BACK(arg, p); if (VIM_ISWHITE(*p)) break; } return; } } else xp->xp_context = cmdidx == CMD_call ? EXPAND_FUNCTIONS : EXPAND_EXPRESSION; while ((xp->xp_pattern = vim_strpbrk(arg, (char_u *)"\"'+-*/%.=!?~|&$([<>,#")) != NULL) { c = *xp->xp_pattern; if (c == '&') { c = xp->xp_pattern[1]; if (c == '&') { ++xp->xp_pattern; xp->xp_context = has_expr ? EXPAND_EXPRESSION : EXPAND_NOTHING; } else if (c != ' ') { xp->xp_context = EXPAND_SETTINGS; if ((c == 'l' || c == 'g') && xp->xp_pattern[2] == ':') xp->xp_pattern += 2; } } else if (c == '$') { // environment variable xp->xp_context = EXPAND_ENV_VARS; } else if (c == '=') { has_expr = TRUE; xp->xp_context = EXPAND_EXPRESSION; } else if (c == '#' && xp->xp_context == EXPAND_EXPRESSION) { // Autoload function/variable contains '#'. break; } else if ((c == '<' || c == '#') && xp->xp_context == EXPAND_FUNCTIONS && vim_strchr(xp->xp_pattern, '(') == NULL) { // Function name can start with "<SNR>" and contain '#'. break; } else if (has_expr) { if (c == '"') // string { while ((c = *++xp->xp_pattern) != NUL && c != '"') if (c == '\\' && xp->xp_pattern[1] != NUL) ++xp->xp_pattern; xp->xp_context = EXPAND_NOTHING; } else if (c == '\'') // literal string { // Trick: '' is like stopping and starting a literal string. 
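                // e.g. in 'don''t' the scan below stops at the first ' of
                // the pair; the next round then sees the second ' as the
                // start of another literal string.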
		while ((c = *++xp->xp_pattern) != NUL && c != '\'')
		    /* skip */ ;
		xp->xp_context = EXPAND_NOTHING;
	    }
	    else if (c == '|')
	    {
		if (xp->xp_pattern[1] == '|')
		{
		    ++xp->xp_pattern;
		    xp->xp_context = EXPAND_EXPRESSION;
		}
		else
		    xp->xp_context = EXPAND_COMMANDS;
	    }
	    else
		xp->xp_context = EXPAND_EXPRESSION;
	}
	else
	    // Doesn't look like something valid, expand as an expression
	    // anyway.
	    xp->xp_context = EXPAND_EXPRESSION;
	arg = xp->xp_pattern;
	if (*arg != NUL)
	    while ((c = *++arg) != NUL && (c == ' ' || c == '\t'))
		/* skip */ ;
    }

    // ":exe one two" completes "two"
    if ((cmdidx == CMD_execute
		|| cmdidx == CMD_echo
		|| cmdidx == CMD_echon
		|| cmdidx == CMD_echomsg)
	    && xp->xp_context == EXPAND_EXPRESSION)
    {
	for (;;)
	{
	    char_u *n = skiptowhite(arg);

	    if (n == arg || IS_WHITE_OR_NUL(*skipwhite(n)))
		break;
	    arg = skipwhite(n);
	}
    }

    xp->xp_pattern = arg;
}

/*
 * Return TRUE if "pat" matches "text".
 * Does not use 'cpo' and always uses 'magic'.
 */
    int
pattern_match(char_u *pat, char_u *text, int ic)
{
    int		matches = FALSE;
    char_u	*save_cpo;
    regmatch_T	regmatch;

    // avoid 'l' flag in 'cpoptions'
    save_cpo = p_cpo;
    p_cpo = empty_option;
    regmatch.regprog = vim_regcomp(pat, RE_MAGIC + RE_STRING);
    if (regmatch.regprog != NULL)
    {
	regmatch.rm_ic = ic;
	matches = vim_regexec_nl(&regmatch, text, (colnr_T)0);
	vim_regfree(regmatch.regprog);
    }
    p_cpo = save_cpo;
    return matches;
}

/*
 * Handle a name followed by "(".  Both for just "name(arg)" and for
 * "expr->name(arg)".
 * Returns OK or FAIL.
 */
    static int
eval_func(
	char_u	    **arg,	// points to "(", will be advanced
	evalarg_T   *evalarg,
	char_u	    *name,
	int	    name_len,
	typval_T    *rettv,
	int	    flags,
	typval_T    *basetv)	// "expr" for "expr->name(arg)"
{
    int		evaluate = flags & EVAL_EVALUATE;
    char_u	*s = name;
    int		len = name_len;
    partial_T	*partial;
    int		ret = OK;
    type_T	*type = NULL;
    int		found_var = FALSE;

    if (!evaluate)
	check_vars(s, len);

    // If "s" is the name of a variable of type VAR_FUNC
    // use its contents.
    s = deref_func_name(s, &len, &partial,
				in_vim9script() ? &type : NULL,
					     !evaluate, FALSE, &found_var);

    // Need to make a copy, in case evaluating the arguments makes
    // the name invalid.
    s = vim_strsave(s);
    if (s == NULL || (evaluate && (*s == NUL || (flags & EVAL_CONSTANT))))
	ret = FAIL;
    else
    {
	funcexe_T funcexe;

	// Invoke the function.
	CLEAR_FIELD(funcexe);
	funcexe.fe_firstline = curwin->w_cursor.lnum;
	funcexe.fe_lastline = curwin->w_cursor.lnum;
	funcexe.fe_evaluate = evaluate;
	funcexe.fe_partial = partial;
	funcexe.fe_basetv = basetv;
	funcexe.fe_check_type = type;
	funcexe.fe_found_var = found_var;
	ret = get_func_tv(s, len, rettv, arg, evalarg, &funcexe);
    }
    vim_free(s);

    // If evaluate is FALSE rettv->v_type was not set in
    // get_func_tv, but it's needed in handle_subscript() to parse
    // what follows.  So set it here.
    if (rettv->v_type == VAR_UNKNOWN && !evaluate && **arg == '(')
    {
	rettv->vval.v_string = NULL;
	rettv->v_type = VAR_FUNC;
    }

    // Stop the expression evaluation when immediately
    // aborting on error, or when an interrupt occurred or
    // an exception was thrown but not caught.
    if (evaluate && aborting())
    {
	if (ret == OK)
	    clear_tv(rettv);
	ret = FAIL;
    }
    return ret;
}

/*
 * After a NL, skip over empty lines and comment-only lines.
 */
    static char_u *
newline_skip_comments(char_u *arg)
{
    char_u *p = arg + 1;

    for (;;)
    {
	p = skipwhite(p);

	if (*p == NUL)
	    break;
	if (vim9_comment_start(p))
	{
	    char_u *nl = vim_strchr(p, NL);

	    if (nl == NULL)
		break;
	    p = nl;
	}
	if (*p != NL)
	    break;
	++p;  // skip another NL
    }
    return p;
}

/*
 * Get the next source line without advancing.  But do skip over comment
 * lines.
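 * Skipped empty and comment-only lines are consumed with eval_next_line(),
 * so the caller will not see them again.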
* Only called for Vim9 script. */ static char_u * getline_peek_skip_comments(evalarg_T *evalarg) { for (;;) { char_u *next = getline_peek(evalarg->eval_getline, evalarg->eval_cookie); char_u *p; if (next == NULL) break; p = skipwhite(next); if (*p != NUL && !vim9_comment_start(p)) return next; if (eval_next_line(NULL, evalarg) == NULL) break; } return NULL; } /* * If inside Vim9 script, "arg" points to the end of a line (ignoring a # * comment) and there is a next line, return the next line (skipping blanks) * and set "getnext". * Otherwise return the next non-white at or after "arg" and set "getnext" to * FALSE. * "arg" must point somewhere inside a line, not at the start. */ char_u * eval_next_non_blank(char_u *arg, evalarg_T *evalarg, int *getnext) { char_u *p = skipwhite(arg); *getnext = FALSE; if (in_vim9script() && evalarg != NULL && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL || *p == NL) && (*p == NUL || *p == NL || (vim9_comment_start(p) && VIM_ISWHITE(p[-1])))) { char_u *next; if (*p == NL) next = newline_skip_comments(p); else if (evalarg->eval_cookie != NULL) next = getline_peek_skip_comments(evalarg); else next = peek_next_line_from_context(evalarg->eval_cctx); if (next != NULL) { *getnext = TRUE; return skipwhite(next); } } return p; } /* * To be called after eval_next_non_blank() sets "getnext" to TRUE. * Only called for Vim9 script. */ char_u * eval_next_line(char_u *arg, evalarg_T *evalarg) { garray_T *gap = &evalarg->eval_ga; char_u *line; if (arg != NULL) { if (*arg == NL) return newline_skip_comments(arg); // Truncate before a trailing comment, so that concatenating the lines // won't turn the rest into a comment. if (*skipwhite(arg) == '#') *arg = NUL; } if (evalarg->eval_cookie != NULL) line = evalarg->eval_getline(0, evalarg->eval_cookie, 0, GETLINE_CONCAT_ALL); else line = next_line_from_context(evalarg->eval_cctx, TRUE); if (line == NULL) return NULL; ++evalarg->eval_break_count; if (gap->ga_itemsize > 0 && ga_grow(gap, 1) == OK) { char_u *p = skipwhite(line); // Going to concatenate the lines after parsing. For an empty or // comment line use an empty string. if (*p == NUL || vim9_comment_start(p)) { vim_free(line); line = vim_strsave((char_u *)""); } ((char_u **)gap->ga_data)[gap->ga_len] = line; ++gap->ga_len; } else if (evalarg->eval_cookie != NULL) { free_eval_tofree_later(evalarg); evalarg->eval_tofree = line; } // Advanced to the next line, "arg" no longer points into the previous // line. evalarg->eval_using_cmdline = FALSE; return skipwhite(line); } /* * Call eval_next_non_blank() and get the next line if needed. */ char_u * skipwhite_and_linebreak(char_u *arg, evalarg_T *evalarg) { int getnext; char_u *p = skipwhite_and_nl(arg); if (evalarg == NULL) return skipwhite(arg); eval_next_non_blank(p, evalarg, &getnext); if (getnext) return eval_next_line(arg, evalarg); return p; } /* * The "evaluate" argument: When FALSE, the argument is only parsed but not * executed. The function may return OK, but the rettv will be of type * VAR_UNKNOWN. The function still returns FAIL for a syntax error. */ /* * Handle zero level expression. * This calls eval1() and handles error message and nextcmd. * Put the result in "rettv" when returning OK and "evaluate" is TRUE. * Note: "rettv.v_lock" is not set. * "evalarg" can be NULL, EVALARG_EVALUATE or a pointer. * Return OK or FAIL. 
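 * Typical use (a sketch, details vary per caller): fill an evalarg_T with
 * EVAL_EVALUATE set, call eval0(), and when OK is returned consume "rettv"
 * and dispose of it with clear_tv().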
*/ int eval0( char_u *arg, typval_T *rettv, exarg_T *eap, evalarg_T *evalarg) { return eval0_retarg(arg, rettv, eap, evalarg, NULL); } /* * Like eval0() but when "retarg" is not NULL store the pointer to after the * expression and don't check what comes after the expression. */ int eval0_retarg( char_u *arg, typval_T *rettv, exarg_T *eap, evalarg_T *evalarg, char_u **retarg) { int ret; char_u *p; char_u *expr_end; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; int flags = evalarg == NULL ? 0 : evalarg->eval_flags; int check_for_end = retarg == NULL; int end_error = FALSE; p = skipwhite(arg); ret = eval1(&p, rettv, evalarg); if (ret != FAIL) { expr_end = p; p = skipwhite(p); // In Vim9 script a command block is not split at NL characters for // commands using an expression argument. Skip over a '#' comment to // check for a following NL. Require white space before the '#'. if (in_vim9script() && p > expr_end && retarg == NULL) while (*p == '#') { char_u *nl = vim_strchr(p, NL); if (nl == NULL) break; p = skipwhite(nl + 1); if (eap != NULL && *p != NUL) eap->nextcmd = p; check_for_end = FALSE; } if (check_for_end) end_error = !ends_excmd2(arg, p); } if (ret == FAIL || end_error) { if (ret != FAIL) clear_tv(rettv); /* * Report the invalid expression unless the expression evaluation has * been cancelled due to an aborting error, an interrupt, or an * exception, or we already gave a more specific error. * Also check called_emsg for when using assert_fails(). */ if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before && (flags & EVAL_CONSTANT) == 0 && (!in_vim9script() || !vim9_bad_comment(p))) { if (end_error) semsg(_(e_trailing_characters_str), p); else semsg(_(e_invalid_expression_str), arg); } // Some of the expression may not have been consumed. Do not check for // a next command to avoid more errors, unless "|" is following, which // could only be a command separator. if (eap != NULL && p != NULL && skipwhite(p)[0] == '|' && skipwhite(p)[1] != '|') eap->nextcmd = check_nextcmd(p); return FAIL; } if (retarg != NULL) *retarg = p; else if (check_for_end && eap != NULL) set_nextcmd(eap, p); return ret; } /* * Handle top level expression: * expr2 ? expr1 : expr1 * expr2 ?? expr1 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Note: "rettv.v_lock" is not set. * * Return OK or FAIL. */ int eval1(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; CLEAR_POINTER(rettv); /* * Get the first variable. */ if (eval2(arg, rettv, evalarg) == FAIL) return FAIL; p = eval_next_non_blank(*arg, evalarg, &getnext); if (*p == '?') { int op_falsy = p[1] == '?'; int result; typval_T var2; evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int orig_flags; int evaluate; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = evalarg_used->eval_flags & EVAL_EVALUATE; if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, op_falsy ? 
2 : 1); clear_tv(rettv); return FAIL; } *arg = p; } result = FALSE; if (evaluate) { int error = FALSE; if (op_falsy) result = tv2bool(rettv); else if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) != 0) result = TRUE; if (error || !op_falsy || !result) clear_tv(rettv); if (error) return FAIL; } /* * Get the second variable. Recursive! */ if (op_falsy) ++*arg; if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg - (op_falsy ? 1 : 0), op_falsy ? 2 : 1); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg_used); evalarg_used->eval_flags = (op_falsy ? !result : result) ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval1(arg, &var2, evalarg_used) == FAIL) { evalarg_used->eval_flags = orig_flags; return FAIL; } if (!op_falsy || !result) *rettv = var2; if (!op_falsy) { /* * Check for the ":". */ p = eval_next_non_blank(*arg, evalarg_used, &getnext); if (*p != ':') { emsg(_(e_missing_colon_after_questionmark)); if (evaluate && result) clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 1); clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } *arg = p; } /* * Get the third variable. Recursive! */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg, 1); clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg_used); evalarg_used->eval_flags = !result ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval1(arg, &var2, evalarg_used) == FAIL) { if (evaluate && result) clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } if (evaluate && !result) *rettv = var2; } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle first level expression: * expr2 || expr2 || expr2 logical OR * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval2(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; /* * Get the first expression. */ if (eval3(arg, rettv, evalarg) == FAIL) return FAIL; /* * Handle the "||" operator. */ p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '|' && p[1] == '|') { evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int evaluate; int orig_flags; long result = FALSE; typval_T var2; int error = FALSE; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = orig_flags & EVAL_EVALUATE; if (evaluate) { if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) != 0) result = TRUE; clear_tv(rettv); if (error) return FAIL; } /* * Repeat until there is no following "||". */ while (p[0] == '|' && p[1] == '|') { if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = p; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[2])) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 2, evalarg_used); evalarg_used->eval_flags = !result ? 
orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval3(arg, &var2, evalarg_used) == FAIL) return FAIL; /* * Compute the result. */ if (evaluate && !result) { if (vim9script) result = tv_get_bool_chk(&var2, &error); else if (tv_get_number_chk(&var2, &error) != 0) result = TRUE; clear_tv(&var2); if (error) return FAIL; } if (evaluate) { if (vim9script) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = result ? VVAL_TRUE : VVAL_FALSE; } else { rettv->v_type = VAR_NUMBER; rettv->vval.v_number = result; } } p = eval_next_non_blank(*arg, evalarg_used, &getnext); } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle second level expression: * expr3 && expr3 && expr3 logical AND * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval3(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; /* * Get the first expression. */ if (eval4(arg, rettv, evalarg) == FAIL) return FAIL; /* * Handle the "&&" operator. */ p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '&' && p[1] == '&') { evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int orig_flags; int evaluate; long result = TRUE; typval_T var2; int error = FALSE; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = orig_flags & EVAL_EVALUATE; if (evaluate) { if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) == 0) result = FALSE; clear_tv(rettv); if (error) return FAIL; } /* * Repeat until there is no following "&&". */ while (p[0] == '&' && p[1] == '&') { if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = p; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[2])) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 2, evalarg_used); evalarg_used->eval_flags = result ? orig_flags : orig_flags & ~EVAL_EVALUATE; CLEAR_FIELD(var2); if (eval4(arg, &var2, evalarg_used) == FAIL) return FAIL; /* * Compute the result. */ if (evaluate && result) { if (vim9script) result = tv_get_bool_chk(&var2, &error); else if (tv_get_number_chk(&var2, &error) == 0) result = FALSE; clear_tv(&var2); if (error) return FAIL; } if (evaluate) { if (vim9script) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = result ? VVAL_TRUE : VVAL_FALSE; } else { rettv->v_type = VAR_NUMBER; rettv->vval.v_number = result; } } p = eval_next_non_blank(*arg, evalarg_used, &getnext); } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle third level expression: * var1 == var2 * var1 =~ var2 * var1 != var2 * var1 !~ var2 * var1 > var2 * var1 >= var2 * var1 < var2 * var1 <= var2 * var1 is var2 * var1 isnot var2 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval4(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; exprtype_T type = EXPR_UNKNOWN; int len = 2; int type_is = FALSE; /* * Get the first expression. 
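 * (the left operand of a possible comparison)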
*/ if (eval5(arg, rettv, evalarg) == FAIL) return FAIL; p = eval_next_non_blank(*arg, evalarg, &getnext); type = get_compare_type(p, &len, &type_is); /* * If there is a comparative operator, use it. */ if (type != EXPR_UNKNOWN) { typval_T var2; int ic; int vim9script = in_vim9script(); int evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); long comp_lnum = SOURCING_LNUM; if (getnext) { *arg = eval_next_line(*arg, evalarg); p = *arg; } else if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, len); clear_tv(rettv); return FAIL; } if (vim9script && type_is && (p[len] == '?' || p[len] == '#')) { semsg(_(e_invalid_expression_str), p); clear_tv(rettv); return FAIL; } // extra question mark appended: ignore case if (p[len] == '?') { ic = TRUE; ++len; } // extra '#' appended: match case else if (p[len] == '#') { ic = FALSE; ++len; } // nothing appended: use 'ignorecase' if not in Vim script else ic = vim9script ? FALSE : p_ic; /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL(p[len])) { error_white_both(p, len); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(p + len, evalarg); if (eval5(arg, &var2, evalarg) == FAIL) { clear_tv(rettv); return FAIL; } if (evaluate) { int ret; // use the line of the comparison for messages SOURCING_LNUM = comp_lnum; if (vim9script && check_compare_types(type, rettv, &var2) == FAIL) { ret = FAIL; clear_tv(rettv); } else ret = typval_compare(rettv, &var2, type, ic); clear_tv(&var2); return ret; } } return OK; } /* * Make a copy of blob "tv1" and append blob "tv2". */ void eval_addblob(typval_T *tv1, typval_T *tv2) { blob_T *b1 = tv1->vval.v_blob; blob_T *b2 = tv2->vval.v_blob; blob_T *b = blob_alloc(); int i; if (b != NULL) { for (i = 0; i < blob_len(b1); i++) ga_append(&b->bv_ga, blob_get(b1, i)); for (i = 0; i < blob_len(b2); i++) ga_append(&b->bv_ga, blob_get(b2, i)); clear_tv(tv1); rettv_blob_set(tv1, b); } } /* * Make a copy of list "tv1" and append list "tv2". */ int eval_addlist(typval_T *tv1, typval_T *tv2) { typval_T var3; // concatenate Lists if (list_concat(tv1->vval.v_list, tv2->vval.v_list, &var3) == FAIL) { clear_tv(tv1); clear_tv(tv2); return FAIL; } clear_tv(tv1); *tv1 = var3; return OK; } /* * Handle the bitwise left/right shift operator expression: * var1 << var2 * var1 >> var2 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval5(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { /* * Get the first expression. */ if (eval6(arg, rettv, evalarg) == FAIL) return FAIL; /* * Repeat computing, until no '<<' or '>>' is following. */ for (;;) { char_u *p; int getnext; exprtype_T type; int evaluate; typval_T var2; int vim9script; p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '<' && p[1] == '<') type = EXPR_LSHIFT; else if (p[0] == '>' && p[1] == '>') type = EXPR_RSHIFT; else return OK; // Handle a bitwise left or right shift operator if (rettv->v_type != VAR_NUMBER) { // left operand should be a number emsg(_(e_bitshift_ops_must_be_number)); clear_tv(rettv); return FAIL; } evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); vim9script = in_vim9script(); if (getnext) { *arg = eval_next_line(*arg, evalarg); p = *arg; } else if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } /* * Get the second variable. 
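 * (the shift count; the code below requires it to be a non-negative Number)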
*/ if (evaluate && vim9script && !IS_WHITE_OR_NUL(p[2])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(p + 2, evalarg); if (eval6(arg, &var2, evalarg) == FAIL) { clear_tv(rettv); return FAIL; } if (var2.v_type != VAR_NUMBER || var2.vval.v_number < 0) { // right operand should be a positive number if (var2.v_type != VAR_NUMBER) emsg(_(e_bitshift_ops_must_be_number)); else emsg(_(e_bitshift_ops_must_be_postive)); clear_tv(rettv); clear_tv(&var2); return FAIL; } if (evaluate) { if (var2.vval.v_number > MAX_LSHIFT_BITS) // shifting more bits than we have always results in zero rettv->vval.v_number = 0; else if (type == EXPR_LSHIFT) rettv->vval.v_number = (uvarnumber_T)rettv->vval.v_number << var2.vval.v_number; else rettv->vval.v_number = (uvarnumber_T)rettv->vval.v_number >> var2.vval.v_number; } clear_tv(&var2); } return OK; } /* * Handle fifth level expression: * + number addition, concatenation of list or blob * - number subtraction * . string concatenation (if script version is 1) * .. string concatenation * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval6(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { /* * Get the first expression. */ if (eval7(arg, rettv, evalarg, FALSE) == FAIL) return FAIL; /* * Repeat computing, until no '+', '-' or '.' is following. */ for (;;) { int evaluate; int getnext; char_u *p; int op; int oplen; int concat; typval_T var2; int vim9script = in_vim9script(); // "." is only string concatenation when scriptversion is 1 // "+=", "-=" and "..=" are assignments // "++" and "--" on the next line are a separate command. p = eval_next_non_blank(*arg, evalarg, &getnext); op = *p; concat = op == '.' && (*(p + 1) == '.' || in_old_script(2)); if ((op != '+' && op != '-' && !concat) || p[1] == '=' || (p[1] == '.' && p[2] == '=')) break; if (getnext && (op == '+' || op == '-') && p[0] == p[1]) break; evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); oplen = (concat && p[1] == '.') ? 2 : 1; if (getnext) *arg = eval_next_line(*arg, evalarg); else { if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, oplen); clear_tv(rettv); return FAIL; } *arg = p; } if ((op != '+' || (rettv->v_type != VAR_LIST && rettv->v_type != VAR_BLOB)) #ifdef FEAT_FLOAT && (op == '.' || rettv->v_type != VAR_FLOAT) #endif && evaluate) { int error = FALSE; // For "list + ...", an illegal use of the first operand as // a number cannot be determined before evaluating the 2nd // operand: if this is also a list, all is ok. // For "something . ...", "something - ..." or "non-list + ...", // we know that the first operand needs to be a string or number // without evaluating the 2nd operand. So check before to avoid // side effects after an error. if (op != '.') tv_get_number_chk(rettv, &error); if ((op == '.' && tv_get_string_chk(rettv) == NULL) || error) { clear_tv(rettv); return FAIL; } } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[oplen])) { error_white_both(*arg, oplen); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + oplen, evalarg); if (eval7(arg, &var2, evalarg, !vim9script && op == '.') == FAIL) { clear_tv(rettv); return FAIL; } if (evaluate) { /* * Compute the result. 
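 * "." and ".." concatenate strings, "+" on two Lists or two Blobs
 * concatenates them, anything else is Number (or Float) arithmetic.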
*/ if (op == '.') { char_u buf1[NUMBUFLEN], buf2[NUMBUFLEN]; char_u *s1 = tv_get_string_buf(rettv, buf1); char_u *s2 = NULL; if (vim9script && (var2.v_type == VAR_VOID || var2.v_type == VAR_CHANNEL || var2.v_type == VAR_JOB)) semsg(_(e_using_invalid_value_as_string_str), vartype_name(var2.v_type)); #ifdef FEAT_FLOAT else if (vim9script && var2.v_type == VAR_FLOAT) { vim_snprintf((char *)buf2, NUMBUFLEN, "%g", var2.vval.v_float); s2 = buf2; } #endif else s2 = tv_get_string_buf_chk(&var2, buf2); if (s2 == NULL) // type error ? { clear_tv(rettv); clear_tv(&var2); return FAIL; } p = concat_str(s1, s2); clear_tv(rettv); rettv->v_type = VAR_STRING; rettv->vval.v_string = p; } else if (op == '+' && rettv->v_type == VAR_BLOB && var2.v_type == VAR_BLOB) eval_addblob(rettv, &var2); else if (op == '+' && rettv->v_type == VAR_LIST && var2.v_type == VAR_LIST) { if (eval_addlist(rettv, &var2) == FAIL) return FAIL; } else { int error = FALSE; varnumber_T n1, n2; #ifdef FEAT_FLOAT float_T f1 = 0, f2 = 0; if (rettv->v_type == VAR_FLOAT) { f1 = rettv->vval.v_float; n1 = 0; } else #endif { n1 = tv_get_number_chk(rettv, &error); if (error) { // This can only happen for "list + non-list" or // "blob + non-blob". For "non-list + ..." or // "something - ...", we returned before evaluating the // 2nd operand. clear_tv(rettv); clear_tv(&var2); return FAIL; } #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) f1 = n1; #endif } #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) { f2 = var2.vval.v_float; n2 = 0; } else #endif { n2 = tv_get_number_chk(&var2, &error); if (error) { clear_tv(rettv); clear_tv(&var2); return FAIL; } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) f2 = n2; #endif } clear_tv(rettv); #ifdef FEAT_FLOAT // If there is a float on either side the result is a float. if (rettv->v_type == VAR_FLOAT || var2.v_type == VAR_FLOAT) { if (op == '+') f1 = f1 + f2; else f1 = f1 - f2; rettv->v_type = VAR_FLOAT; rettv->vval.v_float = f1; } else #endif { if (op == '+') n1 = n1 + n2; else n1 = n1 - n2; rettv->v_type = VAR_NUMBER; rettv->vval.v_number = n1; } } clear_tv(&var2); } } return OK; } /* * Handle sixth level expression: * * number multiplication * / number division * % number modulo * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval7( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { #ifdef FEAT_FLOAT int use_float = FALSE; #endif /* * Get the first expression. */ if (eval8(arg, rettv, evalarg, want_string) == FAIL) return FAIL; /* * Repeat computing, until no '*', '/' or '%' is following. */ for (;;) { int evaluate; int getnext; typval_T var2; char_u *p; int op; varnumber_T n1, n2; #ifdef FEAT_FLOAT float_T f1, f2; #endif int error; // "*=", "/=" and "%=" are assignments p = eval_next_non_blank(*arg, evalarg, &getnext); op = *p; if ((op != '*' && op != '/' && op != '%') || p[1] == '=') break; evaluate = evalarg == NULL ? 
0 : (evalarg->eval_flags & EVAL_EVALUATE); if (getnext) *arg = eval_next_line(*arg, evalarg); else { if (evaluate && in_vim9script() && !VIM_ISWHITE(**arg)) { error_white_both(*arg, 1); clear_tv(rettv); return FAIL; } *arg = p; } #ifdef FEAT_FLOAT f1 = 0; f2 = 0; #endif error = FALSE; if (evaluate) { #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { f1 = rettv->vval.v_float; use_float = TRUE; n1 = 0; } else #endif n1 = tv_get_number_chk(rettv, &error); clear_tv(rettv); if (error) return FAIL; } else n1 = 0; /* * Get the second variable. */ if (evaluate && in_vim9script() && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg, 1); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg); if (eval8(arg, &var2, evalarg, FALSE) == FAIL) return FAIL; if (evaluate) { #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) { if (!use_float) { f1 = n1; use_float = TRUE; } f2 = var2.vval.v_float; n2 = 0; } else #endif { n2 = tv_get_number_chk(&var2, &error); clear_tv(&var2); if (error) return FAIL; #ifdef FEAT_FLOAT if (use_float) f2 = n2; #endif } /* * Compute the result. * When either side is a float the result is a float. */ #ifdef FEAT_FLOAT if (use_float) { if (op == '*') f1 = f1 * f2; else if (op == '/') { # ifdef VMS // VMS crashes on divide by zero, work around it if (f2 == 0.0) { if (f1 == 0) f1 = -1 * __F_FLT_MAX - 1L; // similar to NaN else if (f1 < 0) f1 = -1 * __F_FLT_MAX; else f1 = __F_FLT_MAX; } else f1 = f1 / f2; # else // We rely on the floating point library to handle divide // by zero to result in "inf" and not a crash. f1 = f1 / f2; # endif } else { emsg(_(e_cannot_use_percent_with_float)); return FAIL; } rettv->v_type = VAR_FLOAT; rettv->vval.v_float = f1; } else #endif { int failed = FALSE; if (op == '*') n1 = n1 * n2; else if (op == '/') n1 = num_divide(n1, n2, &failed); else n1 = num_modulus(n1, n2, &failed); if (failed) return FAIL; rettv->v_type = VAR_NUMBER; rettv->vval.v_number = n1; } } } return OK; } /* * Handle a type cast before a base level expression. * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * Return OK or FAIL. */ static int eval8( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { type_T *want_type = NULL; garray_T type_list; // list of pointers to allocated types int res; int evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); // Recognize <type> in Vim9 script only. if (in_vim9script() && **arg == '<' && eval_isnamec1((*arg)[1]) && STRNCMP(*arg, "<SNR>", 5) != 0) { ++*arg; ga_init2(&type_list, sizeof(type_T *), 10); want_type = parse_type(arg, &type_list, TRUE); if (want_type == NULL && (evaluate || **arg != '>')) { clear_type_list(&type_list); return FAIL; } if (**arg != '>') { if (*skipwhite(*arg) == '>') semsg(_(e_no_white_space_allowed_before_str_str), ">", *arg); else emsg(_(e_missing_gt)); clear_type_list(&type_list); return FAIL; } ++*arg; *arg = skipwhite_and_linebreak(*arg, evalarg); } res = eval9(arg, rettv, evalarg, want_string); if (want_type != NULL && evaluate) { if (res == OK) { type_T *actual = typval2type(rettv, get_copyID(), &type_list, TVTT_DO_MEMBER); if (!equal_type(want_type, actual, 0)) { if (want_type == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { int n = tv2bool(rettv); // can use "0" and "1" for boolean in some places clear_tv(rettv); rettv->v_type = VAR_BOOL; rettv->vval.v_number = n ? 
VVAL_TRUE : VVAL_FALSE; } else { where_T where = WHERE_INIT; where.wt_variable = TRUE; res = check_type(want_type, actual, TRUE, where); } } } clear_type_list(&type_list); } return res; } int eval_leader(char_u **arg, int vim9) { char_u *s = *arg; char_u *p = *arg; while (*p == '!' || *p == '-' || *p == '+') { char_u *n = skipwhite(p + 1); // ++, --, -+ and +- are not accepted in Vim9 script if (vim9 && (*p == '-' || *p == '+') && (*n == '-' || *n == '+')) { semsg(_(e_invalid_expression_str), s); return FAIL; } p = n; } *arg = p; return OK; } /* * Check for a predefined value "true", "false" and "null.*". * Return OK when recognized. */ int handle_predefined(char_u *s, int len, typval_T *rettv) { switch (len) { case 4: if (STRNCMP(s, "true", 4) == 0) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = VVAL_TRUE; return OK; } if (STRNCMP(s, "null", 4) == 0) { rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; return OK; } break; case 5: if (STRNCMP(s, "false", 5) == 0) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = VVAL_FALSE; return OK; } break; case 8: if (STRNCMP(s, "null_job", 8) == 0) { #ifdef FEAT_JOB_CHANNEL rettv->v_type = VAR_JOB; rettv->vval.v_job = NULL; #else rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; #endif return OK; } break; case 9: if (STRNCMP(s, "null_", 5) != 0) break; if (STRNCMP(s + 5, "list", 4) == 0) { rettv->v_type = VAR_LIST; rettv->vval.v_list = NULL; return OK; } if (STRNCMP(s + 5, "dict", 4) == 0) { rettv->v_type = VAR_DICT; rettv->vval.v_dict = NULL; return OK; } if (STRNCMP(s + 5, "blob", 4) == 0) { rettv->v_type = VAR_BLOB; rettv->vval.v_blob = NULL; return OK; } break; case 11: if (STRNCMP(s, "null_string", 11) == 0) { rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; return OK; } break; case 12: if (STRNCMP(s, "null_channel", 12) == 0) { #ifdef FEAT_JOB_CHANNEL rettv->v_type = VAR_CHANNEL; rettv->vval.v_channel = NULL; #else rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; #endif return OK; } if (STRNCMP(s, "null_partial", 12) == 0) { rettv->v_type = VAR_PARTIAL; rettv->vval.v_partial = NULL; return OK; } break; case 13: if (STRNCMP(s, "null_function", 13) == 0) { rettv->v_type = VAR_FUNC; rettv->vval.v_string = NULL; return OK; } break; } return FAIL; } /* * Handle sixth level expression: * number number constant * 0zFFFFFFFF Blob constant * "string" string constant * 'string' literal string constant * &option-name option value * @r register contents * identifier variable value * function() function call * $VAR environment variable * (expression) nested expression * [expr, expr] List * {arg, arg -> expr} Lambda * {key: val, key: val} Dictionary * #{key: val, key: val} Dictionary with literal keys * * Also handle: * ! in front logical NOT * - in front unary minus * + in front unary plus (ignored) * trailing [] subscript in String or List * trailing .name entry in Dictionary * trailing ->name() method call * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval9( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." 
operator { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int len; char_u *s; char_u *name_start = NULL; char_u *start_leader, *end_leader; int ret = OK; char_u *alias; static int recurse = 0; int vim9script = in_vim9script(); /* * Initialise variable so that clear_tv() can't mistake this for a * string and free a string that isn't there. */ rettv->v_type = VAR_UNKNOWN; /* * Skip '!', '-' and '+' characters. They are handled later. */ start_leader = *arg; if (eval_leader(arg, vim9script) == FAIL) return FAIL; end_leader = *arg; if (**arg == '.' && (!isdigit(*(*arg + 1)) #ifdef FEAT_FLOAT || in_old_script(2) #endif )) { semsg(_(e_invalid_expression_str), *arg); ++*arg; return FAIL; } // Limit recursion to 1000 levels. At least at 10000 we run out of stack // and crash. With MSVC the stack is smaller. if (recurse == #ifdef _MSC_VER 300 #else 1000 #endif ) { semsg(_(e_expression_too_recursive_str), *arg); return FAIL; } ++recurse; switch (**arg) { /* * Number constant. */ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '.': ret = eval_number(arg, rettv, evaluate, want_string); // Apply prefixed "-" and "+" now. Matters especially when // "->" follows. if (ret == OK && evaluate && end_leader > start_leader && rettv->v_type != VAR_BLOB) ret = eval9_leader(rettv, TRUE, start_leader, &end_leader); break; /* * String constant: "string". */ case '"': ret = eval_string(arg, rettv, evaluate, FALSE); break; /* * Literal string constant: 'str''ing'. */ case '\'': ret = eval_lit_string(arg, rettv, evaluate, FALSE); break; /* * List: [expr, expr] */ case '[': ret = eval_list(arg, rettv, evalarg, TRUE); break; /* * Dictionary: #{key: val, key: val} */ case '#': if (vim9script) { ret = vim9_bad_comment(*arg) ? FAIL : NOTDONE; } else if ((*arg)[1] == '{') { ++*arg; ret = eval_dict(arg, rettv, evalarg, TRUE); } else ret = NOTDONE; break; /* * Lambda: {arg, arg -> expr} * Dictionary: {'key': val, 'key': val} */ case '{': if (vim9script) ret = NOTDONE; else ret = get_lambda_tv(arg, rettv, vim9script, evalarg); if (ret == NOTDONE) ret = eval_dict(arg, rettv, evalarg, FALSE); break; /* * Option value: &name */ case '&': ret = eval_option(arg, rettv, evaluate); break; /* * Environment variable: $VAR. * Interpolated string: $"string" or $'string'. */ case '$': if ((*arg)[1] == '"' || (*arg)[1] == '\'') ret = eval_interp_string(arg, rettv, evaluate); else ret = eval_env_var(arg, rettv, evaluate); break; /* * Register contents: @r. */ case '@': ++*arg; if (evaluate) { if (vim9script && IS_WHITE_OR_NUL(**arg)) semsg(_(e_syntax_error_at_str), *arg); else if (vim9script && !valid_yank_reg(**arg, FALSE)) emsg_invreg(**arg); else { rettv->v_type = VAR_STRING; rettv->vval.v_string = get_reg_contents(**arg, GREG_EXPR_SRC); } } if (**arg != NUL) ++*arg; break; /* * nested expression: (expression). * or lambda: (arg) => expr */ case '(': ret = NOTDONE; if (vim9script) { ret = get_lambda_tv(arg, rettv, TRUE, evalarg); if (ret == OK && evaluate) { ufunc_T *ufunc = rettv->vval.v_partial->pt_func; // Compile it here to get the return type. The return // type is optional, when it's missing use t_unknown. // This is recognized in compile_return(). 
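                    // Example: for "((x) => x + 1)(2)" the lambda is
                    // compiled at this point, so its return type is
                    // available when the call is evaluated.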
if (ufunc->uf_ret_type->tt_type == VAR_VOID) ufunc->uf_ret_type = &t_unknown; if (compile_def_function(ufunc, FALSE, get_compile_type(ufunc), NULL) == FAIL) { clear_tv(rettv); ret = FAIL; } } } if (ret == NOTDONE) { *arg = skipwhite_and_linebreak(*arg + 1, evalarg); ret = eval1(arg, rettv, evalarg); // recursive! *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ')') ++*arg; else if (ret == OK) { emsg(_(e_missing_closing_paren)); clear_tv(rettv); ret = FAIL; } } break; default: ret = NOTDONE; break; } if (ret == NOTDONE) { /* * Must be a variable or function name. * Can also be a curly-braces kind of name: {expr}. */ s = *arg; len = get_name_len(arg, &alias, evaluate, TRUE); if (alias != NULL) s = alias; if (len <= 0) ret = FAIL; else { int flags = evalarg == NULL ? 0 : evalarg->eval_flags; if (evaluate && vim9script && len == 1 && *s == '_') { emsg(_(e_cannot_use_underscore_here)); ret = FAIL; } else if (evaluate && vim9script && len > 2 && s[0] == 's' && s[1] == ':') { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), s); ret = FAIL; } else if ((vim9script ? **arg : *skipwhite(*arg)) == '(') { // "name(..." recursive! *arg = skipwhite(*arg); ret = eval_func(arg, evalarg, s, len, rettv, flags, NULL); } else if (flags & EVAL_CONSTANT) ret = FAIL; else if (evaluate) { // get the value of "true", "false", etc. or a variable ret = FAIL; if (vim9script) ret = handle_predefined(s, len, rettv); if (ret == FAIL) { name_start = s; ret = eval_variable(s, len, 0, rettv, NULL, EVAL_VAR_VERBOSE + EVAL_VAR_IMPORT); } } else { // skip the name check_vars(s, len); ret = OK; } } vim_free(alias); } // Handle following '[', '(' and '.' for expr[expr], expr.name, // expr(expr), expr->name(expr) if (ret == OK) ret = handle_subscript(arg, name_start, rettv, evalarg, TRUE); /* * Apply logical NOT and unary '-', from right to left, ignore '+'. */ if (ret == OK && evaluate && end_leader > start_leader) ret = eval9_leader(rettv, FALSE, start_leader, &end_leader); --recurse; return ret; } /* * Apply the leading "!" and "-" before an eval9 expression to "rettv". * When "numeric_only" is TRUE only handle "+" and "-". * Adjusts "end_leaderp" until it is at "start_leader". */ static int eval9_leader( typval_T *rettv, int numeric_only, char_u *start_leader, char_u **end_leaderp) { char_u *end_leader = *end_leaderp; int ret = OK; int error = FALSE; varnumber_T val = 0; vartype_T type = rettv->v_type; int vim9script = in_vim9script(); #ifdef FEAT_FLOAT float_T f = 0.0; if (rettv->v_type == VAR_FLOAT) f = rettv->vval.v_float; else #endif { while (VIM_ISWHITE(end_leader[-1])) --end_leader; if (vim9script && end_leader[-1] == '!') val = tv2bool(rettv); else val = tv_get_number_chk(rettv, &error); } if (error) { clear_tv(rettv); ret = FAIL; } else { while (end_leader > start_leader) { --end_leader; if (*end_leader == '!') { if (numeric_only) { ++end_leader; break; } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { if (vim9script) { rettv->v_type = VAR_BOOL; val = f == 0.0 ? VVAL_TRUE : VVAL_FALSE; } else f = !f; } else #endif { val = !val; type = VAR_BOOL; } } else if (*end_leader == '-') { #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) f = -f; else #endif { val = -val; type = VAR_NUMBER; } } } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { clear_tv(rettv); rettv->vval.v_float = f; } else #endif { clear_tv(rettv); if (vim9script) rettv->v_type = type; else rettv->v_type = VAR_NUMBER; rettv->vval.v_number = val; } } *end_leaderp = end_leader; return ret; } /* * Call the function referred to in "rettv". 
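 * "rettv" holds the function to call: a name, Funcref or partial.  This is
 * what evaluates the trailing "(...)" in e.g. "function('len')('abc')".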
*/ static int call_func_rettv( char_u **arg, evalarg_T *evalarg, typval_T *rettv, int evaluate, dict_T *selfdict, typval_T *basetv) { partial_T *pt = NULL; funcexe_T funcexe; typval_T functv; char_u *s; int ret; // need to copy the funcref so that we can clear rettv if (evaluate) { functv = *rettv; rettv->v_type = VAR_UNKNOWN; // Invoke the function. Recursive! if (functv.v_type == VAR_PARTIAL) { pt = functv.vval.v_partial; s = partial_name(pt); } else { s = functv.vval.v_string; if (s == NULL || *s == NUL) { emsg(_(e_empty_function_name)); ret = FAIL; goto theend; } } } else s = (char_u *)""; CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = evaluate; funcexe.fe_partial = pt; funcexe.fe_selfdict = selfdict; funcexe.fe_basetv = basetv; ret = get_func_tv(s, -1, rettv, arg, evalarg, &funcexe); theend: // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&functv); return ret; } /* * Evaluate "->method()". * "*arg" points to "method". * Returns FAIL or OK. "*arg" is advanced to after the ')'. */ static int eval_lambda( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); typval_T base = *rettv; int ret; rettv->v_type = VAR_UNKNOWN; if (**arg == '{') { // ->{lambda}() ret = get_lambda_tv(arg, rettv, FALSE, evalarg); } else { // ->(lambda)() ++*arg; ret = eval1(arg, rettv, evalarg); *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg != ')') { emsg(_(e_missing_closing_paren)); return FAIL; } if (rettv->v_type != VAR_STRING && rettv->v_type != VAR_FUNC && rettv->v_type != VAR_PARTIAL) { emsg(_(e_string_or_function_required_for_arrow_parens_expr)); return FAIL; } ++*arg; } if (ret != OK) return FAIL; if (**arg != '(') { if (verbose) { if (*skipwhite(*arg) == '(') emsg(_(e_no_white_space_allowed_before_parenthesis)); else semsg(_(e_missing_parenthesis_str), "lambda"); } clear_tv(rettv); ret = FAIL; } else ret = call_func_rettv(arg, evalarg, rettv, evaluate, NULL, &base); // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&base); return ret; } /* * Evaluate "->method()". * "*arg" points to "method". * Returns FAIL or OK. "*arg" is advanced to after the ')'. */ static int eval_method( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { char_u *name; long len; char_u *alias; char_u *tofree = NULL; typval_T base = *rettv; int ret = OK; int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); rettv->v_type = VAR_UNKNOWN; name = *arg; len = get_name_len(arg, &alias, evaluate, TRUE); if (alias != NULL) name = alias; if (len <= 0) { if (verbose) emsg(_(e_missing_name_after_method)); ret = FAIL; } else { char_u *paren; // If there is no "(" immediately following, but there is further on, // it can be "import.Func()", "dict.Func()", "list[nr]", etc. // Does not handle anything where "(" is part of the expression. 
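        // Example: for "arg->import.Func()" everything up to the "(" is
        // first resolved to the actual function it refers to.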
*arg = skipwhite(*arg); if (**arg != '(' && alias == NULL && (paren = vim_strchr(*arg, '(')) != NULL) { char_u *deref; *arg = name; *paren = NUL; deref = deref_function_name(arg, &tofree, evalarg, verbose); if (deref == NULL) { *arg = name + len; ret = FAIL; } else { name = deref; len = (long)STRLEN(name); } *paren = '('; } if (ret == OK) { *arg = skipwhite(*arg); if (**arg != '(') { if (verbose) semsg(_(e_missing_parenthesis_str), name); ret = FAIL; } else if (VIM_ISWHITE((*arg)[-1])) { if (verbose) emsg(_(e_no_white_space_allowed_before_parenthesis)); ret = FAIL; } else ret = eval_func(arg, evalarg, name, len, rettv, evaluate ? EVAL_EVALUATE : 0, &base); } } // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&base); vim_free(tofree); return ret; } /* * Evaluate an "[expr]" or "[expr:expr]" index. Also "dict.key". * "*arg" points to the '[' or '.'. * Returns FAIL or OK. "*arg" is advanced to after the ']'. */ static int eval_index( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int empty1 = FALSE, empty2 = FALSE; typval_T var1, var2; int range = FALSE; char_u *key = NULL; int keylen = -1; int vim9script = in_vim9script(); if (check_can_index(rettv, evaluate, verbose) == FAIL) return FAIL; init_tv(&var1); init_tv(&var2); if (**arg == '.') { /* * dict.name */ key = *arg + 1; for (keylen = 0; eval_isdictc(key[keylen]); ++keylen) ; if (keylen == 0) return FAIL; *arg = key + keylen; } else { /* * something[idx] * * Get the (first) variable from inside the []. */ *arg = skipwhite_and_linebreak(*arg + 1, evalarg); if (**arg == ':') empty1 = TRUE; else if (eval1(arg, &var1, evalarg) == FAIL) // recursive! return FAIL; else if (vim9script && **arg == ':') { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", *arg); clear_tv(&var1); return FAIL; } else if (evaluate) { int error = FALSE; #ifdef FEAT_FLOAT // allow for indexing with float if (vim9script && rettv->v_type == VAR_DICT && var1.v_type == VAR_FLOAT) { var1.vval.v_string = typval_tostring(&var1, TRUE); var1.v_type = VAR_STRING; } #endif if (vim9script && rettv->v_type == VAR_LIST) tv_get_number_chk(&var1, &error); else error = tv_get_string_chk(&var1) == NULL; if (error) { // not a number or string clear_tv(&var1); return FAIL; } } /* * Get the second variable from inside the [:]. */ *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ':') { range = TRUE; ++*arg; if (vim9script && !IS_WHITE_OR_NUL(**arg) && **arg != ']') { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", *arg - 1); if (!empty1) clear_tv(&var1); return FAIL; } *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ']') empty2 = TRUE; else if (eval1(arg, &var2, evalarg) == FAIL) // recursive! { if (!empty1) clear_tv(&var1); return FAIL; } else if (evaluate && tv_get_string_chk(&var2) == NULL) { // not a number or string if (!empty1) clear_tv(&var1); clear_tv(&var2); return FAIL; } } // Check for the ']'. *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg != ']') { if (verbose) emsg(_(e_missing_closing_square_brace)); clear_tv(&var1); if (range) clear_tv(&var2); return FAIL; } *arg = *arg + 1; // skip over the ']' } if (evaluate) { int res = eval_index_inner(rettv, range, empty1 ? NULL : &var1, empty2 ? 
NULL : &var2, FALSE, key, keylen, verbose); if (!empty1) clear_tv(&var1); if (range) clear_tv(&var2); return res; } return OK; } /* * Check if "rettv" can have an [index] or [sli:ce] */ int check_can_index(typval_T *rettv, int evaluate, int verbose) { switch (rettv->v_type) { case VAR_FUNC: case VAR_PARTIAL: if (verbose) emsg(_(e_cannot_index_a_funcref)); return FAIL; case VAR_FLOAT: #ifdef FEAT_FLOAT if (verbose) emsg(_(e_using_float_as_string)); return FAIL; #endif case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: if (verbose) emsg(_(e_cannot_index_special_variable)); return FAIL; case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: if (evaluate) { emsg(_(e_cannot_index_special_variable)); return FAIL; } // FALLTHROUGH case VAR_STRING: case VAR_LIST: case VAR_DICT: case VAR_BLOB: break; case VAR_NUMBER: if (in_vim9script()) emsg(_(e_cannot_index_number)); break; } return OK; } /* * slice() function */ void f_slice(typval_T *argvars, typval_T *rettv) { if (in_vim9script() && ((argvars[0].v_type != VAR_STRING && argvars[0].v_type != VAR_LIST && argvars[0].v_type != VAR_BLOB && check_for_list_arg(argvars, 0) == FAIL) || check_for_number_arg(argvars, 1) == FAIL || check_for_opt_number_arg(argvars, 2) == FAIL)) return; if (check_can_index(argvars, TRUE, FALSE) == OK) { copy_tv(argvars, rettv); eval_index_inner(rettv, TRUE, argvars + 1, argvars[2].v_type == VAR_UNKNOWN ? NULL : argvars + 2, TRUE, NULL, 0, FALSE); } } /* * Apply index or range to "rettv". * "var1" is the first index, NULL for [:expr]. * "var2" is the second index, NULL for [expr] and [expr: ] * "exclusive" is TRUE for slice(): second index is exclusive, use character * index for string. * Alternatively, "key" is not NULL, then key[keylen] is the dict index. */ int eval_index_inner( typval_T *rettv, int is_range, typval_T *var1, typval_T *var2, int exclusive, char_u *key, int keylen, int verbose) { varnumber_T n1, n2 = 0; long len; n1 = 0; if (var1 != NULL && rettv->v_type != VAR_DICT) n1 = tv_get_number(var1); if (is_range) { if (rettv->v_type == VAR_DICT) { if (verbose) emsg(_(e_cannot_slice_dictionary)); return FAIL; } if (var2 != NULL) n2 = tv_get_number(var2); else n2 = VARNUM_MAX; } switch (rettv->v_type) { case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: case VAR_FUNC: case VAR_PARTIAL: case VAR_FLOAT: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: break; // not evaluating, skipping over subscript case VAR_NUMBER: case VAR_STRING: { char_u *s = tv_get_string(rettv); len = (long)STRLEN(s); if (in_vim9script() || exclusive) { if (is_range) s = string_slice(s, n1, n2, exclusive); else s = char_from_string(s, n1); } else if (is_range) { // The resulting variable is a substring. If the indexes // are out of range the result is empty. if (n1 < 0) { n1 = len + n1; if (n1 < 0) n1 = 0; } if (n2 < 0) n2 = len + n2; else if (n2 >= len) n2 = len; if (n1 >= len || n2 < 0 || n1 > n2) s = NULL; else s = vim_strnsave(s + n1, n2 - n1 + 1); } else { // The resulting variable is a string of a single // character. If the index is too big or negative the // result is empty. 
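                // e.g. "abc"[1] is "b", while "abc"[5] and "abc"[-1]
                // are both "" here (legacy script).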
if (n1 >= len || n1 < 0) s = NULL; else s = vim_strnsave(s + n1, 1); } clear_tv(rettv); rettv->v_type = VAR_STRING; rettv->vval.v_string = s; } break; case VAR_BLOB: blob_slice_or_index(rettv->vval.v_blob, is_range, n1, n2, exclusive, rettv); break; case VAR_LIST: if (var1 == NULL) n1 = 0; if (var2 == NULL) n2 = VARNUM_MAX; if (list_slice_or_index(rettv->vval.v_list, is_range, n1, n2, exclusive, rettv, verbose) == FAIL) return FAIL; break; case VAR_DICT: { dictitem_T *item; typval_T tmp; if (key == NULL) { key = tv_get_string_chk(var1); if (key == NULL) return FAIL; } item = dict_find(rettv->vval.v_dict, key, keylen); if (item == NULL) { if (verbose) { if (keylen > 0) key[keylen] = NUL; semsg(_(e_key_not_present_in_dictionary), key); } return FAIL; } copy_tv(&item->di_tv, &tmp); clear_tv(rettv); *rettv = tmp; } break; } return OK; } /* * Return the function name of partial "pt". */ char_u * partial_name(partial_T *pt) { if (pt != NULL) { if (pt->pt_name != NULL) return pt->pt_name; if (pt->pt_func != NULL) return pt->pt_func->uf_name; } return (char_u *)""; } static void partial_free(partial_T *pt) { int i; for (i = 0; i < pt->pt_argc; ++i) clear_tv(&pt->pt_argv[i]); vim_free(pt->pt_argv); dict_unref(pt->pt_dict); if (pt->pt_name != NULL) { func_unref(pt->pt_name); vim_free(pt->pt_name); } else func_ptr_unref(pt->pt_func); // "out_up" is no longer used, decrement refcount on partial that owns it. partial_unref(pt->pt_outer.out_up_partial); // Using pt_outer from another partial. partial_unref(pt->pt_outer_partial); // Decrease the reference count for the context of a closure. If down // to the minimum it may be time to free it. if (pt->pt_funcstack != NULL) { --pt->pt_funcstack->fs_refcount; funcstack_check_refcount(pt->pt_funcstack); } vim_free(pt); } /* * Unreference a closure: decrement the reference count and free it when it * becomes zero. */ void partial_unref(partial_T *pt) { if (pt != NULL) { if (--pt->pt_refcount <= 0) partial_free(pt); // If the reference count goes down to one, the funcstack may be the // only reference and can be freed if no other partials reference it. else if (pt->pt_refcount == 1 && pt->pt_funcstack != NULL) funcstack_check_refcount(pt->pt_funcstack); } } /* * Return the next (unique) copy ID. * Used for serializing nested structures. */ int get_copyID(void) { current_copyID += COPYID_INC; return current_copyID; } /* * Garbage collection for lists and dictionaries. * * We use reference counts to be able to free most items right away when they * are no longer used. But for composite items it's possible that it becomes * unused while the reference count is > 0: When there is a recursive * reference. Example: * :let l = [1, 2, 3] * :let d = {9: l} * :let l[1] = d * * Since this is quite unusual we handle this with garbage collection: every * once in a while find out which lists and dicts are not referenced from any * variable. * * Here is a good reference text about garbage collection (refers to Python * but it applies to all reference-counting mechanisms): * http://python.ca/nas/python/gc/ */ /* * Do garbage collection for lists and dicts. * When "testing" is TRUE this is called from test_garbagecollect_now(). * Return TRUE if some memory was freed. */ int garbage_collect(int testing) { int copyID; int abort = FALSE; buf_T *buf; win_T *wp; int did_free = FALSE; tabpage_T *tp; if (!testing) { // Only do this once. want_garbage_collect = FALSE; may_garbage_collect = FALSE; garbage_collect_at_exit = FALSE; } // The execution stack can grow big, limit the size. 
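    // Shrink it when more than 500 entries are unused, keeping about 150%
    // of the size currently in use (computed below).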
if (exestack.ga_maxlen - exestack.ga_len > 500) { size_t new_len; char_u *pp; int n; // Keep 150% of the current size, with a minimum of the growth size. n = exestack.ga_len / 2; if (n < exestack.ga_growsize) n = exestack.ga_growsize; // Don't make it bigger though. if (exestack.ga_len + n < exestack.ga_maxlen) { new_len = (size_t)exestack.ga_itemsize * (exestack.ga_len + n); pp = vim_realloc(exestack.ga_data, new_len); if (pp == NULL) return FAIL; exestack.ga_maxlen = exestack.ga_len + n; exestack.ga_data = pp; } } // We advance by two because we add one for items referenced through // previous_funccal. copyID = get_copyID(); /* * 1. Go through all accessible variables and mark all lists and dicts * with copyID. */ // Don't free variables in the previous_funccal list unless they are only // referenced through previous_funccal. This must be first, because if // the item is referenced elsewhere the funccal must not be freed. abort = abort || set_ref_in_previous_funccal(copyID); // script-local variables abort = abort || garbage_collect_scriptvars(copyID); // buffer-local variables FOR_ALL_BUFFERS(buf) abort = abort || set_ref_in_item(&buf->b_bufvar.di_tv, copyID, NULL, NULL); // window-local variables FOR_ALL_TAB_WINDOWS(tp, wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); if (aucmd_win != NULL) abort = abort || set_ref_in_item(&aucmd_win->w_winvar.di_tv, copyID, NULL, NULL); #ifdef FEAT_PROP_POPUP FOR_ALL_POPUPWINS(wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); FOR_ALL_TABPAGES(tp) FOR_ALL_POPUPWINS_IN_TAB(tp, wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); #endif // tabpage-local variables FOR_ALL_TABPAGES(tp) abort = abort || set_ref_in_item(&tp->tp_winvar.di_tv, copyID, NULL, NULL); // global variables abort = abort || garbage_collect_globvars(copyID); // function-local variables abort = abort || set_ref_in_call_stack(copyID); // named functions (matters for closures) abort = abort || set_ref_in_functions(copyID); // function call arguments, if v:testing is set. abort = abort || set_ref_in_func_args(copyID); // funcstacks keep variables for closures abort = abort || set_ref_in_funcstacks(copyID); // v: vars abort = abort || garbage_collect_vimvars(copyID); // callbacks in buffers abort = abort || set_ref_in_buffers(copyID); // 'completefunc', 'omnifunc' and 'thesaurusfunc' callbacks abort = abort || set_ref_in_insexpand_funcs(copyID); // 'operatorfunc' callback abort = abort || set_ref_in_opfunc(copyID); // 'tagfunc' callback abort = abort || set_ref_in_tagfunc(copyID); // 'imactivatefunc' and 'imstatusfunc' callbacks abort = abort || set_ref_in_im_funcs(copyID); #ifdef FEAT_LUA abort = abort || set_ref_in_lua(copyID); #endif #ifdef FEAT_PYTHON abort = abort || set_ref_in_python(copyID); #endif #ifdef FEAT_PYTHON3 abort = abort || set_ref_in_python3(copyID); #endif #ifdef FEAT_JOB_CHANNEL abort = abort || set_ref_in_channel(copyID); abort = abort || set_ref_in_job(copyID); #endif #ifdef FEAT_NETBEANS_INTG abort = abort || set_ref_in_nb_channel(copyID); #endif #ifdef FEAT_TIMERS abort = abort || set_ref_in_timer(copyID); #endif #ifdef FEAT_QUICKFIX abort = abort || set_ref_in_quickfix(copyID); #endif #ifdef FEAT_TERMINAL abort = abort || set_ref_in_term(copyID); #endif #ifdef FEAT_PROP_POPUP abort = abort || set_ref_in_popups(copyID); #endif if (!abort) { /* * 2. Free lists and dictionaries that are not referenced. */ did_free = free_unref_items(copyID); /* * 3. 
Check if any funccal can be freed now. * This may call us back recursively. */ free_unref_funccal(copyID, testing); } else if (p_verbose > 0) { verb_msg(_("Not enough memory to set references, garbage collection aborted!")); } return did_free; } /* * Free lists, dictionaries, channels and jobs that are no longer referenced. */ static int free_unref_items(int copyID) { int did_free = FALSE; // Let all "free" functions know that we are here. This means no // dictionaries, lists, channels or jobs are to be freed, because we will // do that here. in_free_unref_items = TRUE; /* * PASS 1: free the contents of the items. We don't free the items * themselves yet, so that it is possible to decrement refcount counters */ // Go through the list of dicts and free items without the copyID. did_free |= dict_free_nonref(copyID); // Go through the list of lists and free items without the copyID. did_free |= list_free_nonref(copyID); #ifdef FEAT_JOB_CHANNEL // Go through the list of jobs and free items without the copyID. This // must happen before doing channels, because jobs refer to channels, but // the reference from the channel to the job isn't tracked. did_free |= free_unused_jobs_contents(copyID, COPYID_MASK); // Go through the list of channels and free items without the copyID. did_free |= free_unused_channels_contents(copyID, COPYID_MASK); #endif /* * PASS 2: free the items themselves. */ dict_free_items(copyID); list_free_items(copyID); #ifdef FEAT_JOB_CHANNEL // Go through the list of jobs and free items without the copyID. This // must happen before doing channels, because jobs refer to channels, but // the reference from the channel to the job isn't tracked. free_unused_jobs(copyID, COPYID_MASK); // Go through the list of channels and free items without the copyID. free_unused_channels(copyID, COPYID_MASK); #endif in_free_unref_items = FALSE; return did_free; } /* * Mark all lists and dicts referenced through hashtab "ht" with "copyID". * "list_stack" is used to add lists to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_ht(hashtab_T *ht, int copyID, list_stack_T **list_stack) { int todo; int abort = FALSE; hashitem_T *hi; hashtab_T *cur_ht; ht_stack_T *ht_stack = NULL; ht_stack_T *tempitem; cur_ht = ht; for (;;) { if (!abort) { // Mark each item in the hashtab. If the item contains a hashtab // it is added to ht_stack, if it contains a list it is added to // list_stack. todo = (int)cur_ht->ht_used; for (hi = cur_ht->ht_array; todo > 0; ++hi) if (!HASHITEM_EMPTY(hi)) { --todo; abort = abort || set_ref_in_item(&HI2DI(hi)->di_tv, copyID, &ht_stack, list_stack); } } if (ht_stack == NULL) break; // take an item from the stack cur_ht = ht_stack->ht; tempitem = ht_stack; ht_stack = ht_stack->prev; free(tempitem); } return abort; } #if defined(FEAT_LUA) || defined(FEAT_PYTHON) || defined(FEAT_PYTHON3) \ || defined(PROTO) /* * Mark a dict and its items with "copyID". * Returns TRUE if setting references failed somehow. */ int set_ref_in_dict(dict_T *d, int copyID) { if (d != NULL && d->dv_copyID != copyID) { d->dv_copyID = copyID; return set_ref_in_ht(&d->dv_hashtab, copyID, NULL); } return FALSE; } #endif /* * Mark a list and its items with "copyID". * Returns TRUE if setting references failed somehow. 
*/ int set_ref_in_list(list_T *ll, int copyID) { if (ll != NULL && ll->lv_copyID != copyID) { ll->lv_copyID = copyID; return set_ref_in_list_items(ll, copyID, NULL); } return FALSE; } /* * Mark all lists and dicts referenced through list "l" with "copyID". * "ht_stack" is used to add hashtabs to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_list_items(list_T *l, int copyID, ht_stack_T **ht_stack) { listitem_T *li; int abort = FALSE; list_T *cur_l; list_stack_T *list_stack = NULL; list_stack_T *tempitem; cur_l = l; for (;;) { if (!abort && cur_l->lv_first != &range_list_item) // Mark each item in the list. If the item contains a hashtab // it is added to ht_stack, if it contains a list it is added to // list_stack. for (li = cur_l->lv_first; !abort && li != NULL; li = li->li_next) abort = abort || set_ref_in_item(&li->li_tv, copyID, ht_stack, &list_stack); if (list_stack == NULL) break; // take an item from the stack cur_l = list_stack->list; tempitem = list_stack; list_stack = list_stack->prev; free(tempitem); } return abort; } /* * Mark the partial in callback 'cb' with "copyID". */ int set_ref_in_callback(callback_T *cb, int copyID) { typval_T tv; if (cb->cb_name == NULL || *cb->cb_name == NUL || cb->cb_partial == NULL) return FALSE; tv.v_type = VAR_PARTIAL; tv.vval.v_partial = cb->cb_partial; return set_ref_in_item(&tv, copyID, NULL, NULL); } /* * Mark all lists and dicts referenced through typval "tv" with "copyID". * "list_stack" is used to add lists to be marked. Can be NULL. * "ht_stack" is used to add hashtabs to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_item( typval_T *tv, int copyID, ht_stack_T **ht_stack, list_stack_T **list_stack) { int abort = FALSE; if (tv->v_type == VAR_DICT) { dict_T *dd = tv->vval.v_dict; if (dd != NULL && dd->dv_copyID != copyID) { // Didn't see this dict yet. dd->dv_copyID = copyID; if (ht_stack == NULL) { abort = set_ref_in_ht(&dd->dv_hashtab, copyID, list_stack); } else { ht_stack_T *newitem = ALLOC_ONE(ht_stack_T); if (newitem == NULL) abort = TRUE; else { newitem->ht = &dd->dv_hashtab; newitem->prev = *ht_stack; *ht_stack = newitem; } } } } else if (tv->v_type == VAR_LIST) { list_T *ll = tv->vval.v_list; if (ll != NULL && ll->lv_copyID != copyID) { // Didn't see this list yet. ll->lv_copyID = copyID; if (list_stack == NULL) { abort = set_ref_in_list_items(ll, copyID, ht_stack); } else { list_stack_T *newitem = ALLOC_ONE(list_stack_T); if (newitem == NULL) abort = TRUE; else { newitem->list = ll; newitem->prev = *list_stack; *list_stack = newitem; } } } } else if (tv->v_type == VAR_FUNC) { abort = set_ref_in_func(tv->vval.v_string, NULL, copyID); } else if (tv->v_type == VAR_PARTIAL) { partial_T *pt = tv->vval.v_partial; int i; if (pt != NULL && pt->pt_copyID != copyID) { // Didn't see this partial yet. 
pt->pt_copyID = copyID; abort = set_ref_in_func(pt->pt_name, pt->pt_func, copyID); if (pt->pt_dict != NULL) { typval_T dtv; dtv.v_type = VAR_DICT; dtv.vval.v_dict = pt->pt_dict; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } for (i = 0; i < pt->pt_argc; ++i) abort = abort || set_ref_in_item(&pt->pt_argv[i], copyID, ht_stack, list_stack); // pt_funcstack is handled in set_ref_in_funcstacks() } } #ifdef FEAT_JOB_CHANNEL else if (tv->v_type == VAR_JOB) { job_T *job = tv->vval.v_job; typval_T dtv; if (job != NULL && job->jv_copyID != copyID) { job->jv_copyID = copyID; if (job->jv_channel != NULL) { dtv.v_type = VAR_CHANNEL; dtv.vval.v_channel = job->jv_channel; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } if (job->jv_exit_cb.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = job->jv_exit_cb.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } } } else if (tv->v_type == VAR_CHANNEL) { channel_T *ch =tv->vval.v_channel; ch_part_T part; typval_T dtv; jsonq_T *jq; cbq_T *cq; if (ch != NULL && ch->ch_copyID != copyID) { ch->ch_copyID = copyID; for (part = PART_SOCK; part < PART_COUNT; ++part) { for (jq = ch->ch_part[part].ch_json_head.jq_next; jq != NULL; jq = jq->jq_next) set_ref_in_item(jq->jq_value, copyID, ht_stack, list_stack); for (cq = ch->ch_part[part].ch_cb_head.cq_next; cq != NULL; cq = cq->cq_next) if (cq->cq_callback.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = cq->cq_callback.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } if (ch->ch_part[part].ch_callback.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = ch->ch_part[part].ch_callback.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } } if (ch->ch_callback.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = ch->ch_callback.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } if (ch->ch_close_cb.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = ch->ch_close_cb.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } } } #endif return abort; } /* * Return a string with the string representation of a variable. * If the memory is allocated "tofree" is set to it, otherwise NULL. * "numbuf" is used for a number. * When "copyID" is not NULL replace recursive lists and dicts with "...". * When both "echo_style" and "composite_val" are FALSE, put quotes around * strings as "string()", otherwise does not put quotes around strings, as * ":echo" displays values. * When "restore_copyID" is FALSE, repeated items in dictionaries and lists * are replaced with "...". * May return NULL. */ char_u * echo_string_core( typval_T *tv, char_u **tofree, char_u *numbuf, int copyID, int echo_style, int restore_copyID, int composite_val) { static int recurse = 0; char_u *r = NULL; if (recurse >= DICT_MAXNEST) { if (!did_echo_string_emsg) { // Only give this message once for a recursive call to avoid // flooding the user with errors. And stop iterating over lists // and dicts. did_echo_string_emsg = TRUE; emsg(_(e_variable_nested_too_deep_for_displaying)); } *tofree = NULL; return (char_u *)"{E724}"; } ++recurse; switch (tv->v_type) { case VAR_STRING: if (echo_style && !composite_val) { *tofree = NULL; r = tv->vval.v_string; if (r == NULL) r = (char_u *)""; } else { *tofree = string_quote(tv->vval.v_string, FALSE); r = *tofree; } break; case VAR_FUNC: { char_u buf[MAX_FUNC_NAME_LEN]; if (echo_style) { r = tv->vval.v_string == NULL ? 
(char_u *)"function()" : make_ufunc_name_readable(tv->vval.v_string, buf, MAX_FUNC_NAME_LEN); if (r == buf) { r = vim_strsave(buf); *tofree = r; } else *tofree = NULL; } else { *tofree = string_quote(tv->vval.v_string == NULL ? NULL : make_ufunc_name_readable( tv->vval.v_string, buf, MAX_FUNC_NAME_LEN), TRUE); r = *tofree; } } break; case VAR_PARTIAL: { partial_T *pt = tv->vval.v_partial; char_u *fname = string_quote(pt == NULL ? NULL : partial_name(pt), FALSE); garray_T ga; int i; char_u *tf; ga_init2(&ga, 1, 100); ga_concat(&ga, (char_u *)"function("); if (fname != NULL) { // When using uf_name prepend "g:" for a global function. if (pt != NULL && pt->pt_name == NULL && fname[0] == '\'' && vim_isupper(fname[1])) { ga_concat(&ga, (char_u *)"'g:"); ga_concat(&ga, fname + 1); } else ga_concat(&ga, fname); vim_free(fname); } if (pt != NULL && pt->pt_argc > 0) { ga_concat(&ga, (char_u *)", ["); for (i = 0; i < pt->pt_argc; ++i) { if (i > 0) ga_concat(&ga, (char_u *)", "); ga_concat(&ga, tv2string(&pt->pt_argv[i], &tf, numbuf, copyID)); vim_free(tf); } ga_concat(&ga, (char_u *)"]"); } if (pt != NULL && pt->pt_dict != NULL) { typval_T dtv; ga_concat(&ga, (char_u *)", "); dtv.v_type = VAR_DICT; dtv.vval.v_dict = pt->pt_dict; ga_concat(&ga, tv2string(&dtv, &tf, numbuf, copyID)); vim_free(tf); } // terminate with ')' and a NUL ga_concat_len(&ga, (char_u *)")", 2); *tofree = ga.ga_data; r = *tofree; break; } case VAR_BLOB: r = blob2string(tv->vval.v_blob, tofree, numbuf); break; case VAR_LIST: if (tv->vval.v_list == NULL) { // NULL list is equivalent to empty list. *tofree = NULL; r = (char_u *)"[]"; } else if (copyID != 0 && tv->vval.v_list->lv_copyID == copyID && tv->vval.v_list->lv_len > 0) { *tofree = NULL; r = (char_u *)"[...]"; } else { int old_copyID = tv->vval.v_list->lv_copyID; tv->vval.v_list->lv_copyID = copyID; *tofree = list2string(tv, copyID, restore_copyID); if (restore_copyID) tv->vval.v_list->lv_copyID = old_copyID; r = *tofree; } break; case VAR_DICT: if (tv->vval.v_dict == NULL) { // NULL dict is equivalent to empty dict. *tofree = NULL; r = (char_u *)"{}"; } else if (copyID != 0 && tv->vval.v_dict->dv_copyID == copyID && tv->vval.v_dict->dv_hashtab.ht_used != 0) { *tofree = NULL; r = (char_u *)"{...}"; } else { int old_copyID = tv->vval.v_dict->dv_copyID; tv->vval.v_dict->dv_copyID = copyID; *tofree = dict2string(tv, copyID, restore_copyID); if (restore_copyID) tv->vval.v_dict->dv_copyID = old_copyID; r = *tofree; } break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: *tofree = NULL; r = tv_get_string_buf(tv, numbuf); break; case VAR_JOB: case VAR_CHANNEL: #ifdef FEAT_JOB_CHANNEL *tofree = NULL; r = tv->v_type == VAR_JOB ? job_to_string_buf(tv, numbuf) : channel_to_string_buf(tv, numbuf); if (composite_val) { *tofree = string_quote(r, FALSE); r = *tofree; } #endif break; case VAR_INSTR: *tofree = NULL; r = (char_u *)"instructions"; break; case VAR_FLOAT: #ifdef FEAT_FLOAT *tofree = NULL; vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float); r = numbuf; break; #endif case VAR_BOOL: case VAR_SPECIAL: *tofree = NULL; r = (char_u *)get_var_special_name(tv->vval.v_number); break; } if (--recurse == 0) did_echo_string_emsg = FALSE; return r; } /* * Return a string with the string representation of a variable. * If the memory is allocated "tofree" is set to it, otherwise NULL. * "numbuf" is used for a number. * Does not put quotes around strings, as ":echo" displays values. * When "copyID" is not NULL replace recursive lists and dicts with "...". 
* May return NULL. */ char_u * echo_string( typval_T *tv, char_u **tofree, char_u *numbuf, int copyID) { return echo_string_core(tv, tofree, numbuf, copyID, TRUE, FALSE, FALSE); } /* * Convert the specified byte index of line 'lnum' in buffer 'buf' to a * character index. Works only for loaded buffers. Returns -1 on failure. * The index of the first byte and the first character is zero. */ int buf_byteidx_to_charidx(buf_T *buf, int lnum, int byteidx) { char_u *str; char_u *t; int count; if (buf == NULL || buf->b_ml.ml_mfp == NULL) return -1; if (lnum > buf->b_ml.ml_line_count) lnum = buf->b_ml.ml_line_count; str = ml_get_buf(buf, lnum, FALSE); if (str == NULL) return -1; if (*str == NUL) return 0; // count the number of characters t = str; for (count = 0; *t != NUL && t <= str + byteidx; count++) t += mb_ptr2len(t); // In insert mode, when the cursor is at the end of a non-empty line, // byteidx points to the NUL character immediately past the end of the // string. In this case, add one to the character count. if (*t == NUL && byteidx != 0 && t == str + byteidx) count++; return count - 1; } /* * Convert the specified character index of line 'lnum' in buffer 'buf' to a * byte index. Works only for loaded buffers. Returns -1 on failure. * The index of the first byte and the first character is zero. */ int buf_charidx_to_byteidx(buf_T *buf, int lnum, int charidx) { char_u *str; char_u *t; if (buf == NULL || buf->b_ml.ml_mfp == NULL) return -1; if (lnum > buf->b_ml.ml_line_count) lnum = buf->b_ml.ml_line_count; str = ml_get_buf(buf, lnum, FALSE); if (str == NULL) return -1; // Convert the character offset to a byte offset t = str; while (*t != NUL && --charidx > 0) t += mb_ptr2len(t); return t - str; } /* * Translate a String variable into a position. * Returns NULL when there is an error. */ pos_T * var2fpos( typval_T *varp, int dollar_lnum, // TRUE when $ is last line int *fnum, // set to fnum for '0, 'A, etc. int charcol) // return character column { char_u *name; static pos_T pos; pos_T *pp; // Argument can be [lnum, col, coladd]. if (varp->v_type == VAR_LIST) { list_T *l; int len; int error = FALSE; listitem_T *li; l = varp->vval.v_list; if (l == NULL) return NULL; // Get the line number pos.lnum = list_find_nr(l, 0L, &error); if (error || pos.lnum <= 0 || pos.lnum > curbuf->b_ml.ml_line_count) return NULL; // invalid line number if (charcol) len = (long)mb_charlen(ml_get(pos.lnum)); else len = (long)STRLEN(ml_get(pos.lnum)); // Get the column number // We accept "$" for the column number: last column. li = list_find(l, 1L); if (li != NULL && li->li_tv.v_type == VAR_STRING && li->li_tv.vval.v_string != NULL && STRCMP(li->li_tv.vval.v_string, "$") == 0) { pos.col = len + 1; } else { pos.col = list_find_nr(l, 1L, &error); if (error) return NULL; } // Accept a position up to the NUL after the line. if (pos.col == 0 || (int)pos.col > len + 1) return NULL; // invalid column number --pos.col; // Get the virtual offset. Defaults to zero. pos.coladd = list_find_nr(l, 2L, &error); if (error) pos.coladd = 0; return &pos; } if (in_vim9script() && check_for_string_arg(varp, 0) == FAIL) return NULL; name = tv_get_string_chk(varp); if (name == NULL) return NULL; pos.lnum = 0; if (name[0] == '.' 
&& (!in_vim9script() || name[1] == NUL)) { // cursor pos = curwin->w_cursor; } else if (name[0] == 'v' && name[1] == NUL) { // Visual start if (VIsual_active) pos = VIsual; else pos = curwin->w_cursor; } else if (name[0] == '\'' && (!in_vim9script() || (name[1] != NUL && name[2] == NUL))) { // mark pp = getmark_buf_fnum(curbuf, name[1], FALSE, fnum); if (pp == NULL || pp == (pos_T *)-1 || pp->lnum <= 0) return NULL; pos = *pp; } if (pos.lnum != 0) { if (charcol) pos.col = buf_byteidx_to_charidx(curbuf, pos.lnum, pos.col); return &pos; } pos.coladd = 0; if (name[0] == 'w' && dollar_lnum) { pos.col = 0; if (name[1] == '0') // "w0": first visible line { update_topline(); // In silent Ex mode topline is zero, but that's not a valid line // number; use one instead. pos.lnum = curwin->w_topline > 0 ? curwin->w_topline : 1; return &pos; } else if (name[1] == '$') // "w$": last visible line { validate_botline(); // In silent Ex mode botline is zero, return zero then. pos.lnum = curwin->w_botline > 0 ? curwin->w_botline - 1 : 0; return &pos; } } else if (name[0] == '$') // last column or line { if (dollar_lnum) { pos.lnum = curbuf->b_ml.ml_line_count; pos.col = 0; } else { pos.lnum = curwin->w_cursor.lnum; if (charcol) pos.col = (colnr_T)mb_charlen(ml_get_curline()); else pos.col = (colnr_T)STRLEN(ml_get_curline()); } return &pos; } if (in_vim9script()) semsg(_(e_invalid_value_for_line_number_str), name); return NULL; } /* * Convert list in "arg" into a position and optional file number. * When "fnump" is NULL there is no file number, only 3 items. * Note that the column is passed on as-is, the caller may want to decrement * it to use 1 for the first column. * Return FAIL when conversion is not possible, doesn't check the position for * validity. */ int list2fpos( typval_T *arg, pos_T *posp, int *fnump, colnr_T *curswantp, int charcol) { list_T *l = arg->vval.v_list; long i = 0; long n; // List must be: [fnum, lnum, col, coladd, curswant], where "fnum" is only // there when "fnump" isn't NULL; "coladd" and "curswant" are optional. if (arg->v_type != VAR_LIST || l == NULL || l->lv_len < (fnump == NULL ? 2 : 3) || l->lv_len > (fnump == NULL ? 4 : 5)) return FAIL; if (fnump != NULL) { n = list_find_nr(l, i++, NULL); // fnum if (n < 0) return FAIL; if (n == 0) n = curbuf->b_fnum; // current buffer *fnump = n; } n = list_find_nr(l, i++, NULL); // lnum if (n < 0) return FAIL; posp->lnum = n; n = list_find_nr(l, i++, NULL); // col if (n < 0) return FAIL; // If character position is specified, then convert to byte position if (charcol) { buf_T *buf; // Get the text for the specified line in a loaded buffer buf = buflist_findnr(fnump == NULL ? curbuf->b_fnum : *fnump); if (buf == NULL || buf->b_ml.ml_mfp == NULL) return FAIL; n = buf_charidx_to_byteidx(buf, posp->lnum, n) + 1; } posp->col = n; n = list_find_nr(l, i, NULL); // off if (n < 0) posp->coladd = 0; else posp->coladd = n; if (curswantp != NULL) *curswantp = list_find_nr(l, i + 1, NULL); // curswant return OK; } /* * Get the length of an environment variable name. * Advance "arg" to the first character after the name. * Return 0 for error. */ int get_env_len(char_u **arg) { char_u *p; int len; for (p = *arg; vim_isIDc(*p); ++p) ; if (p == *arg) // no name found return 0; len = (int)(p - *arg); *arg = p; return len; } /* * Get the length of the name of a function or internal variable. * "arg" is advanced to after the name. * Return 0 if something is wrong. */ int get_id_len(char_u **arg) { char_u *p; int len; // Find the end of the name. 
for (p = *arg; eval_isnamec(*p); ++p) { if (*p == ':') { // "s:" is start of "s:var", but "n:" is not and can be used in // slice "[n:]". Also "xx:" is not a namespace. len = (int)(p - *arg); if ((len == 1 && vim_strchr(NAMESPACE_CHAR, **arg) == NULL) || len > 1) break; } } if (p == *arg) // no name found return 0; len = (int)(p - *arg); *arg = p; return len; } /* * Get the length of the name of a variable or function. * Only the name is recognized, does not handle ".key" or "[idx]". * "arg" is advanced to the first non-white character after the name. * Return -1 if curly braces expansion failed. * Return 0 if something else is wrong. * If the name contains 'magic' {}'s, expand them and return the * expanded name in an allocated string via 'alias' - caller must free. */ int get_name_len( char_u **arg, char_u **alias, int evaluate, int verbose) { int len; char_u *p; char_u *expr_start; char_u *expr_end; *alias = NULL; // default to no alias if ((*arg)[0] == K_SPECIAL && (*arg)[1] == KS_EXTRA && (*arg)[2] == (int)KE_SNR) { // hard coded <SNR>, already translated *arg += 3; return get_id_len(arg) + 3; } len = eval_fname_script(*arg); if (len > 0) { // literal "<SID>", "s:" or "<SNR>" *arg += len; } /* * Find the end of the name; check for {} construction. */ p = find_name_end(*arg, &expr_start, &expr_end, len > 0 ? 0 : FNE_CHECK_START); if (expr_start != NULL) { char_u *temp_string; if (!evaluate) { len += (int)(p - *arg); *arg = skipwhite(p); return len; } /* * Include any <SID> etc in the expanded string: * Thus the -len here. */ temp_string = make_expanded_name(*arg - len, expr_start, expr_end, p); if (temp_string == NULL) return -1; *alias = temp_string; *arg = skipwhite(p); return (int)STRLEN(temp_string); } len += get_id_len(arg); // Only give an error when there is something, otherwise it will be // reported at a higher level. if (len == 0 && verbose && **arg != NUL) semsg(_(e_invalid_expression_str), *arg); return len; } /* * Find the end of a variable or function name, taking care of magic braces. * If "expr_start" is not NULL then "expr_start" and "expr_end" are set to the * start and end of the first magic braces item. * "flags" can have FNE_INCL_BR and FNE_CHECK_START. * Return a pointer to just after the name. Equal to "arg" if there is no * valid name. */ char_u * find_name_end( char_u *arg, char_u **expr_start, char_u **expr_end, int flags) { int mb_nest = 0; int br_nest = 0; char_u *p; int len; int vim9script = in_vim9script(); if (expr_start != NULL) { *expr_start = NULL; *expr_end = NULL; } // Quick check for valid starting character. if ((flags & FNE_CHECK_START) && !eval_isnamec1(*arg) && (*arg != '{' || vim9script)) return arg; for (p = arg; *p != NUL && (eval_isnamec(*p) || (*p == '{' && !vim9script) || ((flags & FNE_INCL_BR) && (*p == '[' || (*p == '.' && eval_isdictc(p[1])))) || mb_nest != 0 || br_nest != 0); MB_PTR_ADV(p)) { if (*p == '\'') { // skip over 'string' to avoid counting [ and ] inside it. for (p = p + 1; *p != NUL && *p != '\''; MB_PTR_ADV(p)) ; if (*p == NUL) break; } else if (*p == '"') { // skip over "str\"ing" to avoid counting [ and ] inside it. for (p = p + 1; *p != NUL && *p != '"'; MB_PTR_ADV(p)) if (*p == '\\' && p[1] != NUL) ++p; if (*p == NUL) break; } else if (br_nest == 0 && mb_nest == 0 && *p == ':') { // "s:" is start of "s:var", but "n:" is not and can be used in // slice "[n:]". Also "xx:" is not a namespace. But {ns}: is. 
len = (int)(p - arg); if ((len == 1 && vim_strchr(NAMESPACE_CHAR, *arg) == NULL) || (len > 1 && p[-1] != '}')) break; } if (mb_nest == 0) { if (*p == '[') ++br_nest; else if (*p == ']') --br_nest; } if (br_nest == 0 && !vim9script) { if (*p == '{') { mb_nest++; if (expr_start != NULL && *expr_start == NULL) *expr_start = p; } else if (*p == '}') { mb_nest--; if (expr_start != NULL && mb_nest == 0 && *expr_end == NULL) *expr_end = p; } } } return p; } /* * Expands out the 'magic' {}'s in a variable/function name. * Note that this can call itself recursively, to deal with * constructs like foo{bar}{baz}{bam} * The four pointer arguments point to "foo{expre}ss{ion}bar" * "in_start" ^ * "expr_start" ^ * "expr_end" ^ * "in_end" ^ * * Returns a new allocated string, which the caller must free. * Returns NULL for failure. */ static char_u * make_expanded_name( char_u *in_start, char_u *expr_start, char_u *expr_end, char_u *in_end) { char_u c1; char_u *retval = NULL; char_u *temp_result; if (expr_end == NULL || in_end == NULL) return NULL; *expr_start = NUL; *expr_end = NUL; c1 = *in_end; *in_end = NUL; temp_result = eval_to_string(expr_start + 1, FALSE); if (temp_result != NULL) { retval = alloc(STRLEN(temp_result) + (expr_start - in_start) + (in_end - expr_end) + 1); if (retval != NULL) { STRCPY(retval, in_start); STRCAT(retval, temp_result); STRCAT(retval, expr_end + 1); } } vim_free(temp_result); *in_end = c1; // put char back for error messages *expr_start = '{'; *expr_end = '}'; if (retval != NULL) { temp_result = find_name_end(retval, &expr_start, &expr_end, 0); if (expr_start != NULL) { // Further expansion! temp_result = make_expanded_name(retval, expr_start, expr_end, temp_result); vim_free(retval); retval = temp_result; } } return retval; } /* * Return TRUE if character "c" can be used in a variable or function name. * Does not include '{' or '}' for magic braces. */ int eval_isnamec(int c) { return ASCII_ISALNUM(c) || c == '_' || c == ':' || c == AUTOLOAD_CHAR; } /* * Return TRUE if character "c" can be used as the first character in a * variable or function name (excluding '{' and '}'). */ int eval_isnamec1(int c) { return ASCII_ISALPHA(c) || c == '_'; } /* * Return TRUE if character "c" can be used as the first character of a * dictionary key. */ int eval_isdictc(int c) { return ASCII_ISALNUM(c) || c == '_'; } /* * Handle: * - expr[expr], expr[expr:expr] subscript * - ".name" lookup * - function call with Funcref variable: func(expr) * - method call: var->method() * * Can all be combined in any order: dict.func(expr)[idx]['func'](expr)->len() * "name_start" points to a variable before the subscript or is NULL. */ int handle_subscript( char_u **arg, char_u *name_start, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int ret = OK; dict_T *selfdict = NULL; int check_white = TRUE; int getnext; char_u *p; while (ret == OK) { // When at the end of the line and ".name" or "->{" or "->X" follows in // the next line then consume the line break. p = eval_next_non_blank(*arg, evalarg, &getnext); if (getnext && ((rettv->v_type == VAR_DICT && *p == '.' && eval_isdictc(p[1])) || (p[0] == '-' && p[1] == '>' && (p[2] == '{' || ASCII_ISALPHA(in_vim9script() ? 
*skipwhite(p + 2) : p[2]))))) { *arg = eval_next_line(*arg, evalarg); p = *arg; check_white = FALSE; } if (rettv->v_type == VAR_ANY) { char_u *exp_name; int cc; int idx; ufunc_T *ufunc; type_T *type; // Found script from "import {name} as name", script item name must // follow. "rettv->vval.v_number" has the script ID. if (**arg != '.') { if (verbose) semsg(_(e_expected_dot_after_name_str), name_start != NULL ? name_start: *arg); ret = FAIL; break; } ++*arg; if (IS_WHITE_OR_NUL(**arg)) { if (verbose) emsg(_(e_no_white_space_allowed_after_dot)); ret = FAIL; break; } // isolate the name exp_name = *arg; while (eval_isnamec(**arg)) ++*arg; cc = **arg; **arg = NUL; idx = find_exported(rettv->vval.v_number, exp_name, &ufunc, &type, evalarg->eval_cctx, evalarg->eval_cstack, verbose); **arg = cc; if (idx < 0 && ufunc == NULL) { ret = FAIL; break; } if (idx >= 0) { scriptitem_T *si = SCRIPT_ITEM(rettv->vval.v_number); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; copy_tv(sv->sv_tv, rettv); } else { rettv->v_type = VAR_FUNC; rettv->vval.v_string = vim_strsave(ufunc->uf_name); } continue; } if ((**arg == '(' && (!evaluate || rettv->v_type == VAR_FUNC || rettv->v_type == VAR_PARTIAL)) && (!check_white || !VIM_ISWHITE(*(*arg - 1)))) { ret = call_func_rettv(arg, evalarg, rettv, evaluate, selfdict, NULL); // Stop the expression evaluation when immediately aborting on // error, or when an interrupt occurred or an exception was thrown // but not caught. if (aborting()) { if (ret == OK) clear_tv(rettv); ret = FAIL; } dict_unref(selfdict); selfdict = NULL; } else if (p[0] == '-' && p[1] == '>') { if (in_vim9script()) *arg = skipwhite(p + 2); else *arg = p + 2; if (ret == OK) { if (VIM_ISWHITE(**arg)) { emsg(_(e_no_white_space_allowed_before_parenthesis)); ret = FAIL; } else if ((**arg == '{' && !in_vim9script()) || **arg == '(') // expr->{lambda}() or expr->(lambda)() ret = eval_lambda(arg, rettv, evalarg, verbose); else // expr->name() ret = eval_method(arg, rettv, evalarg, verbose); } } // "." is ".name" lookup when we found a dict or when evaluating and // scriptversion is at least 2, where string concatenation is "..". else if (**arg == '[' || (**arg == '.' && (rettv->v_type == VAR_DICT || (!evaluate && (*arg)[1] != '.' && !in_old_script(2))))) { dict_unref(selfdict); if (rettv->v_type == VAR_DICT) { selfdict = rettv->vval.v_dict; if (selfdict != NULL) ++selfdict->dv_refcount; } else selfdict = NULL; if (eval_index(arg, rettv, evalarg, verbose) == FAIL) { clear_tv(rettv); ret = FAIL; } } else break; } // Turn "dict.Func" into a partial for "Func" bound to "dict". // Don't do this when "Func" is already a partial that was bound // explicitly (pt_auto is FALSE). if (selfdict != NULL && (rettv->v_type == VAR_FUNC || (rettv->v_type == VAR_PARTIAL && (rettv->vval.v_partial->pt_auto || rettv->vval.v_partial->pt_dict == NULL)))) selfdict = make_partial(selfdict, rettv); dict_unref(selfdict); return ret; } /* * Make a copy of an item. * Lists and Dictionaries are also copied. A deep copy if "deep" is set. * "top" is TRUE for the toplevel of copy(). * For deepcopy() "copyID" is zero for a full copy or the ID for when a * reference to an already copied list/dict can be used. * Returns FAIL or OK. 
*/ int item_copy( typval_T *from, typval_T *to, int deep, int top, int copyID) { static int recurse = 0; int ret = OK; if (recurse >= DICT_MAXNEST) { emsg(_(e_variable_nested_too_deep_for_making_copy)); return FAIL; } ++recurse; switch (from->v_type) { case VAR_NUMBER: case VAR_FLOAT: case VAR_STRING: case VAR_FUNC: case VAR_PARTIAL: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: copy_tv(from, to); break; case VAR_LIST: to->v_type = VAR_LIST; to->v_lock = 0; if (from->vval.v_list == NULL) to->vval.v_list = NULL; else if (copyID != 0 && from->vval.v_list->lv_copyID == copyID) { // use the copy made earlier to->vval.v_list = from->vval.v_list->lv_copylist; ++to->vval.v_list->lv_refcount; } else to->vval.v_list = list_copy(from->vval.v_list, deep, top, copyID); if (to->vval.v_list == NULL) ret = FAIL; break; case VAR_BLOB: ret = blob_copy(from->vval.v_blob, to); break; case VAR_DICT: to->v_type = VAR_DICT; to->v_lock = 0; if (from->vval.v_dict == NULL) to->vval.v_dict = NULL; else if (copyID != 0 && from->vval.v_dict->dv_copyID == copyID) { // use the copy made earlier to->vval.v_dict = from->vval.v_dict->dv_copydict; ++to->vval.v_dict->dv_refcount; } else to->vval.v_dict = dict_copy(from->vval.v_dict, deep, top, copyID); if (to->vval.v_dict == NULL) ret = FAIL; break; case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: internal_error_no_abort("item_copy(UNKNOWN)"); ret = FAIL; } --recurse; return ret; } void echo_one(typval_T *rettv, int with_space, int *atstart, int *needclr) { char_u *tofree; char_u numbuf[NUMBUFLEN]; char_u *p = echo_string(rettv, &tofree, numbuf, get_copyID()); if (*atstart) { *atstart = FALSE; // Call msg_start() after eval1(), evaluating the expression // may cause a message to appear. if (with_space) { // Mark the saved text as finishing the line, so that what // follows is displayed on a new line when scrolling back // at the more prompt. msg_sb_eol(); msg_start(); } } else if (with_space) msg_puts_attr(" ", echo_attr); if (p != NULL) for ( ; *p != NUL && !got_int; ++p) { if (*p == '\n' || *p == '\r' || *p == TAB) { if (*p != TAB && *needclr) { // remove any text still there from the command msg_clr_eos(); *needclr = FALSE; } msg_putchar_attr(*p, echo_attr); } else { if (has_mbyte) { int i = (*mb_ptr2len)(p); (void)msg_outtrans_len_attr(p, i, echo_attr); p += i - 1; } else (void)msg_outtrans_len_attr(p, 1, echo_attr); } } vim_free(tofree); } /* * ":echo expr1 ..." print each argument separated with a space, add a * newline at the end. * ":echon expr1 ..." print each argument plain. */ void ex_echo(exarg_T *eap) { char_u *arg = eap->arg; typval_T rettv; char_u *arg_start; int needclr = TRUE; int atstart = TRUE; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap->skip); if (eap->skip) ++emsg_skip; while ((!ends_excmd2(eap->cmd, arg) || *arg == '"') && !got_int) { // If eval1() causes an error message the text from the command may // still need to be cleared. E.g., "echo 22,44". need_clr_eos = needclr; arg_start = arg; if (eval1(&arg, &rettv, &evalarg) == FAIL) { /* * Report the invalid expression unless the expression evaluation * has been cancelled due to an aborting error, an interrupt, or an * exception. 
*/ if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before) semsg(_(e_invalid_expression_str), arg_start); need_clr_eos = FALSE; break; } need_clr_eos = FALSE; if (!eap->skip) { if (rettv.v_type == VAR_VOID) { semsg(_(e_expression_does_not_result_in_value_str), arg_start); break; } echo_one(&rettv, eap->cmdidx == CMD_echo, &atstart, &needclr); } clear_tv(&rettv); arg = skipwhite(arg); } set_nextcmd(eap, arg); clear_evalarg(&evalarg, eap); if (eap->skip) --emsg_skip; else { // remove text that may still be there from the command if (needclr) msg_clr_eos(); if (eap->cmdidx == CMD_echo) msg_end(); } } /* * ":echohl {name}". */ void ex_echohl(exarg_T *eap) { echo_attr = syn_name2attr(eap->arg); } /* * Returns the :echo attribute */ int get_echo_attr(void) { return echo_attr; } /* * ":execute expr1 ..." execute the result of an expression. * ":echomsg expr1 ..." Print a message * ":echoerr expr1 ..." Print an error * ":echoconsole expr1 ..." Print a message on stdout * Each gets spaces around each argument and a newline at the end for * echo commands */ void ex_execute(exarg_T *eap) { char_u *arg = eap->arg; typval_T rettv; int ret = OK; char_u *p; garray_T ga; int len; long start_lnum = SOURCING_LNUM; ga_init2(&ga, 1, 80); if (eap->skip) ++emsg_skip; while (!ends_excmd2(eap->cmd, arg) || *arg == '"') { ret = eval1_emsg(&arg, &rettv, eap); if (ret == FAIL) break; if (!eap->skip) { char_u buf[NUMBUFLEN]; if (eap->cmdidx == CMD_execute) { if (rettv.v_type == VAR_CHANNEL || rettv.v_type == VAR_JOB) { semsg(_(e_using_invalid_value_as_string_str), vartype_name(rettv.v_type)); p = NULL; } else p = tv_get_string_buf(&rettv, buf); } else p = tv_stringify(&rettv, buf); if (p == NULL) { clear_tv(&rettv); ret = FAIL; break; } len = (int)STRLEN(p); if (ga_grow(&ga, len + 2) == FAIL) { clear_tv(&rettv); ret = FAIL; break; } if (ga.ga_len) ((char_u *)(ga.ga_data))[ga.ga_len++] = ' '; STRCPY((char_u *)(ga.ga_data) + ga.ga_len, p); ga.ga_len += len; } clear_tv(&rettv); arg = skipwhite(arg); } if (ret != FAIL && ga.ga_data != NULL) { // use the first line of continuation lines for messages SOURCING_LNUM = start_lnum; if (eap->cmdidx == CMD_echomsg || eap->cmdidx == CMD_echoerr) { // Mark the already saved text as finishing the line, so that what // follows is displayed on a new line when scrolling back at the // more prompt. msg_sb_eol(); } if (eap->cmdidx == CMD_echomsg) { msg_attr(ga.ga_data, echo_attr); out_flush(); } else if (eap->cmdidx == CMD_echoconsole) { ui_write(ga.ga_data, (int)STRLEN(ga.ga_data), TRUE); ui_write((char_u *)"\r\n", 2, TRUE); } else if (eap->cmdidx == CMD_echoerr) { int save_did_emsg = did_emsg; // We don't want to abort following commands, restore did_emsg. emsg(ga.ga_data); if (!force_abort) did_emsg = save_did_emsg; } else if (eap->cmdidx == CMD_execute) { int save_sticky_cmdmod_flags = sticky_cmdmod_flags; // "legacy exe cmd" and "vim9cmd exe cmd" applies to "cmd". sticky_cmdmod_flags = cmdmod.cmod_flags & (CMOD_LEGACY | CMOD_VIM9CMD); do_cmdline((char_u *)ga.ga_data, eap->getline, eap->cookie, DOCMD_NOWAIT|DOCMD_VERBOSE); sticky_cmdmod_flags = save_sticky_cmdmod_flags; } } ga_clear(&ga); if (eap->skip) --emsg_skip; set_nextcmd(eap, arg); } /* * Skip over the name of an option: "&option", "&g:option" or "&l:option". * "arg" points to the "&" or '+' when called, to "option" when returning. * Returns NULL when no option name found. Otherwise pointer to the char * after the option name. 
*/ char_u * find_option_end(char_u **arg, int *scope) { char_u *p = *arg; ++p; if (*p == 'g' && p[1] == ':') { *scope = OPT_GLOBAL; p += 2; } else if (*p == 'l' && p[1] == ':') { *scope = OPT_LOCAL; p += 2; } else *scope = 0; if (!ASCII_ISALPHA(*p)) return NULL; *arg = p; if (p[0] == 't' && p[1] == '_' && p[2] != NUL && p[3] != NUL) p += 4; // termcap option else while (ASCII_ISALPHA(*p)) ++p; return p; } /* * Display script name where an item was last set. * Should only be invoked when 'verbose' is non-zero. */ void last_set_msg(sctx_T script_ctx) { char_u *p; if (script_ctx.sc_sid != 0) { p = home_replace_save(NULL, get_scriptname(script_ctx.sc_sid)); if (p != NULL) { verbose_enter(); msg_puts(_("\n\tLast set from ")); msg_puts((char *)p); if (script_ctx.sc_lnum > 0) { msg_puts(_(line_msg)); msg_outnum((long)script_ctx.sc_lnum); } verbose_leave(); vim_free(p); } } } #endif // FEAT_EVAL /* * Perform a substitution on "str" with pattern "pat" and substitute "sub". * When "sub" is NULL "expr" is used, must be a VAR_FUNC or VAR_PARTIAL. * "flags" can be "g" to do a global substitute. * Returns an allocated string, NULL for error. */ char_u * do_string_sub( char_u *str, char_u *pat, char_u *sub, typval_T *expr, char_u *flags) { int sublen; regmatch_T regmatch; int i; int do_all; char_u *tail; char_u *end; garray_T ga; char_u *ret; char_u *save_cpo; char_u *zero_width = NULL; // Make 'cpoptions' empty, so that the 'l' flag doesn't work here save_cpo = p_cpo; p_cpo = empty_option; ga_init2(&ga, 1, 200); do_all = (flags[0] == 'g'); regmatch.rm_ic = p_ic; regmatch.regprog = vim_regcomp(pat, RE_MAGIC + RE_STRING); if (regmatch.regprog != NULL) { tail = str; end = str + STRLEN(str); while (vim_regexec_nl(&regmatch, str, (colnr_T)(tail - str))) { // Skip empty match except for first match. if (regmatch.startp[0] == regmatch.endp[0]) { if (zero_width == regmatch.startp[0]) { // avoid getting stuck on a match with an empty string i = mb_ptr2len(tail); mch_memmove((char_u *)ga.ga_data + ga.ga_len, tail, (size_t)i); ga.ga_len += i; tail += i; continue; } zero_width = regmatch.startp[0]; } /* * Get some space for a temporary buffer to do the substitution * into. It will contain: * - The text up to where the match is. * - The substituted text. * - The text after the match. */ sublen = vim_regsub(&regmatch, sub, expr, tail, 0, REGSUB_MAGIC); if (ga_grow(&ga, (int)((end - tail) + sublen - (regmatch.endp[0] - regmatch.startp[0]))) == FAIL) { ga_clear(&ga); break; } // copy the text up to where the match is i = (int)(regmatch.startp[0] - tail); mch_memmove((char_u *)ga.ga_data + ga.ga_len, tail, (size_t)i); // add the substituted text (void)vim_regsub(&regmatch, sub, expr, (char_u *)ga.ga_data + ga.ga_len + i, sublen, REGSUB_COPY | REGSUB_MAGIC); ga.ga_len += i + sublen - 1; tail = regmatch.endp[0]; if (*tail == NUL) break; if (!do_all) break; } if (ga.ga_data != NULL) STRCPY((char *)ga.ga_data + ga.ga_len, tail); vim_regfree(regmatch.regprog); } ret = vim_strsave(ga.ga_data == NULL ? str : (char_u *)ga.ga_data); ga_clear(&ga); if (p_cpo == empty_option) p_cpo = save_cpo; else { // Darn, evaluating {sub} expression or {expr} changed the value. // If it's still empty it was changed and restored, need to restore in // the complicated way. if (*p_cpo == NUL) set_option_value_give_err((char_u *)"cpo", 0L, save_cpo, 0); free_string_option(save_cpo); } return ret; }
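The tail of do_string_sub() just above is a pattern worth noting: 'cpoptions' is blanked through the shared empty_option sentinel so the 'l' flag cannot affect the regexp, and on the way out a pointer comparison detects whether evaluating {sub} or {expr} re-assigned the option, choosing between a cheap pointer restore and the "complicated way". A standalone sketch of that save/sentinel/restore idea, with toy globals in place of Vim's option code (set_option, with_cpo_blanked and the other names are illustrative, not Vim's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins for Vim's 'cpoptions' handling. "empty_option" is a
 * shared sentinel: a pointer comparison against it tells whether the
 * global was re-assigned while we had it blanked. */
static char empty_option[] = "";
static char *p_cpo;

    static void
set_option(const char *val)     /* simulates assigning the option */
{
    p_cpo = strdup(val);
}

/* Run "body" with 'cpo' blanked, then restore it the way the end of
 * do_string_sub() does. */
    static void
with_cpo_blanked(void (*body)(void))
{
    char *save_cpo = p_cpo;

    p_cpo = empty_option;
    body();
    if (p_cpo == empty_option)
	p_cpo = save_cpo;       /* untouched: cheap pointer restore */
    else
    {
	/* body() re-assigned the option. If it is empty again it was
	 * "changed and restored", so reinstate the saved value through
	 * the setter; otherwise the new value wins. Either way the
	 * saved copy is no longer needed. */
	if (*p_cpo == '\0')
	{
	    free(p_cpo);
	    p_cpo = strdup(save_cpo);
	}
	free(save_cpo);
    }
}

static void quiet_body(void) {}
static void noisy_body(void) { set_option("aABceF"); }

    int
main(void)
{
    set_option("aABceFs");
    with_cpo_blanked(quiet_body);
    printf("after quiet body: %s\n", p_cpo);
    with_cpo_blanked(noisy_body);
    printf("after noisy body: %s\n", p_cpo);    /* body's value kept */
    free(p_cpo);
    return 0;
}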
func_before:
init_evalarg(evalarg_T *evalarg)
{
    CLEAR_POINTER(evalarg);
    ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20);
}

func_after:
init_evalarg(evalarg_T *evalarg)
{
    CLEAR_POINTER(evalarg);
    ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20);
}
{'added': [(356, '/*'), (357, ' * Initialize "evalarg" for use.'), (358, ' */'), (359, ' void'), (360, 'init_evalarg(evalarg_T *evalarg)'), (361, '{'), (362, ' CLEAR_POINTER(evalarg);'), (363, ' ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20);'), (364, '}'), (365, ''), (366, '/*'), (367, ' * If "evalarg->eval_tofree" is not NULL free it later.'), (368, ' * Caller is expected to overwrite "evalarg->eval_tofree" next.'), (369, ' */'), (370, ' static void'), (371, 'free_eval_tofree_later(evalarg_T *evalarg)'), (372, '{'), (373, ' if (evalarg->eval_tofree != NULL)'), (374, ' {'), (375, '\tif (ga_grow(&evalarg->eval_tofree_ga, 1) == OK)'), (376, '\t ((char_u **)evalarg->eval_tofree_ga.ga_data)'), (377, '\t\t[evalarg->eval_tofree_ga.ga_len++]'), (378, '\t\t= evalarg->eval_tofree;'), (379, '\telse'), (380, '\t vim_free(evalarg->eval_tofree);'), (381, ' }'), (382, '}'), (383, ''), (384, '/*'), (385, ' * After using "evalarg" filled from "eap": free the memory.'), (386, ' */'), (387, ' void'), (388, 'clear_evalarg(evalarg_T *evalarg, exarg_T *eap)'), (389, '{'), (390, ' if (evalarg != NULL)'), (391, ' {'), (392, '\tif (evalarg->eval_tofree != NULL)'), (393, '\t{'), (394, '\t if (eap != NULL)'), (395, '\t {'), (396, '\t\t// We may need to keep the original command line, e.g. for'), (397, '\t\t// ":let" it has the variable names. But we may also need the'), (398, '\t\t// new one, "nextcmd" points into it. Keep both.'), (399, '\t\tvim_free(eap->cmdline_tofree);'), (400, '\t\teap->cmdline_tofree = *eap->cmdlinep;'), (401, '\t\t*eap->cmdlinep = evalarg->eval_tofree;'), (402, '\t }'), (403, '\t else'), (404, '\t\tvim_free(evalarg->eval_tofree);'), (405, '\t evalarg->eval_tofree = NULL;'), (406, '\t}'), (407, ''), (408, '\tga_clear_strings(&evalarg->eval_tofree_ga);'), (409, '\tVIM_CLEAR(evalarg->eval_tofree_lambda);'), (410, ' }'), (411, '}'), (412, ''), (495, '\t\t// later. Also free "eval_tofree" later if needed.'), (496, '\t\tfree_eval_tofree_later(evalarg);'), (2334, '\tfree_eval_tofree_later(evalarg);')], 'deleted': [(438, '\t\t// later.'), (439, '\t\tvim_free(evalarg->eval_tofree);'), (2277, '\tvim_free(evalarg->eval_tofree);'), (2304, '/*'), (2305, ' * Initialize "evalarg" for use.'), (2306, ' */'), (2307, ' void'), (2308, 'init_evalarg(evalarg_T *evalarg)'), (2309, '{'), (2310, ' CLEAR_POINTER(evalarg);'), (2311, ' ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20);'), (2312, '}'), (2313, ''), (2314, '/*'), (2315, ' * After using "evalarg" filled from "eap": free the memory.'), (2316, ' */'), (2317, ' void'), (2318, 'clear_evalarg(evalarg_T *evalarg, exarg_T *eap)'), (2319, '{'), (2320, ' if (evalarg != NULL)'), (2321, ' {'), (2322, '\tif (evalarg->eval_tofree != NULL)'), (2323, '\t{'), (2324, '\t if (eap != NULL)'), (2325, '\t {'), (2326, '\t\t// We may need to keep the original command line, e.g. for'), (2327, '\t\t// ":let" it has the variable names. But we may also need the'), (2328, '\t\t// new one, "nextcmd" points into it. Keep both.'), (2329, '\t\tvim_free(eap->cmdline_tofree);'), (2330, '\t\teap->cmdline_tofree = *eap->cmdlinep;'), (2331, '\t\t*eap->cmdlinep = evalarg->eval_tofree;'), (2332, '\t }'), (2333, '\t else'), (2334, '\t\tvim_free(evalarg->eval_tofree);'), (2335, '\t evalarg->eval_tofree = NULL;'), (2336, '\t}'), (2337, ''), (2338, '\tga_clear_strings(&evalarg->eval_tofree_ga);'), (2339, '\tVIM_CLEAR(evalarg->eval_tofree_lambda);'), (2340, ' }'), (2341, '}'), (2342, '')]}
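The diff above is the substance of this row's fix (CVE-2022-2889, a CWE-416 use-after-free, per the fields below): where the old code called vim_free(evalarg->eval_tofree) right away, the patch adds free_eval_tofree_later(), which parks the pointer in the eval_tofree_ga growarray (freeing immediately only when the growarray cannot grow), so the memory stays valid until clear_evalarg() releases it with ga_clear_strings(). A minimal standalone sketch of that deferred-free idea, using plain libc in place of Vim's garray API (evalarg_sketch, free_later and clear_sketch are illustrative names, not Vim's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for evalarg_T: pointers that must outlive the
 * current evaluation are parked here instead of being freed early. */
typedef struct {
    char    **tofree;   /* plays the role of eval_tofree_ga */
    size_t  len, cap;
} evalarg_sketch;

/* Like free_eval_tofree_later(): park "p" for later release; only if
 * the array cannot grow is it freed immediately, as in the patch. */
    static void
free_later(evalarg_sketch *ea, char *p)
{
    if (p == NULL)
	return;
    if (ea->len == ea->cap)
    {
	size_t ncap = ea->cap == 0 ? 8 : ea->cap * 2;
	char **np = realloc(ea->tofree, ncap * sizeof(char *));

	if (np == NULL)
	{
	    free(p);	/* fallback: free now, as on ga_grow() failure */
	    return;
	}
	ea->tofree = np;
	ea->cap = ncap;
    }
    ea->tofree[ea->len++] = p;
}

/* Like the ga_clear_strings() call in clear_evalarg(): the one place
 * where parked lines are actually released. */
    static void
clear_sketch(evalarg_sketch *ea)
{
    for (size_t i = 0; i < ea->len; ++i)
	free(ea->tofree[i]);
    free(ea->tofree);
    ea->tofree = NULL;
    ea->len = ea->cap = 0;
}

    int
main(void)
{
    evalarg_sketch ea = { NULL, 0, 0 };
    char *line = strdup("let x = 1");
    char *nextcmd = line + 4;	/* points into "line", like eap->nextcmd */

    free_later(&ea, line);	/* pre-fix code would free(line) here */
    printf("%s\n", nextcmd);	/* still valid: no use-after-free */
    clear_sketch(&ea);		/* deferred release, after all users */
    return 0;
}

The demo keeps nextcmd, a pointer into the parked line, usable across the point where the pre-fix code would already have freed the line.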
num_lines_added: 60
num_lines_deleted: 42
num_lines_in_file: 5,145
num_tokens_in_file: 27,604
num_lines_in_method: 5
num_tokens_in_method: 29
method_complexity: 1
repo: https://github.com/vim/vim
cve_id: CVE-2022-2889
cwe_id: CWE-416
id: 1,761
file_name: proc.c
programming_language: C
method_name: mrb_proc_init_copy
/* ** proc.c - Proc class ** ** See Copyright Notice in mruby.h */ #include <mruby.h> #include <mruby/class.h> #include <mruby/proc.h> #include <mruby/opcode.h> #include <mruby/data.h> #include <mruby/presym.h> #include <mruby/array.h> #include <mruby/hash.h> static const mrb_code call_iseq[] = { OP_CALL, }; static const mrb_irep call_irep = { 0, /* nlocals */ 2, /* nregs */ 0, /* clen */ MRB_ISEQ_NO_FREE | MRB_IREP_NO_FREE, /* flags */ call_iseq, /* iseq */ NULL, /* pool */ NULL, /* syms */ NULL, /* reps */ NULL, /* lv */ NULL, /* debug_info */ 1, /* ilen */ 0, /* plen */ 0, /* slen */ 1, /* rlen */ 0, /* refcnt */ }; static const struct RProc call_proc = { NULL, NULL, MRB_TT_PROC, MRB_GC_RED, MRB_FL_OBJ_IS_FROZEN | MRB_PROC_SCOPE | MRB_PROC_STRICT, { &call_irep }, NULL, { NULL } }; struct RProc* mrb_proc_new(mrb_state *mrb, const mrb_irep *irep) { struct RProc *p; mrb_callinfo *ci = mrb->c->ci; p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb->proc_class); if (ci) { struct RClass *tc = NULL; if (ci->proc) { if (ci->proc->color != MRB_GC_RED) { tc = MRB_PROC_TARGET_CLASS(ci->proc); } else { tc = mrb_vm_ci_target_class(ci); if (tc && tc->tt == MRB_TT_ICLASS) { tc = tc->c; } } } if (tc == NULL) { tc = mrb_vm_ci_target_class(ci); } p->upper = ci->proc; p->e.target_class = tc; } p->body.irep = irep; if (irep) { mrb_irep_incref(mrb, (mrb_irep*)irep); } return p; } struct REnv* mrb_env_new(mrb_state *mrb, struct mrb_context *c, mrb_callinfo *ci, int nstacks, mrb_value *stack, struct RClass *tc) { struct REnv *e; mrb_int bidx = 1; int n = ci->n; int nk = ci->nk; e = MRB_OBJ_ALLOC(mrb, MRB_TT_ENV, NULL); e->c = tc; MRB_ENV_SET_LEN(e, nstacks); bidx += (n == 15) ? 1 : n; bidx += (nk == 15) ? 1 : (2*nk); MRB_ENV_SET_BIDX(e, bidx); e->mid = ci->mid; e->stack = stack; e->cxt = c; return e; } static void closure_setup(mrb_state *mrb, struct RProc *p) { mrb_callinfo *ci = mrb->c->ci; const struct RProc *up = p->upper; struct REnv *e = NULL; if (ci && (e = mrb_vm_ci_env(ci)) != NULL) { /* do nothing, because e is assigned already */ } else if (up) { struct RClass *tc = ci->u.target_class; e = mrb_env_new(mrb, mrb->c, ci, up->body.irep->nlocals, ci->stack, tc); ci->u.env = e; if (MRB_PROC_ENV_P(up) && MRB_PROC_ENV(up)->cxt == NULL) { e->mid = MRB_PROC_ENV(up)->mid; } } if (e) { p->e.env = e; p->flags |= MRB_PROC_ENVSET; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)e); } } struct RProc* mrb_closure_new(mrb_state *mrb, const mrb_irep *irep) { struct RProc *p = mrb_proc_new(mrb, irep); closure_setup(mrb, p); return p; } MRB_API struct RProc* mrb_proc_new_cfunc(mrb_state *mrb, mrb_func_t func) { struct RProc *p; p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb->proc_class); p->body.func = func; p->flags |= MRB_PROC_CFUNC_FL; p->upper = 0; p->e.target_class = 0; return p; } MRB_API struct RProc* mrb_proc_new_cfunc_with_env(mrb_state *mrb, mrb_func_t func, mrb_int argc, const mrb_value *argv) { struct RProc *p = mrb_proc_new_cfunc(mrb, func); struct REnv *e; int i; p->e.env = e = mrb_env_new(mrb, mrb->c, mrb->c->ci, 0, NULL, NULL); p->flags |= MRB_PROC_ENVSET; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)e); MRB_ENV_CLOSE(e); e->stack = (mrb_value*)mrb_malloc(mrb, sizeof(mrb_value) * argc); MRB_ENV_SET_LEN(e, argc); if (argv) { for (i = 0; i < argc; ++i) { e->stack[i] = argv[i]; } } else { for (i = 0; i < argc; ++i) { SET_NIL_VALUE(e->stack[i]); } } return p; } MRB_API struct RProc* mrb_closure_new_cfunc(mrb_state *mrb, mrb_func_t func, int nlocals) { return mrb_proc_new_cfunc_with_env(mrb, 
func, nlocals, NULL); } MRB_API mrb_value mrb_proc_cfunc_env_get(mrb_state *mrb, mrb_int idx) { const struct RProc *p = mrb->c->ci->proc; struct REnv *e; if (!p || !MRB_PROC_CFUNC_P(p)) { mrb_raise(mrb, E_TYPE_ERROR, "Can't get cfunc env from non-cfunc proc"); } e = MRB_PROC_ENV(p); if (!e) { mrb_raise(mrb, E_TYPE_ERROR, "Can't get cfunc env from cfunc Proc without REnv"); } if (idx < 0 || MRB_ENV_LEN(e) <= idx) { mrb_raisef(mrb, E_INDEX_ERROR, "Env index out of range: %i (expected: 0 <= index < %i)", idx, MRB_ENV_LEN(e)); } return e->stack[idx]; } void mrb_proc_copy(struct RProc *a, struct RProc *b) { if (a->body.irep) { /* already initialized proc */ return; } a->flags = b->flags; a->body = b->body; if (!MRB_PROC_CFUNC_P(a) && a->body.irep) { mrb_irep_incref(NULL, (mrb_irep*)a->body.irep); } a->upper = b->upper; a->e.env = b->e.env; /* a->e.target_class = a->e.target_class; */ } static mrb_value mrb_proc_s_new(mrb_state *mrb, mrb_value proc_class) { mrb_value blk; mrb_value proc; struct RProc *p; /* Calling Proc.new without a block is not implemented yet */ mrb_get_args(mrb, "&!", &blk); p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb_class_ptr(proc_class)); mrb_proc_copy(p, mrb_proc_ptr(blk)); proc = mrb_obj_value(p); mrb_funcall_with_block(mrb, proc, MRB_SYM(initialize), 0, NULL, proc); if (!MRB_PROC_STRICT_P(p) && mrb->c->ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb->c->ci[-1].u.env) { p->flags |= MRB_PROC_ORPHAN; } return proc; } static mrb_value mrb_proc_init_copy(mrb_state *mrb, mrb_value self) { mrb_value proc = mrb_get_arg1(mrb); if (!mrb_proc_p(proc)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc"); } mrb_proc_copy(mrb_proc_ptr(self), mrb_proc_ptr(proc)); return self; } /* 15.2.17.4.2 */ static mrb_value proc_arity(mrb_state *mrb, mrb_value self) { return mrb_int_value(mrb, mrb_proc_arity(mrb_proc_ptr(self))); } /* 15.3.1.2.6 */ /* 15.3.1.3.27 */ /* * call-seq: * lambda { |...| block } -> a_proc * * Equivalent to <code>Proc.new</code>, except the resulting Proc objects * check the number of parameters passed when called. */ static mrb_value proc_lambda(mrb_state *mrb, mrb_value self) { mrb_value blk; struct RProc *p; mrb_get_args(mrb, "&", &blk); if (mrb_nil_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "tried to create Proc object without a block"); } if (!mrb_proc_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc"); } p = mrb_proc_ptr(blk); if (!MRB_PROC_STRICT_P(p)) { struct RProc *p2 = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, p->c); mrb_proc_copy(p2, p); p2->flags |= MRB_PROC_STRICT; return mrb_obj_value(p2); } return blk; } mrb_int mrb_proc_arity(const struct RProc *p) { const mrb_irep *irep; const mrb_code *pc; mrb_aspec aspec; int ma, op, ra, pa, arity; if (MRB_PROC_CFUNC_P(p)) { /* TODO cfunc aspec not implemented yet */ return -1; } irep = p->body.irep; if (!irep) { return 0; } pc = irep->iseq; /* arity is depend on OP_ENTER */ if (*pc != OP_ENTER) { return 0; } aspec = PEEK_W(pc+1); ma = MRB_ASPEC_REQ(aspec); op = MRB_ASPEC_OPT(aspec); ra = MRB_ASPEC_REST(aspec); pa = MRB_ASPEC_POST(aspec); arity = ra || (MRB_PROC_STRICT_P(p) && op) ? 
-(ma + pa + 1) : ma + pa; return arity; } mrb_value mrb_proc_local_variables(mrb_state *mrb, const struct RProc *proc) { const mrb_irep *irep; mrb_value vars; size_t i; if (proc == NULL || MRB_PROC_CFUNC_P(proc)) { return mrb_ary_new(mrb); } vars = mrb_hash_new(mrb); while (proc) { if (MRB_PROC_CFUNC_P(proc)) break; irep = proc->body.irep; if (irep->lv) { for (i = 0; i + 1 < irep->nlocals; ++i) { if (irep->lv[i]) { mrb_sym sym = irep->lv[i]; const char *name = mrb_sym_name(mrb, sym); switch (name[0]) { case '*': case '&': break; default: mrb_hash_set(mrb, vars, mrb_symbol_value(sym), mrb_true_value()); break; } } } } if (MRB_PROC_SCOPE_P(proc)) break; proc = proc->upper; } return mrb_hash_keys(mrb, vars); } const struct RProc * mrb_proc_get_caller(mrb_state *mrb, struct REnv **envp) { struct mrb_context *c = mrb->c; mrb_callinfo *ci = (c->ci > c->cibase) ? c->ci - 1 : c->cibase; const struct RProc *proc = ci->proc; if (!proc || MRB_PROC_CFUNC_P(proc)) { if (envp) *envp = NULL; } else { struct RClass *tc = MRB_PROC_TARGET_CLASS(proc); struct REnv *e = mrb_vm_ci_env(ci); if (e == NULL) { int nstacks = proc->body.irep->nlocals; e = mrb_env_new(mrb, c, ci, nstacks, ci->stack, tc); ci->u.env = e; } else if (tc) { e->c = tc; mrb_field_write_barrier(mrb, (struct RBasic*)e, (struct RBasic*)tc); } if (envp) *envp = e; } return proc; } #define IREP_LVAR_MERGE_DEFAULT 50 #define IREP_LVAR_MERGE_MINIMUM 8 #define IREP_LVAR_MERGE_MAXIMUM 240 #ifdef MRB_IREP_LVAR_MERGE_LIMIT # define IREP_LVAR_MERGE_LIMIT \ ((MRB_IREP_LVAR_MERGE_LIMIT) < IREP_LVAR_MERGE_MINIMUM ? IREP_LVAR_MERGE_MINIMUM : \ (MRB_IREP_LVAR_MERGE_LIMIT) > IREP_LVAR_MERGE_MAXIMUM ? IREP_LVAR_MERGE_MAXIMUM : \ (MRB_IREP_LVAR_MERGE_LIMIT)) #else # define IREP_LVAR_MERGE_LIMIT IREP_LVAR_MERGE_DEFAULT #endif void mrb_proc_merge_lvar(mrb_state *mrb, mrb_irep *irep, struct REnv *env, int num, const mrb_sym *lv, const mrb_value *stack) { mrb_assert(!(irep->flags & MRB_IREP_NO_FREE)); if ((irep->nlocals + num) > IREP_LVAR_MERGE_LIMIT) { mrb_raise(mrb, E_RUNTIME_ERROR, "too many local variables for binding (mruby limitation)"); } if (!lv) { mrb_raise(mrb, E_RUNTIME_ERROR, "unavailable local variable names"); } irep->lv = (mrb_sym*)mrb_realloc(mrb, (mrb_sym*)irep->lv, sizeof(mrb_sym) * (irep->nlocals + num)); env->stack = (mrb_value*)mrb_realloc(mrb, env->stack, sizeof(mrb_value) * (irep->nlocals + 1 /* self */ + num)); mrb_sym *destlv = (mrb_sym*)irep->lv + irep->nlocals - 1 /* self */; mrb_value *destst = env->stack + irep->nlocals; memmove(destlv, lv, sizeof(mrb_sym) * num); if (stack) { memmove(destst, stack, sizeof(mrb_value) * num); for (int i = 0; i < num; i++) { if (!mrb_immediate_p(stack[i])) { mrb_field_write_barrier(mrb, (struct RBasic*)env, (struct RBasic*)mrb_obj_ptr(stack[i])); } } } else { for (int i = num; i > 0; i--, destst++) { *destst = mrb_nil_value(); } } irep->nlocals += num; irep->nregs = irep->nlocals; MRB_ENV_SET_LEN(env, irep->nlocals); } void mrb_init_proc(mrb_state *mrb) { mrb_method_t m; mrb_define_class_method(mrb, mrb->proc_class, "new", mrb_proc_s_new, MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); mrb_define_method(mrb, mrb->proc_class, "initialize_copy", mrb_proc_init_copy, MRB_ARGS_REQ(1)); mrb_define_method(mrb, mrb->proc_class, "arity", proc_arity, MRB_ARGS_NONE()); MRB_METHOD_FROM_PROC(m, &call_proc); mrb_define_method_raw(mrb, mrb->proc_class, MRB_SYM(call), m); mrb_define_method_raw(mrb, mrb->proc_class, MRB_OPSYM(aref), m); mrb_define_class_method(mrb, mrb->kernel_module, "lambda", proc_lambda, 
MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); /* 15.3.1.2.6 */ mrb_define_method(mrb, mrb->kernel_module, "lambda", proc_lambda, MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); /* 15.3.1.3.27 */ }
/* ** proc.c - Proc class ** ** See Copyright Notice in mruby.h */ #include <mruby.h> #include <mruby/class.h> #include <mruby/proc.h> #include <mruby/opcode.h> #include <mruby/data.h> #include <mruby/presym.h> #include <mruby/array.h> #include <mruby/hash.h> static const mrb_code call_iseq[] = { OP_CALL, }; static const mrb_irep call_irep = { 0, /* nlocals */ 2, /* nregs */ 0, /* clen */ MRB_ISEQ_NO_FREE | MRB_IREP_NO_FREE, /* flags */ call_iseq, /* iseq */ NULL, /* pool */ NULL, /* syms */ NULL, /* reps */ NULL, /* lv */ NULL, /* debug_info */ 1, /* ilen */ 0, /* plen */ 0, /* slen */ 1, /* rlen */ 0, /* refcnt */ }; static const struct RProc call_proc = { NULL, NULL, MRB_TT_PROC, MRB_GC_RED, MRB_FL_OBJ_IS_FROZEN | MRB_PROC_SCOPE | MRB_PROC_STRICT, { &call_irep }, NULL, { NULL } }; struct RProc* mrb_proc_new(mrb_state *mrb, const mrb_irep *irep) { struct RProc *p; mrb_callinfo *ci = mrb->c->ci; p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb->proc_class); if (ci) { struct RClass *tc = NULL; if (ci->proc) { if (ci->proc->color != MRB_GC_RED) { tc = MRB_PROC_TARGET_CLASS(ci->proc); } else { tc = mrb_vm_ci_target_class(ci); if (tc && tc->tt == MRB_TT_ICLASS) { tc = tc->c; } } } if (tc == NULL) { tc = mrb_vm_ci_target_class(ci); } p->upper = ci->proc; p->e.target_class = tc; } p->body.irep = irep; if (irep) { mrb_irep_incref(mrb, (mrb_irep*)irep); } return p; } struct REnv* mrb_env_new(mrb_state *mrb, struct mrb_context *c, mrb_callinfo *ci, int nstacks, mrb_value *stack, struct RClass *tc) { struct REnv *e; mrb_int bidx = 1; int n = ci->n; int nk = ci->nk; e = MRB_OBJ_ALLOC(mrb, MRB_TT_ENV, NULL); e->c = tc; MRB_ENV_SET_LEN(e, nstacks); bidx += (n == 15) ? 1 : n; bidx += (nk == 15) ? 1 : (2*nk); MRB_ENV_SET_BIDX(e, bidx); e->mid = ci->mid; e->stack = stack; e->cxt = c; return e; } static void closure_setup(mrb_state *mrb, struct RProc *p) { mrb_callinfo *ci = mrb->c->ci; const struct RProc *up = p->upper; struct REnv *e = NULL; if (ci && (e = mrb_vm_ci_env(ci)) != NULL) { /* do nothing, because e is assigned already */ } else if (up) { struct RClass *tc = ci->u.target_class; e = mrb_env_new(mrb, mrb->c, ci, up->body.irep->nlocals, ci->stack, tc); ci->u.env = e; if (MRB_PROC_ENV_P(up) && MRB_PROC_ENV(up)->cxt == NULL) { e->mid = MRB_PROC_ENV(up)->mid; } } if (e) { p->e.env = e; p->flags |= MRB_PROC_ENVSET; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)e); } } struct RProc* mrb_closure_new(mrb_state *mrb, const mrb_irep *irep) { struct RProc *p = mrb_proc_new(mrb, irep); closure_setup(mrb, p); return p; } MRB_API struct RProc* mrb_proc_new_cfunc(mrb_state *mrb, mrb_func_t func) { struct RProc *p; p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb->proc_class); p->body.func = func; p->flags |= MRB_PROC_CFUNC_FL; p->upper = 0; p->e.target_class = 0; return p; } MRB_API struct RProc* mrb_proc_new_cfunc_with_env(mrb_state *mrb, mrb_func_t func, mrb_int argc, const mrb_value *argv) { struct RProc *p = mrb_proc_new_cfunc(mrb, func); struct REnv *e; int i; p->e.env = e = mrb_env_new(mrb, mrb->c, mrb->c->ci, 0, NULL, NULL); p->flags |= MRB_PROC_ENVSET; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)e); MRB_ENV_CLOSE(e); e->stack = (mrb_value*)mrb_malloc(mrb, sizeof(mrb_value) * argc); MRB_ENV_SET_LEN(e, argc); if (argv) { for (i = 0; i < argc; ++i) { e->stack[i] = argv[i]; } } else { for (i = 0; i < argc; ++i) { SET_NIL_VALUE(e->stack[i]); } } return p; } MRB_API struct RProc* mrb_closure_new_cfunc(mrb_state *mrb, mrb_func_t func, int nlocals) { return mrb_proc_new_cfunc_with_env(mrb, 
func, nlocals, NULL); } MRB_API mrb_value mrb_proc_cfunc_env_get(mrb_state *mrb, mrb_int idx) { const struct RProc *p = mrb->c->ci->proc; struct REnv *e; if (!p || !MRB_PROC_CFUNC_P(p)) { mrb_raise(mrb, E_TYPE_ERROR, "Can't get cfunc env from non-cfunc proc"); } e = MRB_PROC_ENV(p); if (!e) { mrb_raise(mrb, E_TYPE_ERROR, "Can't get cfunc env from cfunc Proc without REnv"); } if (idx < 0 || MRB_ENV_LEN(e) <= idx) { mrb_raisef(mrb, E_INDEX_ERROR, "Env index out of range: %i (expected: 0 <= index < %i)", idx, MRB_ENV_LEN(e)); } return e->stack[idx]; } void mrb_proc_copy(mrb_state *mrb, struct RProc *a, struct RProc *b) { if (a->body.irep) { /* already initialized proc */ return; } a->flags = b->flags; a->body = b->body; a->upper = b->upper; if (!MRB_PROC_CFUNC_P(a) && a->body.irep) { mrb_irep_incref(mrb, (mrb_irep*)a->body.irep); } a->e.env = b->e.env; /* a->e.target_class = a->e.target_class; */ } static mrb_value mrb_proc_s_new(mrb_state *mrb, mrb_value proc_class) { mrb_value blk; mrb_value proc; struct RProc *p; /* Calling Proc.new without a block is not implemented yet */ mrb_get_args(mrb, "&!", &blk); p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb_class_ptr(proc_class)); mrb_proc_copy(mrb, p, mrb_proc_ptr(blk)); proc = mrb_obj_value(p); mrb_funcall_with_block(mrb, proc, MRB_SYM(initialize), 0, NULL, proc); if (!MRB_PROC_STRICT_P(p) && mrb->c->ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb->c->ci[-1].u.env) { p->flags |= MRB_PROC_ORPHAN; } return proc; } static mrb_value mrb_proc_init_copy(mrb_state *mrb, mrb_value self) { mrb_value proc = mrb_get_arg1(mrb); if (!mrb_proc_p(proc)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc"); } mrb_proc_copy(mrb, mrb_proc_ptr(self), mrb_proc_ptr(proc)); return self; } /* 15.2.17.4.2 */ static mrb_value proc_arity(mrb_state *mrb, mrb_value self) { return mrb_int_value(mrb, mrb_proc_arity(mrb_proc_ptr(self))); } /* 15.3.1.2.6 */ /* 15.3.1.3.27 */ /* * call-seq: * lambda { |...| block } -> a_proc * * Equivalent to <code>Proc.new</code>, except the resulting Proc objects * check the number of parameters passed when called. */ static mrb_value proc_lambda(mrb_state *mrb, mrb_value self) { mrb_value blk; struct RProc *p; mrb_get_args(mrb, "&", &blk); if (mrb_nil_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "tried to create Proc object without a block"); } if (!mrb_proc_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc"); } p = mrb_proc_ptr(blk); if (!MRB_PROC_STRICT_P(p)) { struct RProc *p2 = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, p->c); mrb_proc_copy(mrb, p2, p); p2->flags |= MRB_PROC_STRICT; return mrb_obj_value(p2); } return blk; } mrb_int mrb_proc_arity(const struct RProc *p) { const mrb_irep *irep; const mrb_code *pc; mrb_aspec aspec; int ma, op, ra, pa, arity; if (MRB_PROC_CFUNC_P(p)) { /* TODO cfunc aspec not implemented yet */ return -1; } irep = p->body.irep; if (!irep) { return 0; } pc = irep->iseq; /* arity is depend on OP_ENTER */ if (*pc != OP_ENTER) { return 0; } aspec = PEEK_W(pc+1); ma = MRB_ASPEC_REQ(aspec); op = MRB_ASPEC_OPT(aspec); ra = MRB_ASPEC_REST(aspec); pa = MRB_ASPEC_POST(aspec); arity = ra || (MRB_PROC_STRICT_P(p) && op) ? 
-(ma + pa + 1) : ma + pa; return arity; } mrb_value mrb_proc_local_variables(mrb_state *mrb, const struct RProc *proc) { const mrb_irep *irep; mrb_value vars; size_t i; if (proc == NULL || MRB_PROC_CFUNC_P(proc)) { return mrb_ary_new(mrb); } vars = mrb_hash_new(mrb); while (proc) { if (MRB_PROC_CFUNC_P(proc)) break; irep = proc->body.irep; if (irep->lv) { for (i = 0; i + 1 < irep->nlocals; ++i) { if (irep->lv[i]) { mrb_sym sym = irep->lv[i]; const char *name = mrb_sym_name(mrb, sym); switch (name[0]) { case '*': case '&': break; default: mrb_hash_set(mrb, vars, mrb_symbol_value(sym), mrb_true_value()); break; } } } } if (MRB_PROC_SCOPE_P(proc)) break; proc = proc->upper; } return mrb_hash_keys(mrb, vars); } const struct RProc * mrb_proc_get_caller(mrb_state *mrb, struct REnv **envp) { struct mrb_context *c = mrb->c; mrb_callinfo *ci = (c->ci > c->cibase) ? c->ci - 1 : c->cibase; const struct RProc *proc = ci->proc; if (!proc || MRB_PROC_CFUNC_P(proc)) { if (envp) *envp = NULL; } else { struct RClass *tc = MRB_PROC_TARGET_CLASS(proc); struct REnv *e = mrb_vm_ci_env(ci); if (e == NULL) { int nstacks = proc->body.irep->nlocals; e = mrb_env_new(mrb, c, ci, nstacks, ci->stack, tc); ci->u.env = e; } else if (tc) { e->c = tc; mrb_field_write_barrier(mrb, (struct RBasic*)e, (struct RBasic*)tc); } if (envp) *envp = e; } return proc; } #define IREP_LVAR_MERGE_DEFAULT 50 #define IREP_LVAR_MERGE_MINIMUM 8 #define IREP_LVAR_MERGE_MAXIMUM 240 #ifdef MRB_IREP_LVAR_MERGE_LIMIT # define IREP_LVAR_MERGE_LIMIT \ ((MRB_IREP_LVAR_MERGE_LIMIT) < IREP_LVAR_MERGE_MINIMUM ? IREP_LVAR_MERGE_MINIMUM : \ (MRB_IREP_LVAR_MERGE_LIMIT) > IREP_LVAR_MERGE_MAXIMUM ? IREP_LVAR_MERGE_MAXIMUM : \ (MRB_IREP_LVAR_MERGE_LIMIT)) #else # define IREP_LVAR_MERGE_LIMIT IREP_LVAR_MERGE_DEFAULT #endif void mrb_proc_merge_lvar(mrb_state *mrb, mrb_irep *irep, struct REnv *env, int num, const mrb_sym *lv, const mrb_value *stack) { mrb_assert(!(irep->flags & MRB_IREP_NO_FREE)); if ((irep->nlocals + num) > IREP_LVAR_MERGE_LIMIT) { mrb_raise(mrb, E_RUNTIME_ERROR, "too many local variables for binding (mruby limitation)"); } if (!lv) { mrb_raise(mrb, E_RUNTIME_ERROR, "unavailable local variable names"); } irep->lv = (mrb_sym*)mrb_realloc(mrb, (mrb_sym*)irep->lv, sizeof(mrb_sym) * (irep->nlocals + num)); env->stack = (mrb_value*)mrb_realloc(mrb, env->stack, sizeof(mrb_value) * (irep->nlocals + 1 /* self */ + num)); mrb_sym *destlv = (mrb_sym*)irep->lv + irep->nlocals - 1 /* self */; mrb_value *destst = env->stack + irep->nlocals; memmove(destlv, lv, sizeof(mrb_sym) * num); if (stack) { memmove(destst, stack, sizeof(mrb_value) * num); for (int i = 0; i < num; i++) { if (!mrb_immediate_p(stack[i])) { mrb_field_write_barrier(mrb, (struct RBasic*)env, (struct RBasic*)mrb_obj_ptr(stack[i])); } } } else { for (int i = num; i > 0; i--, destst++) { *destst = mrb_nil_value(); } } irep->nlocals += num; irep->nregs = irep->nlocals; MRB_ENV_SET_LEN(env, irep->nlocals); } void mrb_init_proc(mrb_state *mrb) { mrb_method_t m; mrb_define_class_method(mrb, mrb->proc_class, "new", mrb_proc_s_new, MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); mrb_define_method(mrb, mrb->proc_class, "initialize_copy", mrb_proc_init_copy, MRB_ARGS_REQ(1)); mrb_define_method(mrb, mrb->proc_class, "arity", proc_arity, MRB_ARGS_NONE()); MRB_METHOD_FROM_PROC(m, &call_proc); mrb_define_method_raw(mrb, mrb->proc_class, MRB_SYM(call), m); mrb_define_method_raw(mrb, mrb->proc_class, MRB_OPSYM(aref), m); mrb_define_class_method(mrb, mrb->kernel_module, "lambda", proc_lambda, 
MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); /* 15.3.1.2.6 */ mrb_define_method(mrb, mrb->kernel_module, "lambda", proc_lambda, MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); /* 15.3.1.3.27 */ }
func_before:
mrb_proc_init_copy(mrb_state *mrb, mrb_value self)
{
  mrb_value proc = mrb_get_arg1(mrb);

  if (!mrb_proc_p(proc)) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc");
  }
  mrb_proc_copy(mrb_proc_ptr(self), mrb_proc_ptr(proc));
  return self;
}
func_after:
mrb_proc_init_copy(mrb_state *mrb, mrb_value self)
{
  mrb_value proc = mrb_get_arg1(mrb);

  if (!mrb_proc_p(proc)) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc");
  }
  mrb_proc_copy(mrb, mrb_proc_ptr(self), mrb_proc_ptr(proc));
  return self;
}
diff:
{'added':   [(204, 'mrb_proc_copy(mrb_state *mrb, struct RProc *a, struct RProc *b)'),
             (212, ' a->upper = b->upper;'),
             (214, ' mrb_irep_incref(mrb, (mrb_irep*)a->body.irep);'),
             (230, ' mrb_proc_copy(mrb, p, mrb_proc_ptr(blk));'),
             (248, ' mrb_proc_copy(mrb, mrb_proc_ptr(self), mrb_proc_ptr(proc));'),
             (284, ' mrb_proc_copy(mrb, p2, p);')],
 'deleted': [(204, 'mrb_proc_copy(struct RProc *a, struct RProc *b)'),
             (213, ' mrb_irep_incref(NULL, (mrb_irep*)a->body.irep);'),
             (215, ' a->upper = b->upper;'),
             (230, ' mrb_proc_copy(p, mrb_proc_ptr(blk));'),
             (248, ' mrb_proc_copy(mrb_proc_ptr(self), mrb_proc_ptr(proc));'),
             (284, ' mrb_proc_copy(p2, p);')]}
num_lines_added: 6
num_lines_deleted: 6
num_lines_in_file: 364
num_tokens_in_file: 2,483
num_lines_in_method: 9
num_tokens_in_method: 54
method_complexity: 2
repo: https://github.com/mruby/mruby
cve_id: CVE-2021-4110
cwe_id: CWE-476
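The essential change recorded in this row's diff is the signature of mrb_proc_copy: it gained an mrb_state * parameter so the interpreter state is threaded through to mrb_irep_incref, which previously received a literal NULL (the CWE-476 NULL pointer dereference behind CVE-2021-4110); the relocated a->upper assignment is incidental reordering. A minimal, self-contained sketch of that pattern follows — state_t, irep_t, and irep_incref are hypothetical stand-ins for illustration, not mruby's actual API:

#include <stdio.h>

/* Hypothetical stand-ins for mrb_state / mrb_irep, illustration only. */
typedef struct { const char *name; } state_t;
typedef struct { int refcnt; } irep_t;

/* Like an incref that needs a valid state (e.g. for error reporting):
 * any use of `st` dereferences NULL if the caller passed NULL. */
static void irep_incref(state_t *st, irep_t *irep)
{
    irep->refcnt++;
    printf("incref by %s -> %d\n", st->name, irep->refcnt);
}

/* Before the fix: the copy helper had no state in scope, so it passed NULL. */
static void proc_copy_buggy(irep_t *body)
{
    irep_incref(NULL, body);    /* crashes inside irep_incref */
}

/* After the fix: the caller's state is threaded through, as in the diff. */
static void proc_copy_fixed(state_t *st, irep_t *body)
{
    irep_incref(st, body);
}

int main(void)
{
    state_t st = { "vm" };
    irep_t body = { 1 };

    proc_copy_fixed(&st, &body);    /* prints "incref by vm -> 2" */
    (void)proc_copy_buggy;          /* calling it would dereference NULL */
    return 0;
}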
id: 923
file_name: vf_vignette.c
programming_language: C
method_name: filter_frame
/* * Copyright (c) 2013 Clément Bœsch * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <float.h> /* DBL_MAX */ #include "libavutil/opt.h" #include "libavutil/eval.h" #include "libavutil/avassert.h" #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" #include "internal.h" #include "video.h" static const char *const var_names[] = { "w", // stream width "h", // stream height "n", // frame count "pts", // presentation timestamp expressed in AV_TIME_BASE units "r", // frame rate "t", // timestamp expressed in seconds "tb", // timebase NULL }; enum var_name { VAR_W, VAR_H, VAR_N, VAR_PTS, VAR_R, VAR_T, VAR_TB, VAR_NB }; typedef struct { const AVClass *class; const AVPixFmtDescriptor *desc; int backward; enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode; #define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name DEF_EXPR_FIELDS(angle); DEF_EXPR_FIELDS(x0); DEF_EXPR_FIELDS(y0); double var_values[VAR_NB]; float *fmap; int fmap_linesize; double dmax; float xscale, yscale; uint32_t dither; int do_dither; AVRational aspect; AVRational scale; } VignetteContext; #define OFFSET(x) offsetof(VignetteContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM static const AVOption vignette_options[] = { { "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS }, { "a", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS }, { "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS }, { "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS }, { "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" }, { "forward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"}, { "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"}, { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" }, { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" }, { "frame", "eval expressions for each frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" }, { "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS }, { "aspect", "set aspect ratio", OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, DBL_MAX, .flags = FLAGS }, { NULL } }; AVFILTER_DEFINE_CLASS(vignette); static av_cold int init(AVFilterContext *ctx) { VignetteContext *s = ctx->priv; #define PARSE_EXPR(name) do { \ int ret = av_expr_parse(&s->name##_pexpr, s->name##_expr, var_names, 
\ NULL, NULL, NULL, NULL, 0, ctx); \ if (ret < 0) { \ av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '" \ AV_STRINGIFY(name) "'\n"); \ return ret; \ } \ } while (0) PARSE_EXPR(angle); PARSE_EXPR(x0); PARSE_EXPR(y0); return 0; } static av_cold void uninit(AVFilterContext *ctx) { VignetteContext *s = ctx->priv; av_freep(&s->fmap); av_expr_free(s->angle_pexpr); av_expr_free(s->x0_pexpr); av_expr_free(s->y0_pexpr); } static int query_formats(AVFilterContext *ctx) { static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); return 0; } static double get_natural_factor(const VignetteContext *s, int x, int y) { const int xx = (x - s->x0) * s->xscale; const int yy = (y - s->y0) * s->yscale; const double dnorm = hypot(xx, yy) / s->dmax; if (dnorm > 1) { return 0; } else { const double c = cos(s->angle * dnorm); return (c*c)*(c*c); // do not remove braces, it helps compilers } } #define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) #define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb)) static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame) { int x, y; float *dst = s->fmap; int dst_linesize = s->fmap_linesize; if (frame) { s->var_values[VAR_N] = inlink->frame_count; s->var_values[VAR_T] = TS2T(frame->pts, inlink->time_base); s->var_values[VAR_PTS] = TS2D(frame->pts); } else { s->var_values[VAR_N] = 0; s->var_values[VAR_T] = NAN; s->var_values[VAR_PTS] = NAN; } s->angle = av_clipf(av_expr_eval(s->angle_pexpr, s->var_values, NULL), 0, M_PI_2); s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL); s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL); if (s->backward) { for (y = 0; y < inlink->h; y++) { for (x = 0; x < inlink->w; x++) dst[x] = 1. 
/ get_natural_factor(s, x, y); dst += dst_linesize; } } else { for (y = 0; y < inlink->h; y++) { for (x = 0; x < inlink->w; x++) dst[x] = get_natural_factor(s, x, y); dst += dst_linesize; } } } static inline double get_dither_value(VignetteContext *s) { double dv = 0; if (s->do_dither) { dv = s->dither / (double)(1LL<<32); s->dither = s->dither * 1664525 + 1013904223; } return dv; } static int filter_frame(AVFilterLink *inlink, AVFrame *in) { unsigned x, y; AVFilterContext *ctx = inlink->dst; VignetteContext *s = ctx->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; AVFrame *out; out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); if (s->eval_mode == EVAL_MODE_FRAME) update_context(s, inlink, in); if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) { uint8_t *dst = out->data[0]; const uint8_t *src = in ->data[0]; const float *fmap = s->fmap; const int dst_linesize = out->linesize[0]; const int src_linesize = in ->linesize[0]; const int fmap_linesize = s->fmap_linesize; for (y = 0; y < inlink->h; y++) { uint8_t *dstp = dst; const uint8_t *srcp = src; for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) { const float f = fmap[x]; dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s)); dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s)); dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s)); } dst += dst_linesize; src += src_linesize; fmap += fmap_linesize; } } else { int plane; for (plane = 0; plane < 4 && in->data[plane]; plane++) { uint8_t *dst = out->data[plane]; const uint8_t *src = in ->data[plane]; const float *fmap = s->fmap; const int dst_linesize = out->linesize[plane]; const int src_linesize = in ->linesize[plane]; const int fmap_linesize = s->fmap_linesize; const int chroma = plane == 1 || plane == 2; const int hsub = chroma ? s->desc->log2_chroma_w : 0; const int vsub = chroma ? s->desc->log2_chroma_h : 0; const int w = FF_CEIL_RSHIFT(inlink->w, hsub); const int h = FF_CEIL_RSHIFT(inlink->h, vsub); for (y = 0; y < h; y++) { uint8_t *dstp = dst; const uint8_t *srcp = src; for (x = 0; x < w; x++) { const double dv = get_dither_value(s); if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv); else *dstp++ = av_clip_uint8(fmap[x ] * *srcp++ + dv); } dst += dst_linesize; src += src_linesize; fmap += fmap_linesize << vsub; } } } return ff_filter_frame(outlink, out); } static int config_props(AVFilterLink *inlink) { VignetteContext *s = inlink->dst->priv; AVRational sar = inlink->sample_aspect_ratio; s->desc = av_pix_fmt_desc_get(inlink->format); s->var_values[VAR_W] = inlink->w; s->var_values[VAR_H] = inlink->h; s->var_values[VAR_TB] = av_q2d(inlink->time_base); s->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ? 
NAN : av_q2d(inlink->frame_rate); if (!sar.num || !sar.den) sar.num = sar.den = 1; if (sar.num > sar.den) { s->xscale = av_q2d(av_div_q(sar, s->aspect)); s->yscale = 1; } else { s->yscale = av_q2d(av_div_q(s->aspect, sar)); s->xscale = 1; } s->dmax = hypot(inlink->w / 2., inlink->h / 2.); av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n", s->xscale, s->yscale, s->dmax); s->fmap_linesize = FFALIGN(inlink->w, 32); s->fmap = av_malloc(s->fmap_linesize * inlink->h * sizeof(*s->fmap)); if (!s->fmap) return AVERROR(ENOMEM); if (s->eval_mode == EVAL_MODE_INIT) update_context(s, inlink, NULL); return 0; } static const AVFilterPad vignette_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .filter_frame = filter_frame, .config_props = config_props, }, { NULL } }; static const AVFilterPad vignette_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, }, { NULL } }; AVFilter avfilter_vf_vignette = { .name = "vignette", .description = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."), .priv_size = sizeof(VignetteContext), .init = init, .uninit = uninit, .query_formats = query_formats, .inputs = vignette_inputs, .outputs = vignette_outputs, .priv_class = &vignette_class, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, };
/* * Copyright (c) 2013 Clément Bœsch * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <float.h> /* DBL_MAX */ #include "libavutil/opt.h" #include "libavutil/eval.h" #include "libavutil/avassert.h" #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" #include "internal.h" #include "video.h" static const char *const var_names[] = { "w", // stream width "h", // stream height "n", // frame count "pts", // presentation timestamp expressed in AV_TIME_BASE units "r", // frame rate "t", // timestamp expressed in seconds "tb", // timebase NULL }; enum var_name { VAR_W, VAR_H, VAR_N, VAR_PTS, VAR_R, VAR_T, VAR_TB, VAR_NB }; typedef struct { const AVClass *class; const AVPixFmtDescriptor *desc; int backward; enum EvalMode { EVAL_MODE_INIT, EVAL_MODE_FRAME, EVAL_MODE_NB } eval_mode; #define DEF_EXPR_FIELDS(name) AVExpr *name##_pexpr; char *name##_expr; double name DEF_EXPR_FIELDS(angle); DEF_EXPR_FIELDS(x0); DEF_EXPR_FIELDS(y0); double var_values[VAR_NB]; float *fmap; int fmap_linesize; double dmax; float xscale, yscale; uint32_t dither; int do_dither; AVRational aspect; AVRational scale; } VignetteContext; #define OFFSET(x) offsetof(VignetteContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM static const AVOption vignette_options[] = { { "angle", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS }, { "a", "set lens angle", OFFSET(angle_expr), AV_OPT_TYPE_STRING, {.str="PI/5"}, .flags = FLAGS }, { "x0", "set circle center position on x-axis", OFFSET(x0_expr), AV_OPT_TYPE_STRING, {.str="w/2"}, .flags = FLAGS }, { "y0", "set circle center position on y-axis", OFFSET(y0_expr), AV_OPT_TYPE_STRING, {.str="h/2"}, .flags = FLAGS }, { "mode", "set forward/backward mode", OFFSET(backward), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, FLAGS, "mode" }, { "forward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, FLAGS, "mode"}, { "backward", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, INT_MIN, INT_MAX, FLAGS, "mode"}, { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_INIT}, 0, EVAL_MODE_NB-1, FLAGS, "eval" }, { "init", "eval expressions once during initialization", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_INIT}, .flags = FLAGS, .unit = "eval" }, { "frame", "eval expressions for each frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = FLAGS, .unit = "eval" }, { "dither", "set dithering", OFFSET(do_dither), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, FLAGS }, { "aspect", "set aspect ratio", OFFSET(aspect), AV_OPT_TYPE_RATIONAL, {.dbl = 1}, 0, DBL_MAX, .flags = FLAGS }, { NULL } }; AVFILTER_DEFINE_CLASS(vignette); static av_cold int init(AVFilterContext *ctx) { VignetteContext *s = ctx->priv; #define PARSE_EXPR(name) do { \ int ret = av_expr_parse(&s->name##_pexpr, s->name##_expr, var_names, 
\ NULL, NULL, NULL, NULL, 0, ctx); \ if (ret < 0) { \ av_log(ctx, AV_LOG_ERROR, "Unable to parse expression for '" \ AV_STRINGIFY(name) "'\n"); \ return ret; \ } \ } while (0) PARSE_EXPR(angle); PARSE_EXPR(x0); PARSE_EXPR(y0); return 0; } static av_cold void uninit(AVFilterContext *ctx) { VignetteContext *s = ctx->priv; av_freep(&s->fmap); av_expr_free(s->angle_pexpr); av_expr_free(s->x0_pexpr); av_expr_free(s->y0_pexpr); } static int query_formats(AVFilterContext *ctx) { static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); return 0; } static double get_natural_factor(const VignetteContext *s, int x, int y) { const int xx = (x - s->x0) * s->xscale; const int yy = (y - s->y0) * s->yscale; const double dnorm = hypot(xx, yy) / s->dmax; if (dnorm > 1) { return 0; } else { const double c = cos(s->angle * dnorm); return (c*c)*(c*c); // do not remove braces, it helps compilers } } #define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)) #define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb)) static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame) { int x, y; float *dst = s->fmap; int dst_linesize = s->fmap_linesize; if (frame) { s->var_values[VAR_N] = inlink->frame_count; s->var_values[VAR_T] = TS2T(frame->pts, inlink->time_base); s->var_values[VAR_PTS] = TS2D(frame->pts); } else { s->var_values[VAR_N] = 0; s->var_values[VAR_T] = NAN; s->var_values[VAR_PTS] = NAN; } s->angle = av_clipf(av_expr_eval(s->angle_pexpr, s->var_values, NULL), 0, M_PI_2); s->x0 = av_expr_eval(s->x0_pexpr, s->var_values, NULL); s->y0 = av_expr_eval(s->y0_pexpr, s->var_values, NULL); if (s->backward) { for (y = 0; y < inlink->h; y++) { for (x = 0; x < inlink->w; x++) dst[x] = 1. 
/ get_natural_factor(s, x, y); dst += dst_linesize; } } else { for (y = 0; y < inlink->h; y++) { for (x = 0; x < inlink->w; x++) dst[x] = get_natural_factor(s, x, y); dst += dst_linesize; } } } static inline double get_dither_value(VignetteContext *s) { double dv = 0; if (s->do_dither) { dv = s->dither / (double)(1LL<<32); s->dither = s->dither * 1664525 + 1013904223; } return dv; } static int filter_frame(AVFilterLink *inlink, AVFrame *in) { unsigned x, y; AVFilterContext *ctx = inlink->dst; VignetteContext *s = ctx->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; AVFrame *out; out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); if (s->eval_mode == EVAL_MODE_FRAME) update_context(s, inlink, in); if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) { uint8_t *dst = out->data[0]; const uint8_t *src = in ->data[0]; const float *fmap = s->fmap; const int dst_linesize = out->linesize[0]; const int src_linesize = in ->linesize[0]; const int fmap_linesize = s->fmap_linesize; for (y = 0; y < inlink->h; y++) { uint8_t *dstp = dst; const uint8_t *srcp = src; for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) { const float f = fmap[x]; dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s)); dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s)); dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s)); } dst += dst_linesize; src += src_linesize; fmap += fmap_linesize; } } else { int plane; for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) { uint8_t *dst = out->data[plane]; const uint8_t *src = in ->data[plane]; const float *fmap = s->fmap; const int dst_linesize = out->linesize[plane]; const int src_linesize = in ->linesize[plane]; const int fmap_linesize = s->fmap_linesize; const int chroma = plane == 1 || plane == 2; const int hsub = chroma ? s->desc->log2_chroma_w : 0; const int vsub = chroma ? s->desc->log2_chroma_h : 0; const int w = FF_CEIL_RSHIFT(inlink->w, hsub); const int h = FF_CEIL_RSHIFT(inlink->h, vsub); for (y = 0; y < h; y++) { uint8_t *dstp = dst; const uint8_t *srcp = src; for (x = 0; x < w; x++) { const double dv = get_dither_value(s); if (chroma) *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv); else *dstp++ = av_clip_uint8(fmap[x ] * *srcp++ + dv); } dst += dst_linesize; src += src_linesize; fmap += fmap_linesize << vsub; } } } return ff_filter_frame(outlink, out); } static int config_props(AVFilterLink *inlink) { VignetteContext *s = inlink->dst->priv; AVRational sar = inlink->sample_aspect_ratio; s->desc = av_pix_fmt_desc_get(inlink->format); s->var_values[VAR_W] = inlink->w; s->var_values[VAR_H] = inlink->h; s->var_values[VAR_TB] = av_q2d(inlink->time_base); s->var_values[VAR_R] = inlink->frame_rate.num == 0 || inlink->frame_rate.den == 0 ? 
NAN : av_q2d(inlink->frame_rate); if (!sar.num || !sar.den) sar.num = sar.den = 1; if (sar.num > sar.den) { s->xscale = av_q2d(av_div_q(sar, s->aspect)); s->yscale = 1; } else { s->yscale = av_q2d(av_div_q(s->aspect, sar)); s->xscale = 1; } s->dmax = hypot(inlink->w / 2., inlink->h / 2.); av_log(s, AV_LOG_DEBUG, "xscale=%f yscale=%f dmax=%f\n", s->xscale, s->yscale, s->dmax); s->fmap_linesize = FFALIGN(inlink->w, 32); s->fmap = av_malloc(s->fmap_linesize * inlink->h * sizeof(*s->fmap)); if (!s->fmap) return AVERROR(ENOMEM); if (s->eval_mode == EVAL_MODE_INIT) update_context(s, inlink, NULL); return 0; } static const AVFilterPad vignette_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .filter_frame = filter_frame, .config_props = config_props, }, { NULL } }; static const AVFilterPad vignette_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, }, { NULL } }; AVFilter avfilter_vf_vignette = { .name = "vignette", .description = NULL_IF_CONFIG_SMALL("Make or reverse a vignette effect."), .priv_size = sizeof(VignetteContext), .init = init, .uninit = uninit, .query_formats = query_formats, .inputs = vignette_inputs, .outputs = vignette_outputs, .priv_class = &vignette_class, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, };
func_before:
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    unsigned x, y;
    AVFilterContext *ctx = inlink->dst;
    VignetteContext *s = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    if (s->eval_mode == EVAL_MODE_FRAME)
        update_context(s, inlink, in);

    if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
        uint8_t *dst = out->data[0];
        const uint8_t *src = in ->data[0];
        const float *fmap = s->fmap;
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in ->linesize[0];
        const int fmap_linesize = s->fmap_linesize;

        for (y = 0; y < inlink->h; y++) {
            uint8_t *dstp = dst;
            const uint8_t *srcp = src;

            for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
                const float f = fmap[x];
                dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
                dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
                dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
            }
            dst += dst_linesize;
            src += src_linesize;
            fmap += fmap_linesize;
        }
    } else {
        int plane;

        for (plane = 0; plane < 4 && in->data[plane]; plane++) {
            uint8_t *dst = out->data[plane];
            const uint8_t *src = in ->data[plane];
            const float *fmap = s->fmap;
            const int dst_linesize = out->linesize[plane];
            const int src_linesize = in ->linesize[plane];
            const int fmap_linesize = s->fmap_linesize;
            const int chroma = plane == 1 || plane == 2;
            const int hsub = chroma ? s->desc->log2_chroma_w : 0;
            const int vsub = chroma ? s->desc->log2_chroma_h : 0;
            const int w = FF_CEIL_RSHIFT(inlink->w, hsub);
            const int h = FF_CEIL_RSHIFT(inlink->h, vsub);

            for (y = 0; y < h; y++) {
                uint8_t *dstp = dst;
                const uint8_t *srcp = src;

                for (x = 0; x < w; x++) {
                    const double dv = get_dither_value(s);
                    if (chroma)
                        *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
                    else
                        *dstp++ = av_clip_uint8(fmap[x] * *srcp++ + dv);
                }
                dst += dst_linesize;
                src += src_linesize;
                fmap += fmap_linesize << vsub;
            }
        }
    }
    return ff_filter_frame(outlink, out);
}
func_after:
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    unsigned x, y;
    AVFilterContext *ctx = inlink->dst;
    VignetteContext *s = ctx->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    if (s->eval_mode == EVAL_MODE_FRAME)
        update_context(s, inlink, in);

    if (s->desc->flags & AV_PIX_FMT_FLAG_RGB) {
        uint8_t *dst = out->data[0];
        const uint8_t *src = in ->data[0];
        const float *fmap = s->fmap;
        const int dst_linesize = out->linesize[0];
        const int src_linesize = in ->linesize[0];
        const int fmap_linesize = s->fmap_linesize;

        for (y = 0; y < inlink->h; y++) {
            uint8_t *dstp = dst;
            const uint8_t *srcp = src;

            for (x = 0; x < inlink->w; x++, dstp += 3, srcp += 3) {
                const float f = fmap[x];
                dstp[0] = av_clip_uint8(srcp[0] * f + get_dither_value(s));
                dstp[1] = av_clip_uint8(srcp[1] * f + get_dither_value(s));
                dstp[2] = av_clip_uint8(srcp[2] * f + get_dither_value(s));
            }
            dst += dst_linesize;
            src += src_linesize;
            fmap += fmap_linesize;
        }
    } else {
        int plane;

        for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
            uint8_t *dst = out->data[plane];
            const uint8_t *src = in ->data[plane];
            const float *fmap = s->fmap;
            const int dst_linesize = out->linesize[plane];
            const int src_linesize = in ->linesize[plane];
            const int fmap_linesize = s->fmap_linesize;
            const int chroma = plane == 1 || plane == 2;
            const int hsub = chroma ? s->desc->log2_chroma_w : 0;
            const int vsub = chroma ? s->desc->log2_chroma_h : 0;
            const int w = FF_CEIL_RSHIFT(inlink->w, hsub);
            const int h = FF_CEIL_RSHIFT(inlink->h, vsub);

            for (y = 0; y < h; y++) {
                uint8_t *dstp = dst;
                const uint8_t *srcp = src;

                for (x = 0; x < w; x++) {
                    const double dv = get_dither_value(s);
                    if (chroma)
                        *dstp++ = av_clip_uint8(fmap[x << hsub] * (*srcp++ - 127) + 127 + dv);
                    else
                        *dstp++ = av_clip_uint8(fmap[x] * *srcp++ + dv);
                }
                dst += dst_linesize;
                src += src_linesize;
                fmap += fmap_linesize << vsub;
            }
        }
    }
    return ff_filter_frame(outlink, out);
}
diff:
{'added':   [(242, ' for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {')],
 'deleted': [(242, ' for (plane = 0; plane < 4 && in->data[plane]; plane++) {')]}
num_lines_added: 1
num_lines_deleted: 1
num_lines_in_file: 267
num_tokens_in_file: 2,095
num_lines_in_method: 65
num_tokens_in_method: 599
method_complexity: 14
repo: https://github.com/FFmpeg/FFmpeg
cve_id: CVE-2013-4263
cwe_id: CWE-119
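This row's one-line diff is the whole fix for CVE-2013-4263: the plane loop now requires in->linesize[plane] to be nonzero in addition to in->data[plane] being non-NULL, so a frame carrying a non-NULL data pointer for a plane with no actual pixel rows is no longer processed as image data (the CWE-119 out-of-bounds access). Below is a self-contained sketch of that guarded-iteration pattern; frame_t is a hypothetical miniature of AVFrame, not FFmpeg's API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical miniature of an AVFrame: up to 4 planes, each described by
 * a data pointer and a linesize (bytes per row). Illustration only. */
typedef struct {
    uint8_t *data[4];
    int linesize[4];
    int h;
} frame_t;

/* The pattern from the diff: trusting data[plane] alone can treat a stray
 * pointer as a real plane; also requiring linesize[plane] != 0 restricts
 * the loop to planes that actually carry pixel rows. */
static void process_planes(const frame_t *f)
{
    for (int plane = 0; plane < 4 && f->data[plane] && f->linesize[plane]; plane++) {
        for (int y = 0; y < f->h; y++) {
            const uint8_t *row = f->data[plane] + y * f->linesize[plane];
            (void)row;    /* per-row filtering would happen here */
        }
        printf("processed plane %d\n", plane);
    }
}

int main(void)
{
    static uint8_t buf[16 * 16];
    frame_t f = { { buf, buf, NULL, NULL }, { 16, 0, 0, 0 }, 16 };

    /* Plane 1 has a non-NULL pointer but linesize 0: the added check
     * skips it instead of walking rows that were never allocated. */
    process_planes(&f);    /* prints only "processed plane 0" */
    return 0;
}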
id: 979
file_name: segment.c
programming_language: C
method_name: build_segment_manager
/* * fs/f2fs/segment.c * * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/fs.h> #include <linux/f2fs_fs.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/prefetch.h> #include <linux/kthread.h> #include <linux/swap.h> #include <linux/timer.h> #include <linux/freezer.h> #include "f2fs.h" #include "segment.h" #include "node.h" #include "trace.h" #include <trace/events/f2fs.h> #define __reverse_ffz(x) __reverse_ffs(~(x)) static struct kmem_cache *discard_entry_slab; static struct kmem_cache *discard_cmd_slab; static struct kmem_cache *sit_entry_set_slab; static struct kmem_cache *inmem_entry_slab; static unsigned long __reverse_ulong(unsigned char *str) { unsigned long tmp = 0; int shift = 24, idx = 0; #if BITS_PER_LONG == 64 shift = 56; #endif while (shift >= 0) { tmp |= (unsigned long)str[idx++] << shift; shift -= BITS_PER_BYTE; } return tmp; } /* * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since * MSB and LSB are reversed in a byte by f2fs_set_bit. */ static inline unsigned long __reverse_ffs(unsigned long word) { int num = 0; #if BITS_PER_LONG == 64 if ((word & 0xffffffff00000000UL) == 0) num += 32; else word >>= 32; #endif if ((word & 0xffff0000) == 0) num += 16; else word >>= 16; if ((word & 0xff00) == 0) num += 8; else word >>= 8; if ((word & 0xf0) == 0) num += 4; else word >>= 4; if ((word & 0xc) == 0) num += 2; else word >>= 2; if ((word & 0x2) == 0) num += 1; return num; } /* * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because * f2fs_set_bit makes MSB and LSB reversed in a byte. * @size must be integral times of unsigned long. 
* Example: * MSB <--> LSB * f2fs_set_bit(0, bitmap) => 1000 0000 * f2fs_set_bit(7, bitmap) => 0000 0001 */ static unsigned long __find_rev_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { const unsigned long *p = addr + BIT_WORD(offset); unsigned long result = size; unsigned long tmp; if (offset >= size) return size; size -= (offset & ~(BITS_PER_LONG - 1)); offset %= BITS_PER_LONG; while (1) { if (*p == 0) goto pass; tmp = __reverse_ulong((unsigned char *)p); tmp &= ~0UL >> offset; if (size < BITS_PER_LONG) tmp &= (~0UL << (BITS_PER_LONG - size)); if (tmp) goto found; pass: if (size <= BITS_PER_LONG) break; size -= BITS_PER_LONG; offset = 0; p++; } return result; found: return result - size + __reverse_ffs(tmp); } static unsigned long __find_rev_next_zero_bit(const unsigned long *addr, unsigned long size, unsigned long offset) { const unsigned long *p = addr + BIT_WORD(offset); unsigned long result = size; unsigned long tmp; if (offset >= size) return size; size -= (offset & ~(BITS_PER_LONG - 1)); offset %= BITS_PER_LONG; while (1) { if (*p == ~0UL) goto pass; tmp = __reverse_ulong((unsigned char *)p); if (offset) tmp |= ~0UL << (BITS_PER_LONG - offset); if (size < BITS_PER_LONG) tmp |= ~0UL >> size; if (tmp != ~0UL) goto found; pass: if (size <= BITS_PER_LONG) break; size -= BITS_PER_LONG; offset = 0; p++; } return result; found: return result - size + __reverse_ffz(tmp); } void register_inmem_page(struct inode *inode, struct page *page) { struct f2fs_inode_info *fi = F2FS_I(inode); struct inmem_pages *new; f2fs_trace_pid(page); set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE); SetPagePrivate(page); new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS); /* add atomic page indices to the list */ new->page = page; INIT_LIST_HEAD(&new->list); /* increase reference count with clean state */ mutex_lock(&fi->inmem_lock); get_page(page); list_add_tail(&new->list, &fi->inmem_pages); inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); mutex_unlock(&fi->inmem_lock); trace_f2fs_register_inmem_page(page, INMEM); } static int __revoke_inmem_pages(struct inode *inode, struct list_head *head, bool drop, bool recover) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct inmem_pages *cur, *tmp; int err = 0; list_for_each_entry_safe(cur, tmp, head, list) { struct page *page = cur->page; if (drop) trace_f2fs_commit_inmem_page(page, INMEM_DROP); lock_page(page); if (recover) { struct dnode_of_data dn; struct node_info ni; trace_f2fs_commit_inmem_page(page, INMEM_REVOKE); set_new_dnode(&dn, inode, NULL, NULL, 0); if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) { err = -EAGAIN; goto next; } get_node_info(sbi, dn.nid, &ni); f2fs_replace_block(sbi, &dn, dn.data_blkaddr, cur->old_addr, ni.version, true, true); f2fs_put_dnode(&dn); } next: /* we don't need to invalidate this in the sccessful status */ if (drop || recover) ClearPageUptodate(page); set_page_private(page, 0); ClearPagePrivate(page); f2fs_put_page(page, 1); list_del(&cur->list); kmem_cache_free(inmem_entry_slab, cur); dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES); } return err; } void drop_inmem_pages(struct inode *inode) { struct f2fs_inode_info *fi = F2FS_I(inode); mutex_lock(&fi->inmem_lock); __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); mutex_unlock(&fi->inmem_lock); clear_inode_flag(inode, FI_ATOMIC_FILE); stat_dec_atomic_write(inode); } void drop_inmem_page(struct inode *inode, struct page *page) { struct f2fs_inode_info *fi = F2FS_I(inode); struct f2fs_sb_info *sbi = 
F2FS_I_SB(inode); struct list_head *head = &fi->inmem_pages; struct inmem_pages *cur = NULL; f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page)); mutex_lock(&fi->inmem_lock); list_for_each_entry(cur, head, list) { if (cur->page == page) break; } f2fs_bug_on(sbi, !cur || cur->page != page); list_del(&cur->list); mutex_unlock(&fi->inmem_lock); dec_page_count(sbi, F2FS_INMEM_PAGES); kmem_cache_free(inmem_entry_slab, cur); ClearPageUptodate(page); set_page_private(page, 0); ClearPagePrivate(page); f2fs_put_page(page, 0); trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE); } static int __commit_inmem_pages(struct inode *inode, struct list_head *revoke_list) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); struct inmem_pages *cur, *tmp; struct f2fs_io_info fio = { .sbi = sbi, .type = DATA, .op = REQ_OP_WRITE, .op_flags = REQ_SYNC | REQ_PRIO, }; pgoff_t last_idx = ULONG_MAX; int err = 0; list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) { struct page *page = cur->page; lock_page(page); if (page->mapping == inode->i_mapping) { trace_f2fs_commit_inmem_page(page, INMEM); set_page_dirty(page); f2fs_wait_on_page_writeback(page, DATA, true); if (clear_page_dirty_for_io(page)) { inode_dec_dirty_pages(inode); remove_dirty_inode(inode); } fio.page = page; fio.old_blkaddr = NULL_ADDR; fio.encrypted_page = NULL; fio.need_lock = LOCK_DONE; err = do_write_data_page(&fio); if (err) { unlock_page(page); break; } /* record old blkaddr for revoking */ cur->old_addr = fio.old_blkaddr; last_idx = page->index; } unlock_page(page); list_move_tail(&cur->list, revoke_list); } if (last_idx != ULONG_MAX) f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA); if (!err) __revoke_inmem_pages(inode, revoke_list, false, false); return err; } int commit_inmem_pages(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode_info *fi = F2FS_I(inode); struct list_head revoke_list; int err; INIT_LIST_HEAD(&revoke_list); f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); set_inode_flag(inode, FI_ATOMIC_COMMIT); mutex_lock(&fi->inmem_lock); err = __commit_inmem_pages(inode, &revoke_list); if (err) { int ret; /* * try to revoke all committed pages, but still we could fail * due to no memory or other reason, if that happened, EAGAIN * will be returned, which means in such case, transaction is * already not integrity, caller should use journal to do the * recovery or rewrite & commit last transaction. For other * error number, revoking was done by filesystem itself. */ ret = __revoke_inmem_pages(inode, &revoke_list, false, true); if (ret) err = ret; /* drop all uncommitted pages */ __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); } mutex_unlock(&fi->inmem_lock); clear_inode_flag(inode, FI_ATOMIC_COMMIT); f2fs_unlock_op(sbi); return err; } /* * This function balances dirty node and dentry pages. * In addition, it controls garbage collection. */ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) { #ifdef CONFIG_F2FS_FAULT_INJECTION if (time_to_inject(sbi, FAULT_CHECKPOINT)) { f2fs_show_injection_info(FAULT_CHECKPOINT); f2fs_stop_checkpoint(sbi, false); } #endif /* balance_fs_bg is able to be pending */ if (need && excess_cached_nats(sbi)) f2fs_balance_fs_bg(sbi); /* * We should do GC or end up with checkpoint, if there are so many dirty * dir/node pages without enough free segments. 
*/ if (has_not_enough_free_secs(sbi, 0, 0)) { mutex_lock(&sbi->gc_mutex); f2fs_gc(sbi, false, false, NULL_SEGNO); } } void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) { /* try to shrink extent cache when there is no enough memory */ if (!available_free_memory(sbi, EXTENT_CACHE)) f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); /* check the # of cached NAT entries */ if (!available_free_memory(sbi, NAT_ENTRIES)) try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK); if (!available_free_memory(sbi, FREE_NIDS)) try_to_free_nids(sbi, MAX_FREE_NIDS); else build_free_nids(sbi, false, false); if (!is_idle(sbi) && !excess_dirty_nats(sbi)) return; /* checkpoint is the only way to shrink partial cached entries */ if (!available_free_memory(sbi, NAT_ENTRIES) || !available_free_memory(sbi, INO_ENTRIES) || excess_prefree_segs(sbi) || excess_dirty_nats(sbi) || f2fs_time_over(sbi, CP_TIME)) { if (test_opt(sbi, DATA_FLUSH)) { struct blk_plug plug; blk_start_plug(&plug); sync_dirty_inodes(sbi, FILE_INODE); blk_finish_plug(&plug); } f2fs_sync_fs(sbi->sb, true); stat_inc_bg_cp_count(sbi->stat_info); } } static int __submit_flush_wait(struct f2fs_sb_info *sbi, struct block_device *bdev) { struct bio *bio = f2fs_bio_alloc(0); int ret; bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; bio->bi_bdev = bdev; ret = submit_bio_wait(bio); bio_put(bio); trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER), test_opt(sbi, FLUSH_MERGE), ret); return ret; } static int submit_flush_wait(struct f2fs_sb_info *sbi) { int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev); int i; if (!sbi->s_ndevs || ret) return ret; for (i = 1; i < sbi->s_ndevs; i++) { ret = __submit_flush_wait(sbi, FDEV(i).bdev); if (ret) break; } return ret; } static int issue_flush_thread(void *data) { struct f2fs_sb_info *sbi = data; struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; wait_queue_head_t *q = &fcc->flush_wait_queue; repeat: if (kthread_should_stop()) return 0; if (!llist_empty(&fcc->issue_list)) { struct flush_cmd *cmd, *next; int ret; fcc->dispatch_list = llist_del_all(&fcc->issue_list); fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list); ret = submit_flush_wait(sbi); atomic_inc(&fcc->issued_flush); llist_for_each_entry_safe(cmd, next, fcc->dispatch_list, llnode) { cmd->ret = ret; complete(&cmd->wait); } fcc->dispatch_list = NULL; } wait_event_interruptible(*q, kthread_should_stop() || !llist_empty(&fcc->issue_list)); goto repeat; } int f2fs_issue_flush(struct f2fs_sb_info *sbi) { struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; struct flush_cmd cmd; int ret; if (test_opt(sbi, NOBARRIER)) return 0; if (!test_opt(sbi, FLUSH_MERGE)) { ret = submit_flush_wait(sbi); atomic_inc(&fcc->issued_flush); return ret; } if (!atomic_read(&fcc->issing_flush)) { atomic_inc(&fcc->issing_flush); ret = submit_flush_wait(sbi); atomic_dec(&fcc->issing_flush); atomic_inc(&fcc->issued_flush); return ret; } init_completion(&cmd.wait); atomic_inc(&fcc->issing_flush); llist_add(&cmd.llnode, &fcc->issue_list); if (!fcc->dispatch_list) wake_up(&fcc->flush_wait_queue); if (fcc->f2fs_issue_flush) { wait_for_completion(&cmd.wait); atomic_dec(&fcc->issing_flush); } else { llist_del_all(&fcc->issue_list); atomic_set(&fcc->issing_flush, 0); } return cmd.ret; } int create_flush_cmd_control(struct f2fs_sb_info *sbi) { dev_t dev = sbi->sb->s_bdev->bd_dev; struct flush_cmd_control *fcc; int err = 0; if (SM_I(sbi)->fcc_info) { fcc = SM_I(sbi)->fcc_info; goto init_thread; } fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL); if (!fcc) return -ENOMEM; 
atomic_set(&fcc->issued_flush, 0); atomic_set(&fcc->issing_flush, 0); init_waitqueue_head(&fcc->flush_wait_queue); init_llist_head(&fcc->issue_list); SM_I(sbi)->fcc_info = fcc; init_thread: fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); if (IS_ERR(fcc->f2fs_issue_flush)) { err = PTR_ERR(fcc->f2fs_issue_flush); kfree(fcc); SM_I(sbi)->fcc_info = NULL; return err; } return err; } void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free) { struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info; if (fcc && fcc->f2fs_issue_flush) { struct task_struct *flush_thread = fcc->f2fs_issue_flush; fcc->f2fs_issue_flush = NULL; kthread_stop(flush_thread); } if (free) { kfree(fcc); SM_I(sbi)->fcc_info = NULL; } } static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); /* need not be added */ if (IS_CURSEG(sbi, segno)) return; if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type])) dirty_i->nr_dirty[dirty_type]++; if (dirty_type == DIRTY) { struct seg_entry *sentry = get_seg_entry(sbi, segno); enum dirty_type t = sentry->type; if (unlikely(t >= DIRTY)) { f2fs_bug_on(sbi, 1); return; } if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) dirty_i->nr_dirty[t]++; } } static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) dirty_i->nr_dirty[dirty_type]--; if (dirty_type == DIRTY) { struct seg_entry *sentry = get_seg_entry(sbi, segno); enum dirty_type t = sentry->type; if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) dirty_i->nr_dirty[t]--; if (get_valid_blocks(sbi, segno, true) == 0) clear_bit(GET_SEC_FROM_SEG(sbi, segno), dirty_i->victim_secmap); } } /* * Should not occur error such as -ENOMEM. * Adding dirty entry into seglist is not critical operation. * If a given segment is one of current working segments, it won't be added. 
*/ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned short valid_blocks; if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno)) return; mutex_lock(&dirty_i->seglist_lock); valid_blocks = get_valid_blocks(sbi, segno, false); if (valid_blocks == 0) { __locate_dirty_segment(sbi, segno, PRE); __remove_dirty_segment(sbi, segno, DIRTY); } else if (valid_blocks < sbi->blocks_per_seg) { __locate_dirty_segment(sbi, segno, DIRTY); } else { /* Recovery routine with SSR needs this */ __remove_dirty_segment(sbi, segno, DIRTY); } mutex_unlock(&dirty_i->seglist_lock); } static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t lstart, block_t start, block_t len) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct list_head *pend_list; struct discard_cmd *dc; f2fs_bug_on(sbi, !len); pend_list = &dcc->pend_list[plist_idx(len)]; dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS); INIT_LIST_HEAD(&dc->list); dc->bdev = bdev; dc->lstart = lstart; dc->start = start; dc->len = len; dc->ref = 0; dc->state = D_PREP; dc->error = 0; init_completion(&dc->wait); list_add_tail(&dc->list, pend_list); atomic_inc(&dcc->discard_cmd_cnt); dcc->undiscard_blks += len; return dc; } static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t lstart, block_t start, block_t len, struct rb_node *parent, struct rb_node **p) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct discard_cmd *dc; dc = __create_discard_cmd(sbi, bdev, lstart, start, len); rb_link_node(&dc->rb_node, parent, p); rb_insert_color(&dc->rb_node, &dcc->root); return dc; } static void __detach_discard_cmd(struct discard_cmd_control *dcc, struct discard_cmd *dc) { if (dc->state == D_DONE) atomic_dec(&dcc->issing_discard); list_del(&dc->list); rb_erase(&dc->rb_node, &dcc->root); dcc->undiscard_blks -= dc->len; kmem_cache_free(discard_cmd_slab, dc); atomic_dec(&dcc->discard_cmd_cnt); } static void __remove_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; if (dc->error == -EOPNOTSUPP) dc->error = 0; if (dc->error) f2fs_msg(sbi->sb, KERN_INFO, "Issue discard(%u, %u, %u) failed, ret: %d", dc->lstart, dc->start, dc->len, dc->error); __detach_discard_cmd(dcc, dc); } static void f2fs_submit_discard_endio(struct bio *bio) { struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private; dc->error = bio->bi_error; dc->state = D_DONE; complete_all(&dc->wait); bio_put(bio); } /* this function is copied from blkdev_issue_discard from block/blk-lib.c */ static void __submit_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct bio *bio = NULL; if (dc->state != D_PREP) return; trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len); dc->error = __blkdev_issue_discard(dc->bdev, SECTOR_FROM_BLOCK(dc->start), SECTOR_FROM_BLOCK(dc->len), GFP_NOFS, 0, &bio); if (!dc->error) { /* should keep before submission to avoid D_DONE right away */ dc->state = D_SUBMIT; atomic_inc(&dcc->issued_discard); atomic_inc(&dcc->issing_discard); if (bio) { bio->bi_private = dc; bio->bi_end_io = f2fs_submit_discard_endio; bio->bi_opf |= REQ_SYNC; submit_bio(bio); list_move_tail(&dc->list, &dcc->wait_list); } } else { __remove_discard_cmd(sbi, dc); } } static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi, struct block_device *bdev, 
block_t lstart, block_t start, block_t len, struct rb_node **insert_p, struct rb_node *insert_parent) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct rb_node **p = &dcc->root.rb_node; struct rb_node *parent = NULL; struct discard_cmd *dc = NULL; if (insert_p && insert_parent) { parent = insert_parent; p = insert_p; goto do_insert; } p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart); do_insert: dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p); if (!dc) return NULL; return dc; } static void __relocate_discard_cmd(struct discard_cmd_control *dcc, struct discard_cmd *dc) { list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]); } static void __punch_discard_cmd(struct f2fs_sb_info *sbi, struct discard_cmd *dc, block_t blkaddr) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct discard_info di = dc->di; bool modified = false; if (dc->state == D_DONE || dc->len == 1) { __remove_discard_cmd(sbi, dc); return; } dcc->undiscard_blks -= di.len; if (blkaddr > di.lstart) { dc->len = blkaddr - dc->lstart; dcc->undiscard_blks += dc->len; __relocate_discard_cmd(dcc, dc); modified = true; } if (blkaddr < di.lstart + di.len - 1) { if (modified) { __insert_discard_tree(sbi, dc->bdev, blkaddr + 1, di.start + blkaddr + 1 - di.lstart, di.lstart + di.len - 1 - blkaddr, NULL, NULL); } else { dc->lstart++; dc->len--; dc->start++; dcc->undiscard_blks += dc->len; __relocate_discard_cmd(dcc, dc); } } } static void __update_discard_tree_range(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t lstart, block_t start, block_t len) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct discard_cmd *prev_dc = NULL, *next_dc = NULL; struct discard_cmd *dc; struct discard_info di = {0}; struct rb_node **insert_p = NULL, *insert_parent = NULL; block_t end = lstart + len; mutex_lock(&dcc->cmd_lock); dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root, NULL, lstart, (struct rb_entry **)&prev_dc, (struct rb_entry **)&next_dc, &insert_p, &insert_parent, true); if (dc) prev_dc = dc; if (!prev_dc) { di.lstart = lstart; di.len = next_dc ? 
next_dc->lstart - lstart : len; di.len = min(di.len, len); di.start = start; } while (1) { struct rb_node *node; bool merged = false; struct discard_cmd *tdc = NULL; if (prev_dc) { di.lstart = prev_dc->lstart + prev_dc->len; if (di.lstart < lstart) di.lstart = lstart; if (di.lstart >= end) break; if (!next_dc || next_dc->lstart > end) di.len = end - di.lstart; else di.len = next_dc->lstart - di.lstart; di.start = start + di.lstart - lstart; } if (!di.len) goto next; if (prev_dc && prev_dc->state == D_PREP && prev_dc->bdev == bdev && __is_discard_back_mergeable(&di, &prev_dc->di)) { prev_dc->di.len += di.len; dcc->undiscard_blks += di.len; __relocate_discard_cmd(dcc, prev_dc); di = prev_dc->di; tdc = prev_dc; merged = true; } if (next_dc && next_dc->state == D_PREP && next_dc->bdev == bdev && __is_discard_front_mergeable(&di, &next_dc->di)) { next_dc->di.lstart = di.lstart; next_dc->di.len += di.len; next_dc->di.start = di.start; dcc->undiscard_blks += di.len; __relocate_discard_cmd(dcc, next_dc); if (tdc) __remove_discard_cmd(sbi, tdc); merged = true; } if (!merged) { __insert_discard_tree(sbi, bdev, di.lstart, di.start, di.len, NULL, NULL); } next: prev_dc = next_dc; if (!prev_dc) break; node = rb_next(&prev_dc->rb_node); next_dc = rb_entry_safe(node, struct discard_cmd, rb_node); } mutex_unlock(&dcc->cmd_lock); } static int __queue_discard_cmd(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t blkstart, block_t blklen) { block_t lblkstart = blkstart; trace_f2fs_queue_discard(bdev, blkstart, blklen); if (sbi->s_ndevs) { int devi = f2fs_target_device_index(sbi, blkstart); blkstart -= FDEV(devi).start_blk; } __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen); return 0; } static void __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct list_head *pend_list; struct discard_cmd *dc, *tmp; struct blk_plug plug; int i, iter = 0; mutex_lock(&dcc->cmd_lock); f2fs_bug_on(sbi, !__check_rb_tree_consistence(sbi, &dcc->root)); blk_start_plug(&plug); for (i = MAX_PLIST_NUM - 1; i >= 0; i--) { pend_list = &dcc->pend_list[i]; list_for_each_entry_safe(dc, tmp, pend_list, list) { f2fs_bug_on(sbi, dc->state != D_PREP); if (!issue_cond || is_idle(sbi)) __submit_discard_cmd(sbi, dc); if (issue_cond && iter++ > DISCARD_ISSUE_RATE) goto out; } } out: blk_finish_plug(&plug); mutex_unlock(&dcc->cmd_lock); } static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct list_head *wait_list = &(dcc->wait_list); struct discard_cmd *dc, *tmp; bool need_wait; next: need_wait = false; mutex_lock(&dcc->cmd_lock); list_for_each_entry_safe(dc, tmp, wait_list, list) { if (!wait_cond || (dc->state == D_DONE && !dc->ref)) { wait_for_completion_io(&dc->wait); __remove_discard_cmd(sbi, dc); } else { dc->ref++; need_wait = true; break; } } mutex_unlock(&dcc->cmd_lock); if (need_wait) { wait_for_completion_io(&dc->wait); mutex_lock(&dcc->cmd_lock); f2fs_bug_on(sbi, dc->state != D_DONE); dc->ref--; if (!dc->ref) __remove_discard_cmd(sbi, dc); mutex_unlock(&dcc->cmd_lock); goto next; } } /* This should be covered by global mutex, &sit_i->sentry_lock */ void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; struct discard_cmd *dc; bool need_wait = false; mutex_lock(&dcc->cmd_lock); dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr); if (dc) { if (dc->state == D_PREP) { 
__punch_discard_cmd(sbi, dc, blkaddr); } else { dc->ref++; need_wait = true; } } mutex_unlock(&dcc->cmd_lock); if (need_wait) { wait_for_completion_io(&dc->wait); mutex_lock(&dcc->cmd_lock); f2fs_bug_on(sbi, dc->state != D_DONE); dc->ref--; if (!dc->ref) __remove_discard_cmd(sbi, dc); mutex_unlock(&dcc->cmd_lock); } } /* This comes from f2fs_put_super */ void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) { __issue_discard_cmd(sbi, false); __wait_discard_cmd(sbi, false); } static int issue_discard_thread(void *data) { struct f2fs_sb_info *sbi = data; struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; wait_queue_head_t *q = &dcc->discard_wait_queue; set_freezable(); do { wait_event_interruptible(*q, kthread_should_stop() || freezing(current) || atomic_read(&dcc->discard_cmd_cnt)); if (try_to_freeze()) continue; if (kthread_should_stop()) return 0; __issue_discard_cmd(sbi, true); __wait_discard_cmd(sbi, true); congestion_wait(BLK_RW_SYNC, HZ/50); } while (!kthread_should_stop()); return 0; } #ifdef CONFIG_BLK_DEV_ZONED static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t blkstart, block_t blklen) { sector_t sector, nr_sects; block_t lblkstart = blkstart; int devi = 0; if (sbi->s_ndevs) { devi = f2fs_target_device_index(sbi, blkstart); blkstart -= FDEV(devi).start_blk; } /* * We need to know the type of the zone: for conventional zones, * use regular discard if the drive supports it. For sequential * zones, reset the zone write pointer. */ switch (get_blkz_type(sbi, bdev, blkstart)) { case BLK_ZONE_TYPE_CONVENTIONAL: if (!blk_queue_discard(bdev_get_queue(bdev))) return 0; return __queue_discard_cmd(sbi, bdev, lblkstart, blklen); case BLK_ZONE_TYPE_SEQWRITE_REQ: case BLK_ZONE_TYPE_SEQWRITE_PREF: sector = SECTOR_FROM_BLOCK(blkstart); nr_sects = SECTOR_FROM_BLOCK(blklen); if (sector & (bdev_zone_sectors(bdev) - 1) || nr_sects != bdev_zone_sectors(bdev)) { f2fs_msg(sbi->sb, KERN_INFO, "(%d) %s: Unaligned discard attempted (block %x + %x)", devi, sbi->s_ndevs ? FDEV(devi).path: "", blkstart, blklen); return -EIO; } trace_f2fs_issue_reset_zone(bdev, blkstart); return blkdev_reset_zones(bdev, sector, nr_sects, GFP_NOFS); default: /* Unknown zone type: broken device ? 
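		 * Conservatively fail the request rather than guessing.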
*/ return -EIO; } } #endif static int __issue_discard_async(struct f2fs_sb_info *sbi, struct block_device *bdev, block_t blkstart, block_t blklen) { #ifdef CONFIG_BLK_DEV_ZONED if (f2fs_sb_mounted_blkzoned(sbi->sb) && bdev_zoned_model(bdev) != BLK_ZONED_NONE) return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen); #endif return __queue_discard_cmd(sbi, bdev, blkstart, blklen); } static int f2fs_issue_discard(struct f2fs_sb_info *sbi, block_t blkstart, block_t blklen) { sector_t start = blkstart, len = 0; struct block_device *bdev; struct seg_entry *se; unsigned int offset; block_t i; int err = 0; bdev = f2fs_target_device(sbi, blkstart, NULL); for (i = blkstart; i < blkstart + blklen; i++, len++) { if (i != start) { struct block_device *bdev2 = f2fs_target_device(sbi, i, NULL); if (bdev2 != bdev) { err = __issue_discard_async(sbi, bdev, start, len); if (err) return err; bdev = bdev2; start = i; len = 0; } } se = get_seg_entry(sbi, GET_SEGNO(sbi, i)); offset = GET_BLKOFF_FROM_SEG0(sbi, i); if (!f2fs_test_and_set_bit(offset, se->discard_map)) sbi->discard_blks--; } if (len) err = __issue_discard_async(sbi, bdev, start, len); return err; } static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc, bool check_only) { int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long); int max_blocks = sbi->blocks_per_seg; struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start); unsigned long *cur_map = (unsigned long *)se->cur_valid_map; unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map; unsigned long *discard_map = (unsigned long *)se->discard_map; unsigned long *dmap = SIT_I(sbi)->tmp_map; unsigned int start = 0, end = -1; bool force = (cpc->reason & CP_DISCARD); struct discard_entry *de = NULL; struct list_head *head = &SM_I(sbi)->dcc_info->entry_list; int i; if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi)) return false; if (!force) { if (!test_opt(sbi, DISCARD) || !se->valid_blocks || SM_I(sbi)->dcc_info->nr_discards >= SM_I(sbi)->dcc_info->max_discards) return false; } /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */ for (i = 0; i < entries; i++) dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] : (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i]; while (force || SM_I(sbi)->dcc_info->nr_discards <= SM_I(sbi)->dcc_info->max_discards) { start = __find_rev_next_bit(dmap, max_blocks, end + 1); if (start >= max_blocks) break; end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1); if (force && start && end != max_blocks && (end - start) < cpc->trim_minlen) continue; if (check_only) return true; if (!de) { de = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_F2FS_ZERO); de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start); list_add_tail(&de->list, head); } for (i = start; i < end; i++) __set_bit_le(i, (void *)de->discard_map); SM_I(sbi)->dcc_info->nr_discards += end - start; } return false; } void release_discard_addrs(struct f2fs_sb_info *sbi) { struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); struct discard_entry *entry, *this; /* drop caches */ list_for_each_entry_safe(entry, this, head, list) { list_del(&entry->list); kmem_cache_free(discard_entry_slab, entry); } } /* * Should call clear_prefree_segments after checkpoint is done. 
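 * Prefree segments still hold blocks that were valid in the previous
 * checkpoint; they may only be returned to the free list once the new
 * checkpoint has reached the disk.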
*/ static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned int segno; mutex_lock(&dirty_i->seglist_lock); for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi)) __set_test_and_free(sbi, segno); mutex_unlock(&dirty_i->seglist_lock); } void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list); struct discard_entry *entry, *this; struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned long *prefree_map = dirty_i->dirty_segmap[PRE]; unsigned int start = 0, end = -1; unsigned int secno, start_segno; bool force = (cpc->reason & CP_DISCARD); mutex_lock(&dirty_i->seglist_lock); while (1) { int i; start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1); if (start >= MAIN_SEGS(sbi)) break; end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi), start + 1); for (i = start; i < end; i++) clear_bit(i, prefree_map); dirty_i->nr_dirty[PRE] -= end - start; if (!test_opt(sbi, DISCARD)) continue; if (force && start >= cpc->trim_start && (end - 1) <= cpc->trim_end) continue; if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) { f2fs_issue_discard(sbi, START_BLOCK(sbi, start), (end - start) << sbi->log_blocks_per_seg); continue; } next: secno = GET_SEC_FROM_SEG(sbi, start); start_segno = GET_SEG_FROM_SEC(sbi, secno); if (!IS_CURSEC(sbi, secno) && !get_valid_blocks(sbi, start, true)) f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno), sbi->segs_per_sec << sbi->log_blocks_per_seg); start = start_segno + sbi->segs_per_sec; if (start < end) goto next; else end = start - 1; } mutex_unlock(&dirty_i->seglist_lock); /* send small discards */ list_for_each_entry_safe(entry, this, head, list) { unsigned int cur_pos = 0, next_pos, len, total_len = 0; bool is_valid = test_bit_le(0, entry->discard_map); find_next: if (is_valid) { next_pos = find_next_zero_bit_le(entry->discard_map, sbi->blocks_per_seg, cur_pos); len = next_pos - cur_pos; if (f2fs_sb_mounted_blkzoned(sbi->sb) || (force && len < cpc->trim_minlen)) goto skip; f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos, len); cpc->trimmed += len; total_len += len; } else { next_pos = find_next_bit_le(entry->discard_map, sbi->blocks_per_seg, cur_pos); } skip: cur_pos = next_pos; is_valid = !is_valid; if (cur_pos < sbi->blocks_per_seg) goto find_next; list_del(&entry->list); SM_I(sbi)->dcc_info->nr_discards -= total_len; kmem_cache_free(discard_entry_slab, entry); } wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue); } static int create_discard_cmd_control(struct f2fs_sb_info *sbi) { dev_t dev = sbi->sb->s_bdev->bd_dev; struct discard_cmd_control *dcc; int err = 0, i; if (SM_I(sbi)->dcc_info) { dcc = SM_I(sbi)->dcc_info; goto init_thread; } dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL); if (!dcc) return -ENOMEM; INIT_LIST_HEAD(&dcc->entry_list); for (i = 0; i < MAX_PLIST_NUM; i++) INIT_LIST_HEAD(&dcc->pend_list[i]); INIT_LIST_HEAD(&dcc->wait_list); mutex_init(&dcc->cmd_lock); atomic_set(&dcc->issued_discard, 0); atomic_set(&dcc->issing_discard, 0); atomic_set(&dcc->discard_cmd_cnt, 0); dcc->nr_discards = 0; dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg; dcc->undiscard_blks = 0; dcc->root = RB_ROOT; init_waitqueue_head(&dcc->discard_wait_queue); SM_I(sbi)->dcc_info = dcc; init_thread: dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi, "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev)); if (IS_ERR(dcc->f2fs_issue_discard)) { err = 
PTR_ERR(dcc->f2fs_issue_discard); kfree(dcc); SM_I(sbi)->dcc_info = NULL; return err; } return err; } static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi) { struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info; if (!dcc) return; if (dcc->f2fs_issue_discard) { struct task_struct *discard_thread = dcc->f2fs_issue_discard; dcc->f2fs_issue_discard = NULL; kthread_stop(discard_thread); } kfree(dcc); SM_I(sbi)->dcc_info = NULL; } static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno) { struct sit_info *sit_i = SIT_I(sbi); if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) { sit_i->dirty_sentries++; return false; } return true; } static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type, unsigned int segno, int modified) { struct seg_entry *se = get_seg_entry(sbi, segno); se->type = type; if (modified) __mark_sit_entry_dirty(sbi, segno); } static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) { struct seg_entry *se; unsigned int segno, offset; long int new_vblocks; segno = GET_SEGNO(sbi, blkaddr); se = get_seg_entry(sbi, segno); new_vblocks = se->valid_blocks + del; offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) || (new_vblocks > sbi->blocks_per_seg))); se->valid_blocks = new_vblocks; se->mtime = get_mtime(sbi); SIT_I(sbi)->max_mtime = se->mtime; /* Update valid block bitmap */ if (del > 0) { if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) { #ifdef CONFIG_F2FS_CHECK_FS if (f2fs_test_and_set_bit(offset, se->cur_valid_map_mir)) f2fs_bug_on(sbi, 1); else WARN_ON(1); #else f2fs_bug_on(sbi, 1); #endif } if (f2fs_discard_en(sbi) && !f2fs_test_and_set_bit(offset, se->discard_map)) sbi->discard_blks--; /* don't overwrite by SSR to keep node chain */ if (se->type == CURSEG_WARM_NODE) { if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) se->ckpt_valid_blocks++; } } else { if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) { #ifdef CONFIG_F2FS_CHECK_FS if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map_mir)) f2fs_bug_on(sbi, 1); else WARN_ON(1); #else f2fs_bug_on(sbi, 1); #endif } if (f2fs_discard_en(sbi) && f2fs_test_and_clear_bit(offset, se->discard_map)) sbi->discard_blks++; } if (!f2fs_test_bit(offset, se->ckpt_valid_map)) se->ckpt_valid_blocks += del; __mark_sit_entry_dirty(sbi, segno); /* update total number of valid blocks to be written in ckpt area */ SIT_I(sbi)->written_valid_blocks += del; if (sbi->segs_per_sec > 1) get_sec_entry(sbi, segno)->valid_blocks += del; } void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new) { update_sit_entry(sbi, new, 1); if (GET_SEGNO(sbi, old) != NULL_SEGNO) update_sit_entry(sbi, old, -1); locate_dirty_segment(sbi, GET_SEGNO(sbi, old)); locate_dirty_segment(sbi, GET_SEGNO(sbi, new)); } void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr) { unsigned int segno = GET_SEGNO(sbi, addr); struct sit_info *sit_i = SIT_I(sbi); f2fs_bug_on(sbi, addr == NULL_ADDR); if (addr == NEW_ADDR) return; /* add it into sit main buffer */ mutex_lock(&sit_i->sentry_lock); update_sit_entry(sbi, addr, -1); /* add it into dirty seglist */ locate_dirty_segment(sbi, segno); mutex_unlock(&sit_i->sentry_lock); } bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr) { struct sit_info *sit_i = SIT_I(sbi); unsigned int segno, offset; struct seg_entry *se; bool is_cp = false; if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) return true; mutex_lock(&sit_i->sentry_lock); segno = 
GET_SEGNO(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);

	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	if (f2fs_test_bit(offset, se->ckpt_valid_map))
		is_cp = true;

	mutex_unlock(&sit_i->sentry_lock);

	return is_cp;
}

/*
 * This function should be called while holding the curseg_mutex lock.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;

	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}

/*
 * Caller should put this summary page.
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *dst = page_address(page);

	if (src)
		memcpy(dst, src, PAGE_SIZE);
	else
		memset(dst, 0, PAGE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	update_meta_page(sbi, (void *)sum_blk, blk_addr);
}

static void write_current_sum_page(struct f2fs_sb_info *sbi,
						int type, block_t blk_addr)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct page *page = grab_meta_page(sbi, blk_addr);
	struct f2fs_summary_block *src = curseg->sum_blk;
	struct f2fs_summary_block *dst;

	dst = (struct f2fs_summary_block *)page_address(page);

	mutex_lock(&curseg->curseg_mutex);

	down_read(&curseg->journal_rwsem);
	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
	up_read(&curseg->journal_rwsem);

	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

	mutex_unlock(&curseg->curseg_mutex);

	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

/*
 * Find a new segment in the free segment bitmap, scanning in the requested
 * allocation direction.  This function must succeed; failing to find a free
 * segment is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = GET_SEG_FROM_SEC(sbi, secno);
	zoneno = GET_ZONE_FROM_SEC(sbi, secno);

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as a dirty segment in the free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
	/* if segs_per_sec is larger than 1, we need to keep the original policy */
	if (sbi->segs_per_sec != 1)
		return CURSEG_I(sbi, type)->segno;

	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		return 0;

	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
		return SIT_I(sbi)->last_victim[ALLOC_NEXT];
	return CURSEG_I(sbi, type)->segno;
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	segno = __get_next_segno(sbi, type);
	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset.  However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff().
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it should recover the existing segment information of the
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
	unsigned segno = NULL_SEGNO;
	int i, cnt;
	bool reversed = false;

	/* need_SSR() already forces us to do this */
	if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
		curseg->next_segno = segno;
		return 1;
	}

	/* For node segments, let's do SSR more intensively */
	if (IS_NODESEG(type)) {
		if (type >= CURSEG_WARM_NODE) {
			reversed = true;
			i = CURSEG_COLD_NODE;
		} else {
			i = CURSEG_HOT_NODE;
		}
		cnt = NR_CURSEG_NODE_TYPE;
	} else {
		if (type >= CURSEG_WARM_DATA) {
			reversed = true;
			i = CURSEG_COLD_DATA;
		} else {
			i = CURSEG_HOT_DATA;
		}
		cnt = NR_CURSEG_DATA_TYPE;
	}

	for (; cnt-- > 0; reversed ?
i-- : i++) { if (i == type) continue; if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) { curseg->next_segno = segno; return 1; } } return 0; } /* * flush out current segment and replace it with new segment * This function should be returned with success, otherwise BUG */ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, int type, bool force) { struct curseg_info *curseg = CURSEG_I(sbi, type); if (force) new_curseg(sbi, type, true); else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && type == CURSEG_WARM_NODE) new_curseg(sbi, type, false); else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type)) new_curseg(sbi, type, false); else if (need_SSR(sbi) && get_ssr_segment(sbi, type)) change_curseg(sbi, type, true); else new_curseg(sbi, type, false); stat_inc_seg_type(sbi, curseg); } void allocate_new_segments(struct f2fs_sb_info *sbi) { struct curseg_info *curseg; unsigned int old_segno; int i; for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { curseg = CURSEG_I(sbi, i); old_segno = curseg->segno; SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); locate_dirty_segment(sbi, old_segno); } } static const struct segment_allocation default_salloc_ops = { .allocate_segment = allocate_segment_by_default, }; bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc) { __u64 trim_start = cpc->trim_start; bool has_candidate = false; mutex_lock(&SIT_I(sbi)->sentry_lock); for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) { if (add_discard_addrs(sbi, cpc, true)) { has_candidate = true; break; } } mutex_unlock(&SIT_I(sbi)->sentry_lock); cpc->trim_start = trim_start; return has_candidate; } int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) { __u64 start = F2FS_BYTES_TO_BLK(range->start); __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1; unsigned int start_segno, end_segno; struct cp_control cpc; int err = 0; if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize) return -EINVAL; cpc.trimmed = 0; if (end <= MAIN_BLKADDR(sbi)) goto out; if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { f2fs_msg(sbi->sb, KERN_WARNING, "Found FS corruption, run fsck to fix."); goto out; } /* start/end segment number in main_area */ start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start); end_segno = (end >= MAX_BLKADDR(sbi)) ? 
MAIN_SEGS(sbi) - 1 : GET_SEGNO(sbi, end); cpc.reason = CP_DISCARD; cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen)); /* do checkpoint to issue discard commands safely */ for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) { cpc.trim_start = start_segno; if (sbi->discard_blks == 0) break; else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi)) cpc.trim_end = end_segno; else cpc.trim_end = min_t(unsigned int, rounddown(start_segno + BATCHED_TRIM_SEGMENTS(sbi), sbi->segs_per_sec) - 1, end_segno); mutex_lock(&sbi->gc_mutex); err = write_checkpoint(sbi, &cpc); mutex_unlock(&sbi->gc_mutex); if (err) break; schedule(); } out: range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); return err; } static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); if (curseg->next_blkoff < sbi->blocks_per_seg) return true; return false; } static int __get_segment_type_2(struct f2fs_io_info *fio) { if (fio->type == DATA) return CURSEG_HOT_DATA; else return CURSEG_HOT_NODE; } static int __get_segment_type_4(struct f2fs_io_info *fio) { if (fio->type == DATA) { struct inode *inode = fio->page->mapping->host; if (S_ISDIR(inode->i_mode)) return CURSEG_HOT_DATA; else return CURSEG_COLD_DATA; } else { if (IS_DNODE(fio->page) && is_cold_node(fio->page)) return CURSEG_WARM_NODE; else return CURSEG_COLD_NODE; } } static int __get_segment_type_6(struct f2fs_io_info *fio) { if (fio->type == DATA) { struct inode *inode = fio->page->mapping->host; if (is_cold_data(fio->page) || file_is_cold(inode)) return CURSEG_COLD_DATA; if (is_inode_flag_set(inode, FI_HOT_DATA)) return CURSEG_HOT_DATA; return CURSEG_WARM_DATA; } else { if (IS_DNODE(fio->page)) return is_cold_node(fio->page) ? CURSEG_WARM_NODE : CURSEG_HOT_NODE; return CURSEG_COLD_NODE; } } static int __get_segment_type(struct f2fs_io_info *fio) { int type = 0; switch (fio->sbi->active_logs) { case 2: type = __get_segment_type_2(fio); break; case 4: type = __get_segment_type_4(fio); break; case 6: type = __get_segment_type_6(fio); break; default: f2fs_bug_on(fio->sbi, true); } if (IS_HOT(type)) fio->temp = HOT; else if (IS_WARM(type)) fio->temp = WARM; else fio->temp = COLD; return type; } void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, struct f2fs_io_info *fio, bool add_list) { struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); mutex_lock(&sit_i->sentry_lock); *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); f2fs_wait_discard_bio(sbi, *new_blkaddr); /* * __add_sum_entry should be resided under the curseg_mutex * because, this function updates a summary entry in the * current summary block. */ __add_sum_entry(sbi, type, sum); __refresh_next_blkoff(sbi, curseg); stat_inc_block_count(sbi, curseg); if (!__has_curseg_space(sbi, type)) sit_i->s_ops->allocate_segment(sbi, type, false); /* * SIT information should be updated after segment allocation, * since we need to keep dirty segments precisely under SSR. 
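	 * Otherwise the block just consumed here would not yet be reflected
	 * in its seg_entry, and a concurrent SSR pick could select a stale
	 * victim.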
*/ refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr); mutex_unlock(&sit_i->sentry_lock); if (page && IS_NODESEG(type)) fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg)); if (add_list) { struct f2fs_bio_info *io; INIT_LIST_HEAD(&fio->list); fio->in_list = true; io = sbi->write_io[fio->type] + fio->temp; spin_lock(&io->io_lock); list_add_tail(&fio->list, &io->io_list); spin_unlock(&io->io_lock); } mutex_unlock(&curseg->curseg_mutex); } static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) { int type = __get_segment_type(fio); int err; reallocate: allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, &fio->new_blkaddr, sum, type, fio, true); /* writeout dirty page into bdev */ err = f2fs_submit_page_write(fio); if (err == -EAGAIN) { fio->old_blkaddr = fio->new_blkaddr; goto reallocate; } } void write_meta_page(struct f2fs_sb_info *sbi, struct page *page) { struct f2fs_io_info fio = { .sbi = sbi, .type = META, .op = REQ_OP_WRITE, .op_flags = REQ_SYNC | REQ_META | REQ_PRIO, .old_blkaddr = page->index, .new_blkaddr = page->index, .page = page, .encrypted_page = NULL, .in_list = false, }; if (unlikely(page->index >= MAIN_BLKADDR(sbi))) fio.op_flags &= ~REQ_META; set_page_writeback(page); f2fs_submit_page_write(&fio); } void write_node_page(unsigned int nid, struct f2fs_io_info *fio) { struct f2fs_summary sum; set_summary(&sum, nid, 0, 0); do_write_page(&sum, fio); } void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio) { struct f2fs_sb_info *sbi = fio->sbi; struct f2fs_summary sum; struct node_info ni; f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR); get_node_info(sbi, dn->nid, &ni); set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); do_write_page(&sum, fio); f2fs_update_data_blkaddr(dn, fio->new_blkaddr); } int rewrite_data_page(struct f2fs_io_info *fio) { fio->new_blkaddr = fio->old_blkaddr; stat_inc_inplace_blocks(fio->sbi); return f2fs_submit_page_bio(fio); } void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, block_t old_blkaddr, block_t new_blkaddr, bool recover_curseg, bool recover_newaddr) { struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg; unsigned int segno, old_cursegno; struct seg_entry *se; int type; unsigned short old_blkoff; segno = GET_SEGNO(sbi, new_blkaddr); se = get_seg_entry(sbi, segno); type = se->type; if (!recover_curseg) { /* for recovery flow */ if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) { if (old_blkaddr == NULL_ADDR) type = CURSEG_COLD_DATA; else type = CURSEG_WARM_DATA; } } else { if (!IS_CURSEG(sbi, segno)) type = CURSEG_WARM_DATA; } curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); mutex_lock(&sit_i->sentry_lock); old_cursegno = curseg->segno; old_blkoff = curseg->next_blkoff; /* change the current segment */ if (segno != curseg->segno) { curseg->next_segno = segno; change_curseg(sbi, type, true); } curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); __add_sum_entry(sbi, type, sum); if (!recover_curseg || recover_newaddr) update_sit_entry(sbi, new_blkaddr, 1); if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) update_sit_entry(sbi, old_blkaddr, -1); locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr)); locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr)); locate_dirty_segment(sbi, old_cursegno); if (recover_curseg) { if (old_cursegno != curseg->segno) { curseg->next_segno = old_cursegno; change_curseg(sbi, type, true); } curseg->next_blkoff = old_blkoff; } mutex_unlock(&sit_i->sentry_lock); mutex_unlock(&curseg->curseg_mutex); 
} void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, block_t old_addr, block_t new_addr, unsigned char version, bool recover_curseg, bool recover_newaddr) { struct f2fs_summary sum; set_summary(&sum, dn->nid, dn->ofs_in_node, version); __f2fs_replace_block(sbi, &sum, old_addr, new_addr, recover_curseg, recover_newaddr); f2fs_update_data_blkaddr(dn, new_addr); } void f2fs_wait_on_page_writeback(struct page *page, enum page_type type, bool ordered) { if (PageWriteback(page)) { struct f2fs_sb_info *sbi = F2FS_P_SB(page); f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0, page->index, type); if (ordered) wait_on_page_writeback(page); else wait_for_stable_page(page); } } void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi, block_t blkaddr) { struct page *cpage; if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) return; cpage = find_lock_page(META_MAPPING(sbi), blkaddr); if (cpage) { f2fs_wait_on_page_writeback(cpage, DATA, true); f2fs_put_page(cpage, 1); } } static int read_compacted_summaries(struct f2fs_sb_info *sbi) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct curseg_info *seg_i; unsigned char *kaddr; struct page *page; block_t start; int i, j, offset; start = start_sum_block(sbi); page = get_meta_page(sbi, start++); kaddr = (unsigned char *)page_address(page); /* Step 1: restore nat cache */ seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE); /* Step 2: restore sit cache */ seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE); offset = 2 * SUM_JOURNAL_SIZE; /* Step 3: restore summary entries */ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { unsigned short blk_off; unsigned int segno; seg_i = CURSEG_I(sbi, i); segno = le32_to_cpu(ckpt->cur_data_segno[i]); blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); seg_i->next_segno = segno; reset_curseg(sbi, i, 0); seg_i->alloc_type = ckpt->alloc_type[i]; seg_i->next_blkoff = blk_off; if (seg_i->alloc_type == SSR) blk_off = sbi->blocks_per_seg; for (j = 0; j < blk_off; j++) { struct f2fs_summary *s; s = (struct f2fs_summary *)(kaddr + offset); seg_i->sum_blk->entries[j] = *s; offset += SUMMARY_SIZE; if (offset + SUMMARY_SIZE <= PAGE_SIZE - SUM_FOOTER_SIZE) continue; f2fs_put_page(page, 1); page = NULL; page = get_meta_page(sbi, start++); kaddr = (unsigned char *)page_address(page); offset = 0; } } f2fs_put_page(page, 1); return 0; } static int read_normal_summaries(struct f2fs_sb_info *sbi, int type) { struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_summary_block *sum; struct curseg_info *curseg; struct page *new; unsigned short blk_off; unsigned int segno = 0; block_t blk_addr = 0; /* get segment number and block addr */ if (IS_DATASEG(type)) { segno = le32_to_cpu(ckpt->cur_data_segno[type]); blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type - CURSEG_HOT_DATA]); if (__exist_node_summaries(sbi)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type); else blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type); } else { segno = le32_to_cpu(ckpt->cur_node_segno[type - CURSEG_HOT_NODE]); blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type - CURSEG_HOT_NODE]); if (__exist_node_summaries(sbi)) blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE, type - CURSEG_HOT_NODE); else blk_addr = GET_SUM_BLOCK(sbi, segno); } new = get_meta_page(sbi, blk_addr); sum = (struct f2fs_summary_block *)page_address(new); if (IS_NODESEG(type)) { if (__exist_node_summaries(sbi)) { struct f2fs_summary *ns = 
&sum->entries[0]; int i; for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { ns->version = 0; ns->ofs_in_node = 0; } } else { int err; err = restore_node_summary(sbi, segno, sum); if (err) { f2fs_put_page(new, 1); return err; } } } /* set uncompleted segment to curseg */ curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); /* update journal info */ down_write(&curseg->journal_rwsem); memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE); up_write(&curseg->journal_rwsem); memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE); memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE); curseg->next_segno = segno; reset_curseg(sbi, type, 0); curseg->alloc_type = ckpt->alloc_type[type]; curseg->next_blkoff = blk_off; mutex_unlock(&curseg->curseg_mutex); f2fs_put_page(new, 1); return 0; } static int restore_curseg_summaries(struct f2fs_sb_info *sbi) { int type = CURSEG_HOT_DATA; int err; if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) { int npages = npages_for_summary_flush(sbi, true); if (npages >= 2) ra_meta_pages(sbi, start_sum_block(sbi), npages, META_CP, true); /* restore for compacted data summary */ if (read_compacted_summaries(sbi)) return -EINVAL; type = CURSEG_HOT_NODE; } if (__exist_node_summaries(sbi)) ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type), NR_CURSEG_TYPE - type, META_CP, true); for (; type <= CURSEG_COLD_NODE; type++) { err = read_normal_summaries(sbi, type); if (err) return err; } return 0; } static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) { struct page *page; unsigned char *kaddr; struct f2fs_summary *summary; struct curseg_info *seg_i; int written_size = 0; int i, j; page = grab_meta_page(sbi, blkaddr++); kaddr = (unsigned char *)page_address(page); /* Step 1: write nat cache */ seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE); written_size += SUM_JOURNAL_SIZE; /* Step 2: write sit cache */ seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE); written_size += SUM_JOURNAL_SIZE; /* Step 3: write summary entries */ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { unsigned short blkoff; seg_i = CURSEG_I(sbi, i); if (sbi->ckpt->alloc_type[i] == SSR) blkoff = sbi->blocks_per_seg; else blkoff = curseg_blkoff(sbi, i); for (j = 0; j < blkoff; j++) { if (!page) { page = grab_meta_page(sbi, blkaddr++); kaddr = (unsigned char *)page_address(page); written_size = 0; } summary = (struct f2fs_summary *)(kaddr + written_size); *summary = seg_i->sum_blk->entries[j]; written_size += SUMMARY_SIZE; if (written_size + SUMMARY_SIZE <= PAGE_SIZE - SUM_FOOTER_SIZE) continue; set_page_dirty(page); f2fs_put_page(page, 1); page = NULL; } } if (page) { set_page_dirty(page); f2fs_put_page(page, 1); } } static void write_normal_summaries(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { int i, end; if (IS_DATASEG(type)) end = type + NR_CURSEG_DATA_TYPE; else end = type + NR_CURSEG_NODE_TYPE; for (i = type; i < end; i++) write_current_sum_page(sbi, i, blkaddr + (i - type)); } void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) write_compacted_summaries(sbi, start_blk); else write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); } void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); } int lookup_journal_in_cursum(struct f2fs_journal *journal, int type, unsigned int val, int alloc) { 
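	/*
	 * Look up @val (a nid for NAT_JOURNAL, a segno for SIT_JOURNAL) in
	 * the in-summary journal.  Returns the matching slot index; if no
	 * match is found and @alloc is set, a fresh slot is reserved and its
	 * index returned.  Returns -1 when nothing is found and no slot can
	 * be reserved.
	 */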
int i; if (type == NAT_JOURNAL) { for (i = 0; i < nats_in_cursum(journal); i++) { if (le32_to_cpu(nid_in_journal(journal, i)) == val) return i; } if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL)) return update_nats_in_cursum(journal, 1); } else if (type == SIT_JOURNAL) { for (i = 0; i < sits_in_cursum(journal); i++) if (le32_to_cpu(segno_in_journal(journal, i)) == val) return i; if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL)) return update_sits_in_cursum(journal, 1); } return -1; } static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, unsigned int segno) { return get_meta_page(sbi, current_sit_addr(sbi, segno)); } static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, unsigned int start) { struct sit_info *sit_i = SIT_I(sbi); struct page *src_page, *dst_page; pgoff_t src_off, dst_off; void *src_addr, *dst_addr; src_off = current_sit_addr(sbi, start); dst_off = next_sit_addr(sbi, src_off); /* get current sit block page without lock */ src_page = get_meta_page(sbi, src_off); dst_page = grab_meta_page(sbi, dst_off); f2fs_bug_on(sbi, PageDirty(src_page)); src_addr = page_address(src_page); dst_addr = page_address(dst_page); memcpy(dst_addr, src_addr, PAGE_SIZE); set_page_dirty(dst_page); f2fs_put_page(src_page, 1); set_to_next_sit(sit_i, start); return dst_page; } static struct sit_entry_set *grab_sit_entry_set(void) { struct sit_entry_set *ses = f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS); ses->entry_cnt = 0; INIT_LIST_HEAD(&ses->set_list); return ses; } static void release_sit_entry_set(struct sit_entry_set *ses) { list_del(&ses->set_list); kmem_cache_free(sit_entry_set_slab, ses); } static void adjust_sit_entry_set(struct sit_entry_set *ses, struct list_head *head) { struct sit_entry_set *next = ses; if (list_is_last(&ses->set_list, head)) return; list_for_each_entry_continue(next, head, set_list) if (ses->entry_cnt <= next->entry_cnt) break; list_move_tail(&ses->set_list, &next->set_list); } static void add_sit_entry(unsigned int segno, struct list_head *head) { struct sit_entry_set *ses; unsigned int start_segno = START_SEGNO(segno); list_for_each_entry(ses, head, set_list) { if (ses->start_segno == start_segno) { ses->entry_cnt++; adjust_sit_entry_set(ses, head); return; } } ses = grab_sit_entry_set(); ses->start_segno = start_segno; ses->entry_cnt++; list_add(&ses->set_list, head); } static void add_sits_in_set(struct f2fs_sb_info *sbi) { struct f2fs_sm_info *sm_info = SM_I(sbi); struct list_head *set_list = &sm_info->sit_entry_set; unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; unsigned int segno; for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) add_sit_entry(segno, set_list); } static void remove_sits_in_journal(struct f2fs_sb_info *sbi) { struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); struct f2fs_journal *journal = curseg->journal; int i; down_write(&curseg->journal_rwsem); for (i = 0; i < sits_in_cursum(journal); i++) { unsigned int segno; bool dirtied; segno = le32_to_cpu(segno_in_journal(journal, i)); dirtied = __mark_sit_entry_dirty(sbi, segno); if (!dirtied) add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); } update_sits_in_cursum(journal, -i); up_write(&curseg->journal_rwsem); } /* * CP calls this function, which flushes SIT entries including sit_journal, * and moves prefree segs to free segs. 
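 * Dirty entries are first grouped into per-SIT-block sets so that each set
 * can be flushed either into the journal (while it still has room) or into
 * its on-disk SIT block in a single pass.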
*/ void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) { struct sit_info *sit_i = SIT_I(sbi); unsigned long *bitmap = sit_i->dirty_sentries_bitmap; struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); struct f2fs_journal *journal = curseg->journal; struct sit_entry_set *ses, *tmp; struct list_head *head = &SM_I(sbi)->sit_entry_set; bool to_journal = true; struct seg_entry *se; mutex_lock(&sit_i->sentry_lock); if (!sit_i->dirty_sentries) goto out; /* * add and account sit entries of dirty bitmap in sit entry * set temporarily */ add_sits_in_set(sbi); /* * if there are no enough space in journal to store dirty sit * entries, remove all entries from journal and add and account * them in sit entry set. */ if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL)) remove_sits_in_journal(sbi); /* * there are two steps to flush sit entries: * #1, flush sit entries to journal in current cold data summary block. * #2, flush sit entries to sit page. */ list_for_each_entry_safe(ses, tmp, head, set_list) { struct page *page = NULL; struct f2fs_sit_block *raw_sit = NULL; unsigned int start_segno = ses->start_segno; unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK, (unsigned long)MAIN_SEGS(sbi)); unsigned int segno = start_segno; if (to_journal && !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL)) to_journal = false; if (to_journal) { down_write(&curseg->journal_rwsem); } else { page = get_next_sit_page(sbi, start_segno); raw_sit = page_address(page); } /* flush dirty sit entries in region of current sit set */ for_each_set_bit_from(segno, bitmap, end) { int offset, sit_offset; se = get_seg_entry(sbi, segno); /* add discard candidates */ if (!(cpc->reason & CP_DISCARD)) { cpc->trim_start = segno; add_discard_addrs(sbi, cpc, false); } if (to_journal) { offset = lookup_journal_in_cursum(journal, SIT_JOURNAL, segno, 1); f2fs_bug_on(sbi, offset < 0); segno_in_journal(journal, offset) = cpu_to_le32(segno); seg_info_to_raw_sit(se, &sit_in_journal(journal, offset)); } else { sit_offset = SIT_ENTRY_OFFSET(sit_i, segno); seg_info_to_raw_sit(se, &raw_sit->entries[sit_offset]); } __clear_bit(segno, bitmap); sit_i->dirty_sentries--; ses->entry_cnt--; } if (to_journal) up_write(&curseg->journal_rwsem); else f2fs_put_page(page, 1); f2fs_bug_on(sbi, ses->entry_cnt); release_sit_entry_set(ses); } f2fs_bug_on(sbi, !list_empty(head)); f2fs_bug_on(sbi, sit_i->dirty_sentries); out: if (cpc->reason & CP_DISCARD) { __u64 trim_start = cpc->trim_start; for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) add_discard_addrs(sbi, cpc, false); cpc->trim_start = trim_start; } mutex_unlock(&sit_i->sentry_lock); set_prefree_as_free_segments(sbi); } static int build_sit_info(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct sit_info *sit_i; unsigned int sit_segs, start; char *src_bitmap; unsigned int bitmap_size; /* allocate memory for SIT information */ sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL); if (!sit_i) return -ENOMEM; SM_I(sbi)->sit_info = sit_i; sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry), GFP_KERNEL); if (!sit_i->sentries) return -ENOMEM; bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); if (!sit_i->dirty_sentries_bitmap) return -ENOMEM; for (start = 0; start < MAIN_SEGS(sbi); start++) { sit_i->sentries[start].cur_valid_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL); sit_i->sentries[start].ckpt_valid_map = 
kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map ||
				!sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
		sit_i->sentries[start].cur_valid_map_mir =
			kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map_mir)
			return -ENOMEM;
#endif

		if (f2fs_discard_en(sbi)) {
			sit_i->sentries[start].discard_map =
				kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
			if (!sit_i->sentries[start].discard_map)
				return -ENOMEM;
		}
	}

	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
					sizeof(struct sec_entry), GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related to SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;
#endif

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = 0;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		init_rwsem(&array[i].journal_rwsem);
		array[i].journal = kzalloc(sizeof(struct f2fs_journal),
							GFP_KERNEL);
		if (!array[i].journal)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct seg_entry *se;
	struct f2fs_sit_entry sit;
	int
sit_blk_cnt = SIT_BLK_CNT(sbi); unsigned int i, start, end; unsigned int readed, start_blk = 0; do { readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES, META_SIT, true); start = start_blk * sit_i->sents_per_block; end = (start_blk + readed) * sit_i->sents_per_block; for (; start < end && start < MAIN_SEGS(sbi); start++) { struct f2fs_sit_block *sit_blk; struct page *page; se = &sit_i->sentries[start]; page = get_current_sit_page(sbi, start); sit_blk = (struct f2fs_sit_block *)page_address(page); sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; f2fs_put_page(page, 1); check_block_count(sbi, start, &sit); seg_info_from_raw_sit(se, &sit); /* build discard map only one time */ if (f2fs_discard_en(sbi)) { if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE); } else { memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks; } } if (sbi->segs_per_sec > 1) get_sec_entry(sbi, start)->valid_blocks += se->valid_blocks; } start_blk += readed; } while (start_blk < sit_blk_cnt); down_read(&curseg->journal_rwsem); for (i = 0; i < sits_in_cursum(journal); i++) { unsigned int old_valid_blocks; start = le32_to_cpu(segno_in_journal(journal, i)); se = &sit_i->sentries[start]; sit = sit_in_journal(journal, i); old_valid_blocks = se->valid_blocks; check_block_count(sbi, start, &sit); seg_info_from_raw_sit(se, &sit); if (f2fs_discard_en(sbi)) { if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE); } else { memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); sbi->discard_blks += old_valid_blocks - se->valid_blocks; } } if (sbi->segs_per_sec > 1) get_sec_entry(sbi, start)->valid_blocks += se->valid_blocks - old_valid_blocks; } up_read(&curseg->journal_rwsem); } static void init_free_segmap(struct f2fs_sb_info *sbi) { unsigned int start; int type; for (start = 0; start < MAIN_SEGS(sbi); start++) { struct seg_entry *sentry = get_seg_entry(sbi, start); if (!sentry->valid_blocks) __set_free(sbi, start); else SIT_I(sbi)->written_valid_blocks += sentry->valid_blocks; } /* set use the current segments */ for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { struct curseg_info *curseg_t = CURSEG_I(sbi, type); __set_test_and_inuse(sbi, curseg_t->segno); } } static void init_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct free_segmap_info *free_i = FREE_I(sbi); unsigned int segno = 0, offset = 0; unsigned short valid_blocks; while (1) { /* find dirty segment based on free segmap */ segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); if (segno >= MAIN_SEGS(sbi)) break; offset = segno + 1; valid_blocks = get_valid_blocks(sbi, segno, false); if (valid_blocks == sbi->blocks_per_seg || !valid_blocks) continue; if (valid_blocks > sbi->blocks_per_seg) { f2fs_bug_on(sbi, 1); continue; } mutex_lock(&dirty_i->seglist_lock); __locate_dirty_segment(sbi, segno, DIRTY); mutex_unlock(&dirty_i->seglist_lock); } } static int init_victim_secmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); if (!dirty_i->victim_secmap) return -ENOMEM; return 0; } static int build_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i; unsigned int bitmap_size, i; /* allocate memory for dirty segments list information */ dirty_i = 
kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); if (!dirty_i) return -ENOMEM; SM_I(sbi)->dirty_info = dirty_i; mutex_init(&dirty_i->seglist_lock); bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); for (i = 0; i < NR_DIRTY_TYPE; i++) { dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); if (!dirty_i->dirty_segmap[i]) return -ENOMEM; } init_dirty_segmap(sbi); return init_victim_secmap(sbi); } /* * Update min, max modified time for cost-benefit GC algorithm */ static void init_min_max_mtime(struct f2fs_sb_info *sbi) { struct sit_info *sit_i = SIT_I(sbi); unsigned int segno; mutex_lock(&sit_i->sentry_lock); sit_i->min_mtime = LLONG_MAX; for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) { unsigned int i; unsigned long long mtime = 0; for (i = 0; i < sbi->segs_per_sec; i++) mtime += get_seg_entry(sbi, segno + i)->mtime; mtime = div_u64(mtime, sbi->segs_per_sec); if (sit_i->min_mtime > mtime) sit_i->min_mtime = mtime; } sit_i->max_mtime = get_mtime(sbi); mutex_unlock(&sit_i->sentry_lock); } int build_segment_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_sm_info *sm_info; int err; sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); if (!sm_info) return -ENOMEM; /* init sm info */ sbi->sm_info = sm_info; sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); sm_info->segment_count = le32_to_cpu(raw_super->segment_count); sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); sm_info->rec_prefree_segments = sm_info->main_segments * DEF_RECLAIM_PREFREE_SEGMENTS / 100; if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS) sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS; if (!test_opt(sbi, LFS)) sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; INIT_LIST_HEAD(&sm_info->sit_entry_set); if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) { err = create_flush_cmd_control(sbi); if (err) return err; } err = create_discard_cmd_control(sbi); if (err) return err; err = build_sit_info(sbi); if (err) return err; err = build_free_segmap(sbi); if (err) return err; err = build_curseg(sbi); if (err) return err; /* reinit free segmap based on SIT */ build_sit_entries(sbi); init_free_segmap(sbi); err = build_dirty_segmap(sbi); if (err) return err; init_min_max_mtime(sbi); return 0; } static void discard_dirty_segmap(struct f2fs_sb_info *sbi, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); mutex_lock(&dirty_i->seglist_lock); kvfree(dirty_i->dirty_segmap[dirty_type]); dirty_i->nr_dirty[dirty_type] = 0; mutex_unlock(&dirty_i->seglist_lock); } static void destroy_victim_secmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); kvfree(dirty_i->victim_secmap); } static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); int i; if (!dirty_i) return; /* discard pre-free/dirty segments list */ for (i = 0; i < NR_DIRTY_TYPE; i++) discard_dirty_segmap(sbi, i); 
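
	/* free the GC victim bitmap allocated once in init_victim_secmap() */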
destroy_victim_secmap(sbi); SM_I(sbi)->dirty_info = NULL; kfree(dirty_i); } static void destroy_curseg(struct f2fs_sb_info *sbi) { struct curseg_info *array = SM_I(sbi)->curseg_array; int i; if (!array) return; SM_I(sbi)->curseg_array = NULL; for (i = 0; i < NR_CURSEG_TYPE; i++) { kfree(array[i].sum_blk); kfree(array[i].journal); } kfree(array); } static void destroy_free_segmap(struct f2fs_sb_info *sbi) { struct free_segmap_info *free_i = SM_I(sbi)->free_info; if (!free_i) return; SM_I(sbi)->free_info = NULL; kvfree(free_i->free_segmap); kvfree(free_i->free_secmap); kfree(free_i); } static void destroy_sit_info(struct f2fs_sb_info *sbi) { struct sit_info *sit_i = SIT_I(sbi); unsigned int start; if (!sit_i) return; if (sit_i->sentries) { for (start = 0; start < MAIN_SEGS(sbi); start++) { kfree(sit_i->sentries[start].cur_valid_map); #ifdef CONFIG_F2FS_CHECK_FS kfree(sit_i->sentries[start].cur_valid_map_mir); #endif kfree(sit_i->sentries[start].ckpt_valid_map); kfree(sit_i->sentries[start].discard_map); } } kfree(sit_i->tmp_map); kvfree(sit_i->sentries); kvfree(sit_i->sec_entries); kvfree(sit_i->dirty_sentries_bitmap); SM_I(sbi)->sit_info = NULL; kfree(sit_i->sit_bitmap); #ifdef CONFIG_F2FS_CHECK_FS kfree(sit_i->sit_bitmap_mir); #endif kfree(sit_i); } void destroy_segment_manager(struct f2fs_sb_info *sbi) { struct f2fs_sm_info *sm_info = SM_I(sbi); if (!sm_info) return; destroy_flush_cmd_control(sbi, true); destroy_discard_cmd_control(sbi); destroy_dirty_segmap(sbi); destroy_curseg(sbi); destroy_free_segmap(sbi); destroy_sit_info(sbi); sbi->sm_info = NULL; kfree(sm_info); } int __init create_segment_manager_caches(void) { discard_entry_slab = f2fs_kmem_cache_create("discard_entry", sizeof(struct discard_entry)); if (!discard_entry_slab) goto fail; discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd", sizeof(struct discard_cmd)); if (!discard_cmd_slab) goto destroy_discard_entry; sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set", sizeof(struct sit_entry_set)); if (!sit_entry_set_slab) goto destroy_discard_cmd; inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry", sizeof(struct inmem_pages)); if (!inmem_entry_slab) goto destroy_sit_entry_set; return 0; destroy_sit_entry_set: kmem_cache_destroy(sit_entry_set_slab); destroy_discard_cmd: kmem_cache_destroy(discard_cmd_slab); destroy_discard_entry: kmem_cache_destroy(discard_entry_slab); fail: return -ENOMEM; } void destroy_segment_manager_caches(void) { kmem_cache_destroy(sit_entry_set_slab); kmem_cache_destroy(discard_cmd_slab); kmem_cache_destroy(discard_entry_slab); kmem_cache_destroy(inmem_entry_slab); }
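
/*
 * Illustration only (not part of this file): the goto-based unwind pattern
 * used by create_segment_manager_caches() above, reduced to a minimal
 * stand-alone sketch.  Every allocation gets a cleanup label, and a failure
 * jumps to the label that releases exactly what has been set up so far, in
 * reverse order.  All names below are hypothetical.
 *
 *	static struct kmem_cache *cache_a, *cache_b;
 *
 *	static int __init create_caches(void)
 *	{
 *		cache_a = f2fs_kmem_cache_create("a", sizeof(struct a));
 *		if (!cache_a)
 *			goto fail;
 *		cache_b = f2fs_kmem_cache_create("b", sizeof(struct b));
 *		if (!cache_b)
 *			goto destroy_a;
 *		return 0;
 *	destroy_a:
 *		kmem_cache_destroy(cache_a);
 *	fail:
 *		return -ENOMEM;
 *	}
 */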
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

static unsigned long __reverse_ulong(unsigned char *str)
{
    unsigned long tmp = 0;
    int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
    shift = 56;
#endif
    while (shift >= 0) {
        tmp |= (unsigned long)str[idx++] << shift;
        shift -= BITS_PER_BYTE;
    }
    return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
    int num = 0;

#if BITS_PER_LONG == 64
    if ((word & 0xffffffff00000000UL) == 0)
        num += 32;
    else
        word >>= 32;
#endif
    if ((word & 0xffff0000) == 0)
        num += 16;
    else
        word >>= 16;

    if ((word & 0xff00) == 0)
        num += 8;
    else
        word >>= 8;

    if ((word & 0xf0) == 0)
        num += 4;
    else
        word >>= 4;

    if ((word & 0xc) == 0)
        num += 2;
    else
        word >>= 2;

    if ((word & 0x2) == 0)
        num += 1;
    return num;
}
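/*
 * Illustrative sketch (not part of f2fs): the MSB-first bit convention that
 * makes __reverse_ulong()/__reverse_ffs() above necessary. example_set_bit()
 * is a hypothetical userspace stand-in for f2fs_set_bit(), which lives in
 * f2fs.h; it shows why bit 0 of a map is the high bit of byte 0.
 */
#if 0 /* example only, not built */
#include <assert.h>

static void example_set_bit(unsigned int nr, unsigned char *addr)
{
    /* bit 0 is the *most* significant bit of byte 0 */
    addr[nr >> 3] |= 1u << (7 - (nr & 7));
}

static void demo(void)
{
    unsigned char map[2] = {0, 0};

    example_set_bit(0, map);    /* => map[0] == 0x80, i.e. 1000 0000 */
    example_set_bit(7, map);    /* => map[0] == 0x81, i.e. 1000 0001 */
    assert(map[0] == 0x81);
}
#endif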
/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of unsigned long.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
            unsigned long size, unsigned long offset)
{
    const unsigned long *p = addr + BIT_WORD(offset);
    unsigned long result = size;
    unsigned long tmp;

    if (offset >= size)
        return size;

    size -= (offset & ~(BITS_PER_LONG - 1));
    offset %= BITS_PER_LONG;

    while (1) {
        if (*p == 0)
            goto pass;

        tmp = __reverse_ulong((unsigned char *)p);

        tmp &= ~0UL >> offset;
        if (size < BITS_PER_LONG)
            tmp &= (~0UL << (BITS_PER_LONG - size));
        if (tmp)
            goto found;
pass:
        if (size <= BITS_PER_LONG)
            break;
        size -= BITS_PER_LONG;
        offset = 0;
        p++;
    }
    return result;
found:
    return result - size + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
            unsigned long size, unsigned long offset)
{
    const unsigned long *p = addr + BIT_WORD(offset);
    unsigned long result = size;
    unsigned long tmp;

    if (offset >= size)
        return size;

    size -= (offset & ~(BITS_PER_LONG - 1));
    offset %= BITS_PER_LONG;

    while (1) {
        if (*p == ~0UL)
            goto pass;

        tmp = __reverse_ulong((unsigned char *)p);

        if (offset)
            tmp |= ~0UL << (BITS_PER_LONG - offset);
        if (size < BITS_PER_LONG)
            tmp |= ~0UL >> size;
        if (tmp != ~0UL)
            goto found;
pass:
        if (size <= BITS_PER_LONG)
            break;
        size -= BITS_PER_LONG;
        offset = 0;
        p++;
    }
    return result;
found:
    return result - size + __reverse_ffz(tmp);
}
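/*
 * Illustrative sketch (not part of f2fs): walking runs of set bits with the
 * __find_rev_next_bit()/__find_rev_next_zero_bit() pair, the same pattern
 * add_discard_addrs() uses further down to turn a bitmap into [start, end)
 * extents.
 */
#if 0 /* example only, not built */
static void walk_runs(const unsigned long *map, unsigned long nbits)
{
    unsigned long start = 0, end;

    while ((start = __find_rev_next_bit(map, nbits, start)) < nbits) {
        end = __find_rev_next_zero_bit(map, nbits, start + 1);
        /* [start, end) is one contiguous run of set bits */
        start = end + 1;
    }
}
#endif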
void register_inmem_page(struct inode *inode, struct page *page)
{
    struct f2fs_inode_info *fi = F2FS_I(inode);
    struct inmem_pages *new;

    f2fs_trace_pid(page);

    set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
    SetPagePrivate(page);

    new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

    /* add atomic page indices to the list */
    new->page = page;
    INIT_LIST_HEAD(&new->list);

    /* increase reference count with clean state */
    mutex_lock(&fi->inmem_lock);
    get_page(page);
    list_add_tail(&new->list, &fi->inmem_pages);
    inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
    mutex_unlock(&fi->inmem_lock);

    trace_f2fs_register_inmem_page(page, INMEM);
}

static int __revoke_inmem_pages(struct inode *inode,
                struct list_head *head, bool drop, bool recover)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct inmem_pages *cur, *tmp;
    int err = 0;

    list_for_each_entry_safe(cur, tmp, head, list) {
        struct page *page = cur->page;

        if (drop)
            trace_f2fs_commit_inmem_page(page, INMEM_DROP);

        lock_page(page);

        if (recover) {
            struct dnode_of_data dn;
            struct node_info ni;

            trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);

            set_new_dnode(&dn, inode, NULL, NULL, 0);
            if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
                err = -EAGAIN;
                goto next;
            }
            get_node_info(sbi, dn.nid, &ni);
            f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
                    cur->old_addr, ni.version, true, true);
            f2fs_put_dnode(&dn);
        }
next:
        /* we don't need to invalidate this in the successful status */
        if (drop || recover)
            ClearPageUptodate(page);
        set_page_private(page, 0);
        ClearPagePrivate(page);
        f2fs_put_page(page, 1);

        list_del(&cur->list);
        kmem_cache_free(inmem_entry_slab, cur);
        dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
    }
    return err;
}

void drop_inmem_pages(struct inode *inode)
{
    struct f2fs_inode_info *fi = F2FS_I(inode);

    mutex_lock(&fi->inmem_lock);
    __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
    mutex_unlock(&fi->inmem_lock);

    clear_inode_flag(inode, FI_ATOMIC_FILE);
    stat_dec_atomic_write(inode);
}

void drop_inmem_page(struct inode *inode, struct page *page)
{
    struct f2fs_inode_info *fi = F2FS_I(inode);
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct list_head *head = &fi->inmem_pages;
    struct inmem_pages *cur = NULL;

    f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

    mutex_lock(&fi->inmem_lock);
    list_for_each_entry(cur, head, list) {
        if (cur->page == page)
            break;
    }

    f2fs_bug_on(sbi, !cur || cur->page != page);
    list_del(&cur->list);
    mutex_unlock(&fi->inmem_lock);

    dec_page_count(sbi, F2FS_INMEM_PAGES);
    kmem_cache_free(inmem_entry_slab, cur);

    ClearPageUptodate(page);
    set_page_private(page, 0);
    ClearPagePrivate(page);
    f2fs_put_page(page, 0);

    trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}

static int __commit_inmem_pages(struct inode *inode,
                    struct list_head *revoke_list)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct f2fs_inode_info *fi = F2FS_I(inode);
    struct inmem_pages *cur, *tmp;
    struct f2fs_io_info fio = {
        .sbi = sbi,
        .type = DATA,
        .op = REQ_OP_WRITE,
        .op_flags = REQ_SYNC | REQ_PRIO,
    };
    pgoff_t last_idx = ULONG_MAX;
    int err = 0;

    list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
        struct page *page = cur->page;

        lock_page(page);
        if (page->mapping == inode->i_mapping) {
            trace_f2fs_commit_inmem_page(page, INMEM);

            set_page_dirty(page);
            f2fs_wait_on_page_writeback(page, DATA, true);
            if (clear_page_dirty_for_io(page)) {
                inode_dec_dirty_pages(inode);
                remove_dirty_inode(inode);
            }

            fio.page = page;
            fio.old_blkaddr = NULL_ADDR;
            fio.encrypted_page = NULL;
            fio.need_lock = LOCK_DONE;
            err = do_write_data_page(&fio);
            if (err) {
                unlock_page(page);
                break;
            }

            /* record old blkaddr for revoking */
            cur->old_addr = fio.old_blkaddr;
            last_idx = page->index;
        }
        unlock_page(page);
        list_move_tail(&cur->list, revoke_list);
    }

    if (last_idx != ULONG_MAX)
        f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);

    if (!err)
        __revoke_inmem_pages(inode, revoke_list, false, false);

    return err;
}

int commit_inmem_pages(struct inode *inode)
{
    struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
    struct f2fs_inode_info *fi = F2FS_I(inode);
    struct list_head revoke_list;
    int err;

    INIT_LIST_HEAD(&revoke_list);
    f2fs_balance_fs(sbi, true);
    f2fs_lock_op(sbi);

    set_inode_flag(inode, FI_ATOMIC_COMMIT);

    mutex_lock(&fi->inmem_lock);
    err = __commit_inmem_pages(inode, &revoke_list);
    if (err) {
        int ret;
        /*
         * try to revoke all committed pages, but still we could fail
         * due to no memory or other reason; if that happens, EAGAIN
         * will be returned, which means the transaction is no longer
         * consistent and the caller should use the journal to do the
         * recovery, or rewrite & commit the last transaction. For
         * other error numbers, revoking was done by the filesystem
         * itself.
         */
        ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
        if (ret)
            err = ret;

        /* drop all uncommitted pages */
        __revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
    }
    mutex_unlock(&fi->inmem_lock);

    clear_inode_flag(inode, FI_ATOMIC_COMMIT);

    f2fs_unlock_op(sbi);
    return err;
}
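/*
 * Illustrative sketch (not part of f2fs): the two-phase commit/revoke shape of
 * __commit_inmem_pages()/commit_inmem_pages() above, with a hypothetical
 * write_one() operation. Every successful write is parked on a revoke list so
 * that a mid-stream failure can roll back what has already hit disk.
 */
#if 0 /* example only, not built */
struct op { struct op *next; };

static int commit_all(struct op *pending, struct op **revoke_list)
{
    while (pending) {
        struct op *cur = pending;

        pending = pending->next;
        if (write_one(cur))             /* hypothetical write step */
            return -1;                  /* caller revokes *revoke_list */
        cur->next = *revoke_list;       /* park for possible rollback */
        *revoke_list = cur;
    }
    return 0;
}
#endif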
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
    if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
        f2fs_show_injection_info(FAULT_CHECKPOINT);
        f2fs_stop_checkpoint(sbi, false);
    }
#endif

    /* balance_fs_bg is able to be pending */
    if (need && excess_cached_nats(sbi))
        f2fs_balance_fs_bg(sbi);

    /*
     * We should do GC, or end up with a checkpoint, if there are too
     * many dirty dir/node pages without enough free segments.
     */
    if (has_not_enough_free_secs(sbi, 0, 0)) {
        mutex_lock(&sbi->gc_mutex);
        f2fs_gc(sbi, false, false, NULL_SEGNO);
    }
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
    /* try to shrink extent cache when there is not enough memory */
    if (!available_free_memory(sbi, EXTENT_CACHE))
        f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

    /* check the # of cached NAT entries */
    if (!available_free_memory(sbi, NAT_ENTRIES))
        try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

    if (!available_free_memory(sbi, FREE_NIDS))
        try_to_free_nids(sbi, MAX_FREE_NIDS);
    else
        build_free_nids(sbi, false, false);

    if (!is_idle(sbi) && !excess_dirty_nats(sbi))
        return;

    /* checkpoint is the only way to shrink partial cached entries */
    if (!available_free_memory(sbi, NAT_ENTRIES) ||
            !available_free_memory(sbi, INO_ENTRIES) ||
            excess_prefree_segs(sbi) ||
            excess_dirty_nats(sbi) ||
            f2fs_time_over(sbi, CP_TIME)) {
        if (test_opt(sbi, DATA_FLUSH)) {
            struct blk_plug plug;

            blk_start_plug(&plug);
            sync_dirty_inodes(sbi, FILE_INODE);
            blk_finish_plug(&plug);
        }
        f2fs_sync_fs(sbi->sb, true);
        stat_inc_bg_cp_count(sbi->stat_info);
    }
}

static int __submit_flush_wait(struct f2fs_sb_info *sbi,
                struct block_device *bdev)
{
    struct bio *bio = f2fs_bio_alloc(0);
    int ret;

    bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
    bio->bi_bdev = bdev;
    ret = submit_bio_wait(bio);
    bio_put(bio);

    trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
                test_opt(sbi, FLUSH_MERGE), ret);
    return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
    int ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
    int i;

    if (!sbi->s_ndevs || ret)
        return ret;

    for (i = 1; i < sbi->s_ndevs; i++) {
        ret = __submit_flush_wait(sbi, FDEV(i).bdev);
        if (ret)
            break;
    }
    return ret;
}

static int issue_flush_thread(void *data)
{
    struct f2fs_sb_info *sbi = data;
    struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
    wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
    if (kthread_should_stop())
        return 0;

    if (!llist_empty(&fcc->issue_list)) {
        struct flush_cmd *cmd, *next;
        int ret;

        fcc->dispatch_list = llist_del_all(&fcc->issue_list);
        fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

        ret = submit_flush_wait(sbi);
        atomic_inc(&fcc->issued_flush);

        llist_for_each_entry_safe(cmd, next,
                      fcc->dispatch_list, llnode) {
            cmd->ret = ret;
            complete(&cmd->wait);
        }
        fcc->dispatch_list = NULL;
    }

    wait_event_interruptible(*q,
        kthread_should_stop() || !llist_empty(&fcc->issue_list));
    goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
    struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
    struct flush_cmd cmd;
    int ret;

    if (test_opt(sbi, NOBARRIER))
        return 0;

    if (!test_opt(sbi, FLUSH_MERGE)) {
        ret = submit_flush_wait(sbi);
        atomic_inc(&fcc->issued_flush);
        return ret;
    }

    if (!atomic_read(&fcc->issing_flush)) {
        atomic_inc(&fcc->issing_flush);
        ret = submit_flush_wait(sbi);
        atomic_dec(&fcc->issing_flush);

        atomic_inc(&fcc->issued_flush);
        return ret;
    }

    init_completion(&cmd.wait);

    atomic_inc(&fcc->issing_flush);
    llist_add(&cmd.llnode, &fcc->issue_list);

    if (!fcc->dispatch_list)
        wake_up(&fcc->flush_wait_queue);

    if (fcc->f2fs_issue_flush) {
        wait_for_completion(&cmd.wait);
        atomic_dec(&fcc->issing_flush);
    } else {
        llist_del_all(&fcc->issue_list);
        atomic_set(&fcc->issing_flush, 0);
    }

    return cmd.ret;
}
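/*
 * Illustrative sketch (not part of f2fs): the flush-merge idea behind
 * f2fs_issue_flush() above. Many waiters enqueue on a lock-free list; the
 * single issuer thread submits one PREFLUSH and completes every queued waiter
 * with the shared result, so N fsync() calls can cost one device flush. The
 * fields mirror struct flush_cmd_control as used above, but this is only a
 * shape, not the kernel implementation.
 */
#if 0 /* example only, not built */
struct flush_cmd_like {
    struct llist_node llnode;
    struct completion wait;
    int ret;
};

static int merged_flush(struct flush_cmd_control *fcc)
{
    struct flush_cmd_like cmd;

    init_completion(&cmd.wait);
    llist_add(&cmd.llnode, &fcc->issue_list);   /* lock-free enqueue */
    if (!fcc->dispatch_list)
        wake_up(&fcc->flush_wait_queue);        /* kick the issuer thread */
    wait_for_completion(&cmd.wait);             /* issuer fills cmd.ret */
    return cmd.ret;
}
#endif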
int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
    dev_t dev = sbi->sb->s_bdev->bd_dev;
    struct flush_cmd_control *fcc;
    int err = 0;

    if (SM_I(sbi)->fcc_info) {
        fcc = SM_I(sbi)->fcc_info;
        goto init_thread;
    }

    fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
    if (!fcc)
        return -ENOMEM;
    atomic_set(&fcc->issued_flush, 0);
    atomic_set(&fcc->issing_flush, 0);
    init_waitqueue_head(&fcc->flush_wait_queue);
    init_llist_head(&fcc->issue_list);
    SM_I(sbi)->fcc_info = fcc;
    if (!test_opt(sbi, FLUSH_MERGE))
        return err;

init_thread:
    fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
                "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
    if (IS_ERR(fcc->f2fs_issue_flush)) {
        err = PTR_ERR(fcc->f2fs_issue_flush);
        kfree(fcc);
        SM_I(sbi)->fcc_info = NULL;
        return err;
    }

    return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
    struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

    if (fcc && fcc->f2fs_issue_flush) {
        struct task_struct *flush_thread = fcc->f2fs_issue_flush;

        fcc->f2fs_issue_flush = NULL;
        kthread_stop(flush_thread);
    }
    if (free) {
        kfree(fcc);
        SM_I(sbi)->fcc_info = NULL;
    }
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
        enum dirty_type dirty_type)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

    /* need not be added */
    if (IS_CURSEG(sbi, segno))
        return;

    if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
        dirty_i->nr_dirty[dirty_type]++;

    if (dirty_type == DIRTY) {
        struct seg_entry *sentry = get_seg_entry(sbi, segno);
        enum dirty_type t = sentry->type;

        if (unlikely(t >= DIRTY)) {
            f2fs_bug_on(sbi, 1);
            return;
        }
        if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
            dirty_i->nr_dirty[t]++;
    }
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
        enum dirty_type dirty_type)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

    if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
        dirty_i->nr_dirty[dirty_type]--;

    if (dirty_type == DIRTY) {
        struct seg_entry *sentry = get_seg_entry(sbi, segno);
        enum dirty_type t = sentry->type;

        if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
            dirty_i->nr_dirty[t]--;

        if (get_valid_blocks(sbi, segno, true) == 0)
            clear_bit(GET_SEC_FROM_SEG(sbi, segno),
                        dirty_i->victim_secmap);
    }
}
/*
 * Errors such as -ENOMEM should not occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
    unsigned short valid_blocks;

    if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
        return;

    mutex_lock(&dirty_i->seglist_lock);

    valid_blocks = get_valid_blocks(sbi, segno, false);

    if (valid_blocks == 0) {
        __locate_dirty_segment(sbi, segno, PRE);
        __remove_dirty_segment(sbi, segno, DIRTY);
    } else if (valid_blocks < sbi->blocks_per_seg) {
        __locate_dirty_segment(sbi, segno, DIRTY);
    } else {
        /* Recovery routine with SSR needs this */
        __remove_dirty_segment(sbi, segno, DIRTY);
    }

    mutex_unlock(&dirty_i->seglist_lock);
}

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
        struct block_device *bdev, block_t lstart,
        block_t start, block_t len)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct list_head *pend_list;
    struct discard_cmd *dc;

    f2fs_bug_on(sbi, !len);

    pend_list = &dcc->pend_list[plist_idx(len)];

    dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
    INIT_LIST_HEAD(&dc->list);
    dc->bdev = bdev;
    dc->lstart = lstart;
    dc->start = start;
    dc->len = len;
    dc->ref = 0;
    dc->state = D_PREP;
    dc->error = 0;
    init_completion(&dc->wait);
    list_add_tail(&dc->list, pend_list);
    atomic_inc(&dcc->discard_cmd_cnt);
    dcc->undiscard_blks += len;

    return dc;
}

static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
                struct block_device *bdev, block_t lstart,
                block_t start, block_t len,
                struct rb_node *parent, struct rb_node **p)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct discard_cmd *dc;

    dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

    rb_link_node(&dc->rb_node, parent, p);
    rb_insert_color(&dc->rb_node, &dcc->root);

    return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
                            struct discard_cmd *dc)
{
    if (dc->state == D_DONE)
        atomic_dec(&dcc->issing_discard);

    list_del(&dc->list);
    rb_erase(&dc->rb_node, &dcc->root);
    dcc->undiscard_blks -= dc->len;

    kmem_cache_free(discard_cmd_slab, dc);

    atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
                            struct discard_cmd *dc)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

    if (dc->error == -EOPNOTSUPP)
        dc->error = 0;

    if (dc->error)
        f2fs_msg(sbi->sb, KERN_INFO,
            "Issue discard(%u, %u, %u) failed, ret: %d",
            dc->lstart, dc->start, dc->len, dc->error);
    __detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
    struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;

    dc->error = bio->bi_error;
    dc->state = D_DONE;
    complete_all(&dc->wait);
    bio_put(bio);
}

/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
                struct discard_cmd *dc)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct bio *bio = NULL;

    if (dc->state != D_PREP)
        return;

    trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);

    dc->error = __blkdev_issue_discard(dc->bdev,
                SECTOR_FROM_BLOCK(dc->start),
                SECTOR_FROM_BLOCK(dc->len),
                GFP_NOFS, 0, &bio);
    if (!dc->error) {
        /* should keep before submission to avoid D_DONE right away */
        dc->state = D_SUBMIT;
        atomic_inc(&dcc->issued_discard);
        atomic_inc(&dcc->issing_discard);
        if (bio) {
            bio->bi_private = dc;
            bio->bi_end_io = f2fs_submit_discard_endio;
            bio->bi_opf |= REQ_SYNC;
            submit_bio(bio);
            list_move_tail(&dc->list, &dcc->wait_list);
        }
    } else {
        __remove_discard_cmd(sbi, dc);
    }
}
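/*
 * Illustrative sketch (not part of f2fs): the discard command lifecycle that
 * __submit_discard_cmd()/f2fs_submit_discard_endio() above implement:
 *
 *   D_PREP --__submit_discard_cmd()--> D_SUBMIT --bio endio--> D_DONE
 *
 * A command is only mutable (punchable, mergeable) while it is still D_PREP;
 * once submitted, waiters block on dc->wait until the endio marks it D_DONE.
 */
#if 0 /* example only, not built */
enum dc_state_like { PREP, SUBMIT, DONE };

static int try_submit(enum dc_state_like *st)
{
    if (*st != PREP)        /* in flight or finished: hands off */
        return -1;
    *st = SUBMIT;           /* must flip before the bio can complete */
    return 0;
}
#endif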
static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
                struct block_device *bdev, block_t lstart,
                block_t start, block_t len,
                struct rb_node **insert_p,
                struct rb_node *insert_parent)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct rb_node **p = &dcc->root.rb_node;
    struct rb_node *parent = NULL;
    struct discard_cmd *dc = NULL;

    if (insert_p && insert_parent) {
        parent = insert_parent;
        p = insert_p;
        goto do_insert;
    }

    p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
do_insert:
    dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
    if (!dc)
        return NULL;

    return dc;
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
                        struct discard_cmd *dc)
{
    list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
                struct discard_cmd *dc, block_t blkaddr)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct discard_info di = dc->di;
    bool modified = false;

    if (dc->state == D_DONE || dc->len == 1) {
        __remove_discard_cmd(sbi, dc);
        return;
    }

    dcc->undiscard_blks -= di.len;

    if (blkaddr > di.lstart) {
        dc->len = blkaddr - dc->lstart;
        dcc->undiscard_blks += dc->len;
        __relocate_discard_cmd(dcc, dc);
        modified = true;
    }

    if (blkaddr < di.lstart + di.len - 1) {
        if (modified) {
            __insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
                    di.start + blkaddr + 1 - di.lstart,
                    di.lstart + di.len - 1 - blkaddr,
                    NULL, NULL);
        } else {
            dc->lstart++;
            dc->len--;
            dc->start++;
            dcc->undiscard_blks += dc->len;
            __relocate_discard_cmd(dcc, dc);
        }
    }
}
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
                struct block_device *bdev, block_t lstart,
                block_t start, block_t len)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
    struct discard_cmd *dc;
    struct discard_info di = {0};
    struct rb_node **insert_p = NULL, *insert_parent = NULL;
    block_t end = lstart + len;

    mutex_lock(&dcc->cmd_lock);

    dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root, NULL,
                    lstart,
                    (struct rb_entry **)&prev_dc,
                    (struct rb_entry **)&next_dc,
                    &insert_p, &insert_parent, true);
    if (dc)
        prev_dc = dc;

    if (!prev_dc) {
        di.lstart = lstart;
        di.len = next_dc ? next_dc->lstart - lstart : len;
        di.len = min(di.len, len);
        di.start = start;
    }

    while (1) {
        struct rb_node *node;
        bool merged = false;
        struct discard_cmd *tdc = NULL;

        if (prev_dc) {
            di.lstart = prev_dc->lstart + prev_dc->len;
            if (di.lstart < lstart)
                di.lstart = lstart;
            if (di.lstart >= end)
                break;

            if (!next_dc || next_dc->lstart > end)
                di.len = end - di.lstart;
            else
                di.len = next_dc->lstart - di.lstart;
            di.start = start + di.lstart - lstart;
        }

        if (!di.len)
            goto next;

        if (prev_dc && prev_dc->state == D_PREP &&
            prev_dc->bdev == bdev &&
            __is_discard_back_mergeable(&di, &prev_dc->di)) {
            prev_dc->di.len += di.len;
            dcc->undiscard_blks += di.len;
            __relocate_discard_cmd(dcc, prev_dc);
            di = prev_dc->di;
            tdc = prev_dc;
            merged = true;
        }

        if (next_dc && next_dc->state == D_PREP &&
            next_dc->bdev == bdev &&
            __is_discard_front_mergeable(&di, &next_dc->di)) {
            next_dc->di.lstart = di.lstart;
            next_dc->di.len += di.len;
            next_dc->di.start = di.start;
            dcc->undiscard_blks += di.len;
            __relocate_discard_cmd(dcc, next_dc);
            if (tdc)
                __remove_discard_cmd(sbi, tdc);
            merged = true;
        }

        if (!merged) {
            __insert_discard_tree(sbi, bdev, di.lstart, di.start,
                            di.len, NULL, NULL);
        }
next:
        prev_dc = next_dc;
        if (!prev_dc)
            break;

        node = rb_next(&prev_dc->rb_node);
        next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
    }

    mutex_unlock(&dcc->cmd_lock);
}
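/*
 * Illustrative sketch (not part of f2fs): the adjacency test behind the
 * back/front merges in __update_discard_tree_range() above, assuming that
 * __is_discard_back_mergeable()/__is_discard_front_mergeable() reduce to the
 * "ranges touch end-to-end" checks written out here.
 */
#if 0 /* example only, not built */
struct extent_like { unsigned long lstart, len; };

/* prev ends exactly where cur begins: [prev][cur] -> one extent */
static int back_mergeable(const struct extent_like *cur,
                          const struct extent_like *prev)
{
    return prev->lstart + prev->len == cur->lstart;
}

/* cur ends exactly where next begins: [cur][next] -> one extent */
static int front_mergeable(const struct extent_like *cur,
                           const struct extent_like *next)
{
    return cur->lstart + cur->len == next->lstart;
}
#endif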
static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
        struct block_device *bdev, block_t blkstart, block_t blklen)
{
    block_t lblkstart = blkstart;

    trace_f2fs_queue_discard(bdev, blkstart, blklen);

    if (sbi->s_ndevs) {
        int devi = f2fs_target_device_index(sbi, blkstart);

        blkstart -= FDEV(devi).start_blk;
    }
    __update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
    return 0;
}

static void __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct list_head *pend_list;
    struct discard_cmd *dc, *tmp;
    struct blk_plug plug;
    int i, iter = 0;

    mutex_lock(&dcc->cmd_lock);
    f2fs_bug_on(sbi,
        !__check_rb_tree_consistence(sbi, &dcc->root));
    blk_start_plug(&plug);
    for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
        pend_list = &dcc->pend_list[i];
        list_for_each_entry_safe(dc, tmp, pend_list, list) {
            f2fs_bug_on(sbi, dc->state != D_PREP);

            if (!issue_cond || is_idle(sbi))
                __submit_discard_cmd(sbi, dc);
            if (issue_cond && iter++ > DISCARD_ISSUE_RATE)
                goto out;
        }
    }
out:
    blk_finish_plug(&plug);
    mutex_unlock(&dcc->cmd_lock);
}

static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct list_head *wait_list = &(dcc->wait_list);
    struct discard_cmd *dc, *tmp;
    bool need_wait;

next:
    need_wait = false;

    mutex_lock(&dcc->cmd_lock);
    list_for_each_entry_safe(dc, tmp, wait_list, list) {
        if (!wait_cond || (dc->state == D_DONE && !dc->ref)) {
            wait_for_completion_io(&dc->wait);
            __remove_discard_cmd(sbi, dc);
        } else {
            dc->ref++;
            need_wait = true;
            break;
        }
    }
    mutex_unlock(&dcc->cmd_lock);

    if (need_wait) {
        wait_for_completion_io(&dc->wait);
        mutex_lock(&dcc->cmd_lock);
        f2fs_bug_on(sbi, dc->state != D_DONE);
        dc->ref--;
        if (!dc->ref)
            __remove_discard_cmd(sbi, dc);
        mutex_unlock(&dcc->cmd_lock);
        goto next;
    }
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    struct discard_cmd *dc;
    bool need_wait = false;

    mutex_lock(&dcc->cmd_lock);
    dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
    if (dc) {
        if (dc->state == D_PREP) {
            __punch_discard_cmd(sbi, dc, blkaddr);
        } else {
            dc->ref++;
            need_wait = true;
        }
    }
    mutex_unlock(&dcc->cmd_lock);

    if (need_wait) {
        wait_for_completion_io(&dc->wait);
        mutex_lock(&dcc->cmd_lock);
        f2fs_bug_on(sbi, dc->state != D_DONE);
        dc->ref--;
        if (!dc->ref)
            __remove_discard_cmd(sbi, dc);
        mutex_unlock(&dcc->cmd_lock);
    }
}

/* This comes from f2fs_put_super */
void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
    __issue_discard_cmd(sbi, false);
    __wait_discard_cmd(sbi, false);
}

static int issue_discard_thread(void *data)
{
    struct f2fs_sb_info *sbi = data;
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
    wait_queue_head_t *q = &dcc->discard_wait_queue;

    set_freezable();

    do {
        wait_event_interruptible(*q, kthread_should_stop() ||
                    freezing(current) ||
                    atomic_read(&dcc->discard_cmd_cnt));
        if (try_to_freeze())
            continue;
        if (kthread_should_stop())
            return 0;

        __issue_discard_cmd(sbi, true);
        __wait_discard_cmd(sbi, true);

        congestion_wait(BLK_RW_SYNC, HZ/50);
    } while (!kthread_should_stop());
    return 0;
}
#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
        struct block_device *bdev, block_t blkstart, block_t blklen)
{
    sector_t sector, nr_sects;
    block_t lblkstart = blkstart;
    int devi = 0;

    if (sbi->s_ndevs) {
        devi = f2fs_target_device_index(sbi, blkstart);
        blkstart -= FDEV(devi).start_blk;
    }

    /*
     * We need to know the type of the zone: for conventional zones,
     * use regular discard if the drive supports it. For sequential
     * zones, reset the zone write pointer.
     */
    switch (get_blkz_type(sbi, bdev, blkstart)) {

    case BLK_ZONE_TYPE_CONVENTIONAL:
        if (!blk_queue_discard(bdev_get_queue(bdev)))
            return 0;
        return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
    case BLK_ZONE_TYPE_SEQWRITE_REQ:
    case BLK_ZONE_TYPE_SEQWRITE_PREF:
        sector = SECTOR_FROM_BLOCK(blkstart);
        nr_sects = SECTOR_FROM_BLOCK(blklen);

        if (sector & (bdev_zone_sectors(bdev) - 1) ||
                nr_sects != bdev_zone_sectors(bdev)) {
            f2fs_msg(sbi->sb, KERN_INFO,
                "(%d) %s: Unaligned discard attempted (block %x + %x)",
                devi, sbi->s_ndevs ? FDEV(devi).path : "",
                blkstart, blklen);
            return -EIO;
        }
        trace_f2fs_issue_reset_zone(bdev, blkstart);
        return blkdev_reset_zones(bdev, sector,
                      nr_sects, GFP_NOFS);
    default:
        /* Unknown zone type: broken device? */
        return -EIO;
    }
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
        struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
    if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
                bdev_zoned_model(bdev) != BLK_ZONED_NONE)
        return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
    return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
                block_t blkstart, block_t blklen)
{
    sector_t start = blkstart, len = 0;
    struct block_device *bdev;
    struct seg_entry *se;
    unsigned int offset;
    block_t i;
    int err = 0;

    bdev = f2fs_target_device(sbi, blkstart, NULL);

    for (i = blkstart; i < blkstart + blklen; i++, len++) {
        if (i != start) {
            struct block_device *bdev2 =
                f2fs_target_device(sbi, i, NULL);

            if (bdev2 != bdev) {
                err = __issue_discard_async(sbi, bdev,
                        start, len);
                if (err)
                    return err;
                bdev = bdev2;
                start = i;
                len = 0;
            }
        }

        se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
        offset = GET_BLKOFF_FROM_SEG0(sbi, i);

        if (!f2fs_test_and_set_bit(offset, se->discard_map))
            sbi->discard_blks--;
    }

    if (len)
        err = __issue_discard_async(sbi, bdev, start, len);
    return err;
}

static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
                            bool check_only)
{
    int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
    int max_blocks = sbi->blocks_per_seg;
    struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
    unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
    unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
    unsigned long *discard_map = (unsigned long *)se->discard_map;
    unsigned long *dmap = SIT_I(sbi)->tmp_map;
    unsigned int start = 0, end = -1;
    bool force = (cpc->reason & CP_DISCARD);
    struct discard_entry *de = NULL;
    struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
    int i;

    if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
        return false;

    if (!force) {
        if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
            SM_I(sbi)->dcc_info->nr_discards >=
                SM_I(sbi)->dcc_info->max_discards)
            return false;
    }

    /* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
    for (i = 0; i < entries; i++)
        dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
                (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

    while (force || SM_I(sbi)->dcc_info->nr_discards <=
                SM_I(sbi)->dcc_info->max_discards) {
        start = __find_rev_next_bit(dmap, max_blocks, end + 1);
        if (start >= max_blocks)
            break;

        end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
        if (force && start && end != max_blocks
                    && (end - start) < cpc->trim_minlen)
            continue;

        if (check_only)
            return true;

        if (!de) {
            de = f2fs_kmem_cache_alloc(discard_entry_slab,
                                GFP_F2FS_ZERO);
            de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
            list_add_tail(&de->list, head);
        }

        for (i = start; i < end; i++)
            __set_bit_le(i, (void *)de->discard_map);

        SM_I(sbi)->dcc_info->nr_discards += end - start;
    }
    return false;
}

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
    struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
    struct discard_entry *entry, *this;

    /* drop caches */
    list_for_each_entry_safe(entry, this, head, list) {
        list_del(&entry->list);
        kmem_cache_free(discard_entry_slab, entry);
    }
}
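/*
 * Illustrative sketch (not part of f2fs): the candidate bitmaps computed at
 * the top of add_discard_addrs() above, on one 8-bit slice. "force" (FITRIM)
 * considers every block that is free in both the checkpoint and discard maps;
 * the normal path only takes blocks that became invalid since the checkpoint:
 *
 *   cur_map  = 0b00110100   (valid now)
 *   ckpt_map = 0b01110000   (valid at the last checkpoint)
 *
 *   (cur ^ ckpt) & ckpt = 0b01000100 & 0b01110000 = 0b01000000
 *   -> exactly the blocks valid at checkpoint time but freed since.
 */
#if 0 /* example only, not built */
static unsigned long discard_candidates(unsigned long cur, unsigned long ckpt,
                                        unsigned long discard, int force)
{
    return force ? ~ckpt & ~discard     /* FITRIM: never-discarded free space */
                 : (cur ^ ckpt) & ckpt; /* checkpoint-valid blocks freed since */
}
#endif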
/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
    unsigned int segno;

    mutex_lock(&dirty_i->seglist_lock);
    for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
        __set_test_and_free(sbi, segno);
    mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
    struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
    struct discard_entry *entry, *this;
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
    unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
    unsigned int start = 0, end = -1;
    unsigned int secno, start_segno;
    bool force = (cpc->reason & CP_DISCARD);

    mutex_lock(&dirty_i->seglist_lock);

    while (1) {
        int i;

        start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
        if (start >= MAIN_SEGS(sbi))
            break;
        end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
                                start + 1);

        for (i = start; i < end; i++)
            clear_bit(i, prefree_map);

        dirty_i->nr_dirty[PRE] -= end - start;

        if (!test_opt(sbi, DISCARD))
            continue;

        if (force && start >= cpc->trim_start &&
                    (end - 1) <= cpc->trim_end)
            continue;

        if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
            f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
                (end - start) << sbi->log_blocks_per_seg);
            continue;
        }
next:
        secno = GET_SEC_FROM_SEG(sbi, start);
        start_segno = GET_SEG_FROM_SEC(sbi, secno);
        if (!IS_CURSEC(sbi, secno) &&
            !get_valid_blocks(sbi, start, true))
            f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
                sbi->segs_per_sec << sbi->log_blocks_per_seg);

        start = start_segno + sbi->segs_per_sec;
        if (start < end)
            goto next;
        else
            end = start - 1;
    }
    mutex_unlock(&dirty_i->seglist_lock);

    /* send small discards */
    list_for_each_entry_safe(entry, this, head, list) {
        unsigned int cur_pos = 0, next_pos, len, total_len = 0;
        bool is_valid = test_bit_le(0, entry->discard_map);

find_next:
        if (is_valid) {
            next_pos = find_next_zero_bit_le(entry->discard_map,
                    sbi->blocks_per_seg, cur_pos);
            len = next_pos - cur_pos;

            if (f2fs_sb_mounted_blkzoned(sbi->sb) ||
                (force && len < cpc->trim_minlen))
                goto skip;

            f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
                                    len);
            cpc->trimmed += len;
            total_len += len;
        } else {
            next_pos = find_next_bit_le(entry->discard_map,
                    sbi->blocks_per_seg, cur_pos);
        }
skip:
        cur_pos = next_pos;
        is_valid = !is_valid;

        if (cur_pos < sbi->blocks_per_seg)
            goto find_next;

        list_del(&entry->list);
        SM_I(sbi)->dcc_info->nr_discards -= total_len;
        kmem_cache_free(discard_entry_slab, entry);
    }

    wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
}
static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
    dev_t dev = sbi->sb->s_bdev->bd_dev;
    struct discard_cmd_control *dcc;
    int err = 0, i;

    if (SM_I(sbi)->dcc_info) {
        dcc = SM_I(sbi)->dcc_info;
        goto init_thread;
    }

    dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
    if (!dcc)
        return -ENOMEM;

    INIT_LIST_HEAD(&dcc->entry_list);
    for (i = 0; i < MAX_PLIST_NUM; i++)
        INIT_LIST_HEAD(&dcc->pend_list[i]);
    INIT_LIST_HEAD(&dcc->wait_list);
    mutex_init(&dcc->cmd_lock);
    atomic_set(&dcc->issued_discard, 0);
    atomic_set(&dcc->issing_discard, 0);
    atomic_set(&dcc->discard_cmd_cnt, 0);
    dcc->nr_discards = 0;
    dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
    dcc->undiscard_blks = 0;
    dcc->root = RB_ROOT;

    init_waitqueue_head(&dcc->discard_wait_queue);
    SM_I(sbi)->dcc_info = dcc;
init_thread:
    dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
                "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
    if (IS_ERR(dcc->f2fs_issue_discard)) {
        err = PTR_ERR(dcc->f2fs_issue_discard);
        kfree(dcc);
        SM_I(sbi)->dcc_info = NULL;
        return err;
    }

    return err;
}

static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
{
    struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

    if (!dcc)
        return;

    if (dcc->f2fs_issue_discard) {
        struct task_struct *discard_thread = dcc->f2fs_issue_discard;

        dcc->f2fs_issue_discard = NULL;
        kthread_stop(discard_thread);
    }

    kfree(dcc);
    SM_I(sbi)->dcc_info = NULL;
}

static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
    struct sit_info *sit_i = SIT_I(sbi);

    if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
        sit_i->dirty_sentries++;
        return false;
    }

    return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
                    unsigned int segno, int modified)
{
    struct seg_entry *se = get_seg_entry(sbi, segno);

    se->type = type;
    if (modified)
        __mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
    struct seg_entry *se;
    unsigned int segno, offset;
    long int new_vblocks;

    segno = GET_SEGNO(sbi, blkaddr);

    se = get_seg_entry(sbi, segno);
    new_vblocks = se->valid_blocks + del;
    offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

    f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
                (new_vblocks > sbi->blocks_per_seg)));

    se->valid_blocks = new_vblocks;
    se->mtime = get_mtime(sbi);
    SIT_I(sbi)->max_mtime = se->mtime;

    /* Update valid block bitmap */
    if (del > 0) {
        if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
            if (f2fs_test_and_set_bit(offset,
                        se->cur_valid_map_mir))
                f2fs_bug_on(sbi, 1);
            else
                WARN_ON(1);
#else
            f2fs_bug_on(sbi, 1);
#endif
        }
        if (f2fs_discard_en(sbi) &&
            !f2fs_test_and_set_bit(offset, se->discard_map))
            sbi->discard_blks--;

        /* don't overwrite by SSR to keep node chain */
        if (se->type == CURSEG_WARM_NODE) {
            if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
                se->ckpt_valid_blocks++;
        }
    } else {
        if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
            if (!f2fs_test_and_clear_bit(offset,
                        se->cur_valid_map_mir))
                f2fs_bug_on(sbi, 1);
            else
                WARN_ON(1);
#else
            f2fs_bug_on(sbi, 1);
#endif
        }
        if (f2fs_discard_en(sbi) &&
            f2fs_test_and_clear_bit(offset, se->discard_map))
            sbi->discard_blks++;
    }
    if (!f2fs_test_bit(offset, se->ckpt_valid_map))
        se->ckpt_valid_blocks += del;

    __mark_sit_entry_dirty(sbi, segno);

    /* update total number of valid blocks to be written in ckpt area */
    SIT_I(sbi)->written_valid_blocks += del;

    if (sbi->segs_per_sec > 1)
        get_sec_entry(sbi, segno)->valid_blocks += del;
}

void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
    update_sit_entry(sbi, new, 1);
    if (GET_SEGNO(sbi, old) != NULL_SEGNO)
        update_sit_entry(sbi, old, -1);

    locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
    locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
    unsigned int segno = GET_SEGNO(sbi, addr);
    struct sit_info *sit_i = SIT_I(sbi);

    f2fs_bug_on(sbi, addr == NULL_ADDR);
    if (addr == NEW_ADDR)
        return;

    /* add it into sit main buffer */
    mutex_lock(&sit_i->sentry_lock);

    update_sit_entry(sbi, addr, -1);

    /* add it into dirty seglist */
    locate_dirty_segment(sbi, segno);

    mutex_unlock(&sit_i->sentry_lock);
}
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
    struct sit_info *sit_i = SIT_I(sbi);
    unsigned int segno, offset;
    struct seg_entry *se;
    bool is_cp = false;

    if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
        return true;

    mutex_lock(&sit_i->sentry_lock);

    segno = GET_SEGNO(sbi, blkaddr);
    se = get_seg_entry(sbi, segno);
    offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

    if (f2fs_test_bit(offset, se->ckpt_valid_map))
        is_cp = true;

    mutex_unlock(&sit_i->sentry_lock);

    return is_cp;
}

/*
 * This function should be called with curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
                    struct f2fs_summary *sum)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);
    void *addr = curseg->sum_blk;

    addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
    memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
    int valid_sum_count = 0;
    int i, sum_in_page;

    for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
        if (sbi->ckpt->alloc_type[i] == SSR)
            valid_sum_count += sbi->blocks_per_seg;
        else {
            if (for_ra)
                valid_sum_count += le16_to_cpu(
                    F2FS_CKPT(sbi)->cur_data_blkoff[i]);
            else
                valid_sum_count += curseg_blkoff(sbi, i);
        }
    }

    sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
            SUM_FOOTER_SIZE) / SUMMARY_SIZE;
    if (valid_sum_count <= sum_in_page)
        return 1;
    else if ((valid_sum_count - sum_in_page) <=
        (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
        return 2;
    return 3;
}
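/*
 * Worked example (constants assumed, not defined in this file): with a 4KB
 * block, SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5 and SUM_JOURNAL_SIZE = 507,
 * the first compacted page holds
 *
 *   sum_in_page = (4096 - 2*507 - 5) / 7 = 3077 / 7 = 439 entries,
 *
 * and each following page holds (4096 - 5) / 7 = 584, which is why
 * npages_for_summary_flush() above can only ever answer 1, 2 or 3 for the
 * three data logs (at most 3 * 512 = 1536 entries per checkpoint).
 */
#if 0 /* example only, not built */
#include <assert.h>

static void check_sum_math(void)
{
    int sum_in_page = (4096 - 2 * 507 - 5) / 7;

    assert(sum_in_page == 439);
    assert(439 + 2 * 584 >= 3 * 512);   /* three pages always suffice */
}
#endif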
/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
    return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
    struct page *page = grab_meta_page(sbi, blk_addr);
    void *dst = page_address(page);

    if (src)
        memcpy(dst, src, PAGE_SIZE);
    else
        memset(dst, 0, PAGE_SIZE);
    set_page_dirty(page);
    f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
            struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
    update_meta_page(sbi, (void *)sum_blk, blk_addr);
}

static void write_current_sum_page(struct f2fs_sb_info *sbi,
                        int type, block_t blk_addr)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);
    struct page *page = grab_meta_page(sbi, blk_addr);
    struct f2fs_summary_block *src = curseg->sum_blk;
    struct f2fs_summary_block *dst;

    dst = (struct f2fs_summary_block *)page_address(page);

    mutex_lock(&curseg->curseg_mutex);

    down_read(&curseg->journal_rwsem);
    memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
    up_read(&curseg->journal_rwsem);

    memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
    memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

    mutex_unlock(&curseg->curseg_mutex);

    set_page_dirty(page);
    f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);
    unsigned int segno = curseg->segno + 1;
    struct free_segmap_info *free_i = FREE_I(sbi);

    if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
        return !test_bit(segno, free_i->free_segmap);
    return 0;
}

/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function should always succeed, otherwise BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
            unsigned int *newseg, bool new_sec, int dir)
{
    struct free_segmap_info *free_i = FREE_I(sbi);
    unsigned int segno, secno, zoneno;
    unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
    unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
    unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
    unsigned int left_start = hint;
    bool init = true;
    int go_left = 0;
    int i;

    spin_lock(&free_i->segmap_lock);

    if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
        segno = find_next_zero_bit(free_i->free_segmap,
            GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
        if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
            goto got_it;
    }
find_other_zone:
    secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
    if (secno >= MAIN_SECS(sbi)) {
        if (dir == ALLOC_RIGHT) {
            secno = find_next_zero_bit(free_i->free_secmap,
                            MAIN_SECS(sbi), 0);
            f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
        } else {
            go_left = 1;
            left_start = hint - 1;
        }
    }
    if (go_left == 0)
        goto skip_left;

    while (test_bit(left_start, free_i->free_secmap)) {
        if (left_start > 0) {
            left_start--;
            continue;
        }
        left_start = find_next_zero_bit(free_i->free_secmap,
                            MAIN_SECS(sbi), 0);
        f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
        break;
    }
    secno = left_start;
skip_left:
    hint = secno;
    segno = GET_SEG_FROM_SEC(sbi, secno);
    zoneno = GET_ZONE_FROM_SEC(sbi, secno);

    /* give up on finding another zone */
    if (!init)
        goto got_it;
    if (sbi->secs_per_zone == 1)
        goto got_it;
    if (zoneno == old_zoneno)
        goto got_it;
    if (dir == ALLOC_LEFT) {
        if (!go_left && zoneno + 1 >= total_zones)
            goto got_it;
        if (go_left && zoneno == 0)
            goto got_it;
    }
    for (i = 0; i < NR_CURSEG_TYPE; i++)
        if (CURSEG_I(sbi, i)->zone == zoneno)
            break;

    if (i < NR_CURSEG_TYPE) {
        /* zone is in use, try another */
        if (go_left)
            hint = zoneno * sbi->secs_per_zone - 1;
        else if (zoneno + 1 >= total_zones)
            hint = 0;
        else
            hint = (zoneno + 1) * sbi->secs_per_zone;
        init = false;
        goto find_other_zone;
    }
got_it:
    /* set it as dirty segment in free segmap */
    f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
    __set_inuse(sbi, segno);
    *newseg = segno;
    spin_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);
    struct summary_footer *sum_footer;

    curseg->segno = curseg->next_segno;
    curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
    curseg->next_blkoff = 0;
    curseg->next_segno = NULL_SEGNO;

    sum_footer = &(curseg->sum_blk->footer);
    memset(sum_footer, 0, sizeof(struct summary_footer));
    if (IS_DATASEG(type))
        SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
    if (IS_NODESEG(type))
        SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
    __set_sit_entry_type(sbi, type, curseg->segno, modified);
}

static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
    /* if segs_per_sec is larger than 1, we need to keep the original policy */
    if (sbi->segs_per_sec != 1)
        return CURSEG_I(sbi, type)->segno;

    if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
        return 0;

    if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
        return SIT_I(sbi)->last_victim[ALLOC_NEXT];
    return CURSEG_I(sbi, type)->segno;
}
/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);
    unsigned int segno = curseg->segno;
    int dir = ALLOC_LEFT;

    write_sum_page(sbi, curseg->sum_blk,
                GET_SUM_BLOCK(sbi, segno));
    if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
        dir = ALLOC_RIGHT;

    if (test_opt(sbi, NOHEAP))
        dir = ALLOC_RIGHT;

    segno = __get_next_segno(sbi, type);
    get_new_segment(sbi, &segno, new_sec, dir);
    curseg->next_segno = segno;
    reset_curseg(sbi, type, 1);
    curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
            struct curseg_info *seg, block_t start)
{
    struct seg_entry *se = get_seg_entry(sbi, seg->segno);
    int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
    unsigned long *target_map = SIT_I(sbi)->tmp_map;
    unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
    unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
    int i, pos;

    for (i = 0; i < entries; i++)
        target_map[i] = ckpt_map[i] | cur_map[i];

    pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

    seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is just
 * obtained by increasing the current block offset. However, if a segment is
 * written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
                struct curseg_info *seg)
{
    if (seg->alloc_type == SSR)
        __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
    else
        seg->next_blkoff++;
}
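/*
 * Illustrative sketch (not part of f2fs): why __next_free_blkoff() above ORs
 * the checkpoint and current bitmaps. A block is reusable for SSR only if it
 * is free in *both* views, so the first zero of (ckpt | cur) is the next
 * writable offset; LFS, by contrast, just increments next_blkoff. This demo
 * scans a plain LSB-first word, unlike the reversed maps used above.
 */
#if 0 /* example only, not built */
static int next_ssr_offset(unsigned long ckpt_map, unsigned long cur_map,
                           int nbits, int start)
{
    unsigned long busy = ckpt_map | cur_map;    /* used in either view */
    int pos;

    for (pos = start; pos < nbits; pos++)
        if (!(busy & (1ul << pos)))
            return pos;
    return nbits;
}
#endif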
/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it should recover the existing segment information of valid
 * blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
    struct curseg_info *curseg = CURSEG_I(sbi, type);
    unsigned int new_segno = curseg->next_segno;
    struct f2fs_summary_block *sum_node;
    struct page *sum_page;

    write_sum_page(sbi, curseg->sum_blk,
                GET_SUM_BLOCK(sbi, curseg->segno));
    __set_test_and_inuse(sbi, new_segno);

    mutex_lock(&dirty_i->seglist_lock);
    __remove_dirty_segment(sbi, new_segno, PRE);
    __remove_dirty_segment(sbi, new_segno, DIRTY);
    mutex_unlock(&dirty_i->seglist_lock);

    reset_curseg(sbi, type, 1);
    curseg->alloc_type = SSR;
    __next_free_blkoff(sbi, curseg, 0);

    if (reuse) {
        sum_page = get_sum_page(sbi, new_segno);
        sum_node = (struct f2fs_summary_block *)page_address(sum_page);
        memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
        f2fs_put_page(sum_page, 1);
    }
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);
    const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
    unsigned segno = NULL_SEGNO;
    int i, cnt;
    bool reversed = false;

    /* need_SSR() already forces to do this */
    if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
        curseg->next_segno = segno;
        return 1;
    }

    /* For node segments, let's do SSR more intensively */
    if (IS_NODESEG(type)) {
        if (type >= CURSEG_WARM_NODE) {
            reversed = true;
            i = CURSEG_COLD_NODE;
        } else {
            i = CURSEG_HOT_NODE;
        }
        cnt = NR_CURSEG_NODE_TYPE;
    } else {
        if (type >= CURSEG_WARM_DATA) {
            reversed = true;
            i = CURSEG_COLD_DATA;
        } else {
            i = CURSEG_HOT_DATA;
        }
        cnt = NR_CURSEG_DATA_TYPE;
    }

    for (; cnt-- > 0; reversed ? i-- : i++) {
        if (i == type)
            continue;
        if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
            curseg->next_segno = segno;
            return 1;
        }
    }
    return 0;
}

/*
 * flush out current segment and replace it with new segment
 * This function should always succeed, otherwise BUG
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
                        int type, bool force)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);

    if (force)
        new_curseg(sbi, type, true);
    else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
                    type == CURSEG_WARM_NODE)
        new_curseg(sbi, type, false);
    else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
        new_curseg(sbi, type, false);
    else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
        change_curseg(sbi, type, true);
    else
        new_curseg(sbi, type, false);

    stat_inc_seg_type(sbi, curseg);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
    struct curseg_info *curseg;
    unsigned int old_segno;
    int i;

    for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
        curseg = CURSEG_I(sbi, i);
        old_segno = curseg->segno;
        SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
        locate_dirty_segment(sbi, old_segno);
    }
}

static const struct segment_allocation default_salloc_ops = {
    .allocate_segment = allocate_segment_by_default,
};

bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
    __u64 trim_start = cpc->trim_start;
    bool has_candidate = false;

    mutex_lock(&SIT_I(sbi)->sentry_lock);
    for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
        if (add_discard_addrs(sbi, cpc, true)) {
            has_candidate = true;
            break;
        }
    }
    mutex_unlock(&SIT_I(sbi)->sentry_lock);

    cpc->trim_start = trim_start;
    return has_candidate;
}
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
    __u64 start = F2FS_BYTES_TO_BLK(range->start);
    __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
    unsigned int start_segno, end_segno;
    struct cp_control cpc;
    int err = 0;

    if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
        return -EINVAL;

    cpc.trimmed = 0;
    if (end <= MAIN_BLKADDR(sbi))
        goto out;

    if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
        f2fs_msg(sbi->sb, KERN_WARNING,
            "Found FS corruption, run fsck to fix.");
        goto out;
    }

    /* start/end segment number in main_area */
    start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
    end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
                        GET_SEGNO(sbi, end);
    cpc.reason = CP_DISCARD;
    cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));

    /* do checkpoint to issue discard commands safely */
    for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
        cpc.trim_start = start_segno;

        if (sbi->discard_blks == 0)
            break;
        else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
            cpc.trim_end = end_segno;
        else
            cpc.trim_end = min_t(unsigned int,
                rounddown(start_segno +
                BATCHED_TRIM_SEGMENTS(sbi),
                sbi->segs_per_sec) - 1, end_segno);

        mutex_lock(&sbi->gc_mutex);
        err = write_checkpoint(sbi, &cpc);
        mutex_unlock(&sbi->gc_mutex);
        if (err)
            break;

        schedule();
    }
out:
    range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
    return err;
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
    struct curseg_info *curseg = CURSEG_I(sbi, type);

    if (curseg->next_blkoff < sbi->blocks_per_seg)
        return true;
    return false;
}

static int __get_segment_type_2(struct f2fs_io_info *fio)
{
    if (fio->type == DATA)
        return CURSEG_HOT_DATA;
    else
        return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct f2fs_io_info *fio)
{
    if (fio->type == DATA) {
        struct inode *inode = fio->page->mapping->host;

        if (S_ISDIR(inode->i_mode))
            return CURSEG_HOT_DATA;
        else
            return CURSEG_COLD_DATA;
    } else {
        if (IS_DNODE(fio->page) && is_cold_node(fio->page))
            return CURSEG_WARM_NODE;
        else
            return CURSEG_COLD_NODE;
    }
}

static int __get_segment_type_6(struct f2fs_io_info *fio)
{
    if (fio->type == DATA) {
        struct inode *inode = fio->page->mapping->host;

        if (is_cold_data(fio->page) || file_is_cold(inode))
            return CURSEG_COLD_DATA;
        if (is_inode_flag_set(inode, FI_HOT_DATA))
            return CURSEG_HOT_DATA;
        return CURSEG_WARM_DATA;
    } else {
        if (IS_DNODE(fio->page))
            return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
                        CURSEG_HOT_NODE;
        return CURSEG_COLD_NODE;
    }
}

static int __get_segment_type(struct f2fs_io_info *fio)
{
    int type = 0;

    switch (fio->sbi->active_logs) {
    case 2:
        type = __get_segment_type_2(fio);
        break;
    case 4:
        type = __get_segment_type_4(fio);
        break;
    case 6:
        type = __get_segment_type_6(fio);
        break;
    default:
        f2fs_bug_on(fio->sbi, true);
    }

    if (IS_HOT(type))
        fio->temp = HOT;
    else if (IS_WARM(type))
        fio->temp = WARM;
    else
        fio->temp = COLD;
    return type;
}
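/*
 * Illustrative restatement (not part of f2fs): the six-log temperature policy
 * of __get_segment_type_6() above, as a table.
 *
 *   DATA: cold page flag / cold file -> CURSEG_COLD_DATA
 *         FI_HOT_DATA inode flag     -> CURSEG_HOT_DATA
 *         otherwise                  -> CURSEG_WARM_DATA
 *   NODE: direct node, cold          -> CURSEG_WARM_NODE
 *         direct node, otherwise     -> CURSEG_HOT_NODE
 *         indirect node              -> CURSEG_COLD_NODE
 */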
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
        block_t old_blkaddr, block_t *new_blkaddr,
        struct f2fs_summary *sum, int type,
        struct f2fs_io_info *fio, bool add_list)
{
    struct sit_info *sit_i = SIT_I(sbi);
    struct curseg_info *curseg = CURSEG_I(sbi, type);

    mutex_lock(&curseg->curseg_mutex);
    mutex_lock(&sit_i->sentry_lock);

    *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

    f2fs_wait_discard_bio(sbi, *new_blkaddr);

    /*
     * __add_sum_entry must be called with curseg_mutex held because it
     * updates a summary entry in the current summary block.
     */
    __add_sum_entry(sbi, type, sum);

    __refresh_next_blkoff(sbi, curseg);

    stat_inc_block_count(sbi, curseg);

    if (!__has_curseg_space(sbi, type))
        sit_i->s_ops->allocate_segment(sbi, type, false);
    /*
     * SIT information should be updated after segment allocation,
     * since we need to keep dirty segments precisely under SSR.
     */
    refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

    mutex_unlock(&sit_i->sentry_lock);

    if (page && IS_NODESEG(type))
        fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

    if (add_list) {
        struct f2fs_bio_info *io;

        INIT_LIST_HEAD(&fio->list);
        fio->in_list = true;
        io = sbi->write_io[fio->type] + fio->temp;
        spin_lock(&io->io_lock);
        list_add_tail(&fio->list, &io->io_list);
        spin_unlock(&io->io_lock);
    }

    mutex_unlock(&curseg->curseg_mutex);
}

static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
    int type = __get_segment_type(fio);
    int err;

reallocate:
    allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
            &fio->new_blkaddr, sum, type, fio, true);

    /* writeout dirty page into bdev */
    err = f2fs_submit_page_write(fio);
    if (err == -EAGAIN) {
        fio->old_blkaddr = fio->new_blkaddr;
        goto reallocate;
    }
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
    struct f2fs_io_info fio = {
        .sbi = sbi,
        .type = META,
        .op = REQ_OP_WRITE,
        .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
        .old_blkaddr = page->index,
        .new_blkaddr = page->index,
        .page = page,
        .encrypted_page = NULL,
        .in_list = false,
    };

    if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
        fio.op_flags &= ~REQ_META;

    set_page_writeback(page);
    f2fs_submit_page_write(&fio);
}

void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
    struct f2fs_summary sum;

    set_summary(&sum, nid, 0, 0);
    do_write_page(&sum, fio);
}

void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
    struct f2fs_sb_info *sbi = fio->sbi;
    struct f2fs_summary sum;
    struct node_info ni;

    f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
    get_node_info(sbi, dn->nid, &ni);
    set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
    do_write_page(&sum, fio);
    f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}

int rewrite_data_page(struct f2fs_io_info *fio)
{
    fio->new_blkaddr = fio->old_blkaddr;
    stat_inc_inplace_blocks(fio->sbi);
    return f2fs_submit_page_bio(fio);
}

void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                block_t old_blkaddr, block_t new_blkaddr,
                bool recover_curseg, bool recover_newaddr)
{
    struct sit_info *sit_i = SIT_I(sbi);
    struct curseg_info *curseg;
    unsigned int segno, old_cursegno;
    struct seg_entry *se;
    int type;
    unsigned short old_blkoff;

    segno = GET_SEGNO(sbi, new_blkaddr);
    se = get_seg_entry(sbi, segno);
    type = se->type;

    if (!recover_curseg) {
        /* for recovery flow */
        if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
            if (old_blkaddr == NULL_ADDR)
                type = CURSEG_COLD_DATA;
            else
                type = CURSEG_WARM_DATA;
        }
    } else {
        if (!IS_CURSEG(sbi, segno))
            type = CURSEG_WARM_DATA;
    }

    curseg = CURSEG_I(sbi, type);

    mutex_lock(&curseg->curseg_mutex);
    mutex_lock(&sit_i->sentry_lock);

    old_cursegno = curseg->segno;
    old_blkoff = curseg->next_blkoff;

    /* change the current segment */
    if (segno != curseg->segno) {
        curseg->next_segno = segno;
        change_curseg(sbi, type, true);
    }

    curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
    __add_sum_entry(sbi, type, sum);

    if (!recover_curseg || recover_newaddr)
        update_sit_entry(sbi, new_blkaddr, 1);
    if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
        update_sit_entry(sbi, old_blkaddr, -1);

    locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
    locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));

    locate_dirty_segment(sbi, old_cursegno);

    if (recover_curseg) {
        if (old_cursegno != curseg->segno) {
            curseg->next_segno = old_cursegno;
            change_curseg(sbi, type, true);
        }
        curseg->next_blkoff = old_blkoff;
    }

    mutex_unlock(&sit_i->sentry_lock);
    mutex_unlock(&curseg->curseg_mutex);
}

void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
                block_t old_addr, block_t new_addr,
                unsigned char version, bool recover_curseg,
                bool recover_newaddr)
{
    struct f2fs_summary sum;

    set_summary(&sum, dn->nid, dn->ofs_in_node, version);

    __f2fs_replace_block(sbi, &sum, old_addr, new_addr,
                    recover_curseg, recover_newaddr);

    f2fs_update_data_blkaddr(dn, new_addr);
}

void f2fs_wait_on_page_writeback(struct page *page,
                enum page_type type, bool ordered)
{
    if (PageWriteback(page)) {
        struct f2fs_sb_info *sbi = F2FS_P_SB(page);

        f2fs_submit_merged_write_cond(sbi, page->mapping->host,
                        0, page->index, type);
        if (ordered)
            wait_on_page_writeback(page);
        else
            wait_for_stable_page(page);
    }
}

void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
                            block_t blkaddr)
{
    struct page *cpage;

    if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
        return;

    cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
    if (cpage) {
        f2fs_wait_on_page_writeback(cpage, DATA, true);
        f2fs_put_page(cpage, 1);
    }
}

static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
    struct curseg_info *seg_i;
    unsigned char *kaddr;
    struct page *page;
    block_t start;
    int i, j, offset;

    start = start_sum_block(sbi);

    page = get_meta_page(sbi, start++);
    kaddr = (unsigned char *)page_address(page);

    /* Step 1: restore nat cache */
    seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
    memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);

    /* Step 2: restore sit cache */
    seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
    memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
    offset = 2 * SUM_JOURNAL_SIZE;

    /* Step 3: restore summary entries */
    for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
        unsigned short blk_off;
        unsigned int segno;

        seg_i = CURSEG_I(sbi, i);
        segno = le32_to_cpu(ckpt->cur_data_segno[i]);
        blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
        seg_i->next_segno = segno;
        reset_curseg(sbi, i, 0);
        seg_i->alloc_type = ckpt->alloc_type[i];
        seg_i->next_blkoff = blk_off;

        if (seg_i->alloc_type == SSR)
            blk_off = sbi->blocks_per_seg;

        for (j = 0; j < blk_off; j++) {
            struct f2fs_summary *s;

            s = (struct f2fs_summary *)(kaddr + offset);
            seg_i->sum_blk->entries[j] = *s;
            offset += SUMMARY_SIZE;
            if (offset + SUMMARY_SIZE <= PAGE_SIZE -
                        SUM_FOOTER_SIZE)
                continue;

            f2fs_put_page(page, 1);
            page = NULL;

            page = get_meta_page(sbi, start++);
            kaddr = (unsigned char *)page_address(page);
            offset = 0;
        }
    }
    f2fs_put_page(page, 1);
    return 0;
}
&sum->entries[0]; int i; for (i = 0; i < sbi->blocks_per_seg; i++, ns++) { ns->version = 0; ns->ofs_in_node = 0; } } else { int err; err = restore_node_summary(sbi, segno, sum); if (err) { f2fs_put_page(new, 1); return err; } } } /* set uncompleted segment to curseg */ curseg = CURSEG_I(sbi, type); mutex_lock(&curseg->curseg_mutex); /* update journal info */ down_write(&curseg->journal_rwsem); memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE); up_write(&curseg->journal_rwsem); memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE); memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE); curseg->next_segno = segno; reset_curseg(sbi, type, 0); curseg->alloc_type = ckpt->alloc_type[type]; curseg->next_blkoff = blk_off; mutex_unlock(&curseg->curseg_mutex); f2fs_put_page(new, 1); return 0; } static int restore_curseg_summaries(struct f2fs_sb_info *sbi) { int type = CURSEG_HOT_DATA; int err; if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) { int npages = npages_for_summary_flush(sbi, true); if (npages >= 2) ra_meta_pages(sbi, start_sum_block(sbi), npages, META_CP, true); /* restore for compacted data summary */ if (read_compacted_summaries(sbi)) return -EINVAL; type = CURSEG_HOT_NODE; } if (__exist_node_summaries(sbi)) ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type), NR_CURSEG_TYPE - type, META_CP, true); for (; type <= CURSEG_COLD_NODE; type++) { err = read_normal_summaries(sbi, type); if (err) return err; } return 0; } static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) { struct page *page; unsigned char *kaddr; struct f2fs_summary *summary; struct curseg_info *seg_i; int written_size = 0; int i, j; page = grab_meta_page(sbi, blkaddr++); kaddr = (unsigned char *)page_address(page); /* Step 1: write nat cache */ seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA); memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE); written_size += SUM_JOURNAL_SIZE; /* Step 2: write sit cache */ seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA); memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE); written_size += SUM_JOURNAL_SIZE; /* Step 3: write summary entries */ for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { unsigned short blkoff; seg_i = CURSEG_I(sbi, i); if (sbi->ckpt->alloc_type[i] == SSR) blkoff = sbi->blocks_per_seg; else blkoff = curseg_blkoff(sbi, i); for (j = 0; j < blkoff; j++) { if (!page) { page = grab_meta_page(sbi, blkaddr++); kaddr = (unsigned char *)page_address(page); written_size = 0; } summary = (struct f2fs_summary *)(kaddr + written_size); *summary = seg_i->sum_blk->entries[j]; written_size += SUMMARY_SIZE; if (written_size + SUMMARY_SIZE <= PAGE_SIZE - SUM_FOOTER_SIZE) continue; set_page_dirty(page); f2fs_put_page(page, 1); page = NULL; } } if (page) { set_page_dirty(page); f2fs_put_page(page, 1); } } static void write_normal_summaries(struct f2fs_sb_info *sbi, block_t blkaddr, int type) { int i, end; if (IS_DATASEG(type)) end = type + NR_CURSEG_DATA_TYPE; else end = type + NR_CURSEG_NODE_TYPE; for (i = type; i < end; i++) write_current_sum_page(sbi, i, blkaddr + (i - type)); } void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) write_compacted_summaries(sbi, start_blk); else write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA); } void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk) { write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE); } int lookup_journal_in_cursum(struct f2fs_journal *journal, int type, unsigned int val, int alloc) { 
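	/*
	 * Look up "val" (an nid for NAT_JOURNAL, a segno for SIT_JOURNAL)
	 * among the in-journal entries and return its slot index. With
	 * "alloc" set, reserve one new slot and return its index when the
	 * journal still has space; return -1 otherwise.
	 */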
int i; if (type == NAT_JOURNAL) { for (i = 0; i < nats_in_cursum(journal); i++) { if (le32_to_cpu(nid_in_journal(journal, i)) == val) return i; } if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL)) return update_nats_in_cursum(journal, 1); } else if (type == SIT_JOURNAL) { for (i = 0; i < sits_in_cursum(journal); i++) if (le32_to_cpu(segno_in_journal(journal, i)) == val) return i; if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL)) return update_sits_in_cursum(journal, 1); } return -1; } static struct page *get_current_sit_page(struct f2fs_sb_info *sbi, unsigned int segno) { return get_meta_page(sbi, current_sit_addr(sbi, segno)); } static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, unsigned int start) { struct sit_info *sit_i = SIT_I(sbi); struct page *src_page, *dst_page; pgoff_t src_off, dst_off; void *src_addr, *dst_addr; src_off = current_sit_addr(sbi, start); dst_off = next_sit_addr(sbi, src_off); /* get current sit block page without lock */ src_page = get_meta_page(sbi, src_off); dst_page = grab_meta_page(sbi, dst_off); f2fs_bug_on(sbi, PageDirty(src_page)); src_addr = page_address(src_page); dst_addr = page_address(dst_page); memcpy(dst_addr, src_addr, PAGE_SIZE); set_page_dirty(dst_page); f2fs_put_page(src_page, 1); set_to_next_sit(sit_i, start); return dst_page; } static struct sit_entry_set *grab_sit_entry_set(void) { struct sit_entry_set *ses = f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS); ses->entry_cnt = 0; INIT_LIST_HEAD(&ses->set_list); return ses; } static void release_sit_entry_set(struct sit_entry_set *ses) { list_del(&ses->set_list); kmem_cache_free(sit_entry_set_slab, ses); } static void adjust_sit_entry_set(struct sit_entry_set *ses, struct list_head *head) { struct sit_entry_set *next = ses; if (list_is_last(&ses->set_list, head)) return; list_for_each_entry_continue(next, head, set_list) if (ses->entry_cnt <= next->entry_cnt) break; list_move_tail(&ses->set_list, &next->set_list); } static void add_sit_entry(unsigned int segno, struct list_head *head) { struct sit_entry_set *ses; unsigned int start_segno = START_SEGNO(segno); list_for_each_entry(ses, head, set_list) { if (ses->start_segno == start_segno) { ses->entry_cnt++; adjust_sit_entry_set(ses, head); return; } } ses = grab_sit_entry_set(); ses->start_segno = start_segno; ses->entry_cnt++; list_add(&ses->set_list, head); } static void add_sits_in_set(struct f2fs_sb_info *sbi) { struct f2fs_sm_info *sm_info = SM_I(sbi); struct list_head *set_list = &sm_info->sit_entry_set; unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap; unsigned int segno; for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi)) add_sit_entry(segno, set_list); } static void remove_sits_in_journal(struct f2fs_sb_info *sbi) { struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA); struct f2fs_journal *journal = curseg->journal; int i; down_write(&curseg->journal_rwsem); for (i = 0; i < sits_in_cursum(journal); i++) { unsigned int segno; bool dirtied; segno = le32_to_cpu(segno_in_journal(journal, i)); dirtied = __mark_sit_entry_dirty(sbi, segno); if (!dirtied) add_sit_entry(segno, &SM_I(sbi)->sit_entry_set); } update_sits_in_cursum(journal, -i); up_write(&curseg->journal_rwsem); } /* * CP calls this function, which flushes SIT entries including sit_journal, * and moves prefree segs to free segs. 
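 * Entries go to the journal kept in the current cold data summary block
 * while it has room; once a whole set no longer fits, the remaining sets
 * are written straight into SIT blocks (the to_journal switch below).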
 */
void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = true;
	struct seg_entry *se;

	mutex_lock(&sit_i->sentry_lock);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * add and account sit entries of dirty bitmap in sit entry
	 * set temporarily
	 */
	add_sits_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store dirty sit
	 * entries, remove all entries from the journal and add and account
	 * them in the sit entry set.
	 */
	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
		remove_sits_in_journal(sbi);

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		if (to_journal &&
			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (to_journal) {
			down_write(&curseg->journal_rwsem);
		} else {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);

			/* add discard candidates */
			if (!(cpc->reason & CP_DISCARD)) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc, false);
			}

			if (to_journal) {
				offset = lookup_journal_in_cursum(journal,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(journal, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
					&sit_in_journal(journal, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (to_journal)
			up_write(&curseg->journal_rwsem);
		else
			f2fs_put_page(page, 1);

		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason & CP_DISCARD) {
		__u64 trim_start = cpc->trim_start;

		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc, false);
		cpc->trim_start = trim_start;
	}
	mutex_unlock(&sit_i->sentry_lock);

	set_prefree_as_free_segments(sbi);
}

static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
					sizeof(struct seg_entry), GFP_KERNEL);
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map ||
				!sit_i->sentries[start].ckpt_valid_map)
			return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
		sit_i->sentries[start].cur_valid_map_mir
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map_mir)
			return -ENOMEM;
#endif

		if (f2fs_discard_en(sbi)) {
			sit_i->sentries[start].discard_map
				= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
			if (!sit_i->sentries[start].discard_map)
				return -ENOMEM;
		}
	}

	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
					sizeof(struct sec_entry), GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related to SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;
#endif

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = 0;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		init_rwsem(&array[i].journal_rwsem);
		array[i].journal = kzalloc(sizeof(struct f2fs_journal),
							GFP_KERNEL);
		if (!array[i].journal)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}

static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct seg_entry *se;
	struct f2fs_sit_entry sit;
	int
sit_blk_cnt = SIT_BLK_CNT(sbi); unsigned int i, start, end; unsigned int readed, start_blk = 0; do { readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES, META_SIT, true); start = start_blk * sit_i->sents_per_block; end = (start_blk + readed) * sit_i->sents_per_block; for (; start < end && start < MAIN_SEGS(sbi); start++) { struct f2fs_sit_block *sit_blk; struct page *page; se = &sit_i->sentries[start]; page = get_current_sit_page(sbi, start); sit_blk = (struct f2fs_sit_block *)page_address(page); sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)]; f2fs_put_page(page, 1); check_block_count(sbi, start, &sit); seg_info_from_raw_sit(se, &sit); /* build discard map only one time */ if (f2fs_discard_en(sbi)) { if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE); } else { memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks; } } if (sbi->segs_per_sec > 1) get_sec_entry(sbi, start)->valid_blocks += se->valid_blocks; } start_blk += readed; } while (start_blk < sit_blk_cnt); down_read(&curseg->journal_rwsem); for (i = 0; i < sits_in_cursum(journal); i++) { unsigned int old_valid_blocks; start = le32_to_cpu(segno_in_journal(journal, i)); se = &sit_i->sentries[start]; sit = sit_in_journal(journal, i); old_valid_blocks = se->valid_blocks; check_block_count(sbi, start, &sit); seg_info_from_raw_sit(se, &sit); if (f2fs_discard_en(sbi)) { if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) { memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE); } else { memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); sbi->discard_blks += old_valid_blocks - se->valid_blocks; } } if (sbi->segs_per_sec > 1) get_sec_entry(sbi, start)->valid_blocks += se->valid_blocks - old_valid_blocks; } up_read(&curseg->journal_rwsem); } static void init_free_segmap(struct f2fs_sb_info *sbi) { unsigned int start; int type; for (start = 0; start < MAIN_SEGS(sbi); start++) { struct seg_entry *sentry = get_seg_entry(sbi, start); if (!sentry->valid_blocks) __set_free(sbi, start); else SIT_I(sbi)->written_valid_blocks += sentry->valid_blocks; } /* set use the current segments */ for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) { struct curseg_info *curseg_t = CURSEG_I(sbi, type); __set_test_and_inuse(sbi, curseg_t->segno); } } static void init_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct free_segmap_info *free_i = FREE_I(sbi); unsigned int segno = 0, offset = 0; unsigned short valid_blocks; while (1) { /* find dirty segment based on free segmap */ segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset); if (segno >= MAIN_SEGS(sbi)) break; offset = segno + 1; valid_blocks = get_valid_blocks(sbi, segno, false); if (valid_blocks == sbi->blocks_per_seg || !valid_blocks) continue; if (valid_blocks > sbi->blocks_per_seg) { f2fs_bug_on(sbi, 1); continue; } mutex_lock(&dirty_i->seglist_lock); __locate_dirty_segment(sbi, segno, DIRTY); mutex_unlock(&dirty_i->seglist_lock); } } static int init_victim_secmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); if (!dirty_i->victim_secmap) return -ENOMEM; return 0; } static int build_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i; unsigned int bitmap_size, i; /* allocate memory for dirty segments list information */ dirty_i = 
kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL); if (!dirty_i) return -ENOMEM; SM_I(sbi)->dirty_info = dirty_i; mutex_init(&dirty_i->seglist_lock); bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi)); for (i = 0; i < NR_DIRTY_TYPE; i++) { dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL); if (!dirty_i->dirty_segmap[i]) return -ENOMEM; } init_dirty_segmap(sbi); return init_victim_secmap(sbi); } /* * Update min, max modified time for cost-benefit GC algorithm */ static void init_min_max_mtime(struct f2fs_sb_info *sbi) { struct sit_info *sit_i = SIT_I(sbi); unsigned int segno; mutex_lock(&sit_i->sentry_lock); sit_i->min_mtime = LLONG_MAX; for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) { unsigned int i; unsigned long long mtime = 0; for (i = 0; i < sbi->segs_per_sec; i++) mtime += get_seg_entry(sbi, segno + i)->mtime; mtime = div_u64(mtime, sbi->segs_per_sec); if (sit_i->min_mtime > mtime) sit_i->min_mtime = mtime; } sit_i->max_mtime = get_mtime(sbi); mutex_unlock(&sit_i->sentry_lock); } int build_segment_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_sm_info *sm_info; int err; sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); if (!sm_info) return -ENOMEM; /* init sm info */ sbi->sm_info = sm_info; sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); sm_info->segment_count = le32_to_cpu(raw_super->segment_count); sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); sm_info->rec_prefree_segments = sm_info->main_segments * DEF_RECLAIM_PREFREE_SEGMENTS / 100; if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS) sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS; if (!test_opt(sbi, LFS)) sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; INIT_LIST_HEAD(&sm_info->sit_entry_set); if (!f2fs_readonly(sbi->sb)) { err = create_flush_cmd_control(sbi); if (err) return err; } err = create_discard_cmd_control(sbi); if (err) return err; err = build_sit_info(sbi); if (err) return err; err = build_free_segmap(sbi); if (err) return err; err = build_curseg(sbi); if (err) return err; /* reinit free segmap based on SIT */ build_sit_entries(sbi); init_free_segmap(sbi); err = build_dirty_segmap(sbi); if (err) return err; init_min_max_mtime(sbi); return 0; } static void discard_dirty_segmap(struct f2fs_sb_info *sbi, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); mutex_lock(&dirty_i->seglist_lock); kvfree(dirty_i->dirty_segmap[dirty_type]); dirty_i->nr_dirty[dirty_type] = 0; mutex_unlock(&dirty_i->seglist_lock); } static void destroy_victim_secmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); kvfree(dirty_i->victim_secmap); } static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); int i; if (!dirty_i) return; /* discard pre-free/dirty segments list */ for (i = 0; i < NR_DIRTY_TYPE; i++) discard_dirty_segmap(sbi, i); destroy_victim_secmap(sbi); 
SM_I(sbi)->dirty_info = NULL; kfree(dirty_i); } static void destroy_curseg(struct f2fs_sb_info *sbi) { struct curseg_info *array = SM_I(sbi)->curseg_array; int i; if (!array) return; SM_I(sbi)->curseg_array = NULL; for (i = 0; i < NR_CURSEG_TYPE; i++) { kfree(array[i].sum_blk); kfree(array[i].journal); } kfree(array); } static void destroy_free_segmap(struct f2fs_sb_info *sbi) { struct free_segmap_info *free_i = SM_I(sbi)->free_info; if (!free_i) return; SM_I(sbi)->free_info = NULL; kvfree(free_i->free_segmap); kvfree(free_i->free_secmap); kfree(free_i); } static void destroy_sit_info(struct f2fs_sb_info *sbi) { struct sit_info *sit_i = SIT_I(sbi); unsigned int start; if (!sit_i) return; if (sit_i->sentries) { for (start = 0; start < MAIN_SEGS(sbi); start++) { kfree(sit_i->sentries[start].cur_valid_map); #ifdef CONFIG_F2FS_CHECK_FS kfree(sit_i->sentries[start].cur_valid_map_mir); #endif kfree(sit_i->sentries[start].ckpt_valid_map); kfree(sit_i->sentries[start].discard_map); } } kfree(sit_i->tmp_map); kvfree(sit_i->sentries); kvfree(sit_i->sec_entries); kvfree(sit_i->dirty_sentries_bitmap); SM_I(sbi)->sit_info = NULL; kfree(sit_i->sit_bitmap); #ifdef CONFIG_F2FS_CHECK_FS kfree(sit_i->sit_bitmap_mir); #endif kfree(sit_i); } void destroy_segment_manager(struct f2fs_sb_info *sbi) { struct f2fs_sm_info *sm_info = SM_I(sbi); if (!sm_info) return; destroy_flush_cmd_control(sbi, true); destroy_discard_cmd_control(sbi); destroy_dirty_segmap(sbi); destroy_curseg(sbi); destroy_free_segmap(sbi); destroy_sit_info(sbi); sbi->sm_info = NULL; kfree(sm_info); } int __init create_segment_manager_caches(void) { discard_entry_slab = f2fs_kmem_cache_create("discard_entry", sizeof(struct discard_entry)); if (!discard_entry_slab) goto fail; discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd", sizeof(struct discard_cmd)); if (!discard_cmd_slab) goto destroy_discard_entry; sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set", sizeof(struct sit_entry_set)); if (!sit_entry_set_slab) goto destroy_discard_cmd; inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry", sizeof(struct inmem_pages)); if (!inmem_entry_slab) goto destroy_sit_entry_set; return 0; destroy_sit_entry_set: kmem_cache_destroy(sit_entry_set_slab); destroy_discard_cmd: kmem_cache_destroy(discard_cmd_slab); destroy_discard_entry: kmem_cache_destroy(discard_entry_slab); fail: return -ENOMEM; } void destroy_segment_manager_caches(void) { kmem_cache_destroy(sit_entry_set_slab); kmem_cache_destroy(discard_cmd_slab); kmem_cache_destroy(discard_entry_slab); kmem_cache_destroy(inmem_entry_slab); }
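create_segment_manager_caches() above is a textbook goto-unwind ladder: each allocation that succeeds adds one more teardown label for later failures (destroy_sit_entry_set, destroy_discard_cmd, destroy_discard_entry, then fail), so cleanup runs in exact reverse order of construction. A minimal self-contained userspace sketch of the same pattern, with plain malloc/free standing in for the slab caches:

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: three stand-in "caches" acquired in order and released
 * in reverse on a later failure, mirroring the kernel idiom above. */
static void *cache_a, *cache_b, *cache_c;

int create_caches(void)
{
	cache_a = malloc(64);
	if (!cache_a)
		goto fail;
	cache_b = malloc(64);
	if (!cache_b)
		goto destroy_a;
	cache_c = malloc(64);
	if (!cache_c)
		goto destroy_b;
	return 0;

destroy_b:
	free(cache_b);
destroy_a:
	free(cache_a);
fail:
	return -1;	/* -ENOMEM in the kernel version */
}

int main(void)
{
	if (create_caches())
		fprintf(stderr, "allocation failed\n");
	else
		puts("caches ready");
	return 0;
}

The payoff is a single success path with no duplicated cleanup code, which is why the pattern recurs throughout the file above.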
int build_segment_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_sm_info *sm_info; int err; sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); if (!sm_info) return -ENOMEM; /* init sm info */ sbi->sm_info = sm_info; sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); sm_info->segment_count = le32_to_cpu(raw_super->segment_count); sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); sm_info->rec_prefree_segments = sm_info->main_segments * DEF_RECLAIM_PREFREE_SEGMENTS / 100; if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS) sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS; if (!test_opt(sbi, LFS)) sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; INIT_LIST_HEAD(&sm_info->sit_entry_set); if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) { err = create_flush_cmd_control(sbi); if (err) return err; } err = create_discard_cmd_control(sbi); if (err) return err; err = build_sit_info(sbi); if (err) return err; err = build_free_segmap(sbi); if (err) return err; err = build_curseg(sbi); if (err) return err; /* reinit free segmap based on SIT */ build_sit_entries(sbi); init_free_segmap(sbi); err = build_dirty_segmap(sbi); if (err) return err; init_min_max_mtime(sbi); return 0; }
int build_segment_manager(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); struct f2fs_sm_info *sm_info; int err; sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL); if (!sm_info) return -ENOMEM; /* init sm info */ sbi->sm_info = sm_info; sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); sm_info->segment_count = le32_to_cpu(raw_super->segment_count); sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count); sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count); sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main); sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr); sm_info->rec_prefree_segments = sm_info->main_segments * DEF_RECLAIM_PREFREE_SEGMENTS / 100; if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS) sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS; if (!test_opt(sbi, LFS)) sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC; sm_info->min_ipu_util = DEF_MIN_IPU_UTIL; sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS; sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS; sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS; INIT_LIST_HEAD(&sm_info->sit_entry_set); if (!f2fs_readonly(sbi->sb)) { err = create_flush_cmd_control(sbi); if (err) return err; } err = create_discard_cmd_control(sbi); if (err) return err; err = build_sit_info(sbi); if (err) return err; err = build_free_segmap(sbi); if (err) return err; err = build_curseg(sbi); if (err) return err; /* reinit free segmap based on SIT */ build_sit_entries(sbi); init_free_segmap(sbi); err = build_dirty_segmap(sbi); if (err) return err; init_min_max_mtime(sbi); return 0; }
{'added': [(569, '\tif (!test_opt(sbi, FLUSH_MERGE))'), (570, '\t\treturn err;'), (571, ''), (3246, '\tif (!f2fs_readonly(sbi->sb)) {')], 'deleted': [(3243, '\tif (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {')]}
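The diff above is the CWE-476 fix in miniature: before, build_segment_manager() called create_flush_cmd_control() only when FLUSH_MERGE was in effect, so paths that later dereferenced the flush control could hit a NULL pointer (consistent with the CVE-2017-18241 description, where mounting with noflush_merge left the structure unset); after, it is always constructed, and the added lines 569-571 merely skip the optional merge behaviour when the option is off. A hedged userspace model of that control flow (struct and function names are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

struct flush_ctl {
	int merge_enabled;	/* stands in for the optional flush thread */
};

struct sb_info {
	int opt_flush_merge;
	struct flush_ctl *fcc;	/* NULL if never allocated: the CWE-476 risk */
};

/* Fixed shape: allocate unconditionally, gate only the optional part. */
static int create_flush_ctl(struct sb_info *sbi)
{
	sbi->fcc = calloc(1, sizeof(*sbi->fcc));
	if (!sbi->fcc)
		return -1;
	if (!sbi->opt_flush_merge)	/* mirrors the added early return */
		return 0;
	sbi->fcc->merge_enabled = 1;
	return 0;
}

static void issue_flush(struct sb_info *sbi)
{
	/* Safe now: fcc exists even when merging was off at mount time. */
	printf("flush, merge=%d\n", sbi->fcc->merge_enabled);
}

int main(void)
{
	struct sb_info sbi = { .opt_flush_merge = 0, .fcc = NULL };

	if (create_flush_ctl(&sbi))
		return 1;
	issue_flush(&sbi);	/* would crash if fcc were left NULL */
	free(sbi.fcc);
	return 0;
}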
num_lines_added: 4
num_lines_deleted: 1
num_lines_in_file: 2,659
num_tokens_in_file: 17,637
num_lines_in_method: 53
num_tokens_in_method: 340
method_complexity: 12
repo: https://github.com/torvalds/linux
cve_id: CVE-2017-18241
cwe_id: CWE-476
id: 1,436
file_name: i2c.c
programming_language: C
method_name: get_alen
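The code_before blob for this row follows. Since method_name points at get_alen(), here is a hedged standalone sketch of that helper's behaviour (shape copied from the function visible below, U-Boot types dropped), useful for seeing that the suffix digit is never range-checked:

#include <stdio.h>

/* Mirror of the get_alen() helper in the code below: scan the first 8
 * characters of an address argument for a ".n" length suffix and fall
 * back to default_len when none is present. */
static int get_alen(const char *arg, int default_len)
{
	int alen = default_len;
	int j;

	for (j = 0; j < 8; j++) {
		if (arg[j] == '.') {
			/* Note: the digit after '.' is not validated. */
			alen = arg[j + 1] - '0';
			break;
		} else if (arg[j] == '\0')
			break;
	}
	return alen;
}

int main(void)
{
	printf("%d\n", get_alen("210.2", 1));	/* 2: two-byte address */
	printf("%d\n", get_alen("0.0", 1));	/* 0: suppress the address byte */
	printf("%d\n", get_alen("50", 1));	/* 1: default length */
	printf("%d\n", get_alen("50.", 1));	/* -48: unchecked suffix digit */
	return 0;
}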
// SPDX-License-Identifier: GPL-2.0+ /* * (C) Copyright 2009 * Sergey Kubushyn, himself, ksi@koi8.net * * Changes for unified multibus/multiadapter I2C support. * * (C) Copyright 2001 * Gerald Van Baren, Custom IDEAS, vanbaren@cideas.com. */ /* * I2C Functions similar to the standard memory functions. * * There are several parameters in many of the commands that bear further * explanations: * * {i2c_chip} is the I2C chip address (the first byte sent on the bus). * Each I2C chip on the bus has a unique address. On the I2C data bus, * the address is the upper seven bits and the LSB is the "read/write" * bit. Note that the {i2c_chip} address specified on the command * line is not shifted up: e.g. a typical EEPROM memory chip may have * an I2C address of 0x50, but the data put on the bus will be 0xA0 * for write and 0xA1 for read. This "non shifted" address notation * matches at least half of the data sheets :-/. * * {addr} is the address (or offset) within the chip. Small memory * chips have 8 bit addresses. Large memory chips have 16 bit * addresses. Other memory chips have 9, 10, or 11 bit addresses. * Many non-memory chips have multiple registers and {addr} is used * as the register index. Some non-memory chips have only one register * and therefore don't need any {addr} parameter. * * The default {addr} parameter is one byte (.1) which works well for * memories and registers with 8 bits of address space. * * You can specify the length of the {addr} field with the optional .0, * .1, or .2 modifier (similar to the .b, .w, .l modifier). If you are * manipulating a single register device which doesn't use an address * field, use "0.0" for the address and the ".0" length field will * suppress the address in the I2C data stream. This also works for * successive reads using the I2C auto-incrementing memory pointer. * * If you are manipulating a large memory with 2-byte addresses, use * the .2 address modifier, e.g. 210.2 addresses location 528 (decimal). * * Then there are the unfortunate memory chips that spill the most * significant 1, 2, or 3 bits of address into the chip address byte. * This effectively makes one chip (logically) look like 2, 4, or * 8 chips. This is handled (awkwardly) by #defining * CONFIG_SYS_I2C_EEPROM_ADDR_OVERFLOW and using the .1 modifier on the * {addr} field (since .1 is the default, it doesn't actually have to * be specified). Examples: given a memory chip at I2C chip address * 0x50, the following would happen... * i2c md 50 0 10 display 16 bytes starting at 0x000 * On the bus: <S> A0 00 <E> <S> A1 <rd> ... <rd> * i2c md 50 100 10 display 16 bytes starting at 0x100 * On the bus: <S> A2 00 <E> <S> A3 <rd> ... <rd> * i2c md 50 210 10 display 16 bytes starting at 0x210 * On the bus: <S> A4 10 <E> <S> A5 <rd> ... <rd> * This is awfully ugly. It would be nice if someone would think up * a better way of handling this. * * Adapted from cmd_mem.c which is copyright Wolfgang Denk (wd@denx.de). */ #include <common.h> #include <bootretry.h> #include <cli.h> #include <command.h> #include <console.h> #include <dm.h> #include <edid.h> #include <errno.h> #include <i2c.h> #include <log.h> #include <malloc.h> #include <asm/byteorder.h> #include <linux/compiler.h> #include <linux/delay.h> #include <u-boot/crc.h> /* Display values from last command. * Memory modify remembered values are different from display memory. 
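 * ("i2c md" and "i2c mm"/"i2c nm" each keep their own last chip/addr/alen
 * in the statics declared just below, restored on plain command repeat.)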
*/ static uint i2c_dp_last_chip; static uint i2c_dp_last_addr; static uint i2c_dp_last_alen; static uint i2c_dp_last_length = 0x10; static uint i2c_mm_last_chip; static uint i2c_mm_last_addr; static uint i2c_mm_last_alen; /* If only one I2C bus is present, the list of devices to ignore when * the probe command is issued is represented by a 1D array of addresses. * When multiple buses are present, the list is an array of bus-address * pairs. The following macros take care of this */ #if defined(CONFIG_SYS_I2C_NOPROBES) #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || defined(CONFIG_I2C_MULTI_BUS) static struct { uchar bus; uchar addr; } i2c_no_probes[] = CONFIG_SYS_I2C_NOPROBES; #define GET_BUS_NUM i2c_get_bus_num() #define COMPARE_BUS(b,i) (i2c_no_probes[(i)].bus == (b)) #define COMPARE_ADDR(a,i) (i2c_no_probes[(i)].addr == (a)) #define NO_PROBE_ADDR(i) i2c_no_probes[(i)].addr #else /* single bus */ static uchar i2c_no_probes[] = CONFIG_SYS_I2C_NOPROBES; #define GET_BUS_NUM 0 #define COMPARE_BUS(b,i) ((b) == 0) /* Make compiler happy */ #define COMPARE_ADDR(a,i) (i2c_no_probes[(i)] == (a)) #define NO_PROBE_ADDR(i) i2c_no_probes[(i)] #endif /* CONFIG_IS_ENABLED(SYS_I2C_LEGACY) */ #endif #define DISP_LINE_LEN 16 /* * Default for driver model is to use the chip's existing address length. * For legacy code, this is not stored, so we need to use a suitable * default. */ #if CONFIG_IS_ENABLED(DM_I2C) #define DEFAULT_ADDR_LEN (-1) #else #define DEFAULT_ADDR_LEN 1 #endif #if CONFIG_IS_ENABLED(DM_I2C) static struct udevice *i2c_cur_bus; static int cmd_i2c_set_bus_num(unsigned int busnum) { struct udevice *bus; int ret; ret = uclass_get_device_by_seq(UCLASS_I2C, busnum, &bus); if (ret) { debug("%s: No bus %d\n", __func__, busnum); return ret; } i2c_cur_bus = bus; return 0; } static int i2c_get_cur_bus(struct udevice **busp) { #ifdef CONFIG_I2C_SET_DEFAULT_BUS_NUM if (!i2c_cur_bus) { if (cmd_i2c_set_bus_num(CONFIG_I2C_DEFAULT_BUS_NUMBER)) { printf("Default I2C bus %d not found\n", CONFIG_I2C_DEFAULT_BUS_NUMBER); return -ENODEV; } } #endif if (!i2c_cur_bus) { puts("No I2C bus selected\n"); return -ENODEV; } *busp = i2c_cur_bus; return 0; } static int i2c_get_cur_bus_chip(uint chip_addr, struct udevice **devp) { struct udevice *bus; int ret; ret = i2c_get_cur_bus(&bus); if (ret) return ret; return i2c_get_chip(bus, chip_addr, 1, devp); } #endif /** * i2c_init_board() - Board-specific I2C bus init * * This function is the default no-op implementation of I2C bus * initialization. This function can be overridden by board-specific * implementation if needed. */ __weak void i2c_init_board(void) { } /** * get_alen() - Small parser helper function to get address length * * Returns the address length. */ static uint get_alen(char *arg, int default_len) { int j; int alen; alen = default_len; for (j = 0; j < 8; j++) { if (arg[j] == '.') { alen = arg[j+1] - '0'; break; } else if (arg[j] == '\0') break; } return alen; } enum i2c_err_op { I2C_ERR_READ, I2C_ERR_WRITE, }; static int i2c_report_err(int ret, enum i2c_err_op op) { printf("Error %s the chip: %d\n", op == I2C_ERR_READ ? "reading" : "writing", ret); return CMD_RET_FAILURE; } /** * do_i2c_read() - Handle the "i2c read" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. 
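 *
 * The {.0, .1, .2} suffix on {devaddr} is parsed by get_alen() above;
 * with driver model, omitting the suffix keeps the chip's stored offset
 * length (DEFAULT_ADDR_LEN is -1 there, which skips the override).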
* * Syntax: * i2c read {i2c_chip} {devaddr}{.0, .1, .2} {len} {memaddr} */ static int do_i2c_read(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint devaddr, length; int alen; u_char *memaddr; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc != 5) return CMD_RET_USAGE; /* * I2C chip address */ chip = hextoul(argv[1], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ devaddr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; /* * Length is the number of objects, not number of bytes. */ length = hextoul(argv[3], NULL); /* * memaddr is the address where to store things in memory */ memaddr = (u_char *)hextoul(argv[4], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (!ret) ret = dm_i2c_read(dev, devaddr, memaddr, length); #else ret = i2c_read(chip, devaddr, alen, memaddr, length); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } static int do_i2c_write(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint devaddr, length; int alen; u_char *memaddr; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; struct dm_i2c_chip *i2c_chip; #endif if ((argc < 5) || (argc > 6)) return cmd_usage(cmdtp); /* * memaddr is the address where to store things in memory */ memaddr = (u_char *)hextoul(argv[1], NULL); /* * I2C chip address */ chip = hextoul(argv[2], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ devaddr = hextoul(argv[3], NULL); alen = get_alen(argv[3], DEFAULT_ADDR_LEN); if (alen > 3) return cmd_usage(cmdtp); /* * Length is the number of bytes. */ length = hextoul(argv[4], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); i2c_chip = dev_get_parent_plat(dev); if (!i2c_chip) return i2c_report_err(ret, I2C_ERR_WRITE); #endif if (argc == 6 && !strcmp(argv[5], "-s")) { /* * Write all bytes in a single I2C transaction. If the target * device is an EEPROM, it is your responsibility to not cross * a page boundary. No write delay upon completion, take this * into account if linking commands. */ #if CONFIG_IS_ENABLED(DM_I2C) i2c_chip->flags &= ~DM_I2C_CHIP_WR_ADDRESS; ret = dm_i2c_write(dev, devaddr, memaddr, length); #else ret = i2c_write(chip, devaddr, alen, memaddr, length); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); } else { /* * Repeated addressing - perform <length> separate * write transactions of one byte each */ while (length-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) i2c_chip->flags |= DM_I2C_CHIP_WR_ADDRESS; ret = dm_i2c_write(dev, devaddr++, memaddr++, 1); #else ret = i2c_write(chip, devaddr++, alen, memaddr++, 1); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); /* * No write delay with FRAM devices. 
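 * (EEPROM-style parts can need about 10 ms per byte write, hence the
 * 11 ms udelay below; FRAM completes in bus time, so the delay is
 * compiled out when CONFIG_SYS_I2C_FRAM is set.)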
*/ #if !defined(CONFIG_SYS_I2C_FRAM) udelay(11000); #endif } } return 0; } #if CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_flags(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct udevice *dev; uint flags; int chip; int ret; if (argc < 2) return CMD_RET_USAGE; chip = hextoul(argv[1], NULL); ret = i2c_get_cur_bus_chip(chip, &dev); if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (argc > 2) { flags = hextoul(argv[2], NULL); ret = i2c_set_chip_flags(dev, flags); } else { ret = i2c_get_chip_flags(dev, &flags); if (!ret) printf("%x\n", flags); } if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } static int do_i2c_olen(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct udevice *dev; uint olen; int chip; int ret; if (argc < 2) return CMD_RET_USAGE; chip = hextoul(argv[1], NULL); ret = i2c_get_cur_bus_chip(chip, &dev); if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (argc > 2) { olen = hextoul(argv[2], NULL); ret = i2c_set_chip_offset_len(dev, olen); } else { ret = i2c_get_chip_offset_len(dev); if (ret >= 0) { printf("%x\n", ret); ret = 0; } } if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } #endif /** * do_i2c_md() - Handle the "i2c md" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c md {i2c_chip} {addr}{.0, .1, .2} {len} */ static int do_i2c_md(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint addr, length; int alen; int j, nbytes, linebytes; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif /* We use the last specified parameters, unless new ones are * entered. */ chip = i2c_dp_last_chip; addr = i2c_dp_last_addr; alen = i2c_dp_last_alen; length = i2c_dp_last_length; if (argc < 3) return CMD_RET_USAGE; if ((flag & CMD_FLAG_REPEAT) == 0) { /* * New command specified. */ /* * I2C chip address */ chip = hextoul(argv[1], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; /* * If another parameter, it is the length to display. * Length is the number of objects, not number of bytes. */ if (argc > 3) length = hextoul(argv[3], NULL); } #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Print the lines. * * We buffer all read data, so we can make sure data is read only * once. */ nbytes = length; do { unsigned char linebuf[DISP_LINE_LEN]; unsigned char *cp; linebytes = (nbytes > DISP_LINE_LEN) ? 
DISP_LINE_LEN : nbytes; #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, linebuf, linebytes); #else ret = i2c_read(chip, addr, alen, linebuf, linebytes); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); else { printf("%04x:", addr); cp = linebuf; for (j=0; j<linebytes; j++) { printf(" %02x", *cp++); addr++; } puts (" "); cp = linebuf; for (j=0; j<linebytes; j++) { if ((*cp < 0x20) || (*cp > 0x7e)) puts ("."); else printf("%c", *cp); cp++; } putc ('\n'); } nbytes -= linebytes; } while (nbytes > 0); i2c_dp_last_chip = chip; i2c_dp_last_addr = addr; i2c_dp_last_alen = alen; i2c_dp_last_length = length; return 0; } /** * do_i2c_mw() - Handle the "i2c mw" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c mw {i2c_chip} {addr}{.0, .1, .2} {data} [{count}] */ static int do_i2c_mw(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; int alen; uchar byte; int count; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if ((argc < 4) || (argc > 5)) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Value to write is always specified. */ byte = hextoul(argv[3], NULL); /* * Optional count */ if (argc == 5) count = hextoul(argv[4], NULL); else count = 1; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_write(dev, addr++, &byte, 1); #else ret = i2c_write(chip, addr++, alen, &byte, 1); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); /* * Wait for the write to complete. The write can take * up to 10mSec (we allow a little more time). */ /* * No write delay with FRAM devices. */ #if !defined(CONFIG_SYS_I2C_FRAM) udelay(11000); #endif } return 0; } /** * do_i2c_crc() - Handle the "i2c crc32" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Calculate a CRC on memory * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c crc32 {i2c_chip} {addr}{.0, .1, .2} {count} */ static int do_i2c_crc(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; int alen; int count; uchar byte; ulong crc; ulong err; int ret = 0; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 4) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Count is always specified */ count = hextoul(argv[3], NULL); printf ("CRC32 for %08lx ... %08lx ==> ", addr, addr + count - 1); /* * CRC a byte at a time. 
This is going to be slooow, but hey, the * memories are small and slow too so hopefully nobody notices. */ crc = 0; err = 0; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, &byte, 1); #else ret = i2c_read(chip, addr, alen, &byte, 1); #endif if (ret) err++; crc = crc32(crc, &byte, 1); addr++; } if (err > 0) i2c_report_err(ret, I2C_ERR_READ); else printf ("%08lx\n", crc); return 0; } /** * mod_i2c_mem() - Handle the "i2c mm" and "i2c nm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Modify memory. * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c mm{.b, .w, .l} {i2c_chip} {addr}{.0, .1, .2} * i2c nm{.b, .w, .l} {i2c_chip} {addr}{.0, .1, .2} */ static int mod_i2c_mem(struct cmd_tbl *cmdtp, int incrflag, int flag, int argc, char *const argv[]) { uint chip; ulong addr; int alen; ulong data; int size = 1; int nbytes; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc != 3) return CMD_RET_USAGE; bootretry_reset_cmd_timeout(); /* got a good command to get here */ /* * We use the last specified parameters, unless new ones are * entered. */ chip = i2c_mm_last_chip; addr = i2c_mm_last_addr; alen = i2c_mm_last_alen; if ((flag & CMD_FLAG_REPEAT) == 0) { /* * New command specified. Check for a size specification. * Defaults to byte if no or incorrect specification. */ size = cmd_get_data_size(argv[0], 1); /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; } #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Print the address, followed by value. Then accept input for * the next value. A non-converted value exits. */ do { printf("%08lx:", addr); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, (uchar *)&data, size); #else ret = i2c_read(chip, addr, alen, (uchar *)&data, size); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); data = cpu_to_be32(data); if (size == 1) printf(" %02lx", (data >> 24) & 0x000000FF); else if (size == 2) printf(" %04lx", (data >> 16) & 0x0000FFFF); else printf(" %08lx", data); nbytes = cli_readline(" ? "); if (nbytes == 0) { /* * <CR> pressed as only input, don't modify current * location and move to next. 
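 * ("i2c mm" then advances to the next address, incrflag being set;
 * "i2c nm" keeps pointing at the same location.)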
*/ if (incrflag) addr += size; nbytes = size; /* good enough to not time out */ bootretry_reset_cmd_timeout(); } #ifdef CONFIG_BOOT_RETRY_TIME else if (nbytes == -2) break; /* timed out, exit the command */ #endif else { char *endp; data = hextoul(console_buffer, &endp); if (size == 1) data = data << 24; else if (size == 2) data = data << 16; data = be32_to_cpu(data); nbytes = endp - console_buffer; if (nbytes) { /* * good enough to not time out */ bootretry_reset_cmd_timeout(); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_write(dev, addr, (uchar *)&data, size); #else ret = i2c_write(chip, addr, alen, (uchar *)&data, size); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #if CONFIG_SYS_EEPROM_PAGE_WRITE_DELAY_MS > 0 udelay(CONFIG_SYS_EEPROM_PAGE_WRITE_DELAY_MS * 1000); #endif if (incrflag) addr += size; } } } while (nbytes); i2c_mm_last_chip = chip; i2c_mm_last_addr = addr; i2c_mm_last_alen = alen; return 0; } /** * do_i2c_probe() - Handle the "i2c probe" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c probe {addr} * * Returns zero (success) if one or more I2C devices was found */ static int do_i2c_probe(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int j; int addr = -1; int found = 0; #if defined(CONFIG_SYS_I2C_NOPROBES) int k, skip; unsigned int bus = GET_BUS_NUM; #endif /* NOPROBES */ int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus, *dev; if (i2c_get_cur_bus(&bus)) return CMD_RET_FAILURE; #endif if (argc == 2) addr = simple_strtol(argv[1], 0, 16); puts ("Valid chip addresses:"); for (j = 0; j < 128; j++) { if ((0 <= addr) && (j != addr)) continue; #if defined(CONFIG_SYS_I2C_NOPROBES) skip = 0; for (k = 0; k < ARRAY_SIZE(i2c_no_probes); k++) { if (COMPARE_BUS(bus, k) && COMPARE_ADDR(j, k)) { skip = 1; break; } } if (skip) continue; #endif #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_probe(bus, j, 0, &dev); #else ret = i2c_probe(j); #endif if (ret == 0) { printf(" %02X", j); found++; } } putc ('\n'); #if defined(CONFIG_SYS_I2C_NOPROBES) puts ("Excluded chip addresses:"); for (k = 0; k < ARRAY_SIZE(i2c_no_probes); k++) { if (COMPARE_BUS(bus,k)) printf(" %02X", NO_PROBE_ADDR(k)); } putc ('\n'); #endif return (0 == found); } /** * do_i2c_loop() - Handle the "i2c loop" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c loop {i2c_chip} {addr}{.0, .1, .2} [{length}] [{delay}] * {length} - Number of bytes to read * {delay} - A DECIMAL number and defaults to 1000 uSec */ static int do_i2c_loop(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; int alen; uint addr; uint length; u_char bytes[16]; int delay; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 3) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. 
 */
	addr = hextoul(argv[2], NULL);
	alen = get_alen(argv[2], DEFAULT_ADDR_LEN);
	if (alen > 3)
		return CMD_RET_USAGE;

#if CONFIG_IS_ENABLED(DM_I2C)
	ret = i2c_get_cur_bus_chip(chip, &dev);
	if (!ret && alen != -1)
		ret = i2c_set_chip_offset_len(dev, alen);
	if (ret)
		return i2c_report_err(ret, I2C_ERR_WRITE);
#endif

	/*
	 * Length is the number of objects, not number of bytes.
	 */
	length = 1;
	if (argc > 3)
		length = hextoul(argv[3], NULL);
	if (length > sizeof(bytes))
		length = sizeof(bytes);

	/*
	 * The delay time (uSec) is optional.
	 */
	delay = 1000;
	if (argc > 4)
		delay = dectoul(argv[4], NULL);

	/*
	 * Run the loop...
	 */
	while (1) {
#if CONFIG_IS_ENABLED(DM_I2C)
		ret = dm_i2c_read(dev, addr, bytes, length);
#else
		ret = i2c_read(chip, addr, alen, bytes, length);
#endif
		if (ret)
			i2c_report_err(ret, I2C_ERR_READ);
		udelay(delay);
	}

	/* NOTREACHED */
	return 0;
}

/*
 * The SDRAM command is separately configured because many
 * (most?) embedded boards don't use SDRAM DIMMs.
 *
 * FIXME: Document and probably move elsewhere!
 */
#if defined(CONFIG_CMD_SDRAM)
static void print_ddr2_tcyc (u_char const b)
{
	printf ("%d.", (b >> 4) & 0x0F);
	switch (b & 0x0F) {
	case 0x0:
	case 0x1:
	case 0x2:
	case 0x3:
	case 0x4:
	case 0x5:
	case 0x6:
	case 0x7:
	case 0x8:
	case 0x9:
		printf ("%d ns\n", b & 0x0F);
		break;
	case 0xA:
		puts ("25 ns\n");
		break;
	case 0xB:
		puts ("33 ns\n");
		break;
	case 0xC:
		puts ("66 ns\n");
		break;
	case 0xD:
		puts ("75 ns\n");
		break;
	default:
		puts ("?? ns\n");
		break;
	}
}

static void decode_bits (u_char const b, char const *str[], int const do_once)
{
	u_char mask;

	for (mask = 0x80; mask != 0x00; mask >>= 1, ++str) {
		if (b & mask) {
			puts (*str);
			if (do_once)
				return;
		}
	}
}

/*
 * Syntax:
 *	i2c sdram {i2c_chip}
 */
static int do_sdram(struct cmd_tbl *cmdtp, int flag, int argc,
		    char *const argv[])
{
	enum { unknown, EDO, SDRAM, DDR, DDR2, DDR3, DDR4 } type;
	uint chip;
	u_char data[128];
	u_char cksum;
	int j, ret;
#if CONFIG_IS_ENABLED(DM_I2C)
	struct udevice *dev;
#endif

	static const char *decode_CAS_DDR2[] = {
		" TBD", " 6", " 5", " 4", " 3", " 2", " TBD", " TBD"
	};

	static const char *decode_CAS_default[] = {
		" TBD", " 7", " 6", " 5", " 4", " 3", " 2", " 1"
	};

	static const char *decode_CS_WE_default[] = {
		" TBD", " 6", " 5", " 4", " 3", " 2", " 1", " 0"
	};

	static const char *decode_byte21_default[] = {
		" TBD (bit 7)\n",
		" Redundant row address\n",
		" Differential clock input\n",
		" Registered DQMB inputs\n",
		" Buffered DQMB inputs\n",
		" On-card PLL\n",
		" Registered address/control lines\n",
		" Buffered address/control lines\n"
	};

	static const char *decode_byte22_DDR2[] = {
		" TBD (bit 7)\n",
		" TBD (bit 6)\n",
		" TBD (bit 5)\n",
		" TBD (bit 4)\n",
		" TBD (bit 3)\n",
		" Supports partial array self refresh\n",
		" Supports 50 ohm ODT\n",
		" Supports weak driver\n"
	};

	static const char *decode_row_density_DDR2[] = {
		"512 MiB", "256 MiB", "128 MiB", "16 GiB",
		"8 GiB", "4 GiB", "2 GiB", "1 GiB"
	};

	static const char *decode_row_density_default[] = {
		"512 MiB", "256 MiB", "128 MiB", "64 MiB",
		"32 MiB", "16 MiB", "8 MiB", "4 MiB"
	};

	if (argc < 2)
		return CMD_RET_USAGE;

	/*
	 * Chip is always specified.
*/ chip = hextoul(argv[1], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret) ret = dm_i2c_read(dev, 0, data, sizeof(data)); #else ret = i2c_read(chip, 0, 1, data, sizeof(data)); #endif if (ret) { puts ("No SDRAM Serial Presence Detect found.\n"); return 1; } cksum = 0; for (j = 0; j < 63; j++) { cksum += data[j]; } if (cksum != data[63]) { printf ("WARNING: Configuration data checksum failure:\n" " is 0x%02x, calculated 0x%02x\n", data[63], cksum); } printf ("SPD data revision %d.%d\n", (data[62] >> 4) & 0x0F, data[62] & 0x0F); printf ("Bytes used 0x%02X\n", data[0]); printf ("Serial memory size 0x%02X\n", 1 << data[1]); puts ("Memory type "); switch (data[2]) { case 2: type = EDO; puts ("EDO\n"); break; case 4: type = SDRAM; puts ("SDRAM\n"); break; case 7: type = DDR; puts("DDR\n"); break; case 8: type = DDR2; puts ("DDR2\n"); break; case 11: type = DDR3; puts("DDR3\n"); break; case 12: type = DDR4; puts("DDR4\n"); break; default: type = unknown; puts ("unknown\n"); break; } puts ("Row address bits "); if ((data[3] & 0x00F0) == 0) printf ("%d\n", data[3] & 0x0F); else printf ("%d/%d\n", data[3] & 0x0F, (data[3] >> 4) & 0x0F); puts ("Column address bits "); if ((data[4] & 0x00F0) == 0) printf ("%d\n", data[4] & 0x0F); else printf ("%d/%d\n", data[4] & 0x0F, (data[4] >> 4) & 0x0F); switch (type) { case DDR2: printf ("Number of ranks %d\n", (data[5] & 0x07) + 1); break; default: printf ("Module rows %d\n", data[5]); break; } switch (type) { case DDR2: printf ("Module data width %d bits\n", data[6]); break; default: printf ("Module data width %d bits\n", (data[7] << 8) | data[6]); break; } puts ("Interface signal levels "); switch(data[8]) { case 0: puts ("TTL 5.0 V\n"); break; case 1: puts ("LVTTL\n"); break; case 2: puts ("HSTL 1.5 V\n"); break; case 3: puts ("SSTL 3.3 V\n"); break; case 4: puts ("SSTL 2.5 V\n"); break; case 5: puts ("SSTL 1.8 V\n"); break; default: puts ("unknown\n"); break; } switch (type) { case DDR2: printf ("SDRAM cycle time "); print_ddr2_tcyc (data[9]); break; default: printf ("SDRAM cycle time %d.%d ns\n", (data[9] >> 4) & 0x0F, data[9] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM access time 0.%d%d ns\n", (data[10] >> 4) & 0x0F, data[10] & 0x0F); break; default: printf ("SDRAM access time %d.%d ns\n", (data[10] >> 4) & 0x0F, data[10] & 0x0F); break; } puts ("EDC configuration "); switch (data[11]) { case 0: puts ("None\n"); break; case 1: puts ("Parity\n"); break; case 2: puts ("ECC\n"); break; default: puts ("unknown\n"); break; } if ((data[12] & 0x80) == 0) puts ("No self refresh, rate "); else puts ("Self refresh, rate "); switch(data[12] & 0x7F) { case 0: puts ("15.625 us\n"); break; case 1: puts ("3.9 us\n"); break; case 2: puts ("7.8 us\n"); break; case 3: puts ("31.3 us\n"); break; case 4: puts ("62.5 us\n"); break; case 5: puts ("125 us\n"); break; default: puts ("unknown\n"); break; } switch (type) { case DDR2: printf ("SDRAM width (primary) %d\n", data[13]); break; default: printf ("SDRAM width (primary) %d\n", data[13] & 0x7F); if ((data[13] & 0x80) != 0) { printf (" (second bank) %d\n", 2 * (data[13] & 0x7F)); } break; } switch (type) { case DDR2: if (data[14] != 0) printf ("EDC width %d\n", data[14]); break; default: if (data[14] != 0) { printf ("EDC width %d\n", data[14] & 0x7F); if ((data[14] & 0x80) != 0) { printf (" (second bank) %d\n", 2 * (data[14] & 0x7F)); } } break; } if (DDR2 != type) { printf ("Min clock delay, back-to-back random column addresses " "%d\n", data[15]); } puts ("Burst 
length(s) "); if (data[16] & 0x80) puts (" Page"); if (data[16] & 0x08) puts (" 8"); if (data[16] & 0x04) puts (" 4"); if (data[16] & 0x02) puts (" 2"); if (data[16] & 0x01) puts (" 1"); putc ('\n'); printf ("Number of banks %d\n", data[17]); switch (type) { case DDR2: puts ("CAS latency(s) "); decode_bits (data[18], decode_CAS_DDR2, 0); putc ('\n'); break; default: puts ("CAS latency(s) "); decode_bits (data[18], decode_CAS_default, 0); putc ('\n'); break; } if (DDR2 != type) { puts ("CS latency(s) "); decode_bits (data[19], decode_CS_WE_default, 0); putc ('\n'); } if (DDR2 != type) { puts ("WE latency(s) "); decode_bits (data[20], decode_CS_WE_default, 0); putc ('\n'); } switch (type) { case DDR2: puts ("Module attributes:\n"); if (data[21] & 0x80) puts (" TBD (bit 7)\n"); if (data[21] & 0x40) puts (" Analysis probe installed\n"); if (data[21] & 0x20) puts (" TBD (bit 5)\n"); if (data[21] & 0x10) puts (" FET switch external enable\n"); printf (" %d PLLs on DIMM\n", (data[21] >> 2) & 0x03); if (data[20] & 0x11) { printf (" %d active registers on DIMM\n", (data[21] & 0x03) + 1); } break; default: puts ("Module attributes:\n"); if (!data[21]) puts (" (none)\n"); else decode_bits (data[21], decode_byte21_default, 0); break; } switch (type) { case DDR2: decode_bits (data[22], decode_byte22_DDR2, 0); break; default: puts ("Device attributes:\n"); if (data[22] & 0x80) puts (" TBD (bit 7)\n"); if (data[22] & 0x40) puts (" TBD (bit 6)\n"); if (data[22] & 0x20) puts (" Upper Vcc tolerance 5%\n"); else puts (" Upper Vcc tolerance 10%\n"); if (data[22] & 0x10) puts (" Lower Vcc tolerance 5%\n"); else puts (" Lower Vcc tolerance 10%\n"); if (data[22] & 0x08) puts (" Supports write1/read burst\n"); if (data[22] & 0x04) puts (" Supports precharge all\n"); if (data[22] & 0x02) puts (" Supports auto precharge\n"); if (data[22] & 0x01) puts (" Supports early RAS# precharge\n"); break; } switch (type) { case DDR2: printf ("SDRAM cycle time (2nd highest CAS latency) "); print_ddr2_tcyc (data[23]); break; default: printf ("SDRAM cycle time (2nd highest CAS latency) %d." "%d ns\n", (data[23] >> 4) & 0x0F, data[23] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM access from clock (2nd highest CAS latency) 0." "%d%d ns\n", (data[24] >> 4) & 0x0F, data[24] & 0x0F); break; default: printf ("SDRAM access from clock (2nd highest CAS latency) %d." "%d ns\n", (data[24] >> 4) & 0x0F, data[24] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM cycle time (3rd highest CAS latency) "); print_ddr2_tcyc (data[25]); break; default: printf ("SDRAM cycle time (3rd highest CAS latency) %d." "%d ns\n", (data[25] >> 4) & 0x0F, data[25] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM access from clock (3rd highest CAS latency) 0." "%d%d ns\n", (data[26] >> 4) & 0x0F, data[26] & 0x0F); break; default: printf ("SDRAM access from clock (3rd highest CAS latency) %d." 
"%d ns\n", (data[26] >> 4) & 0x0F, data[26] & 0x0F); break; } switch (type) { case DDR2: printf ("Minimum row precharge %d.%02d ns\n", (data[27] >> 2) & 0x3F, 25 * (data[27] & 0x03)); break; default: printf ("Minimum row precharge %d ns\n", data[27]); break; } switch (type) { case DDR2: printf ("Row active to row active min %d.%02d ns\n", (data[28] >> 2) & 0x3F, 25 * (data[28] & 0x03)); break; default: printf ("Row active to row active min %d ns\n", data[28]); break; } switch (type) { case DDR2: printf ("RAS to CAS delay min %d.%02d ns\n", (data[29] >> 2) & 0x3F, 25 * (data[29] & 0x03)); break; default: printf ("RAS to CAS delay min %d ns\n", data[29]); break; } printf ("Minimum RAS pulse width %d ns\n", data[30]); switch (type) { case DDR2: puts ("Density of each row "); decode_bits (data[31], decode_row_density_DDR2, 1); putc ('\n'); break; default: puts ("Density of each row "); decode_bits (data[31], decode_row_density_default, 1); putc ('\n'); break; } switch (type) { case DDR2: puts ("Command and Address setup "); if (data[32] >= 0xA0) { printf ("1.%d%d ns\n", ((data[32] >> 4) & 0x0F) - 10, data[32] & 0x0F); } else { printf ("0.%d%d ns\n", ((data[32] >> 4) & 0x0F), data[32] & 0x0F); } break; default: printf ("Command and Address setup %c%d.%d ns\n", (data[32] & 0x80) ? '-' : '+', (data[32] >> 4) & 0x07, data[32] & 0x0F); break; } switch (type) { case DDR2: puts ("Command and Address hold "); if (data[33] >= 0xA0) { printf ("1.%d%d ns\n", ((data[33] >> 4) & 0x0F) - 10, data[33] & 0x0F); } else { printf ("0.%d%d ns\n", ((data[33] >> 4) & 0x0F), data[33] & 0x0F); } break; default: printf ("Command and Address hold %c%d.%d ns\n", (data[33] & 0x80) ? '-' : '+', (data[33] >> 4) & 0x07, data[33] & 0x0F); break; } switch (type) { case DDR2: printf ("Data signal input setup 0.%d%d ns\n", (data[34] >> 4) & 0x0F, data[34] & 0x0F); break; default: printf ("Data signal input setup %c%d.%d ns\n", (data[34] & 0x80) ? '-' : '+', (data[34] >> 4) & 0x07, data[34] & 0x0F); break; } switch (type) { case DDR2: printf ("Data signal input hold 0.%d%d ns\n", (data[35] >> 4) & 0x0F, data[35] & 0x0F); break; default: printf ("Data signal input hold %c%d.%d ns\n", (data[35] & 0x80) ? '-' : '+', (data[35] >> 4) & 0x07, data[35] & 0x0F); break; } puts ("Manufacturer's JEDEC ID "); for (j = 64; j <= 71; j++) printf ("%02X ", data[j]); putc ('\n'); printf ("Manufacturing Location %02X\n", data[72]); puts ("Manufacturer's Part Number "); for (j = 73; j <= 90; j++) printf ("%02X ", data[j]); putc ('\n'); printf ("Revision Code %02X %02X\n", data[91], data[92]); printf ("Manufacturing Date %02X %02X\n", data[93], data[94]); puts ("Assembly Serial Number "); for (j = 95; j <= 98; j++) printf ("%02X ", data[j]); putc ('\n'); if (DDR2 != type) { printf ("Speed rating PC%d\n", data[126] == 0x66 ? 
66 : data[126]); } return 0; } #endif /* * Syntax: * i2c edid {i2c_chip} */ #if defined(CONFIG_I2C_EDID) int do_edid(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; struct edid1_info edid; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 2) { cmd_usage(cmdtp); return 1; } chip = hextoul(argv[1], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret) ret = dm_i2c_read(dev, 0, (uchar *)&edid, sizeof(edid)); #else ret = i2c_read(chip, 0, 1, (uchar *)&edid, sizeof(edid)); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (edid_check_info(&edid)) { puts("Content isn't valid EDID.\n"); return 1; } edid_print_info(&edid); return 0; } #endif /* CONFIG_I2C_EDID */ #if CONFIG_IS_ENABLED(DM_I2C) static void show_bus(struct udevice *bus) { struct udevice *dev; printf("Bus %d:\t%s", dev_seq(bus), bus->name); if (device_active(bus)) printf(" (active %d)", dev_seq(bus)); printf("\n"); for (device_find_first_child(bus, &dev); dev; device_find_next_child(&dev)) { struct dm_i2c_chip *chip = dev_get_parent_plat(dev); printf(" %02x: %s, offset len %x, flags %x\n", chip->chip_addr, dev->name, chip->offset_len, chip->flags); } } #endif /** * do_i2c_show_bus() - Handle the "i2c bus" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero always. */ #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_show_bus(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { if (argc == 1) { /* show all busses */ #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; struct uclass *uc; int ret; ret = uclass_get(UCLASS_I2C, &uc); if (ret) return CMD_RET_FAILURE; uclass_foreach_dev(bus, uc) show_bus(bus); #else int i; for (i = 0; i < CONFIG_SYS_NUM_I2C_BUSES; i++) { printf("Bus %d:\t%s", i, I2C_ADAP_NR(i)->name); #ifndef CONFIG_SYS_I2C_DIRECT_BUS int j; for (j = 0; j < CONFIG_SYS_I2C_MAX_HOPS; j++) { if (i2c_bus[i].next_hop[j].chip == 0) break; printf("->%s@0x%2x:%d", i2c_bus[i].next_hop[j].mux.name, i2c_bus[i].next_hop[j].chip, i2c_bus[i].next_hop[j].channel); } #endif printf("\n"); } #endif } else { int i; /* show specific bus */ i = dectoul(argv[1], NULL); #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; int ret; ret = uclass_get_device_by_seq(UCLASS_I2C, i, &bus); if (ret) { printf("Invalid bus %d: err=%d\n", i, ret); return CMD_RET_FAILURE; } show_bus(bus); #else if (i >= CONFIG_SYS_NUM_I2C_BUSES) { printf("Invalid bus %d\n", i); return -1; } printf("Bus %d:\t%s", i, I2C_ADAP_NR(i)->name); #ifndef CONFIG_SYS_I2C_DIRECT_BUS int j; for (j = 0; j < CONFIG_SYS_I2C_MAX_HOPS; j++) { if (i2c_bus[i].next_hop[j].chip == 0) break; printf("->%s@0x%2x:%d", i2c_bus[i].next_hop[j].mux.name, i2c_bus[i].next_hop[j].chip, i2c_bus[i].next_hop[j].channel); } #endif printf("\n"); #endif } return 0; } #endif /** * do_i2c_bus_num() - Handle the "i2c dev" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. 
*/ #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || defined(CONFIG_I2C_MULTI_BUS) || \ CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_bus_num(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int ret = 0; int bus_no; if (argc == 1) { /* querying current setting */ #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; if (!i2c_get_cur_bus(&bus)) bus_no = dev_seq(bus); else bus_no = -1; #else bus_no = i2c_get_bus_num(); #endif printf("Current bus is %d\n", bus_no); } else { bus_no = dectoul(argv[1], NULL); #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) if (bus_no >= CONFIG_SYS_NUM_I2C_BUSES) { printf("Invalid bus %d\n", bus_no); return -1; } #endif printf("Setting bus to %d\n", bus_no); #if CONFIG_IS_ENABLED(DM_I2C) ret = cmd_i2c_set_bus_num(bus_no); #else ret = i2c_set_bus_num(bus_no); #endif if (ret) printf("Failure changing bus number (%d)\n", ret); } return ret ? CMD_RET_FAILURE : 0; } #endif /* CONFIG_IS_ENABLED(SYS_I2C_LEGACY) */ /** * do_i2c_bus_speed() - Handle the "i2c speed" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c_bus_speed(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int speed, ret=0; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; if (i2c_get_cur_bus(&bus)) return 1; #endif if (argc == 1) { #if CONFIG_IS_ENABLED(DM_I2C) speed = dm_i2c_get_bus_speed(bus); #else speed = i2c_get_bus_speed(); #endif /* querying current speed */ printf("Current bus speed=%d\n", speed); } else { speed = dectoul(argv[1], NULL); printf("Setting bus speed to %d Hz\n", speed); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_set_bus_speed(bus, speed); #else ret = i2c_set_bus_speed(speed); #endif if (ret) printf("Failure changing bus speed (%d)\n", ret); } return ret ? CMD_RET_FAILURE : 0; } /** * do_i2c_mm() - Handle the "i2c mm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c_mm(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { return mod_i2c_mem (cmdtp, 1, flag, argc, argv); } /** * do_i2c_nm() - Handle the "i2c nm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c_nm(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { return mod_i2c_mem (cmdtp, 0, flag, argc, argv); } /** * do_i2c_reset() - Handle the "i2c reset" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero always. 
*/ static int do_i2c_reset(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; if (i2c_get_cur_bus(&bus)) return CMD_RET_FAILURE; if (i2c_deblock(bus)) { printf("Error: Not supported by the driver\n"); return CMD_RET_FAILURE; } #elif CONFIG_IS_ENABLED(SYS_I2C_LEGACY) i2c_init(I2C_ADAP->speed, I2C_ADAP->slaveaddr); #endif return 0; } static struct cmd_tbl cmd_i2c_sub[] = { #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || CONFIG_IS_ENABLED(DM_I2C) U_BOOT_CMD_MKENT(bus, 1, 1, do_i2c_show_bus, "", ""), #endif U_BOOT_CMD_MKENT(crc32, 3, 1, do_i2c_crc, "", ""), #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || \ defined(CONFIG_I2C_MULTI_BUS) || CONFIG_IS_ENABLED(DM_I2C) U_BOOT_CMD_MKENT(dev, 1, 1, do_i2c_bus_num, "", ""), #endif /* CONFIG_I2C_MULTI_BUS */ #if defined(CONFIG_I2C_EDID) U_BOOT_CMD_MKENT(edid, 1, 1, do_edid, "", ""), #endif /* CONFIG_I2C_EDID */ U_BOOT_CMD_MKENT(loop, 3, 1, do_i2c_loop, "", ""), U_BOOT_CMD_MKENT(md, 3, 1, do_i2c_md, "", ""), U_BOOT_CMD_MKENT(mm, 2, 1, do_i2c_mm, "", ""), U_BOOT_CMD_MKENT(mw, 3, 1, do_i2c_mw, "", ""), U_BOOT_CMD_MKENT(nm, 2, 1, do_i2c_nm, "", ""), U_BOOT_CMD_MKENT(probe, 0, 1, do_i2c_probe, "", ""), U_BOOT_CMD_MKENT(read, 5, 1, do_i2c_read, "", ""), U_BOOT_CMD_MKENT(write, 6, 0, do_i2c_write, "", ""), #if CONFIG_IS_ENABLED(DM_I2C) U_BOOT_CMD_MKENT(flags, 2, 1, do_i2c_flags, "", ""), U_BOOT_CMD_MKENT(olen, 2, 1, do_i2c_olen, "", ""), #endif U_BOOT_CMD_MKENT(reset, 0, 1, do_i2c_reset, "", ""), #if defined(CONFIG_CMD_SDRAM) U_BOOT_CMD_MKENT(sdram, 1, 1, do_sdram, "", ""), #endif U_BOOT_CMD_MKENT(speed, 1, 1, do_i2c_bus_speed, "", ""), }; static __maybe_unused void i2c_reloc(void) { static int relocated; if (!relocated) { fixup_cmdtable(cmd_i2c_sub, ARRAY_SIZE(cmd_i2c_sub)); relocated = 1; }; } /** * do_i2c() - Handle the "i2c" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct cmd_tbl *c; #ifdef CONFIG_NEEDS_MANUAL_RELOC i2c_reloc(); #endif if (argc < 2) return CMD_RET_USAGE; /* Strip off leading 'i2c' command argument */ argc--; argv++; c = find_cmd_tbl(argv[0], &cmd_i2c_sub[0], ARRAY_SIZE(cmd_i2c_sub)); if (c) return c->cmd(cmdtp, flag, argc, argv); else return CMD_RET_USAGE; } /***************************************************/ #ifdef CONFIG_SYS_LONGHELP static char i2c_help_text[] = #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || CONFIG_IS_ENABLED(DM_I2C) "bus [muxtype:muxaddr:muxchannel] - show I2C bus info\n" "i2c " /* That's the prefix for the crc32 command below. 
*/ #endif "crc32 chip address[.0, .1, .2] count - compute CRC32 checksum\n" #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || \ defined(CONFIG_I2C_MULTI_BUS) || CONFIG_IS_ENABLED(DM_I2C) "i2c dev [dev] - show or set current I2C bus\n" #endif /* CONFIG_I2C_MULTI_BUS */ #if defined(CONFIG_I2C_EDID) "i2c edid chip - print EDID configuration information\n" #endif /* CONFIG_I2C_EDID */ "i2c loop chip address[.0, .1, .2] [# of objects] - looping read of device\n" "i2c md chip address[.0, .1, .2] [# of objects] - read from I2C device\n" "i2c mm chip address[.0, .1, .2] - write to I2C device (auto-incrementing)\n" "i2c mw chip address[.0, .1, .2] value [count] - write to I2C device (fill)\n" "i2c nm chip address[.0, .1, .2] - write to I2C device (constant address)\n" "i2c probe [address] - test for and show device(s) on the I2C bus\n" "i2c read chip address[.0, .1, .2] length memaddress - read to memory\n" "i2c write memaddress chip address[.0, .1, .2] length [-s] - write memory\n" " to I2C; the -s option selects bulk write in a single transaction\n" #if CONFIG_IS_ENABLED(DM_I2C) "i2c flags chip [flags] - set or get chip flags\n" "i2c olen chip [offset_length] - set or get chip offset length\n" #endif "i2c reset - re-init the I2C Controller\n" #if defined(CONFIG_CMD_SDRAM) "i2c sdram chip - print SDRAM configuration information\n" #endif "i2c speed [speed] - show or set I2C bus speed"; #endif U_BOOT_CMD( i2c, 7, 1, do_i2c, "I2C sub-system", i2c_help_text );
// SPDX-License-Identifier: GPL-2.0+ /* * (C) Copyright 2009 * Sergey Kubushyn, himself, ksi@koi8.net * * Changes for unified multibus/multiadapter I2C support. * * (C) Copyright 2001 * Gerald Van Baren, Custom IDEAS, vanbaren@cideas.com. */ /* * I2C Functions similar to the standard memory functions. * * There are several parameters in many of the commands that bear further * explanations: * * {i2c_chip} is the I2C chip address (the first byte sent on the bus). * Each I2C chip on the bus has a unique address. On the I2C data bus, * the address is the upper seven bits and the LSB is the "read/write" * bit. Note that the {i2c_chip} address specified on the command * line is not shifted up: e.g. a typical EEPROM memory chip may have * an I2C address of 0x50, but the data put on the bus will be 0xA0 * for write and 0xA1 for read. This "non shifted" address notation * matches at least half of the data sheets :-/. * * {addr} is the address (or offset) within the chip. Small memory * chips have 8 bit addresses. Large memory chips have 16 bit * addresses. Other memory chips have 9, 10, or 11 bit addresses. * Many non-memory chips have multiple registers and {addr} is used * as the register index. Some non-memory chips have only one register * and therefore don't need any {addr} parameter. * * The default {addr} parameter is one byte (.1) which works well for * memories and registers with 8 bits of address space. * * You can specify the length of the {addr} field with the optional .0, * .1, or .2 modifier (similar to the .b, .w, .l modifier). If you are * manipulating a single register device which doesn't use an address * field, use "0.0" for the address and the ".0" length field will * suppress the address in the I2C data stream. This also works for * successive reads using the I2C auto-incrementing memory pointer. * * If you are manipulating a large memory with 2-byte addresses, use * the .2 address modifier, e.g. 210.2 addresses location 528 (decimal). * * Then there are the unfortunate memory chips that spill the most * significant 1, 2, or 3 bits of address into the chip address byte. * This effectively makes one chip (logically) look like 2, 4, or * 8 chips. This is handled (awkwardly) by #defining * CONFIG_SYS_I2C_EEPROM_ADDR_OVERFLOW and using the .1 modifier on the * {addr} field (since .1 is the default, it doesn't actually have to * be specified). Examples: given a memory chip at I2C chip address * 0x50, the following would happen... * i2c md 50 0 10 display 16 bytes starting at 0x000 * On the bus: <S> A0 00 <E> <S> A1 <rd> ... <rd> * i2c md 50 100 10 display 16 bytes starting at 0x100 * On the bus: <S> A2 00 <E> <S> A3 <rd> ... <rd> * i2c md 50 210 10 display 16 bytes starting at 0x210 * On the bus: <S> A4 10 <E> <S> A5 <rd> ... <rd> * This is awfully ugly. It would be nice if someone would think up * a better way of handling this. * * Adapted from cmd_mem.c which is copyright Wolfgang Denk (wd@denx.de). */ #include <common.h> #include <bootretry.h> #include <cli.h> #include <command.h> #include <console.h> #include <dm.h> #include <edid.h> #include <errno.h> #include <i2c.h> #include <log.h> #include <malloc.h> #include <asm/byteorder.h> #include <linux/compiler.h> #include <linux/delay.h> #include <u-boot/crc.h> /* Display values from last command. * Memory modify remembered values are different from display memory. 
*/ static uint i2c_dp_last_chip; static uint i2c_dp_last_addr; static uint i2c_dp_last_alen; static uint i2c_dp_last_length = 0x10; static uint i2c_mm_last_chip; static uint i2c_mm_last_addr; static uint i2c_mm_last_alen; /* If only one I2C bus is present, the list of devices to ignore when * the probe command is issued is represented by a 1D array of addresses. * When multiple buses are present, the list is an array of bus-address * pairs. The following macros take care of this */ #if defined(CONFIG_SYS_I2C_NOPROBES) #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || defined(CONFIG_I2C_MULTI_BUS) static struct { uchar bus; uchar addr; } i2c_no_probes[] = CONFIG_SYS_I2C_NOPROBES; #define GET_BUS_NUM i2c_get_bus_num() #define COMPARE_BUS(b,i) (i2c_no_probes[(i)].bus == (b)) #define COMPARE_ADDR(a,i) (i2c_no_probes[(i)].addr == (a)) #define NO_PROBE_ADDR(i) i2c_no_probes[(i)].addr #else /* single bus */ static uchar i2c_no_probes[] = CONFIG_SYS_I2C_NOPROBES; #define GET_BUS_NUM 0 #define COMPARE_BUS(b,i) ((b) == 0) /* Make compiler happy */ #define COMPARE_ADDR(a,i) (i2c_no_probes[(i)] == (a)) #define NO_PROBE_ADDR(i) i2c_no_probes[(i)] #endif /* CONFIG_IS_ENABLED(SYS_I2C_LEGACY) */ #endif #define DISP_LINE_LEN 16 /* * Default for driver model is to use the chip's existing address length. * For legacy code, this is not stored, so we need to use a suitable * default. */ #if CONFIG_IS_ENABLED(DM_I2C) #define DEFAULT_ADDR_LEN (-1) #else #define DEFAULT_ADDR_LEN 1 #endif #if CONFIG_IS_ENABLED(DM_I2C) static struct udevice *i2c_cur_bus; static int cmd_i2c_set_bus_num(unsigned int busnum) { struct udevice *bus; int ret; ret = uclass_get_device_by_seq(UCLASS_I2C, busnum, &bus); if (ret) { debug("%s: No bus %d\n", __func__, busnum); return ret; } i2c_cur_bus = bus; return 0; } static int i2c_get_cur_bus(struct udevice **busp) { #ifdef CONFIG_I2C_SET_DEFAULT_BUS_NUM if (!i2c_cur_bus) { if (cmd_i2c_set_bus_num(CONFIG_I2C_DEFAULT_BUS_NUMBER)) { printf("Default I2C bus %d not found\n", CONFIG_I2C_DEFAULT_BUS_NUMBER); return -ENODEV; } } #endif if (!i2c_cur_bus) { puts("No I2C bus selected\n"); return -ENODEV; } *busp = i2c_cur_bus; return 0; } static int i2c_get_cur_bus_chip(uint chip_addr, struct udevice **devp) { struct udevice *bus; int ret; ret = i2c_get_cur_bus(&bus); if (ret) return ret; return i2c_get_chip(bus, chip_addr, 1, devp); } #endif /** * i2c_init_board() - Board-specific I2C bus init * * This function is the default no-op implementation of I2C bus * initialization. This function can be overridden by board-specific * implementation if needed. */ __weak void i2c_init_board(void) { } /** * get_alen() - Small parser helper function to get address length * * Returns the address length. */ static uint get_alen(char *arg, uint default_len) { uint j; uint alen; alen = default_len; for (j = 0; j < 8; j++) { if (arg[j] == '.') { alen = arg[j+1] - '0'; break; } else if (arg[j] == '\0') break; } return alen; } enum i2c_err_op { I2C_ERR_READ, I2C_ERR_WRITE, }; static int i2c_report_err(int ret, enum i2c_err_op op) { printf("Error %s the chip: %d\n", op == I2C_ERR_READ ? "reading" : "writing", ret); return CMD_RET_FAILURE; } /** * do_i2c_read() - Handle the "i2c read" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. 
* * Syntax: * i2c read {i2c_chip} {devaddr}{.0, .1, .2} {len} {memaddr} */ static int do_i2c_read(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint devaddr, length; uint alen; u_char *memaddr; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc != 5) return CMD_RET_USAGE; /* * I2C chip address */ chip = hextoul(argv[1], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ devaddr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; /* * Length is the number of objects, not number of bytes. */ length = hextoul(argv[3], NULL); /* * memaddr is the address where to store things in memory */ memaddr = (u_char *)hextoul(argv[4], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (!ret) ret = dm_i2c_read(dev, devaddr, memaddr, length); #else ret = i2c_read(chip, devaddr, alen, memaddr, length); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } static int do_i2c_write(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint devaddr, length; uint alen; u_char *memaddr; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; struct dm_i2c_chip *i2c_chip; #endif if ((argc < 5) || (argc > 6)) return cmd_usage(cmdtp); /* * memaddr is the address where to store things in memory */ memaddr = (u_char *)hextoul(argv[1], NULL); /* * I2C chip address */ chip = hextoul(argv[2], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ devaddr = hextoul(argv[3], NULL); alen = get_alen(argv[3], DEFAULT_ADDR_LEN); if (alen > 3) return cmd_usage(cmdtp); /* * Length is the number of bytes. */ length = hextoul(argv[4], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); i2c_chip = dev_get_parent_plat(dev); if (!i2c_chip) return i2c_report_err(ret, I2C_ERR_WRITE); #endif if (argc == 6 && !strcmp(argv[5], "-s")) { /* * Write all bytes in a single I2C transaction. If the target * device is an EEPROM, it is your responsibility to not cross * a page boundary. No write delay upon completion, take this * into account if linking commands. */ #if CONFIG_IS_ENABLED(DM_I2C) i2c_chip->flags &= ~DM_I2C_CHIP_WR_ADDRESS; ret = dm_i2c_write(dev, devaddr, memaddr, length); #else ret = i2c_write(chip, devaddr, alen, memaddr, length); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); } else { /* * Repeated addressing - perform <length> separate * write transactions of one byte each */ while (length-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) i2c_chip->flags |= DM_I2C_CHIP_WR_ADDRESS; ret = dm_i2c_write(dev, devaddr++, memaddr++, 1); #else ret = i2c_write(chip, devaddr++, alen, memaddr++, 1); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); /* * No write delay with FRAM devices. 
*/ #if !defined(CONFIG_SYS_I2C_FRAM) udelay(11000); #endif } } return 0; } #if CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_flags(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct udevice *dev; uint flags; int chip; int ret; if (argc < 2) return CMD_RET_USAGE; chip = hextoul(argv[1], NULL); ret = i2c_get_cur_bus_chip(chip, &dev); if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (argc > 2) { flags = hextoul(argv[2], NULL); ret = i2c_set_chip_flags(dev, flags); } else { ret = i2c_get_chip_flags(dev, &flags); if (!ret) printf("%x\n", flags); } if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } static int do_i2c_olen(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct udevice *dev; uint olen; int chip; int ret; if (argc < 2) return CMD_RET_USAGE; chip = hextoul(argv[1], NULL); ret = i2c_get_cur_bus_chip(chip, &dev); if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (argc > 2) { olen = hextoul(argv[2], NULL); ret = i2c_set_chip_offset_len(dev, olen); } else { ret = i2c_get_chip_offset_len(dev); if (ret >= 0) { printf("%x\n", ret); ret = 0; } } if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } #endif /** * do_i2c_md() - Handle the "i2c md" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c md {i2c_chip} {addr}{.0, .1, .2} {len} */ static int do_i2c_md(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint addr, length; uint alen; uint j, nbytes, linebytes; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif /* We use the last specified parameters, unless new ones are * entered. */ chip = i2c_dp_last_chip; addr = i2c_dp_last_addr; alen = i2c_dp_last_alen; length = i2c_dp_last_length; if (argc < 3) return CMD_RET_USAGE; if ((flag & CMD_FLAG_REPEAT) == 0) { /* * New command specified. */ /* * I2C chip address */ chip = hextoul(argv[1], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; /* * If another parameter, it is the length to display. * Length is the number of objects, not number of bytes. */ if (argc > 3) length = hextoul(argv[3], NULL); } #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Print the lines. * * We buffer all read data, so we can make sure data is read only * once. */ nbytes = length; do { unsigned char linebuf[DISP_LINE_LEN]; unsigned char *cp; linebytes = (nbytes > DISP_LINE_LEN) ? 
DISP_LINE_LEN : nbytes; #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, linebuf, linebytes); #else ret = i2c_read(chip, addr, alen, linebuf, linebytes); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); else { printf("%04x:", addr); cp = linebuf; for (j=0; j<linebytes; j++) { printf(" %02x", *cp++); addr++; } puts (" "); cp = linebuf; for (j=0; j<linebytes; j++) { if ((*cp < 0x20) || (*cp > 0x7e)) puts ("."); else printf("%c", *cp); cp++; } putc ('\n'); } nbytes -= linebytes; } while (nbytes > 0); i2c_dp_last_chip = chip; i2c_dp_last_addr = addr; i2c_dp_last_alen = alen; i2c_dp_last_length = length; return 0; } /** * do_i2c_mw() - Handle the "i2c mw" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c mw {i2c_chip} {addr}{.0, .1, .2} {data} [{count}] */ static int do_i2c_mw(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; uint alen; uchar byte; uint count; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if ((argc < 4) || (argc > 5)) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Value to write is always specified. */ byte = hextoul(argv[3], NULL); /* * Optional count */ if (argc == 5) count = hextoul(argv[4], NULL); else count = 1; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_write(dev, addr++, &byte, 1); #else ret = i2c_write(chip, addr++, alen, &byte, 1); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); /* * Wait for the write to complete. The write can take * up to 10mSec (we allow a little more time). */ /* * No write delay with FRAM devices. */ #if !defined(CONFIG_SYS_I2C_FRAM) udelay(11000); #endif } return 0; } /** * do_i2c_crc() - Handle the "i2c crc32" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Calculate a CRC on memory * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c crc32 {i2c_chip} {addr}{.0, .1, .2} {count} */ static int do_i2c_crc(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; uint alen; uint count; uchar byte; ulong crc; ulong err; int ret = 0; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 4) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Count is always specified */ count = hextoul(argv[3], NULL); printf ("CRC32 for %08lx ... %08lx ==> ", addr, addr + count - 1); /* * CRC a byte at a time. 
This is going to be slooow, but hey, the * memories are small and slow too so hopefully nobody notices. */ crc = 0; err = 0; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, &byte, 1); #else ret = i2c_read(chip, addr, alen, &byte, 1); #endif if (ret) err++; crc = crc32(crc, &byte, 1); addr++; } if (err > 0) i2c_report_err(ret, I2C_ERR_READ); else printf ("%08lx\n", crc); return 0; } /** * mod_i2c_mem() - Handle the "i2c mm" and "i2c nm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Modify memory. * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c mm{.b, .w, .l} {i2c_chip} {addr}{.0, .1, .2} * i2c nm{.b, .w, .l} {i2c_chip} {addr}{.0, .1, .2} */ static int mod_i2c_mem(struct cmd_tbl *cmdtp, int incrflag, int flag, int argc, char *const argv[]) { uint chip; ulong addr; int alen; ulong data; int size = 1; int nbytes; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc != 3) return CMD_RET_USAGE; bootretry_reset_cmd_timeout(); /* got a good command to get here */ /* * We use the last specified parameters, unless new ones are * entered. */ chip = i2c_mm_last_chip; addr = i2c_mm_last_addr; alen = i2c_mm_last_alen; if ((flag & CMD_FLAG_REPEAT) == 0) { /* * New command specified. Check for a size specification. * Defaults to byte if no or incorrect specification. */ size = cmd_get_data_size(argv[0], 1); /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; } #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Print the address, followed by value. Then accept input for * the next value. A non-converted value exits. */ do { printf("%08lx:", addr); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, (uchar *)&data, size); #else ret = i2c_read(chip, addr, alen, (uchar *)&data, size); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); data = cpu_to_be32(data); if (size == 1) printf(" %02lx", (data >> 24) & 0x000000FF); else if (size == 2) printf(" %04lx", (data >> 16) & 0x0000FFFF); else printf(" %08lx", data); nbytes = cli_readline(" ? "); if (nbytes == 0) { /* * <CR> pressed as only input, don't modify current * location and move to next. 
*/ if (incrflag) addr += size; nbytes = size; /* good enough to not time out */ bootretry_reset_cmd_timeout(); } #ifdef CONFIG_BOOT_RETRY_TIME else if (nbytes == -2) break; /* timed out, exit the command */ #endif else { char *endp; data = hextoul(console_buffer, &endp); if (size == 1) data = data << 24; else if (size == 2) data = data << 16; data = be32_to_cpu(data); nbytes = endp - console_buffer; if (nbytes) { /* * good enough to not time out */ bootretry_reset_cmd_timeout(); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_write(dev, addr, (uchar *)&data, size); #else ret = i2c_write(chip, addr, alen, (uchar *)&data, size); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #if CONFIG_SYS_EEPROM_PAGE_WRITE_DELAY_MS > 0 udelay(CONFIG_SYS_EEPROM_PAGE_WRITE_DELAY_MS * 1000); #endif if (incrflag) addr += size; } } } while (nbytes); i2c_mm_last_chip = chip; i2c_mm_last_addr = addr; i2c_mm_last_alen = alen; return 0; } /** * do_i2c_probe() - Handle the "i2c probe" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c probe {addr} * * Returns zero (success) if one or more I2C devices was found */ static int do_i2c_probe(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int j; int addr = -1; int found = 0; #if defined(CONFIG_SYS_I2C_NOPROBES) int k, skip; unsigned int bus = GET_BUS_NUM; #endif /* NOPROBES */ int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus, *dev; if (i2c_get_cur_bus(&bus)) return CMD_RET_FAILURE; #endif if (argc == 2) addr = simple_strtol(argv[1], 0, 16); puts ("Valid chip addresses:"); for (j = 0; j < 128; j++) { if ((0 <= addr) && (j != addr)) continue; #if defined(CONFIG_SYS_I2C_NOPROBES) skip = 0; for (k = 0; k < ARRAY_SIZE(i2c_no_probes); k++) { if (COMPARE_BUS(bus, k) && COMPARE_ADDR(j, k)) { skip = 1; break; } } if (skip) continue; #endif #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_probe(bus, j, 0, &dev); #else ret = i2c_probe(j); #endif if (ret == 0) { printf(" %02X", j); found++; } } putc ('\n'); #if defined(CONFIG_SYS_I2C_NOPROBES) puts ("Excluded chip addresses:"); for (k = 0; k < ARRAY_SIZE(i2c_no_probes); k++) { if (COMPARE_BUS(bus,k)) printf(" %02X", NO_PROBE_ADDR(k)); } putc ('\n'); #endif return (0 == found); } /** * do_i2c_loop() - Handle the "i2c loop" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c loop {i2c_chip} {addr}{.0, .1, .2} [{length}] [{delay}] * {length} - Number of bytes to read * {delay} - A DECIMAL number and defaults to 1000 uSec */ static int do_i2c_loop(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint alen; uint addr; uint length; u_char bytes[16]; int delay; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 3) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. 
*/ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Length is the number of objects, not number of bytes. */ length = 1; length = hextoul(argv[3], NULL); if (length > sizeof(bytes)) length = sizeof(bytes); /* * The delay time (uSec) is optional. */ delay = 1000; if (argc > 3) delay = dectoul(argv[4], NULL); /* * Run the loop... */ while (1) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, bytes, length); #else ret = i2c_read(chip, addr, alen, bytes, length); #endif if (ret) i2c_report_err(ret, I2C_ERR_READ); udelay(delay); } /* NOTREACHED */ return 0; } /* * The SDRAM command is separately configured because many * (most?) embedded boards don't use SDRAM DIMMs. * * FIXME: Document and probably move elsewhere! */ #if defined(CONFIG_CMD_SDRAM) static void print_ddr2_tcyc (u_char const b) { printf ("%d.", (b >> 4) & 0x0F); switch (b & 0x0F) { case 0x0: case 0x1: case 0x2: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7: case 0x8: case 0x9: printf ("%d ns\n", b & 0x0F); break; case 0xA: puts ("25 ns\n"); break; case 0xB: puts ("33 ns\n"); break; case 0xC: puts ("66 ns\n"); break; case 0xD: puts ("75 ns\n"); break; default: puts ("?? ns\n"); break; } } static void decode_bits (u_char const b, char const *str[], int const do_once) { u_char mask; for (mask = 0x80; mask != 0x00; mask >>= 1, ++str) { if (b & mask) { puts (*str); if (do_once) return; } } } /* * Syntax: * i2c sdram {i2c_chip} */ static int do_sdram(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { enum { unknown, EDO, SDRAM, DDR, DDR2, DDR3, DDR4 } type; uint chip; u_char data[128]; u_char cksum; int j, ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif static const char *decode_CAS_DDR2[] = { " TBD", " 6", " 5", " 4", " 3", " 2", " TBD", " TBD" }; static const char *decode_CAS_default[] = { " TBD", " 7", " 6", " 5", " 4", " 3", " 2", " 1" }; static const char *decode_CS_WE_default[] = { " TBD", " 6", " 5", " 4", " 3", " 2", " 1", " 0" }; static const char *decode_byte21_default[] = { " TBD (bit 7)\n", " Redundant row address\n", " Differential clock input\n", " Registerd DQMB inputs\n", " Buffered DQMB inputs\n", " On-card PLL\n", " Registered address/control lines\n", " Buffered address/control lines\n" }; static const char *decode_byte22_DDR2[] = { " TBD (bit 7)\n", " TBD (bit 6)\n", " TBD (bit 5)\n", " TBD (bit 4)\n", " TBD (bit 3)\n", " Supports partial array self refresh\n", " Supports 50 ohm ODT\n", " Supports weak driver\n" }; static const char *decode_row_density_DDR2[] = { "512 MiB", "256 MiB", "128 MiB", "16 GiB", "8 GiB", "4 GiB", "2 GiB", "1 GiB" }; static const char *decode_row_density_default[] = { "512 MiB", "256 MiB", "128 MiB", "64 MiB", "32 MiB", "16 MiB", "8 MiB", "4 MiB" }; if (argc < 2) return CMD_RET_USAGE; /* * Chip is always specified. 
 */
	/* ... identical to the pre-fix listing above from this point on ... */
static uint get_alen(char *arg, int default_len) { int j; int alen; alen = default_len; for (j = 0; j < 8; j++) { if (arg[j] == '.') { alen = arg[j+1] - '0'; break; } else if (arg[j] == '\0') break; } return alen; }
static uint get_alen(char *arg, uint default_len) { uint j; uint alen; alen = default_len; for (j = 0; j < 8; j++) { if (arg[j] == '.') { alen = arg[j+1] - '0'; break; } else if (arg[j] == '\0') break; } return alen; }
{'added': [(203, 'static uint get_alen(char *arg, uint default_len)'), (205, '\tuint\tj;'), (206, '\tuint\talen;'), (250, '\tuint\talen;'), (304, '\tuint\talen;'), (472, '\tuint\talen;'), (473, '\tuint\tj, nbytes, linebytes;'), (592, '\tuint\talen;'), (594, '\tuint\tcount;'), (679, '\tuint\talen;'), (680, '\tuint\tcount;'), (988, '\tuint\talen;')], 'deleted': [(203, 'static uint get_alen(char *arg, int default_len)'), (205, '\tint\tj;'), (206, '\tint\talen;'), (250, '\tint alen;'), (304, '\tint alen;'), (472, '\tint alen;'), (473, '\tint\tj, nbytes, linebytes;'), (592, '\tint\talen;'), (594, '\tint\tcount;'), (679, '\tint\talen;'), (680, '\tint\tcount;'), (988, '\tint alen;')]}
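The diff above is a pure type change: every `int` used to carry an address length becomes `uint`. A minimal, self-contained sketch of why that matters (illustrative only, not U-Boot code; `get_alen_signed` and `get_alen_unsigned` mirror the shape of func_before/func_after, and the `alen > 3` comparison mirrors the callers' only guard): a non-digit after the '.' suffix yields a negative length that a signed comparison lets through, while the unsigned version turns the same value into a huge one the guard rejects.

#include <stdio.h>

/* Shape of the pre-fix helper: signed length. */
static int get_alen_signed(const char *arg, int default_len)
{
	int j, alen = default_len;

	for (j = 0; j < 8; j++) {
		if (arg[j] == '.') {
			alen = arg[j + 1] - '0';	/* '+' - '0' == -5 */
			break;
		} else if (arg[j] == '\0')
			break;
	}
	return alen;
}

/* Shape of the post-fix helper: unsigned length. */
static unsigned int get_alen_unsigned(const char *arg, unsigned int default_len)
{
	unsigned int j, alen = default_len;

	for (j = 0; j < 8; j++) {
		if (arg[j] == '.') {
			alen = arg[j + 1] - '0';	/* -5 wraps to 0xFFFFFFFB */
			break;
		} else if (arg[j] == '\0')
			break;
	}
	return alen;
}

int main(void)
{
	const char *addr_token = "0.+";	/* crafted {addr} command argument */
	int s = get_alen_signed(addr_token, 1);
	unsigned int u = get_alen_unsigned(addr_token, 1);

	/* Callers only guard the upper bound: if (alen > 3) -> usage error. */
	printf("signed:   alen=%d, caught by 'alen > 3'? %s\n",
	       s, s > 3 ? "yes" : "no");
	printf("unsigned: alen=%u, caught by 'alen > 3'? %s\n",
	       u, u > 3 ? "yes" : "no");
	return 0;
}

Compiled and run, the signed variant reports alen=-5 and "caught: no"; it is that unrejected negative length which later reaches the offset-handling code (CWE-787). The unsigned variant reports alen=4294967291, which the same guard rejects.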
num_lines_added: 12
num_lines_deleted: 12
num_lines_in_file: 1,240
num_tokens_in_file: 7,675
num_lines_in_method: 14
num_tokens_in_method: 75
method_complexity: 4
repo: https://github.com/u-boot/u-boot
cve_id: CVE-2022-34835
cwe_id: CWE-787
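As background for the CWE-787 tag: a hedged illustration of how a negative length that survives the `alen > 3` guard becomes an out-of-bounds stack write. The loop shape is assumed from the common `while (len--)` offset-serialization pattern (as in driver-model I2C offset handling), not copied from U-Boot; the demo adds a bounds guard and a cut-off so it terminates instead of actually smashing its own stack.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t offset_buf[4];		/* small stack buffer for a 0..4 byte offset */
	int offset_len = -5;		/* hostile alen that slipped past "alen > 3" */
	unsigned int iterations = 0;

	memset(offset_buf, 0, sizeof(offset_buf));

	while (offset_len--) {		/* every negative value is truthy */
		if (iterations < sizeof(offset_buf))
			offset_buf[iterations] = 0xAA;	/* guarded here; the real bug is not */
		if (++iterations > 16)	/* artificial cut-off for the demo */
			break;
	}
	printf("serialization loop ran %u times for a %zu-byte buffer\n",
	       iterations, sizeof(offset_buf));
	return 0;
}

Without the cut-off the loop body would keep writing one byte per pass past the end of the 4-byte buffer, which is exactly an out-of-bounds write.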
id: 1,430
file_name: i2c.c
programming_language: C
method_name: do_i2c_crc
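This record concerns do_i2c_crc, which (as the listings below show) accumulates a CRC32 one byte per I2C read. A small sketch, substituting zlib's crc32() for U-Boot's own implementation, confirming that byte-at-a-time accumulation produces the same checksum as a single bulk call:

/* Build with: cc crc_demo.c -lz */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	const unsigned char data[] = "0123456789abcdef";
	size_t len = strlen((const char *)data);
	size_t i;

	/* Bulk CRC over the whole buffer in one call. */
	uLong bulk = crc32(crc32(0L, Z_NULL, 0), data, (uInt)len);

	/* Incremental CRC, one byte per iteration, as the command
	 * does while looping over single-byte I2C reads. */
	uLong inc = crc32(0L, Z_NULL, 0);
	for (i = 0; i < len; i++)
		inc = crc32(inc, &data[i], 1);

	printf("bulk=%08lx incremental=%08lx match=%s\n",
	       bulk, inc, bulk == inc ? "yes" : "no");
	return 0;
}

Both values print identically; the per-byte loop is merely slow (as the source comment below admits), not incorrect.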
// SPDX-License-Identifier: GPL-2.0+ /* * (C) Copyright 2009 * Sergey Kubushyn, himself, ksi@koi8.net * * Changes for unified multibus/multiadapter I2C support. * * (C) Copyright 2001 * Gerald Van Baren, Custom IDEAS, vanbaren@cideas.com. */ /* * I2C Functions similar to the standard memory functions. * * There are several parameters in many of the commands that bear further * explanations: * * {i2c_chip} is the I2C chip address (the first byte sent on the bus). * Each I2C chip on the bus has a unique address. On the I2C data bus, * the address is the upper seven bits and the LSB is the "read/write" * bit. Note that the {i2c_chip} address specified on the command * line is not shifted up: e.g. a typical EEPROM memory chip may have * an I2C address of 0x50, but the data put on the bus will be 0xA0 * for write and 0xA1 for read. This "non shifted" address notation * matches at least half of the data sheets :-/. * * {addr} is the address (or offset) within the chip. Small memory * chips have 8 bit addresses. Large memory chips have 16 bit * addresses. Other memory chips have 9, 10, or 11 bit addresses. * Many non-memory chips have multiple registers and {addr} is used * as the register index. Some non-memory chips have only one register * and therefore don't need any {addr} parameter. * * The default {addr} parameter is one byte (.1) which works well for * memories and registers with 8 bits of address space. * * You can specify the length of the {addr} field with the optional .0, * .1, or .2 modifier (similar to the .b, .w, .l modifier). If you are * manipulating a single register device which doesn't use an address * field, use "0.0" for the address and the ".0" length field will * suppress the address in the I2C data stream. This also works for * successive reads using the I2C auto-incrementing memory pointer. * * If you are manipulating a large memory with 2-byte addresses, use * the .2 address modifier, e.g. 210.2 addresses location 528 (decimal). * * Then there are the unfortunate memory chips that spill the most * significant 1, 2, or 3 bits of address into the chip address byte. * This effectively makes one chip (logically) look like 2, 4, or * 8 chips. This is handled (awkwardly) by #defining * CONFIG_SYS_I2C_EEPROM_ADDR_OVERFLOW and using the .1 modifier on the * {addr} field (since .1 is the default, it doesn't actually have to * be specified). Examples: given a memory chip at I2C chip address * 0x50, the following would happen... * i2c md 50 0 10 display 16 bytes starting at 0x000 * On the bus: <S> A0 00 <E> <S> A1 <rd> ... <rd> * i2c md 50 100 10 display 16 bytes starting at 0x100 * On the bus: <S> A2 00 <E> <S> A3 <rd> ... <rd> * i2c md 50 210 10 display 16 bytes starting at 0x210 * On the bus: <S> A4 10 <E> <S> A5 <rd> ... <rd> * This is awfully ugly. It would be nice if someone would think up * a better way of handling this. * * Adapted from cmd_mem.c which is copyright Wolfgang Denk (wd@denx.de). */ #include <common.h> #include <bootretry.h> #include <cli.h> #include <command.h> #include <console.h> #include <dm.h> #include <edid.h> #include <errno.h> #include <i2c.h> #include <log.h> #include <malloc.h> #include <asm/byteorder.h> #include <linux/compiler.h> #include <linux/delay.h> #include <u-boot/crc.h> /* Display values from last command. * Memory modify remembered values are different from display memory. 
*/ static uint i2c_dp_last_chip; static uint i2c_dp_last_addr; static uint i2c_dp_last_alen; static uint i2c_dp_last_length = 0x10; static uint i2c_mm_last_chip; static uint i2c_mm_last_addr; static uint i2c_mm_last_alen; /* If only one I2C bus is present, the list of devices to ignore when * the probe command is issued is represented by a 1D array of addresses. * When multiple buses are present, the list is an array of bus-address * pairs. The following macros take care of this */ #if defined(CONFIG_SYS_I2C_NOPROBES) #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || defined(CONFIG_I2C_MULTI_BUS) static struct { uchar bus; uchar addr; } i2c_no_probes[] = CONFIG_SYS_I2C_NOPROBES; #define GET_BUS_NUM i2c_get_bus_num() #define COMPARE_BUS(b,i) (i2c_no_probes[(i)].bus == (b)) #define COMPARE_ADDR(a,i) (i2c_no_probes[(i)].addr == (a)) #define NO_PROBE_ADDR(i) i2c_no_probes[(i)].addr #else /* single bus */ static uchar i2c_no_probes[] = CONFIG_SYS_I2C_NOPROBES; #define GET_BUS_NUM 0 #define COMPARE_BUS(b,i) ((b) == 0) /* Make compiler happy */ #define COMPARE_ADDR(a,i) (i2c_no_probes[(i)] == (a)) #define NO_PROBE_ADDR(i) i2c_no_probes[(i)] #endif /* CONFIG_IS_ENABLED(SYS_I2C_LEGACY) */ #endif #define DISP_LINE_LEN 16 /* * Default for driver model is to use the chip's existing address length. * For legacy code, this is not stored, so we need to use a suitable * default. */ #if CONFIG_IS_ENABLED(DM_I2C) #define DEFAULT_ADDR_LEN (-1) #else #define DEFAULT_ADDR_LEN 1 #endif #if CONFIG_IS_ENABLED(DM_I2C) static struct udevice *i2c_cur_bus; static int cmd_i2c_set_bus_num(unsigned int busnum) { struct udevice *bus; int ret; ret = uclass_get_device_by_seq(UCLASS_I2C, busnum, &bus); if (ret) { debug("%s: No bus %d\n", __func__, busnum); return ret; } i2c_cur_bus = bus; return 0; } static int i2c_get_cur_bus(struct udevice **busp) { #ifdef CONFIG_I2C_SET_DEFAULT_BUS_NUM if (!i2c_cur_bus) { if (cmd_i2c_set_bus_num(CONFIG_I2C_DEFAULT_BUS_NUMBER)) { printf("Default I2C bus %d not found\n", CONFIG_I2C_DEFAULT_BUS_NUMBER); return -ENODEV; } } #endif if (!i2c_cur_bus) { puts("No I2C bus selected\n"); return -ENODEV; } *busp = i2c_cur_bus; return 0; } static int i2c_get_cur_bus_chip(uint chip_addr, struct udevice **devp) { struct udevice *bus; int ret; ret = i2c_get_cur_bus(&bus); if (ret) return ret; return i2c_get_chip(bus, chip_addr, 1, devp); } #endif /** * i2c_init_board() - Board-specific I2C bus init * * This function is the default no-op implementation of I2C bus * initialization. This function can be overridden by board-specific * implementation if needed. */ __weak void i2c_init_board(void) { } /** * get_alen() - Small parser helper function to get address length * * Returns the address length. */ static uint get_alen(char *arg, int default_len) { int j; int alen; alen = default_len; for (j = 0; j < 8; j++) { if (arg[j] == '.') { alen = arg[j+1] - '0'; break; } else if (arg[j] == '\0') break; } return alen; } enum i2c_err_op { I2C_ERR_READ, I2C_ERR_WRITE, }; static int i2c_report_err(int ret, enum i2c_err_op op) { printf("Error %s the chip: %d\n", op == I2C_ERR_READ ? "reading" : "writing", ret); return CMD_RET_FAILURE; } /** * do_i2c_read() - Handle the "i2c read" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. 
* * Syntax: * i2c read {i2c_chip} {devaddr}{.0, .1, .2} {len} {memaddr} */ static int do_i2c_read(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint devaddr, length; int alen; u_char *memaddr; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc != 5) return CMD_RET_USAGE; /* * I2C chip address */ chip = hextoul(argv[1], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ devaddr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; /* * Length is the number of objects, not number of bytes. */ length = hextoul(argv[3], NULL); /* * memaddr is the address where to store things in memory */ memaddr = (u_char *)hextoul(argv[4], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (!ret) ret = dm_i2c_read(dev, devaddr, memaddr, length); #else ret = i2c_read(chip, devaddr, alen, memaddr, length); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } static int do_i2c_write(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint devaddr, length; int alen; u_char *memaddr; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; struct dm_i2c_chip *i2c_chip; #endif if ((argc < 5) || (argc > 6)) return cmd_usage(cmdtp); /* * memaddr is the address where to store things in memory */ memaddr = (u_char *)hextoul(argv[1], NULL); /* * I2C chip address */ chip = hextoul(argv[2], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ devaddr = hextoul(argv[3], NULL); alen = get_alen(argv[3], DEFAULT_ADDR_LEN); if (alen > 3) return cmd_usage(cmdtp); /* * Length is the number of bytes. */ length = hextoul(argv[4], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); i2c_chip = dev_get_parent_plat(dev); if (!i2c_chip) return i2c_report_err(ret, I2C_ERR_WRITE); #endif if (argc == 6 && !strcmp(argv[5], "-s")) { /* * Write all bytes in a single I2C transaction. If the target * device is an EEPROM, it is your responsibility to not cross * a page boundary. No write delay upon completion, take this * into account if linking commands. */ #if CONFIG_IS_ENABLED(DM_I2C) i2c_chip->flags &= ~DM_I2C_CHIP_WR_ADDRESS; ret = dm_i2c_write(dev, devaddr, memaddr, length); #else ret = i2c_write(chip, devaddr, alen, memaddr, length); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); } else { /* * Repeated addressing - perform <length> separate * write transactions of one byte each */ while (length-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) i2c_chip->flags |= DM_I2C_CHIP_WR_ADDRESS; ret = dm_i2c_write(dev, devaddr++, memaddr++, 1); #else ret = i2c_write(chip, devaddr++, alen, memaddr++, 1); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); /* * No write delay with FRAM devices. 
*/ #if !defined(CONFIG_SYS_I2C_FRAM) udelay(11000); #endif } } return 0; } #if CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_flags(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct udevice *dev; uint flags; int chip; int ret; if (argc < 2) return CMD_RET_USAGE; chip = hextoul(argv[1], NULL); ret = i2c_get_cur_bus_chip(chip, &dev); if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (argc > 2) { flags = hextoul(argv[2], NULL); ret = i2c_set_chip_flags(dev, flags); } else { ret = i2c_get_chip_flags(dev, &flags); if (!ret) printf("%x\n", flags); } if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } static int do_i2c_olen(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct udevice *dev; uint olen; int chip; int ret; if (argc < 2) return CMD_RET_USAGE; chip = hextoul(argv[1], NULL); ret = i2c_get_cur_bus_chip(chip, &dev); if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (argc > 2) { olen = hextoul(argv[2], NULL); ret = i2c_set_chip_offset_len(dev, olen); } else { ret = i2c_get_chip_offset_len(dev); if (ret >= 0) { printf("%x\n", ret); ret = 0; } } if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } #endif /** * do_i2c_md() - Handle the "i2c md" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c md {i2c_chip} {addr}{.0, .1, .2} {len} */ static int do_i2c_md(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint addr, length; int alen; int j, nbytes, linebytes; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif /* We use the last specified parameters, unless new ones are * entered. */ chip = i2c_dp_last_chip; addr = i2c_dp_last_addr; alen = i2c_dp_last_alen; length = i2c_dp_last_length; if (argc < 3) return CMD_RET_USAGE; if ((flag & CMD_FLAG_REPEAT) == 0) { /* * New command specified. */ /* * I2C chip address */ chip = hextoul(argv[1], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; /* * If another parameter, it is the length to display. * Length is the number of objects, not number of bytes. */ if (argc > 3) length = hextoul(argv[3], NULL); } #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Print the lines. * * We buffer all read data, so we can make sure data is read only * once. */ nbytes = length; do { unsigned char linebuf[DISP_LINE_LEN]; unsigned char *cp; linebytes = (nbytes > DISP_LINE_LEN) ? 
DISP_LINE_LEN : nbytes; #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, linebuf, linebytes); #else ret = i2c_read(chip, addr, alen, linebuf, linebytes); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); else { printf("%04x:", addr); cp = linebuf; for (j=0; j<linebytes; j++) { printf(" %02x", *cp++); addr++; } puts (" "); cp = linebuf; for (j=0; j<linebytes; j++) { if ((*cp < 0x20) || (*cp > 0x7e)) puts ("."); else printf("%c", *cp); cp++; } putc ('\n'); } nbytes -= linebytes; } while (nbytes > 0); i2c_dp_last_chip = chip; i2c_dp_last_addr = addr; i2c_dp_last_alen = alen; i2c_dp_last_length = length; return 0; } /** * do_i2c_mw() - Handle the "i2c mw" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c mw {i2c_chip} {addr}{.0, .1, .2} {data} [{count}] */ static int do_i2c_mw(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; int alen; uchar byte; int count; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if ((argc < 4) || (argc > 5)) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Value to write is always specified. */ byte = hextoul(argv[3], NULL); /* * Optional count */ if (argc == 5) count = hextoul(argv[4], NULL); else count = 1; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_write(dev, addr++, &byte, 1); #else ret = i2c_write(chip, addr++, alen, &byte, 1); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); /* * Wait for the write to complete. The write can take * up to 10mSec (we allow a little more time). */ /* * No write delay with FRAM devices. */ #if !defined(CONFIG_SYS_I2C_FRAM) udelay(11000); #endif } return 0; } /** * do_i2c_crc() - Handle the "i2c crc32" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Calculate a CRC on memory * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c crc32 {i2c_chip} {addr}{.0, .1, .2} {count} */ static int do_i2c_crc(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; int alen; int count; uchar byte; ulong crc; ulong err; int ret = 0; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 4) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Count is always specified */ count = hextoul(argv[3], NULL); printf ("CRC32 for %08lx ... %08lx ==> ", addr, addr + count - 1); /* * CRC a byte at a time. 
This is going to be slooow, but hey, the * memories are small and slow too so hopefully nobody notices. */ crc = 0; err = 0; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, &byte, 1); #else ret = i2c_read(chip, addr, alen, &byte, 1); #endif if (ret) err++; crc = crc32(crc, &byte, 1); addr++; } if (err > 0) i2c_report_err(ret, I2C_ERR_READ); else printf ("%08lx\n", crc); return 0; } /** * mod_i2c_mem() - Handle the "i2c mm" and "i2c nm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Modify memory. * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c mm{.b, .w, .l} {i2c_chip} {addr}{.0, .1, .2} * i2c nm{.b, .w, .l} {i2c_chip} {addr}{.0, .1, .2} */ static int mod_i2c_mem(struct cmd_tbl *cmdtp, int incrflag, int flag, int argc, char *const argv[]) { uint chip; ulong addr; int alen; ulong data; int size = 1; int nbytes; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc != 3) return CMD_RET_USAGE; bootretry_reset_cmd_timeout(); /* got a good command to get here */ /* * We use the last specified parameters, unless new ones are * entered. */ chip = i2c_mm_last_chip; addr = i2c_mm_last_addr; alen = i2c_mm_last_alen; if ((flag & CMD_FLAG_REPEAT) == 0) { /* * New command specified. Check for a size specification. * Defaults to byte if no or incorrect specification. */ size = cmd_get_data_size(argv[0], 1); /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; } #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Print the address, followed by value. Then accept input for * the next value. A non-converted value exits. */ do { printf("%08lx:", addr); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, (uchar *)&data, size); #else ret = i2c_read(chip, addr, alen, (uchar *)&data, size); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); data = cpu_to_be32(data); if (size == 1) printf(" %02lx", (data >> 24) & 0x000000FF); else if (size == 2) printf(" %04lx", (data >> 16) & 0x0000FFFF); else printf(" %08lx", data); nbytes = cli_readline(" ? "); if (nbytes == 0) { /* * <CR> pressed as only input, don't modify current * location and move to next. 
*/ if (incrflag) addr += size; nbytes = size; /* good enough to not time out */ bootretry_reset_cmd_timeout(); } #ifdef CONFIG_BOOT_RETRY_TIME else if (nbytes == -2) break; /* timed out, exit the command */ #endif else { char *endp; data = hextoul(console_buffer, &endp); if (size == 1) data = data << 24; else if (size == 2) data = data << 16; data = be32_to_cpu(data); nbytes = endp - console_buffer; if (nbytes) { /* * good enough to not time out */ bootretry_reset_cmd_timeout(); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_write(dev, addr, (uchar *)&data, size); #else ret = i2c_write(chip, addr, alen, (uchar *)&data, size); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #if CONFIG_SYS_EEPROM_PAGE_WRITE_DELAY_MS > 0 udelay(CONFIG_SYS_EEPROM_PAGE_WRITE_DELAY_MS * 1000); #endif if (incrflag) addr += size; } } } while (nbytes); i2c_mm_last_chip = chip; i2c_mm_last_addr = addr; i2c_mm_last_alen = alen; return 0; } /** * do_i2c_probe() - Handle the "i2c probe" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c probe {addr} * * Returns zero (success) if one or more I2C devices was found */ static int do_i2c_probe(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int j; int addr = -1; int found = 0; #if defined(CONFIG_SYS_I2C_NOPROBES) int k, skip; unsigned int bus = GET_BUS_NUM; #endif /* NOPROBES */ int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus, *dev; if (i2c_get_cur_bus(&bus)) return CMD_RET_FAILURE; #endif if (argc == 2) addr = simple_strtol(argv[1], 0, 16); puts ("Valid chip addresses:"); for (j = 0; j < 128; j++) { if ((0 <= addr) && (j != addr)) continue; #if defined(CONFIG_SYS_I2C_NOPROBES) skip = 0; for (k = 0; k < ARRAY_SIZE(i2c_no_probes); k++) { if (COMPARE_BUS(bus, k) && COMPARE_ADDR(j, k)) { skip = 1; break; } } if (skip) continue; #endif #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_probe(bus, j, 0, &dev); #else ret = i2c_probe(j); #endif if (ret == 0) { printf(" %02X", j); found++; } } putc ('\n'); #if defined(CONFIG_SYS_I2C_NOPROBES) puts ("Excluded chip addresses:"); for (k = 0; k < ARRAY_SIZE(i2c_no_probes); k++) { if (COMPARE_BUS(bus,k)) printf(" %02X", NO_PROBE_ADDR(k)); } putc ('\n'); #endif return (0 == found); } /** * do_i2c_loop() - Handle the "i2c loop" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c loop {i2c_chip} {addr}{.0, .1, .2} [{length}] [{delay}] * {length} - Number of bytes to read * {delay} - A DECIMAL number and defaults to 1000 uSec */ static int do_i2c_loop(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; int alen; uint addr; uint length; u_char bytes[16]; int delay; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 3) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. 
*/ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Length is the number of objects, not number of bytes. */ length = 1; length = hextoul(argv[3], NULL); if (length > sizeof(bytes)) length = sizeof(bytes); /* * The delay time (uSec) is optional. */ delay = 1000; if (argc > 3) delay = dectoul(argv[4], NULL); /* * Run the loop... */ while (1) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, bytes, length); #else ret = i2c_read(chip, addr, alen, bytes, length); #endif if (ret) i2c_report_err(ret, I2C_ERR_READ); udelay(delay); } /* NOTREACHED */ return 0; } /* * The SDRAM command is separately configured because many * (most?) embedded boards don't use SDRAM DIMMs. * * FIXME: Document and probably move elsewhere! */ #if defined(CONFIG_CMD_SDRAM) static void print_ddr2_tcyc (u_char const b) { printf ("%d.", (b >> 4) & 0x0F); switch (b & 0x0F) { case 0x0: case 0x1: case 0x2: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7: case 0x8: case 0x9: printf ("%d ns\n", b & 0x0F); break; case 0xA: puts ("25 ns\n"); break; case 0xB: puts ("33 ns\n"); break; case 0xC: puts ("66 ns\n"); break; case 0xD: puts ("75 ns\n"); break; default: puts ("?? ns\n"); break; } } static void decode_bits (u_char const b, char const *str[], int const do_once) { u_char mask; for (mask = 0x80; mask != 0x00; mask >>= 1, ++str) { if (b & mask) { puts (*str); if (do_once) return; } } } /* * Syntax: * i2c sdram {i2c_chip} */ static int do_sdram(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { enum { unknown, EDO, SDRAM, DDR, DDR2, DDR3, DDR4 } type; uint chip; u_char data[128]; u_char cksum; int j, ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif static const char *decode_CAS_DDR2[] = { " TBD", " 6", " 5", " 4", " 3", " 2", " TBD", " TBD" }; static const char *decode_CAS_default[] = { " TBD", " 7", " 6", " 5", " 4", " 3", " 2", " 1" }; static const char *decode_CS_WE_default[] = { " TBD", " 6", " 5", " 4", " 3", " 2", " 1", " 0" }; static const char *decode_byte21_default[] = { " TBD (bit 7)\n", " Redundant row address\n", " Differential clock input\n", " Registerd DQMB inputs\n", " Buffered DQMB inputs\n", " On-card PLL\n", " Registered address/control lines\n", " Buffered address/control lines\n" }; static const char *decode_byte22_DDR2[] = { " TBD (bit 7)\n", " TBD (bit 6)\n", " TBD (bit 5)\n", " TBD (bit 4)\n", " TBD (bit 3)\n", " Supports partial array self refresh\n", " Supports 50 ohm ODT\n", " Supports weak driver\n" }; static const char *decode_row_density_DDR2[] = { "512 MiB", "256 MiB", "128 MiB", "16 GiB", "8 GiB", "4 GiB", "2 GiB", "1 GiB" }; static const char *decode_row_density_default[] = { "512 MiB", "256 MiB", "128 MiB", "64 MiB", "32 MiB", "16 MiB", "8 MiB", "4 MiB" }; if (argc < 2) return CMD_RET_USAGE; /* * Chip is always specified. 
*/ chip = hextoul(argv[1], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret) ret = dm_i2c_read(dev, 0, data, sizeof(data)); #else ret = i2c_read(chip, 0, 1, data, sizeof(data)); #endif if (ret) { puts ("No SDRAM Serial Presence Detect found.\n"); return 1; } cksum = 0; for (j = 0; j < 63; j++) { cksum += data[j]; } if (cksum != data[63]) { printf ("WARNING: Configuration data checksum failure:\n" " is 0x%02x, calculated 0x%02x\n", data[63], cksum); } printf ("SPD data revision %d.%d\n", (data[62] >> 4) & 0x0F, data[62] & 0x0F); printf ("Bytes used 0x%02X\n", data[0]); printf ("Serial memory size 0x%02X\n", 1 << data[1]); puts ("Memory type "); switch (data[2]) { case 2: type = EDO; puts ("EDO\n"); break; case 4: type = SDRAM; puts ("SDRAM\n"); break; case 7: type = DDR; puts("DDR\n"); break; case 8: type = DDR2; puts ("DDR2\n"); break; case 11: type = DDR3; puts("DDR3\n"); break; case 12: type = DDR4; puts("DDR4\n"); break; default: type = unknown; puts ("unknown\n"); break; } puts ("Row address bits "); if ((data[3] & 0x00F0) == 0) printf ("%d\n", data[3] & 0x0F); else printf ("%d/%d\n", data[3] & 0x0F, (data[3] >> 4) & 0x0F); puts ("Column address bits "); if ((data[4] & 0x00F0) == 0) printf ("%d\n", data[4] & 0x0F); else printf ("%d/%d\n", data[4] & 0x0F, (data[4] >> 4) & 0x0F); switch (type) { case DDR2: printf ("Number of ranks %d\n", (data[5] & 0x07) + 1); break; default: printf ("Module rows %d\n", data[5]); break; } switch (type) { case DDR2: printf ("Module data width %d bits\n", data[6]); break; default: printf ("Module data width %d bits\n", (data[7] << 8) | data[6]); break; } puts ("Interface signal levels "); switch(data[8]) { case 0: puts ("TTL 5.0 V\n"); break; case 1: puts ("LVTTL\n"); break; case 2: puts ("HSTL 1.5 V\n"); break; case 3: puts ("SSTL 3.3 V\n"); break; case 4: puts ("SSTL 2.5 V\n"); break; case 5: puts ("SSTL 1.8 V\n"); break; default: puts ("unknown\n"); break; } switch (type) { case DDR2: printf ("SDRAM cycle time "); print_ddr2_tcyc (data[9]); break; default: printf ("SDRAM cycle time %d.%d ns\n", (data[9] >> 4) & 0x0F, data[9] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM access time 0.%d%d ns\n", (data[10] >> 4) & 0x0F, data[10] & 0x0F); break; default: printf ("SDRAM access time %d.%d ns\n", (data[10] >> 4) & 0x0F, data[10] & 0x0F); break; } puts ("EDC configuration "); switch (data[11]) { case 0: puts ("None\n"); break; case 1: puts ("Parity\n"); break; case 2: puts ("ECC\n"); break; default: puts ("unknown\n"); break; } if ((data[12] & 0x80) == 0) puts ("No self refresh, rate "); else puts ("Self refresh, rate "); switch(data[12] & 0x7F) { case 0: puts ("15.625 us\n"); break; case 1: puts ("3.9 us\n"); break; case 2: puts ("7.8 us\n"); break; case 3: puts ("31.3 us\n"); break; case 4: puts ("62.5 us\n"); break; case 5: puts ("125 us\n"); break; default: puts ("unknown\n"); break; } switch (type) { case DDR2: printf ("SDRAM width (primary) %d\n", data[13]); break; default: printf ("SDRAM width (primary) %d\n", data[13] & 0x7F); if ((data[13] & 0x80) != 0) { printf (" (second bank) %d\n", 2 * (data[13] & 0x7F)); } break; } switch (type) { case DDR2: if (data[14] != 0) printf ("EDC width %d\n", data[14]); break; default: if (data[14] != 0) { printf ("EDC width %d\n", data[14] & 0x7F); if ((data[14] & 0x80) != 0) { printf (" (second bank) %d\n", 2 * (data[14] & 0x7F)); } } break; } if (DDR2 != type) { printf ("Min clock delay, back-to-back random column addresses " "%d\n", data[15]); } puts ("Burst 
length(s) "); if (data[16] & 0x80) puts (" Page"); if (data[16] & 0x08) puts (" 8"); if (data[16] & 0x04) puts (" 4"); if (data[16] & 0x02) puts (" 2"); if (data[16] & 0x01) puts (" 1"); putc ('\n'); printf ("Number of banks %d\n", data[17]); switch (type) { case DDR2: puts ("CAS latency(s) "); decode_bits (data[18], decode_CAS_DDR2, 0); putc ('\n'); break; default: puts ("CAS latency(s) "); decode_bits (data[18], decode_CAS_default, 0); putc ('\n'); break; } if (DDR2 != type) { puts ("CS latency(s) "); decode_bits (data[19], decode_CS_WE_default, 0); putc ('\n'); } if (DDR2 != type) { puts ("WE latency(s) "); decode_bits (data[20], decode_CS_WE_default, 0); putc ('\n'); } switch (type) { case DDR2: puts ("Module attributes:\n"); if (data[21] & 0x80) puts (" TBD (bit 7)\n"); if (data[21] & 0x40) puts (" Analysis probe installed\n"); if (data[21] & 0x20) puts (" TBD (bit 5)\n"); if (data[21] & 0x10) puts (" FET switch external enable\n"); printf (" %d PLLs on DIMM\n", (data[21] >> 2) & 0x03); if (data[20] & 0x11) { printf (" %d active registers on DIMM\n", (data[21] & 0x03) + 1); } break; default: puts ("Module attributes:\n"); if (!data[21]) puts (" (none)\n"); else decode_bits (data[21], decode_byte21_default, 0); break; } switch (type) { case DDR2: decode_bits (data[22], decode_byte22_DDR2, 0); break; default: puts ("Device attributes:\n"); if (data[22] & 0x80) puts (" TBD (bit 7)\n"); if (data[22] & 0x40) puts (" TBD (bit 6)\n"); if (data[22] & 0x20) puts (" Upper Vcc tolerance 5%\n"); else puts (" Upper Vcc tolerance 10%\n"); if (data[22] & 0x10) puts (" Lower Vcc tolerance 5%\n"); else puts (" Lower Vcc tolerance 10%\n"); if (data[22] & 0x08) puts (" Supports write1/read burst\n"); if (data[22] & 0x04) puts (" Supports precharge all\n"); if (data[22] & 0x02) puts (" Supports auto precharge\n"); if (data[22] & 0x01) puts (" Supports early RAS# precharge\n"); break; } switch (type) { case DDR2: printf ("SDRAM cycle time (2nd highest CAS latency) "); print_ddr2_tcyc (data[23]); break; default: printf ("SDRAM cycle time (2nd highest CAS latency) %d." "%d ns\n", (data[23] >> 4) & 0x0F, data[23] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM access from clock (2nd highest CAS latency) 0." "%d%d ns\n", (data[24] >> 4) & 0x0F, data[24] & 0x0F); break; default: printf ("SDRAM access from clock (2nd highest CAS latency) %d." "%d ns\n", (data[24] >> 4) & 0x0F, data[24] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM cycle time (3rd highest CAS latency) "); print_ddr2_tcyc (data[25]); break; default: printf ("SDRAM cycle time (3rd highest CAS latency) %d." "%d ns\n", (data[25] >> 4) & 0x0F, data[25] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM access from clock (3rd highest CAS latency) 0." "%d%d ns\n", (data[26] >> 4) & 0x0F, data[26] & 0x0F); break; default: printf ("SDRAM access from clock (3rd highest CAS latency) %d." 
"%d ns\n", (data[26] >> 4) & 0x0F, data[26] & 0x0F); break; } switch (type) { case DDR2: printf ("Minimum row precharge %d.%02d ns\n", (data[27] >> 2) & 0x3F, 25 * (data[27] & 0x03)); break; default: printf ("Minimum row precharge %d ns\n", data[27]); break; } switch (type) { case DDR2: printf ("Row active to row active min %d.%02d ns\n", (data[28] >> 2) & 0x3F, 25 * (data[28] & 0x03)); break; default: printf ("Row active to row active min %d ns\n", data[28]); break; } switch (type) { case DDR2: printf ("RAS to CAS delay min %d.%02d ns\n", (data[29] >> 2) & 0x3F, 25 * (data[29] & 0x03)); break; default: printf ("RAS to CAS delay min %d ns\n", data[29]); break; } printf ("Minimum RAS pulse width %d ns\n", data[30]); switch (type) { case DDR2: puts ("Density of each row "); decode_bits (data[31], decode_row_density_DDR2, 1); putc ('\n'); break; default: puts ("Density of each row "); decode_bits (data[31], decode_row_density_default, 1); putc ('\n'); break; } switch (type) { case DDR2: puts ("Command and Address setup "); if (data[32] >= 0xA0) { printf ("1.%d%d ns\n", ((data[32] >> 4) & 0x0F) - 10, data[32] & 0x0F); } else { printf ("0.%d%d ns\n", ((data[32] >> 4) & 0x0F), data[32] & 0x0F); } break; default: printf ("Command and Address setup %c%d.%d ns\n", (data[32] & 0x80) ? '-' : '+', (data[32] >> 4) & 0x07, data[32] & 0x0F); break; } switch (type) { case DDR2: puts ("Command and Address hold "); if (data[33] >= 0xA0) { printf ("1.%d%d ns\n", ((data[33] >> 4) & 0x0F) - 10, data[33] & 0x0F); } else { printf ("0.%d%d ns\n", ((data[33] >> 4) & 0x0F), data[33] & 0x0F); } break; default: printf ("Command and Address hold %c%d.%d ns\n", (data[33] & 0x80) ? '-' : '+', (data[33] >> 4) & 0x07, data[33] & 0x0F); break; } switch (type) { case DDR2: printf ("Data signal input setup 0.%d%d ns\n", (data[34] >> 4) & 0x0F, data[34] & 0x0F); break; default: printf ("Data signal input setup %c%d.%d ns\n", (data[34] & 0x80) ? '-' : '+', (data[34] >> 4) & 0x07, data[34] & 0x0F); break; } switch (type) { case DDR2: printf ("Data signal input hold 0.%d%d ns\n", (data[35] >> 4) & 0x0F, data[35] & 0x0F); break; default: printf ("Data signal input hold %c%d.%d ns\n", (data[35] & 0x80) ? '-' : '+', (data[35] >> 4) & 0x07, data[35] & 0x0F); break; } puts ("Manufacturer's JEDEC ID "); for (j = 64; j <= 71; j++) printf ("%02X ", data[j]); putc ('\n'); printf ("Manufacturing Location %02X\n", data[72]); puts ("Manufacturer's Part Number "); for (j = 73; j <= 90; j++) printf ("%02X ", data[j]); putc ('\n'); printf ("Revision Code %02X %02X\n", data[91], data[92]); printf ("Manufacturing Date %02X %02X\n", data[93], data[94]); puts ("Assembly Serial Number "); for (j = 95; j <= 98; j++) printf ("%02X ", data[j]); putc ('\n'); if (DDR2 != type) { printf ("Speed rating PC%d\n", data[126] == 0x66 ? 
66 : data[126]); } return 0; } #endif /* * Syntax: * i2c edid {i2c_chip} */ #if defined(CONFIG_I2C_EDID) int do_edid(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; struct edid1_info edid; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 2) { cmd_usage(cmdtp); return 1; } chip = hextoul(argv[1], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret) ret = dm_i2c_read(dev, 0, (uchar *)&edid, sizeof(edid)); #else ret = i2c_read(chip, 0, 1, (uchar *)&edid, sizeof(edid)); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (edid_check_info(&edid)) { puts("Content isn't valid EDID.\n"); return 1; } edid_print_info(&edid); return 0; } #endif /* CONFIG_I2C_EDID */ #if CONFIG_IS_ENABLED(DM_I2C) static void show_bus(struct udevice *bus) { struct udevice *dev; printf("Bus %d:\t%s", dev_seq(bus), bus->name); if (device_active(bus)) printf(" (active %d)", dev_seq(bus)); printf("\n"); for (device_find_first_child(bus, &dev); dev; device_find_next_child(&dev)) { struct dm_i2c_chip *chip = dev_get_parent_plat(dev); printf(" %02x: %s, offset len %x, flags %x\n", chip->chip_addr, dev->name, chip->offset_len, chip->flags); } } #endif /** * do_i2c_show_bus() - Handle the "i2c bus" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero always. */ #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_show_bus(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { if (argc == 1) { /* show all busses */ #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; struct uclass *uc; int ret; ret = uclass_get(UCLASS_I2C, &uc); if (ret) return CMD_RET_FAILURE; uclass_foreach_dev(bus, uc) show_bus(bus); #else int i; for (i = 0; i < CONFIG_SYS_NUM_I2C_BUSES; i++) { printf("Bus %d:\t%s", i, I2C_ADAP_NR(i)->name); #ifndef CONFIG_SYS_I2C_DIRECT_BUS int j; for (j = 0; j < CONFIG_SYS_I2C_MAX_HOPS; j++) { if (i2c_bus[i].next_hop[j].chip == 0) break; printf("->%s@0x%2x:%d", i2c_bus[i].next_hop[j].mux.name, i2c_bus[i].next_hop[j].chip, i2c_bus[i].next_hop[j].channel); } #endif printf("\n"); } #endif } else { int i; /* show specific bus */ i = dectoul(argv[1], NULL); #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; int ret; ret = uclass_get_device_by_seq(UCLASS_I2C, i, &bus); if (ret) { printf("Invalid bus %d: err=%d\n", i, ret); return CMD_RET_FAILURE; } show_bus(bus); #else if (i >= CONFIG_SYS_NUM_I2C_BUSES) { printf("Invalid bus %d\n", i); return -1; } printf("Bus %d:\t%s", i, I2C_ADAP_NR(i)->name); #ifndef CONFIG_SYS_I2C_DIRECT_BUS int j; for (j = 0; j < CONFIG_SYS_I2C_MAX_HOPS; j++) { if (i2c_bus[i].next_hop[j].chip == 0) break; printf("->%s@0x%2x:%d", i2c_bus[i].next_hop[j].mux.name, i2c_bus[i].next_hop[j].chip, i2c_bus[i].next_hop[j].channel); } #endif printf("\n"); #endif } return 0; } #endif /** * do_i2c_bus_num() - Handle the "i2c dev" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. 
*/ #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || defined(CONFIG_I2C_MULTI_BUS) || \ CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_bus_num(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int ret = 0; int bus_no; if (argc == 1) { /* querying current setting */ #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; if (!i2c_get_cur_bus(&bus)) bus_no = dev_seq(bus); else bus_no = -1; #else bus_no = i2c_get_bus_num(); #endif printf("Current bus is %d\n", bus_no); } else { bus_no = dectoul(argv[1], NULL); #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) if (bus_no >= CONFIG_SYS_NUM_I2C_BUSES) { printf("Invalid bus %d\n", bus_no); return -1; } #endif printf("Setting bus to %d\n", bus_no); #if CONFIG_IS_ENABLED(DM_I2C) ret = cmd_i2c_set_bus_num(bus_no); #else ret = i2c_set_bus_num(bus_no); #endif if (ret) printf("Failure changing bus number (%d)\n", ret); } return ret ? CMD_RET_FAILURE : 0; } #endif /* CONFIG_IS_ENABLED(SYS_I2C_LEGACY) */ /** * do_i2c_bus_speed() - Handle the "i2c speed" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c_bus_speed(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int speed, ret=0; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; if (i2c_get_cur_bus(&bus)) return 1; #endif if (argc == 1) { #if CONFIG_IS_ENABLED(DM_I2C) speed = dm_i2c_get_bus_speed(bus); #else speed = i2c_get_bus_speed(); #endif /* querying current speed */ printf("Current bus speed=%d\n", speed); } else { speed = dectoul(argv[1], NULL); printf("Setting bus speed to %d Hz\n", speed); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_set_bus_speed(bus, speed); #else ret = i2c_set_bus_speed(speed); #endif if (ret) printf("Failure changing bus speed (%d)\n", ret); } return ret ? CMD_RET_FAILURE : 0; } /** * do_i2c_mm() - Handle the "i2c mm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c_mm(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { return mod_i2c_mem (cmdtp, 1, flag, argc, argv); } /** * do_i2c_nm() - Handle the "i2c nm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c_nm(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { return mod_i2c_mem (cmdtp, 0, flag, argc, argv); } /** * do_i2c_reset() - Handle the "i2c reset" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero always. 
*/ static int do_i2c_reset(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; if (i2c_get_cur_bus(&bus)) return CMD_RET_FAILURE; if (i2c_deblock(bus)) { printf("Error: Not supported by the driver\n"); return CMD_RET_FAILURE; } #elif CONFIG_IS_ENABLED(SYS_I2C_LEGACY) i2c_init(I2C_ADAP->speed, I2C_ADAP->slaveaddr); #endif return 0; } static struct cmd_tbl cmd_i2c_sub[] = { #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || CONFIG_IS_ENABLED(DM_I2C) U_BOOT_CMD_MKENT(bus, 1, 1, do_i2c_show_bus, "", ""), #endif U_BOOT_CMD_MKENT(crc32, 3, 1, do_i2c_crc, "", ""), #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || \ defined(CONFIG_I2C_MULTI_BUS) || CONFIG_IS_ENABLED(DM_I2C) U_BOOT_CMD_MKENT(dev, 1, 1, do_i2c_bus_num, "", ""), #endif /* CONFIG_I2C_MULTI_BUS */ #if defined(CONFIG_I2C_EDID) U_BOOT_CMD_MKENT(edid, 1, 1, do_edid, "", ""), #endif /* CONFIG_I2C_EDID */ U_BOOT_CMD_MKENT(loop, 3, 1, do_i2c_loop, "", ""), U_BOOT_CMD_MKENT(md, 3, 1, do_i2c_md, "", ""), U_BOOT_CMD_MKENT(mm, 2, 1, do_i2c_mm, "", ""), U_BOOT_CMD_MKENT(mw, 3, 1, do_i2c_mw, "", ""), U_BOOT_CMD_MKENT(nm, 2, 1, do_i2c_nm, "", ""), U_BOOT_CMD_MKENT(probe, 0, 1, do_i2c_probe, "", ""), U_BOOT_CMD_MKENT(read, 5, 1, do_i2c_read, "", ""), U_BOOT_CMD_MKENT(write, 6, 0, do_i2c_write, "", ""), #if CONFIG_IS_ENABLED(DM_I2C) U_BOOT_CMD_MKENT(flags, 2, 1, do_i2c_flags, "", ""), U_BOOT_CMD_MKENT(olen, 2, 1, do_i2c_olen, "", ""), #endif U_BOOT_CMD_MKENT(reset, 0, 1, do_i2c_reset, "", ""), #if defined(CONFIG_CMD_SDRAM) U_BOOT_CMD_MKENT(sdram, 1, 1, do_sdram, "", ""), #endif U_BOOT_CMD_MKENT(speed, 1, 1, do_i2c_bus_speed, "", ""), }; static __maybe_unused void i2c_reloc(void) { static int relocated; if (!relocated) { fixup_cmdtable(cmd_i2c_sub, ARRAY_SIZE(cmd_i2c_sub)); relocated = 1; }; } /** * do_i2c() - Handle the "i2c" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct cmd_tbl *c; #ifdef CONFIG_NEEDS_MANUAL_RELOC i2c_reloc(); #endif if (argc < 2) return CMD_RET_USAGE; /* Strip off leading 'i2c' command argument */ argc--; argv++; c = find_cmd_tbl(argv[0], &cmd_i2c_sub[0], ARRAY_SIZE(cmd_i2c_sub)); if (c) return c->cmd(cmdtp, flag, argc, argv); else return CMD_RET_USAGE; } /***************************************************/ #ifdef CONFIG_SYS_LONGHELP static char i2c_help_text[] = #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || CONFIG_IS_ENABLED(DM_I2C) "bus [muxtype:muxaddr:muxchannel] - show I2C bus info\n" "i2c " /* That's the prefix for the crc32 command below. 
*/ #endif "crc32 chip address[.0, .1, .2] count - compute CRC32 checksum\n" #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || \ defined(CONFIG_I2C_MULTI_BUS) || CONFIG_IS_ENABLED(DM_I2C) "i2c dev [dev] - show or set current I2C bus\n" #endif /* CONFIG_I2C_MULTI_BUS */ #if defined(CONFIG_I2C_EDID) "i2c edid chip - print EDID configuration information\n" #endif /* CONFIG_I2C_EDID */ "i2c loop chip address[.0, .1, .2] [# of objects] - looping read of device\n" "i2c md chip address[.0, .1, .2] [# of objects] - read from I2C device\n" "i2c mm chip address[.0, .1, .2] - write to I2C device (auto-incrementing)\n" "i2c mw chip address[.0, .1, .2] value [count] - write to I2C device (fill)\n" "i2c nm chip address[.0, .1, .2] - write to I2C device (constant address)\n" "i2c probe [address] - test for and show device(s) on the I2C bus\n" "i2c read chip address[.0, .1, .2] length memaddress - read to memory\n" "i2c write memaddress chip address[.0, .1, .2] length [-s] - write memory\n" " to I2C; the -s option selects bulk write in a single transaction\n" #if CONFIG_IS_ENABLED(DM_I2C) "i2c flags chip [flags] - set or get chip flags\n" "i2c olen chip [offset_length] - set or get chip offset length\n" #endif "i2c reset - re-init the I2C Controller\n" #if defined(CONFIG_CMD_SDRAM) "i2c sdram chip - print SDRAM configuration information\n" #endif "i2c speed [speed] - show or set I2C bus speed"; #endif U_BOOT_CMD( i2c, 7, 1, do_i2c, "I2C sub-system", i2c_help_text );
// SPDX-License-Identifier: GPL-2.0+ /* * (C) Copyright 2009 * Sergey Kubushyn, himself, ksi@koi8.net * * Changes for unified multibus/multiadapter I2C support. * * (C) Copyright 2001 * Gerald Van Baren, Custom IDEAS, vanbaren@cideas.com. */ /* * I2C Functions similar to the standard memory functions. * * There are several parameters in many of the commands that bear further * explanations: * * {i2c_chip} is the I2C chip address (the first byte sent on the bus). * Each I2C chip on the bus has a unique address. On the I2C data bus, * the address is the upper seven bits and the LSB is the "read/write" * bit. Note that the {i2c_chip} address specified on the command * line is not shifted up: e.g. a typical EEPROM memory chip may have * an I2C address of 0x50, but the data put on the bus will be 0xA0 * for write and 0xA1 for read. This "non shifted" address notation * matches at least half of the data sheets :-/. * * {addr} is the address (or offset) within the chip. Small memory * chips have 8 bit addresses. Large memory chips have 16 bit * addresses. Other memory chips have 9, 10, or 11 bit addresses. * Many non-memory chips have multiple registers and {addr} is used * as the register index. Some non-memory chips have only one register * and therefore don't need any {addr} parameter. * * The default {addr} parameter is one byte (.1) which works well for * memories and registers with 8 bits of address space. * * You can specify the length of the {addr} field with the optional .0, * .1, or .2 modifier (similar to the .b, .w, .l modifier). If you are * manipulating a single register device which doesn't use an address * field, use "0.0" for the address and the ".0" length field will * suppress the address in the I2C data stream. This also works for * successive reads using the I2C auto-incrementing memory pointer. * * If you are manipulating a large memory with 2-byte addresses, use * the .2 address modifier, e.g. 210.2 addresses location 528 (decimal). * * Then there are the unfortunate memory chips that spill the most * significant 1, 2, or 3 bits of address into the chip address byte. * This effectively makes one chip (logically) look like 2, 4, or * 8 chips. This is handled (awkwardly) by #defining * CONFIG_SYS_I2C_EEPROM_ADDR_OVERFLOW and using the .1 modifier on the * {addr} field (since .1 is the default, it doesn't actually have to * be specified). Examples: given a memory chip at I2C chip address * 0x50, the following would happen... * i2c md 50 0 10 display 16 bytes starting at 0x000 * On the bus: <S> A0 00 <E> <S> A1 <rd> ... <rd> * i2c md 50 100 10 display 16 bytes starting at 0x100 * On the bus: <S> A2 00 <E> <S> A3 <rd> ... <rd> * i2c md 50 210 10 display 16 bytes starting at 0x210 * On the bus: <S> A4 10 <E> <S> A5 <rd> ... <rd> * This is awfully ugly. It would be nice if someone would think up * a better way of handling this. * * Adapted from cmd_mem.c which is copyright Wolfgang Denk (wd@denx.de). */ #include <common.h> #include <bootretry.h> #include <cli.h> #include <command.h> #include <console.h> #include <dm.h> #include <edid.h> #include <errno.h> #include <i2c.h> #include <log.h> #include <malloc.h> #include <asm/byteorder.h> #include <linux/compiler.h> #include <linux/delay.h> #include <u-boot/crc.h> /* Display values from last command. * Memory modify remembered values are different from display memory. 
*/ static uint i2c_dp_last_chip; static uint i2c_dp_last_addr; static uint i2c_dp_last_alen; static uint i2c_dp_last_length = 0x10; static uint i2c_mm_last_chip; static uint i2c_mm_last_addr; static uint i2c_mm_last_alen; /* If only one I2C bus is present, the list of devices to ignore when * the probe command is issued is represented by a 1D array of addresses. * When multiple buses are present, the list is an array of bus-address * pairs. The following macros take care of this */ #if defined(CONFIG_SYS_I2C_NOPROBES) #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || defined(CONFIG_I2C_MULTI_BUS) static struct { uchar bus; uchar addr; } i2c_no_probes[] = CONFIG_SYS_I2C_NOPROBES; #define GET_BUS_NUM i2c_get_bus_num() #define COMPARE_BUS(b,i) (i2c_no_probes[(i)].bus == (b)) #define COMPARE_ADDR(a,i) (i2c_no_probes[(i)].addr == (a)) #define NO_PROBE_ADDR(i) i2c_no_probes[(i)].addr #else /* single bus */ static uchar i2c_no_probes[] = CONFIG_SYS_I2C_NOPROBES; #define GET_BUS_NUM 0 #define COMPARE_BUS(b,i) ((b) == 0) /* Make compiler happy */ #define COMPARE_ADDR(a,i) (i2c_no_probes[(i)] == (a)) #define NO_PROBE_ADDR(i) i2c_no_probes[(i)] #endif /* CONFIG_IS_ENABLED(SYS_I2C_LEGACY) */ #endif #define DISP_LINE_LEN 16 /* * Default for driver model is to use the chip's existing address length. * For legacy code, this is not stored, so we need to use a suitable * default. */ #if CONFIG_IS_ENABLED(DM_I2C) #define DEFAULT_ADDR_LEN (-1) #else #define DEFAULT_ADDR_LEN 1 #endif #if CONFIG_IS_ENABLED(DM_I2C) static struct udevice *i2c_cur_bus; static int cmd_i2c_set_bus_num(unsigned int busnum) { struct udevice *bus; int ret; ret = uclass_get_device_by_seq(UCLASS_I2C, busnum, &bus); if (ret) { debug("%s: No bus %d\n", __func__, busnum); return ret; } i2c_cur_bus = bus; return 0; } static int i2c_get_cur_bus(struct udevice **busp) { #ifdef CONFIG_I2C_SET_DEFAULT_BUS_NUM if (!i2c_cur_bus) { if (cmd_i2c_set_bus_num(CONFIG_I2C_DEFAULT_BUS_NUMBER)) { printf("Default I2C bus %d not found\n", CONFIG_I2C_DEFAULT_BUS_NUMBER); return -ENODEV; } } #endif if (!i2c_cur_bus) { puts("No I2C bus selected\n"); return -ENODEV; } *busp = i2c_cur_bus; return 0; } static int i2c_get_cur_bus_chip(uint chip_addr, struct udevice **devp) { struct udevice *bus; int ret; ret = i2c_get_cur_bus(&bus); if (ret) return ret; return i2c_get_chip(bus, chip_addr, 1, devp); } #endif /** * i2c_init_board() - Board-specific I2C bus init * * This function is the default no-op implementation of I2C bus * initialization. This function can be overridden by board-specific * implementation if needed. */ __weak void i2c_init_board(void) { } /** * get_alen() - Small parser helper function to get address length * * Returns the address length. */ static uint get_alen(char *arg, uint default_len) { uint j; uint alen; alen = default_len; for (j = 0; j < 8; j++) { if (arg[j] == '.') { alen = arg[j+1] - '0'; break; } else if (arg[j] == '\0') break; } return alen; } enum i2c_err_op { I2C_ERR_READ, I2C_ERR_WRITE, }; static int i2c_report_err(int ret, enum i2c_err_op op) { printf("Error %s the chip: %d\n", op == I2C_ERR_READ ? "reading" : "writing", ret); return CMD_RET_FAILURE; } /** * do_i2c_read() - Handle the "i2c read" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. 
* * Syntax: * i2c read {i2c_chip} {devaddr}{.0, .1, .2} {len} {memaddr} */ static int do_i2c_read(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint devaddr, length; uint alen; u_char *memaddr; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc != 5) return CMD_RET_USAGE; /* * I2C chip address */ chip = hextoul(argv[1], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ devaddr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; /* * Length is the number of objects, not number of bytes. */ length = hextoul(argv[3], NULL); /* * memaddr is the address where to store things in memory */ memaddr = (u_char *)hextoul(argv[4], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (!ret) ret = dm_i2c_read(dev, devaddr, memaddr, length); #else ret = i2c_read(chip, devaddr, alen, memaddr, length); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } static int do_i2c_write(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint devaddr, length; uint alen; u_char *memaddr; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; struct dm_i2c_chip *i2c_chip; #endif if ((argc < 5) || (argc > 6)) return cmd_usage(cmdtp); /* * memaddr is the address where to store things in memory */ memaddr = (u_char *)hextoul(argv[1], NULL); /* * I2C chip address */ chip = hextoul(argv[2], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ devaddr = hextoul(argv[3], NULL); alen = get_alen(argv[3], DEFAULT_ADDR_LEN); if (alen > 3) return cmd_usage(cmdtp); /* * Length is the number of bytes. */ length = hextoul(argv[4], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); i2c_chip = dev_get_parent_plat(dev); if (!i2c_chip) return i2c_report_err(ret, I2C_ERR_WRITE); #endif if (argc == 6 && !strcmp(argv[5], "-s")) { /* * Write all bytes in a single I2C transaction. If the target * device is an EEPROM, it is your responsibility to not cross * a page boundary. No write delay upon completion, take this * into account if linking commands. */ #if CONFIG_IS_ENABLED(DM_I2C) i2c_chip->flags &= ~DM_I2C_CHIP_WR_ADDRESS; ret = dm_i2c_write(dev, devaddr, memaddr, length); #else ret = i2c_write(chip, devaddr, alen, memaddr, length); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); } else { /* * Repeated addressing - perform <length> separate * write transactions of one byte each */ while (length-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) i2c_chip->flags |= DM_I2C_CHIP_WR_ADDRESS; ret = dm_i2c_write(dev, devaddr++, memaddr++, 1); #else ret = i2c_write(chip, devaddr++, alen, memaddr++, 1); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); /* * No write delay with FRAM devices. 
*/ #if !defined(CONFIG_SYS_I2C_FRAM) udelay(11000); #endif } } return 0; } #if CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_flags(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct udevice *dev; uint flags; int chip; int ret; if (argc < 2) return CMD_RET_USAGE; chip = hextoul(argv[1], NULL); ret = i2c_get_cur_bus_chip(chip, &dev); if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (argc > 2) { flags = hextoul(argv[2], NULL); ret = i2c_set_chip_flags(dev, flags); } else { ret = i2c_get_chip_flags(dev, &flags); if (!ret) printf("%x\n", flags); } if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } static int do_i2c_olen(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct udevice *dev; uint olen; int chip; int ret; if (argc < 2) return CMD_RET_USAGE; chip = hextoul(argv[1], NULL); ret = i2c_get_cur_bus_chip(chip, &dev); if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (argc > 2) { olen = hextoul(argv[2], NULL); ret = i2c_set_chip_offset_len(dev, olen); } else { ret = i2c_get_chip_offset_len(dev); if (ret >= 0) { printf("%x\n", ret); ret = 0; } } if (ret) return i2c_report_err(ret, I2C_ERR_READ); return 0; } #endif /** * do_i2c_md() - Handle the "i2c md" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c md {i2c_chip} {addr}{.0, .1, .2} {len} */ static int do_i2c_md(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint addr, length; uint alen; uint j, nbytes, linebytes; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif /* We use the last specified parameters, unless new ones are * entered. */ chip = i2c_dp_last_chip; addr = i2c_dp_last_addr; alen = i2c_dp_last_alen; length = i2c_dp_last_length; if (argc < 3) return CMD_RET_USAGE; if ((flag & CMD_FLAG_REPEAT) == 0) { /* * New command specified. */ /* * I2C chip address */ chip = hextoul(argv[1], NULL); /* * I2C data address within the chip. This can be 1 or * 2 bytes long. Some day it might be 3 bytes long :-). */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; /* * If another parameter, it is the length to display. * Length is the number of objects, not number of bytes. */ if (argc > 3) length = hextoul(argv[3], NULL); } #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Print the lines. * * We buffer all read data, so we can make sure data is read only * once. */ nbytes = length; do { unsigned char linebuf[DISP_LINE_LEN]; unsigned char *cp; linebytes = (nbytes > DISP_LINE_LEN) ? 
DISP_LINE_LEN : nbytes; #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, linebuf, linebytes); #else ret = i2c_read(chip, addr, alen, linebuf, linebytes); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); else { printf("%04x:", addr); cp = linebuf; for (j=0; j<linebytes; j++) { printf(" %02x", *cp++); addr++; } puts (" "); cp = linebuf; for (j=0; j<linebytes; j++) { if ((*cp < 0x20) || (*cp > 0x7e)) puts ("."); else printf("%c", *cp); cp++; } putc ('\n'); } nbytes -= linebytes; } while (nbytes > 0); i2c_dp_last_chip = chip; i2c_dp_last_addr = addr; i2c_dp_last_alen = alen; i2c_dp_last_length = length; return 0; } /** * do_i2c_mw() - Handle the "i2c mw" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c mw {i2c_chip} {addr}{.0, .1, .2} {data} [{count}] */ static int do_i2c_mw(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; uint alen; uchar byte; uint count; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if ((argc < 4) || (argc > 5)) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Value to write is always specified. */ byte = hextoul(argv[3], NULL); /* * Optional count */ if (argc == 5) count = hextoul(argv[4], NULL); else count = 1; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_write(dev, addr++, &byte, 1); #else ret = i2c_write(chip, addr++, alen, &byte, 1); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); /* * Wait for the write to complete. The write can take * up to 10mSec (we allow a little more time). */ /* * No write delay with FRAM devices. */ #if !defined(CONFIG_SYS_I2C_FRAM) udelay(11000); #endif } return 0; } /** * do_i2c_crc() - Handle the "i2c crc32" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Calculate a CRC on memory * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c crc32 {i2c_chip} {addr}{.0, .1, .2} {count} */ static int do_i2c_crc(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; uint alen; uint count; uchar byte; ulong crc; ulong err; int ret = 0; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 4) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Count is always specified */ count = hextoul(argv[3], NULL); printf ("CRC32 for %08lx ... %08lx ==> ", addr, addr + count - 1); /* * CRC a byte at a time. 
This is going to be slooow, but hey, the * memories are small and slow too so hopefully nobody notices. */ crc = 0; err = 0; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, &byte, 1); #else ret = i2c_read(chip, addr, alen, &byte, 1); #endif if (ret) err++; crc = crc32(crc, &byte, 1); addr++; } if (err > 0) i2c_report_err(ret, I2C_ERR_READ); else printf ("%08lx\n", crc); return 0; } /** * mod_i2c_mem() - Handle the "i2c mm" and "i2c nm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Modify memory. * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c mm{.b, .w, .l} {i2c_chip} {addr}{.0, .1, .2} * i2c nm{.b, .w, .l} {i2c_chip} {addr}{.0, .1, .2} */ static int mod_i2c_mem(struct cmd_tbl *cmdtp, int incrflag, int flag, int argc, char *const argv[]) { uint chip; ulong addr; int alen; ulong data; int size = 1; int nbytes; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc != 3) return CMD_RET_USAGE; bootretry_reset_cmd_timeout(); /* got a good command to get here */ /* * We use the last specified parameters, unless new ones are * entered. */ chip = i2c_mm_last_chip; addr = i2c_mm_last_addr; alen = i2c_mm_last_alen; if ((flag & CMD_FLAG_REPEAT) == 0) { /* * New command specified. Check for a size specification. * Defaults to byte if no or incorrect specification. */ size = cmd_get_data_size(argv[0], 1); /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; } #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Print the address, followed by value. Then accept input for * the next value. A non-converted value exits. */ do { printf("%08lx:", addr); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, (uchar *)&data, size); #else ret = i2c_read(chip, addr, alen, (uchar *)&data, size); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); data = cpu_to_be32(data); if (size == 1) printf(" %02lx", (data >> 24) & 0x000000FF); else if (size == 2) printf(" %04lx", (data >> 16) & 0x0000FFFF); else printf(" %08lx", data); nbytes = cli_readline(" ? "); if (nbytes == 0) { /* * <CR> pressed as only input, don't modify current * location and move to next. 
*/ if (incrflag) addr += size; nbytes = size; /* good enough to not time out */ bootretry_reset_cmd_timeout(); } #ifdef CONFIG_BOOT_RETRY_TIME else if (nbytes == -2) break; /* timed out, exit the command */ #endif else { char *endp; data = hextoul(console_buffer, &endp); if (size == 1) data = data << 24; else if (size == 2) data = data << 16; data = be32_to_cpu(data); nbytes = endp - console_buffer; if (nbytes) { /* * good enough to not time out */ bootretry_reset_cmd_timeout(); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_write(dev, addr, (uchar *)&data, size); #else ret = i2c_write(chip, addr, alen, (uchar *)&data, size); #endif if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #if CONFIG_SYS_EEPROM_PAGE_WRITE_DELAY_MS > 0 udelay(CONFIG_SYS_EEPROM_PAGE_WRITE_DELAY_MS * 1000); #endif if (incrflag) addr += size; } } } while (nbytes); i2c_mm_last_chip = chip; i2c_mm_last_addr = addr; i2c_mm_last_alen = alen; return 0; } /** * do_i2c_probe() - Handle the "i2c probe" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c probe {addr} * * Returns zero (success) if one or more I2C devices was found */ static int do_i2c_probe(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int j; int addr = -1; int found = 0; #if defined(CONFIG_SYS_I2C_NOPROBES) int k, skip; unsigned int bus = GET_BUS_NUM; #endif /* NOPROBES */ int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus, *dev; if (i2c_get_cur_bus(&bus)) return CMD_RET_FAILURE; #endif if (argc == 2) addr = simple_strtol(argv[1], 0, 16); puts ("Valid chip addresses:"); for (j = 0; j < 128; j++) { if ((0 <= addr) && (j != addr)) continue; #if defined(CONFIG_SYS_I2C_NOPROBES) skip = 0; for (k = 0; k < ARRAY_SIZE(i2c_no_probes); k++) { if (COMPARE_BUS(bus, k) && COMPARE_ADDR(j, k)) { skip = 1; break; } } if (skip) continue; #endif #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_probe(bus, j, 0, &dev); #else ret = i2c_probe(j); #endif if (ret == 0) { printf(" %02X", j); found++; } } putc ('\n'); #if defined(CONFIG_SYS_I2C_NOPROBES) puts ("Excluded chip addresses:"); for (k = 0; k < ARRAY_SIZE(i2c_no_probes); k++) { if (COMPARE_BUS(bus,k)) printf(" %02X", NO_PROBE_ADDR(k)); } putc ('\n'); #endif return (0 == found); } /** * do_i2c_loop() - Handle the "i2c loop" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. * * Syntax: * i2c loop {i2c_chip} {addr}{.0, .1, .2} [{length}] [{delay}] * {length} - Number of bytes to read * {delay} - A DECIMAL number and defaults to 1000 uSec */ static int do_i2c_loop(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; uint alen; uint addr; uint length; u_char bytes[16]; int delay; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 3) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. 
*/ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_WRITE); #endif /* * Length is the number of objects, not number of bytes. */ length = 1; length = hextoul(argv[3], NULL); if (length > sizeof(bytes)) length = sizeof(bytes); /* * The delay time (uSec) is optional. */ delay = 1000; if (argc > 3) delay = dectoul(argv[4], NULL); /* * Run the loop... */ while (1) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, bytes, length); #else ret = i2c_read(chip, addr, alen, bytes, length); #endif if (ret) i2c_report_err(ret, I2C_ERR_READ); udelay(delay); } /* NOTREACHED */ return 0; } /* * The SDRAM command is separately configured because many * (most?) embedded boards don't use SDRAM DIMMs. * * FIXME: Document and probably move elsewhere! */ #if defined(CONFIG_CMD_SDRAM) static void print_ddr2_tcyc (u_char const b) { printf ("%d.", (b >> 4) & 0x0F); switch (b & 0x0F) { case 0x0: case 0x1: case 0x2: case 0x3: case 0x4: case 0x5: case 0x6: case 0x7: case 0x8: case 0x9: printf ("%d ns\n", b & 0x0F); break; case 0xA: puts ("25 ns\n"); break; case 0xB: puts ("33 ns\n"); break; case 0xC: puts ("66 ns\n"); break; case 0xD: puts ("75 ns\n"); break; default: puts ("?? ns\n"); break; } } static void decode_bits (u_char const b, char const *str[], int const do_once) { u_char mask; for (mask = 0x80; mask != 0x00; mask >>= 1, ++str) { if (b & mask) { puts (*str); if (do_once) return; } } } /* * Syntax: * i2c sdram {i2c_chip} */ static int do_sdram(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { enum { unknown, EDO, SDRAM, DDR, DDR2, DDR3, DDR4 } type; uint chip; u_char data[128]; u_char cksum; int j, ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif static const char *decode_CAS_DDR2[] = { " TBD", " 6", " 5", " 4", " 3", " 2", " TBD", " TBD" }; static const char *decode_CAS_default[] = { " TBD", " 7", " 6", " 5", " 4", " 3", " 2", " 1" }; static const char *decode_CS_WE_default[] = { " TBD", " 6", " 5", " 4", " 3", " 2", " 1", " 0" }; static const char *decode_byte21_default[] = { " TBD (bit 7)\n", " Redundant row address\n", " Differential clock input\n", " Registerd DQMB inputs\n", " Buffered DQMB inputs\n", " On-card PLL\n", " Registered address/control lines\n", " Buffered address/control lines\n" }; static const char *decode_byte22_DDR2[] = { " TBD (bit 7)\n", " TBD (bit 6)\n", " TBD (bit 5)\n", " TBD (bit 4)\n", " TBD (bit 3)\n", " Supports partial array self refresh\n", " Supports 50 ohm ODT\n", " Supports weak driver\n" }; static const char *decode_row_density_DDR2[] = { "512 MiB", "256 MiB", "128 MiB", "16 GiB", "8 GiB", "4 GiB", "2 GiB", "1 GiB" }; static const char *decode_row_density_default[] = { "512 MiB", "256 MiB", "128 MiB", "64 MiB", "32 MiB", "16 MiB", "8 MiB", "4 MiB" }; if (argc < 2) return CMD_RET_USAGE; /* * Chip is always specified. 
*/ chip = hextoul(argv[1], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret) ret = dm_i2c_read(dev, 0, data, sizeof(data)); #else ret = i2c_read(chip, 0, 1, data, sizeof(data)); #endif if (ret) { puts ("No SDRAM Serial Presence Detect found.\n"); return 1; } cksum = 0; for (j = 0; j < 63; j++) { cksum += data[j]; } if (cksum != data[63]) { printf ("WARNING: Configuration data checksum failure:\n" " is 0x%02x, calculated 0x%02x\n", data[63], cksum); } printf ("SPD data revision %d.%d\n", (data[62] >> 4) & 0x0F, data[62] & 0x0F); printf ("Bytes used 0x%02X\n", data[0]); printf ("Serial memory size 0x%02X\n", 1 << data[1]); puts ("Memory type "); switch (data[2]) { case 2: type = EDO; puts ("EDO\n"); break; case 4: type = SDRAM; puts ("SDRAM\n"); break; case 7: type = DDR; puts("DDR\n"); break; case 8: type = DDR2; puts ("DDR2\n"); break; case 11: type = DDR3; puts("DDR3\n"); break; case 12: type = DDR4; puts("DDR4\n"); break; default: type = unknown; puts ("unknown\n"); break; } puts ("Row address bits "); if ((data[3] & 0x00F0) == 0) printf ("%d\n", data[3] & 0x0F); else printf ("%d/%d\n", data[3] & 0x0F, (data[3] >> 4) & 0x0F); puts ("Column address bits "); if ((data[4] & 0x00F0) == 0) printf ("%d\n", data[4] & 0x0F); else printf ("%d/%d\n", data[4] & 0x0F, (data[4] >> 4) & 0x0F); switch (type) { case DDR2: printf ("Number of ranks %d\n", (data[5] & 0x07) + 1); break; default: printf ("Module rows %d\n", data[5]); break; } switch (type) { case DDR2: printf ("Module data width %d bits\n", data[6]); break; default: printf ("Module data width %d bits\n", (data[7] << 8) | data[6]); break; } puts ("Interface signal levels "); switch(data[8]) { case 0: puts ("TTL 5.0 V\n"); break; case 1: puts ("LVTTL\n"); break; case 2: puts ("HSTL 1.5 V\n"); break; case 3: puts ("SSTL 3.3 V\n"); break; case 4: puts ("SSTL 2.5 V\n"); break; case 5: puts ("SSTL 1.8 V\n"); break; default: puts ("unknown\n"); break; } switch (type) { case DDR2: printf ("SDRAM cycle time "); print_ddr2_tcyc (data[9]); break; default: printf ("SDRAM cycle time %d.%d ns\n", (data[9] >> 4) & 0x0F, data[9] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM access time 0.%d%d ns\n", (data[10] >> 4) & 0x0F, data[10] & 0x0F); break; default: printf ("SDRAM access time %d.%d ns\n", (data[10] >> 4) & 0x0F, data[10] & 0x0F); break; } puts ("EDC configuration "); switch (data[11]) { case 0: puts ("None\n"); break; case 1: puts ("Parity\n"); break; case 2: puts ("ECC\n"); break; default: puts ("unknown\n"); break; } if ((data[12] & 0x80) == 0) puts ("No self refresh, rate "); else puts ("Self refresh, rate "); switch(data[12] & 0x7F) { case 0: puts ("15.625 us\n"); break; case 1: puts ("3.9 us\n"); break; case 2: puts ("7.8 us\n"); break; case 3: puts ("31.3 us\n"); break; case 4: puts ("62.5 us\n"); break; case 5: puts ("125 us\n"); break; default: puts ("unknown\n"); break; } switch (type) { case DDR2: printf ("SDRAM width (primary) %d\n", data[13]); break; default: printf ("SDRAM width (primary) %d\n", data[13] & 0x7F); if ((data[13] & 0x80) != 0) { printf (" (second bank) %d\n", 2 * (data[13] & 0x7F)); } break; } switch (type) { case DDR2: if (data[14] != 0) printf ("EDC width %d\n", data[14]); break; default: if (data[14] != 0) { printf ("EDC width %d\n", data[14] & 0x7F); if ((data[14] & 0x80) != 0) { printf (" (second bank) %d\n", 2 * (data[14] & 0x7F)); } } break; } if (DDR2 != type) { printf ("Min clock delay, back-to-back random column addresses " "%d\n", data[15]); } puts ("Burst 
length(s) "); if (data[16] & 0x80) puts (" Page"); if (data[16] & 0x08) puts (" 8"); if (data[16] & 0x04) puts (" 4"); if (data[16] & 0x02) puts (" 2"); if (data[16] & 0x01) puts (" 1"); putc ('\n'); printf ("Number of banks %d\n", data[17]); switch (type) { case DDR2: puts ("CAS latency(s) "); decode_bits (data[18], decode_CAS_DDR2, 0); putc ('\n'); break; default: puts ("CAS latency(s) "); decode_bits (data[18], decode_CAS_default, 0); putc ('\n'); break; } if (DDR2 != type) { puts ("CS latency(s) "); decode_bits (data[19], decode_CS_WE_default, 0); putc ('\n'); } if (DDR2 != type) { puts ("WE latency(s) "); decode_bits (data[20], decode_CS_WE_default, 0); putc ('\n'); } switch (type) { case DDR2: puts ("Module attributes:\n"); if (data[21] & 0x80) puts (" TBD (bit 7)\n"); if (data[21] & 0x40) puts (" Analysis probe installed\n"); if (data[21] & 0x20) puts (" TBD (bit 5)\n"); if (data[21] & 0x10) puts (" FET switch external enable\n"); printf (" %d PLLs on DIMM\n", (data[21] >> 2) & 0x03); if (data[20] & 0x11) { printf (" %d active registers on DIMM\n", (data[21] & 0x03) + 1); } break; default: puts ("Module attributes:\n"); if (!data[21]) puts (" (none)\n"); else decode_bits (data[21], decode_byte21_default, 0); break; } switch (type) { case DDR2: decode_bits (data[22], decode_byte22_DDR2, 0); break; default: puts ("Device attributes:\n"); if (data[22] & 0x80) puts (" TBD (bit 7)\n"); if (data[22] & 0x40) puts (" TBD (bit 6)\n"); if (data[22] & 0x20) puts (" Upper Vcc tolerance 5%\n"); else puts (" Upper Vcc tolerance 10%\n"); if (data[22] & 0x10) puts (" Lower Vcc tolerance 5%\n"); else puts (" Lower Vcc tolerance 10%\n"); if (data[22] & 0x08) puts (" Supports write1/read burst\n"); if (data[22] & 0x04) puts (" Supports precharge all\n"); if (data[22] & 0x02) puts (" Supports auto precharge\n"); if (data[22] & 0x01) puts (" Supports early RAS# precharge\n"); break; } switch (type) { case DDR2: printf ("SDRAM cycle time (2nd highest CAS latency) "); print_ddr2_tcyc (data[23]); break; default: printf ("SDRAM cycle time (2nd highest CAS latency) %d." "%d ns\n", (data[23] >> 4) & 0x0F, data[23] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM access from clock (2nd highest CAS latency) 0." "%d%d ns\n", (data[24] >> 4) & 0x0F, data[24] & 0x0F); break; default: printf ("SDRAM access from clock (2nd highest CAS latency) %d." "%d ns\n", (data[24] >> 4) & 0x0F, data[24] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM cycle time (3rd highest CAS latency) "); print_ddr2_tcyc (data[25]); break; default: printf ("SDRAM cycle time (3rd highest CAS latency) %d." "%d ns\n", (data[25] >> 4) & 0x0F, data[25] & 0x0F); break; } switch (type) { case DDR2: printf ("SDRAM access from clock (3rd highest CAS latency) 0." "%d%d ns\n", (data[26] >> 4) & 0x0F, data[26] & 0x0F); break; default: printf ("SDRAM access from clock (3rd highest CAS latency) %d." 
"%d ns\n", (data[26] >> 4) & 0x0F, data[26] & 0x0F); break; } switch (type) { case DDR2: printf ("Minimum row precharge %d.%02d ns\n", (data[27] >> 2) & 0x3F, 25 * (data[27] & 0x03)); break; default: printf ("Minimum row precharge %d ns\n", data[27]); break; } switch (type) { case DDR2: printf ("Row active to row active min %d.%02d ns\n", (data[28] >> 2) & 0x3F, 25 * (data[28] & 0x03)); break; default: printf ("Row active to row active min %d ns\n", data[28]); break; } switch (type) { case DDR2: printf ("RAS to CAS delay min %d.%02d ns\n", (data[29] >> 2) & 0x3F, 25 * (data[29] & 0x03)); break; default: printf ("RAS to CAS delay min %d ns\n", data[29]); break; } printf ("Minimum RAS pulse width %d ns\n", data[30]); switch (type) { case DDR2: puts ("Density of each row "); decode_bits (data[31], decode_row_density_DDR2, 1); putc ('\n'); break; default: puts ("Density of each row "); decode_bits (data[31], decode_row_density_default, 1); putc ('\n'); break; } switch (type) { case DDR2: puts ("Command and Address setup "); if (data[32] >= 0xA0) { printf ("1.%d%d ns\n", ((data[32] >> 4) & 0x0F) - 10, data[32] & 0x0F); } else { printf ("0.%d%d ns\n", ((data[32] >> 4) & 0x0F), data[32] & 0x0F); } break; default: printf ("Command and Address setup %c%d.%d ns\n", (data[32] & 0x80) ? '-' : '+', (data[32] >> 4) & 0x07, data[32] & 0x0F); break; } switch (type) { case DDR2: puts ("Command and Address hold "); if (data[33] >= 0xA0) { printf ("1.%d%d ns\n", ((data[33] >> 4) & 0x0F) - 10, data[33] & 0x0F); } else { printf ("0.%d%d ns\n", ((data[33] >> 4) & 0x0F), data[33] & 0x0F); } break; default: printf ("Command and Address hold %c%d.%d ns\n", (data[33] & 0x80) ? '-' : '+', (data[33] >> 4) & 0x07, data[33] & 0x0F); break; } switch (type) { case DDR2: printf ("Data signal input setup 0.%d%d ns\n", (data[34] >> 4) & 0x0F, data[34] & 0x0F); break; default: printf ("Data signal input setup %c%d.%d ns\n", (data[34] & 0x80) ? '-' : '+', (data[34] >> 4) & 0x07, data[34] & 0x0F); break; } switch (type) { case DDR2: printf ("Data signal input hold 0.%d%d ns\n", (data[35] >> 4) & 0x0F, data[35] & 0x0F); break; default: printf ("Data signal input hold %c%d.%d ns\n", (data[35] & 0x80) ? '-' : '+', (data[35] >> 4) & 0x07, data[35] & 0x0F); break; } puts ("Manufacturer's JEDEC ID "); for (j = 64; j <= 71; j++) printf ("%02X ", data[j]); putc ('\n'); printf ("Manufacturing Location %02X\n", data[72]); puts ("Manufacturer's Part Number "); for (j = 73; j <= 90; j++) printf ("%02X ", data[j]); putc ('\n'); printf ("Revision Code %02X %02X\n", data[91], data[92]); printf ("Manufacturing Date %02X %02X\n", data[93], data[94]); puts ("Assembly Serial Number "); for (j = 95; j <= 98; j++) printf ("%02X ", data[j]); putc ('\n'); if (DDR2 != type) { printf ("Speed rating PC%d\n", data[126] == 0x66 ? 
66 : data[126]); } return 0; } #endif /* * Syntax: * i2c edid {i2c_chip} */ #if defined(CONFIG_I2C_EDID) int do_edid(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; struct edid1_info edid; int ret; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 2) { cmd_usage(cmdtp); return 1; } chip = hextoul(argv[1], NULL); #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret) ret = dm_i2c_read(dev, 0, (uchar *)&edid, sizeof(edid)); #else ret = i2c_read(chip, 0, 1, (uchar *)&edid, sizeof(edid)); #endif if (ret) return i2c_report_err(ret, I2C_ERR_READ); if (edid_check_info(&edid)) { puts("Content isn't valid EDID.\n"); return 1; } edid_print_info(&edid); return 0; } #endif /* CONFIG_I2C_EDID */ #if CONFIG_IS_ENABLED(DM_I2C) static void show_bus(struct udevice *bus) { struct udevice *dev; printf("Bus %d:\t%s", dev_seq(bus), bus->name); if (device_active(bus)) printf(" (active %d)", dev_seq(bus)); printf("\n"); for (device_find_first_child(bus, &dev); dev; device_find_next_child(&dev)) { struct dm_i2c_chip *chip = dev_get_parent_plat(dev); printf(" %02x: %s, offset len %x, flags %x\n", chip->chip_addr, dev->name, chip->offset_len, chip->flags); } } #endif /** * do_i2c_show_bus() - Handle the "i2c bus" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero always. */ #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_show_bus(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { if (argc == 1) { /* show all busses */ #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; struct uclass *uc; int ret; ret = uclass_get(UCLASS_I2C, &uc); if (ret) return CMD_RET_FAILURE; uclass_foreach_dev(bus, uc) show_bus(bus); #else int i; for (i = 0; i < CONFIG_SYS_NUM_I2C_BUSES; i++) { printf("Bus %d:\t%s", i, I2C_ADAP_NR(i)->name); #ifndef CONFIG_SYS_I2C_DIRECT_BUS int j; for (j = 0; j < CONFIG_SYS_I2C_MAX_HOPS; j++) { if (i2c_bus[i].next_hop[j].chip == 0) break; printf("->%s@0x%2x:%d", i2c_bus[i].next_hop[j].mux.name, i2c_bus[i].next_hop[j].chip, i2c_bus[i].next_hop[j].channel); } #endif printf("\n"); } #endif } else { int i; /* show specific bus */ i = dectoul(argv[1], NULL); #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; int ret; ret = uclass_get_device_by_seq(UCLASS_I2C, i, &bus); if (ret) { printf("Invalid bus %d: err=%d\n", i, ret); return CMD_RET_FAILURE; } show_bus(bus); #else if (i >= CONFIG_SYS_NUM_I2C_BUSES) { printf("Invalid bus %d\n", i); return -1; } printf("Bus %d:\t%s", i, I2C_ADAP_NR(i)->name); #ifndef CONFIG_SYS_I2C_DIRECT_BUS int j; for (j = 0; j < CONFIG_SYS_I2C_MAX_HOPS; j++) { if (i2c_bus[i].next_hop[j].chip == 0) break; printf("->%s@0x%2x:%d", i2c_bus[i].next_hop[j].mux.name, i2c_bus[i].next_hop[j].chip, i2c_bus[i].next_hop[j].channel); } #endif printf("\n"); #endif } return 0; } #endif /** * do_i2c_bus_num() - Handle the "i2c dev" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. 
*/ #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || defined(CONFIG_I2C_MULTI_BUS) || \ CONFIG_IS_ENABLED(DM_I2C) static int do_i2c_bus_num(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int ret = 0; int bus_no; if (argc == 1) { /* querying current setting */ #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; if (!i2c_get_cur_bus(&bus)) bus_no = dev_seq(bus); else bus_no = -1; #else bus_no = i2c_get_bus_num(); #endif printf("Current bus is %d\n", bus_no); } else { bus_no = dectoul(argv[1], NULL); #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) if (bus_no >= CONFIG_SYS_NUM_I2C_BUSES) { printf("Invalid bus %d\n", bus_no); return -1; } #endif printf("Setting bus to %d\n", bus_no); #if CONFIG_IS_ENABLED(DM_I2C) ret = cmd_i2c_set_bus_num(bus_no); #else ret = i2c_set_bus_num(bus_no); #endif if (ret) printf("Failure changing bus number (%d)\n", ret); } return ret ? CMD_RET_FAILURE : 0; } #endif /* CONFIG_IS_ENABLED(SYS_I2C_LEGACY) */ /** * do_i2c_bus_speed() - Handle the "i2c speed" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c_bus_speed(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { int speed, ret=0; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; if (i2c_get_cur_bus(&bus)) return 1; #endif if (argc == 1) { #if CONFIG_IS_ENABLED(DM_I2C) speed = dm_i2c_get_bus_speed(bus); #else speed = i2c_get_bus_speed(); #endif /* querying current speed */ printf("Current bus speed=%d\n", speed); } else { speed = dectoul(argv[1], NULL); printf("Setting bus speed to %d Hz\n", speed); #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_set_bus_speed(bus, speed); #else ret = i2c_set_bus_speed(speed); #endif if (ret) printf("Failure changing bus speed (%d)\n", ret); } return ret ? CMD_RET_FAILURE : 0; } /** * do_i2c_mm() - Handle the "i2c mm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c_mm(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { return mod_i2c_mem (cmdtp, 1, flag, argc, argv); } /** * do_i2c_nm() - Handle the "i2c nm" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c_nm(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { return mod_i2c_mem (cmdtp, 0, flag, argc, argv); } /** * do_i2c_reset() - Handle the "i2c reset" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero always. 
*/ static int do_i2c_reset(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *bus; if (i2c_get_cur_bus(&bus)) return CMD_RET_FAILURE; if (i2c_deblock(bus)) { printf("Error: Not supported by the driver\n"); return CMD_RET_FAILURE; } #elif CONFIG_IS_ENABLED(SYS_I2C_LEGACY) i2c_init(I2C_ADAP->speed, I2C_ADAP->slaveaddr); #endif return 0; } static struct cmd_tbl cmd_i2c_sub[] = { #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || CONFIG_IS_ENABLED(DM_I2C) U_BOOT_CMD_MKENT(bus, 1, 1, do_i2c_show_bus, "", ""), #endif U_BOOT_CMD_MKENT(crc32, 3, 1, do_i2c_crc, "", ""), #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || \ defined(CONFIG_I2C_MULTI_BUS) || CONFIG_IS_ENABLED(DM_I2C) U_BOOT_CMD_MKENT(dev, 1, 1, do_i2c_bus_num, "", ""), #endif /* CONFIG_I2C_MULTI_BUS */ #if defined(CONFIG_I2C_EDID) U_BOOT_CMD_MKENT(edid, 1, 1, do_edid, "", ""), #endif /* CONFIG_I2C_EDID */ U_BOOT_CMD_MKENT(loop, 3, 1, do_i2c_loop, "", ""), U_BOOT_CMD_MKENT(md, 3, 1, do_i2c_md, "", ""), U_BOOT_CMD_MKENT(mm, 2, 1, do_i2c_mm, "", ""), U_BOOT_CMD_MKENT(mw, 3, 1, do_i2c_mw, "", ""), U_BOOT_CMD_MKENT(nm, 2, 1, do_i2c_nm, "", ""), U_BOOT_CMD_MKENT(probe, 0, 1, do_i2c_probe, "", ""), U_BOOT_CMD_MKENT(read, 5, 1, do_i2c_read, "", ""), U_BOOT_CMD_MKENT(write, 6, 0, do_i2c_write, "", ""), #if CONFIG_IS_ENABLED(DM_I2C) U_BOOT_CMD_MKENT(flags, 2, 1, do_i2c_flags, "", ""), U_BOOT_CMD_MKENT(olen, 2, 1, do_i2c_olen, "", ""), #endif U_BOOT_CMD_MKENT(reset, 0, 1, do_i2c_reset, "", ""), #if defined(CONFIG_CMD_SDRAM) U_BOOT_CMD_MKENT(sdram, 1, 1, do_sdram, "", ""), #endif U_BOOT_CMD_MKENT(speed, 1, 1, do_i2c_bus_speed, "", ""), }; static __maybe_unused void i2c_reloc(void) { static int relocated; if (!relocated) { fixup_cmdtable(cmd_i2c_sub, ARRAY_SIZE(cmd_i2c_sub)); relocated = 1; }; } /** * do_i2c() - Handle the "i2c" command-line command * @cmdtp: Command data struct pointer * @flag: Command flag * @argc: Command-line argument count * @argv: Array of command-line arguments * * Returns zero on success, CMD_RET_USAGE in case of misuse and negative * on error. */ static int do_i2c(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { struct cmd_tbl *c; #ifdef CONFIG_NEEDS_MANUAL_RELOC i2c_reloc(); #endif if (argc < 2) return CMD_RET_USAGE; /* Strip off leading 'i2c' command argument */ argc--; argv++; c = find_cmd_tbl(argv[0], &cmd_i2c_sub[0], ARRAY_SIZE(cmd_i2c_sub)); if (c) return c->cmd(cmdtp, flag, argc, argv); else return CMD_RET_USAGE; } /***************************************************/ #ifdef CONFIG_SYS_LONGHELP static char i2c_help_text[] = #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || CONFIG_IS_ENABLED(DM_I2C) "bus [muxtype:muxaddr:muxchannel] - show I2C bus info\n" "i2c " /* That's the prefix for the crc32 command below. 
*/ #endif "crc32 chip address[.0, .1, .2] count - compute CRC32 checksum\n" #if CONFIG_IS_ENABLED(SYS_I2C_LEGACY) || \ defined(CONFIG_I2C_MULTI_BUS) || CONFIG_IS_ENABLED(DM_I2C) "i2c dev [dev] - show or set current I2C bus\n" #endif /* CONFIG_I2C_MULTI_BUS */ #if defined(CONFIG_I2C_EDID) "i2c edid chip - print EDID configuration information\n" #endif /* CONFIG_I2C_EDID */ "i2c loop chip address[.0, .1, .2] [# of objects] - looping read of device\n" "i2c md chip address[.0, .1, .2] [# of objects] - read from I2C device\n" "i2c mm chip address[.0, .1, .2] - write to I2C device (auto-incrementing)\n" "i2c mw chip address[.0, .1, .2] value [count] - write to I2C device (fill)\n" "i2c nm chip address[.0, .1, .2] - write to I2C device (constant address)\n" "i2c probe [address] - test for and show device(s) on the I2C bus\n" "i2c read chip address[.0, .1, .2] length memaddress - read to memory\n" "i2c write memaddress chip address[.0, .1, .2] length [-s] - write memory\n" " to I2C; the -s option selects bulk write in a single transaction\n" #if CONFIG_IS_ENABLED(DM_I2C) "i2c flags chip [flags] - set or get chip flags\n" "i2c olen chip [offset_length] - set or get chip offset length\n" #endif "i2c reset - re-init the I2C Controller\n" #if defined(CONFIG_CMD_SDRAM) "i2c sdram chip - print SDRAM configuration information\n" #endif "i2c speed [speed] - show or set I2C bus speed"; #endif U_BOOT_CMD( i2c, 7, 1, do_i2c, "I2C sub-system", i2c_help_text );
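An aside on the dispatch in do_i2c above: it strips the leading "i2c" word (argc--; argv++) and looks the subcommand up in cmd_i2c_sub via find_cmd_tbl. Below is a minimal standalone sketch of the same table-driven pattern, assuming nothing beyond the C standard library; the names (subcmd, dispatch, do_speed) are hypothetical, not U-Boot's.

#include <stdio.h>
#include <string.h>

struct subcmd {
	const char *name;
	int (*handler)(int argc, char *const argv[]);
};

static int do_speed(int argc, char *const argv[])
{
	printf("speed handler, argc=%d, arg=%s\n", argc,
	       argc > 1 ? argv[1] : "(none)");
	return 0;
}

static const struct subcmd subcmds[] = {
	{ "speed", do_speed },
};

static int dispatch(int argc, char *const argv[])
{
	size_t i;

	if (argc < 2)
		return 1;	/* usage error, like CMD_RET_USAGE */
	argc--;			/* strip the top-level command word, */
	argv++;			/* exactly as do_i2c does */
	for (i = 0; i < sizeof(subcmds) / sizeof(subcmds[0]); i++)
		if (!strcmp(argv[0], subcmds[i].name))
			return subcmds[i].handler(argc, argv);
	return 1;		/* unknown subcommand */
}

int main(void)
{
	char *argv[] = { "i2c", "speed", "100000", NULL };

	return dispatch(3, argv);
}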
static int do_i2c_crc(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; int alen; int count; uchar byte; ulong crc; ulong err; int ret = 0; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 4) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Count is always specified */ count = hextoul(argv[3], NULL); printf ("CRC32 for %08lx ... %08lx ==> ", addr, addr + count - 1); /* * CRC a byte at a time. This is going to be slooow, but hey, the * memories are small and slow too so hopefully nobody notices. */ crc = 0; err = 0; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, &byte, 1); #else ret = i2c_read(chip, addr, alen, &byte, 1); #endif if (ret) err++; crc = crc32(crc, &byte, 1); addr++; } if (err > 0) i2c_report_err(ret, I2C_ERR_READ); else printf ("%08lx\n", crc); return 0; }
static int do_i2c_crc(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[]) { uint chip; ulong addr; uint alen; uint count; uchar byte; ulong crc; ulong err; int ret = 0; #if CONFIG_IS_ENABLED(DM_I2C) struct udevice *dev; #endif if (argc < 4) return CMD_RET_USAGE; /* * Chip is always specified. */ chip = hextoul(argv[1], NULL); /* * Address is always specified. */ addr = hextoul(argv[2], NULL); alen = get_alen(argv[2], DEFAULT_ADDR_LEN); if (alen > 3) return CMD_RET_USAGE; #if CONFIG_IS_ENABLED(DM_I2C) ret = i2c_get_cur_bus_chip(chip, &dev); if (!ret && alen != -1) ret = i2c_set_chip_offset_len(dev, alen); if (ret) return i2c_report_err(ret, I2C_ERR_READ); #endif /* * Count is always specified */ count = hextoul(argv[3], NULL); printf ("CRC32 for %08lx ... %08lx ==> ", addr, addr + count - 1); /* * CRC a byte at a time. This is going to be slooow, but hey, the * memories are small and slow too so hopefully nobody notices. */ crc = 0; err = 0; while (count-- > 0) { #if CONFIG_IS_ENABLED(DM_I2C) ret = dm_i2c_read(dev, addr, &byte, 1); #else ret = i2c_read(chip, addr, alen, &byte, 1); #endif if (ret) err++; crc = crc32(crc, &byte, 1); addr++; } if (err > 0) i2c_report_err(ret, I2C_ERR_READ); else printf ("%08lx\n", crc); return 0; }
{'added': [(203, 'static uint get_alen(char *arg, uint default_len)'), (205, '\tuint\tj;'), (206, '\tuint\talen;'), (250, '\tuint\talen;'), (304, '\tuint\talen;'), (472, '\tuint\talen;'), (473, '\tuint\tj, nbytes, linebytes;'), (592, '\tuint\talen;'), (594, '\tuint\tcount;'), (679, '\tuint\talen;'), (680, '\tuint\tcount;'), (988, '\tuint\talen;')], 'deleted': [(203, 'static uint get_alen(char *arg, int default_len)'), (205, '\tint\tj;'), (206, '\tint\talen;'), (250, '\tint alen;'), (304, '\tint alen;'), (472, '\tint alen;'), (473, '\tint\tj, nbytes, linebytes;'), (592, '\tint\talen;'), (594, '\tint\tcount;'), (679, '\tint\talen;'), (680, '\tint\tcount;'), (988, '\tint alen;')]}
12
12
1,240
7,675
42
266
12
https://github.com/u-boot/u-boot
CVE-2022-34835
CWE-787
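The diff above is the whole of this record's fix: twelve declarations change from int to uint, most of them the alen address-length locals fed by get_alen. A sketch of why the signedness matters, under my reading of the get_alen parser shown in the code: a byte below '0' after the '.' suffix (for example a second '.') yields a negative signed result, which passes the "alen > 3" sanity check and then flows into the legacy i2c_read address handling, where CVE-2022-34835 reports a stack-based out-of-bounds write (hence the CWE-787 tag); as an unsigned value it wraps to a huge number and is rejected. The demo below is my own standalone code; get_alen_signed is a simplified copy of the record's code_before parser, not the dataset text itself.

#include <stdio.h>

/* Simplified copy of the record's signed parser (code_before variant). */
static int get_alen_signed(const char *arg, int default_len)
{
	int j, alen = default_len;

	for (j = 0; j < 8; j++) {
		if (arg[j] == '.') {
			alen = arg[j + 1] - '0';	/* '.' - '0' == -2 */
			break;
		} else if (arg[j] == '\0')
			break;
	}
	return alen;
}

int main(void)
{
	int alen = get_alen_signed("0..", 1);
	unsigned int ualen = (unsigned int)alen;

	/* The command code rejects an address length only if alen > 3. */
	printf("signed:   alen=%d  rejected=%d\n", alen, alen > 3);
	printf("unsigned: alen=%u  rejected=%d\n", ualen, ualen > 3);
	return 0;
}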
1,511
jp2_cod.c
C
jp2_box_create
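This record's target method, jp2_box_create, appears early in the code_before field that follows: allocate the box, zero it, set type and len, then look up the per-type info and ops tables. One detail worth noting in the constructor as shown: the error path after a failed jp2_boxinfolookup returns 0 without freeing the freshly allocated box; as written the lookup always returns a fallback entry, so that path cannot fire in practice, but the conventional shape is a single cleanup exit. Below is a minimal standalone sketch of that goto-cleanup constructor idiom, with hypothetical names (box_create, lookup_info) of my own; it makes no claim about this record's actual fix, which is cut off at the end of the section.

#include <stdlib.h>
#include <string.h>

struct box {
	int type;
	const char *info;
};

/* Stub standing in for jp2_boxinfolookup; allowed to fail for the demo. */
static const char *lookup_info(int type)
{
	return type ? "known-box" : NULL;
}

struct box *box_create(int type)
{
	struct box *box;

	if (!(box = malloc(sizeof(*box))))
		return NULL;
	memset(box, 0, sizeof(*box));
	box->type = type;
	if (!(box->info = lookup_info(type)))
		goto error;	/* every failure path frees the box */
	return box;

error:
	free(box);
	return NULL;
}

int main(void)
{
	struct box *b = box_create(0);	/* lookup fails: no leak */

	free(b);	/* free(NULL) is a no-op */
	return 0;
}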
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2002 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * JP2 Library * * $Id$ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <assert.h> #include <stdlib.h> #include <inttypes.h> #include "jasper/jas_stream.h" #include "jasper/jas_malloc.h" #include "jasper/jas_debug.h" #include "jp2_cod.h" /******************************************************************************\ * Function prototypes. 
\******************************************************************************/ #define ONES(n) ((1 << (n)) - 1) jp2_boxinfo_t *jp2_boxinfolookup(int type); static int jp2_getuint8(jas_stream_t *in, uint_fast8_t *val); static int jp2_getuint16(jas_stream_t *in, uint_fast16_t *val); static int jp2_getuint32(jas_stream_t *in, uint_fast32_t *val); static int jp2_getuint64(jas_stream_t *in, uint_fast64_t *val); static int jp2_putuint8(jas_stream_t *out, uint_fast8_t val); static int jp2_putuint16(jas_stream_t *out, uint_fast16_t val); static int jp2_putuint32(jas_stream_t *out, uint_fast32_t val); static int jp2_putuint64(jas_stream_t *out, uint_fast64_t val); static int jp2_getint(jas_stream_t *in, int s, int n, int_fast32_t *val); jp2_box_t *jp2_box_get(jas_stream_t *in); void jp2_box_dump(jp2_box_t *box, FILE *out); static int jp2_jp_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_jp_putdata(jp2_box_t *box, jas_stream_t *out); static int jp2_ftyp_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_ftyp_putdata(jp2_box_t *box, jas_stream_t *out); static int jp2_ihdr_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_ihdr_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_bpcc_destroy(jp2_box_t *box); static int jp2_bpcc_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_bpcc_putdata(jp2_box_t *box, jas_stream_t *out); static int jp2_colr_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_colr_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_colr_dumpdata(jp2_box_t *box, FILE *out); static void jp2_colr_destroy(jp2_box_t *box); static void jp2_cdef_destroy(jp2_box_t *box); static int jp2_cdef_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_cdef_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_cdef_dumpdata(jp2_box_t *box, FILE *out); static void jp2_cmap_destroy(jp2_box_t *box); static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_cmap_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_cmap_dumpdata(jp2_box_t *box, FILE *out); static void jp2_pclr_destroy(jp2_box_t *box); static int jp2_pclr_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_pclr_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_pclr_dumpdata(jp2_box_t *box, FILE *out); /******************************************************************************\ * Local data. 
\******************************************************************************/ jp2_boxinfo_t jp2_boxinfos[] = { {JP2_BOX_JP, "JP", 0, {0, 0, jp2_jp_getdata, jp2_jp_putdata, 0}}, {JP2_BOX_FTYP, "FTYP", 0, {0, 0, jp2_ftyp_getdata, jp2_ftyp_putdata, 0}}, {JP2_BOX_JP2H, "JP2H", JP2_BOX_SUPER, {0, 0, 0, 0, 0}}, {JP2_BOX_IHDR, "IHDR", 0, {0, 0, jp2_ihdr_getdata, jp2_ihdr_putdata, 0}}, {JP2_BOX_BPCC, "BPCC", 0, {0, jp2_bpcc_destroy, jp2_bpcc_getdata, jp2_bpcc_putdata, 0}}, {JP2_BOX_COLR, "COLR", 0, {0, jp2_colr_destroy, jp2_colr_getdata, jp2_colr_putdata, jp2_colr_dumpdata}}, {JP2_BOX_PCLR, "PCLR", 0, {0, jp2_pclr_destroy, jp2_pclr_getdata, jp2_pclr_putdata, jp2_pclr_dumpdata}}, {JP2_BOX_CMAP, "CMAP", 0, {0, jp2_cmap_destroy, jp2_cmap_getdata, jp2_cmap_putdata, jp2_cmap_dumpdata}}, {JP2_BOX_CDEF, "CDEF", 0, {0, jp2_cdef_destroy, jp2_cdef_getdata, jp2_cdef_putdata, jp2_cdef_dumpdata}}, {JP2_BOX_RES, "RES", JP2_BOX_SUPER, {0, 0, 0, 0, 0}}, {JP2_BOX_RESC, "RESC", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_RESD, "RESD", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_JP2C, "JP2C", JP2_BOX_NODATA, {0, 0, 0, 0, 0}}, {JP2_BOX_JP2I, "JP2I", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_XML, "XML", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_UUID, "UUID", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_UINF, "UINF", JP2_BOX_SUPER, {0, 0, 0, 0, 0}}, {JP2_BOX_ULST, "ULST", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_URL, "URL", 0, {0, 0, 0, 0, 0}}, {0, 0, 0, {0, 0, 0, 0, 0}}, }; jp2_boxinfo_t jp2_boxinfo_unk = { 0, "Unknown", 0, {0, 0, 0, 0, 0} }; /******************************************************************************\ * Box constructor. \******************************************************************************/ jp2_box_t *jp2_box_create(int type) { jp2_box_t *box; jp2_boxinfo_t *boxinfo; if (!(box = jas_malloc(sizeof(jp2_box_t)))) { return 0; } memset(box, 0, sizeof(jp2_box_t)); box->type = type; box->len = 0; if (!(boxinfo = jp2_boxinfolookup(type))) { return 0; } box->info = boxinfo; box->ops = &boxinfo->ops; return box; } /******************************************************************************\ * Box destructor. \******************************************************************************/ void jp2_box_destroy(jp2_box_t *box) { if (box->ops->destroy) { (*box->ops->destroy)(box); } jas_free(box); } static void jp2_bpcc_destroy(jp2_box_t *box) { jp2_bpcc_t *bpcc = &box->data.bpcc; if (bpcc->bpcs) { jas_free(bpcc->bpcs); bpcc->bpcs = 0; } } static void jp2_cdef_destroy(jp2_box_t *box) { jp2_cdef_t *cdef = &box->data.cdef; if (cdef->ents) { jas_free(cdef->ents); cdef->ents = 0; } } /******************************************************************************\ * Box input. \******************************************************************************/ jp2_box_t *jp2_box_get(jas_stream_t *in) { jp2_box_t *box; jp2_boxinfo_t *boxinfo; jas_stream_t *tmpstream; uint_fast32_t len; uint_fast64_t extlen; bool dataflag; box = 0; tmpstream = 0; if (!(box = jas_malloc(sizeof(jp2_box_t)))) { goto error; } // Mark the box data as never having been constructed // so that we will not errantly attempt to destroy it later. 
box->ops = &jp2_boxinfo_unk.ops; if (jp2_getuint32(in, &len) || jp2_getuint32(in, &box->type)) { goto error; } boxinfo = jp2_boxinfolookup(box->type); box->info = boxinfo; box->len = len; JAS_DBGLOG(10, ( "preliminary processing of JP2 box: type=%c%s%c (0x%08x); length=%d\n", '"', boxinfo->name, '"', box->type, box->len )); if (box->len == 1) { if (jp2_getuint64(in, &extlen)) { goto error; } if (extlen > 0xffffffffUL) { jas_eprintf("warning: cannot handle large 64-bit box length\n"); extlen = 0xffffffffUL; } box->len = extlen; box->datalen = extlen - JP2_BOX_HDRLEN(true); } else { box->datalen = box->len - JP2_BOX_HDRLEN(false); } if (box->len != 0 && box->len < 8) { goto error; } dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA)); if (dataflag) { if (!(tmpstream = jas_stream_memopen(0, 0))) { goto error; } if (jas_stream_copy(tmpstream, in, box->datalen)) { jas_eprintf("cannot copy box data\n"); goto error; } jas_stream_rewind(tmpstream); // From here onwards, the box data will need to be destroyed. // So, initialize the box operations. box->ops = &boxinfo->ops; if (box->ops->getdata) { if ((*box->ops->getdata)(box, tmpstream)) { jas_eprintf("cannot parse box data\n"); goto error; } } jas_stream_close(tmpstream); } if (jas_getdbglevel() >= 1) { jp2_box_dump(box, stderr); } return box; error: if (box) { jp2_box_destroy(box); } if (tmpstream) { jas_stream_close(tmpstream); } return 0; } void jp2_box_dump(jp2_box_t *box, FILE *out) { jp2_boxinfo_t *boxinfo; boxinfo = jp2_boxinfolookup(box->type); assert(boxinfo); fprintf(out, "JP2 box: "); fprintf(out, "type=%c%s%c (0x%08"PRIxFAST32"); length=%"PRIuFAST32"\n", '"', boxinfo->name, '"', box->type, box->len); if (box->ops->dumpdata) { (*box->ops->dumpdata)(box, out); } } static int jp2_jp_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_jp_t *jp = &box->data.jp; if (jp2_getuint32(in, &jp->magic)) { return -1; } return 0; } static int jp2_ftyp_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_ftyp_t *ftyp = &box->data.ftyp; unsigned int i; if (jp2_getuint32(in, &ftyp->majver) || jp2_getuint32(in, &ftyp->minver)) { return -1; } ftyp->numcompatcodes = (box->datalen - 8) / 4; if (ftyp->numcompatcodes > JP2_FTYP_MAXCOMPATCODES) { return -1; } for (i = 0; i < ftyp->numcompatcodes; ++i) { if (jp2_getuint32(in, &ftyp->compatcodes[i])) { return -1; } } return 0; } static int jp2_ihdr_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_ihdr_t *ihdr = &box->data.ihdr; if (jp2_getuint32(in, &ihdr->height) || jp2_getuint32(in, &ihdr->width) || jp2_getuint16(in, &ihdr->numcmpts) || jp2_getuint8(in, &ihdr->bpc) || jp2_getuint8(in, &ihdr->comptype) || jp2_getuint8(in, &ihdr->csunk) || jp2_getuint8(in, &ihdr->ipr)) { return -1; } return 0; } static int jp2_bpcc_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_bpcc_t *bpcc = &box->data.bpcc; unsigned int i; bpcc->numcmpts = box->datalen; if (!(bpcc->bpcs = jas_alloc2(bpcc->numcmpts, sizeof(uint_fast8_t)))) { return -1; } for (i = 0; i < bpcc->numcmpts; ++i) { if (jp2_getuint8(in, &bpcc->bpcs[i])) { return -1; } } return 0; } static void jp2_colr_dumpdata(jp2_box_t *box, FILE *out) { jp2_colr_t *colr = &box->data.colr; fprintf(out, "method=%d; pri=%d; approx=%d\n", (int)colr->method, (int)colr->pri, (int)colr->approx); switch (colr->method) { case JP2_COLR_ENUM: fprintf(out, "csid=%d\n", (int)colr->csid); break; case JP2_COLR_ICC: jas_memdump(out, colr->iccp, colr->iccplen); break; } } static int jp2_colr_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_colr_t *colr = &box->data.colr; colr->csid = 0; 
colr->iccp = 0; colr->iccplen = 0; if (jp2_getuint8(in, &colr->method) || jp2_getuint8(in, &colr->pri) || jp2_getuint8(in, &colr->approx)) { return -1; } switch (colr->method) { case JP2_COLR_ENUM: if (jp2_getuint32(in, &colr->csid)) { return -1; } break; case JP2_COLR_ICC: colr->iccplen = box->datalen - 3; if (!(colr->iccp = jas_alloc2(colr->iccplen, sizeof(uint_fast8_t)))) { return -1; } if (jas_stream_read(in, colr->iccp, colr->iccplen) != colr->iccplen) { return -1; } break; } return 0; } static void jp2_cdef_dumpdata(jp2_box_t *box, FILE *out) { jp2_cdef_t *cdef = &box->data.cdef; unsigned int i; for (i = 0; i < cdef->numchans; ++i) { fprintf(out, "channo=%"PRIuFAST16"; type=%"PRIuFAST16"; assoc=%"PRIuFAST16"\n", cdef->ents[i].channo, cdef->ents[i].type, cdef->ents[i].assoc); } } static void jp2_colr_destroy(jp2_box_t *box) { jp2_colr_t *colr = &box->data.colr; if (colr->iccp) { jas_free(colr->iccp); } } static int jp2_cdef_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_cdef_t *cdef = &box->data.cdef; jp2_cdefchan_t *chan; unsigned int channo; if (jp2_getuint16(in, &cdef->numchans)) { return -1; } if (!(cdef->ents = jas_alloc2(cdef->numchans, sizeof(jp2_cdefchan_t)))) { return -1; } for (channo = 0; channo < cdef->numchans; ++channo) { chan = &cdef->ents[channo]; if (jp2_getuint16(in, &chan->channo) || jp2_getuint16(in, &chan->type) || jp2_getuint16(in, &chan->assoc)) { return -1; } } return 0; } /******************************************************************************\ * Box output. \******************************************************************************/ int jp2_box_put(jp2_box_t *box, jas_stream_t *out) { jas_stream_t *tmpstream; bool extlen; bool dataflag; tmpstream = 0; dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA)); if (dataflag) { if (!(tmpstream = jas_stream_memopen(0, 0))) { goto error; } if (box->ops->putdata) { if ((*box->ops->putdata)(box, tmpstream)) { goto error; } } box->len = jas_stream_tell(tmpstream) + JP2_BOX_HDRLEN(false); jas_stream_rewind(tmpstream); } extlen = (box->len >= (((uint_fast64_t)1) << 32)) != 0; if (jp2_putuint32(out, extlen ? 
1 : box->len)) { goto error; } if (jp2_putuint32(out, box->type)) { goto error; } if (extlen) { if (jp2_putuint64(out, box->len)) { goto error; } } if (dataflag) { if (jas_stream_copy(out, tmpstream, box->len - JP2_BOX_HDRLEN(false))) { goto error; } jas_stream_close(tmpstream); } return 0; error: if (tmpstream) { jas_stream_close(tmpstream); } return -1; } static int jp2_jp_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_jp_t *jp = &box->data.jp; if (jp2_putuint32(out, jp->magic)) { return -1; } return 0; } static int jp2_ftyp_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_ftyp_t *ftyp = &box->data.ftyp; unsigned int i; if (jp2_putuint32(out, ftyp->majver) || jp2_putuint32(out, ftyp->minver)) { return -1; } for (i = 0; i < ftyp->numcompatcodes; ++i) { if (jp2_putuint32(out, ftyp->compatcodes[i])) { return -1; } } return 0; } static int jp2_ihdr_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_ihdr_t *ihdr = &box->data.ihdr; if (jp2_putuint32(out, ihdr->height) || jp2_putuint32(out, ihdr->width) || jp2_putuint16(out, ihdr->numcmpts) || jp2_putuint8(out, ihdr->bpc) || jp2_putuint8(out, ihdr->comptype) || jp2_putuint8(out, ihdr->csunk) || jp2_putuint8(out, ihdr->ipr)) { return -1; } return 0; } static int jp2_bpcc_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_bpcc_t *bpcc = &box->data.bpcc; unsigned int i; for (i = 0; i < bpcc->numcmpts; ++i) { if (jp2_putuint8(out, bpcc->bpcs[i])) { return -1; } } return 0; } static int jp2_colr_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_colr_t *colr = &box->data.colr; if (jp2_putuint8(out, colr->method) || jp2_putuint8(out, colr->pri) || jp2_putuint8(out, colr->approx)) { return -1; } switch (colr->method) { case JP2_COLR_ENUM: if (jp2_putuint32(out, colr->csid)) { return -1; } break; case JP2_COLR_ICC: if (jas_stream_write(out, colr->iccp, JAS_CAST(int, colr->iccplen)) != JAS_CAST(int, colr->iccplen)) return -1; break; } return 0; } static int jp2_cdef_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_cdef_t *cdef = &box->data.cdef; unsigned int i; jp2_cdefchan_t *ent; if (jp2_putuint16(out, cdef->numchans)) { return -1; } for (i = 0; i < cdef->numchans; ++i) { ent = &cdef->ents[i]; if (jp2_putuint16(out, ent->channo) || jp2_putuint16(out, ent->type) || jp2_putuint16(out, ent->assoc)) { return -1; } } return 0; } /******************************************************************************\ * Input operations for primitive types. 
\******************************************************************************/ static int jp2_getuint8(jas_stream_t *in, uint_fast8_t *val) { int c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } if (val) { *val = c; } return 0; } static int jp2_getuint16(jas_stream_t *in, uint_fast16_t *val) { uint_fast16_t v; int c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; if (val) { *val = v; } return 0; } static int jp2_getuint32(jas_stream_t *in, uint_fast32_t *val) { uint_fast32_t v; int c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; if (val) { *val = v; } return 0; } static int jp2_getuint64(jas_stream_t *in, uint_fast64_t *val) { uint_fast64_t tmpval; int i; int c; tmpval = 0; for (i = 0; i < 8; ++i) { tmpval <<= 8; if ((c = jas_stream_getc(in)) == EOF) { return -1; } tmpval |= (c & 0xff); } *val = tmpval; return 0; } /******************************************************************************\ * Output operations for primitive types. \******************************************************************************/ static int jp2_putuint8(jas_stream_t *out, uint_fast8_t val) { if (jas_stream_putc(out, val & 0xff) == EOF) { return -1; } return 0; } static int jp2_putuint16(jas_stream_t *out, uint_fast16_t val) { if (jas_stream_putc(out, (val >> 8) & 0xff) == EOF || jas_stream_putc(out, val & 0xff) == EOF) { return -1; } return 0; } static int jp2_putuint32(jas_stream_t *out, uint_fast32_t val) { if (jas_stream_putc(out, (val >> 24) & 0xff) == EOF || jas_stream_putc(out, (val >> 16) & 0xff) == EOF || jas_stream_putc(out, (val >> 8) & 0xff) == EOF || jas_stream_putc(out, val & 0xff) == EOF) { return -1; } return 0; } static int jp2_putuint64(jas_stream_t *out, uint_fast64_t val) { if (jp2_putuint32(out, (val >> 32) & 0xffffffffUL) || jp2_putuint32(out, val & 0xffffffffUL)) { return -1; } return 0; } /******************************************************************************\ * Miscellaneous code. \******************************************************************************/ jp2_boxinfo_t *jp2_boxinfolookup(int type) { jp2_boxinfo_t *boxinfo; for (boxinfo = jp2_boxinfos; boxinfo->name; ++boxinfo) { if (boxinfo->type == type) { return boxinfo; } } return &jp2_boxinfo_unk; } static void jp2_cmap_destroy(jp2_box_t *box) { jp2_cmap_t *cmap = &box->data.cmap; if (cmap->ents) { jas_free(cmap->ents); } } static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_cmap_t *cmap = &box->data.cmap; jp2_cmapent_t *ent; unsigned int i; cmap->numchans = (box->datalen) / 4; if (!(cmap->ents = jas_alloc2(cmap->numchans, sizeof(jp2_cmapent_t)))) { return -1; } for (i = 0; i < cmap->numchans; ++i) { ent = &cmap->ents[i]; if (jp2_getuint16(in, &ent->cmptno) || jp2_getuint8(in, &ent->map) || jp2_getuint8(in, &ent->pcol)) { return -1; } } return 0; } static int jp2_cmap_putdata(jp2_box_t *box, jas_stream_t *out) { /* Eliminate compiler warning about unused variables. 
*/ box = 0; out = 0; return -1; } static void jp2_cmap_dumpdata(jp2_box_t *box, FILE *out) { jp2_cmap_t *cmap = &box->data.cmap; unsigned int i; jp2_cmapent_t *ent; fprintf(out, "numchans = %d\n", (int) cmap->numchans); for (i = 0; i < cmap->numchans; ++i) { ent = &cmap->ents[i]; fprintf(out, "cmptno=%d; map=%d; pcol=%d\n", (int) ent->cmptno, (int) ent->map, (int) ent->pcol); } } static void jp2_pclr_destroy(jp2_box_t *box) { jp2_pclr_t *pclr = &box->data.pclr; if (pclr->lutdata) { jas_free(pclr->lutdata); } if (pclr->bpc) jas_free(pclr->bpc); } static int jp2_pclr_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_pclr_t *pclr = &box->data.pclr; int lutsize; unsigned int i; unsigned int j; int_fast32_t x; pclr->lutdata = 0; if (jp2_getuint16(in, &pclr->numlutents) || jp2_getuint8(in, &pclr->numchans)) { return -1; } lutsize = pclr->numlutents * pclr->numchans; if (!(pclr->lutdata = jas_alloc2(lutsize, sizeof(int_fast32_t)))) { return -1; } if (!(pclr->bpc = jas_alloc2(pclr->numchans, sizeof(uint_fast8_t)))) { return -1; } for (i = 0; i < pclr->numchans; ++i) { if (jp2_getuint8(in, &pclr->bpc[i])) { return -1; } } for (i = 0; i < pclr->numlutents; ++i) { for (j = 0; j < pclr->numchans; ++j) { if (jp2_getint(in, (pclr->bpc[j] & 0x80) != 0, (pclr->bpc[j] & 0x7f) + 1, &x)) { return -1; } pclr->lutdata[i * pclr->numchans + j] = x; } } return 0; } static int jp2_pclr_putdata(jp2_box_t *box, jas_stream_t *out) { #if 0 jp2_pclr_t *pclr = &box->data.pclr; #endif /* Eliminate warning about unused variable. */ box = 0; out = 0; return -1; } static void jp2_pclr_dumpdata(jp2_box_t *box, FILE *out) { jp2_pclr_t *pclr = &box->data.pclr; unsigned int i; int j; fprintf(out, "numents=%d; numchans=%d\n", (int) pclr->numlutents, (int) pclr->numchans); for (i = 0; i < pclr->numlutents; ++i) { for (j = 0; j < pclr->numchans; ++j) { fprintf(out, "LUT[%d][%d]=%"PRIiFAST32"\n", i, j, pclr->lutdata[i * pclr->numchans + j]); } } } static int jp2_getint(jas_stream_t *in, int s, int n, int_fast32_t *val) { int c; int i; uint_fast32_t v; int m; m = (n + 7) / 8; v = 0; for (i = 0; i < m; ++i) { if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; } v &= ONES(n); if (s) { int sb; sb = v & (1 << (8 * m - 1)); *val = ((~v) + 1) & ONES(8 * m); if (sb) { *val = -*val; } } else { *val = v; } return 0; } jp2_cdefchan_t *jp2_cdef_lookup(jp2_cdef_t *cdef, int channo) { unsigned int i; jp2_cdefchan_t *cdefent; for (i = 0; i < cdef->numchans; ++i) { cdefent = &cdef->ents[i]; if (cdefent->channo == JAS_CAST(unsigned int, channo)) { return cdefent; } } return 0; }
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2002 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * JP2 Library * * $Id$ */ /******************************************************************************\ * Includes. \******************************************************************************/ #include <assert.h> #include <stdlib.h> #include <inttypes.h> #include "jasper/jas_stream.h" #include "jasper/jas_malloc.h" #include "jasper/jas_debug.h" #include "jp2_cod.h" /******************************************************************************\ * Function prototypes. 
\******************************************************************************/ #define ONES(n) ((1 << (n)) - 1) jp2_boxinfo_t *jp2_boxinfolookup(int type); static int jp2_getuint8(jas_stream_t *in, uint_fast8_t *val); static int jp2_getuint16(jas_stream_t *in, uint_fast16_t *val); static int jp2_getuint32(jas_stream_t *in, uint_fast32_t *val); static int jp2_getuint64(jas_stream_t *in, uint_fast64_t *val); static int jp2_putuint8(jas_stream_t *out, uint_fast8_t val); static int jp2_putuint16(jas_stream_t *out, uint_fast16_t val); static int jp2_putuint32(jas_stream_t *out, uint_fast32_t val); static int jp2_putuint64(jas_stream_t *out, uint_fast64_t val); static int jp2_getint(jas_stream_t *in, int s, int n, int_fast32_t *val); jp2_box_t *jp2_box_get(jas_stream_t *in); void jp2_box_dump(jp2_box_t *box, FILE *out); static int jp2_jp_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_jp_putdata(jp2_box_t *box, jas_stream_t *out); static int jp2_ftyp_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_ftyp_putdata(jp2_box_t *box, jas_stream_t *out); static int jp2_ihdr_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_ihdr_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_bpcc_destroy(jp2_box_t *box); static int jp2_bpcc_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_bpcc_putdata(jp2_box_t *box, jas_stream_t *out); static int jp2_colr_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_colr_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_colr_dumpdata(jp2_box_t *box, FILE *out); static void jp2_colr_destroy(jp2_box_t *box); static void jp2_cdef_destroy(jp2_box_t *box); static int jp2_cdef_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_cdef_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_cdef_dumpdata(jp2_box_t *box, FILE *out); static void jp2_cmap_destroy(jp2_box_t *box); static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_cmap_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_cmap_dumpdata(jp2_box_t *box, FILE *out); static void jp2_pclr_destroy(jp2_box_t *box); static int jp2_pclr_getdata(jp2_box_t *box, jas_stream_t *in); static int jp2_pclr_putdata(jp2_box_t *box, jas_stream_t *out); static void jp2_pclr_dumpdata(jp2_box_t *box, FILE *out); /******************************************************************************\ * Local data. 
\******************************************************************************/ jp2_boxinfo_t jp2_boxinfos[] = { {JP2_BOX_JP, "JP", 0, {0, 0, jp2_jp_getdata, jp2_jp_putdata, 0}}, {JP2_BOX_FTYP, "FTYP", 0, {0, 0, jp2_ftyp_getdata, jp2_ftyp_putdata, 0}}, {JP2_BOX_JP2H, "JP2H", JP2_BOX_SUPER, {0, 0, 0, 0, 0}}, {JP2_BOX_IHDR, "IHDR", 0, {0, 0, jp2_ihdr_getdata, jp2_ihdr_putdata, 0}}, {JP2_BOX_BPCC, "BPCC", 0, {0, jp2_bpcc_destroy, jp2_bpcc_getdata, jp2_bpcc_putdata, 0}}, {JP2_BOX_COLR, "COLR", 0, {0, jp2_colr_destroy, jp2_colr_getdata, jp2_colr_putdata, jp2_colr_dumpdata}}, {JP2_BOX_PCLR, "PCLR", 0, {0, jp2_pclr_destroy, jp2_pclr_getdata, jp2_pclr_putdata, jp2_pclr_dumpdata}}, {JP2_BOX_CMAP, "CMAP", 0, {0, jp2_cmap_destroy, jp2_cmap_getdata, jp2_cmap_putdata, jp2_cmap_dumpdata}}, {JP2_BOX_CDEF, "CDEF", 0, {0, jp2_cdef_destroy, jp2_cdef_getdata, jp2_cdef_putdata, jp2_cdef_dumpdata}}, {JP2_BOX_RES, "RES", JP2_BOX_SUPER, {0, 0, 0, 0, 0}}, {JP2_BOX_RESC, "RESC", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_RESD, "RESD", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_JP2C, "JP2C", JP2_BOX_NODATA, {0, 0, 0, 0, 0}}, {JP2_BOX_JP2I, "JP2I", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_XML, "XML", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_UUID, "UUID", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_UINF, "UINF", JP2_BOX_SUPER, {0, 0, 0, 0, 0}}, {JP2_BOX_ULST, "ULST", 0, {0, 0, 0, 0, 0}}, {JP2_BOX_URL, "URL", 0, {0, 0, 0, 0, 0}}, {0, 0, 0, {0, 0, 0, 0, 0}}, }; jp2_boxinfo_t jp2_boxinfo_unk = { 0, "Unknown", 0, {0, 0, 0, 0, 0} }; /******************************************************************************\ * Box constructor. \******************************************************************************/ jp2_box_t *jp2_box_create0() { jp2_box_t *box; if (!(box = jas_malloc(sizeof(jp2_box_t)))) { return 0; } memset(box, 0, sizeof(jp2_box_t)); box->type = 0; box->len = 0; // Mark the box data as never having been constructed // so that we will not errantly attempt to destroy it later. box->ops = &jp2_boxinfo_unk.ops; return box; } jp2_box_t *jp2_box_create(int type) { jp2_box_t *box; jp2_boxinfo_t *boxinfo; if (!(box = jp2_box_create0())) { return 0; } box->type = type; box->len = 0; if (!(boxinfo = jp2_boxinfolookup(type))) { return 0; } box->info = boxinfo; box->ops = &boxinfo->ops; return box; } /******************************************************************************\ * Box destructor. \******************************************************************************/ void jp2_box_destroy(jp2_box_t *box) { if (box->ops->destroy) { (*box->ops->destroy)(box); } jas_free(box); } static void jp2_bpcc_destroy(jp2_box_t *box) { jp2_bpcc_t *bpcc = &box->data.bpcc; if (bpcc->bpcs) { jas_free(bpcc->bpcs); bpcc->bpcs = 0; } } static void jp2_cdef_destroy(jp2_box_t *box) { jp2_cdef_t *cdef = &box->data.cdef; if (cdef->ents) { jas_free(cdef->ents); cdef->ents = 0; } } /******************************************************************************\ * Box input. 
\******************************************************************************/ jp2_box_t *jp2_box_get(jas_stream_t *in) { jp2_box_t *box; jp2_boxinfo_t *boxinfo; jas_stream_t *tmpstream; uint_fast32_t len; uint_fast64_t extlen; bool dataflag; box = 0; tmpstream = 0; if (!(box = jp2_box_create0())) { goto error; } if (jp2_getuint32(in, &len) || jp2_getuint32(in, &box->type)) { goto error; } boxinfo = jp2_boxinfolookup(box->type); box->info = boxinfo; box->len = len; JAS_DBGLOG(10, ( "preliminary processing of JP2 box: " "type=%c%s%c (0x%08x); length=%"PRIuFAST32"\n", '"', boxinfo->name, '"', box->type, box->len )); if (box->len == 1) { JAS_DBGLOG(10, ("big length\n")); if (jp2_getuint64(in, &extlen)) { goto error; } if (extlen > 0xffffffffUL) { jas_eprintf("warning: cannot handle large 64-bit box length\n"); extlen = 0xffffffffUL; } box->len = extlen; box->datalen = extlen - JP2_BOX_HDRLEN(true); } else { box->datalen = box->len - JP2_BOX_HDRLEN(false); } if (box->len != 0 && box->len < 8) { goto error; } dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA)); if (dataflag) { if (!(tmpstream = jas_stream_memopen(0, 0))) { goto error; } if (jas_stream_copy(tmpstream, in, box->datalen)) { jas_eprintf("cannot copy box data\n"); goto error; } jas_stream_rewind(tmpstream); // From here onwards, the box data will need to be destroyed. // So, initialize the box operations. box->ops = &boxinfo->ops; if (box->ops->getdata) { if ((*box->ops->getdata)(box, tmpstream)) { jas_eprintf("cannot parse box data\n"); goto error; } } jas_stream_close(tmpstream); } if (jas_getdbglevel() >= 1) { jp2_box_dump(box, stderr); } return box; error: if (box) { jp2_box_destroy(box); } if (tmpstream) { jas_stream_close(tmpstream); } return 0; } void jp2_box_dump(jp2_box_t *box, FILE *out) { jp2_boxinfo_t *boxinfo; boxinfo = jp2_boxinfolookup(box->type); assert(boxinfo); fprintf(out, "JP2 box: "); fprintf(out, "type=%c%s%c (0x%08"PRIxFAST32"); length=%"PRIuFAST32"\n", '"', boxinfo->name, '"', box->type, box->len); if (box->ops->dumpdata) { (*box->ops->dumpdata)(box, out); } } static int jp2_jp_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_jp_t *jp = &box->data.jp; if (jp2_getuint32(in, &jp->magic)) { return -1; } return 0; } static int jp2_ftyp_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_ftyp_t *ftyp = &box->data.ftyp; unsigned int i; if (jp2_getuint32(in, &ftyp->majver) || jp2_getuint32(in, &ftyp->minver)) { return -1; } ftyp->numcompatcodes = (box->datalen - 8) / 4; if (ftyp->numcompatcodes > JP2_FTYP_MAXCOMPATCODES) { return -1; } for (i = 0; i < ftyp->numcompatcodes; ++i) { if (jp2_getuint32(in, &ftyp->compatcodes[i])) { return -1; } } return 0; } static int jp2_ihdr_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_ihdr_t *ihdr = &box->data.ihdr; if (jp2_getuint32(in, &ihdr->height) || jp2_getuint32(in, &ihdr->width) || jp2_getuint16(in, &ihdr->numcmpts) || jp2_getuint8(in, &ihdr->bpc) || jp2_getuint8(in, &ihdr->comptype) || jp2_getuint8(in, &ihdr->csunk) || jp2_getuint8(in, &ihdr->ipr)) { return -1; } return 0; } static int jp2_bpcc_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_bpcc_t *bpcc = &box->data.bpcc; unsigned int i; bpcc->bpcs = 0; bpcc->numcmpts = box->datalen; if (!(bpcc->bpcs = jas_alloc2(bpcc->numcmpts, sizeof(uint_fast8_t)))) { return -1; } for (i = 0; i < bpcc->numcmpts; ++i) { if (jp2_getuint8(in, &bpcc->bpcs[i])) { return -1; } } return 0; } static void jp2_colr_dumpdata(jp2_box_t *box, FILE *out) { jp2_colr_t *colr = &box->data.colr; fprintf(out, "method=%d; pri=%d; approx=%d\n", 
(int)colr->method, (int)colr->pri, (int)colr->approx); switch (colr->method) { case JP2_COLR_ENUM: fprintf(out, "csid=%d\n", (int)colr->csid); break; case JP2_COLR_ICC: jas_memdump(out, colr->iccp, colr->iccplen); break; } } static int jp2_colr_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_colr_t *colr = &box->data.colr; colr->csid = 0; colr->iccp = 0; colr->iccplen = 0; if (jp2_getuint8(in, &colr->method) || jp2_getuint8(in, &colr->pri) || jp2_getuint8(in, &colr->approx)) { return -1; } switch (colr->method) { case JP2_COLR_ENUM: if (jp2_getuint32(in, &colr->csid)) { return -1; } break; case JP2_COLR_ICC: colr->iccplen = box->datalen - 3; if (!(colr->iccp = jas_alloc2(colr->iccplen, sizeof(uint_fast8_t)))) { return -1; } if (jas_stream_read(in, colr->iccp, colr->iccplen) != colr->iccplen) { return -1; } break; } return 0; } static void jp2_cdef_dumpdata(jp2_box_t *box, FILE *out) { jp2_cdef_t *cdef = &box->data.cdef; unsigned int i; for (i = 0; i < cdef->numchans; ++i) { fprintf(out, "channo=%"PRIuFAST16"; type=%"PRIuFAST16"; assoc=%"PRIuFAST16"\n", cdef->ents[i].channo, cdef->ents[i].type, cdef->ents[i].assoc); } } static void jp2_colr_destroy(jp2_box_t *box) { jp2_colr_t *colr = &box->data.colr; if (colr->iccp) { jas_free(colr->iccp); } } static int jp2_cdef_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_cdef_t *cdef = &box->data.cdef; jp2_cdefchan_t *chan; unsigned int channo; cdef->ents = 0; if (jp2_getuint16(in, &cdef->numchans)) { return -1; } if (!(cdef->ents = jas_alloc2(cdef->numchans, sizeof(jp2_cdefchan_t)))) { return -1; } for (channo = 0; channo < cdef->numchans; ++channo) { chan = &cdef->ents[channo]; if (jp2_getuint16(in, &chan->channo) || jp2_getuint16(in, &chan->type) || jp2_getuint16(in, &chan->assoc)) { return -1; } } return 0; } /******************************************************************************\ * Box output. \******************************************************************************/ int jp2_box_put(jp2_box_t *box, jas_stream_t *out) { jas_stream_t *tmpstream; bool extlen; bool dataflag; tmpstream = 0; dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA)); if (dataflag) { if (!(tmpstream = jas_stream_memopen(0, 0))) { goto error; } if (box->ops->putdata) { if ((*box->ops->putdata)(box, tmpstream)) { goto error; } } box->len = jas_stream_tell(tmpstream) + JP2_BOX_HDRLEN(false); jas_stream_rewind(tmpstream); } extlen = (box->len >= (((uint_fast64_t)1) << 32)) != 0; if (jp2_putuint32(out, extlen ? 
1 : box->len)) { goto error; } if (jp2_putuint32(out, box->type)) { goto error; } if (extlen) { if (jp2_putuint64(out, box->len)) { goto error; } } if (dataflag) { if (jas_stream_copy(out, tmpstream, box->len - JP2_BOX_HDRLEN(false))) { jas_eprintf("cannot copy box data\n"); goto error; } jas_stream_close(tmpstream); } return 0; error: if (tmpstream) { jas_stream_close(tmpstream); } return -1; } static int jp2_jp_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_jp_t *jp = &box->data.jp; if (jp2_putuint32(out, jp->magic)) { return -1; } return 0; } static int jp2_ftyp_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_ftyp_t *ftyp = &box->data.ftyp; unsigned int i; if (jp2_putuint32(out, ftyp->majver) || jp2_putuint32(out, ftyp->minver)) { return -1; } for (i = 0; i < ftyp->numcompatcodes; ++i) { if (jp2_putuint32(out, ftyp->compatcodes[i])) { return -1; } } return 0; } static int jp2_ihdr_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_ihdr_t *ihdr = &box->data.ihdr; if (jp2_putuint32(out, ihdr->height) || jp2_putuint32(out, ihdr->width) || jp2_putuint16(out, ihdr->numcmpts) || jp2_putuint8(out, ihdr->bpc) || jp2_putuint8(out, ihdr->comptype) || jp2_putuint8(out, ihdr->csunk) || jp2_putuint8(out, ihdr->ipr)) { return -1; } return 0; } static int jp2_bpcc_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_bpcc_t *bpcc = &box->data.bpcc; unsigned int i; for (i = 0; i < bpcc->numcmpts; ++i) { if (jp2_putuint8(out, bpcc->bpcs[i])) { return -1; } } return 0; } static int jp2_colr_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_colr_t *colr = &box->data.colr; if (jp2_putuint8(out, colr->method) || jp2_putuint8(out, colr->pri) || jp2_putuint8(out, colr->approx)) { return -1; } switch (colr->method) { case JP2_COLR_ENUM: if (jp2_putuint32(out, colr->csid)) { return -1; } break; case JP2_COLR_ICC: if (jas_stream_write(out, colr->iccp, JAS_CAST(int, colr->iccplen)) != JAS_CAST(int, colr->iccplen)) return -1; break; } return 0; } static int jp2_cdef_putdata(jp2_box_t *box, jas_stream_t *out) { jp2_cdef_t *cdef = &box->data.cdef; unsigned int i; jp2_cdefchan_t *ent; if (jp2_putuint16(out, cdef->numchans)) { return -1; } for (i = 0; i < cdef->numchans; ++i) { ent = &cdef->ents[i]; if (jp2_putuint16(out, ent->channo) || jp2_putuint16(out, ent->type) || jp2_putuint16(out, ent->assoc)) { return -1; } } return 0; } /******************************************************************************\ * Input operations for primitive types. 
\******************************************************************************/ static int jp2_getuint8(jas_stream_t *in, uint_fast8_t *val) { int c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } if (val) { *val = c; } return 0; } static int jp2_getuint16(jas_stream_t *in, uint_fast16_t *val) { uint_fast16_t v; int c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; if (val) { *val = v; } return 0; } static int jp2_getuint32(jas_stream_t *in, uint_fast32_t *val) { uint_fast32_t v; int c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; if (val) { *val = v; } return 0; } static int jp2_getuint64(jas_stream_t *in, uint_fast64_t *val) { uint_fast64_t tmpval; int i; int c; tmpval = 0; for (i = 0; i < 8; ++i) { tmpval <<= 8; if ((c = jas_stream_getc(in)) == EOF) { return -1; } tmpval |= (c & 0xff); } *val = tmpval; return 0; } /******************************************************************************\ * Output operations for primitive types. \******************************************************************************/ static int jp2_putuint8(jas_stream_t *out, uint_fast8_t val) { if (jas_stream_putc(out, val & 0xff) == EOF) { return -1; } return 0; } static int jp2_putuint16(jas_stream_t *out, uint_fast16_t val) { if (jas_stream_putc(out, (val >> 8) & 0xff) == EOF || jas_stream_putc(out, val & 0xff) == EOF) { return -1; } return 0; } static int jp2_putuint32(jas_stream_t *out, uint_fast32_t val) { if (jas_stream_putc(out, (val >> 24) & 0xff) == EOF || jas_stream_putc(out, (val >> 16) & 0xff) == EOF || jas_stream_putc(out, (val >> 8) & 0xff) == EOF || jas_stream_putc(out, val & 0xff) == EOF) { return -1; } return 0; } static int jp2_putuint64(jas_stream_t *out, uint_fast64_t val) { if (jp2_putuint32(out, (val >> 32) & 0xffffffffUL) || jp2_putuint32(out, val & 0xffffffffUL)) { return -1; } return 0; } /******************************************************************************\ * Miscellaneous code. \******************************************************************************/ jp2_boxinfo_t *jp2_boxinfolookup(int type) { jp2_boxinfo_t *boxinfo; for (boxinfo = jp2_boxinfos; boxinfo->name; ++boxinfo) { if (boxinfo->type == type) { return boxinfo; } } return &jp2_boxinfo_unk; } static void jp2_cmap_destroy(jp2_box_t *box) { jp2_cmap_t *cmap = &box->data.cmap; if (cmap->ents) { jas_free(cmap->ents); } } static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_cmap_t *cmap = &box->data.cmap; jp2_cmapent_t *ent; unsigned int i; cmap->ents = 0; cmap->numchans = (box->datalen) / 4; if (!(cmap->ents = jas_alloc2(cmap->numchans, sizeof(jp2_cmapent_t)))) { return -1; } for (i = 0; i < cmap->numchans; ++i) { ent = &cmap->ents[i]; if (jp2_getuint16(in, &ent->cmptno) || jp2_getuint8(in, &ent->map) || jp2_getuint8(in, &ent->pcol)) { return -1; } } return 0; } static int jp2_cmap_putdata(jp2_box_t *box, jas_stream_t *out) { /* Eliminate compiler warning about unused variables. 
*/ box = 0; out = 0; return -1; } static void jp2_cmap_dumpdata(jp2_box_t *box, FILE *out) { jp2_cmap_t *cmap = &box->data.cmap; unsigned int i; jp2_cmapent_t *ent; fprintf(out, "numchans = %d\n", (int) cmap->numchans); for (i = 0; i < cmap->numchans; ++i) { ent = &cmap->ents[i]; fprintf(out, "cmptno=%d; map=%d; pcol=%d\n", (int) ent->cmptno, (int) ent->map, (int) ent->pcol); } } static void jp2_pclr_destroy(jp2_box_t *box) { jp2_pclr_t *pclr = &box->data.pclr; if (pclr->lutdata) { jas_free(pclr->lutdata); } if (pclr->bpc) jas_free(pclr->bpc); } static int jp2_pclr_getdata(jp2_box_t *box, jas_stream_t *in) { jp2_pclr_t *pclr = &box->data.pclr; int lutsize; unsigned int i; unsigned int j; int_fast32_t x; pclr->lutdata = 0; pclr->bpc = 0; if (jp2_getuint16(in, &pclr->numlutents) || jp2_getuint8(in, &pclr->numchans)) { return -1; } lutsize = pclr->numlutents * pclr->numchans; if (!(pclr->lutdata = jas_alloc2(lutsize, sizeof(int_fast32_t)))) { return -1; } if (!(pclr->bpc = jas_alloc2(pclr->numchans, sizeof(uint_fast8_t)))) { return -1; } for (i = 0; i < pclr->numchans; ++i) { if (jp2_getuint8(in, &pclr->bpc[i])) { return -1; } } for (i = 0; i < pclr->numlutents; ++i) { for (j = 0; j < pclr->numchans; ++j) { if (jp2_getint(in, (pclr->bpc[j] & 0x80) != 0, (pclr->bpc[j] & 0x7f) + 1, &x)) { return -1; } pclr->lutdata[i * pclr->numchans + j] = x; } } return 0; } static int jp2_pclr_putdata(jp2_box_t *box, jas_stream_t *out) { #if 0 jp2_pclr_t *pclr = &box->data.pclr; #endif /* Eliminate warning about unused variable. */ box = 0; out = 0; return -1; } static void jp2_pclr_dumpdata(jp2_box_t *box, FILE *out) { jp2_pclr_t *pclr = &box->data.pclr; unsigned int i; int j; fprintf(out, "numents=%d; numchans=%d\n", (int) pclr->numlutents, (int) pclr->numchans); for (i = 0; i < pclr->numlutents; ++i) { for (j = 0; j < pclr->numchans; ++j) { fprintf(out, "LUT[%d][%d]=%"PRIiFAST32"\n", i, j, pclr->lutdata[i * pclr->numchans + j]); } } } static int jp2_getint(jas_stream_t *in, int s, int n, int_fast32_t *val) { int c; int i; uint_fast32_t v; int m; m = (n + 7) / 8; v = 0; for (i = 0; i < m; ++i) { if ((c = jas_stream_getc(in)) == EOF) { return -1; } v = (v << 8) | c; } v &= ONES(n); if (s) { int sb; sb = v & (1 << (8 * m - 1)); *val = ((~v) + 1) & ONES(8 * m); if (sb) { *val = -*val; } } else { *val = v; } return 0; } jp2_cdefchan_t *jp2_cdef_lookup(jp2_cdef_t *cdef, int channo) { unsigned int i; jp2_cdefchan_t *cdefent; for (i = 0; i < cdef->numchans; ++i) { cdefent = &cdef->ents[i]; if (cdefent->channo == JAS_CAST(unsigned int, channo)) { return cdefent; } } return 0; }
jp2_box_t *jp2_box_create(int type) { jp2_box_t *box; jp2_boxinfo_t *boxinfo; if (!(box = jas_malloc(sizeof(jp2_box_t)))) { return 0; } memset(box, 0, sizeof(jp2_box_t)); box->type = type; box->len = 0; if (!(boxinfo = jp2_boxinfolookup(type))) { return 0; } box->info = boxinfo; box->ops = &boxinfo->ops; return box; }
jp2_box_t *jp2_box_create(int type) { jp2_box_t *box; jp2_boxinfo_t *boxinfo; if (!(box = jp2_box_create0())) { return 0; } box->type = type; box->len = 0; if (!(boxinfo = jp2_boxinfolookup(type))) { return 0; } box->info = boxinfo; box->ops = &boxinfo->ops; return box; }
{'added': [(186, 'jp2_box_t *jp2_box_create0()'), (193, '\tbox->type = 0;'), (194, '\tbox->len = 0;'), (195, '\t// Mark the box data as never having been constructed'), (196, '\t// so that we will not errantly attempt to destroy it later.'), (197, '\tbox->ops = &jp2_boxinfo_unk.ops;'), (198, '\treturn box;'), (199, '}'), (200, ''), (201, 'jp2_box_t *jp2_box_create(int type)'), (202, '{'), (203, '\tjp2_box_t *box;'), (204, '\tjp2_boxinfo_t *boxinfo;'), (205, '\tif (!(box = jp2_box_create0())) {'), (206, '\t\treturn 0;'), (207, '\t}'), (264, '\tif (!(box = jp2_box_create0())) {'), (274, '\t "preliminary processing of JP2 box: "'), (275, '\t "type=%c%s%c (0x%08x); length=%"PRIuFAST32"\\n",'), (279, '\t\tJAS_DBGLOG(10, ("big length\\n"));'), (395, '\tbpcc->bpcs = 0;'), (476, '\tcdef->ents = 0;'), (533, '\t\tif (jas_stream_copy(out, tmpstream, box->len -'), (534, '\t\t JP2_BOX_HDRLEN(false))) {'), (535, '\t\t\tjas_eprintf("cannot copy box data\\n");'), (794, '\tcmap->ents = 0;'), (853, '\tpclr->bpc = 0;'), (888, '\t/* Eliminate warning about unused variable. */'), (889, '\tbox = 0;'), (890, '\tout = 0;')], 'deleted': [(186, 'jp2_box_t *jp2_box_create(int type)'), (189, '\tjp2_boxinfo_t *boxinfo;'), (190, ''), (251, '\tif (!(box = jas_malloc(sizeof(jp2_box_t)))) {'), (254, ''), (255, '\t// Mark the box data as never having been constructed'), (256, '\t// so that we will not errantly attempt to destroy it later.'), (257, '\tbox->ops = &jp2_boxinfo_unk.ops;'), (258, ''), (266, '\t "preliminary processing of JP2 box: type=%c%s%c (0x%08x); length=%d\\n",'), (521, '\t\tif (jas_stream_copy(out, tmpstream, box->len - JP2_BOX_HDRLEN(false))) {'), (872, '/* Eliminate warning about unused variable. */'), (873, 'box = 0;'), (874, 'out = 0;')]}
num_lines_added: 30
num_lines_deleted: 14
num_lines_in_file: 747
num_tokens_in_file: 5,052
num_lines_in_method: 17
num_tokens_in_method: 94
method_complexity: 3
repo: https://github.com/mdadams/jasper
cve_id: CVE-2017-6850
cwe_id: CWE-476
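The fix recorded above applies one pattern throughout jp2_cod.c: every owned pointer in a box payload is zeroed before the first fallible read (bpcc->bpcs = 0, cdef->ents = 0, cmap->ents = 0, pclr->bpc = 0), and the new jp2_box_create0() points box->ops at a harmless default so a box whose data was never constructed is never handed to a real destroy callback. Below is a minimal C sketch of that pattern; the payload_* names are illustrative, not part of the JasPer API.

    #include <stdlib.h>

    /* Hypothetical box payload with one owned buffer. */
    typedef struct {
        unsigned char *data;
        size_t len;
    } payload_t;

    /* Shared cleanup path. Because parse zeroes p->data before its
     * first fallible step, this is safe to call after any failure:
     * free(NULL) is defined to do nothing. */
    static void payload_destroy(payload_t *p) {
        free(p->data);
        p->data = NULL;
    }

    /* Returns 0 on success, -1 on error. The crucial lines are the
     * up-front zeroing: without them, an early "return -1" would
     * leave p->data uninitialized and cleanup would free garbage. */
    static int payload_parse(payload_t *p, const unsigned char *src, size_t n) {
        p->data = NULL;            /* initialize before anything can fail */
        p->len = 0;
        if (n == 0)
            return -1;             /* early error: destroy is still safe */
        if (!(p->data = malloc(n)))
            return -1;
        for (size_t i = 0; i < n; ++i)
            p->data[i] = src[i];
        p->len = n;
        return 0;
    }

Because free(NULL) is a no-op in C, zeroing up front is all it takes to make the shared destroy path unconditional on every error exit.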
id: 1,650
file_name: acsequentialscan.hpp
programming_language: C++
method_name: ACSequentialScan::QMContextSet::DCContextMagnitudeSet::Init
/************************************************************************* This project implements a complete(!) JPEG (Recommendation ITU-T T.81 | ISO/IEC 10918-1) codec, plus a library that can be used to encode and decode JPEG streams. It also implements ISO/IEC 18477 aka JPEG XT which is an extension towards intermediate, high-dynamic-range lossy and lossless coding of JPEG. In specific, it supports ISO/IEC 18477-3/-6/-7/-8 encoding. Note that only Profiles C and D of ISO/IEC 18477-7 are supported here. Check the JPEG XT reference software for a full implementation of ISO/IEC 18477-7. Copyright (C) 2012-2018 Thomas Richter, University of Stuttgart and Accusoft. (C) 2019-2020 Thomas Richter, Fraunhofer IIS. This program is available under two licenses, GPLv3 and the ITU Software licence Annex A Option 2, RAND conditions. For the full text of the GPU license option, see README.license.gpl. For the full text of the ITU license option, see README.license.itu. You may freely select between these two options. For the GPL option, please note the following: This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. *************************************************************************/ /* ** ** Represents the scan including the scan header for the ** arithmetic coding procedure. ** ** $Id: acsequentialscan.hpp,v 1.38 2016/10/28 13:58:53 thor Exp $ ** */ #ifndef CODESTREAM_ACSEQUENTIALSCAN_HPP #define CODESTREAM_ACSEQUENTIALSCAN_HPP /// Includes #include "tools/environment.hpp" #include "coding/qmcoder.hpp" #include "coding/quantizedrow.hpp" #include "codestream/entropyparser.hpp" /// /// Forwards class Tables; class ByteStream; class DCT; class Frame; struct RectangleRequest; class BitmapCtrl; class LineAdapter; class BufferCtrl; class BlockBuffer; class BlockCtrl; /// /// class ACSequentialScan class ACSequentialScan : public EntropyParser { // #if ACCUSOFT_CODE // // The QM coder doing the main work here. class QMCoder m_Coder; // // Last DC value, required for the DPCM coder. LONG m_lDC[4]; // // Last difference value, required for selecting the // AC coding context. LONG m_lDiff[4]; // // Context information struct QMContextSet { // // The DC Coding context set. struct DCContextZeroSet { QMContext S0,SS,SP,SN; // // Initialize #ifdef DEBUG_QMCODER void Init(const char *base) { char string[5] = "Z0S0"; memcpy(string,base,2); S0.Init(string); string[3] = 'S'; SS.Init(string); string[3] = 'P'; SP.Init(string); string[3] = 'N'; SN.Init(string); } #else void Init(void) { S0.Init(); SS.Init(); SP.Init(); SN.Init(); } #endif // } DCZero,DCSmallPositive,DCSmallNegative,DCLargePositive,DCLargeNegative; // // The DC Magnitude coding contexts. 
struct DCContextMagnitudeSet { QMContext X[19]; QMContext M[19]; // // Initialize void Init(void) { for(int i = 0;i < 19;i++) { #ifdef DEBUG_QMCODER char string[5] = "X0 "; string[1] = (i / 10) + '0'; string[2] = (i % 10) + '0'; X[i].Init(string); string[0] = 'M'; M[i].Init(string); #else X[i].Init(); M[i].Init(); #endif } } } DCMagnitude; // // The AC Coding Contexts. struct ACContextZeroSet { QMContext SE,S0,SP; // // Initialize. #ifdef DEBUG_QMCODER void Init(int i) { char string[5] = "se00"; string[2] = (i / 10) + '0'; string[3] = (i % 10) + '0'; SE.Init(string); string[1] = '0'; S0.Init(string); string[1] = 'p'; SP.Init(string); } #else void Init(void) { SE.Init(); S0.Init(); SP.Init(); } #endif } ACZero[63]; // // The AC Magnitude coder. struct ACContextMagnitudeSet { QMContext X[18]; QMContext M[18]; // #ifdef DEBUG_QMCODER void Init(bool hi) { for(int i = 0;i < 18;i++) { char string[5] = "xl00"; string[1] = (hi)?('h'):('l'); string[2] = (i / 10) + '0'; string[3] = (i % 10) + '0'; X[i].Init(string); string[0] = 'm'; M[i].Init(string); } } #else void Init(void) { for(int i = 0;i < 18;i++) { X[i].Init(); M[i].Init(); } } #endif } ACMagnitudeLow,ACMagnitudeHigh; // Exists only twice. // // The uniform context. QMContext Uniform; // // Initialize the full beast. void Init(void) { #ifdef DEBUG_QMCODER DCZero.Init("Z0"); DCSmallPositive.Init("L+"); DCSmallNegative.Init("L-"); DCLargePositive.Init("U+"); DCLargeNegative.Init("U-"); #else DCZero.Init(); DCSmallPositive.Init(); DCSmallNegative.Init(); DCLargePositive.Init(); DCLargeNegative.Init(); #endif DCMagnitude.Init(); DCMagnitude.Init(); for(int i = 0;i < 63;i++) { #ifdef DEBUG_QMCODER ACZero[i].Init(i); #else ACZero[i].Init(); #endif } #ifdef DEBUG_QMCODER ACMagnitudeLow.Init(false); ACMagnitudeHigh.Init(true); #else ACMagnitudeLow.Init(); ACMagnitudeHigh.Init(); #endif #ifdef DEBUG_QMCODER Uniform.Init(QMCoder::Uniform_State,"uni "); #else Uniform.Init(QMCoder::Uniform_State); #endif } // // Classify the DC difference into five categories, return it. struct DCContextZeroSet &Classify(LONG diff,UBYTE l,UBYTE u); // } m_Context[4]; // // protected: // // The block control helper that maintains all the request/release // logic and the interface to the user. class BlockCtrl *m_pBlockCtrl; // // Scan positions. ULONG m_ulX[4]; // // Scan parameters. UBYTE m_ucScanStart; UBYTE m_ucScanStop; UBYTE m_ucLowBit; // // AC conditioners, one per component. // // Context numbers to use for the conditional. UBYTE m_ucDCContext[4]; UBYTE m_ucACContext[4]; // // Small DC threshold value ('L' in the standard) UBYTE m_ucSmall[4]; // // Large DC threshold value ('U' in the specs) UBYTE m_ucLarge[4]; // // Higher block index discrimination ('kx' in the specs) UBYTE m_ucBlockEnd[4]; // // Will always be false as there is no reason to measure anything. // This is only here to satisfy the expected interface of the // residual scan. bool m_bMeasure; // // Set if this is a differential scan. bool m_bDifferential; // // Set if this is a residual scan. bool m_bResidual; // // Set if this is a large range scan. bool m_bLargeRange; // // Encode a single block void EncodeBlock(const LONG *block, LONG &prevdc,LONG &prevdiff, UBYTE small,UBYTE large,UBYTE blockup, UBYTE dctable,UBYTE actable); // // Decode a single block. void DecodeBlock(LONG *block, LONG &prevdc,LONG &prevdiff, UBYTE small,UBYTE large,UBYTE blockup, UBYTE dctable,UBYTE actable); // #endif // // Flush the remaining bits out to the stream on writing. 
virtual void Flush(bool final); // // Restart the parser at the next restart interval virtual void Restart(void); // private: // // Write the marker that indicates the frame type fitting to this scan. virtual void WriteFrameType(class ByteStream *io); // // public: // Create an arithmetically coded sequential scan. The highbit is always // ignored as this setting only exists for progressive refinement scans. ACSequentialScan(class Frame *frame,class Scan *scan,UBYTE start,UBYTE stop, UBYTE lowbit,UBYTE highbit, bool differential = false,bool residual = false,bool largerange = false); // ~ACSequentialScan(void); // // Fill in the tables for decoding and decoding parameters in general. virtual void StartParseScan(class ByteStream *io,class Checksum *chk,class BufferCtrl *ctrl); // // Write the default tables for encoding virtual void StartWriteScan(class ByteStream *io,class Checksum *chk,class BufferCtrl *ctrl); // // Measure scan statistics. Not implemented here since it is not // required. The AC coder is adaptive. virtual void StartMeasureScan(class BufferCtrl *ctrl); // // Start making an optimization run to adjust the coefficients. virtual void StartOptimizeScan(class BufferCtrl *ctrl); // // Start a MCU scan. Returns true if there are more rows. False otherwise. virtual bool StartMCURow(void); // // Parse a single MCU in this scan. Return true if there are more // MCUs in this row. virtual bool ParseMCU(void); // // Write a single MCU in this scan. virtual bool WriteMCU(void); // // Make an R/D optimization for the given scan by potentially pushing // coefficients into other bins. virtual void OptimizeBlock(LONG bx,LONG by,UBYTE component,double critical, class DCT *dct,LONG quantized[64]); // // Make an R/D optimization of the DC scan. This includes all DC blocks in // total, not just a single block. This is because the coefficients are not // coded independently. virtual void OptimizeDC(void); }; /// /// #endif
/************************************************************************* This project implements a complete(!) JPEG (Recommendation ITU-T T.81 | ISO/IEC 10918-1) codec, plus a library that can be used to encode and decode JPEG streams. It also implements ISO/IEC 18477 aka JPEG XT which is an extension towards intermediate, high-dynamic-range lossy and lossless coding of JPEG. In specific, it supports ISO/IEC 18477-3/-6/-7/-8 encoding. Note that only Profiles C and D of ISO/IEC 18477-7 are supported here. Check the JPEG XT reference software for a full implementation of ISO/IEC 18477-7. Copyright (C) 2012-2018 Thomas Richter, University of Stuttgart and Accusoft. (C) 2019-2020 Thomas Richter, Fraunhofer IIS. This program is available under two licenses, GPLv3 and the ITU Software licence Annex A Option 2, RAND conditions. For the full text of the GPU license option, see README.license.gpl. For the full text of the ITU license option, see README.license.itu. You may freely select between these two options. For the GPL option, please note the following: This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. *************************************************************************/ /* ** ** Represents the scan including the scan header for the ** arithmetic coding procedure. ** ** $Id: acsequentialscan.hpp,v 1.39 2022/05/23 05:56:51 thor Exp $ ** */ #ifndef CODESTREAM_ACSEQUENTIALSCAN_HPP #define CODESTREAM_ACSEQUENTIALSCAN_HPP /// Includes #include "tools/environment.hpp" #include "coding/qmcoder.hpp" #include "coding/quantizedrow.hpp" #include "codestream/entropyparser.hpp" /// /// Forwards class Tables; class ByteStream; class DCT; class Frame; struct RectangleRequest; class BitmapCtrl; class LineAdapter; class BufferCtrl; class BlockBuffer; class BlockCtrl; /// /// class ACSequentialScan class ACSequentialScan : public EntropyParser { // #if ACCUSOFT_CODE // // The QM coder doing the main work here. class QMCoder m_Coder; // // Last DC value, required for the DPCM coder. LONG m_lDC[4]; // // Last difference value, required for selecting the // AC coding context. LONG m_lDiff[4]; // // Context information struct QMContextSet { // // The DC Coding context set. struct DCContextZeroSet { QMContext S0,SS,SP,SN; // // Initialize #ifdef DEBUG_QMCODER void Init(const char *base) { char string[5] = "Z0S0"; memcpy(string,base,2); S0.Init(string); string[3] = 'S'; SS.Init(string); string[3] = 'P'; SP.Init(string); string[3] = 'N'; SN.Init(string); } #else void Init(void) { S0.Init(); SS.Init(); SP.Init(); SN.Init(); } #endif // } DCZero,DCSmallPositive,DCSmallNegative,DCLargePositive,DCLargeNegative; // // The DC Magnitude coding contexts. 
struct DCContextMagnitudeSet { enum { MagnitudeContexts = 19 }; // QMContext X[MagnitudeContexts]; QMContext M[MagnitudeContexts]; // // Initialize void Init(void) { for(int i = 0;i < MagnitudeContexts;i++) { #ifdef DEBUG_QMCODER char string[5] = "X0 "; string[1] = (i / 10) + '0'; string[2] = (i % 10) + '0'; X[i].Init(string); string[0] = 'M'; M[i].Init(string); #else X[i].Init(); M[i].Init(); #endif } } } DCMagnitude; // // The AC Coding Contexts. struct ACContextZeroSet { QMContext SE,S0,SP; // // Initialize. #ifdef DEBUG_QMCODER void Init(int i) { char string[5] = "se00"; string[2] = (i / 10) + '0'; string[3] = (i % 10) + '0'; SE.Init(string); string[1] = '0'; S0.Init(string); string[1] = 'p'; SP.Init(string); } #else void Init(void) { SE.Init(); S0.Init(); SP.Init(); } #endif } ACZero[63]; // // The AC Magnitude coder. struct ACContextMagnitudeSet { enum { MagnitudeContexts = 18 }; // QMContext X[MagnitudeContexts]; QMContext M[MagnitudeContexts]; // #ifdef DEBUG_QMCODER void Init(bool hi) { for(int i = 0;i < MagnitudeContexts;i++) { char string[5] = "xl00"; string[1] = (hi)?('h'):('l'); string[2] = (i / 10) + '0'; string[3] = (i % 10) + '0'; X[i].Init(string); string[0] = 'm'; M[i].Init(string); } } #else void Init(void) { for(int i = 0;i < MagnitudeContexts;i++) { X[i].Init(); M[i].Init(); } } #endif } ACMagnitudeLow,ACMagnitudeHigh; // Exists only twice. // // The uniform context. QMContext Uniform; // // Initialize the full beast. void Init(void) { #ifdef DEBUG_QMCODER DCZero.Init("Z0"); DCSmallPositive.Init("L+"); DCSmallNegative.Init("L-"); DCLargePositive.Init("U+"); DCLargeNegative.Init("U-"); #else DCZero.Init(); DCSmallPositive.Init(); DCSmallNegative.Init(); DCLargePositive.Init(); DCLargeNegative.Init(); #endif DCMagnitude.Init(); DCMagnitude.Init(); for(int i = 0;i < 63;i++) { #ifdef DEBUG_QMCODER ACZero[i].Init(i); #else ACZero[i].Init(); #endif } #ifdef DEBUG_QMCODER ACMagnitudeLow.Init(false); ACMagnitudeHigh.Init(true); #else ACMagnitudeLow.Init(); ACMagnitudeHigh.Init(); #endif #ifdef DEBUG_QMCODER Uniform.Init(QMCoder::Uniform_State,"uni "); #else Uniform.Init(QMCoder::Uniform_State); #endif } // // Classify the DC difference into five categories, return it. struct DCContextZeroSet &Classify(LONG diff,UBYTE l,UBYTE u); // } m_Context[4]; // // protected: // // The block control helper that maintains all the request/release // logic and the interface to the user. class BlockCtrl *m_pBlockCtrl; // // Scan positions. ULONG m_ulX[4]; // // Scan parameters. UBYTE m_ucScanStart; UBYTE m_ucScanStop; UBYTE m_ucLowBit; // // AC conditioners, one per component. // // Context numbers to use for the conditional. UBYTE m_ucDCContext[4]; UBYTE m_ucACContext[4]; // // Small DC threshold value ('L' in the standard) UBYTE m_ucSmall[4]; // // Large DC threshold value ('U' in the specs) UBYTE m_ucLarge[4]; // // Higher block index discrimination ('kx' in the specs) UBYTE m_ucBlockEnd[4]; // // Will always be false as there is no reason to measure anything. // This is only here to satisfy the expected interface of the // residual scan. bool m_bMeasure; // // Set if this is a differential scan. bool m_bDifferential; // // Set if this is a residual scan. bool m_bResidual; // // Set if this is a large range scan. bool m_bLargeRange; // // Encode a single block void EncodeBlock(const LONG *block, LONG &prevdc,LONG &prevdiff, UBYTE small,UBYTE large,UBYTE blockup, UBYTE dctable,UBYTE actable); // // Decode a single block. 
void DecodeBlock(LONG *block, LONG &prevdc,LONG &prevdiff, UBYTE small,UBYTE large,UBYTE blockup, UBYTE dctable,UBYTE actable); // #endif // // Flush the remaining bits out to the stream on writing. virtual void Flush(bool final); // // Restart the parser at the next restart interval virtual void Restart(void); // private: // // Write the marker that indicates the frame type fitting to this scan. virtual void WriteFrameType(class ByteStream *io); // // public: // Create an arithmetically coded sequential scan. The highbit is always // ignored as this setting only exists for progressive refinement scans. ACSequentialScan(class Frame *frame,class Scan *scan,UBYTE start,UBYTE stop, UBYTE lowbit,UBYTE highbit, bool differential = false,bool residual = false,bool largerange = false); // ~ACSequentialScan(void); // // Fill in the tables for decoding and decoding parameters in general. virtual void StartParseScan(class ByteStream *io,class Checksum *chk,class BufferCtrl *ctrl); // // Write the default tables for encoding virtual void StartWriteScan(class ByteStream *io,class Checksum *chk,class BufferCtrl *ctrl); // // Measure scan statistics. Not implemented here since it is not // required. The AC coder is adaptive. virtual void StartMeasureScan(class BufferCtrl *ctrl); // // Start making an optimization run to adjust the coefficients. virtual void StartOptimizeScan(class BufferCtrl *ctrl); // // Start a MCU scan. Returns true if there are more rows. False otherwise. virtual bool StartMCURow(void); // // Parse a single MCU in this scan. Return true if there are more // MCUs in this row. virtual bool ParseMCU(void); // // Write a single MCU in this scan. virtual bool WriteMCU(void); // // Make an R/D optimization for the given scan by potentially pushing // coefficients into other bins. virtual void OptimizeBlock(LONG bx,LONG by,UBYTE component,double critical, class DCT *dct,LONG quantized[64]); // // Make an R/D optimization of the DC scan. This includes all DC blocks in // total, not just a single block. This is because the coefficients are not // coded independently. virtual void OptimizeDC(void); }; /// /// #endif
void Init(void) { for(int i = 0;i < 19;i++) { #ifdef DEBUG_QMCODER char string[5] = "X0 "; string[1] = (i / 10) + '0'; string[2] = (i % 10) + '0'; X[i].Init(string); string[0] = 'M'; M[i].Init(string); #else X[i].Init(); M[i].Init(); #endif } }
void Init(void) { for(int i = 0;i < MagnitudeContexts;i++) { #ifdef DEBUG_QMCODER char string[5] = "X0 "; string[1] = (i / 10) + '0'; string[2] = (i % 10) + '0'; X[i].Init(string); string[0] = 'M'; M[i].Init(string); #else X[i].Init(); M[i].Init(); #endif } }
{'added': [(46, '** $Id: acsequentialscan.hpp,v 1.39 2022/05/23 05:56:51 thor Exp $'), (123, ' enum {'), (124, ' MagnitudeContexts = 19'), (125, ' };'), (126, ' //'), (127, ' QMContext X[MagnitudeContexts];'), (128, ' QMContext M[MagnitudeContexts];'), (133, ' for(int i = 0;i < MagnitudeContexts;i++) {'), (178, ' enum {'), (179, ' MagnitudeContexts = 18'), (180, ' };'), (181, ' //'), (182, ' QMContext X[MagnitudeContexts];'), (183, ' QMContext M[MagnitudeContexts];'), (188, ' for(int i = 0;i < MagnitudeContexts;i++) {'), (201, ' for(int i = 0;i < MagnitudeContexts;i++) {')], 'deleted': [(46, '** $Id: acsequentialscan.hpp,v 1.38 2016/10/28 13:58:53 thor Exp $'), (123, ' QMContext X[19];'), (124, ' QMContext M[19];'), (129, ' for(int i = 0;i < 19;i++) {'), (174, ' QMContext X[18];'), (175, ' QMContext M[18];'), (177, ''), (181, ' for(int i = 0;i < 18;i++) {'), (194, ' for(int i = 0;i < 18;i++) {')]}
num_lines_added: 16
num_lines_deleted: 9
num_lines_in_file: 178
num_tokens_in_file: 1,116
num_lines_in_method: 13
num_tokens_in_method: 101
method_complexity: 3
repo: https://github.com/thorfdbg/libjpeg
cve_id: CVE-2022-31620
cwe_id: CWE-119
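The patch recorded above is a single-definition hardening: each magnitude-context count (19 for the DC set, 18 for the AC set) moves into a named MagnitudeContexts enum, so the array dimensions and every loop bound derive from one constant instead of repeated literals that can drift apart and overrun the arrays. A sketch of the same idiom in plain C, with illustrative names rather than the libjpeg identifiers:

    /* Illustrative constant standing in for a magnitude-context
     * count; the array sizes and the loop bound below all come
     * from this single definition. */
    enum { MAG_CONTEXTS = 19 };

    typedef struct {
        int x[MAG_CONTEXTS];
        int m[MAG_CONTEXTS];
    } mag_set_t;

    static void mag_set_init(mag_set_t *s) {
        /* With a shared constant, shrinking or growing the context
         * set in one place cannot leave a stale literal indexing
         * past the end of either array. */
        for (int i = 0; i < MAG_CONTEXTS; ++i) {
            s->x[i] = 0;
            s->m[i] = 0;
        }
    }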
id: 538
file_name: huffman.c
programming_language: C
method_name: Huff_offsetReceive
/* =========================================================================== Copyright (C) 1999-2005 Id Software, Inc. This file is part of Quake III Arena source code. Quake III Arena source code is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Quake III Arena source code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Quake III Arena source code; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA =========================================================================== */ /* This is based on the Adaptive Huffman algorithm described in Sayood's Data * Compression book. The ranks are not actually stored, but implicitly defined * by the location of a node within a doubly-linked list */ #include "q_shared.h" #include "qcommon.h" static int bloc = 0; void Huff_putBit( int bit, byte *fout, int *offset) { bloc = *offset; if ((bloc&7) == 0) { fout[(bloc>>3)] = 0; } fout[(bloc>>3)] |= bit << (bloc&7); bloc++; *offset = bloc; } int Huff_getBloc(void) { return bloc; } void Huff_setBloc(int _bloc) { bloc = _bloc; } int Huff_getBit( byte *fin, int *offset) { int t; bloc = *offset; t = (fin[(bloc>>3)] >> (bloc&7)) & 0x1; bloc++; *offset = bloc; return t; } /* Add a bit to the output file (buffered) */ static void add_bit (char bit, byte *fout) { if ((bloc&7) == 0) { fout[(bloc>>3)] = 0; } fout[(bloc>>3)] |= bit << (bloc&7); bloc++; } /* Receive one bit from the input file (buffered) */ static int get_bit (byte *fin) { int t; t = (fin[(bloc>>3)] >> (bloc&7)) & 0x1; bloc++; return t; } static node_t **get_ppnode(huff_t* huff) { node_t **tppnode; if (!huff->freelist) { return &(huff->nodePtrs[huff->blocPtrs++]); } else { tppnode = huff->freelist; huff->freelist = (node_t **)*tppnode; return tppnode; } } static void free_ppnode(huff_t* huff, node_t **ppnode) { *ppnode = (node_t *)huff->freelist; huff->freelist = ppnode; } /* Swap the location of these two nodes in the tree */ static void swap (huff_t* huff, node_t *node1, node_t *node2) { node_t *par1, *par2; par1 = node1->parent; par2 = node2->parent; if (par1) { if (par1->left == node1) { par1->left = node2; } else { par1->right = node2; } } else { huff->tree = node2; } if (par2) { if (par2->left == node2) { par2->left = node1; } else { par2->right = node1; } } else { huff->tree = node1; } node1->parent = par2; node2->parent = par1; } /* Swap these two nodes in the linked list (update ranks) */ static void swaplist(node_t *node1, node_t *node2) { node_t *par1; par1 = node1->next; node1->next = node2->next; node2->next = par1; par1 = node1->prev; node1->prev = node2->prev; node2->prev = par1; if (node1->next == node1) { node1->next = node2; } if (node2->next == node2) { node2->next = node1; } if (node1->next) { node1->next->prev = node1; } if (node2->next) { node2->next->prev = node2; } if (node1->prev) { node1->prev->next = node1; } if (node2->prev) { node2->prev->next = node2; } } /* Do the increments */ static void increment(huff_t* huff, node_t *node) { node_t *lnode; if (!node) { return; } if (node->next != NULL && node->next->weight == node->weight) { lnode = 
*node->head; if (lnode != node->parent) { swap(huff, lnode, node); } swaplist(lnode, node); } if (node->prev && node->prev->weight == node->weight) { *node->head = node->prev; } else { *node->head = NULL; free_ppnode(huff, node->head); } node->weight++; if (node->next && node->next->weight == node->weight) { node->head = node->next->head; } else { node->head = get_ppnode(huff); *node->head = node; } if (node->parent) { increment(huff, node->parent); if (node->prev == node->parent) { swaplist(node, node->parent); if (*node->head == node) { *node->head = node->parent; } } } } void Huff_addRef(huff_t* huff, byte ch) { node_t *tnode, *tnode2; if (huff->loc[ch] == NULL) { /* if this is the first transmission of this node */ tnode = &(huff->nodeList[huff->blocNode++]); tnode2 = &(huff->nodeList[huff->blocNode++]); tnode2->symbol = INTERNAL_NODE; tnode2->weight = 1; tnode2->next = huff->lhead->next; if (huff->lhead->next) { huff->lhead->next->prev = tnode2; if (huff->lhead->next->weight == 1) { tnode2->head = huff->lhead->next->head; } else { tnode2->head = get_ppnode(huff); *tnode2->head = tnode2; } } else { tnode2->head = get_ppnode(huff); *tnode2->head = tnode2; } huff->lhead->next = tnode2; tnode2->prev = huff->lhead; tnode->symbol = ch; tnode->weight = 1; tnode->next = huff->lhead->next; if (huff->lhead->next) { huff->lhead->next->prev = tnode; if (huff->lhead->next->weight == 1) { tnode->head = huff->lhead->next->head; } else { /* this should never happen */ tnode->head = get_ppnode(huff); *tnode->head = tnode2; } } else { /* this should never happen */ tnode->head = get_ppnode(huff); *tnode->head = tnode; } huff->lhead->next = tnode; tnode->prev = huff->lhead; tnode->left = tnode->right = NULL; if (huff->lhead->parent) { if (huff->lhead->parent->left == huff->lhead) { /* lhead is guaranteed to by the NYT */ huff->lhead->parent->left = tnode2; } else { huff->lhead->parent->right = tnode2; } } else { huff->tree = tnode2; } tnode2->right = tnode; tnode2->left = huff->lhead; tnode2->parent = huff->lhead->parent; huff->lhead->parent = tnode->parent = tnode2; huff->loc[ch] = tnode; increment(huff, tnode2->parent); } else { increment(huff, huff->loc[ch]); } } /* Get a symbol */ int Huff_Receive (node_t *node, int *ch, byte *fin) { while (node && node->symbol == INTERNAL_NODE) { if (get_bit(fin)) { node = node->right; } else { node = node->left; } } if (!node) { return 0; // Com_Error(ERR_DROP, "Illegal tree!"); } return (*ch = node->symbol); } /* Get a symbol */ void Huff_offsetReceive (node_t *node, int *ch, byte *fin, int *offset) { bloc = *offset; while (node && node->symbol == INTERNAL_NODE) { if (get_bit(fin)) { node = node->right; } else { node = node->left; } } if (!node) { *ch = 0; return; // Com_Error(ERR_DROP, "Illegal tree!"); } *ch = node->symbol; *offset = bloc; } /* Send the prefix code for this node */ static void send(node_t *node, node_t *child, byte *fout) { if (node->parent) { send(node->parent, node, fout); } if (child) { if (node->right == child) { add_bit(1, fout); } else { add_bit(0, fout); } } } /* Send a symbol */ void Huff_transmit (huff_t *huff, int ch, byte *fout) { int i; if (huff->loc[ch] == NULL) { /* node_t hasn't been transmitted, send a NYT, then the symbol */ Huff_transmit(huff, NYT, fout); for (i = 7; i >= 0; i--) { add_bit((char)((ch >> i) & 0x1), fout); } } else { send(huff->loc[ch], NULL, fout); } } void Huff_offsetTransmit (huff_t *huff, int ch, byte *fout, int *offset) { bloc = *offset; send(huff->loc[ch], NULL, fout); *offset = bloc; } void 
Huff_Decompress(msg_t *mbuf, int offset) { int ch, cch, i, j, size; byte seq[65536]; byte* buffer; huff_t huff; size = mbuf->cursize - offset; buffer = mbuf->data + offset; if ( size <= 0 ) { return; } Com_Memset(&huff, 0, sizeof(huff_t)); // Initialize the tree & list with the NYT node huff.tree = huff.lhead = huff.ltail = huff.loc[NYT] = &(huff.nodeList[huff.blocNode++]); huff.tree->symbol = NYT; huff.tree->weight = 0; huff.lhead->next = huff.lhead->prev = NULL; huff.tree->parent = huff.tree->left = huff.tree->right = NULL; cch = buffer[0]*256 + buffer[1]; // don't overflow with bad messages if ( cch > mbuf->maxsize - offset ) { cch = mbuf->maxsize - offset; } bloc = 16; for ( j = 0; j < cch; j++ ) { ch = 0; // don't overflow reading from the messages // FIXME: would it be better to have an overflow check in get_bit ? if ( (bloc >> 3) > size ) { seq[j] = 0; break; } Huff_Receive(huff.tree, &ch, buffer); /* Get a character */ if ( ch == NYT ) { /* We got a NYT, get the symbol associated with it */ ch = 0; for ( i = 0; i < 8; i++ ) { ch = (ch<<1) + get_bit(buffer); } } seq[j] = ch; /* Write symbol */ Huff_addRef(&huff, (byte)ch); /* Increment node */ } mbuf->cursize = cch + offset; Com_Memcpy(mbuf->data + offset, seq, cch); } extern int oldsize; void Huff_Compress(msg_t *mbuf, int offset) { int i, ch, size; byte seq[65536]; byte* buffer; huff_t huff; size = mbuf->cursize - offset; buffer = mbuf->data+ + offset; if (size<=0) { return; } Com_Memset(&huff, 0, sizeof(huff_t)); // Add the NYT (not yet transmitted) node into the tree/list */ huff.tree = huff.lhead = huff.loc[NYT] = &(huff.nodeList[huff.blocNode++]); huff.tree->symbol = NYT; huff.tree->weight = 0; huff.lhead->next = huff.lhead->prev = NULL; huff.tree->parent = huff.tree->left = huff.tree->right = NULL; seq[0] = (size>>8); seq[1] = size&0xff; bloc = 16; for (i=0; i<size; i++ ) { ch = buffer[i]; Huff_transmit(&huff, ch, seq); /* Transmit symbol */ Huff_addRef(&huff, (byte)ch); /* Do update */ } bloc += 8; // next byte mbuf->cursize = (bloc>>3) + offset; Com_Memcpy(mbuf->data+offset, seq, (bloc>>3)); } void Huff_Init(huffman_t *huff) { Com_Memset(&huff->compressor, 0, sizeof(huff_t)); Com_Memset(&huff->decompressor, 0, sizeof(huff_t)); // Initialize the tree & list with the NYT node huff->decompressor.tree = huff->decompressor.lhead = huff->decompressor.ltail = huff->decompressor.loc[NYT] = &(huff->decompressor.nodeList[huff->decompressor.blocNode++]); huff->decompressor.tree->symbol = NYT; huff->decompressor.tree->weight = 0; huff->decompressor.lhead->next = huff->decompressor.lhead->prev = NULL; huff->decompressor.tree->parent = huff->decompressor.tree->left = huff->decompressor.tree->right = NULL; // Add the NYT (not yet transmitted) node into the tree/list */ huff->compressor.tree = huff->compressor.lhead = huff->compressor.loc[NYT] = &(huff->compressor.nodeList[huff->compressor.blocNode++]); huff->compressor.tree->symbol = NYT; huff->compressor.tree->weight = 0; huff->compressor.lhead->next = huff->compressor.lhead->prev = NULL; huff->compressor.tree->parent = huff->compressor.tree->left = huff->compressor.tree->right = NULL; }
/* =========================================================================== Copyright (C) 1999-2005 Id Software, Inc. This file is part of Quake III Arena source code. Quake III Arena source code is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Quake III Arena source code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Quake III Arena source code; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA =========================================================================== */ /* This is based on the Adaptive Huffman algorithm described in Sayood's Data * Compression book. The ranks are not actually stored, but implicitly defined * by the location of a node within a doubly-linked list */ #include "q_shared.h" #include "qcommon.h" static int bloc = 0; void Huff_putBit( int bit, byte *fout, int *offset) { bloc = *offset; if ((bloc&7) == 0) { fout[(bloc>>3)] = 0; } fout[(bloc>>3)] |= bit << (bloc&7); bloc++; *offset = bloc; } int Huff_getBloc(void) { return bloc; } void Huff_setBloc(int _bloc) { bloc = _bloc; } int Huff_getBit( byte *fin, int *offset) { int t; bloc = *offset; t = (fin[(bloc>>3)] >> (bloc&7)) & 0x1; bloc++; *offset = bloc; return t; } /* Add a bit to the output file (buffered) */ static void add_bit (char bit, byte *fout) { if ((bloc&7) == 0) { fout[(bloc>>3)] = 0; } fout[(bloc>>3)] |= bit << (bloc&7); bloc++; } /* Receive one bit from the input file (buffered) */ static int get_bit (byte *fin) { int t; t = (fin[(bloc>>3)] >> (bloc&7)) & 0x1; bloc++; return t; } static node_t **get_ppnode(huff_t* huff) { node_t **tppnode; if (!huff->freelist) { return &(huff->nodePtrs[huff->blocPtrs++]); } else { tppnode = huff->freelist; huff->freelist = (node_t **)*tppnode; return tppnode; } } static void free_ppnode(huff_t* huff, node_t **ppnode) { *ppnode = (node_t *)huff->freelist; huff->freelist = ppnode; } /* Swap the location of these two nodes in the tree */ static void swap (huff_t* huff, node_t *node1, node_t *node2) { node_t *par1, *par2; par1 = node1->parent; par2 = node2->parent; if (par1) { if (par1->left == node1) { par1->left = node2; } else { par1->right = node2; } } else { huff->tree = node2; } if (par2) { if (par2->left == node2) { par2->left = node1; } else { par2->right = node1; } } else { huff->tree = node1; } node1->parent = par2; node2->parent = par1; } /* Swap these two nodes in the linked list (update ranks) */ static void swaplist(node_t *node1, node_t *node2) { node_t *par1; par1 = node1->next; node1->next = node2->next; node2->next = par1; par1 = node1->prev; node1->prev = node2->prev; node2->prev = par1; if (node1->next == node1) { node1->next = node2; } if (node2->next == node2) { node2->next = node1; } if (node1->next) { node1->next->prev = node1; } if (node2->next) { node2->next->prev = node2; } if (node1->prev) { node1->prev->next = node1; } if (node2->prev) { node2->prev->next = node2; } } /* Do the increments */ static void increment(huff_t* huff, node_t *node) { node_t *lnode; if (!node) { return; } if (node->next != NULL && node->next->weight == node->weight) { lnode = 
*node->head; if (lnode != node->parent) { swap(huff, lnode, node); } swaplist(lnode, node); } if (node->prev && node->prev->weight == node->weight) { *node->head = node->prev; } else { *node->head = NULL; free_ppnode(huff, node->head); } node->weight++; if (node->next && node->next->weight == node->weight) { node->head = node->next->head; } else { node->head = get_ppnode(huff); *node->head = node; } if (node->parent) { increment(huff, node->parent); if (node->prev == node->parent) { swaplist(node, node->parent); if (*node->head == node) { *node->head = node->parent; } } } } void Huff_addRef(huff_t* huff, byte ch) { node_t *tnode, *tnode2; if (huff->loc[ch] == NULL) { /* if this is the first transmission of this node */ tnode = &(huff->nodeList[huff->blocNode++]); tnode2 = &(huff->nodeList[huff->blocNode++]); tnode2->symbol = INTERNAL_NODE; tnode2->weight = 1; tnode2->next = huff->lhead->next; if (huff->lhead->next) { huff->lhead->next->prev = tnode2; if (huff->lhead->next->weight == 1) { tnode2->head = huff->lhead->next->head; } else { tnode2->head = get_ppnode(huff); *tnode2->head = tnode2; } } else { tnode2->head = get_ppnode(huff); *tnode2->head = tnode2; } huff->lhead->next = tnode2; tnode2->prev = huff->lhead; tnode->symbol = ch; tnode->weight = 1; tnode->next = huff->lhead->next; if (huff->lhead->next) { huff->lhead->next->prev = tnode; if (huff->lhead->next->weight == 1) { tnode->head = huff->lhead->next->head; } else { /* this should never happen */ tnode->head = get_ppnode(huff); *tnode->head = tnode2; } } else { /* this should never happen */ tnode->head = get_ppnode(huff); *tnode->head = tnode; } huff->lhead->next = tnode; tnode->prev = huff->lhead; tnode->left = tnode->right = NULL; if (huff->lhead->parent) { if (huff->lhead->parent->left == huff->lhead) { /* lhead is guaranteed to by the NYT */ huff->lhead->parent->left = tnode2; } else { huff->lhead->parent->right = tnode2; } } else { huff->tree = tnode2; } tnode2->right = tnode; tnode2->left = huff->lhead; tnode2->parent = huff->lhead->parent; huff->lhead->parent = tnode->parent = tnode2; huff->loc[ch] = tnode; increment(huff, tnode2->parent); } else { increment(huff, huff->loc[ch]); } } /* Get a symbol */ int Huff_Receive (node_t *node, int *ch, byte *fin) { while (node && node->symbol == INTERNAL_NODE) { if (get_bit(fin)) { node = node->right; } else { node = node->left; } } if (!node) { return 0; // Com_Error(ERR_DROP, "Illegal tree!"); } return (*ch = node->symbol); } /* Get a symbol */ void Huff_offsetReceive (node_t *node, int *ch, byte *fin, int *offset, int maxoffset) { bloc = *offset; while (node && node->symbol == INTERNAL_NODE) { if (bloc >= maxoffset) { *ch = 0; *offset = maxoffset + 1; return; } if (get_bit(fin)) { node = node->right; } else { node = node->left; } } if (!node) { *ch = 0; return; // Com_Error(ERR_DROP, "Illegal tree!"); } *ch = node->symbol; *offset = bloc; } /* Send the prefix code for this node */ static void send(node_t *node, node_t *child, byte *fout, int maxoffset) { if (node->parent) { send(node->parent, node, fout, maxoffset); } if (child) { if (bloc >= maxoffset) { bloc = maxoffset + 1; return; } if (node->right == child) { add_bit(1, fout); } else { add_bit(0, fout); } } } /* Send a symbol */ void Huff_transmit (huff_t *huff, int ch, byte *fout, int maxoffset) { int i; if (huff->loc[ch] == NULL) { /* node_t hasn't been transmitted, send a NYT, then the symbol */ Huff_transmit(huff, NYT, fout, maxoffset); for (i = 7; i >= 0; i--) { add_bit((char)((ch >> i) & 0x1), fout); } } else { 
send(huff->loc[ch], NULL, fout, maxoffset); } } void Huff_offsetTransmit (huff_t *huff, int ch, byte *fout, int *offset, int maxoffset) { bloc = *offset; send(huff->loc[ch], NULL, fout, maxoffset); *offset = bloc; } void Huff_Decompress(msg_t *mbuf, int offset) { int ch, cch, i, j, size; byte seq[65536]; byte* buffer; huff_t huff; size = mbuf->cursize - offset; buffer = mbuf->data + offset; if ( size <= 0 ) { return; } Com_Memset(&huff, 0, sizeof(huff_t)); // Initialize the tree & list with the NYT node huff.tree = huff.lhead = huff.ltail = huff.loc[NYT] = &(huff.nodeList[huff.blocNode++]); huff.tree->symbol = NYT; huff.tree->weight = 0; huff.lhead->next = huff.lhead->prev = NULL; huff.tree->parent = huff.tree->left = huff.tree->right = NULL; cch = buffer[0]*256 + buffer[1]; // don't overflow with bad messages if ( cch > mbuf->maxsize - offset ) { cch = mbuf->maxsize - offset; } bloc = 16; for ( j = 0; j < cch; j++ ) { ch = 0; // don't overflow reading from the messages // FIXME: would it be better to have an overflow check in get_bit ? if ( (bloc >> 3) > size ) { seq[j] = 0; break; } Huff_Receive(huff.tree, &ch, buffer); /* Get a character */ if ( ch == NYT ) { /* We got a NYT, get the symbol associated with it */ ch = 0; for ( i = 0; i < 8; i++ ) { ch = (ch<<1) + get_bit(buffer); } } seq[j] = ch; /* Write symbol */ Huff_addRef(&huff, (byte)ch); /* Increment node */ } mbuf->cursize = cch + offset; Com_Memcpy(mbuf->data + offset, seq, cch); } extern int oldsize; void Huff_Compress(msg_t *mbuf, int offset) { int i, ch, size; byte seq[65536]; byte* buffer; huff_t huff; size = mbuf->cursize - offset; buffer = mbuf->data+ + offset; if (size<=0) { return; } Com_Memset(&huff, 0, sizeof(huff_t)); // Add the NYT (not yet transmitted) node into the tree/list */ huff.tree = huff.lhead = huff.loc[NYT] = &(huff.nodeList[huff.blocNode++]); huff.tree->symbol = NYT; huff.tree->weight = 0; huff.lhead->next = huff.lhead->prev = NULL; huff.tree->parent = huff.tree->left = huff.tree->right = NULL; seq[0] = (size>>8); seq[1] = size&0xff; bloc = 16; for (i=0; i<size; i++ ) { ch = buffer[i]; Huff_transmit(&huff, ch, seq, size<<3); /* Transmit symbol */ Huff_addRef(&huff, (byte)ch); /* Do update */ } bloc += 8; // next byte mbuf->cursize = (bloc>>3) + offset; Com_Memcpy(mbuf->data+offset, seq, (bloc>>3)); } void Huff_Init(huffman_t *huff) { Com_Memset(&huff->compressor, 0, sizeof(huff_t)); Com_Memset(&huff->decompressor, 0, sizeof(huff_t)); // Initialize the tree & list with the NYT node huff->decompressor.tree = huff->decompressor.lhead = huff->decompressor.ltail = huff->decompressor.loc[NYT] = &(huff->decompressor.nodeList[huff->decompressor.blocNode++]); huff->decompressor.tree->symbol = NYT; huff->decompressor.tree->weight = 0; huff->decompressor.lhead->next = huff->decompressor.lhead->prev = NULL; huff->decompressor.tree->parent = huff->decompressor.tree->left = huff->decompressor.tree->right = NULL; // Add the NYT (not yet transmitted) node into the tree/list */ huff->compressor.tree = huff->compressor.lhead = huff->compressor.loc[NYT] = &(huff->compressor.nodeList[huff->compressor.blocNode++]); huff->compressor.tree->symbol = NYT; huff->compressor.tree->weight = 0; huff->compressor.lhead->next = huff->compressor.lhead->prev = NULL; huff->compressor.tree->parent = huff->compressor.tree->left = huff->compressor.tree->right = NULL; }
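With the patched signatures above, callers bound the bit cursor by the message size in bits, exactly as Huff_Compress now does with size<<3. A short caller sketch (the wrapper name, buffer, and sizes are assumptions, not repository code):

/* Hypothetical caller: cap the cursor at the output size in bits; the
   patched send() stops and parks bloc at maxoffset + 1 instead of
   writing past out[]. */
void encode_symbol(huff_t *huff, int sym, byte *out, int outsize, int *bitpos) {
	Huff_offsetTransmit(huff, sym, out, bitpos, outsize << 3);
	if (*bitpos > outsize << 3) {
		/* bound was hit: the encoded stream did not fit in out[] */
	}
}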
void Huff_offsetReceive (node_t *node, int *ch, byte *fin, int *offset) { bloc = *offset; while (node && node->symbol == INTERNAL_NODE) { if (get_bit(fin)) { node = node->right; } else { node = node->left; } } if (!node) { *ch = 0; return; // Com_Error(ERR_DROP, "Illegal tree!"); } *ch = node->symbol; *offset = bloc; }
void Huff_offsetReceive (node_t *node, int *ch, byte *fin, int *offset, int maxoffset) { bloc = *offset; while (node && node->symbol == INTERNAL_NODE) { if (bloc >= maxoffset) { *ch = 0; *offset = maxoffset + 1; return; } if (get_bit(fin)) { node = node->right; } else { node = node->left; } } if (!node) { *ch = 0; return; // Com_Error(ERR_DROP, "Illegal tree!"); } *ch = node->symbol; *offset = bloc; }
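The patched receive path reports a bounded-out tree walk by forcing *ch to 0 and setting *offset to maxoffset + 1, so a single comparison lets a caller reject truncated or hostile input. A sketch (hypothetical wrapper, names assumed):

/* Decode one symbol; return -1 if the walk ran past the message. */
int decode_symbol(node_t *tree, byte *in, int insize, int *bitpos) {
	int sym = 0;
	Huff_offsetReceive(tree, &sym, in, bitpos, insize << 3);
	if (*bitpos > insize << 3) {
		return -1;	/* malformed message */
	}
	return sym;
}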
added (line numbers in the patched file):
  282: void Huff_offsetReceive (node_t *node, int *ch, byte *fin, int *offset, int maxoffset) {
  285: 		if (bloc >= maxoffset) {
  286: 			*ch = 0;
  287: 			*offset = maxoffset + 1;
  288: 			return;
  289: 		}
  306: static void send(node_t *node, node_t *child, byte *fout, int maxoffset) {
  308: 		send(node->parent, node, fout, maxoffset);
  311: 		if (bloc >= maxoffset) {
  312: 			bloc = maxoffset + 1;
  313: 			return;
  314: 		}
  324: void Huff_transmit (huff_t *huff, int ch, byte *fout, int maxoffset) {
  328: 		Huff_transmit(huff, NYT, fout, maxoffset);
  333: 		send(huff->loc[ch], NULL, fout, maxoffset);
  337: void Huff_offsetTransmit (huff_t *huff, int ch, byte *fout, int *offset, int maxoffset) {
  339: 	send(huff->loc[ch], NULL, fout, maxoffset);
  425: 		Huff_transmit(&huff, ch, seq, size<<3);		/* Transmit symbol */
deleted (line numbers in the original file):
  282: void Huff_offsetReceive (node_t *node, int *ch, byte *fin, int *offset) {
  301: static void send(node_t *node, node_t *child, byte *fout) {
  303: 		send(node->parent, node, fout);
  315: void Huff_transmit (huff_t *huff, int ch, byte *fout) {
  319: 		Huff_transmit(huff, NYT, fout);
  324: 		send(huff->loc[ch], NULL, fout);
  328: void Huff_offsetTransmit (huff_t *huff, int ch, byte *fout, int *offset) {
  330: 	send(huff->loc[ch], NULL, fout);
  416: 		Huff_transmit(&huff, ch, seq);		/* Transmit symbol */
18
9
350
2,578
16
87
5
https://github.com/ioquake/ioq3
CVE-2017-11721
CWE-119
2,298
zend-string.cpp
C++
HPHP::string_number_format
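string_number_format in the code below rounds the value, prints it with snprintf, and then copies it right to left while inserting a separator before every group of three integral digits. A standalone sketch of that grouping arithmetic, simplified to single-character separators and non-negative dec (a hypothetical helper, not HHVM code):

#include <stdio.h>
#include <string.h>

/* Sketch: format |d| with dec decimals (assumes the result fits in tmp),
   then emit left to right, inserting sep between integral digit groups. */
static void sketch_number_format(double d, int dec, char sep, char *out) {
	char tmp[64];
	int len = snprintf(tmp, sizeof tmp, "%.*f", dec, d < 0 ? -d : d);
	const char *dot = strchr(tmp, '.');
	int integral = dot ? (int)(dot - tmp) : len;
	char *t = out;
	int i;
	if (d < 0) *t++ = '-';
	for (i = 0; i < len; i++) {
		if (i && i < integral && (integral - i) % 3 == 0) *t++ = sep;
		*t++ = tmp[i];
	}
	*t = '\0';
}

int main(void) {
	char buf[80];
	sketch_number_format(1234567.891, 2, ',', buf);
	printf("%s\n", buf);	/* prints 1,234,567.89 */
	return 0;
}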
/* +----------------------------------------------------------------------+ | HipHop for PHP | +----------------------------------------------------------------------+ | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) | | Copyright (c) 1998-2010 Zend Technologies Ltd. (http://www.zend.com) | +----------------------------------------------------------------------+ | This source file is subject to version 2.00 of the Zend license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.zend.com/license/2_00.txt. | | If you did not receive a copy of the Zend license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@zend.com so we can mail you a copy immediately. | +----------------------------------------------------------------------+ */ // NOTE: See also "hphp/zend/zend-string.*". #include "hphp/runtime/base/zend-string.h" #include "hphp/runtime/base/zend-printf.h" #include "hphp/runtime/base/zend-math.h" #include "hphp/util/lock.h" #include "hphp/util/overflow.h" #include <algorithm> #include <cmath> #ifndef _MSC_VER #include <monetary.h> #endif #include "hphp/util/bstring.h" #include "hphp/runtime/base/exceptions.h" #include "hphp/runtime/base/string-buffer.h" #include "hphp/runtime/base/runtime-error.h" #include "hphp/runtime/base/string-util.h" #include "hphp/runtime/base/builtin-functions.h" #include <folly/portability/String.h> #define PHP_QPRINT_MAXL 75 namespace HPHP { /////////////////////////////////////////////////////////////////////////////// // helpers void string_charmask(const char *sinput, int len, char *mask) { const unsigned char *input = (unsigned char *)sinput; const unsigned char *end; unsigned char c; memset(mask, 0, 256); for (end = input+len; input < end; input++) { c=*input; if ((input+3 < end) && input[1] == '.' && input[2] == '.' && input[3] >= c) { memset(mask+c, 1, input[3] - c + 1); input+=3; } else if ((input+1 < end) && input[0] == '.' && input[1] == '.') { /* Error, try to be as helpful as possible: (a range ending/starting with '.' won't be captured here) */ if (end-len >= input) { /* there was no 'left' char */ throw_invalid_argument ("charlist: Invalid '..'-range, missing left of '..'"); continue; } if (input+2 >= end) { /* there is no 'right' char */ throw_invalid_argument ("charlist: Invalid '..'-range, missing right of '..'"); continue; } if (input[-1] > input[2]) { /* wrong order */ throw_invalid_argument ("charlist: '..'-range needs to be incrementing"); continue; } /* FIXME: better error (a..b..c is the only left possibility?) */ throw_invalid_argument("charlist: Invalid '..'-range"); continue; } else { mask[c]=1; } } } /////////////////////////////////////////////////////////////////////////////// void string_to_case(String& s, int (*tocase)(int)) { assertx(!s.isNull()); assertx(tocase); auto data = s.mutableData(); auto len = s.size(); for (int i = 0; i < len; i++) { data[i] = tocase(data[i]); } } /////////////////////////////////////////////////////////////////////////////// #define STR_PAD_LEFT 0 #define STR_PAD_RIGHT 1 #define STR_PAD_BOTH 2 String string_pad(const char *input, int len, int pad_length, const char *pad_string, int pad_str_len, int pad_type) { assertx(input); int num_pad_chars = pad_length - len; /* If resulting string turns out to be shorter than input string, we simply copy the input and return. 
*/ if (pad_length < 0 || num_pad_chars < 0) { return String(input, len, CopyString); } /* Setup the padding string values if specified. */ if (pad_str_len == 0) { throw_invalid_argument("pad_string: (empty)"); return String(); } String ret(pad_length, ReserveString); char *result = ret.mutableData(); /* We need to figure out the left/right padding lengths. */ int left_pad, right_pad; switch (pad_type) { case STR_PAD_RIGHT: left_pad = 0; right_pad = num_pad_chars; break; case STR_PAD_LEFT: left_pad = num_pad_chars; right_pad = 0; break; case STR_PAD_BOTH: left_pad = num_pad_chars / 2; right_pad = num_pad_chars - left_pad; break; default: throw_invalid_argument("pad_type: %d", pad_type); return String(); } /* First we pad on the left. */ int result_len = 0; for (int i = 0; i < left_pad; i++) { result[result_len++] = pad_string[i % pad_str_len]; } /* Then we copy the input string. */ memcpy(result + result_len, input, len); result_len += len; /* Finally, we pad on the right. */ for (int i = 0; i < right_pad; i++) { result[result_len++] = pad_string[i % pad_str_len]; } ret.setSize(result_len); return ret; } /////////////////////////////////////////////////////////////////////////////// int string_find(const char *input, int len, char ch, int pos, bool case_sensitive) { assertx(input); if (pos < 0 || pos > len) { return -1; } const void *ptr; if (case_sensitive) { ptr = memchr(input + pos, ch, len - pos); } else { ptr = bstrcasechr(input + pos, ch, len - pos); } if (ptr != nullptr) { return (int)((const char *)ptr - input); } return -1; } int string_rfind(const char *input, int len, char ch, int pos, bool case_sensitive) { assertx(input); if (pos < -len || pos > len) { return -1; } const void *ptr; if (case_sensitive) { if (pos >= 0) { ptr = memrchr(input + pos, ch, len - pos); } else { ptr = memrchr(input, ch, len + pos + 1); } } else { if (pos >= 0) { ptr = bstrrcasechr(input + pos, ch, len - pos); } else { ptr = bstrrcasechr(input, ch, len + pos + 1); } } if (ptr != nullptr) { return (int)((const char *)ptr - input); } return -1; } int string_find(const char *input, int len, const char *s, int s_len, int pos, bool case_sensitive) { assertx(input); assertx(s); if (!s_len || pos < 0 || pos > len) { return -1; } void *ptr; if (case_sensitive) { ptr = (void*)string_memnstr(input + pos, s, s_len, input + len); } else { ptr = bstrcasestr(input + pos, len - pos, s, s_len); } if (ptr != nullptr) { return (int)((const char *)ptr - input); } return -1; } int string_rfind(const char *input, int len, const char *s, int s_len, int pos, bool case_sensitive) { assertx(input); assertx(s); if (!s_len || pos < -len || pos > len) { return -1; } void *ptr; if (case_sensitive) { if (pos >= 0) { ptr = bstrrstr(input + pos, len - pos, s, s_len); } else { ptr = bstrrstr(input, len + std::min(pos + s_len, 0), s, s_len); } } else { if (pos >= 0) { ptr = bstrrcasestr(input + pos, len - pos, s, s_len); } else { ptr = bstrrcasestr(input, len + std::min(pos + s_len, 0), s, s_len); } } if (ptr != nullptr) { return (int)((const char *)ptr - input); } return -1; } const char *string_memnstr(const char *haystack, const char *needle, int needle_len, const char *end) { const char *p = haystack; char ne = needle[needle_len-1]; end -= needle_len; while (p <= end) { if ((p = (char *)memchr(p, *needle, (end-p+1))) && ne == p[needle_len-1]) { if (!memcmp(needle, p, needle_len-1)) { return p; } } if (p == nullptr) { return nullptr; } p++; } return nullptr; } String string_replace(const char *s, int len, int start, int length, const 
char *replacement, int len_repl) { assertx(s); assertx(replacement); assertx(len >= 0); // if "start" position is negative, count start position from the end // of the string if (start < 0) { start = len + start; if (start < 0) { start = 0; } } if (start > len) { start = len; } // if "length" position is negative, set it to the length // needed to stop that many chars from the end of the string if (length < 0) { length = (len - start) + length; if (length < 0) { length = 0; } } // check if length is too large if (length > len) { length = len; } // check if the length is too large adjusting for non-zero start // Write this way instead of start + length > len to avoid overflow if (length > len - start) { length = len - start; } String retString(len + len_repl - length, ReserveString); char *ret = retString.mutableData(); int ret_len = 0; if (start) { memcpy(ret, s, start); ret_len += start; } if (len_repl) { memcpy(ret + ret_len, replacement, len_repl); ret_len += len_repl; } len -= (start + length); if (len) { memcpy(ret + ret_len, s + start + length, len); ret_len += len; } retString.setSize(ret_len); return retString; } String string_replace(const char *input, int len, const char *search, int len_search, const char *replacement, int len_replace, int &count, bool case_sensitive) { assertx(input); assertx(search && len_search); assertx(len >= 0); assertx(len_search >= 0); assertx(len_replace >= 0); if (len == 0) { return String(); } req::vector<int> founds; founds.reserve(16); if (len_search == 1) { for (int pos = string_find(input, len, *search, 0, case_sensitive); pos >= 0; pos = string_find(input, len, *search, pos + len_search, case_sensitive)) { founds.push_back(pos); } } else { for (int pos = string_find(input, len, search, len_search, 0, case_sensitive); pos >= 0; pos = string_find(input, len, search, len_search, pos + len_search, case_sensitive)) { founds.push_back(pos); } } count = founds.size(); if (count == 0) { return String(); // not found } int reserve; // Make sure the new size of the string wouldn't overflow int32_t. Don't // bother if the replacement wouldn't make the string longer. if (len_replace > len_search) { auto raise = [&] { raise_error("String too large"); }; if (mul_overflow(len_replace - len_search, count)) { raise(); } int diff = (len_replace - len_search) * count; if (add_overflow(len, diff)) { raise(); } reserve = len + diff; } else { reserve = len + (len_replace - len_search) * count; } String retString(reserve, ReserveString); char *ret = retString.mutableData(); char *p = ret; int pos = 0; // last position in input that hasn't been copied over yet int n; for (unsigned int i = 0; i < founds.size(); i++) { n = founds[i]; if (n > pos) { n -= pos; memcpy(p, input, n); p += n; input += n; pos += n; } if (len_replace) { memcpy(p, replacement, len_replace); p += len_replace; } input += len_search; pos += len_search; } n = len; if (n > pos) { n -= pos; memcpy(p, input, n); p += n; } retString.setSize(p - ret); return retString; } /////////////////////////////////////////////////////////////////////////////// String string_chunk_split(const char *src, int srclen, const char *end, int endlen, int chunklen) { int chunks = srclen / chunklen; // complete chunks! 
int restlen = srclen - chunks * chunklen; /* srclen % chunklen */ String ret( safe_address( chunks + 1, endlen, srclen ), ReserveString ); char *dest = ret.mutableData(); const char *p; char *q; const char *pMax = src + srclen - chunklen + 1; for (p = src, q = dest; p < pMax; ) { memcpy(q, p, chunklen); q += chunklen; memcpy(q, end, endlen); q += endlen; p += chunklen; } if (restlen) { memcpy(q, p, restlen); q += restlen; memcpy(q, end, endlen); q += endlen; } ret.setSize(q - dest); return ret; } /////////////////////////////////////////////////////////////////////////////// #define PHP_TAG_BUF_SIZE 1023 /** * Check if tag is in a set of tags * * states: * * 0 start tag * 1 first non-whitespace char seen */ static int string_tag_find(const char *tag, int len, const char *set) { char c, *n; const char *t; int state=0, done=0; char *norm; if (len <= 0) { return 0; } norm = (char *)req::malloc_noptrs(len+1); SCOPE_EXIT { req::free(norm); }; n = norm; t = tag; c = tolower(*t); /* normalize the tag removing leading and trailing whitespace and turn any <a whatever...> into just <a> and any </tag> into <tag> */ while (!done) { switch (c) { case '<': *(n++) = c; break; case '>': done =1; break; default: if (!isspace((int)c)) { if (state == 0) { state=1; } if (c != '/') { *(n++) = c; } } else { if (state == 1) done=1; } break; } c = tolower(*(++t)); } *(n++) = '>'; *n = '\0'; if (strstr(set, norm)) { done=1; } else { done=0; } return done; } /** * A simple little state-machine to strip out html and php tags * * State 0 is the output state, State 1 means we are inside a * normal html tag and state 2 means we are inside a php tag. * * The state variable is passed in to allow a function like fgetss * to maintain state across calls to the function. * * lc holds the last significant character read and br is a bracket * counter. * * When an allow string is passed in we keep track of the string * in state 1 and when the tag is closed check it against the * allow string to see if we should allow it. * swm: Added ability to strip <?xml tags without assuming it PHP * code. 
*/ String string_strip_tags(const char *s, const int len, const char *allow, const int allow_len, bool allow_tag_spaces) { const char *abuf, *p; char *rbuf, *tbuf, *tp, *rp, c, lc; int br, i=0, depth=0, in_q = 0; int state = 0, pos; assertx(s); assertx(allow); String retString(s, len, CopyString); rbuf = retString.mutableData(); String allowString; c = *s; lc = '\0'; p = s; rp = rbuf; br = 0; if (allow_len) { assertx(allow); allowString = String(allow_len, ReserveString); char *atmp = allowString.mutableData(); for (const char *tmp = allow; *tmp; tmp++, atmp++) { *atmp = tolower((int)*(const unsigned char *)tmp); } allowString.setSize(allow_len); abuf = allowString.data(); tbuf = (char *)req::malloc_noptrs(PHP_TAG_BUF_SIZE+1); tp = tbuf; } else { abuf = nullptr; tbuf = tp = nullptr; } auto move = [&pos, &tbuf, &tp]() { if (tp - tbuf >= PHP_TAG_BUF_SIZE) { pos = tp - tbuf; tbuf = (char*)req::realloc_noptrs(tbuf, (tp - tbuf) + PHP_TAG_BUF_SIZE + 1); tp = tbuf + pos; } }; while (i < len) { switch (c) { case '\0': break; case '<': if (isspace(*(p + 1)) && !allow_tag_spaces) { goto reg_char; } if (state == 0) { lc = '<'; state = 1; if (allow_len) { move(); *(tp++) = '<'; } } else if (state == 1) { depth++; } break; case '(': if (state == 2) { if (lc != '"' && lc != '\'') { lc = '('; br++; } } else if (allow_len && state == 1) { move(); *(tp++) = c; } else if (state == 0) { *(rp++) = c; } break; case ')': if (state == 2) { if (lc != '"' && lc != '\'') { lc = ')'; br--; } } else if (allow_len && state == 1) { move(); *(tp++) = c; } else if (state == 0) { *(rp++) = c; } break; case '>': if (depth) { depth--; break; } if (in_q) { break; } switch (state) { case 1: /* HTML/XML */ lc = '>'; in_q = state = 0; if (allow_len) { move(); *(tp++) = '>'; *tp='\0'; if (string_tag_find(tbuf, tp-tbuf, abuf)) { memcpy(rp, tbuf, tp-tbuf); rp += tp-tbuf; } tp = tbuf; } break; case 2: /* PHP */ if (!br && lc != '\"' && *(p-1) == '?') { in_q = state = 0; tp = tbuf; } break; case 3: in_q = state = 0; tp = tbuf; break; case 4: /* JavaScript/CSS/etc... */ if (p >= s + 2 && *(p-1) == '-' && *(p-2) == '-') { in_q = state = 0; tp = tbuf; } break; default: *(rp++) = c; break; } break; case '"': case '\'': if (state == 4) { /* Inside <!-- comment --> */ break; } else if (state == 2 && *(p-1) != '\\') { if (lc == c) { lc = '\0'; } else if (lc != '\\') { lc = c; } } else if (state == 0) { *(rp++) = c; } else if (allow_len && state == 1) { move(); *(tp++) = c; } if (state && p != s && *(p-1) != '\\' && (!in_q || *p == in_q)) { if (in_q) { in_q = 0; } else { in_q = *p; } } break; case '!': /* JavaScript & Other HTML scripting languages */ if (state == 1 && *(p-1) == '<') { state = 3; lc = c; } else { if (state == 0) { *(rp++) = c; } else if (allow_len && state == 1) { move(); *(tp++) = c; } } break; case '-': if (state == 3 && p >= s + 2 && *(p-1) == '-' && *(p-2) == '!') { state = 4; } else { goto reg_char; } break; case '?': if (state == 1 && *(p-1) == '<') { br=0; state=2; break; } case 'E': case 'e': /* !DOCTYPE exception */ if (state==3 && p > s+6 && tolower(*(p-1)) == 'p' && tolower(*(p-2)) == 'y' && tolower(*(p-3)) == 't' && tolower(*(p-4)) == 'c' && tolower(*(p-5)) == 'o' && tolower(*(p-6)) == 'd') { state = 1; break; } /* fall-through */ case 'l': /* swm: If we encounter '<?xml' then we shouldn't be in * state == 2 (PHP). Switch back to HTML. 
*/ if (state == 2 && p > s+2 && *(p-1) == 'm' && *(p-2) == 'x') { state = 1; break; } /* fall-through */ default: reg_char: if (state == 0) { *(rp++) = c; } else if (allow_len && state == 1) { move(); *(tp++) = c; } break; } c = *(++p); i++; } if (rp < rbuf + len) { *rp = '\0'; } if (allow_len) { req::free(tbuf); } retString.setSize(rp - rbuf); return retString; } /////////////////////////////////////////////////////////////////////////////// static char string_hex2int(int c) { if (isdigit(c)) { return c - '0'; } if (c >= 'A' && c <= 'F') { return c - 'A' + 10; } if (c >= 'a' && c <= 'f') { return c - 'a' + 10; } return -1; } String string_quoted_printable_encode(const char *input, int len) { size_t length = len; const unsigned char *str = (unsigned char*)input; unsigned long lp = 0; unsigned char c; char *d, *buffer; char *hex = "0123456789ABCDEF"; String ret( safe_address( 3, length + ((safe_address(3, length, 0)/(PHP_QPRINT_MAXL-9)) + 1), 1), ReserveString ); d = buffer = ret.mutableData(); while (length--) { if (((c = *str++) == '\015') && (*str == '\012') && length > 0) { *d++ = '\015'; *d++ = *str++; length--; lp = 0; } else { if (iscntrl (c) || (c == 0x7f) || (c & 0x80) || (c == '=') || ((c == ' ') && (*str == '\015'))) { if ((((lp+= 3) > PHP_QPRINT_MAXL) && (c <= 0x7f)) || ((c > 0x7f) && (c <= 0xdf) && ((lp + 3) > PHP_QPRINT_MAXL)) || ((c > 0xdf) && (c <= 0xef) && ((lp + 6) > PHP_QPRINT_MAXL)) || ((c > 0xef) && (c <= 0xf4) && ((lp + 9) > PHP_QPRINT_MAXL))) { *d++ = '='; *d++ = '\015'; *d++ = '\012'; lp = 3; } *d++ = '='; *d++ = hex[c >> 4]; *d++ = hex[c & 0xf]; } else { if ((++lp) > PHP_QPRINT_MAXL) { *d++ = '='; *d++ = '\015'; *d++ = '\012'; lp = 1; } *d++ = c; } } } len = d - buffer; ret.setSize(len); return ret; } String string_quoted_printable_decode(const char *input, int len, bool is_q) { assertx(input); if (len == 0) { return String(); } int i = 0, j = 0, k; const char *str_in = input; String ret(len, ReserveString); char *str_out = ret.mutableData(); while (i < len && str_in[i]) { switch (str_in[i]) { case '=': if (i + 2 < len && str_in[i + 1] && str_in[i + 2] && isxdigit((int) str_in[i + 1]) && isxdigit((int) str_in[i + 2])) { str_out[j++] = (string_hex2int((int) str_in[i + 1]) << 4) + string_hex2int((int) str_in[i + 2]); i += 3; } else /* check for soft line break according to RFC 2045*/ { k = 1; while (str_in[i + k] && ((str_in[i + k] == 32) || (str_in[i + k] == 9))) { /* Possibly, skip spaces/tabs at the end of line */ k++; } if (!str_in[i + k]) { /* End of line reached */ i += k; } else if ((str_in[i + k] == 13) && (str_in[i + k + 1] == 10)) { /* CRLF */ i += k + 2; } else if ((str_in[i + k] == 13) || (str_in[i + k] == 10)) { /* CR or LF */ i += k + 1; } else { str_out[j++] = str_in[i++]; } } break; case '_': if (is_q) { str_out[j++] = ' '; i++; } else { str_out[j++] = str_in[i++]; } break; default: str_out[j++] = str_in[i++]; } } ret.setSize(j); return ret; } Variant string_base_to_numeric(const char *s, int len, int base) { int64_t num = 0; double fnum = 0; int mode = 0; int64_t cutoff; int cutlim; assertx(string_validate_base(base)); cutoff = LONG_MAX / base; cutlim = LONG_MAX % base; for (int i = len; i > 0; i--) { char c = *s++; /* might not work for EBCDIC */ if (c >= '0' && c <= '9') c -= '0'; else if (c >= 'A' && c <= 'Z') c -= 'A' - 10; else if (c >= 'a' && c <= 'z') c -= 'a' - 10; else continue; if (c >= base) continue; switch (mode) { case 0: /* Integer */ if (num < cutoff || (num == cutoff && c <= cutlim)) { num = num * base + c; break; } else { fnum = num; 
mode = 1; } /* fall-through */ case 1: /* Float */ fnum = fnum * base + c; } } if (mode == 1) { return fnum; } return num; } String string_long_to_base(unsigned long value, int base) { static char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; char buf[(sizeof(unsigned long) << 3) + 1]; char *ptr, *end; assertx(string_validate_base(base)); end = ptr = buf + sizeof(buf) - 1; do { *--ptr = digits[value % base]; value /= base; } while (ptr > buf && value); return String(ptr, end - ptr, CopyString); } String string_numeric_to_base(const Variant& value, int base) { static char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; assertx(string_validate_base(base)); if ((!value.isInteger() && !value.isDouble())) { return empty_string(); } if (value.isDouble()) { double fvalue = floor(value.toDouble()); /* floor it just in case */ char *ptr, *end; char buf[(sizeof(double) << 3) + 1]; /* Don't try to convert +/- infinity */ if (fvalue == HUGE_VAL || fvalue == -HUGE_VAL) { raise_warning("Number too large"); return empty_string(); } end = ptr = buf + sizeof(buf) - 1; do { *--ptr = digits[(int) fmod(fvalue, base)]; fvalue /= base; } while (ptr > buf && fabs(fvalue) >= 1); return String(ptr, end - ptr, CopyString); } return string_long_to_base(value.toInt64(), base); } /////////////////////////////////////////////////////////////////////////////// // uuencode #define PHP_UU_ENC(c) \ ((c) ? ((c) & 077) + ' ' : '`') #define PHP_UU_ENC_C2(c) \ PHP_UU_ENC(((*(c) * 16) & 060) | ((*((c) + 1) >> 4) & 017)) #define PHP_UU_ENC_C3(c) \ PHP_UU_ENC(((*(c + 1) * 4) & 074) | ((*((c) + 2) >> 6) & 03)) #define PHP_UU_DEC(c) \ (((c) - ' ') & 077) String string_uuencode(const char *src, int src_len) { assertx(src); assertx(src_len); int len = 45; char *p; const char *s, *e, *ee; char *dest; /* encoded length is ~ 38% greater than the original */ String ret((int)ceil(src_len * 1.38) + 45, ReserveString); p = dest = ret.mutableData(); s = src; e = src + src_len; while ((s + 3) < e) { ee = s + len; if (ee > e) { ee = e; len = ee - s; if (len % 3) { ee = s + (int) (floor(len / 3) * 3); } } *p++ = PHP_UU_ENC(len); while (s < ee) { *p++ = PHP_UU_ENC(*s >> 2); *p++ = PHP_UU_ENC_C2(s); *p++ = PHP_UU_ENC_C3(s); *p++ = PHP_UU_ENC(*(s + 2) & 077); s += 3; } if (len == 45) { *p++ = '\n'; } } if (s < e) { if (len == 45) { *p++ = PHP_UU_ENC(e - s); len = 0; } *p++ = PHP_UU_ENC(*s >> 2); *p++ = PHP_UU_ENC_C2(s); *p++ = ((e - s) > 1) ? PHP_UU_ENC_C3(s) : PHP_UU_ENC('\0'); *p++ = ((e - s) > 2) ? PHP_UU_ENC(*(s + 2) & 077) : PHP_UU_ENC('\0'); } if (len < 45) { *p++ = '\n'; } *p++ = PHP_UU_ENC('\0'); *p++ = '\n'; *p = '\0'; ret.setSize(p - dest); return ret; } String string_uudecode(const char *src, int src_len) { int total_len = 0; int len; const char *s, *e, *ee; char *p, *dest; String ret(ceil(src_len * 0.75), ReserveString); p = dest = ret.mutableData(); s = src; e = src + src_len; while (s < e) { if ((len = PHP_UU_DEC(*s++)) <= 0) { break; } /* sanity check */ if (len > src_len) { goto err; } total_len += len; ee = s + (len == 45 ? 
60 : (int) floor(len * 1.33)); /* sanity check */ if (ee > e) { goto err; } while (s < ee) { if (s + 4 > e) goto err; *p++ = PHP_UU_DEC(*s) << 2 | PHP_UU_DEC(*(s + 1)) >> 4; *p++ = PHP_UU_DEC(*(s + 1)) << 4 | PHP_UU_DEC(*(s + 2)) >> 2; *p++ = PHP_UU_DEC(*(s + 2)) << 6 | PHP_UU_DEC(*(s + 3)); s += 4; } if (len < 45) { break; } /* skip \n */ s++; } if ((len = total_len > (p - dest))) { *p++ = PHP_UU_DEC(*s) << 2 | PHP_UU_DEC(*(s + 1)) >> 4; if (len > 1) { *p++ = PHP_UU_DEC(*(s + 1)) << 4 | PHP_UU_DEC(*(s + 2)) >> 2; if (len > 2) { *p++ = PHP_UU_DEC(*(s + 2)) << 6 | PHP_UU_DEC(*(s + 3)); } } } ret.setSize(total_len); return ret; err: return String(); } /////////////////////////////////////////////////////////////////////////////// // base64 namespace { const char base64_table[] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', '\0' }; const char base64_pad = '='; const short base64_reverse_table[256] = { -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -2, -2, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 62, -2, -2, -2, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -2, -2, -2, -2, -2, -2, -2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -2, -2, -2, -2, -2, -2, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2 }; folly::Optional<int> maxEncodedSize(int length) { if ((length + 2) < 0 || ((length + 2) / 3) >= (1 << (sizeof(int) * 8 - 2))) { return folly::none; } return ((length + 2) / 3) * 4; } // outstr must be at least maxEncodedSize(length) bytes size_t php_base64_encode(const unsigned char *str, int length, unsigned char* outstr) { const unsigned char *current = str; unsigned char *p = outstr; while (length > 2) { /* keep going until we have less than 24 bits */ *p++ = base64_table[current[0] >> 2]; *p++ = base64_table[((current[0] & 0x03) << 4) + (current[1] >> 4)]; *p++ = base64_table[((current[1] & 0x0f) << 2) + (current[2] >> 6)]; *p++ = base64_table[current[2] & 0x3f]; current += 3; length -= 3; /* we just handle 3 octets of data */ } /* now deal with the tail end of things */ if (length != 0) { *p++ = base64_table[current[0] >> 2]; if (length > 1) { *p++ = base64_table[((current[0] & 0x03) << 4) + (current[1] >> 4)]; *p++ = base64_table[(current[1] & 0x0f) << 2]; *p++ = base64_pad; } else { *p++ = base64_table[(current[0] & 0x03) << 4]; *p++ = base64_pad; *p++ = base64_pad; } } return p - outstr; } // outstr must be at least length bytes ssize_t php_base64_decode(const char *str, int length, bool strict, unsigned char* outstr) { const unsigned char *current = (unsigned char*)str; int ch, i = 0, j = 0, k; /* this sucks for 
threaded environments */ unsigned char* result = outstr; /* run through the whole string, converting as we go */ while ((ch = *current++) != '\0' && length-- > 0) { if (ch == base64_pad) { if (*current != '=' && ((i % 4) == 1 || (strict && length > 0))) { if ((i % 4) != 1) { while (isspace(*(++current))) { continue; } if (*current == '\0') { continue; } } return -1; } continue; } ch = base64_reverse_table[ch]; if ((!strict && ch < 0) || ch == -1) { /* a space or some other separator character, we simply skip over */ continue; } else if (ch == -2) { return -1; } switch(i % 4) { case 0: result[j] = ch << 2; break; case 1: result[j++] |= ch >> 4; result[j] = (ch & 0x0f) << 4; break; case 2: result[j++] |= ch >>2; result[j] = (ch & 0x03) << 6; break; case 3: result[j++] |= ch; break; } i++; } k = j; /* mop things up if we ended on a boundary */ if (ch == base64_pad) { switch(i % 4) { case 1: return -1; case 2: k++; case 3: result[k] = 0; } } return j; } } String string_base64_encode(const char* input, int len) { if (auto const wantedSize = maxEncodedSize(len)) { String ret(*wantedSize, ReserveString); auto actualSize = php_base64_encode((unsigned char*)input, len, (unsigned char*)ret.mutableData()); ret.setSize(actualSize); return ret; } return String(); } String string_base64_decode(const char* input, int len, bool strict) { String ret(len, ReserveString); auto actualSize = php_base64_decode(input, len, strict, (unsigned char*)ret.mutableData()); if (actualSize < 0) return String(); ret.setSize(actualSize); return ret; } std::string base64_encode(const char* input, int len) { if (auto const wantedSize = maxEncodedSize(len)) { std::string ret; ret.resize(*wantedSize); auto actualSize = php_base64_encode((unsigned char*)input, len, (unsigned char*)ret.data()); ret.resize(actualSize); return ret; } return std::string(); } std::string base64_decode(const char* input, int len, bool strict) { if (!len) return std::string(); std::string ret; ret.resize(len); auto actualSize = php_base64_decode(input, len, strict, (unsigned char*)ret.data()); if (!actualSize) return std::string(); ret.resize(actualSize); return ret; } /////////////////////////////////////////////////////////////////////////////// String string_escape_shell_arg(const char *str) { int x, y, l; char *cmd; y = 0; l = strlen(str); String ret(safe_address(l, 4, 3), ReserveString); /* worst case */ cmd = ret.mutableData(); #ifdef _MSC_VER cmd[y++] = '"'; #else cmd[y++] = '\''; #endif for (x = 0; x < l; x++) { switch (str[x]) { #ifdef _MSC_VER case '"': case '%': case '!': cmd[y++] = ' '; break; #else case '\'': cmd[y++] = '\''; cmd[y++] = '\\'; cmd[y++] = '\''; #endif /* fall-through */ default: cmd[y++] = str[x]; } } #ifdef _MSC_VER if (y > 0 && '\\' == cmd[y - 1]) { int k = 0, n = y - 1; for (; n >= 0 && '\\' == cmd[n]; n--, k++); if (k % 2) { cmd[y++] = '\\'; } } cmd[y++] = '"'; #else cmd[y++] = '\''; #endif ret.setSize(y); return ret; } String string_escape_shell_cmd(const char *str) { register int x, y, l; char *cmd; char *p = nullptr; l = strlen(str); String ret(safe_address(l, 2, 1), ReserveString); cmd = ret.mutableData(); for (x = 0, y = 0; x < l; x++) { switch (str[x]) { #ifndef _MSC_VER case '"': case '\'': if (!p && (p = (char *)memchr(str + x + 1, str[x], l - x - 1))) { /* noop */ } else if (p && *p == str[x]) { p = nullptr; } else { cmd[y++] = '\\'; } cmd[y++] = str[x]; break; #else /* % is Windows specific for environmental variables, ^%PATH% will output PATH while ^%PATH^% will not. 
escapeshellcmd->val will escape all % and !. */ case '%': case '!': case '"': case '\'': #endif case '#': /* This is character-set independent */ case '&': case ';': case '`': case '|': case '*': case '?': case '~': case '<': case '>': case '^': case '(': case ')': case '[': case ']': case '{': case '}': case '$': case '\\': case '\x0A': /* excluding these two */ case '\xFF': #ifdef _MSC_VER cmd[y++] = '^'; #else cmd[y++] = '\\'; #endif /* fall-through */ default: cmd[y++] = str[x]; } } ret.setSize(y); return ret; } /////////////////////////////////////////////////////////////////////////////// static void string_similar_str(const char *txt1, int len1, const char *txt2, int len2, int *pos1, int *pos2, int *max) { const char *p, *q; const char *end1 = txt1 + len1; const char *end2 = txt2 + len2; int l; *max = 0; for (p = txt1; p < end1; p++) { for (q = txt2; q < end2; q++) { for (l = 0; (p + l < end1) && (q + l < end2) && (p[l] == q[l]); l++); if (l > *max) { *max = l; *pos1 = p - txt1; *pos2 = q - txt2; } } } } static int string_similar_char(const char *txt1, int len1, const char *txt2, int len2) { int sum; int pos1 = 0, pos2 = 0, max; string_similar_str(txt1, len1, txt2, len2, &pos1, &pos2, &max); if ((sum = max)) { if (pos1 && pos2) { sum += string_similar_char(txt1, pos1, txt2, pos2); } if ((pos1 + max < len1) && (pos2 + max < len2)) { sum += string_similar_char(txt1 + pos1 + max, len1 - pos1 - max, txt2 + pos2 + max, len2 - pos2 - max); } } return sum; } int string_similar_text(const char *t1, int len1, const char *t2, int len2, float *percent) { if (len1 == 0 && len2 == 0) { if (percent) *percent = 0.0; return 0; } int sim = string_similar_char(t1, len1, t2, len2); if (percent) *percent = sim * 200.0 / (len1 + len2); return sim; } /////////////////////////////////////////////////////////////////////////////// #define LEVENSHTEIN_MAX_LENTH 255 // reference implementation, only optimized for memory usage, not speed int string_levenshtein(const char *s1, int l1, const char *s2, int l2, int cost_ins, int cost_rep, int cost_del ) { int *p1, *p2, *tmp; int i1, i2, c0, c1, c2; if (l1==0) return l2*cost_ins; if (l2==0) return l1*cost_del; if ((l1>LEVENSHTEIN_MAX_LENTH)||(l2>LEVENSHTEIN_MAX_LENTH)) { raise_warning("levenshtein(): Argument string(s) too long"); return -1; } p1 = (int*)req::malloc_noptrs((l2+1) * sizeof(int)); SCOPE_EXIT { req::free(p1); }; p2 = (int*)req::malloc_noptrs((l2+1) * sizeof(int)); SCOPE_EXIT { req::free(p2); }; for(i2=0;i2<=l2;i2++) { p1[i2] = i2*cost_ins; } for(i1=0;i1<l1;i1++) { p2[0]=p1[0]+cost_del; for(i2=0;i2<l2;i2++) { c0=p1[i2]+((s1[i1]==s2[i2])?0:cost_rep); c1=p1[i2+1]+cost_del; if (c1<c0) c0=c1; c2=p2[i2]+cost_ins; if (c2<c0) c0=c2; p2[i2+1]=c0; } tmp=p1; p1=p2; p2=tmp; } c0=p1[l2]; return c0; } /////////////////////////////////////////////////////////////////////////////// String string_money_format(const char *format, double value) { bool check = false; const char *p = format; while ((p = strchr(p, '%'))) { if (*(p + 1) == '%') { p += 2; } else if (!check) { check = true; p++; } else { throw_invalid_argument ("format: Only a single %%i or %%n token can be used"); return String(); } } int format_len = strlen(format); int str_len = safe_address(format_len, 1, 1024); String ret(str_len, ReserveString); char *str = ret.mutableData(); if ((str_len = strfmon(str, str_len, format, value)) < 0) { return String(); } ret.setSize(str_len); return ret; } /////////////////////////////////////////////////////////////////////////////// String string_number_format(double 
d, int dec, const String& dec_point, const String& thousand_sep) { char *tmpbuf = nullptr, *resbuf; char *s, *t; /* source, target */ char *dp; int integral; int tmplen, reslen=0; int count=0; int is_negative=0; if (d < 0) { is_negative = 1; d = -d; } if (dec < 0) dec = 0; d = php_math_round(d, dec); // departure from PHP: we got rid of dependencies on spprintf() here. String tmpstr(63, ReserveString); tmpbuf = tmpstr.mutableData(); tmplen = snprintf(tmpbuf, 64, "%.*F", dec, d); if (tmplen < 0) return empty_string(); if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) { tmpstr.setSize(tmplen); return tmpstr; } if (tmplen >= 64) { // Uncommon, asked for more than 64 chars worth of precision tmpstr = String(tmplen, ReserveString); tmpbuf = tmpstr.mutableData(); tmplen = snprintf(tmpbuf, tmplen + 1, "%.*F", dec, d); if (tmplen < 0) return empty_string(); if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) { tmpstr.setSize(tmplen); return tmpstr; } } /* find decimal point, if expected */ if (dec) { dp = strpbrk(tmpbuf, ".,"); } else { dp = nullptr; } /* calculate the length of the return buffer */ if (dp) { integral = dp - tmpbuf; } else { /* no decimal point was found */ integral = tmplen; } /* allow for thousand separators */ if (!thousand_sep.empty()) { if (integral + thousand_sep.size() * ((integral-1) / 3) < integral) { /* overflow */ raise_error("String overflow"); } integral += ((integral-1) / 3) * thousand_sep.size(); } reslen = integral; if (dec) { reslen += dec; if (!dec_point.empty()) { if (reslen + dec_point.size() < dec_point.size()) { /* overflow */ raise_error("String overflow"); } reslen += dec_point.size(); } } /* add a byte for minus sign */ if (is_negative) { reslen++; } String resstr(reslen, ReserveString); resbuf = resstr.mutableData(); s = tmpbuf+tmplen-1; t = resbuf+reslen-1; /* copy the decimal places. * Take care, as the sprintf implementation may return less places than * we requested due to internal buffer limitations */ if (dec) { int declen = dp ? s - dp : 0; int topad = dec > declen ? 
dec - declen : 0; /* pad with '0's */ while (topad--) { *t-- = '0'; } if (dp) { s -= declen + 1; /* +1 to skip the point */ t -= declen; /* now copy the chars after the point */ memcpy(t + 1, dp + 1, declen); } /* add decimal point */ if (!dec_point.empty()) { memcpy(t + (1 - dec_point.size()), dec_point.data(), dec_point.size()); t -= dec_point.size(); } } /* copy the numbers before the decimal point, adding thousand * separator every three digits */ while(s >= tmpbuf) { *t-- = *s--; if (thousand_sep && (++count%3)==0 && s>=tmpbuf) { memcpy(t + (1 - thousand_sep.size()), thousand_sep.data(), thousand_sep.size()); t -= thousand_sep.size(); } } /* and a minus sign, if needed */ if (is_negative) { *t-- = '-'; } resstr.setSize(reslen); return resstr; } /////////////////////////////////////////////////////////////////////////////// // soundex /* Simple soundex algorithm as described by Knuth in TAOCP, vol 3 */ String string_soundex(const String& str) { assertx(!str.empty()); int _small, code, last; String retString(4, ReserveString); char* soundex = retString.mutableData(); static char soundex_table[26] = { 0, /* A */ '1', /* B */ '2', /* C */ '3', /* D */ 0, /* E */ '1', /* F */ '2', /* G */ 0, /* H */ 0, /* I */ '2', /* J */ '2', /* K */ '4', /* L */ '5', /* M */ '5', /* N */ 0, /* O */ '1', /* P */ '2', /* Q */ '6', /* R */ '2', /* S */ '3', /* T */ 0, /* U */ '1', /* V */ 0, /* W */ '2', /* X */ 0, /* Y */ '2' /* Z */ }; /* build soundex string */ last = -1; auto p = str.slice().data(); for (_small = 0; *p && _small < 4; p++) { /* convert chars to upper case and strip non-letter chars */ /* BUG: should also map here accented letters used in non */ /* English words or names (also found in English text!): */ /* esstsett, thorn, n-tilde, c-cedilla, s-caron, ... */ code = toupper((int)(unsigned char)(*p)); if (code >= 'A' && code <= 'Z') { if (_small == 0) { /* remember first valid char */ soundex[_small++] = code; last = soundex_table[code - 'A']; } else { /* ignore sequences of consonants with same soundex */ /* code in trail, and vowels unless they separate */ /* consonant letters */ code = soundex_table[code - 'A']; if (code != last) { if (code != 0) { soundex[_small++] = code; } last = code; } } } } /* pad with '0' and terminate with 0 ;-) */ while (_small < 4) { soundex[_small++] = '0'; } retString.setSize(4); return retString; } /////////////////////////////////////////////////////////////////////////////// // metaphone /** * this is now the original code by Michael G Schwern: * i've changed it just a slightly bit (use emalloc, * get rid of includes etc) * - thies - 13.09.1999 */ /*----------------------------- */ /* this used to be "metaphone.h" */ /*----------------------------- */ /* Special encodings */ #define SH 'X' #define TH '0' /*----------------------------- */ /* end of "metaphone.h" */ /*----------------------------- */ /*----------------------------- */ /* this used to be "metachar.h" */ /*----------------------------- */ /* Metachar.h ... little bits about characters for metaphone */ /*-- Character encoding array & accessing macros --*/ /* Stolen directly out of the book... */ char _codes[26] = { 1,16,4,16,9,2,4,16,9,2,0,2,2,2,1,4,0,2,4,4,1,0,0,0,8,0}; #define ENCODE(c) (isalpha(c) ? 
_codes[((toupper(c)) - 'A')] : 0) #define isvowel(c) (ENCODE(c) & 1) /* AEIOU */ /* These letters are passed through unchanged */ #define NOCHANGE(c) (ENCODE(c) & 2) /* FJMNR */ /* These form dipthongs when preceding H */ #define AFFECTH(c) (ENCODE(c) & 4) /* CGPST */ /* These make C and G soft */ #define MAKESOFT(c) (ENCODE(c) & 8) /* EIY */ /* These prevent GH from becoming F */ #define NOGHTOF(c) (ENCODE(c) & 16) /* BDH */ /*----------------------------- */ /* end of "metachar.h" */ /*----------------------------- */ /* I suppose I could have been using a character pointer instead of * accesssing the array directly... */ /* Look at the next letter in the word */ #define Next_Letter ((char)toupper(word[w_idx+1])) /* Look at the current letter in the word */ #define Curr_Letter ((char)toupper(word[w_idx])) /* Go N letters back. */ #define Look_Back_Letter(n) (w_idx >= n ? (char)toupper(word[w_idx-n]) : '\0') /* Previous letter. I dunno, should this return null on failure? */ #define Prev_Letter (Look_Back_Letter(1)) /* Look two letters down. It makes sure you don't walk off the string. */ #define After_Next_Letter (Next_Letter != '\0' ? (char)toupper(word[w_idx+2]) \ : '\0') #define Look_Ahead_Letter(n) ((char)toupper(Lookahead(word+w_idx, n))) /* Allows us to safely look ahead an arbitrary # of letters */ /* I probably could have just used strlen... */ static char Lookahead(unsigned char *word, int how_far) { char letter_ahead = '\0'; /* null by default */ int idx; for (idx = 0; word[idx] != '\0' && idx < how_far; idx++); /* Edge forward in the string... */ letter_ahead = (char)word[idx]; /* idx will be either == to how_far or * at the end of the string */ return letter_ahead; } /* phonize one letter * We don't know the buffers size in advance. On way to solve this is to just * re-allocate the buffer size. We're using an extra of 2 characters (this * could be one though; or more too). */ #define Phonize(c) { buffer.append(c); } /* How long is the phoned word? */ #define Phone_Len (buffer.size()) /* Note is a letter is a 'break' in the word */ #define Isbreak(c) (!isalpha(c)) String string_metaphone(const char *input, int word_len, long max_phonemes, int traditional) { unsigned char *word = (unsigned char *)input; int w_idx = 0; /* point in the phonization we're at. */ int max_buffer_len = 0; /* maximum length of the destination buffer */ /*-- Parameter checks --*/ /* Negative phoneme length is meaningless */ if (max_phonemes < 0) return String(); /* Empty/null string is meaningless */ /* Overly paranoid */ /* always_assert(word != NULL && word[0] != '\0'); */ if (word == nullptr) return String(); /*-- Allocate memory for our phoned_phrase --*/ if (max_phonemes == 0) { /* Assume largest possible */ max_buffer_len = word_len; } else { max_buffer_len = max_phonemes; } StringBuffer buffer(max_buffer_len); /*-- The first phoneme has to be processed specially. --*/ /* Find our first letter */ for (; !isalpha(Curr_Letter); w_idx++) { /* On the off chance we were given nothing but crap... 
*/ if (Curr_Letter == '\0') { return buffer.detach(); /* For testing */ } } switch (Curr_Letter) { /* AE becomes E */ case 'A': if (Next_Letter == 'E') { Phonize('E'); w_idx += 2; } /* Remember, preserve vowels at the beginning */ else { Phonize('A'); w_idx++; } break; /* [GKP]N becomes N */ case 'G': case 'K': case 'P': if (Next_Letter == 'N') { Phonize('N'); w_idx += 2; } break; /* WH becomes H, WR becomes R W if followed by a vowel */ case 'W': if (Next_Letter == 'H' || Next_Letter == 'R') { Phonize(Next_Letter); w_idx += 2; } else if (isvowel(Next_Letter)) { Phonize('W'); w_idx += 2; } /* else ignore */ break; /* X becomes S */ case 'X': Phonize('S'); w_idx++; break; /* Vowels are kept */ /* We did A already case 'A': case 'a': */ case 'E': case 'I': case 'O': case 'U': Phonize(Curr_Letter); w_idx++; break; default: /* do nothing */ break; } /* On to the metaphoning */ for (; Curr_Letter != '\0' && (max_phonemes == 0 || Phone_Len < max_phonemes); w_idx++) { /* How many letters to skip because an eariler encoding handled * multiple letters */ unsigned short int skip_letter = 0; /* THOUGHT: It would be nice if, rather than having things like... * well, SCI. For SCI you encode the S, then have to remember * to skip the C. So the phonome SCI invades both S and C. It would * be better, IMHO, to skip the C from the S part of the encoding. * Hell, I'm trying it. */ /* Ignore non-alphas */ if (!isalpha(Curr_Letter)) continue; /* Drop duplicates, except CC */ if (Curr_Letter == Prev_Letter && Curr_Letter != 'C') continue; switch (Curr_Letter) { /* B -> B unless in MB */ case 'B': if (Prev_Letter != 'M') Phonize('B'); break; /* 'sh' if -CIA- or -CH, but not SCH, except SCHW. * (SCHW is handled in S) * S if -CI-, -CE- or -CY- * dropped if -SCI-, SCE-, -SCY- (handed in S) * else K */ case 'C': if (MAKESOFT(Next_Letter)) { /* C[IEY] */ if (After_Next_Letter == 'A' && Next_Letter == 'I') { /* CIA */ Phonize(SH); } /* SC[IEY] */ else if (Prev_Letter == 'S') { /* Dropped */ } else { Phonize('S'); } } else if (Next_Letter == 'H') { if ((!traditional) && (After_Next_Letter == 'R' || Prev_Letter == 'S')) { /* Christ, School */ Phonize('K'); } else { Phonize(SH); } skip_letter++; } else { Phonize('K'); } break; /* J if in -DGE-, -DGI- or -DGY- * else T */ case 'D': if (Next_Letter == 'G' && MAKESOFT(After_Next_Letter)) { Phonize('J'); skip_letter++; } else Phonize('T'); break; /* F if in -GH and not B--GH, D--GH, -H--GH, -H---GH * else dropped if -GNED, -GN, * else dropped if -DGE-, -DGI- or -DGY- (handled in D) * else J if in -GE-, -GI, -GY and not GG * else K */ case 'G': if (Next_Letter == 'H') { if (!(NOGHTOF(Look_Back_Letter(3)) || Look_Back_Letter(4) == 'H')) { Phonize('F'); skip_letter++; } else { /* silent */ } } else if (Next_Letter == 'N') { if (Isbreak(After_Next_Letter) || (After_Next_Letter == 'E' && Look_Ahead_Letter(3) == 'D')) { /* dropped */ } else Phonize('K'); } else if (MAKESOFT(Next_Letter) && Prev_Letter != 'G') { Phonize('J'); } else { Phonize('K'); } break; /* H if before a vowel and not after C,G,P,S,T */ case 'H': if (isvowel(Next_Letter) && !AFFECTH(Prev_Letter)) Phonize('H'); break; /* dropped if after C * else K */ case 'K': if (Prev_Letter != 'C') Phonize('K'); break; /* F if before H * else P */ case 'P': if (Next_Letter == 'H') { Phonize('F'); } else { Phonize('P'); } break; /* K */ case 'Q': Phonize('K'); break; /* 'sh' in -SH-, -SIO- or -SIA- or -SCHW- * else S */ case 'S': if (Next_Letter == 'I' && (After_Next_Letter == 'O' || After_Next_Letter == 'A')) { Phonize(SH); } 
else if (Next_Letter == 'H') { Phonize(SH); skip_letter++; } else if ((!traditional) && (Next_Letter == 'C' && Look_Ahead_Letter(2) == 'H' && Look_Ahead_Letter(3) == 'W')) { Phonize(SH); skip_letter += 2; } else { Phonize('S'); } break; /* 'sh' in -TIA- or -TIO- * else 'th' before H * else T */ case 'T': if (Next_Letter == 'I' && (After_Next_Letter == 'O' || After_Next_Letter == 'A')) { Phonize(SH); } else if (Next_Letter == 'H') { Phonize(TH); skip_letter++; } else { Phonize('T'); } break; /* F */ case 'V': Phonize('F'); break; /* W before a vowel, else dropped */ case 'W': if (isvowel(Next_Letter)) Phonize('W'); break; /* KS */ case 'X': Phonize('K'); Phonize('S'); break; /* Y if followed by a vowel */ case 'Y': if (isvowel(Next_Letter)) Phonize('Y'); break; /* S */ case 'Z': Phonize('S'); break; /* No transformation */ case 'F': case 'J': case 'L': case 'M': case 'N': case 'R': Phonize(Curr_Letter); break; default: /* nothing */ break; } /* END SWITCH */ w_idx += skip_letter; } /* END FOR */ return buffer.detach(); } /////////////////////////////////////////////////////////////////////////////// // Cyrillic /** * This is codetables for different Cyrillic charsets (relative to koi8-r). * Each table contains data for 128-255 symbols from ASCII table. * First 256 symbols are for conversion from koi8-r to corresponding charset, * second 256 symbols are for reverse conversion, from charset to koi8-r. * * Here we have the following tables: * _cyr_win1251 - for windows-1251 charset * _cyr_iso88595 - for iso8859-5 charset * _cyr_cp866 - for x-cp866 charset * _cyr_mac - for x-mac-cyrillic charset */ typedef unsigned char _cyr_charset_table[512]; static const _cyr_charset_table _cyr_win1251 = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46, 46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46, 154,174,190,46,159,189,46,46,179,191,180,157,46,46,156,183, 46,46,182,166,173,46,46,158,163,152,164,155,46,46,46,167, 225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, 242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, 193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, 210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,184,186,32,179,191,32,32,32,32,32,180,162,32, 32,32,32,168,170,32,178,175,32,32,32,32,32,165,161,169, 254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238, 239,255,240,241,242,243,230,226,252,251,231,248,253,249,247,250, 222,192,193,214,196,197,212,195,213,200,201,202,203,204,205,206, 207,223,208,209,210,211,198,194,220,219,199,216,221,217,215,218, }; static const _cyr_charset_table _cyr_cp866 = { 
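/* first 256 entries map koi8-r -> cp866, the last 256 map cp866 -> koi8-r;
   the conversion routine below indexes the reverse half as to_table[c + 256] */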
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, 242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, 193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, 35,35,35,124,124,124,124,43,43,124,124,43,43,43,43,43, 43,45,45,124,45,43,124,124,43,43,45,45,124,45,43,45, 45,45,45,43,43,43,43,43,43,43,43,35,35,124,124,35, 210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209, 179,163,180,164,183,167,190,174,32,149,158,32,152,159,148,154, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 205,186,213,241,243,201,32,245,187,212,211,200,190,32,247,198, 199,204,181,240,242,185,32,244,203,207,208,202,216,32,246,32, 238,160,161,230,164,165,228,163,229,168,169,170,171,172,173,174, 175,239,224,225,226,227,166,162,236,235,167,232,237,233,231,234, 158,128,129,150,132,133,148,131,149,136,137,138,139,140,141,142, 143,159,144,145,146,147,134,130,156,155,135,152,157,153,151,154, }; static const _cyr_charset_table _cyr_iso88595 = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,179,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, 242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, 193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, 210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209, 32,163,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,241,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,161,32,32,32,32,32,32,32,32,32,32,32,32, 238,208,209,230,212,213,228,211,229,216,217,218,219,220,221,222, 223,239,224,225,226,227,214,210,236,235,215,232,237,233,231,234, 206,176,177,198,180,181,196,179,197,184,185,186,187,188,189,190, 
191,207,192,193,194,195,182,178,204,203,183,200,205,201,199,202, }; static const _cyr_charset_table _cyr_mac = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, 242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,179,163,209, 193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, 210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,255, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, 160,161,162,222,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,221,180,181,182,183,184,185,186,187,188,189,190,191, 254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238, 239,223,240,241,242,243,230,226,252,251,231,248,253,249,247,250, 158,128,129,150,132,133,148,131,149,136,137,138,139,140,141,142, 143,159,144,145,146,147,134,130,156,155,135,152,157,153,151,154, }; /** * This is the function that performs real in-place conversion of the string * between charsets. * Parameters: * str - string to be converted * from,to - one-symbol label of source and destination charset * The following symbols are used as labels: * k - koi8-r * w - windows-1251 * i - iso8859-5 * a - x-cp866 * d - x-cp866 * m - x-mac-cyrillic */ String string_convert_cyrillic_string(const String& input, char from, char to) { const unsigned char *from_table, *to_table; unsigned char tmp; auto uinput = (unsigned char*)input.slice().data(); String retString(input.size(), ReserveString); unsigned char *str = (unsigned char *)retString.mutableData(); from_table = nullptr; to_table = nullptr; switch (toupper((int)(unsigned char)from)) { case 'W': from_table = _cyr_win1251; break; case 'A': case 'D': from_table = _cyr_cp866; break; case 'I': from_table = _cyr_iso88595; break; case 'M': from_table = _cyr_mac; break; case 'K': break; default: throw_invalid_argument("Unknown source charset: %c", from); break; } switch (toupper((int)(unsigned char)to)) { case 'W': to_table = _cyr_win1251; break; case 'A': case 'D': to_table = _cyr_cp866; break; case 'I': to_table = _cyr_iso88595; break; case 'M': to_table = _cyr_mac; break; case 'K': break; default: throw_invalid_argument("Unknown destination charset: %c", to); break; } for (int i = 0; i < input.size(); i++) { tmp = from_table == nullptr ? uinput[i] : from_table[uinput[i]]; str[i] = to_table == nullptr ? 
tmp : to_table[tmp + 256]; } retString.setSize(input.size()); return retString; } /////////////////////////////////////////////////////////////////////////////// // Hebrew #define HEB_BLOCK_TYPE_ENG 1 #define HEB_BLOCK_TYPE_HEB 2 #define isheb(c) \ (((((unsigned char) c) >= 224) && (((unsigned char) c) <= 250)) ? 1 : 0) #define _isblank(c) \ (((((unsigned char) c) == ' ' || ((unsigned char) c) == '\t')) ? 1 : 0) #define _isnewline(c) \ (((((unsigned char) c) == '\n' || ((unsigned char) c) == '\r')) ? 1 : 0) /** * Converts Logical Hebrew text (Hebrew Windows style) to Visual text * Cheers/complaints/flames - Zeev Suraski <zeev@php.net> */ String string_convert_hebrew_string(const String& inStr, int /*max_chars_per_line*/, int convert_newlines) { assertx(!inStr.empty()); auto str = inStr.data(); auto str_len = inStr.size(); const char *tmp; char *heb_str, *broken_str; char *target; int block_start, block_end, block_type, block_length, i; long max_chars=0; int begin, end, char_count, orig_begin; tmp = str; block_start=block_end=0; heb_str = (char *) req::malloc_noptrs(str_len + 1); SCOPE_EXIT { req::free(heb_str); }; target = heb_str+str_len; *target = 0; target--; block_length=0; if (isheb(*tmp)) { block_type = HEB_BLOCK_TYPE_HEB; } else { block_type = HEB_BLOCK_TYPE_ENG; } do { if (block_type == HEB_BLOCK_TYPE_HEB) { while ((isheb((int)*(tmp+1)) || _isblank((int)*(tmp+1)) || ispunct((int)*(tmp+1)) || (int)*(tmp+1)=='\n' ) && block_end<str_len-1) { tmp++; block_end++; block_length++; } for (i = block_start; i<= block_end; i++) { *target = str[i]; switch (*target) { case '(': *target = ')'; break; case ')': *target = '('; break; case '[': *target = ']'; break; case ']': *target = '['; break; case '{': *target = '}'; break; case '}': *target = '{'; break; case '<': *target = '>'; break; case '>': *target = '<'; break; case '\\': *target = '/'; break; case '/': *target = '\\'; break; default: break; } target--; } block_type = HEB_BLOCK_TYPE_ENG; } else { while (!isheb(*(tmp+1)) && (int)*(tmp+1)!='\n' && block_end < str_len-1) { tmp++; block_end++; block_length++; } while ((_isblank((int)*tmp) || ispunct((int)*tmp)) && *tmp!='/' && *tmp!='-' && block_end > block_start) { tmp--; block_end--; } for (i = block_end; i >= block_start; i--) { *target = str[i]; target--; } block_type = HEB_BLOCK_TYPE_HEB; } block_start=block_end+1; } while (block_end < str_len-1); String brokenStr(str_len, ReserveString); broken_str = brokenStr.mutableData(); begin=end=str_len-1; target = broken_str; while (1) { char_count=0; while ((!max_chars || char_count < max_chars) && begin > 0) { char_count++; begin--; if (begin <= 0 || _isnewline(heb_str[begin])) { while (begin > 0 && _isnewline(heb_str[begin-1])) { begin--; char_count++; } break; } } if (char_count == max_chars) { /* try to avoid breaking words */ int new_char_count=char_count, new_begin=begin; while (new_char_count > 0) { if (_isblank(heb_str[new_begin]) || _isnewline(heb_str[new_begin])) { break; } new_begin++; new_char_count--; } if (new_char_count > 0) { char_count=new_char_count; begin=new_begin; } } orig_begin=begin; if (_isblank(heb_str[begin])) { heb_str[begin]='\n'; } while (begin <= end && _isnewline(heb_str[begin])) { /* skip leading newlines */ begin++; } for (i = begin; i <= end; i++) { /* copy content */ *target = heb_str[i]; target++; } for (i = orig_begin; i <= end && _isnewline(heb_str[i]); i++) { *target = heb_str[i]; target++; } begin=orig_begin; if (begin <= 0) { *target = 0; break; } begin--; end=begin; } if (convert_newlines) { int count; 
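/* turn each "\n" of the visual text into "<br />\n"; string_replace()
   returns a null String when nothing was replaced, hence the isNull()
   check below */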
auto ret = string_replace(broken_str, str_len, "\n", strlen("\n"), "<br />\n", strlen("<br />\n"), count, true); if (!ret.isNull()) { return ret; } } brokenStr.setSize(str_len); return brokenStr; } /////////////////////////////////////////////////////////////////////////////// }
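/* Usage sketch (hypothetical caller, not part of this file): both helpers
 * above are plain value functions on HPHP::String, e.g.
 *
 *   String koi = ...;                                         // koi8-r bytes
 *   String win = string_convert_cyrillic_string(koi, 'k', 'w');
 *   String vis = string_convert_hebrew_string(logicalHeb, 0, 1);
 *
 * where 'k'/'w'/'i'/'a'/'d'/'m' pick the charsets documented above and the
 * final argument asks for "\n" -> "<br />\n" conversion. */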
/* +----------------------------------------------------------------------+ | HipHop for PHP | +----------------------------------------------------------------------+ | Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) | | Copyright (c) 1998-2010 Zend Technologies Ltd. (http://www.zend.com) | +----------------------------------------------------------------------+ | This source file is subject to version 2.00 of the Zend license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.zend.com/license/2_00.txt. | | If you did not receive a copy of the Zend license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@zend.com so we can mail you a copy immediately. | +----------------------------------------------------------------------+ */ // NOTE: See also "hphp/zend/zend-string.*". #include "hphp/runtime/base/zend-string.h" #include "hphp/runtime/base/zend-printf.h" #include "hphp/runtime/base/zend-math.h" #include "hphp/util/lock.h" #include "hphp/util/overflow.h" #include <algorithm> #include <cmath> #ifndef _MSC_VER #include <monetary.h> #endif #include "hphp/util/bstring.h" #include "hphp/runtime/base/exceptions.h" #include "hphp/runtime/base/string-buffer.h" #include "hphp/runtime/base/runtime-error.h" #include "hphp/runtime/base/string-util.h" #include "hphp/runtime/base/builtin-functions.h" #include <folly/portability/String.h> #define PHP_QPRINT_MAXL 75 namespace HPHP { /////////////////////////////////////////////////////////////////////////////// // helpers void string_charmask(const char *sinput, int len, char *mask) { const unsigned char *input = (unsigned char *)sinput; const unsigned char *end; unsigned char c; memset(mask, 0, 256); for (end = input+len; input < end; input++) { c=*input; if ((input+3 < end) && input[1] == '.' && input[2] == '.' && input[3] >= c) { memset(mask+c, 1, input[3] - c + 1); input+=3; } else if ((input+1 < end) && input[0] == '.' && input[1] == '.') { /* Error, try to be as helpful as possible: (a range ending/starting with '.' won't be captured here) */ if (end-len >= input) { /* there was no 'left' char */ throw_invalid_argument ("charlist: Invalid '..'-range, missing left of '..'"); continue; } if (input+2 >= end) { /* there is no 'right' char */ throw_invalid_argument ("charlist: Invalid '..'-range, missing right of '..'"); continue; } if (input[-1] > input[2]) { /* wrong order */ throw_invalid_argument ("charlist: '..'-range needs to be incrementing"); continue; } /* FIXME: better error (a..b..c is the only left possibility?) */ throw_invalid_argument("charlist: Invalid '..'-range"); continue; } else { mask[c]=1; } } } /////////////////////////////////////////////////////////////////////////////// void string_to_case(String& s, int (*tocase)(int)) { assertx(!s.isNull()); assertx(tocase); auto data = s.mutableData(); auto len = s.size(); for (int i = 0; i < len; i++) { data[i] = tocase(data[i]); } } /////////////////////////////////////////////////////////////////////////////// #define STR_PAD_LEFT 0 #define STR_PAD_RIGHT 1 #define STR_PAD_BOTH 2 String string_pad(const char *input, int len, int pad_length, const char *pad_string, int pad_str_len, int pad_type) { assertx(input); int num_pad_chars = pad_length - len; /* If resulting string turns out to be shorter than input string, we simply copy the input and return. 
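For example, padding "abc" to pad_length 7 with pad_string "xy" and
STR_PAD_BOTH yields "xyabcxy": num_pad_chars is 4, split into left_pad 2
and right_pad 2, with the pad string repeated cyclically on each side.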
*/ if (pad_length < 0 || num_pad_chars < 0) { return String(input, len, CopyString); } /* Setup the padding string values if specified. */ if (pad_str_len == 0) { throw_invalid_argument("pad_string: (empty)"); return String(); } String ret(pad_length, ReserveString); char *result = ret.mutableData(); /* We need to figure out the left/right padding lengths. */ int left_pad, right_pad; switch (pad_type) { case STR_PAD_RIGHT: left_pad = 0; right_pad = num_pad_chars; break; case STR_PAD_LEFT: left_pad = num_pad_chars; right_pad = 0; break; case STR_PAD_BOTH: left_pad = num_pad_chars / 2; right_pad = num_pad_chars - left_pad; break; default: throw_invalid_argument("pad_type: %d", pad_type); return String(); } /* First we pad on the left. */ int result_len = 0; for (int i = 0; i < left_pad; i++) { result[result_len++] = pad_string[i % pad_str_len]; } /* Then we copy the input string. */ memcpy(result + result_len, input, len); result_len += len; /* Finally, we pad on the right. */ for (int i = 0; i < right_pad; i++) { result[result_len++] = pad_string[i % pad_str_len]; } ret.setSize(result_len); return ret; } /////////////////////////////////////////////////////////////////////////////// int string_find(const char *input, int len, char ch, int pos, bool case_sensitive) { assertx(input); if (pos < 0 || pos > len) { return -1; } const void *ptr; if (case_sensitive) { ptr = memchr(input + pos, ch, len - pos); } else { ptr = bstrcasechr(input + pos, ch, len - pos); } if (ptr != nullptr) { return (int)((const char *)ptr - input); } return -1; } int string_rfind(const char *input, int len, char ch, int pos, bool case_sensitive) { assertx(input); if (pos < -len || pos > len) { return -1; } const void *ptr; if (case_sensitive) { if (pos >= 0) { ptr = memrchr(input + pos, ch, len - pos); } else { ptr = memrchr(input, ch, len + pos + 1); } } else { if (pos >= 0) { ptr = bstrrcasechr(input + pos, ch, len - pos); } else { ptr = bstrrcasechr(input, ch, len + pos + 1); } } if (ptr != nullptr) { return (int)((const char *)ptr - input); } return -1; } int string_find(const char *input, int len, const char *s, int s_len, int pos, bool case_sensitive) { assertx(input); assertx(s); if (!s_len || pos < 0 || pos > len) { return -1; } void *ptr; if (case_sensitive) { ptr = (void*)string_memnstr(input + pos, s, s_len, input + len); } else { ptr = bstrcasestr(input + pos, len - pos, s, s_len); } if (ptr != nullptr) { return (int)((const char *)ptr - input); } return -1; } int string_rfind(const char *input, int len, const char *s, int s_len, int pos, bool case_sensitive) { assertx(input); assertx(s); if (!s_len || pos < -len || pos > len) { return -1; } void *ptr; if (case_sensitive) { if (pos >= 0) { ptr = bstrrstr(input + pos, len - pos, s, s_len); } else { ptr = bstrrstr(input, len + std::min(pos + s_len, 0), s, s_len); } } else { if (pos >= 0) { ptr = bstrrcasestr(input + pos, len - pos, s, s_len); } else { ptr = bstrrcasestr(input, len + std::min(pos + s_len, 0), s, s_len); } } if (ptr != nullptr) { return (int)((const char *)ptr - input); } return -1; } const char *string_memnstr(const char *haystack, const char *needle, int needle_len, const char *end) { const char *p = haystack; char ne = needle[needle_len-1]; end -= needle_len; while (p <= end) { if ((p = (char *)memchr(p, *needle, (end-p+1))) && ne == p[needle_len-1]) { if (!memcmp(needle, p, needle_len-1)) { return p; } } if (p == nullptr) { return nullptr; } p++; } return nullptr; } String string_replace(const char *s, int len, int start, int length, const 
char *replacement, int len_repl) { assertx(s); assertx(replacement); assertx(len >= 0); // if "start" position is negative, count start position from the end // of the string if (start < 0) { start = len + start; if (start < 0) { start = 0; } } if (start > len) { start = len; } // if "length" position is negative, set it to the length // needed to stop that many chars from the end of the string if (length < 0) { length = (len - start) + length; if (length < 0) { length = 0; } } // check if length is too large if (length > len) { length = len; } // check if the length is too large adjusting for non-zero start // Write this way instead of start + length > len to avoid overflow if (length > len - start) { length = len - start; } String retString(len + len_repl - length, ReserveString); char *ret = retString.mutableData(); int ret_len = 0; if (start) { memcpy(ret, s, start); ret_len += start; } if (len_repl) { memcpy(ret + ret_len, replacement, len_repl); ret_len += len_repl; } len -= (start + length); if (len) { memcpy(ret + ret_len, s + start + length, len); ret_len += len; } retString.setSize(ret_len); return retString; } String string_replace(const char *input, int len, const char *search, int len_search, const char *replacement, int len_replace, int &count, bool case_sensitive) { assertx(input); assertx(search && len_search); assertx(len >= 0); assertx(len_search >= 0); assertx(len_replace >= 0); if (len == 0) { return String(); } req::vector<int> founds; founds.reserve(16); if (len_search == 1) { for (int pos = string_find(input, len, *search, 0, case_sensitive); pos >= 0; pos = string_find(input, len, *search, pos + len_search, case_sensitive)) { founds.push_back(pos); } } else { for (int pos = string_find(input, len, search, len_search, 0, case_sensitive); pos >= 0; pos = string_find(input, len, search, len_search, pos + len_search, case_sensitive)) { founds.push_back(pos); } } count = founds.size(); if (count == 0) { return String(); // not found } int reserve; // Make sure the new size of the string wouldn't overflow int32_t. Don't // bother if the replacement wouldn't make the string longer. if (len_replace > len_search) { auto raise = [&] { raise_error("String too large"); }; if (mul_overflow(len_replace - len_search, count)) { raise(); } int diff = (len_replace - len_search) * count; if (add_overflow(len, diff)) { raise(); } reserve = len + diff; } else { reserve = len + (len_replace - len_search) * count; } String retString(reserve, ReserveString); char *ret = retString.mutableData(); char *p = ret; int pos = 0; // last position in input that hasn't been copied over yet int n; for (unsigned int i = 0; i < founds.size(); i++) { n = founds[i]; if (n > pos) { n -= pos; memcpy(p, input, n); p += n; input += n; pos += n; } if (len_replace) { memcpy(p, replacement, len_replace); p += len_replace; } input += len_search; pos += len_search; } n = len; if (n > pos) { n -= pos; memcpy(p, input, n); p += n; } retString.setSize(p - ret); return retString; } /////////////////////////////////////////////////////////////////////////////// String string_chunk_split(const char *src, int srclen, const char *end, int endlen, int chunklen) { int chunks = srclen / chunklen; // complete chunks! 
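/* reserve the worst case up front: each of the "chunks" complete chunks
   plus a possible remainder is followed by one copy of "end", i.e. at most
   (chunks + 1) * endlen + srclen bytes, which is what safe_address()
   computes below */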
int restlen = srclen - chunks * chunklen; /* srclen % chunklen */
String ret( safe_address( chunks + 1, endlen, srclen ), ReserveString );
char *dest = ret.mutableData();
const char *p;
char *q;
const char *pMax = src + srclen - chunklen + 1;
for (p = src, q = dest; p < pMax; ) {
  memcpy(q, p, chunklen);
  q += chunklen;
  memcpy(q, end, endlen);
  q += endlen;
  p += chunklen;
}
if (restlen) {
  memcpy(q, p, restlen);
  q += restlen;
  memcpy(q, end, endlen);
  q += endlen;
}
ret.setSize(q - dest);
return ret;
}
///////////////////////////////////////////////////////////////////////////////
#define PHP_TAG_BUF_SIZE 1023
/**
 * Check if tag is in a set of tags
 *
 * states:
 *
 * 0 start tag
 * 1 first non-whitespace char seen
 */
static int string_tag_find(const char *tag, int len, const char *set) {
  char c, *n;
  const char *t;
  int state=0, done=0;
  char *norm;
  if (len <= 0) {
    return 0;
  }
  norm = (char *)req::malloc_noptrs(len+1);
  SCOPE_EXIT { req::free(norm); };
  n = norm;
  t = tag;
  c = tolower(*t);
  /* normalize the tag removing leading and trailing whitespace and turn
     any <a whatever...> into just <a> and any </tag> into <tag> */
  while (!done) {
    switch (c) {
    case '<':
      *(n++) = c;
      break;
    case '>':
      done =1;
      break;
    default:
      if (!isspace((int)c)) {
        if (state == 0) {
          state=1;
        }
        if (c != '/') {
          *(n++) = c;
        }
      } else {
        if (state == 1) done=1;
      }
      break;
    }
    c = tolower(*(++t));
  }
  *(n++) = '>';
  *n = '\0';
  if (strstr(set, norm)) {
    done=1;
  } else {
    done=0;
  }
  return done;
}
/**
 * A simple little state-machine to strip out html and php tags
 *
 * State 0 is the output state, State 1 means we are inside a
 * normal html tag and state 2 means we are inside a php tag.
 *
 * The state variable is passed in to allow a function like fgetss
 * to maintain state across calls to the function.
 *
 * lc holds the last significant character read and br is a bracket
 * counter.
 *
 * When an allow string is passed in we keep track of the string
 * in state 1 and when the tag is closed check it against the
 * allow string to see if we should allow it.
 * swm: Added ability to strip <?xml tags without assuming it is PHP
 * code.
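 * Illustration of the net effect (not a doctest): with allow = "<b>", an
 * input like "a <b>x</b> <?php echo 1 ?> c" keeps the <b>...</b> pair,
 * drops the php block entirely, and copies the remaining text through.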
*/ String string_strip_tags(const char *s, const int len, const char *allow, const int allow_len, bool allow_tag_spaces) { const char *abuf, *p; char *rbuf, *tbuf, *tp, *rp, c, lc; int br, i=0, depth=0, in_q = 0; int state = 0, pos; assertx(s); assertx(allow); String retString(s, len, CopyString); rbuf = retString.mutableData(); String allowString; c = *s; lc = '\0'; p = s; rp = rbuf; br = 0; if (allow_len) { assertx(allow); allowString = String(allow_len, ReserveString); char *atmp = allowString.mutableData(); for (const char *tmp = allow; *tmp; tmp++, atmp++) { *atmp = tolower((int)*(const unsigned char *)tmp); } allowString.setSize(allow_len); abuf = allowString.data(); tbuf = (char *)req::malloc_noptrs(PHP_TAG_BUF_SIZE+1); tp = tbuf; } else { abuf = nullptr; tbuf = tp = nullptr; } auto move = [&pos, &tbuf, &tp]() { if (tp - tbuf >= PHP_TAG_BUF_SIZE) { pos = tp - tbuf; tbuf = (char*)req::realloc_noptrs(tbuf, (tp - tbuf) + PHP_TAG_BUF_SIZE + 1); tp = tbuf + pos; } }; while (i < len) { switch (c) { case '\0': break; case '<': if (isspace(*(p + 1)) && !allow_tag_spaces) { goto reg_char; } if (state == 0) { lc = '<'; state = 1; if (allow_len) { move(); *(tp++) = '<'; } } else if (state == 1) { depth++; } break; case '(': if (state == 2) { if (lc != '"' && lc != '\'') { lc = '('; br++; } } else if (allow_len && state == 1) { move(); *(tp++) = c; } else if (state == 0) { *(rp++) = c; } break; case ')': if (state == 2) { if (lc != '"' && lc != '\'') { lc = ')'; br--; } } else if (allow_len && state == 1) { move(); *(tp++) = c; } else if (state == 0) { *(rp++) = c; } break; case '>': if (depth) { depth--; break; } if (in_q) { break; } switch (state) { case 1: /* HTML/XML */ lc = '>'; in_q = state = 0; if (allow_len) { move(); *(tp++) = '>'; *tp='\0'; if (string_tag_find(tbuf, tp-tbuf, abuf)) { memcpy(rp, tbuf, tp-tbuf); rp += tp-tbuf; } tp = tbuf; } break; case 2: /* PHP */ if (!br && lc != '\"' && *(p-1) == '?') { in_q = state = 0; tp = tbuf; } break; case 3: in_q = state = 0; tp = tbuf; break; case 4: /* JavaScript/CSS/etc... */ if (p >= s + 2 && *(p-1) == '-' && *(p-2) == '-') { in_q = state = 0; tp = tbuf; } break; default: *(rp++) = c; break; } break; case '"': case '\'': if (state == 4) { /* Inside <!-- comment --> */ break; } else if (state == 2 && *(p-1) != '\\') { if (lc == c) { lc = '\0'; } else if (lc != '\\') { lc = c; } } else if (state == 0) { *(rp++) = c; } else if (allow_len && state == 1) { move(); *(tp++) = c; } if (state && p != s && *(p-1) != '\\' && (!in_q || *p == in_q)) { if (in_q) { in_q = 0; } else { in_q = *p; } } break; case '!': /* JavaScript & Other HTML scripting languages */ if (state == 1 && *(p-1) == '<') { state = 3; lc = c; } else { if (state == 0) { *(rp++) = c; } else if (allow_len && state == 1) { move(); *(tp++) = c; } } break; case '-': if (state == 3 && p >= s + 2 && *(p-1) == '-' && *(p-2) == '!') { state = 4; } else { goto reg_char; } break; case '?': if (state == 1 && *(p-1) == '<') { br=0; state=2; break; } case 'E': case 'e': /* !DOCTYPE exception */ if (state==3 && p > s+6 && tolower(*(p-1)) == 'p' && tolower(*(p-2)) == 'y' && tolower(*(p-3)) == 't' && tolower(*(p-4)) == 'c' && tolower(*(p-5)) == 'o' && tolower(*(p-6)) == 'd') { state = 1; break; } /* fall-through */ case 'l': /* swm: If we encounter '<?xml' then we shouldn't be in * state == 2 (PHP). Switch back to HTML. 
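 * (Concretely: an 'l' seen in state 2 with 'm' and 'x' immediately before
 * it is taken to mean "<?xml", so we fall back to state 1 and the
 * declaration is handled like an ordinary tag.)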
*/ if (state == 2 && p > s+2 && *(p-1) == 'm' && *(p-2) == 'x') { state = 1; break; } /* fall-through */ default: reg_char: if (state == 0) { *(rp++) = c; } else if (allow_len && state == 1) { move(); *(tp++) = c; } break; } c = *(++p); i++; } if (rp < rbuf + len) { *rp = '\0'; } if (allow_len) { req::free(tbuf); } retString.setSize(rp - rbuf); return retString; } /////////////////////////////////////////////////////////////////////////////// static char string_hex2int(int c) { if (isdigit(c)) { return c - '0'; } if (c >= 'A' && c <= 'F') { return c - 'A' + 10; } if (c >= 'a' && c <= 'f') { return c - 'a' + 10; } return -1; } String string_quoted_printable_encode(const char *input, int len) { size_t length = len; const unsigned char *str = (unsigned char*)input; unsigned long lp = 0; unsigned char c; char *d, *buffer; char *hex = "0123456789ABCDEF"; String ret( safe_address( 3, length + ((safe_address(3, length, 0)/(PHP_QPRINT_MAXL-9)) + 1), 1), ReserveString ); d = buffer = ret.mutableData(); while (length--) { if (((c = *str++) == '\015') && (*str == '\012') && length > 0) { *d++ = '\015'; *d++ = *str++; length--; lp = 0; } else { if (iscntrl (c) || (c == 0x7f) || (c & 0x80) || (c == '=') || ((c == ' ') && (*str == '\015'))) { if ((((lp+= 3) > PHP_QPRINT_MAXL) && (c <= 0x7f)) || ((c > 0x7f) && (c <= 0xdf) && ((lp + 3) > PHP_QPRINT_MAXL)) || ((c > 0xdf) && (c <= 0xef) && ((lp + 6) > PHP_QPRINT_MAXL)) || ((c > 0xef) && (c <= 0xf4) && ((lp + 9) > PHP_QPRINT_MAXL))) { *d++ = '='; *d++ = '\015'; *d++ = '\012'; lp = 3; } *d++ = '='; *d++ = hex[c >> 4]; *d++ = hex[c & 0xf]; } else { if ((++lp) > PHP_QPRINT_MAXL) { *d++ = '='; *d++ = '\015'; *d++ = '\012'; lp = 1; } *d++ = c; } } } len = d - buffer; ret.setSize(len); return ret; } String string_quoted_printable_decode(const char *input, int len, bool is_q) { assertx(input); if (len == 0) { return String(); } int i = 0, j = 0, k; const char *str_in = input; String ret(len, ReserveString); char *str_out = ret.mutableData(); while (i < len && str_in[i]) { switch (str_in[i]) { case '=': if (i + 2 < len && str_in[i + 1] && str_in[i + 2] && isxdigit((int) str_in[i + 1]) && isxdigit((int) str_in[i + 2])) { str_out[j++] = (string_hex2int((int) str_in[i + 1]) << 4) + string_hex2int((int) str_in[i + 2]); i += 3; } else /* check for soft line break according to RFC 2045*/ { k = 1; while (str_in[i + k] && ((str_in[i + k] == 32) || (str_in[i + k] == 9))) { /* Possibly, skip spaces/tabs at the end of line */ k++; } if (!str_in[i + k]) { /* End of line reached */ i += k; } else if ((str_in[i + k] == 13) && (str_in[i + k + 1] == 10)) { /* CRLF */ i += k + 2; } else if ((str_in[i + k] == 13) || (str_in[i + k] == 10)) { /* CR or LF */ i += k + 1; } else { str_out[j++] = str_in[i++]; } } break; case '_': if (is_q) { str_out[j++] = ' '; i++; } else { str_out[j++] = str_in[i++]; } break; default: str_out[j++] = str_in[i++]; } } ret.setSize(j); return ret; } Variant string_base_to_numeric(const char *s, int len, int base) { int64_t num = 0; double fnum = 0; int mode = 0; int64_t cutoff; int cutlim; assertx(string_validate_base(base)); cutoff = LONG_MAX / base; cutlim = LONG_MAX % base; for (int i = len; i > 0; i--) { char c = *s++; /* might not work for EBCDIC */ if (c >= '0' && c <= '9') c -= '0'; else if (c >= 'A' && c <= 'Z') c -= 'A' - 10; else if (c >= 'a' && c <= 'z') c -= 'a' - 10; else continue; if (c >= base) continue; switch (mode) { case 0: /* Integer */ if (num < cutoff || (num == cutoff && c <= cutlim)) { num = num * base + c; break; } else { fnum = num; 
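/* the integer accumulator is about to exceed LONG_MAX, so carry the
   current value into the double and continue accumulating in float mode */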
mode = 1; } /* fall-through */ case 1: /* Float */ fnum = fnum * base + c; } } if (mode == 1) { return fnum; } return num; } String string_long_to_base(unsigned long value, int base) { static char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; char buf[(sizeof(unsigned long) << 3) + 1]; char *ptr, *end; assertx(string_validate_base(base)); end = ptr = buf + sizeof(buf) - 1; do { *--ptr = digits[value % base]; value /= base; } while (ptr > buf && value); return String(ptr, end - ptr, CopyString); } String string_numeric_to_base(const Variant& value, int base) { static char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; assertx(string_validate_base(base)); if ((!value.isInteger() && !value.isDouble())) { return empty_string(); } if (value.isDouble()) { double fvalue = floor(value.toDouble()); /* floor it just in case */ char *ptr, *end; char buf[(sizeof(double) << 3) + 1]; /* Don't try to convert +/- infinity */ if (fvalue == HUGE_VAL || fvalue == -HUGE_VAL) { raise_warning("Number too large"); return empty_string(); } end = ptr = buf + sizeof(buf) - 1; do { *--ptr = digits[(int) fmod(fvalue, base)]; fvalue /= base; } while (ptr > buf && fabs(fvalue) >= 1); return String(ptr, end - ptr, CopyString); } return string_long_to_base(value.toInt64(), base); } /////////////////////////////////////////////////////////////////////////////// // uuencode #define PHP_UU_ENC(c) \ ((c) ? ((c) & 077) + ' ' : '`') #define PHP_UU_ENC_C2(c) \ PHP_UU_ENC(((*(c) * 16) & 060) | ((*((c) + 1) >> 4) & 017)) #define PHP_UU_ENC_C3(c) \ PHP_UU_ENC(((*(c + 1) * 4) & 074) | ((*((c) + 2) >> 6) & 03)) #define PHP_UU_DEC(c) \ (((c) - ' ') & 077) String string_uuencode(const char *src, int src_len) { assertx(src); assertx(src_len); int len = 45; char *p; const char *s, *e, *ee; char *dest; /* encoded length is ~ 38% greater than the original */ String ret((int)ceil(src_len * 1.38) + 45, ReserveString); p = dest = ret.mutableData(); s = src; e = src + src_len; while ((s + 3) < e) { ee = s + len; if (ee > e) { ee = e; len = ee - s; if (len % 3) { ee = s + (int) (floor(len / 3) * 3); } } *p++ = PHP_UU_ENC(len); while (s < ee) { *p++ = PHP_UU_ENC(*s >> 2); *p++ = PHP_UU_ENC_C2(s); *p++ = PHP_UU_ENC_C3(s); *p++ = PHP_UU_ENC(*(s + 2) & 077); s += 3; } if (len == 45) { *p++ = '\n'; } } if (s < e) { if (len == 45) { *p++ = PHP_UU_ENC(e - s); len = 0; } *p++ = PHP_UU_ENC(*s >> 2); *p++ = PHP_UU_ENC_C2(s); *p++ = ((e - s) > 1) ? PHP_UU_ENC_C3(s) : PHP_UU_ENC('\0'); *p++ = ((e - s) > 2) ? PHP_UU_ENC(*(s + 2) & 077) : PHP_UU_ENC('\0'); } if (len < 45) { *p++ = '\n'; } *p++ = PHP_UU_ENC('\0'); *p++ = '\n'; *p = '\0'; ret.setSize(p - dest); return ret; } String string_uudecode(const char *src, int src_len) { int total_len = 0; int len; const char *s, *e, *ee; char *p, *dest; String ret(ceil(src_len * 0.75), ReserveString); p = dest = ret.mutableData(); s = src; e = src + src_len; while (s < e) { if ((len = PHP_UU_DEC(*s++)) <= 0) { break; } /* sanity check */ if (len > src_len) { goto err; } total_len += len; ee = s + (len == 45 ? 
60 : (int) floor(len * 1.33)); /* sanity check */ if (ee > e) { goto err; } while (s < ee) { if (s + 4 > e) goto err; *p++ = PHP_UU_DEC(*s) << 2 | PHP_UU_DEC(*(s + 1)) >> 4; *p++ = PHP_UU_DEC(*(s + 1)) << 4 | PHP_UU_DEC(*(s + 2)) >> 2; *p++ = PHP_UU_DEC(*(s + 2)) << 6 | PHP_UU_DEC(*(s + 3)); s += 4; } if (len < 45) { break; } /* skip \n */ s++; } if ((len = total_len > (p - dest))) { *p++ = PHP_UU_DEC(*s) << 2 | PHP_UU_DEC(*(s + 1)) >> 4; if (len > 1) { *p++ = PHP_UU_DEC(*(s + 1)) << 4 | PHP_UU_DEC(*(s + 2)) >> 2; if (len > 2) { *p++ = PHP_UU_DEC(*(s + 2)) << 6 | PHP_UU_DEC(*(s + 3)); } } } ret.setSize(total_len); return ret; err: return String(); } /////////////////////////////////////////////////////////////////////////////// // base64 namespace { const char base64_table[] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/', '\0' }; const char base64_pad = '='; const short base64_reverse_table[256] = { -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -1, -2, -2, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -1, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 62, -2, -2, -2, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, -2, -2, -2, -2, -2, -2, -2, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, -2, -2, -2, -2, -2, -2, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2 }; folly::Optional<int> maxEncodedSize(int length) { if ((length + 2) < 0 || ((length + 2) / 3) >= (1 << (sizeof(int) * 8 - 2))) { return folly::none; } return ((length + 2) / 3) * 4; } // outstr must be at least maxEncodedSize(length) bytes size_t php_base64_encode(const unsigned char *str, int length, unsigned char* outstr) { const unsigned char *current = str; unsigned char *p = outstr; while (length > 2) { /* keep going until we have less than 24 bits */ *p++ = base64_table[current[0] >> 2]; *p++ = base64_table[((current[0] & 0x03) << 4) + (current[1] >> 4)]; *p++ = base64_table[((current[1] & 0x0f) << 2) + (current[2] >> 6)]; *p++ = base64_table[current[2] & 0x3f]; current += 3; length -= 3; /* we just handle 3 octets of data */ } /* now deal with the tail end of things */ if (length != 0) { *p++ = base64_table[current[0] >> 2]; if (length > 1) { *p++ = base64_table[((current[0] & 0x03) << 4) + (current[1] >> 4)]; *p++ = base64_table[(current[1] & 0x0f) << 2]; *p++ = base64_pad; } else { *p++ = base64_table[(current[0] & 0x03) << 4]; *p++ = base64_pad; *p++ = base64_pad; } } return p - outstr; } // outstr must be at least length bytes ssize_t php_base64_decode(const char *str, int length, bool strict, unsigned char* outstr) { const unsigned char *current = (unsigned char*)str; int ch, i = 0, j = 0, k; /* this sucks for 
threaded environments */ unsigned char* result = outstr; /* run through the whole string, converting as we go */ while ((ch = *current++) != '\0' && length-- > 0) { if (ch == base64_pad) { if (*current != '=' && ((i % 4) == 1 || (strict && length > 0))) { if ((i % 4) != 1) { while (isspace(*(++current))) { continue; } if (*current == '\0') { continue; } } return -1; } continue; } ch = base64_reverse_table[ch]; if ((!strict && ch < 0) || ch == -1) { /* a space or some other separator character, we simply skip over */ continue; } else if (ch == -2) { return -1; } switch(i % 4) { case 0: result[j] = ch << 2; break; case 1: result[j++] |= ch >> 4; result[j] = (ch & 0x0f) << 4; break; case 2: result[j++] |= ch >>2; result[j] = (ch & 0x03) << 6; break; case 3: result[j++] |= ch; break; } i++; } k = j; /* mop things up if we ended on a boundary */ if (ch == base64_pad) { switch(i % 4) { case 1: return -1; case 2: k++; case 3: result[k] = 0; } } return j; } } String string_base64_encode(const char* input, int len) { if (auto const wantedSize = maxEncodedSize(len)) { String ret(*wantedSize, ReserveString); auto actualSize = php_base64_encode((unsigned char*)input, len, (unsigned char*)ret.mutableData()); ret.setSize(actualSize); return ret; } return String(); } String string_base64_decode(const char* input, int len, bool strict) { String ret(len, ReserveString); auto actualSize = php_base64_decode(input, len, strict, (unsigned char*)ret.mutableData()); if (actualSize < 0) return String(); ret.setSize(actualSize); return ret; } std::string base64_encode(const char* input, int len) { if (auto const wantedSize = maxEncodedSize(len)) { std::string ret; ret.resize(*wantedSize); auto actualSize = php_base64_encode((unsigned char*)input, len, (unsigned char*)ret.data()); ret.resize(actualSize); return ret; } return std::string(); } std::string base64_decode(const char* input, int len, bool strict) { if (!len) return std::string(); std::string ret; ret.resize(len); auto actualSize = php_base64_decode(input, len, strict, (unsigned char*)ret.data()); if (!actualSize) return std::string(); ret.resize(actualSize); return ret; } /////////////////////////////////////////////////////////////////////////////// String string_escape_shell_arg(const char *str) { int x, y, l; char *cmd; y = 0; l = strlen(str); String ret(safe_address(l, 4, 3), ReserveString); /* worst case */ cmd = ret.mutableData(); #ifdef _MSC_VER cmd[y++] = '"'; #else cmd[y++] = '\''; #endif for (x = 0; x < l; x++) { switch (str[x]) { #ifdef _MSC_VER case '"': case '%': case '!': cmd[y++] = ' '; break; #else case '\'': cmd[y++] = '\''; cmd[y++] = '\\'; cmd[y++] = '\''; #endif /* fall-through */ default: cmd[y++] = str[x]; } } #ifdef _MSC_VER if (y > 0 && '\\' == cmd[y - 1]) { int k = 0, n = y - 1; for (; n >= 0 && '\\' == cmd[n]; n--, k++); if (k % 2) { cmd[y++] = '\\'; } } cmd[y++] = '"'; #else cmd[y++] = '\''; #endif ret.setSize(y); return ret; } String string_escape_shell_cmd(const char *str) { register int x, y, l; char *cmd; char *p = nullptr; l = strlen(str); String ret(safe_address(l, 2, 1), ReserveString); cmd = ret.mutableData(); for (x = 0, y = 0; x < l; x++) { switch (str[x]) { #ifndef _MSC_VER case '"': case '\'': if (!p && (p = (char *)memchr(str + x + 1, str[x], l - x - 1))) { /* noop */ } else if (p && *p == str[x]) { p = nullptr; } else { cmd[y++] = '\\'; } cmd[y++] = str[x]; break; #else /* % is Windows specific for environmental variables, ^%PATH% will output PATH while ^%PATH^% will not. 
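So a command line such as "dir %PATH%" comes out as "dir ^%PATH^%", which
cmd.exe passes through without expanding the variable.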
escapeshellcmd->val will escape all % and !. */ case '%': case '!': case '"': case '\'': #endif case '#': /* This is character-set independent */ case '&': case ';': case '`': case '|': case '*': case '?': case '~': case '<': case '>': case '^': case '(': case ')': case '[': case ']': case '{': case '}': case '$': case '\\': case '\x0A': /* excluding these two */ case '\xFF': #ifdef _MSC_VER cmd[y++] = '^'; #else cmd[y++] = '\\'; #endif /* fall-through */ default: cmd[y++] = str[x]; } } ret.setSize(y); return ret; } /////////////////////////////////////////////////////////////////////////////// static void string_similar_str(const char *txt1, int len1, const char *txt2, int len2, int *pos1, int *pos2, int *max) { const char *p, *q; const char *end1 = txt1 + len1; const char *end2 = txt2 + len2; int l; *max = 0; for (p = txt1; p < end1; p++) { for (q = txt2; q < end2; q++) { for (l = 0; (p + l < end1) && (q + l < end2) && (p[l] == q[l]); l++); if (l > *max) { *max = l; *pos1 = p - txt1; *pos2 = q - txt2; } } } } static int string_similar_char(const char *txt1, int len1, const char *txt2, int len2) { int sum; int pos1 = 0, pos2 = 0, max; string_similar_str(txt1, len1, txt2, len2, &pos1, &pos2, &max); if ((sum = max)) { if (pos1 && pos2) { sum += string_similar_char(txt1, pos1, txt2, pos2); } if ((pos1 + max < len1) && (pos2 + max < len2)) { sum += string_similar_char(txt1 + pos1 + max, len1 - pos1 - max, txt2 + pos2 + max, len2 - pos2 - max); } } return sum; } int string_similar_text(const char *t1, int len1, const char *t2, int len2, float *percent) { if (len1 == 0 && len2 == 0) { if (percent) *percent = 0.0; return 0; } int sim = string_similar_char(t1, len1, t2, len2); if (percent) *percent = sim * 200.0 / (len1 + len2); return sim; } /////////////////////////////////////////////////////////////////////////////// #define LEVENSHTEIN_MAX_LENTH 255 // reference implementation, only optimized for memory usage, not speed int string_levenshtein(const char *s1, int l1, const char *s2, int l2, int cost_ins, int cost_rep, int cost_del ) { int *p1, *p2, *tmp; int i1, i2, c0, c1, c2; if (l1==0) return l2*cost_ins; if (l2==0) return l1*cost_del; if ((l1>LEVENSHTEIN_MAX_LENTH)||(l2>LEVENSHTEIN_MAX_LENTH)) { raise_warning("levenshtein(): Argument string(s) too long"); return -1; } p1 = (int*)req::malloc_noptrs((l2+1) * sizeof(int)); SCOPE_EXIT { req::free(p1); }; p2 = (int*)req::malloc_noptrs((l2+1) * sizeof(int)); SCOPE_EXIT { req::free(p2); }; for(i2=0;i2<=l2;i2++) { p1[i2] = i2*cost_ins; } for(i1=0;i1<l1;i1++) { p2[0]=p1[0]+cost_del; for(i2=0;i2<l2;i2++) { c0=p1[i2]+((s1[i1]==s2[i2])?0:cost_rep); c1=p1[i2+1]+cost_del; if (c1<c0) c0=c1; c2=p2[i2]+cost_ins; if (c2<c0) c0=c2; p2[i2+1]=c0; } tmp=p1; p1=p2; p2=tmp; } c0=p1[l2]; return c0; } /////////////////////////////////////////////////////////////////////////////// String string_money_format(const char *format, double value) { bool check = false; const char *p = format; while ((p = strchr(p, '%'))) { if (*(p + 1) == '%') { p += 2; } else if (!check) { check = true; p++; } else { throw_invalid_argument ("format: Only a single %%i or %%n token can be used"); return String(); } } int format_len = strlen(format); int str_len = safe_address(format_len, 1, 1024); String ret(str_len, ReserveString); char *str = ret.mutableData(); if ((str_len = strfmon(str, str_len, format, value)) < 0) { return String(); } ret.setSize(str_len); return ret; } /////////////////////////////////////////////////////////////////////////////// String string_number_format(double 
d, int dec, const String& dec_point, const String& thousand_sep) { char *tmpbuf = nullptr, *resbuf; char *s, *t; /* source, target */ char *dp; int integral; int tmplen, reslen=0; int count=0; int is_negative=0; if (d < 0) { is_negative = 1; d = -d; } if (dec < 0) dec = 0; d = php_math_round(d, dec); // departure from PHP: we got rid of dependencies on spprintf() here. // This actually means 63 bytes for characters + 1 byte for '\0' String tmpstr(63, ReserveString); tmpbuf = tmpstr.mutableData(); tmplen = snprintf(tmpbuf, 64, "%.*F", dec, d); // From the man page of snprintf, the return value is: // The number of characters that would have been written if n had been // sufficiently large, not counting the terminating null character. if (tmplen < 0) return empty_string(); if (tmplen < 64 && (tmpbuf == nullptr || !isdigit((int)tmpbuf[0]))) { tmpstr.setSize(tmplen); return tmpstr; } if (tmplen >= 64) { // Uncommon, asked for more than 64 chars worth of precision tmpstr = String(tmplen, ReserveString); tmpbuf = tmpstr.mutableData(); tmplen = snprintf(tmpbuf, tmplen + 1, "%.*F", dec, d); if (tmplen < 0) return empty_string(); if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) { tmpstr.setSize(tmplen); return tmpstr; } } /* find decimal point, if expected */ if (dec) { dp = strpbrk(tmpbuf, ".,"); } else { dp = nullptr; } /* calculate the length of the return buffer */ if (dp) { integral = dp - tmpbuf; } else { /* no decimal point was found */ integral = tmplen; } /* allow for thousand separators */ if (!thousand_sep.empty()) { if (integral + thousand_sep.size() * ((integral-1) / 3) < integral) { /* overflow */ raise_error("String overflow"); } integral += ((integral-1) / 3) * thousand_sep.size(); } reslen = integral; if (dec) { reslen += dec; if (!dec_point.empty()) { if (reslen + dec_point.size() < dec_point.size()) { /* overflow */ raise_error("String overflow"); } reslen += dec_point.size(); } } /* add a byte for minus sign */ if (is_negative) { reslen++; } String resstr(reslen, ReserveString); resbuf = resstr.mutableData(); s = tmpbuf+tmplen-1; t = resbuf+reslen-1; /* copy the decimal places. * Take care, as the sprintf implementation may return less places than * we requested due to internal buffer limitations */ if (dec) { int declen = dp ? s - dp : 0; int topad = dec > declen ? 
dec - declen : 0; /* pad with '0's */ while (topad--) { *t-- = '0'; } if (dp) { s -= declen + 1; /* +1 to skip the point */ t -= declen; /* now copy the chars after the point */ memcpy(t + 1, dp + 1, declen); } /* add decimal point */ if (!dec_point.empty()) { memcpy(t + (1 - dec_point.size()), dec_point.data(), dec_point.size()); t -= dec_point.size(); } } /* copy the numbers before the decimal point, adding thousand * separator every three digits */ while(s >= tmpbuf) { *t-- = *s--; if (thousand_sep && (++count%3)==0 && s>=tmpbuf) { memcpy(t + (1 - thousand_sep.size()), thousand_sep.data(), thousand_sep.size()); t -= thousand_sep.size(); } } /* and a minus sign, if needed */ if (is_negative) { *t-- = '-'; } resstr.setSize(reslen); return resstr; } /////////////////////////////////////////////////////////////////////////////// // soundex /* Simple soundex algorithm as described by Knuth in TAOCP, vol 3 */ String string_soundex(const String& str) { assertx(!str.empty()); int _small, code, last; String retString(4, ReserveString); char* soundex = retString.mutableData(); static char soundex_table[26] = { 0, /* A */ '1', /* B */ '2', /* C */ '3', /* D */ 0, /* E */ '1', /* F */ '2', /* G */ 0, /* H */ 0, /* I */ '2', /* J */ '2', /* K */ '4', /* L */ '5', /* M */ '5', /* N */ 0, /* O */ '1', /* P */ '2', /* Q */ '6', /* R */ '2', /* S */ '3', /* T */ 0, /* U */ '1', /* V */ 0, /* W */ '2', /* X */ 0, /* Y */ '2' /* Z */ }; /* build soundex string */ last = -1; auto p = str.slice().data(); for (_small = 0; *p && _small < 4; p++) { /* convert chars to upper case and strip non-letter chars */ /* BUG: should also map here accented letters used in non */ /* English words or names (also found in English text!): */ /* esstsett, thorn, n-tilde, c-cedilla, s-caron, ... */ code = toupper((int)(unsigned char)(*p)); if (code >= 'A' && code <= 'Z') { if (_small == 0) { /* remember first valid char */ soundex[_small++] = code; last = soundex_table[code - 'A']; } else { /* ignore sequences of consonants with same soundex */ /* code in trail, and vowels unless they separate */ /* consonant letters */ code = soundex_table[code - 'A']; if (code != last) { if (code != 0) { soundex[_small++] = code; } last = code; } } } } /* pad with '0' and terminate with 0 ;-) */ while (_small < 4) { soundex[_small++] = '0'; } retString.setSize(4); return retString; } /////////////////////////////////////////////////////////////////////////////// // metaphone /** * this is now the original code by Michael G Schwern: * i've changed it just a slightly bit (use emalloc, * get rid of includes etc) * - thies - 13.09.1999 */ /*----------------------------- */ /* this used to be "metaphone.h" */ /*----------------------------- */ /* Special encodings */ #define SH 'X' #define TH '0' /*----------------------------- */ /* end of "metaphone.h" */ /*----------------------------- */ /*----------------------------- */ /* this used to be "metachar.h" */ /*----------------------------- */ /* Metachar.h ... little bits about characters for metaphone */ /*-- Character encoding array & accessing macros --*/ /* Stolen directly out of the book... */ char _codes[26] = { 1,16,4,16,9,2,4,16,9,2,0,2,2,2,1,4,0,2,4,4,1,0,0,0,8,0}; #define ENCODE(c) (isalpha(c) ? 
_codes[((toupper(c)) - 'A')] : 0)
#define isvowel(c) (ENCODE(c) & 1) /* AEIOU */
/* These letters are passed through unchanged */
#define NOCHANGE(c) (ENCODE(c) & 2) /* FJMNR */
/* These form diphthongs when preceding H */
#define AFFECTH(c) (ENCODE(c) & 4) /* CGPST */
/* These make C and G soft */
#define MAKESOFT(c) (ENCODE(c) & 8) /* EIY */
/* These prevent GH from becoming F */
#define NOGHTOF(c) (ENCODE(c) & 16) /* BDH */
/*----------------------------- */
/* end of "metachar.h" */
/*----------------------------- */
/* I suppose I could have been using a character pointer instead of
 * accessing the array directly... */
/* Look at the next letter in the word */
#define Next_Letter ((char)toupper(word[w_idx+1]))
/* Look at the current letter in the word */
#define Curr_Letter ((char)toupper(word[w_idx]))
/* Go N letters back. */
#define Look_Back_Letter(n) (w_idx >= n ? (char)toupper(word[w_idx-n]) : '\0')
/* Previous letter. I dunno, should this return null on failure? */
#define Prev_Letter (Look_Back_Letter(1))
/* Look two letters down. It makes sure you don't walk off the string. */
#define After_Next_Letter (Next_Letter != '\0' ? (char)toupper(word[w_idx+2]) \
                                               : '\0')
#define Look_Ahead_Letter(n) ((char)toupper(Lookahead(word+w_idx, n)))
/* Allows us to safely look ahead an arbitrary # of letters */
/* I probably could have just used strlen... */
static char Lookahead(unsigned char *word, int how_far) {
  char letter_ahead = '\0'; /* null by default */
  int idx;
  for (idx = 0; word[idx] != '\0' && idx < how_far; idx++);
  /* Edge forward in the string... */
  letter_ahead = (char)word[idx]; /* idx will be either == to how_far or
                                   * at the end of the string */
  return letter_ahead;
}
/* phonize one letter
 * We don't know the buffer's size in advance. One way to solve this is to
 * just re-allocate the buffer size. We're using an extra of 2 characters
 * (this could be one though; or more too). */
#define Phonize(c) { buffer.append(c); }
/* How long is the phoned word? */
#define Phone_Len (buffer.size())
/* Note if a letter is a 'break' in the word */
#define Isbreak(c) (!isalpha(c))
String string_metaphone(const char *input, int word_len, long max_phonemes,
                        int traditional) {
  unsigned char *word = (unsigned char *)input;
  int w_idx = 0; /* point in the phonization we're at. */
  int max_buffer_len = 0; /* maximum length of the destination buffer */
  /*-- Parameter checks --*/
  /* Negative phoneme length is meaningless */
  if (max_phonemes < 0) return String();
  /* Empty/null string is meaningless */
  /* Overly paranoid */
  /* always_assert(word != NULL && word[0] != '\0'); */
  if (word == nullptr) return String();
  /*-- Allocate memory for our phoned_phrase --*/
  if (max_phonemes == 0) { /* Assume largest possible */
    max_buffer_len = word_len;
  } else {
    max_buffer_len = max_phonemes;
  }
  StringBuffer buffer(max_buffer_len);
  /*-- The first phoneme has to be processed specially. --*/
  /* Find our first letter */
  for (; !isalpha(Curr_Letter); w_idx++) {
    /* On the off chance we were given nothing but crap...
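     * (no alphabetic characters at all): return the empty phonization
     * rather than scanning past the '\0'.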
   */
  if (Curr_Letter == '\0') {
    return buffer.detach(); /* For testing */
  }
}
switch (Curr_Letter) {
/* AE becomes E */
case 'A':
  if (Next_Letter == 'E') {
    Phonize('E');
    w_idx += 2;
  }
  /* Remember, preserve vowels at the beginning */
  else {
    Phonize('A');
    w_idx++;
  }
  break;
/* [GKP]N becomes N */
case 'G':
case 'K':
case 'P':
  if (Next_Letter == 'N') {
    Phonize('N');
    w_idx += 2;
  }
  break;
/* WH becomes H,
   WR becomes R
   W if followed by a vowel */
case 'W':
  if (Next_Letter == 'H' || Next_Letter == 'R') {
    Phonize(Next_Letter);
    w_idx += 2;
  } else if (isvowel(Next_Letter)) {
    Phonize('W');
    w_idx += 2;
  }
  /* else ignore */
  break;
/* X becomes S */
case 'X':
  Phonize('S');
  w_idx++;
  break;
/* Vowels are kept */
/* We did A already
   case 'A':
   case 'a':
*/
case 'E':
case 'I':
case 'O':
case 'U':
  Phonize(Curr_Letter);
  w_idx++;
  break;
default:
  /* do nothing */
  break;
}
/* On to the metaphoning */
for (; Curr_Letter != '\0' &&
       (max_phonemes == 0 || Phone_Len < max_phonemes);
     w_idx++) {
  /* How many letters to skip because an earlier encoding handled
   * multiple letters */
  unsigned short int skip_letter = 0;
  /* THOUGHT: It would be nice if, rather than having things like...
   * well, SCI. For SCI you encode the S, then have to remember
   * to skip the C. So the phoneme SCI invades both S and C. It would
   * be better, IMHO, to skip the C from the S part of the encoding.
   * Hell, I'm trying it. */
  /* Ignore non-alphas */
  if (!isalpha(Curr_Letter)) continue;
  /* Drop duplicates, except CC */
  if (Curr_Letter == Prev_Letter && Curr_Letter != 'C') continue;
  switch (Curr_Letter) {
  /* B -> B unless in MB */
  case 'B':
    if (Prev_Letter != 'M') Phonize('B');
    break;
  /* 'sh' if -CIA- or -CH, but not SCH, except SCHW.
   * (SCHW is handled in S)
   * S if -CI-, -CE- or -CY-
   * dropped if -SCI-, SCE-, -SCY- (handled in S)
   * else K
   */
  case 'C':
    if (MAKESOFT(Next_Letter)) { /* C[IEY] */
      if (After_Next_Letter == 'A' && Next_Letter == 'I') { /* CIA */
        Phonize(SH);
      }
      /* SC[IEY] */
      else if (Prev_Letter == 'S') {
        /* Dropped */
      } else {
        Phonize('S');
      }
    } else if (Next_Letter == 'H') {
      if ((!traditional) &&
          (After_Next_Letter == 'R' || Prev_Letter == 'S')) {
        /* Christ, School */
        Phonize('K');
      } else {
        Phonize(SH);
      }
      skip_letter++;
    } else {
      Phonize('K');
    }
    break;
  /* J if in -DGE-, -DGI- or -DGY-
   * else T
   */
  case 'D':
    if (Next_Letter == 'G' && MAKESOFT(After_Next_Letter)) {
      Phonize('J');
      skip_letter++;
    } else
      Phonize('T');
    break;
  /* F if in -GH and not B--GH, D--GH, -H--GH, -H---GH
   * else dropped if -GNED, -GN,
   * else dropped if -DGE-, -DGI- or -DGY- (handled in D)
   * else J if in -GE-, -GI, -GY and not GG
   * else K
   */
  case 'G':
    if (Next_Letter == 'H') {
      if (!(NOGHTOF(Look_Back_Letter(3)) || Look_Back_Letter(4) == 'H')) {
        Phonize('F');
        skip_letter++;
      } else {
        /* silent */
      }
    } else if (Next_Letter == 'N') {
      if (Isbreak(After_Next_Letter) ||
          (After_Next_Letter == 'E' && Look_Ahead_Letter(3) == 'D')) {
        /* dropped */
      } else
        Phonize('K');
    } else if (MAKESOFT(Next_Letter) && Prev_Letter != 'G') {
      Phonize('J');
    } else {
      Phonize('K');
    }
    break;
  /* H if before a vowel and not after C,G,P,S,T */
  case 'H':
    if (isvowel(Next_Letter) && !AFFECTH(Prev_Letter)) Phonize('H');
    break;
  /* dropped if after C
   * else K
   */
  case 'K':
    if (Prev_Letter != 'C') Phonize('K');
    break;
  /* F if before H
   * else P
   */
  case 'P':
    if (Next_Letter == 'H') {
      Phonize('F');
    } else {
      Phonize('P');
    }
    break;
  /* K */
  case 'Q':
    Phonize('K');
    break;
  /* 'sh' in -SH-, -SIO- or -SIA- or -SCHW-
   * else S
   */
  case 'S':
    if (Next_Letter == 'I' &&
        (After_Next_Letter == 'O' || After_Next_Letter == 'A')) {
      Phonize(SH);
    }
else if (Next_Letter == 'H') { Phonize(SH); skip_letter++; } else if ((!traditional) && (Next_Letter == 'C' && Look_Ahead_Letter(2) == 'H' && Look_Ahead_Letter(3) == 'W')) { Phonize(SH); skip_letter += 2; } else { Phonize('S'); } break; /* 'sh' in -TIA- or -TIO- * else 'th' before H * else T */ case 'T': if (Next_Letter == 'I' && (After_Next_Letter == 'O' || After_Next_Letter == 'A')) { Phonize(SH); } else if (Next_Letter == 'H') { Phonize(TH); skip_letter++; } else { Phonize('T'); } break; /* F */ case 'V': Phonize('F'); break; /* W before a vowel, else dropped */ case 'W': if (isvowel(Next_Letter)) Phonize('W'); break; /* KS */ case 'X': Phonize('K'); Phonize('S'); break; /* Y if followed by a vowel */ case 'Y': if (isvowel(Next_Letter)) Phonize('Y'); break; /* S */ case 'Z': Phonize('S'); break; /* No transformation */ case 'F': case 'J': case 'L': case 'M': case 'N': case 'R': Phonize(Curr_Letter); break; default: /* nothing */ break; } /* END SWITCH */ w_idx += skip_letter; } /* END FOR */ return buffer.detach(); } /////////////////////////////////////////////////////////////////////////////// // Cyrillic /** * This is codetables for different Cyrillic charsets (relative to koi8-r). * Each table contains data for 128-255 symbols from ASCII table. * First 256 symbols are for conversion from koi8-r to corresponding charset, * second 256 symbols are for reverse conversion, from charset to koi8-r. * * Here we have the following tables: * _cyr_win1251 - for windows-1251 charset * _cyr_iso88595 - for iso8859-5 charset * _cyr_cp866 - for x-cp866 charset * _cyr_mac - for x-mac-cyrillic charset */ typedef unsigned char _cyr_charset_table[512]; static const _cyr_charset_table _cyr_win1251 = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46, 46,46,46,46,46,46,46,46,46,46,46,46,46,46,46,46, 154,174,190,46,159,189,46,46,179,191,180,157,46,46,156,183, 46,46,182,166,173,46,46,158,163,152,164,155,46,46,46,167, 225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, 242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, 193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, 210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,184,186,32,179,191,32,32,32,32,32,180,162,32, 32,32,32,168,170,32,178,175,32,32,32,32,32,165,161,169, 254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238, 239,255,240,241,242,243,230,226,252,251,231,248,253,249,247,250, 222,192,193,214,196,197,212,195,213,200,201,202,203,204,205,206, 207,223,208,209,210,211,198,194,220,219,199,216,221,217,215,218, }; static const _cyr_charset_table _cyr_cp866 = { 
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, 242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, 193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, 35,35,35,124,124,124,124,43,43,124,124,43,43,43,43,43, 43,45,45,124,45,43,124,124,43,43,45,45,124,45,43,45, 45,45,45,43,43,43,43,43,43,43,43,35,35,124,124,35, 210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209, 179,163,180,164,183,167,190,174,32,149,158,32,152,159,148,154, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 205,186,213,241,243,201,32,245,187,212,211,200,190,32,247,198, 199,204,181,240,242,185,32,244,203,207,208,202,216,32,246,32, 238,160,161,230,164,165,228,163,229,168,169,170,171,172,173,174, 175,239,224,225,226,227,166,162,236,235,167,232,237,233,231,234, 158,128,129,150,132,133,148,131,149,136,137,138,139,140,141,142, 143,159,144,145,146,147,134,130,156,155,135,152,157,153,151,154, }; static const _cyr_charset_table _cyr_iso88595 = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,179,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, 242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, 193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, 210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,209, 32,163,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,32,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,241,32,32,32,32,32,32,32,32,32,32,32,32, 32,32,32,161,32,32,32,32,32,32,32,32,32,32,32,32, 238,208,209,230,212,213,228,211,229,216,217,218,219,220,221,222, 223,239,224,225,226,227,214,210,236,235,215,232,237,233,231,234, 206,176,177,198,180,181,196,179,197,184,185,186,187,188,189,190, 
191,207,192,193,194,195,182,178,204,203,183,200,205,201,199,202, }; static const _cyr_charset_table _cyr_mac = { 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 225,226,247,231,228,229,246,250,233,234,235,236,237,238,239,240, 242,243,244,245,230,232,227,254,251,253,255,249,248,252,224,241, 160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191, 128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143, 144,145,146,147,148,149,150,151,152,153,154,155,156,179,163,209, 193,194,215,199,196,197,214,218,201,202,203,204,205,206,207,208, 210,211,212,213,198,200,195,222,219,221,223,217,216,220,192,255, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31, 32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47, 48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63, 64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79, 80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95, 96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111, 112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127, 192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207, 208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223, 160,161,162,222,164,165,166,167,168,169,170,171,172,173,174,175, 176,177,178,221,180,181,182,183,184,185,186,187,188,189,190,191, 254,224,225,246,228,229,244,227,245,232,233,234,235,236,237,238, 239,223,240,241,242,243,230,226,252,251,231,248,253,249,247,250, 158,128,129,150,132,133,148,131,149,136,137,138,139,140,141,142, 143,159,144,145,146,147,134,130,156,155,135,152,157,153,151,154, }; /** * This is the function that performs real in-place conversion of the string * between charsets. * Parameters: * str - string to be converted * from,to - one-symbol label of source and destination charset * The following symbols are used as labels: * k - koi8-r * w - windows-1251 * i - iso8859-5 * a - x-cp866 * d - x-cp866 * m - x-mac-cyrillic */ String string_convert_cyrillic_string(const String& input, char from, char to) { const unsigned char *from_table, *to_table; unsigned char tmp; auto uinput = (unsigned char*)input.slice().data(); String retString(input.size(), ReserveString); unsigned char *str = (unsigned char *)retString.mutableData(); from_table = nullptr; to_table = nullptr; switch (toupper((int)(unsigned char)from)) { case 'W': from_table = _cyr_win1251; break; case 'A': case 'D': from_table = _cyr_cp866; break; case 'I': from_table = _cyr_iso88595; break; case 'M': from_table = _cyr_mac; break; case 'K': break; default: throw_invalid_argument("Unknown source charset: %c", from); break; } switch (toupper((int)(unsigned char)to)) { case 'W': to_table = _cyr_win1251; break; case 'A': case 'D': to_table = _cyr_cp866; break; case 'I': to_table = _cyr_iso88595; break; case 'M': to_table = _cyr_mac; break; case 'K': break; default: throw_invalid_argument("Unknown destination charset: %c", to); break; } for (int i = 0; i < input.size(); i++) { tmp = from_table == nullptr ? uinput[i] : from_table[uinput[i]]; str[i] = to_table == nullptr ? 
tmp : to_table[tmp + 256]; } retString.setSize(input.size()); return retString; } /////////////////////////////////////////////////////////////////////////////// // Hebrew #define HEB_BLOCK_TYPE_ENG 1 #define HEB_BLOCK_TYPE_HEB 2 #define isheb(c) \ (((((unsigned char) c) >= 224) && (((unsigned char) c) <= 250)) ? 1 : 0) #define _isblank(c) \ (((((unsigned char) c) == ' ' || ((unsigned char) c) == '\t')) ? 1 : 0) #define _isnewline(c) \ (((((unsigned char) c) == '\n' || ((unsigned char) c) == '\r')) ? 1 : 0) /** * Converts Logical Hebrew text (Hebrew Windows style) to Visual text * Cheers/complaints/flames - Zeev Suraski <zeev@php.net> */ String string_convert_hebrew_string(const String& inStr, int /*max_chars_per_line*/, int convert_newlines) { assertx(!inStr.empty()); auto str = inStr.data(); auto str_len = inStr.size(); const char *tmp; char *heb_str, *broken_str; char *target; int block_start, block_end, block_type, block_length, i; long max_chars=0; int begin, end, char_count, orig_begin; tmp = str; block_start=block_end=0; heb_str = (char *) req::malloc_noptrs(str_len + 1); SCOPE_EXIT { req::free(heb_str); }; target = heb_str+str_len; *target = 0; target--; block_length=0; if (isheb(*tmp)) { block_type = HEB_BLOCK_TYPE_HEB; } else { block_type = HEB_BLOCK_TYPE_ENG; } do { if (block_type == HEB_BLOCK_TYPE_HEB) { while ((isheb((int)*(tmp+1)) || _isblank((int)*(tmp+1)) || ispunct((int)*(tmp+1)) || (int)*(tmp+1)=='\n' ) && block_end<str_len-1) { tmp++; block_end++; block_length++; } for (i = block_start; i<= block_end; i++) { *target = str[i]; switch (*target) { case '(': *target = ')'; break; case ')': *target = '('; break; case '[': *target = ']'; break; case ']': *target = '['; break; case '{': *target = '}'; break; case '}': *target = '{'; break; case '<': *target = '>'; break; case '>': *target = '<'; break; case '\\': *target = '/'; break; case '/': *target = '\\'; break; default: break; } target--; } block_type = HEB_BLOCK_TYPE_ENG; } else { while (!isheb(*(tmp+1)) && (int)*(tmp+1)!='\n' && block_end < str_len-1) { tmp++; block_end++; block_length++; } while ((_isblank((int)*tmp) || ispunct((int)*tmp)) && *tmp!='/' && *tmp!='-' && block_end > block_start) { tmp--; block_end--; } for (i = block_end; i >= block_start; i--) { *target = str[i]; target--; } block_type = HEB_BLOCK_TYPE_HEB; } block_start=block_end+1; } while (block_end < str_len-1); String brokenStr(str_len, ReserveString); broken_str = brokenStr.mutableData(); begin=end=str_len-1; target = broken_str; while (1) { char_count=0; while ((!max_chars || char_count < max_chars) && begin > 0) { char_count++; begin--; if (begin <= 0 || _isnewline(heb_str[begin])) { while (begin > 0 && _isnewline(heb_str[begin-1])) { begin--; char_count++; } break; } } if (char_count == max_chars) { /* try to avoid breaking words */ int new_char_count=char_count, new_begin=begin; while (new_char_count > 0) { if (_isblank(heb_str[new_begin]) || _isnewline(heb_str[new_begin])) { break; } new_begin++; new_char_count--; } if (new_char_count > 0) { char_count=new_char_count; begin=new_begin; } } orig_begin=begin; if (_isblank(heb_str[begin])) { heb_str[begin]='\n'; } while (begin <= end && _isnewline(heb_str[begin])) { /* skip leading newlines */ begin++; } for (i = begin; i <= end; i++) { /* copy content */ *target = heb_str[i]; target++; } for (i = orig_begin; i <= end && _isnewline(heb_str[i]); i++) { *target = heb_str[i]; target++; } begin=orig_begin; if (begin <= 0) { *target = 0; break; } begin--; end=begin; } if (convert_newlines) { int count; 
auto ret = string_replace(broken_str, str_len, "\n", strlen("\n"), "<br />\n", strlen("<br />\n"), count, true); if (!ret.isNull()) { return ret; } } brokenStr.setSize(str_len); return brokenStr; } /////////////////////////////////////////////////////////////////////////////// }
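A note on the conversion tables in the record above: as its own comment says, each 512-byte _cyr_charset_table packs two 256-entry maps, one per direction, and string_convert_cyrillic_string picks the second half of the destination table with a +256 offset. Below is a minimal, self-contained C sketch of that double-table lookup; the identity table and the helper name convert_byte are illustrative stand-ins, not the dataset's real tables.

#include <stdio.h>
#include <string.h>

/* Assumed layout, taken from the record's comment: a 512-byte table
 * whose first 256 entries map one direction and whose last 256 map
 * back, selected with a +256 offset. */
static unsigned char convert_byte(unsigned char c,
                                  const unsigned char *from_table,
                                  const unsigned char *to_table)
{
    /* Mirrors the loop in string_convert_cyrillic_string: the source
     * table is indexed directly, the destination table through its
     * second half; a NULL table means that side is already koi8-r. */
    unsigned char tmp = from_table ? from_table[c] : c;
    return to_table ? to_table[tmp + 256] : tmp;
}

int main(void)
{
    static unsigned char ident[512];        /* stand-in for _cyr_win1251 etc. */
    for (int i = 0; i < 256; i++) {
        ident[i] = (unsigned char)i;        /* forward half */
        ident[i + 256] = (unsigned char)i;  /* reverse half */
    }

    const char *s = "abc";
    for (size_t i = 0; i < strlen(s); i++)
        putchar(convert_byte((unsigned char)s[i], ident, ident));
    putchar('\n');
    return 0;
}

With both tables NULL the byte passes through unchanged, which is how the function handles the 'K' (koi8-r) charset label.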
String string_number_format(double d, int dec, const String& dec_point, const String& thousand_sep) { char *tmpbuf = nullptr, *resbuf; char *s, *t; /* source, target */ char *dp; int integral; int tmplen, reslen=0; int count=0; int is_negative=0; if (d < 0) { is_negative = 1; d = -d; } if (dec < 0) dec = 0; d = php_math_round(d, dec); // departure from PHP: we got rid of dependencies on spprintf() here. String tmpstr(63, ReserveString); tmpbuf = tmpstr.mutableData(); tmplen = snprintf(tmpbuf, 64, "%.*F", dec, d); if (tmplen < 0) return empty_string(); if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) { tmpstr.setSize(tmplen); return tmpstr; } if (tmplen >= 64) { // Uncommon, asked for more than 64 chars worth of precision tmpstr = String(tmplen, ReserveString); tmpbuf = tmpstr.mutableData(); tmplen = snprintf(tmpbuf, tmplen + 1, "%.*F", dec, d); if (tmplen < 0) return empty_string(); if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) { tmpstr.setSize(tmplen); return tmpstr; } } /* find decimal point, if expected */ if (dec) { dp = strpbrk(tmpbuf, ".,"); } else { dp = nullptr; } /* calculate the length of the return buffer */ if (dp) { integral = dp - tmpbuf; } else { /* no decimal point was found */ integral = tmplen; } /* allow for thousand separators */ if (!thousand_sep.empty()) { if (integral + thousand_sep.size() * ((integral-1) / 3) < integral) { /* overflow */ raise_error("String overflow"); } integral += ((integral-1) / 3) * thousand_sep.size(); } reslen = integral; if (dec) { reslen += dec; if (!dec_point.empty()) { if (reslen + dec_point.size() < dec_point.size()) { /* overflow */ raise_error("String overflow"); } reslen += dec_point.size(); } } /* add a byte for minus sign */ if (is_negative) { reslen++; } String resstr(reslen, ReserveString); resbuf = resstr.mutableData(); s = tmpbuf+tmplen-1; t = resbuf+reslen-1; /* copy the decimal places. * Take care, as the sprintf implementation may return less places than * we requested due to internal buffer limitations */ if (dec) { int declen = dp ? s - dp : 0; int topad = dec > declen ? dec - declen : 0; /* pad with '0's */ while (topad--) { *t-- = '0'; } if (dp) { s -= declen + 1; /* +1 to skip the point */ t -= declen; /* now copy the chars after the point */ memcpy(t + 1, dp + 1, declen); } /* add decimal point */ if (!dec_point.empty()) { memcpy(t + (1 - dec_point.size()), dec_point.data(), dec_point.size()); t -= dec_point.size(); } } /* copy the numbers before the decimal point, adding thousand * separator every three digits */ while(s >= tmpbuf) { *t-- = *s--; if (thousand_sep && (++count%3)==0 && s>=tmpbuf) { memcpy(t + (1 - thousand_sep.size()), thousand_sep.data(), thousand_sep.size()); t -= thousand_sep.size(); } } /* and a minus sign, if needed */ if (is_negative) { *t-- = '-'; } resstr.setSize(reslen); return resstr; }
String string_number_format(double d, int dec, const String& dec_point, const String& thousand_sep) { char *tmpbuf = nullptr, *resbuf; char *s, *t; /* source, target */ char *dp; int integral; int tmplen, reslen=0; int count=0; int is_negative=0; if (d < 0) { is_negative = 1; d = -d; } if (dec < 0) dec = 0; d = php_math_round(d, dec); // departure from PHP: we got rid of dependencies on spprintf() here. // This actually means 63 bytes for characters + 1 byte for '\0' String tmpstr(63, ReserveString); tmpbuf = tmpstr.mutableData(); tmplen = snprintf(tmpbuf, 64, "%.*F", dec, d); // From the man page of snprintf, the return value is: // The number of characters that would have been written if n had been // sufficiently large, not counting the terminating null character. if (tmplen < 0) return empty_string(); if (tmplen < 64 && (tmpbuf == nullptr || !isdigit((int)tmpbuf[0]))) { tmpstr.setSize(tmplen); return tmpstr; } if (tmplen >= 64) { // Uncommon, asked for more than 64 chars worth of precision tmpstr = String(tmplen, ReserveString); tmpbuf = tmpstr.mutableData(); tmplen = snprintf(tmpbuf, tmplen + 1, "%.*F", dec, d); if (tmplen < 0) return empty_string(); if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) { tmpstr.setSize(tmplen); return tmpstr; } } /* find decimal point, if expected */ if (dec) { dp = strpbrk(tmpbuf, ".,"); } else { dp = nullptr; } /* calculate the length of the return buffer */ if (dp) { integral = dp - tmpbuf; } else { /* no decimal point was found */ integral = tmplen; } /* allow for thousand separators */ if (!thousand_sep.empty()) { if (integral + thousand_sep.size() * ((integral-1) / 3) < integral) { /* overflow */ raise_error("String overflow"); } integral += ((integral-1) / 3) * thousand_sep.size(); } reslen = integral; if (dec) { reslen += dec; if (!dec_point.empty()) { if (reslen + dec_point.size() < dec_point.size()) { /* overflow */ raise_error("String overflow"); } reslen += dec_point.size(); } } /* add a byte for minus sign */ if (is_negative) { reslen++; } String resstr(reslen, ReserveString); resbuf = resstr.mutableData(); s = tmpbuf+tmplen-1; t = resbuf+reslen-1; /* copy the decimal places. * Take care, as the sprintf implementation may return less places than * we requested due to internal buffer limitations */ if (dec) { int declen = dp ? s - dp : 0; int topad = dec > declen ? dec - declen : 0; /* pad with '0's */ while (topad--) { *t-- = '0'; } if (dp) { s -= declen + 1; /* +1 to skip the point */ t -= declen; /* now copy the chars after the point */ memcpy(t + 1, dp + 1, declen); } /* add decimal point */ if (!dec_point.empty()) { memcpy(t + (1 - dec_point.size()), dec_point.data(), dec_point.size()); t -= dec_point.size(); } } /* copy the numbers before the decimal point, adding thousand * separator every three digits */ while(s >= tmpbuf) { *t-- = *s--; if (thousand_sep && (++count%3)==0 && s>=tmpbuf) { memcpy(t + (1 - thousand_sep.size()), thousand_sep.data(), thousand_sep.size()); t -= thousand_sep.size(); } } /* and a minus sign, if needed */ if (is_negative) { *t-- = '-'; } resstr.setSize(reslen); return resstr; }
{'added': [(1621, " // This actually means 63 bytes for characters + 1 byte for '\\0'"), (1625, ' // From the man page of snprintf, the return value is:'), (1626, ' // The number of characters that would have been written if n had been'), (1627, ' // sufficiently large, not counting the terminating null character.'), (1629, ' if (tmplen < 64 && (tmpbuf == nullptr || !isdigit((int)tmpbuf[0]))) {')], 'deleted': [(1625, ' if (tmpbuf == nullptr || !isdigit((int)tmpbuf[0])) {')]}
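The diff above is the entire CWE-119 fix for this record: snprintf() returns the length the output would have had, not the length actually written, so on the pre-fix path a tmplen of 64 or more still reached tmpstr.setSize(tmplen) on a String that had only reserved 63 characters plus the terminator. The added tmplen < 64 && guard routes oversized results into the reallocation branch instead. A minimal C sketch of the corrected pattern, with illustrative names (format_double is not part of either codebase):

#include <stdio.h>
#include <stdlib.h>

/* Format a double with 'dec' decimals, growing the buffer when
 * snprintf reports truncation. Per the man page quoted in the
 * record's code_after, the return value is the number of characters
 * that would have been written, excluding the terminating NUL. */
static char *format_double(double d, int dec)
{
    size_t cap = 64;                     /* small first guess, as in the record */
    char *buf = malloc(cap);
    if (!buf)
        return NULL;

    int len = snprintf(buf, cap, "%.*F", dec, d);
    if (len < 0) {                       /* encoding error */
        free(buf);
        return NULL;
    }
    if ((size_t)len >= cap) {            /* truncated: retry at the exact size */
        char *p = realloc(buf, (size_t)len + 1);
        if (!p) {
            free(buf);
            return NULL;
        }
        buf = p;
        snprintf(buf, (size_t)len + 1, "%.*F", dec, d);
    }
    return buf;                          /* caller frees */
}

The pre-fix HHVM code took its early return without the equivalent of the (size_t)len >= cap check above, which is exactly what the added tmplen < 64 condition restores.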
5
1
1,941
16,443
98
657
29
https://github.com/facebook/hhvm
CVE-2019-11929
CWE-119
3,067
hns_dsaf_xgmac.c
C
hns_xgmac_get_sset_count
/* * Copyright (c) 2014-2015 Hisilicon Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/of_mdio.h> #include "hns_dsaf_main.h" #include "hns_dsaf_mac.h" #include "hns_dsaf_xgmac.h" #include "hns_dsaf_reg.h" static const struct mac_stats_string g_xgmac_stats_string[] = { {"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)}, {"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)}, {"xgmac_tx_total_pkts_minto64", MAC_STATS_FIELD_OFF(tx_under_min_pkts)}, {"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)}, {"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)}, {"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)}, {"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)}, {"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)}, {"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)}, {"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)}, {"xgmac_tx_good_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax_good)}, {"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)}, {"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)}, {"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)}, {"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)}, {"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)}, {"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)}, {"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)}, {"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)}, {"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)}, {"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}, {"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)}, {"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)}, {"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)}, {"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)}, {"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)}, {"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)}, {"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)}, {"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)}, {"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)}, {"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)}, {"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)}, {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)}, {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)}, {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)}, {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)}, {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)}, {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)}, {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)}, {"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)}, {"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)}, {"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)}, {"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)}, {"xgmac_rx_good_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax_good)}, {"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)}, {"xgmac_rx_bad_pkt_untramax", 
MAC_STATS_FIELD_OFF(rx_jabber_err)}, {"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)}, {"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)}, {"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)}, {"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)}, {"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, {"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, {"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, {"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)}, {"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)}, {"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)}, {"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)}, {"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)}, {"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)}, {"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)}, {"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)}, {"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)}, {"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)}, {"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)}, {"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)}, {"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)}, {"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)} }; /** *hns_xgmac_tx_enable - xgmac port tx enable *@drv: mac driver *@value: value of enable */ static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value) { dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value); } /** *hns_xgmac_rx_enable - xgmac port rx enable *@drv: mac driver *@value: value of enable */ static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value) { dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value); } /** * hns_xgmac_tx_lf_rf_insert - insert lf rf control about xgmac * @mac_drv: mac driver * @mode: inserf rf or lf */ static void hns_xgmac_lf_rf_insert(struct mac_driver *mac_drv, u32 mode) { dsaf_set_dev_field(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, mode); } /** * hns_xgmac__lf_rf_control_init - initial the lf rf control register * @mac_drv: mac driver */ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) { u32 val = 0; dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); } /** *hns_xgmac_enable - enable xgmac port *@drv: mac driver *@mode: mode of mac port */ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode) { struct mac_driver *drv = (struct mac_driver *)mac_drv; hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_NO_LF_RF_INSERT); /*enable XGE rX/tX */ if (mode == MAC_COMM_MODE_TX) { hns_xgmac_tx_enable(drv, 1); } else if (mode == MAC_COMM_MODE_RX) { hns_xgmac_rx_enable(drv, 1); } else if (mode == MAC_COMM_MODE_RX_AND_TX) { hns_xgmac_tx_enable(drv, 1); hns_xgmac_rx_enable(drv, 1); } else { dev_err(drv->dev, "error mac mode:%d\n", mode); } } /** *hns_xgmac_disable - disable xgmac port *@mac_drv: mac driver *@mode: mode of mac port */ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode) { struct mac_driver *drv = (struct mac_driver *)mac_drv; if (mode == MAC_COMM_MODE_TX) { hns_xgmac_tx_enable(drv, 0); } else if (mode == MAC_COMM_MODE_RX) { hns_xgmac_rx_enable(drv, 0); } else if (mode == MAC_COMM_MODE_RX_AND_TX) { hns_xgmac_tx_enable(drv, 0); 
hns_xgmac_rx_enable(drv, 0); } hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_LF_INSERT); } /** *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable *@drv: mac driver *@tx_value: tx value *@rx_value: rx value *return status */ static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value, u32 rx_value) { u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG); dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value); dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value); dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin); } /* clr exc irq for xge*/ static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en) { u32 clr_vlue = 0xfffffffful; u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/ dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_vlue); dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_vlue); } /** *hns_xgmac_init - initialize XGE *@mac_drv: mac driver */ static void hns_xgmac_init(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct dsaf_device *dsaf_dev = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 port = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0); mdelay(100); dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1); mdelay(100); hns_xgmac_lf_rf_control_init(drv); hns_xgmac_exc_irq_en(drv, 0); hns_xgmac_pma_fec_enable(drv, 0x0, 0x0); hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX); } /** *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable the same time *@mac_drv: mac driver *@newval:enable of pad and crc */ static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval); dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval); dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval); dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin); } /** *hns_xgmac_pausefrm_cfg - set pause param about xgmac *@mac_drv: mac driver *@newval:enable of pad and crc */ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en); dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin); } static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 high_val = mac_addr[1] | (mac_addr[0] << 8); u32 low_val = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) | (mac_addr[2] << 24); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val); } /** *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac *@mac_drv: mac driver *@enable:enable rx pause param */ static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, XGMAC_PAUSE_CTL_RX_B, !!enable); } /** *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac *@mac_drv: mac driver *@enable:enable tx pause param */ static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, XGMAC_PAUSE_CTL_TX_B, !!enable); /*if enable is not zero ,set tx pause time */ if (enable) dsaf_write_dev(drv, 
XGMAC_MAC_PAUSE_TIME_REG, enable); } /** *hns_xgmac_config_max_frame_length - set xgmac max frame length *@mac_drv: mac driver *@newval:xgmac max frame length */ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval); } void hns_xgmac_update_stats(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats; /* TX */ hw_stats->tx_fragment_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT); hw_stats->tx_undersize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE); hw_stats->tx_under_min_pkts = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN); hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS); hw_stats->tx_65to127 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS); hw_stats->tx_128to255 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS); hw_stats->tx_256to511 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS); hw_stats->tx_512to1023 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS); hw_stats->tx_1024to1518 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS); hw_stats->tx_1519tomax = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS); hw_stats->tx_1519tomax_good = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK); hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE); hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER); hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS); hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS); hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS); hw_stats->tx_total_bytes = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS); hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS); hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS); hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS); hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS); hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS); hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS); hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS); hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS); hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS); hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS); hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS); hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS); hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS); hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS); hw_stats->rx_good_from_sw = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS); hw_stats->rx_bad_from_sw = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS); hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS); /* RX */ hw_stats->rx_fragment_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT); hw_stats->rx_undersize = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE); hw_stats->rx_under_min = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN); hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS); hw_stats->rx_65to127 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS); hw_stats->rx_128to255 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS); hw_stats->rx_256to511 = hns_mac_reg_read64(drv, 
XGMAC_RX_PKTS_256TO511OCTETS); hw_stats->rx_512to1023 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS); hw_stats->rx_1024to1518 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS); hw_stats->rx_1519tomax = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS); hw_stats->rx_1519tomax_good = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK); hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE); hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER); hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS); hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS); hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS); hw_stats->rx_total_bytes = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS); hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS); hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS); hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS); hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS); hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS); hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS); hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS); hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS); hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS); hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS); hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS); hw_stats->rx_unknown_ctrl = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS); hw_stats->tx_good_to_sw = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS); hw_stats->tx_bad_to_sw = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS); hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS); hw_stats->rx_symbol_err = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS); hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS); } /** *hns_xgmac_free - free xgmac driver *@mac_drv: mac driver */ static void hns_xgmac_free(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct dsaf_device *dsaf_dev = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 mac_id = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0); } /** *hns_xgmac_get_info - get xgmac information *@mac_drv: mac driver *@mac_info:mac information */ static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 pause_time, pause_ctrl, port_mode, ctrl_val; ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B); mac_info->auto_neg = 0; pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG); mac_info->tx_pause_time = pause_time; port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG); mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M, XGMAC_PORT_MODE_TX_S) && dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M, XGMAC_PORT_MODE_RX_S); mac_info->duplex = 1; mac_info->speed = MAC_SPEED_10000; pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B); mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B); } /** *hns_xgmac_get_pausefrm_cfg - get xgmac pause param *@mac_drv: mac driver *@rx_en:xgmac rx pause enable *@tx_en:xgmac tx pause enable */ static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 
*rx_en, u32 *tx_en) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 pause_ctrl; pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); *rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B); *tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B); } /** *hns_xgmac_get_link_status - get xgmac link status *@mac_drv: mac driver *@link_stat: xgmac link stat */ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat) { struct mac_driver *drv = (struct mac_driver *)mac_drv; *link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG); } /** *hns_xgmac_get_regs - dump xgmac regs *@mac_drv: mac driver *@cmd:ethtool cmd *@data:data for value of regs */ static void hns_xgmac_get_regs(void *mac_drv, void *data) { u32 i = 0; struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 *regs = data; u64 qtmp; /* base config registers */ regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG); regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG); regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG); regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG); regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG); regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG); regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG); regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG); regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG); regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG); regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG); regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG); regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG); regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG); regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG); regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG); regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG); regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG); regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG); regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG); regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG); regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG); regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG); regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG); regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG); regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG); regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG); regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG); regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG); regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG); regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG); regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG); regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG); regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG); regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG); regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG); regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG); regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG); regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG); regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG); regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG); regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG); regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG); regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG); regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG); 
regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG); regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG); regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG); regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG); regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG); regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG); regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG); regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG); regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG); regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG); regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG); regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG); regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG); regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG); regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG); regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG); regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG); regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG); regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG); regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG); regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG); regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG); regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG); regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG); regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG); regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG); /* status registers */ #define hns_xgmac_cpy_q(p, q) \ do {\ *(p) = (u32)(q);\ *((p) + 1) = (u32)((q) >> 32);\ } while (0) qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT); hns_xgmac_cpy_q(&regs[73], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE); hns_xgmac_cpy_q(&regs[75], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN); hns_xgmac_cpy_q(&regs[77], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS); hns_xgmac_cpy_q(&regs[79], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS); hns_xgmac_cpy_q(&regs[81], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS); hns_xgmac_cpy_q(&regs[83], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS); hns_xgmac_cpy_q(&regs[85], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS); hns_xgmac_cpy_q(&regs[87], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS); hns_xgmac_cpy_q(&regs[89], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS); hns_xgmac_cpy_q(&regs[91], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK); hns_xgmac_cpy_q(&regs[93], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE); hns_xgmac_cpy_q(&regs[95], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER); hns_xgmac_cpy_q(&regs[97], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS); hns_xgmac_cpy_q(&regs[99], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS); hns_xgmac_cpy_q(&regs[101], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS); hns_xgmac_cpy_q(&regs[103], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS); hns_xgmac_cpy_q(&regs[105], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS); hns_xgmac_cpy_q(&regs[107], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS); hns_xgmac_cpy_q(&regs[109], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS); hns_xgmac_cpy_q(&regs[111], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS); 
hns_xgmac_cpy_q(&regs[113], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS); hns_xgmac_cpy_q(&regs[115], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS); hns_xgmac_cpy_q(&regs[117], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS); hns_xgmac_cpy_q(&regs[119], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS); hns_xgmac_cpy_q(&regs[121], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS); hns_xgmac_cpy_q(&regs[123], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS); hns_xgmac_cpy_q(&regs[125], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS); hns_xgmac_cpy_q(&regs[127], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS); hns_xgmac_cpy_q(&regs[129], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS); hns_xgmac_cpy_q(&regs[131], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS); hns_xgmac_cpy_q(&regs[133], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS); hns_xgmac_cpy_q(&regs[135], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS); hns_xgmac_cpy_q(&regs[137], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS); hns_xgmac_cpy_q(&regs[139], qtmp); /* RX */ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT); hns_xgmac_cpy_q(&regs[141], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE); hns_xgmac_cpy_q(&regs[143], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN); hns_xgmac_cpy_q(&regs[145], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS); hns_xgmac_cpy_q(&regs[147], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS); hns_xgmac_cpy_q(&regs[149], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS); hns_xgmac_cpy_q(&regs[151], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS); hns_xgmac_cpy_q(&regs[153], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS); hns_xgmac_cpy_q(&regs[155], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS); hns_xgmac_cpy_q(&regs[157], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS); hns_xgmac_cpy_q(&regs[159], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK); hns_xgmac_cpy_q(&regs[161], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE); hns_xgmac_cpy_q(&regs[163], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER); hns_xgmac_cpy_q(&regs[165], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS); hns_xgmac_cpy_q(&regs[167], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS); hns_xgmac_cpy_q(&regs[169], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS); hns_xgmac_cpy_q(&regs[171], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS); hns_xgmac_cpy_q(&regs[173], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS); hns_xgmac_cpy_q(&regs[175], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS); hns_xgmac_cpy_q(&regs[177], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS); hns_xgmac_cpy_q(&regs[179], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS); hns_xgmac_cpy_q(&regs[181], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS); hns_xgmac_cpy_q(&regs[183], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS); hns_xgmac_cpy_q(&regs[185], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS); hns_xgmac_cpy_q(&regs[187], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS); hns_xgmac_cpy_q(&regs[189], qtmp); qtmp = hns_mac_reg_read64(drv, 
XGMAC_RX_PRI5PAUSEPKTS); hns_xgmac_cpy_q(&regs[191], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS); hns_xgmac_cpy_q(&regs[193], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS); hns_xgmac_cpy_q(&regs[195], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS); hns_xgmac_cpy_q(&regs[197], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS); hns_xgmac_cpy_q(&regs[199], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS); hns_xgmac_cpy_q(&regs[201], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS); hns_xgmac_cpy_q(&regs[203], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS); hns_xgmac_cpy_q(&regs[205], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS); hns_xgmac_cpy_q(&regs[207], qtmp); /* mark end of mac regs */ for (i = 208; i < 214; i++) regs[i] = 0xaaaaaaaa; } /** *hns_xgmac_get_stats - get xgmac statistic *@mac_drv: mac driver *@data:data for value of stats regs */ static void hns_xgmac_get_stats(void *mac_drv, u64 *data) { u32 i; u64 *buf = data; struct mac_driver *drv = (struct mac_driver *)mac_drv; struct mac_hw_stats *hw_stats = NULL; hw_stats = &drv->mac_cb->hw_stats; for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { buf[i] = DSAF_STATS_READ(hw_stats, g_xgmac_stats_string[i].offset); } } /** *hns_xgmac_get_strings - get xgmac strings name *@stringset: type of values in data *@data:data for value of string name */ static void hns_xgmac_get_strings(u32 stringset, u8 *data) { char *buff = (char *)data; u32 i; if (stringset != ETH_SS_STATS) return; for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { snprintf(buff, ETH_GSTRING_LEN, g_xgmac_stats_string[i].desc); buff = buff + ETH_GSTRING_LEN; } } /** *hns_xgmac_get_sset_count - get xgmac string set count *@stringset: type of values in data *return xgmac string set count */ static int hns_xgmac_get_sset_count(int stringset) { if (stringset == ETH_SS_STATS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; } /** *hns_xgmac_get_regs_count - get xgmac regs count *return xgmac regs count */ static int hns_xgmac_get_regs_count(void) { return HNS_XGMAC_DUMP_NUM; } void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) { struct mac_driver *mac_drv; mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL); if (!mac_drv) return NULL; mac_drv->mac_init = hns_xgmac_init; mac_drv->mac_enable = hns_xgmac_enable; mac_drv->mac_disable = hns_xgmac_disable; mac_drv->mac_id = mac_param->mac_id; mac_drv->mac_mode = mac_param->mac_mode; mac_drv->io_base = mac_param->vaddr; mac_drv->dev = mac_param->dev; mac_drv->mac_cb = mac_cb; mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr; mac_drv->set_an_mode = NULL; mac_drv->config_loopback = NULL; mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc; mac_drv->config_half_duplex = NULL; mac_drv->set_rx_ignore_pause_frames = hns_xgmac_set_rx_ignore_pause_frames; mac_drv->mac_free = hns_xgmac_free; mac_drv->adjust_link = NULL; mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames; mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length; mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg; mac_drv->autoneg_stat = NULL; mac_drv->get_info = hns_xgmac_get_info; mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg; mac_drv->get_link_status = hns_xgmac_get_link_status; mac_drv->get_regs = hns_xgmac_get_regs; mac_drv->get_ethtool_stats = hns_xgmac_get_stats; mac_drv->get_sset_count = hns_xgmac_get_sset_count; mac_drv->get_regs_count = 
hns_xgmac_get_regs_count; mac_drv->get_strings = hns_xgmac_get_strings; mac_drv->update_stats = hns_xgmac_update_stats; return (void *)mac_drv; }
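The code_after for this record is cut off below, so the hns_xgmac_get_sset_count diff itself is not visible here; what the code_before does show is the ethtool contract the callback participates in: get_sset_count tells the core how many ETH_GSTRING_LEN-sized slots to allocate, and get_strings/get_ethtool_stats must then fill exactly that many. A hedged userspace C sketch of that contract follows; the stand-in names and the three sample strings are illustrative, and ETH_GSTRING_LEN's value matches the kernel's but is defined locally here.

#include <stdio.h>

#define ETH_GSTRING_LEN 32               /* kernel value, redefined as a stand-in */
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const stats_names[] = {
    "xgmac_tx_good_pkts_all",            /* a few names from the table above */
    "xgmac_rx_good_pkt",
    "xgmac_rx_fcs_pkt",
};

/* Mirrors hns_xgmac_get_sset_count: the count must equal the number
 * of entries the strings callback writes, because the caller sizes
 * its buffer as count * ETH_GSTRING_LEN. */
static int get_sset_count(void)
{
    return ARRAY_SIZE(stats_names);
}

static void get_strings(char *buff)
{
    for (size_t i = 0; i < ARRAY_SIZE(stats_names); i++) {
        /* Pass the name as an argument, never as the format string. */
        snprintf(buff, ETH_GSTRING_LEN, "%s", stats_names[i]);
        buff += ETH_GSTRING_LEN;
    }
}

int main(void)
{
    char buf[ARRAY_SIZE(stats_names) * ETH_GSTRING_LEN];
    get_strings(buf);
    for (int i = 0; i < get_sset_count(); i++)
        printf("%s\n", buf + i * ETH_GSTRING_LEN);
    return 0;
}

Note that the record's hns_xgmac_get_strings passes g_xgmac_stats_string[i].desc directly as the snprintf format string; the "%s" form above is the safer general pattern.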
/* * Copyright (c) 2014-2015 Hisilicon Limited. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/of_mdio.h> #include "hns_dsaf_main.h" #include "hns_dsaf_mac.h" #include "hns_dsaf_xgmac.h" #include "hns_dsaf_reg.h" static const struct mac_stats_string g_xgmac_stats_string[] = { {"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)}, {"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)}, {"xgmac_tx_total_pkts_minto64", MAC_STATS_FIELD_OFF(tx_under_min_pkts)}, {"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)}, {"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)}, {"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)}, {"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)}, {"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)}, {"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)}, {"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)}, {"xgmac_tx_good_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax_good)}, {"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)}, {"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)}, {"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)}, {"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)}, {"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)}, {"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)}, {"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)}, {"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)}, {"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)}, {"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}, {"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)}, {"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)}, {"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)}, {"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)}, {"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)}, {"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)}, {"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)}, {"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)}, {"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)}, {"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)}, {"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)}, {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)}, {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)}, {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)}, {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)}, {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)}, {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)}, {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)}, {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)}, {"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)}, {"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)}, {"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)}, {"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)}, {"xgmac_rx_good_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax_good)}, {"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)}, {"xgmac_rx_bad_pkt_untramax", 
MAC_STATS_FIELD_OFF(rx_jabber_err)}, {"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)}, {"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)}, {"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)}, {"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)}, {"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)}, {"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)}, {"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)}, {"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)}, {"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)}, {"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)}, {"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)}, {"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)}, {"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)}, {"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)}, {"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)}, {"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)}, {"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)}, {"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)}, {"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)}, {"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)}, {"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)} }; /** *hns_xgmac_tx_enable - xgmac port tx enable *@drv: mac driver *@value: value of enable */ static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value) { dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value); } /** *hns_xgmac_rx_enable - xgmac port rx enable *@drv: mac driver *@value: value of enable */ static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value) { dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value); } /** * hns_xgmac_tx_lf_rf_insert - insert lf rf control about xgmac * @mac_drv: mac driver * @mode: inserf rf or lf */ static void hns_xgmac_lf_rf_insert(struct mac_driver *mac_drv, u32 mode) { dsaf_set_dev_field(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, mode); } /** * hns_xgmac__lf_rf_control_init - initial the lf rf control register * @mac_drv: mac driver */ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv) { u32 val = 0; dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); } /** *hns_xgmac_enable - enable xgmac port *@drv: mac driver *@mode: mode of mac port */ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode) { struct mac_driver *drv = (struct mac_driver *)mac_drv; hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_NO_LF_RF_INSERT); /*enable XGE rX/tX */ if (mode == MAC_COMM_MODE_TX) { hns_xgmac_tx_enable(drv, 1); } else if (mode == MAC_COMM_MODE_RX) { hns_xgmac_rx_enable(drv, 1); } else if (mode == MAC_COMM_MODE_RX_AND_TX) { hns_xgmac_tx_enable(drv, 1); hns_xgmac_rx_enable(drv, 1); } else { dev_err(drv->dev, "error mac mode:%d\n", mode); } } /** *hns_xgmac_disable - disable xgmac port *@mac_drv: mac driver *@mode: mode of mac port */ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode) { struct mac_driver *drv = (struct mac_driver *)mac_drv; if (mode == MAC_COMM_MODE_TX) { hns_xgmac_tx_enable(drv, 0); } else if (mode == MAC_COMM_MODE_RX) { hns_xgmac_rx_enable(drv, 0); } else if (mode == MAC_COMM_MODE_RX_AND_TX) { hns_xgmac_tx_enable(drv, 0); 
hns_xgmac_rx_enable(drv, 0); } hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_LF_INSERT); } /** *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable *@drv: mac driver *@tx_value: tx value *@rx_value: rx value *return status */ static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value, u32 rx_value) { u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG); dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value); dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value); dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin); } /* clr exc irq for xge*/ static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en) { u32 clr_vlue = 0xfffffffful; u32 msk_vlue = en ? 0xfffffffful : 0; /*1 is en, 0 is dis*/ dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_vlue); dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_vlue); } /** *hns_xgmac_init - initialize XGE *@mac_drv: mac driver */ static void hns_xgmac_init(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct dsaf_device *dsaf_dev = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 port = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0); mdelay(100); dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1); mdelay(100); hns_xgmac_lf_rf_control_init(drv); hns_xgmac_exc_irq_en(drv, 0); hns_xgmac_pma_fec_enable(drv, 0x0, 0x0); hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX); } /** *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable the same time *@mac_drv: mac driver *@newval:enable of pad and crc */ static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval); dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval); dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval); dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin); } /** *hns_xgmac_pausefrm_cfg - set pause param about xgmac *@mac_drv: mac driver *@newval:enable of pad and crc */ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en); dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin); } static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 high_val = mac_addr[1] | (mac_addr[0] << 8); u32 low_val = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) | (mac_addr[2] << 24); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val); dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val); } /** *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac *@mac_drv: mac driver *@enable:enable rx pause param */ static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, XGMAC_PAUSE_CTL_RX_B, !!enable); } /** *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac *@mac_drv: mac driver *@enable:enable tx pause param */ static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG, XGMAC_PAUSE_CTL_TX_B, !!enable); /*if enable is not zero ,set tx pause time */ if (enable) dsaf_write_dev(drv, 
XGMAC_MAC_PAUSE_TIME_REG, enable); } /** *hns_xgmac_config_max_frame_length - set xgmac max frame length *@mac_drv: mac driver *@newval:xgmac max frame length */ static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval) { struct mac_driver *drv = (struct mac_driver *)mac_drv; dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval); } void hns_xgmac_update_stats(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats; /* TX */ hw_stats->tx_fragment_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT); hw_stats->tx_undersize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE); hw_stats->tx_under_min_pkts = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN); hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS); hw_stats->tx_65to127 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS); hw_stats->tx_128to255 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS); hw_stats->tx_256to511 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS); hw_stats->tx_512to1023 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS); hw_stats->tx_1024to1518 = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS); hw_stats->tx_1519tomax = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS); hw_stats->tx_1519tomax_good = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK); hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE); hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER); hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS); hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS); hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS); hw_stats->tx_total_bytes = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS); hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS); hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS); hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS); hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS); hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS); hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS); hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS); hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS); hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS); hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS); hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS); hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS); hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS); hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS); hw_stats->rx_good_from_sw = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS); hw_stats->rx_bad_from_sw = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS); hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS); /* RX */ hw_stats->rx_fragment_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT); hw_stats->rx_undersize = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE); hw_stats->rx_under_min = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN); hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS); hw_stats->rx_65to127 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS); hw_stats->rx_128to255 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS); hw_stats->rx_256to511 = hns_mac_reg_read64(drv, 
XGMAC_RX_PKTS_256TO511OCTETS); hw_stats->rx_512to1023 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS); hw_stats->rx_1024to1518 = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS); hw_stats->rx_1519tomax = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS); hw_stats->rx_1519tomax_good = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK); hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE); hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER); hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS); hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS); hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS); hw_stats->rx_total_bytes = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS); hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS); hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS); hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS); hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS); hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS); hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS); hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS); hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS); hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS); hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS); hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS); hw_stats->rx_unknown_ctrl = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS); hw_stats->tx_good_to_sw = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS); hw_stats->tx_bad_to_sw = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS); hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS); hw_stats->rx_symbol_err = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS); hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS); } /** *hns_xgmac_free - free xgmac driver *@mac_drv: mac driver */ static void hns_xgmac_free(void *mac_drv) { struct mac_driver *drv = (struct mac_driver *)mac_drv; struct dsaf_device *dsaf_dev = (struct dsaf_device *)dev_get_drvdata(drv->dev); u32 mac_id = drv->mac_id; dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0); } /** *hns_xgmac_get_info - get xgmac information *@mac_drv: mac driver *@mac_info:mac information */ static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 pause_time, pause_ctrl, port_mode, ctrl_val; ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B); mac_info->auto_neg = 0; pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG); mac_info->tx_pause_time = pause_time; port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG); mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M, XGMAC_PORT_MODE_TX_S) && dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M, XGMAC_PORT_MODE_RX_S); mac_info->duplex = 1; mac_info->speed = MAC_SPEED_10000; pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B); mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B); } /** *hns_xgmac_get_pausefrm_cfg - get xgmac pause param *@mac_drv: mac driver *@rx_en:xgmac rx pause enable *@tx_en:xgmac tx pause enable */ static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 
*rx_en, u32 *tx_en) { struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 pause_ctrl; pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); *rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B); *tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B); } /** *hns_xgmac_get_link_status - get xgmac link status *@mac_drv: mac driver *@link_stat: xgmac link stat */ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat) { struct mac_driver *drv = (struct mac_driver *)mac_drv; *link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG); } /** *hns_xgmac_get_regs - dump xgmac regs *@mac_drv: mac driver *@cmd:ethtool cmd *@data:data for value of regs */ static void hns_xgmac_get_regs(void *mac_drv, void *data) { u32 i = 0; struct mac_driver *drv = (struct mac_driver *)mac_drv; u32 *regs = data; u64 qtmp; /* base config registers */ regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG); regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG); regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG); regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG); regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG); regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG); regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG); regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG); regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG); regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG); regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG); regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG); regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG); regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG); regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG); regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG); regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG); regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG); regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG); regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG); regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG); regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG); regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG); regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG); regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG); regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG); regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG); regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG); regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG); regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG); regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG); regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG); regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG); regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG); regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG); regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG); regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG); regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG); regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG); regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG); regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG); regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG); regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG); regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG); regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG); regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG); regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG); 
regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG); regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG); regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG); regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG); regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG); regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG); regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG); regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG); regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG); regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG); regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG); regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG); regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG); regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG); regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG); regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG); regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG); regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG); regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG); regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG); regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG); regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG); regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG); regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG); regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG); regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG); /* status registers */ #define hns_xgmac_cpy_q(p, q) \ do {\ *(p) = (u32)(q);\ *((p) + 1) = (u32)((q) >> 32);\ } while (0) qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT); hns_xgmac_cpy_q(&regs[73], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE); hns_xgmac_cpy_q(&regs[75], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN); hns_xgmac_cpy_q(&regs[77], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS); hns_xgmac_cpy_q(&regs[79], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS); hns_xgmac_cpy_q(&regs[81], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS); hns_xgmac_cpy_q(&regs[83], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS); hns_xgmac_cpy_q(&regs[85], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS); hns_xgmac_cpy_q(&regs[87], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS); hns_xgmac_cpy_q(&regs[89], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS); hns_xgmac_cpy_q(&regs[91], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK); hns_xgmac_cpy_q(&regs[93], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE); hns_xgmac_cpy_q(&regs[95], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER); hns_xgmac_cpy_q(&regs[97], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS); hns_xgmac_cpy_q(&regs[99], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS); hns_xgmac_cpy_q(&regs[101], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS); hns_xgmac_cpy_q(&regs[103], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS); hns_xgmac_cpy_q(&regs[105], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS); hns_xgmac_cpy_q(&regs[107], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS); hns_xgmac_cpy_q(&regs[109], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS); hns_xgmac_cpy_q(&regs[111], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS); 
hns_xgmac_cpy_q(&regs[113], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS); hns_xgmac_cpy_q(&regs[115], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS); hns_xgmac_cpy_q(&regs[117], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS); hns_xgmac_cpy_q(&regs[119], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS); hns_xgmac_cpy_q(&regs[121], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS); hns_xgmac_cpy_q(&regs[123], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS); hns_xgmac_cpy_q(&regs[125], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS); hns_xgmac_cpy_q(&regs[127], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS); hns_xgmac_cpy_q(&regs[129], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS); hns_xgmac_cpy_q(&regs[131], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS); hns_xgmac_cpy_q(&regs[133], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS); hns_xgmac_cpy_q(&regs[135], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS); hns_xgmac_cpy_q(&regs[137], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS); hns_xgmac_cpy_q(&regs[139], qtmp); /* RX */ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT); hns_xgmac_cpy_q(&regs[141], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE); hns_xgmac_cpy_q(&regs[143], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN); hns_xgmac_cpy_q(&regs[145], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS); hns_xgmac_cpy_q(&regs[147], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS); hns_xgmac_cpy_q(&regs[149], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS); hns_xgmac_cpy_q(&regs[151], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS); hns_xgmac_cpy_q(&regs[153], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS); hns_xgmac_cpy_q(&regs[155], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS); hns_xgmac_cpy_q(&regs[157], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS); hns_xgmac_cpy_q(&regs[159], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK); hns_xgmac_cpy_q(&regs[161], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE); hns_xgmac_cpy_q(&regs[163], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER); hns_xgmac_cpy_q(&regs[165], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS); hns_xgmac_cpy_q(&regs[167], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS); hns_xgmac_cpy_q(&regs[169], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS); hns_xgmac_cpy_q(&regs[171], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS); hns_xgmac_cpy_q(&regs[173], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS); hns_xgmac_cpy_q(&regs[175], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS); hns_xgmac_cpy_q(&regs[177], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS); hns_xgmac_cpy_q(&regs[179], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS); hns_xgmac_cpy_q(&regs[181], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS); hns_xgmac_cpy_q(&regs[183], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS); hns_xgmac_cpy_q(&regs[185], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS); hns_xgmac_cpy_q(&regs[187], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS); hns_xgmac_cpy_q(&regs[189], qtmp); qtmp = hns_mac_reg_read64(drv, 
XGMAC_RX_PRI5PAUSEPKTS); hns_xgmac_cpy_q(&regs[191], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS); hns_xgmac_cpy_q(&regs[193], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS); hns_xgmac_cpy_q(&regs[195], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS); hns_xgmac_cpy_q(&regs[197], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS); hns_xgmac_cpy_q(&regs[199], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS); hns_xgmac_cpy_q(&regs[201], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS); hns_xgmac_cpy_q(&regs[203], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS); hns_xgmac_cpy_q(&regs[205], qtmp); qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS); hns_xgmac_cpy_q(&regs[207], qtmp); /* mark end of mac regs */ for (i = 208; i < 214; i++) regs[i] = 0xaaaaaaaa; } /** *hns_xgmac_get_stats - get xgmac statistic *@mac_drv: mac driver *@data:data for value of stats regs */ static void hns_xgmac_get_stats(void *mac_drv, u64 *data) { u32 i; u64 *buf = data; struct mac_driver *drv = (struct mac_driver *)mac_drv; struct mac_hw_stats *hw_stats = NULL; hw_stats = &drv->mac_cb->hw_stats; for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { buf[i] = DSAF_STATS_READ(hw_stats, g_xgmac_stats_string[i].offset); } } /** *hns_xgmac_get_strings - get xgmac strings name *@stringset: type of values in data *@data:data for value of string name */ static void hns_xgmac_get_strings(u32 stringset, u8 *data) { char *buff = (char *)data; u32 i; if (stringset != ETH_SS_STATS) return; for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) { snprintf(buff, ETH_GSTRING_LEN, g_xgmac_stats_string[i].desc); buff = buff + ETH_GSTRING_LEN; } } /** *hns_xgmac_get_sset_count - get xgmac string set count *@stringset: type of values in data *return xgmac string set count */ static int hns_xgmac_get_sset_count(int stringset) { if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; } /** *hns_xgmac_get_regs_count - get xgmac regs count *return xgmac regs count */ static int hns_xgmac_get_regs_count(void) { return HNS_XGMAC_DUMP_NUM; } void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param) { struct mac_driver *mac_drv; mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL); if (!mac_drv) return NULL; mac_drv->mac_init = hns_xgmac_init; mac_drv->mac_enable = hns_xgmac_enable; mac_drv->mac_disable = hns_xgmac_disable; mac_drv->mac_id = mac_param->mac_id; mac_drv->mac_mode = mac_param->mac_mode; mac_drv->io_base = mac_param->vaddr; mac_drv->dev = mac_param->dev; mac_drv->mac_cb = mac_cb; mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr; mac_drv->set_an_mode = NULL; mac_drv->config_loopback = NULL; mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc; mac_drv->config_half_duplex = NULL; mac_drv->set_rx_ignore_pause_frames = hns_xgmac_set_rx_ignore_pause_frames; mac_drv->mac_free = hns_xgmac_free; mac_drv->adjust_link = NULL; mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames; mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length; mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg; mac_drv->autoneg_stat = NULL; mac_drv->get_info = hns_xgmac_get_info; mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg; mac_drv->get_link_status = hns_xgmac_get_link_status; mac_drv->get_regs = hns_xgmac_get_regs; mac_drv->get_ethtool_stats = hns_xgmac_get_stats; mac_drv->get_sset_count = hns_xgmac_get_sset_count; 
	mac_drv->get_regs_count = hns_xgmac_get_regs_count;
	mac_drv->get_strings = hns_xgmac_get_strings;
	mac_drv->update_stats = hns_xgmac_update_stats;

	return (void *)mac_drv;
}
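A note on the post-fix listing above: hns_xgmac_get_regs() stores each 64-bit MIB counter as two consecutive u32 slots in the register dump through the hns_xgmac_cpy_q macro. The sketch below is a standalone model of that split, not driver code; the macro body mirrors the one in the listing, while the counter value is made up for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors hns_xgmac_cpy_q: low 32 bits go to *(p), high 32 bits to *(p + 1). */
#define cpy_q(p, q) \
	do { \
		*(p) = (uint32_t)(q); \
		*((p) + 1) = (uint32_t)((q) >> 32); \
	} while (0)

int main(void)
{
	uint32_t regs[2];
	uint64_t counter = 0x1122334455667788ULL; /* illustrative value */

	cpy_q(&regs[0], counter);

	/* Low word lands first, high word second. */
	assert(regs[0] == 0x55667788u);
	assert(regs[1] == 0x11223344u);

	/* Reassembling the two slots recovers the original counter. */
	uint64_t back = ((uint64_t)regs[1] << 32) | regs[0];
	assert(back == counter);
	printf("round trip ok: %#llx\n", (unsigned long long)back);
	return 0;
}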
func_before:

static int hns_xgmac_get_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS)
		return ARRAY_SIZE(g_xgmac_stats_string);

	return 0;
}
func_after:

static int hns_xgmac_get_sset_count(int stringset)
{
	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
		return ARRAY_SIZE(g_xgmac_stats_string);

	return 0;
}
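The func_before/func_after pair above is the entire fix. The ethtool core sizes the string buffer from the driver's get_sset_count() callback and then lets get_strings() fill it, so a count smaller than what the strings path actually writes walks off the end of the allocation: the buffer-bounds pattern this record tags as CVE-2017-18222 / CWE-119. The following is a minimal self-contained model of that contract, with made-up names and sizes rather than the kernel's.

#include <stdio.h>

#define ETH_GSTRING_LEN 32		/* same constant the kernel uses */
#define N_STATS 3			/* hypothetical table size */
enum { SS_STATS, SS_PRIV_FLAGS };	/* stand-ins for ETH_SS_* */

/* Driver side: the strings callback always writes N_STATS entries. */
static void get_strings(int set, char *buf)
{
	(void)set;
	for (int i = 0; i < N_STATS; i++)
		snprintf(buf + i * ETH_GSTRING_LEN, ETH_GSTRING_LEN,
			 "stat_%d", i);
}

/* Pre-fix behaviour: only SS_STATS is counted, SS_PRIV_FLAGS reports 0. */
static int get_sset_count(int set)
{
	return set == SS_STATS ? N_STATS : 0;
}

int main(void)
{
	int set = SS_PRIV_FLAGS;
	int count = get_sset_count(set);	/* pre-fix: reports 0 */
	char buf[N_STATS * ETH_GSTRING_LEN];	/* deliberately big enough here */

	get_strings(set, buf);	/* the driver still writes N_STATS entries */
	printf("core would size the buffer for %d strings; driver wrote %d\n",
	       count, N_STATS);
	return 0;
}

Compiled as-is this prints a reported count of 0 against 3 written strings, which is exactly the mismatch the one-line patch removes by making both callbacks agree.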
diff:
  added   (line 784):	if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
  deleted (line 784):	if (stringset == ETH_SS_STATS)
num_lines_added: 1
num_lines_deleted: 1
num_lines_in_file: 632
num_tokens_in_file: 5,357
num_lines_in_method: 6
num_tokens_in_method: 22
method_complexity: 2
repo: https://github.com/torvalds/linux
cve_id: CVE-2017-18222
cwe_id: CWE-119
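One more pattern from this record worth a sketch before moving on: g_xgmac_stats_string pairs every ethtool string with the offset of its counter inside the stats struct (MAC_STATS_FIELD_OFF), which lets hns_xgmac_get_stats() copy all counters with one generic loop (DSAF_STATS_READ). The standalone model below uses hypothetical struct and field names and plain offsetof() semantics; it is the shape of the technique, not the driver's code.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stats struct standing in for struct mac_hw_stats. */
struct hw_stats {
	unsigned long long tx_good_pkts;
	unsigned long long rx_good_pkts;
};

/* Each entry pairs an ethtool string with a byte offset into hw_stats. */
struct stats_string {
	const char *desc;
	unsigned long offset;
};

#define STATS_FIELD_OFF(f) offsetof(struct hw_stats, f)
#define STATS_READ(s, off) (*(unsigned long long *)((char *)(s) + (off)))

static const struct stats_string strings[] = {
	{ "tx_good_pkts", STATS_FIELD_OFF(tx_good_pkts) },
	{ "rx_good_pkts", STATS_FIELD_OFF(rx_good_pkts) },
};

int main(void)
{
	struct hw_stats st = { .tx_good_pkts = 42, .rx_good_pkts = 7 };

	/* One generic loop serves every counter the table describes. */
	for (size_t i = 0; i < sizeof(strings) / sizeof(strings[0]); i++)
		printf("%s = %llu\n", strings[i].desc,
		       STATS_READ(&st, strings[i].offset));
	return 0;
}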
id: 3,284
file_name: events_base.c
programming_language: C
method_name: clear_evtchn_to_irq_row
// SPDX-License-Identifier: GPL-2.0-only /* * Xen event channels * * Xen models interrupts with abstract event channels. Because each * domain gets 1024 event channels, but NR_IRQ is not that large, we * must dynamically map irqs<->event channels. The event channels * interface with the rest of the kernel by defining a xen interrupt * chip. When an event is received, it is mapped to an irq and sent * through the normal interrupt processing path. * * There are four kinds of events which can be mapped to an event * channel: * * 1. Inter-domain notifications. This includes all the virtual * device events, since they're driven by front-ends in another domain * (typically dom0). * 2. VIRQs, typically used for timers. These are per-cpu events. * 3. IPIs. * 4. PIRQs - Hardware interrupts. * * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 */ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt #include <linux/linkage.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/memblock.h> #include <linux/slab.h> #include <linux/irqnr.h> #include <linux/pci.h> #ifdef CONFIG_X86 #include <asm/desc.h> #include <asm/ptrace.h> #include <asm/idtentry.h> #include <asm/irq.h> #include <asm/io_apic.h> #include <asm/i8259.h> #include <asm/xen/pci.h> #endif #include <asm/sync_bitops.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> #include <xen/page.h> #include <xen/xen.h> #include <xen/hvm.h> #include <xen/xen-ops.h> #include <xen/events.h> #include <xen/interface/xen.h> #include <xen/interface/event_channel.h> #include <xen/interface/hvm/hvm_op.h> #include <xen/interface/hvm/params.h> #include <xen/interface/physdev.h> #include <xen/interface/sched.h> #include <xen/interface/vcpu.h> #include <asm/hw_irq.h> #include "events_internal.h" const struct evtchn_ops *evtchn_ops; /* * This lock protects updates to the following mapping and reference-count * arrays. The lock does not need to be acquired to read the mapping tables. */ static DEFINE_MUTEX(irq_mapping_update_lock); static LIST_HEAD(xen_irq_list_head); /* IRQ <-> VIRQ mapping. */ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; /* IRQ <-> IPI mapping */ static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; int **evtchn_to_irq; #ifdef CONFIG_X86 static unsigned long *pirq_eoi_map; #endif static bool (*pirq_needs_eoi)(unsigned irq); #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq))) #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq))) #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq)) /* Xen will never allocate port zero for any purpose. 
*/ #define VALID_EVTCHN(chn) ((chn) != 0) static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY]; static struct irq_chip xen_dynamic_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; static void enable_dynirq(struct irq_data *data); static void disable_dynirq(struct irq_data *data); static void clear_evtchn_to_irq_row(unsigned row) { unsigned col; for (col = 0; col < EVTCHN_PER_ROW; col++) evtchn_to_irq[row][col] = -1; } static void clear_evtchn_to_irq_all(void) { unsigned row; for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { if (evtchn_to_irq[row] == NULL) continue; clear_evtchn_to_irq_row(row); } } static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) { unsigned row; unsigned col; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; row = EVTCHN_ROW(evtchn); col = EVTCHN_COL(evtchn); if (evtchn_to_irq[row] == NULL) { /* Unallocated irq entries return -1 anyway */ if (irq == -1) return 0; evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); if (evtchn_to_irq[row] == NULL) return -ENOMEM; clear_evtchn_to_irq_row(row); } evtchn_to_irq[row][col] = irq; return 0; } int get_evtchn_to_irq(evtchn_port_t evtchn) { if (evtchn >= xen_evtchn_max_channels()) return -1; if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) return -1; return evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]; } /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { if (irq < nr_legacy_irqs()) return legacy_info_ptrs[irq]; else return irq_get_chip_data(irq); } static void set_info_for_irq(unsigned int irq, struct irq_info *info) { if (irq < nr_legacy_irqs()) legacy_info_ptrs[irq] = info; else irq_set_chip_data(irq, info); } /* Constructors for packed IRQ information. */ static int xen_irq_info_common_setup(struct irq_info *info, unsigned irq, enum xen_irq_type type, evtchn_port_t evtchn, unsigned short cpu) { int ret; BUG_ON(info->type != IRQT_UNBOUND && info->type != type); info->type = type; info->irq = irq; info->evtchn = evtchn; info->cpu = cpu; ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) return ret; irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); return xen_evtchn_port_setup(info); } static int xen_irq_info_evtchn_setup(unsigned irq, evtchn_port_t evtchn) { struct irq_info *info = info_for_irq(irq); return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0); } static int xen_irq_info_ipi_setup(unsigned cpu, unsigned irq, evtchn_port_t evtchn, enum ipi_vector ipi) { struct irq_info *info = info_for_irq(irq); info->u.ipi = ipi; per_cpu(ipi_to_irq, cpu)[ipi] = irq; return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0); } static int xen_irq_info_virq_setup(unsigned cpu, unsigned irq, evtchn_port_t evtchn, unsigned virq) { struct irq_info *info = info_for_irq(irq); info->u.virq = virq; per_cpu(virq_to_irq, cpu)[virq] = irq; return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0); } static int xen_irq_info_pirq_setup(unsigned irq, evtchn_port_t evtchn, unsigned pirq, unsigned gsi, uint16_t domid, unsigned char flags) { struct irq_info *info = info_for_irq(irq); info->u.pirq.pirq = pirq; info->u.pirq.gsi = gsi; info->u.pirq.domid = domid; info->u.pirq.flags = flags; return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0); } static void xen_irq_info_cleanup(struct irq_info *info) { set_evtchn_to_irq(info->evtchn, -1); info->evtchn = 0; } /* * Accessors for packed IRQ information. 
*/ evtchn_port_t evtchn_from_irq(unsigned irq) { if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)) return 0; return info_for_irq(irq)->evtchn; } unsigned int irq_from_evtchn(evtchn_port_t evtchn) { return get_evtchn_to_irq(evtchn); } EXPORT_SYMBOL_GPL(irq_from_evtchn); int irq_from_virq(unsigned int cpu, unsigned int virq) { return per_cpu(virq_to_irq, cpu)[virq]; } static enum ipi_vector ipi_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_IPI); return info->u.ipi; } static unsigned virq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_VIRQ); return info->u.virq; } static unsigned pirq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.pirq; } static enum xen_irq_type type_from_irq(unsigned irq) { return info_for_irq(irq)->type; } unsigned cpu_from_irq(unsigned irq) { return info_for_irq(irq)->cpu; } unsigned int cpu_from_evtchn(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); unsigned ret = 0; if (irq != -1) ret = cpu_from_irq(irq); return ret; } #ifdef CONFIG_X86 static bool pirq_check_eoi_map(unsigned irq) { return test_bit(pirq_from_irq(irq), pirq_eoi_map); } #endif static bool pirq_needs_eoi_flag(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.flags & PIRQ_NEEDS_EOI; } static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu) { int irq = get_evtchn_to_irq(evtchn); struct irq_info *info = info_for_irq(irq); BUG_ON(irq == -1); #ifdef CONFIG_SMP cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu)); #endif xen_evtchn_port_bind_to_cpu(info, cpu); info->cpu = cpu; } /** * notify_remote_via_irq - send event to remote end of event channel via irq * @irq: irq of event channel to send event to * * Unlike notify_remote_via_evtchn(), this is safe to use across * save/restore. Notifications on a broken connection are silently * dropped. */ void notify_remote_via_irq(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL_GPL(notify_remote_via_irq); static void xen_irq_init(unsigned irq) { struct irq_info *info; #ifdef CONFIG_SMP /* By default all event channels notify CPU#0. */ cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0)); #endif info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) panic("Unable to allocate metadata for IRQ%d\n", irq); info->type = IRQT_UNBOUND; info->refcnt = -1; set_info_for_irq(irq, info); list_add_tail(&info->list, &xen_irq_list_head); } static int __must_check xen_allocate_irqs_dynamic(int nvec) { int i, irq = irq_alloc_descs(-1, 0, nvec, -1); if (irq >= 0) { for (i = 0; i < nvec; i++) xen_irq_init(irq + i); } return irq; } static inline int __must_check xen_allocate_irq_dynamic(void) { return xen_allocate_irqs_dynamic(1); } static int __must_check xen_allocate_irq_gsi(unsigned gsi) { int irq; /* * A PV guest has no concept of a GSI (since it has no ACPI * nor access to/knowledge of the physical APICs). Therefore * all IRQs are dynamically allocated from the entire IRQ * space. */ if (xen_pv_domain() && !xen_initial_domain()) return xen_allocate_irq_dynamic(); /* Legacy IRQ descriptors are already allocated by the arch. 
*/ if (gsi < nr_legacy_irqs()) irq = gsi; else irq = irq_alloc_desc_at(gsi, -1); xen_irq_init(irq); return irq; } static void xen_free_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; list_del(&info->list); set_info_for_irq(irq, NULL); WARN_ON(info->refcnt > 0); kfree(info); /* Legacy IRQ descriptors are managed by the arch. */ if (irq < nr_legacy_irqs()) return; irq_free_desc(irq); } static void xen_evtchn_close(evtchn_port_t port) { struct evtchn_close close; close.port = port; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) BUG(); } static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); irq_status.irq = pirq_from_irq(irq); if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) irq_status.flags = 0; info->u.pirq.flags &= ~PIRQ_NEEDS_EOI; if (irq_status.flags & XENIRQSTAT_needs_eoi) info->u.pirq.flags |= PIRQ_NEEDS_EOI; } static void eoi_pirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); irq_move_masked_irq(data); if (!masked) unmask_evtchn(evtchn); } else clear_evtchn(evtchn); if (pirq_needs_eoi(data->irq)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); WARN_ON(rc); } } static void mask_ack_pirq(struct irq_data *data) { disable_dynirq(data); eoi_pirq(data); } static unsigned int __startup_pirq(unsigned int irq) { struct evtchn_bind_pirq bind_pirq; struct irq_info *info = info_for_irq(irq); evtchn_port_t evtchn = evtchn_from_irq(irq); int rc; BUG_ON(info->type != IRQT_PIRQ); if (VALID_EVTCHN(evtchn)) goto out; bind_pirq.pirq = pirq_from_irq(irq); /* NB. We are happy to share unless we are probing. */ bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ? 
BIND_PIRQ__WILL_SHARE : 0; rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq); if (rc != 0) { pr_warn("Failed to obtain physical IRQ %d\n", irq); return 0; } evtchn = bind_pirq.port; pirq_query_unmask(irq); rc = set_evtchn_to_irq(evtchn, irq); if (rc) goto err; info->evtchn = evtchn; bind_evtchn_to_cpu(evtchn, 0); rc = xen_evtchn_port_setup(info); if (rc) goto err; out: unmask_evtchn(evtchn); eoi_pirq(irq_get_irq_data(irq)); return 0; err: pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); xen_evtchn_close(evtchn); return 0; } static unsigned int startup_pirq(struct irq_data *data) { return __startup_pirq(data->irq); } static void shutdown_pirq(struct irq_data *data) { unsigned int irq = data->irq; struct irq_info *info = info_for_irq(irq); evtchn_port_t evtchn = evtchn_from_irq(irq); BUG_ON(info->type != IRQT_PIRQ); if (!VALID_EVTCHN(evtchn)) return; mask_evtchn(evtchn); xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); } static void enable_pirq(struct irq_data *data) { enable_dynirq(data); } static void disable_pirq(struct irq_data *data) { disable_dynirq(data); } int xen_irq_from_gsi(unsigned gsi) { struct irq_info *info; list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; if (info->u.pirq.gsi == gsi) return info->irq; } return -1; } EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); struct irq_info *info = info_for_irq(irq); if (info->refcnt > 0) { info->refcnt--; if (info->refcnt != 0) return; } if (VALID_EVTCHN(evtchn)) { unsigned int cpu = cpu_from_irq(irq); xen_evtchn_close(evtchn); switch (type_from_irq(irq)) { case IRQT_VIRQ: per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; break; case IRQT_IPI: per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; break; default: break; } xen_irq_info_cleanup(info); } xen_free_irq(irq); } /* * Do not make any assumptions regarding the relationship between the * IRQ number returned here and the Xen pirq argument. * * Note: We don't assign an event channel until the irq actually started * up. Return an existing irq if we've already got one for the gsi. * * Shareable implies level triggered, not shareable implies edge * triggered here. */ int xen_bind_pirq_gsi_to_irq(unsigned gsi, unsigned pirq, int shareable, char *name) { int irq = -1; struct physdev_irq irq_op; int ret; mutex_lock(&irq_mapping_update_lock); irq = xen_irq_from_gsi(gsi); if (irq != -1) { pr_info("%s: returning irq %d for gsi %u\n", __func__, irq, gsi); goto out; } irq = xen_allocate_irq_gsi(gsi); if (irq < 0) goto out; irq_op.irq = irq; irq_op.vector = 0; /* Only the privileged domain can do this. For non-priv, the pcifront * driver provides a PCI bus that does the call to do exactly * this in the priv domain. */ if (xen_initial_domain() && HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { xen_free_irq(irq); irq = -ENOSPC; goto out; } ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF, shareable ? PIRQ_SHAREABLE : 0); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } pirq_query_unmask(irq); /* We try to use the handler with the appropriate semantic for the * type of interrupt: if the interrupt is an edge triggered * interrupt we use handle_edge_irq. * * On the other hand if the interrupt is level triggered we use * handle_fasteoi_irq like the native code does for this kind of * interrupts. 
* * Depending on the Xen version, pirq_needs_eoi might return true * not only for level triggered interrupts but for edge triggered * interrupts too. In any case Xen always honors the eoi mechanism, * not injecting any more pirqs of the same kind if the first one * hasn't received an eoi yet. Therefore using the fasteoi handler * is the right choice either way. */ if (shareable) irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_fasteoi_irq, name); else irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, name); out: mutex_unlock(&irq_mapping_update_lock); return irq; } #ifdef CONFIG_PCI_MSI int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) { int rc; struct physdev_get_free_pirq op_get_free_pirq; op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI; rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); WARN_ONCE(rc == -ENOSYS, "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n"); return rc ? -1 : op_get_free_pirq.pirq; } int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, int pirq, int nvec, const char *name, domid_t domid) { int i, irq, ret; mutex_lock(&irq_mapping_update_lock); irq = xen_allocate_irqs_dynamic(nvec); if (irq < 0) goto out; for (i = 0; i < nvec; i++) { irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name); ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid, i == 0 ? 0 : PIRQ_MSI_GROUP); if (ret < 0) goto error_irq; } ret = irq_set_msi_desc(irq, msidesc); if (ret < 0) goto error_irq; out: mutex_unlock(&irq_mapping_update_lock); return irq; error_irq: while (nvec--) __unbind_from_irq(irq + nvec); mutex_unlock(&irq_mapping_update_lock); return ret; } #endif int xen_destroy_irq(int irq) { struct physdev_unmap_pirq unmap_irq; struct irq_info *info = info_for_irq(irq); int rc = -ENOENT; mutex_lock(&irq_mapping_update_lock); /* * If trying to remove a vector in a MSI group different * than the first one skip the PIRQ unmap unless this vector * is the first one in the group. */ if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) { unmap_irq.pirq = info->u.pirq.pirq; unmap_irq.domid = info->u.pirq.domid; rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); /* If another domain quits without making the pci_disable_msix * call, the Xen hypervisor takes care of freeing the PIRQs * (free_domain_pirqs). 
*/ if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) pr_info("domain %d does not have %d anymore\n", info->u.pirq.domid, info->u.pirq.pirq); else if (rc) { pr_warn("unmap irq failed %d\n", rc); goto out; } } xen_free_irq(irq); out: mutex_unlock(&irq_mapping_update_lock); return rc; } int xen_irq_from_pirq(unsigned pirq) { int irq; struct irq_info *info; mutex_lock(&irq_mapping_update_lock); list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; irq = info->irq; if (info->u.pirq.pirq == pirq) goto out; } irq = -1; out: mutex_unlock(&irq_mapping_update_lock); return irq; } int xen_pirq_from_irq(unsigned irq) { return pirq_from_irq(irq); } EXPORT_SYMBOL_GPL(xen_pirq_from_irq); int bind_evtchn_to_irq(evtchn_port_t evtchn) { int irq; int ret; if (evtchn >= xen_evtchn_max_channels()) return -ENOMEM; mutex_lock(&irq_mapping_update_lock); irq = get_evtchn_to_irq(evtchn); if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, handle_edge_irq, "event"); ret = xen_irq_info_evtchn_setup(irq, evtchn); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } /* New interdomain events are bound to VCPU 0. */ bind_evtchn_to_cpu(evtchn, 0); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_EVTCHN); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; int ret, irq; mutex_lock(&irq_mapping_update_lock); irq = per_cpu(ipi_to_irq, cpu)[ipi]; if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "ipi"); bind_ipi.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } bind_evtchn_to_cpu(evtchn, cpu); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_IPI); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, evtchn_port_t remote_port) { struct evtchn_bind_interdomain bind_interdomain; int err; bind_interdomain.remote_dom = remote_domain; bind_interdomain.remote_port = remote_port; err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); return err ? : bind_evtchn_to_irq(bind_interdomain.local_port); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq); static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) { struct evtchn_status status; evtchn_port_t port; int rc = -ENOENT; memset(&status, 0, sizeof(status)); for (port = 0; port < xen_evtchn_max_channels(); port++) { status.dom = DOMID_SELF; status.port = port; rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); if (rc < 0) continue; if (status.status != EVTCHNSTAT_virq) continue; if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { *evtchn = port; break; } } return rc; } /** * xen_evtchn_nr_channels - number of usable event channel ports * * This may be less than the maximum supported by the current * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum * supported. 
*/ unsigned xen_evtchn_nr_channels(void) { return evtchn_ops->nr_channels(); } EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels); int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn = 0; int irq, ret; mutex_lock(&irq_mapping_update_lock); irq = per_cpu(virq_to_irq, cpu)[virq]; if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; if (percpu) irq_set_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "virq"); else irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, handle_edge_irq, "virq"); bind_virq.virq = virq; bind_virq.vcpu = xen_vcpu_nr(cpu); ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq); if (ret == 0) evtchn = bind_virq.port; else { if (ret == -EEXIST) ret = find_virq(virq, cpu, &evtchn); BUG_ON(ret < 0); } ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } bind_evtchn_to_cpu(evtchn, cpu); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_VIRQ); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } static void unbind_from_irq(unsigned int irq) { mutex_lock(&irq_mapping_update_lock); __unbind_from_irq(irq); mutex_unlock(&irq_mapping_update_lock); } int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_evtchn_to_irq(evtchn); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_ipi_to_irq(ipi, cpu); if (irq < 0) return irq; irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } void unbind_from_irqhandler(unsigned int irq, void *dev_id) { struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; free_irq(irq, dev_id); unbind_from_irq(irq); } EXPORT_SYMBOL_GPL(unbind_from_irqhandler); /** * xen_set_irq_priority() - set an event channel priority. * @irq:irq bound to an event channel. * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN. 
*/ int xen_set_irq_priority(unsigned irq, unsigned priority) { struct evtchn_set_priority set_priority; set_priority.port = evtchn_from_irq(irq); set_priority.priority = priority; return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority, &set_priority); } EXPORT_SYMBOL_GPL(xen_set_irq_priority); int evtchn_make_refcounted(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); struct irq_info *info; if (irq == -1) return -ENOENT; info = info_for_irq(irq); if (!info) return -ENOENT; WARN_ON(info->refcnt != -1); info->refcnt = 1; return 0; } EXPORT_SYMBOL_GPL(evtchn_make_refcounted); int evtchn_get(evtchn_port_t evtchn) { int irq; struct irq_info *info; int err = -ENOENT; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; mutex_lock(&irq_mapping_update_lock); irq = get_evtchn_to_irq(evtchn); if (irq == -1) goto done; info = info_for_irq(irq); if (!info) goto done; err = -EINVAL; if (info->refcnt <= 0) goto done; info->refcnt++; err = 0; done: mutex_unlock(&irq_mapping_update_lock); return err; } EXPORT_SYMBOL_GPL(evtchn_get); void evtchn_put(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); if (WARN_ON(irq == -1)) return; unbind_from_irq(irq); } EXPORT_SYMBOL_GPL(evtchn_put); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) { int irq; #ifdef CONFIG_X86 if (unlikely(vector == XEN_NMI_VECTOR)) { int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu), NULL); if (rc < 0) printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc); return; } #endif irq = per_cpu(ipi_to_irq, cpu)[vector]; BUG_ON(irq < 0); notify_remote_via_irq(irq); } static void __xen_evtchn_do_upcall(void) { struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); int cpu = smp_processor_id(); do { vcpu_info->evtchn_upcall_pending = 0; xen_evtchn_handle_events(cpu); BUG_ON(!irqs_disabled()); virt_rmb(); /* Hypervisor can set upcall pending. */ } while (vcpu_info->evtchn_upcall_pending); } void xen_evtchn_do_upcall(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); __xen_evtchn_do_upcall(); irq_exit(); set_irq_regs(old_regs); } void xen_hvm_evtchn_do_upcall(void) { __xen_evtchn_do_upcall(); } EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall); /* Rebind a new event channel to an existing irq. */ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) { struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; /* Make sure the irq is masked, since the new event channel will also be masked. */ disable_irq(irq); mutex_lock(&irq_mapping_update_lock); /* After resume the irq<->evtchn mappings are all cleared out */ BUG_ON(get_evtchn_to_irq(evtchn) != -1); /* Expect irq to have been bound before, so there should be a proper type */ BUG_ON(info->type == IRQT_UNBOUND); (void)xen_irq_info_evtchn_setup(irq, evtchn); mutex_unlock(&irq_mapping_update_lock); bind_evtchn_to_cpu(evtchn, info->cpu); /* This will be deferred until interrupt is processed */ irq_set_affinity(irq, cpumask_of(info->cpu)); /* Unmask the event channel. */ enable_irq(irq); } /* Rebind an evtchn so that it gets delivered to a specific cpu */ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) { struct evtchn_bind_vcpu bind_vcpu; int masked; if (!VALID_EVTCHN(evtchn)) return -1; if (!xen_support_evtchn_rebind()) return -1; /* Send future instances of this interrupt to other vcpu. */ bind_vcpu.port = evtchn; bind_vcpu.vcpu = xen_vcpu_nr(tcpu); /* * Mask the event while changing the VCPU binding to prevent * it being delivered on an unexpected VCPU. 
*/ masked = test_and_set_mask(evtchn); /* * If this fails, it usually just indicates that we're dealing with a * virq or IPI channel, which don't actually need to be rebound. Ignore * it, but don't do the xenlinux-level rebind in that case. */ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); if (!masked) unmask_evtchn(evtchn); return 0; } static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, bool force) { unsigned tcpu = cpumask_first_and(dest, cpu_online_mask); int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu); if (!ret) irq_data_update_effective_affinity(data, cpumask_of(tcpu)); return ret; } /* To be called with desc->lock held. */ int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu) { struct irq_data *d = irq_desc_get_irq_data(desc); return set_affinity_irq(d, cpumask_of(tcpu), false); } EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn); static void enable_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) unmask_evtchn(evtchn); } static void disable_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) mask_evtchn(evtchn); } static void ack_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); irq_move_masked_irq(data); if (!masked) unmask_evtchn(evtchn); } else clear_evtchn(evtchn); } static void mask_ack_dynirq(struct irq_data *data) { disable_dynirq(data); ack_dynirq(data); } static int retrigger_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); int masked; if (!VALID_EVTCHN(evtchn)) return 0; masked = test_and_set_mask(evtchn); set_evtchn(evtchn); if (!masked) unmask_evtchn(evtchn); return 1; } static void restore_pirqs(void) { int pirq, rc, irq, gsi; struct physdev_map_pirq map_irq; struct irq_info *info; list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; pirq = info->u.pirq.pirq; gsi = info->u.pirq.gsi; irq = info->irq; /* save/restore of PT devices doesn't work, so at this point the * only devices present are GSI based emulated devices */ if (!gsi) continue; map_irq.domid = DOMID_SELF; map_irq.type = MAP_PIRQ_TYPE_GSI; map_irq.index = gsi; map_irq.pirq = pirq; rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (rc) { pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", gsi, irq, pirq, rc); xen_free_irq(irq); continue; } printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); __startup_pirq(irq); } } static void restore_cpu_virqs(unsigned int cpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn; int virq, irq; for (virq = 0; virq < NR_VIRQS; virq++) { if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) continue; BUG_ON(virq_from_irq(irq) != virq); /* Get a new binding from Xen. */ bind_virq.virq = virq; bind_virq.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0) BUG(); evtchn = bind_virq.port; /* Record the new mapping. 
*/ (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq); bind_evtchn_to_cpu(evtchn, cpu); } } static void restore_cpu_ipis(unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; int ipi, irq; for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) continue; BUG_ON(ipi_from_irq(irq) != ipi); /* Get a new binding from Xen. */ bind_ipi.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; /* Record the new mapping. */ (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); bind_evtchn_to_cpu(evtchn, cpu); } } /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) clear_evtchn(evtchn); } EXPORT_SYMBOL(xen_clear_irq_pending); void xen_set_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) set_evtchn(evtchn); } bool xen_test_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); bool ret = false; if (VALID_EVTCHN(evtchn)) ret = test_evtchn(evtchn); return ret; } /* Poll waiting for an irq to become pending with timeout. In the usual case, * the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq_timeout(int irq, u64 timeout) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) { struct sched_poll poll; poll.nr_ports = 1; poll.timeout = timeout; set_xen_guest_handle(poll.ports, &evtchn); if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) BUG(); } } EXPORT_SYMBOL(xen_poll_irq_timeout); /* Poll waiting for an irq to become pending. In the usual case, the * irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq) { xen_poll_irq_timeout(irq, 0 /* no timeout */); } /* Check whether the IRQ line is shared with other guests. */ int xen_test_irq_shared(int irq) { struct irq_info *info = info_for_irq(irq); struct physdev_irq_status_query irq_status; if (WARN_ON(!info)) return -ENOENT; irq_status.irq = info->u.pirq.pirq; if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) return 0; return !(irq_status.flags & XENIRQSTAT_shared); } EXPORT_SYMBOL_GPL(xen_test_irq_shared); void xen_irq_resume(void) { unsigned int cpu; struct irq_info *info; /* New event-channel space is not 'live' yet. */ xen_evtchn_resume(); /* No IRQ <-> event-channel mappings. 
*/ list_for_each_entry(info, &xen_irq_list_head, list) info->evtchn = 0; /* zap event-channel binding */ clear_evtchn_to_irq_all(); for_each_possible_cpu(cpu) { restore_cpu_virqs(cpu); restore_cpu_ipis(cpu); } restore_pirqs(); } static struct irq_chip xen_dynamic_chip __read_mostly = { .name = "xen-dyn", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = ack_dynirq, .irq_mask_ack = mask_ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_pirq_chip __read_mostly = { .name = "xen-pirq", .irq_startup = startup_pirq, .irq_shutdown = shutdown_pirq, .irq_enable = enable_pirq, .irq_disable = disable_pirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = eoi_pirq, .irq_eoi = eoi_pirq, .irq_mask_ack = mask_ack_pirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_percpu_chip __read_mostly = { .name = "xen-percpu", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = ack_dynirq, }; int xen_set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } EXPORT_SYMBOL_GPL(xen_set_callback_via); #ifdef CONFIG_XEN_PVHVM /* Vector callbacks are better than PCI interrupts to receive event * channel notifications because we can receive vector callbacks on any * vcpu and we don't need PCI support or APIC interactions. */ void xen_setup_callback_vector(void) { uint64_t callback_via; if (xen_have_vector_callback) { callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR); if (xen_set_callback_via(callback_via)) { pr_err("Request for Xen HVM callback vector failed\n"); xen_have_vector_callback = 0; } } } static __init void xen_alloc_callback_vector(void) { if (!xen_have_vector_callback) return; pr_info("Xen HVM callback vector for event delivery is enabled\n"); alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback); } #else void xen_setup_callback_vector(void) {} static inline void xen_alloc_callback_vector(void) {} #endif #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "xen." static bool fifo_events = true; module_param(fifo_events, bool, 0); void __init xen_init_IRQ(void) { int ret = -EINVAL; evtchn_port_t evtchn; if (fifo_events) ret = xen_evtchn_fifo_init(); if (ret < 0) xen_evtchn_2l_init(); evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()), sizeof(*evtchn_to_irq), GFP_KERNEL); BUG_ON(!evtchn_to_irq); /* No event channels are 'live' right now. */ for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++) mask_evtchn(evtchn); pirq_needs_eoi = pirq_needs_eoi_flag; #ifdef CONFIG_X86 if (xen_pv_domain()) { if (xen_initial_domain()) pci_xen_initial_domain(); } if (xen_feature(XENFEAT_hvm_callback_vector)) { xen_setup_callback_vector(); xen_alloc_callback_vector(); } if (xen_hvm_domain()) { native_init_IRQ(); /* pci_xen_hvm_init must be called after native_init_IRQ so that * __acpi_register_gsi can point at the right function */ pci_xen_hvm_init(); } else { int rc; struct physdev_pirq_eoi_gmfn eoi_gmfn; pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map); rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn); if (rc != 0) { free_page((unsigned long) pirq_eoi_map); pirq_eoi_map = NULL; } else pirq_needs_eoi = pirq_check_eoi_map; } #endif }
// SPDX-License-Identifier: GPL-2.0-only /* * Xen event channels * * Xen models interrupts with abstract event channels. Because each * domain gets 1024 event channels, but NR_IRQ is not that large, we * must dynamically map irqs<->event channels. The event channels * interface with the rest of the kernel by defining a xen interrupt * chip. When an event is received, it is mapped to an irq and sent * through the normal interrupt processing path. * * There are four kinds of events which can be mapped to an event * channel: * * 1. Inter-domain notifications. This includes all the virtual * device events, since they're driven by front-ends in another domain * (typically dom0). * 2. VIRQs, typically used for timers. These are per-cpu events. * 3. IPIs. * 4. PIRQs - Hardware interrupts. * * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 */ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt #include <linux/linkage.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/moduleparam.h> #include <linux/string.h> #include <linux/memblock.h> #include <linux/slab.h> #include <linux/irqnr.h> #include <linux/pci.h> #include <linux/spinlock.h> #ifdef CONFIG_X86 #include <asm/desc.h> #include <asm/ptrace.h> #include <asm/idtentry.h> #include <asm/irq.h> #include <asm/io_apic.h> #include <asm/i8259.h> #include <asm/xen/pci.h> #endif #include <asm/sync_bitops.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> #include <xen/page.h> #include <xen/xen.h> #include <xen/hvm.h> #include <xen/xen-ops.h> #include <xen/events.h> #include <xen/interface/xen.h> #include <xen/interface/event_channel.h> #include <xen/interface/hvm/hvm_op.h> #include <xen/interface/hvm/params.h> #include <xen/interface/physdev.h> #include <xen/interface/sched.h> #include <xen/interface/vcpu.h> #include <asm/hw_irq.h> #include "events_internal.h" const struct evtchn_ops *evtchn_ops; /* * This lock protects updates to the following mapping and reference-count * arrays. The lock does not need to be acquired to read the mapping tables. */ static DEFINE_MUTEX(irq_mapping_update_lock); /* * Lock protecting event handling loop against removing event channels. * Adding of event channels is no issue as the associated IRQ becomes active * only after everything is setup (before request_[threaded_]irq() the handler * can't be entered for an event, as the event channel will be unmasked only * then). */ static DEFINE_RWLOCK(evtchn_rwlock); /* * Lock hierarchy: * * irq_mapping_update_lock * evtchn_rwlock * IRQ-desc lock */ static LIST_HEAD(xen_irq_list_head); /* IRQ <-> VIRQ mapping. */ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1}; /* IRQ <-> IPI mapping */ static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1}; int **evtchn_to_irq; #ifdef CONFIG_X86 static unsigned long *pirq_eoi_map; #endif static bool (*pirq_needs_eoi)(unsigned irq); #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq))) #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq))) #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq)) /* Xen will never allocate port zero for any purpose. 
*/ #define VALID_EVTCHN(chn) ((chn) != 0) static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY]; static struct irq_chip xen_dynamic_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; static void enable_dynirq(struct irq_data *data); static void disable_dynirq(struct irq_data *data); static void clear_evtchn_to_irq_row(unsigned row) { unsigned col; for (col = 0; col < EVTCHN_PER_ROW; col++) WRITE_ONCE(evtchn_to_irq[row][col], -1); } static void clear_evtchn_to_irq_all(void) { unsigned row; for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { if (evtchn_to_irq[row] == NULL) continue; clear_evtchn_to_irq_row(row); } } static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq) { unsigned row; unsigned col; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; row = EVTCHN_ROW(evtchn); col = EVTCHN_COL(evtchn); if (evtchn_to_irq[row] == NULL) { /* Unallocated irq entries return -1 anyway */ if (irq == -1) return 0; evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); if (evtchn_to_irq[row] == NULL) return -ENOMEM; clear_evtchn_to_irq_row(row); } WRITE_ONCE(evtchn_to_irq[row][col], irq); return 0; } int get_evtchn_to_irq(evtchn_port_t evtchn) { if (evtchn >= xen_evtchn_max_channels()) return -1; if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL) return -1; return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]); } /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { if (irq < nr_legacy_irqs()) return legacy_info_ptrs[irq]; else return irq_get_chip_data(irq); } static void set_info_for_irq(unsigned int irq, struct irq_info *info) { if (irq < nr_legacy_irqs()) legacy_info_ptrs[irq] = info; else irq_set_chip_data(irq, info); } /* Constructors for packed IRQ information. */ static int xen_irq_info_common_setup(struct irq_info *info, unsigned irq, enum xen_irq_type type, evtchn_port_t evtchn, unsigned short cpu) { int ret; BUG_ON(info->type != IRQT_UNBOUND && info->type != type); info->type = type; info->irq = irq; info->evtchn = evtchn; info->cpu = cpu; ret = set_evtchn_to_irq(evtchn, irq); if (ret < 0) return ret; irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN); return xen_evtchn_port_setup(info); } static int xen_irq_info_evtchn_setup(unsigned irq, evtchn_port_t evtchn) { struct irq_info *info = info_for_irq(irq); return xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0); } static int xen_irq_info_ipi_setup(unsigned cpu, unsigned irq, evtchn_port_t evtchn, enum ipi_vector ipi) { struct irq_info *info = info_for_irq(irq); info->u.ipi = ipi; per_cpu(ipi_to_irq, cpu)[ipi] = irq; return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0); } static int xen_irq_info_virq_setup(unsigned cpu, unsigned irq, evtchn_port_t evtchn, unsigned virq) { struct irq_info *info = info_for_irq(irq); info->u.virq = virq; per_cpu(virq_to_irq, cpu)[virq] = irq; return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0); } static int xen_irq_info_pirq_setup(unsigned irq, evtchn_port_t evtchn, unsigned pirq, unsigned gsi, uint16_t domid, unsigned char flags) { struct irq_info *info = info_for_irq(irq); info->u.pirq.pirq = pirq; info->u.pirq.gsi = gsi; info->u.pirq.domid = domid; info->u.pirq.flags = flags; return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0); } static void xen_irq_info_cleanup(struct irq_info *info) { set_evtchn_to_irq(info->evtchn, -1); info->evtchn = 0; } /* * Accessors for packed IRQ information. 
*/ evtchn_port_t evtchn_from_irq(unsigned irq) { const struct irq_info *info = NULL; if (likely(irq < nr_irqs)) info = info_for_irq(irq); if (!info) return 0; return info->evtchn; } unsigned int irq_from_evtchn(evtchn_port_t evtchn) { return get_evtchn_to_irq(evtchn); } EXPORT_SYMBOL_GPL(irq_from_evtchn); int irq_from_virq(unsigned int cpu, unsigned int virq) { return per_cpu(virq_to_irq, cpu)[virq]; } static enum ipi_vector ipi_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_IPI); return info->u.ipi; } static unsigned virq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_VIRQ); return info->u.virq; } static unsigned pirq_from_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info == NULL); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.pirq; } static enum xen_irq_type type_from_irq(unsigned irq) { return info_for_irq(irq)->type; } unsigned cpu_from_irq(unsigned irq) { return info_for_irq(irq)->cpu; } unsigned int cpu_from_evtchn(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); unsigned ret = 0; if (irq != -1) ret = cpu_from_irq(irq); return ret; } #ifdef CONFIG_X86 static bool pirq_check_eoi_map(unsigned irq) { return test_bit(pirq_from_irq(irq), pirq_eoi_map); } #endif static bool pirq_needs_eoi_flag(unsigned irq) { struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); return info->u.pirq.flags & PIRQ_NEEDS_EOI; } static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu) { int irq = get_evtchn_to_irq(evtchn); struct irq_info *info = info_for_irq(irq); BUG_ON(irq == -1); #ifdef CONFIG_SMP cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu)); #endif xen_evtchn_port_bind_to_cpu(info, cpu); info->cpu = cpu; } /** * notify_remote_via_irq - send event to remote end of event channel via irq * @irq: irq of event channel to send event to * * Unlike notify_remote_via_evtchn(), this is safe to use across * save/restore. Notifications on a broken connection are silently * dropped. */ void notify_remote_via_irq(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) notify_remote_via_evtchn(evtchn); } EXPORT_SYMBOL_GPL(notify_remote_via_irq); static void xen_irq_init(unsigned irq) { struct irq_info *info; #ifdef CONFIG_SMP /* By default all event channels notify CPU#0. */ cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0)); #endif info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) panic("Unable to allocate metadata for IRQ%d\n", irq); info->type = IRQT_UNBOUND; info->refcnt = -1; set_info_for_irq(irq, info); list_add_tail(&info->list, &xen_irq_list_head); } static int __must_check xen_allocate_irqs_dynamic(int nvec) { int i, irq = irq_alloc_descs(-1, 0, nvec, -1); if (irq >= 0) { for (i = 0; i < nvec; i++) xen_irq_init(irq + i); } return irq; } static inline int __must_check xen_allocate_irq_dynamic(void) { return xen_allocate_irqs_dynamic(1); } static int __must_check xen_allocate_irq_gsi(unsigned gsi) { int irq; /* * A PV guest has no concept of a GSI (since it has no ACPI * nor access to/knowledge of the physical APICs). Therefore * all IRQs are dynamically allocated from the entire IRQ * space. */ if (xen_pv_domain() && !xen_initial_domain()) return xen_allocate_irq_dynamic(); /* Legacy IRQ descriptors are already allocated by the arch. 
*/ if (gsi < nr_legacy_irqs()) irq = gsi; else irq = irq_alloc_desc_at(gsi, -1); xen_irq_init(irq); return irq; } static void xen_free_irq(unsigned irq) { struct irq_info *info = info_for_irq(irq); unsigned long flags; if (WARN_ON(!info)) return; write_lock_irqsave(&evtchn_rwlock, flags); list_del(&info->list); set_info_for_irq(irq, NULL); WARN_ON(info->refcnt > 0); write_unlock_irqrestore(&evtchn_rwlock, flags); kfree(info); /* Legacy IRQ descriptors are managed by the arch. */ if (irq < nr_legacy_irqs()) return; irq_free_desc(irq); } static void xen_evtchn_close(evtchn_port_t port) { struct evtchn_close close; close.port = port; if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) BUG(); } static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; struct irq_info *info = info_for_irq(irq); BUG_ON(info->type != IRQT_PIRQ); irq_status.irq = pirq_from_irq(irq); if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) irq_status.flags = 0; info->u.pirq.flags &= ~PIRQ_NEEDS_EOI; if (irq_status.flags & XENIRQSTAT_needs_eoi) info->u.pirq.flags |= PIRQ_NEEDS_EOI; } static void eoi_pirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); irq_move_masked_irq(data); if (!masked) unmask_evtchn(evtchn); } else clear_evtchn(evtchn); if (pirq_needs_eoi(data->irq)) { rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); WARN_ON(rc); } } static void mask_ack_pirq(struct irq_data *data) { disable_dynirq(data); eoi_pirq(data); } static unsigned int __startup_pirq(unsigned int irq) { struct evtchn_bind_pirq bind_pirq; struct irq_info *info = info_for_irq(irq); evtchn_port_t evtchn = evtchn_from_irq(irq); int rc; BUG_ON(info->type != IRQT_PIRQ); if (VALID_EVTCHN(evtchn)) goto out; bind_pirq.pirq = pirq_from_irq(irq); /* NB. We are happy to share unless we are probing. */ bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ? 
BIND_PIRQ__WILL_SHARE : 0; rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq); if (rc != 0) { pr_warn("Failed to obtain physical IRQ %d\n", irq); return 0; } evtchn = bind_pirq.port; pirq_query_unmask(irq); rc = set_evtchn_to_irq(evtchn, irq); if (rc) goto err; info->evtchn = evtchn; bind_evtchn_to_cpu(evtchn, 0); rc = xen_evtchn_port_setup(info); if (rc) goto err; out: unmask_evtchn(evtchn); eoi_pirq(irq_get_irq_data(irq)); return 0; err: pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); xen_evtchn_close(evtchn); return 0; } static unsigned int startup_pirq(struct irq_data *data) { return __startup_pirq(data->irq); } static void shutdown_pirq(struct irq_data *data) { unsigned int irq = data->irq; struct irq_info *info = info_for_irq(irq); evtchn_port_t evtchn = evtchn_from_irq(irq); BUG_ON(info->type != IRQT_PIRQ); if (!VALID_EVTCHN(evtchn)) return; mask_evtchn(evtchn); xen_evtchn_close(evtchn); xen_irq_info_cleanup(info); } static void enable_pirq(struct irq_data *data) { enable_dynirq(data); } static void disable_pirq(struct irq_data *data) { disable_dynirq(data); } int xen_irq_from_gsi(unsigned gsi) { struct irq_info *info; list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; if (info->u.pirq.gsi == gsi) return info->irq; } return -1; } EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); struct irq_info *info = info_for_irq(irq); if (info->refcnt > 0) { info->refcnt--; if (info->refcnt != 0) return; } if (VALID_EVTCHN(evtchn)) { unsigned int cpu = cpu_from_irq(irq); xen_evtchn_close(evtchn); switch (type_from_irq(irq)) { case IRQT_VIRQ: per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; break; case IRQT_IPI: per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; break; default: break; } xen_irq_info_cleanup(info); } xen_free_irq(irq); } /* * Do not make any assumptions regarding the relationship between the * IRQ number returned here and the Xen pirq argument. * * Note: We don't assign an event channel until the irq actually started * up. Return an existing irq if we've already got one for the gsi. * * Shareable implies level triggered, not shareable implies edge * triggered here. */ int xen_bind_pirq_gsi_to_irq(unsigned gsi, unsigned pirq, int shareable, char *name) { int irq = -1; struct physdev_irq irq_op; int ret; mutex_lock(&irq_mapping_update_lock); irq = xen_irq_from_gsi(gsi); if (irq != -1) { pr_info("%s: returning irq %d for gsi %u\n", __func__, irq, gsi); goto out; } irq = xen_allocate_irq_gsi(gsi); if (irq < 0) goto out; irq_op.irq = irq; irq_op.vector = 0; /* Only the privileged domain can do this. For non-priv, the pcifront * driver provides a PCI bus that does the call to do exactly * this in the priv domain. */ if (xen_initial_domain() && HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { xen_free_irq(irq); irq = -ENOSPC; goto out; } ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF, shareable ? PIRQ_SHAREABLE : 0); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } pirq_query_unmask(irq); /* We try to use the handler with the appropriate semantic for the * type of interrupt: if the interrupt is an edge triggered * interrupt we use handle_edge_irq. * * On the other hand if the interrupt is level triggered we use * handle_fasteoi_irq like the native code does for this kind of * interrupts. 
* * Depending on the Xen version, pirq_needs_eoi might return true * not only for level triggered interrupts but for edge triggered * interrupts too. In any case Xen always honors the eoi mechanism, * not injecting any more pirqs of the same kind if the first one * hasn't received an eoi yet. Therefore using the fasteoi handler * is the right choice either way. */ if (shareable) irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_fasteoi_irq, name); else irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, name); out: mutex_unlock(&irq_mapping_update_lock); return irq; } #ifdef CONFIG_PCI_MSI int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) { int rc; struct physdev_get_free_pirq op_get_free_pirq; op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI; rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); WARN_ONCE(rc == -ENOSYS, "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n"); return rc ? -1 : op_get_free_pirq.pirq; } int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, int pirq, int nvec, const char *name, domid_t domid) { int i, irq, ret; mutex_lock(&irq_mapping_update_lock); irq = xen_allocate_irqs_dynamic(nvec); if (irq < 0) goto out; for (i = 0; i < nvec; i++) { irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name); ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid, i == 0 ? 0 : PIRQ_MSI_GROUP); if (ret < 0) goto error_irq; } ret = irq_set_msi_desc(irq, msidesc); if (ret < 0) goto error_irq; out: mutex_unlock(&irq_mapping_update_lock); return irq; error_irq: while (nvec--) __unbind_from_irq(irq + nvec); mutex_unlock(&irq_mapping_update_lock); return ret; } #endif int xen_destroy_irq(int irq) { struct physdev_unmap_pirq unmap_irq; struct irq_info *info = info_for_irq(irq); int rc = -ENOENT; mutex_lock(&irq_mapping_update_lock); /* * If trying to remove a vector in a MSI group different * than the first one skip the PIRQ unmap unless this vector * is the first one in the group. */ if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) { unmap_irq.pirq = info->u.pirq.pirq; unmap_irq.domid = info->u.pirq.domid; rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); /* If another domain quits without making the pci_disable_msix * call, the Xen hypervisor takes care of freeing the PIRQs * (free_domain_pirqs). 
*/ if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) pr_info("domain %d does not have %d anymore\n", info->u.pirq.domid, info->u.pirq.pirq); else if (rc) { pr_warn("unmap irq failed %d\n", rc); goto out; } } xen_free_irq(irq); out: mutex_unlock(&irq_mapping_update_lock); return rc; } int xen_irq_from_pirq(unsigned pirq) { int irq; struct irq_info *info; mutex_lock(&irq_mapping_update_lock); list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; irq = info->irq; if (info->u.pirq.pirq == pirq) goto out; } irq = -1; out: mutex_unlock(&irq_mapping_update_lock); return irq; } int xen_pirq_from_irq(unsigned irq) { return pirq_from_irq(irq); } EXPORT_SYMBOL_GPL(xen_pirq_from_irq); int bind_evtchn_to_irq(evtchn_port_t evtchn) { int irq; int ret; if (evtchn >= xen_evtchn_max_channels()) return -ENOMEM; mutex_lock(&irq_mapping_update_lock); irq = get_evtchn_to_irq(evtchn); if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, handle_edge_irq, "event"); ret = xen_irq_info_evtchn_setup(irq, evtchn); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } /* New interdomain events are bound to VCPU 0. */ bind_evtchn_to_cpu(evtchn, 0); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_EVTCHN); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } EXPORT_SYMBOL_GPL(bind_evtchn_to_irq); static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; int ret, irq; mutex_lock(&irq_mapping_update_lock); irq = per_cpu(ipi_to_irq, cpu)[ipi]; if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; irq_set_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "ipi"); bind_ipi.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } bind_evtchn_to_cpu(evtchn, cpu); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_IPI); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } int bind_interdomain_evtchn_to_irq(unsigned int remote_domain, evtchn_port_t remote_port) { struct evtchn_bind_interdomain bind_interdomain; int err; bind_interdomain.remote_dom = remote_domain; bind_interdomain.remote_port = remote_port; err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); return err ? : bind_evtchn_to_irq(bind_interdomain.local_port); } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq); static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn) { struct evtchn_status status; evtchn_port_t port; int rc = -ENOENT; memset(&status, 0, sizeof(status)); for (port = 0; port < xen_evtchn_max_channels(); port++) { status.dom = DOMID_SELF; status.port = port; rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status); if (rc < 0) continue; if (status.status != EVTCHNSTAT_virq) continue; if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { *evtchn = port; break; } } return rc; } /** * xen_evtchn_nr_channels - number of usable event channel ports * * This may be less than the maximum supported by the current * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum * supported. 
*/ unsigned xen_evtchn_nr_channels(void) { return evtchn_ops->nr_channels(); } EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels); int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn = 0; int irq, ret; mutex_lock(&irq_mapping_update_lock); irq = per_cpu(virq_to_irq, cpu)[virq]; if (irq == -1) { irq = xen_allocate_irq_dynamic(); if (irq < 0) goto out; if (percpu) irq_set_chip_and_handler_name(irq, &xen_percpu_chip, handle_percpu_irq, "virq"); else irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, handle_edge_irq, "virq"); bind_virq.virq = virq; bind_virq.vcpu = xen_vcpu_nr(cpu); ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq); if (ret == 0) evtchn = bind_virq.port; else { if (ret == -EEXIST) ret = find_virq(virq, cpu, &evtchn); BUG_ON(ret < 0); } ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq); if (ret < 0) { __unbind_from_irq(irq); irq = ret; goto out; } bind_evtchn_to_cpu(evtchn, cpu); } else { struct irq_info *info = info_for_irq(irq); WARN_ON(info == NULL || info->type != IRQT_VIRQ); } out: mutex_unlock(&irq_mapping_update_lock); return irq; } static void unbind_from_irq(unsigned int irq) { mutex_lock(&irq_mapping_update_lock); __unbind_from_irq(irq); mutex_unlock(&irq_mapping_update_lock); } int bind_evtchn_to_irqhandler(evtchn_port_t evtchn, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_evtchn_to_irq(evtchn); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler); int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain, evtchn_port_t remote_port, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler); int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU); if (irq < 0) return irq; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler); int bind_ipi_to_irqhandler(enum ipi_vector ipi, unsigned int cpu, irq_handler_t handler, unsigned long irqflags, const char *devname, void *dev_id) { int irq, retval; irq = bind_ipi_to_irq(ipi, cpu); if (irq < 0) return irq; irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME; retval = request_irq(irq, handler, irqflags, devname, dev_id); if (retval != 0) { unbind_from_irq(irq); return retval; } return irq; } void unbind_from_irqhandler(unsigned int irq, void *dev_id) { struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; free_irq(irq, dev_id); unbind_from_irq(irq); } EXPORT_SYMBOL_GPL(unbind_from_irqhandler); /** * xen_set_irq_priority() - set an event channel priority. * @irq:irq bound to an event channel. * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN. 
*/ int xen_set_irq_priority(unsigned irq, unsigned priority) { struct evtchn_set_priority set_priority; set_priority.port = evtchn_from_irq(irq); set_priority.priority = priority; return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority, &set_priority); } EXPORT_SYMBOL_GPL(xen_set_irq_priority); int evtchn_make_refcounted(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); struct irq_info *info; if (irq == -1) return -ENOENT; info = info_for_irq(irq); if (!info) return -ENOENT; WARN_ON(info->refcnt != -1); info->refcnt = 1; return 0; } EXPORT_SYMBOL_GPL(evtchn_make_refcounted); int evtchn_get(evtchn_port_t evtchn) { int irq; struct irq_info *info; int err = -ENOENT; if (evtchn >= xen_evtchn_max_channels()) return -EINVAL; mutex_lock(&irq_mapping_update_lock); irq = get_evtchn_to_irq(evtchn); if (irq == -1) goto done; info = info_for_irq(irq); if (!info) goto done; err = -EINVAL; if (info->refcnt <= 0) goto done; info->refcnt++; err = 0; done: mutex_unlock(&irq_mapping_update_lock); return err; } EXPORT_SYMBOL_GPL(evtchn_get); void evtchn_put(evtchn_port_t evtchn) { int irq = get_evtchn_to_irq(evtchn); if (WARN_ON(irq == -1)) return; unbind_from_irq(irq); } EXPORT_SYMBOL_GPL(evtchn_put); void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector) { int irq; #ifdef CONFIG_X86 if (unlikely(vector == XEN_NMI_VECTOR)) { int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu), NULL); if (rc < 0) printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc); return; } #endif irq = per_cpu(ipi_to_irq, cpu)[vector]; BUG_ON(irq < 0); notify_remote_via_irq(irq); } static void __xen_evtchn_do_upcall(void) { struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); int cpu = smp_processor_id(); read_lock(&evtchn_rwlock); do { vcpu_info->evtchn_upcall_pending = 0; xen_evtchn_handle_events(cpu); BUG_ON(!irqs_disabled()); virt_rmb(); /* Hypervisor can set upcall pending. */ } while (vcpu_info->evtchn_upcall_pending); read_unlock(&evtchn_rwlock); } void xen_evtchn_do_upcall(struct pt_regs *regs) { struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); __xen_evtchn_do_upcall(); irq_exit(); set_irq_regs(old_regs); } void xen_hvm_evtchn_do_upcall(void) { __xen_evtchn_do_upcall(); } EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall); /* Rebind a new event channel to an existing irq. */ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq) { struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; /* Make sure the irq is masked, since the new event channel will also be masked. */ disable_irq(irq); mutex_lock(&irq_mapping_update_lock); /* After resume the irq<->evtchn mappings are all cleared out */ BUG_ON(get_evtchn_to_irq(evtchn) != -1); /* Expect irq to have been bound before, so there should be a proper type */ BUG_ON(info->type == IRQT_UNBOUND); (void)xen_irq_info_evtchn_setup(irq, evtchn); mutex_unlock(&irq_mapping_update_lock); bind_evtchn_to_cpu(evtchn, info->cpu); /* This will be deferred until interrupt is processed */ irq_set_affinity(irq, cpumask_of(info->cpu)); /* Unmask the event channel. */ enable_irq(irq); } /* Rebind an evtchn so that it gets delivered to a specific cpu */ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu) { struct evtchn_bind_vcpu bind_vcpu; int masked; if (!VALID_EVTCHN(evtchn)) return -1; if (!xen_support_evtchn_rebind()) return -1; /* Send future instances of this interrupt to other vcpu. 
*/ bind_vcpu.port = evtchn; bind_vcpu.vcpu = xen_vcpu_nr(tcpu); /* * Mask the event while changing the VCPU binding to prevent * it being delivered on an unexpected VCPU. */ masked = test_and_set_mask(evtchn); /* * If this fails, it usually just indicates that we're dealing with a * virq or IPI channel, which don't actually need to be rebound. Ignore * it, but don't do the xenlinux-level rebind in that case. */ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0) bind_evtchn_to_cpu(evtchn, tcpu); if (!masked) unmask_evtchn(evtchn); return 0; } static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, bool force) { unsigned tcpu = cpumask_first_and(dest, cpu_online_mask); int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu); if (!ret) irq_data_update_effective_affinity(data, cpumask_of(tcpu)); return ret; } /* To be called with desc->lock held. */ int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu) { struct irq_data *d = irq_desc_get_irq_data(desc); return set_affinity_irq(d, cpumask_of(tcpu), false); } EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn); static void enable_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) unmask_evtchn(evtchn); } static void disable_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (VALID_EVTCHN(evtchn)) mask_evtchn(evtchn); } static void ack_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); if (!VALID_EVTCHN(evtchn)) return; if (unlikely(irqd_is_setaffinity_pending(data)) && likely(!irqd_irq_disabled(data))) { int masked = test_and_set_mask(evtchn); clear_evtchn(evtchn); irq_move_masked_irq(data); if (!masked) unmask_evtchn(evtchn); } else clear_evtchn(evtchn); } static void mask_ack_dynirq(struct irq_data *data) { disable_dynirq(data); ack_dynirq(data); } static int retrigger_dynirq(struct irq_data *data) { evtchn_port_t evtchn = evtchn_from_irq(data->irq); int masked; if (!VALID_EVTCHN(evtchn)) return 0; masked = test_and_set_mask(evtchn); set_evtchn(evtchn); if (!masked) unmask_evtchn(evtchn); return 1; } static void restore_pirqs(void) { int pirq, rc, irq, gsi; struct physdev_map_pirq map_irq; struct irq_info *info; list_for_each_entry(info, &xen_irq_list_head, list) { if (info->type != IRQT_PIRQ) continue; pirq = info->u.pirq.pirq; gsi = info->u.pirq.gsi; irq = info->irq; /* save/restore of PT devices doesn't work, so at this point the * only devices present are GSI based emulated devices */ if (!gsi) continue; map_irq.domid = DOMID_SELF; map_irq.type = MAP_PIRQ_TYPE_GSI; map_irq.index = gsi; map_irq.pirq = pirq; rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); if (rc) { pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", gsi, irq, pirq, rc); xen_free_irq(irq); continue; } printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); __startup_pirq(irq); } } static void restore_cpu_virqs(unsigned int cpu) { struct evtchn_bind_virq bind_virq; evtchn_port_t evtchn; int virq, irq; for (virq = 0; virq < NR_VIRQS; virq++) { if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) continue; BUG_ON(virq_from_irq(irq) != virq); /* Get a new binding from Xen. */ bind_virq.virq = virq; bind_virq.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq) != 0) BUG(); evtchn = bind_virq.port; /* Record the new mapping. 
*/ (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq); bind_evtchn_to_cpu(evtchn, cpu); } } static void restore_cpu_ipis(unsigned int cpu) { struct evtchn_bind_ipi bind_ipi; evtchn_port_t evtchn; int ipi, irq; for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) { if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) continue; BUG_ON(ipi_from_irq(irq) != ipi); /* Get a new binding from Xen. */ bind_ipi.vcpu = xen_vcpu_nr(cpu); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi) != 0) BUG(); evtchn = bind_ipi.port; /* Record the new mapping. */ (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi); bind_evtchn_to_cpu(evtchn, cpu); } } /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) clear_evtchn(evtchn); } EXPORT_SYMBOL(xen_clear_irq_pending); void xen_set_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) set_evtchn(evtchn); } bool xen_test_irq_pending(int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); bool ret = false; if (VALID_EVTCHN(evtchn)) ret = test_evtchn(evtchn); return ret; } /* Poll waiting for an irq to become pending with timeout. In the usual case, * the irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq_timeout(int irq, u64 timeout) { evtchn_port_t evtchn = evtchn_from_irq(irq); if (VALID_EVTCHN(evtchn)) { struct sched_poll poll; poll.nr_ports = 1; poll.timeout = timeout; set_xen_guest_handle(poll.ports, &evtchn); if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0) BUG(); } } EXPORT_SYMBOL(xen_poll_irq_timeout); /* Poll waiting for an irq to become pending. In the usual case, the * irq will be disabled so it won't deliver an interrupt. */ void xen_poll_irq(int irq) { xen_poll_irq_timeout(irq, 0 /* no timeout */); } /* Check whether the IRQ line is shared with other guests. */ int xen_test_irq_shared(int irq) { struct irq_info *info = info_for_irq(irq); struct physdev_irq_status_query irq_status; if (WARN_ON(!info)) return -ENOENT; irq_status.irq = info->u.pirq.pirq; if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) return 0; return !(irq_status.flags & XENIRQSTAT_shared); } EXPORT_SYMBOL_GPL(xen_test_irq_shared); void xen_irq_resume(void) { unsigned int cpu; struct irq_info *info; /* New event-channel space is not 'live' yet. */ xen_evtchn_resume(); /* No IRQ <-> event-channel mappings. 
*/ list_for_each_entry(info, &xen_irq_list_head, list) info->evtchn = 0; /* zap event-channel binding */ clear_evtchn_to_irq_all(); for_each_possible_cpu(cpu) { restore_cpu_virqs(cpu); restore_cpu_ipis(cpu); } restore_pirqs(); } static struct irq_chip xen_dynamic_chip __read_mostly = { .name = "xen-dyn", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = ack_dynirq, .irq_mask_ack = mask_ack_dynirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_pirq_chip __read_mostly = { .name = "xen-pirq", .irq_startup = startup_pirq, .irq_shutdown = shutdown_pirq, .irq_enable = enable_pirq, .irq_disable = disable_pirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = eoi_pirq, .irq_eoi = eoi_pirq, .irq_mask_ack = mask_ack_pirq, .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; static struct irq_chip xen_percpu_chip __read_mostly = { .name = "xen-percpu", .irq_disable = disable_dynirq, .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, .irq_ack = ack_dynirq, }; int xen_set_callback_via(uint64_t via) { struct xen_hvm_param a; a.domid = DOMID_SELF; a.index = HVM_PARAM_CALLBACK_IRQ; a.value = via; return HYPERVISOR_hvm_op(HVMOP_set_param, &a); } EXPORT_SYMBOL_GPL(xen_set_callback_via); #ifdef CONFIG_XEN_PVHVM /* Vector callbacks are better than PCI interrupts to receive event * channel notifications because we can receive vector callbacks on any * vcpu and we don't need PCI support or APIC interactions. */ void xen_setup_callback_vector(void) { uint64_t callback_via; if (xen_have_vector_callback) { callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR); if (xen_set_callback_via(callback_via)) { pr_err("Request for Xen HVM callback vector failed\n"); xen_have_vector_callback = 0; } } } static __init void xen_alloc_callback_vector(void) { if (!xen_have_vector_callback) return; pr_info("Xen HVM callback vector for event delivery is enabled\n"); alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback); } #else void xen_setup_callback_vector(void) {} static inline void xen_alloc_callback_vector(void) {} #endif #undef MODULE_PARAM_PREFIX #define MODULE_PARAM_PREFIX "xen." static bool fifo_events = true; module_param(fifo_events, bool, 0); void __init xen_init_IRQ(void) { int ret = -EINVAL; evtchn_port_t evtchn; if (fifo_events) ret = xen_evtchn_fifo_init(); if (ret < 0) xen_evtchn_2l_init(); evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()), sizeof(*evtchn_to_irq), GFP_KERNEL); BUG_ON(!evtchn_to_irq); /* No event channels are 'live' right now. */ for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++) mask_evtchn(evtchn); pirq_needs_eoi = pirq_needs_eoi_flag; #ifdef CONFIG_X86 if (xen_pv_domain()) { if (xen_initial_domain()) pci_xen_initial_domain(); } if (xen_feature(XENFEAT_hvm_callback_vector)) { xen_setup_callback_vector(); xen_alloc_callback_vector(); } if (xen_hvm_domain()) { native_init_IRQ(); /* pci_xen_hvm_init must be called after native_init_IRQ so that * __acpi_register_gsi can point at the right function */ pci_xen_hvm_init(); } else { int rc; struct physdev_pirq_eoi_gmfn eoi_gmfn; pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map); rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn); if (rc != 0) { free_page((unsigned long) pirq_eoi_map); pirq_eoi_map = NULL; } else pirq_needs_eoi = pirq_check_eoi_map; } #endif }
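The code_after cell above differs from the code_before cell chiefly in how it guards the IRQ <-> event-channel mapping against concurrent teardown: it adds evtchn_rwlock (held for reading around the event loop in __xen_evtchn_do_upcall() and for writing in xen_free_irq()) and switches the evtchn_to_irq table to READ_ONCE()/WRITE_ONCE() accesses. As a minimal, hedged sketch of that locking pattern only, here is a userspace analogue built on pthreads; every name in it (port_to_irq, handle_pending_port, free_port, NPORTS) is invented for illustration and this is not the kernel API:

#include <pthread.h>
#include <stdio.h>

#define NPORTS 64                       /* invented size for the sketch */

/* Mapping read by the event loop, torn down by free_port(). */
static pthread_rwlock_t map_rwlock = PTHREAD_RWLOCK_INITIALIZER;
static int port_to_irq[NPORTS];

static void handle_pending_port(int port)
{
        pthread_rwlock_rdlock(&map_rwlock);   /* like read_lock(&evtchn_rwlock) */
        int irq = port_to_irq[port];
        if (irq != -1)
                printf("dispatch port %d -> irq %d\n", port, irq);
        pthread_rwlock_unlock(&map_rwlock);
}

static void free_port(int port)
{
        pthread_rwlock_wrlock(&map_rwlock);   /* like write_lock_irqsave(...) */
        port_to_irq[port] = -1;   /* the handler cannot observe a half-torn-down entry */
        pthread_rwlock_unlock(&map_rwlock);
}

int main(void)
{
        for (int p = 0; p < NPORTS; p++)
                port_to_irq[p] = -1;
        port_to_irq[3] = 42;                  /* pretend port 3 is bound */
        handle_pending_port(3);
        free_port(3);
        handle_pending_port(3);               /* now a no-op */
        return 0;
}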
func_before:

static void clear_evtchn_to_irq_row(unsigned row)
{
        unsigned col;

        for (col = 0; col < EVTCHN_PER_ROW; col++)
                evtchn_to_irq[row][col] = -1;
}
func_after:

static void clear_evtchn_to_irq_row(unsigned row)
{
        unsigned col;

        for (col = 0; col < EVTCHN_PER_ROW; col++)
                WRITE_ONCE(evtchn_to_irq[row][col], -1);
}
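func_before and func_after differ only in the store: the plain assignment becomes WRITE_ONCE(), pairing with the READ_ONCE() that code_after uses in get_evtchn_to_irq(). As a rough userspace approximation of what those macros do (an assumption-level sketch, not the kernel's actual definitions, which live in linux/compiler.h and handle more cases):

#include <stdio.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE().
 * The volatile cast forces exactly one access of the full width,
 * so lockless concurrent readers never see a torn or refetched value. */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(const volatile __typeof__(x) *)&(x))

static int table[4];

int main(void)
{
        WRITE_ONCE(table[0], -1);               /* single untorn store */
        printf("%d\n", READ_ONCE(table[0]));    /* single untorn load  */
        return 0;
}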
diff:
{'added': [
    (36, '#include <linux/spinlock.h>'),
    (75, '/*'),
    (76, ' * Lock protecting event handling loop against removing event channels.'),
    (77, ' * Adding of event channels is no issue as the associated IRQ becomes active'),
    (78, " * only after everything is setup (before request_[threaded_]irq() the handler"),
    (79, " * can't be entered for an event, as the event channel will be unmasked only"),
    (80, ' * then).'),
    (81, ' */'),
    (82, 'static DEFINE_RWLOCK(evtchn_rwlock);'),
    (83, ''),
    (84, '/*'),
    (85, ' * Lock hierarchy:'),
    (86, ' *'),
    (87, ' * irq_mapping_update_lock'),
    (88, ' * evtchn_rwlock'),
    (89, ' * IRQ-desc lock'),
    (90, ' */'),
    (91, ''),
    (126, '\t\tWRITE_ONCE(evtchn_to_irq[row][col], -1);'),
    (163, '\tWRITE_ONCE(evtchn_to_irq[row][col], irq);'),
    (173, '\treturn READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);'),
    (282, '\tconst struct irq_info *info = NULL;'),
    (283, ''),
    (284, '\tif (likely(irq < nr_irqs))'),
    (285, '\t\tinfo = info_for_irq(irq);'),
    (286, '\tif (!info)'),
    (289, '\treturn info->evtchn;'),
    (465, '\tunsigned long flags;'),
    (470, '\twrite_lock_irqsave(&evtchn_rwlock, flags);'),
    (471, ''),
    (478, '\twrite_unlock_irqrestore(&evtchn_rwlock, flags);'),
    (479, ''),
    (1263, '\tread_lock(&evtchn_rwlock);'),
    (1264, ''),
    (1275, ''),
    (1276, '\tread_unlock(&evtchn_rwlock);')
  ],
 'deleted': [
    (108, '\t\tevtchn_to_irq[row][col] = -1;'),
    (145, '\tevtchn_to_irq[row][col] = irq;'),
    (155, '\treturn evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)];'),
    (264, '\tif (WARN(irq >= nr_irqs, "Invalid irq %d!\\n", irq))'),
    (267, '\treturn info_for_irq(irq)->evtchn;')
  ]}
num_lines_added: 36
num_lines_deleted: 5
num_lines_in_file: 1,214
num_tokens_in_file: 6,641
num_lines_in_method: 6
num_tokens_in_method: 34
method_complexity: 2
repo: https://github.com/torvalds/linux
cve_id: CVE-2020-27675
cwe_id: CWE-362
id: 2,411
file_name: cil_reset_ast.c
programming_language: C
method_name: cil_reset_classperms_set
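In the code_before cell that follows, cil_reset_classperms_set() forwards straight into cil_reset_classpermission(cp_set->set), destroying the classperms list of a named classpermission that the set merely points at but does not own; the code_after cell instead NULL-checks cp_set and only drops the borrowed pointer (cp_set->set = NULL), leaving the reset of the classpermission to its owner. A hedged sketch of that ownership rule follows, with invented names (shared_obj, holder, reset_holder_*); it illustrates the shape of the change, not the libsepol data structures:

#include <stdio.h>
#include <stddef.h>

struct shared_obj {
        int live_data;                  /* stands in for the classperms list */
};

struct holder {
        struct shared_obj *borrowed;    /* non-owning reference */
};

/* Buggy shape: resetting through a borrowed pointer clobbers state
 * that other holders (and the real owner) still rely on. */
static void reset_holder_buggy(struct holder *h)
{
        if (h && h->borrowed)
                h->borrowed->live_data = 0;
}

/* Fixed shape: forget only our own reference; the owner is the one
 * that resets or destroys the shared object. */
static void reset_holder_fixed(struct holder *h)
{
        if (h == NULL)
                return;
        h->borrowed = NULL;
}

int main(void)
{
        struct shared_obj obj = { .live_data = 1 };
        struct holder a = { &obj }, b = { &obj };

        reset_holder_fixed(&a);         /* a forgets obj; obj is untouched */
        printf("b still sees live_data=%d\n", b.borrowed->live_data);
        reset_holder_buggy(&b);         /* would break every other user of obj */
        return 0;
}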
#include "cil_internal.h" #include "cil_log.h" #include "cil_list.h" #include "cil_reset_ast.h" #include "cil_symtab.h" static inline void cil_reset_classperms_list(struct cil_list *cp_list); static inline void cil_reset_level(struct cil_level *level); static inline void cil_reset_levelrange(struct cil_levelrange *levelrange); static inline void cil_reset_context(struct cil_context *context); static int __class_reset_perm_values(__attribute__((unused)) hashtab_key_t k, hashtab_datum_t d, void *args) { struct cil_perm *perm = (struct cil_perm *)d; perm->value -= *((int *)args); return SEPOL_OK; } static void cil_reset_class(struct cil_class *class) { if (class->common != NULL) { /* Must assume that the common has been destroyed */ int num_common_perms = class->num_perms - class->perms.nprim; cil_symtab_map(&class->perms, __class_reset_perm_values, &num_common_perms); /* during a re-resolve, we need to reset the common, so a classcommon * statement isn't seen as a duplicate */ class->num_perms = class->perms.nprim; class->common = NULL; /* Must make this NULL or there will be an error when re-resolving */ } class->ordered = CIL_FALSE; } static void cil_reset_perm(struct cil_perm *perm) { cil_list_destroy(&perm->classperms, CIL_FALSE); } static inline void cil_reset_classperms(struct cil_classperms *cp) { if (cp == NULL) { return; } cil_list_destroy(&cp->perms, CIL_FALSE); } static void cil_reset_classpermission(struct cil_classpermission *cp) { if (cp == NULL) { return; } cil_list_destroy(&cp->classperms, CIL_FALSE); } static void cil_reset_classperms_set(struct cil_classperms_set *cp_set) { cil_reset_classpermission(cp_set->set); } static inline void cil_reset_classperms_list(struct cil_list *cp_list) { struct cil_list_item *curr; if (cp_list == NULL) { return; } cil_list_for_each(curr, cp_list) { if (curr->flavor == CIL_CLASSPERMS) { /* KERNEL or MAP */ cil_reset_classperms(curr->data); } else if (curr->flavor == CIL_CLASSPERMS_SET) { /* SET */ cil_reset_classperms_set(curr->data); } } } static void cil_reset_classpermissionset(struct cil_classpermissionset *cps) { cil_reset_classperms_list(cps->classperms); } static void cil_reset_classmapping(struct cil_classmapping *cm) { cil_reset_classperms_list(cm->classperms); } static void cil_reset_alias(struct cil_alias *alias) { /* reset actual to NULL during a re-resolve */ alias->actual = NULL; } static void cil_reset_user(struct cil_user *user) { /* reset the bounds to NULL during a re-resolve */ user->bounds = NULL; user->dftlevel = NULL; user->range = NULL; } static void cil_reset_userattr(struct cil_userattribute *attr) { struct cil_list_item *expr = NULL; struct cil_list_item *next = NULL; /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a userattribute statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ expr = attr->expr_list->head; while (expr != NULL) { next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } } static void cil_reset_userattributeset(struct cil_userattributeset *uas) { cil_list_destroy(&uas->datum_expr, CIL_FALSE); } static void cil_reset_selinuxuser(struct cil_selinuxuser *selinuxuser) { if (selinuxuser->range_str == NULL) { cil_reset_levelrange(selinuxuser->range); } } static void cil_reset_role(struct cil_role *role) { /* reset the bounds to NULL during 
a re-resolve */ role->bounds = NULL; } static void cil_reset_roleattr(struct cil_roleattribute *attr) { /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a attributeroles statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ struct cil_list_item *expr = attr->expr_list->head; while (expr != NULL) { struct cil_list_item *next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } } static void cil_reset_roleattributeset(struct cil_roleattributeset *ras) { cil_list_destroy(&ras->datum_expr, CIL_FALSE); } static void cil_reset_type(struct cil_type *type) { /* reset the bounds to NULL during a re-resolve */ type->bounds = NULL; } static void cil_reset_typeattr(struct cil_typeattribute *attr) { /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a attributetypes statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ struct cil_list_item *expr = attr->expr_list->head; while (expr != NULL) { struct cil_list_item *next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } attr->used = CIL_FALSE; attr->keep = CIL_FALSE; } static void cil_reset_typeattributeset(struct cil_typeattributeset *tas) { cil_list_destroy(&tas->datum_expr, CIL_FALSE); } static void cil_reset_avrule(struct cil_avrule *rule) { cil_reset_classperms_list(rule->perms.classperms); } static void cil_reset_rangetransition(struct cil_rangetransition *rangetrans) { if (rangetrans->range_str == NULL) { cil_reset_levelrange(rangetrans->range); } } static void cil_reset_sens(struct cil_sens *sens) { /* during a re-resolve, we need to reset the categories associated with * this sensitivity from a (sensitivitycategory) statement */ cil_list_destroy(&sens->cats_list, CIL_FALSE); sens->ordered = CIL_FALSE; } static void cil_reset_cat(struct cil_cat *cat) { cat->ordered = CIL_FALSE; } static inline void cil_reset_cats(struct cil_cats *cats) { if (cats != NULL) { cats->evaluated = CIL_FALSE; cil_list_destroy(&cats->datum_expr, CIL_FALSE); } } static void cil_reset_senscat(struct cil_senscat *senscat) { cil_reset_cats(senscat->cats); } static void cil_reset_catset(struct cil_catset *catset) { cil_reset_cats(catset->cats); } static inline void cil_reset_level(struct cil_level *level) { cil_reset_cats(level->cats); } static inline void cil_reset_levelrange(struct cil_levelrange *levelrange) { if (levelrange->low_str == NULL) { cil_reset_level(levelrange->low); } if (levelrange->high_str == NULL) { cil_reset_level(levelrange->high); } } static inline void cil_reset_userlevel(struct cil_userlevel *userlevel) { if (userlevel->level_str == NULL) { cil_reset_level(userlevel->level); } } static inline void cil_reset_userrange(struct cil_userrange *userrange) { if (userrange->range_str == NULL) { cil_reset_levelrange(userrange->range); } } static inline void cil_reset_context(struct cil_context *context) { if (context->range_str == NULL) { cil_reset_levelrange(context->range); } } static void cil_reset_sidcontext(struct cil_sidcontext *sidcontext) { if (sidcontext->context_str == NULL) { cil_reset_context(sidcontext->context); } } static 
void cil_reset_filecon(struct cil_filecon *filecon) { if (filecon->context_str == NULL && filecon->context != NULL) { cil_reset_context(filecon->context); } } static void cil_reset_ibpkeycon(struct cil_ibpkeycon *ibpkeycon) { if (!ibpkeycon->context_str) cil_reset_context(ibpkeycon->context); } static void cil_reset_portcon(struct cil_portcon *portcon) { if (portcon->context_str == NULL) { cil_reset_context(portcon->context); } } static void cil_reset_nodecon(struct cil_nodecon *nodecon) { if (nodecon->context_str == NULL) { cil_reset_context(nodecon->context); } } static void cil_reset_genfscon(struct cil_genfscon *genfscon) { if (genfscon->context_str == NULL) { cil_reset_context(genfscon->context); } } static void cil_reset_netifcon(struct cil_netifcon *netifcon) { if (netifcon->if_context_str == NULL) { cil_reset_context(netifcon->if_context); } if (netifcon->packet_context_str == NULL) { cil_reset_context(netifcon->packet_context); } } static void cil_reset_ibendportcon(struct cil_ibendportcon *ibendportcon) { if (!ibendportcon->context_str) { cil_reset_context(ibendportcon->context); } } static void cil_reset_pirqcon(struct cil_pirqcon *pirqcon) { if (pirqcon->context_str == NULL) { cil_reset_context(pirqcon->context); } } static void cil_reset_iomemcon(struct cil_iomemcon *iomemcon) { if (iomemcon->context_str == NULL) { cil_reset_context(iomemcon->context); } } static void cil_reset_ioportcon(struct cil_ioportcon *ioportcon) { if (ioportcon->context_str == NULL) { cil_reset_context(ioportcon->context); } } static void cil_reset_pcidevicecon(struct cil_pcidevicecon *pcidevicecon) { if (pcidevicecon->context_str == NULL) { cil_reset_context(pcidevicecon->context); } } static void cil_reset_devicetreecon(struct cil_devicetreecon *devicetreecon) { if (devicetreecon->context_str == NULL) { cil_reset_context(devicetreecon->context); } } static void cil_reset_fsuse(struct cil_fsuse *fsuse) { if (fsuse->context_str == NULL) { cil_reset_context(fsuse->context); } } static void cil_reset_sid(struct cil_sid *sid) { /* reset the context to NULL during a re-resolve */ sid->context = NULL; sid->ordered = CIL_FALSE; } static void cil_reset_constrain(struct cil_constrain *con) { cil_reset_classperms_list(con->classperms); cil_list_destroy(&con->datum_expr, CIL_FALSE); } static void cil_reset_validatetrans(struct cil_validatetrans *vt) { cil_list_destroy(&vt->datum_expr, CIL_FALSE); } static void cil_reset_default(struct cil_default *def) { cil_list_destroy(&def->class_datums, CIL_FALSE); } static void cil_reset_defaultrange(struct cil_defaultrange *def) { cil_list_destroy(&def->class_datums, CIL_FALSE); } static void cil_reset_booleanif(struct cil_booleanif *bif) { cil_list_destroy(&bif->datum_expr, CIL_FALSE); } int __cil_reset_node(struct cil_tree_node *node, __attribute__((unused)) uint32_t *finished, __attribute__((unused)) void *extra_args) { switch (node->flavor) { case CIL_CLASS: cil_reset_class(node->data); break; case CIL_PERM: case CIL_MAP_PERM: cil_reset_perm(node->data); break; case CIL_CLASSPERMISSION: cil_reset_classpermission(node->data); break; case CIL_CLASSPERMISSIONSET: cil_reset_classpermissionset(node->data); break; case CIL_CLASSMAPPING: cil_reset_classmapping(node->data); break; case CIL_TYPEALIAS: case CIL_SENSALIAS: case CIL_CATALIAS: cil_reset_alias(node->data); break; case CIL_USERRANGE: cil_reset_userrange(node->data); break; case CIL_USERLEVEL: cil_reset_userlevel(node->data); break; case CIL_USER: cil_reset_user(node->data); break; case CIL_USERATTRIBUTE: 
cil_reset_userattr(node->data); break; case CIL_USERATTRIBUTESET: cil_reset_userattributeset(node->data); break; case CIL_SELINUXUSERDEFAULT: case CIL_SELINUXUSER: cil_reset_selinuxuser(node->data); break; case CIL_ROLE: cil_reset_role(node->data); break; case CIL_ROLEATTRIBUTE: cil_reset_roleattr(node->data); break; case CIL_ROLEATTRIBUTESET: cil_reset_roleattributeset(node->data); break; case CIL_TYPE: cil_reset_type(node->data); break; case CIL_TYPEATTRIBUTE: cil_reset_typeattr(node->data); break; case CIL_TYPEATTRIBUTESET: cil_reset_typeattributeset(node->data); break; case CIL_RANGETRANSITION: cil_reset_rangetransition(node->data); break; case CIL_AVRULE: cil_reset_avrule(node->data); break; case CIL_SENS: cil_reset_sens(node->data); break; case CIL_CAT: cil_reset_cat(node->data); break; case CIL_SENSCAT: cil_reset_senscat(node->data); break; case CIL_CATSET: cil_reset_catset(node->data); break; case CIL_LEVEL: cil_reset_level(node->data); break; case CIL_LEVELRANGE: cil_reset_levelrange(node->data); break; case CIL_CONTEXT: cil_reset_context(node->data); break; case CIL_SIDCONTEXT: cil_reset_sidcontext(node->data); break; case CIL_FILECON: cil_reset_filecon(node->data); break; case CIL_IBPKEYCON: cil_reset_ibpkeycon(node->data); break; case CIL_IBENDPORTCON: cil_reset_ibendportcon(node->data); break; case CIL_PORTCON: cil_reset_portcon(node->data); break; case CIL_NODECON: cil_reset_nodecon(node->data); break; case CIL_GENFSCON: cil_reset_genfscon(node->data); break; case CIL_NETIFCON: cil_reset_netifcon(node->data); break; case CIL_PIRQCON: cil_reset_pirqcon(node->data); break; case CIL_IOMEMCON: cil_reset_iomemcon(node->data); break; case CIL_IOPORTCON: cil_reset_ioportcon(node->data); break; case CIL_PCIDEVICECON: cil_reset_pcidevicecon(node->data); break; case CIL_DEVICETREECON: cil_reset_devicetreecon(node->data); break; case CIL_FSUSE: cil_reset_fsuse(node->data); break; case CIL_SID: cil_reset_sid(node->data); break; case CIL_CONSTRAIN: case CIL_MLSCONSTRAIN: cil_reset_constrain(node->data); break; case CIL_VALIDATETRANS: case CIL_MLSVALIDATETRANS: cil_reset_validatetrans(node->data); break; case CIL_DEFAULTUSER: case CIL_DEFAULTROLE: case CIL_DEFAULTTYPE: cil_reset_default(node->data); break; case CIL_DEFAULTRANGE: cil_reset_defaultrange(node->data); break; case CIL_BOOLEANIF: cil_reset_booleanif(node->data); break; case CIL_TUNABLEIF: case CIL_CALL: break; /* Not effected by optional block disabling */ case CIL_MACRO: case CIL_SIDORDER: case CIL_CLASSORDER: case CIL_CATORDER: case CIL_SENSITIVITYORDER: case CIL_EXPANDTYPEATTRIBUTE: break; /* Nothing to reset */ default: break; } return SEPOL_OK; } int cil_reset_ast(struct cil_tree_node *current) { int rc = SEPOL_ERR; rc = cil_tree_walk(current, __cil_reset_node, NULL, NULL, NULL); if (rc != SEPOL_OK) { cil_log(CIL_ERR, "Failed to reset AST\n"); return SEPOL_ERR; } return SEPOL_OK; }
#include "cil_internal.h" #include "cil_log.h" #include "cil_list.h" #include "cil_reset_ast.h" #include "cil_symtab.h" static inline void cil_reset_classperms_list(struct cil_list *cp_list); static inline void cil_reset_level(struct cil_level *level); static inline void cil_reset_levelrange(struct cil_levelrange *levelrange); static inline void cil_reset_context(struct cil_context *context); static int __class_reset_perm_values(__attribute__((unused)) hashtab_key_t k, hashtab_datum_t d, void *args) { struct cil_perm *perm = (struct cil_perm *)d; perm->value -= *((int *)args); return SEPOL_OK; } static void cil_reset_class(struct cil_class *class) { if (class->common != NULL) { /* Must assume that the common has been destroyed */ int num_common_perms = class->num_perms - class->perms.nprim; cil_symtab_map(&class->perms, __class_reset_perm_values, &num_common_perms); /* during a re-resolve, we need to reset the common, so a classcommon * statement isn't seen as a duplicate */ class->num_perms = class->perms.nprim; class->common = NULL; /* Must make this NULL or there will be an error when re-resolving */ } class->ordered = CIL_FALSE; } static void cil_reset_perm(struct cil_perm *perm) { cil_list_destroy(&perm->classperms, CIL_FALSE); } static inline void cil_reset_classperms(struct cil_classperms *cp) { if (cp == NULL) { return; } cil_list_destroy(&cp->perms, CIL_FALSE); } static void cil_reset_classpermission(struct cil_classpermission *cp) { if (cp == NULL) { return; } cil_list_destroy(&cp->classperms, CIL_FALSE); } static void cil_reset_classperms_set(struct cil_classperms_set *cp_set) { if (cp_set == NULL) { return; } cp_set->set = NULL; } static inline void cil_reset_classperms_list(struct cil_list *cp_list) { struct cil_list_item *curr; if (cp_list == NULL) { return; } cil_list_for_each(curr, cp_list) { if (curr->flavor == CIL_CLASSPERMS) { /* KERNEL or MAP */ cil_reset_classperms(curr->data); } else if (curr->flavor == CIL_CLASSPERMS_SET) { /* SET */ cil_reset_classperms_set(curr->data); } } } static void cil_reset_classpermissionset(struct cil_classpermissionset *cps) { cil_reset_classperms_list(cps->classperms); } static void cil_reset_classmapping(struct cil_classmapping *cm) { cil_reset_classperms_list(cm->classperms); } static void cil_reset_alias(struct cil_alias *alias) { /* reset actual to NULL during a re-resolve */ alias->actual = NULL; } static void cil_reset_user(struct cil_user *user) { /* reset the bounds to NULL during a re-resolve */ user->bounds = NULL; user->dftlevel = NULL; user->range = NULL; } static void cil_reset_userattr(struct cil_userattribute *attr) { struct cil_list_item *expr = NULL; struct cil_list_item *next = NULL; /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a userattribute statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ expr = attr->expr_list->head; while (expr != NULL) { next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } } static void cil_reset_userattributeset(struct cil_userattributeset *uas) { cil_list_destroy(&uas->datum_expr, CIL_FALSE); } static void cil_reset_selinuxuser(struct cil_selinuxuser *selinuxuser) { if (selinuxuser->range_str == NULL) { cil_reset_levelrange(selinuxuser->range); } } static void cil_reset_role(struct cil_role *role) { /* reset the bounds to 
NULL during a re-resolve */ role->bounds = NULL; } static void cil_reset_roleattr(struct cil_roleattribute *attr) { /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a attributeroles statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ struct cil_list_item *expr = attr->expr_list->head; while (expr != NULL) { struct cil_list_item *next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } } static void cil_reset_roleattributeset(struct cil_roleattributeset *ras) { cil_list_destroy(&ras->datum_expr, CIL_FALSE); } static void cil_reset_type(struct cil_type *type) { /* reset the bounds to NULL during a re-resolve */ type->bounds = NULL; } static void cil_reset_typeattr(struct cil_typeattribute *attr) { /* during a re-resolve, we need to reset the lists of expression stacks associated with this attribute from a attributetypes statement */ if (attr->expr_list != NULL) { /* we don't want to destroy the expression stacks (cil_list) inside * this list cil_list_destroy destroys sublists, so we need to do it * manually */ struct cil_list_item *expr = attr->expr_list->head; while (expr != NULL) { struct cil_list_item *next = expr->next; cil_list_item_destroy(&expr, CIL_FALSE); expr = next; } free(attr->expr_list); attr->expr_list = NULL; } attr->used = CIL_FALSE; attr->keep = CIL_FALSE; } static void cil_reset_typeattributeset(struct cil_typeattributeset *tas) { cil_list_destroy(&tas->datum_expr, CIL_FALSE); } static void cil_reset_avrule(struct cil_avrule *rule) { cil_reset_classperms_list(rule->perms.classperms); } static void cil_reset_rangetransition(struct cil_rangetransition *rangetrans) { if (rangetrans->range_str == NULL) { cil_reset_levelrange(rangetrans->range); } } static void cil_reset_sens(struct cil_sens *sens) { /* during a re-resolve, we need to reset the categories associated with * this sensitivity from a (sensitivitycategory) statement */ cil_list_destroy(&sens->cats_list, CIL_FALSE); sens->ordered = CIL_FALSE; } static void cil_reset_cat(struct cil_cat *cat) { cat->ordered = CIL_FALSE; } static inline void cil_reset_cats(struct cil_cats *cats) { if (cats != NULL) { cats->evaluated = CIL_FALSE; cil_list_destroy(&cats->datum_expr, CIL_FALSE); } } static void cil_reset_senscat(struct cil_senscat *senscat) { cil_reset_cats(senscat->cats); } static void cil_reset_catset(struct cil_catset *catset) { cil_reset_cats(catset->cats); } static inline void cil_reset_level(struct cil_level *level) { cil_reset_cats(level->cats); } static inline void cil_reset_levelrange(struct cil_levelrange *levelrange) { if (levelrange->low_str == NULL) { cil_reset_level(levelrange->low); } if (levelrange->high_str == NULL) { cil_reset_level(levelrange->high); } } static inline void cil_reset_userlevel(struct cil_userlevel *userlevel) { if (userlevel->level_str == NULL) { cil_reset_level(userlevel->level); } } static inline void cil_reset_userrange(struct cil_userrange *userrange) { if (userrange->range_str == NULL) { cil_reset_levelrange(userrange->range); } } static inline void cil_reset_context(struct cil_context *context) { if (context->range_str == NULL) { cil_reset_levelrange(context->range); } } static void cil_reset_sidcontext(struct cil_sidcontext *sidcontext) { if (sidcontext->context_str == NULL) { cil_reset_context(sidcontext->context); } 
} static void cil_reset_filecon(struct cil_filecon *filecon) { if (filecon->context_str == NULL && filecon->context != NULL) { cil_reset_context(filecon->context); } } static void cil_reset_ibpkeycon(struct cil_ibpkeycon *ibpkeycon) { if (!ibpkeycon->context_str) cil_reset_context(ibpkeycon->context); } static void cil_reset_portcon(struct cil_portcon *portcon) { if (portcon->context_str == NULL) { cil_reset_context(portcon->context); } } static void cil_reset_nodecon(struct cil_nodecon *nodecon) { if (nodecon->context_str == NULL) { cil_reset_context(nodecon->context); } } static void cil_reset_genfscon(struct cil_genfscon *genfscon) { if (genfscon->context_str == NULL) { cil_reset_context(genfscon->context); } } static void cil_reset_netifcon(struct cil_netifcon *netifcon) { if (netifcon->if_context_str == NULL) { cil_reset_context(netifcon->if_context); } if (netifcon->packet_context_str == NULL) { cil_reset_context(netifcon->packet_context); } } static void cil_reset_ibendportcon(struct cil_ibendportcon *ibendportcon) { if (!ibendportcon->context_str) { cil_reset_context(ibendportcon->context); } } static void cil_reset_pirqcon(struct cil_pirqcon *pirqcon) { if (pirqcon->context_str == NULL) { cil_reset_context(pirqcon->context); } } static void cil_reset_iomemcon(struct cil_iomemcon *iomemcon) { if (iomemcon->context_str == NULL) { cil_reset_context(iomemcon->context); } } static void cil_reset_ioportcon(struct cil_ioportcon *ioportcon) { if (ioportcon->context_str == NULL) { cil_reset_context(ioportcon->context); } } static void cil_reset_pcidevicecon(struct cil_pcidevicecon *pcidevicecon) { if (pcidevicecon->context_str == NULL) { cil_reset_context(pcidevicecon->context); } } static void cil_reset_devicetreecon(struct cil_devicetreecon *devicetreecon) { if (devicetreecon->context_str == NULL) { cil_reset_context(devicetreecon->context); } } static void cil_reset_fsuse(struct cil_fsuse *fsuse) { if (fsuse->context_str == NULL) { cil_reset_context(fsuse->context); } } static void cil_reset_sid(struct cil_sid *sid) { /* reset the context to NULL during a re-resolve */ sid->context = NULL; sid->ordered = CIL_FALSE; } static void cil_reset_constrain(struct cil_constrain *con) { cil_reset_classperms_list(con->classperms); cil_list_destroy(&con->datum_expr, CIL_FALSE); } static void cil_reset_validatetrans(struct cil_validatetrans *vt) { cil_list_destroy(&vt->datum_expr, CIL_FALSE); } static void cil_reset_default(struct cil_default *def) { cil_list_destroy(&def->class_datums, CIL_FALSE); } static void cil_reset_defaultrange(struct cil_defaultrange *def) { cil_list_destroy(&def->class_datums, CIL_FALSE); } static void cil_reset_booleanif(struct cil_booleanif *bif) { cil_list_destroy(&bif->datum_expr, CIL_FALSE); } int __cil_reset_node(struct cil_tree_node *node, __attribute__((unused)) uint32_t *finished, __attribute__((unused)) void *extra_args) { switch (node->flavor) { case CIL_CLASS: cil_reset_class(node->data); break; case CIL_PERM: case CIL_MAP_PERM: cil_reset_perm(node->data); break; case CIL_CLASSPERMISSION: cil_reset_classpermission(node->data); break; case CIL_CLASSPERMISSIONSET: cil_reset_classpermissionset(node->data); break; case CIL_CLASSMAPPING: cil_reset_classmapping(node->data); break; case CIL_TYPEALIAS: case CIL_SENSALIAS: case CIL_CATALIAS: cil_reset_alias(node->data); break; case CIL_USERRANGE: cil_reset_userrange(node->data); break; case CIL_USERLEVEL: cil_reset_userlevel(node->data); break; case CIL_USER: cil_reset_user(node->data); break; case CIL_USERATTRIBUTE: 
cil_reset_userattr(node->data); break; case CIL_USERATTRIBUTESET: cil_reset_userattributeset(node->data); break; case CIL_SELINUXUSERDEFAULT: case CIL_SELINUXUSER: cil_reset_selinuxuser(node->data); break; case CIL_ROLE: cil_reset_role(node->data); break; case CIL_ROLEATTRIBUTE: cil_reset_roleattr(node->data); break; case CIL_ROLEATTRIBUTESET: cil_reset_roleattributeset(node->data); break; case CIL_TYPE: cil_reset_type(node->data); break; case CIL_TYPEATTRIBUTE: cil_reset_typeattr(node->data); break; case CIL_TYPEATTRIBUTESET: cil_reset_typeattributeset(node->data); break; case CIL_RANGETRANSITION: cil_reset_rangetransition(node->data); break; case CIL_AVRULE: cil_reset_avrule(node->data); break; case CIL_SENS: cil_reset_sens(node->data); break; case CIL_CAT: cil_reset_cat(node->data); break; case CIL_SENSCAT: cil_reset_senscat(node->data); break; case CIL_CATSET: cil_reset_catset(node->data); break; case CIL_LEVEL: cil_reset_level(node->data); break; case CIL_LEVELRANGE: cil_reset_levelrange(node->data); break; case CIL_CONTEXT: cil_reset_context(node->data); break; case CIL_SIDCONTEXT: cil_reset_sidcontext(node->data); break; case CIL_FILECON: cil_reset_filecon(node->data); break; case CIL_IBPKEYCON: cil_reset_ibpkeycon(node->data); break; case CIL_IBENDPORTCON: cil_reset_ibendportcon(node->data); break; case CIL_PORTCON: cil_reset_portcon(node->data); break; case CIL_NODECON: cil_reset_nodecon(node->data); break; case CIL_GENFSCON: cil_reset_genfscon(node->data); break; case CIL_NETIFCON: cil_reset_netifcon(node->data); break; case CIL_PIRQCON: cil_reset_pirqcon(node->data); break; case CIL_IOMEMCON: cil_reset_iomemcon(node->data); break; case CIL_IOPORTCON: cil_reset_ioportcon(node->data); break; case CIL_PCIDEVICECON: cil_reset_pcidevicecon(node->data); break; case CIL_DEVICETREECON: cil_reset_devicetreecon(node->data); break; case CIL_FSUSE: cil_reset_fsuse(node->data); break; case CIL_SID: cil_reset_sid(node->data); break; case CIL_CONSTRAIN: case CIL_MLSCONSTRAIN: cil_reset_constrain(node->data); break; case CIL_VALIDATETRANS: case CIL_MLSVALIDATETRANS: cil_reset_validatetrans(node->data); break; case CIL_DEFAULTUSER: case CIL_DEFAULTROLE: case CIL_DEFAULTTYPE: cil_reset_default(node->data); break; case CIL_DEFAULTRANGE: cil_reset_defaultrange(node->data); break; case CIL_BOOLEANIF: cil_reset_booleanif(node->data); break; case CIL_TUNABLEIF: case CIL_CALL: break; /* Not effected by optional block disabling */ case CIL_MACRO: case CIL_SIDORDER: case CIL_CLASSORDER: case CIL_CATORDER: case CIL_SENSITIVITYORDER: case CIL_EXPANDTYPEATTRIBUTE: break; /* Nothing to reset */ default: break; } return SEPOL_OK; } int cil_reset_ast(struct cil_tree_node *current) { int rc = SEPOL_ERR; rc = cil_tree_walk(current, __cil_reset_node, NULL, NULL, NULL); if (rc != SEPOL_OK) { cil_log(CIL_ERR, "Failed to reset AST\n"); return SEPOL_ERR; } return SEPOL_OK; }
static void cil_reset_classperms_set(struct cil_classperms_set *cp_set) { cil_reset_classpermission(cp_set->set); }
static void cil_reset_classperms_set(struct cil_classperms_set *cp_set) { if (cp_set == NULL) { return; } cp_set->set = NULL; }
{'added': [(62, '\tif (cp_set == NULL) {'), (63, '\t\treturn;'), (64, '\t}'), (65, ''), (66, '\tcp_set->set = NULL;')], 'deleted': [(62, '\tcil_reset_classpermission(cp_set->set);')]}
5
1
505
2,377
4
16
1
https://github.com/SELinuxProject/selinux
CVE-2021-36086
CWE-416
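(Note on this row: the diff above records the entire CVE-2021-36086 fix, namely that cil_reset_classperms_set stops calling cil_reset_classpermission on its borrowed `set` pointer and simply clears it. Below is a minimal, self-contained C sketch of the ownership pattern involved; the struct layouts and function names are simplified assumptions for illustration, not libsepol's actual types.)

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified analogue of the structures in this record: a
 * classperms_set only borrows its ->set pointer; the classpermission and
 * its perms storage are owned elsewhere in the AST. */
struct classpermission { int *classperms; };
struct classperms_set  { struct classpermission *set; };

/* Pre-fix shape: resetting through the borrowed pointer releases state
 * the set does not own, so the real owner's later reset/destroy touches
 * freed memory (CWE-416). */
static void reset_set_buggy(struct classperms_set *cp_set)
{
    free(cp_set->set->classperms);   /* frees the owner's data */
    cp_set->set->classperms = NULL;
}

/* Post-fix shape, mirroring the diff above: only drop the reference and
 * leave destruction to the single owner. */
static void reset_set_fixed(struct classperms_set *cp_set)
{
    if (cp_set == NULL)
        return;
    cp_set->set = NULL;
}

int main(void)
{
    struct classpermission cp  = { malloc(4 * sizeof(int)) };
    struct classperms_set  set = { &cp };

    (void)reset_set_buggy;   /* not called: would set up a double free */
    reset_set_fixed(&set);   /* cp.classperms is untouched */
    free(cp.classperms);     /* single, well-defined free by the owner */
    printf("set.set after reset: %p\n", (void *)set.set);
    return 0;
}
```

The design point the fix encodes is that a reset routine should only touch state it owns; clearing a borrowed pointer is safe, while resetting through it races the owner's own cleanup.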
380
str.c
C++
caml_bitvect_test
/***********************************************************************/ /* */ /* OCaml */ /* */ /* Xavier Leroy, projet Cristal, INRIA Rocquencourt */ /* */ /* Copyright 1996 Institut National de Recherche en Informatique et */ /* en Automatique. All rights reserved. This file is distributed */ /* under the terms of the GNU Library General Public License, with */ /* the special exception on linking described in file ../LICENSE. */ /* */ /***********************************************************************/ /* Operations on strings */ #include <string.h> #include <ctype.h> #include <stdio.h> #include <stdarg.h> #include "caml/alloc.h" #include "caml/fail.h" #include "caml/mlvalues.h" #include "caml/misc.h" /* returns a number of bytes (chars) */ CAMLexport mlsize_t caml_string_length(value s) { mlsize_t temp; temp = Bosize_val(s) - 1; Assert (Byte (s, temp - Byte (s, temp)) == 0); return temp - Byte (s, temp); } /* returns a value that represents a number of bytes (chars) */ CAMLprim value caml_ml_string_length(value s) { mlsize_t temp; temp = Bosize_val(s) - 1; Assert (Byte (s, temp - Byte (s, temp)) == 0); return Val_long(temp - Byte (s, temp)); } /* [len] is a value that represents a number of bytes (chars) */ CAMLprim value caml_create_string(value len) { mlsize_t size = Long_val(len); if (size > Bsize_wsize (Max_wosize) - 1){ caml_invalid_argument("String.create"); } return caml_alloc_string(size); } CAMLprim value caml_string_get(value str, value index) { intnat idx = Long_val(index); if (idx < 0 || idx >= caml_string_length(str)) caml_array_bound_error(); return Val_int(Byte_u(str, idx)); } CAMLprim value caml_string_set(value str, value index, value newval) { intnat idx = Long_val(index); if (idx < 0 || idx >= caml_string_length(str)) caml_array_bound_error(); Byte_u(str, idx) = Int_val(newval); return Val_unit; } CAMLprim value caml_string_get16(value str, value index) { intnat res; unsigned char b1, b2; intnat idx = Long_val(index); if (idx < 0 || idx + 1 >= caml_string_length(str)) caml_array_bound_error(); b1 = Byte_u(str, idx); b2 = Byte_u(str, idx + 1); #ifdef ARCH_BIG_ENDIAN res = b1 << 8 | b2; #else res = b2 << 8 | b1; #endif return Val_int(res); } CAMLprim value caml_string_get32(value str, value index) { intnat res; unsigned char b1, b2, b3, b4; intnat idx = Long_val(index); if (idx < 0 || idx + 3 >= caml_string_length(str)) caml_array_bound_error(); b1 = Byte_u(str, idx); b2 = Byte_u(str, idx + 1); b3 = Byte_u(str, idx + 2); b4 = Byte_u(str, idx + 3); #ifdef ARCH_BIG_ENDIAN res = b1 << 24 | b2 << 16 | b3 << 8 | b4; #else res = b4 << 24 | b3 << 16 | b2 << 8 | b1; #endif return caml_copy_int32(res); } CAMLprim value caml_string_get64(value str, value index) { uint64_t res; unsigned char b1, b2, b3, b4, b5, b6, b7, b8; intnat idx = Long_val(index); if (idx < 0 || idx + 7 >= caml_string_length(str)) caml_array_bound_error(); b1 = Byte_u(str, idx); b2 = Byte_u(str, idx + 1); b3 = Byte_u(str, idx + 2); b4 = Byte_u(str, idx + 3); b5 = Byte_u(str, idx + 4); b6 = Byte_u(str, idx + 5); b7 = Byte_u(str, idx + 6); b8 = Byte_u(str, idx + 7); #ifdef ARCH_BIG_ENDIAN res = (uint64_t) b1 << 56 | (uint64_t) b2 << 48 | (uint64_t) b3 << 40 | (uint64_t) b4 << 32 | (uint64_t) b5 << 24 | (uint64_t) b6 << 16 | (uint64_t) b7 << 8 | (uint64_t) b8; #else res = (uint64_t) b8 << 56 | (uint64_t) b7 << 48 | (uint64_t) b6 << 40 | (uint64_t) b5 << 32 | (uint64_t) b4 << 24 | (uint64_t) b3 << 16 | (uint64_t) b2 << 8 | (uint64_t) b1; #endif return caml_copy_int64(res); } CAMLprim value 
caml_string_set16(value str, value index, value newval) { unsigned char b1, b2; intnat val; intnat idx = Long_val(index); if (idx < 0 || idx + 1 >= caml_string_length(str)) caml_array_bound_error(); val = Long_val(newval); #ifdef ARCH_BIG_ENDIAN b1 = 0xFF & val >> 8; b2 = 0xFF & val; #else b2 = 0xFF & val >> 8; b1 = 0xFF & val; #endif Byte_u(str, idx) = b1; Byte_u(str, idx + 1) = b2; return Val_unit; } CAMLprim value caml_string_set32(value str, value index, value newval) { unsigned char b1, b2, b3, b4; intnat val; intnat idx = Long_val(index); if (idx < 0 || idx + 3 >= caml_string_length(str)) caml_array_bound_error(); val = Int32_val(newval); #ifdef ARCH_BIG_ENDIAN b1 = 0xFF & val >> 24; b2 = 0xFF & val >> 16; b3 = 0xFF & val >> 8; b4 = 0xFF & val; #else b4 = 0xFF & val >> 24; b3 = 0xFF & val >> 16; b2 = 0xFF & val >> 8; b1 = 0xFF & val; #endif Byte_u(str, idx) = b1; Byte_u(str, idx + 1) = b2; Byte_u(str, idx + 2) = b3; Byte_u(str, idx + 3) = b4; return Val_unit; } CAMLprim value caml_string_set64(value str, value index, value newval) { unsigned char b1, b2, b3, b4, b5, b6, b7, b8; int64_t val; intnat idx = Long_val(index); if (idx < 0 || idx + 7 >= caml_string_length(str)) caml_array_bound_error(); val = Int64_val(newval); #ifdef ARCH_BIG_ENDIAN b1 = 0xFF & val >> 56; b2 = 0xFF & val >> 48; b3 = 0xFF & val >> 40; b4 = 0xFF & val >> 32; b5 = 0xFF & val >> 24; b6 = 0xFF & val >> 16; b7 = 0xFF & val >> 8; b8 = 0xFF & val; #else b8 = 0xFF & val >> 56; b7 = 0xFF & val >> 48; b6 = 0xFF & val >> 40; b5 = 0xFF & val >> 32; b4 = 0xFF & val >> 24; b3 = 0xFF & val >> 16; b2 = 0xFF & val >> 8; b1 = 0xFF & val; #endif Byte_u(str, idx) = b1; Byte_u(str, idx + 1) = b2; Byte_u(str, idx + 2) = b3; Byte_u(str, idx + 3) = b4; Byte_u(str, idx + 4) = b5; Byte_u(str, idx + 5) = b6; Byte_u(str, idx + 6) = b7; Byte_u(str, idx + 7) = b8; return Val_unit; } CAMLprim value caml_string_equal(value s1, value s2) { mlsize_t sz1, sz2; value * p1, * p2; if (s1 == s2) return Val_true; sz1 = Wosize_val(s1); sz2 = Wosize_val(s2); if (sz1 != sz2) return Val_false; for(p1 = Op_val(s1), p2 = Op_val(s2); sz1 > 0; sz1--, p1++, p2++) if (*p1 != *p2) return Val_false; return Val_true; } CAMLprim value caml_string_notequal(value s1, value s2) { return Val_not(caml_string_equal(s1, s2)); } CAMLprim value caml_string_compare(value s1, value s2) { mlsize_t len1, len2; int res; if (s1 == s2) return Val_int(0); len1 = caml_string_length(s1); len2 = caml_string_length(s2); res = memcmp(String_val(s1), String_val(s2), len1 <= len2 ? len1 : len2); if (res < 0) return Val_int(-1); if (res > 0) return Val_int(1); if (len1 < len2) return Val_int(-1); if (len1 > len2) return Val_int(1); return Val_int(0); } CAMLprim value caml_string_lessthan(value s1, value s2) { return caml_string_compare(s1, s2) < Val_int(0) ? Val_true : Val_false; } CAMLprim value caml_string_lessequal(value s1, value s2) { return caml_string_compare(s1, s2) <= Val_int(0) ? Val_true : Val_false; } CAMLprim value caml_string_greaterthan(value s1, value s2) { return caml_string_compare(s1, s2) > Val_int(0) ? Val_true : Val_false; } CAMLprim value caml_string_greaterequal(value s1, value s2) { return caml_string_compare(s1, s2) >= Val_int(0) ? 
Val_true : Val_false; } CAMLprim value caml_blit_string(value s1, value ofs1, value s2, value ofs2, value n) { memmove(&Byte(s2, Long_val(ofs2)), &Byte(s1, Long_val(ofs1)), Int_val(n)); return Val_unit; } CAMLprim value caml_fill_string(value s, value offset, value len, value init) { memset(&Byte(s, Long_val(offset)), Int_val(init), Long_val(len)); return Val_unit; } CAMLprim value caml_bitvect_test(value bv, value n) { int pos = Int_val(n); return Val_int(Byte_u(bv, pos >> 3) & (1 << (pos & 7))); } CAMLexport value caml_alloc_sprintf(const char * format, ...) { va_list args; char buf[64]; int n; value res; #ifndef _WIN32 /* C99-compliant implementation */ va_start(args, format); /* "vsnprintf(dest, sz, format, args)" writes at most "sz" characters into "dest", including the terminating '\0'. It returns the number of characters of the formatted string, excluding the terminating '\0'. */ n = vsnprintf(buf, sizeof(buf), format, args); va_end(args); /* Allocate a Caml string with length "n" as computed by vsnprintf. */ res = caml_alloc_string(n); if (n < sizeof(buf)) { /* All output characters were written to buf, including the terminating '\0'. Just copy them to the result. */ memcpy(String_val(res), buf, n); } else { /* Re-do the formatting, outputting directly in the Caml string. Note that caml_alloc_string left room for a '\0' at position n, so the size passed to vsnprintf is n+1. */ va_start(args, format); vsnprintf(String_val(res), n + 1, format, args); va_end(args); } return res; #else /* Implementation specific to the Microsoft CRT library */ va_start(args, format); /* "_vsnprintf(dest, sz, format, args)" writes at most "sz" characters into "dest". Let "len" be the number of characters of the formatted string. If "len" < "sz", a null terminator was appended, and "len" is returned. If "len" == "sz", no null termination, and "len" is returned. If "len" > "sz", a negative value is returned. */ n = _vsnprintf(buf, sizeof(buf), format, args); va_end(args); if (n >= 0 && n <= sizeof(buf)) { /* All output characters were written to buf. "n" is the actual length of the output. Copy the characters to a Caml string of length n. */ res = caml_alloc_string(n); memcpy(String_val(res), buf, n); } else { /* Determine actual length of output, excluding final '\0' */ va_start(args, format); n = _vscprintf(format, args); va_end(args); res = caml_alloc_string(n); /* Re-do the formatting, outputting directly in the Caml string. Note that caml_alloc_string left room for a '\0' at position n, so the size passed to _vsnprintf is n+1. */ va_start(args, format); _vsnprintf(String_val(res), n + 1, format, args); va_end(args); } return res; #endif }
/***********************************************************************/ /* */ /* OCaml */ /* */ /* Xavier Leroy, projet Cristal, INRIA Rocquencourt */ /* */ /* Copyright 1996 Institut National de Recherche en Informatique et */ /* en Automatique. All rights reserved. This file is distributed */ /* under the terms of the GNU Library General Public License, with */ /* the special exception on linking described in file ../LICENSE. */ /* */ /***********************************************************************/ /* Operations on strings */ #include <string.h> #include <ctype.h> #include <stdio.h> #include <stdarg.h> #include "caml/alloc.h" #include "caml/fail.h" #include "caml/mlvalues.h" #include "caml/misc.h" /* returns a number of bytes (chars) */ CAMLexport mlsize_t caml_string_length(value s) { mlsize_t temp; temp = Bosize_val(s) - 1; Assert (Byte (s, temp - Byte (s, temp)) == 0); return temp - Byte (s, temp); } /* returns a value that represents a number of bytes (chars) */ CAMLprim value caml_ml_string_length(value s) { mlsize_t temp; temp = Bosize_val(s) - 1; Assert (Byte (s, temp - Byte (s, temp)) == 0); return Val_long(temp - Byte (s, temp)); } /* [len] is a value that represents a number of bytes (chars) */ CAMLprim value caml_create_string(value len) { mlsize_t size = Long_val(len); if (size > Bsize_wsize (Max_wosize) - 1){ caml_invalid_argument("String.create"); } return caml_alloc_string(size); } CAMLprim value caml_string_get(value str, value index) { intnat idx = Long_val(index); if (idx < 0 || idx >= caml_string_length(str)) caml_array_bound_error(); return Val_int(Byte_u(str, idx)); } CAMLprim value caml_string_set(value str, value index, value newval) { intnat idx = Long_val(index); if (idx < 0 || idx >= caml_string_length(str)) caml_array_bound_error(); Byte_u(str, idx) = Int_val(newval); return Val_unit; } CAMLprim value caml_string_get16(value str, value index) { intnat res; unsigned char b1, b2; intnat idx = Long_val(index); if (idx < 0 || idx + 1 >= caml_string_length(str)) caml_array_bound_error(); b1 = Byte_u(str, idx); b2 = Byte_u(str, idx + 1); #ifdef ARCH_BIG_ENDIAN res = b1 << 8 | b2; #else res = b2 << 8 | b1; #endif return Val_int(res); } CAMLprim value caml_string_get32(value str, value index) { intnat res; unsigned char b1, b2, b3, b4; intnat idx = Long_val(index); if (idx < 0 || idx + 3 >= caml_string_length(str)) caml_array_bound_error(); b1 = Byte_u(str, idx); b2 = Byte_u(str, idx + 1); b3 = Byte_u(str, idx + 2); b4 = Byte_u(str, idx + 3); #ifdef ARCH_BIG_ENDIAN res = b1 << 24 | b2 << 16 | b3 << 8 | b4; #else res = b4 << 24 | b3 << 16 | b2 << 8 | b1; #endif return caml_copy_int32(res); } CAMLprim value caml_string_get64(value str, value index) { uint64_t res; unsigned char b1, b2, b3, b4, b5, b6, b7, b8; intnat idx = Long_val(index); if (idx < 0 || idx + 7 >= caml_string_length(str)) caml_array_bound_error(); b1 = Byte_u(str, idx); b2 = Byte_u(str, idx + 1); b3 = Byte_u(str, idx + 2); b4 = Byte_u(str, idx + 3); b5 = Byte_u(str, idx + 4); b6 = Byte_u(str, idx + 5); b7 = Byte_u(str, idx + 6); b8 = Byte_u(str, idx + 7); #ifdef ARCH_BIG_ENDIAN res = (uint64_t) b1 << 56 | (uint64_t) b2 << 48 | (uint64_t) b3 << 40 | (uint64_t) b4 << 32 | (uint64_t) b5 << 24 | (uint64_t) b6 << 16 | (uint64_t) b7 << 8 | (uint64_t) b8; #else res = (uint64_t) b8 << 56 | (uint64_t) b7 << 48 | (uint64_t) b6 << 40 | (uint64_t) b5 << 32 | (uint64_t) b4 << 24 | (uint64_t) b3 << 16 | (uint64_t) b2 << 8 | (uint64_t) b1; #endif return caml_copy_int64(res); } CAMLprim value 
caml_string_set16(value str, value index, value newval) { unsigned char b1, b2; intnat val; intnat idx = Long_val(index); if (idx < 0 || idx + 1 >= caml_string_length(str)) caml_array_bound_error(); val = Long_val(newval); #ifdef ARCH_BIG_ENDIAN b1 = 0xFF & val >> 8; b2 = 0xFF & val; #else b2 = 0xFF & val >> 8; b1 = 0xFF & val; #endif Byte_u(str, idx) = b1; Byte_u(str, idx + 1) = b2; return Val_unit; } CAMLprim value caml_string_set32(value str, value index, value newval) { unsigned char b1, b2, b3, b4; intnat val; intnat idx = Long_val(index); if (idx < 0 || idx + 3 >= caml_string_length(str)) caml_array_bound_error(); val = Int32_val(newval); #ifdef ARCH_BIG_ENDIAN b1 = 0xFF & val >> 24; b2 = 0xFF & val >> 16; b3 = 0xFF & val >> 8; b4 = 0xFF & val; #else b4 = 0xFF & val >> 24; b3 = 0xFF & val >> 16; b2 = 0xFF & val >> 8; b1 = 0xFF & val; #endif Byte_u(str, idx) = b1; Byte_u(str, idx + 1) = b2; Byte_u(str, idx + 2) = b3; Byte_u(str, idx + 3) = b4; return Val_unit; } CAMLprim value caml_string_set64(value str, value index, value newval) { unsigned char b1, b2, b3, b4, b5, b6, b7, b8; int64_t val; intnat idx = Long_val(index); if (idx < 0 || idx + 7 >= caml_string_length(str)) caml_array_bound_error(); val = Int64_val(newval); #ifdef ARCH_BIG_ENDIAN b1 = 0xFF & val >> 56; b2 = 0xFF & val >> 48; b3 = 0xFF & val >> 40; b4 = 0xFF & val >> 32; b5 = 0xFF & val >> 24; b6 = 0xFF & val >> 16; b7 = 0xFF & val >> 8; b8 = 0xFF & val; #else b8 = 0xFF & val >> 56; b7 = 0xFF & val >> 48; b6 = 0xFF & val >> 40; b5 = 0xFF & val >> 32; b4 = 0xFF & val >> 24; b3 = 0xFF & val >> 16; b2 = 0xFF & val >> 8; b1 = 0xFF & val; #endif Byte_u(str, idx) = b1; Byte_u(str, idx + 1) = b2; Byte_u(str, idx + 2) = b3; Byte_u(str, idx + 3) = b4; Byte_u(str, idx + 4) = b5; Byte_u(str, idx + 5) = b6; Byte_u(str, idx + 6) = b7; Byte_u(str, idx + 7) = b8; return Val_unit; } CAMLprim value caml_string_equal(value s1, value s2) { mlsize_t sz1, sz2; value * p1, * p2; if (s1 == s2) return Val_true; sz1 = Wosize_val(s1); sz2 = Wosize_val(s2); if (sz1 != sz2) return Val_false; for(p1 = Op_val(s1), p2 = Op_val(s2); sz1 > 0; sz1--, p1++, p2++) if (*p1 != *p2) return Val_false; return Val_true; } CAMLprim value caml_string_notequal(value s1, value s2) { return Val_not(caml_string_equal(s1, s2)); } CAMLprim value caml_string_compare(value s1, value s2) { mlsize_t len1, len2; int res; if (s1 == s2) return Val_int(0); len1 = caml_string_length(s1); len2 = caml_string_length(s2); res = memcmp(String_val(s1), String_val(s2), len1 <= len2 ? len1 : len2); if (res < 0) return Val_int(-1); if (res > 0) return Val_int(1); if (len1 < len2) return Val_int(-1); if (len1 > len2) return Val_int(1); return Val_int(0); } CAMLprim value caml_string_lessthan(value s1, value s2) { return caml_string_compare(s1, s2) < Val_int(0) ? Val_true : Val_false; } CAMLprim value caml_string_lessequal(value s1, value s2) { return caml_string_compare(s1, s2) <= Val_int(0) ? Val_true : Val_false; } CAMLprim value caml_string_greaterthan(value s1, value s2) { return caml_string_compare(s1, s2) > Val_int(0) ? Val_true : Val_false; } CAMLprim value caml_string_greaterequal(value s1, value s2) { return caml_string_compare(s1, s2) >= Val_int(0) ? 
Val_true : Val_false; } CAMLprim value caml_blit_string(value s1, value ofs1, value s2, value ofs2, value n) { memmove(&Byte(s2, Long_val(ofs2)), &Byte(s1, Long_val(ofs1)), Long_val(n)); return Val_unit; } CAMLprim value caml_fill_string(value s, value offset, value len, value init) { memset(&Byte(s, Long_val(offset)), Int_val(init), Long_val(len)); return Val_unit; } CAMLprim value caml_bitvect_test(value bv, value n) { intnat pos = Long_val(n); return Val_int(Byte_u(bv, pos >> 3) & (1 << (pos & 7))); } CAMLexport value caml_alloc_sprintf(const char * format, ...) { va_list args; char buf[64]; int n; value res; #ifndef _WIN32 /* C99-compliant implementation */ va_start(args, format); /* "vsnprintf(dest, sz, format, args)" writes at most "sz" characters into "dest", including the terminating '\0'. It returns the number of characters of the formatted string, excluding the terminating '\0'. */ n = vsnprintf(buf, sizeof(buf), format, args); va_end(args); /* Allocate a Caml string with length "n" as computed by vsnprintf. */ res = caml_alloc_string(n); if (n < sizeof(buf)) { /* All output characters were written to buf, including the terminating '\0'. Just copy them to the result. */ memcpy(String_val(res), buf, n); } else { /* Re-do the formatting, outputting directly in the Caml string. Note that caml_alloc_string left room for a '\0' at position n, so the size passed to vsnprintf is n+1. */ va_start(args, format); vsnprintf(String_val(res), n + 1, format, args); va_end(args); } return res; #else /* Implementation specific to the Microsoft CRT library */ va_start(args, format); /* "_vsnprintf(dest, sz, format, args)" writes at most "sz" characters into "dest". Let "len" be the number of characters of the formatted string. If "len" < "sz", a null terminator was appended, and "len" is returned. If "len" == "sz", no null termination, and "len" is returned. If "len" > "sz", a negative value is returned. */ n = _vsnprintf(buf, sizeof(buf), format, args); va_end(args); if (n >= 0 && n <= sizeof(buf)) { /* All output characters were written to buf. "n" is the actual length of the output. Copy the characters to a Caml string of length n. */ res = caml_alloc_string(n); memcpy(String_val(res), buf, n); } else { /* Determine actual length of output, excluding final '\0' */ va_start(args, format); n = _vscprintf(format, args); va_end(args); res = caml_alloc_string(n); /* Re-do the formatting, outputting directly in the Caml string. Note that caml_alloc_string left room for a '\0' at position n, so the size passed to _vsnprintf is n+1. */ va_start(args, format); _vsnprintf(String_val(res), n + 1, format, args); va_end(args); } return res; #endif }
CAMLprim value caml_bitvect_test(value bv, value n) { int pos = Int_val(n); return Val_int(Byte_u(bv, pos >> 3) & (1 << (pos & 7))); }
CAMLprim value caml_bitvect_test(value bv, value n) { intnat pos = Long_val(n); return Val_int(Byte_u(bv, pos >> 3) & (1 << (pos & 7))); }
{'added': [(269, ' memmove(&Byte(s2, Long_val(ofs2)), &Byte(s1, Long_val(ofs1)), Long_val(n));'), (281, ' intnat pos = Long_val(n);')], 'deleted': [(269, ' memmove(&Byte(s2, Long_val(ofs2)), &Byte(s1, Long_val(ofs1)), Int_val(n));'), (281, ' int pos = Int_val(n);')]}
2
2
259
2,103
5
43
1
https://github.com/ocaml/ocaml
CVE-2015-8869
CWE-119
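(Note on this row: the diff above swaps `Int_val`/`int` for `Long_val`/`intnat` in caml_blit_string and caml_bitvect_test, which is the CVE-2015-8869 fix. The standalone C sketch below, with simplified names rather than the actual OCaml runtime macros, shows the truncation being closed: on 64-bit platforms an OCaml int is up to 63 bits wide, so reading one through a 32-bit `int` wraps large values and can defeat later bounds arithmetic.)

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* An index/length larger than 4 GiB, representable in an OCaml int
     * on 64-bit platforms. */
    int64_t unboxed = ((int64_t)1 << 32) + 8;

    int     pos_old = (int)unboxed;  /* pre-fix: typically wraps to 8 */
    int64_t pos_new = unboxed;       /* post-fix: full width preserved */

    /* With the truncated value, a bounds check or memmove length sees a
     * tiny number while the caller meant a huge one (CWE-119). */
    printf("old: %d  new: %lld\n", pos_old, (long long)pos_new);
    return 0;
}
```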
1,295
pixel-accessor.h
C
GetPixelChannel
/* Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization dedicated to making software imaging solutions freely available. You may not use this file except in compliance with the License. You may obtain a copy of the License at https://imagemagick.org/script/license.php Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. MagickCore pixel accessor methods. */ #ifndef MAGICKCORE_PIXEL_ACCESSOR_H #define MAGICKCORE_PIXEL_ACCESSOR_H #include <assert.h> #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/colorspace.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #undef index static inline Quantum ClampPixel(const MagickRealType pixel) { if (pixel < 0.0f) return((Quantum) 0); if (pixel >= (MagickRealType) QuantumRange) return((Quantum) QuantumRange); #if !defined(MAGICKCORE_HDRI_SUPPORT) return((Quantum) (pixel+0.5f)); #else return((Quantum) pixel); #endif } static inline Quantum GetPixela(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[aPixelChannel].offset]); } static inline Quantum GetPixelAlpha(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[AlphaPixelChannel].traits == UndefinedPixelTrait) return(OpaqueAlpha); return(pixel[image->channel_map[AlphaPixelChannel].offset]); } static inline PixelTrait GetPixelAlphaTraits( const Image *magick_restrict image) { return(image->channel_map[AlphaPixelChannel].traits); } static inline Quantum GetPixelb(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[bPixelChannel].offset]); } static inline Quantum GetPixelBlack(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[BlackPixelChannel].traits == UndefinedPixelTrait) return((Quantum) 0); return(pixel[image->channel_map[BlackPixelChannel].offset]); } static inline PixelTrait GetPixelBlackTraits( const Image *magick_restrict image) { return(image->channel_map[BlackPixelChannel].traits); } static inline Quantum GetPixelBlue(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[BluePixelChannel].offset]); } static inline PixelTrait GetPixelBlueTraits(const Image *magick_restrict image) { return(image->channel_map[BluePixelChannel].traits); } static inline Quantum GetPixelCb(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[CbPixelChannel].offset]); } static inline PixelTrait GetPixelCbTraits(const Image *magick_restrict image) { return(image->channel_map[CbPixelChannel].traits); } static inline Quantum GetPixelChannel(const Image *magick_restrict image, const PixelChannel channel,const Quantum *magick_restrict pixel) { if (image->channel_map[channel].traits == UndefinedPixelTrait) return((Quantum) 0); return(pixel[image->channel_map[channel].offset]); } static inline PixelChannel GetPixelChannelChannel( const Image *magick_restrict image,const ssize_t offset) { return(image->channel_map[offset].channel); } static inline ssize_t 
GetPixelChannelOffset(const Image *magick_restrict image, const PixelChannel channel) { return(image->channel_map[channel].offset); } static inline PixelTrait GetPixelChannelTraits( const Image *magick_restrict image,const PixelChannel channel) { return(image->channel_map[channel].traits); } static inline size_t GetPixelChannels(const Image *magick_restrict image) { return(image->number_channels); } static inline Quantum GetPixelCompositeMask(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[CompositeMaskPixelChannel].traits == UndefinedPixelTrait) return((Quantum) QuantumRange); return(pixel[image->channel_map[CompositeMaskPixelChannel].offset]); } static inline Quantum GetPixelCr(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[CrPixelChannel].offset]); } static inline PixelTrait GetPixelCrTraits(const Image *magick_restrict image) { return(image->channel_map[CrPixelChannel].traits); } static inline Quantum GetPixelCyan(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[CyanPixelChannel].offset]); } static inline PixelTrait GetPixelCyanTraits(const Image *magick_restrict image) { return(image->channel_map[CyanPixelChannel].traits); } static inline Quantum GetPixelGray(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[GrayPixelChannel].offset]); } static inline PixelTrait GetPixelGrayTraits(const Image *magick_restrict image) { return(image->channel_map[GrayPixelChannel].traits); } static inline Quantum GetPixelGreen(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[GreenPixelChannel].offset]); } static inline PixelTrait GetPixelGreenTraits( const Image *magick_restrict image) { return(image->channel_map[GreenPixelChannel].traits); } static inline Quantum GetPixelIndex(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[IndexPixelChannel].traits == UndefinedPixelTrait) return((Quantum) 0); return(pixel[image->channel_map[IndexPixelChannel].offset]); } static inline PixelTrait GetPixelIndexTraits( const Image *magick_restrict image) { return(image->channel_map[IndexPixelChannel].traits); } static inline MagickRealType GetPixelInfoChannel( const PixelInfo *magick_restrict pixel_info,const PixelChannel channel) { switch (channel) { case RedPixelChannel: return(pixel_info->red); case GreenPixelChannel: return(pixel_info->green); case BluePixelChannel: return(pixel_info->blue); case BlackPixelChannel: return(pixel_info->black); case AlphaPixelChannel: return(pixel_info->alpha); case IndexPixelChannel: return(pixel_info->index); default: return((MagickRealType) 0.0); } } static inline double PerceptibleReciprocal(const double x) { double sign; /* Return 1/x where x is perceptible (not unlimited or infinitesimal). */ sign=x < 0.0 ? 
-1.0 : 1.0; if ((sign*x) >= MagickEpsilon) return(1.0/x); return(sign/MagickEpsilon); } static inline MagickRealType GetPixelInfoLuma( const PixelInfo *magick_restrict pixel) { MagickRealType intensity; if (pixel->colorspace == sRGBColorspace) { intensity=(MagickRealType) (0.212656f*pixel->red+0.715158f*pixel->green+ 0.072186f*pixel->blue); return(intensity); } intensity=(MagickRealType) (0.212656f*EncodePixelGamma(pixel->red)+ 0.715158f*EncodePixelGamma(pixel->green)+ 0.072186f*EncodePixelGamma(pixel->blue)); return(intensity); } static inline MagickRealType GetPixelInfoLuminance( const PixelInfo *magick_restrict pixel) { MagickRealType intensity; if (pixel->colorspace != sRGBColorspace) { intensity=(MagickRealType) (0.212656f*pixel->red+0.715158f*pixel->green+ 0.072186f*pixel->blue); return(intensity); } intensity=(MagickRealType) (0.212656f*DecodePixelGamma(pixel->red)+ 0.715158f*DecodePixelGamma(pixel->green)+ 0.072186f*DecodePixelGamma(pixel->blue)); return(intensity); } static inline Quantum GetPixelL(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[LPixelChannel].offset]); } static inline ssize_t GetPixelLabel(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return((ssize_t) pixel[image->channel_map[LabelPixelChannel].offset]); } static inline MagickRealType GetPixelLuma(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { MagickRealType intensity; intensity=(MagickRealType) ( 0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+ 0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+ 0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); return(intensity); } static inline MagickRealType GetPixelLuminance( const Image *magick_restrict image,const Quantum *magick_restrict pixel) { MagickRealType intensity; if (image->colorspace != sRGBColorspace) { intensity=(MagickRealType) ( 0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+ 0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+ 0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); return(intensity); } intensity=(MagickRealType) (0.212656f*DecodePixelGamma((MagickRealType) pixel[image->channel_map[RedPixelChannel].offset])+0.715158f* DecodePixelGamma((MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset])+0.072186f* DecodePixelGamma((MagickRealType) pixel[image->channel_map[BluePixelChannel].offset])); return(intensity); } static inline Quantum GetPixelMagenta(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[MagentaPixelChannel].offset]); } static inline PixelTrait GetPixelMagentaTraits( const Image *magick_restrict image) { return(image->channel_map[MagentaPixelChannel].traits); } static inline Quantum GetPixelReadMask(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[ReadMaskPixelChannel].traits == UndefinedPixelTrait) return((Quantum) QuantumRange); return(pixel[image->channel_map[ReadMaskPixelChannel].offset]); } static inline Quantum GetPixelWriteMask(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[WriteMaskPixelChannel].traits == UndefinedPixelTrait) return((Quantum) QuantumRange); return(pixel[image->channel_map[WriteMaskPixelChannel].offset]); } static inline PixelTrait GetPixelReadMaskTraits( const Image *magick_restrict image) { return(image->channel_map[ReadMaskPixelChannel].traits); } static 
inline size_t GetPixelMetaChannels(const Image *magick_restrict image) { return(image->number_meta_channels); } static inline size_t GetPixelMetacontentExtent( const Image *magick_restrict image) { return(image->metacontent_extent); } static inline Quantum GetPixelOpacity(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[AlphaPixelChannel].traits != BlendPixelTrait) return(QuantumRange-OpaqueAlpha); return(QuantumRange-pixel[image->channel_map[AlphaPixelChannel].offset]); } static inline Quantum GetPixelRed(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[RedPixelChannel].offset]); } static inline PixelTrait GetPixelRedTraits(const Image *magick_restrict image) { return(image->channel_map[RedPixelChannel].traits); } static inline void GetPixelInfoPixel(const Image *magick_restrict image, const Quantum *magick_restrict pixel,PixelInfo *magick_restrict pixel_info) { (void) ResetMagickMemory(pixel_info,0,sizeof(*pixel_info)); pixel_info->storage_class=DirectClass; pixel_info->colorspace=sRGBColorspace; pixel_info->depth=MAGICKCORE_QUANTUM_DEPTH; pixel_info->alpha_trait=UndefinedPixelTrait; pixel_info->alpha=(MagickRealType) OpaqueAlpha; if (image != (Image *) NULL) { pixel_info->storage_class=image->storage_class; pixel_info->colorspace=image->colorspace; pixel_info->fuzz=image->fuzz; pixel_info->depth=image->depth; pixel_info->alpha_trait=image->alpha_trait; if (pixel != (Quantum *) NULL) { pixel_info->red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]; pixel_info->green=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset]; pixel_info->blue=(MagickRealType) pixel[image->channel_map[BluePixelChannel].offset]; if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait) pixel_info->black=(MagickRealType) pixel[image->channel_map[BlackPixelChannel].offset]; if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) { pixel_info->alpha=(MagickRealType) pixel[image->channel_map[AlphaPixelChannel].offset]; pixel_info->alpha_trait=BlendPixelTrait; } if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait) pixel_info->index=(MagickRealType) pixel[image->channel_map[IndexPixelChannel].offset]; } } } static inline PixelTrait GetPixelTraits(const Image *magick_restrict image, const PixelChannel channel) { return(image->channel_map[channel].traits); } static inline Quantum GetPixelY(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[YPixelChannel].offset]); } static inline PixelTrait GetPixelYTraits(const Image *magick_restrict image) { return(image->channel_map[YPixelChannel].traits); } static inline Quantum GetPixelYellow(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[YellowPixelChannel].offset]); } static inline PixelTrait GetPixelYellowTraits( const Image *magick_restrict image) { return(image->channel_map[YellowPixelChannel].traits); } static inline MagickRealType AbsolutePixelValue(const MagickRealType x) { return(x < 0.0f ? 
-x : x); } static inline MagickBooleanType IsPixelAtDepth(const Quantum pixel, const QuantumAny range) { Quantum quantum; if (range == 0) return(MagickTrue); #if !defined(MAGICKCORE_HDRI_SUPPORT) quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny) (((MagickRealType) range*pixel)/QuantumRange+0.5)))/range+0.5); #else quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny) (((MagickRealType) range*pixel)/QuantumRange+0.5)))/range); #endif return(pixel == quantum ? MagickTrue : MagickFalse); } static inline MagickBooleanType IsPixelEquivalent( const Image *magick_restrict image,const Quantum *magick_restrict p, const PixelInfo *magick_restrict q) { MagickRealType alpha, beta, color; color=(MagickRealType) p[image->channel_map[AlphaPixelChannel].offset]; alpha=image->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha : color; beta=q->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha : q->alpha; if (AbsolutePixelValue(alpha-beta) >= MagickEpsilon) return(MagickFalse); if ((AbsolutePixelValue(alpha-TransparentAlpha) < MagickEpsilon) || (AbsolutePixelValue(beta-TransparentAlpha) < MagickEpsilon)) return(MagickTrue); /* no color component if pixel is transparent */ color=(MagickRealType) p[image->channel_map[RedPixelChannel].offset]; if (AbsolutePixelValue(color-q->red) >= MagickEpsilon) return(MagickFalse); color=(MagickRealType) p[image->channel_map[GreenPixelChannel].offset]; if (AbsolutePixelValue(color-q->green) >= MagickEpsilon) return(MagickFalse); color=(MagickRealType) p[image->channel_map[BluePixelChannel].offset]; if (AbsolutePixelValue(color-q->blue) >= MagickEpsilon) return(MagickFalse); if (image->colorspace == CMYKColorspace) { color=(MagickRealType) p[image->channel_map[BlackPixelChannel].offset]; if (AbsolutePixelValue(color-q->black) >= MagickEpsilon) return(MagickFalse); } return(MagickTrue); } static inline MagickBooleanType IsPixelGray(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { MagickRealType green_blue, red_green; red_green=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]- pixel[image->channel_map[GreenPixelChannel].offset]; green_blue=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset]- pixel[image->channel_map[BluePixelChannel].offset]; if ((AbsolutePixelValue(red_green) < MagickEpsilon) && (AbsolutePixelValue(green_blue) < MagickEpsilon)) return(MagickTrue); return(MagickFalse); } static inline MagickBooleanType IsPixelInfoEquivalent( const PixelInfo *magick_restrict p,const PixelInfo *magick_restrict q) { MagickRealType alpha, beta; alpha=p->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha : p->alpha; beta=q->alpha_trait == UndefinedPixelTrait ? 
(MagickRealType) OpaqueAlpha : q->alpha; if (AbsolutePixelValue(alpha-beta) >= MagickEpsilon) return(MagickFalse); if ((AbsolutePixelValue(alpha-TransparentAlpha) < MagickEpsilon) || (AbsolutePixelValue(beta-TransparentAlpha) < MagickEpsilon)) return(MagickTrue); /* no color component if pixel is transparent */ if (AbsolutePixelValue(p->red-q->red) >= MagickEpsilon) return(MagickFalse); if (AbsolutePixelValue(p->green-q->green) >= MagickEpsilon) return(MagickFalse); if (AbsolutePixelValue(p->blue-q->blue) >= MagickEpsilon) return(MagickFalse); if (p->colorspace == CMYKColorspace) { if (AbsolutePixelValue(p->black-q->black) >= MagickEpsilon) return(MagickFalse); } return(MagickTrue); } static inline MagickBooleanType IsPixelMonochrome( const Image *magick_restrict image,const Quantum *magick_restrict pixel) { MagickRealType green_blue, red, red_green; red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]; if ((AbsolutePixelValue(red) >= MagickEpsilon) && (AbsolutePixelValue(red-QuantumRange) >= MagickEpsilon)) return(MagickFalse); red_green=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]- pixel[image->channel_map[GreenPixelChannel].offset]; green_blue=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset]- pixel[image->channel_map[BluePixelChannel].offset]; if ((AbsolutePixelValue(red_green) < MagickEpsilon) && (AbsolutePixelValue(green_blue) < MagickEpsilon)) return(MagickTrue); return(MagickFalse); } static inline MagickBooleanType IsPixelInfoGray( const PixelInfo *magick_restrict pixel) { if ((AbsolutePixelValue(pixel->red-pixel->green) < MagickEpsilon) && (AbsolutePixelValue(pixel->green-pixel->blue) < MagickEpsilon)) return(MagickTrue); return(MagickFalse); } static inline MagickBooleanType IsPixelInfoMonochrome( const PixelInfo *magick_restrict pixel_info) { MagickRealType green_blue, red_green; if ((AbsolutePixelValue(pixel_info->red) >= MagickEpsilon) || (AbsolutePixelValue(pixel_info->red-QuantumRange) >= MagickEpsilon)) return(MagickFalse); red_green=pixel_info->red-pixel_info->green; green_blue=pixel_info->green-pixel_info->blue; if ((AbsolutePixelValue(red_green) < MagickEpsilon) && (AbsolutePixelValue(green_blue) < MagickEpsilon)) return(MagickTrue); return(MagickFalse); } static inline void SetPixela(const Image *magick_restrict image, const Quantum a,Quantum *magick_restrict pixel) { if (image->channel_map[aPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[aPixelChannel].offset]=a; } static inline void SetPixelAlpha(const Image *magick_restrict image, const Quantum alpha,Quantum *magick_restrict pixel) { if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[AlphaPixelChannel].offset]=alpha; } static inline void SetPixelAlphaTraits(Image *image,const PixelTrait traits) { image->channel_map[AlphaPixelChannel].traits=traits; } static inline void SetPixelb(const Image *magick_restrict image, const Quantum b,Quantum *magick_restrict pixel) { if (image->channel_map[bPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[bPixelChannel].offset]=b; } static inline void SetPixelBackgoundColor(const Image *magick_restrict image, Quantum *magick_restrict pixel) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=(Quantum) 0; pixel[image->channel_map[RedPixelChannel].offset]= ClampToQuantum(image->background_color.red); pixel[image->channel_map[GreenPixelChannel].offset]= ClampToQuantum(image->background_color.green); 
pixel[image->channel_map[BluePixelChannel].offset]= ClampToQuantum(image->background_color.blue); if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[BlackPixelChannel].offset]= ClampToQuantum(image->background_color.black); if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[AlphaPixelChannel].offset]= image->background_color.alpha_trait == UndefinedPixelTrait ? OpaqueAlpha : ClampToQuantum(image->background_color.alpha); } static inline void SetPixelBlack(const Image *magick_restrict image, const Quantum black,Quantum *magick_restrict pixel) { if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[BlackPixelChannel].offset]=black; } static inline void SetPixelBlackTraits(Image *image,const PixelTrait traits) { image->channel_map[BlackPixelChannel].traits=traits; } static inline void SetPixelBlue(const Image *magick_restrict image, const Quantum blue,Quantum *magick_restrict pixel) { pixel[image->channel_map[BluePixelChannel].offset]=blue; } static inline void SetPixelBlueTraits(Image *image,const PixelTrait traits) { image->channel_map[BluePixelChannel].traits=traits; } static inline void SetPixelCb(const Image *magick_restrict image, const Quantum cb,Quantum *magick_restrict pixel) { pixel[image->channel_map[CbPixelChannel].offset]=cb; } static inline void SetPixelCbTraits(Image *image,const PixelTrait traits) { image->channel_map[CbPixelChannel].traits=traits; } static inline void SetPixelChannel(const Image *magick_restrict image, const PixelChannel channel,const Quantum quantum, Quantum *magick_restrict pixel) { if (image->channel_map[channel].traits != UndefinedPixelTrait) pixel[image->channel_map[channel].offset]=quantum; } static inline void SetPixelChannelAttributes( const Image *magick_restrict image,const PixelChannel channel, const PixelTrait traits,const ssize_t offset) { assert((ssize_t) channel < MaxPixelChannels); assert(offset < MaxPixelChannels); image->channel_map[offset].channel=channel; image->channel_map[channel].offset=offset; image->channel_map[channel].traits=traits; } static inline void SetPixelChannelChannel(const Image *magick_restrict image, const PixelChannel channel,const ssize_t offset) { image->channel_map[offset].channel=channel; image->channel_map[channel].offset=offset; } static inline void SetPixelChannels(Image *image,const size_t number_channels) { image->number_channels=number_channels; } static inline void SetPixelChannelTraits(Image *image, const PixelChannel channel,const PixelTrait traits) { image->channel_map[channel].traits=traits; } static inline void SetPixelCompositeMask(const Image *magick_restrict image, const Quantum mask,Quantum *magick_restrict pixel) { if (image->channel_map[CompositeMaskPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[CompositeMaskPixelChannel].offset]=mask; } static inline void SetPixelCr(const Image *magick_restrict image, const Quantum cr,Quantum *magick_restrict pixel) { pixel[image->channel_map[CrPixelChannel].offset]=cr; } static inline void SetPixelCrTraits(Image *image,const PixelTrait traits) { image->channel_map[CrPixelChannel].traits=traits; } static inline void SetPixelCyan(const Image *magick_restrict image, const Quantum cyan,Quantum *magick_restrict pixel) { pixel[image->channel_map[CyanPixelChannel].offset]=cyan; } static inline void SetPixelGray(const Image *magick_restrict image, const Quantum gray,Quantum *magick_restrict pixel) { 
pixel[image->channel_map[GrayPixelChannel].offset]=gray; } static inline void SetPixelGrayTraits(Image *image,const PixelTrait traits) { image->channel_map[GrayPixelChannel].traits=traits; } static inline void SetPixelGreen(const Image *magick_restrict image, const Quantum green,Quantum *magick_restrict pixel) { pixel[image->channel_map[GreenPixelChannel].offset]=green; } static inline void SetPixelGreenTraits(Image *image,const PixelTrait traits) { image->channel_map[GreenPixelChannel].traits=traits; } static inline void SetPixelIndex(const Image *magick_restrict image, const Quantum index,Quantum *magick_restrict pixel) { if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[IndexPixelChannel].offset]=index; } static inline void SetPixelIndexTraits(Image *image,const PixelTrait traits) { image->channel_map[IndexPixelChannel].traits=traits; } static inline void SetPixelViaPixelInfo(const Image *magick_restrict image, const PixelInfo *magick_restrict pixel_info,Quantum *magick_restrict pixel) { pixel[image->channel_map[RedPixelChannel].offset]= ClampToQuantum(pixel_info->red); pixel[image->channel_map[GreenPixelChannel].offset]= ClampToQuantum(pixel_info->green); pixel[image->channel_map[BluePixelChannel].offset]= ClampToQuantum(pixel_info->blue); if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[BlackPixelChannel].offset]= ClampToQuantum(pixel_info->black); if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[AlphaPixelChannel].offset]= pixel_info->alpha_trait == UndefinedPixelTrait ? OpaqueAlpha : ClampToQuantum(pixel_info->alpha); } static inline void SetPixelL(const Image *magick_restrict image,const Quantum L, Quantum *magick_restrict pixel) { if (image->channel_map[LPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[LPixelChannel].offset]=L; } static inline void SetPixelMagenta(const Image *magick_restrict image, const Quantum magenta,Quantum *magick_restrict pixel) { pixel[image->channel_map[MagentaPixelChannel].offset]=magenta; } static inline void SetPixelMagentaTraits(Image *image,const PixelTrait traits) { image->channel_map[MagentaPixelChannel].traits=traits; } static inline void SetPixelReadMask(const Image *magick_restrict image, const Quantum mask,Quantum *magick_restrict pixel) { if (image->channel_map[ReadMaskPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[ReadMaskPixelChannel].offset]=mask; } static inline void SetPixelWriteMask(const Image *magick_restrict image, const Quantum mask,Quantum *magick_restrict pixel) { if (image->channel_map[WriteMaskPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[WriteMaskPixelChannel].offset]=mask; } static inline void SetPixelMetacontentExtent(Image *image,const size_t extent) { image->metacontent_extent=extent; } static inline void SetPixelOpacity(const Image *magick_restrict image, const Quantum alpha,Quantum *magick_restrict pixel) { if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[AlphaPixelChannel].offset]=QuantumRange-alpha; } static inline void SetPixelRed(const Image *magick_restrict image, const Quantum red,Quantum *magick_restrict pixel) { pixel[image->channel_map[RedPixelChannel].offset]=red; } static inline void SetPixelRedTraits(Image *image,const PixelTrait traits) { image->channel_map[RedPixelChannel].traits=traits; } static inline void SetPixelYellow(const Image *magick_restrict 
image, const Quantum yellow,Quantum *magick_restrict pixel) { pixel[image->channel_map[YellowPixelChannel].offset]=yellow; } static inline void SetPixelYellowTraits(Image *image,const PixelTrait traits) { image->channel_map[YellowPixelChannel].traits=traits; } static inline void SetPixelY(const Image *magick_restrict image, const Quantum y,Quantum *magick_restrict pixel) { pixel[image->channel_map[YPixelChannel].offset]=y; } static inline void SetPixelYTraits(Image *image,const PixelTrait traits) { image->channel_map[YPixelChannel].traits=traits; } #if defined(__cplusplus) || defined(c_plusplus) } #endif #endif
/* Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization dedicated to making software imaging solutions freely available. You may not use this file except in compliance with the License. You may obtain a copy of the License at https://imagemagick.org/script/license.php Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. MagickCore pixel accessor methods. */ #ifndef MAGICKCORE_PIXEL_ACCESSOR_H #define MAGICKCORE_PIXEL_ACCESSOR_H #include <assert.h> #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/colorspace.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif #undef index static inline Quantum ClampPixel(const MagickRealType pixel) { if (pixel < 0.0f) return((Quantum) 0); if (pixel >= (MagickRealType) QuantumRange) return((Quantum) QuantumRange); #if !defined(MAGICKCORE_HDRI_SUPPORT) return((Quantum) (pixel+0.5f)); #else return((Quantum) pixel); #endif } static inline Quantum GetPixela(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[aPixelChannel].offset]); } static inline Quantum GetPixelAlpha(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[AlphaPixelChannel].traits == UndefinedPixelTrait) return(OpaqueAlpha); return(pixel[image->channel_map[AlphaPixelChannel].offset]); } static inline PixelTrait GetPixelAlphaTraits( const Image *magick_restrict image) { return(image->channel_map[AlphaPixelChannel].traits); } static inline Quantum GetPixelb(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[bPixelChannel].offset]); } static inline Quantum GetPixelBlack(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[BlackPixelChannel].traits == UndefinedPixelTrait) return((Quantum) 0); return(pixel[image->channel_map[BlackPixelChannel].offset]); } static inline PixelTrait GetPixelBlackTraits( const Image *magick_restrict image) { return(image->channel_map[BlackPixelChannel].traits); } static inline Quantum GetPixelBlue(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[BluePixelChannel].offset]); } static inline PixelTrait GetPixelBlueTraits(const Image *magick_restrict image) { return(image->channel_map[BluePixelChannel].traits); } static inline Quantum GetPixelCb(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[CbPixelChannel].offset]); } static inline PixelTrait GetPixelCbTraits(const Image *magick_restrict image) { return(image->channel_map[CbPixelChannel].traits); } static inline Quantum GetPixelChannel(const Image *magick_restrict image, const PixelChannel channel,const Quantum *magick_restrict pixel) { if (image->channel_map[image->channel_map[channel].offset].traits == UndefinedPixelTrait) return((Quantum) 0); return(pixel[image->channel_map[channel].offset]); } static inline PixelChannel GetPixelChannelChannel( const Image *magick_restrict image,const ssize_t offset) { return(image->channel_map[offset].channel); } static inline 
ssize_t GetPixelChannelOffset(const Image *magick_restrict image, const PixelChannel channel) { return(image->channel_map[channel].offset); } static inline PixelTrait GetPixelChannelTraits( const Image *magick_restrict image,const PixelChannel channel) { return(image->channel_map[channel].traits); } static inline size_t GetPixelChannels(const Image *magick_restrict image) { return(image->number_channels); } static inline Quantum GetPixelCompositeMask(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[CompositeMaskPixelChannel].traits == UndefinedPixelTrait) return((Quantum) QuantumRange); return(pixel[image->channel_map[CompositeMaskPixelChannel].offset]); } static inline Quantum GetPixelCr(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[CrPixelChannel].offset]); } static inline PixelTrait GetPixelCrTraits(const Image *magick_restrict image) { return(image->channel_map[CrPixelChannel].traits); } static inline Quantum GetPixelCyan(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[CyanPixelChannel].offset]); } static inline PixelTrait GetPixelCyanTraits(const Image *magick_restrict image) { return(image->channel_map[CyanPixelChannel].traits); } static inline Quantum GetPixelGray(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[GrayPixelChannel].offset]); } static inline PixelTrait GetPixelGrayTraits(const Image *magick_restrict image) { return(image->channel_map[GrayPixelChannel].traits); } static inline Quantum GetPixelGreen(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[GreenPixelChannel].offset]); } static inline PixelTrait GetPixelGreenTraits( const Image *magick_restrict image) { return(image->channel_map[GreenPixelChannel].traits); } static inline Quantum GetPixelIndex(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[IndexPixelChannel].traits == UndefinedPixelTrait) return((Quantum) 0); return(pixel[image->channel_map[IndexPixelChannel].offset]); } static inline PixelTrait GetPixelIndexTraits( const Image *magick_restrict image) { return(image->channel_map[IndexPixelChannel].traits); } static inline MagickRealType GetPixelInfoChannel( const PixelInfo *magick_restrict pixel_info,const PixelChannel channel) { switch (channel) { case RedPixelChannel: return(pixel_info->red); case GreenPixelChannel: return(pixel_info->green); case BluePixelChannel: return(pixel_info->blue); case BlackPixelChannel: return(pixel_info->black); case AlphaPixelChannel: return(pixel_info->alpha); case IndexPixelChannel: return(pixel_info->index); default: return((MagickRealType) 0.0); } } static inline double PerceptibleReciprocal(const double x) { double sign; /* Return 1/x where x is perceptible (not unlimited or infinitesimal). */ sign=x < 0.0 ? 
-1.0 : 1.0; if ((sign*x) >= MagickEpsilon) return(1.0/x); return(sign/MagickEpsilon); } static inline MagickRealType GetPixelInfoLuma( const PixelInfo *magick_restrict pixel) { MagickRealType intensity; if (pixel->colorspace == sRGBColorspace) { intensity=(MagickRealType) (0.212656f*pixel->red+0.715158f*pixel->green+ 0.072186f*pixel->blue); return(intensity); } intensity=(MagickRealType) (0.212656f*EncodePixelGamma(pixel->red)+ 0.715158f*EncodePixelGamma(pixel->green)+ 0.072186f*EncodePixelGamma(pixel->blue)); return(intensity); } static inline MagickRealType GetPixelInfoLuminance( const PixelInfo *magick_restrict pixel) { MagickRealType intensity; if (pixel->colorspace != sRGBColorspace) { intensity=(MagickRealType) (0.212656f*pixel->red+0.715158f*pixel->green+ 0.072186f*pixel->blue); return(intensity); } intensity=(MagickRealType) (0.212656f*DecodePixelGamma(pixel->red)+ 0.715158f*DecodePixelGamma(pixel->green)+ 0.072186f*DecodePixelGamma(pixel->blue)); return(intensity); } static inline Quantum GetPixelL(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[LPixelChannel].offset]); } static inline ssize_t GetPixelLabel(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return((ssize_t) pixel[image->channel_map[LabelPixelChannel].offset]); } static inline MagickRealType GetPixelLuma(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { MagickRealType intensity; intensity=(MagickRealType) ( 0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+ 0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+ 0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); return(intensity); } static inline MagickRealType GetPixelLuminance( const Image *magick_restrict image,const Quantum *magick_restrict pixel) { MagickRealType intensity; if (image->colorspace != sRGBColorspace) { intensity=(MagickRealType) ( 0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+ 0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+ 0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); return(intensity); } intensity=(MagickRealType) (0.212656f*DecodePixelGamma((MagickRealType) pixel[image->channel_map[RedPixelChannel].offset])+0.715158f* DecodePixelGamma((MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset])+0.072186f* DecodePixelGamma((MagickRealType) pixel[image->channel_map[BluePixelChannel].offset])); return(intensity); } static inline Quantum GetPixelMagenta(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[MagentaPixelChannel].offset]); } static inline PixelTrait GetPixelMagentaTraits( const Image *magick_restrict image) { return(image->channel_map[MagentaPixelChannel].traits); } static inline Quantum GetPixelReadMask(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[ReadMaskPixelChannel].traits == UndefinedPixelTrait) return((Quantum) QuantumRange); return(pixel[image->channel_map[ReadMaskPixelChannel].offset]); } static inline Quantum GetPixelWriteMask(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[WriteMaskPixelChannel].traits == UndefinedPixelTrait) return((Quantum) QuantumRange); return(pixel[image->channel_map[WriteMaskPixelChannel].offset]); } static inline PixelTrait GetPixelReadMaskTraits( const Image *magick_restrict image) { return(image->channel_map[ReadMaskPixelChannel].traits); } static 
inline size_t GetPixelMetaChannels(const Image *magick_restrict image) { return(image->number_meta_channels); } static inline size_t GetPixelMetacontentExtent( const Image *magick_restrict image) { return(image->metacontent_extent); } static inline Quantum GetPixelOpacity(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { if (image->channel_map[AlphaPixelChannel].traits != BlendPixelTrait) return(QuantumRange-OpaqueAlpha); return(QuantumRange-pixel[image->channel_map[AlphaPixelChannel].offset]); } static inline Quantum GetPixelRed(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[RedPixelChannel].offset]); } static inline PixelTrait GetPixelRedTraits(const Image *magick_restrict image) { return(image->channel_map[RedPixelChannel].traits); } static inline void GetPixelInfoPixel(const Image *magick_restrict image, const Quantum *magick_restrict pixel,PixelInfo *magick_restrict pixel_info) { (void) ResetMagickMemory(pixel_info,0,sizeof(*pixel_info)); pixel_info->storage_class=DirectClass; pixel_info->colorspace=sRGBColorspace; pixel_info->depth=MAGICKCORE_QUANTUM_DEPTH; pixel_info->alpha_trait=UndefinedPixelTrait; pixel_info->alpha=(MagickRealType) OpaqueAlpha; if (image != (Image *) NULL) { pixel_info->storage_class=image->storage_class; pixel_info->colorspace=image->colorspace; pixel_info->fuzz=image->fuzz; pixel_info->depth=image->depth; pixel_info->alpha_trait=image->alpha_trait; if (pixel != (Quantum *) NULL) { pixel_info->red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]; pixel_info->green=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset]; pixel_info->blue=(MagickRealType) pixel[image->channel_map[BluePixelChannel].offset]; if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait) pixel_info->black=(MagickRealType) pixel[image->channel_map[BlackPixelChannel].offset]; if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) { pixel_info->alpha=(MagickRealType) pixel[image->channel_map[AlphaPixelChannel].offset]; pixel_info->alpha_trait=BlendPixelTrait; } if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait) pixel_info->index=(MagickRealType) pixel[image->channel_map[IndexPixelChannel].offset]; } } } static inline PixelTrait GetPixelTraits(const Image *magick_restrict image, const PixelChannel channel) { return(image->channel_map[channel].traits); } static inline Quantum GetPixelY(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[YPixelChannel].offset]); } static inline PixelTrait GetPixelYTraits(const Image *magick_restrict image) { return(image->channel_map[YPixelChannel].traits); } static inline Quantum GetPixelYellow(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { return(pixel[image->channel_map[YellowPixelChannel].offset]); } static inline PixelTrait GetPixelYellowTraits( const Image *magick_restrict image) { return(image->channel_map[YellowPixelChannel].traits); } static inline MagickRealType AbsolutePixelValue(const MagickRealType x) { return(x < 0.0f ? 
-x : x); } static inline MagickBooleanType IsPixelAtDepth(const Quantum pixel, const QuantumAny range) { Quantum quantum; if (range == 0) return(MagickTrue); #if !defined(MAGICKCORE_HDRI_SUPPORT) quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny) (((MagickRealType) range*pixel)/QuantumRange+0.5)))/range+0.5); #else quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny) (((MagickRealType) range*pixel)/QuantumRange+0.5)))/range); #endif return(pixel == quantum ? MagickTrue : MagickFalse); } static inline MagickBooleanType IsPixelEquivalent( const Image *magick_restrict image,const Quantum *magick_restrict p, const PixelInfo *magick_restrict q) { MagickRealType alpha, beta, color; color=(MagickRealType) p[image->channel_map[AlphaPixelChannel].offset]; alpha=image->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha : color; beta=q->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha : q->alpha; if (AbsolutePixelValue(alpha-beta) >= MagickEpsilon) return(MagickFalse); if ((AbsolutePixelValue(alpha-TransparentAlpha) < MagickEpsilon) || (AbsolutePixelValue(beta-TransparentAlpha) < MagickEpsilon)) return(MagickTrue); /* no color component if pixel is transparent */ color=(MagickRealType) p[image->channel_map[RedPixelChannel].offset]; if (AbsolutePixelValue(color-q->red) >= MagickEpsilon) return(MagickFalse); color=(MagickRealType) p[image->channel_map[GreenPixelChannel].offset]; if (AbsolutePixelValue(color-q->green) >= MagickEpsilon) return(MagickFalse); color=(MagickRealType) p[image->channel_map[BluePixelChannel].offset]; if (AbsolutePixelValue(color-q->blue) >= MagickEpsilon) return(MagickFalse); if (image->colorspace == CMYKColorspace) { color=(MagickRealType) p[image->channel_map[BlackPixelChannel].offset]; if (AbsolutePixelValue(color-q->black) >= MagickEpsilon) return(MagickFalse); } return(MagickTrue); } static inline MagickBooleanType IsPixelGray(const Image *magick_restrict image, const Quantum *magick_restrict pixel) { MagickRealType green_blue, red_green; red_green=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]- pixel[image->channel_map[GreenPixelChannel].offset]; green_blue=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset]- pixel[image->channel_map[BluePixelChannel].offset]; if ((AbsolutePixelValue(red_green) < MagickEpsilon) && (AbsolutePixelValue(green_blue) < MagickEpsilon)) return(MagickTrue); return(MagickFalse); } static inline MagickBooleanType IsPixelInfoEquivalent( const PixelInfo *magick_restrict p,const PixelInfo *magick_restrict q) { MagickRealType alpha, beta; alpha=p->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha : p->alpha; beta=q->alpha_trait == UndefinedPixelTrait ? 
(MagickRealType) OpaqueAlpha : q->alpha; if (AbsolutePixelValue(alpha-beta) >= MagickEpsilon) return(MagickFalse); if ((AbsolutePixelValue(alpha-TransparentAlpha) < MagickEpsilon) || (AbsolutePixelValue(beta-TransparentAlpha) < MagickEpsilon)) return(MagickTrue); /* no color component if pixel is transparent */ if (AbsolutePixelValue(p->red-q->red) >= MagickEpsilon) return(MagickFalse); if (AbsolutePixelValue(p->green-q->green) >= MagickEpsilon) return(MagickFalse); if (AbsolutePixelValue(p->blue-q->blue) >= MagickEpsilon) return(MagickFalse); if (p->colorspace == CMYKColorspace) { if (AbsolutePixelValue(p->black-q->black) >= MagickEpsilon) return(MagickFalse); } return(MagickTrue); } static inline MagickBooleanType IsPixelMonochrome( const Image *magick_restrict image,const Quantum *magick_restrict pixel) { MagickRealType green_blue, red, red_green; red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]; if ((AbsolutePixelValue(red) >= MagickEpsilon) && (AbsolutePixelValue(red-QuantumRange) >= MagickEpsilon)) return(MagickFalse); red_green=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]- pixel[image->channel_map[GreenPixelChannel].offset]; green_blue=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset]- pixel[image->channel_map[BluePixelChannel].offset]; if ((AbsolutePixelValue(red_green) < MagickEpsilon) && (AbsolutePixelValue(green_blue) < MagickEpsilon)) return(MagickTrue); return(MagickFalse); } static inline MagickBooleanType IsPixelInfoGray( const PixelInfo *magick_restrict pixel) { if ((AbsolutePixelValue(pixel->red-pixel->green) < MagickEpsilon) && (AbsolutePixelValue(pixel->green-pixel->blue) < MagickEpsilon)) return(MagickTrue); return(MagickFalse); } static inline MagickBooleanType IsPixelInfoMonochrome( const PixelInfo *magick_restrict pixel_info) { MagickRealType green_blue, red_green; if ((AbsolutePixelValue(pixel_info->red) >= MagickEpsilon) || (AbsolutePixelValue(pixel_info->red-QuantumRange) >= MagickEpsilon)) return(MagickFalse); red_green=pixel_info->red-pixel_info->green; green_blue=pixel_info->green-pixel_info->blue; if ((AbsolutePixelValue(red_green) < MagickEpsilon) && (AbsolutePixelValue(green_blue) < MagickEpsilon)) return(MagickTrue); return(MagickFalse); } static inline void SetPixela(const Image *magick_restrict image, const Quantum a,Quantum *magick_restrict pixel) { if (image->channel_map[aPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[aPixelChannel].offset]=a; } static inline void SetPixelAlpha(const Image *magick_restrict image, const Quantum alpha,Quantum *magick_restrict pixel) { if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[AlphaPixelChannel].offset]=alpha; } static inline void SetPixelAlphaTraits(Image *image,const PixelTrait traits) { image->channel_map[AlphaPixelChannel].traits=traits; } static inline void SetPixelb(const Image *magick_restrict image, const Quantum b,Quantum *magick_restrict pixel) { if (image->channel_map[bPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[bPixelChannel].offset]=b; } static inline void SetPixelBackgoundColor(const Image *magick_restrict image, Quantum *magick_restrict pixel) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=(Quantum) 0; pixel[image->channel_map[RedPixelChannel].offset]= ClampToQuantum(image->background_color.red); pixel[image->channel_map[GreenPixelChannel].offset]= ClampToQuantum(image->background_color.green); 
pixel[image->channel_map[BluePixelChannel].offset]= ClampToQuantum(image->background_color.blue); if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[BlackPixelChannel].offset]= ClampToQuantum(image->background_color.black); if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[AlphaPixelChannel].offset]= image->background_color.alpha_trait == UndefinedPixelTrait ? OpaqueAlpha : ClampToQuantum(image->background_color.alpha); } static inline void SetPixelBlack(const Image *magick_restrict image, const Quantum black,Quantum *magick_restrict pixel) { if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[BlackPixelChannel].offset]=black; } static inline void SetPixelBlackTraits(Image *image,const PixelTrait traits) { image->channel_map[BlackPixelChannel].traits=traits; } static inline void SetPixelBlue(const Image *magick_restrict image, const Quantum blue,Quantum *magick_restrict pixel) { pixel[image->channel_map[BluePixelChannel].offset]=blue; } static inline void SetPixelBlueTraits(Image *image,const PixelTrait traits) { image->channel_map[BluePixelChannel].traits=traits; } static inline void SetPixelCb(const Image *magick_restrict image, const Quantum cb,Quantum *magick_restrict pixel) { pixel[image->channel_map[CbPixelChannel].offset]=cb; } static inline void SetPixelCbTraits(Image *image,const PixelTrait traits) { image->channel_map[CbPixelChannel].traits=traits; } static inline void SetPixelChannel(const Image *magick_restrict image, const PixelChannel channel,const Quantum quantum, Quantum *magick_restrict pixel) { if (image->channel_map[channel].traits != UndefinedPixelTrait) pixel[image->channel_map[channel].offset]=quantum; } static inline void SetPixelChannelAttributes( const Image *magick_restrict image,const PixelChannel channel, const PixelTrait traits,const ssize_t offset) { assert((ssize_t) channel < MaxPixelChannels); assert(offset < MaxPixelChannels); image->channel_map[offset].channel=channel; image->channel_map[channel].offset=offset; image->channel_map[channel].traits=traits; } static inline void SetPixelChannelChannel(const Image *magick_restrict image, const PixelChannel channel,const ssize_t offset) { image->channel_map[offset].channel=channel; image->channel_map[channel].offset=offset; } static inline void SetPixelChannels(Image *image,const size_t number_channels) { image->number_channels=number_channels; } static inline void SetPixelChannelTraits(Image *image, const PixelChannel channel,const PixelTrait traits) { image->channel_map[channel].traits=traits; } static inline void SetPixelCompositeMask(const Image *magick_restrict image, const Quantum mask,Quantum *magick_restrict pixel) { if (image->channel_map[CompositeMaskPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[CompositeMaskPixelChannel].offset]=mask; } static inline void SetPixelCr(const Image *magick_restrict image, const Quantum cr,Quantum *magick_restrict pixel) { pixel[image->channel_map[CrPixelChannel].offset]=cr; } static inline void SetPixelCrTraits(Image *image,const PixelTrait traits) { image->channel_map[CrPixelChannel].traits=traits; } static inline void SetPixelCyan(const Image *magick_restrict image, const Quantum cyan,Quantum *magick_restrict pixel) { pixel[image->channel_map[CyanPixelChannel].offset]=cyan; } static inline void SetPixelGray(const Image *magick_restrict image, const Quantum gray,Quantum *magick_restrict pixel) { 
pixel[image->channel_map[GrayPixelChannel].offset]=gray; } static inline void SetPixelGrayTraits(Image *image,const PixelTrait traits) { image->channel_map[GrayPixelChannel].traits=traits; } static inline void SetPixelGreen(const Image *magick_restrict image, const Quantum green,Quantum *magick_restrict pixel) { pixel[image->channel_map[GreenPixelChannel].offset]=green; } static inline void SetPixelGreenTraits(Image *image,const PixelTrait traits) { image->channel_map[GreenPixelChannel].traits=traits; } static inline void SetPixelIndex(const Image *magick_restrict image, const Quantum index,Quantum *magick_restrict pixel) { if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[IndexPixelChannel].offset]=index; } static inline void SetPixelIndexTraits(Image *image,const PixelTrait traits) { image->channel_map[IndexPixelChannel].traits=traits; } static inline void SetPixelViaPixelInfo(const Image *magick_restrict image, const PixelInfo *magick_restrict pixel_info,Quantum *magick_restrict pixel) { pixel[image->channel_map[RedPixelChannel].offset]= ClampToQuantum(pixel_info->red); pixel[image->channel_map[GreenPixelChannel].offset]= ClampToQuantum(pixel_info->green); pixel[image->channel_map[BluePixelChannel].offset]= ClampToQuantum(pixel_info->blue); if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[BlackPixelChannel].offset]= ClampToQuantum(pixel_info->black); if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[AlphaPixelChannel].offset]= pixel_info->alpha_trait == UndefinedPixelTrait ? OpaqueAlpha : ClampToQuantum(pixel_info->alpha); } static inline void SetPixelL(const Image *magick_restrict image,const Quantum L, Quantum *magick_restrict pixel) { if (image->channel_map[LPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[LPixelChannel].offset]=L; } static inline void SetPixelMagenta(const Image *magick_restrict image, const Quantum magenta,Quantum *magick_restrict pixel) { pixel[image->channel_map[MagentaPixelChannel].offset]=magenta; } static inline void SetPixelMagentaTraits(Image *image,const PixelTrait traits) { image->channel_map[MagentaPixelChannel].traits=traits; } static inline void SetPixelReadMask(const Image *magick_restrict image, const Quantum mask,Quantum *magick_restrict pixel) { if (image->channel_map[ReadMaskPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[ReadMaskPixelChannel].offset]=mask; } static inline void SetPixelWriteMask(const Image *magick_restrict image, const Quantum mask,Quantum *magick_restrict pixel) { if (image->channel_map[WriteMaskPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[WriteMaskPixelChannel].offset]=mask; } static inline void SetPixelMetacontentExtent(Image *image,const size_t extent) { image->metacontent_extent=extent; } static inline void SetPixelOpacity(const Image *magick_restrict image, const Quantum alpha,Quantum *magick_restrict pixel) { if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait) pixel[image->channel_map[AlphaPixelChannel].offset]=QuantumRange-alpha; } static inline void SetPixelRed(const Image *magick_restrict image, const Quantum red,Quantum *magick_restrict pixel) { pixel[image->channel_map[RedPixelChannel].offset]=red; } static inline void SetPixelRedTraits(Image *image,const PixelTrait traits) { image->channel_map[RedPixelChannel].traits=traits; } static inline void SetPixelYellow(const Image *magick_restrict 
image, const Quantum yellow,Quantum *magick_restrict pixel) { pixel[image->channel_map[YellowPixelChannel].offset]=yellow; } static inline void SetPixelYellowTraits(Image *image,const PixelTrait traits) { image->channel_map[YellowPixelChannel].traits=traits; } static inline void SetPixelY(const Image *magick_restrict image, const Quantum y,Quantum *magick_restrict pixel) { pixel[image->channel_map[YPixelChannel].offset]=y; } static inline void SetPixelYTraits(Image *image,const PixelTrait traits) { image->channel_map[YPixelChannel].traits=traits; } #if defined(__cplusplus) || defined(c_plusplus) } #endif #endif
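The header above (this record's code_after field) defines the channel-map accessors as a family: GetPixelChannels() gives the per-pixel stride, GetPixelChannelChannel() maps a storage offset back to its PixelChannel, and GetPixelChannelTraits() reports whether that channel is defined for the image. Below is a minimal sketch of the conventional row walk built from just those three calls, assuming the MagickCore headers included above; the helper name, q, and columns are illustrative and not taken from this record.

/* Illustrative row walk: q is assumed to hold GetPixelChannels(image)
   Quantums per pixel, as MagickCore's pixel cache hands out rows.
   Channels whose traits are UndefinedPixelTrait are skipped, mirroring
   the guards inside the Get/Set accessors above. */
static void clear_defined_channels(const Image *image,
  Quantum *magick_restrict q,const size_t columns)
{
  ssize_t
    x;

  for (x=0; x < (ssize_t) columns; x++)
  {
    ssize_t
      i;

    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);

      if (GetPixelChannelTraits(image,channel) != UndefinedPixelTrait)
        q[i]=(Quantum) 0;
    }
    q+=GetPixelChannels(image);  /* advance by the per-pixel stride */
  }
}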
func_before:
static inline Quantum GetPixelChannel(const Image *magick_restrict image,
  const PixelChannel channel,const Quantum *magick_restrict pixel)
{
  if (image->channel_map[channel].traits == UndefinedPixelTrait)
    return((Quantum) 0);
  return(pixel[image->channel_map[channel].offset]);
}
func_after:
static inline Quantum GetPixelChannel(const Image *magick_restrict image,
  const PixelChannel channel,const Quantum *magick_restrict pixel)
{
  if (image->channel_map[image->channel_map[channel].offset].traits == UndefinedPixelTrait)
    return((Quantum) 0);
  return(pixel[image->channel_map[channel].offset]);
}
diff: {'added': [(114, ' if (image->channel_map[image->channel_map[channel].offset].traits == UndefinedPixelTrait)')], 'deleted': [(114, ' if (image->channel_map[channel].traits == UndefinedPixelTrait)')]}
num_lines_added: 1
num_lines_deleted: 1
num_lines_in_file: 740
num_tokens_in_file: 5,255
num_lines_in_method: 7
num_tokens_in_method: 56
method_complexity: 2
repo: https://github.com/ImageMagick/ImageMagick
cve_id: CVE-2019-13299
cwe_id: CWE-125
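Reading the diff for this record: the patched GetPixelChannel (func_after) no longer trusts channel_map[channel].traits directly; it remaps through channel_map[channel].offset and tests the traits of the slot that actually backs the requested offset, so the UndefinedPixelTrait guard and the subsequent pixel[offset] read agree. That is what closes the out-of-bounds read (CWE-125). Below is a simplified, self-contained sketch of that guarded-lookup shape; MapEntry, its fields, and get_channel are stand-ins for illustration, not ImageMagick's real types.

#include <stdio.h>

typedef enum { UndefinedTrait = 0, CopyTrait = 1 } Trait;

typedef struct
{
  int channel;  /* logical channel described by this slot */
  int offset;   /* index of that channel's sample within a pixel */
  Trait traits;
} MapEntry;

/* Guarded read in the spirit of the patched GetPixelChannel: validate the
   traits of the map slot the offset points at before touching pixel[]. */
static unsigned short get_channel(const MapEntry *map, int channel,
  const unsigned short *pixel)
{
  if (map[map[channel].offset].traits == UndefinedTrait)
    return(0);  /* refuse the lookup rather than read out of bounds */
  return(pixel[map[channel].offset]);
}

int main(void)
{
  /* Slot 1 claims a stale offset (3) whose backing slot was never
     defined; the remapped guard refuses what a direct traits test
     would have allowed. */
  MapEntry map[4] =
  {
    { 0, 0, CopyTrait },
    { 1, 3, CopyTrait },
    { 2, 2, UndefinedTrait },
    { 3, 3, UndefinedTrait }
  };
  unsigned short pixel[2] = { 42, 7 };  /* only two samples stored */

  printf("%u %u\n", get_channel(map, 0, pixel), get_channel(map, 1, pixel));
  return 0;
}

The pre-patch guard would have tested map[1].traits (CopyTrait) and then read pixel[3], past the stored samples; that is the shape of the bug this record fixes.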
id: 1,786
file_name: catc.c
programming_language: C
method_name: catc_probe
/* * Copyright (c) 2001 Vojtech Pavlik * * CATC EL1210A NetMate USB Ethernet driver * * Sponsored by SuSE * * Based on the work of * Donald Becker * * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002 * - adds support for Belkin F5U011 */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <linux/uaccess.h> #undef DEBUG #include <linux/usb.h> /* * Version information. */ #define DRIVER_VERSION "v2.8" #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>" #define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver" #define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static const char driver_name[] = "catc"; /* * Some defines. */ #define STATS_UPDATE (HZ) /* Time between stats updates */ #define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */ #define PKT_SZ 1536 /* Max Ethernet packet size */ #define RX_MAX_BURST 15 /* Max packets per rx buffer (> 0, < 16) */ #define TX_MAX_BURST 15 /* Max full sized packets per tx buffer (> 0) */ #define CTRL_QUEUE 16 /* Max control requests in flight (power of two) */ #define RX_PKT_SZ 1600 /* Max size of receive packet for F5U011 */ /* * Control requests. */ enum control_requests { ReadMem = 0xf1, GetMac = 0xf2, Reset = 0xf4, SetMac = 0xf5, SetRxMode = 0xf5, /* F5U011 only */ WriteROM = 0xf8, SetReg = 0xfa, GetReg = 0xfb, WriteMem = 0xfc, ReadROM = 0xfd, }; /* * Registers. */ enum register_offsets { TxBufCount = 0x20, RxBufCount = 0x21, OpModes = 0x22, TxQed = 0x23, RxQed = 0x24, MaxBurst = 0x25, RxUnit = 0x60, EthStatus = 0x61, StationAddr0 = 0x67, EthStats = 0x69, LEDCtrl = 0x81, }; enum eth_stats { TxSingleColl = 0x00, TxMultiColl = 0x02, TxExcessColl = 0x04, RxFramErr = 0x06, }; enum op_mode_bits { Op3MemWaits = 0x03, OpLenInclude = 0x08, OpRxMerge = 0x10, OpTxMerge = 0x20, OpWin95bugfix = 0x40, OpLoopback = 0x80, }; enum rx_filter_bits { RxEnable = 0x01, RxPolarity = 0x02, RxForceOK = 0x04, RxMultiCast = 0x08, RxPromisc = 0x10, AltRxPromisc = 0x20, /* F5U011 uses different bit */ }; enum led_values { LEDFast = 0x01, LEDSlow = 0x02, LEDFlash = 0x03, LEDPulse = 0x04, LEDLink = 0x08, }; enum link_status { LinkNoChange = 0, LinkGood = 1, LinkBad = 2 }; /* * The catc struct. 
*/ #define CTRL_RUNNING 0 #define RX_RUNNING 1 #define TX_RUNNING 2 struct catc { struct net_device *netdev; struct usb_device *usbdev; unsigned long flags; unsigned int tx_ptr, tx_idx; unsigned int ctrl_head, ctrl_tail; spinlock_t tx_lock, ctrl_lock; u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)]; u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)]; u8 irq_buf[2]; u8 ctrl_buf[64]; struct usb_ctrlrequest ctrl_dr; struct timer_list timer; u8 stats_buf[8]; u16 stats_vals[4]; unsigned long last_stats; u8 multicast[64]; struct ctrl_queue { u8 dir; u8 request; u16 value; u16 index; void *buf; int len; void (*callback)(struct catc *catc, struct ctrl_queue *q); } ctrl_queue[CTRL_QUEUE]; struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb; u8 is_f5u011; /* Set if device is an F5U011 */ u8 rxmode[2]; /* Used for F5U011 */ atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */ }; /* * Useful macros. */ #define catc_get_mac(catc, mac) catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6) #define catc_reset(catc) catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0) #define catc_set_reg(catc, reg, val) catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0) #define catc_get_reg(catc, reg, buf) catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1) #define catc_write_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size) #define catc_read_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size) #define f5u011_rxmode(catc, rxmode) catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2) #define f5u011_rxmode_async(catc, rxmode) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL) #define f5u011_mchash_async(catc, hash) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL) #define catc_set_reg_async(catc, reg, val) catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL) #define catc_get_reg_async(catc, reg, cb) catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 1, cb) #define catc_write_mem_async(catc, addr, buf, size) catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL) /* * Receive routines. 
*/ static void catc_rx_done(struct urb *urb) { struct catc *catc = urb->context; u8 *pkt_start = urb->transfer_buffer; struct sk_buff *skb; int pkt_len, pkt_offset = 0; int status = urb->status; if (!catc->is_f5u011) { clear_bit(RX_RUNNING, &catc->flags); pkt_offset = 2; } if (status) { dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n", status, urb->actual_length); return; } do { if(!catc->is_f5u011) { pkt_len = le16_to_cpup((__le16*)pkt_start); if (pkt_len > urb->actual_length) { catc->netdev->stats.rx_length_errors++; catc->netdev->stats.rx_errors++; break; } } else { pkt_len = urb->actual_length; } if (!(skb = dev_alloc_skb(pkt_len))) return; skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, catc->netdev); netif_rx(skb); catc->netdev->stats.rx_packets++; catc->netdev->stats.rx_bytes += pkt_len; /* F5U011 only does one packet per RX */ if (catc->is_f5u011) break; pkt_start += (((pkt_len + 1) >> 6) + 1) << 6; } while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length); if (catc->is_f5u011) { if (atomic_read(&catc->recq_sz)) { int state; atomic_dec(&catc->recq_sz); netdev_dbg(catc->netdev, "getting extra packet\n"); urb->dev = catc->usbdev; if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { netdev_dbg(catc->netdev, "submit(rx_urb) status %d\n", state); } } else { clear_bit(RX_RUNNING, &catc->flags); } } } static void catc_irq_done(struct urb *urb) { struct catc *catc = urb->context; u8 *data = urb->transfer_buffer; int status = urb->status; unsigned int hasdata = 0, linksts = LinkNoChange; int res; if (!catc->is_f5u011) { hasdata = data[1] & 0x80; if (data[1] & 0x40) linksts = LinkGood; else if (data[1] & 0x20) linksts = LinkBad; } else { hasdata = (unsigned int)(be16_to_cpup((__be16*)data) & 0x0fff); if (data[0] == 0x90) linksts = LinkGood; else if (data[0] == 0xA0) linksts = LinkBad; } switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ dev_dbg(&urb->dev->dev, "irq_done, status %d, data %02x %02x.\n", status, data[0], data[1]); goto resubmit; } if (linksts == LinkGood) { netif_carrier_on(catc->netdev); netdev_dbg(catc->netdev, "link ok\n"); } if (linksts == LinkBad) { netif_carrier_off(catc->netdev); netdev_dbg(catc->netdev, "link bad\n"); } if (hasdata) { if (test_and_set_bit(RX_RUNNING, &catc->flags)) { if (catc->is_f5u011) atomic_inc(&catc->recq_sz); } else { catc->rx_urb->dev = catc->usbdev; if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) { dev_err(&catc->usbdev->dev, "submit(rx_urb) status %d\n", res); } } } resubmit: res = usb_submit_urb (urb, GFP_ATOMIC); if (res) dev_err(&catc->usbdev->dev, "can't resubmit intr, %s-%s, status %d\n", catc->usbdev->bus->bus_name, catc->usbdev->devpath, res); } /* * Transmit routines. 
*/ static int catc_tx_run(struct catc *catc) { int status; if (catc->is_f5u011) catc->tx_ptr = (catc->tx_ptr + 63) & ~63; catc->tx_urb->transfer_buffer_length = catc->tx_ptr; catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx]; catc->tx_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0) dev_err(&catc->usbdev->dev, "submit(tx_urb), status %d\n", status); catc->tx_idx = !catc->tx_idx; catc->tx_ptr = 0; netif_trans_update(catc->netdev); return status; } static void catc_tx_done(struct urb *urb) { struct catc *catc = urb->context; unsigned long flags; int r, status = urb->status; if (status == -ECONNRESET) { dev_dbg(&urb->dev->dev, "Tx Reset.\n"); urb->status = 0; netif_trans_update(catc->netdev); catc->netdev->stats.tx_errors++; clear_bit(TX_RUNNING, &catc->flags); netif_wake_queue(catc->netdev); return; } if (status) { dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n", status, urb->actual_length); return; } spin_lock_irqsave(&catc->tx_lock, flags); if (catc->tx_ptr) { r = catc_tx_run(catc); if (unlikely(r < 0)) clear_bit(TX_RUNNING, &catc->flags); } else { clear_bit(TX_RUNNING, &catc->flags); } netif_wake_queue(catc->netdev); spin_unlock_irqrestore(&catc->tx_lock, flags); } static netdev_tx_t catc_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); unsigned long flags; int r = 0; char *tx_buf; spin_lock_irqsave(&catc->tx_lock, flags); catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; if (catc->is_f5u011) *(__be16 *)tx_buf = cpu_to_be16(skb->len); else *(__le16 *)tx_buf = cpu_to_le16(skb->len); skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); catc->tx_ptr += skb->len + 2; if (!test_and_set_bit(TX_RUNNING, &catc->flags)) { r = catc_tx_run(catc); if (r < 0) clear_bit(TX_RUNNING, &catc->flags); } if ((catc->is_f5u011 && catc->tx_ptr) || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2)))) netif_stop_queue(netdev); spin_unlock_irqrestore(&catc->tx_lock, flags); if (r >= 0) { catc->netdev->stats.tx_bytes += skb->len; catc->netdev->stats.tx_packets++; } dev_kfree_skb(skb); return NETDEV_TX_OK; } static void catc_tx_timeout(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); dev_warn(&netdev->dev, "Transmit timed out.\n"); usb_unlink_urb(catc->tx_urb); } /* * Control messages. */ static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len) { int retval = usb_control_msg(catc->usbdev, dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0), request, 0x40 | dir, value, index, buf, len, 1000); return retval < 0 ? retval : 0; } static void catc_ctrl_run(struct catc *catc) { struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail; struct usb_device *usbdev = catc->usbdev; struct urb *urb = catc->ctrl_urb; struct usb_ctrlrequest *dr = &catc->ctrl_dr; int status; dr->bRequest = q->request; dr->bRequestType = 0x40 | q->dir; dr->wValue = cpu_to_le16(q->value); dr->wIndex = cpu_to_le16(q->index); dr->wLength = cpu_to_le16(q->len); urb->pipe = q->dir ? 
usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0); urb->transfer_buffer_length = q->len; urb->transfer_buffer = catc->ctrl_buf; urb->setup_packet = (void *) dr; urb->dev = usbdev; if (!q->dir && q->buf && q->len) memcpy(catc->ctrl_buf, q->buf, q->len); if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC))) dev_err(&catc->usbdev->dev, "submit(ctrl_urb) status %d\n", status); } static void catc_ctrl_done(struct urb *urb) { struct catc *catc = urb->context; struct ctrl_queue *q; unsigned long flags; int status = urb->status; if (status) dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n", status, urb->actual_length); spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_tail; if (q->dir) { if (q->buf && q->len) memcpy(q->buf, catc->ctrl_buf, q->len); else q->buf = catc->ctrl_buf; } if (q->callback) q->callback(catc, q); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head != catc->ctrl_tail) catc_ctrl_run(catc); else clear_bit(CTRL_RUNNING, &catc->flags); spin_unlock_irqrestore(&catc->ctrl_lock, flags); } static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q)) { struct ctrl_queue *q; int retval = 0; unsigned long flags; spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_head; q->dir = dir; q->request = request; q->value = value; q->index = index; q->buf = buf; q->len = len; q->callback = callback; catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head == catc->ctrl_tail) { dev_err(&catc->usbdev->dev, "ctrl queue full\n"); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); retval = -1; } if (!test_and_set_bit(CTRL_RUNNING, &catc->flags)) catc_ctrl_run(catc); spin_unlock_irqrestore(&catc->ctrl_lock, flags); return retval; } /* * Statistics. */ static void catc_stats_done(struct catc *catc, struct ctrl_queue *q) { int index = q->index - EthStats; u16 data, last; catc->stats_buf[index] = *((char *)q->buf); if (index & 1) return; data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1]; last = catc->stats_vals[index >> 1]; switch (index) { case TxSingleColl: case TxMultiColl: catc->netdev->stats.collisions += data - last; break; case TxExcessColl: catc->netdev->stats.tx_aborted_errors += data - last; catc->netdev->stats.tx_errors += data - last; break; case RxFramErr: catc->netdev->stats.rx_frame_errors += data - last; catc->netdev->stats.rx_errors += data - last; break; } catc->stats_vals[index >> 1] = data; } static void catc_stats_timer(unsigned long data) { struct catc *catc = (void *) data; int i; for (i = 0; i < 8; i++) catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done); mod_timer(&catc->timer, jiffies + STATS_UPDATE); } /* * Receive modes. Broadcast, Multicast, Promisc. */ static void catc_multicast(unsigned char *addr, u8 *multicast) { u32 crc; crc = ether_crc_le(6, addr); multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } static void catc_set_multicast_list(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); struct netdev_hw_addr *ha; u8 broadcast[ETH_ALEN]; u8 rx = RxEnable | RxPolarity | RxMultiCast; eth_broadcast_addr(broadcast); memset(catc->multicast, 0, 64); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); if (netdev->flags & IFF_PROMISC) { memset(catc->multicast, 0xff, 64); rx |= (!catc->is_f5u011) ? 
RxPromisc : AltRxPromisc; } if (netdev->flags & IFF_ALLMULTI) { memset(catc->multicast, 0xff, 64); } else { netdev_for_each_mc_addr(ha, netdev) { u32 crc = ether_crc_le(6, ha->addr); if (!catc->is_f5u011) { catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } else { catc->multicast[7-(crc >> 29)] |= 1 << ((crc >> 26) & 7); } } } if (!catc->is_f5u011) { catc_set_reg_async(catc, RxUnit, rx); catc_write_mem_async(catc, 0xfa80, catc->multicast, 64); } else { f5u011_mchash_async(catc, catc->multicast); if (catc->rxmode[0] != rx) { catc->rxmode[0] = rx; netdev_dbg(catc->netdev, "Setting RX mode to %2.2X %2.2X\n", catc->rxmode[0], catc->rxmode[1]); f5u011_rxmode_async(catc, catc->rxmode); } } } static void catc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct catc *catc = netdev_priv(dev); strlcpy(info->driver, driver_name, sizeof(info->driver)); strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info)); } static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct catc *catc = netdev_priv(dev); if (!catc->is_f5u011) return -EOPNOTSUPP; cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP; cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP; ethtool_cmd_speed_set(cmd, SPEED_10); cmd->duplex = DUPLEX_HALF; cmd->port = PORT_TP; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 1; cmd->maxrxpkt = 1; return 0; } static const struct ethtool_ops ops = { .get_drvinfo = catc_get_drvinfo, .get_settings = catc_get_settings, .get_link = ethtool_op_get_link }; /* * Open, close. */ static int catc_open(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); int status; catc->irq_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) { dev_err(&catc->usbdev->dev, "submit(irq_urb) status %d\n", status); return -1; } netif_start_queue(netdev); if (!catc->is_f5u011) mod_timer(&catc->timer, jiffies + STATS_UPDATE); return 0; } static int catc_stop(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); netif_stop_queue(netdev); if (!catc->is_f5u011) del_timer_sync(&catc->timer); usb_kill_urb(catc->rx_urb); usb_kill_urb(catc->tx_urb); usb_kill_urb(catc->irq_urb); usb_kill_urb(catc->ctrl_urb); return 0; } static const struct net_device_ops catc_netdev_ops = { .ndo_open = catc_open, .ndo_stop = catc_stop, .ndo_start_xmit = catc_start_xmit, .ndo_tx_timeout = catc_tx_timeout, .ndo_set_rx_mode = catc_set_multicast_list, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* * USB probe, disconnect. 
*/ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct device *dev = &intf->dev; struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *netdev; struct catc *catc; u8 broadcast[ETH_ALEN]; int i, pktsz, ret; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { dev_err(dev, "Can't set altsetting 1.\n"); return -EIO; } netdev = alloc_etherdev(sizeof(struct catc)); if (!netdev) return -ENOMEM; catc = netdev_priv(netdev); netdev->netdev_ops = &catc_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT; netdev->ethtool_ops = &ops; catc->usbdev = usbdev; catc->netdev = netdev; spin_lock_init(&catc->tx_lock); spin_lock_init(&catc->ctrl_lock); init_timer(&catc->timer); catc->timer.data = (long) catc; catc->timer.function = catc_stats_timer; catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { dev_err(&intf->dev, "No free urbs available.\n"); ret = -ENOMEM; goto fail_free; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { dev_dbg(dev, "Testing for f5u011\n"); catc->is_f5u011 = 1; atomic_set(&catc->recq_sz, 0); pktsz = RX_PKT_SZ; } else { pktsz = RX_MAX_BURST * (PKT_SZ + 2); } usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0), NULL, NULL, 0, catc_ctrl_done, catc); usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1), NULL, 0, catc_tx_done, catc); usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1), catc->rx_buf, pktsz, catc_rx_done, catc); usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2), catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { dev_dbg(dev, "Checking memory size\n"); i = 0x12345678; catc_write_mem(catc, 0x7a80, &i, 4); i = 0x87654321; catc_write_mem(catc, 0xfa80, &i, 4); catc_read_mem(catc, 0x7a80, &i, 4); switch (i) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); dev_dbg(dev, "64k Memory\n"); break; default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); dev_dbg(dev, "32k Memory\n"); break; } dev_dbg(dev, "Getting MAC from SEEROM.\n"); catc_get_mac(catc, netdev->dev_addr); dev_dbg(dev, "Setting MAC into registers.\n"); for (i = 0; i < 6; i++) catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); dev_dbg(dev, "Filling the multicast list.\n"); eth_broadcast_addr(broadcast); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); dev_dbg(dev, "Clearing error counters.\n"); for (i = 0; i < 8; i++) catc_set_reg(catc, EthStats + i, 0); catc->last_stats = jiffies; dev_dbg(dev, "Enabling.\n"); catc_set_reg(catc, MaxBurst, RX_MAX_BURST); catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); catc_set_reg(catc, LEDCtrl, LEDLink); catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); } else { dev_dbg(dev, "Performing reset\n"); catc_reset(catc); catc_get_mac(catc, netdev->dev_addr); dev_dbg(dev, "Setting RX Mode\n"); catc->rxmode[0] = RxEnable | 
RxPolarity | RxMultiCast; catc->rxmode[1] = 0; f5u011_rxmode(catc, catc->rxmode); } dev_dbg(dev, "Init done.\n"); printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); ret = register_netdev(netdev); if (ret) goto fail_clear_intfdata; return 0; fail_clear_intfdata: usb_set_intfdata(intf, NULL); fail_free: usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return ret; } static void catc_disconnect(struct usb_interface *intf) { struct catc *catc = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (catc) { unregister_netdev(catc->netdev); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(catc->netdev); } } /* * Module functions and tables. */ static struct usb_device_id catc_id_table [] = { { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */ { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */ { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */ { } }; MODULE_DEVICE_TABLE(usb, catc_id_table); static struct usb_driver catc_driver = { .name = driver_name, .probe = catc_probe, .disconnect = catc_disconnect, .id_table = catc_id_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(catc_driver);
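A side note on this record's code_before, visible above: catc_probe's memory-size test passes the address of the on-stack local i to catc_write_mem()/catc_read_mem(), which expand to usb_control_msg(). The USB core requires transfer buffers to be DMA-able, and stack memory is not (with CONFIG_VMAP_STACK it faults outright), so the customary remedy is a short-lived kmalloc'd buffer. Below is a minimal sketch of that pattern under those assumptions; catc_memory_test is a hypothetical helper name, and this is not necessarily the exact change recorded in this record's code_after.

/* Hypothetical probe-time memory-size test using a heap buffer, since
   usb_control_msg() transfer buffers must be DMA-able (never on-stack).
   Returns the detected memory size in kB, or a negative errno. */
static int catc_memory_test(struct catc *catc)
{
	u32 *buf;
	int ret;

	buf = kmalloc(4, GFP_KERNEL);	/* DMA-able, unlike a stack variable */
	if (!buf)
		return -ENOMEM;

	*buf = 0x12345678;
	catc_write_mem(catc, 0x7a80, buf, 4);
	*buf = 0x87654321;
	catc_write_mem(catc, 0xfa80, buf, 4);
	catc_read_mem(catc, 0x7a80, buf, 4);
	ret = (*buf == 0x12345678) ? 64 : 32;

	kfree(buf);
	return ret;
}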
/* * Copyright (c) 2001 Vojtech Pavlik * * CATC EL1210A NetMate USB Ethernet driver * * Sponsored by SuSE * * Based on the work of * Donald Becker * * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002 * - adds support for Belkin F5U011 */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <linux/uaccess.h> #undef DEBUG #include <linux/usb.h> /* * Version information. */ #define DRIVER_VERSION "v2.8" #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>" #define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver" #define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static const char driver_name[] = "catc"; /* * Some defines. */ #define STATS_UPDATE (HZ) /* Time between stats updates */ #define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */ #define PKT_SZ 1536 /* Max Ethernet packet size */ #define RX_MAX_BURST 15 /* Max packets per rx buffer (> 0, < 16) */ #define TX_MAX_BURST 15 /* Max full sized packets per tx buffer (> 0) */ #define CTRL_QUEUE 16 /* Max control requests in flight (power of two) */ #define RX_PKT_SZ 1600 /* Max size of receive packet for F5U011 */ /* * Control requests. */ enum control_requests { ReadMem = 0xf1, GetMac = 0xf2, Reset = 0xf4, SetMac = 0xf5, SetRxMode = 0xf5, /* F5U011 only */ WriteROM = 0xf8, SetReg = 0xfa, GetReg = 0xfb, WriteMem = 0xfc, ReadROM = 0xfd, }; /* * Registers. */ enum register_offsets { TxBufCount = 0x20, RxBufCount = 0x21, OpModes = 0x22, TxQed = 0x23, RxQed = 0x24, MaxBurst = 0x25, RxUnit = 0x60, EthStatus = 0x61, StationAddr0 = 0x67, EthStats = 0x69, LEDCtrl = 0x81, }; enum eth_stats { TxSingleColl = 0x00, TxMultiColl = 0x02, TxExcessColl = 0x04, RxFramErr = 0x06, }; enum op_mode_bits { Op3MemWaits = 0x03, OpLenInclude = 0x08, OpRxMerge = 0x10, OpTxMerge = 0x20, OpWin95bugfix = 0x40, OpLoopback = 0x80, }; enum rx_filter_bits { RxEnable = 0x01, RxPolarity = 0x02, RxForceOK = 0x04, RxMultiCast = 0x08, RxPromisc = 0x10, AltRxPromisc = 0x20, /* F5U011 uses different bit */ }; enum led_values { LEDFast = 0x01, LEDSlow = 0x02, LEDFlash = 0x03, LEDPulse = 0x04, LEDLink = 0x08, }; enum link_status { LinkNoChange = 0, LinkGood = 1, LinkBad = 2 }; /* * The catc struct. 
*/ #define CTRL_RUNNING 0 #define RX_RUNNING 1 #define TX_RUNNING 2 struct catc { struct net_device *netdev; struct usb_device *usbdev; unsigned long flags; unsigned int tx_ptr, tx_idx; unsigned int ctrl_head, ctrl_tail; spinlock_t tx_lock, ctrl_lock; u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)]; u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)]; u8 irq_buf[2]; u8 ctrl_buf[64]; struct usb_ctrlrequest ctrl_dr; struct timer_list timer; u8 stats_buf[8]; u16 stats_vals[4]; unsigned long last_stats; u8 multicast[64]; struct ctrl_queue { u8 dir; u8 request; u16 value; u16 index; void *buf; int len; void (*callback)(struct catc *catc, struct ctrl_queue *q); } ctrl_queue[CTRL_QUEUE]; struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb; u8 is_f5u011; /* Set if device is an F5U011 */ u8 rxmode[2]; /* Used for F5U011 */ atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */ }; /* * Useful macros. */ #define catc_get_mac(catc, mac) catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6) #define catc_reset(catc) catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0) #define catc_set_reg(catc, reg, val) catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0) #define catc_get_reg(catc, reg, buf) catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1) #define catc_write_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size) #define catc_read_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size) #define f5u011_rxmode(catc, rxmode) catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2) #define f5u011_rxmode_async(catc, rxmode) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL) #define f5u011_mchash_async(catc, hash) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL) #define catc_set_reg_async(catc, reg, val) catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL) #define catc_get_reg_async(catc, reg, cb) catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 1, cb) #define catc_write_mem_async(catc, addr, buf, size) catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL) /* * Receive routines. 
*/ static void catc_rx_done(struct urb *urb) { struct catc *catc = urb->context; u8 *pkt_start = urb->transfer_buffer; struct sk_buff *skb; int pkt_len, pkt_offset = 0; int status = urb->status; if (!catc->is_f5u011) { clear_bit(RX_RUNNING, &catc->flags); pkt_offset = 2; } if (status) { dev_dbg(&urb->dev->dev, "rx_done, status %d, length %d\n", status, urb->actual_length); return; } do { if(!catc->is_f5u011) { pkt_len = le16_to_cpup((__le16*)pkt_start); if (pkt_len > urb->actual_length) { catc->netdev->stats.rx_length_errors++; catc->netdev->stats.rx_errors++; break; } } else { pkt_len = urb->actual_length; } if (!(skb = dev_alloc_skb(pkt_len))) return; skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, catc->netdev); netif_rx(skb); catc->netdev->stats.rx_packets++; catc->netdev->stats.rx_bytes += pkt_len; /* F5U011 only does one packet per RX */ if (catc->is_f5u011) break; pkt_start += (((pkt_len + 1) >> 6) + 1) << 6; } while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length); if (catc->is_f5u011) { if (atomic_read(&catc->recq_sz)) { int state; atomic_dec(&catc->recq_sz); netdev_dbg(catc->netdev, "getting extra packet\n"); urb->dev = catc->usbdev; if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { netdev_dbg(catc->netdev, "submit(rx_urb) status %d\n", state); } } else { clear_bit(RX_RUNNING, &catc->flags); } } } static void catc_irq_done(struct urb *urb) { struct catc *catc = urb->context; u8 *data = urb->transfer_buffer; int status = urb->status; unsigned int hasdata = 0, linksts = LinkNoChange; int res; if (!catc->is_f5u011) { hasdata = data[1] & 0x80; if (data[1] & 0x40) linksts = LinkGood; else if (data[1] & 0x20) linksts = LinkBad; } else { hasdata = (unsigned int)(be16_to_cpup((__be16*)data) & 0x0fff); if (data[0] == 0x90) linksts = LinkGood; else if (data[0] == 0xA0) linksts = LinkBad; } switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ dev_dbg(&urb->dev->dev, "irq_done, status %d, data %02x %02x.\n", status, data[0], data[1]); goto resubmit; } if (linksts == LinkGood) { netif_carrier_on(catc->netdev); netdev_dbg(catc->netdev, "link ok\n"); } if (linksts == LinkBad) { netif_carrier_off(catc->netdev); netdev_dbg(catc->netdev, "link bad\n"); } if (hasdata) { if (test_and_set_bit(RX_RUNNING, &catc->flags)) { if (catc->is_f5u011) atomic_inc(&catc->recq_sz); } else { catc->rx_urb->dev = catc->usbdev; if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) { dev_err(&catc->usbdev->dev, "submit(rx_urb) status %d\n", res); } } } resubmit: res = usb_submit_urb (urb, GFP_ATOMIC); if (res) dev_err(&catc->usbdev->dev, "can't resubmit intr, %s-%s, status %d\n", catc->usbdev->bus->bus_name, catc->usbdev->devpath, res); } /* * Transmit routines. 
*/ static int catc_tx_run(struct catc *catc) { int status; if (catc->is_f5u011) catc->tx_ptr = (catc->tx_ptr + 63) & ~63; catc->tx_urb->transfer_buffer_length = catc->tx_ptr; catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx]; catc->tx_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0) dev_err(&catc->usbdev->dev, "submit(tx_urb), status %d\n", status); catc->tx_idx = !catc->tx_idx; catc->tx_ptr = 0; netif_trans_update(catc->netdev); return status; } static void catc_tx_done(struct urb *urb) { struct catc *catc = urb->context; unsigned long flags; int r, status = urb->status; if (status == -ECONNRESET) { dev_dbg(&urb->dev->dev, "Tx Reset.\n"); urb->status = 0; netif_trans_update(catc->netdev); catc->netdev->stats.tx_errors++; clear_bit(TX_RUNNING, &catc->flags); netif_wake_queue(catc->netdev); return; } if (status) { dev_dbg(&urb->dev->dev, "tx_done, status %d, length %d\n", status, urb->actual_length); return; } spin_lock_irqsave(&catc->tx_lock, flags); if (catc->tx_ptr) { r = catc_tx_run(catc); if (unlikely(r < 0)) clear_bit(TX_RUNNING, &catc->flags); } else { clear_bit(TX_RUNNING, &catc->flags); } netif_wake_queue(catc->netdev); spin_unlock_irqrestore(&catc->tx_lock, flags); } static netdev_tx_t catc_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); unsigned long flags; int r = 0; char *tx_buf; spin_lock_irqsave(&catc->tx_lock, flags); catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; if (catc->is_f5u011) *(__be16 *)tx_buf = cpu_to_be16(skb->len); else *(__le16 *)tx_buf = cpu_to_le16(skb->len); skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); catc->tx_ptr += skb->len + 2; if (!test_and_set_bit(TX_RUNNING, &catc->flags)) { r = catc_tx_run(catc); if (r < 0) clear_bit(TX_RUNNING, &catc->flags); } if ((catc->is_f5u011 && catc->tx_ptr) || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2)))) netif_stop_queue(netdev); spin_unlock_irqrestore(&catc->tx_lock, flags); if (r >= 0) { catc->netdev->stats.tx_bytes += skb->len; catc->netdev->stats.tx_packets++; } dev_kfree_skb(skb); return NETDEV_TX_OK; } static void catc_tx_timeout(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); dev_warn(&netdev->dev, "Transmit timed out.\n"); usb_unlink_urb(catc->tx_urb); } /* * Control messages. */ static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len) { int retval = usb_control_msg(catc->usbdev, dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0), request, 0x40 | dir, value, index, buf, len, 1000); return retval < 0 ? retval : 0; } static void catc_ctrl_run(struct catc *catc) { struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail; struct usb_device *usbdev = catc->usbdev; struct urb *urb = catc->ctrl_urb; struct usb_ctrlrequest *dr = &catc->ctrl_dr; int status; dr->bRequest = q->request; dr->bRequestType = 0x40 | q->dir; dr->wValue = cpu_to_le16(q->value); dr->wIndex = cpu_to_le16(q->index); dr->wLength = cpu_to_le16(q->len); urb->pipe = q->dir ? 
usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0); urb->transfer_buffer_length = q->len; urb->transfer_buffer = catc->ctrl_buf; urb->setup_packet = (void *) dr; urb->dev = usbdev; if (!q->dir && q->buf && q->len) memcpy(catc->ctrl_buf, q->buf, q->len); if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC))) dev_err(&catc->usbdev->dev, "submit(ctrl_urb) status %d\n", status); } static void catc_ctrl_done(struct urb *urb) { struct catc *catc = urb->context; struct ctrl_queue *q; unsigned long flags; int status = urb->status; if (status) dev_dbg(&urb->dev->dev, "ctrl_done, status %d, len %d.\n", status, urb->actual_length); spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_tail; if (q->dir) { if (q->buf && q->len) memcpy(q->buf, catc->ctrl_buf, q->len); else q->buf = catc->ctrl_buf; } if (q->callback) q->callback(catc, q); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head != catc->ctrl_tail) catc_ctrl_run(catc); else clear_bit(CTRL_RUNNING, &catc->flags); spin_unlock_irqrestore(&catc->ctrl_lock, flags); } static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q)) { struct ctrl_queue *q; int retval = 0; unsigned long flags; spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_head; q->dir = dir; q->request = request; q->value = value; q->index = index; q->buf = buf; q->len = len; q->callback = callback; catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head == catc->ctrl_tail) { dev_err(&catc->usbdev->dev, "ctrl queue full\n"); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); retval = -1; } if (!test_and_set_bit(CTRL_RUNNING, &catc->flags)) catc_ctrl_run(catc); spin_unlock_irqrestore(&catc->ctrl_lock, flags); return retval; } /* * Statistics. */ static void catc_stats_done(struct catc *catc, struct ctrl_queue *q) { int index = q->index - EthStats; u16 data, last; catc->stats_buf[index] = *((char *)q->buf); if (index & 1) return; data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1]; last = catc->stats_vals[index >> 1]; switch (index) { case TxSingleColl: case TxMultiColl: catc->netdev->stats.collisions += data - last; break; case TxExcessColl: catc->netdev->stats.tx_aborted_errors += data - last; catc->netdev->stats.tx_errors += data - last; break; case RxFramErr: catc->netdev->stats.rx_frame_errors += data - last; catc->netdev->stats.rx_errors += data - last; break; } catc->stats_vals[index >> 1] = data; } static void catc_stats_timer(unsigned long data) { struct catc *catc = (void *) data; int i; for (i = 0; i < 8; i++) catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done); mod_timer(&catc->timer, jiffies + STATS_UPDATE); } /* * Receive modes. Broadcast, Multicast, Promisc. */ static void catc_multicast(unsigned char *addr, u8 *multicast) { u32 crc; crc = ether_crc_le(6, addr); multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } static void catc_set_multicast_list(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); struct netdev_hw_addr *ha; u8 broadcast[ETH_ALEN]; u8 rx = RxEnable | RxPolarity | RxMultiCast; eth_broadcast_addr(broadcast); memset(catc->multicast, 0, 64); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); if (netdev->flags & IFF_PROMISC) { memset(catc->multicast, 0xff, 64); rx |= (!catc->is_f5u011) ? 
RxPromisc : AltRxPromisc; } if (netdev->flags & IFF_ALLMULTI) { memset(catc->multicast, 0xff, 64); } else { netdev_for_each_mc_addr(ha, netdev) { u32 crc = ether_crc_le(6, ha->addr); if (!catc->is_f5u011) { catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } else { catc->multicast[7-(crc >> 29)] |= 1 << ((crc >> 26) & 7); } } } if (!catc->is_f5u011) { catc_set_reg_async(catc, RxUnit, rx); catc_write_mem_async(catc, 0xfa80, catc->multicast, 64); } else { f5u011_mchash_async(catc, catc->multicast); if (catc->rxmode[0] != rx) { catc->rxmode[0] = rx; netdev_dbg(catc->netdev, "Setting RX mode to %2.2X %2.2X\n", catc->rxmode[0], catc->rxmode[1]); f5u011_rxmode_async(catc, catc->rxmode); } } } static void catc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct catc *catc = netdev_priv(dev); strlcpy(info->driver, driver_name, sizeof(info->driver)); strlcpy(info->version, DRIVER_VERSION, sizeof(info->version)); usb_make_path(catc->usbdev, info->bus_info, sizeof(info->bus_info)); } static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct catc *catc = netdev_priv(dev); if (!catc->is_f5u011) return -EOPNOTSUPP; cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP; cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP; ethtool_cmd_speed_set(cmd, SPEED_10); cmd->duplex = DUPLEX_HALF; cmd->port = PORT_TP; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 1; cmd->maxrxpkt = 1; return 0; } static const struct ethtool_ops ops = { .get_drvinfo = catc_get_drvinfo, .get_settings = catc_get_settings, .get_link = ethtool_op_get_link }; /* * Open, close. */ static int catc_open(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); int status; catc->irq_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) { dev_err(&catc->usbdev->dev, "submit(irq_urb) status %d\n", status); return -1; } netif_start_queue(netdev); if (!catc->is_f5u011) mod_timer(&catc->timer, jiffies + STATS_UPDATE); return 0; } static int catc_stop(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); netif_stop_queue(netdev); if (!catc->is_f5u011) del_timer_sync(&catc->timer); usb_kill_urb(catc->rx_urb); usb_kill_urb(catc->tx_urb); usb_kill_urb(catc->irq_urb); usb_kill_urb(catc->ctrl_urb); return 0; } static const struct net_device_ops catc_netdev_ops = { .ndo_open = catc_open, .ndo_stop = catc_stop, .ndo_start_xmit = catc_start_xmit, .ndo_tx_timeout = catc_tx_timeout, .ndo_set_rx_mode = catc_set_multicast_list, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* * USB probe, disconnect. 
*/ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct device *dev = &intf->dev; struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *netdev; struct catc *catc; u8 broadcast[ETH_ALEN]; int pktsz, ret; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { dev_err(dev, "Can't set altsetting 1.\n"); return -EIO; } netdev = alloc_etherdev(sizeof(struct catc)); if (!netdev) return -ENOMEM; catc = netdev_priv(netdev); netdev->netdev_ops = &catc_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT; netdev->ethtool_ops = &ops; catc->usbdev = usbdev; catc->netdev = netdev; spin_lock_init(&catc->tx_lock); spin_lock_init(&catc->ctrl_lock); init_timer(&catc->timer); catc->timer.data = (long) catc; catc->timer.function = catc_stats_timer; catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { dev_err(&intf->dev, "No free urbs available.\n"); ret = -ENOMEM; goto fail_free; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { dev_dbg(dev, "Testing for f5u011\n"); catc->is_f5u011 = 1; atomic_set(&catc->recq_sz, 0); pktsz = RX_PKT_SZ; } else { pktsz = RX_MAX_BURST * (PKT_SZ + 2); } usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0), NULL, NULL, 0, catc_ctrl_done, catc); usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1), NULL, 0, catc_tx_done, catc); usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1), catc->rx_buf, pktsz, catc_rx_done, catc); usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2), catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { u32 *buf; int i; dev_dbg(dev, "Checking memory size\n"); buf = kmalloc(4, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto fail_free; } *buf = 0x12345678; catc_write_mem(catc, 0x7a80, buf, 4); *buf = 0x87654321; catc_write_mem(catc, 0xfa80, buf, 4); catc_read_mem(catc, 0x7a80, buf, 4); switch (*buf) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); dev_dbg(dev, "64k Memory\n"); break; default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); dev_dbg(dev, "32k Memory\n"); break; } kfree(buf); dev_dbg(dev, "Getting MAC from SEEROM.\n"); catc_get_mac(catc, netdev->dev_addr); dev_dbg(dev, "Setting MAC into registers.\n"); for (i = 0; i < 6; i++) catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); dev_dbg(dev, "Filling the multicast list.\n"); eth_broadcast_addr(broadcast); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); dev_dbg(dev, "Clearing error counters.\n"); for (i = 0; i < 8; i++) catc_set_reg(catc, EthStats + i, 0); catc->last_stats = jiffies; dev_dbg(dev, "Enabling.\n"); catc_set_reg(catc, MaxBurst, RX_MAX_BURST); catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); catc_set_reg(catc, LEDCtrl, LEDLink); catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); } else { dev_dbg(dev, "Performing reset\n"); 
catc_reset(catc); catc_get_mac(catc, netdev->dev_addr); dev_dbg(dev, "Setting RX Mode\n"); catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; catc->rxmode[1] = 0; f5u011_rxmode(catc, catc->rxmode); } dev_dbg(dev, "Init done.\n"); printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); ret = register_netdev(netdev); if (ret) goto fail_clear_intfdata; return 0; fail_clear_intfdata: usb_set_intfdata(intf, NULL); fail_free: usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return ret; } static void catc_disconnect(struct usb_interface *intf) { struct catc *catc = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (catc) { unregister_netdev(catc->netdev); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(catc->netdev); } } /* * Module functions and tables. */ static struct usb_device_id catc_id_table [] = { { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */ { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */ { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */ { } }; MODULE_DEVICE_TABLE(usb, catc_id_table); static struct usb_driver catc_driver = { .name = driver_name, .probe = catc_probe, .disconnect = catc_disconnect, .id_table = catc_id_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(catc_driver);
static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct device *dev = &intf->dev; struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *netdev; struct catc *catc; u8 broadcast[ETH_ALEN]; int i, pktsz, ret; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { dev_err(dev, "Can't set altsetting 1.\n"); return -EIO; } netdev = alloc_etherdev(sizeof(struct catc)); if (!netdev) return -ENOMEM; catc = netdev_priv(netdev); netdev->netdev_ops = &catc_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT; netdev->ethtool_ops = &ops; catc->usbdev = usbdev; catc->netdev = netdev; spin_lock_init(&catc->tx_lock); spin_lock_init(&catc->ctrl_lock); init_timer(&catc->timer); catc->timer.data = (long) catc; catc->timer.function = catc_stats_timer; catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { dev_err(&intf->dev, "No free urbs available.\n"); ret = -ENOMEM; goto fail_free; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { dev_dbg(dev, "Testing for f5u011\n"); catc->is_f5u011 = 1; atomic_set(&catc->recq_sz, 0); pktsz = RX_PKT_SZ; } else { pktsz = RX_MAX_BURST * (PKT_SZ + 2); } usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0), NULL, NULL, 0, catc_ctrl_done, catc); usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1), NULL, 0, catc_tx_done, catc); usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1), catc->rx_buf, pktsz, catc_rx_done, catc); usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2), catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { dev_dbg(dev, "Checking memory size\n"); i = 0x12345678; catc_write_mem(catc, 0x7a80, &i, 4); i = 0x87654321; catc_write_mem(catc, 0xfa80, &i, 4); catc_read_mem(catc, 0x7a80, &i, 4); switch (i) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); dev_dbg(dev, "64k Memory\n"); break; default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); dev_dbg(dev, "32k Memory\n"); break; } dev_dbg(dev, "Getting MAC from SEEROM.\n"); catc_get_mac(catc, netdev->dev_addr); dev_dbg(dev, "Setting MAC into registers.\n"); for (i = 0; i < 6; i++) catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); dev_dbg(dev, "Filling the multicast list.\n"); eth_broadcast_addr(broadcast); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); dev_dbg(dev, "Clearing error counters.\n"); for (i = 0; i < 8; i++) catc_set_reg(catc, EthStats + i, 0); catc->last_stats = jiffies; dev_dbg(dev, "Enabling.\n"); catc_set_reg(catc, MaxBurst, RX_MAX_BURST); catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); catc_set_reg(catc, LEDCtrl, LEDLink); catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); } else { dev_dbg(dev, "Performing reset\n"); catc_reset(catc); catc_get_mac(catc, netdev->dev_addr); dev_dbg(dev, "Setting RX Mode\n"); catc->rxmode[0] = RxEnable | 
RxPolarity | RxMultiCast; catc->rxmode[1] = 0; f5u011_rxmode(catc, catc->rxmode); } dev_dbg(dev, "Init done.\n"); printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); ret = register_netdev(netdev); if (ret) goto fail_clear_intfdata; return 0; fail_clear_intfdata: usb_set_intfdata(intf, NULL); fail_free: usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return ret; }
static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct device *dev = &intf->dev; struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *netdev; struct catc *catc; u8 broadcast[ETH_ALEN]; int pktsz, ret; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { dev_err(dev, "Can't set altsetting 1.\n"); return -EIO; } netdev = alloc_etherdev(sizeof(struct catc)); if (!netdev) return -ENOMEM; catc = netdev_priv(netdev); netdev->netdev_ops = &catc_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT; netdev->ethtool_ops = &ops; catc->usbdev = usbdev; catc->netdev = netdev; spin_lock_init(&catc->tx_lock); spin_lock_init(&catc->ctrl_lock); init_timer(&catc->timer); catc->timer.data = (long) catc; catc->timer.function = catc_stats_timer; catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { dev_err(&intf->dev, "No free urbs available.\n"); ret = -ENOMEM; goto fail_free; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { dev_dbg(dev, "Testing for f5u011\n"); catc->is_f5u011 = 1; atomic_set(&catc->recq_sz, 0); pktsz = RX_PKT_SZ; } else { pktsz = RX_MAX_BURST * (PKT_SZ + 2); } usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0), NULL, NULL, 0, catc_ctrl_done, catc); usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1), NULL, 0, catc_tx_done, catc); usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1), catc->rx_buf, pktsz, catc_rx_done, catc); usb_fill_int_urb(catc->irq_urb, usbdev, usb_rcvintpipe(usbdev, 2), catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { u32 *buf; int i; dev_dbg(dev, "Checking memory size\n"); buf = kmalloc(4, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto fail_free; } *buf = 0x12345678; catc_write_mem(catc, 0x7a80, buf, 4); *buf = 0x87654321; catc_write_mem(catc, 0xfa80, buf, 4); catc_read_mem(catc, 0x7a80, buf, 4); switch (*buf) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); dev_dbg(dev, "64k Memory\n"); break; default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); dev_dbg(dev, "32k Memory\n"); break; } kfree(buf); dev_dbg(dev, "Getting MAC from SEEROM.\n"); catc_get_mac(catc, netdev->dev_addr); dev_dbg(dev, "Setting MAC into registers.\n"); for (i = 0; i < 6; i++) catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); dev_dbg(dev, "Filling the multicast list.\n"); eth_broadcast_addr(broadcast); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); dev_dbg(dev, "Clearing error counters.\n"); for (i = 0; i < 8; i++) catc_set_reg(catc, EthStats + i, 0); catc->last_stats = jiffies; dev_dbg(dev, "Enabling.\n"); catc_set_reg(catc, MaxBurst, RX_MAX_BURST); catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); catc_set_reg(catc, LEDCtrl, LEDLink); catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); } else { dev_dbg(dev, "Performing reset\n"); 
catc_reset(catc); catc_get_mac(catc, netdev->dev_addr); dev_dbg(dev, "Setting RX Mode\n"); catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; catc->rxmode[1] = 0; f5u011_rxmode(catc, catc->rxmode); } dev_dbg(dev, "Init done.\n"); printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate", usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); ret = register_netdev(netdev); if (ret) goto fail_clear_intfdata; return 0; fail_clear_intfdata: usb_set_intfdata(intf, NULL); fail_free: usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return ret; }
{'added': [(779, '\tint pktsz, ret;'), (843, '\t\tu32 *buf;'), (844, '\t\tint i;'), (845, ''), (848, '\t\tbuf = kmalloc(4, GFP_KERNEL);'), (849, '\t\tif (!buf) {'), (850, '\t\t\tret = -ENOMEM;'), (851, '\t\t\tgoto fail_free;'), (852, '\t\t}'), (853, ''), (854, '\t\t*buf = 0x12345678;'), (855, '\t\tcatc_write_mem(catc, 0x7a80, buf, 4);'), (856, '\t\t*buf = 0x87654321;'), (857, '\t\tcatc_write_mem(catc, 0xfa80, buf, 4);'), (858, '\t\tcatc_read_mem(catc, 0x7a80, buf, 4);'), (860, '\t\tswitch (*buf) {'), (875, ''), (876, '\t\tkfree(buf);')], 'deleted': [(779, '\tint i, pktsz, ret;'), (845, '\t\ti = 0x12345678;'), (846, '\t\tcatc_write_mem(catc, 0x7a80, &i, 4);'), (847, '\t\ti = 0x87654321;'), (848, '\t\tcatc_write_mem(catc, 0xfa80, &i, 4);'), (849, '\t\tcatc_read_mem(catc, 0x7a80, &i, 4);'), (851, '\t\tswitch (i) {')]}
num_lines_added: 18
num_lines_deleted: 7
num_lines_in_file: 694
num_tokens_in_file: 4,711
num_lines_in_method: 125
num_tokens_in_method: 976
method_complexity: 17
repo: https://github.com/torvalds/linux
cve_id: CVE-2017-8070
cwe_id: CWE-119
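The diff recorded above (CVE-2017-8070, CWE-119) moves the memory-size probe buffer off the stack: the code_before variant passed `&i`, a local `int` in catc_probe(), into catc_write_mem()/catc_read_mem(), which forward the pointer to usb_control_msg() and hence to a buffer the USB core may DMA-map, and DMA into an on-stack object is an invalid buffer operation. The code_after variant kmalloc()s a 4-byte buffer, does the round trip through it, and frees it before leaving the branch. Below is a minimal sketch of that pattern, assuming hypothetical demo_* names in place of the driver's struct catc and catc_*_mem() wrappers:

#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

/*
 * Sketch of the fix pattern shown in the diff above. struct demo_dev and
 * the demo_*_mem() helpers are hypothetical stand-ins for the driver's
 * struct catc and its catc_*_mem() wrappers around usb_control_msg().
 */
struct demo_dev;
extern void demo_write_mem(struct demo_dev *dev, u16 addr, void *buf, int len);
extern void demo_read_mem(struct demo_dev *dev, u16 addr, void *buf, int len);

static int demo_probe_memory(struct demo_dev *dev)
{
	u32 *buf;
	int ret = 0;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);	/* heap buffer, DMA-safe */
	if (!buf)
		return -ENOMEM;

	*buf = 0x12345678;
	demo_write_mem(dev, 0x7a80, buf, 4);	/* code_before passed &i here */
	demo_read_mem(dev, 0x7a80, buf, 4);
	if (*buf != 0x12345678)
		ret = -EIO;	/* pattern did not survive the round trip */

	kfree(buf);
	return ret;
}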
id: 2,008
file_name: PcxDecode.c
programming_language: C
method_name: ImagingPcxDecode
/* * The Python Imaging Library. * $Id$ * * decoder for PCX image data. * * history: * 95-09-14 fl Created * * Copyright (c) Fredrik Lundh 1995. * Copyright (c) Secret Labs AB 1997. * * See the README file for information on usage and redistribution. */ #include "Imaging.h" int ImagingPcxDecode(Imaging im, ImagingCodecState state, UINT8* buf, Py_ssize_t bytes) { UINT8 n; UINT8* ptr; if (strcmp(im->mode, "1") == 0 && state->xsize > state->bytes * 8) { state->errcode = IMAGING_CODEC_OVERRUN; return -1; } else if (strcmp(im->mode, "P") == 0 && state->xsize > state->bytes) { state->errcode = IMAGING_CODEC_OVERRUN; return -1; } ptr = buf; for (;;) { if (bytes < 1) return ptr - buf; if ((*ptr & 0xC0) == 0xC0) { /* Run */ if (bytes < 2) return ptr - buf; n = ptr[0] & 0x3F; while (n > 0) { if (state->x >= state->bytes) { state->errcode = IMAGING_CODEC_OVERRUN; break; } state->buffer[state->x++] = ptr[1]; n--; } ptr += 2; bytes -= 2; } else { /* Literal */ state->buffer[state->x++] = ptr[0]; ptr++; bytes--; } if (state->x >= state->bytes) { if (state->bytes % state->xsize && state->bytes > state->xsize) { int bands = state->bytes / state->xsize; int stride = state->bytes / bands; int i; for (i=1; i< bands; i++) { // note -- skipping first band memmove(&state->buffer[i*state->xsize], &state->buffer[i*stride], state->xsize); } } /* Got a full line, unpack it */ state->shuffle((UINT8*) im->image[state->y + state->yoff] + state->xoff * im->pixelsize, state->buffer, state->xsize); state->x = 0; if (++state->y >= state->ysize) { /* End of file (errcode = 0) */ return -1; } } } }
/* * The Python Imaging Library. * $Id$ * * decoder for PCX image data. * * history: * 95-09-14 fl Created * * Copyright (c) Fredrik Lundh 1995. * Copyright (c) Secret Labs AB 1997. * * See the README file for information on usage and redistribution. */ #include "Imaging.h" int ImagingPcxDecode(Imaging im, ImagingCodecState state, UINT8* buf, Py_ssize_t bytes) { UINT8 n; UINT8* ptr; if ((state->xsize * state->bits + 7) / 8 > state->bytes) { state->errcode = IMAGING_CODEC_OVERRUN; return -1; } ptr = buf; for (;;) { if (bytes < 1) return ptr - buf; if ((*ptr & 0xC0) == 0xC0) { /* Run */ if (bytes < 2) return ptr - buf; n = ptr[0] & 0x3F; while (n > 0) { if (state->x >= state->bytes) { state->errcode = IMAGING_CODEC_OVERRUN; break; } state->buffer[state->x++] = ptr[1]; n--; } ptr += 2; bytes -= 2; } else { /* Literal */ state->buffer[state->x++] = ptr[0]; ptr++; bytes--; } if (state->x >= state->bytes) { if (state->bytes % state->xsize && state->bytes > state->xsize) { int bands = state->bytes / state->xsize; int stride = state->bytes / bands; int i; for (i=1; i< bands; i++) { // note -- skipping first band memmove(&state->buffer[i*state->xsize], &state->buffer[i*stride], state->xsize); } } /* Got a full line, unpack it */ state->shuffle((UINT8*) im->image[state->y + state->yoff] + state->xoff * im->pixelsize, state->buffer, state->xsize); state->x = 0; if (++state->y >= state->ysize) { /* End of file (errcode = 0) */ return -1; } } } }
ImagingPcxDecode(Imaging im, ImagingCodecState state, UINT8* buf, Py_ssize_t bytes) { UINT8 n; UINT8* ptr; if (strcmp(im->mode, "1") == 0 && state->xsize > state->bytes * 8) { state->errcode = IMAGING_CODEC_OVERRUN; return -1; } else if (strcmp(im->mode, "P") == 0 && state->xsize > state->bytes) { state->errcode = IMAGING_CODEC_OVERRUN; return -1; } ptr = buf; for (;;) { if (bytes < 1) return ptr - buf; if ((*ptr & 0xC0) == 0xC0) { /* Run */ if (bytes < 2) return ptr - buf; n = ptr[0] & 0x3F; while (n > 0) { if (state->x >= state->bytes) { state->errcode = IMAGING_CODEC_OVERRUN; break; } state->buffer[state->x++] = ptr[1]; n--; } ptr += 2; bytes -= 2; } else { /* Literal */ state->buffer[state->x++] = ptr[0]; ptr++; bytes--; } if (state->x >= state->bytes) { if (state->bytes % state->xsize && state->bytes > state->xsize) { int bands = state->bytes / state->xsize; int stride = state->bytes / bands; int i; for (i=1; i< bands; i++) { // note -- skipping first band memmove(&state->buffer[i*state->xsize], &state->buffer[i*stride], state->xsize); } } /* Got a full line, unpack it */ state->shuffle((UINT8*) im->image[state->y + state->yoff] + state->xoff * im->pixelsize, state->buffer, state->xsize); state->x = 0; if (++state->y >= state->ysize) { /* End of file (errcode = 0) */ return -1; } } } }
ImagingPcxDecode(Imaging im, ImagingCodecState state, UINT8* buf, Py_ssize_t bytes) { UINT8 n; UINT8* ptr; if ((state->xsize * state->bits + 7) / 8 > state->bytes) { state->errcode = IMAGING_CODEC_OVERRUN; return -1; } ptr = buf; for (;;) { if (bytes < 1) return ptr - buf; if ((*ptr & 0xC0) == 0xC0) { /* Run */ if (bytes < 2) return ptr - buf; n = ptr[0] & 0x3F; while (n > 0) { if (state->x >= state->bytes) { state->errcode = IMAGING_CODEC_OVERRUN; break; } state->buffer[state->x++] = ptr[1]; n--; } ptr += 2; bytes -= 2; } else { /* Literal */ state->buffer[state->x++] = ptr[0]; ptr++; bytes--; } if (state->x >= state->bytes) { if (state->bytes % state->xsize && state->bytes > state->xsize) { int bands = state->bytes / state->xsize; int stride = state->bytes / bands; int i; for (i=1; i< bands; i++) { // note -- skipping first band memmove(&state->buffer[i*state->xsize], &state->buffer[i*stride], state->xsize); } } /* Got a full line, unpack it */ state->shuffle((UINT8*) im->image[state->y + state->yoff] + state->xoff * im->pixelsize, state->buffer, state->xsize); state->x = 0; if (++state->y >= state->ysize) { /* End of file (errcode = 0) */ return -1; } } } }
{'added': [(25, ' if ((state->xsize * state->bits + 7) / 8 > state->bytes) {')], 'deleted': [(25, ' if (strcmp(im->mode, "1") == 0 && state->xsize > state->bytes * 8) {'), (26, ' state->errcode = IMAGING_CODEC_OVERRUN;'), (27, ' return -1;'), (28, ' } else if (strcmp(im->mode, "P") == 0 && state->xsize > state->bytes) {')]}
num_lines_added: 1
num_lines_deleted: 4
num_lines_in_file: 52
num_tokens_in_file: 352
num_lines_in_method: 53
num_tokens_in_method: 386
method_complexity: 16
repo: https://github.com/python-pillow/Pillow
cve_id: CVE-2020-10378
cwe_id: CWE-125
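The one-line replacement above (CVE-2020-10378, CWE-125) generalizes the overrun guard: instead of special-casing modes "1" and "P" with strcmp(), code_after checks for every mode that one full scanline, (state->xsize * state->bits + 7) / 8 bytes once rounded up to whole bytes, fits in the state->bytes staging buffer. A self-contained illustration of that bound, with made-up sizes:

#include <stdio.h>

/*
 * Standalone illustration of the guard added in the fix above: a row of
 * xsize pixels at bits bits per pixel needs (xsize * bits + 7) / 8 bytes,
 * rounding up to a whole byte. Decoding would run past the staging buffer
 * whenever that exceeds its size. All values below are made up.
 */
static int row_fits(int xsize, int bits, int bytes)
{
    return (xsize * bits + 7) / 8 <= bytes;
}

int main(void)
{
    printf("%d\n", row_fits(640, 1, 80));  /* 1-bit row: 80 bytes, fits -> 1 */
    printf("%d\n", row_fits(640, 8, 512)); /* 8-bit row: 640 > 512 -> 0 */
    return 0;
}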
id: 197
file_name: pooling.cc
programming_language: C++
method_name: tflite::ops::builtin::pooling::MaxEval
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h" #include <stddef.h> #include <stdint.h> #include <cstdlib> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" #include "tensorflow/lite/kernels/internal/reference/pooling.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/padding.h" namespace tflite { namespace ops { namespace builtin { namespace pooling { // This file has two implementation of each pooling op. enum KernelType { kReference, kGenericOptimized, }; enum PoolType { kAverage, kMax, kL2, }; struct OpData { TfLitePaddingValues padding; }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). return new OpData; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } template <PoolType pool_type> TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); int batches = input->dims->data[0]; int height = input->dims->data[1]; int width = input->dims->data[2]; int channels_out = input->dims->data[3]; // Matching GetWindowedOutputSize in TensorFlow. 
auto padding = params->padding; int out_width, out_height; data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, 1, 1, height, width, params->filter_height, params->filter_width, padding, &out_height, &out_width); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (pool_type == kAverage || pool_type == kMax) { TFLITE_DCHECK_LE(std::abs(input->params.scale - output->params.scale), 1.0e-6); TFLITE_DCHECK_EQ(input->params.zero_point, output->params.zero_point); } if (pool_type == kL2) { // We currently don't have a quantized implementation of L2Pool TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); } } TfLiteIntArray* output_size = TfLiteIntArrayCreate(4); output_size->data[0] = batches; output_size->data[1] = out_height; output_size->data[2] = out_width; output_size->data[3] = channels_out; return context->ResizeTensor(context, output, output_size); } template <KernelType kernel_type> void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { float activation_min, activation_max; CalculateActivationRange(params->activation, &activation_min, &activation_max); #define TF_LITE_AVERAGE_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.float_activation_min = activation_min; \ op_params.float_activation_max = activation_max; \ type::AveragePool(op_params, GetTensorShape(input), \ GetTensorData<float>(input), GetTensorShape(output), \ GetTensorData<float>(output)) if (kernel_type == kReference) { TF_LITE_AVERAGE_POOL(reference_ops); } else { TF_LITE_AVERAGE_POOL(optimized_ops); } #undef TF_LITE_AVERAGE_POOL } template <KernelType kernel_type> void AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_AVERAGE_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::AveragePool(op_params, GetTensorShape(input), \ GetTensorData<uint8_t>(input), GetTensorShape(output), \ GetTensorData<uint8_t>(output)) if (kernel_type == kReference) { TF_LITE_AVERAGE_POOL(reference_ops); } else { TF_LITE_AVERAGE_POOL(optimized_ops); } #undef TF_LITE_AVERAGE_POOL } template <KernelType kernel_type> void AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_AVERAGE_POOL(type) \ 
tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::AveragePool(op_params, GetTensorShape(input), \ GetTensorData<int8_t>(input), GetTensorShape(output), \ GetTensorData<int8_t>(output)) if (kernel_type == kReference) { TF_LITE_AVERAGE_POOL(reference_integer_ops); } else { TF_LITE_AVERAGE_POOL(optimized_integer_ops); } #undef TF_LITE_AVERAGE_POOL } template <KernelType kernel_type> void AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_AVERAGE_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::AveragePool(op_params, GetTensorShape(input), \ GetTensorData<int16_t>(input), GetTensorShape(output), \ GetTensorData<int16_t>(output)) TF_LITE_AVERAGE_POOL(reference_integer_ops); #undef TF_LITE_AVERAGE_POOL } template <KernelType kernel_type> void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { float activation_min, activation_max; CalculateActivationRange(params->activation, &activation_min, &activation_max); #define TF_LITE_MAX_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.float_activation_min = activation_min; \ op_params.float_activation_max = activation_max; \ type::MaxPool(op_params, GetTensorShape(input), GetTensorData<float>(input), \ GetTensorShape(output), GetTensorData<float>(output)) if (kernel_type == kReference) { TF_LITE_MAX_POOL(reference_ops); } else { TF_LITE_MAX_POOL(optimized_ops); } #undef TF_LITE_MAX_POOL } template <KernelType kernel_type> void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_MAX_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ 
op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::MaxPool(op_params, GetTensorShape(input), \ GetTensorData<uint8_t>(input), GetTensorShape(output), \ GetTensorData<uint8_t>(output)) if (kernel_type == kReference) { TF_LITE_MAX_POOL(reference_ops); } else { TF_LITE_MAX_POOL(optimized_ops); } #undef TF_LITE_MAX_POOL } template <KernelType kernel_type> void MaxEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_MAX_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::MaxPool(op_params, GetTensorShape(input), \ GetTensorData<int8_t>(input), GetTensorShape(output), \ GetTensorData<int8_t>(output)) if (kernel_type == kReference) { TF_LITE_MAX_POOL(reference_integer_ops); } else { TF_LITE_MAX_POOL(optimized_integer_ops); } #undef TF_LITE_MAX_POOL } template <KernelType kernel_type> void MaxEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_MAX_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::MaxPool(op_params, GetTensorShape(input), \ GetTensorData<int16_t>(input), GetTensorShape(output), \ GetTensorData<int16_t>(output)) TF_LITE_MAX_POOL(reference_integer_ops); #undef TF_LITE_MAX_POOL } template <KernelType kernel_type> void L2EvalFloat(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { float activation_min, activation_max; CalculateActivationRange(params->activation, &activation_min, &activation_max); #define TF_LITE_L2_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.float_activation_min = activation_min; \ op_params.float_activation_max = activation_max; \ type::L2Pool(op_params, GetTensorShape(input), GetTensorData<float>(input), \ GetTensorShape(output), GetTensorData<float>(output)) if (kernel_type == 
kReference) { TF_LITE_L2_POOL(reference_ops); } else { TF_LITE_L2_POOL(optimized_ops); } #undef TF_LITE_L2_POOL } #undef TF_LITE_KERNEL_TYPE_DISPATCH template <KernelType kernel_type> TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: AverageEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } template <KernelType kernel_type> TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: MaxEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } template <KernelType kernel_type> TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: L2EvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: // We don't have a quantized implementation, so just fall through to the // 'default' case. 
default: context->ReportError(context, "Type %d not currently supported.", input->type); return kTfLiteError; } return kTfLiteOk; } } // namespace pooling TfLiteRegistration* Register_AVERAGE_POOL_REF() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kAverage>, pooling::AverageEval<pooling::kReference>}; return &r; } TfLiteRegistration* Register_MAX_POOL_REF() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kMax>, pooling::MaxEval<pooling::kReference>}; return &r; } TfLiteRegistration* Register_L2_POOL_REF() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kL2>, pooling::L2Eval<pooling::kReference>}; return &r; } TfLiteRegistration* Register_AVERAGE_POOL_GENERIC_OPT() { static TfLiteRegistration r = { pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kAverage>, pooling::AverageEval<pooling::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_MAX_POOL_GENERIC_OPT() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kMax>, pooling::MaxEval<pooling::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_L2_POOL_GENERIC_OPT() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kL2>, pooling::L2Eval<pooling::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_AVERAGE_POOL_2D() { return Register_AVERAGE_POOL_GENERIC_OPT(); } TfLiteRegistration* Register_MAX_POOL_2D() { return Register_MAX_POOL_GENERIC_OPT(); } TfLiteRegistration* Register_L2_POOL_2D() { return Register_L2_POOL_GENERIC_OPT(); } } // namespace builtin } // namespace ops } // namespace tflite
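As far as the cells shown here reveal, the code_after that follows differs from the code_before above in GenericPrepare(): the unchecked GetOutput()/GetInput() calls are replaced by GetOutputSafe()/GetInputSafe() wrapped in TF_LITE_ENSURE_OK, so a missing tensor makes Prepare fail cleanly instead of handing a null pointer to the dimension checks. A minimal C sketch of that status-propagating getter pattern; every name below is a stand-in, not the real TFLite API:

#include <stdio.h>
#include <stddef.h>

/*
 * Sketch of the hardening visible in the code_after cell below: fetch a
 * tensor through a status-returning helper and propagate failure, rather
 * than dereferencing a possibly-NULL pointer. Hypothetical names only.
 */
typedef enum { kOk = 0, kError = 1 } status_t;

typedef struct { int ndims; } tensor_t;

/* Hypothetical safe getter modeled on GetInputSafe(). */
static status_t get_input_safe(tensor_t *storage, const tensor_t **out)
{
    if (storage == NULL)
        return kError;  /* report failure instead of returning NULL */
    *out = storage;
    return kOk;
}

#define ENSURE_OK(expr) do { if ((expr) != kOk) return kError; } while (0)

static status_t prepare(tensor_t *storage)
{
    const tensor_t *input;
    ENSURE_OK(get_input_safe(storage, &input)); /* bails out on NULL */
    printf("ndims=%d\n", input->ndims);
    return kOk;
}

int main(void)
{
    tensor_t t = { 4 };
    printf("%d\n", prepare(&t));   /* kOk: tensor is present */
    printf("%d\n", prepare(NULL)); /* kError: no crash on missing tensor */
    return 0;
}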
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h" #include <stddef.h> #include <stdint.h> #include <cstdlib> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" #include "tensorflow/lite/kernels/internal/reference/pooling.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/kernels/padding.h" namespace tflite { namespace ops { namespace builtin { namespace pooling { // This file has two implementation of each pooling op. enum KernelType { kReference, kGenericOptimized, }; enum PoolType { kAverage, kMax, kL2, }; struct OpData { TfLitePaddingValues padding; }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). return new OpData; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } template <PoolType pool_type> TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); int batches = input->dims->data[0]; int height = input->dims->data[1]; int width = input->dims->data[2]; int channels_out = input->dims->data[3]; // Matching GetWindowedOutputSize in TensorFlow. 
auto padding = params->padding; int out_width, out_height; data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, 1, 1, height, width, params->filter_height, params->filter_width, padding, &out_height, &out_width); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (pool_type == kAverage || pool_type == kMax) { TFLITE_DCHECK_LE(std::abs(input->params.scale - output->params.scale), 1.0e-6); TFLITE_DCHECK_EQ(input->params.zero_point, output->params.zero_point); } if (pool_type == kL2) { // We currently don't have a quantized implementation of L2Pool TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); } } TfLiteIntArray* output_size = TfLiteIntArrayCreate(4); output_size->data[0] = batches; output_size->data[1] = out_height; output_size->data[2] = out_width; output_size->data[3] = channels_out; return context->ResizeTensor(context, output, output_size); } template <KernelType kernel_type> void AverageEvalFloat(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { float activation_min, activation_max; CalculateActivationRange(params->activation, &activation_min, &activation_max); #define TF_LITE_AVERAGE_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.float_activation_min = activation_min; \ op_params.float_activation_max = activation_max; \ type::AveragePool(op_params, GetTensorShape(input), \ GetTensorData<float>(input), GetTensorShape(output), \ GetTensorData<float>(output)) if (kernel_type == kReference) { TF_LITE_AVERAGE_POOL(reference_ops); } else { TF_LITE_AVERAGE_POOL(optimized_ops); } #undef TF_LITE_AVERAGE_POOL } template <KernelType kernel_type> void AverageEvalQuantizedUint8(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_AVERAGE_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::AveragePool(op_params, GetTensorShape(input), \ GetTensorData<uint8_t>(input), GetTensorShape(output), \ GetTensorData<uint8_t>(output)) if (kernel_type == kReference) { TF_LITE_AVERAGE_POOL(reference_ops); } else { TF_LITE_AVERAGE_POOL(optimized_ops); } #undef TF_LITE_AVERAGE_POOL } template <KernelType kernel_type> void AverageEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_AVERAGE_POOL(type) \ 
tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::AveragePool(op_params, GetTensorShape(input), \ GetTensorData<int8_t>(input), GetTensorShape(output), \ GetTensorData<int8_t>(output)) if (kernel_type == kReference) { TF_LITE_AVERAGE_POOL(reference_integer_ops); } else { TF_LITE_AVERAGE_POOL(optimized_integer_ops); } #undef TF_LITE_AVERAGE_POOL } template <KernelType kernel_type> void AverageEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_AVERAGE_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::AveragePool(op_params, GetTensorShape(input), \ GetTensorData<int16_t>(input), GetTensorShape(output), \ GetTensorData<int16_t>(output)) TF_LITE_AVERAGE_POOL(reference_integer_ops); #undef TF_LITE_AVERAGE_POOL } template <KernelType kernel_type> void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { float activation_min, activation_max; CalculateActivationRange(params->activation, &activation_min, &activation_max); #define TF_LITE_MAX_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.float_activation_min = activation_min; \ op_params.float_activation_max = activation_max; \ type::MaxPool(op_params, GetTensorShape(input), GetTensorData<float>(input), \ GetTensorShape(output), GetTensorData<float>(output)) if (kernel_type == kReference) { TF_LITE_MAX_POOL(reference_ops); } else { TF_LITE_MAX_POOL(optimized_ops); } #undef TF_LITE_MAX_POOL } template <KernelType kernel_type> void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_MAX_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ 
op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::MaxPool(op_params, GetTensorShape(input), \ GetTensorData<uint8_t>(input), GetTensorShape(output), \ GetTensorData<uint8_t>(output)) if (kernel_type == kReference) { TF_LITE_MAX_POOL(reference_ops); } else { TF_LITE_MAX_POOL(optimized_ops); } #undef TF_LITE_MAX_POOL } template <KernelType kernel_type> void MaxEvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; (void)CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_MAX_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::MaxPool(op_params, GetTensorShape(input), \ GetTensorData<int8_t>(input), GetTensorShape(output), \ GetTensorData<int8_t>(output)) if (kernel_type == kReference) { TF_LITE_MAX_POOL(reference_integer_ops); } else { TF_LITE_MAX_POOL(optimized_integer_ops); } #undef TF_LITE_MAX_POOL } template <KernelType kernel_type> void MaxEvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { int32_t activation_min; int32_t activation_max; CalculateActivationRangeQuantized(context, params->activation, output, &activation_min, &activation_max); #define TF_LITE_MAX_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.quantized_activation_min = activation_min; \ op_params.quantized_activation_max = activation_max; \ type::MaxPool(op_params, GetTensorShape(input), \ GetTensorData<int16_t>(input), GetTensorShape(output), \ GetTensorData<int16_t>(output)) TF_LITE_MAX_POOL(reference_integer_ops); #undef TF_LITE_MAX_POOL } template <KernelType kernel_type> void L2EvalFloat(TfLiteContext* context, TfLiteNode* node, TfLitePoolParams* params, OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { float activation_min, activation_max; CalculateActivationRange(params->activation, &activation_min, &activation_max); #define TF_LITE_L2_POOL(type) \ tflite::PoolParams op_params; \ op_params.stride_height = params->stride_height; \ op_params.stride_width = params->stride_width; \ op_params.filter_height = params->filter_height; \ op_params.filter_width = params->filter_width; \ op_params.padding_values.height = data->padding.height; \ op_params.padding_values.width = data->padding.width; \ op_params.float_activation_min = activation_min; \ op_params.float_activation_max = activation_max; \ type::L2Pool(op_params, GetTensorShape(input), GetTensorData<float>(input), \ GetTensorShape(output), GetTensorData<float>(output)) if (kernel_type == 
kReference) { TF_LITE_L2_POOL(reference_ops); } else { TF_LITE_L2_POOL(optimized_ops); } #undef TF_LITE_L2_POOL } #undef TF_LITE_KERNEL_TYPE_DISPATCH template <KernelType kernel_type> TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: AverageEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } template <KernelType kernel_type> TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: MaxEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } template <KernelType kernel_type> TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: L2EvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: // We don't have a quantized implementation, so just fall through to the // 'default' case. 
default: context->ReportError(context, "Type %d not currently supported.", input->type); return kTfLiteError; } return kTfLiteOk; } } // namespace pooling TfLiteRegistration* Register_AVERAGE_POOL_REF() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kAverage>, pooling::AverageEval<pooling::kReference>}; return &r; } TfLiteRegistration* Register_MAX_POOL_REF() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kMax>, pooling::MaxEval<pooling::kReference>}; return &r; } TfLiteRegistration* Register_L2_POOL_REF() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kL2>, pooling::L2Eval<pooling::kReference>}; return &r; } TfLiteRegistration* Register_AVERAGE_POOL_GENERIC_OPT() { static TfLiteRegistration r = { pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kAverage>, pooling::AverageEval<pooling::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_MAX_POOL_GENERIC_OPT() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kMax>, pooling::MaxEval<pooling::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_L2_POOL_GENERIC_OPT() { static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::GenericPrepare<pooling::kL2>, pooling::L2Eval<pooling::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_AVERAGE_POOL_2D() { return Register_AVERAGE_POOL_GENERIC_OPT(); } TfLiteRegistration* Register_MAX_POOL_2D() { return Register_MAX_POOL_GENERIC_OPT(); } TfLiteRegistration* Register_L2_POOL_2D() { return Register_L2_POOL_GENERIC_OPT(); } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: MaxEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: MaxEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
--- code_before
+++ code_after
@@ -74,2 +74,4 @@
-  TfLiteTensor* output = GetOutput(context, node, 0);
-  const TfLiteTensor* input = GetInput(context, node, 0);
+  TfLiteTensor* output;
+  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
+  const TfLiteTensor* input;
+  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
@@ -371,2 +373,4 @@
-  TfLiteTensor* output = GetOutput(context, node, 0);
-  const TfLiteTensor* input = GetInput(context, node, 0);
+  TfLiteTensor* output;
+  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
+  const TfLiteTensor* input;
+  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
@@ -402,2 +406,4 @@
-  TfLiteTensor* output = GetOutput(context, node, 0);
-  const TfLiteTensor* input = GetInput(context, node, 0);
+  TfLiteTensor* output;
+  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
+  const TfLiteTensor* input;
+  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
@@ -433,2 +439,4 @@
-  TfLiteTensor* output = GetOutput(context, node, 0);
-  const TfLiteTensor* input = GetInput(context, node, 0);
+  TfLiteTensor* output;
+  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
+  const TfLiteTensor* input;
+  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
16
8
325
2047
28
191
5
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
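The diff above captures the whole fix for CVE-2020-15211 (CWE-125, an out-of-bounds read): every pooling Eval() stops trusting the raw pointers returned by GetOutput()/GetInput() and instead calls GetOutputSafe()/GetInputSafe(), which validate the tensor index and report failure through TF_LITE_ENSURE_OK. The sketch below reproduces that checked-accessor pattern in plain C as a minimal illustration; the names tensor_t, status_t, get_tensor, and checked_get_tensor are invented for the example and are not TensorFlow Lite API.

#include <stdio.h>
#include <stddef.h>

typedef enum { STATUS_OK, STATUS_ERROR } status_t;
typedef struct { float data[4]; } tensor_t;

/* Unchecked accessor in the pre-patch style: a bad index yields NULL,
 * and nothing forces the caller to test it before dereferencing. */
static tensor_t *get_tensor(tensor_t *pool, size_t count, size_t idx) {
    return idx < count ? &pool[idx] : NULL;
}

/* Checked accessor in the post-patch style: the pointer travels through
 * an out-parameter and the return value is a status the caller must
 * inspect, mirroring GetInputSafe()/GetOutputSafe() + TF_LITE_ENSURE_OK. */
static status_t checked_get_tensor(tensor_t *pool, size_t count,
                                   size_t idx, tensor_t **out) {
    if (out == NULL || idx >= count)
        return STATUS_ERROR;
    *out = &pool[idx];
    return STATUS_OK;
}

int main(void) {
    static tensor_t pool[2];
    tensor_t *t = NULL;

    /* The old hazard: raw is NULL here, and a blind dereference would
     * read or write out of bounds with no diagnostic. */
    tensor_t *raw = get_tensor(pool, 2, 5);
    (void)raw;

    /* The fixed pattern: the bad index is rejected before any access. */
    if (checked_get_tensor(pool, 2, 5, &t) != STATUS_OK) {
        fprintf(stderr, "invalid tensor index\n");
        return 1;
    }
    t->data[0] = 1.0f;
    return 0;
}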
227
shm.c
C
shm_destroy
/* * linux/ipc/shm.c * Copyright (C) 1992, 1993 Krishna Balasubramanian * Many improvements/fixes by Bruno Haible. * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994. * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli. * * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * BIGMEM support, Andrea Arcangeli <andrea@suse.de> * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr> * HIGHMEM support, Ingo Molnar <mingo@redhat.com> * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com> * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com> * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> * * Better ipc lock (kern_ipc_perm.lock) handling * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. */ #include <linux/slab.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/init.h> #include <linux/file.h> #include <linux/mman.h> #include <linux/shmem_fs.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/mount.h> #include <linux/ipc_namespace.h> #include <asm/uaccess.h> #include "util.h" struct shm_file_data { int id; struct ipc_namespace *ns; struct file *file; const struct vm_operations_struct *vm_ops; }; #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data)) static const struct file_operations shm_file_operations; static const struct vm_operations_struct shm_vm_ops; #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) #define shm_unlock(shp) \ ipc_unlock(&(shp)->shm_perm) static int newseg(struct ipc_namespace *, struct ipc_params *); static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif void shm_init_ns(struct ipc_namespace *ns) { ns->shm_ctlmax = SHMMAX; ns->shm_ctlall = SHMALL; ns->shm_ctlmni = SHMMNI; ns->shm_rmid_forced = 0; ns->shm_tot = 0; ipc_init_ids(&shm_ids(ns)); } /* * Called with shm_ids.rwsem (writer) and the shp structure locked. * Only shm_ids.rwsem remains locked on exit. 
*/ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_nattch){ shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ shp->shm_perm.key = IPC_PRIVATE; shm_unlock(shp); } else shm_destroy(ns, shp); } #ifdef CONFIG_IPC_NS void shm_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &shm_ids(ns), do_shm_rmid); idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr); } #endif static int __init ipc_ns_init(void) { shm_init_ns(&init_ipc_ns); return 0; } pure_initcall(ipc_ns_init); void __init shm_init (void) { ipc_init_proc_interface("sysvipc/shm", #if BITS_PER_LONG <= 32 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #else " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #endif IPC_SHM_IDS, sysvipc_shm_proc_show); } static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } /* * shm_lock_(check_) routines are called in the paths where the rwsem * is not necessarily held. */ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); if (IS_ERR(ipcp)) return (struct shmid_kernel *)ipcp; return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) { rcu_read_lock(); ipc_lock_object(&ipcp->shm_perm); } static void shm_rcu_free(struct rcu_head *head) { struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); struct shmid_kernel *shp = ipc_rcu_to_struct(p); security_shm_free(shp); ipc_rcu_free(head); } static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) { ipc_rmid(&shm_ids(ns), &s->shm_perm); } /* This is called by fork, once for every shm attach. */ static void shm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; shp = shm_lock(sfd->ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_atim = get_seconds(); shp->shm_lprid = task_tgid_vnr(current); shp->shm_nattch++; shm_unlock(shp); } /* * shm_destroy - free the struct shmid_kernel * * @ns: namespace * @shp: struct to free * * It has to be called with shp and shm_ids.rwsem (writer) locked, * but returns with shp unlocked and freed. */ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; shm_rmid(ns, shp); shm_unlock(shp); if (!is_file_hugepages(shp->shm_file)) shmem_lock(shp->shm_file, 0, shp->mlock_user); else if (shp->mlock_user) user_shm_unlock(file_inode(shp->shm_file)->i_size, shp->mlock_user); fput (shp->shm_file); ipc_rcu_putref(shp, shm_rcu_free); } /* * shm_may_destroy - identifies whether shm segment should be destroyed now * * Returns true if and only if there are no active users of the segment and * one of the following is true: * * 1) shmctl(id, IPC_RMID, NULL) was called for this shp * * 2) sysctl kernel.shm_rmid_forced is set to 1. 
*/ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { return (shp->shm_nattch == 0) && (ns->shm_rmid_forced || (shp->shm_perm.mode & SHM_DEST)); } /* * remove the attach descriptor vma. * free memory for segment if it is marked destroyed. * The descriptor has already been removed from the current->mm->mmap list * and will later be kfree()d. */ static void shm_close(struct vm_area_struct *vma) { struct file * file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; struct ipc_namespace *ns = sfd->ns; down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ shp = shm_lock(ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_current(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_creator != current) return 0; /* * Mark it as orphaned to destroy the segment when * kernel.shm_rmid_forced is changed. * It is noop if the following shm_may_destroy() returns true. */ shp->shm_creator = NULL; /* * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID * is not set, it shouldn't be deleted here. */ if (!ns->shm_rmid_forced) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_orphaned(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); /* * We want to destroy segments without users and with already * exit'ed originating process. * * As shp->* are changed under rwsem, it's safe to skip shp locking. 
*/ if (shp->shm_creator != NULL) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } void shm_destroy_orphaned(struct ipc_namespace *ns) { down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); up_write(&shm_ids(ns).rwsem); } void exit_shm(struct task_struct *task) { struct ipc_namespace *ns = task->nsproxy->ipc_ns; if (shm_ids(ns).in_use == 0) return; /* Destroy all already created segments, but not mapped yet */ down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns); up_write(&shm_ids(ns).rwsem); } static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); return sfd->vm_ops->fault(vma, vmf); } #ifdef CONFIG_NUMA static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); int err = 0; if (sfd->vm_ops->set_policy) err = sfd->vm_ops->set_policy(vma, new); return err; } static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct mempolicy *pol = NULL; if (sfd->vm_ops->get_policy) pol = sfd->vm_ops->get_policy(vma, addr); else if (vma->vm_policy) pol = vma->vm_policy; return pol; } #endif static int shm_mmap(struct file * file, struct vm_area_struct * vma) { struct shm_file_data *sfd = shm_file_data(file); int ret; ret = sfd->file->f_op->mmap(sfd->file, vma); if (ret != 0) return ret; sfd->vm_ops = vma->vm_ops; #ifdef CONFIG_MMU BUG_ON(!sfd->vm_ops->fault); #endif vma->vm_ops = &shm_vm_ops; shm_open(vma); return ret; } static int shm_release(struct inode *ino, struct file *file) { struct shm_file_data *sfd = shm_file_data(file); put_ipc_ns(sfd->ns); shm_file_data(file) = NULL; kfree(sfd); return 0; } static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fsync) return -EINVAL; return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } static long shm_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fallocate) return -EOPNOTSUPP; return sfd->file->f_op->fallocate(file, mode, offset, len); } static unsigned long shm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct shm_file_data *sfd = shm_file_data(file); return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff, flags); } static const struct file_operations shm_file_operations = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, #ifndef CONFIG_MMU .get_unmapped_area = shm_get_unmapped_area, #endif .llseek = noop_llseek, .fallocate = shm_fallocate, }; static const struct file_operations shm_file_operations_huge = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, .fallocate = shm_fallocate, }; int is_file_shm_hugepages(struct file *file) { return file->f_op == &shm_file_operations_huge; } static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ .fault = 
shm_fault, #if defined(CONFIG_NUMA) .set_policy = shm_set_policy, .get_policy = shm_get_policy, #endif }; /** * newseg - Create a new shared memory segment * @ns: namespace * @params: ptr to the structure that contains key, size and shmflg * * Called with shm_ids.rwsem held as a writer. */ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { key_t key = params->key; int shmflg = params->flg; size_t size = params->u.size; int error; struct shmid_kernel *shp; size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; struct file * file; char name[13]; int id; vm_flags_t acctflag = 0; if (size < SHMMIN || size > ns->shm_ctlmax) return -EINVAL; if (ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; shp = ipc_rcu_alloc(sizeof(*shp)); if (!shp) return -ENOMEM; shp->shm_perm.key = key; shp->shm_perm.mode = (shmflg & S_IRWXUGO); shp->mlock_user = NULL; shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { ipc_rcu_putref(shp, ipc_rcu_free); return error; } sprintf (name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { struct hstate *hs; size_t hugesize; hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); if (!hs) { error = -EINVAL; goto no_file; } hugesize = ALIGN(size, huge_page_size(hs)); /* hugetlb_file_setup applies strict accounting */ if (shmflg & SHM_NORESERVE) acctflag = VM_NORESERVE; file = hugetlb_file_setup(name, hugesize, acctflag, &shp->mlock_user, HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { /* * Do not allow no accounting for OVERCOMMIT_NEVER, even * if it's asked for. */ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; file = shmem_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); if (id < 0) { error = id; goto no_id; } shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = get_seconds(); shp->shm_segsz = size; shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; /* * shmid gets reported as "inode#" in /proc/pid/maps. * proc-ps tools use this. Changing this will break them. */ file_inode(file)->i_ino = shp->shm_perm.id; ns->shm_tot += numpages; error = shp->shm_perm.id; ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); return error; no_id: if (is_file_hugepages(file) && shp->mlock_user) user_shm_unlock(size, shp->mlock_user); fput(file); no_file: ipc_rcu_putref(shp, shm_rcu_free); return error; } /* * Called with shm_ids.rwsem and ipcp locked. */ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); return security_shm_associate(shp, shmflg); } /* * Called with shm_ids.rwsem and ipcp locked. 
*/ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_segsz < params->u.size) return -EINVAL; return 0; } SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) { struct ipc_namespace *ns; struct ipc_ops shm_ops; struct ipc_params shm_params; ns = current->nsproxy->ipc_ns; shm_ops.getnew = newseg; shm_ops.associate = shm_security; shm_ops.more_checks = shm_more_checks; shm_params.key = key; shm_params.flg = shmflg; shm_params.u.size = size; return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params); } static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shmid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); out.shm_segsz = in->shm_segsz; out.shm_atime = in->shm_atime; out.shm_dtime = in->shm_dtime; out.shm_ctime = in->shm_ctime; out.shm_cpid = in->shm_cpid; out.shm_lpid = in->shm_lpid; out.shm_nattch = in->shm_nattch; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { switch(version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct shmid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->shm_perm.uid = tbuf_old.shm_perm.uid; out->shm_perm.gid = tbuf_old.shm_perm.gid; out->shm_perm.mode = tbuf_old.shm_perm.mode; return 0; } default: return -EINVAL; } } static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; if(in->shmmax > INT_MAX) out.shmmax = INT_MAX; else out.shmmax = (int)in->shmmax; out.shmmin = in->shmmin; out.shmmni = in->shmmni; out.shmseg = in->shmseg; out.shmall = in->shmall; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } /* * Calculate and add used RSS and swap pages of a shm. * Called with shm_ids.rwsem held as a reader */ static void shm_add_rss_swap(struct shmid_kernel *shp, unsigned long *rss_add, unsigned long *swp_add) { struct inode *inode; inode = file_inode(shp->shm_file); if (is_file_hugepages(shp->shm_file)) { struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_file(shp->shm_file); *rss_add += pages_per_huge_page(h) * mapping->nrpages; } else { #ifdef CONFIG_SHMEM struct shmem_inode_info *info = SHMEM_I(inode); spin_lock(&info->lock); *rss_add += inode->i_mapping->nrpages; *swp_add += info->swapped; spin_unlock(&info->lock); #else *rss_add += inode->i_mapping->nrpages; #endif } } /* * Called with shm_ids.rwsem held as a reader */ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, unsigned long *swp) { int next_id; int total, in_use; *rss = 0; *swp = 0; in_use = shm_ids(ns).in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { struct kern_ipc_perm *ipc; struct shmid_kernel *shp; ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); if (ipc == NULL) continue; shp = container_of(ipc, struct shmid_kernel, shm_perm); shm_add_rss_swap(shp, rss, swp); total++; } } /* * This function handles some shmctl commands which require the rwsem * to be held in write mode. 
* NOTE: no locks must be held, the rwsem is taken inside this function. */ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, struct shmid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; struct shmid64_ds shmid64; struct shmid_kernel *shp; int err; if (cmd == IPC_SET) { if (copy_shmid_from_user(&shmid64, buf, version)) return -EFAULT; } down_write(&shm_ids(ns).rwsem); rcu_read_lock(); ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; } shp = container_of(ipcp, struct shmid_kernel, shm_perm); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; switch (cmd) { case IPC_RMID: ipc_lock_object(&shp->shm_perm); /* do_shm_rmid unlocks the ipc object and rcu */ do_shm_rmid(ns, ipcp); goto out_up; case IPC_SET: ipc_lock_object(&shp->shm_perm); err = ipc_update_perm(&shmid64.shm_perm, ipcp); if (err) goto out_unlock0; shp->shm_ctim = get_seconds(); break; default: err = -EINVAL; goto out_unlock1; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); out_up: up_write(&shm_ids(ns).rwsem); return err; } static int shmctl_nolock(struct ipc_namespace *ns, int shmid, int cmd, int version, void __user *buf) { int err; struct shmid_kernel *shp; /* preliminary security checks for *_INFO */ if (cmd == IPC_INFO || cmd == SHM_INFO) { err = security_shm_shmctl(NULL, cmd); if (err) return err; } switch (cmd) { case IPC_INFO: { struct shminfo64 shminfo; memset(&shminfo, 0, sizeof(shminfo)); shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni; shminfo.shmmax = ns->shm_ctlmax; shminfo.shmall = ns->shm_ctlall; shminfo.shmmin = SHMMIN; if(copy_shminfo_to_user (buf, &shminfo, version)) return -EFAULT; down_read(&shm_ids(ns).rwsem); err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if(err<0) err = 0; goto out; } case SHM_INFO: { struct shm_info shm_info; memset(&shm_info, 0, sizeof(shm_info)); down_read(&shm_ids(ns).rwsem); shm_info.used_ids = shm_ids(ns).in_use; shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); shm_info.shm_tot = ns->shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if (copy_to_user(buf, &shm_info, sizeof(shm_info))) { err = -EFAULT; goto out; } err = err < 0 ? 
0 : err; goto out; } case SHM_STAT: case IPC_STAT: { struct shmid64_ds tbuf; int result; rcu_read_lock(); if (cmd == SHM_STAT) { shp = shm_obtain_object(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = shp->shm_perm.id; } else { shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = 0; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) goto out_unlock; err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); tbuf.shm_segsz = shp->shm_segsz; tbuf.shm_atime = shp->shm_atim; tbuf.shm_dtime = shp->shm_dtim; tbuf.shm_ctime = shp->shm_ctim; tbuf.shm_cpid = shp->shm_cprid; tbuf.shm_lpid = shp->shm_lprid; tbuf.shm_nattch = shp->shm_nattch; rcu_read_unlock(); if (copy_shmid_to_user(buf, &tbuf, version)) err = -EFAULT; else err = result; goto out; } default: return -EINVAL; } out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) { struct shmid_kernel *shp; int err, version; struct ipc_namespace *ns; if (cmd < 0 || shmid < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case SHM_INFO: case SHM_STAT: case IPC_STAT: return shmctl_nolock(ns, shmid, cmd, version, buf); case IPC_RMID: case IPC_SET: return shmctl_down(ns, shmid, cmd, buf, version); case SHM_LOCK: case SHM_UNLOCK: { struct file *shm_file; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock1; } audit_ipc_obj(&(shp->shm_perm)); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; ipc_lock_object(&shp->shm_perm); if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { kuid_t euid = current_euid(); err = -EPERM; if (!uid_eq(euid, shp->shm_perm.uid) && !uid_eq(euid, shp->shm_perm.cuid)) goto out_unlock0; if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) goto out_unlock0; } shm_file = shp->shm_file; if (is_file_hugepages(shm_file)) goto out_unlock0; if (cmd == SHM_LOCK) { struct user_struct *user = current_user(); err = shmem_lock(shm_file, 1, user); if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { shp->shm_perm.mode |= SHM_LOCKED; shp->mlock_user = user; } goto out_unlock0; } /* SHM_UNLOCK */ if (!(shp->shm_perm.mode & SHM_LOCKED)) goto out_unlock0; shmem_lock(shm_file, 0, shp->mlock_user); shp->shm_perm.mode &= ~SHM_LOCKED; shp->mlock_user = NULL; get_file(shm_file); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); shmem_unlock_mapping(shm_file->f_mapping); fput(shm_file); return err; } default: return -EINVAL; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); return err; } /* * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists. * * NOTE! Despite the name, this is NOT a direct system call entrypoint. The * "raddr" thing points to kernel space, and there has to be a wrapper around * this. 
*/ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, unsigned long shmlba) { struct shmid_kernel *shp; unsigned long addr; unsigned long size; struct file * file; int err; unsigned long flags; unsigned long prot; int acc_mode; struct ipc_namespace *ns; struct shm_file_data *sfd; struct path path; fmode_t f_mode; unsigned long populate = 0; err = -EINVAL; if (shmid < 0) goto out; else if ((addr = (ulong)shmaddr)) { if (addr & (shmlba - 1)) { if (shmflg & SHM_RND) addr &= ~(shmlba - 1); /* round down */ else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) #endif goto out; } flags = MAP_SHARED | MAP_FIXED; } else { if ((shmflg & SHM_REMAP)) goto out; flags = MAP_SHARED; } if (shmflg & SHM_RDONLY) { prot = PROT_READ; acc_mode = S_IRUGO; f_mode = FMODE_READ; } else { prot = PROT_READ | PROT_WRITE; acc_mode = S_IRUGO | S_IWUGO; f_mode = FMODE_READ | FMODE_WRITE; } if (shmflg & SHM_EXEC) { prot |= PROT_EXEC; acc_mode |= S_IXUGO; } /* * We cannot rely on the fs check since SYSV IPC does have an * additional creator id... */ ns = current->nsproxy->ipc_ns; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, acc_mode)) goto out_unlock; err = security_shm_shmat(shp, shmaddr, shmflg); if (err) goto out_unlock; ipc_lock_object(&shp->shm_perm); path = shp->shm_file->f_path; path_get(&path); shp->shm_nattch++; size = i_size_read(path.dentry->d_inode); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); err = -ENOMEM; sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); if (!sfd) { path_put(&path); goto out_nattch; } file = alloc_file(&path, f_mode, is_file_hugepages(shp->shm_file) ? &shm_file_operations_huge : &shm_file_operations); err = PTR_ERR(file); if (IS_ERR(file)) { kfree(sfd); path_put(&path); goto out_nattch; } file->private_data = sfd; file->f_mapping = shp->shm_file->f_mapping; sfd->id = shp->shm_perm.id; sfd->ns = get_ipc_ns(ns); sfd->file = shp->shm_file; sfd->vm_ops = NULL; err = security_mmap_file(file, prot, flags); if (err) goto out_fput; down_write(&current->mm->mmap_sem); if (addr && !(shmflg & SHM_REMAP)) { err = -EINVAL; if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; /* * If shm segment goes below stack, make sure there is some * space left for the stack to grow (at least 4 pages). */ if (addr < current->mm->start_stack && addr > current->mm->start_stack - size - PAGE_SIZE * 5) goto invalid; } addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); *raddr = addr; err = 0; if (IS_ERR_VALUE(addr)) err = (long)addr; invalid: up_write(&current->mm->mmap_sem); if (populate) mm_populate(addr, populate); out_fput: fput(file); out_nattch: down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); BUG_ON(IS_ERR(shp)); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); return err; out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) { unsigned long ret; long err; err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); if (err) return err; force_successful_syscall_return(); return (long)ret; } /* * detach and kill segment if marked destroyed. * The work is done in shm_close. 
*/ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long addr = (unsigned long)shmaddr; int retval = -EINVAL; #ifdef CONFIG_MMU loff_t size = 0; struct vm_area_struct *next; #endif if (addr & ~PAGE_MASK) return retval; down_write(&mm->mmap_sem); /* * This function tries to be smart and unmap shm segments that * were modified by partial mlock or munmap calls: * - It first determines the size of the shm segment that should be * unmapped: It searches for a vma that is backed by shm and that * started at address shmaddr. It records it's size and then unmaps * it. * - Then it unmaps all shm vmas that started at shmaddr and that * are within the initially determined size. * Errors from do_munmap are ignored: the function only fails if * it's called with invalid parameters or if it's called to unmap * a part of a vma. Both calls in this function are for full vmas, * the parameters are directly copied from the vma itself and always * valid - therefore do_munmap cannot fail. (famous last words?) */ /* * If it had been mremap()'d, the starting address would not * match the usual checks anyway. So assume all vma's are * above the starting address given. */ vma = find_vma(mm, addr); #ifdef CONFIG_MMU while (vma) { next = vma->vm_next; /* * Check if the starting address would match, i.e. it's * a fragment created by mprotect() and/or munmap(), or it * otherwise it starts at this address with no hassles. */ if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { size = file_inode(vma->vm_file)->i_size; do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); /* * We discovered the size of the shm segment, so * break out of here and fall through to the next * loop that uses the size information to stop * searching for matching vma's. */ retval = 0; vma = next; break; } vma = next; } /* * We need look no further than the maximum address a fragment * could possibly have landed at. Also cast things to loff_t to * prevent overflows and make comparisons vs. equal-width types. 
*/ size = PAGE_ALIGN(size); while (vma && (loff_t)(vma->vm_end - addr) <= size) { next = vma->vm_next; /* finding a matching vma now does not alter retval */ if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); vma = next; } #else /* CONFIG_MMU */ /* under NOMMU conditions, the exact address to be destroyed must be * given */ if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); retval = 0; } #endif up_write(&mm->mmap_sem); return retval; } #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it) { struct user_namespace *user_ns = seq_user_ns(s); struct shmid_kernel *shp = it; unsigned long rss = 0, swp = 0; shm_add_rss_swap(shp, &rss, &swp); #if BITS_PER_LONG <= 32 #define SIZE_SPEC "%10lu" #else #define SIZE_SPEC "%21lu" #endif return seq_printf(s, "%10d %10d %4o " SIZE_SPEC " %5u %5u " "%5lu %5u %5u %5u %5u %10lu %10lu %10lu " SIZE_SPEC " " SIZE_SPEC "\n", shp->shm_perm.key, shp->shm_perm.id, shp->shm_perm.mode, shp->shm_segsz, shp->shm_cprid, shp->shm_lprid, shp->shm_nattch, from_kuid_munged(user_ns, shp->shm_perm.uid), from_kgid_munged(user_ns, shp->shm_perm.gid), from_kuid_munged(user_ns, shp->shm_perm.cuid), from_kgid_munged(user_ns, shp->shm_perm.cgid), shp->shm_atim, shp->shm_dtim, shp->shm_ctim, rss * PAGE_SIZE, swp * PAGE_SIZE); } #endif
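The code_after dump that follows differs from the code_before above mainly in shm_destroy(): the fixed kernel snapshots shp->shm_file into a local variable and sets shp->shm_file to NULL before tearing the segment down, so the final fput() and the mlock bookkeeping operate on the local copy while racing tasks that inspect the struct field see NULL rather than a file whose last reference is about to be dropped. A reduced sketch of that snapshot-then-clear pattern, in plain C with invented names (segment, file_obj, put_file, destroy_segment; not the kernel API), looks like this:

#include <stdio.h>
#include <stdlib.h>

struct file_obj { int refcount; };

struct segment {
    struct file_obj *file;   /* plays the role of shp->shm_file */
};

static void put_file(struct file_obj *f) {
    /* drop one reference; free on the last one (cf. fput()) */
    if (--f->refcount == 0)
        free(f);
}

/* Snapshot-then-clear: copy the shared field into a local and NULL the
 * field while the object is still "locked", then finish teardown using
 * only the local.  Racing readers that test seg->file now observe NULL
 * instead of a soon-to-be-freed pointer. */
static void destroy_segment(struct segment *seg) {
    struct file_obj *file = seg->file;
    seg->file = NULL;
    /* ... the real code unlocks the object here ... */
    put_file(file);
}

int main(void) {
    struct segment seg;
    seg.file = malloc(sizeof(*seg.file));
    if (seg.file == NULL)
        return 1;
    seg.file->refcount = 1;
    destroy_segment(&seg);
    printf("segment destroyed; seg.file is %s\n",
           seg.file == NULL ? "NULL" : "dangling");
    return 0;
}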
/* * linux/ipc/shm.c * Copyright (C) 1992, 1993 Krishna Balasubramanian * Many improvements/fixes by Bruno Haible. * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994. * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli. * * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com> * BIGMEM support, Andrea Arcangeli <andrea@suse.de> * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr> * HIGHMEM support, Ingo Molnar <mingo@redhat.com> * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com> * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com> * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com> * * support for audit of ipc object properties and permission changes * Dustin Kirkland <dustin.kirkland@us.ibm.com> * * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> * * Better ipc lock (kern_ipc_perm.lock) handling * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. */ #include <linux/slab.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/shm.h> #include <linux/init.h> #include <linux/file.h> #include <linux/mman.h> #include <linux/shmem_fs.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/ptrace.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/nsproxy.h> #include <linux/mount.h> #include <linux/ipc_namespace.h> #include <asm/uaccess.h> #include "util.h" struct shm_file_data { int id; struct ipc_namespace *ns; struct file *file; const struct vm_operations_struct *vm_ops; }; #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data)) static const struct file_operations shm_file_operations; static const struct vm_operations_struct shm_vm_ops; #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) #define shm_unlock(shp) \ ipc_unlock(&(shp)->shm_perm) static int newseg(struct ipc_namespace *, struct ipc_params *); static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif void shm_init_ns(struct ipc_namespace *ns) { ns->shm_ctlmax = SHMMAX; ns->shm_ctlall = SHMALL; ns->shm_ctlmni = SHMMNI; ns->shm_rmid_forced = 0; ns->shm_tot = 0; ipc_init_ids(&shm_ids(ns)); } /* * Called with shm_ids.rwsem (writer) and the shp structure locked. * Only shm_ids.rwsem remains locked on exit. 
*/ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_nattch){ shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ shp->shm_perm.key = IPC_PRIVATE; shm_unlock(shp); } else shm_destroy(ns, shp); } #ifdef CONFIG_IPC_NS void shm_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &shm_ids(ns), do_shm_rmid); idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr); } #endif static int __init ipc_ns_init(void) { shm_init_ns(&init_ipc_ns); return 0; } pure_initcall(ipc_ns_init); void __init shm_init (void) { ipc_init_proc_interface("sysvipc/shm", #if BITS_PER_LONG <= 32 " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #else " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", #endif IPC_SHM_IDS, sysvipc_shm_proc_show); } static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } /* * shm_lock_(check_) routines are called in the paths where the rwsem * is not necessarily held. */ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); if (IS_ERR(ipcp)) return (struct shmid_kernel *)ipcp; return container_of(ipcp, struct shmid_kernel, shm_perm); } static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) { rcu_read_lock(); ipc_lock_object(&ipcp->shm_perm); } static void shm_rcu_free(struct rcu_head *head) { struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); struct shmid_kernel *shp = ipc_rcu_to_struct(p); security_shm_free(shp); ipc_rcu_free(head); } static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) { ipc_rmid(&shm_ids(ns), &s->shm_perm); } /* This is called by fork, once for every shm attach. */ static void shm_open(struct vm_area_struct *vma) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; shp = shm_lock(sfd->ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_atim = get_seconds(); shp->shm_lprid = task_tgid_vnr(current); shp->shm_nattch++; shm_unlock(shp); } /* * shm_destroy - free the struct shmid_kernel * * @ns: namespace * @shp: struct to free * * It has to be called with shp and shm_ids.rwsem (writer) locked, * but returns with shp unlocked and freed. 
*/ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { struct file *shm_file; shm_file = shp->shm_file; shp->shm_file = NULL; ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; shm_rmid(ns, shp); shm_unlock(shp); if (!is_file_hugepages(shm_file)) shmem_lock(shm_file, 0, shp->mlock_user); else if (shp->mlock_user) user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user); fput(shm_file); ipc_rcu_putref(shp, shm_rcu_free); } /* * shm_may_destroy - identifies whether shm segment should be destroyed now * * Returns true if and only if there are no active users of the segment and * one of the following is true: * * 1) shmctl(id, IPC_RMID, NULL) was called for this shp * * 2) sysctl kernel.shm_rmid_forced is set to 1. */ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { return (shp->shm_nattch == 0) && (ns->shm_rmid_forced || (shp->shm_perm.mode & SHM_DEST)); } /* * remove the attach descriptor vma. * free memory for segment if it is marked destroyed. * The descriptor has already been removed from the current->mm->mmap list * and will later be kfree()d. */ static void shm_close(struct vm_area_struct *vma) { struct file * file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; struct ipc_namespace *ns = sfd->ns; down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ shp = shm_lock(ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_current(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_creator != current) return 0; /* * Mark it as orphaned to destroy the segment when * kernel.shm_rmid_forced is changed. * It is noop if the following shm_may_destroy() returns true. */ shp->shm_creator = NULL; /* * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID * is not set, it shouldn't be deleted here. */ if (!ns->shm_rmid_forced) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } /* Called with ns->shm_ids(ns).rwsem locked */ static int shm_try_destroy_orphaned(int id, void *p, void *data) { struct ipc_namespace *ns = data; struct kern_ipc_perm *ipcp = p; struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); /* * We want to destroy segments without users and with already * exit'ed originating process. * * As shp->* are changed under rwsem, it's safe to skip shp locking. 
*/ if (shp->shm_creator != NULL) return 0; if (shm_may_destroy(ns, shp)) { shm_lock_by_ptr(shp); shm_destroy(ns, shp); } return 0; } void shm_destroy_orphaned(struct ipc_namespace *ns) { down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); up_write(&shm_ids(ns).rwsem); } void exit_shm(struct task_struct *task) { struct ipc_namespace *ns = task->nsproxy->ipc_ns; if (shm_ids(ns).in_use == 0) return; /* Destroy all already created segments, but not mapped yet */ down_write(&shm_ids(ns).rwsem); if (shm_ids(ns).in_use) idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns); up_write(&shm_ids(ns).rwsem); } static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); return sfd->vm_ops->fault(vma, vmf); } #ifdef CONFIG_NUMA static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); int err = 0; if (sfd->vm_ops->set_policy) err = sfd->vm_ops->set_policy(vma, new); return err; } static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, unsigned long addr) { struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct mempolicy *pol = NULL; if (sfd->vm_ops->get_policy) pol = sfd->vm_ops->get_policy(vma, addr); else if (vma->vm_policy) pol = vma->vm_policy; return pol; } #endif static int shm_mmap(struct file * file, struct vm_area_struct * vma) { struct shm_file_data *sfd = shm_file_data(file); int ret; ret = sfd->file->f_op->mmap(sfd->file, vma); if (ret != 0) return ret; sfd->vm_ops = vma->vm_ops; #ifdef CONFIG_MMU BUG_ON(!sfd->vm_ops->fault); #endif vma->vm_ops = &shm_vm_ops; shm_open(vma); return ret; } static int shm_release(struct inode *ino, struct file *file) { struct shm_file_data *sfd = shm_file_data(file); put_ipc_ns(sfd->ns); shm_file_data(file) = NULL; kfree(sfd); return 0; } static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fsync) return -EINVAL; return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } static long shm_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct shm_file_data *sfd = shm_file_data(file); if (!sfd->file->f_op->fallocate) return -EOPNOTSUPP; return sfd->file->f_op->fallocate(file, mode, offset, len); } static unsigned long shm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct shm_file_data *sfd = shm_file_data(file); return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, pgoff, flags); } static const struct file_operations shm_file_operations = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, #ifndef CONFIG_MMU .get_unmapped_area = shm_get_unmapped_area, #endif .llseek = noop_llseek, .fallocate = shm_fallocate, }; static const struct file_operations shm_file_operations_huge = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, .fallocate = shm_fallocate, }; int is_file_shm_hugepages(struct file *file) { return file->f_op == &shm_file_operations_huge; } static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ .fault = 
shm_fault, #if defined(CONFIG_NUMA) .set_policy = shm_set_policy, .get_policy = shm_get_policy, #endif }; /** * newseg - Create a new shared memory segment * @ns: namespace * @params: ptr to the structure that contains key, size and shmflg * * Called with shm_ids.rwsem held as a writer. */ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { key_t key = params->key; int shmflg = params->flg; size_t size = params->u.size; int error; struct shmid_kernel *shp; size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; struct file * file; char name[13]; int id; vm_flags_t acctflag = 0; if (size < SHMMIN || size > ns->shm_ctlmax) return -EINVAL; if (ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; shp = ipc_rcu_alloc(sizeof(*shp)); if (!shp) return -ENOMEM; shp->shm_perm.key = key; shp->shm_perm.mode = (shmflg & S_IRWXUGO); shp->mlock_user = NULL; shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { ipc_rcu_putref(shp, ipc_rcu_free); return error; } sprintf (name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { struct hstate *hs; size_t hugesize; hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); if (!hs) { error = -EINVAL; goto no_file; } hugesize = ALIGN(size, huge_page_size(hs)); /* hugetlb_file_setup applies strict accounting */ if (shmflg & SHM_NORESERVE) acctflag = VM_NORESERVE; file = hugetlb_file_setup(name, hugesize, acctflag, &shp->mlock_user, HUGETLB_SHMFS_INODE, (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { /* * Do not allow no accounting for OVERCOMMIT_NEVER, even * if it's asked for. */ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) acctflag = VM_NORESERVE; file = shmem_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); if (id < 0) { error = id; goto no_id; } shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = get_seconds(); shp->shm_segsz = size; shp->shm_nattch = 0; shp->shm_file = file; shp->shm_creator = current; /* * shmid gets reported as "inode#" in /proc/pid/maps. * proc-ps tools use this. Changing this will break them. */ file_inode(file)->i_ino = shp->shm_perm.id; ns->shm_tot += numpages; error = shp->shm_perm.id; ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); return error; no_id: if (is_file_hugepages(file) && shp->mlock_user) user_shm_unlock(size, shp->mlock_user); fput(file); no_file: ipc_rcu_putref(shp, shm_rcu_free); return error; } /* * Called with shm_ids.rwsem and ipcp locked. */ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); return security_shm_associate(shp, shmflg); } /* * Called with shm_ids.rwsem and ipcp locked. 
*/ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); if (shp->shm_segsz < params->u.size) return -EINVAL; return 0; } SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) { struct ipc_namespace *ns; struct ipc_ops shm_ops; struct ipc_params shm_params; ns = current->nsproxy->ipc_ns; shm_ops.getnew = newseg; shm_ops.associate = shm_security; shm_ops.more_checks = shm_more_checks; shm_params.key = key; shm_params.flg = shmflg; shm_params.u.size = size; return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params); } static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shmid_ds out; memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); out.shm_segsz = in->shm_segsz; out.shm_atime = in->shm_atime; out.shm_dtime = in->shm_dtime; out.shm_ctime = in->shm_ctime; out.shm_cpid = in->shm_cpid; out.shm_lpid = in->shm_lpid; out.shm_nattch = in->shm_nattch; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } static inline unsigned long copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { switch(version) { case IPC_64: if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; return 0; case IPC_OLD: { struct shmid_ds tbuf_old; if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; out->shm_perm.uid = tbuf_old.shm_perm.uid; out->shm_perm.gid = tbuf_old.shm_perm.gid; out->shm_perm.mode = tbuf_old.shm_perm.mode; return 0; } default: return -EINVAL; } } static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { switch(version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; if(in->shmmax > INT_MAX) out.shmmax = INT_MAX; else out.shmmax = (int)in->shmmax; out.shmmin = in->shmmin; out.shmmni = in->shmmni; out.shmseg = in->shmseg; out.shmall = in->shmall; return copy_to_user(buf, &out, sizeof(out)); } default: return -EINVAL; } } /* * Calculate and add used RSS and swap pages of a shm. * Called with shm_ids.rwsem held as a reader */ static void shm_add_rss_swap(struct shmid_kernel *shp, unsigned long *rss_add, unsigned long *swp_add) { struct inode *inode; inode = file_inode(shp->shm_file); if (is_file_hugepages(shp->shm_file)) { struct address_space *mapping = inode->i_mapping; struct hstate *h = hstate_file(shp->shm_file); *rss_add += pages_per_huge_page(h) * mapping->nrpages; } else { #ifdef CONFIG_SHMEM struct shmem_inode_info *info = SHMEM_I(inode); spin_lock(&info->lock); *rss_add += inode->i_mapping->nrpages; *swp_add += info->swapped; spin_unlock(&info->lock); #else *rss_add += inode->i_mapping->nrpages; #endif } } /* * Called with shm_ids.rwsem held as a reader */ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, unsigned long *swp) { int next_id; int total, in_use; *rss = 0; *swp = 0; in_use = shm_ids(ns).in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { struct kern_ipc_perm *ipc; struct shmid_kernel *shp; ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); if (ipc == NULL) continue; shp = container_of(ipc, struct shmid_kernel, shm_perm); shm_add_rss_swap(shp, rss, swp); total++; } } /* * This function handles some shmctl commands which require the rwsem * to be held in write mode. 
* NOTE: no locks must be held, the rwsem is taken inside this function. */ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, struct shmid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; struct shmid64_ds shmid64; struct shmid_kernel *shp; int err; if (cmd == IPC_SET) { if (copy_shmid_from_user(&shmid64, buf, version)) return -EFAULT; } down_write(&shm_ids(ns).rwsem); rcu_read_lock(); ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0); if (IS_ERR(ipcp)) { err = PTR_ERR(ipcp); goto out_unlock1; } shp = container_of(ipcp, struct shmid_kernel, shm_perm); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; switch (cmd) { case IPC_RMID: ipc_lock_object(&shp->shm_perm); /* do_shm_rmid unlocks the ipc object and rcu */ do_shm_rmid(ns, ipcp); goto out_up; case IPC_SET: ipc_lock_object(&shp->shm_perm); err = ipc_update_perm(&shmid64.shm_perm, ipcp); if (err) goto out_unlock0; shp->shm_ctim = get_seconds(); break; default: err = -EINVAL; goto out_unlock1; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); out_up: up_write(&shm_ids(ns).rwsem); return err; } static int shmctl_nolock(struct ipc_namespace *ns, int shmid, int cmd, int version, void __user *buf) { int err; struct shmid_kernel *shp; /* preliminary security checks for *_INFO */ if (cmd == IPC_INFO || cmd == SHM_INFO) { err = security_shm_shmctl(NULL, cmd); if (err) return err; } switch (cmd) { case IPC_INFO: { struct shminfo64 shminfo; memset(&shminfo, 0, sizeof(shminfo)); shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni; shminfo.shmmax = ns->shm_ctlmax; shminfo.shmall = ns->shm_ctlall; shminfo.shmmin = SHMMIN; if(copy_shminfo_to_user (buf, &shminfo, version)) return -EFAULT; down_read(&shm_ids(ns).rwsem); err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if(err<0) err = 0; goto out; } case SHM_INFO: { struct shm_info shm_info; memset(&shm_info, 0, sizeof(shm_info)); down_read(&shm_ids(ns).rwsem); shm_info.used_ids = shm_ids(ns).in_use; shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); shm_info.shm_tot = ns->shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; err = ipc_get_maxid(&shm_ids(ns)); up_read(&shm_ids(ns).rwsem); if (copy_to_user(buf, &shm_info, sizeof(shm_info))) { err = -EFAULT; goto out; } err = err < 0 ? 
0 : err; goto out; } case SHM_STAT: case IPC_STAT: { struct shmid64_ds tbuf; int result; rcu_read_lock(); if (cmd == SHM_STAT) { shp = shm_obtain_object(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = shp->shm_perm.id; } else { shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } result = 0; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) goto out_unlock; err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock; memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); tbuf.shm_segsz = shp->shm_segsz; tbuf.shm_atime = shp->shm_atim; tbuf.shm_dtime = shp->shm_dtim; tbuf.shm_ctime = shp->shm_ctim; tbuf.shm_cpid = shp->shm_cprid; tbuf.shm_lpid = shp->shm_lprid; tbuf.shm_nattch = shp->shm_nattch; rcu_read_unlock(); if (copy_shmid_to_user(buf, &tbuf, version)) err = -EFAULT; else err = result; goto out; } default: return -EINVAL; } out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) { struct shmid_kernel *shp; int err, version; struct ipc_namespace *ns; if (cmd < 0 || shmid < 0) return -EINVAL; version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; switch (cmd) { case IPC_INFO: case SHM_INFO: case SHM_STAT: case IPC_STAT: return shmctl_nolock(ns, shmid, cmd, version, buf); case IPC_RMID: case IPC_SET: return shmctl_down(ns, shmid, cmd, buf, version); case SHM_LOCK: case SHM_UNLOCK: { struct file *shm_file; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock1; } audit_ipc_obj(&(shp->shm_perm)); err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock1; ipc_lock_object(&shp->shm_perm); if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { kuid_t euid = current_euid(); err = -EPERM; if (!uid_eq(euid, shp->shm_perm.uid) && !uid_eq(euid, shp->shm_perm.cuid)) goto out_unlock0; if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) goto out_unlock0; } shm_file = shp->shm_file; /* check if shm_destroy() is tearing down shp */ if (shm_file == NULL) { err = -EIDRM; goto out_unlock0; } if (is_file_hugepages(shm_file)) goto out_unlock0; if (cmd == SHM_LOCK) { struct user_struct *user = current_user(); err = shmem_lock(shm_file, 1, user); if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { shp->shm_perm.mode |= SHM_LOCKED; shp->mlock_user = user; } goto out_unlock0; } /* SHM_UNLOCK */ if (!(shp->shm_perm.mode & SHM_LOCKED)) goto out_unlock0; shmem_lock(shm_file, 0, shp->mlock_user); shp->shm_perm.mode &= ~SHM_LOCKED; shp->mlock_user = NULL; get_file(shm_file); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); shmem_unlock_mapping(shm_file->f_mapping); fput(shm_file); return err; } default: return -EINVAL; } out_unlock0: ipc_unlock_object(&shp->shm_perm); out_unlock1: rcu_read_unlock(); return err; } /* * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists. * * NOTE! Despite the name, this is NOT a direct system call entrypoint. The * "raddr" thing points to kernel space, and there has to be a wrapper around * this. 
*/ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, unsigned long shmlba) { struct shmid_kernel *shp; unsigned long addr; unsigned long size; struct file * file; int err; unsigned long flags; unsigned long prot; int acc_mode; struct ipc_namespace *ns; struct shm_file_data *sfd; struct path path; fmode_t f_mode; unsigned long populate = 0; err = -EINVAL; if (shmid < 0) goto out; else if ((addr = (ulong)shmaddr)) { if (addr & (shmlba - 1)) { if (shmflg & SHM_RND) addr &= ~(shmlba - 1); /* round down */ else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) #endif goto out; } flags = MAP_SHARED | MAP_FIXED; } else { if ((shmflg & SHM_REMAP)) goto out; flags = MAP_SHARED; } if (shmflg & SHM_RDONLY) { prot = PROT_READ; acc_mode = S_IRUGO; f_mode = FMODE_READ; } else { prot = PROT_READ | PROT_WRITE; acc_mode = S_IRUGO | S_IWUGO; f_mode = FMODE_READ | FMODE_WRITE; } if (shmflg & SHM_EXEC) { prot |= PROT_EXEC; acc_mode |= S_IXUGO; } /* * We cannot rely on the fs check since SYSV IPC does have an * additional creator id... */ ns = current->nsproxy->ipc_ns; rcu_read_lock(); shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); goto out_unlock; } err = -EACCES; if (ipcperms(ns, &shp->shm_perm, acc_mode)) goto out_unlock; err = security_shm_shmat(shp, shmaddr, shmflg); if (err) goto out_unlock; ipc_lock_object(&shp->shm_perm); /* check if shm_destroy() is tearing down shp */ if (shp->shm_file == NULL) { ipc_unlock_object(&shp->shm_perm); err = -EIDRM; goto out_unlock; } path = shp->shm_file->f_path; path_get(&path); shp->shm_nattch++; size = i_size_read(path.dentry->d_inode); ipc_unlock_object(&shp->shm_perm); rcu_read_unlock(); err = -ENOMEM; sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); if (!sfd) { path_put(&path); goto out_nattch; } file = alloc_file(&path, f_mode, is_file_hugepages(shp->shm_file) ? &shm_file_operations_huge : &shm_file_operations); err = PTR_ERR(file); if (IS_ERR(file)) { kfree(sfd); path_put(&path); goto out_nattch; } file->private_data = sfd; file->f_mapping = shp->shm_file->f_mapping; sfd->id = shp->shm_perm.id; sfd->ns = get_ipc_ns(ns); sfd->file = shp->shm_file; sfd->vm_ops = NULL; err = security_mmap_file(file, prot, flags); if (err) goto out_fput; down_write(&current->mm->mmap_sem); if (addr && !(shmflg & SHM_REMAP)) { err = -EINVAL; if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; /* * If shm segment goes below stack, make sure there is some * space left for the stack to grow (at least 4 pages). */ if (addr < current->mm->start_stack && addr > current->mm->start_stack - size - PAGE_SIZE * 5) goto invalid; } addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); *raddr = addr; err = 0; if (IS_ERR_VALUE(addr)) err = (long)addr; invalid: up_write(&current->mm->mmap_sem); if (populate) mm_populate(addr, populate); out_fput: fput(file); out_nattch: down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); BUG_ON(IS_ERR(shp)); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); up_write(&shm_ids(ns).rwsem); return err; out_unlock: rcu_read_unlock(); out: return err; } SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) { unsigned long ret; long err; err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); if (err) return err; force_successful_syscall_return(); return (long)ret; } /* * detach and kill segment if marked destroyed. * The work is done in shm_close. 
*/ SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long addr = (unsigned long)shmaddr; int retval = -EINVAL; #ifdef CONFIG_MMU loff_t size = 0; struct vm_area_struct *next; #endif if (addr & ~PAGE_MASK) return retval; down_write(&mm->mmap_sem); /* * This function tries to be smart and unmap shm segments that * were modified by partial mlock or munmap calls: * - It first determines the size of the shm segment that should be * unmapped: It searches for a vma that is backed by shm and that * started at address shmaddr. It records it's size and then unmaps * it. * - Then it unmaps all shm vmas that started at shmaddr and that * are within the initially determined size. * Errors from do_munmap are ignored: the function only fails if * it's called with invalid parameters or if it's called to unmap * a part of a vma. Both calls in this function are for full vmas, * the parameters are directly copied from the vma itself and always * valid - therefore do_munmap cannot fail. (famous last words?) */ /* * If it had been mremap()'d, the starting address would not * match the usual checks anyway. So assume all vma's are * above the starting address given. */ vma = find_vma(mm, addr); #ifdef CONFIG_MMU while (vma) { next = vma->vm_next; /* * Check if the starting address would match, i.e. it's * a fragment created by mprotect() and/or munmap(), or it * otherwise it starts at this address with no hassles. */ if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { size = file_inode(vma->vm_file)->i_size; do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); /* * We discovered the size of the shm segment, so * break out of here and fall through to the next * loop that uses the size information to stop * searching for matching vma's. */ retval = 0; vma = next; break; } vma = next; } /* * We need look no further than the maximum address a fragment * could possibly have landed at. Also cast things to loff_t to * prevent overflows and make comparisons vs. equal-width types. 
*/ size = PAGE_ALIGN(size); while (vma && (loff_t)(vma->vm_end - addr) <= size) { next = vma->vm_next; /* finding a matching vma now does not alter retval */ if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); vma = next; } #else /* CONFIG_MMU */ /* under NOMMU conditions, the exact address to be destroyed must be * given */ if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); retval = 0; } #endif up_write(&mm->mmap_sem); return retval; } #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it) { struct user_namespace *user_ns = seq_user_ns(s); struct shmid_kernel *shp = it; unsigned long rss = 0, swp = 0; shm_add_rss_swap(shp, &rss, &swp); #if BITS_PER_LONG <= 32 #define SIZE_SPEC "%10lu" #else #define SIZE_SPEC "%21lu" #endif return seq_printf(s, "%10d %10d %4o " SIZE_SPEC " %5u %5u " "%5lu %5u %5u %5u %5u %10lu %10lu %10lu " SIZE_SPEC " " SIZE_SPEC "\n", shp->shm_perm.key, shp->shm_perm.id, shp->shm_perm.mode, shp->shm_segsz, shp->shm_cprid, shp->shm_lprid, shp->shm_nattch, from_kuid_munged(user_ns, shp->shm_perm.uid), from_kgid_munged(user_ns, shp->shm_perm.gid), from_kuid_munged(user_ns, shp->shm_perm.cuid), from_kgid_munged(user_ns, shp->shm_perm.cgid), shp->shm_atim, shp->shm_dtim, shp->shm_ctim, rss * PAGE_SIZE, swp * PAGE_SIZE); } #endif
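
As a quick illustration of the orphan sweep that shm_destroy_orphaned() and shm_try_destroy_orphaned() implement in the listing above, here is a minimal user-space sketch. It assumes nothing beyond standard C: struct seg, may_destroy(), and sweep_orphans() are hypothetical stand-ins for shmid_kernel, shm_may_destroy(), and the idr_for_each() walk, not kernel API.

#include <stdbool.h>
#include <stddef.h>

struct seg {
	void *creator;		/* NULL once the creating task has exited */
	unsigned long nattch;	/* number of live attaches */
	bool marked_dest;	/* IPC_RMID was requested (SHM_DEST) */
};

/* Same predicate as shm_may_destroy() above, restated over the
 * stand-in fields: no users left, and reaping is either forced by
 * the namespace or was explicitly requested. */
static bool may_destroy(const struct seg *s, bool rmid_forced)
{
	return s->nattch == 0 && (rmid_forced || s->marked_dest);
}

/* Walk every slot the way idr_for_each() drives
 * shm_try_destroy_orphaned(): skip empty slots and segments whose
 * creator is still alive, reap the rest if the predicate allows. */
static void sweep_orphans(struct seg **tbl, size_t n, bool rmid_forced)
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct seg *s = tbl[i];

		if (s == NULL || s->creator != NULL)
			continue;	/* empty, or creator still alive */
		if (may_destroy(s, rmid_forced))
			tbl[i] = NULL;	/* destroy_segment(s) would run here */
	}
}
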
func_before:

static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
						shp->mlock_user);
	fput (shp->shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}
func_after:

static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}
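
The func_before/func_after pair isolates the CVE-2013-7026 (CWE-362) fix: the patched shm_destroy() snapshots shp->shm_file and clears the field while the segment is still locked, turning the pointer into a teardown marker that any later lock holder can test. Below is a minimal user-space sketch of that pattern, assuming only POSIX threads; struct segment, segment_destroy(), and segment_use() are hypothetical names, and a heap string with free() stands in for struct file and fput().

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct segment {
	pthread_mutex_t lock;
	char *backing;			/* stands in for shp->shm_file */
};

/* Destroyer: clear the pointer while holding the lock, drop the
 * reference only after unlocking, mirroring the patched shm_destroy(). */
static void segment_destroy(struct segment *seg)
{
	char *backing;

	pthread_mutex_lock(&seg->lock);
	backing = seg->backing;
	seg->backing = NULL;		/* teardown marker, as in the patch */
	pthread_mutex_unlock(&seg->lock);
	free(backing);			/* drop the reference outside the lock */
}

/* Concurrent path: after winning the lock, re-check the pointer and
 * bail out if teardown already started. */
static int segment_use(struct segment *seg)
{
	pthread_mutex_lock(&seg->lock);
	if (seg->backing == NULL) {	/* destroy already ran */
		pthread_mutex_unlock(&seg->lock);
		return -1;		/* the kernel returns -EIDRM here */
	}
	printf("using %s\n", seg->backing);
	pthread_mutex_unlock(&seg->lock);
	return 0;
}

int main(void)
{
	struct segment seg = { PTHREAD_MUTEX_INITIALIZER, strdup("seg0") };

	segment_use(&seg);		/* succeeds: prints "using seg0" */
	segment_destroy(&seg);
	return segment_use(&seg) == -1 ? 0 : 1;	/* detects teardown */
}

Dropping the reference outside the lock keeps the critical section short while still guaranteeing that any path which acquires the lock afterwards observes the NULL marker.
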
diff:

  added:
    211:	struct file *shm_file;
    212:
    213:	shm_file = shp->shm_file;
    214:	shp->shm_file = NULL;
    218:	if (!is_file_hugepages(shm_file))
    219:		shmem_lock(shm_file, 0, shp->mlock_user);
    221:		user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user);
    222:	fput(shm_file);
    989:
    990:	/* check if shm_destroy() is tearing down shp */
    991:	if (shm_file == NULL) {
    992:		err = -EIDRM;
    993:		goto out_unlock0;
    994:	}
    995:
    1114:
    1115:	/* check if shm_destroy() is tearing down shp */
    1116:	if (shp->shm_file == NULL) {
    1117:		ipc_unlock_object(&shp->shm_perm);
    1118:		err = -EIDRM;
    1119:		goto out_unlock;
    1120:	}
    1121:

  deleted:
    214:	if (!is_file_hugepages(shp->shm_file))
    215:		shmem_lock(shp->shm_file, 0, shp->mlock_user);
    217:		user_shm_unlock(file_inode(shp->shm_file)->i_size,
    218:					shp->mlock_user);
    219:	fput (shp->shm_file);
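
Beyond shm_destroy() itself, the added hunks land in two lock-holding paths visible in the full listing above: the SHM_LOCK/SHM_UNLOCK branch of shmctl() (lines 989-995) and do_shmat() (lines 1114-1121). Both re-check shm_file after taking ipc_lock_object() and return -EIDRM if shm_destroy() has already begun tearing the segment down, closing the race window the CWE-362 classification refers to.
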
num_lines_added: 23
num_lines_deleted: 5
num_lines_in_file: 947
num_tokens_in_file: 5,897
num_lines_in_method: 13
num_tokens_in_method: 103
method_complexity: 3
repo: https://github.com/torvalds/linux
cve_id: CVE-2013-7026
cwe_id: CWE-362
id: 176
file_name: vgacon.c
programming_language: C
method_name: vgacon_startup

code_before:
/* * linux/drivers/video/vgacon.c -- Low level VGA based console driver * * Created 28 Sep 1997 by Geert Uytterhoeven * * Rewritten by Martin Mares <mj@ucw.cz>, July 1998 * * This file is based on the old console.c, vga.c and vesa_blank.c drivers. * * Copyright (C) 1991, 1992 Linus Torvalds * 1995 Jay Estabrook * * User definable mapping table and font loading by Eugene G. Crosser, * <crosser@average.org> * * Improved loadable font/UTF-8 support by H. Peter Anvin * Feb-Sep 1995 <peter.anvin@linux.org> * * Colour palette handling, by Simon Tatham * 17-Jun-95 <sgt20@cam.ac.uk> * * if 512 char mode is already enabled don't re-enable it, * because it causes screen to flicker, by Mitja Horvat * 5-May-96 <mitja.horvat@guest.arnes.si> * * Use 2 outw instead of 4 outb_p to reduce erroneous text * flashing on RHS of screen during heavy console scrolling . * Oct 1996, Paul Gortmaker. * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/console.h> #include <linux/string.h> #include <linux/kd.h> #include <linux/slab.h> #include <linux/vt_kern.h> #include <linux/sched.h> #include <linux/selection.h> #include <linux/spinlock.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/screen_info.h> #include <video/vga.h> #include <asm/io.h> static DEFINE_RAW_SPINLOCK(vga_lock); static int cursor_size_lastfrom; static int cursor_size_lastto; static u32 vgacon_xres; static u32 vgacon_yres; static struct vgastate vgastate; #define BLANK 0x0020 #define VGA_FONTWIDTH 8 /* VGA does not support fontwidths != 8 */ /* * Interface used by the world */ static const char *vgacon_startup(void); static void vgacon_init(struct vc_data *c, int init); static void vgacon_deinit(struct vc_data *c); static void vgacon_cursor(struct vc_data *c, int mode); static int vgacon_switch(struct vc_data *c); static int vgacon_blank(struct vc_data *c, int blank, int mode_switch); static void vgacon_scrolldelta(struct vc_data *c, int lines); static int vgacon_set_origin(struct vc_data *c); static void vgacon_save_screen(struct vc_data *c); static void vgacon_invert_region(struct vc_data *c, u16 * p, int count); static struct uni_pagedir *vgacon_uni_pagedir; static int vgacon_refcount; /* Description of the hardware situation */ static bool vga_init_done; static unsigned long vga_vram_base __read_mostly; /* Base of video memory */ static unsigned long vga_vram_end __read_mostly; /* End of video memory */ static unsigned int vga_vram_size __read_mostly; /* Size of video memory */ static u16 vga_video_port_reg __read_mostly; /* Video register select port */ static u16 vga_video_port_val __read_mostly; /* Video register value port */ static unsigned int vga_video_num_columns; /* Number of text columns */ static unsigned int vga_video_num_lines; /* Number of text lines */ static bool vga_can_do_color; /* Do we support colors? 
*/ static unsigned int vga_default_font_height __read_mostly; /* Height of default screen font */ static unsigned char vga_video_type __read_mostly; /* Card type */ static bool vga_font_is_default = true; static int vga_vesa_blanked; static bool vga_palette_blanked; static bool vga_is_gfx; static bool vga_512_chars; static int vga_video_font_height; static int vga_scan_lines __read_mostly; static unsigned int vga_rolled_over; static bool vgacon_text_mode_force; static bool vga_hardscroll_enabled; static bool vga_hardscroll_user_enable = true; bool vgacon_text_force(void) { return vgacon_text_mode_force; } EXPORT_SYMBOL(vgacon_text_force); static int __init text_mode(char *str) { vgacon_text_mode_force = true; pr_warn("You have booted with nomodeset. This means your GPU drivers are DISABLED\n"); pr_warn("Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly\n"); pr_warn("Unless you actually understand what nomodeset does, you should reboot without enabling it\n"); return 1; } /* force text mode - used by kernel modesetting */ __setup("nomodeset", text_mode); static int __init no_scroll(char *str) { /* * Disabling scrollback is required for the Braillex ib80-piezo * Braille reader made by F.H. Papenmeier (Germany). * Use the "no-scroll" bootflag. */ vga_hardscroll_user_enable = vga_hardscroll_enabled = false; return 1; } __setup("no-scroll", no_scroll); /* * By replacing the four outb_p with two back to back outw, we can reduce * the window of opportunity to see text mislocated to the RHS of the * console during heavy scrolling activity. However there is the remote * possibility that some pre-dinosaur hardware won't like the back to back * I/O. Since the Xservers get away with it, we should be able to as well. */ static inline void write_vga(unsigned char reg, unsigned int val) { unsigned int v1, v2; unsigned long flags; /* * ddprintk might set the console position from interrupt * handlers, thus the write has to be IRQ-atomic. 
*/ raw_spin_lock_irqsave(&vga_lock, flags); v1 = reg + (val & 0xff00); v2 = reg + 1 + ((val << 8) & 0xff00); outw(v1, vga_video_port_reg); outw(v2, vga_video_port_reg); raw_spin_unlock_irqrestore(&vga_lock, flags); } static inline void vga_set_mem_top(struct vc_data *c) { write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); } #ifdef CONFIG_VGACON_SOFT_SCROLLBACK /* software scrollback */ struct vgacon_scrollback_info { void *data; int tail; int size; int rows; int cnt; int cur; int save; int restore; }; static struct vgacon_scrollback_info *vgacon_scrollback_cur; static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES]; static bool scrollback_persistent = \ IS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT); module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000); MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles"); static void vgacon_scrollback_reset(int vc_num, size_t reset_size) { struct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num]; if (scrollback->data && reset_size > 0) memset(scrollback->data, 0, reset_size); scrollback->cnt = 0; scrollback->tail = 0; scrollback->cur = 0; } static void vgacon_scrollback_init(int vc_num) { int pitch = vga_video_num_columns * 2; size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; int rows = size / pitch; void *data; data = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024, GFP_NOWAIT); vgacon_scrollbacks[vc_num].data = data; vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; vgacon_scrollback_cur->rows = rows - 1; vgacon_scrollback_cur->size = rows * pitch; vgacon_scrollback_reset(vc_num, size); } static void vgacon_scrollback_switch(int vc_num) { if (!scrollback_persistent) vc_num = 0; if (!vgacon_scrollbacks[vc_num].data) { vgacon_scrollback_init(vc_num); } else { if (scrollback_persistent) { vgacon_scrollback_cur = &vgacon_scrollbacks[vc_num]; } else { size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; vgacon_scrollback_reset(vc_num, size); } } } static void vgacon_scrollback_startup(void) { vgacon_scrollback_cur = &vgacon_scrollbacks[0]; vgacon_scrollback_init(0); } static void vgacon_scrollback_update(struct vc_data *c, int t, int count) { void *p; if (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size || c->vc_num != fg_console) return; p = (void *) (c->vc_origin + t * c->vc_size_row); while (count--) { if ((vgacon_scrollback_cur->tail + c->vc_size_row) > vgacon_scrollback_cur->size) vgacon_scrollback_cur->tail = 0; scr_memcpyw(vgacon_scrollback_cur->data + vgacon_scrollback_cur->tail, p, c->vc_size_row); vgacon_scrollback_cur->cnt++; p += c->vc_size_row; vgacon_scrollback_cur->tail += c->vc_size_row; if (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size) vgacon_scrollback_cur->tail = 0; if (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows) vgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows; vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; } } static void vgacon_restore_screen(struct vc_data *c) { c->vc_origin = c->vc_visible_origin; vgacon_scrollback_cur->save = 0; if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, c->vc_screenbuf_size > vga_vram_size ? 
vga_vram_size : c->vc_screenbuf_size); vgacon_scrollback_cur->restore = 1; vgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt; } } static void vgacon_scrolldelta(struct vc_data *c, int lines) { int start, end, count, soff; if (!lines) { vgacon_restore_screen(c); return; } if (!vgacon_scrollback_cur->data) return; if (!vgacon_scrollback_cur->save) { vgacon_cursor(c, CM_ERASE); vgacon_save_screen(c); c->vc_origin = (unsigned long)c->vc_screenbuf; vgacon_scrollback_cur->save = 1; } vgacon_scrollback_cur->restore = 0; start = vgacon_scrollback_cur->cur + lines; end = start + abs(lines); if (start < 0) start = 0; if (start > vgacon_scrollback_cur->cnt) start = vgacon_scrollback_cur->cnt; if (end < 0) end = 0; if (end > vgacon_scrollback_cur->cnt) end = vgacon_scrollback_cur->cnt; vgacon_scrollback_cur->cur = start; count = end - start; soff = vgacon_scrollback_cur->tail - ((vgacon_scrollback_cur->cnt - end) * c->vc_size_row); soff -= count * c->vc_size_row; if (soff < 0) soff += vgacon_scrollback_cur->size; count = vgacon_scrollback_cur->cnt - start; if (count > c->vc_rows) count = c->vc_rows; if (count) { int copysize; int diff = c->vc_rows - count; void *d = (void *) c->vc_visible_origin; void *s = (void *) c->vc_screenbuf; count *= c->vc_size_row; /* how much memory to end of buffer left? */ copysize = min(count, vgacon_scrollback_cur->size - soff); scr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize); d += copysize; count -= copysize; if (count) { scr_memcpyw(d, vgacon_scrollback_cur->data, count); d += count; } if (diff) scr_memcpyw(d, s, diff * c->vc_size_row); } else vgacon_cursor(c, CM_MOVE); } static void vgacon_flush_scrollback(struct vc_data *c) { size_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024; vgacon_scrollback_reset(c->vc_num, size); } #else #define vgacon_scrollback_startup(...) do { } while (0) #define vgacon_scrollback_init(...) do { } while (0) #define vgacon_scrollback_update(...) do { } while (0) #define vgacon_scrollback_switch(...) do { } while (0) static void vgacon_restore_screen(struct vc_data *c) { if (c->vc_origin != c->vc_visible_origin) vgacon_scrolldelta(c, 0); } static void vgacon_scrolldelta(struct vc_data *c, int lines) { vc_scrolldelta_helper(c, lines, vga_rolled_over, (void *)vga_vram_base, vga_vram_size); vga_set_mem_top(c); } static void vgacon_flush_scrollback(struct vc_data *c) { } #endif /* CONFIG_VGACON_SOFT_SCROLLBACK */ static const char *vgacon_startup(void) { const char *display_desc = NULL; u16 saved1, saved2; volatile u16 *p; if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB || screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) { no_vga: #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; return conswitchp->con_startup(); #else return NULL; #endif } /* boot_params.screen_info reasonably initialized? 
*/ if ((screen_info.orig_video_lines == 0) || (screen_info.orig_video_cols == 0)) goto no_vga; /* VGA16 modes are not handled by VGACON */ if ((screen_info.orig_video_mode == 0x0D) || /* 320x200/4 */ (screen_info.orig_video_mode == 0x0E) || /* 640x200/4 */ (screen_info.orig_video_mode == 0x10) || /* 640x350/4 */ (screen_info.orig_video_mode == 0x12) || /* 640x480/4 */ (screen_info.orig_video_mode == 0x6A)) /* 800x600/4 (VESA) */ goto no_vga; vga_video_num_lines = screen_info.orig_video_lines; vga_video_num_columns = screen_info.orig_video_cols; vgastate.vgabase = NULL; if (screen_info.orig_video_mode == 7) { /* Monochrome display */ vga_vram_base = 0xb0000; vga_video_port_reg = VGA_CRT_IM; vga_video_port_val = VGA_CRT_DM; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { static struct resource ega_console_resource = { .name = "ega", .flags = IORESOURCE_IO, .start = 0x3B0, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_EGAM; vga_vram_size = 0x8000; display_desc = "EGA+"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource mda1_console_resource = { .name = "mda", .flags = IORESOURCE_IO, .start = 0x3B0, .end = 0x3BB }; static struct resource mda2_console_resource = { .name = "mda", .flags = IORESOURCE_IO, .start = 0x3BF, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_MDA; vga_vram_size = 0x2000; display_desc = "*MDA"; request_resource(&ioport_resource, &mda1_console_resource); request_resource(&ioport_resource, &mda2_console_resource); vga_video_font_height = 14; } } else { /* If not, it is color. */ vga_can_do_color = true; vga_vram_base = 0xb8000; vga_video_port_reg = VGA_CRT_IC; vga_video_port_val = VGA_CRT_DC; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { int i; vga_vram_size = 0x8000; if (!screen_info.orig_video_isVGA) { static struct resource ega_console_resource = { .name = "ega", .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_EGAC; display_desc = "EGA"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource vga_console_resource = { .name = "vga+", .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_VGAC; display_desc = "VGA+"; request_resource(&ioport_resource, &vga_console_resource); /* * Normalise the palette registers, to point * the 16 screen colours to the first 16 * DAC entries. */ for (i = 0; i < 16; i++) { inb_p(VGA_IS1_RC); outb_p(i, VGA_ATT_W); outb_p(i, VGA_ATT_W); } outb_p(0x20, VGA_ATT_W); /* * Now set the DAC registers back to their * default values */ for (i = 0; i < 16; i++) { outb_p(color_table[i], VGA_PEL_IW); outb_p(default_red[i], VGA_PEL_D); outb_p(default_grn[i], VGA_PEL_D); outb_p(default_blu[i], VGA_PEL_D); } } } else { static struct resource cga_console_resource = { .name = "cga", .flags = IORESOURCE_IO, .start = 0x3D4, .end = 0x3D5 }; vga_video_type = VIDEO_TYPE_CGA; vga_vram_size = 0x2000; display_desc = "*CGA"; request_resource(&ioport_resource, &cga_console_resource); vga_video_font_height = 8; } } vga_vram_base = VGA_MAP_MEM(vga_vram_base, vga_vram_size); vga_vram_end = vga_vram_base + vga_vram_size; /* * Find out if there is a graphics card present. * Are there smarter methods around? 
*/ p = (volatile u16 *) vga_vram_base; saved1 = scr_readw(p); saved2 = scr_readw(p + 1); scr_writew(0xAA55, p); scr_writew(0x55AA, p + 1); if (scr_readw(p) != 0xAA55 || scr_readw(p + 1) != 0x55AA) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(0x55AA, p); scr_writew(0xAA55, p + 1); if (scr_readw(p) != 0x55AA || scr_readw(p + 1) != 0xAA55) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(saved1, p); scr_writew(saved2, p + 1); if (vga_video_type == VIDEO_TYPE_EGAC || vga_video_type == VIDEO_TYPE_VGAC || vga_video_type == VIDEO_TYPE_EGAM) { vga_hardscroll_enabled = vga_hardscroll_user_enable; vga_default_font_height = screen_info.orig_video_points; vga_video_font_height = screen_info.orig_video_points; /* This may be suboptimal but is a safe bet - go with it */ vga_scan_lines = vga_video_font_height * vga_video_num_lines; } vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; if (!vga_init_done) { vgacon_scrollback_startup(); vga_init_done = true; } return display_desc; } static void vgacon_init(struct vc_data *c, int init) { struct uni_pagedir *p; /* * We cannot be loaded as a module, therefore init is always 1, * but vgacon_init can be called more than once, and init will * not be 1. */ c->vc_can_do_color = vga_can_do_color; /* set dimensions manually if init != 0 since vc_resize() will fail */ if (init) { c->vc_cols = vga_video_num_columns; c->vc_rows = vga_video_num_lines; } else vc_resize(c, vga_video_num_columns, vga_video_num_lines); c->vc_scan_lines = vga_scan_lines; c->vc_font.height = vga_video_font_height; c->vc_complement_mask = 0x7700; if (vga_512_chars) c->vc_hi_font_mask = 0x0800; p = *c->vc_uni_pagedir_loc; if (c->vc_uni_pagedir_loc != &vgacon_uni_pagedir) { con_free_unimap(c); c->vc_uni_pagedir_loc = &vgacon_uni_pagedir; vgacon_refcount++; } if (!vgacon_uni_pagedir && p) con_set_default_unimap(c); /* Only set the default if the user didn't deliberately override it */ if (global_cursor_default == -1) global_cursor_default = !(screen_info.flags & VIDEO_FLAGS_NOCURSOR); } static void vgacon_deinit(struct vc_data *c) { /* When closing the active console, reset video origin */ if (con_is_visible(c)) { c->vc_visible_origin = vga_vram_base; vga_set_mem_top(c); } if (!--vgacon_refcount) con_free_unimap(c); c->vc_uni_pagedir_loc = &c->vc_uni_pagedir; con_set_default_unimap(c); } static u8 vgacon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, bool blink, bool underline, bool reverse, bool italic) { u8 attr = color; if (vga_can_do_color) { if (italic) attr = (attr & 0xF0) | c->vc_itcolor; else if (underline) attr = (attr & 0xf0) | c->vc_ulcolor; else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | c->vc_halfcolor; } if (reverse) attr = ((attr) & 0x88) | ((((attr) >> 4) | ((attr) << 4)) & 0x77); if (blink) attr ^= 0x80; if (intensity == VCI_BOLD) attr ^= 0x08; if (!vga_can_do_color) { if (italic) attr = (attr & 0xF8) | 0x02; else if (underline) attr = (attr & 0xf8) | 0x01; else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | 0x08; } return attr; } static void vgacon_invert_region(struct vc_data *c, u16 * p, int count) { const bool col = vga_can_do_color; while (count--) { u16 a = scr_readw(p); if (col) a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4); else a ^= ((a & 0x0700) == 0x0100) ? 
0x7000 : 0x7700; scr_writew(a, p++); } } static void vgacon_set_cursor_size(int xpos, int from, int to) { unsigned long flags; int curs, cure; if ((from == cursor_size_lastfrom) && (to == cursor_size_lastto)) return; cursor_size_lastfrom = from; cursor_size_lastto = to; raw_spin_lock_irqsave(&vga_lock, flags); if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); curs = inb_p(vga_video_port_val); outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); cure = inb_p(vga_video_port_val); } else { curs = 0; cure = 0; } curs = (curs & 0xc0) | from; cure = (cure & 0xe0) | to; outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); outb_p(curs, vga_video_port_val); outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); outb_p(cure, vga_video_port_val); raw_spin_unlock_irqrestore(&vga_lock, flags); } static void vgacon_cursor(struct vc_data *c, int mode) { if (c->vc_mode != KD_TEXT) return; vgacon_restore_screen(c); switch (mode) { case CM_ERASE: write_vga(14, (c->vc_pos - vga_vram_base) / 2); if (vga_video_type >= VIDEO_TYPE_VGAC) vgacon_set_cursor_size(c->state.x, 31, 30); else vgacon_set_cursor_size(c->state.x, 31, 31); break; case CM_MOVE: case CM_DRAW: write_vga(14, (c->vc_pos - vga_vram_base) / 2); switch (CUR_SIZE(c->vc_cursor_type)) { case CUR_UNDERLINE: vgacon_set_cursor_size(c->state.x, c->vc_font.height - (c->vc_font.height < 10 ? 2 : 3), c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_TWO_THIRDS: vgacon_set_cursor_size(c->state.x, c->vc_font.height / 3, c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_THIRD: vgacon_set_cursor_size(c->state.x, (c->vc_font.height * 2) / 3, c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_HALF: vgacon_set_cursor_size(c->state.x, c->vc_font.height / 2, c->vc_font.height - (c->vc_font.height < 10 ? 
1 : 2)); break; case CUR_NONE: if (vga_video_type >= VIDEO_TYPE_VGAC) vgacon_set_cursor_size(c->state.x, 31, 30); else vgacon_set_cursor_size(c->state.x, 31, 31); break; default: vgacon_set_cursor_size(c->state.x, 1, c->vc_font.height); break; } break; } } static int vgacon_doresize(struct vc_data *c, unsigned int width, unsigned int height) { unsigned long flags; unsigned int scanlines = height * c->vc_font.height; u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan; raw_spin_lock_irqsave(&vga_lock, flags); vgacon_xres = width * VGA_FONTWIDTH; vgacon_yres = height * c->vc_font.height; if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg); max_scan = inb_p(vga_video_port_val); if (max_scan & 0x80) scanlines <<= 1; outb_p(VGA_CRTC_MODE, vga_video_port_reg); mode = inb_p(vga_video_port_val); if (mode & 0x04) scanlines >>= 1; scanlines -= 1; scanlines_lo = scanlines & 0xff; outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg); r7 = inb_p(vga_video_port_val) & ~0x42; if (scanlines & 0x100) r7 |= 0x02; if (scanlines & 0x200) r7 |= 0x40; /* deprotect registers */ outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg); vsync_end = inb_p(vga_video_port_val); outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg); outb_p(vsync_end & ~0x80, vga_video_port_val); } outb_p(VGA_CRTC_H_DISP, vga_video_port_reg); outb_p(width - 1, vga_video_port_val); outb_p(VGA_CRTC_OFFSET, vga_video_port_reg); outb_p(width >> 1, vga_video_port_val); if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_V_DISP_END, vga_video_port_reg); outb_p(scanlines_lo, vga_video_port_val); outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg); outb_p(r7,vga_video_port_val); /* reprotect registers */ outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg); outb_p(vsync_end, vga_video_port_val); } raw_spin_unlock_irqrestore(&vga_lock, flags); return 0; } static int vgacon_switch(struct vc_data *c) { int x = c->vc_cols * VGA_FONTWIDTH; int y = c->vc_rows * c->vc_font.height; int rows = screen_info.orig_video_lines * vga_default_font_height/ c->vc_font.height; /* * We need to save screen size here as it's the only way * we can spot the screen has been resized and we need to * set size of freshly allocated screens ourselves. */ vga_video_num_columns = c->vc_cols; vga_video_num_lines = c->vc_rows; /* We can only copy out the size of the video buffer here, * otherwise we get into VGA BIOS */ if (!vga_is_gfx) { scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, c->vc_screenbuf_size > vga_vram_size ? 
vga_vram_size : c->vc_screenbuf_size); if ((vgacon_xres != x || vgacon_yres != y) && (!(vga_video_num_columns % 2) && vga_video_num_columns <= screen_info.orig_video_cols && vga_video_num_lines <= rows)) vgacon_doresize(c, c->vc_cols, c->vc_rows); } vgacon_scrollback_switch(c->vc_num); return 0; /* Redrawing not needed */ } static void vga_set_palette(struct vc_data *vc, const unsigned char *table) { int i, j; vga_w(vgastate.vgabase, VGA_PEL_MSK, 0xff); for (i = j = 0; i < 16; i++) { vga_w(vgastate.vgabase, VGA_PEL_IW, table[i]); vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); } } static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table) { if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked || !con_is_visible(vc)) return; vga_set_palette(vc, table); } /* structure holding original VGA register settings */ static struct { unsigned char SeqCtrlIndex; /* Sequencer Index reg. */ unsigned char CrtCtrlIndex; /* CRT-Contr. Index reg. */ unsigned char CrtMiscIO; /* Miscellaneous register */ unsigned char HorizontalTotal; /* CRT-Controller:00h */ unsigned char HorizDisplayEnd; /* CRT-Controller:01h */ unsigned char StartHorizRetrace; /* CRT-Controller:04h */ unsigned char EndHorizRetrace; /* CRT-Controller:05h */ unsigned char Overflow; /* CRT-Controller:07h */ unsigned char StartVertRetrace; /* CRT-Controller:10h */ unsigned char EndVertRetrace; /* CRT-Controller:11h */ unsigned char ModeControl; /* CRT-Controller:17h */ unsigned char ClockingMode; /* Seq-Controller:01h */ } vga_state; static void vga_vesa_blank(struct vgastate *state, int mode) { /* save original values of VGA controller registers */ if (!vga_vesa_blanked) { raw_spin_lock_irq(&vga_lock); vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I); vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg); vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R); raw_spin_unlock_irq(&vga_lock); outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ vga_state.HorizontalTotal = inb_p(vga_video_port_val); outb_p(0x01, vga_video_port_reg); /* HorizDisplayEnd */ vga_state.HorizDisplayEnd = inb_p(vga_video_port_val); outb_p(0x04, vga_video_port_reg); /* StartHorizRetrace */ vga_state.StartHorizRetrace = inb_p(vga_video_port_val); outb_p(0x05, vga_video_port_reg); /* EndHorizRetrace */ vga_state.EndHorizRetrace = inb_p(vga_video_port_val); outb_p(0x07, vga_video_port_reg); /* Overflow */ vga_state.Overflow = inb_p(vga_video_port_val); outb_p(0x10, vga_video_port_reg); /* StartVertRetrace */ vga_state.StartVertRetrace = inb_p(vga_video_port_val); outb_p(0x11, vga_video_port_reg); /* EndVertRetrace */ vga_state.EndVertRetrace = inb_p(vga_video_port_val); outb_p(0x17, vga_video_port_reg); /* ModeControl */ vga_state.ModeControl = inb_p(vga_video_port_val); vga_state.ClockingMode = vga_rseq(state->vgabase, VGA_SEQ_CLOCK_MODE); } /* assure that video is enabled */ /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */ raw_spin_lock_irq(&vga_lock); vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20); /* test for vertical retrace in process.... */ if ((vga_state.CrtMiscIO & 0x80) == 0x80) vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO & 0xEF); /* * Set <End of vertical retrace> to minimum (0) and * <Start of vertical Retrace> to maximum (incl. overflow) * Result: turn off vertical sync (VSync) pulse. 
*/ if (mode & VESA_VSYNC_SUSPEND) { outb_p(0x10, vga_video_port_reg); /* StartVertRetrace */ outb_p(0xff, vga_video_port_val); /* maximum value */ outb_p(0x11, vga_video_port_reg); /* EndVertRetrace */ outb_p(0x40, vga_video_port_val); /* minimum (bits 0..3) */ outb_p(0x07, vga_video_port_reg); /* Overflow */ outb_p(vga_state.Overflow | 0x84, vga_video_port_val); /* bits 9,10 of vert. retrace */ } if (mode & VESA_HSYNC_SUSPEND) { /* * Set <End of horizontal retrace> to minimum (0) and * <Start of horizontal Retrace> to maximum * Result: turn off horizontal sync (HSync) pulse. */ outb_p(0x04, vga_video_port_reg); /* StartHorizRetrace */ outb_p(0xff, vga_video_port_val); /* maximum */ outb_p(0x05, vga_video_port_reg); /* EndHorizRetrace */ outb_p(0x00, vga_video_port_val); /* minimum (0) */ } /* restore both index registers */ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); raw_spin_unlock_irq(&vga_lock); } static void vga_vesa_unblank(struct vgastate *state) { /* restore original values of VGA controller registers */ raw_spin_lock_irq(&vga_lock); vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO); outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ outb_p(vga_state.HorizontalTotal, vga_video_port_val); outb_p(0x01, vga_video_port_reg); /* HorizDisplayEnd */ outb_p(vga_state.HorizDisplayEnd, vga_video_port_val); outb_p(0x04, vga_video_port_reg); /* StartHorizRetrace */ outb_p(vga_state.StartHorizRetrace, vga_video_port_val); outb_p(0x05, vga_video_port_reg); /* EndHorizRetrace */ outb_p(vga_state.EndHorizRetrace, vga_video_port_val); outb_p(0x07, vga_video_port_reg); /* Overflow */ outb_p(vga_state.Overflow, vga_video_port_val); outb_p(0x10, vga_video_port_reg); /* StartVertRetrace */ outb_p(vga_state.StartVertRetrace, vga_video_port_val); outb_p(0x11, vga_video_port_reg); /* EndVertRetrace */ outb_p(vga_state.EndVertRetrace, vga_video_port_val); outb_p(0x17, vga_video_port_reg); /* ModeControl */ outb_p(vga_state.ModeControl, vga_video_port_val); /* ClockingMode */ vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode); /* restore index/control registers */ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); raw_spin_unlock_irq(&vga_lock); } static void vga_pal_blank(struct vgastate *state) { int i; vga_w(state->vgabase, VGA_PEL_MSK, 0xff); for (i = 0; i < 16; i++) { vga_w(state->vgabase, VGA_PEL_IW, i); vga_w(state->vgabase, VGA_PEL_D, 0); vga_w(state->vgabase, VGA_PEL_D, 0); vga_w(state->vgabase, VGA_PEL_D, 0); } } static int vgacon_blank(struct vc_data *c, int blank, int mode_switch) { switch (blank) { case 0: /* Unblank */ if (vga_vesa_blanked) { vga_vesa_unblank(&vgastate); vga_vesa_blanked = 0; } if (vga_palette_blanked) { vga_set_palette(c, color_table); vga_palette_blanked = false; return 0; } vga_is_gfx = false; /* Tell console.c that it has to restore the screen itself */ return 1; case 1: /* Normal blanking */ case -1: /* Obsolete */ if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) { vga_pal_blank(&vgastate); vga_palette_blanked = true; return 0; } vgacon_set_origin(c); scr_memsetw((void *) vga_vram_base, BLANK, c->vc_screenbuf_size); if (mode_switch) vga_is_gfx = true; return 1; default: /* VESA blanking */ if (vga_video_type == VIDEO_TYPE_VGAC) { vga_vesa_blank(&vgastate, blank - 1); vga_vesa_blanked = blank; } return 0; } } /* * PIO_FONT support. 
* * The font loading code goes back to the codepage package by * Joel Hoffman (joel@wam.umd.edu). (He reports that the original * reference is: "From: p. 307 of _Programmer's Guide to PC & PS/2 * Video Systems_ by Richard Wilton. 1987. Microsoft Press".) * * Change for certain monochrome monitors by Yury Shevchuck * (sizif@botik.yaroslavl.su). */ #define colourmap 0xa0000 /* Pauline Middelink <middelin@polyware.iaf.nl> reports that we should use 0xA0000 for the bwmap as well.. */ #define blackwmap 0xa0000 #define cmapsz 8192 static int vgacon_do_font_op(struct vgastate *state, char *arg, int set, bool ch512) { unsigned short video_port_status = vga_video_port_reg + 6; int font_select = 0x00, beg, i; char *charmap; bool clear_attribs = false; if (vga_video_type != VIDEO_TYPE_EGAM) { charmap = (char *) VGA_MAP_MEM(colourmap, 0); beg = 0x0e; } else { charmap = (char *) VGA_MAP_MEM(blackwmap, 0); beg = 0x0a; } #ifdef BROKEN_GRAPHICS_PROGRAMS /* * All fonts are loaded in slot 0 (0:1 for 512 ch) */ if (!arg) return -EINVAL; /* Return to default font not supported */ vga_font_is_default = false; font_select = ch512 ? 0x04 : 0x00; #else /* * The default font is kept in slot 0 and is never touched. * A custom font is loaded in slot 2 (256 ch) or 2:3 (512 ch) */ if (set) { vga_font_is_default = !arg; if (!arg) ch512 = false; /* Default font is always 256 */ font_select = arg ? (ch512 ? 0x0e : 0x0a) : 0x00; } if (!vga_font_is_default) charmap += 4 * cmapsz; #endif raw_spin_lock_irq(&vga_lock); /* First, the Sequencer */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1); /* CPU writes only to map 2 */ vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x04); /* Sequential addressing */ vga_wseq(state->vgabase, VGA_SEQ_MEMORY_MODE, 0x07); /* Clear synchronous reset */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x03); /* Now, the graphics controller, select map 2 */ vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x02); /* disable odd-even addressing */ vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00); /* map start at A000:0000 */ vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00); raw_spin_unlock_irq(&vga_lock); if (arg) { if (set) for (i = 0; i < cmapsz; i++) { vga_writeb(arg[i], charmap + i); cond_resched(); } else for (i = 0; i < cmapsz; i++) { arg[i] = vga_readb(charmap + i); cond_resched(); } /* * In 512-character mode, the character map is not contiguous if * we want to remain EGA compatible -- which we do */ if (ch512) { charmap += 2 * cmapsz; arg += cmapsz; if (set) for (i = 0; i < cmapsz; i++) { vga_writeb(arg[i], charmap + i); cond_resched(); } else for (i = 0; i < cmapsz; i++) { arg[i] = vga_readb(charmap + i); cond_resched(); } } } raw_spin_lock_irq(&vga_lock); /* First, the sequencer, Synchronous reset */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01); /* CPU writes to maps 0 and 1 */ vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x03); /* odd-even addressing */ vga_wseq(state->vgabase, VGA_SEQ_MEMORY_MODE, 0x03); /* Character Map Select */ if (set) vga_wseq(state->vgabase, VGA_SEQ_CHARACTER_MAP, font_select); /* clear synchronous reset */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x03); /* Now, the graphics controller, select map 0 for CPU */ vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x00); /* enable even-odd addressing */ vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x10); /* map starts at b800:0 or b000:0 */ vga_wgfx(state->vgabase, VGA_GFX_MISC, beg); /* if 512 char mode is already enabled don't re-enable it. 
*/ if ((set) && (ch512 != vga_512_chars)) { vga_512_chars = ch512; /* 256-char: enable intensity bit 512-char: disable intensity bit */ inb_p(video_port_status); /* clear address flip-flop */ /* color plane enable register */ vga_wattr(state->vgabase, VGA_ATC_PLANE_ENABLE, ch512 ? 0x07 : 0x0f); /* Wilton (1987) mentions the following; I don't know what it means, but it works, and it appears necessary */ inb_p(video_port_status); vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); clear_attribs = true; } raw_spin_unlock_irq(&vga_lock); if (clear_attribs) { for (i = 0; i < MAX_NR_CONSOLES; i++) { struct vc_data *c = vc_cons[i].d; if (c && c->vc_sw == &vga_con) { /* force hi font mask to 0, so we always clear the bit on either transition */ c->vc_hi_font_mask = 0x00; clear_buffer_attributes(c); c->vc_hi_font_mask = ch512 ? 0x0800 : 0; } } } return 0; } /* * Adjust the screen to fit a font of a certain height */ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight) { unsigned char ovr, vde, fsr; int rows, maxscan, i; rows = vc->vc_scan_lines / fontheight; /* Number of video rows we end up with */ maxscan = rows * fontheight - 1; /* Scan lines to actually display-1 */ /* Reprogram the CRTC for the new font size Note: the attempt to read the overflow register will fail on an EGA, but using 0xff for the previous value appears to be OK for EGA text modes in the range 257-512 scan lines, so I guess we don't need to worry about it. The same applies for the spill bits in the font size and cursor registers; they are write-only on EGA, but it appears that they are all don't care bits on EGA, so I guess it doesn't matter. */ raw_spin_lock_irq(&vga_lock); outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ ovr = inb_p(vga_video_port_val); outb_p(0x09, vga_video_port_reg); /* Font size register */ fsr = inb_p(vga_video_port_val); raw_spin_unlock_irq(&vga_lock); vde = maxscan & 0xff; /* Vertical display end reg */ ovr = (ovr & 0xbd) + /* Overflow register */ ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3); fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */ raw_spin_lock_irq(&vga_lock); outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ outb_p(ovr, vga_video_port_val); outb_p(0x09, vga_video_port_reg); /* Font size */ outb_p(fsr, vga_video_port_val); outb_p(0x12, vga_video_port_reg); /* Vertical display limit */ outb_p(vde, vga_video_port_val); raw_spin_unlock_irq(&vga_lock); vga_video_font_height = fontheight; for (i = 0; i < MAX_NR_CONSOLES; i++) { struct vc_data *c = vc_cons[i].d; if (c && c->vc_sw == &vga_con) { if (con_is_visible(c)) { /* void size to cause regs to be rewritten */ cursor_size_lastfrom = 0; cursor_size_lastto = 0; c->vc_sw->con_cursor(c, CM_DRAW); } c->vc_font.height = fontheight; vc_resize(c, 0, rows); /* Adjust console size */ } } return 0; } static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigned int flags) { unsigned charcount = font->charcount; int rc; if (vga_video_type < VIDEO_TYPE_EGAM) return -EINVAL; if (font->width != VGA_FONTWIDTH || (charcount != 256 && charcount != 512)) return -EINVAL; rc = vgacon_do_font_op(&vgastate, font->data, 1, charcount == 512); if (rc) return rc; if (!(flags & KD_FONT_FLAG_DONT_RECALC)) rc = vgacon_adjust_height(c, font->height); return rc; } static int vgacon_font_get(struct vc_data *c, struct console_font *font) { if (vga_video_type < VIDEO_TYPE_EGAM) return -EINVAL; font->width = VGA_FONTWIDTH; font->height = c->vc_font.height; font->charcount = vga_512_chars 
? 512 : 256; if (!font->data) return 0; return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars); } static int vgacon_resize(struct vc_data *c, unsigned int width, unsigned int height, unsigned int user) { if ((width << 1) * height > vga_vram_size) return -EINVAL; if (width % 2 || width > screen_info.orig_video_cols || height > (screen_info.orig_video_lines * vga_default_font_height)/ c->vc_font.height) /* let svgatextmode tinker with video timings and return success */ return (user) ? 0 : -EINVAL; if (con_is_visible(c) && !vga_is_gfx) /* who knows */ vgacon_doresize(c, width, height); return 0; } static int vgacon_set_origin(struct vc_data *c) { if (vga_is_gfx || /* We don't play origin tricks in graphic modes */ (console_blanked && !vga_palette_blanked)) /* Nor we write to blanked screens */ return 0; c->vc_origin = c->vc_visible_origin = vga_vram_base; vga_set_mem_top(c); vga_rolled_over = 0; return 1; } static void vgacon_save_screen(struct vc_data *c) { static int vga_bootup_console = 0; if (!vga_bootup_console) { /* This is a gross hack, but here is the only place we can * set bootup console parameters without messing up generic * console initialization routines. */ vga_bootup_console = 1; c->state.x = screen_info.orig_x; c->state.y = screen_info.orig_y; } /* We can't copy in more than the size of the video buffer, * or we'll be copying in VGA BIOS */ if (!vga_is_gfx) scr_memcpyw((u16 *) c->vc_screenbuf, (u16 *) c->vc_origin, c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size); } static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, enum con_scroll dir, unsigned int lines) { unsigned long oldo; unsigned int delta; if (t || b != c->vc_rows || vga_is_gfx || c->vc_mode != KD_TEXT) return false; if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2) return false; vgacon_restore_screen(c); oldo = c->vc_origin; delta = lines * c->vc_size_row; if (dir == SM_UP) { vgacon_scrollback_update(c, t, lines); if (c->vc_scr_end + delta >= vga_vram_end) { scr_memcpyw((u16 *) vga_vram_base, (u16 *) (oldo + delta), c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_base; vga_rolled_over = oldo - vga_vram_base; } else c->vc_origin += delta; scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size - delta), c->vc_video_erase_char, delta); } else { if (oldo - delta < vga_vram_base) { scr_memmovew((u16 *) (vga_vram_end - c->vc_screenbuf_size + delta), (u16 *) oldo, c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_end - c->vc_screenbuf_size; vga_rolled_over = 0; } else c->vc_origin -= delta; c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, delta); } c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; c->vc_visible_origin = c->vc_origin; vga_set_mem_top(c); c->vc_pos = (c->vc_pos - oldo) + c->vc_origin; return true; } /* * The console `switch' structure for the VGA based console */ static void vgacon_clear(struct vc_data *vc, int sy, int sx, int height, int width) { } static void vgacon_putc(struct vc_data *vc, int c, int ypos, int xpos) { } static void vgacon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos) { } const struct consw vga_con = { .owner = THIS_MODULE, .con_startup = vgacon_startup, .con_init = vgacon_init, .con_deinit = vgacon_deinit, .con_clear = vgacon_clear, .con_putc = vgacon_putc, .con_putcs = vgacon_putcs, .con_cursor = vgacon_cursor, .con_scroll = vgacon_scroll, .con_switch = vgacon_switch, .con_blank = vgacon_blank, 
.con_font_set = vgacon_font_set, .con_font_get = vgacon_font_get, .con_resize = vgacon_resize, .con_set_palette = vgacon_set_palette, .con_scrolldelta = vgacon_scrolldelta, .con_set_origin = vgacon_set_origin, .con_save_screen = vgacon_save_screen, .con_build_attr = vgacon_build_attr, .con_invert_region = vgacon_invert_region, .con_flush_scrollback = vgacon_flush_scrollback, }; EXPORT_SYMBOL(vga_con); MODULE_LICENSE("GPL");
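For orientation: the vgacon_adjust_height() routine in the code cell above packs the new last-displayed scan line into the CRTC overflow and font-size registers. Below is a minimal user-space sketch of just that bit packing, with invented input values and no port I/O; note the kernel's `+` comes out the same as `|` here because the masked bits are already clear.

/* Illustrative model of the CRTC bit packing in vgacon_adjust_height().
   Pure arithmetic; invented inputs, no hardware access. */
#include <cstdio>

int main() {
    const unsigned scan_lines = 400;  /* assumed: total text scan lines */
    const unsigned fontheight = 16;   /* assumed: new font height */

    unsigned rows = scan_lines / fontheight;   /* text rows that fit */
    unsigned maxscan = rows * fontheight - 1;  /* last scan line shown */

    unsigned char ovr = 0xff;  /* pretend previous overflow value (EGA case) */
    unsigned char fsr = 0x00;  /* pretend previous font-size value */

    unsigned char vde = maxscan & 0xff;  /* vertical display end, low 8 bits */
    /* bit 8 of maxscan -> overflow bit 1, bit 9 -> overflow bit 6 */
    ovr = (ovr & 0xbd) | ((maxscan & 0x100) >> 7) | ((maxscan & 0x200) >> 3);
    /* low 5 bits of the font-size register hold fontheight - 1 */
    fsr = (fsr & 0xe0) | ((fontheight - 1) & 0x1f);

    std::printf("rows=%u maxscan=%u vde=0x%02x ovr=0x%02x fsr=0x%02x\n",
                rows, maxscan, vde, ovr, fsr);
    return 0;
}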
/* * linux/drivers/video/vgacon.c -- Low level VGA based console driver * * Created 28 Sep 1997 by Geert Uytterhoeven * * Rewritten by Martin Mares <mj@ucw.cz>, July 1998 * * This file is based on the old console.c, vga.c and vesa_blank.c drivers. * * Copyright (C) 1991, 1992 Linus Torvalds * 1995 Jay Estabrook * * User definable mapping table and font loading by Eugene G. Crosser, * <crosser@average.org> * * Improved loadable font/UTF-8 support by H. Peter Anvin * Feb-Sep 1995 <peter.anvin@linux.org> * * Colour palette handling, by Simon Tatham * 17-Jun-95 <sgt20@cam.ac.uk> * * if 512 char mode is already enabled don't re-enable it, * because it causes screen to flicker, by Mitja Horvat * 5-May-96 <mitja.horvat@guest.arnes.si> * * Use 2 outw instead of 4 outb_p to reduce erroneous text * flashing on RHS of screen during heavy console scrolling . * Oct 1996, Paul Gortmaker. * * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/console.h> #include <linux/string.h> #include <linux/kd.h> #include <linux/slab.h> #include <linux/vt_kern.h> #include <linux/sched.h> #include <linux/selection.h> #include <linux/spinlock.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/screen_info.h> #include <video/vga.h> #include <asm/io.h> static DEFINE_RAW_SPINLOCK(vga_lock); static int cursor_size_lastfrom; static int cursor_size_lastto; static u32 vgacon_xres; static u32 vgacon_yres; static struct vgastate vgastate; #define BLANK 0x0020 #define VGA_FONTWIDTH 8 /* VGA does not support fontwidths != 8 */ /* * Interface used by the world */ static const char *vgacon_startup(void); static void vgacon_init(struct vc_data *c, int init); static void vgacon_deinit(struct vc_data *c); static void vgacon_cursor(struct vc_data *c, int mode); static int vgacon_switch(struct vc_data *c); static int vgacon_blank(struct vc_data *c, int blank, int mode_switch); static void vgacon_scrolldelta(struct vc_data *c, int lines); static int vgacon_set_origin(struct vc_data *c); static void vgacon_save_screen(struct vc_data *c); static void vgacon_invert_region(struct vc_data *c, u16 * p, int count); static struct uni_pagedir *vgacon_uni_pagedir; static int vgacon_refcount; /* Description of the hardware situation */ static bool vga_init_done; static unsigned long vga_vram_base __read_mostly; /* Base of video memory */ static unsigned long vga_vram_end __read_mostly; /* End of video memory */ static unsigned int vga_vram_size __read_mostly; /* Size of video memory */ static u16 vga_video_port_reg __read_mostly; /* Video register select port */ static u16 vga_video_port_val __read_mostly; /* Video register value port */ static unsigned int vga_video_num_columns; /* Number of text columns */ static unsigned int vga_video_num_lines; /* Number of text lines */ static bool vga_can_do_color; /* Do we support colors? 
*/ static unsigned int vga_default_font_height __read_mostly; /* Height of default screen font */ static unsigned char vga_video_type __read_mostly; /* Card type */ static bool vga_font_is_default = true; static int vga_vesa_blanked; static bool vga_palette_blanked; static bool vga_is_gfx; static bool vga_512_chars; static int vga_video_font_height; static int vga_scan_lines __read_mostly; static unsigned int vga_rolled_over; static bool vgacon_text_mode_force; static bool vga_hardscroll_enabled; static bool vga_hardscroll_user_enable = true; bool vgacon_text_force(void) { return vgacon_text_mode_force; } EXPORT_SYMBOL(vgacon_text_force); static int __init text_mode(char *str) { vgacon_text_mode_force = true; pr_warn("You have booted with nomodeset. This means your GPU drivers are DISABLED\n"); pr_warn("Any video related functionality will be severely degraded, and you may not even be able to suspend the system properly\n"); pr_warn("Unless you actually understand what nomodeset does, you should reboot without enabling it\n"); return 1; } /* force text mode - used by kernel modesetting */ __setup("nomodeset", text_mode); static int __init no_scroll(char *str) { /* * Disabling scrollback is required for the Braillex ib80-piezo * Braille reader made by F.H. Papenmeier (Germany). * Use the "no-scroll" bootflag. */ vga_hardscroll_user_enable = vga_hardscroll_enabled = false; return 1; } __setup("no-scroll", no_scroll); /* * By replacing the four outb_p with two back to back outw, we can reduce * the window of opportunity to see text mislocated to the RHS of the * console during heavy scrolling activity. However there is the remote * possibility that some pre-dinosaur hardware won't like the back to back * I/O. Since the Xservers get away with it, we should be able to as well. */ static inline void write_vga(unsigned char reg, unsigned int val) { unsigned int v1, v2; unsigned long flags; /* * ddprintk might set the console position from interrupt * handlers, thus the write has to be IRQ-atomic. */ raw_spin_lock_irqsave(&vga_lock, flags); v1 = reg + (val & 0xff00); v2 = reg + 1 + ((val << 8) & 0xff00); outw(v1, vga_video_port_reg); outw(v2, vga_video_port_reg); raw_spin_unlock_irqrestore(&vga_lock, flags); } static inline void vga_set_mem_top(struct vc_data *c) { write_vga(12, (c->vc_visible_origin - vga_vram_base) / 2); } static void vgacon_restore_screen(struct vc_data *c) { if (c->vc_origin != c->vc_visible_origin) vgacon_scrolldelta(c, 0); } static void vgacon_scrolldelta(struct vc_data *c, int lines) { vc_scrolldelta_helper(c, lines, vga_rolled_over, (void *)vga_vram_base, vga_vram_size); vga_set_mem_top(c); } static const char *vgacon_startup(void) { const char *display_desc = NULL; u16 saved1, saved2; volatile u16 *p; if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB || screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) { no_vga: #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; return conswitchp->con_startup(); #else return NULL; #endif } /* boot_params.screen_info reasonably initialized? 
*/ if ((screen_info.orig_video_lines == 0) || (screen_info.orig_video_cols == 0)) goto no_vga; /* VGA16 modes are not handled by VGACON */ if ((screen_info.orig_video_mode == 0x0D) || /* 320x200/4 */ (screen_info.orig_video_mode == 0x0E) || /* 640x200/4 */ (screen_info.orig_video_mode == 0x10) || /* 640x350/4 */ (screen_info.orig_video_mode == 0x12) || /* 640x480/4 */ (screen_info.orig_video_mode == 0x6A)) /* 800x600/4 (VESA) */ goto no_vga; vga_video_num_lines = screen_info.orig_video_lines; vga_video_num_columns = screen_info.orig_video_cols; vgastate.vgabase = NULL; if (screen_info.orig_video_mode == 7) { /* Monochrome display */ vga_vram_base = 0xb0000; vga_video_port_reg = VGA_CRT_IM; vga_video_port_val = VGA_CRT_DM; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { static struct resource ega_console_resource = { .name = "ega", .flags = IORESOURCE_IO, .start = 0x3B0, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_EGAM; vga_vram_size = 0x8000; display_desc = "EGA+"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource mda1_console_resource = { .name = "mda", .flags = IORESOURCE_IO, .start = 0x3B0, .end = 0x3BB }; static struct resource mda2_console_resource = { .name = "mda", .flags = IORESOURCE_IO, .start = 0x3BF, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_MDA; vga_vram_size = 0x2000; display_desc = "*MDA"; request_resource(&ioport_resource, &mda1_console_resource); request_resource(&ioport_resource, &mda2_console_resource); vga_video_font_height = 14; } } else { /* If not, it is color. */ vga_can_do_color = true; vga_vram_base = 0xb8000; vga_video_port_reg = VGA_CRT_IC; vga_video_port_val = VGA_CRT_DC; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { int i; vga_vram_size = 0x8000; if (!screen_info.orig_video_isVGA) { static struct resource ega_console_resource = { .name = "ega", .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_EGAC; display_desc = "EGA"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource vga_console_resource = { .name = "vga+", .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_VGAC; display_desc = "VGA+"; request_resource(&ioport_resource, &vga_console_resource); /* * Normalise the palette registers, to point * the 16 screen colours to the first 16 * DAC entries. */ for (i = 0; i < 16; i++) { inb_p(VGA_IS1_RC); outb_p(i, VGA_ATT_W); outb_p(i, VGA_ATT_W); } outb_p(0x20, VGA_ATT_W); /* * Now set the DAC registers back to their * default values */ for (i = 0; i < 16; i++) { outb_p(color_table[i], VGA_PEL_IW); outb_p(default_red[i], VGA_PEL_D); outb_p(default_grn[i], VGA_PEL_D); outb_p(default_blu[i], VGA_PEL_D); } } } else { static struct resource cga_console_resource = { .name = "cga", .flags = IORESOURCE_IO, .start = 0x3D4, .end = 0x3D5 }; vga_video_type = VIDEO_TYPE_CGA; vga_vram_size = 0x2000; display_desc = "*CGA"; request_resource(&ioport_resource, &cga_console_resource); vga_video_font_height = 8; } } vga_vram_base = VGA_MAP_MEM(vga_vram_base, vga_vram_size); vga_vram_end = vga_vram_base + vga_vram_size; /* * Find out if there is a graphics card present. * Are there smarter methods around? 
*/ p = (volatile u16 *) vga_vram_base; saved1 = scr_readw(p); saved2 = scr_readw(p + 1); scr_writew(0xAA55, p); scr_writew(0x55AA, p + 1); if (scr_readw(p) != 0xAA55 || scr_readw(p + 1) != 0x55AA) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(0x55AA, p); scr_writew(0xAA55, p + 1); if (scr_readw(p) != 0x55AA || scr_readw(p + 1) != 0xAA55) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(saved1, p); scr_writew(saved2, p + 1); if (vga_video_type == VIDEO_TYPE_EGAC || vga_video_type == VIDEO_TYPE_VGAC || vga_video_type == VIDEO_TYPE_EGAM) { vga_hardscroll_enabled = vga_hardscroll_user_enable; vga_default_font_height = screen_info.orig_video_points; vga_video_font_height = screen_info.orig_video_points; /* This may be suboptimal but is a safe bet - go with it */ vga_scan_lines = vga_video_font_height * vga_video_num_lines; } vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; vga_init_done = true; return display_desc; } static void vgacon_init(struct vc_data *c, int init) { struct uni_pagedir *p; /* * We cannot be loaded as a module, therefore init is always 1, * but vgacon_init can be called more than once, and init will * not be 1. */ c->vc_can_do_color = vga_can_do_color; /* set dimensions manually if init != 0 since vc_resize() will fail */ if (init) { c->vc_cols = vga_video_num_columns; c->vc_rows = vga_video_num_lines; } else vc_resize(c, vga_video_num_columns, vga_video_num_lines); c->vc_scan_lines = vga_scan_lines; c->vc_font.height = vga_video_font_height; c->vc_complement_mask = 0x7700; if (vga_512_chars) c->vc_hi_font_mask = 0x0800; p = *c->vc_uni_pagedir_loc; if (c->vc_uni_pagedir_loc != &vgacon_uni_pagedir) { con_free_unimap(c); c->vc_uni_pagedir_loc = &vgacon_uni_pagedir; vgacon_refcount++; } if (!vgacon_uni_pagedir && p) con_set_default_unimap(c); /* Only set the default if the user didn't deliberately override it */ if (global_cursor_default == -1) global_cursor_default = !(screen_info.flags & VIDEO_FLAGS_NOCURSOR); } static void vgacon_deinit(struct vc_data *c) { /* When closing the active console, reset video origin */ if (con_is_visible(c)) { c->vc_visible_origin = vga_vram_base; vga_set_mem_top(c); } if (!--vgacon_refcount) con_free_unimap(c); c->vc_uni_pagedir_loc = &c->vc_uni_pagedir; con_set_default_unimap(c); } static u8 vgacon_build_attr(struct vc_data *c, u8 color, enum vc_intensity intensity, bool blink, bool underline, bool reverse, bool italic) { u8 attr = color; if (vga_can_do_color) { if (italic) attr = (attr & 0xF0) | c->vc_itcolor; else if (underline) attr = (attr & 0xf0) | c->vc_ulcolor; else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | c->vc_halfcolor; } if (reverse) attr = ((attr) & 0x88) | ((((attr) >> 4) | ((attr) << 4)) & 0x77); if (blink) attr ^= 0x80; if (intensity == VCI_BOLD) attr ^= 0x08; if (!vga_can_do_color) { if (italic) attr = (attr & 0xF8) | 0x02; else if (underline) attr = (attr & 0xf8) | 0x01; else if (intensity == VCI_HALF_BRIGHT) attr = (attr & 0xf0) | 0x08; } return attr; } static void vgacon_invert_region(struct vc_data *c, u16 * p, int count) { const bool col = vga_can_do_color; while (count--) { u16 a = scr_readw(p); if (col) a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) | (((a) & 0x0700) << 4); else a ^= ((a & 0x0700) == 0x0100) ? 
0x7000 : 0x7700; scr_writew(a, p++); } } static void vgacon_set_cursor_size(int xpos, int from, int to) { unsigned long flags; int curs, cure; if ((from == cursor_size_lastfrom) && (to == cursor_size_lastto)) return; cursor_size_lastfrom = from; cursor_size_lastto = to; raw_spin_lock_irqsave(&vga_lock, flags); if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); curs = inb_p(vga_video_port_val); outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); cure = inb_p(vga_video_port_val); } else { curs = 0; cure = 0; } curs = (curs & 0xc0) | from; cure = (cure & 0xe0) | to; outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); outb_p(curs, vga_video_port_val); outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); outb_p(cure, vga_video_port_val); raw_spin_unlock_irqrestore(&vga_lock, flags); } static void vgacon_cursor(struct vc_data *c, int mode) { if (c->vc_mode != KD_TEXT) return; vgacon_restore_screen(c); switch (mode) { case CM_ERASE: write_vga(14, (c->vc_pos - vga_vram_base) / 2); if (vga_video_type >= VIDEO_TYPE_VGAC) vgacon_set_cursor_size(c->state.x, 31, 30); else vgacon_set_cursor_size(c->state.x, 31, 31); break; case CM_MOVE: case CM_DRAW: write_vga(14, (c->vc_pos - vga_vram_base) / 2); switch (CUR_SIZE(c->vc_cursor_type)) { case CUR_UNDERLINE: vgacon_set_cursor_size(c->state.x, c->vc_font.height - (c->vc_font.height < 10 ? 2 : 3), c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_TWO_THIRDS: vgacon_set_cursor_size(c->state.x, c->vc_font.height / 3, c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_THIRD: vgacon_set_cursor_size(c->state.x, (c->vc_font.height * 2) / 3, c->vc_font.height - (c->vc_font.height < 10 ? 1 : 2)); break; case CUR_LOWER_HALF: vgacon_set_cursor_size(c->state.x, c->vc_font.height / 2, c->vc_font.height - (c->vc_font.height < 10 ? 
1 : 2)); break; case CUR_NONE: if (vga_video_type >= VIDEO_TYPE_VGAC) vgacon_set_cursor_size(c->state.x, 31, 30); else vgacon_set_cursor_size(c->state.x, 31, 31); break; default: vgacon_set_cursor_size(c->state.x, 1, c->vc_font.height); break; } break; } } static int vgacon_doresize(struct vc_data *c, unsigned int width, unsigned int height) { unsigned long flags; unsigned int scanlines = height * c->vc_font.height; u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan; raw_spin_lock_irqsave(&vga_lock, flags); vgacon_xres = width * VGA_FONTWIDTH; vgacon_yres = height * c->vc_font.height; if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg); max_scan = inb_p(vga_video_port_val); if (max_scan & 0x80) scanlines <<= 1; outb_p(VGA_CRTC_MODE, vga_video_port_reg); mode = inb_p(vga_video_port_val); if (mode & 0x04) scanlines >>= 1; scanlines -= 1; scanlines_lo = scanlines & 0xff; outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg); r7 = inb_p(vga_video_port_val) & ~0x42; if (scanlines & 0x100) r7 |= 0x02; if (scanlines & 0x200) r7 |= 0x40; /* deprotect registers */ outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg); vsync_end = inb_p(vga_video_port_val); outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg); outb_p(vsync_end & ~0x80, vga_video_port_val); } outb_p(VGA_CRTC_H_DISP, vga_video_port_reg); outb_p(width - 1, vga_video_port_val); outb_p(VGA_CRTC_OFFSET, vga_video_port_reg); outb_p(width >> 1, vga_video_port_val); if (vga_video_type >= VIDEO_TYPE_VGAC) { outb_p(VGA_CRTC_V_DISP_END, vga_video_port_reg); outb_p(scanlines_lo, vga_video_port_val); outb_p(VGA_CRTC_OVERFLOW, vga_video_port_reg); outb_p(r7,vga_video_port_val); /* reprotect registers */ outb_p(VGA_CRTC_V_SYNC_END, vga_video_port_reg); outb_p(vsync_end, vga_video_port_val); } raw_spin_unlock_irqrestore(&vga_lock, flags); return 0; } static int vgacon_switch(struct vc_data *c) { int x = c->vc_cols * VGA_FONTWIDTH; int y = c->vc_rows * c->vc_font.height; int rows = screen_info.orig_video_lines * vga_default_font_height/ c->vc_font.height; /* * We need to save screen size here as it's the only way * we can spot the screen has been resized and we need to * set size of freshly allocated screens ourselves. */ vga_video_num_columns = c->vc_cols; vga_video_num_lines = c->vc_rows; /* We can only copy out the size of the video buffer here, * otherwise we get into VGA BIOS */ if (!vga_is_gfx) { scr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf, c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size); if ((vgacon_xres != x || vgacon_yres != y) && (!(vga_video_num_columns % 2) && vga_video_num_columns <= screen_info.orig_video_cols && vga_video_num_lines <= rows)) vgacon_doresize(c, c->vc_cols, c->vc_rows); } return 0; /* Redrawing not needed */ } static void vga_set_palette(struct vc_data *vc, const unsigned char *table) { int i, j; vga_w(vgastate.vgabase, VGA_PEL_MSK, 0xff); for (i = j = 0; i < 16; i++) { vga_w(vgastate.vgabase, VGA_PEL_IW, table[i]); vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); } } static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table) { if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked || !con_is_visible(vc)) return; vga_set_palette(vc, table); } /* structure holding original VGA register settings */ static struct { unsigned char SeqCtrlIndex; /* Sequencer Index reg. 
*/ unsigned char CrtCtrlIndex; /* CRT-Contr. Index reg. */ unsigned char CrtMiscIO; /* Miscellaneous register */ unsigned char HorizontalTotal; /* CRT-Controller:00h */ unsigned char HorizDisplayEnd; /* CRT-Controller:01h */ unsigned char StartHorizRetrace; /* CRT-Controller:04h */ unsigned char EndHorizRetrace; /* CRT-Controller:05h */ unsigned char Overflow; /* CRT-Controller:07h */ unsigned char StartVertRetrace; /* CRT-Controller:10h */ unsigned char EndVertRetrace; /* CRT-Controller:11h */ unsigned char ModeControl; /* CRT-Controller:17h */ unsigned char ClockingMode; /* Seq-Controller:01h */ } vga_state; static void vga_vesa_blank(struct vgastate *state, int mode) { /* save original values of VGA controller registers */ if (!vga_vesa_blanked) { raw_spin_lock_irq(&vga_lock); vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I); vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg); vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R); raw_spin_unlock_irq(&vga_lock); outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ vga_state.HorizontalTotal = inb_p(vga_video_port_val); outb_p(0x01, vga_video_port_reg); /* HorizDisplayEnd */ vga_state.HorizDisplayEnd = inb_p(vga_video_port_val); outb_p(0x04, vga_video_port_reg); /* StartHorizRetrace */ vga_state.StartHorizRetrace = inb_p(vga_video_port_val); outb_p(0x05, vga_video_port_reg); /* EndHorizRetrace */ vga_state.EndHorizRetrace = inb_p(vga_video_port_val); outb_p(0x07, vga_video_port_reg); /* Overflow */ vga_state.Overflow = inb_p(vga_video_port_val); outb_p(0x10, vga_video_port_reg); /* StartVertRetrace */ vga_state.StartVertRetrace = inb_p(vga_video_port_val); outb_p(0x11, vga_video_port_reg); /* EndVertRetrace */ vga_state.EndVertRetrace = inb_p(vga_video_port_val); outb_p(0x17, vga_video_port_reg); /* ModeControl */ vga_state.ModeControl = inb_p(vga_video_port_val); vga_state.ClockingMode = vga_rseq(state->vgabase, VGA_SEQ_CLOCK_MODE); } /* assure that video is enabled */ /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */ raw_spin_lock_irq(&vga_lock); vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20); /* test for vertical retrace in process.... */ if ((vga_state.CrtMiscIO & 0x80) == 0x80) vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO & 0xEF); /* * Set <End of vertical retrace> to minimum (0) and * <Start of vertical Retrace> to maximum (incl. overflow) * Result: turn off vertical sync (VSync) pulse. */ if (mode & VESA_VSYNC_SUSPEND) { outb_p(0x10, vga_video_port_reg); /* StartVertRetrace */ outb_p(0xff, vga_video_port_val); /* maximum value */ outb_p(0x11, vga_video_port_reg); /* EndVertRetrace */ outb_p(0x40, vga_video_port_val); /* minimum (bits 0..3) */ outb_p(0x07, vga_video_port_reg); /* Overflow */ outb_p(vga_state.Overflow | 0x84, vga_video_port_val); /* bits 9,10 of vert. retrace */ } if (mode & VESA_HSYNC_SUSPEND) { /* * Set <End of horizontal retrace> to minimum (0) and * <Start of horizontal Retrace> to maximum * Result: turn off horizontal sync (HSync) pulse. 
*/ outb_p(0x04, vga_video_port_reg); /* StartHorizRetrace */ outb_p(0xff, vga_video_port_val); /* maximum */ outb_p(0x05, vga_video_port_reg); /* EndHorizRetrace */ outb_p(0x00, vga_video_port_val); /* minimum (0) */ } /* restore both index registers */ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); raw_spin_unlock_irq(&vga_lock); } static void vga_vesa_unblank(struct vgastate *state) { /* restore original values of VGA controller registers */ raw_spin_lock_irq(&vga_lock); vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO); outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ outb_p(vga_state.HorizontalTotal, vga_video_port_val); outb_p(0x01, vga_video_port_reg); /* HorizDisplayEnd */ outb_p(vga_state.HorizDisplayEnd, vga_video_port_val); outb_p(0x04, vga_video_port_reg); /* StartHorizRetrace */ outb_p(vga_state.StartHorizRetrace, vga_video_port_val); outb_p(0x05, vga_video_port_reg); /* EndHorizRetrace */ outb_p(vga_state.EndHorizRetrace, vga_video_port_val); outb_p(0x07, vga_video_port_reg); /* Overflow */ outb_p(vga_state.Overflow, vga_video_port_val); outb_p(0x10, vga_video_port_reg); /* StartVertRetrace */ outb_p(vga_state.StartVertRetrace, vga_video_port_val); outb_p(0x11, vga_video_port_reg); /* EndVertRetrace */ outb_p(vga_state.EndVertRetrace, vga_video_port_val); outb_p(0x17, vga_video_port_reg); /* ModeControl */ outb_p(vga_state.ModeControl, vga_video_port_val); /* ClockingMode */ vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode); /* restore index/control registers */ vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); raw_spin_unlock_irq(&vga_lock); } static void vga_pal_blank(struct vgastate *state) { int i; vga_w(state->vgabase, VGA_PEL_MSK, 0xff); for (i = 0; i < 16; i++) { vga_w(state->vgabase, VGA_PEL_IW, i); vga_w(state->vgabase, VGA_PEL_D, 0); vga_w(state->vgabase, VGA_PEL_D, 0); vga_w(state->vgabase, VGA_PEL_D, 0); } } static int vgacon_blank(struct vc_data *c, int blank, int mode_switch) { switch (blank) { case 0: /* Unblank */ if (vga_vesa_blanked) { vga_vesa_unblank(&vgastate); vga_vesa_blanked = 0; } if (vga_palette_blanked) { vga_set_palette(c, color_table); vga_palette_blanked = false; return 0; } vga_is_gfx = false; /* Tell console.c that it has to restore the screen itself */ return 1; case 1: /* Normal blanking */ case -1: /* Obsolete */ if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) { vga_pal_blank(&vgastate); vga_palette_blanked = true; return 0; } vgacon_set_origin(c); scr_memsetw((void *) vga_vram_base, BLANK, c->vc_screenbuf_size); if (mode_switch) vga_is_gfx = true; return 1; default: /* VESA blanking */ if (vga_video_type == VIDEO_TYPE_VGAC) { vga_vesa_blank(&vgastate, blank - 1); vga_vesa_blanked = blank; } return 0; } } /* * PIO_FONT support. * * The font loading code goes back to the codepage package by * Joel Hoffman (joel@wam.umd.edu). (He reports that the original * reference is: "From: p. 307 of _Programmer's Guide to PC & PS/2 * Video Systems_ by Richard Wilton. 1987. Microsoft Press".) * * Change for certain monochrome monitors by Yury Shevchuck * (sizif@botik.yaroslavl.su). */ #define colourmap 0xa0000 /* Pauline Middelink <middelin@polyware.iaf.nl> reports that we should use 0xA0000 for the bwmap as well.. 
*/ #define blackwmap 0xa0000 #define cmapsz 8192 static int vgacon_do_font_op(struct vgastate *state, char *arg, int set, bool ch512) { unsigned short video_port_status = vga_video_port_reg + 6; int font_select = 0x00, beg, i; char *charmap; bool clear_attribs = false; if (vga_video_type != VIDEO_TYPE_EGAM) { charmap = (char *) VGA_MAP_MEM(colourmap, 0); beg = 0x0e; } else { charmap = (char *) VGA_MAP_MEM(blackwmap, 0); beg = 0x0a; } #ifdef BROKEN_GRAPHICS_PROGRAMS /* * All fonts are loaded in slot 0 (0:1 for 512 ch) */ if (!arg) return -EINVAL; /* Return to default font not supported */ vga_font_is_default = false; font_select = ch512 ? 0x04 : 0x00; #else /* * The default font is kept in slot 0 and is never touched. * A custom font is loaded in slot 2 (256 ch) or 2:3 (512 ch) */ if (set) { vga_font_is_default = !arg; if (!arg) ch512 = false; /* Default font is always 256 */ font_select = arg ? (ch512 ? 0x0e : 0x0a) : 0x00; } if (!vga_font_is_default) charmap += 4 * cmapsz; #endif raw_spin_lock_irq(&vga_lock); /* First, the Sequencer */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1); /* CPU writes only to map 2 */ vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x04); /* Sequential addressing */ vga_wseq(state->vgabase, VGA_SEQ_MEMORY_MODE, 0x07); /* Clear synchronous reset */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x03); /* Now, the graphics controller, select map 2 */ vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x02); /* disable odd-even addressing */ vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00); /* map start at A000:0000 */ vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00); raw_spin_unlock_irq(&vga_lock); if (arg) { if (set) for (i = 0; i < cmapsz; i++) { vga_writeb(arg[i], charmap + i); cond_resched(); } else for (i = 0; i < cmapsz; i++) { arg[i] = vga_readb(charmap + i); cond_resched(); } /* * In 512-character mode, the character map is not contiguous if * we want to remain EGA compatible -- which we do */ if (ch512) { charmap += 2 * cmapsz; arg += cmapsz; if (set) for (i = 0; i < cmapsz; i++) { vga_writeb(arg[i], charmap + i); cond_resched(); } else for (i = 0; i < cmapsz; i++) { arg[i] = vga_readb(charmap + i); cond_resched(); } } } raw_spin_lock_irq(&vga_lock); /* First, the sequencer, Synchronous reset */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01); /* CPU writes to maps 0 and 1 */ vga_wseq(state->vgabase, VGA_SEQ_PLANE_WRITE, 0x03); /* odd-even addressing */ vga_wseq(state->vgabase, VGA_SEQ_MEMORY_MODE, 0x03); /* Character Map Select */ if (set) vga_wseq(state->vgabase, VGA_SEQ_CHARACTER_MAP, font_select); /* clear synchronous reset */ vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x03); /* Now, the graphics controller, select map 0 for CPU */ vga_wgfx(state->vgabase, VGA_GFX_PLANE_READ, 0x00); /* enable even-odd addressing */ vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x10); /* map starts at b800:0 or b000:0 */ vga_wgfx(state->vgabase, VGA_GFX_MISC, beg); /* if 512 char mode is already enabled don't re-enable it. */ if ((set) && (ch512 != vga_512_chars)) { vga_512_chars = ch512; /* 256-char: enable intensity bit 512-char: disable intensity bit */ inb_p(video_port_status); /* clear address flip-flop */ /* color plane enable register */ vga_wattr(state->vgabase, VGA_ATC_PLANE_ENABLE, ch512 ? 
0x07 : 0x0f); /* Wilton (1987) mentions the following; I don't know what it means, but it works, and it appears necessary */ inb_p(video_port_status); vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); clear_attribs = true; } raw_spin_unlock_irq(&vga_lock); if (clear_attribs) { for (i = 0; i < MAX_NR_CONSOLES; i++) { struct vc_data *c = vc_cons[i].d; if (c && c->vc_sw == &vga_con) { /* force hi font mask to 0, so we always clear the bit on either transition */ c->vc_hi_font_mask = 0x00; clear_buffer_attributes(c); c->vc_hi_font_mask = ch512 ? 0x0800 : 0; } } } return 0; } /* * Adjust the screen to fit a font of a certain height */ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight) { unsigned char ovr, vde, fsr; int rows, maxscan, i; rows = vc->vc_scan_lines / fontheight; /* Number of video rows we end up with */ maxscan = rows * fontheight - 1; /* Scan lines to actually display-1 */ /* Reprogram the CRTC for the new font size Note: the attempt to read the overflow register will fail on an EGA, but using 0xff for the previous value appears to be OK for EGA text modes in the range 257-512 scan lines, so I guess we don't need to worry about it. The same applies for the spill bits in the font size and cursor registers; they are write-only on EGA, but it appears that they are all don't care bits on EGA, so I guess it doesn't matter. */ raw_spin_lock_irq(&vga_lock); outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ ovr = inb_p(vga_video_port_val); outb_p(0x09, vga_video_port_reg); /* Font size register */ fsr = inb_p(vga_video_port_val); raw_spin_unlock_irq(&vga_lock); vde = maxscan & 0xff; /* Vertical display end reg */ ovr = (ovr & 0xbd) + /* Overflow register */ ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3); fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */ raw_spin_lock_irq(&vga_lock); outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ outb_p(ovr, vga_video_port_val); outb_p(0x09, vga_video_port_reg); /* Font size */ outb_p(fsr, vga_video_port_val); outb_p(0x12, vga_video_port_reg); /* Vertical display limit */ outb_p(vde, vga_video_port_val); raw_spin_unlock_irq(&vga_lock); vga_video_font_height = fontheight; for (i = 0; i < MAX_NR_CONSOLES; i++) { struct vc_data *c = vc_cons[i].d; if (c && c->vc_sw == &vga_con) { if (con_is_visible(c)) { /* void size to cause regs to be rewritten */ cursor_size_lastfrom = 0; cursor_size_lastto = 0; c->vc_sw->con_cursor(c, CM_DRAW); } c->vc_font.height = fontheight; vc_resize(c, 0, rows); /* Adjust console size */ } } return 0; } static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigned int flags) { unsigned charcount = font->charcount; int rc; if (vga_video_type < VIDEO_TYPE_EGAM) return -EINVAL; if (font->width != VGA_FONTWIDTH || (charcount != 256 && charcount != 512)) return -EINVAL; rc = vgacon_do_font_op(&vgastate, font->data, 1, charcount == 512); if (rc) return rc; if (!(flags & KD_FONT_FLAG_DONT_RECALC)) rc = vgacon_adjust_height(c, font->height); return rc; } static int vgacon_font_get(struct vc_data *c, struct console_font *font) { if (vga_video_type < VIDEO_TYPE_EGAM) return -EINVAL; font->width = VGA_FONTWIDTH; font->height = c->vc_font.height; font->charcount = vga_512_chars ? 
512 : 256; if (!font->data) return 0; return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars); } static int vgacon_resize(struct vc_data *c, unsigned int width, unsigned int height, unsigned int user) { if ((width << 1) * height > vga_vram_size) return -EINVAL; if (width % 2 || width > screen_info.orig_video_cols || height > (screen_info.orig_video_lines * vga_default_font_height)/ c->vc_font.height) /* let svgatextmode tinker with video timings and return success */ return (user) ? 0 : -EINVAL; if (con_is_visible(c) && !vga_is_gfx) /* who knows */ vgacon_doresize(c, width, height); return 0; } static int vgacon_set_origin(struct vc_data *c) { if (vga_is_gfx || /* We don't play origin tricks in graphic modes */ (console_blanked && !vga_palette_blanked)) /* Nor we write to blanked screens */ return 0; c->vc_origin = c->vc_visible_origin = vga_vram_base; vga_set_mem_top(c); vga_rolled_over = 0; return 1; } static void vgacon_save_screen(struct vc_data *c) { static int vga_bootup_console = 0; if (!vga_bootup_console) { /* This is a gross hack, but here is the only place we can * set bootup console parameters without messing up generic * console initialization routines. */ vga_bootup_console = 1; c->state.x = screen_info.orig_x; c->state.y = screen_info.orig_y; } /* We can't copy in more than the size of the video buffer, * or we'll be copying in VGA BIOS */ if (!vga_is_gfx) scr_memcpyw((u16 *) c->vc_screenbuf, (u16 *) c->vc_origin, c->vc_screenbuf_size > vga_vram_size ? vga_vram_size : c->vc_screenbuf_size); } static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, enum con_scroll dir, unsigned int lines) { unsigned long oldo; unsigned int delta; if (t || b != c->vc_rows || vga_is_gfx || c->vc_mode != KD_TEXT) return false; if (!vga_hardscroll_enabled || lines >= c->vc_rows / 2) return false; vgacon_restore_screen(c); oldo = c->vc_origin; delta = lines * c->vc_size_row; if (dir == SM_UP) { if (c->vc_scr_end + delta >= vga_vram_end) { scr_memcpyw((u16 *) vga_vram_base, (u16 *) (oldo + delta), c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_base; vga_rolled_over = oldo - vga_vram_base; } else c->vc_origin += delta; scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size - delta), c->vc_video_erase_char, delta); } else { if (oldo - delta < vga_vram_base) { scr_memmovew((u16 *) (vga_vram_end - c->vc_screenbuf_size + delta), (u16 *) oldo, c->vc_screenbuf_size - delta); c->vc_origin = vga_vram_end - c->vc_screenbuf_size; vga_rolled_over = 0; } else c->vc_origin -= delta; c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, delta); } c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; c->vc_visible_origin = c->vc_origin; vga_set_mem_top(c); c->vc_pos = (c->vc_pos - oldo) + c->vc_origin; return true; } /* * The console `switch' structure for the VGA based console */ static void vgacon_clear(struct vc_data *vc, int sy, int sx, int height, int width) { } static void vgacon_putc(struct vc_data *vc, int c, int ypos, int xpos) { } static void vgacon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos) { } const struct consw vga_con = { .owner = THIS_MODULE, .con_startup = vgacon_startup, .con_init = vgacon_init, .con_deinit = vgacon_deinit, .con_clear = vgacon_clear, .con_putc = vgacon_putc, .con_putcs = vgacon_putcs, .con_cursor = vgacon_cursor, .con_scroll = vgacon_scroll, .con_switch = vgacon_switch, .con_blank = vgacon_blank, .con_font_set = vgacon_font_set, .con_font_get = 
vgacon_font_get, .con_resize = vgacon_resize, .con_set_palette = vgacon_set_palette, .con_scrolldelta = vgacon_scrolldelta, .con_set_origin = vgacon_set_origin, .con_save_screen = vgacon_save_screen, .con_build_attr = vgacon_build_attr, .con_invert_region = vgacon_invert_region, }; EXPORT_SYMBOL(vga_con); MODULE_LICENSE("GPL");
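The fixed file above keeps hardware scrolling in vgacon_scroll(): on SM_UP the visible origin slides forward through video RAM and is copied back to the base when it would run off the end. A toy model of that origin update follows; vram_size, screen_size, and size_row are stand-in values for the demo, not kernel constants.

/* Toy model of the SM_UP origin update in vgacon_scroll(). */
#include <cstdio>
#include <cstring>

int main() {
    const unsigned vram_size = 64;    /* invented "video RAM" size, in cells */
    const unsigned screen_size = 16;  /* visible screen, in cells */
    const unsigned size_row = 4;      /* one text row, in cells */
    unsigned short vram[vram_size] = {};

    unsigned origin = 0;  /* offset of the visible screen inside vram */
    for (int step = 0; step < 20; ++step) {
        unsigned delta = size_row;  /* scroll up by one row */
        if (origin + screen_size + delta >= vram_size) {
            /* Out of room: copy the surviving rows back to the base,
               as scr_memcpyw(vga_vram_base, oldo + delta, ...) does. */
            std::memmove(vram, vram + origin + delta,
                         (screen_size - delta) * sizeof(vram[0]));
            origin = 0;
        } else {
            origin += delta;
        }
        /* Blank the freshly exposed bottom row (scr_memsetw in the kernel). */
        std::memset(vram + origin + screen_size - delta, 0,
                    delta * sizeof(vram[0]));
    }
    std::printf("final origin=%u\n", origin);
    return 0;
}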
static const char *vgacon_startup(void) { const char *display_desc = NULL; u16 saved1, saved2; volatile u16 *p; if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB || screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) { no_vga: #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; return conswitchp->con_startup(); #else return NULL; #endif } /* boot_params.screen_info reasonably initialized? */ if ((screen_info.orig_video_lines == 0) || (screen_info.orig_video_cols == 0)) goto no_vga; /* VGA16 modes are not handled by VGACON */ if ((screen_info.orig_video_mode == 0x0D) || /* 320x200/4 */ (screen_info.orig_video_mode == 0x0E) || /* 640x200/4 */ (screen_info.orig_video_mode == 0x10) || /* 640x350/4 */ (screen_info.orig_video_mode == 0x12) || /* 640x480/4 */ (screen_info.orig_video_mode == 0x6A)) /* 800x600/4 (VESA) */ goto no_vga; vga_video_num_lines = screen_info.orig_video_lines; vga_video_num_columns = screen_info.orig_video_cols; vgastate.vgabase = NULL; if (screen_info.orig_video_mode == 7) { /* Monochrome display */ vga_vram_base = 0xb0000; vga_video_port_reg = VGA_CRT_IM; vga_video_port_val = VGA_CRT_DM; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { static struct resource ega_console_resource = { .name = "ega", .flags = IORESOURCE_IO, .start = 0x3B0, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_EGAM; vga_vram_size = 0x8000; display_desc = "EGA+"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource mda1_console_resource = { .name = "mda", .flags = IORESOURCE_IO, .start = 0x3B0, .end = 0x3BB }; static struct resource mda2_console_resource = { .name = "mda", .flags = IORESOURCE_IO, .start = 0x3BF, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_MDA; vga_vram_size = 0x2000; display_desc = "*MDA"; request_resource(&ioport_resource, &mda1_console_resource); request_resource(&ioport_resource, &mda2_console_resource); vga_video_font_height = 14; } } else { /* If not, it is color. */ vga_can_do_color = true; vga_vram_base = 0xb8000; vga_video_port_reg = VGA_CRT_IC; vga_video_port_val = VGA_CRT_DC; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { int i; vga_vram_size = 0x8000; if (!screen_info.orig_video_isVGA) { static struct resource ega_console_resource = { .name = "ega", .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_EGAC; display_desc = "EGA"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource vga_console_resource = { .name = "vga+", .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_VGAC; display_desc = "VGA+"; request_resource(&ioport_resource, &vga_console_resource); /* * Normalise the palette registers, to point * the 16 screen colours to the first 16 * DAC entries. 
*/ for (i = 0; i < 16; i++) { inb_p(VGA_IS1_RC); outb_p(i, VGA_ATT_W); outb_p(i, VGA_ATT_W); } outb_p(0x20, VGA_ATT_W); /* * Now set the DAC registers back to their * default values */ for (i = 0; i < 16; i++) { outb_p(color_table[i], VGA_PEL_IW); outb_p(default_red[i], VGA_PEL_D); outb_p(default_grn[i], VGA_PEL_D); outb_p(default_blu[i], VGA_PEL_D); } } } else { static struct resource cga_console_resource = { .name = "cga", .flags = IORESOURCE_IO, .start = 0x3D4, .end = 0x3D5 }; vga_video_type = VIDEO_TYPE_CGA; vga_vram_size = 0x2000; display_desc = "*CGA"; request_resource(&ioport_resource, &cga_console_resource); vga_video_font_height = 8; } } vga_vram_base = VGA_MAP_MEM(vga_vram_base, vga_vram_size); vga_vram_end = vga_vram_base + vga_vram_size; /* * Find out if there is a graphics card present. * Are there smarter methods around? */ p = (volatile u16 *) vga_vram_base; saved1 = scr_readw(p); saved2 = scr_readw(p + 1); scr_writew(0xAA55, p); scr_writew(0x55AA, p + 1); if (scr_readw(p) != 0xAA55 || scr_readw(p + 1) != 0x55AA) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(0x55AA, p); scr_writew(0xAA55, p + 1); if (scr_readw(p) != 0x55AA || scr_readw(p + 1) != 0xAA55) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(saved1, p); scr_writew(saved2, p + 1); if (vga_video_type == VIDEO_TYPE_EGAC || vga_video_type == VIDEO_TYPE_VGAC || vga_video_type == VIDEO_TYPE_EGAM) { vga_hardscroll_enabled = vga_hardscroll_user_enable; vga_default_font_height = screen_info.orig_video_points; vga_video_font_height = screen_info.orig_video_points; /* This may be suboptimal but is a safe bet - go with it */ vga_scan_lines = vga_video_font_height * vga_video_num_lines; } vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; if (!vga_init_done) { vgacon_scrollback_startup(); vga_init_done = true; } return display_desc; }
static const char *vgacon_startup(void) { const char *display_desc = NULL; u16 saved1, saved2; volatile u16 *p; if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB || screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) { no_vga: #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; return conswitchp->con_startup(); #else return NULL; #endif } /* boot_params.screen_info reasonably initialized? */ if ((screen_info.orig_video_lines == 0) || (screen_info.orig_video_cols == 0)) goto no_vga; /* VGA16 modes are not handled by VGACON */ if ((screen_info.orig_video_mode == 0x0D) || /* 320x200/4 */ (screen_info.orig_video_mode == 0x0E) || /* 640x200/4 */ (screen_info.orig_video_mode == 0x10) || /* 640x350/4 */ (screen_info.orig_video_mode == 0x12) || /* 640x480/4 */ (screen_info.orig_video_mode == 0x6A)) /* 800x600/4 (VESA) */ goto no_vga; vga_video_num_lines = screen_info.orig_video_lines; vga_video_num_columns = screen_info.orig_video_cols; vgastate.vgabase = NULL; if (screen_info.orig_video_mode == 7) { /* Monochrome display */ vga_vram_base = 0xb0000; vga_video_port_reg = VGA_CRT_IM; vga_video_port_val = VGA_CRT_DM; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { static struct resource ega_console_resource = { .name = "ega", .flags = IORESOURCE_IO, .start = 0x3B0, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_EGAM; vga_vram_size = 0x8000; display_desc = "EGA+"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource mda1_console_resource = { .name = "mda", .flags = IORESOURCE_IO, .start = 0x3B0, .end = 0x3BB }; static struct resource mda2_console_resource = { .name = "mda", .flags = IORESOURCE_IO, .start = 0x3BF, .end = 0x3BF }; vga_video_type = VIDEO_TYPE_MDA; vga_vram_size = 0x2000; display_desc = "*MDA"; request_resource(&ioport_resource, &mda1_console_resource); request_resource(&ioport_resource, &mda2_console_resource); vga_video_font_height = 14; } } else { /* If not, it is color. */ vga_can_do_color = true; vga_vram_base = 0xb8000; vga_video_port_reg = VGA_CRT_IC; vga_video_port_val = VGA_CRT_DC; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { int i; vga_vram_size = 0x8000; if (!screen_info.orig_video_isVGA) { static struct resource ega_console_resource = { .name = "ega", .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_EGAC; display_desc = "EGA"; request_resource(&ioport_resource, &ega_console_resource); } else { static struct resource vga_console_resource = { .name = "vga+", .flags = IORESOURCE_IO, .start = 0x3C0, .end = 0x3DF }; vga_video_type = VIDEO_TYPE_VGAC; display_desc = "VGA+"; request_resource(&ioport_resource, &vga_console_resource); /* * Normalise the palette registers, to point * the 16 screen colours to the first 16 * DAC entries. 
*/ for (i = 0; i < 16; i++) { inb_p(VGA_IS1_RC); outb_p(i, VGA_ATT_W); outb_p(i, VGA_ATT_W); } outb_p(0x20, VGA_ATT_W); /* * Now set the DAC registers back to their * default values */ for (i = 0; i < 16; i++) { outb_p(color_table[i], VGA_PEL_IW); outb_p(default_red[i], VGA_PEL_D); outb_p(default_grn[i], VGA_PEL_D); outb_p(default_blu[i], VGA_PEL_D); } } } else { static struct resource cga_console_resource = { .name = "cga", .flags = IORESOURCE_IO, .start = 0x3D4, .end = 0x3D5 }; vga_video_type = VIDEO_TYPE_CGA; vga_vram_size = 0x2000; display_desc = "*CGA"; request_resource(&ioport_resource, &cga_console_resource); vga_video_font_height = 8; } } vga_vram_base = VGA_MAP_MEM(vga_vram_base, vga_vram_size); vga_vram_end = vga_vram_base + vga_vram_size; /* * Find out if there is a graphics card present. * Are there smarter methods around? */ p = (volatile u16 *) vga_vram_base; saved1 = scr_readw(p); saved2 = scr_readw(p + 1); scr_writew(0xAA55, p); scr_writew(0x55AA, p + 1); if (scr_readw(p) != 0xAA55 || scr_readw(p + 1) != 0x55AA) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(0x55AA, p); scr_writew(0xAA55, p + 1); if (scr_readw(p) != 0x55AA || scr_readw(p + 1) != 0xAA55) { scr_writew(saved1, p); scr_writew(saved2, p + 1); goto no_vga; } scr_writew(saved1, p); scr_writew(saved2, p + 1); if (vga_video_type == VIDEO_TYPE_EGAC || vga_video_type == VIDEO_TYPE_VGAC || vga_video_type == VIDEO_TYPE_EGAM) { vga_hardscroll_enabled = vga_hardscroll_user_enable; vga_default_font_height = screen_info.orig_video_points; vga_video_font_height = screen_info.orig_video_points; /* This may be suboptimal but is a safe bet - go with it */ vga_scan_lines = vga_video_font_height * vga_video_num_lines; } vgacon_xres = screen_info.orig_video_cols * VGA_FONTWIDTH; vgacon_yres = vga_scan_lines; vga_init_done = true; return display_desc; }
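func_before and func_after above differ only at the tail of vgacon_startup(): the patched kernel drops the vgacon_scrollback_startup() call and keeps just the init flag. Reduced to the changed lines (excerpted from the two fields above):

/* func_before — allocate the soft scrollback on first startup: */
if (!vga_init_done) {
    vgacon_scrollback_startup();
    vga_init_done = true;
}

/* func_after — the scrollback feature is gone, only the flag remains: */
vga_init_done = true;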
{'added': [(363, '\tvga_init_done = true;')], 'deleted': [(168, '#ifdef CONFIG_VGACON_SOFT_SCROLLBACK'), (169, '/* software scrollback */'), (170, 'struct vgacon_scrollback_info {'), (171, '\tvoid *data;'), (172, '\tint tail;'), (173, '\tint size;'), (174, '\tint rows;'), (175, '\tint cnt;'), (176, '\tint cur;'), (177, '\tint save;'), (178, '\tint restore;'), (179, '};'), (180, ''), (181, 'static struct vgacon_scrollback_info *vgacon_scrollback_cur;'), (182, 'static struct vgacon_scrollback_info vgacon_scrollbacks[MAX_NR_CONSOLES];'), (183, 'static bool scrollback_persistent = \\'), (184, '\tIS_ENABLED(CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT);'), (185, 'module_param_named(scrollback_persistent, scrollback_persistent, bool, 0000);'), (186, 'MODULE_PARM_DESC(scrollback_persistent, "Enable persistent scrollback for all vga consoles");'), (187, ''), (188, 'static void vgacon_scrollback_reset(int vc_num, size_t reset_size)'), (189, '{'), (190, '\tstruct vgacon_scrollback_info *scrollback = &vgacon_scrollbacks[vc_num];'), (191, ''), (192, '\tif (scrollback->data && reset_size > 0)'), (193, '\t\tmemset(scrollback->data, 0, reset_size);'), (194, ''), (195, '\tscrollback->cnt = 0;'), (196, '\tscrollback->tail = 0;'), (197, '\tscrollback->cur = 0;'), (198, '}'), (199, ''), (200, 'static void vgacon_scrollback_init(int vc_num)'), (201, '{'), (202, '\tint pitch = vga_video_num_columns * 2;'), (203, '\tsize_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;'), (204, '\tint rows = size / pitch;'), (205, '\tvoid *data;'), (206, ''), (207, '\tdata = kmalloc_array(CONFIG_VGACON_SOFT_SCROLLBACK_SIZE, 1024,'), (208, '\t\t\t GFP_NOWAIT);'), (209, ''), (210, '\tvgacon_scrollbacks[vc_num].data = data;'), (211, '\tvgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];'), (212, ''), (213, '\tvgacon_scrollback_cur->rows = rows - 1;'), (214, '\tvgacon_scrollback_cur->size = rows * pitch;'), (215, ''), (216, '\tvgacon_scrollback_reset(vc_num, size);'), (217, '}'), (218, ''), (219, 'static void vgacon_scrollback_switch(int vc_num)'), (220, '{'), (221, '\tif (!scrollback_persistent)'), (222, '\t\tvc_num = 0;'), (223, ''), (224, '\tif (!vgacon_scrollbacks[vc_num].data) {'), (225, '\t\tvgacon_scrollback_init(vc_num);'), (226, '\t} else {'), (227, '\t\tif (scrollback_persistent) {'), (228, '\t\t\tvgacon_scrollback_cur = &vgacon_scrollbacks[vc_num];'), (229, '\t\t} else {'), (230, '\t\t\tsize_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;'), (231, ''), (232, '\t\t\tvgacon_scrollback_reset(vc_num, size);'), (233, '\t\t}'), (234, '\t}'), (235, '}'), (236, ''), (237, 'static void vgacon_scrollback_startup(void)'), (238, '{'), (239, '\tvgacon_scrollback_cur = &vgacon_scrollbacks[0];'), (240, '\tvgacon_scrollback_init(0);'), (241, '}'), (242, ''), (243, 'static void vgacon_scrollback_update(struct vc_data *c, int t, int count)'), (244, '{'), (245, '\tvoid *p;'), (246, ''), (247, '\tif (!vgacon_scrollback_cur->data || !vgacon_scrollback_cur->size ||'), (248, '\t c->vc_num != fg_console)'), (249, '\t\treturn;'), (250, ''), (251, '\tp = (void *) (c->vc_origin + t * c->vc_size_row);'), (252, ''), (253, '\twhile (count--) {'), (254, '\t\tif ((vgacon_scrollback_cur->tail + c->vc_size_row) >'), (255, '\t\t vgacon_scrollback_cur->size)'), (256, '\t\t\tvgacon_scrollback_cur->tail = 0;'), (257, ''), (258, '\t\tscr_memcpyw(vgacon_scrollback_cur->data +'), (259, '\t\t\t vgacon_scrollback_cur->tail,'), (260, '\t\t\t p, c->vc_size_row);'), (261, ''), (262, '\t\tvgacon_scrollback_cur->cnt++;'), (263, '\t\tp += 
c->vc_size_row;'), (264, '\t\tvgacon_scrollback_cur->tail += c->vc_size_row;'), (265, ''), (266, '\t\tif (vgacon_scrollback_cur->tail >= vgacon_scrollback_cur->size)'), (267, '\t\t\tvgacon_scrollback_cur->tail = 0;'), (268, ''), (269, '\t\tif (vgacon_scrollback_cur->cnt > vgacon_scrollback_cur->rows)'), (270, '\t\t\tvgacon_scrollback_cur->cnt = vgacon_scrollback_cur->rows;'), (271, ''), (272, '\t\tvgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;'), (273, '\t}'), (274, '}'), (275, ''), (276, 'static void vgacon_restore_screen(struct vc_data *c)'), (277, '{'), (278, '\tc->vc_origin = c->vc_visible_origin;'), (279, '\tvgacon_scrollback_cur->save = 0;'), (280, ''), (281, '\tif (!vga_is_gfx && !vgacon_scrollback_cur->restore) {'), (282, '\t\tscr_memcpyw((u16 *) c->vc_origin, (u16 *) c->vc_screenbuf,'), (283, '\t\t\t c->vc_screenbuf_size > vga_vram_size ?'), (284, '\t\t\t vga_vram_size : c->vc_screenbuf_size);'), (285, '\t\tvgacon_scrollback_cur->restore = 1;'), (286, '\t\tvgacon_scrollback_cur->cur = vgacon_scrollback_cur->cnt;'), (287, '\t}'), (288, '}'), (289, ''), (290, 'static void vgacon_scrolldelta(struct vc_data *c, int lines)'), (291, '{'), (292, '\tint start, end, count, soff;'), (293, ''), (294, '\tif (!lines) {'), (295, '\t\tvgacon_restore_screen(c);'), (296, '\t\treturn;'), (297, '\t}'), (298, ''), (299, '\tif (!vgacon_scrollback_cur->data)'), (300, '\t\treturn;'), (301, ''), (302, '\tif (!vgacon_scrollback_cur->save) {'), (303, '\t\tvgacon_cursor(c, CM_ERASE);'), (304, '\t\tvgacon_save_screen(c);'), (305, '\t\tc->vc_origin = (unsigned long)c->vc_screenbuf;'), (306, '\t\tvgacon_scrollback_cur->save = 1;'), (307, '\t}'), (308, ''), (309, '\tvgacon_scrollback_cur->restore = 0;'), (310, '\tstart = vgacon_scrollback_cur->cur + lines;'), (311, '\tend = start + abs(lines);'), (312, ''), (313, '\tif (start < 0)'), (314, '\t\tstart = 0;'), (315, ''), (316, '\tif (start > vgacon_scrollback_cur->cnt)'), (317, '\t\tstart = vgacon_scrollback_cur->cnt;'), (318, ''), (319, '\tif (end < 0)'), (320, '\t\tend = 0;'), (321, ''), (322, '\tif (end > vgacon_scrollback_cur->cnt)'), (323, '\t\tend = vgacon_scrollback_cur->cnt;'), (324, ''), (325, '\tvgacon_scrollback_cur->cur = start;'), (326, '\tcount = end - start;'), (327, '\tsoff = vgacon_scrollback_cur->tail -'), (328, '\t\t((vgacon_scrollback_cur->cnt - end) * c->vc_size_row);'), (329, '\tsoff -= count * c->vc_size_row;'), (330, ''), (331, '\tif (soff < 0)'), (332, '\t\tsoff += vgacon_scrollback_cur->size;'), (333, ''), (334, '\tcount = vgacon_scrollback_cur->cnt - start;'), (335, ''), (336, '\tif (count > c->vc_rows)'), (337, '\t\tcount = c->vc_rows;'), (338, ''), (339, '\tif (count) {'), (340, '\t\tint copysize;'), (341, ''), (342, '\t\tint diff = c->vc_rows - count;'), (343, '\t\tvoid *d = (void *) c->vc_visible_origin;'), (344, '\t\tvoid *s = (void *) c->vc_screenbuf;'), (345, ''), (346, '\t\tcount *= c->vc_size_row;'), (347, '\t\t/* how much memory to end of buffer left? 
*/'), (348, '\t\tcopysize = min(count, vgacon_scrollback_cur->size - soff);'), (349, '\t\tscr_memcpyw(d, vgacon_scrollback_cur->data + soff, copysize);'), (350, '\t\td += copysize;'), (351, '\t\tcount -= copysize;'), (352, ''), (353, '\t\tif (count) {'), (354, '\t\t\tscr_memcpyw(d, vgacon_scrollback_cur->data, count);'), (355, '\t\t\td += count;'), (356, '\t\t}'), (357, ''), (358, '\t\tif (diff)'), (359, '\t\t\tscr_memcpyw(d, s, diff * c->vc_size_row);'), (360, '\t} else'), (361, '\t\tvgacon_cursor(c, CM_MOVE);'), (362, '}'), (363, ''), (364, 'static void vgacon_flush_scrollback(struct vc_data *c)'), (365, '{'), (366, '\tsize_t size = CONFIG_VGACON_SOFT_SCROLLBACK_SIZE * 1024;'), (367, ''), (368, '\tvgacon_scrollback_reset(c->vc_num, size);'), (369, '}'), (370, '#else'), (371, '#define vgacon_scrollback_startup(...) do { } while (0)'), (372, '#define vgacon_scrollback_init(...) do { } while (0)'), (373, '#define vgacon_scrollback_update(...) do { } while (0)'), (374, '#define vgacon_scrollback_switch(...) do { } while (0)'), (375, ''), (389, 'static void vgacon_flush_scrollback(struct vc_data *c)'), (390, '{'), (391, '}'), (392, '#endif /* CONFIG_VGACON_SOFT_SCROLLBACK */'), (393, ''), (576, '\tif (!vga_init_done) {'), (577, '\t\tvgacon_scrollback_startup();'), (578, '\t\tvga_init_done = true;'), (579, '\t}'), (872, '\tvgacon_scrollback_switch(c->vc_num);'), (1389, '\t\tvgacon_scrollback_update(c, t, lines);'), (1453, '\t.con_flush_scrollback = vgacon_flush_scrollback,')]}
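The bulk of this diff is the deleted soft-scrollback implementation; per the cve_id/cwe_id fields below (CVE-2020-28097, CWE-125), the vulnerability was an out-of-bounds read in this scrollback path, and the fix removes the feature rather than patching it. Below is a self-contained model of the deleted vgacon_scrolldelta() offset arithmetic, including the clamping and ring wrap that code performed; all sizes are invented for the demo.

/* User-space model of the deleted vgacon_scrolldelta() offset math:
   'cnt' rows sit in a ring buffer of 'size' bytes whose write position
   is 'tail'; scrolling back by 'lines' selects a window of saved rows. */
#include <algorithm>
#include <cstdio>
#include <cstdlib>

int main() {
    const int size_row = 160;        /* invented: bytes per row (80 cols * 2) */
    const int size = 32 * size_row;  /* invented: ring buffer capacity */
    int cnt = 32;                    /* rows currently saved */
    int tail = 5 * size_row;         /* next write offset (already wrapped) */
    int cur = 32;                    /* scrollback position (== cnt: live view) */

    int lines = -8;                  /* user scrolls back eight lines */
    int start = std::clamp(cur + lines, 0, cnt);
    int end = std::clamp(start + std::abs(lines), 0, cnt);
    cur = start;

    /* byte offset of the first saved row to show, walking back from tail */
    int soff = tail - (cnt - end) * size_row - (end - start) * size_row;
    if (soff < 0)
        soff += size;  /* wrap to the top of the ring */

    std::printf("start=%d end=%d soff=%d (ring size %d)\n",
                start, end, soff, size);
    return 0;
}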
num_lines_added: 1
num_lines_deleted: 220
num_lines_in_file: 906
num_tokens_in_file: 5,743
num_lines_in_method: 152
num_tokens_in_method: 840
method_complexity: 25
repo: https://github.com/torvalds/linux
cve_id: CVE-2020-28097
cwe_id: CWE-125

id: 1,075
file_name: lsh_projection.cc
programming_language: C++
method_name: tflite::ops::builtin::lsh_projection::Resize
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // LSH Projection projects an input to a bit vector via locality sensitive // hashing. // // Options: // Sparse: // Computed bit vector is considered to be sparse. // Each output element is an int32 made up by multiple bits computed from // hash functions. // // Dense: // Computed bit vector is considered to be dense. Each output element is // either 0 or 1 that represents a bit. // // Input: // Tensor[0]: Hash functions. Dim.size == 2, DataType: Float. // Tensor[0].Dim[0]: Num of hash functions. // Tensor[0].Dim[1]: Num of projected output bits generated by // each hash function. // In sparse case, Tensor[0].Dim[1] + ceil( log2(Tensor[0].Dim[0] )) <= 32. // // Tensor[1]: Input. Dim.size >= 1, No restriction on DataType. // Tensor[2]: Optional, Weight. Dim.size == 1, DataType: Float. // If not set, each element of input is considered to have same // weight of 1.0 Tensor[1].Dim[0] == Tensor[2].Dim[0] // // Output: // Sparse: // Output.Dim == { Tensor[0].Dim[0] } // A tensor of int32 that represents hash signatures, // // NOTE: To avoid collisions across hash functions, an offset value of // k * (1 << Tensor[0].Dim[1]) will be added to each signature, // k is the index of the hash function. // Dense: // Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } // A flattened tensor represents projected bit vectors. #include <stddef.h> #include <stdint.h> #include <cstring> #include <memory> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include <farmhash.h> namespace tflite { namespace ops { namespace builtin { namespace lsh_projection { TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* hash = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2); // Support up to 32 bits. 
TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32); const TfLiteTensor* input = GetInput(context, node, 1); TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (NumInputs(node) == 3) { const TfLiteTensor* weight = GetInput(context, node, 2); TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0), SizeOfDimension(input, 0)); } TfLiteTensor* output = GetOutput(context, node, 0); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1); switch (params->type) { case kTfLiteLshProjectionSparse: outputSize->data[0] = SizeOfDimension(hash, 0); break; case kTfLiteLshProjectionDense: outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1); break; default: return kTfLiteError; } return context->ResizeTensor(context, output, outputSize); } // Compute sign bit of dot product of hash(seed, input) and weight. // NOTE: use float as seed, and convert it to double as a temporary solution // to match the trained model. This is going to be changed once the new // model is trained in an optimized method. // int RunningSignBit(const TfLiteTensor* input, const TfLiteTensor* weight, float seed) { double score = 0.0; int input_item_bytes = input->bytes / SizeOfDimension(input, 0); char* input_ptr = input->data.raw; const size_t seed_size = sizeof(float); const size_t key_bytes = sizeof(float) + input_item_bytes; std::unique_ptr<char[]> key(new char[key_bytes]); const float* weight_ptr = GetTensorData<float>(weight); for (int i = 0; i < SizeOfDimension(input, 0); ++i) { // Create running hash id and value for current dimension. memcpy(key.get(), &seed, seed_size); memcpy(key.get() + seed_size, input_ptr, input_item_bytes); int64_t hash_signature = ::util::Fingerprint64(key.get(), key_bytes); double running_value = static_cast<double>(hash_signature); input_ptr += input_item_bytes; if (weight_ptr == nullptr) { score += running_value; } else { score += weight_ptr[i] * running_value; } } return (score > 0) ? 1 : 0; } void SparseLshProjection(const TfLiteTensor* hash, const TfLiteTensor* input, const TfLiteTensor* weight, int32_t* out_buf) { int num_hash = SizeOfDimension(hash, 0); int num_bits = SizeOfDimension(hash, 1); for (int i = 0; i < num_hash; i++) { int32_t hash_signature = 0; for (int j = 0; j < num_bits; j++) { float seed = GetTensorData<float>(hash)[i * num_bits + j]; int bit = RunningSignBit(input, weight, seed); hash_signature = (hash_signature << 1) | bit; } *out_buf++ = hash_signature + i * (1 << num_bits); } } void DenseLshProjection(const TfLiteTensor* hash, const TfLiteTensor* input, const TfLiteTensor* weight, int32_t* out_buf) { int num_hash = SizeOfDimension(hash, 0); int num_bits = SizeOfDimension(hash, 1); for (int i = 0; i < num_hash; i++) { for (int j = 0; j < num_bits; j++) { float seed = GetTensorData<float>(hash)[i * num_bits + j]; int bit = RunningSignBit(input, weight, seed); *out_buf++ = bit; } } } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); int32_t* out_buf = GetOutput(context, node, 0)->data.i32; const TfLiteTensor* hash = GetInput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 1); const TfLiteTensor* weight = NumInputs(node) == 2 ? 
nullptr : GetInput(context, node, 2); switch (params->type) { case kTfLiteLshProjectionDense: DenseLshProjection(hash, input, weight, out_buf); break; case kTfLiteLshProjectionSparse: SparseLshProjection(hash, input, weight, out_buf); break; default: return kTfLiteError; } return kTfLiteOk; } } // namespace lsh_projection TfLiteRegistration* Register_LSH_PROJECTION() { static TfLiteRegistration r = {nullptr, nullptr, lsh_projection::Resize, lsh_projection::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // LSH Projection projects an input to a bit vector via locality sensitive // hashing. // // Options: // Sparse: // Computed bit vector is considered to be sparse. // Each output element is an int32 made up by multiple bits computed from // hash functions. // // Dense: // Computed bit vector is considered to be dense. Each output element is // either 0 or 1 that represents a bit. // // Input: // Tensor[0]: Hash functions. Dim.size == 2, DataType: Float. // Tensor[0].Dim[0]: Num of hash functions. // Tensor[0].Dim[1]: Num of projected output bits generated by // each hash function. // In sparse case, Tensor[0].Dim[1] + ceil( log2(Tensor[0].Dim[0] )) <= 32. // // Tensor[1]: Input. Dim.size >= 1, No restriction on DataType. // Tensor[2]: Optional, Weight. Dim.size == 1, DataType: Float. // If not set, each element of input is considered to have same // weight of 1.0 Tensor[1].Dim[0] == Tensor[2].Dim[0] // // Output: // Sparse: // Output.Dim == { Tensor[0].Dim[0] } // A tensor of int32 that represents hash signatures, // // NOTE: To avoid collisions across hash functions, an offset value of // k * (1 << Tensor[0].Dim[1]) will be added to each signature, // k is the index of the hash function. // Dense: // Output.Dim == { Tensor[0].Dim[0] * Tensor[0].Dim[1] } // A flattened tensor represents projected bit vectors. #include <stddef.h> #include <stdint.h> #include <cstring> #include <memory> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/kernel_util.h" #include <farmhash.h> namespace tflite { namespace ops { namespace builtin { namespace lsh_projection { TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* hash; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash)); TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2); // Support up to 32 bits. 
TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input)); TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (NumInputs(node) == 3) { const TfLiteTensor* weight; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &weight)); TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0), SizeOfDimension(input, 0)); } TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1); switch (params->type) { case kTfLiteLshProjectionSparse: outputSize->data[0] = SizeOfDimension(hash, 0); break; case kTfLiteLshProjectionDense: outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1); break; default: return kTfLiteError; } return context->ResizeTensor(context, output, outputSize); } // Compute sign bit of dot product of hash(seed, input) and weight. // NOTE: use float as seed, and convert it to double as a temporary solution // to match the trained model. This is going to be changed once the new // model is trained in an optimized method. // int RunningSignBit(const TfLiteTensor* input, const TfLiteTensor* weight, float seed) { double score = 0.0; int input_item_bytes = input->bytes / SizeOfDimension(input, 0); char* input_ptr = input->data.raw; const size_t seed_size = sizeof(float); const size_t key_bytes = sizeof(float) + input_item_bytes; std::unique_ptr<char[]> key(new char[key_bytes]); const float* weight_ptr = GetTensorData<float>(weight); for (int i = 0; i < SizeOfDimension(input, 0); ++i) { // Create running hash id and value for current dimension. memcpy(key.get(), &seed, seed_size); memcpy(key.get() + seed_size, input_ptr, input_item_bytes); int64_t hash_signature = ::util::Fingerprint64(key.get(), key_bytes); double running_value = static_cast<double>(hash_signature); input_ptr += input_item_bytes; if (weight_ptr == nullptr) { score += running_value; } else { score += weight_ptr[i] * running_value; } } return (score > 0) ? 
1 : 0; } void SparseLshProjection(const TfLiteTensor* hash, const TfLiteTensor* input, const TfLiteTensor* weight, int32_t* out_buf) { int num_hash = SizeOfDimension(hash, 0); int num_bits = SizeOfDimension(hash, 1); for (int i = 0; i < num_hash; i++) { int32_t hash_signature = 0; for (int j = 0; j < num_bits; j++) { float seed = GetTensorData<float>(hash)[i * num_bits + j]; int bit = RunningSignBit(input, weight, seed); hash_signature = (hash_signature << 1) | bit; } *out_buf++ = hash_signature + i * (1 << num_bits); } } void DenseLshProjection(const TfLiteTensor* hash, const TfLiteTensor* input, const TfLiteTensor* weight, int32_t* out_buf) { int num_hash = SizeOfDimension(hash, 0); int num_bits = SizeOfDimension(hash, 1); for (int i = 0; i < num_hash; i++) { for (int j = 0; j < num_bits; j++) { float seed = GetTensorData<float>(hash)[i * num_bits + j]; int bit = RunningSignBit(input, weight, seed); *out_buf++ = bit; } } } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); TfLiteTensor* out_tensor; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor)); int32_t* out_buf = out_tensor->data.i32; const TfLiteTensor* hash; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash)); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input)); const TfLiteTensor* weight = NumInputs(node) == 2 ? nullptr : GetInput(context, node, 2); switch (params->type) { case kTfLiteLshProjectionDense: DenseLshProjection(hash, input, weight, out_buf); break; case kTfLiteLshProjectionSparse: SparseLshProjection(hash, input, weight, out_buf); break; default: return kTfLiteError; } return kTfLiteOk; } } // namespace lsh_projection TfLiteRegistration* Register_LSH_PROJECTION() { static TfLiteRegistration r = {nullptr, nullptr, lsh_projection::Resize, lsh_projection::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* hash = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2); // Support up to 32 bits. TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32); const TfLiteTensor* input = GetInput(context, node, 1); TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (NumInputs(node) == 3) { const TfLiteTensor* weight = GetInput(context, node, 2); TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0), SizeOfDimension(input, 0)); } TfLiteTensor* output = GetOutput(context, node, 0); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1); switch (params->type) { case kTfLiteLshProjectionSparse: outputSize->data[0] = SizeOfDimension(hash, 0); break; case kTfLiteLshProjectionDense: outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1); break; default: return kTfLiteError; } return context->ResizeTensor(context, output, outputSize); }
TfLiteStatus Resize(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data); TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* hash; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash)); TF_LITE_ENSURE_EQ(context, NumDimensions(hash), 2); // Support up to 32 bits. TF_LITE_ENSURE(context, SizeOfDimension(hash, 1) <= 32); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input)); TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (NumInputs(node) == 3) { const TfLiteTensor* weight; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &weight)); TF_LITE_ENSURE_EQ(context, NumDimensions(weight), 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(weight, 0), SizeOfDimension(input, 0)); } TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TfLiteIntArray* outputSize = TfLiteIntArrayCreate(1); switch (params->type) { case kTfLiteLshProjectionSparse: outputSize->data[0] = SizeOfDimension(hash, 0); break; case kTfLiteLshProjectionDense: outputSize->data[0] = SizeOfDimension(hash, 0) * SizeOfDimension(hash, 1); break; default: return kTfLiteError; } return context->ResizeTensor(context, output, outputSize); }
{'added': [(76, ' const TfLiteTensor* hash;'), (77, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash));'), (82, ' const TfLiteTensor* input;'), (83, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));'), (87, ' const TfLiteTensor* weight;'), (88, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &weight));'), (94, ' TfLiteTensor* output;'), (95, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (177, ' TfLiteTensor* out_tensor;'), (178, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &out_tensor));'), (179, ' int32_t* out_buf = out_tensor->data.i32;'), (180, ' const TfLiteTensor* hash;'), (181, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &hash));'), (182, ' const TfLiteTensor* input;'), (183, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input));')], 'deleted': [(76, ' const TfLiteTensor* hash = GetInput(context, node, 0);'), (81, ' const TfLiteTensor* input = GetInput(context, node, 1);'), (85, ' const TfLiteTensor* weight = GetInput(context, node, 2);'), (91, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (173, ' int32_t* out_buf = GetOutput(context, node, 0)->data.i32;'), (174, ' const TfLiteTensor* hash = GetInput(context, node, 0);'), (175, ' const TfLiteTensor* input = GetInput(context, node, 1);')]}
15
7
129
1,007
30
272
5
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
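The fix recorded in this entry (CVE-2020-15211) replaces TF Lite's unchecked GetInput/GetOutput accessors with GetInputSafe/GetOutputSafe, which return a TfLiteStatus so that TF_LITE_ENSURE_OK can bail out of Resize and Eval instead of dereferencing a tensor pointer obtained from a bad index. A sketch of that checked-accessor idea in isolation; the types and names below are simplified stand-ins, not the real TF Lite API (which lives in tensorflow/lite/kernels/kernel_util.h):

#include <stddef.h>

/* Hypothetical, simplified stand-ins for TfLiteTensor / TfLiteNode. */
typedef struct { int dummy; } Tensor;

typedef struct {
    const Tensor **inputs;   /* array of input tensor pointers */
    int num_inputs;
} Node;

typedef enum { STATUS_OK, STATUS_ERROR } Status;

/* Checked accessor: failure is reported through the return status,
 * never by handing back a pointer the caller might dereference. */
static Status get_input_checked(const Node *node, int index,
                                const Tensor **out)
{
    if (node == NULL || out == NULL)
        return STATUS_ERROR;
    if (index < 0 || index >= node->num_inputs)
        return STATUS_ERROR;
    if (node->inputs[index] == NULL)
        return STATUS_ERROR;
    *out = node->inputs[index];
    return STATUS_OK;
}

The design point mirrors the diff: every call site is forced onto an explicit error path (TF_LITE_ENSURE_OK in the real code), so a malformed model cannot turn a bad tensor index into an out-of-bounds read (CWE-125).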
2,110
ipv6_routing.c
C
ipv6AddRoute
/** * @file ipv6_routing.c * @brief IPv6 routing * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.0 **/ //Switch to the appropriate trace level #define TRACE_LEVEL IPV6_TRACE_LEVEL //Dependencies #include <limits.h> #include "core/net.h" #include "core/ip.h" #include "ipv6/ipv6.h" #include "ipv6/ipv6_misc.h" #include "ipv6/ipv6_routing.h" #include "ipv6/icmpv6.h" #include "ipv6/ndp.h" #include "debug.h" //Check TCP/IP stack configuration #if (IPV6_SUPPORT == ENABLED && IPV6_ROUTING_SUPPORT == ENABLED) //IPv6 routing table static Ipv6RoutingTableEntry ipv6RoutingTable[IPV6_ROUTING_TABLE_SIZE]; /** * @brief Initialize IPv6 routing table * @return Error code **/ error_t ipv6InitRouting(void) { //Clear the routing table osMemset(ipv6RoutingTable, 0, sizeof(ipv6RoutingTable)); //Successful initialization return NO_ERROR; } /** * @brief Enable routing for the specified interface * @param[in] interface Underlying network interface * @param[in] enable When the flag is set to TRUE, routing is enabled on the * interface and the router can forward packets to or from the interface * @return Error code **/ error_t ipv6EnableRouting(NetInterface *interface, bool_t enable) { //Check parameters if(interface == NULL) return ERROR_INVALID_PARAMETER; //Get exclusive access osAcquireMutex(&netMutex); //Enable or disable routing interface->ipv6Context.isRouter = enable; //Release exclusive access osReleaseMutex(&netMutex); //Successful processing return NO_ERROR; } /** * @brief Add a new entry in the IPv6 routing table * @param[in] prefix Network destination * @param[in] prefixLen Length of the prefix, in bits * @param[in] interface Network interface where to forward the packet * @param[in] nextHop IPv6 address of the next hop * @param[in] metric Metric value * @return Error code **/ error_t ipv6AddRoute(const Ipv6Addr *prefix, uint_t prefixLen, NetInterface *interface, const Ipv6Addr *nextHop, uint_t metric) { error_t error; uint_t i; Ipv6RoutingTableEntry *entry; Ipv6RoutingTableEntry *firstFreeEntry; //Check parameters if(prefix == NULL || interface == NULL) return ERROR_INVALID_PARAMETER; //Keep track of the first free entry firstFreeEntry = NULL; //Get exclusive access osAcquireMutex(&netMutex); //Loop through routing table entries for(i = 0; i < IPV6_ROUTING_TABLE_SIZE; i++) { //Point to the current entry entry = &ipv6RoutingTable[i]; //Valid entry? 
if(entry->valid) { //Check prefix length if(entry->prefixLen == prefixLen) { //Check whether the current entry matches the specified destination if(ipv6CompPrefix(&entry->prefix, prefix, prefixLen)) break; } } else { //Keep track of the first free entry if(firstFreeEntry == NULL) firstFreeEntry = entry; } } //If the routing table does not contain the specified destination, //then a new entry should be created if(i >= IPV6_ROUTING_TABLE_SIZE) entry = firstFreeEntry; //Check whether the routing table runs out of space if(entry != NULL) { //Network destination entry->prefix = *prefix; entry->prefixLen = prefixLen; //Interface where to forward the packet entry->interface = interface; //Address of the next hop if(nextHop != NULL) entry->nextHop = *nextHop; else entry->nextHop = IPV6_UNSPECIFIED_ADDR; //Metric value entry->metric = metric; //The entry is now valid entry->valid = TRUE; //Sucessful processing error = NO_ERROR; } else { //The routing table is full error = ERROR_FAILURE; } //Release exclusive access osReleaseMutex(&netMutex); //Return status code return error; } /** * @brief Remove an entry from the IPv6 routing table * @param[in] prefix Network destination * @param[in] prefixLen Length of the prefix, in bits * @return Error code **/ error_t ipv6DeleteRoute(const Ipv6Addr *prefix, uint_t prefixLen) { error_t error; uint_t i; Ipv6RoutingTableEntry *entry; //Initialize status code error = ERROR_NOT_FOUND; //Get exclusive access osAcquireMutex(&netMutex); //Loop through routing table entries for(i = 0; i < IPV6_ROUTING_TABLE_SIZE; i++) { //Point to the current entry entry = &ipv6RoutingTable[i]; //Valid entry? if(entry->valid) { //Check prefix length if(entry->prefixLen == prefixLen) { //Check whether the current entry matches the specified destination if(ipv6CompPrefix(&entry->prefix, prefix, prefixLen)) { //Delete current entry entry->valid = FALSE; //The route was successfully deleted from the routing table error = NO_ERROR; } } } } //Release exclusive access osReleaseMutex(&netMutex); //Return status code return error; } /** * @brief Delete all routes from the IPv6 routing table * @return Error code **/ error_t ipv6DeleteAllRoutes(void) { //Get exclusive access osAcquireMutex(&netMutex); //Clear the routing table osMemset(ipv6RoutingTable, 0, sizeof(ipv6RoutingTable)); //Release exclusive access osReleaseMutex(&netMutex); //Successful processing return NO_ERROR; } /** * @brief Forward an IPv6 packet * @param[in] srcInterface Network interface on which the packet was received * @param[in] ipPacket Multi-part buffer that holds the IPv6 packet to forward * @param[in] ipPacketOffset Offset to the first byte of the IPv6 packet * @return Error code **/ error_t ipv6ForwardPacket(NetInterface *srcInterface, NetBuffer *ipPacket, size_t ipPacketOffset) { error_t error; uint_t i; uint_t metric; uint_t prefixLen; bool_t match; size_t length; size_t destOffset; NetInterface *destInterface; NetBuffer *destBuffer; Ipv6Header *ipHeader; Ipv6RoutingTableEntry *entry; Ipv6Addr destIpAddr; #if (ETH_SUPPORT == ENABLED) NetInterface *physicalInterface; #endif //Silently drop any IP packets received on an interface that has //not been assigned a valid link-local address if(ipv6GetLinkLocalAddrState(srcInterface) != IPV6_ADDR_STATE_PREFERRED) return ERROR_NOT_CONFIGURED; //If routing is not enabled on the interface, then the router cannot //forward packets from the interface if(!srcInterface->ipv6Context.isRouter) return ERROR_FAILURE; //Calculate the length of the IPv6 packet length = 
netBufferGetLength(ipPacket) - ipPacketOffset; //Ensure the packet length is greater than 40 bytes if(length < sizeof(Ipv6Header)) return ERROR_INVALID_LENGTH; //Point to the IPv6 header ipHeader = netBufferAt(ipPacket, ipPacketOffset); //Sanity check if(ipHeader == NULL) return ERROR_FAILURE; //An IPv6 packet with a source address of unspecified must never be //forwarded by an IPv6 router (refer to RFC section 3513 2.5.2) if(ipv6CompAddr(&ipHeader->srcAddr, &IPV6_UNSPECIFIED_ADDR)) return ERROR_INVALID_ADDRESS; //The unspecified address must not be used as the destination address //of IPv6 packets (refer to RFC section 3513 2.5.2) if(ipv6CompAddr(&ipHeader->destAddr, &IPV6_UNSPECIFIED_ADDR)) return ERROR_INVALID_ADDRESS; //An IPv6 packet with a destination address of loopback must never be //forwarded by an IPv6 router (refer to RFC 3513 section 2.5.3) if(ipv6CompAddr(&ipHeader->destAddr, &IPV6_LOOPBACK_ADDR)) return ERROR_INVALID_ADDRESS; //Check whether the destination address is a link-local address if(ipv6IsLinkLocalUnicastAddr(&ipHeader->destAddr)) { //Forward the packet on the same network interface destInterface = srcInterface; //Next hop destIpAddr = ipHeader->destAddr; } else { //Lowest metric value metric = UINT_MAX; //Longest prefix length prefixLen = 0; //Outgoing network interface destInterface = NULL; //Route determination process for(i = 0; i < IPV6_ROUTING_TABLE_SIZE; i++) { //Point to the current entry entry = &ipv6RoutingTable[i]; //Valid entry? if(entry->valid && entry->interface != NULL) { //Clear flag match = FALSE; //Do not forward any IP packets to an interface that has not //been assigned a valid link-local address... if(ipv6GetLinkLocalAddrState(entry->interface) == IPV6_ADDR_STATE_PREFERRED) { //If routing is enabled on the interface, then the router //can forward packets to the interface if(entry->interface->ipv6Context.isRouter) { //Compare the destination address with the current entry for a match if(ipv6CompPrefix(&ipHeader->destAddr, &entry->prefix, entry->prefixLen)) { //The longest matching route is the most specific route to the //destination IPv6 address... if(entry->prefixLen > prefixLen) { //Give the current route the higher precedence match = TRUE; } else if(entry->prefixLen == prefixLen) { //If multiple entries with the longest match are found, the //router uses the lowest metric to select the best route if(entry->metric < metric) { //Give the current route the higher precedence match = TRUE; } } } } } //Matching entry? if(match) { //Select the current route metric = entry->metric; prefixLen = entry->prefixLen; //Outgoing interface on which to forward the packet destInterface = entry->interface; //Next hop if(!ipv6CompAddr(&entry->nextHop, &IPV6_UNSPECIFIED_ADDR)) destIpAddr = entry->nextHop; else destIpAddr = ipHeader->destAddr; } } } } //No route to the destination? 
if(destInterface == NULL) { //A Destination Unreachable message should be generated by a router //in response to a packet that cannot be delivered icmpv6SendErrorMessage(srcInterface, ICMPV6_TYPE_DEST_UNREACHABLE, ICMPV6_CODE_NO_ROUTE_TO_DEST, 0, ipPacket, ipPacketOffset); //Exit immediately return ERROR_NO_ROUTE; } //Check whether the length of the IPv6 packet is larger than the link MTU if(length > destInterface->ipv6Context.linkMtu) { //A Packet Too Big must be sent by a router in response to a packet //that it cannot forward because the packet is larger than the MTU //of the outgoing link icmpv6SendErrorMessage(srcInterface, ICMPV6_TYPE_PACKET_TOO_BIG, 0, destInterface->ipv6Context.linkMtu, ipPacket, ipPacketOffset); //Exit immediately return ERROR_INVALID_LENGTH; } //Check whether the packet is explicitly addressed to the router itself if(!ipv6CheckDestAddr(destInterface, &ipHeader->destAddr)) { //Valid unicast address? if(!ipv6IsMulticastAddr(&ipHeader->destAddr)) { //Process IPv6 packet //ipv6ProcessPacket(destInterface, ipPacket, ipPacketOffset); //Exit immediately return NO_ERROR; } } //Check whether the IPv6 packet is about to be sent out the interface //on which it was received if(destInterface == srcInterface) { #if (NDP_SUPPORT == ENABLED) //A router should send a Redirect message whenever it forwards a packet //that is not explicitly addressed to itself in which the source address //identifies a neighbor, and if(ipv6IsOnLink(srcInterface, &ipHeader->srcAddr)) { //The router determines that a better first-hop node resides on the //same link as the sending node for the destination address of the //packet being forwarded, and if(ipv6IsOnLink(destInterface, &destIpAddr)) { //The destination address of the packet is not a multicast address if(!ipv6IsMulticastAddr(&ipHeader->destAddr)) { //Transmit a Redirect message ndpSendRedirect(srcInterface, &destIpAddr, ipPacket, ipPacketOffset); } } } #endif } else { //Check whether the scope of the source address is smaller than the //scope of the destination address if(ipv6GetAddrScope(&ipHeader->srcAddr) < ipv6GetAddrScope(&ipHeader->destAddr)) { //A Destination Unreachable message should be generated by a router //in response to a packet that cannot be delivered without leaving //the scope of the source address icmpv6SendErrorMessage(srcInterface, ICMPV6_TYPE_DEST_UNREACHABLE, ICMPV6_CODE_BEYOND_SCOPE_OF_SRC_ADDR, 0, ipPacket, ipPacketOffset); //Exit immediately return ERROR_INVALID_ADDRESS; } } //Hop Limit exceeded in transit? if(ipHeader->hopLimit <= 1) { //If a router receives a packet with a Hop Limit of zero, or if a router //decrements a packet's Hop Limit to zero, it must discard the packet //and originate an ICMPv6 Time Exceeded message icmpv6SendErrorMessage(srcInterface, ICMPV6_TYPE_TIME_EXCEEDED, ICMPV6_CODE_HOP_LIMIT_EXCEEDED, 0, ipPacket, ipPacketOffset); //Exit immediately return ERROR_FAILURE; } //The Hop-by-Hop Options header, when present, must immediately follow //the IPv6 header. 
Its presence is indicated by the value zero in the //Next Header field of the IPv6 header if(ipHeader->nextHeader == IPV6_HOP_BY_HOP_OPT_HEADER) { //Point to the extension header size_t headerOffset = ipPacketOffset + sizeof(Ipv6Header); //Calculate the offset of the Next Header field size_t nextHeaderOffset = ipPacketOffset + &ipHeader->nextHeader - (uint8_t *) ipHeader; //The Hop-by-Hop Options header is used to carry optional information //that must be examined by every node along a packet's delivery path error = ipv6ParseHopByHopOptHeader(srcInterface, ipPacket, ipPacketOffset, &headerOffset, &nextHeaderOffset); //Any error while processing the extension header? if(error) return error; } //Allocate a buffer to hold the IPv6 packet destBuffer = ethAllocBuffer(length, &destOffset); //Successful memory allocation? if(destBuffer != NULL) { //Copy IPv6 header error = netBufferCopy(destBuffer, destOffset, ipPacket, ipPacketOffset, length); //Check status code if(!error) { //Point to the IPv6 header ipHeader = netBufferAt(destBuffer, destOffset); //Every time a router forwards a packet, it decrements the Hop Limit field ipHeader->hopLimit--; #if (ETH_SUPPORT == ENABLED) //Point to the physical interface physicalInterface = nicGetPhysicalInterface(destInterface); //Ethernet interface? if(physicalInterface->nicDriver != NULL && physicalInterface->nicDriver->type == NIC_TYPE_ETHERNET) { MacAddr destMacAddr; NetTxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; //Destination IPv6 address if(ipv6CompAddr(&destIpAddr, &IPV6_UNSPECIFIED_ADDR)) destIpAddr = ipHeader->destAddr; //Check whether the destination IPv6 address is a multicast address? if(ipv6IsMulticastAddr(&destIpAddr)) { //Map IPv6 multicast address to MAC-layer multicast address error = ipv6MapMulticastAddrToMac(&destIpAddr, &destMacAddr); } else { //Resolve host address using Neighbor Discovery protocol error = ndpResolve(destInterface, &destIpAddr, &destMacAddr); } //Successful address resolution? if(!error) { //Debug message TRACE_INFO("Forwarding IPv6 packet to %s (%" PRIuSIZE " bytes)...\r\n", destInterface->name, length); //Dump IP header contents for debugging purpose ipv6DumpHeader(ipHeader); //Send Ethernet frame error = ethSendFrame(destInterface, NULL, &destMacAddr, ETH_TYPE_IPV6, destBuffer, destOffset, &ancillary); } //Address resolution is in progress? else if(error == ERROR_IN_PROGRESS) { //Debug message TRACE_INFO("Enqueuing IPv6 packet (%" PRIuSIZE " bytes)...\r\n", length); //Dump IP header contents for debugging purpose ipv6DumpHeader(ipHeader); //Enqueue packets waiting for address resolution error = ndpEnqueuePacket(srcInterface, destInterface, &destIpAddr, destBuffer, destOffset, &ancillary); } //Address resolution failed? else { //Debug message TRACE_WARNING("Cannot map IPv6 address to Ethernet address!\r\n"); } } else #endif #if (PPP_SUPPORT == ENABLED) //PPP interface? if(destInterface->nicDriver != NULL && destInterface->nicDriver->type == NIC_TYPE_PPP) { //Debug message TRACE_INFO("Forwarding IPv6 packet to %s (%" PRIuSIZE " bytes)...\r\n", destInterface->name, length); //Dump IP header contents for debugging purpose ipv6DumpHeader(ipHeader); //Send PPP frame error = pppSendFrame(destInterface, destBuffer, destOffset, PPP_PROTOCOL_IPV6); } else #endif //6LoWPAN interface? 
if(destInterface->nicDriver != NULL && destInterface->nicDriver->type == NIC_TYPE_6LOWPAN) { NetTxAncillary ancillary; //Debug message TRACE_INFO("Forwarding IPv6 packet to %s (%" PRIuSIZE " bytes)...\r\n", destInterface->name, length); //Dump IP header contents for debugging purpose ipv6DumpHeader(ipHeader); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; //Send the packet over the specified link error = nicSendPacket(destInterface, destBuffer, destOffset, &ancillary); } else //Unknown interface type? { //Report an error error = ERROR_INVALID_INTERFACE; } } //Free previously allocated memory netBufferFree(destBuffer); } else { //Failed to allocate memory error = ERROR_OUT_OF_MEMORY; } //Return status code return error; } #endif
/** * @file ipv6_routing.c * @brief IPv6 routing * * @section License * * SPDX-License-Identifier: GPL-2.0-or-later * * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved. * * This file is part of CycloneTCP Open. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * @author Oryx Embedded SARL (www.oryx-embedded.com) * @version 2.0.2 **/ //Switch to the appropriate trace level #define TRACE_LEVEL IPV6_TRACE_LEVEL //Dependencies #include <limits.h> #include "core/net.h" #include "core/ip.h" #include "ipv6/ipv6.h" #include "ipv6/ipv6_misc.h" #include "ipv6/ipv6_routing.h" #include "ipv6/icmpv6.h" #include "ipv6/ndp.h" #include "debug.h" //Check TCP/IP stack configuration #if (IPV6_SUPPORT == ENABLED && IPV6_ROUTING_SUPPORT == ENABLED) //IPv6 routing table static Ipv6RoutingTableEntry ipv6RoutingTable[IPV6_ROUTING_TABLE_SIZE]; /** * @brief Initialize IPv6 routing table * @return Error code **/ error_t ipv6InitRouting(void) { //Clear the routing table osMemset(ipv6RoutingTable, 0, sizeof(ipv6RoutingTable)); //Successful initialization return NO_ERROR; } /** * @brief Enable routing for the specified interface * @param[in] interface Underlying network interface * @param[in] enable When the flag is set to TRUE, routing is enabled on the * interface and the router can forward packets to or from the interface * @return Error code **/ error_t ipv6EnableRouting(NetInterface *interface, bool_t enable) { //Check parameters if(interface == NULL) return ERROR_INVALID_PARAMETER; //Get exclusive access osAcquireMutex(&netMutex); //Enable or disable routing interface->ipv6Context.isRouter = enable; //Release exclusive access osReleaseMutex(&netMutex); //Successful processing return NO_ERROR; } /** * @brief Add a new entry in the IPv6 routing table * @param[in] prefix Network destination * @param[in] prefixLen Length of the prefix, in bits * @param[in] interface Network interface where to forward the packet * @param[in] nextHop IPv6 address of the next hop * @param[in] metric Metric value * @return Error code **/ error_t ipv6AddRoute(const Ipv6Addr *prefix, uint_t prefixLen, NetInterface *interface, const Ipv6Addr *nextHop, uint_t metric) { error_t error; uint_t i; Ipv6RoutingTableEntry *entry; Ipv6RoutingTableEntry *firstFreeEntry; //Check parameters if(prefix == NULL || interface == NULL) return ERROR_INVALID_PARAMETER; //Keep track of the first free entry firstFreeEntry = NULL; //Get exclusive access osAcquireMutex(&netMutex); //Loop through routing table entries for(i = 0; i < IPV6_ROUTING_TABLE_SIZE; i++) { //Point to the current entry entry = &ipv6RoutingTable[i]; //Valid entry? 
if(entry->valid) { //Check prefix length if(entry->prefixLen == prefixLen) { //Check whether the current entry matches the specified destination if(ipv6CompPrefix(&entry->prefix, prefix, prefixLen)) break; } } else { //Keep track of the first free entry if(firstFreeEntry == NULL) firstFreeEntry = entry; } } //If the routing table does not contain the specified destination, //then a new entry should be created if(i >= IPV6_ROUTING_TABLE_SIZE) entry = firstFreeEntry; //Check whether the routing table runs out of space if(entry != NULL) { //Network destination entry->prefix = *prefix; entry->prefixLen = prefixLen; //Interface where to forward the packet entry->interface = interface; //Address of the next hop if(nextHop != NULL) entry->nextHop = *nextHop; else entry->nextHop = IPV6_UNSPECIFIED_ADDR; //Metric value entry->metric = metric; //The entry is now valid entry->valid = TRUE; //Successful processing error = NO_ERROR; } else { //The routing table is full error = ERROR_FAILURE; } //Release exclusive access osReleaseMutex(&netMutex); //Return status code return error; } /** * @brief Remove an entry from the IPv6 routing table * @param[in] prefix Network destination * @param[in] prefixLen Length of the prefix, in bits * @return Error code **/ error_t ipv6DeleteRoute(const Ipv6Addr *prefix, uint_t prefixLen) { error_t error; uint_t i; Ipv6RoutingTableEntry *entry; //Initialize status code error = ERROR_NOT_FOUND; //Get exclusive access osAcquireMutex(&netMutex); //Loop through routing table entries for(i = 0; i < IPV6_ROUTING_TABLE_SIZE; i++) { //Point to the current entry entry = &ipv6RoutingTable[i]; //Valid entry? if(entry->valid) { //Check prefix length if(entry->prefixLen == prefixLen) { //Check whether the current entry matches the specified destination if(ipv6CompPrefix(&entry->prefix, prefix, prefixLen)) { //Delete current entry entry->valid = FALSE; //The route was successfully deleted from the routing table error = NO_ERROR; } } } } //Release exclusive access osReleaseMutex(&netMutex); //Return status code return error; } /** * @brief Delete all routes from the IPv6 routing table * @return Error code **/ error_t ipv6DeleteAllRoutes(void) { //Get exclusive access osAcquireMutex(&netMutex); //Clear the routing table osMemset(ipv6RoutingTable, 0, sizeof(ipv6RoutingTable)); //Release exclusive access osReleaseMutex(&netMutex); //Successful processing return NO_ERROR; } /** * @brief Forward an IPv6 packet * @param[in] srcInterface Network interface on which the packet was received * @param[in] ipPacket Multi-part buffer that holds the IPv6 packet to forward * @param[in] ipPacketOffset Offset to the first byte of the IPv6 packet * @return Error code **/ error_t ipv6ForwardPacket(NetInterface *srcInterface, NetBuffer *ipPacket, size_t ipPacketOffset) { error_t error; uint_t i; uint_t metric; uint_t prefixLen; bool_t match; size_t length; size_t destOffset; NetInterface *destInterface; NetBuffer *destBuffer; Ipv6Header *ipHeader; Ipv6RoutingTableEntry *entry; Ipv6Addr destIpAddr; #if (ETH_SUPPORT == ENABLED) NetInterface *physicalInterface; #endif //Silently drop any IP packets received on an interface that has //not been assigned a valid link-local address if(ipv6GetLinkLocalAddrState(srcInterface) != IPV6_ADDR_STATE_PREFERRED) return ERROR_NOT_CONFIGURED; //If routing is not enabled on the interface, then the router cannot //forward packets from the interface if(!srcInterface->ipv6Context.isRouter) return ERROR_FAILURE; //Calculate the length of the IPv6 packet length = 
netBufferGetLength(ipPacket) - ipPacketOffset; //Ensure the packet length is greater than 40 bytes if(length < sizeof(Ipv6Header)) return ERROR_INVALID_LENGTH; //Point to the IPv6 header ipHeader = netBufferAt(ipPacket, ipPacketOffset); //Sanity check if(ipHeader == NULL) return ERROR_FAILURE; //An IPv6 packet with a source address of unspecified must never be //forwarded by an IPv6 router (refer to RFC section 3513 2.5.2) if(ipv6CompAddr(&ipHeader->srcAddr, &IPV6_UNSPECIFIED_ADDR)) return ERROR_INVALID_ADDRESS; //The unspecified address must not be used as the destination address //of IPv6 packets (refer to RFC section 3513 2.5.2) if(ipv6CompAddr(&ipHeader->destAddr, &IPV6_UNSPECIFIED_ADDR)) return ERROR_INVALID_ADDRESS; //An IPv6 packet with a destination address of loopback must never be //forwarded by an IPv6 router (refer to RFC 3513 section 2.5.3) if(ipv6CompAddr(&ipHeader->destAddr, &IPV6_LOOPBACK_ADDR)) return ERROR_INVALID_ADDRESS; //Check whether the destination address is a link-local address if(ipv6IsLinkLocalUnicastAddr(&ipHeader->destAddr)) { //Forward the packet on the same network interface destInterface = srcInterface; //Next hop destIpAddr = ipHeader->destAddr; } else { //Lowest metric value metric = UINT_MAX; //Longest prefix length prefixLen = 0; //Outgoing network interface destInterface = NULL; //Route determination process for(i = 0; i < IPV6_ROUTING_TABLE_SIZE; i++) { //Point to the current entry entry = &ipv6RoutingTable[i]; //Valid entry? if(entry->valid && entry->interface != NULL) { //Clear flag match = FALSE; //Do not forward any IP packets to an interface that has not //been assigned a valid link-local address... if(ipv6GetLinkLocalAddrState(entry->interface) == IPV6_ADDR_STATE_PREFERRED) { //If routing is enabled on the interface, then the router //can forward packets to the interface if(entry->interface->ipv6Context.isRouter) { //Compare the destination address with the current entry for a match if(ipv6CompPrefix(&ipHeader->destAddr, &entry->prefix, entry->prefixLen)) { //The longest matching route is the most specific route to the //destination IPv6 address... if(entry->prefixLen > prefixLen) { //Give the current route the higher precedence match = TRUE; } else if(entry->prefixLen == prefixLen) { //If multiple entries with the longest match are found, the //router uses the lowest metric to select the best route if(entry->metric < metric) { //Give the current route the higher precedence match = TRUE; } } } } } //Matching entry? if(match) { //Select the current route metric = entry->metric; prefixLen = entry->prefixLen; //Outgoing interface on which to forward the packet destInterface = entry->interface; //Next hop if(!ipv6CompAddr(&entry->nextHop, &IPV6_UNSPECIFIED_ADDR)) destIpAddr = entry->nextHop; else destIpAddr = ipHeader->destAddr; } } } } //No route to the destination? 
if(destInterface == NULL) { //A Destination Unreachable message should be generated by a router //in response to a packet that cannot be delivered icmpv6SendErrorMessage(srcInterface, ICMPV6_TYPE_DEST_UNREACHABLE, ICMPV6_CODE_NO_ROUTE_TO_DEST, 0, ipPacket, ipPacketOffset); //Exit immediately return ERROR_NO_ROUTE; } //Check whether the length of the IPv6 packet is larger than the link MTU if(length > destInterface->ipv6Context.linkMtu) { //A Packet Too Big must be sent by a router in response to a packet //that it cannot forward because the packet is larger than the MTU //of the outgoing link icmpv6SendErrorMessage(srcInterface, ICMPV6_TYPE_PACKET_TOO_BIG, 0, destInterface->ipv6Context.linkMtu, ipPacket, ipPacketOffset); //Exit immediately return ERROR_INVALID_LENGTH; } //Check whether the packet is explicitly addressed to the router itself if(!ipv6CheckDestAddr(destInterface, &ipHeader->destAddr)) { //Valid unicast address? if(!ipv6IsMulticastAddr(&ipHeader->destAddr)) { //Process IPv6 packet //ipv6ProcessPacket(destInterface, ipPacket, ipPacketOffset); //Exit immediately return NO_ERROR; } } //Check whether the IPv6 packet is about to be sent out the interface //on which it was received if(destInterface == srcInterface) { #if (NDP_SUPPORT == ENABLED) //A router should send a Redirect message whenever it forwards a packet //that is not explicitly addressed to itself in which the source address //identifies a neighbor, and if(ipv6IsOnLink(srcInterface, &ipHeader->srcAddr)) { //The router determines that a better first-hop node resides on the //same link as the sending node for the destination address of the //packet being forwarded, and if(ipv6IsOnLink(destInterface, &destIpAddr)) { //The destination address of the packet is not a multicast address if(!ipv6IsMulticastAddr(&ipHeader->destAddr)) { //Transmit a Redirect message ndpSendRedirect(srcInterface, &destIpAddr, ipPacket, ipPacketOffset); } } } #endif } else { //Check whether the scope of the source address is smaller than the //scope of the destination address if(ipv6GetAddrScope(&ipHeader->srcAddr) < ipv6GetAddrScope(&ipHeader->destAddr)) { //A Destination Unreachable message should be generated by a router //in response to a packet that cannot be delivered without leaving //the scope of the source address icmpv6SendErrorMessage(srcInterface, ICMPV6_TYPE_DEST_UNREACHABLE, ICMPV6_CODE_BEYOND_SCOPE_OF_SRC_ADDR, 0, ipPacket, ipPacketOffset); //Exit immediately return ERROR_INVALID_ADDRESS; } } //Hop Limit exceeded in transit? if(ipHeader->hopLimit <= 1) { //If a router receives a packet with a Hop Limit of zero, or if a router //decrements a packet's Hop Limit to zero, it must discard the packet //and originate an ICMPv6 Time Exceeded message icmpv6SendErrorMessage(srcInterface, ICMPV6_TYPE_TIME_EXCEEDED, ICMPV6_CODE_HOP_LIMIT_EXCEEDED, 0, ipPacket, ipPacketOffset); //Exit immediately return ERROR_FAILURE; } //The Hop-by-Hop Options header, when present, must immediately follow //the IPv6 header. 
Its presence is indicated by the value zero in the //Next Header field of the IPv6 header if(ipHeader->nextHeader == IPV6_HOP_BY_HOP_OPT_HEADER) { //Point to the extension header size_t headerOffset = ipPacketOffset + sizeof(Ipv6Header); //Calculate the offset of the Next Header field size_t nextHeaderOffset = ipPacketOffset + &ipHeader->nextHeader - (uint8_t *) ipHeader; //The Hop-by-Hop Options header is used to carry optional information //that must be examined by every node along a packet's delivery path error = ipv6ParseHopByHopOptHeader(srcInterface, ipPacket, ipPacketOffset, &headerOffset, &nextHeaderOffset); //Any error while processing the extension header? if(error) return error; } //Allocate a buffer to hold the IPv6 packet destBuffer = ethAllocBuffer(length, &destOffset); //Successful memory allocation? if(destBuffer != NULL) { //Copy IPv6 header error = netBufferCopy(destBuffer, destOffset, ipPacket, ipPacketOffset, length); //Check status code if(!error) { //Point to the IPv6 header ipHeader = netBufferAt(destBuffer, destOffset); //Every time a router forwards a packet, it decrements the Hop Limit field ipHeader->hopLimit--; #if (ETH_SUPPORT == ENABLED) //Point to the physical interface physicalInterface = nicGetPhysicalInterface(destInterface); //Ethernet interface? if(physicalInterface->nicDriver != NULL && physicalInterface->nicDriver->type == NIC_TYPE_ETHERNET) { MacAddr destMacAddr; NetTxAncillary ancillary; //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; //Destination IPv6 address if(ipv6CompAddr(&destIpAddr, &IPV6_UNSPECIFIED_ADDR)) destIpAddr = ipHeader->destAddr; //Check whether the destination IPv6 address is a multicast address? if(ipv6IsMulticastAddr(&destIpAddr)) { //Map IPv6 multicast address to MAC-layer multicast address error = ipv6MapMulticastAddrToMac(&destIpAddr, &destMacAddr); } else { //Resolve host address using Neighbor Discovery protocol error = ndpResolve(destInterface, &destIpAddr, &destMacAddr); } //Successful address resolution? if(!error) { //Debug message TRACE_INFO("Forwarding IPv6 packet to %s (%" PRIuSIZE " bytes)...\r\n", destInterface->name, length); //Dump IP header contents for debugging purpose ipv6DumpHeader(ipHeader); //Send Ethernet frame error = ethSendFrame(destInterface, NULL, &destMacAddr, ETH_TYPE_IPV6, destBuffer, destOffset, &ancillary); } //Address resolution is in progress? else if(error == ERROR_IN_PROGRESS) { //Debug message TRACE_INFO("Enqueuing IPv6 packet (%" PRIuSIZE " bytes)...\r\n", length); //Dump IP header contents for debugging purpose ipv6DumpHeader(ipHeader); //Enqueue packets waiting for address resolution error = ndpEnqueuePacket(srcInterface, destInterface, &destIpAddr, destBuffer, destOffset, &ancillary); } //Address resolution failed? else { //Debug message TRACE_WARNING("Cannot map IPv6 address to Ethernet address!\r\n"); } } else #endif #if (PPP_SUPPORT == ENABLED) //PPP interface? if(destInterface->nicDriver != NULL && destInterface->nicDriver->type == NIC_TYPE_PPP) { //Debug message TRACE_INFO("Forwarding IPv6 packet to %s (%" PRIuSIZE " bytes)...\r\n", destInterface->name, length); //Dump IP header contents for debugging purpose ipv6DumpHeader(ipHeader); //Send PPP frame error = pppSendFrame(destInterface, destBuffer, destOffset, PPP_PROTOCOL_IPV6); } else #endif //6LoWPAN interface? 
if(destInterface->nicDriver != NULL && destInterface->nicDriver->type == NIC_TYPE_6LOWPAN) { NetTxAncillary ancillary; //Debug message TRACE_INFO("Forwarding IPv6 packet to %s (%" PRIuSIZE " bytes)...\r\n", destInterface->name, length); //Dump IP header contents for debugging purpose ipv6DumpHeader(ipHeader); //Additional options can be passed to the stack along with the packet ancillary = NET_DEFAULT_TX_ANCILLARY; //Send the packet over the specified link error = nicSendPacket(destInterface, destBuffer, destOffset, &ancillary); } else //Unknown interface type? { //Report an error error = ERROR_INVALID_INTERFACE; } } //Free previously allocated memory netBufferFree(destBuffer); } else { //Failed to allocate memory error = ERROR_OUT_OF_MEMORY; } //Return status code return error; } #endif
error_t ipv6AddRoute(const Ipv6Addr *prefix, uint_t prefixLen, NetInterface *interface, const Ipv6Addr *nextHop, uint_t metric) { error_t error; uint_t i; Ipv6RoutingTableEntry *entry; Ipv6RoutingTableEntry *firstFreeEntry; //Check parameters if(prefix == NULL || interface == NULL) return ERROR_INVALID_PARAMETER; //Keep track of the first free entry firstFreeEntry = NULL; //Get exclusive access osAcquireMutex(&netMutex); //Loop through routing table entries for(i = 0; i < IPV6_ROUTING_TABLE_SIZE; i++) { //Point to the current entry entry = &ipv6RoutingTable[i]; //Valid entry? if(entry->valid) { //Check prefix length if(entry->prefixLen == prefixLen) { //Check whether the current entry matches the specified destination if(ipv6CompPrefix(&entry->prefix, prefix, prefixLen)) break; } } else { //Keep track of the first free entry if(firstFreeEntry == NULL) firstFreeEntry = entry; } } //If the routing table does not contain the specified destination, //then a new entry should be created if(i >= IPV6_ROUTING_TABLE_SIZE) entry = firstFreeEntry; //Check whether the routing table runs out of space if(entry != NULL) { //Network destination entry->prefix = *prefix; entry->prefixLen = prefixLen; //Interface where to forward the packet entry->interface = interface; //Address of the next hop if(nextHop != NULL) entry->nextHop = *nextHop; else entry->nextHop = IPV6_UNSPECIFIED_ADDR; //Metric value entry->metric = metric; //The entry is now valid entry->valid = TRUE; //Sucessful processing error = NO_ERROR; } else { //The routing table is full error = ERROR_FAILURE; } //Release exclusive access osReleaseMutex(&netMutex); //Return status code return error; }
error_t ipv6AddRoute(const Ipv6Addr *prefix, uint_t prefixLen, NetInterface *interface, const Ipv6Addr *nextHop, uint_t metric) { error_t error; uint_t i; Ipv6RoutingTableEntry *entry; Ipv6RoutingTableEntry *firstFreeEntry; //Check parameters if(prefix == NULL || interface == NULL) return ERROR_INVALID_PARAMETER; //Keep track of the first free entry firstFreeEntry = NULL; //Get exclusive access osAcquireMutex(&netMutex); //Loop through routing table entries for(i = 0; i < IPV6_ROUTING_TABLE_SIZE; i++) { //Point to the current entry entry = &ipv6RoutingTable[i]; //Valid entry? if(entry->valid) { //Check prefix length if(entry->prefixLen == prefixLen) { //Check whether the current entry matches the specified destination if(ipv6CompPrefix(&entry->prefix, prefix, prefixLen)) break; } } else { //Keep track of the first free entry if(firstFreeEntry == NULL) firstFreeEntry = entry; } } //If the routing table does not contain the specified destination, //then a new entry should be created if(i >= IPV6_ROUTING_TABLE_SIZE) entry = firstFreeEntry; //Check whether the routing table runs out of space if(entry != NULL) { //Network destination entry->prefix = *prefix; entry->prefixLen = prefixLen; //Interface where to forward the packet entry->interface = interface; //Address of the next hop if(nextHop != NULL) entry->nextHop = *nextHop; else entry->nextHop = IPV6_UNSPECIFIED_ADDR; //Metric value entry->metric = metric; //The entry is now valid entry->valid = TRUE; //Successful processing error = NO_ERROR; } else { //The routing table is full error = ERROR_FAILURE; } //Release exclusive access osReleaseMutex(&netMutex); //Return status code return error; }
{'added': [(9, ' * Copyright (C) 2010-2021 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.2'), (172, ' //Successful processing')], 'deleted': [(9, ' * Copyright (C) 2010-2020 Oryx Embedded SARL. All rights reserved.'), (28, ' * @version 2.0.0'), (172, ' //Sucessful processing')]}
3
3
325
1,488
50
220
11
https://github.com/Oryx-Embedded/CycloneTCP
CVE-2021-26788
CWE-20
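Per the diff field above, the lines changed in this entry are limited to the copyright year, the version string, and the "Sucessful" comment typo; the ipv6AddRoute body itself is unchanged between code_before and code_after. The function is still a clear illustration of a common embedded pattern: inserting into a fixed-size table by reusing a matching entry or, failing that, the first free slot found during the same scan. A minimal sketch with hypothetical names (Entry, table_put, TABLE_SIZE), not CycloneTCP's API:

#define TABLE_SIZE 16

/* Hypothetical fixed-size table entry, modeled on the routing-table
 * handling in ipv6AddRoute above. */
typedef struct {
    int valid;
    int key;
    int value;
} Entry;

static Entry table[TABLE_SIZE];

/* Reuse an existing entry with the same key, else take the first free
 * slot; report failure when the table is full. Returns 0 on success. */
static int table_put(int key, int value)
{
    Entry *free_slot = NULL;
    int i;

    for (i = 0; i < TABLE_SIZE; i++) {
        if (table[i].valid) {
            if (table[i].key == key) {
                table[i].value = value;   /* update in place */
                return 0;
            }
        } else if (free_slot == NULL) {
            free_slot = &table[i];        /* remember first free slot */
        }
    }

    if (free_slot == NULL)
        return -1;                        /* table full */

    free_slot->key = key;
    free_slot->value = value;
    free_slot->valid = 1;
    return 0;
}

Tracking the first free slot during the lookup scan avoids a second pass over the table, which matters on small embedded targets; ipv6AddRoute uses the same single-pass shape under its netMutex lock.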
3,235
ip6_tables.c
C
get_chainname_rulenum
/* * Packet matching code. * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/capability.h> #include <linux/in.h> #include <linux/skbuff.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/poison.h> #include <linux/icmpv6.h> #include <net/ipv6.h> #include <net/compat.h> #include <asm/uaccess.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/err.h> #include <linux/cpumask.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_log.h> #include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("IPv6 packet filter"); /*#define DEBUG_IP_FIREWALL*/ /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ /*#define DEBUG_IP_FIREWALL_USER*/ #ifdef DEBUG_IP_FIREWALL #define dprintf(format, args...) pr_info(format , ## args) #else #define dprintf(format, args...) #endif #ifdef DEBUG_IP_FIREWALL_USER #define duprintf(format, args...) pr_info(format , ## args) #else #define duprintf(format, args...) #endif #ifdef CONFIG_NETFILTER_DEBUG #define IP_NF_ASSERT(x) WARN_ON(!(x)) #else #define IP_NF_ASSERT(x) #endif #if 0 /* All the better to debug you with... */ #define static #define inline #endif void *ip6t_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(ip6t, IP6T); } EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table); /* We keep a set of rules for each CPU, so we can avoid write-locking them in the softirq when updating the counters and therefore only need to read-lock in the softirq; doing a write_lock_bh() in user context stops packets coming through and allows user context to read the counters or update the rules. Hence the start of any table is given by get_table() below. */ /* Returns whether matches rule or not. */ /* Performance critical - called for every packet */ static inline bool ip6_packet_match(const struct sk_buff *skb, const char *indev, const char *outdev, const struct ip6t_ip6 *ip6info, unsigned int *protoff, int *fragoff, bool *hotdrop) { unsigned long ret; const struct ipv6hdr *ipv6 = ipv6_hdr(skb); #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg))) if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, &ip6info->src), IP6T_INV_SRCIP) || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, &ip6info->dst), IP6T_INV_DSTIP)) { dprintf("Source or dest mismatch.\n"); /* dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr, ipinfo->smsk.s_addr, ipinfo->src.s_addr, ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : ""); dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr, ipinfo->dmsk.s_addr, ipinfo->dst.s_addr, ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/ return false; } ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", indev, ip6info->iniface, ip6info->invflags & IP6T_INV_VIA_IN ? 
" (INV)" : ""); return false; } ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", outdev, ip6info->outiface, ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : ""); return false; } /* ... might want to do something with class and flowlabel here ... */ /* look for the desired protocol header */ if (ip6info->flags & IP6T_F_PROTO) { int protohdr; unsigned short _frag_off; protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL); if (protohdr < 0) { if (_frag_off == 0) *hotdrop = true; return false; } *fragoff = _frag_off; dprintf("Packet protocol %hi ?= %s%hi.\n", protohdr, ip6info->invflags & IP6T_INV_PROTO ? "!":"", ip6info->proto); if (ip6info->proto == protohdr) { if (ip6info->invflags & IP6T_INV_PROTO) return false; return true; } /* We need match for the '-p all', too! */ if ((ip6info->proto != 0) && !(ip6info->invflags & IP6T_INV_PROTO)) return false; } return true; } /* should be ip6 safe */ static bool ip6_checkentry(const struct ip6t_ip6 *ipv6) { if (ipv6->flags & ~IP6T_F_MASK) { duprintf("Unknown flag bits set: %08X\n", ipv6->flags & ~IP6T_F_MASK); return false; } if (ipv6->invflags & ~IP6T_INV_MASK) { duprintf("Unknown invflag bits set: %08X\n", ipv6->invflags & ~IP6T_INV_MASK); return false; } return true; } static unsigned int ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) { net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } static inline struct ip6t_entry * get_entry(const void *base, unsigned int offset) { return (struct ip6t_entry *)(base + offset); } /* All zeroes == unconditional rule. */ /* Mildly perf critical (only if packet tracing is on) */ static inline bool unconditional(const struct ip6t_ip6 *ipv6) { static const struct ip6t_ip6 uncond; return memcmp(ipv6, &uncond, sizeof(uncond)) == 0; } static inline const struct xt_entry_target * ip6t_get_target_c(const struct ip6t_entry *e) { return ip6t_get_target((struct ip6t_entry *)e); } #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* This cries for unification! 
*/ static const char *const hooknames[] = { [NF_INET_PRE_ROUTING] = "PREROUTING", [NF_INET_LOCAL_IN] = "INPUT", [NF_INET_FORWARD] = "FORWARD", [NF_INET_LOCAL_OUT] = "OUTPUT", [NF_INET_POST_ROUTING] = "POSTROUTING", }; enum nf_ip_trace_comments { NF_IP6_TRACE_COMMENT_RULE, NF_IP6_TRACE_COMMENT_RETURN, NF_IP6_TRACE_COMMENT_POLICY, }; static const char *const comments[] = { [NF_IP6_TRACE_COMMENT_RULE] = "rule", [NF_IP6_TRACE_COMMENT_RETURN] = "return", [NF_IP6_TRACE_COMMENT_POLICY] = "policy", }; static struct nf_loginfo trace_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = LOGLEVEL_WARNING, .logflags = NF_LOG_MASK, }, }, }; /* Mildly perf critical (only if packet tracing is on) */ static inline int get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (unconditional(s) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? comments[NF_IP6_TRACE_COMMENT_POLICY] : comments[NF_IP6_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; } static void trace_packet(struct net *net, const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, const struct xt_table_info *private, const struct ip6t_entry *e) { const struct ip6t_entry *root; const char *hookname, *chainname, *comment; const struct ip6t_entry *iter; unsigned int rulenum = 0; root = get_entry(private->entries, private->hook_entry[hook]); hookname = chainname = hooknames[hook]; comment = comments[NF_IP6_TRACE_COMMENT_RULE]; xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) if (get_chainname_rulenum(iter, e, hookname, &chainname, &comment, &rulenum) != 0) break; nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", tablename, chainname, comment, rulenum); } #endif static inline struct ip6t_entry * ip6t_next_entry(const struct ip6t_entry *entry) { return (void *)entry + entry->next_offset; } /* Returns one of the generic firewall policies, like NF_ACCEPT. */ unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ip6t_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports. If the * rule is also a fragment-specific rule, non-fragments won't * match it.
*/ acpar.hotdrop = false; acpar.net = state->net; acpar.in = state->in; acpar.out = state->out; acpar.family = NFPROTO_IPV6; acpar.hooknum = hook; IP_NF_ASSERT(table->valid_hooks & (1 << hook)); local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; /* * Ensure we load private-> members after we've fetched the base * pointer. */ smp_read_barrier_depends(); cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. */ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; IP_NF_ASSERT(e); acpar.thoff = 0; if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { no_match: e = ip6t_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ip6t_get_target_c(e); IP_NF_ASSERT(t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) e = get_entry(table_base, private->underflow[hook]); else e = ip6t_next_entry(jumpstack[--stackidx]); continue; } if (table_base + v != ip6t_next_entry(e) && !(e->ipv6.flags & IP6T_F_GOTO)) { jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); if (verdict == XT_CONTINUE) e = ip6t_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; #else if (acpar.hotdrop) return NF_DROP; else return verdict; #endif } /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset to 0 as we leave), and comefrom to save source hook bitmask */ for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. 
*/ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { pr_err("iptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); /* Unconditional return/END. */ if ((unconditional(e) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last big jump. */ do { e->comefrom ^= (1<<NF_INET_NUMHOOKS); #ifdef DEBUG_IP_FIREWALL_USER if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { duprintf("Back unset " "on hook %u " "rule %u\n", hook, pos); } #endif oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. */ if (pos == oldpos) goto next; e = (struct ip6t_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct ip6t_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct ip6t_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This is a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; } e = (struct ip6t_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static void cleanup_match(struct xt_entry_match *m, struct net *net) { struct xt_mtdtor_param par; par.net = net; par.match = m->u.kernel.match; par.matchinfo = m->data; par.family = NFPROTO_IPV6; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); } static int check_entry(const struct ip6t_entry *e) { const struct xt_entry_target *t; if (!ip6_checkentry(&e->ipv6)) return -EINVAL; if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = ip6t_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; } static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ip6t_ip6 *ipv6 = par->entryinfo; int ret; par->match = m->u.kernel.match; par->matchinfo = m->data; ret = xt_check_match(par, m->u.match_size - sizeof(*m), ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); if (ret < 0) { duprintf("ip_tables: check failed for `%s'.\n", par->match->name); return ret; } return 0; } static int find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { struct xt_match *match; int ret; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("find_check_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; ret = check_match(m, par); if (ret) goto err; return 0; err: module_put(m->u.kernel.match->me); return ret; } static int check_target(struct ip6t_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = ip6t_get_target(e); struct xt_tgchk_param par = { .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target,
.targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_IPV6, }; int ret; ret = xt_check_target(&par, t->u.target_size - sizeof(*t), e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); if (ret < 0) { duprintf("ip_tables: check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static int find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV6; xt_ematch_foreach(ematch, e) { ret = find_check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } t = ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto cleanup_matches; } t->u.kernel.target = target; ret = check_target(e, net, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static bool check_underflow(const struct ip6t_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(e)) return false; t = ip6t_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; } static int check_entry_size_and_hooks(struct ip6t_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_err("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } static void cleanup_entry(struct ip6t_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) cleanup_match(ematch, net); t = ip6t_get_target(e); par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV6; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me);
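/* Finally release the per-cpu counter pair that find_check_entry() or compat_check_entry() allocated for this entry. */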
xt_percpu_counter_free(e->counters.pcnt); } /* Checks and translates the user-supplied table segment (held in newinfo) */ static int translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ip6t_replace *repl) { struct ip6t_entry *iter; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. */ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) return ret; ++i; if (strcmp(ip6t_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) return -ELOOP; /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, net, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter, net); } return ret; } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct ip6t_entry *iter; unsigned int cpu; unsigned int i; for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; } } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct ip6t_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; const void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ unsigned int i; const struct xt_entry_match *m; const struct xt_entry_target *t; e = (struct ip6t_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct ip6t_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } for (i = sizeof(struct ip6t_entry); i < e->target_offset; i += m->u.match_size) { m = (void *)e + i; if (copy_to_user(userptr + off + i + offsetof(struct xt_entry_match, u.user.name), m->u.kernel.match->name, strlen(m->u.kernel.match->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } t = ip6t_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(AF_INET6, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(AF_INET6, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; } static int compat_calc_entry(const struct ip6t_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_match *ematch; const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - base; xt_ematch_foreach(ematch, e) off += xt_compat_match_offset(ematch->u.kernel.match); t = ip6t_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_INET_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct ip6t_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct ip6t_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct ip6t_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; xt_compat_init_offsets(AF_INET6, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ip6t_getinfo)) { duprintf("length %u != %zu\n", *len, sizeof(struct ip6t_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(AF_INET6); #endif t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), "ip6table_%s", name); if (!IS_ERR_OR_NULL(t)) { struct ip6t_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { 
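/* A 32-bit caller lays entries out differently, so recompute sizes and hook offsets against the compat layout before reporting them. */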
ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET6); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET6); #endif return ret; } static int get_entries(struct net *net, struct ip6t_get_entries __user *uptr, const int *len) { int ret; struct ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct ip6t_get_entries) + get.size) { duprintf("get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR_OR_NULL(t)) { struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; struct ip6t_entry *iter; ret = 0; counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), "ip6table_%s", name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! 
*/ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n"); } vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, const void __user *user, unsigned int len) { int ret; struct ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("ip_tables: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ip6t_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user + size, len - size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, AF_INET6, name); if (IS_ERR_OR_NULL(t)) { ret = t ? 
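/* t is NULL when the table does not exist; an ERR_PTR from the lookup is propagated as-is */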
PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT struct compat_ip6t_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_INET_NUMHOOKS]; u32 underflow[NF_INET_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; /* struct xt_counters * */ struct compat_ip6t_entry entries[0]; }; static int compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_ip6t_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; const struct xt_entry_match *ematch; int ret = 0; origsize = *size; ce = (struct compat_ip6t_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_ip6t_entry); *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_to_user(ematch, dstptr, size); if (ret != 0) return ret; } target_offset = e->target_offset - (origsize - *size); t = ip6t_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ip6t_ip6 *ipv6, int *size) { struct xt_match *match; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("compat_check_calc_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; } static void compat_release_entry(struct compat_ip6t_entry *e) { struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) module_put(ematch->u.kernel.match->me); t = compat_ip6t_get_target(e); module_put(t->u.kernel.target->me); } static int check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ip6t_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting 
the compat entry is fine */ ret = check_entry((struct ip6t_entry *)e); if (ret) return ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; } static int compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct ip6t_entry *de; unsigned int origsize; int ret, h; struct xt_entry_match *ematch; ret = 0; origsize = *size; de = (struct ip6t_entry *)*dstptr; memcpy(de, e, sizeof(struct ip6t_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ip6t_entry); *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_from_user(ematch, dstptr, size); if (ret != 0) return ret; } de->target_offset = e->target_offset - (origsize - *size); t = compat_ip6t_get_target(e); xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int compat_check_entry(struct ip6t_entry *e, struct net *net, const char *name) { unsigned int j; int ret = 0; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV6; xt_ematch_foreach(ematch, e) { ret = check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } ret = check_target(e, net, name); if (ret) goto cleanup_matches; return 0; cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static int translate_compat_table(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ip6t_entry *iter0; struct ip6t_entry *iter1; 
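/* iter0 walks the compat (32-bit) image; iter1 walks the native image that is built from it below */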
unsigned int size; int ret = 0; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET6); xt_compat_init_offsets(AF_INET6, number); /* Walk through entries, checking offsets. */ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); if (ret) goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { ret = compat_check_entry(iter1, net, name); if (ret != 0) break; ++i; if (strcmp(ip6t_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. 
*/ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1, net); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); goto out; } static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case IP6T_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } struct compat_ip6t_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_ip6t_entry entrytable[0]; }; static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct ip6t_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } static int compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, int *len) { int ret; struct compat_ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } 
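/* compat_table_info() records per-entry offset deltas in a global per-family list, so the AF_INET6 compat lock must be held; the list is flushed again before unlocking below. */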
xt_compat_lock(AF_INET6); t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(AF_INET6); module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; xt_compat_unlock(AF_INET6); return ret; } static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case IP6T_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_ip6t_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case IP6T_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case IP6T_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case IP6T_SO_GET_REVISION_MATCH: case IP6T_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; int target; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; if (cmd == IP6T_SO_GET_REVISION_TARGET) target = 1; else target = 0; try_then_request_module(xt_find_revision(AF_INET6, rev.name, rev.revision, target, &ret), "ip6t_%s", rev.name); break; } default: duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static void __ip6t_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct ip6t_entry *iter; private = xt_unregister_table(table); /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) goto out_free; new_table = xt_register_table(net, table, 
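/* bootstrap is an all-zero placeholder; xt_register_table() swaps in newinfo as the live private data */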
&bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } /* set res now, will see skbs right after nf_register_net_hooks */ WRITE_ONCE(*res, new_table); ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { __ip6t_unregister_table(net, new_table); *res = NULL; } return ret; out_free: xt_free_table_info(newinfo); return ret; } void ip6t_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __ip6t_unregister_table(net, table); } /* Returns 1 if the type and code is matched by the range, 0 otherwise */ static inline bool icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, u_int8_t type, u_int8_t code, bool invert) { return (type == test_type && code >= min_code && code <= max_code) ^ invert; } static bool icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) { const struct icmp6hdr *ic; struct icmp6hdr _icmph; const struct ip6t_icmp *icmpinfo = par->matchinfo; /* Must not be a fragment. */ if (par->fragoff != 0) return false; ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); if (ic == NULL) { /* We've been asked to examine this packet, and we * can't. Hence, no choice but to drop. */ duprintf("Dropping evil ICMP tinygram.\n"); par->hotdrop = true; return false; } return icmp6_type_code_match(icmpinfo->type, icmpinfo->code[0], icmpinfo->code[1], ic->icmp6_type, ic->icmp6_code, !!(icmpinfo->invflags&IP6T_ICMP_INV)); } /* Called when user tries to insert an entry of this type. */ static int icmp6_checkentry(const struct xt_mtchk_param *par) { const struct ip6t_icmp *icmpinfo = par->matchinfo; /* Must specify no unknown invflags */ return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0; } /* The built-in targets: standard (NULL) and error. 
*/ static struct xt_target ip6t_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_IPV6, #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = ip6t_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_IPV6, }, }; static struct nf_sockopt_ops ip6t_sockopts = { .pf = PF_INET6, .set_optmin = IP6T_BASE_CTL, .set_optmax = IP6T_SO_SET_MAX+1, .set = do_ip6t_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_ip6t_set_ctl, #endif .get_optmin = IP6T_BASE_CTL, .get_optmax = IP6T_SO_GET_MAX+1, .get = do_ip6t_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_ip6t_get_ctl, #endif .owner = THIS_MODULE, }; static struct xt_match ip6t_builtin_mt[] __read_mostly = { { .name = "icmp6", .match = icmp6_match, .matchsize = sizeof(struct ip6t_icmp), .checkentry = icmp6_checkentry, .proto = IPPROTO_ICMPV6, .family = NFPROTO_IPV6, }, }; static int __net_init ip6_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_IPV6); } static void __net_exit ip6_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_IPV6); } static struct pernet_operations ip6_tables_net_ops = { .init = ip6_tables_net_init, .exit = ip6_tables_net_exit, }; static int __init ip6_tables_init(void) { int ret; ret = register_pernet_subsys(&ip6_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); if (ret < 0) goto err2; ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); if (ret < 0) goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ip6t_sockopts); if (ret < 0) goto err5; pr_info("(C) 2000-2006 Netfilter Core Team\n"); return 0; err5: xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); err4: xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); err2: unregister_pernet_subsys(&ip6_tables_net_ops); err1: return ret; } static void __exit ip6_tables_fini(void) { nf_unregister_sockopt(&ip6t_sockopts); xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); unregister_pernet_subsys(&ip6_tables_net_ops); } EXPORT_SYMBOL(ip6t_register_table); EXPORT_SYMBOL(ip6t_unregister_table); EXPORT_SYMBOL(ip6t_do_table); module_init(ip6_tables_init); module_exit(ip6_tables_fini);
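/* Illustrative registration sketch (hypothetical, loosely modelled on ip6table_filter; my_table, my_ops and my_table_res are placeholder names, not definitions from this file): repl = ip6t_alloc_initial_table(&my_table); if (repl == NULL) return -ENOMEM; ret = ip6t_register_table(net, &my_table, repl, my_ops, &my_table_res); kfree(repl); on teardown the module calls ip6t_unregister_table(net, my_table_res, my_ops). */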
/* * Packet matching code. * * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org> * Copyright (c) 2006-2010 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/capability.h> #include <linux/in.h> #include <linux/skbuff.h> #include <linux/kmod.h> #include <linux/vmalloc.h> #include <linux/netdevice.h> #include <linux/module.h> #include <linux/poison.h> #include <linux/icmpv6.h> #include <net/ipv6.h> #include <net/compat.h> #include <asm/uaccess.h> #include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/err.h> #include <linux/cpumask.h> #include <linux/netfilter_ipv6/ip6_tables.h> #include <linux/netfilter/x_tables.h> #include <net/netfilter/nf_log.h> #include "../../netfilter/xt_repldata.h" MODULE_LICENSE("GPL"); MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); MODULE_DESCRIPTION("IPv6 packet filter"); /*#define DEBUG_IP_FIREWALL*/ /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */ /*#define DEBUG_IP_FIREWALL_USER*/ #ifdef DEBUG_IP_FIREWALL #define dprintf(format, args...) pr_info(format , ## args) #else #define dprintf(format, args...) #endif #ifdef DEBUG_IP_FIREWALL_USER #define duprintf(format, args...) pr_info(format , ## args) #else #define duprintf(format, args...) #endif #ifdef CONFIG_NETFILTER_DEBUG #define IP_NF_ASSERT(x) WARN_ON(!(x)) #else #define IP_NF_ASSERT(x) #endif #if 0 /* All the better to debug you with... */ #define static #define inline #endif void *ip6t_alloc_initial_table(const struct xt_table *info) { return xt_alloc_initial_table(ip6t, IP6T); } EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table); /* We keep a set of rules for each CPU, so we can avoid write-locking them in the softirq when updating the counters and therefore only need to read-lock in the softirq; doing a write_lock_bh() in user context stops packets coming through and allows user context to read the counters or update the rules. Hence the start of any table is given by get_table() below. */ /* Returns whether matches rule or not. */ /* Performance critical - called for every packet */ static inline bool ip6_packet_match(const struct sk_buff *skb, const char *indev, const char *outdev, const struct ip6t_ip6 *ip6info, unsigned int *protoff, int *fragoff, bool *hotdrop) { unsigned long ret; const struct ipv6hdr *ipv6 = ipv6_hdr(skb); #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg))) if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk, &ip6info->src), IP6T_INV_SRCIP) || FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk, &ip6info->dst), IP6T_INV_DSTIP)) { dprintf("Source or dest mismatch.\n"); /* dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr, ipinfo->smsk.s_addr, ipinfo->src.s_addr, ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : ""); dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr, ipinfo->dmsk.s_addr, ipinfo->dst.s_addr, ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/ return false; } ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask); if (FWINV(ret != 0, IP6T_INV_VIA_IN)) { dprintf("VIA in mismatch (%s vs %s).%s\n", indev, ip6info->iniface, ip6info->invflags & IP6T_INV_VIA_IN ? 
" (INV)" : ""); return false; } ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask); if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) { dprintf("VIA out mismatch (%s vs %s).%s\n", outdev, ip6info->outiface, ip6info->invflags & IP6T_INV_VIA_OUT ? " (INV)" : ""); return false; } /* ... might want to do something with class and flowlabel here ... */ /* look for the desired protocol header */ if (ip6info->flags & IP6T_F_PROTO) { int protohdr; unsigned short _frag_off; protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off, NULL); if (protohdr < 0) { if (_frag_off == 0) *hotdrop = true; return false; } *fragoff = _frag_off; dprintf("Packet protocol %hi ?= %s%hi.\n", protohdr, ip6info->invflags & IP6T_INV_PROTO ? "!":"", ip6info->proto); if (ip6info->proto == protohdr) { if (ip6info->invflags & IP6T_INV_PROTO) return false; return true; } /* We need match for the '-p all', too! */ if ((ip6info->proto != 0) && !(ip6info->invflags & IP6T_INV_PROTO)) return false; } return true; } /* should be ip6 safe */ static bool ip6_checkentry(const struct ip6t_ip6 *ipv6) { if (ipv6->flags & ~IP6T_F_MASK) { duprintf("Unknown flag bits set: %08X\n", ipv6->flags & ~IP6T_F_MASK); return false; } if (ipv6->invflags & ~IP6T_INV_MASK) { duprintf("Unknown invflag bits set: %08X\n", ipv6->invflags & ~IP6T_INV_MASK); return false; } return true; } static unsigned int ip6t_error(struct sk_buff *skb, const struct xt_action_param *par) { net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo); return NF_DROP; } static inline struct ip6t_entry * get_entry(const void *base, unsigned int offset) { return (struct ip6t_entry *)(base + offset); } /* All zeroes == unconditional rule. */ /* Mildly perf critical (only if packet tracing is on) */ static inline bool unconditional(const struct ip6t_entry *e) { static const struct ip6t_ip6 uncond; return e->target_offset == sizeof(struct ip6t_entry) && memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0; } static inline const struct xt_entry_target * ip6t_get_target_c(const struct ip6t_entry *e) { return ip6t_get_target((struct ip6t_entry *)e); } #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* This cries for unification! */ static const char *const hooknames[] = { [NF_INET_PRE_ROUTING] = "PREROUTING", [NF_INET_LOCAL_IN] = "INPUT", [NF_INET_FORWARD] = "FORWARD", [NF_INET_LOCAL_OUT] = "OUTPUT", [NF_INET_POST_ROUTING] = "POSTROUTING", }; enum nf_ip_trace_comments { NF_IP6_TRACE_COMMENT_RULE, NF_IP6_TRACE_COMMENT_RETURN, NF_IP6_TRACE_COMMENT_POLICY, }; static const char *const comments[] = { [NF_IP6_TRACE_COMMENT_RULE] = "rule", [NF_IP6_TRACE_COMMENT_RETURN] = "return", [NF_IP6_TRACE_COMMENT_POLICY] = "policy", }; static struct nf_loginfo trace_loginfo = { .type = NF_LOG_TYPE_LOG, .u = { .log = { .level = LOGLEVEL_WARNING, .logflags = NF_LOG_MASK, }, }, }; /* Mildly perf critical (only if packet tracing is on) */ static inline int get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (unconditional(s) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? 
comments[NF_IP6_TRACE_COMMENT_POLICY] : comments[NF_IP6_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; } static void trace_packet(struct net *net, const struct sk_buff *skb, unsigned int hook, const struct net_device *in, const struct net_device *out, const char *tablename, const struct xt_table_info *private, const struct ip6t_entry *e) { const struct ip6t_entry *root; const char *hookname, *chainname, *comment; const struct ip6t_entry *iter; unsigned int rulenum = 0; root = get_entry(private->entries, private->hook_entry[hook]); hookname = chainname = hooknames[hook]; comment = comments[NF_IP6_TRACE_COMMENT_RULE]; xt_entry_foreach(iter, root, private->size - private->hook_entry[hook]) if (get_chainname_rulenum(iter, e, hookname, &chainname, &comment, &rulenum) != 0) break; nf_log_trace(net, AF_INET6, hook, skb, in, out, &trace_loginfo, "TRACE: %s:%s:%s:%u ", tablename, chainname, comment, rulenum); } #endif static inline struct ip6t_entry * ip6t_next_entry(const struct ip6t_entry *entry) { return (void *)entry + entry->next_offset; } /* Returns one of the generic firewall policies, like NF_ACCEPT. */ unsigned int ip6t_do_table(struct sk_buff *skb, const struct nf_hook_state *state, struct xt_table *table) { unsigned int hook = state->hook; static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long)))); /* Initializing verdict to NF_DROP keeps gcc happy. */ unsigned int verdict = NF_DROP; const char *indev, *outdev; const void *table_base; struct ip6t_entry *e, **jumpstack; unsigned int stackidx, cpu; const struct xt_table_info *private; struct xt_action_param acpar; unsigned int addend; /* Initialization */ stackidx = 0; indev = state->in ? state->in->name : nulldevname; outdev = state->out ? state->out->name : nulldevname; /* We handle fragments by dealing with the first fragment as * if it was a normal packet. All other fragments are treated * normally, except that they will NEVER match rules that ask * things we don't know, ie. tcp syn flag or ports). If the * rule is also a fragment-specific rule, non-fragments won't * match it. */ acpar.hotdrop = false; acpar.net = state->net; acpar.in = state->in; acpar.out = state->out; acpar.family = NFPROTO_IPV6; acpar.hooknum = hook; IP_NF_ASSERT(table->valid_hooks & (1 << hook)); local_bh_disable(); addend = xt_write_recseq_begin(); private = table->private; /* * Ensure we load private-> members after we've fetched the base * pointer. */ smp_read_barrier_depends(); cpu = smp_processor_id(); table_base = private->entries; jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; /* Switch to alternate jumpstack if we're being invoked via TEE. * TEE issues XT_CONTINUE verdict on original skb so we must not * clobber the jumpstack. * * For recursion via REJECT or SYNPROXY the stack will be clobbered * but it is no problem since absolute verdict is issued by these. 
*/ if (static_key_false(&xt_tee_enabled)) jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); e = get_entry(table_base, private->hook_entry[hook]); do { const struct xt_entry_target *t; const struct xt_entry_match *ematch; struct xt_counters *counter; IP_NF_ASSERT(e); acpar.thoff = 0; if (!ip6_packet_match(skb, indev, outdev, &e->ipv6, &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) { no_match: e = ip6t_next_entry(e); continue; } xt_ematch_foreach(ematch, e) { acpar.match = ematch->u.kernel.match; acpar.matchinfo = ematch->data; if (!acpar.match->match(skb, &acpar)) goto no_match; } counter = xt_get_this_cpu_counter(&e->counters); ADD_COUNTER(*counter, skb->len, 1); t = ip6t_get_target_c(e); IP_NF_ASSERT(t->u.kernel.target); #if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) /* The packet is traced: log it */ if (unlikely(skb->nf_trace)) trace_packet(state->net, skb, hook, state->in, state->out, table->name, private, e); #endif /* Standard target? */ if (!t->u.kernel.target->target) { int v; v = ((struct xt_standard_target *)t)->verdict; if (v < 0) { /* Pop from stack? */ if (v != XT_RETURN) { verdict = (unsigned int)(-v) - 1; break; } if (stackidx == 0) e = get_entry(table_base, private->underflow[hook]); else e = ip6t_next_entry(jumpstack[--stackidx]); continue; } if (table_base + v != ip6t_next_entry(e) && !(e->ipv6.flags & IP6T_F_GOTO)) { jumpstack[stackidx++] = e; } e = get_entry(table_base, v); continue; } acpar.target = t->u.kernel.target; acpar.targinfo = t->data; verdict = t->u.kernel.target->target(skb, &acpar); if (verdict == XT_CONTINUE) e = ip6t_next_entry(e); else /* Verdict */ break; } while (!acpar.hotdrop); xt_write_recseq_end(addend); local_bh_enable(); #ifdef DEBUG_ALLOW_ALL return NF_ACCEPT; #else if (acpar.hotdrop) return NF_DROP; else return verdict; #endif } /* Figures out from what hook each rule can be called: returns 0 if there are loops. Puts hook bitmask in comefrom. */ static int mark_source_chains(const struct xt_table_info *newinfo, unsigned int valid_hooks, void *entry0) { unsigned int hook; /* No recursion; use packet counter to save back ptrs (reset to 0 as we leave), and comefrom to save source hook bitmask */ for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) { unsigned int pos = newinfo->hook_entry[hook]; struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos); if (!(valid_hooks & (1 << hook))) continue; /* Set initial back pointer. */ e->counters.pcnt = pos; for (;;) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(e); int visited = e->comefrom & (1 << hook); if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { pr_err("iptables: loop hook %u pos %u %08X.\n", hook, pos, e->comefrom); return 0; } e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS)); /* Unconditional return/END. */ if ((unconditional(e) && (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < 0) || visited) { unsigned int oldpos, size; if ((strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0) && t->verdict < -NF_MAX_VERDICT - 1) { duprintf("mark_source_chains: bad " "negative verdict (%i)\n", t->verdict); return 0; } /* Return: backtrack through the last big jump. */ do { e->comefrom ^= (1<<NF_INET_NUMHOOKS); #ifdef DEBUG_IP_FIREWALL_USER if (e->comefrom & (1 << NF_INET_NUMHOOKS)) { duprintf("Back unset " "on hook %u " "rule %u\n", hook, pos); } #endif oldpos = pos; pos = e->counters.pcnt; e->counters.pcnt = 0; /* We're at the start. 
*/ if (pos == oldpos) goto next; e = (struct ip6t_entry *) (entry0 + pos); } while (oldpos == pos + e->next_offset); /* Move along one */ size = e->next_offset; e = (struct ip6t_entry *) (entry0 + pos + size); e->counters.pcnt = pos; pos += size; } else { int newpos = t->verdict; if (strcmp(t->target.u.user.name, XT_STANDARD_TARGET) == 0 && newpos >= 0) { if (newpos > newinfo->size - sizeof(struct ip6t_entry)) { duprintf("mark_source_chains: " "bad verdict (%i)\n", newpos); return 0; } /* This is a jump; chase it. */ duprintf("Jump rule %u -> %u\n", pos, newpos); } else { /* ... this is a fallthru */ newpos = pos + e->next_offset; } e = (struct ip6t_entry *) (entry0 + newpos); e->counters.pcnt = pos; pos = newpos; } } next: duprintf("Finished chain %u\n", hook); } return 1; } static void cleanup_match(struct xt_entry_match *m, struct net *net) { struct xt_mtdtor_param par; par.net = net; par.match = m->u.kernel.match; par.matchinfo = m->data; par.family = NFPROTO_IPV6; if (par.match->destroy != NULL) par.match->destroy(&par); module_put(par.match->me); } static int check_entry(const struct ip6t_entry *e) { const struct xt_entry_target *t; if (!ip6_checkentry(&e->ipv6)) return -EINVAL; if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset) return -EINVAL; t = ip6t_get_target_c(e); if (e->target_offset + t->u.target_size > e->next_offset) return -EINVAL; return 0; } static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { const struct ip6t_ip6 *ipv6 = par->entryinfo; int ret; par->match = m->u.kernel.match; par->matchinfo = m->data; ret = xt_check_match(par, m->u.match_size - sizeof(*m), ipv6->proto, ipv6->invflags & IP6T_INV_PROTO); if (ret < 0) { duprintf("ip_tables: check failed for `%s'.\n", par->match->name); return ret; } return 0; } static int find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par) { struct xt_match *match; int ret; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("find_check_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; ret = check_match(m, par); if (ret) goto err; return 0; err: module_put(m->u.kernel.match->me); return ret; } static int check_target(struct ip6t_entry *e, struct net *net, const char *name) { struct xt_entry_target *t = ip6t_get_target(e); struct xt_tgchk_param par = { .net = net, .table = name, .entryinfo = e, .target = t->u.kernel.target, .targinfo = t->data, .hook_mask = e->comefrom, .family = NFPROTO_IPV6, }; int ret; t = ip6t_get_target(e); ret = xt_check_target(&par, t->u.target_size - sizeof(*t), e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO); if (ret < 0) { duprintf("ip_tables: check failed for `%s'.\n", t->u.kernel.target->name); return ret; } return 0; } static int find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, unsigned int size) { struct xt_entry_target *t; struct xt_target *target; int ret; unsigned int j; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV6; xt_ematch_foreach(ematch, e) { ret = find_check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } t = ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) {
duprintf("find_check_entry: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto cleanup_matches; } t->u.kernel.target = target; ret = check_target(e, net, name); if (ret) goto err; return 0; err: module_put(t->u.kernel.target->me); cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static bool check_underflow(const struct ip6t_entry *e) { const struct xt_entry_target *t; unsigned int verdict; if (!unconditional(e)) return false; t = ip6t_get_target_c(e); if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0) return false; verdict = ((struct xt_standard_target *)t)->verdict; verdict = -verdict - 1; return verdict == NF_DROP || verdict == NF_ACCEPT; } static int check_entry_size_and_hooks(struct ip6t_entry *e, struct xt_table_info *newinfo, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, unsigned int valid_hooks) { unsigned int h; int err; if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p\n", e); return -EINVAL; } if (e->next_offset < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } err = check_entry(e); if (err) return err; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if (!(valid_hooks & (1 << h))) continue; if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) { if (!check_underflow(e)) { pr_debug("Underflows must be unconditional and " "use the STANDARD target with " "ACCEPT/DROP\n"); return -EINVAL; } newinfo->underflow[h] = underflows[h]; } } /* Clear counters and comefrom */ e->counters = ((struct xt_counters) { 0, 0 }); e->comefrom = 0; return 0; } static void cleanup_entry(struct ip6t_entry *e, struct net *net) { struct xt_tgdtor_param par; struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) cleanup_match(ematch, net); t = ip6t_get_target(e); par.net = net; par.target = t->u.kernel.target; par.targinfo = t->data; par.family = NFPROTO_IPV6; if (par.target->destroy != NULL) par.target->destroy(&par); module_put(par.target->me); xt_percpu_counter_free(e->counters.pcnt); } /* Checks and translates the user-supplied table segment (held in newinfo) */ static int translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0, const struct ip6t_replace *repl) { struct ip6t_entry *iter; unsigned int i; int ret = 0; newinfo->size = repl->size; newinfo->number = repl->num_entries; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = 0xFFFFFFFF; newinfo->underflow[i] = 0xFFFFFFFF; } duprintf("translate_table: size %u\n", newinfo->size); i = 0; /* Walk through entries, checking offsets. 
*/ xt_entry_foreach(iter, entry0, newinfo->size) { ret = check_entry_size_and_hooks(iter, newinfo, entry0, entry0 + repl->size, repl->hook_entry, repl->underflow, repl->valid_hooks); if (ret != 0) return ret; ++i; if (strcmp(ip6t_get_target(iter)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (i != repl->num_entries) { duprintf("translate_table: %u not %u entries\n", i, repl->num_entries); return -EINVAL; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(repl->valid_hooks & (1 << i))) continue; if (newinfo->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, repl->hook_entry[i]); return -EINVAL; } if (newinfo->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, repl->underflow[i]); return -EINVAL; } } if (!mark_source_chains(newinfo, repl->valid_hooks, entry0)) return -ELOOP; /* Finally, each sanity check must pass */ i = 0; xt_entry_foreach(iter, entry0, newinfo->size) { ret = find_check_entry(iter, net, repl->name, repl->size); if (ret != 0) break; ++i; } if (ret != 0) { xt_entry_foreach(iter, entry0, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter, net); } return ret; } return ret; } static void get_counters(const struct xt_table_info *t, struct xt_counters counters[]) { struct ip6t_entry *iter; unsigned int cpu; unsigned int i; for_each_possible_cpu(cpu) { seqcount_t *s = &per_cpu(xt_recseq, cpu); i = 0; xt_entry_foreach(iter, t->entries, t->size) { struct xt_counters *tmp; u64 bcnt, pcnt; unsigned int start; tmp = xt_get_per_cpu_counter(&iter->counters, cpu); do { start = read_seqcount_begin(s); bcnt = tmp->bcnt; pcnt = tmp->pcnt; } while (read_seqcount_retry(s, start)); ADD_COUNTER(counters[i], bcnt, pcnt); ++i; } } } static struct xt_counters *alloc_counters(const struct xt_table *table) { unsigned int countersize; struct xt_counters *counters; const struct xt_table_info *private = table->private; /* We need atomic snapshot of counters: rest doesn't change (other than comefrom, which userspace doesn't care about). */ countersize = sizeof(struct xt_counters) * private->number; counters = vzalloc(countersize); if (counters == NULL) return ERR_PTR(-ENOMEM); get_counters(private, counters); return counters; } static int copy_entries_to_user(unsigned int total_size, const struct xt_table *table, void __user *userptr) { unsigned int off, num; const struct ip6t_entry *e; struct xt_counters *counters; const struct xt_table_info *private = table->private; int ret = 0; const void *loc_cpu_entry; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); loc_cpu_entry = private->entries; if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) { ret = -EFAULT; goto free_counters; } /* FIXME: use iterator macros --RR */ /* ... 
then go back and fix counters and names */ for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){ unsigned int i; const struct xt_entry_match *m; const struct xt_entry_target *t; e = (struct ip6t_entry *)(loc_cpu_entry + off); if (copy_to_user(userptr + off + offsetof(struct ip6t_entry, counters), &counters[num], sizeof(counters[num])) != 0) { ret = -EFAULT; goto free_counters; } for (i = sizeof(struct ip6t_entry); i < e->target_offset; i += m->u.match_size) { m = (void *)e + i; if (copy_to_user(userptr + off + i + offsetof(struct xt_entry_match, u.user.name), m->u.kernel.match->name, strlen(m->u.kernel.match->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } t = ip6t_get_target_c(e); if (copy_to_user(userptr + off + e->target_offset + offsetof(struct xt_entry_target, u.user.name), t->u.kernel.target->name, strlen(t->u.kernel.target->name)+1) != 0) { ret = -EFAULT; goto free_counters; } } free_counters: vfree(counters); return ret; } #ifdef CONFIG_COMPAT static void compat_standard_from_user(void *dst, const void *src) { int v = *(compat_int_t *)src; if (v > 0) v += xt_compat_calc_jump(AF_INET6, v); memcpy(dst, &v, sizeof(v)); } static int compat_standard_to_user(void __user *dst, const void *src) { compat_int_t cv = *(int *)src; if (cv > 0) cv -= xt_compat_calc_jump(AF_INET6, cv); return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0; } static int compat_calc_entry(const struct ip6t_entry *e, const struct xt_table_info *info, const void *base, struct xt_table_info *newinfo) { const struct xt_entry_match *ematch; const struct xt_entry_target *t; unsigned int entry_offset; int off, i, ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - base; xt_ematch_foreach(ematch, e) off += xt_compat_match_offset(ematch->u.kernel.match); t = ip6t_get_target_c(e); off += xt_compat_target_offset(t->u.kernel.target); newinfo->size -= off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) return ret; for (i = 0; i < NF_INET_NUMHOOKS; i++) { if (info->hook_entry[i] && (e < (struct ip6t_entry *)(base + info->hook_entry[i]))) newinfo->hook_entry[i] -= off; if (info->underflow[i] && (e < (struct ip6t_entry *)(base + info->underflow[i]))) newinfo->underflow[i] -= off; } return 0; } static int compat_table_info(const struct xt_table_info *info, struct xt_table_info *newinfo) { struct ip6t_entry *iter; const void *loc_cpu_entry; int ret; if (!newinfo || !info) return -EINVAL; /* we dont care about newinfo->entries */ memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); newinfo->initial_entries = 0; loc_cpu_entry = info->entries; xt_compat_init_offsets(AF_INET6, info->number); xt_entry_foreach(iter, loc_cpu_entry, info->size) { ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo); if (ret != 0) return ret; } return 0; } #endif static int get_info(struct net *net, void __user *user, const int *len, int compat) { char name[XT_TABLE_MAXNAMELEN]; struct xt_table *t; int ret; if (*len != sizeof(struct ip6t_getinfo)) { duprintf("length %u != %zu\n", *len, sizeof(struct ip6t_getinfo)); return -EINVAL; } if (copy_from_user(name, user, sizeof(name)) != 0) return -EFAULT; name[XT_TABLE_MAXNAMELEN-1] = '\0'; #ifdef CONFIG_COMPAT if (compat) xt_compat_lock(AF_INET6); #endif t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), "ip6table_%s", name); if (!IS_ERR_OR_NULL(t)) { struct ip6t_getinfo info; const struct xt_table_info *private = t->private; #ifdef CONFIG_COMPAT struct xt_table_info tmp; if (compat) { 
ret = compat_table_info(private, &tmp); xt_compat_flush_offsets(AF_INET6); private = &tmp; } #endif memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); memcpy(info.underflow, private->underflow, sizeof(info.underflow)); info.num_entries = private->number; info.size = private->size; strcpy(info.name, name); if (copy_to_user(user, &info, *len) != 0) ret = -EFAULT; else ret = 0; xt_table_unlock(t); module_put(t->me); } else ret = t ? PTR_ERR(t) : -ENOENT; #ifdef CONFIG_COMPAT if (compat) xt_compat_unlock(AF_INET6); #endif return ret; } static int get_entries(struct net *net, struct ip6t_get_entries __user *uptr, const int *len) { int ret; struct ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct ip6t_get_entries) + get.size) { duprintf("get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR_OR_NULL(t)) { struct xt_table_info *private = t->private; duprintf("t->private->number = %u\n", private->number); if (get.size == private->size) ret = copy_entries_to_user(private->size, t, uptr->entrytable); else { duprintf("get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; return ret; } static int __do_replace(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info *newinfo, unsigned int num_counters, void __user *counters_ptr) { int ret; struct xt_table *t; struct xt_table_info *oldinfo; struct xt_counters *counters; struct ip6t_entry *iter; ret = 0; counters = vzalloc(num_counters * sizeof(struct xt_counters)); if (!counters) { ret = -ENOMEM; goto out; } t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name), "ip6table_%s", name); if (IS_ERR_OR_NULL(t)) { ret = t ? PTR_ERR(t) : -ENOENT; goto free_newinfo_counters_untrans; } /* You lied! 
*/ if (valid_hooks != t->valid_hooks) { duprintf("Valid hook crap: %08X vs %08X\n", valid_hooks, t->valid_hooks); ret = -EINVAL; goto put_module; } oldinfo = xt_replace_table(t, num_counters, newinfo, &ret); if (!oldinfo) goto put_module; /* Update module usage count based on number of rules */ duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n", oldinfo->number, oldinfo->initial_entries, newinfo->number); if ((oldinfo->number > oldinfo->initial_entries) || (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); if ((oldinfo->number > oldinfo->initial_entries) && (newinfo->number <= oldinfo->initial_entries)) module_put(t->me); /* Get the old counters, and synchronize with replace */ get_counters(oldinfo, counters); /* Decrease module usage counts and free resource */ xt_entry_foreach(iter, oldinfo->entries, oldinfo->size) cleanup_entry(iter, net); xt_free_table_info(oldinfo); if (copy_to_user(counters_ptr, counters, sizeof(struct xt_counters) * num_counters) != 0) { /* Silent error, can't fail, new table is already in place */ net_warn_ratelimited("ip6tables: counters copy to user failed while replacing table\n"); } vfree(counters); xt_table_unlock(t); return ret; put_module: module_put(t->me); xt_table_unlock(t); free_newinfo_counters_untrans: vfree(counters); out: return ret; } static int do_replace(struct net *net, const void __user *user, unsigned int len) { int ret; struct ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); if (ret != 0) goto free_newinfo; duprintf("ip_tables: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, tmp.counters); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int do_add_counters(struct net *net, const void __user *user, unsigned int len, int compat) { unsigned int i; struct xt_counters_info tmp; struct xt_counters *paddc; unsigned int num_counters; char *name; int size; void *ptmp; struct xt_table *t; const struct xt_table_info *private; int ret = 0; struct ip6t_entry *iter; unsigned int addend; #ifdef CONFIG_COMPAT struct compat_xt_counters_info compat_tmp; if (compat) { ptmp = &compat_tmp; size = sizeof(struct compat_xt_counters_info); } else #endif { ptmp = &tmp; size = sizeof(struct xt_counters_info); } if (copy_from_user(ptmp, user, size) != 0) return -EFAULT; #ifdef CONFIG_COMPAT if (compat) { num_counters = compat_tmp.num_counters; name = compat_tmp.name; } else #endif { num_counters = tmp.num_counters; name = tmp.name; } if (len != size + num_counters * sizeof(struct xt_counters)) return -EINVAL; paddc = vmalloc(len - size); if (!paddc) return -ENOMEM; if (copy_from_user(paddc, user + size, len - size) != 0) { ret = -EFAULT; goto free; } t = xt_find_table_lock(net, AF_INET6, name); if (IS_ERR_OR_NULL(t)) { ret = t ? 
PTR_ERR(t) : -ENOENT; goto free; } local_bh_disable(); private = t->private; if (private->number != num_counters) { ret = -EINVAL; goto unlock_up_free; } i = 0; addend = xt_write_recseq_begin(); xt_entry_foreach(iter, private->entries, private->size) { struct xt_counters *tmp; tmp = xt_get_this_cpu_counter(&iter->counters); ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt); ++i; } xt_write_recseq_end(addend); unlock_up_free: local_bh_enable(); xt_table_unlock(t); module_put(t->me); free: vfree(paddc); return ret; } #ifdef CONFIG_COMPAT struct compat_ip6t_replace { char name[XT_TABLE_MAXNAMELEN]; u32 valid_hooks; u32 num_entries; u32 size; u32 hook_entry[NF_INET_NUMHOOKS]; u32 underflow[NF_INET_NUMHOOKS]; u32 num_counters; compat_uptr_t counters; /* struct xt_counters * */ struct compat_ip6t_entry entries[0]; }; static int compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr, unsigned int *size, struct xt_counters *counters, unsigned int i) { struct xt_entry_target *t; struct compat_ip6t_entry __user *ce; u_int16_t target_offset, next_offset; compat_uint_t origsize; const struct xt_entry_match *ematch; int ret = 0; origsize = *size; ce = (struct compat_ip6t_entry __user *)*dstptr; if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 || copy_to_user(&ce->counters, &counters[i], sizeof(counters[i])) != 0) return -EFAULT; *dstptr += sizeof(struct compat_ip6t_entry); *size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_to_user(ematch, dstptr, size); if (ret != 0) return ret; } target_offset = e->target_offset - (origsize - *size); t = ip6t_get_target(e); ret = xt_compat_target_to_user(t, dstptr, size); if (ret) return ret; next_offset = e->next_offset - (origsize - *size); if (put_user(target_offset, &ce->target_offset) != 0 || put_user(next_offset, &ce->next_offset) != 0) return -EFAULT; return 0; } static int compat_find_calc_match(struct xt_entry_match *m, const char *name, const struct ip6t_ip6 *ipv6, int *size) { struct xt_match *match; match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name, m->u.user.revision); if (IS_ERR(match)) { duprintf("compat_check_calc_match: `%s' not found\n", m->u.user.name); return PTR_ERR(match); } m->u.kernel.match = match; *size += xt_compat_match_offset(match); return 0; } static void compat_release_entry(struct compat_ip6t_entry *e) { struct xt_entry_target *t; struct xt_entry_match *ematch; /* Cleanup all matches */ xt_ematch_foreach(ematch, e) module_put(ematch->u.kernel.match->me); t = compat_ip6t_get_target(e); module_put(t->u.kernel.target->me); } static int check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e, struct xt_table_info *newinfo, unsigned int *size, const unsigned char *base, const unsigned char *limit, const unsigned int *hook_entries, const unsigned int *underflows, const char *name) { struct xt_entry_match *ematch; struct xt_entry_target *t; struct xt_target *target; unsigned int entry_offset; unsigned int j; int ret, off, h; duprintf("check_compat_entry_size_and_hooks %p\n", e); if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 || (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit || (unsigned char *)e + e->next_offset > limit) { duprintf("Bad offset %p, limit = %p\n", e, limit); return -EINVAL; } if (e->next_offset < sizeof(struct compat_ip6t_entry) + sizeof(struct compat_xt_entry_target)) { duprintf("checking: element %p size %u\n", e, e->next_offset); return -EINVAL; } /* For purposes of check_entry casting 
the compat entry is fine */ ret = check_entry((struct ip6t_entry *)e); if (ret) return ret; off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); entry_offset = (void *)e - (void *)base; j = 0; xt_ematch_foreach(ematch, e) { ret = compat_find_calc_match(ematch, name, &e->ipv6, &off); if (ret != 0) goto release_matches; ++j; } t = compat_ip6t_get_target(e); target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name, t->u.user.revision); if (IS_ERR(target)) { duprintf("check_compat_entry_size_and_hooks: `%s' not found\n", t->u.user.name); ret = PTR_ERR(target); goto release_matches; } t->u.kernel.target = target; off += xt_compat_target_offset(target); *size += off; ret = xt_compat_add_offset(AF_INET6, entry_offset, off); if (ret) goto out; /* Check hooks & underflows */ for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)e - base == hook_entries[h]) newinfo->hook_entry[h] = hook_entries[h]; if ((unsigned char *)e - base == underflows[h]) newinfo->underflow[h] = underflows[h]; } /* Clear counters and comefrom */ memset(&e->counters, 0, sizeof(e->counters)); e->comefrom = 0; return 0; out: module_put(t->u.kernel.target->me); release_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; module_put(ematch->u.kernel.match->me); } return ret; } static int compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr, unsigned int *size, const char *name, struct xt_table_info *newinfo, unsigned char *base) { struct xt_entry_target *t; struct ip6t_entry *de; unsigned int origsize; int ret, h; struct xt_entry_match *ematch; ret = 0; origsize = *size; de = (struct ip6t_entry *)*dstptr; memcpy(de, e, sizeof(struct ip6t_entry)); memcpy(&de->counters, &e->counters, sizeof(e->counters)); *dstptr += sizeof(struct ip6t_entry); *size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry); xt_ematch_foreach(ematch, e) { ret = xt_compat_match_from_user(ematch, dstptr, size); if (ret != 0) return ret; } de->target_offset = e->target_offset - (origsize - *size); t = compat_ip6t_get_target(e); xt_compat_target_from_user(t, dstptr, size); de->next_offset = e->next_offset - (origsize - *size); for (h = 0; h < NF_INET_NUMHOOKS; h++) { if ((unsigned char *)de - base < newinfo->hook_entry[h]) newinfo->hook_entry[h] -= origsize - *size; if ((unsigned char *)de - base < newinfo->underflow[h]) newinfo->underflow[h] -= origsize - *size; } return ret; } static int compat_check_entry(struct ip6t_entry *e, struct net *net, const char *name) { unsigned int j; int ret = 0; struct xt_mtchk_param mtpar; struct xt_entry_match *ematch; e->counters.pcnt = xt_percpu_counter_alloc(); if (IS_ERR_VALUE(e->counters.pcnt)) return -ENOMEM; j = 0; mtpar.net = net; mtpar.table = name; mtpar.entryinfo = &e->ipv6; mtpar.hook_mask = e->comefrom; mtpar.family = NFPROTO_IPV6; xt_ematch_foreach(ematch, e) { ret = check_match(ematch, &mtpar); if (ret != 0) goto cleanup_matches; ++j; } ret = check_target(e, net, name); if (ret) goto cleanup_matches; return 0; cleanup_matches: xt_ematch_foreach(ematch, e) { if (j-- == 0) break; cleanup_match(ematch, net); } xt_percpu_counter_free(e->counters.pcnt); return ret; } static int translate_compat_table(struct net *net, const char *name, unsigned int valid_hooks, struct xt_table_info **pinfo, void **pentry0, unsigned int total_size, unsigned int number, unsigned int *hook_entries, unsigned int *underflows) { unsigned int i, j; struct xt_table_info *newinfo, *info; void *pos, *entry0, *entry1; struct compat_ip6t_entry *iter0; struct ip6t_entry *iter1; 
unsigned int size; int ret = 0; info = *pinfo; entry0 = *pentry0; size = total_size; info->number = number; /* Init all hooks to impossible value. */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { info->hook_entry[i] = 0xFFFFFFFF; info->underflow[i] = 0xFFFFFFFF; } duprintf("translate_compat_table: size %u\n", info->size); j = 0; xt_compat_lock(AF_INET6); xt_compat_init_offsets(AF_INET6, number); /* Walk through entries, checking offsets. */ xt_entry_foreach(iter0, entry0, total_size) { ret = check_compat_entry_size_and_hooks(iter0, info, &size, entry0, entry0 + total_size, hook_entries, underflows, name); if (ret != 0) goto out_unlock; ++j; } ret = -EINVAL; if (j != number) { duprintf("translate_compat_table: %u not %u entries\n", j, number); goto out_unlock; } /* Check hooks all assigned */ for (i = 0; i < NF_INET_NUMHOOKS; i++) { /* Only hooks which are valid */ if (!(valid_hooks & (1 << i))) continue; if (info->hook_entry[i] == 0xFFFFFFFF) { duprintf("Invalid hook entry %u %u\n", i, hook_entries[i]); goto out_unlock; } if (info->underflow[i] == 0xFFFFFFFF) { duprintf("Invalid underflow %u %u\n", i, underflows[i]); goto out_unlock; } } ret = -ENOMEM; newinfo = xt_alloc_table_info(size); if (!newinfo) goto out_unlock; newinfo->number = number; for (i = 0; i < NF_INET_NUMHOOKS; i++) { newinfo->hook_entry[i] = info->hook_entry[i]; newinfo->underflow[i] = info->underflow[i]; } entry1 = newinfo->entries; pos = entry1; size = total_size; xt_entry_foreach(iter0, entry0, total_size) { ret = compat_copy_entry_from_user(iter0, &pos, &size, name, newinfo, entry1); if (ret != 0) break; } xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); if (ret) goto free_newinfo; ret = -ELOOP; if (!mark_source_chains(newinfo, valid_hooks, entry1)) goto free_newinfo; i = 0; xt_entry_foreach(iter1, entry1, newinfo->size) { ret = compat_check_entry(iter1, net, name); if (ret != 0) break; ++i; if (strcmp(ip6t_get_target(iter1)->u.user.name, XT_ERROR_TARGET) == 0) ++newinfo->stacksize; } if (ret) { /* * The first i matches need cleanup_entry (calls ->destroy) * because they had called ->check already. The other j-i * entries need only release. 
*/ int skip = i; j -= i; xt_entry_foreach(iter0, entry0, newinfo->size) { if (skip-- > 0) continue; if (j-- == 0) break; compat_release_entry(iter0); } xt_entry_foreach(iter1, entry1, newinfo->size) { if (i-- == 0) break; cleanup_entry(iter1, net); } xt_free_table_info(newinfo); return ret; } *pinfo = newinfo; *pentry0 = entry1; xt_free_table_info(info); return 0; free_newinfo: xt_free_table_info(newinfo); out: xt_entry_foreach(iter0, entry0, total_size) { if (j-- == 0) break; compat_release_entry(iter0); } return ret; out_unlock: xt_compat_flush_offsets(AF_INET6); xt_compat_unlock(AF_INET6); goto out; } static int compat_do_replace(struct net *net, void __user *user, unsigned int len) { int ret; struct compat_ip6t_replace tmp; struct xt_table_info *newinfo; void *loc_cpu_entry; struct ip6t_entry *iter; if (copy_from_user(&tmp, user, sizeof(tmp)) != 0) return -EFAULT; /* overflow check */ if (tmp.size >= INT_MAX / num_possible_cpus()) return -ENOMEM; if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters)) return -ENOMEM; if (tmp.num_counters == 0) return -EINVAL; tmp.name[sizeof(tmp.name)-1] = 0; newinfo = xt_alloc_table_info(tmp.size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; if (copy_from_user(loc_cpu_entry, user + sizeof(tmp), tmp.size) != 0) { ret = -EFAULT; goto free_newinfo; } ret = translate_compat_table(net, tmp.name, tmp.valid_hooks, &newinfo, &loc_cpu_entry, tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow); if (ret != 0) goto free_newinfo; duprintf("compat_do_replace: Translated table\n"); ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo, tmp.num_counters, compat_ptr(tmp.counters)); if (ret) goto free_newinfo_untrans; return 0; free_newinfo_untrans: xt_entry_foreach(iter, loc_cpu_entry, newinfo->size) cleanup_entry(iter, net); free_newinfo: xt_free_table_info(newinfo); return ret; } static int compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_SET_REPLACE: ret = compat_do_replace(sock_net(sk), user, len); break; case IP6T_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 1); break; default: duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } struct compat_ip6t_get_entries { char name[XT_TABLE_MAXNAMELEN]; compat_uint_t size; struct compat_ip6t_entry entrytable[0]; }; static int compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table, void __user *userptr) { struct xt_counters *counters; const struct xt_table_info *private = table->private; void __user *pos; unsigned int size; int ret = 0; unsigned int i = 0; struct ip6t_entry *iter; counters = alloc_counters(table); if (IS_ERR(counters)) return PTR_ERR(counters); pos = userptr; size = total_size; xt_entry_foreach(iter, private->entries, total_size) { ret = compat_copy_entry_to_user(iter, &pos, &size, counters, i++); if (ret != 0) break; } vfree(counters); return ret; } static int compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr, int *len) { int ret; struct compat_ip6t_get_entries get; struct xt_table *t; if (*len < sizeof(get)) { duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get)); return -EINVAL; } if (copy_from_user(&get, uptr, sizeof(get)) != 0) return -EFAULT; if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) { duprintf("compat_get_entries: %u != %zu\n", *len, sizeof(get) + get.size); return -EINVAL; } 
xt_compat_lock(AF_INET6); t = xt_find_table_lock(net, AF_INET6, get.name); if (!IS_ERR_OR_NULL(t)) { const struct xt_table_info *private = t->private; struct xt_table_info info; duprintf("t->private->number = %u\n", private->number); ret = compat_table_info(private, &info); if (!ret && get.size == info.size) { ret = compat_copy_entries_to_user(private->size, t, uptr->entrytable); } else if (!ret) { duprintf("compat_get_entries: I've got %u not %u!\n", private->size, get.size); ret = -EAGAIN; } xt_compat_flush_offsets(AF_INET6); module_put(t->me); xt_table_unlock(t); } else ret = t ? PTR_ERR(t) : -ENOENT; xt_compat_unlock(AF_INET6); return ret; } static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *); static int compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 1); break; case IP6T_SO_GET_ENTRIES: ret = compat_get_entries(sock_net(sk), user, len); break; default: ret = do_ip6t_get_ctl(sk, cmd, user, len); } return ret; } #endif static int do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_SET_REPLACE: ret = do_replace(sock_net(sk), user, len); break; case IP6T_SO_SET_ADD_COUNTERS: ret = do_add_counters(sock_net(sk), user, len, 0); break; default: duprintf("do_ip6t_set_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static int do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { int ret; if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) return -EPERM; switch (cmd) { case IP6T_SO_GET_INFO: ret = get_info(sock_net(sk), user, len, 0); break; case IP6T_SO_GET_ENTRIES: ret = get_entries(sock_net(sk), user, len); break; case IP6T_SO_GET_REVISION_MATCH: case IP6T_SO_GET_REVISION_TARGET: { struct xt_get_revision rev; int target; if (*len != sizeof(rev)) { ret = -EINVAL; break; } if (copy_from_user(&rev, user, sizeof(rev)) != 0) { ret = -EFAULT; break; } rev.name[sizeof(rev.name)-1] = 0; if (cmd == IP6T_SO_GET_REVISION_TARGET) target = 1; else target = 0; try_then_request_module(xt_find_revision(AF_INET6, rev.name, rev.revision, target, &ret), "ip6t_%s", rev.name); break; } default: duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd); ret = -EINVAL; } return ret; } static void __ip6t_unregister_table(struct net *net, struct xt_table *table) { struct xt_table_info *private; void *loc_cpu_entry; struct module *table_owner = table->me; struct ip6t_entry *iter; private = xt_unregister_table(table); /* Decrease module usage counts and free resources */ loc_cpu_entry = private->entries; xt_entry_foreach(iter, loc_cpu_entry, private->size) cleanup_entry(iter, net); if (private->number > private->initial_entries) module_put(table_owner); xt_free_table_info(private); } int ip6t_register_table(struct net *net, const struct xt_table *table, const struct ip6t_replace *repl, const struct nf_hook_ops *ops, struct xt_table **res) { int ret; struct xt_table_info *newinfo; struct xt_table_info bootstrap = {0}; void *loc_cpu_entry; struct xt_table *new_table; newinfo = xt_alloc_table_info(repl->size); if (!newinfo) return -ENOMEM; loc_cpu_entry = newinfo->entries; memcpy(loc_cpu_entry, repl->entries, repl->size); ret = translate_table(net, newinfo, loc_cpu_entry, repl); if (ret != 0) goto out_free; new_table = xt_register_table(net, table, 
&bootstrap, newinfo); if (IS_ERR(new_table)) { ret = PTR_ERR(new_table); goto out_free; } /* set res now, will see skbs right after nf_register_net_hooks */ WRITE_ONCE(*res, new_table); ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); if (ret != 0) { __ip6t_unregister_table(net, new_table); *res = NULL; } return ret; out_free: xt_free_table_info(newinfo); return ret; } void ip6t_unregister_table(struct net *net, struct xt_table *table, const struct nf_hook_ops *ops) { nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks)); __ip6t_unregister_table(net, table); } /* Returns 1 if the type and code is matched by the range, 0 otherwise */ static inline bool icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code, u_int8_t type, u_int8_t code, bool invert) { return (type == test_type && code >= min_code && code <= max_code) ^ invert; } static bool icmp6_match(const struct sk_buff *skb, struct xt_action_param *par) { const struct icmp6hdr *ic; struct icmp6hdr _icmph; const struct ip6t_icmp *icmpinfo = par->matchinfo; /* Must not be a fragment. */ if (par->fragoff != 0) return false; ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); if (ic == NULL) { /* We've been asked to examine this packet, and we * can't. Hence, no choice but to drop. */ duprintf("Dropping evil ICMP tinygram.\n"); par->hotdrop = true; return false; } return icmp6_type_code_match(icmpinfo->type, icmpinfo->code[0], icmpinfo->code[1], ic->icmp6_type, ic->icmp6_code, !!(icmpinfo->invflags&IP6T_ICMP_INV)); } /* Called when user tries to insert an entry of this type. */ static int icmp6_checkentry(const struct xt_mtchk_param *par) { const struct ip6t_icmp *icmpinfo = par->matchinfo; /* Must specify no unknown invflags */ return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0; } /* The built-in targets: standard (NULL) and error. 
*/ static struct xt_target ip6t_builtin_tg[] __read_mostly = { { .name = XT_STANDARD_TARGET, .targetsize = sizeof(int), .family = NFPROTO_IPV6, #ifdef CONFIG_COMPAT .compatsize = sizeof(compat_int_t), .compat_from_user = compat_standard_from_user, .compat_to_user = compat_standard_to_user, #endif }, { .name = XT_ERROR_TARGET, .target = ip6t_error, .targetsize = XT_FUNCTION_MAXNAMELEN, .family = NFPROTO_IPV6, }, }; static struct nf_sockopt_ops ip6t_sockopts = { .pf = PF_INET6, .set_optmin = IP6T_BASE_CTL, .set_optmax = IP6T_SO_SET_MAX+1, .set = do_ip6t_set_ctl, #ifdef CONFIG_COMPAT .compat_set = compat_do_ip6t_set_ctl, #endif .get_optmin = IP6T_BASE_CTL, .get_optmax = IP6T_SO_GET_MAX+1, .get = do_ip6t_get_ctl, #ifdef CONFIG_COMPAT .compat_get = compat_do_ip6t_get_ctl, #endif .owner = THIS_MODULE, }; static struct xt_match ip6t_builtin_mt[] __read_mostly = { { .name = "icmp6", .match = icmp6_match, .matchsize = sizeof(struct ip6t_icmp), .checkentry = icmp6_checkentry, .proto = IPPROTO_ICMPV6, .family = NFPROTO_IPV6, }, }; static int __net_init ip6_tables_net_init(struct net *net) { return xt_proto_init(net, NFPROTO_IPV6); } static void __net_exit ip6_tables_net_exit(struct net *net) { xt_proto_fini(net, NFPROTO_IPV6); } static struct pernet_operations ip6_tables_net_ops = { .init = ip6_tables_net_init, .exit = ip6_tables_net_exit, }; static int __init ip6_tables_init(void) { int ret; ret = register_pernet_subsys(&ip6_tables_net_ops); if (ret < 0) goto err1; /* No one else will be downing sem now, so we won't sleep */ ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); if (ret < 0) goto err2; ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); if (ret < 0) goto err4; /* Register setsockopt */ ret = nf_register_sockopt(&ip6t_sockopts); if (ret < 0) goto err5; pr_info("(C) 2000-2006 Netfilter Core Team\n"); return 0; err5: xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); err4: xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); err2: unregister_pernet_subsys(&ip6_tables_net_ops); err1: return ret; } static void __exit ip6_tables_fini(void) { nf_unregister_sockopt(&ip6t_sockopts); xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt)); xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg)); unregister_pernet_subsys(&ip6_tables_net_ops); } EXPORT_SYMBOL(ip6t_register_table); EXPORT_SYMBOL(ip6t_unregister_table); EXPORT_SYMBOL(ip6t_do_table); module_init(ip6_tables_init); module_exit(ip6_tables_fini);
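The listing above is the full patched net/ipv6/netfilter/ip6_tables.c for this record. Every byte of a ruleset arrives from userspace through do_replace()/translate_table(), so check_entry_size_and_hooks() is the gatekeeper that keeps a crafted blob from steering mark_source_chains(): an entry must be properly aligned, must begin and end inside the blob, and must be large enough to hold the fixed header plus a target record. Below is a minimal, self-contained sketch of that style of offset validation; the types and the entry_in_bounds() name are simplified stand-ins for struct ip6t_entry and the kernel helpers, not the kernel API itself.

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for struct ip6t_entry / struct xt_entry_target. */
struct entry  { uint16_t target_offset; uint16_t next_offset; /* match/target data follow */ };
struct target { uint16_t size; };

/* Hypothetical mirror of check_entry_size_and_hooks()'s bounds logic:
 * base/limit delimit the user-supplied blob, e is the entry under test.
 * Returns 0 when the entry lies entirely inside the blob. */
static int entry_in_bounds(const unsigned char *base, const unsigned char *limit,
                           const struct entry *e)
{
    const unsigned char *p = (const unsigned char *)e;

    if (((uintptr_t)p % _Alignof(struct entry)) != 0)
        return -1;                      /* misaligned entry */
    if (p < base || p + sizeof(*e) >= limit)
        return -1;                      /* fixed header must fit */
    if (e->next_offset > (size_t)(limit - p))
        return -1;                      /* whole entry must fit */
    if (e->next_offset < sizeof(struct entry) + sizeof(struct target))
        return -1;                      /* room for at least one target */
    if ((size_t)e->target_offset + sizeof(struct target) > e->next_offset)
        return -1;                      /* target must sit inside the entry */
    return 0;
}

Only after every entry passes this kind of check does the traversal's pointer arithmetic (entry0 + pos, entry0 + newpos) become safe. The record's func_before and func_after snapshots of get_chainname_rulenum() follow.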
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (s->target_offset == sizeof(struct ip6t_entry) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0 && unconditional(&s->ipv6)) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? comments[NF_IP6_TRACE_COMMENT_POLICY] : comments[NF_IP6_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; }
get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e, const char *hookname, const char **chainname, const char **comment, unsigned int *rulenum) { const struct xt_standard_target *t = (void *)ip6t_get_target_c(s); if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) { /* Head of user chain: ERROR target with chainname */ *chainname = t->target.data; (*rulenum) = 0; } else if (s == e) { (*rulenum)++; if (unconditional(s) && strcmp(t->target.u.kernel.target->name, XT_STANDARD_TARGET) == 0 && t->verdict < 0) { /* Tail of chains: STANDARD target (return/policy) */ *comment = *chainname == hookname ? comments[NF_IP6_TRACE_COMMENT_POLICY] : comments[NF_IP6_TRACE_COMMENT_RETURN]; } return 1; } else (*rulenum)++; return 0; }
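Read side by side, the two snapshots of get_chainname_rulenum() differ only in the tail-of-chain test. Before the fix, the structural condition (s->target_offset == sizeof(struct ip6t_entry)) and the all-zeroes match check (unconditional(&s->ipv6)) were separate clauses that every call site had to remember to pair; after the fix, unconditional(s) performs both, so no caller can accept an entry that zeroes its match header while still carrying match payload in front of the target.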
{'added': [(201, 'static inline bool unconditional(const struct ip6t_entry *e)'), (205, '\treturn e->target_offset == sizeof(struct ip6t_entry) &&'), (206, '\t memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;'), (262, '\t\tif (unconditional(s) &&'), (265, '\t\t t->verdict < 0) {'), (491, '\t\t\tif ((unconditional(e) &&'), (494, '\t\t\t t->verdict < 0) || visited) {'), (729, '\tif (!unconditional(e))'), (777, '\t\t\t\tpr_debug("Underflows must be unconditional and "'), (778, '\t\t\t\t\t "use the STANDARD target with "'), (779, '\t\t\t\t\t "ACCEPT/DROP\\n");')], 'deleted': [(201, 'static inline bool unconditional(const struct ip6t_ip6 *ipv6)'), (205, '\treturn memcmp(ipv6, &uncond, sizeof(uncond)) == 0;'), (261, '\t\tif (s->target_offset == sizeof(struct ip6t_entry) &&'), (264, '\t\t t->verdict < 0 &&'), (265, '\t\t unconditional(&s->ipv6)) {'), (491, '\t\t\tif ((e->target_offset == sizeof(struct ip6t_entry) &&'), (494, '\t\t\t t->verdict < 0 &&'), (495, '\t\t\t unconditional(&e->ipv6)) || visited) {'), (730, '\tif (!unconditional(&e->ipv6))'), (778, '\t\t\t\tpr_err("Underflows must be unconditional and "'), (779, '\t\t\t\t "use the STANDARD target with "'), (780, '\t\t\t\t "ACCEPT/DROP\\n");')]}
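The diff record above is the entire hardening: unconditional() switches from inspecting only the ip6t_ip6 match header to taking the whole entry and additionally requiring target_offset == sizeof(struct ip6t_entry), and check_underflow() (deleted line 730 vs added line 729) moves from the bare zero test to the combined one. A compilable sketch of the two predicate shapes, with struct ip6t_entry reduced to an opaque match header plus the two offsets (the 136-byte size and field order are illustrative, not the real kernel layout):

#include <stdio.h>
#include <string.h>

struct m6 { unsigned char bytes[136]; };   /* stand-in for struct ip6t_ip6 */
struct ent {
    struct m6 ipv6;                /* match header */
    unsigned short target_offset;  /* where the target record starts */
    unsigned short next_offset;    /* where the next entry starts */
};

/* Pre-fix shape: only the match header is compared against zeroes. */
static int unconditional_old(const struct m6 *ipv6)
{
    static const struct m6 uncond;
    return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}

/* Post-fix shape: the target must also start right after the fixed
 * header, i.e. the entry carries no match payload at all. */
static int unconditional_new(const struct ent *e)
{
    static const struct m6 uncond;
    return e->target_offset == sizeof(struct ent) &&
           memcmp(&e->ipv6, &uncond, sizeof(uncond)) == 0;
}

int main(void)
{
    /* Zeroed match header, but 40 bytes of match data before the target. */
    struct ent crafted = { .target_offset = sizeof(struct ent) + 40 };
    printf("old: %d new: %d\n",
           unconditional_old(&crafted.ipv6),   /* prints 1: accepted */
           unconditional_new(&crafted));       /* prints 0: rejected */
    return 0;
}

With the old predicate an underflow entry could zero its match header, park real match records between the header and the target, and still be accepted as an unconditional ACCEPT/DROP policy; the new predicate rejects it because the target no longer starts immediately after the fixed header.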
11
12
1,806
11,126
24
183
8
https://github.com/torvalds/linux
CVE-2016-3134
CWE-119
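Tying the metadata together: CVE-2016-3134 is the x_tables ruleset-validation flaw addressed upstream by the commit titled "netfilter: x_tables: fix unconditional helper", and the CWE-119 classification (improper restriction of operations within the bounds of a memory buffer) applies because a crafted IP6T_SO_SET_REPLACE payload could present entries whose offsets disagreed with their declared shape, pushing later traversal and verdict reads outside the table blob. The counts above (11 lines added, 12 removed) match the tuples listed in this record's diff field.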
891
av_parsers.c
C
gf_avc_read_pps_bs_internal
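The next record (id 891) moves to GPAC's av_parsers.c; the flagged method, gf_avc_read_pps_bs_internal(), is the library's internal AVC/H.264 picture-parameter-set reader, although the excerpt that follows begins at the top of the file with the MPEG-1/2 and MPEG-4 Part 2 parsing layer. PPS syntax elements are Exp-Golomb coded, so a reader of this kind rests on a ue(v) primitive; as a reference point, here is a self-contained sketch of that primitive over a plain byte buffer (BitReader, read_bit and read_ue are illustrative names and deliberately assume nothing about GPAC's GF_BitStream API):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical minimal MSB-first bit reader over a byte buffer. */
typedef struct { const uint8_t *buf; size_t len; size_t bitpos; } BitReader;

static int read_bit(BitReader *br)
{
    if (br->bitpos >= 8 * br->len)
        return -1;                                     /* out of data */
    int bit = (br->buf[br->bitpos >> 3] >> (7 - (br->bitpos & 7))) & 1;
    br->bitpos++;
    return bit;
}

/* ue(v): count leading zero bits up to the first 1, then read that many
 * suffix bits; the decoded value is 2^zeros - 1 + suffix. */
static int64_t read_ue(BitReader *br)
{
    int zeros = 0, bit;
    while ((bit = read_bit(br)) == 0) {
        if (++zeros > 31)
            return -1;                                 /* malformed/overlong */
    }
    if (bit < 0)
        return -1;                                     /* truncated stream */
    uint32_t suffix = 0;
    for (int i = 0; i < zeros; i++) {
        bit = read_bit(br);
        if (bit < 0)
            return -1;
        suffix = (suffix << 1) | (unsigned)bit;
    }
    return ((int64_t)1 << zeros) - 1 + suffix;
}

For example, the bit pattern 0 1 0 decodes to 1 and a lone 1 bit decodes to 0, which is how small IDs such as pic_parameter_set_id are stored.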
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre, Romain Bouqueau, Cyril Concolato * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / Media Tools sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/media_dev.h> #include <gpac/constants.h> #include <gpac/mpeg4_odf.h> #include <gpac/maths.h> #include <gpac/avparse.h> #ifndef GPAC_DISABLE_OGG #include <gpac/internal/ogg.h> #endif //uncomment/define globally to remove all bitstream parsing logging from code (this will break inspect mode ananlyze=bs) //#define GPAC_DISABLE_AVPARSE_LOGS #ifndef GPAC_DISABLE_AVPARSE_LOGS void gf_bs_log_idx(GF_BitStream *bs, u32 nBits, const char *fname, s64 val, s32 idx1, s32 idx2, s32 idx3); #define gf_bs_log(_bs, _nBits, _fname, _val) gf_bs_log_idx(_bs, _nBits, _fname, _val, -1, -1, -1) u32 gf_bs_read_int_log_idx3(GF_BitStream *bs, u32 nBits, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val = gf_bs_read_int(bs, nBits); gf_bs_log_idx(bs, nBits, fname, val, idx1, idx2, idx3); return val; } #define gf_bs_read_int_log(_bs, _nBits, _fname) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, -1, -1, -1) #define gf_bs_read_int_log_idx(_bs, _nBits, _fname, _idx) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, _idx, -1, -1) #define gf_bs_read_int_log_idx2(_bs, _nBits, _fname, _idx1, _idx2) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, (s32) _idx1, (s32) _idx2, -1) #else #define gf_bs_log(_bs, _nBits, _fname, _val) #define gf_bs_log_idx(_bs, _nBits, _fname, _val, _idx1, _idx2, _idx3) #define gf_bs_read_int_log(_bs, _nbb, _f) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx(_bs, _nbb, _f, _idx) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx2(_bs, _nbb, _f, _idx1, _idx2) gf_bs_read_int(_bs, _nbb) #define gf_bs_read_int_log_idx3(_bs, _nbb, _f, _idx1, _idx2, _idx3) gf_bs_read_int(_bs, _nbb) #endif static const struct { u32 w, h; } std_par[] = { { 4, 3}, {3, 2}, {16, 9}, {5, 3}, {5, 4}, {8, 5}, {2, 1}, {1, 1}, {0, 0}, }; GF_EXPORT void gf_media_reduce_aspect_ratio(u32 *width, u32 *height) { u32 i = 0; u32 w = *width; u32 h = *height; while (std_par[i].w) { if (std_par[i].w * h == std_par[i].h * w) { *width = std_par[i].w; *height = std_par[i].h; return; } i++; } //not standard one, reduce by power of 2 i = 2; while (1) { if (w <= i) return; if (h <= i) return; if (w % i) return; if (h % i) return; *width = w / i; *height = h / i; i *= 2; } } GF_EXPORT void gf_media_get_reduced_frame_rate(u32 *timescale, u32 *sample_dur) { u32 res; if (!*sample_dur) return; res = *timescale / *sample_dur; if (res * (*sample_dur) == *timescale) { *timescale = res; *sample_dur = 1; } else if ((double)(*timescale * 1001 - (res + 1) * *sample_dur * 1000) / ((res + 1) * *sample_dur * 1000) < 0.001) { *timescale = (res + 1) * 1000; *sample_dur = 1001; } } struct __m4v_profile { u32 value; const char 
*name; } M4VProfiles[] = { {0x00, "Reserved (0x00) Profile"}, {0x01, "Simple Profile @ Level 1"}, {0x02, "Simple Profile @ Level 2"}, {0x03, "Simple Profile @ Level 3"}, {0x08, "Simple Profile @ Level 0"}, {0x10, "Simple Scalable Profile @ Level 0"}, {0x11, "Simple Scalable Profile @ Level 1"}, {0x12, "Simple Scalable Profile @ Level 2"}, {0x21, "Core Profile @ Level 1"}, {0x22, "Core Profile @ Level 2"}, {0x32, "Main Profile @ Level 2"}, {0x33, "Main Profile @ Level 3"}, {0x34, "Main Profile @ Level 4"}, {0x42, "N-bit Profile @ Level 2"}, {0x51, "Scalable Texture Profile @ Level 1"}, {0x61, "Simple Face Animation Profile @ Level 1"}, {0x62, "Simple Face Animation Profile @ Level 2"}, {0x63, "Simple FBA Profile @ Level 1"}, {0x64, "Simple FBA Profile @ Level 2"}, {0x71, "Basic Animated Texture Profile @ Level 1"}, {0x72, "Basic Animated Texture Profile @ Level 2"}, {0x7F, "AVC/H264 Profile"}, {0x81, "Hybrid Profile @ Level 1"}, {0x82, "Hybrid Profile @ Level 2"}, {0x91, "Advanced Real Time Simple Profile @ Level 1"}, {0x92, "Advanced Real Time Simple Profile @ Level 2"}, {0x93, "Advanced Real Time Simple Profile @ Level 3"}, {0x94, "Advanced Real Time Simple Profile @ Level 4"}, {0xA1, "Core Scalable Profile @ Level1"}, {0xA2, "Core Scalable Profile @ Level2"}, {0xA3, "Core Scalable Profile @ Level3"}, {0xB1, "Advanced Coding Efficiency Profile @ Level 1"}, {0xB2, "Advanced Coding Efficiency Profile @ Level 2"}, {0xB3, "Advanced Coding Efficiency Profile @ Level 3"}, {0xB4, "Advanced Coding Efficiency Profile @ Level 4"}, {0xC1, "Advanced Core Profile @ Level 1"}, {0xC2, "Advanced Core Profile @ Level 2"}, {0xD1, "Advanced Scalable Texture @ Level1"}, {0xD2, "Advanced Scalable Texture @ Level2"}, {0xE1, "Simple Studio Profile @ Level 1"}, {0xE2, "Simple Studio Profile @ Level 2"}, {0xE3, "Simple Studio Profile @ Level 3"}, {0xE4, "Simple Studio Profile @ Level 4"}, {0xE5, "Core Studio Profile @ Level 1"}, {0xE6, "Core Studio Profile @ Level 2"}, {0xE7, "Core Studio Profile @ Level 3"}, {0xE8, "Core Studio Profile @ Level 4"}, {0xF0, "Advanced Simple Profile @ Level 0"}, {0xF1, "Advanced Simple Profile @ Level 1"}, {0xF2, "Advanced Simple Profile @ Level 2"}, {0xF3, "Advanced Simple Profile @ Level 3"}, {0xF4, "Advanced Simple Profile @ Level 4"}, {0xF5, "Advanced Simple Profile @ Level 5"}, {0xF7, "Advanced Simple Profile @ Level 3b"}, {0xF8, "Fine Granularity Scalable Profile @ Level 0"}, {0xF9, "Fine Granularity Scalable Profile @ Level 1"}, {0xFA, "Fine Granularity Scalable Profile @ Level 2"}, {0xFB, "Fine Granularity Scalable Profile @ Level 3"}, {0xFC, "Fine Granularity Scalable Profile @ Level 4"}, {0xFD, "Fine Granularity Scalable Profile @ Level 5"}, {0xFE, "Not part of MPEG-4 Visual profiles"}, {0xFF, "No visual capability required"} }; GF_EXPORT const char *gf_m4v_get_profile_name(u8 video_pl) { u32 i, count = GF_ARRAY_LENGTH(M4VProfiles); for (i=0; i<count; i++) { if ((u32)video_pl == M4VProfiles[i].value) return M4VProfiles[i].name; } return "ISO Reserved Profile"; } #ifndef GPAC_DISABLE_AV_PARSERS #define MPEG12_START_CODE_PREFIX 0x000001 #define MPEG12_PICTURE_START_CODE 0x00000100 #define MPEG12_SLICE_MIN_START 0x00000101 #define MPEG12_SLICE_MAX_START 0x000001af #define MPEG12_USER_DATA_START_CODE 0x000001b2 #define MPEG12_SEQUENCE_START_CODE 0x000001b3 #define MPEG12_SEQUENCE_ERR_START_CODE 0x000001b4 #define MPEG12_EXT_START_CODE 0x000001b5 #define MPEG12_SEQUENCE_END_START_CODE 0x000001b7 #define MPEG12_GOP_START_CODE 0x000001b8 s32 gf_mv12_next_start_code(unsigned char 
*pbuffer, u32 buflen, u32 *optr, u32 *scode) { u32 value; u32 offset; if (buflen < 4) return -1; for (offset = 0; offset < buflen - 3; offset++, pbuffer++) { #ifdef GPAC_BIG_ENDIAN value = *(u32 *)pbuffer >> 8; #else value = (pbuffer[0] << 16) | (pbuffer[1] << 8) | (pbuffer[2] << 0); #endif if (value == MPEG12_START_CODE_PREFIX) { *optr = offset; *scode = (value << 8) | pbuffer[3]; return 0; } } return -1; } s32 gf_mv12_next_slice_start(unsigned char *pbuffer, u32 startoffset, u32 buflen, u32 *slice_offset) { u32 slicestart, code; while (gf_mv12_next_start_code(pbuffer + startoffset, buflen - startoffset, &slicestart, &code) >= 0) { if ((code >= MPEG12_SLICE_MIN_START) && (code <= MPEG12_SLICE_MAX_START)) { *slice_offset = slicestart + startoffset; return 0; } startoffset += slicestart + 4; } return -1; } /* MPEG-4 video (14496-2) */ struct __tag_m4v_parser { GF_BitStream *bs; Bool mpeg12, step_mode; u32 current_object_type; u32 force_next_obj_type; u64 current_object_start; u32 tc_dec, prev_tc_dec, tc_disp, prev_tc_disp; }; GF_EXPORT GF_M4VParser *gf_m4v_parser_new(u8 *data, u64 data_size, Bool mpeg12video) { GF_M4VParser *tmp; if (!data || !data_size) return NULL; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ); tmp->mpeg12 = mpeg12video; return tmp; } GF_M4VParser *gf_m4v_parser_bs_new(GF_BitStream *bs, Bool mpeg12video) { GF_M4VParser *tmp; GF_SAFEALLOC(tmp, GF_M4VParser); if (!tmp) return NULL; tmp->bs = bs; tmp->mpeg12 = mpeg12video; return tmp; } GF_EXPORT void gf_m4v_parser_del(GF_M4VParser *m4v) { gf_bs_del(m4v->bs); gf_free(m4v); } GF_EXPORT void gf_m4v_parser_del_no_bs(GF_M4VParser *m4v) { gf_free(m4v); } GF_EXPORT void gf_m4v_parser_set_inspect(GF_M4VParser *m4v) { if (m4v) m4v->step_mode = 1; } GF_EXPORT u32 gf_m4v_parser_get_obj_type(GF_M4VParser *m4v) { if (m4v) return m4v->current_object_type; return 0; } #define M4V_CACHE_SIZE 4096 s32 M4V_LoadObject(GF_M4VParser *m4v) { u32 v, bpos, found; char m4v_cache[M4V_CACHE_SIZE]; u64 end, cache_start, load_size; if (!m4v) return 0; if (m4v->force_next_obj_type) { m4v->current_object_type = m4v->force_next_obj_type - 1; m4v->force_next_obj_type = 0; return (s32)m4v->current_object_type; } bpos = 0; found = 0; load_size = 0; end = 0; cache_start = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(m4v->bs)) break; load_size = gf_bs_available(m4v->bs); if (load_size > M4V_CACHE_SIZE) load_size = M4V_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(m4v->bs); gf_bs_read_data(m4v->bs, m4v_cache, (u32)load_size); } v = ((v << 8) & 0xFFFFFF00) | ((u8)m4v_cache[bpos]); bpos++; if ((v & 0xFFFFFF00) == 0x00000100) { end = cache_start + bpos - 4; found = 1; break; } } if (!found) return -1; m4v->current_object_start = end; gf_bs_seek(m4v->bs, end + 3); m4v->current_object_type = gf_bs_read_u8(m4v->bs); return (s32)m4v->current_object_type; } GF_EXPORT void gf_m4v_rewrite_pl(u8 **o_data, u32 *o_dataLen, u8 PL) { u32 pos = 0; unsigned char *data = (unsigned char *)*o_data; u32 dataLen = *o_dataLen; while (pos + 4 < dataLen) { if (!data[pos] && !data[pos + 1] && (data[pos + 2] == 0x01) && (data[pos + 3] == M4V_VOS_START_CODE)) { data[pos + 4] = PL; return; } pos++; } /*emulate VOS at beggining*/ (*o_data) = (char *)gf_malloc(sizeof(char)*(dataLen + 5)); (*o_data)[0] = 0; (*o_data)[1] = 0; (*o_data)[2] = 1; (*o_data)[3] = (char)M4V_VOS_START_CODE; (*o_data)[4] = PL; memcpy((*o_data + 5), data, sizeof(char)*dataLen); 
gf_free(data); (*o_dataLen) = dataLen + 5; } static GF_Err M4V_Reset(GF_M4VParser *m4v, u64 start) { gf_bs_seek(m4v->bs, start); assert(start < (u64)1<<31); m4v->current_object_start = (u32)start; m4v->current_object_type = 0; return GF_OK; } void gf_m4v_parser_reset(GF_M4VParser *m4v, u8 sc_type) { m4v->current_object_start = 0; m4v->current_object_type = 0; m4v->force_next_obj_type = sc_type; } static GF_Err gf_m4v_parse_config_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { unsigned char p[4]; u32 ext_type; s32 o_type; u8 go, par; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = 0; go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_SEQ_START_CODE: dsi->RAP_stream = 1; gf_bs_read_data(m4v->bs, (char *)p, 4); dsi->width = (p[0] << 4) | ((p[1] >> 4) & 0xf); dsi->height = ((p[1] & 0xf) << 8) | p[2]; dsi->VideoPL = GF_CODECID_MPEG1; par = (p[3] >> 4) & 0xf; switch (par) { case 2: dsi->par_num = dsi->height / 3; dsi->par_den = dsi->width / 4; break; case 3: dsi->par_num = dsi->height / 9; dsi->par_den = dsi->width / 16; break; case 4: dsi->par_num = dsi->height / 2; dsi->par_den = dsi->width / 21; break; default: dsi->par_den = dsi->par_num = 0; break; } switch (p[3] & 0xf) { case 0: break; case 1: dsi->fps = 24000.0 / 1001.0; break; case 2: dsi->fps = 24.0; break; case 3: dsi->fps = 25.0; break; case 4: dsi->fps = 30000.0 / 1001.0; break; case 5: dsi->fps = 30.0; break; case 6: dsi->fps = 50.0; break; case 7: dsi->fps = ((60.0*1000.0) / 1001.0); break; case 8: dsi->fps = 60.0; break; case 9: dsi->fps = 1; break; case 10: dsi->fps = 5; break; case 11: dsi->fps = 10; break; case 12: dsi->fps = 12; break; case 13: dsi->fps = 15; break; } break; case M2V_EXT_START_CODE: gf_bs_read_data(m4v->bs, (char *)p, 4); ext_type = ((p[0] >> 4) & 0xf); if (ext_type == 1) { dsi->VideoPL = 0x65; dsi->height = ((p[1] & 0x1) << 13) | ((p[2] & 0x80) << 5) | (dsi->height & 0x0fff); dsi->width = (((p[2] >> 5) & 0x3) << 12) | (dsi->width & 0x0fff); } break; case M2V_PIC_START_CODE: if (dsi->width) go = 0; break; default: break; /*EOS*/ case -1: go = 0; m4v->current_object_start = gf_bs_get_position(m4v->bs); break; } } M4V_Reset(m4v, 0); return GF_OK; } static const struct { u32 w, h; } m4v_sar[6] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 } }; static u8 m4v_get_sar_idx(u32 w, u32 h) { u32 i; for (i = 0; i < 6; i++) { if ((m4v_sar[i].w == w) && (m4v_sar[i].h == h)) return i; } return 0xF; } static void gf_m4v_parse_vol(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { u8 verid, par; s32 clock_rate; u8 vpl = dsi->VideoPL; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = vpl; verid = 0; dsi->RAP_stream = gf_bs_read_int(m4v->bs, 1); dsi->objectType = gf_bs_read_int(m4v->bs, 8); if (gf_bs_read_int(m4v->bs, 1)) { verid = gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 3); } par = gf_bs_read_int(m4v->bs, 4); if (par == 0xF) { dsi->par_num = gf_bs_read_int(m4v->bs, 8); dsi->par_den = gf_bs_read_int(m4v->bs, 8); } else if (par<6) { dsi->par_num = m4v_sar[par].w; dsi->par_den = m4v_sar[par].h; } if (gf_bs_read_int(m4v->bs, 1)) { gf_bs_read_int(m4v->bs, 3); if (gf_bs_read_int(m4v->bs, 1)) gf_bs_read_int(m4v->bs, 79); } dsi->has_shape = gf_bs_read_int(m4v->bs, 2); if (dsi->has_shape && (verid!=1) ) gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 1); /*clock rate*/ dsi->clock_rate = gf_bs_read_int(m4v->bs, 16); /*marker*/ gf_bs_read_int(m4v->bs, 1); clock_rate = dsi->clock_rate-1; if (clock_rate >= 65536) 
clock_rate = 65535; if (clock_rate > 0) { for (dsi->NumBitsTimeIncrement = 1; dsi->NumBitsTimeIncrement < 16; dsi->NumBitsTimeIncrement++) { if (clock_rate == 1) break; clock_rate = (clock_rate >> 1); } } else { /*fix from vivien for divX*/ dsi->NumBitsTimeIncrement = 1; } /*fixed FPS stream*/ dsi->time_increment = 0; if (gf_bs_read_int(m4v->bs, 1)) { dsi->time_increment = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); } if (!dsi->has_shape) { gf_bs_read_int(m4v->bs, 1); dsi->width = gf_bs_read_int(m4v->bs, 13); gf_bs_read_int(m4v->bs, 1); dsi->height = gf_bs_read_int(m4v->bs, 13); } else { dsi->width = dsi->height = 0; } gf_bs_align(m4v->bs); } static GF_Err gf_m4v_parse_config_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { s32 o_type; u8 go; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { /*vosh*/ case M4V_VOS_START_CODE: dsi->VideoPL = (u8)gf_bs_read_u8(m4v->bs); break; case M4V_VOL_START_CODE: gf_m4v_parse_vol(m4v, dsi); /*shape will be done later*/ gf_bs_align(m4v->bs); break; case M4V_VOP_START_CODE: case M4V_GOV_START_CODE: go = 0; break; /*EOS*/ case -1: m4v->current_object_start = gf_bs_get_position(m4v->bs); return GF_EOS; /*don't interest us*/ case M4V_UDTA_START_CODE: default: break; } } return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_config(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { if (m4v->mpeg12) { return gf_m4v_parse_config_mpeg12(m4v, dsi); } else { return gf_m4v_parse_config_mpeg4(m4v, dsi); } } static GF_Err gf_m4v_parse_frame_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, val; s32 o_type; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = GF_FALSE; m4v->current_object_type = (u32)-1; *frame_type = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_PIC_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; *is_coded = 1; /*val = */gf_bs_read_u8(m4v->bs); val = gf_bs_read_u8(m4v->bs); *frame_type = ((val >> 3) & 0x7) - 1; break; case M2V_GOP_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M2V_SEQ_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) { go = 0; break; } /**/ break; default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } *size = m4v->current_object_start - *start; return GF_OK; } static GF_Err gf_m4v_parse_frame_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, secs; s32 o_type; u32 vop_inc = 0; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = 0; m4v->current_object_type = (u32)-1; *frame_type = 0; *start = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M4V_VOP_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; /*coding type*/ *frame_type = gf_bs_read_int(m4v->bs, 2); /*modulo time base*/ secs = 0; while 
(gf_bs_read_int(m4v->bs, 1) != 0) secs++; /*no support for B frames in parsing*/ secs += (dsi->enh_layer || *frame_type!=2) ? m4v->tc_dec : m4v->tc_disp; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*vop_time_inc*/ if (dsi->NumBitsTimeIncrement) vop_inc = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); m4v->prev_tc_dec = m4v->tc_dec; m4v->prev_tc_disp = m4v->tc_disp; if (dsi->enh_layer || *frame_type!=2) { m4v->tc_disp = m4v->tc_dec; m4v->tc_dec = secs; } *time_inc = secs * dsi->clock_rate + vop_inc; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*coded*/ *is_coded = gf_bs_read_int(m4v->bs, 1); gf_bs_align(m4v->bs); break; case M4V_GOV_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M4V_VOL_START_CODE: if (m4v->step_mode) gf_m4v_parse_vol(m4v, dsi); case M4V_VOS_START_CODE: if (hasVOP) { go = 0; } else if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } break; case M4V_VO_START_CODE: default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } assert(m4v->current_object_start >= *start); *size = m4v->current_object_start - *start; return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_frame(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { if (m4v->mpeg12) { return gf_m4v_parse_frame_mpeg12(m4v, dsi, frame_type, time_inc, size, start, is_coded); } else { return gf_m4v_parse_frame_mpeg4(m4v, dsi, frame_type, time_inc, size, start, is_coded); } } GF_Err gf_m4v_rewrite_par(u8 **o_data, u32 *o_dataLen, s32 par_n, s32 par_d) { u64 start, end, size; GF_BitStream *mod; GF_M4VParser *m4v; Bool go = 1; m4v = gf_m4v_parser_new(*o_data, *o_dataLen, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); start = 0; while (go) { u32 type = M4V_LoadObject(m4v); end = gf_bs_get_position(m4v->bs) - 4; size = end - start; /*store previous object*/ if (size) { assert (size < (u64)1<<31); gf_bs_write_data(mod, *o_data + start, (u32)size); start = end; } switch (type) { case M4V_VOL_START_CODE: gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 1, 8); gf_bs_write_int(mod, M4V_VOL_START_CODE, 8); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 1), 1); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 8), 8); start = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, (u32)start, 1); if (start) { gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 7), 7); } start = gf_bs_read_int(m4v->bs, 4); if (start == 0xF) { gf_bs_read_int(m4v->bs, 8); gf_bs_read_int(m4v->bs, 8); } if ((par_n >= 0) && (par_d >= 0)) { u8 par = m4v_get_sar_idx(par_n, par_d); gf_bs_write_int(mod, par, 4); if (par == 0xF) { gf_bs_write_int(mod, par_n, 8); gf_bs_write_int(mod, par_d, 8); } } else { gf_bs_write_int(mod, 0x0, 4); } case -1: go = 0; break; default: break; } } while (gf_bs_bits_available(m4v->bs)) { u32 b = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, b, 1); } gf_m4v_parser_del(m4v); gf_free(*o_data); gf_bs_get_content(mod, o_data, o_dataLen); gf_bs_del(mod); return GF_OK; } GF_EXPORT u64 gf_m4v_get_object_start(GF_M4VParser *m4v) { return m4v->current_object_start; } #if 0 //unused Bool gf_m4v_is_valid_object_type(GF_M4VParser *m4v) { return ((s32)m4v->current_object_type == -1) ? 
0 : 1; } #endif GF_EXPORT GF_Err gf_m4v_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, 0); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e < 0 ? e : GF_OK; } GF_EXPORT GF_Err gf_mpegv12_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, GF_TRUE); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e; } #endif /* AAC parser */ struct __m4a_oti { u32 type; const char *name; } M4AObjectTypes[] = { {0, "MPEG-4 Audio Reserved"}, {1, "MPEG-4 Audio AAC Main"}, {2, "MPEG-4 Audio AAC LC"}, {3, "MPEG-4 Audio AAC SSR"}, {4, "MPEG-4 Audio AAC LTP"}, {5, "MPEG-4 Audio SBR"}, {6, "MPEG-4 Audio AAC Scalable"}, {7, "MPEG-4 Audio TwinVQ"}, {8, "MPEG-4 Audio CELP"}, {9, "MPEG-4 Audio HVXC"}, {10, "MPEG-4 Audio Reserved"}, {11, "MPEG-4 Audio Reserved"}, {12, "MPEG-4 Audio TTSI"}, {13, "MPEG-4 Audio Main synthetic"}, {14, "MPEG-4 Audio Wavetable synthesis"}, {15, "MPEG-4 Audio General MIDI"}, {16, "MPEG-4 Audio Algorithmic Synthesis and Audio FX"}, {17, "MPEG-4 Audio ER AAC LC"}, {18, "MPEG-4 Audio Reserved"}, {19, "MPEG-4 Audio ER AAC LTP"}, {20, "MPEG-4 Audio ER AAC scalable"}, {21, "MPEG-4 Audio ER TwinVQ"}, {22, "MPEG-4 Audio ER BSAC"}, {23, "MPEG-4 Audio ER AAC LD"}, {24, "MPEG-4 Audio ER CELP"}, {25, "MPEG-4 Audio ER HVXC"}, {26, "MPEG-4 Audio ER HILN"}, {27, "MPEG-4 Audio ER Parametric"}, {28, "MPEG-4 Audio SSC"}, {29, "MPEG-4 Audio ParametricStereo"}, {30, "MPEG-4 Audio Reserved"}, {31, "MPEG-4 Audio Reserved"}, {32, "MPEG-1 Audio Layer-1"}, {33, "MPEG-1 Audio Layer-2"}, {34, "MPEG-1 Audio Layer-3"}, {35, "MPEG-4 Audio DST"}, {36, "MPEG-4 Audio ALS"}, {37, "MPEG-4 Audio SLS"}, {42, "MPEG Audio xHE-AAC"}, }; GF_EXPORT const char *gf_m4a_object_type_name(u32 objectType) { u32 i, count = GF_ARRAY_LENGTH(M4AObjectTypes); for (i=0; i<count; i++) { if (objectType==M4AObjectTypes[i].type) return M4AObjectTypes[i].name; } return "MPEG-4 Audio Unknown"; } struct __m4a_profile { u32 value; const char *name; } M4AProfiles[] = { {0x00, "ISO Reserved (0x00)"}, {0x01, "Main Audio Profile @ Level 1"}, {0x02, "Main Audio Profile @ Level 2"}, {0x03, "Main Audio Profile @ Level 3"}, {0x04, "Main Audio Profile @ Level 4"}, {0x05, "Scalable Audio Profile @ Level 1"}, {0x06, "Scalable Audio Profile @ Level 2"}, {0x07, "Scalable Audio Profile @ Level 3"}, {0x08, "Scalable Audio Profile @ Level 4"}, {0x09, "Speech Audio Profile @ Level 1"}, {0x0A, "Speech Audio Profile @ Level 2"}, {0x0B, "Synthetic Audio Profile @ Level 1"}, {0x0C, "Synthetic Audio Profile @ Level 2"}, {0x0D, "Synthetic Audio Profile @ Level 3"}, {0x0E, "High Quality Audio Profile @ Level 1"}, {0x0F, "High Quality Audio Profile @ Level 2"}, {0x10, "High Quality Audio Profile @ Level 3"}, {0x11, "High Quality Audio Profile @ Level 4"}, {0x12, "High Quality Audio Profile @ Level 5"}, {0x13, "High Quality Audio Profile @ Level 6"}, {0x14, "High Quality Audio Profile @ Level 7"}, {0x15, "High Quality Audio Profile @ Level 8"}, {0x16, "Low Delay Audio Profile @ Level 1"}, {0x17, "Low Delay Audio Profile @ Level 2"}, {0x18, "Low Delay Audio Profile @ Level 3"}, {0x19, "Low Delay 
Audio Profile @ Level 4"}, {0x1A, "Low Delay Audio Profile @ Level 5"}, {0x1B, "Low Delay Audio Profile @ Level 6"}, {0x1C, "Low Delay Audio Profile @ Level 7"}, {0x1D, "Low Delay Audio Profile @ Level 8"}, {0x1E, "Natural Audio Profile @ Level 1"}, {0x1F, "Natural Audio Profile @ Level 2"}, {0x20, "Natural Audio Profile @ Level 3"}, {0x21, "Natural Audio Profile @ Level 4"}, {0x22, "Mobile Audio Internetworking Profile @ Level 1"}, {0x23, "Mobile Audio Internetworking Profile @ Level 2"}, {0x24, "Mobile Audio Internetworking Profile @ Level 3"}, {0x25, "Mobile Audio Internetworking Profile @ Level 4"}, {0x26, "Mobile Audio Internetworking Profile @ Level 5"}, {0x27, "Mobile Audio Internetworking Profile @ Level 6"}, {0x28, "AAC Profile @ Level 1"}, {0x29, "AAC Profile @ Level 2"}, {0x2A, "AAC Profile @ Level 4"}, {0x2B, "AAC Profile @ Level 5"}, {0x2C, "High Efficiency AAC Profile @ Level 2"}, {0x2D, "High Efficiency AAC Profile @ Level 3"}, {0x2E, "High Efficiency AAC Profile @ Level 4"}, {0x2F, "High Efficiency AAC Profile @ Level 5"}, {0x30, "High Efficiency AAC v2 Profile @ Level 2"}, {0x31, "High Efficiency AAC v2 Profile @ Level 3"}, {0x32, "High Efficiency AAC v2 Profile @ Level 4"}, {0x33, "High Efficiency AAC v2 Profile @ Level 5"}, {0x34, "Low Delay AAC Profile"}, {0x35, "Baseline MPEG Surround Profile @ Level 1"}, {0x36, "Baseline MPEG Surround Profile @ Level 2"}, {0x37, "Baseline MPEG Surround Profile @ Level 3"}, {0x38, "Baseline MPEG Surround Profile @ Level 4"}, {0x39, "Baseline MPEG Surround Profile @ Level 5"}, {0x3A, "Baseline MPEG Surround Profile @ Level 6"}, {0x3B, "High Definition AAC Profile @ Level 1"}, {0x3C, "ALS Simple Profile @ Level 1"}, {0x50, "AAC Profile @ Level 6"}, {0x51, "AAC Profile @ Level 7"}, {0x52, "High Efficiency AAC Profile @ Level 6"}, {0x53, "High Efficiency AAC Profile @ Level 7"}, {0x54, "High Efficiency AAC v2 Profile @ Level 6"}, {0x55, "High Efficiency AAC v2 Profile @ Level 7"}, {0x56, "Extended High Efficiency AAC Profile @ Level 6"}, {0x57, "Extended High Efficiency AAC Profile @ Level 7"}, {0xFE, "Not part of MPEG-4 audio profiles"}, {0xFF, "No audio capability required"} }; GF_EXPORT const char *gf_m4a_get_profile_name(u8 audio_pl) { u32 i, count = GF_ARRAY_LENGTH(M4AProfiles); for (i=0; i<count; i++) { if ((u32) audio_pl==M4AProfiles[i].value) return M4AProfiles[i].name; } return "ISO Reserved / User Private"; } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u32 gf_m4a_get_profile(GF_M4ADecSpecInfo *cfg) { switch (cfg->base_object_type) { case 2: /*AAC LC*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x28 : 0x29; /*LC@L1 or LC@L2*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2A : 0x2B; /*LC@L4 or LC@L5*/ return (cfg->base_sr <= 48000) ? 0x50 : 0x51; /*LC@L4 or LC@L5*/ case 5: /*HE-AAC - SBR*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x2C : 0x2D; /*HE@L2 or HE@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2E : 0x2F; /*HE@L4 or HE@L5*/ return (cfg->base_sr <= 48000) ? 0x52 : 0x53; /*HE@L6 or HE@L7*/ case 29: /*HE-AACv2 - SBR+PS*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x30 : 0x31; /*HE-AACv2@L2 or HE-AACv2@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x32 : 0x33; /*HE-AACv2@L4 or HE-AACv2@L5*/ return (cfg->base_sr <= 48000) ? 0x54 : 0x55; /*HE-AACv2@L6 or HE-AACv2@L7*/ /*default to HQ*/ default: if (cfg->nb_chan <= 2) return (cfg->base_sr < 24000) ? 
0x0E : 0x0F; /*HQ@L1 or HQ@L2*/ return 0x10; /*HQ@L3*/ } } GF_EXPORT GF_Err gf_m4a_parse_program_config_element(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; cfg->program_config_element_present = 1; cfg->cpe_channels = 0; cfg->element_instance_tag = gf_bs_read_int_log(bs, 4, "element_instance_tag"); cfg->object_type = gf_bs_read_int_log(bs, 2, "object_type"); cfg->sampling_frequency_index = gf_bs_read_int_log(bs, 4, "sampling_frequency_index"); cfg->num_front_channel_elements = gf_bs_read_int_log(bs, 4, "num_front_channel_elements"); cfg->num_side_channel_elements = gf_bs_read_int_log(bs, 4, "num_side_channel_elements"); cfg->num_back_channel_elements = gf_bs_read_int_log(bs, 4, "num_back_channel_elements"); cfg->num_lfe_channel_elements = gf_bs_read_int_log(bs, 2, "num_lfe_channel_elements"); cfg->num_assoc_data_elements = gf_bs_read_int_log(bs, 3, "num_assoc_data_elements"); cfg->num_valid_cc_elements = gf_bs_read_int_log(bs, 4, "num_valid_cc_elements"); cfg->mono_mixdown_present = (Bool)gf_bs_read_int_log(bs, 1, "mono_mixdown_present"); if (cfg->mono_mixdown_present) { cfg->mono_mixdown_element_number = gf_bs_read_int_log(bs, 4, "mono_mixdown_element_number"); } cfg->stereo_mixdown_present = gf_bs_read_int_log(bs, 1, "stereo_mixdown_present"); if (cfg->stereo_mixdown_present) { cfg->stereo_mixdown_element_number = gf_bs_read_int_log(bs, 4, "stereo_mixdown_element_number"); } cfg->matrix_mixdown_idx_present = gf_bs_read_int_log(bs, 1, "matrix_mixdown_idx_present"); if (cfg->matrix_mixdown_idx_present) { cfg->matrix_mixdown_idx = gf_bs_read_int_log(bs, 2, "matrix_mixdown_idx"); cfg->pseudo_surround_enable = gf_bs_read_int_log(bs, 1, "pseudo_surround_enable"); } for (i = 0; i < cfg->num_front_channel_elements; i++) { cfg->front_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "front_element_is_cpe", i); cfg->front_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "front_element_tag_select", i); if (cfg->front_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_side_channel_elements; i++) { cfg->side_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "side_element_is_cpe", i); cfg->side_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "side_element_tag_select", i); if (cfg->side_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_back_channel_elements; i++) { cfg->back_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "back_element_is_cpe", i); cfg->back_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "back_element_tag_select", i); if (cfg->back_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { cfg->lfe_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "lfe_element_tag_select", i); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { cfg->assoc_data_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "assoc_data_element_tag_select", i); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { cfg->cc_element_is_ind_sw[i] = gf_bs_read_int_log_idx(bs, 1, "cc_element_is_ind_sw", i); cfg->valid_cc_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "valid_cc_element_tag_select", i); } gf_bs_align(bs); cfg->comment_field_bytes = gf_bs_read_int_log(bs, 8, "comment_field_bytes"); gf_bs_read_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); cfg->nb_chan = cfg->num_front_channel_elements + cfg->num_back_channel_elements + cfg->num_side_channel_elements + cfg->num_lfe_channel_elements; cfg->nb_chan += cfg->cpe_channels; return GF_OK; } GF_EXPORT GF_Err gf_m4a_parse_config(GF_BitStream *bs, 
GF_M4ADecSpecInfo *cfg, Bool size_known) { u32 audio_obj_type; memset(cfg, 0, sizeof(GF_M4ADecSpecInfo)); cfg->base_object_type = gf_bs_read_int_log(bs, 5, "base_object_type"); /*extended object type*/ if (cfg->base_object_type == 31) { cfg->base_object_type = 32 + gf_bs_read_int_log(bs, 6, "extended_base_object_type"); } cfg->base_sr_index = gf_bs_read_int_log(bs, 4, "base_samplerate_index"); if (cfg->base_sr_index == 0x0F) { cfg->base_sr = gf_bs_read_int_log(bs, 24, "base_samplerate"); } else { cfg->base_sr = GF_M4ASampleRates[cfg->base_sr_index]; } cfg->chan_cfg = gf_bs_read_int_log(bs, 4, "channel_configuration"); if (cfg->chan_cfg) { cfg->nb_chan = GF_M4ANumChannels[cfg->chan_cfg - 1]; } audio_obj_type = cfg->base_object_type; if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = GF_TRUE; cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "sbr_samplerate_index"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "sbr_samplerate"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "sbr_object_type"); if (cfg->sbr_object_type==31) cfg->sbr_object_type = 32 + gf_bs_read_int_log(bs, 6, "audioObjectTypeExt"); audio_obj_type = cfg->sbr_object_type; if (cfg->sbr_object_type==22) { /*ext_chan_cfg = */gf_bs_read_int_log(bs, 4, "channel_configuration"); } } /*object cfg*/ switch (audio_obj_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { Bool ext_flag; gf_bs_read_int_log(bs, 1, "frame_length_flag"); if (gf_bs_read_int_log(bs, 1, "depends_on_core_coder")) gf_bs_read_int_log(bs, 14, "delay"); ext_flag = gf_bs_read_int_log(bs, 1, "extension_flag"); if (!cfg->chan_cfg) { gf_m4a_parse_program_config_element(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_read_int_log(bs, 3, "layerN"); } if (ext_flag) { if (cfg->base_object_type == 22) { gf_bs_read_int_log(bs, 5, "numOfSubFrame"); gf_bs_read_int_log(bs, 11, "layer_length"); } if ((cfg->base_object_type == 17) || (cfg->base_object_type == 19) || (cfg->base_object_type == 20) || (cfg->base_object_type == 23) ) { gf_bs_read_int_log(bs, 1, "aacSectionDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacScalefactorDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacSpectralDataResilienceFlag"); } gf_bs_read_int_log(bs, 1, "extensionFlag3"); } } break; } /*ER cfg*/ switch (audio_obj_type) { case 17: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: { u32 epConfig = gf_bs_read_int_log(bs, 2, "epConfig"); if ((epConfig == 2) || (epConfig == 3)) { } if (epConfig == 3) { gf_bs_read_int_log(bs, 1, "directMapping"); } } break; } if (size_known && (cfg->base_object_type != 5) && (cfg->base_object_type != 29)) { while (gf_bs_available(bs) >= 2) { u32 sync = gf_bs_peek_bits(bs, 11, 0); if (sync == 0x2b7) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "extensionAudioObjectType "); cfg->has_sbr = gf_bs_read_int_log(bs, 1, "sbrPresentFlag"); if (cfg->has_sbr) { cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "extensionSamplingFrequencyIndex"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "extensionSamplingFrequency"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } } } else if (sync == 0x548) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->has_ps = 
gf_bs_read_int_log(bs, 1, "hasParametricStereo"); if (cfg->has_ps) cfg->nb_chan = 1; } else { break; } } } cfg->audioPL = gf_m4a_get_profile(cfg); return GF_OK; } GF_EXPORT GF_Err gf_m4a_get_config(u8 *dsi, u32 dsi_size, GF_M4ADecSpecInfo *cfg) { GF_BitStream *bs; if (!dsi || !dsi_size || (dsi_size < 2)) return GF_NON_COMPLIANT_BITSTREAM; bs = gf_bs_new(dsi, dsi_size, GF_BITSTREAM_READ); gf_m4a_parse_config(bs, cfg, GF_TRUE); gf_bs_del(bs); return GF_OK; } u32 gf_latm_get_value(GF_BitStream *bs) { u32 i, tmp, value = 0; u32 bytesForValue = gf_bs_read_int(bs, 2); for (i = 0; i <= bytesForValue; i++) { value <<= 8; tmp = gf_bs_read_int(bs, 8); value += tmp; } return value; } GF_EXPORT u32 gf_m4a_get_channel_cfg(u32 nb_chan) { u32 i, count = sizeof(GF_M4ANumChannels) / sizeof(u32); for (i = 0; i < count; i++) { if (GF_M4ANumChannels[i] == nb_chan) return i + 1; } return 0; } GF_EXPORT GF_Err gf_m4a_write_program_config_element_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; gf_bs_write_int(bs, cfg->element_instance_tag, 4); gf_bs_write_int(bs, cfg->object_type, 2); gf_bs_write_int(bs, cfg->sampling_frequency_index, 4); gf_bs_write_int(bs, cfg->num_front_channel_elements, 4); gf_bs_write_int(bs, cfg->num_side_channel_elements, 4); gf_bs_write_int(bs, cfg->num_back_channel_elements, 4); gf_bs_write_int(bs, cfg->num_lfe_channel_elements, 2); gf_bs_write_int(bs, cfg->num_assoc_data_elements, 3); gf_bs_write_int(bs, cfg->num_valid_cc_elements, 4); gf_bs_write_int(bs, cfg->mono_mixdown_present, 1); if (cfg->mono_mixdown_present) { gf_bs_write_int(bs, cfg->mono_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->stereo_mixdown_present, 1); if (cfg->stereo_mixdown_present) { gf_bs_write_int(bs, cfg->stereo_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->matrix_mixdown_idx_present, 1); if (cfg->matrix_mixdown_idx_present) { gf_bs_write_int(bs, cfg->matrix_mixdown_idx, 2); gf_bs_write_int(bs, cfg->pseudo_surround_enable, 1); } for (i = 0; i < cfg->num_front_channel_elements; i++) { gf_bs_write_int(bs, cfg->front_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->front_element_tag_select[i], 4); } for (i = 0; i < cfg->num_side_channel_elements; i++) { gf_bs_write_int(bs, cfg->side_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->side_element_tag_select[i], 4); } for (i = 0; i < cfg->num_back_channel_elements; i++) { gf_bs_write_int(bs, cfg->back_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->back_element_tag_select[i], 4); } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { gf_bs_write_int(bs, cfg->lfe_element_tag_select[i], 4); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { gf_bs_write_int(bs, cfg->assoc_data_element_tag_select[i], 4); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { gf_bs_write_int(bs, cfg->cc_element_is_ind_sw[i], 1); gf_bs_write_int(bs, cfg->valid_cc_element_tag_select[i], 4); } gf_bs_align(bs); gf_bs_write_int(bs, cfg->comment_field_bytes, 8); gf_bs_write_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { if (!cfg->base_sr_index) { if (!cfg->base_sr) return GF_BAD_PARAM; while (GF_M4ASampleRates[cfg->base_sr_index]) { if (GF_M4ASampleRates[cfg->base_sr_index] == cfg->base_sr) break; cfg->base_sr_index++; } } if (cfg->sbr_sr && !cfg->sbr_sr_index) { while (GF_M4ASampleRates[cfg->sbr_sr_index]) { if (GF_M4ASampleRates[cfg->sbr_sr_index] == cfg->sbr_sr) break; cfg->sbr_sr_index++; } } /*extended object type*/ if (cfg->base_object_type >= 32) { 
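		/*AOT 32 and above cannot fit on 5 bits: write the escape value 31, then (AOT - 32) on 6 bits, mirroring the read side in gf_m4a_parse_config*/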
gf_bs_write_int(bs, 31, 5); gf_bs_write_int(bs, cfg->base_object_type - 32, 6); } else { gf_bs_write_int(bs, cfg->base_object_type, 5); } gf_bs_write_int(bs, cfg->base_sr_index, 4); if (cfg->base_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->base_sr, 24); } if (cfg->program_config_element_present) { gf_bs_write_int(bs, 0, 4); } else { cfg->chan_cfg = gf_m4a_get_channel_cfg(cfg->nb_chan); if (!cfg->chan_cfg) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AAC] Cannot write decoder config, ProgramConfigElement is missing and channel configuration is not a predefined one !\n")); return GF_BAD_PARAM; } gf_bs_write_int(bs, cfg->chan_cfg, 4); } if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = 1; gf_bs_write_int(bs, cfg->sbr_sr_index, 4); if (cfg->sbr_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->sbr_sr, 24); } gf_bs_write_int(bs, cfg->sbr_object_type, 5); } /*object cfg*/ switch (cfg->base_object_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { /*frame length flag*/ gf_bs_write_int(bs, 0, 1); /*depends on core coder*/ gf_bs_write_int(bs, 0, 1); /*ext flag*/ gf_bs_write_int(bs, 0, 1); if (cfg->program_config_element_present) { gf_m4a_write_program_config_element_bs(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_write_int(bs, 0, 3); } } break; } /*ER cfg - not supported*/ /*implicit sbr/ps signaling not written here, cf reframe_adts*/ return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config(GF_M4ADecSpecInfo *cfg, u8 **dsi, u32 *dsi_size) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_m4a_write_config_bs(bs, cfg); gf_bs_get_content(bs, dsi, dsi_size); gf_bs_del(bs); return GF_OK; } /*AV1 parsing*/ static u32 av1_read_ns(GF_BitStream *bs, u32 n, const char *fname) { u32 v, res; Bool extra_bit; int w = (u32)(log(n) / log(2)) + 1; u32 m = (1 << w) - n; assert(w < 32); v = gf_bs_read_int(bs, w - 1); if (v < m) { if (fname) { gf_bs_log(bs, w-1, fname, v); } return v; } extra_bit = gf_bs_read_int(bs, 1); res = (v << 1) - m + extra_bit; if (fname) { gf_bs_log(bs, w, fname, res); } return res; } static void av1_color_config(GF_BitStream *bs, AV1State *state) { state->config->high_bitdepth = gf_bs_read_int_log(bs, 1, "high_bitdepth"); state->bit_depth = 8; if (state->config->seq_profile == 2 && state->config->high_bitdepth) { state->config->twelve_bit = gf_bs_read_int_log(bs, 1, "twelve_bit"); state->bit_depth = state->config->twelve_bit ? 12 : 10; } else if (state->config->seq_profile <= 2) { state->bit_depth = state->config->high_bitdepth ? 10 : 8; } state->config->monochrome = GF_FALSE; if (state->config->seq_profile == 1) { state->config->monochrome = GF_FALSE; } else { state->config->monochrome = gf_bs_read_int_log(bs, 1, "monochrome"); } /*NumPlanes = mono_chrome ? 
1 : 3;*/ state->color_description_present_flag = gf_bs_read_int_log(bs, 1, "color_description_present_flag"); if (state->color_description_present_flag) { state->color_primaries = gf_bs_read_int_log(bs, 8, "color_primaries"); state->transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); state->matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } else { state->color_primaries = 2/*CP_UNSPECIFIED*/; state->transfer_characteristics = 2/*TC_UNSPECIFIED*/; state->matrix_coefficients = 2/*MC_UNSPECIFIED*/; } if (state->config->monochrome) { state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; state->config->chroma_sample_position = 0/*CSP_UNKNOWN*/; state->separate_uv_delta_q = 0; return; } else if (state->color_primaries == 0/*CP_BT_709*/ && state->transfer_characteristics == 13/*TC_SRGB*/ && state->matrix_coefficients == 0/*MC_IDENTITY*/) { state->color_range = GF_TRUE; state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); if (state->config->seq_profile == 0) { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; } else if (state->config->seq_profile == 1) { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { if (state->bit_depth == 12) { state->config->chroma_subsampling_x = gf_bs_read_int_log(bs, 1, "chroma_subsampling_x"); if (state->config->chroma_subsampling_x) state->config->chroma_subsampling_y = gf_bs_read_int_log(bs, 1, "chroma_subsampling_y"); else state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_FALSE; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y) { state->config->chroma_sample_position = gf_bs_read_int_log(bs, 2, "chroma_sample_position"); } } state->separate_uv_delta_q = gf_bs_read_int_log(bs, 1, "separate_uv_delta_q"); } static u32 av1_uvlc(GF_BitStream *bs, const char *fname) { u32 res; u8 leadingZeros = 0; while (1) { Bool done = gf_bs_read_int(bs, 1); if (done) break; leadingZeros++; } if (leadingZeros >= 32) { return 0xFFFFFFFF; } res = gf_bs_read_int(bs, leadingZeros) + (1 << leadingZeros) - 1; gf_bs_log(bs, 2*leadingZeros, fname, res); return res; } static void timing_info(GF_BitStream *bs, AV1State *state) { u32 time_scale = 0; u32 num_units_in_display_tick = gf_bs_read_int_log(bs, 32, "num_units_in_display_tick"); if (num_units_in_display_tick == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] num_units_in_display_tick must be greater than 0.\n")); } time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); if (time_scale == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] time_scale must be greater than 0.\n")); } state->equal_picture_interval = gf_bs_read_int_log(bs, 1, "equal_picture_interval"); if (state->equal_picture_interval) { u32 num_ticks_per_picture_minus_1 = av1_uvlc(bs, "num_ticks_per_picture_minus_1"); state->tb_num = time_scale; state->tb_den = (num_ticks_per_picture_minus_1 + 1)*num_units_in_display_tick; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] VFR not supported.\n")); //TODO: upload num_units_in_display_tick (eq. 
// to the POC in H264), compute delta between frames, set it as dts_inc in gf_import_aom_av1()
	}
}

static void decoder_model_info(AV1State *state, GF_BitStream *bs)
{
	state->buffer_delay_length = 1 + gf_bs_read_int_log(bs, 5, "buffer_delay_length_minus1");
	gf_bs_read_int_log(bs, 32, "num_units_in_decoding_tick");
	state->buffer_removal_time_length = gf_bs_read_int_log(bs, 5, "buffer_removal_time_length");
	state->frame_presentation_time_length = 1 + gf_bs_read_int_log(bs, 5, "frame_presentation_time_length_minus1");
}

static void operating_parameters_info(GF_BitStream *bs, const u8 idx, const u8 buffer_delay_length_minus_1)
{
	const u8 n = buffer_delay_length_minus_1 + 1;
	gf_bs_read_int_log(bs, n, "decoder_buffer_delay");
	gf_bs_read_int_log(bs, n, "encoder_buffer_delay");
	gf_bs_read_int_log(bs, 1, "low_delay_mode_flag");
}

static void av1_parse_sequence_header_obu(GF_BitStream *bs, AV1State *state)
{
	u8 buffer_delay_length_minus_1 = 0;
	state->frame_state.seen_seq_header = GF_TRUE;
	state->config->seq_profile = gf_bs_read_int_log(bs, 3, "seq_profile");
	state->still_picture = gf_bs_read_int_log(bs, 1, "still_picture");
	state->reduced_still_picture_header = gf_bs_read_int_log(bs, 1, "reduced_still_picture_header");
	if (state->reduced_still_picture_header) {
		//timing_info_present_flag = GF_FALSE;
		//initial_display_delay_present_flag = GF_FALSE;
		state->operating_points_count = 1;
		state->config->seq_level_idx_0 = gf_bs_read_int_log(bs, 5, "seq_level_idx_0");
	} else {
		u8 i = 0;
		Bool initial_display_delay_present_flag;
		Bool timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag");
		if (timing_info_present_flag) {
			timing_info(bs, state);
			state->decoder_model_info_present_flag = gf_bs_read_int_log(bs, 1, "decoder_model_info_present_flag");
			if (state->decoder_model_info_present_flag) {
				decoder_model_info(state, bs);
				/*fix: per the AV1 spec, operating_parameters_info() reads buffer_delay_length_minus_1+1 bits
				as signalled in decoder_model_info(); the local used to stay 0, truncating those reads to 1 bit*/
				buffer_delay_length_minus_1 = state->buffer_delay_length - 1;
			}
		} else {
			state->decoder_model_info_present_flag = GF_FALSE;
		}
		initial_display_delay_present_flag = gf_bs_read_int_log(bs, 1, "initial_display_delay_present_flag");
		state->operating_points_count = 1 + gf_bs_read_int_log(bs, 5, "operating_points_count_minus1");
		for (i = 0; i < state->operating_points_count; i++) {
			u8 seq_level_idx_i, seq_tier = 0;
			state->operating_point_idc[i] = gf_bs_read_int_log_idx(bs, 12, "operating_point_idc", i);
			seq_level_idx_i = gf_bs_read_int_log_idx(bs, 5, "seq_level_idx", i);
			if (i == 0) state->config->seq_level_idx_0 = seq_level_idx_i;
			if (seq_level_idx_i > 7) {
				seq_tier = gf_bs_read_int_log_idx(bs, 1, "seq_tier", i);
			}
			if (i == 0) state->config->seq_tier_0 = seq_tier;
			if (state->decoder_model_info_present_flag) {
				state->decoder_model_present_for_this_op[i] = gf_bs_read_int_log_idx(bs, 1, "decoder_model_present_for_this_op", i);
				if (state->decoder_model_present_for_this_op[i]) {
					operating_parameters_info(bs, i, buffer_delay_length_minus_1);
				}
			} else {
				state->decoder_model_present_for_this_op[i] = 0;
			}
			if (initial_display_delay_present_flag) {
				if (gf_bs_read_int_log_idx(bs, 1, "initial_display_delay_present_for_this_op", i)) {
					gf_bs_read_int_log_idx(bs, 4, "initial_display_delay_minus1", i);
				}
			}
		}
	}
	//operatingPoint = av1_choose_operating_point(bs);
	state->OperatingPointIdc = 0; //TODO: operating_point_idc[operatingPoint];

	state->frame_width_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_width_bits_minus1");
	state->frame_height_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_height_bits_minus1");
	state->width = gf_bs_read_int_log(bs, state->frame_width_bits_minus_1 + 1, "width_minus1") + 1;
	state->height = gf_bs_read_int_log(bs,
state->frame_height_bits_minus_1 + 1, "height_minus1") + 1; state->sequence_width = state->width; state->sequence_height = state->height; state->frame_id_numbers_present_flag = GF_FALSE; if (!state->reduced_still_picture_header) { state->frame_id_numbers_present_flag = gf_bs_read_int_log(bs, 1, "frame_id_numbers_present_flag"); } if (state->frame_id_numbers_present_flag) { state->delta_frame_id_length_minus_2 = gf_bs_read_int_log(bs, 4, "delta_frame_id_length_minus2"); state->additional_frame_id_length_minus_1 = gf_bs_read_int_log(bs, 3, "additional_frame_id_length_minus1"); } state->use_128x128_superblock = gf_bs_read_int_log(bs, 1, "use_128x128_superblock"); gf_bs_read_int_log(bs, 1, "enable_filter_intra"); gf_bs_read_int_log(bs, 1, "enable_intra_edge_filter"); if (state->reduced_still_picture_header) { /*enable_interintra_compound = 0; enable_masked_compound = 0; enable_dual_filter = 0; enable_jnt_comp = 0; enable_ref_frame_mvs = 0;*/ state->enable_warped_motion = 0; state->enable_order_hint = GF_FALSE; state->OrderHintBits = 0; state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { Bool seq_choose_screen_content_tools; gf_bs_read_int_log(bs, 1, "enable_interintra_compound"); gf_bs_read_int_log(bs, 1, "enable_masked_compound"); state->enable_warped_motion = gf_bs_read_int_log(bs, 1, "enable_warped_motion"); gf_bs_read_int_log(bs, 1, "enable_dual_filter"); state->enable_order_hint = gf_bs_read_int_log(bs, 1, "enable_order_hint"); if (state->enable_order_hint) { gf_bs_read_int_log(bs, 1, "enable_jnt_comp"); state->enable_ref_frame_mvs = gf_bs_read_int_log(bs, 1, "enable_ref_frame_mvs"); } else { /*enable_jnt_comp = 0*/; /*enable_ref_frame_mvs = 0*/; } seq_choose_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_choose_screen_content_tools"); state->seq_force_screen_content_tools = 0; if (seq_choose_screen_content_tools) { state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { state->seq_force_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_force_screen_content_tools"); } state->seq_force_integer_mv = 0; if (state->seq_force_screen_content_tools > 0) { const Bool seq_choose_integer_mv = gf_bs_read_int_log(bs, 1, "seq_choose_integer_mv"); if (seq_choose_integer_mv) { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } else { state->seq_force_integer_mv = gf_bs_read_int_log(bs, 1, "seq_force_integer_mv"); } } else { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } if (state->enable_order_hint) { u8 order_hint_bits_minus_1 = gf_bs_read_int_log(bs, 3, "order_hint_bits_minus1"); state->OrderHintBits = order_hint_bits_minus_1 + 1; } else { state->OrderHintBits = 0; } } state->enable_superres = gf_bs_read_int_log(bs, 1, "enable_superres"); state->enable_cdef = gf_bs_read_int_log(bs, 1, "enable_cdef"); state->enable_restoration = gf_bs_read_int_log(bs, 1, "enable_restoration"); av1_color_config(bs, state); state->film_grain_params_present = gf_bs_read_int_log(bs, 1, "film_grain_params_present"); } #define IVF_FILE_HEADER_SIZE 32 Bool gf_media_probe_ivf(GF_BitStream *bs) { u32 dw = 0; if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) return GF_FALSE; dw = gf_bs_peek_bits(bs, 32, 0); if (dw != GF_4CC('D', 'K', 'I', 'F')) { return GF_FALSE; } return GF_TRUE; } GF_Err gf_media_parse_ivf_file_header(GF_BitStream *bs, u32 *width, u32 *height, u32 *codec_fourcc, u32 *timebase_num, u32 *timebase_den, u32 *num_frames) { u32 dw = 0; if (!width || !height || !codec_fourcc || 
		!timebase_den || !timebase_num || !num_frames) {
		assert(0);
		return GF_BAD_PARAM;
	}

	if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Not enough bytes available ("LLU").\n", gf_bs_available(bs)));
		return GF_NON_COMPLIANT_BITSTREAM;
	}

	dw = gf_bs_read_u32(bs);
	if (dw != GF_4CC('D', 'K', 'I', 'F')) {
		GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[IVF] Invalid signature\n"));
		return GF_NON_COMPLIANT_BITSTREAM;
	}

	dw = gf_bs_read_u16_le(bs);
	if (dw != 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF version. 0 expected, got %u\n", dw));
		return GF_NON_COMPLIANT_BITSTREAM;
	}

	dw = gf_bs_read_u16_le(bs); //length of header in bytes
	if (dw != IVF_FILE_HEADER_SIZE) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF header length. Expected 32 bytes, got %u\n", dw));
		return GF_NON_COMPLIANT_BITSTREAM;
	}

	*codec_fourcc = gf_bs_read_u32(bs);
	*width = gf_bs_read_u16_le(bs);
	*height = gf_bs_read_u16_le(bs);
	*timebase_num = gf_bs_read_u32_le(bs);
	*timebase_den = gf_bs_read_u32_le(bs);
	*num_frames = gf_bs_read_u32_le(bs);
	gf_bs_read_u32_le(bs); //skip unused

	return GF_OK;
}

GF_Err gf_media_parse_ivf_frame_header(GF_BitStream *bs, u64 *frame_size, u64 *pts)
{
	if (!frame_size) return GF_BAD_PARAM;

	*frame_size = gf_bs_read_u32_le(bs);
	if (*frame_size > 256 * 1024 * 1024) {
		/*fix: *frame_size is a u64, log it with the LLU format instead of %u*/
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong frame size "LLU"\n", *frame_size));
		*frame_size = 0;
		return GF_NON_COMPLIANT_BITSTREAM;
	}
	*pts = gf_bs_read_u64_le(bs);
	return GF_OK;
}

GF_Err gf_media_vp9_parse_superframe(GF_BitStream *bs, u64 ivf_frame_size, u32 *num_frames_in_superframe, u32 frame_sizes[VP9_MAX_FRAMES_IN_SUPERFRAME], u32 *superframe_index_size)
{
	u32 byte, bytes_per_framesize;
	u64 pos = gf_bs_get_position(bs), i = 0;
	GF_Err e;

	assert(bs && num_frames_in_superframe);

	/*initialize like there is no superframe*/
	memset(frame_sizes, 0, VP9_MAX_FRAMES_IN_SUPERFRAME * sizeof(frame_sizes[0]));
	*num_frames_in_superframe = 1;
	frame_sizes[0] = (u32)ivf_frame_size;
	*superframe_index_size = 0;

	e = gf_bs_seek(bs, pos + ivf_frame_size - 1);
	if (e) return e;

	byte = gf_bs_read_u8(bs);
	if ((byte & 0xe0) != 0xc0)
		goto exit; /*no superframe*/

	bytes_per_framesize = 1 + ((byte & 0x18) >> 3);
	*num_frames_in_superframe = (u32)(1 + (byte & 0x7));

	/*superframe_index()*/
	*superframe_index_size = 2 + bytes_per_framesize * *num_frames_in_superframe;
	gf_bs_seek(bs, pos + ivf_frame_size - *superframe_index_size);
	byte = gf_bs_read_u8(bs);
	if ((byte & 0xe0) != 0xc0)
		goto exit; /*no superframe*/

	frame_sizes[0] = 0;
	for (i = 0; i < *num_frames_in_superframe; ++i) {
		gf_bs_read_data(bs, (char*)(frame_sizes + i), bytes_per_framesize);
	}

exit:
	gf_bs_seek(bs, pos);
	return e;
}

static Bool vp9_frame_sync_code(GF_BitStream *bs)
{
	u8 val = gf_bs_read_int_log(bs, 8, "syncbyte1");
	if (val != 0x49) return GF_FALSE;
	val = gf_bs_read_int_log(bs, 8, "syncbyte2");
	if (val != 0x83) return GF_FALSE;
	val = gf_bs_read_int_log(bs, 8, "syncbyte3");
	if (val != 0x42) return GF_FALSE;
	return GF_TRUE;
}

typedef enum {
	CS_UNKNOWN = 0,
	CS_BT_601 = 1,
	CS_BT_709 = 2,
	CS_SMPTE_170 = 3,
	CS_SMPTE_240 = 4,
	CS_BT_2020 = 5,
	CS_RESERVED = 6,
	CS_RGB = 7,
} VP9_color_space;

static const int VP9_CS_to_23001_8_colour_primaries[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 1 };
static const int VP9_CS_to_23001_8_transfer_characteristics[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 13 };
static const int VP9_CS_to_23001_8_matrix_coefficients[] = { -1/*undefined*/, 6, 1, -1, -1, 9, -1/*reserved*/, 0 };
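/*
 Illustrative sketch, not part of the original code and not compiled: how the two IVF
 helpers above chain together to walk a raw IVF file already wrapped in a GF_BitStream.
 The helper name ivf_dump_frames is hypothetical; error handling is reduced to early returns.
*/
#if 0 //illustrative example
static GF_Err ivf_dump_frames(GF_BitStream *bs)
{
	u32 w, h, fourcc, tb_num, tb_den, nb_frames, i;
	GF_Err e = gf_media_parse_ivf_file_header(bs, &w, &h, &fourcc, &tb_num, &tb_den, &nb_frames);
	if (e) return e;
	for (i = 0; i < nb_frames; i++) {
		u64 fsize, pts;
		e = gf_media_parse_ivf_frame_header(bs, &fsize, &pts);
		if (e) return e;
		/*the 12-byte frame header is followed by fsize bytes of payload; skip to the next frame*/
		gf_bs_skip_bytes(bs, fsize);
	}
	return GF_OK;
}
#endif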
static GF_Err vp9_color_config(GF_BitStream *bs, GF_VPConfig *vp9_cfg)
{
	VP9_color_space color_space;

	if (vp9_cfg->profile >= 2) {
		Bool ten_or_twelve_bit = gf_bs_read_int_log(bs, 1, "ten_or_twelve_bit");
		vp9_cfg->bit_depth = ten_or_twelve_bit ? 12 : 10;
	} else {
		vp9_cfg->bit_depth = 8;
	}

	color_space = gf_bs_read_int_log(bs, 3, "color_space");
	vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space];
	vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space];
	vp9_cfg->matrix_coefficients = VP9_CS_to_23001_8_matrix_coefficients[color_space];
	if (color_space != CS_RGB) {
		vp9_cfg->video_fullRange_flag = gf_bs_read_int_log(bs, 1, "video_fullRange_flag");
		if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) {
			u8 subsampling_x, subsampling_y, subsampling_xy_to_chroma_subsampling[2][2] = { {3, 0}, {2, 0} };
			subsampling_x = gf_bs_read_int_log(bs, 1, "subsampling_x");
			/*fix: the second read is subsampling_y; it was logged under the name "subsampling_x"*/
			subsampling_y = gf_bs_read_int_log(bs, 1, "subsampling_y");
			vp9_cfg->chroma_subsampling = subsampling_xy_to_chroma_subsampling[subsampling_x][subsampling_y];
			Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero");
			if (reserved_zero) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (1) is not zero.\n"));
				return GF_NON_COMPLIANT_BITSTREAM;
			}
		} else {
			vp9_cfg->chroma_subsampling = 0;
		}
	} else {
		vp9_cfg->video_fullRange_flag = GF_TRUE;
		if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) {
			vp9_cfg->chroma_subsampling = 3;
			Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero");
			if (reserved_zero) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (2) is not zero.\n"));
				return GF_NON_COMPLIANT_BITSTREAM;
			}
		}
	}
	return GF_OK;
}

static void vp9_compute_image_size(int FrameWidth, int FrameHeight, int *Sb64Cols, int *Sb64Rows)
{
	int MiCols = (FrameWidth + 7) >> 3;
	int MiRows = (FrameHeight + 7) >> 3;
	*Sb64Cols = (MiCols + 7) >> 3;
	*Sb64Rows = (MiRows + 7) >> 3;
}

static void vp9_frame_size(GF_BitStream *bs, int *FrameWidth, int *FrameHeight, int *Sb64Cols, int *Sb64Rows)
{
	int frame_width_minus_1 = gf_bs_read_int_log(bs, 16, "frame_width_minus_1");
	int frame_height_minus_1 = gf_bs_read_int_log(bs, 16, "frame_height_minus_1");
	if (frame_width_minus_1 + 1 != *FrameWidth || frame_height_minus_1 + 1 != *FrameHeight) {
		if (*FrameWidth || *FrameHeight)
			GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[VP9] inconsistent frame dimensions: previous was %dx%d, new one is %dx%d.\n", *FrameWidth, *FrameHeight, frame_width_minus_1 + 1, frame_height_minus_1 + 1));
	}
	*FrameWidth = frame_width_minus_1 + 1;
	*FrameHeight = frame_height_minus_1 + 1;
	vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows);
}

static void vp9_render_size(GF_BitStream *bs, int FrameWidth, int FrameHeight, int *renderWidth, int *renderHeight)
{
	Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different");
	if (render_and_frame_size_different == 1) {
		int render_width_minus_1 = gf_bs_read_int_log(bs, 16, "render_width_minus_1");
		int render_height_minus_1 = gf_bs_read_int_log(bs, 16, "render_height_minus_1");
		*renderWidth = render_width_minus_1 + 1;
		*renderHeight = render_height_minus_1 + 1;
	} else {
		*renderWidth = FrameWidth;
		*renderHeight = FrameHeight;
	}
}

/*sign-magnitude su(n) read: n magnitude bits followed by one sign bit*/
static s64 vp9_s(GF_BitStream *bs, int n, const char *fname, u32 idx)
{
	s64 value = gf_bs_read_int(bs, n);
	Bool sign = gf_bs_read_int(bs, 1);
	if (sign) value = -value;
	gf_bs_log_idx(bs, n+1, fname, value, idx, -1, -1);
	return value;
}
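/*
 Illustrative sketch, not part of the original code and not compiled: consuming the output of
 gf_media_vp9_parse_superframe() defined above. frame_sizes[] partitions the IVF payload into
 standalone VP9 frames (each one can be handed to gf_media_vp9_parse_sample(), defined further
 below); the trailing superframe index is metadata, not a frame. The helper name is hypothetical.
*/
#if 0 //illustrative example
static GF_Err vp9_walk_superframe(GF_BitStream *bs, u64 ivf_frame_size)
{
	u32 i, nb_frames, sizes[VP9_MAX_FRAMES_IN_SUPERFRAME], index_size;
	u64 start = gf_bs_get_position(bs);
	GF_Err e = gf_media_vp9_parse_superframe(bs, ivf_frame_size, &nb_frames, sizes, &index_size);
	if (e) return e;
	/*on return the stream is back at 'start'; each sizes[i] bytes form one frame*/
	for (i = 0; i < nb_frames; i++) {
		gf_bs_skip_bytes(bs, sizes[i]);
	}
	gf_bs_seek(bs, start);
	return GF_OK;
}
#endif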
static void vp9_loop_filter_params(GF_BitStream *bs)
{
	/*loop_filter_level = */gf_bs_read_int_log(bs, 6, "loop_filter_level");
	/*loop_filter_sharpness = */gf_bs_read_int_log(bs, 3, "loop_filter_sharpness");
	Bool loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled");
	if (loop_filter_delta_enabled == 1) {
		Bool loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update");
		if (loop_filter_delta_update == GF_TRUE) {
			int i;
			for (i = 0; i < 4; i++) {
				Bool update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i);
				if (update_ref_delta == GF_TRUE)
					vp9_s(bs, 6, "loop_filter_ref_deltas", i);
			}
			for (i = 0; i < 2; i++) {
				Bool update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i);
				if (update_mode_delta == GF_TRUE)
					vp9_s(bs, 6, "loop_filter_mode_deltas", i);
			}
		}
	}
}

static void vp9_quantization_params(GF_BitStream *bs)
{
	/*base_q_idx = */gf_bs_read_int_log(bs, 8, "base_q_idx");
}

#define VP9_MAX_SEGMENTS 8
#define VP9_SEG_LVL_MAX 4
static const int segmentation_feature_bits[VP9_SEG_LVL_MAX] = { 8, 6, 2, 0 };
static const int segmentation_feature_signed[VP9_SEG_LVL_MAX] = { 1, 1, 0, 0 };

#define VP9_MIN_TILE_WIDTH_B64 4
#define VP9_MAX_TILE_WIDTH_B64 64

static void vp9_segmentation_params(GF_BitStream *bs)
{
	Bool segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled");
	if (segmentation_enabled == 1) {
		int i;
		Bool segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map");
		if (segmentation_update_map) {
			/*braces added for clarity: the one-bit read below has always been the loop body,
			i.e. seven 1-bit reads approximating the segmentation_tree_probs read_prob() calls*/
			for (i = 0; i < 7; i++) {
				/*segmentation_tree_probs[i] = read_prob()*/
				/*segmentation_temporal_update = */gf_bs_read_int_log(bs, 1, "segmentation_temporal_update");
			}
			/*for (i = 0; i < 3; i++) segmentation_pred_prob[i] = segmentation_temporal_update ? read_prob() : 255*/
		}
		Bool segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data");
		if (segmentation_update_data == 1) {
			/*segmentation_abs_or_delta_update =*/ gf_bs_read_int_log(bs, 1, "segmentation_abs_or_delta_update");
			for (i = 0; i < VP9_MAX_SEGMENTS; i++) {
				int j;
				for (j = 0; j < VP9_SEG_LVL_MAX; j++) {
					/*feature_value = 0*/
					Bool feature_enabled = gf_bs_read_int_log(bs, 1, "feature_enabled");
					/*FeatureEnabled[i][j] = feature_enabled*/
					if (feature_enabled) {
						int bits_to_read = segmentation_feature_bits[j];
						/*feature_value =*/ gf_bs_read_int_log(bs, bits_to_read, "feature_value");
						if (segmentation_feature_signed[j] == 1) {
							/*Bool feature_sign = */gf_bs_read_int_log(bs, 1, "feature_sign");
							/*if (feature_sign == 1) feature_value *= -1*/
						}
					}
					/*FeatureData[i][j] = feature_value*/
				}
			}
		}
	}
}

static int calc_min_log2_tile_cols(int Sb64Cols)
{
	int minLog2 = 0;
	while ((VP9_MAX_TILE_WIDTH_B64 << minLog2) < Sb64Cols)
		minLog2++;
	return minLog2;
}

static int calc_max_log2_tile_cols(int Sb64Cols)
{
	int maxLog2 = 1;
	while ((Sb64Cols >> maxLog2) >= VP9_MIN_TILE_WIDTH_B64)
		maxLog2++;
	return maxLog2 - 1;
}

static void vp9_tile_info(GF_BitStream *bs, int Sb64Cols)
{
	Bool tile_rows_log2;
	int minLog2TileCols = calc_min_log2_tile_cols(Sb64Cols);
	int maxLog2TileCols = calc_max_log2_tile_cols(Sb64Cols);
	int tile_cols_log2 = minLog2TileCols;
	while (tile_cols_log2 < maxLog2TileCols) {
		Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2");
		if (increment_tile_cols_log2) tile_cols_log2++;
		else break;
	}
	tile_rows_log2 = gf_bs_read_int_log(bs, 1, "tile_rows_log2");
	if (tile_rows_log2) {
		/*Bool increment_tile_rows_log2 = */gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2");
		//tile_rows_log2 += increment_tile_rows_log2;
	}
}

static void
vp9_frame_size_with_refs(GF_BitStream *bs, u8 refresh_frame_flags, u8 * ref_frame_idx, int * RefFrameWidth, int *RefFrameHeight, int *FrameWidth, int *FrameHeight, int *RenderWidth, int *RenderHeight, int *Sb64Cols, int *Sb64Rows) { Bool found_ref; int i; for (i = 0; i < 3; i++) { found_ref = gf_bs_read_int_log(bs, 1, "found_ref"); if (found_ref) { *FrameWidth = RefFrameWidth [ref_frame_idx[i]]; *FrameHeight = RefFrameHeight[ref_frame_idx[i]]; break; } } if (found_ref == 0) { vp9_frame_size(bs, FrameWidth, FrameHeight, Sb64Cols, Sb64Rows); } else { vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } vp9_render_size(bs, *FrameWidth, *FrameHeight, RenderWidth, RenderHeight); } static void vp9_read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*raw_interpolation_filter = */gf_bs_read_int_log(bs, 2, "raw_interpolation_filter"); } } #define VP9_KEY_FRAME 0 GF_Err gf_media_vp9_parse_sample(GF_BitStream *bs, GF_VPConfig *vp9_cfg, Bool *key_frame, u32 *FrameWidth, u32 *FrameHeight, u32 *renderWidth, u32 *renderHeight) { Bool FrameIsIntra = GF_FALSE, profile_low_bit, profile_high_bit, show_existing_frame = GF_FALSE, frame_type = GF_FALSE, show_frame = GF_FALSE, error_resilient_mode = GF_FALSE; /*u8 frame_context_idx = 0, reset_frame_context = 0, frame_marker = 0*/; int Sb64Cols = 0, Sb64Rows = 0, i; u8 refresh_frame_flags = 0; assert(bs && key_frame); /*uncompressed header*/ /*frame_marker = */gf_bs_read_int_log(bs, 2, "frame_marker"); profile_low_bit = gf_bs_read_int_log(bs, 1, "profile_low_bit"); profile_high_bit = gf_bs_read_int_log(bs, 1, "profile_high_bit"); vp9_cfg->profile = (profile_high_bit << 1) + profile_low_bit; if (vp9_cfg->profile == 3) { Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] uncompressed header reserved zero is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (show_existing_frame == GF_TRUE) { /*frame_to_show_map_idx = */gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); return GF_OK; } frame_type = gf_bs_read_int_log(bs, 1, "frame_type"); show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); if (frame_type == VP9_KEY_FRAME) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); refresh_frame_flags = 0xFF; *key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; } else { Bool intra_only = GF_FALSE; *key_frame = GF_FALSE; if (show_frame == GF_FALSE) { intra_only = gf_bs_read_int_log(bs, 1, "intra_only"); } FrameIsIntra = intra_only; if (error_resilient_mode == GF_FALSE) { /*reset_frame_context = */gf_bs_read_int_log(bs, 2, "reset_frame_context"); } if (intra_only == GF_TRUE) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_cfg->profile > 0) { if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; } else { u8 color_space = CS_BT_601; vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = 
VP9_CS_to_23001_8_matrix_coefficients[color_space]; vp9_cfg->chroma_subsampling = 0; vp9_cfg->bit_depth = 8; } refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); } else { refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); u8 ref_frame_idx[3]; for (i = 0; i < 3; i++) { ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); /*ref_frame_sign_bias[LAST_FRAME + i] = */gf_bs_read_int_log_idx(bs, 1, "ref_frame_sign_bias", i); } vp9_frame_size_with_refs(bs, refresh_frame_flags, ref_frame_idx, vp9_cfg->RefFrameWidth, vp9_cfg->RefFrameHeight, FrameWidth, FrameHeight, renderWidth, renderHeight, &Sb64Cols, &Sb64Rows); /*allow_high_precision_mv = */gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); vp9_read_interpolation_filter(bs); } } if (error_resilient_mode == 0) { /*refresh_frame_context = */gf_bs_read_int_log(bs, 1, "refresh_frame_context"); /*frame_parallel_decoding_mode = */gf_bs_read_int_log(bs, 1, "frame_parallel_decoding_mode"); } /*frame_context_idx = */gf_bs_read_int_log(bs, 2, "frame_context_idx"); if (FrameIsIntra || error_resilient_mode) { /*setup_past_independence + save_probs ...*/ //frame_context_idx = 0; } vp9_loop_filter_params(bs); vp9_quantization_params(bs); vp9_segmentation_params(bs); vp9_tile_info(bs, Sb64Cols); /*header_size_in_bytes = */gf_bs_read_int_log(bs, 16, "header_size_in_bytes"); /*Reference frame update process (8.10 - partial)*/ for (i = 0; i < VP9_NUM_REF_FRAMES; i++) { if ((refresh_frame_flags >> i) & 1) { vp9_cfg->RefFrameWidth[i] = *FrameWidth; vp9_cfg->RefFrameHeight[i] = *FrameHeight; } } return GF_OK; } GF_Err gf_av1_parse_obu_header(GF_BitStream *bs, ObuType *obu_type, Bool *obu_extension_flag, Bool *obu_has_size_field, u8 *temporal_id, u8 *spatial_id) { Bool forbidden = gf_bs_read_int(bs, 1); if (forbidden) { return GF_NON_COMPLIANT_BITSTREAM; } *obu_type = gf_bs_read_int(bs, 4); *obu_extension_flag = gf_bs_read_int(bs, 1); *obu_has_size_field = gf_bs_read_int(bs, 1); if (gf_bs_read_int(bs, 1) /*obu_reserved_1bit*/) { return GF_NON_COMPLIANT_BITSTREAM; } if (*obu_extension_flag) { *temporal_id = gf_bs_read_int(bs, 3); *spatial_id = gf_bs_read_int(bs, 2); /*extension_header_reserved_3bits = */gf_bs_read_int(bs, 3); } return GF_OK; } #endif // GPAC_DISABLE_AV_PARSERS GF_EXPORT const char *gf_av1_get_obu_name(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: return "seq_header"; case OBU_TEMPORAL_DELIMITER: return "delimiter"; case OBU_FRAME_HEADER: return "frame_header"; case OBU_TILE_GROUP: return "tile_group"; case OBU_METADATA: return "metadata"; case OBU_FRAME: return "frame"; case OBU_REDUNDANT_FRAME_HEADER: return "redundant_frame_header"; case OBU_TILE_LIST: return "tile_list"; case OBU_PADDING: return "padding"; case OBU_RESERVED_0: case OBU_RESERVED_9: case OBU_RESERVED_10: case OBU_RESERVED_11: case OBU_RESERVED_12: case OBU_RESERVED_13: case OBU_RESERVED_14: return "reserved"; default: return "unknown"; } } Bool av1_is_obu_header(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: case OBU_METADATA: // TODO add check based on the metadata type return GF_TRUE; default: return GF_FALSE; } } #ifndef GPAC_DISABLE_AV_PARSERS static Bool av1_is_obu_frame(AV1State *state, ObuType obu_type) { switch (obu_type) { case OBU_PADDING: case OBU_REDUNDANT_FRAME_HEADER: return GF_FALSE; case OBU_TEMPORAL_DELIMITER: return 
state->keep_temporal_delim ? GF_TRUE : GF_FALSE; default: return GF_TRUE; } } u64 gf_av1_leb128_read(GF_BitStream *bs, u8 *opt_Leb128Bytes) { u64 value = 0; u8 Leb128Bytes = 0, i = 0; for (i = 0; i < 8; i++) { u8 leb128_byte = gf_bs_read_u8(bs); value |= ( ((u64) (leb128_byte & 0x7f)) << (i * 7)); Leb128Bytes += 1; if (!(leb128_byte & 0x80)) { break; } } if (opt_Leb128Bytes) { *opt_Leb128Bytes = Leb128Bytes; } return value; } u32 gf_av1_leb128_size(u64 value) { u32 gf_av1_leb128_size = 0; do { ++gf_av1_leb128_size; } while ((value >>= 7) != 0); return gf_av1_leb128_size; } u64 gf_av1_leb128_write(GF_BitStream *bs, u64 value) { u32 i, leb_size = gf_av1_leb128_size(value); for (i = 0; i < leb_size; ++i) { u8 byte = value & 0x7f; value >>= 7; if (value != 0) byte |= 0x80; //more bytes follow gf_bs_write_u8(bs, byte); } return leb_size; } #define OBU_BLOCK_SIZE 4096 static void av1_add_obu_internal(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, GF_List **obu_list, AV1State *state) { char block[OBU_BLOCK_SIZE]; Bool has_size_field = 0, obu_extension_flag = 0; u8 temporal_id, spatial_id; GF_AV1_OBUArrayEntry *a = NULL; if (state && state->mem_mode) { if (!state->bs) state->bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); else gf_bs_reassign_buffer(state->bs, state->frame_obus, state->frame_obus_alloc); } else { GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry); if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] Failed to allocate OBU\n")); return; } } gf_bs_seek(bs, pos); gf_av1_parse_obu_header(bs, &obu_type, &obu_extension_flag, &has_size_field, &temporal_id, &spatial_id); gf_bs_seek(bs, pos); if (has_size_field) { if (a) { a->obu = gf_malloc((size_t)obu_length); gf_bs_read_data(bs, a->obu, (u32)obu_length); a->obu_length = obu_length; } else { u32 remain = (u32)obu_length; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } return; } } else { u8 i, hdr_size = obu_extension_flag ? 
2 : 1; const u32 leb_size = (u32)gf_av1_leb128_size(obu_length); const u64 obu_size = obu_length - hdr_size; if (a) { a->obu = gf_malloc((size_t)obu_length + leb_size); a->obu_length = obu_length + leb_size; for (i = 0; i < hdr_size; ++i) { a->obu[i] = gf_bs_read_u8(bs); /*add size field flag*/ if (i == 0) a->obu[0] |= 0x02; } { u32 out_size = 0; u8 *output = NULL; GF_BitStream *bsLeb128 = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*write size field*/ gf_av1_leb128_write(bsLeb128, obu_size); assert(gf_bs_get_position(bsLeb128) == leb_size); gf_bs_get_content(bsLeb128, &output, &out_size); gf_bs_del(bsLeb128); memcpy(a->obu + hdr_size, output, out_size); gf_free(output); } gf_bs_read_data(bs, a->obu + hdr_size + leb_size, (u32)(obu_size)); assert(gf_bs_get_position(bs) == pos + obu_length); } else { u32 remain; for (i = 0; i < hdr_size; ++i) { u8 hdr_b = gf_bs_read_u8(bs); if (i == 0) hdr_b |= 0x02; /*add size field flag*/ gf_bs_write_u8(state->bs, hdr_b); } /*add size field */ gf_av1_leb128_write(state->bs, obu_size); remain = (u32)obu_length - hdr_size; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } assert(gf_bs_get_position(bs) == pos + obu_length); return; } } if (!obu_list) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] internal error, no OBU list to add to\n")); gf_free(a->obu); gf_free(a); return; } a->obu_type = obu_type; if (! *obu_list) *obu_list = gf_list_new(); gf_list_add(*obu_list, a); } static void av1_populate_state_from_obu(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, AV1State *state) { if (av1_is_obu_header(obu_type)) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.header_obus, NULL); } if (!state->skip_frames && av1_is_obu_frame(state, obu_type)) { if (!state->mem_mode) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.frame_obus, NULL); } else { av1_add_obu_internal(bs, pos, obu_length, obu_type, NULL, state); } } } GF_Err aom_av1_parse_temporal_unit_from_section5(GF_BitStream *bs, AV1State *state) { if (!state) return GF_BAD_PARAM; state->obu_type = -1; while (state->obu_type != OBU_TEMPORAL_DELIMITER) { GF_Err e; if (!gf_bs_available(bs)) return state->unframed ?
GF_BUFFER_TOO_SMALL : GF_OK; u64 pos = gf_bs_get_position(bs), obu_length = 0; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] OBU (Section 5) frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Section5 OBU detected (size "LLU")\n", obu_length)); av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); } return GF_OK; } Bool gf_media_aom_probe_annexb(GF_BitStream *bs) { Bool res = GF_TRUE; u64 init_pos = gf_bs_get_position(bs); u64 sz = gf_av1_leb128_read(bs, NULL); if (!sz) res = GF_FALSE; while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (!frame_unit_size) { res = GF_FALSE; break; } if (sz < Leb128Bytes + frame_unit_size) { res = GF_FALSE; break; } sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { ObuType obu_type; u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (frame_unit_size < Leb128Bytes + obu_length) { res = GF_FALSE; break; } pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; u8 tid, sid; Bool extflag, has_size; GF_Err e = gf_av1_parse_obu_header(bs, &obu_type, &extflag, &has_size, &tid, &sid); if (e) { res = GF_FALSE; break; } if (has_size) { obu_length = (u32)gf_av1_leb128_read(bs, NULL); } else { if (obu_length >= 1 + extflag) { obu_length = obu_length - 1 - extflag; } else { res = GF_FALSE; break; } } u32 hdr_size = (u32)(gf_bs_get_position(bs) - pos); obu_length += hdr_size; if (frame_unit_size < obu_length) { res = GF_FALSE; break; } frame_unit_size -= obu_length; gf_bs_skip_bytes(bs, obu_length - hdr_size); } if (!res) break; } gf_bs_seek(bs, init_pos); return res; } GF_Err aom_av1_parse_temporal_unit_from_annexb(GF_BitStream *bs, AV1State *state) { GF_Err e; u64 tupos; u64 tusize, sz; if (!bs || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; tusize = sz = gf_av1_leb128_read(bs, NULL); tupos = gf_bs_get_position(bs); if (!sz) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] temporal unit size is 0, likely not annex B\n")); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B temporal unit detected (size "LLU") ***** \n", sz)); while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (sz < Leb128Bytes + frame_unit_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B sz("LLU") < Leb128Bytes("LLU") + frame_unit_size("LLU")\n", sz, Leb128Bytes, frame_unit_size)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B frame unit detected (size "LLU")\n", frame_unit_size)); sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (frame_unit_size < Leb128Bytes + obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < Leb128Bytes("LLU") + obu_length("LLU")\n", frame_unit_size, Leb128Bytes, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B OBU detected (size "LLU")\n", obu_length)); pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, 
NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); if (frame_unit_size < obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < OBU size ("LLU")\n", frame_unit_size, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } frame_unit_size -= obu_length; } } assert(sz == 0); if (tusize != gf_bs_get_position(bs) - tupos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B TU size "LLU" different from consumed bytes "LLU".\n", tusize, gf_bs_get_position(bs) - tupos)); return GF_NON_COMPLIANT_BITSTREAM; } return GF_OK; } GF_Err aom_av1_parse_temporal_unit_from_ivf(GF_BitStream *bs, AV1State *state) { u64 frame_size, pts_ignored; GF_Err e; if (gf_bs_available(bs)<12) return GF_EOS; e = gf_media_parse_ivf_frame_header(bs, &frame_size, &pts_ignored); if (e) return e; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] IVF frame detected (size "LLU")\n", frame_size)); if (gf_bs_available(bs) < frame_size) return GF_EOS; while (frame_size > 0) { u64 obu_size = 0, pos = gf_bs_get_position(bs); e = gf_av1_parse_obu(bs, &state->obu_type, &obu_size, NULL, state); if (e != GF_OK) return e; if (obu_size != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] IVF frame size "LLU" different from consumed bytes "LLU".\n", obu_size, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_size, state->obu_type, state); frame_size -= obu_size; } return GF_OK; } #define AV1_NUM_REF_FRAMES 8 #define AV1_ALL_FRAMES ((1 << AV1_NUM_REF_FRAMES) - 1) #define AV1_SUPERRES_DENOM_MIN 9 #define AV1_SUPERRES_DENOM_BITS 3 #define AV1_SUPERRES_NUM 8 #define AV1_REFS_PER_FRAME 7 #define AV1_PRIMARY_REF_NONE 7 #define MAX_TILE_WIDTH 4096 #define MAX_TILE_AREA (4096 * 2304) static u32 aom_av1_tile_log2(u32 blkSize, u32 target) { u32 k; for (k = 0; (blkSize << k) < target; k++) { } return k; } static u64 aom_av1_le(GF_BitStream *bs, u32 n, const char *name) { u32 i = 0; u64 t = 0; for (i = 0; i < n; i++) { u8 byte = gf_bs_read_int(bs, 8); t += (((u64)byte) << (i * 8)); /*widen before shifting to avoid signed 32-bit overflow when i*8 >= 24*/ } gf_bs_log(bs, n*8, name, t); return t; } static void av1_parse_tile_info(GF_BitStream *bs, AV1State *state) { u32 i; u32 MiCols = 2 * ((state->width + 7) >> 3); u32 MiRows = 2 * ((state->height + 7) >> 3); u32 sbCols = state->use_128x128_superblock ? ((MiCols + 31) >> 5) : ((MiCols + 15) >> 4); u32 sbRows = state->use_128x128_superblock ? ((MiRows + 31) >> 5) : ((MiRows + 15) >> 4); u32 sbShift = state->use_128x128_superblock ?
5 : 4; u32 sbSize = sbShift + 2; u32 maxTileWidthSb = MAX_TILE_WIDTH >> sbSize; u32 maxTileAreaSb = MAX_TILE_AREA >> (2 * sbSize); u32 minLog2tileCols = aom_av1_tile_log2(maxTileWidthSb, sbCols); u32 maxLog2tileCols = aom_av1_tile_log2(1, MIN(sbCols, AV1_MAX_TILE_COLS)); u32 maxLog2tileRows = aom_av1_tile_log2(1, MIN(sbRows, AV1_MAX_TILE_ROWS)); u32 minLog2Tiles = MAX(minLog2tileCols, aom_av1_tile_log2(maxTileAreaSb, sbRows * sbCols)); Bool uniform_tile_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_tile_spacing_flag"); if (uniform_tile_spacing_flag) { u32 startSb, tileWidthSb, tileHeightSb, minLog2tileRows; state->tileColsLog2 = minLog2tileCols; while (state->tileColsLog2 < maxLog2tileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2 == 1) state->tileColsLog2++; else break; } tileWidthSb = (sbCols + (1 << state->tileColsLog2) - 1) >> state->tileColsLog2; i = 0; for (startSb = 0; startSb < sbCols; startSb += tileWidthSb) { i += 1; } state->tileCols = i; minLog2tileRows = MAX((int)(minLog2Tiles - state->tileColsLog2), 0); state->tileRowsLog2 = minLog2tileRows; while (state->tileRowsLog2 < maxLog2tileRows) { Bool increment_tile_rows_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); if (increment_tile_rows_log2 == 1) state->tileRowsLog2++; else break; } tileHeightSb = (sbRows + (1 << state->tileRowsLog2) - 1) >> state->tileRowsLog2; i = 0; for (startSb = 0; startSb < sbRows; startSb += tileHeightSb) { i += 1; } state->tileRows = i; } else { u32 startSb, maxTileHeightSb, widestTileSb; widestTileSb = 0; startSb = 0; for (i = 0; startSb < sbCols; i++) { u32 maxWidth = MIN((int)(sbCols - startSb), maxTileWidthSb); u32 width_in_sbs_minus_1 = av1_read_ns(bs, maxWidth, "width_in_sbs_minus_1"); u32 sizeSb = width_in_sbs_minus_1 + 1; widestTileSb = MAX(sizeSb, widestTileSb); startSb += sizeSb; } if (!widestTileSb) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] widest tile is 0, broken bitstream\n")); return; } state->tileCols = i; state->tileColsLog2 = aom_av1_tile_log2(1, state->tileCols); if (minLog2Tiles > 0) maxTileAreaSb = (sbRows * sbCols) >> (minLog2Tiles + 1); else maxTileAreaSb = sbRows * sbCols; maxTileHeightSb = MAX(maxTileAreaSb / widestTileSb, 1); startSb = 0; for (i = 0; startSb < sbRows; i++) { u32 maxHeight = MIN((int)(sbRows - startSb), maxTileHeightSb); u32 height_in_sbs_minus_1 = av1_read_ns(bs, maxHeight, "height_in_sbs_minus_1"); u32 sizeSb = height_in_sbs_minus_1 + 1; startSb += sizeSb; } state->tileRows = i; state->tileRowsLog2 = aom_av1_tile_log2(1, state->tileRows); } if (state->tileColsLog2 > 0 || state->tileRowsLog2 > 0) { gf_bs_read_int_log(bs, state->tileRowsLog2 + state->tileColsLog2, "context_update_tile_id"); state->tile_size_bytes = gf_bs_read_int_log(bs, 2, "tile_size_bytes_minus1") + 1; } } static void superres_params(GF_BitStream *bs, AV1State *state) { u32 SuperresDenom; Bool use_superres; if (state->enable_superres) { use_superres = gf_bs_read_int_log(bs, 1, "use_superres"); } else { use_superres = GF_FALSE; } if (use_superres) { u8 coded_denom = gf_bs_read_int_log(bs, AV1_SUPERRES_DENOM_BITS, "coded_denom"); SuperresDenom = coded_denom + AV1_SUPERRES_DENOM_MIN; } else { SuperresDenom = AV1_SUPERRES_NUM; } state->UpscaledWidth = state->width; state->width = (state->UpscaledWidth * AV1_SUPERRES_NUM + (SuperresDenom / 2)) / SuperresDenom; } static void av1_frame_size(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { if (frame_size_override_flag) { u32 
frame_width_minus_1, frame_height_minus_1; u8 n = state->frame_width_bits_minus_1 + 1; frame_width_minus_1 = gf_bs_read_int_log(bs, n, "frame_width_minus_1"); n = state->frame_height_bits_minus_1 + 1; frame_height_minus_1 = gf_bs_read_int_log(bs, n, "frame_height_minus_1"); state->width = frame_width_minus_1 + 1; state->height = frame_height_minus_1 + 1; } else { state->width = state->sequence_width; state->height = state->sequence_height; } superres_params(bs, state); //compute_image_size(); //no bits } static void av1_render_size(GF_BitStream *bs) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different_flag"); if (render_and_frame_size_different == GF_TRUE) { gf_bs_read_int_log(bs, 16, "render_width_minus_1"); gf_bs_read_int_log(bs, 16, "render_height_minus_1"); //RenderWidth = render_width_minus_1 + 1; //RenderHeight = render_height_minus_1 + 1; } else { //RenderWidth = UpscaledWidth; //RenderHeight = FrameHeight; } } static void read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*interpolation_filter =*/ gf_bs_read_int_log(bs, 2, "interpolation_filter"); } } static void frame_size_with_refs(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { Bool found_ref = GF_FALSE; u32 i = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { found_ref = gf_bs_read_int_log_idx(bs, 1, "found_ref", i); if (found_ref == 1) { #if 0 UpscaledWidth = RefUpscaledWidth[ref_frame_idx[i]]; FrameWidth = UpscaledWidth; FrameHeight = RefFrameHeight[ref_frame_idx[i]]; RenderWidth = RefRenderWidth[ref_frame_idx[i]]; RenderHeight = RefRenderHeight[ref_frame_idx[i]]; #endif break; } } if (found_ref == 0) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } else { superres_params(bs, state); //compute_image_size(); } } static s32 av1_delta_q(GF_BitStream *bs, const char *name_flag, const char *name) { Bool delta_coded = gf_bs_read_int_log(bs, 1, name_flag); s32 delta_q = 0; if (delta_coded) { u32 signMask = 1 << (7 - 1); delta_q = gf_bs_read_int_log(bs, 7, name); if (delta_q & signMask) delta_q = delta_q - 2 * signMask; } return delta_q; } static u8 Segmentation_Feature_Bits[] = { 8,6,6,6,6,3,0,0 }; static u8 Segmentation_Feature_Signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; static u8 av1_get_qindex(Bool ignoreDeltaQ, u32 segmentId, u32 base_q_idx, u32 delta_q_present, u32 CurrentQIndex, Bool segmentation_enabled, u8 *features_SEG_LVL_ALT_Q_enabled, s32 *features_SEG_LVL_ALT_Q) { //If seg_feature_active_idx( segmentId, SEG_LVL_ALT_Q ) is equal to 1 the following ordered steps apply: if (segmentation_enabled && features_SEG_LVL_ALT_Q_enabled[segmentId]) { //Set the variable data equal to FeatureData[ segmentId ][ SEG_LVL_ALT_Q ]. s32 data = features_SEG_LVL_ALT_Q[segmentId]; s32 qindex = base_q_idx + data; //If ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, set qindex equal to CurrentQIndex + data. if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) qindex = CurrentQIndex + data; //Return Clip3( 0, 255, qindex ). if (qindex < 0) return 0; else if (qindex > 255) return 255; else return (u8)qindex; } //Otherwise, if ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, return CurrentQIndex. 
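/* i.e. when no SEG_LVL_ALT_Q override is active for the segment, a coded delta-q (CurrentQIndex) takes precedence over base_q_idx */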
if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) return CurrentQIndex; //otherwise return base_q_idx; } enum { AV1_RESTORE_NONE = 0, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ }; #define AV1_GMC_IDENTITY 0 #define AV1_GMC_TRANSLATION 1 #define AV1_GMC_ROTZOOM 2 #define AV1_GMC_AFFINE 3 #define AV1_LAST_FRAME 1 #define AV1_LAST2_FRAME 2 #define AV1_LAST3_FRAME 3 #define AV1_GOLDEN_FRAME 4 #define AV1_BWDREF_FRAME 5 #define AV1_ALTREF2_FRAME 6 #define AV1_ALTREF_FRAME 7 #define GM_ABS_ALPHA_BITS 12 #define GM_ALPHA_PREC_BITS 15 #define GM_ABS_TRANS_ONLY_BITS 9 #define GM_TRANS_ONLY_PREC_BITS 3 #define GM_ABS_TRANS_BITS 12 #define GM_TRANS_PREC_BITS 6 #define WARPEDMODEL_PREC_BITS 16 static u32 av1_decode_subexp(GF_BitStream *bs, s32 numSyms) { s32 i = 0; s32 mk = 0; s32 k = 3; while (1) { s32 b2 = i ? k + i - 1 : k; s32 a = 1 << b2; if (numSyms <= mk + 3 * a) { s32 subexp_final_bits = av1_read_ns(bs, numSyms - mk, NULL); return subexp_final_bits + mk; } else { s32 subexp_more_bits = gf_bs_read_int(bs, 1); if (subexp_more_bits) { i++; mk += a; } else { s32 subexp_bits = gf_bs_read_int(bs, b2); return subexp_bits + mk; } } } } static GFINLINE s32 inverse_recenter(s32 r, u32 v) { if ((s64)v > (s64)(2 * r)) return v; else if (v & 1) return r - ((v + 1) >> 1); else return r + (v >> 1); } static s32 av1_decode_unsigned_subexp_with_ref(GF_BitStream *bs, s32 mx, s32 r) { u32 v = av1_decode_subexp(bs, mx); if ((r < 0) && (-(-r << 1) <= mx)) { return inverse_recenter(r, v); } else if ((r << 1) <= mx) { return inverse_recenter(r, v); } else { return mx - 1 - inverse_recenter(mx - 1 - r, v); } } static s16 av1_decode_signed_subexp_with_ref(GF_BitStream *bs, s32 low, s32 high, s32 r) { s16 x = av1_decode_unsigned_subexp_with_ref(bs, high - low, r - low); return x + low; } static void av1_read_global_param(AV1State *state, GF_BitStream *bs, u8 type, u8 ref, u8 idx) { u8 absBits = GM_ABS_ALPHA_BITS; u8 precBits = GM_ALPHA_PREC_BITS; if (idx < 2) { if (type == AV1_GMC_TRANSLATION) { absBits = GM_ABS_TRANS_ONLY_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); precBits = GM_TRANS_ONLY_PREC_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); } else { absBits = GM_ABS_TRANS_BITS; precBits = GM_TRANS_PREC_BITS; } } s32 precDiff = WARPEDMODEL_PREC_BITS - precBits; s32 round = (idx % 3) == 2 ? (1 << WARPEDMODEL_PREC_BITS) : 0; s32 sub = (idx % 3) == 2 ? (1 << precBits) : 0; s32 mx = (1 << absBits); s32 r = (state->PrevGmParams.coefs[ref][idx] >> precDiff) - sub; s32 val = av1_decode_signed_subexp_with_ref(bs, -mx, mx + 1, r); if (val < 0) { val = -val; state->GmParams.coefs[ref][idx] = (-(val << precDiff) + round); } else { state->GmParams.coefs[ref][idx] = (val << precDiff) + round; } } static s32 av1_get_relative_dist(s32 a, s32 b, AV1State *state) { if (!state->enable_order_hint) return 0; s32 diff = a - b; s32 m = 1 << (state->OrderHintBits - 1); diff = (diff & (m - 1)) - (diff & m); return diff; } static void av1_setup_past_independence(AV1State *state) { u32 ref, i; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { for (i = 0; i <= 5; i++) { state->PrevGmParams.coefs[ref][i] = ((i % 3 == 2) ? 
1 << WARPEDMODEL_PREC_BITS : 0); } } } static void av1_load_previous(AV1State *state, u8 primary_ref_frame, s8 *ref_frame_idx) { s8 prevFrame = ref_frame_idx[primary_ref_frame]; if (prevFrame < 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] load_previous: prevFrame reference index %d is invalid\n", prevFrame)); } else { state->PrevGmParams = state->SavedGmParams[prevFrame]; /* load_loop_filter_params( prevFrame ) */ /* load_segmentation_params( prevFrame ) */ } } static void av1_decode_frame_wrapup(AV1State *state) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { if ((state->frame_state.refresh_frame_flags >> i) & 1) { state->RefOrderHint[i] = state->frame_state.order_hint; state->SavedGmParams[i] = state->GmParams; state->RefFrameType[i] = state->frame_state.frame_type; } } state->frame_state.seen_frame_header = GF_FALSE; /*Otherwise (show_existing_frame is equal to 1), if frame_type is equal to KEY_FRAME, the reference frame loading process as specified in section 7.21 is invoked*/ if ((state->frame_state.show_existing_frame) && (state->frame_state.frame_type == AV1_KEY_FRAME)) { state->frame_state.order_hint = state->RefOrderHint[state->frame_state.frame_to_show_map_idx]; /*OrderHints[ j + LAST_FRAME ] is set equal to SavedOrderHints[state->frame_to_show_map_idx ][ j + LAST_FRAME ] for j = 0..REFS_PER_FRAME-1. gm_params[ ref ][ j ] is set equal to SavedGmParams[ frame_to_show_map_idx ][ ref ][ j ] for ref = LAST_FRAME..ALTREF_FRAME, for j = 0..5.*/ state->GmParams = state->SavedGmParams[state->frame_state.frame_to_show_map_idx]; } } static s32 find_latest_forward(u32 curFrameHint, u8 *shiftedOrderHints, u8 *usedFrame) { u32 i; s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint < curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } return ref; } /*see 7.8 of AV1 spec*/ static void av1_set_frame_refs(AV1State *state, u8 last_frame_idx, u8 gold_frame_idx, s8 *ref_frame_idx) { u32 i; u8 usedFrame[AV1_NUM_REF_FRAMES]; u8 shiftedOrderHints[AV1_NUM_REF_FRAMES]; for (i = 0; i < AV1_REFS_PER_FRAME; i++) ref_frame_idx[i] = -1; ref_frame_idx[AV1_LAST_FRAME - AV1_LAST_FRAME] = last_frame_idx; ref_frame_idx[AV1_GOLDEN_FRAME - AV1_LAST_FRAME] = gold_frame_idx; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { usedFrame[i] = 0; } usedFrame[last_frame_idx] = 1; usedFrame[gold_frame_idx] = 1; u32 curFrameHint = 1 << (state->OrderHintBits - 1); for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { shiftedOrderHints[i] = curFrameHint + av1_get_relative_dist(state->RefOrderHint[i], state->frame_state.order_hint, state); } u8 lastOrderHint = shiftedOrderHints[last_frame_idx]; u8 goldOrderHint = shiftedOrderHints[gold_frame_idx]; /*It is a requirement of bitstream conformance that lastOrderHint is strictly less than curFrameHint.*/ if (lastOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: lastOrderHint(%d) shall be strictly less than curFrameHint(%d)\n", lastOrderHint, curFrameHint)); } //It is a requirement of bitstream conformance that goldOrderHint is strictly less than curFrameHint.
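/* as for lastOrderHint above, a violation is only logged as a warning; parsing continues with the out-of-range hint */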
if (goldOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: goldOrderHint(%d) shall be strictly less than curFrameHint(%d)\n", goldOrderHint, curFrameHint)); } /*find_latest_backward() {*/ s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } /*find_earliest_backward() for BWDREF_FRAME*/ ref = -1; s32 earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_BWDREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } /*find_earliest_backward() for ALTREF2_FRAME*/ ref = -1; earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF2_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } /*The remaining references are set to be forward references in anti-chronological order as follows:*/ const u8 Ref_Frame_List[AV1_REFS_PER_FRAME - 2] = { AV1_LAST2_FRAME, AV1_LAST3_FRAME, AV1_BWDREF_FRAME, AV1_ALTREF2_FRAME, AV1_ALTREF_FRAME }; for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) { u8 refFrame = Ref_Frame_List[i]; if (ref_frame_idx[refFrame - AV1_LAST_FRAME] < 0) { s32 last_ref = find_latest_forward(curFrameHint, shiftedOrderHints, usedFrame); if (last_ref >= 0) { ref_frame_idx[refFrame - AV1_LAST_FRAME] = last_ref; usedFrame[last_ref] = 1; } } } /*Finally, any remaining references are set to the reference frame with smallest output order as follows:*/ ref = -1; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (ref < 0 || hint < earliestOrderHint) { ref = i; earliestOrderHint = hint; } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (ref_frame_idx[i] < 0) { ref_frame_idx[i] = ref; } } } static void av1_parse_uncompressed_header(GF_BitStream *bs, AV1State *state) { Bool error_resilient_mode = GF_FALSE, allow_screen_content_tools = GF_FALSE, force_integer_mv = GF_FALSE; Bool /*use_ref_frame_mvs = GF_FALSE,*/ FrameIsIntra = GF_FALSE, frame_size_override_flag = GF_FALSE; Bool disable_cdf_update = GF_FALSE; u8 showable_frame; u8 primary_ref_frame; u16 idLen = 0; u32 idx; s8 ref_frame_idx[AV1_REFS_PER_FRAME]; AV1StateFrame *frame_state = &state->frame_state; if (state->frame_id_numbers_present_flag) { idLen = (state->additional_frame_id_length_minus_1 + state->delta_frame_id_length_minus_2 + 3); } frame_state->refresh_frame_flags = 0; showable_frame = 0; if (state->reduced_still_picture_header) { frame_state->key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; frame_state->frame_type = AV1_KEY_FRAME; frame_state->show_frame = GF_TRUE; frame_state->show_existing_frame = 0; } else { frame_state->show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (frame_state->show_existing_frame == GF_TRUE) { frame_state->frame_to_show_map_idx = gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); frame_state->frame_type = state->RefFrameType[frame_state->frame_to_show_map_idx]; if
(state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } frame_state->refresh_frame_flags = 0; if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "display_frame_id"); } if (frame_state->frame_type == AV1_KEY_FRAME) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } /* if (film_grain_params_present) { load_grain_params(frame_to_show_map_idx) }*/ return; } frame_state->frame_type = gf_bs_read_int_log(bs, 2, "frame_type"); FrameIsIntra = (frame_state->frame_type == AV1_INTRA_ONLY_FRAME || frame_state->frame_type == AV1_KEY_FRAME); frame_state->show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); if (frame_state->is_first_frame) { frame_state->key_frame = frame_state->seen_seq_header && frame_state->show_frame && frame_state->frame_type == AV1_KEY_FRAME && frame_state->seen_frame_header; } if (frame_state->show_frame && state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } if (frame_state->show_frame) { showable_frame = frame_state->frame_type != AV1_KEY_FRAME; } else { showable_frame = gf_bs_read_int_log(bs, 1, "showable_frame"); } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) error_resilient_mode = GF_TRUE; else error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); } if ((frame_state->frame_type == AV1_KEY_FRAME) && frame_state->show_frame) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { state->RefValid[i] = 0; state->RefOrderHint[i] = 0; } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { state->OrderHints[AV1_LAST_FRAME + i] = 0; } } disable_cdf_update = gf_bs_read_int_log(bs, 1, "disable_cdf_update"); if (state->seq_force_screen_content_tools == 2/*SELECT_SCREEN_CONTENT_TOOLS*/) { allow_screen_content_tools = gf_bs_read_int_log(bs, 1, "allow_screen_content_tools"); } else { allow_screen_content_tools = state->seq_force_screen_content_tools; } if (allow_screen_content_tools) { if (state->seq_force_integer_mv == 2/*SELECT_INTEGER_MV*/) { force_integer_mv = gf_bs_read_int_log(bs, 1, "force_integer_mv"); } else { force_integer_mv = state->seq_force_integer_mv; } } else { force_integer_mv = 0; } if (FrameIsIntra) { force_integer_mv = 1; } if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "current_frame_id"); } if (frame_state->frame_type == AV1_SWITCH_FRAME) frame_size_override_flag = GF_TRUE; else if (state->reduced_still_picture_header) frame_size_override_flag = GF_FALSE; else frame_size_override_flag = gf_bs_read_int_log(bs, 1, "frame_size_override_flag"); frame_state->order_hint = gf_bs_read_int(bs, state->OrderHintBits); if (FrameIsIntra || error_resilient_mode) { primary_ref_frame = AV1_PRIMARY_REF_NONE; } else { primary_ref_frame = gf_bs_read_int_log(bs, 3, "primary_ref_frame"); } if (state->decoder_model_info_present_flag) { u8 buffer_removal_time_present_flag = gf_bs_read_int_log(bs, 1, "buffer_removal_time_present_flag"); if (buffer_removal_time_present_flag) { u32 opNum; for (opNum = 0; opNum < state->operating_points_count; opNum++) { if (state->decoder_model_present_for_this_op[opNum]) { u8 opPtIdc = state->operating_point_idc[opNum]; u8 inTemporalLayer = (opPtIdc >> state->temporal_id) & 1; u8 inSpatialLayer = (opPtIdc >> (state->spatial_id + 8)) & 1; if (opPtIdc == 0 || (inTemporalLayer && inSpatialLayer)) { 
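/* this operating point applies to the current temporal/spatial layer: consume its buffer_removal_time (decoder model info) */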
gf_bs_read_int_log_idx(bs, state->buffer_removal_time_length, "buffer_removal_time", opNum); } } } } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } else { frame_state->refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); } if (!FrameIsIntra || frame_state->refresh_frame_flags != AV1_ALL_FRAMES) { if (error_resilient_mode && state->enable_order_hint) { u32 i = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { u8 ref_order_hint = gf_bs_read_int_log_idx(bs, state->OrderHintBits, "ref_order_hint", i); if (ref_order_hint != state->RefOrderHint[i]) { state->RefValid[i] = 0; } state->RefOrderHint[i] = ref_order_hint; } } } u8 allow_intrabc = 0; if (frame_state->frame_type == AV1_KEY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { if (frame_state->frame_type == AV1_INTRA_ONLY_FRAME) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); if (allow_screen_content_tools && state->UpscaledWidth == state->width) { allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc"); } } else { u32 i = 0; Bool frame_refs_short_signaling = GF_FALSE; if (state->enable_order_hint) { frame_refs_short_signaling = gf_bs_read_int_log(bs, 1, "frame_refs_short_signaling"); if (frame_refs_short_signaling) { u8 last_frame_idx = gf_bs_read_int_log(bs, 3, "last_frame_idx"); u8 gold_frame_idx = gf_bs_read_int_log(bs, 3, "gold_frame_idx"); av1_set_frame_refs(state, last_frame_idx, gold_frame_idx, ref_frame_idx); } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (!frame_refs_short_signaling) ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); if (state->frame_id_numbers_present_flag) { u32 n = state->delta_frame_id_length_minus_2 + 2; /*delta_frame_id_minus_1 =*/ gf_bs_read_int_log_idx(bs, n, "delta_frame_id_minus1", i); /*DeltaFrameId = delta_frame_id_minus_1 + 1; expectedFrameId[i] = ((current_frame_id + (1 << idLen) - DeltaFrameId) % (1 << idLen));*/ } } if (frame_size_override_flag && !error_resilient_mode) { frame_size_with_refs(bs, state, frame_size_override_flag); } else { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } frame_state->allow_high_precision_mv = 0; if (!force_integer_mv) { frame_state->allow_high_precision_mv = gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); } read_interpolation_filter(bs); gf_bs_read_int_log(bs, 1, "is_motion_mode_switchable"); if (!(error_resilient_mode || !state->enable_ref_frame_mvs)) { gf_bs_read_int_log(bs, 1, "use_ref_frame_mvs"); } } } if (!FrameIsIntra) { u32 i; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refFrame = AV1_LAST_FRAME + i; s8 ridx = ref_frame_idx[i]; /*keep the index signed: entries may still be -1, so the guard below stays effective*/ if (ridx >= 0) { u8 hint = state->RefOrderHint[ridx]; state->OrderHints[refFrame] = hint; /* if ( !enable_order_hint ) { RefFrameSignBias[ refFrame ] = 0; } else { RefFrameSignBias[ refFrame ] = get_relative_dist( hint, OrderHint) > 0; } */ } } } if (!(state->reduced_still_picture_header || disable_cdf_update)) gf_bs_read_int_log(bs, 1, "disable_frame_end_update_cdf"); if (primary_ref_frame == AV1_PRIMARY_REF_NONE) { /*init_non_coeff_cdfs();*/ av1_setup_past_independence(state); } else { /*load_cdfs(ref_frame_idx[primary_ref_frame]);*/ av1_load_previous(state, primary_ref_frame, ref_frame_idx); } av1_parse_tile_info(bs, state);
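/* The block below mirrors quantization_params() from the AV1 spec (section 5.9.12).
   Each delta-Q value is su(1+6) coded: a 7-bit field interpreted as two's complement.
   A minimal standalone sketch of that decode (assuming a gf_bs_read_int-style reader):
       s32 v = gf_bs_read_int(bs, 7);   // 1 sign bit + 6 magnitude bits
       if (v & 0x40) v -= 0x80;         // fold into [-64, 63]
   which is exactly what av1_delta_q() above computes via signMask. */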
/*quantization_params( ):*/ u8 base_q_idx = gf_bs_read_int_log(bs, 8, "base_q_idx"); s32 DeltaQUDc = 0; s32 DeltaQUAc = 0; s32 DeltaQVDc = 0; s32 DeltaQVAc = 0; s32 DeltaQYDc = av1_delta_q(bs, "DeltaQYDc_coded", "DeltaQYDc"); if (!state->config->monochrome) { u8 diff_uv_delta = 0; if (state->separate_uv_delta_q) diff_uv_delta = gf_bs_read_int_log(bs, 1, "diff_uv_delta"); DeltaQUDc = av1_delta_q(bs, "DeltaQUDc_coded", "DeltaQUDc"); DeltaQUAc = av1_delta_q(bs, "DeltaQUAc_coded", "DeltaQUAc"); if (diff_uv_delta) { DeltaQVDc = av1_delta_q(bs, "DeltaQVDc_coded", "DeltaQVDc"); DeltaQVAc = av1_delta_q(bs, "DeltaQVAc_coded", "DeltaQVAc"); } } if (gf_bs_read_int_log(bs, 1, "using_qmatrix")) { gf_bs_read_int_log(bs, 4, "qm_y"); gf_bs_read_int_log(bs, 4, "qm_u"); if (state->separate_uv_delta_q) { /*per spec, qm_v is only coded when separate_uv_delta_q is set; otherwise qm_v = qm_u*/ gf_bs_read_int_log(bs, 4, "qm_v"); } } u8 seg_features_SEG_LVL_ALT_Q_enabled[8] = { 0,0,0,0,0,0,0,0 }; s32 seg_features_SEG_LVL_ALT_Q[8] = { 0,0,0,0,0,0,0,0 }; /*segmentation_params( ):*/ u8 segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled) { /*u8 segmentation_temporal_update = 0;*/ u8 segmentation_update_data = 1; if (primary_ref_frame != AV1_PRIMARY_REF_NONE) { u8 segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map == 1) gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); } if (segmentation_update_data == 1) { u32 i, j; for (i = 0; i < 8/*=MAX_SEGMENTS*/; i++) { for (j = 0; j < 8 /*=SEG_LVL_MAX*/; j++) { if (/*feature_enabled = */gf_bs_read_int_log_idx2(bs, 1, "feature_enabled", i, j) == 1) { s32 val; u32 bitsToRead = Segmentation_Feature_Bits[j]; /*this is SEG_LVL_ALT_Q*/ if (!j) seg_features_SEG_LVL_ALT_Q_enabled[i] = 1; if (Segmentation_Feature_Signed[j] == 1) { val = gf_bs_read_int_log_idx2(bs, 1 + bitsToRead, "signed_feature_value", i, j); } else { val = gf_bs_read_int_log_idx2(bs, bitsToRead, "feature_value", i, j); } if (!j) seg_features_SEG_LVL_ALT_Q[i] = val; } } } /*ignore all init steps*/ } } /*delta_q_params():*/ /*u8 delta_q_res = 0;*/ u8 delta_q_present = 0; if (base_q_idx > 0) { delta_q_present = gf_bs_read_int_log(bs, 1, "delta_q_present"); } if (delta_q_present) { gf_bs_read_int_log(bs, 2, "delta_q_res"); } /*delta_lf_params():*/ u8 delta_lf_present = 0; /*u8 delta_lf_res = 0; u8 delta_lf_multi = 0;*/ if (delta_q_present) { if (!allow_intrabc) { delta_lf_present = gf_bs_read_int_log(bs, 1, "delta_lf_present"); } if (delta_lf_present) { gf_bs_read_int_log(bs, 2, "delta_lf_res"); gf_bs_read_int_log(bs, 1, "delta_lf_multi"); } } //init lossless stuff!
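/* CodedLossless follows the spec semantics: the frame is coded lossless only if every
   segment's effective qindex is 0 and all coded delta-Q values (Y DC, U/V DC and AC) are 0.
   AllLossless below additionally requires that superres is inactive (width == UpscaledWidth). */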
u8 CodedLossless = 1; for (idx = 0; idx < 8; idx++) { u8 qindex = av1_get_qindex(GF_TRUE, idx, base_q_idx, delta_q_present, 0/*CurrentQIndex always ignored at this level of parsing*/, segmentation_enabled, seg_features_SEG_LVL_ALT_Q_enabled, seg_features_SEG_LVL_ALT_Q); Bool LosslessArray = (qindex == 0) && (DeltaQYDc == 0) && (DeltaQUAc == 0) && (DeltaQUDc == 0) && (DeltaQVAc == 0) && (DeltaQVDc == 0); if (!LosslessArray) CodedLossless = 0; } Bool AllLossless = CodedLossless && (state->width == state->UpscaledWidth); /*loop_filter_params():*/ if (!CodedLossless && !allow_intrabc) { u8 loop_filter_level_0 = gf_bs_read_int_log(bs, 6, "loop_filter_level_0"); u8 loop_filter_level_1 = gf_bs_read_int_log(bs, 6, "loop_filter_level_1"); if (!state->config->monochrome) { if (loop_filter_level_0 || loop_filter_level_1) { gf_bs_read_int_log(bs, 6, "loop_filter_level_2"); gf_bs_read_int_log(bs, 6, "loop_filter_level_3"); } } gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); u8 loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { u8 loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update) { u32 i; for (i = 0; i < 8/*TOTAL_REFS_PER_FRAME*/; i++) { u8 update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == 1) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_ref_deltas", i); } } for (i = 0; i < 2; i++) { u8 update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta) { gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_mode_deltas", i); } } } } } /*cdef_params( ):*/ if (!CodedLossless && !allow_intrabc && state->enable_cdef) { gf_bs_read_int_log(bs, 2, "cdef_damping_minus_3"); u8 cdef_bits = gf_bs_read_int_log(bs, 2, "cdef_bits"); u32 i, num_cd = 1 << cdef_bits; for (i = 0; i < num_cd; i++) { gf_bs_read_int_log_idx(bs, 4, "cdef_y_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_y_sec_strength", i); if (!state->config->monochrome) { gf_bs_read_int_log_idx(bs, 4, "cdef_uv_pri_strength", i); gf_bs_read_int_log_idx(bs, 2, "cdef_uv_sec_strength", i); } } } /*lr_params( ) :*/ if (!AllLossless && !allow_intrabc && state->enable_restoration) { u32 i, nb_planes = state->config->monochrome ?
1 : 3; u8 UsesLr = 0; u8 usesChromaLr = 0; for (i = 0; i < nb_planes; i++) { u8 lr_type = gf_bs_read_int_log_idx(bs, 2, "lr_type", i); //FrameRestorationType[i] = Remap_Lr_Type[lr_type] if (lr_type != AV1_RESTORE_NONE) { UsesLr = 1; if (i > 0) { usesChromaLr = 1; } } } if (UsesLr) { if (state->use_128x128_superblock) { gf_bs_read_int_log(bs, 1, "lr_unit_shift_minus_1"); } else { u8 lr_unit_shift = gf_bs_read_int_log(bs, 1, "lr_unit_shift"); if (lr_unit_shift) { gf_bs_read_int_log(bs, 1, "lr_unit_extra_shift"); //lr_unit_shift += lr_unit_extra_shift; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y && usesChromaLr) { gf_bs_read_int_log(bs, 1, "lr_uv_shift"); } } } //read_tx_mode(): if (CodedLossless == 1) { } else { gf_bs_read_int_log(bs, 1, "tx_mode_select"); } //frame_reference_mode( ): u8 reference_select = 0; if (FrameIsIntra) { } else { reference_select = gf_bs_read_int_log(bs, 1, "reference_select"); } //skip_mode_params( ): u8 skipModeAllowed = 0; if (FrameIsIntra || !reference_select || !state->enable_order_hint) { } else { u32 i; s32 forwardIdx = -1; s32 backwardIdx = -1; s32 forwardHint = 0; s32 backwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, frame_state->order_hint, state) < 0) { if (forwardIdx < 0 || av1_get_relative_dist(refHint, forwardHint, state) > 0) { forwardIdx = i; forwardHint = refHint; } } else if (av1_get_relative_dist(refHint, frame_state->order_hint, state) > 0) { if (backwardIdx < 0 || av1_get_relative_dist(refHint, backwardHint, state) < 0) { backwardIdx = i; backwardHint = refHint; } } } if (forwardIdx < 0) { skipModeAllowed = 0; } else if (backwardIdx >= 0) { skipModeAllowed = 1; //SkipModeFrame[0] = AV1_LAST_FRAME + MIN(forwardIdx, backwardIdx); //SkipModeFrame[1] = AV1_LAST_FRAME + MAX(forwardIdx, backwardIdx); } else { s32 secondForwardIdx = -1; s32 secondForwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, forwardHint, state) < 0) { if (secondForwardIdx < 0 || av1_get_relative_dist(refHint, secondForwardHint, state) > 0) { secondForwardIdx = i; secondForwardHint = refHint; } } } if (secondForwardIdx < 0) { skipModeAllowed = 0; } else { skipModeAllowed = 1; //SkipModeFrame[ 0 ] = LAST_FRAME + Min(forwardIdx, secondForwardIdx) //SkipModeFrame[ 1 ] = LAST_FRAME + Max(forwardIdx, secondForwardIdx) } } } if (skipModeAllowed) { gf_bs_read_int_log(bs, 1, "skip_mode_present"); } if (FrameIsIntra || error_resilient_mode || !state->enable_warped_motion) { } else { gf_bs_read_int_log(bs, 1, "allow_warped_motion"); } gf_bs_read_int_log(bs, 1, "reduced_tx"); //global_motion_params( ) u32 ref; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { u32 i; for (i = 0; i < 6; i++) { state->GmParams.coefs[ref][i] = ((i % 3 == 2) ? 1 << WARPEDMODEL_PREC_BITS : 0); } } if (!FrameIsIntra) { u32 refs; for (refs = AV1_LAST_FRAME; refs <= AV1_ALTREF_FRAME; refs++) { u8 type = AV1_GMC_IDENTITY; Bool is_global = gf_bs_read_int_log_idx(bs, 1, "is_global", refs); if (is_global) { Bool is_rot_zoom = gf_bs_read_int_log_idx(bs, 1, "is_rot_zoom", refs); if (is_rot_zoom) { type = AV1_GMC_ROTZOOM; } else { Bool is_trans = gf_bs_read_int_log_idx(bs, 1, "is_translation", refs); type = is_trans ? 
AV1_GMC_TRANSLATION : AV1_GMC_AFFINE; } } if (type >= AV1_GMC_ROTZOOM) { av1_read_global_param(state, bs, type, refs, 2); av1_read_global_param(state, bs, type, refs, 3); if (type == AV1_GMC_AFFINE) { av1_read_global_param(state, bs, type, refs, 4); av1_read_global_param(state, bs, type, refs, 5); } else { state->GmParams.coefs[refs][4] = -state->GmParams.coefs[refs][3]; state->GmParams.coefs[refs][5] = state->GmParams.coefs[refs][2]; } } if (type >= AV1_GMC_TRANSLATION) { av1_read_global_param(state, bs, type, refs, 0); av1_read_global_param(state, bs, type, refs, 1); } } } //film_grain_params() if (!state->film_grain_params_present || (!state->frame_state.show_frame && !showable_frame)) { } else { u8 apply_grain = gf_bs_read_int_log(bs, 1, "apply_grain"); if (apply_grain) { gf_bs_read_int_log(bs, 16, "grain_seed"); u8 update_grain = 1; if (state->frame_state.frame_type == AV1_INTER_FRAME) { update_grain = gf_bs_read_int_log(bs, 1, "update_grain"); } if (!update_grain) { gf_bs_read_int_log(bs, 3, "film_grain_params_ref_idx"); } else { u32 i, num_y_points = gf_bs_read_int_log(bs, 4, "num_y_points"); for (i = 0; i < num_y_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_y_value", i); gf_bs_read_int_log_idx(bs, 8, "point_y_scaling", i); } u8 chroma_scaling_from_luma = 0; if (!state->config->monochrome) chroma_scaling_from_luma = gf_bs_read_int_log(bs, 1, "chroma_scaling_from_luma"); u8 num_cb_points = 0; u8 num_cr_points = 0; if (state->config->monochrome || chroma_scaling_from_luma || ((state->config->chroma_subsampling_x == 1) && (state->config->chroma_subsampling_y == 1) && (num_y_points == 0)) ) { } else { num_cb_points = gf_bs_read_int_log(bs, 4, "num_cb_points"); for (i = 0; i < num_cb_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cb_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cb_scaling", i); } num_cr_points = gf_bs_read_int_log(bs, 4, "num_cr_points"); for (i = 0; i < num_cr_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cr_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cr_scaling", i); } } gf_bs_read_int_log(bs, 2, "grain_scaling_minus_8"); u8 ar_coeff_lag = gf_bs_read_int_log(bs, 2, "ar_coeff_lag"); u16 numPosLuma = 2 * ar_coeff_lag * (ar_coeff_lag + 1); u16 numPosChroma = numPosLuma; if (num_y_points) { numPosChroma = numPosLuma + 1; for (i = 0; i < numPosLuma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_y_plus_128", i); } } if (chroma_scaling_from_luma || num_cb_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cb_plus_128", i); } } if (chroma_scaling_from_luma || num_cr_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cr_plus_128", i); } } gf_bs_read_int_log(bs, 2, "ar_coeff_shift_minus_6"); gf_bs_read_int_log(bs, 2, "grain_scale_shift"); if (num_cb_points) { gf_bs_read_int_log(bs, 8, "cb_mult"); gf_bs_read_int_log(bs, 8, "cb_luma_mult"); gf_bs_read_int_log(bs, 9, "cb_offset"); } if (num_cr_points) { gf_bs_read_int_log(bs, 8, "cr_mult"); gf_bs_read_int_log(bs, 8, "cr_luma_mult"); gf_bs_read_int_log(bs, 9, "cr_offset"); } gf_bs_read_int_log(bs, 1, "overlap_flag"); gf_bs_read_int_log(bs, 1, "clip_to_restricted_range"); } } } //end of uncompressed header !! 
} GF_EXPORT void gf_av1_init_state(AV1State *state) { if (!state) return; memset(state, 0, sizeof(AV1State)); state->color_primaries = 2; state->transfer_characteristics = 2; state->matrix_coefficients = 2; } GF_EXPORT void gf_av1_reset_state(AV1State *state, Bool is_destroy) { GF_List *l1, *l2; if (state->frame_state.header_obus) { while (gf_list_count(state->frame_state.header_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.header_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } if (state->frame_state.frame_obus) { while (gf_list_count(state->frame_state.frame_obus)) { GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.frame_obus); if (a->obu) gf_free(a->obu); gf_free(a); } } l1 = state->frame_state.frame_obus; l2 = state->frame_state.header_obus; memset(&state->frame_state, 0, sizeof(AV1StateFrame)); state->frame_state.is_first_frame = GF_TRUE; if (is_destroy) { gf_list_del(l1); gf_list_del(l2); if (state->bs) { if (gf_bs_get_position(state->bs)) { u32 size; gf_bs_get_content_no_truncate(state->bs, &state->frame_obus, &size, &state->frame_obus_alloc); } gf_bs_del(state->bs); } state->bs = NULL; } else { state->frame_state.frame_obus = l1; state->frame_state.header_obus = l2; if (state->bs) gf_bs_seek(state->bs, 0); } } static GF_Err av1_parse_tile_group(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { u32 TileNum, tg_start = 0, tg_end = 0; u32 numTiles = state->tileCols * state->tileRows; Bool tile_start_and_end_present_flag = GF_FALSE; GF_Err e = GF_OK; if (numTiles > 1) tile_start_and_end_present_flag = gf_bs_read_int(bs, 1); if (numTiles == 1 || !tile_start_and_end_present_flag) { tg_start = 0; tg_end = numTiles - 1; /*state->frame_state.tg[0].start_idx = 0; state->frame_state.tg[0].end_idx = numTiles - 1;*/ } else { u32 tileBits = state->tileColsLog2 + state->tileRowsLog2; /*state->frame_state.tg[state->frame_state.tg_idx].start_idx*/ tg_start = gf_bs_read_int(bs, tileBits); /*state->frame_state.tg[state->frame_state.tg_idx].end_idx*/ tg_end = gf_bs_read_int(bs, tileBits); } /*state->frame_state.tg_idx++;*/ gf_bs_align(bs); if (tg_end >= GF_ARRAY_LENGTH(state->frame_state.tiles)) return GF_NON_COMPLIANT_BITSTREAM; state->frame_state.nb_tiles_in_obu = 0; for (TileNum = tg_start; TileNum <= tg_end; TileNum++) { u32 tile_start_offset, tile_size; /*u32 tileRow = TileNum / state->tileCols; u32 tileCol = TileNum % state->tileCols;*/ Bool lastTile = TileNum == tg_end; u64 pos = gf_bs_get_position(bs); if (lastTile) { tile_start_offset = (u32)(pos - obu_start); tile_size = (u32)(obu_size - (pos - obu_start)); } else { u64 tile_size_minus_1 = aom_av1_le(bs, state->tile_size_bytes, "tile_size_minus_1"); pos = gf_bs_get_position(bs); tile_start_offset = (u32)(pos - obu_start); tile_size = (u32)(tile_size_minus_1 + 1/* + state->tile_size_bytes*/); } if (tile_start_offset + tile_size > obu_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Error parsing tile group, tile %d start %d + size %d exceeds OBU length "LLU"\n", TileNum, tile_start_offset, tile_size, obu_size)); e = GF_NON_COMPLIANT_BITSTREAM; break; } state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].obu_start_offset = tile_start_offset; state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].size = tile_size; gf_bs_skip_bytes(bs, tile_size); state->frame_state.nb_tiles_in_obu++; } if (tg_end == numTiles - 1) { av1_decode_frame_wrapup(state); } return e; } static void av1_parse_frame_header(GF_BitStream *bs, AV1State
*state) { AV1StateFrame *frame_state = &state->frame_state; if (frame_state->seen_frame_header == GF_FALSE) { u64 pos = gf_bs_get_position(bs); state->frame_state.show_existing_frame = GF_FALSE; frame_state->seen_frame_header = GF_TRUE; av1_parse_uncompressed_header(bs, state); state->frame_state.is_first_frame = GF_FALSE; state->frame_state.uncompressed_header_bytes = (u32) (gf_bs_get_position(bs) - pos); if (state->frame_state.show_existing_frame) { av1_decode_frame_wrapup(state); frame_state->seen_frame_header = GF_FALSE; } else { /*TileNum = 0;*/ frame_state->seen_frame_header = GF_TRUE; } } } static GF_Err av1_parse_frame(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { av1_parse_frame_header(bs, state); /*byte alignment*/ gf_bs_align(bs); return av1_parse_tile_group(bs, state, obu_start, obu_size); } static void on_aom_av1_eos(void *_state) { AV1State *state = (AV1State *)_state; state->bs_overread = GF_TRUE; } GF_EXPORT GF_Err gf_av1_parse_obu(GF_BitStream *bs, ObuType *obu_type, u64 *obu_size, u32 *obu_hdr_size, AV1State *state) { GF_Err e = GF_OK; u32 hdr_size; u64 pos = gf_bs_get_position(bs); if (!bs || !obu_type || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; gf_bs_set_eos_callback(bs, on_aom_av1_eos, state); state->obu_extension_flag = state->obu_has_size_field = 0; state->temporal_id = state->spatial_id = 0; state->frame_state.uncompressed_header_bytes = 0; e = gf_av1_parse_obu_header(bs, obu_type, &state->obu_extension_flag, &state->obu_has_size_field, &state->temporal_id, &state->spatial_id); if (e) return e; if (state->obu_has_size_field) { *obu_size = (u32)gf_av1_leb128_read(bs, NULL); } else { if (*obu_size >= 1 + state->obu_extension_flag) { *obu_size = *obu_size - 1 - state->obu_extension_flag; } else { GF_LOG(state->config ? GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_CODING, ("[AV1] computed OBU size "LLD" (input value = "LLU"). Skipping.\n", *obu_size - 1 - state->obu_extension_flag, *obu_size)); return GF_NON_COMPLIANT_BITSTREAM; } } hdr_size = (u32)(gf_bs_get_position(bs) - pos); if ((gf_bs_available(bs) < *obu_size) || state->bs_overread) { gf_bs_seek(bs, pos); return GF_BUFFER_TOO_SMALL; } *obu_size += hdr_size; if (obu_hdr_size) *obu_hdr_size = hdr_size; if (*obu_type != OBU_SEQUENCE_HEADER && *obu_type != OBU_TEMPORAL_DELIMITER && state->OperatingPointIdc != 0 && state->obu_extension_flag == 1) { u32 inTemporalLayer = (state->OperatingPointIdc >> state->temporal_id) & 1; u32 inSpatialLayer = (state->OperatingPointIdc >> (state->spatial_id + 8)) & 1; if (!inTemporalLayer || !inSpatialLayer) { *obu_type = -1; gf_bs_seek(bs, pos + *obu_size); return GF_OK; } } e = GF_OK; switch (*obu_type) { case OBU_SEQUENCE_HEADER: av1_parse_sequence_header_obu(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Sequence header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_METADATA: #if 0 /*TODO + sample groups*/ const ObuMetadataType metadata_type = (u32)read_leb128(bs, NULL); we should check for 16 bits limit(AV1MetadataSampleGroupEntry) for ISOBMFF bindings, see https://github.com/AOMediaCodec/av1-isobmff/pull/86#issuecomment-416659538 if (metadata_type == OBU_METADATA_TYPE_ITUT_T35) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_CLL) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_MDCV) { } else if (metadata_type == OBU_METADATA_TYPE_SCALABILITY) { } else if (metadata_type == METADATA_TYPE_TIMECODE) { } #endif GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] parsing for metadata is not implemented. Forwarding.\n")); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Metadata parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME_HEADER: case OBU_REDUNDANT_FRAME_HEADER: if (state->config) { av1_parse_frame_header(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME: e = av1_parse_frame(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TILE_GROUP: if (state->config) { e = av1_parse_tile_group(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Tile group parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TEMPORAL_DELIMITER: state->frame_state.seen_frame_header = GF_FALSE; /*fall through*/ case OBU_PADDING: gf_bs_seek(bs, pos + *obu_size); break; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] unknown OBU type %u (size "LLU"). Skipping.\n", *obu_type, *obu_size)); gf_bs_seek(bs, pos + *obu_size); break; } return e; } GF_EXPORT GF_Err gf_media_prores_parse_bs(GF_BitStream *bs, GF_ProResFrameInfo *prores_frame) { u32 i, j; u64 start, pos; memset(prores_frame, 0, sizeof(GF_ProResFrameInfo)); start = gf_bs_get_position(bs); if (gf_bs_available(bs) < 10) return GF_BUFFER_TOO_SMALL; prores_frame->frame_size = gf_bs_read_u32(bs); prores_frame->frame_identifier = gf_bs_read_u32(bs); if (prores_frame->frame_identifier != GF_4CC('i','c','p','f')) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame identifier, expected \"icpf\" got \"%s\"\n", gf_4cc_to_str(prores_frame->frame_identifier) )); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } /*parse frame header*/ pos = gf_bs_get_position(bs); prores_frame->frame_hdr_size = gf_bs_read_u16(bs); if (gf_bs_available(bs) + 2 < prores_frame->frame_hdr_size) { gf_bs_seek(bs, start); return GF_BUFFER_TOO_SMALL; } gf_bs_read_u8(bs); prores_frame->version = gf_bs_read_u8(bs); prores_frame->encoder_id = gf_bs_read_u32(bs); prores_frame->width = gf_bs_read_u16(bs); prores_frame->height = gf_bs_read_u16(bs); prores_frame->chroma_format = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->interlaced_mode = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->aspect_ratio_information = gf_bs_read_int(bs, 4); prores_frame->framerate_code = gf_bs_read_int(bs, 4); prores_frame->color_primaries = gf_bs_read_u8(bs); prores_frame->transfer_characteristics = gf_bs_read_u8(bs); prores_frame->matrix_coefficients = gf_bs_read_u8(bs); gf_bs_read_int(bs, 4); prores_frame->alpha_channel_type = gf_bs_read_int(bs, 4); gf_bs_read_int(bs, 14); prores_frame->load_luma_quant_matrix = gf_bs_read_int(bs, 1); prores_frame->load_chroma_quant_matrix = gf_bs_read_int(bs, 1); if (prores_frame->load_luma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->luma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } if (prores_frame->load_chroma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->chroma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } pos = gf_bs_get_position(bs) - pos; if (pos != prores_frame->frame_hdr_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame header size, expected %d got %d\n", prores_frame->frame_hdr_size, (u32) pos)); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } prores_frame->nb_pic = ((prores_frame->interlaced_mode==1) || (prores_frame->interlaced_mode==2)) ?
2 : 1; gf_bs_seek(bs, start); return GF_OK; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT u8 gf_mp3_version(u32 hdr) { return ((hdr >> 19) & 0x3); } GF_EXPORT const char *gf_mp3_version_name(u32 hdr) { u32 v = gf_mp3_version(hdr); switch (v) { case 0: return "MPEG-2.5"; case 1: return "Reserved"; case 2: return "MPEG-2"; case 3: return "MPEG-1"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u8 gf_mp3_layer(u32 hdr) { return 4 - (((hdr >> 17) & 0x3)); } GF_EXPORT u8 gf_mp3_num_channels(u32 hdr) { if (((hdr >> 6) & 0x3) == 3) return 1; return 2; } GF_EXPORT u16 gf_mp3_sampling_rate(u32 hdr) { u16 res; /* extract the necessary fields from the MP3 header */ u8 version = gf_mp3_version(hdr); u8 sampleRateIndex = (hdr >> 10) & 0x3; switch (sampleRateIndex) { case 0: res = 44100; break; case 1: res = 48000; break; case 2: res = 32000; break; default: GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] Samplerate index not valid\n")); return 0; } /*reserved or MPEG-1*/ if (version & 1) return res; /*MPEG-2*/ res /= 2; /*MPEG-2.5*/ if (version == 0) res /= 2; return res; } GF_EXPORT u16 gf_mp3_window_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); if (layer == 3) { if (version == 3) return 1152; return 576; } if (layer == 2) return 1152; return 384; } GF_EXPORT u8 gf_mp3_object_type_indication(u32 hdr) { switch (gf_mp3_version(hdr)) { case 3: return GF_CODECID_MPEG_AUDIO; case 2: case 0: return GF_CODECID_MPEG2_PART3; default: return 0x00; } } /*aligned bitrate parsing with libMAD*/ static u32 const bitrate_table[5][15] = { /* MPEG-1 */ { 0, 32000, 64000, 96000, 128000, 160000, 192000, 224000, /* Layer I */ 256000, 288000, 320000, 352000, 384000, 416000, 448000 }, { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer II */ 128000, 160000, 192000, 224000, 256000, 320000, 384000 }, { 0, 32000, 40000, 48000, 56000, 64000, 80000, 96000, /* Layer III */ 112000, 128000, 160000, 192000, 224000, 256000, 320000 }, /* MPEG-2 LSF */ { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer I */ 128000, 144000, 160000, 176000, 192000, 224000, 256000 }, { 0, 8000, 16000, 24000, 32000, 40000, 48000, 56000, /* Layers */ 64000, 80000, 96000, 112000, 128000, 144000, 160000 } /* II & III */ }; u32 gf_mp3_bit_rate(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u8 bitRateIndex = (hdr >> 12) & 0xF; u32 lidx; /*MPEG-1*/ if (version & 1) { if (!layer) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } lidx = layer - 1; } /*MPEG-2/2.5*/ else { lidx = 3 + (layer >> 1); } if (lidx>4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } return bitrate_table[lidx][bitRateIndex]; } GF_EXPORT u16 gf_mp3_frame_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u32 pad = ((hdr >> 9) & 0x1) ? 
1 : 0; u32 bitrate = gf_mp3_bit_rate(hdr); u32 samplerate = gf_mp3_sampling_rate(hdr); u32 frameSize = 0; if (!samplerate || !bitrate) return 0; if (layer == 1) { frameSize = ((12 * bitrate / samplerate) + pad) * 4; } else { u32 slots_per_frame = 144; if ((layer == 3) && !(version & 1)) slots_per_frame = 72; frameSize = (slots_per_frame * bitrate / samplerate) + pad; } return (u16)frameSize; } GF_EXPORT u32 gf_mp3_get_next_header(FILE* in) { u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; while (1) { if (gf_fread(&b, 1, in) == 0) return 0; if (state == 3) { bytes[state] = b; return GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) state = 1; else state = 0; } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { if ((dropped == 0) && ((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[0] = (u8)0xFF; bytes[1] = b; state = 2; } else { dropped++; } } } } return 0; } GF_EXPORT u32 gf_mp3_get_next_header_mem(const u8 *buffer, u32 size, u32 *pos) { u32 cur; u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; cur = 0; *pos = 0; while (cur < size) { b = (u8)buffer[cur]; cur++; if (state == 3) { u32 val; bytes[state] = b; val = GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); if (gf_mp3_frame_size(val)) { *pos = dropped; return val; } state = 0; dropped = cur; } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) { state = 1; dropped += 1; } else { state = 0; dropped = cur; } } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; dropped = cur; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { dropped++; } } } return 0; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT Bool gf_avc_is_rext_profile(u8 profile_idc) { switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: case 83: case 86: case 118: case 128: case 138: case 139: case 134: case 135: return GF_TRUE; default: return GF_FALSE; } } GF_EXPORT const char *gf_avc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x42: return "Baseline"; case 0x4D: return "Main"; case 0x53: return "Scalable Baseline"; case 0x56: return "Scalable High"; case 0x58: return "Extended"; case 0x64: return "High"; case 0x6E: return "High 10"; case 0x7A: return "High 4:2:2"; case 0x90: case 0xF4: return "High 4:4:4"; default: return "Unknown"; } } GF_EXPORT const char *gf_hevc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x01: return "Main"; case 0x02: return "Main 10"; case 0x03: return "Main Still Picture"; default: return "Unknown"; } } GF_EXPORT const char *gf_avc_hevc_get_chroma_format_name(u8 chroma_format) { switch (chroma_format) { case 1: return "YUV 4:2:0"; case 2: return "YUV 4:2:2"; case 3: return "YUV 4:4:4"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS u32 gf_bs_read_ue_log_idx3(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val=0, code; s32 nb_lead = -1; u32 bits = 0; for (code=0; !code; nb_lead++) { if 
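/* Exp-Golomb: nb_lead counts leading zero bits; the decoded value is (1 << nb_lead) - 1 plus an nb_lead-bit suffix, so more than 31 leading zeros is necessarily corrupt */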
(nb_lead>=32) {
			//gf_bs_read_int keeps returning 0 on EOS: if no more bits are available the rbsp was truncated, otherwise the exp-golomb code itself is broken in the rbsp
			//we only test once nb_lead>=32 to avoid testing at each bit read
			if (!gf_bs_available(bs)) {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] exp-golomb read failed, not enough bits in bitstream !\n"));
			} else {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] corrupted exp-golomb code, %d leading zeros, max 31 allowed !\n", nb_lead));
			}
			return 0;
		}
		code = gf_bs_read_int(bs, 1);
		bits++;
	}
	if (nb_lead) {
		val = gf_bs_read_int(bs, nb_lead);
		val += (1 << nb_lead) - 1;
		bits += nb_lead;
	}
	if (fname) {
		gf_bs_log_idx(bs, bits, fname, val, idx1, idx2, idx3);
	}
	return val;
}

#define gf_bs_read_ue_log_idx2(_bs, _fname, _idx1, _idx2) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx1, (s32) _idx2, -1)
#define gf_bs_read_ue_log_idx(_bs, _fname, _idx) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx, -1, -1)
#define gf_bs_read_ue_log(_bs, _fname) gf_bs_read_ue_log_idx3(_bs, _fname, -1, -1, -1)

u32 gf_bs_read_ue(GF_BitStream *bs)
{
	return gf_bs_read_ue_log(bs, NULL);
}

s32 gf_bs_read_se(GF_BitStream *bs)
{
	u32 v = gf_bs_read_ue(bs);
	if ((v & 0x1) == 0) return (s32)(0 - (v >> 1));
	return (v + 1) >> 1;
}

s32 gf_bs_read_se_log_idx2(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2)
{
	s32 res = gf_bs_read_se(bs);
	if (fname)
		gf_bs_log_idx(bs, -1, fname, res, idx1, idx2, -1);
	return res;
}
#define gf_bs_read_se_log_idx(_bs, _fname, _idx) gf_bs_read_se_log_idx2(_bs, _fname, (s32) _idx, -1)
#define gf_bs_read_se_log(_bs, _fname) gf_bs_read_se_log_idx2(_bs, _fname, -1, -1)

void gf_bs_write_ue(GF_BitStream *bs, u32 num)
{
	s32 length = 1;
	s32 temp = ++num;
	while (temp != 1) {
		temp >>= 1;
		length += 2;
	}
	gf_bs_write_int(bs, 0, length >> 1);
	gf_bs_write_int(bs, num, (length + 1) >> 1);
}

void gf_bs_write_se(GF_BitStream *bs, s32 num)
{
	u32 v;
	if (num <= 0)
		v = (-1 * num) << 1;
	else
		v = (num << 1) - 1;
	gf_bs_write_ue(bs, v);
}

u32 gf_media_nalu_is_start_code(GF_BitStream *bs)
{
	u8 s1, s2, s3, s4;
	Bool is_sc = 0;
	u64 pos = gf_bs_get_position(bs);
	s1 = gf_bs_read_int(bs, 8);
	s2 = gf_bs_read_int(bs, 8);
	if (!s1 && !s2) {
		s3 = gf_bs_read_int(bs, 8);
		if (s3 == 0x01) is_sc = 3;
		else if (!s3) {
			s4 = gf_bs_read_int(bs, 8);
			if (s4 == 0x01) is_sc = 4;
		}
	}
	gf_bs_seek(bs, pos + is_sc);
	return is_sc;
}

/*read that amount of data at each IO access rather than fetching byte by byte...*/
#define AVC_CACHE_SIZE	4096

static u32 gf_media_nalu_locate_start_code_bs(GF_BitStream *bs, Bool locate_trailing)
{
	u32 v, bpos, nb_cons_zeros = 0;
	char avc_cache[AVC_CACHE_SIZE];
	u64 end, cache_start, load_size;
	u64 start = gf_bs_get_position(bs);
	if (start < 3) return 0;

	load_size = 0;
	bpos = 0;
	cache_start = 0;
	end = 0;
	v = 0xffffffff;
	while (!end) {
		/*refill cache*/
		if (bpos == (u32)load_size) {
			if (!gf_bs_available(bs)) break;
			load_size = gf_bs_available(bs);
			if (load_size > AVC_CACHE_SIZE) load_size = AVC_CACHE_SIZE;
			bpos = 0;
			cache_start = gf_bs_get_position(bs);
			gf_bs_read_data(bs, avc_cache, (u32)load_size);
		}
		v = ( (v<<8) & 0xFFFFFF00) | ((u32) avc_cache[bpos]);
		bpos++;
		if (locate_trailing) {
			if ((v & 0x000000FF) == 0) nb_cons_zeros++;
			else nb_cons_zeros = 0;
		}
		if (v == 0x00000001) end = cache_start + bpos - 4;
		else if ((v & 0x00FFFFFF) == 0x00000001) end = cache_start + bpos - 3;
	}
	gf_bs_seek(bs, start);
	if (!end) end = gf_bs_get_size(bs);
	if (locate_trailing) {
		if (nb_cons_zeros >= 3)
			return (u32)(end - start - nb_cons_zeros);
	}
	return (u32)(end - start);
}

GF_EXPORT
u32 
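/* returns the number of bytes until the next Annex B start code (00 00 01 or 00 00 00 01), leaving the read position unchanged */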
gf_media_nalu_next_start_code_bs(GF_BitStream *bs)
{
	return gf_media_nalu_locate_start_code_bs(bs, 0);
}

GF_EXPORT
u32 gf_media_nalu_next_start_code(const u8 *data, u32 data_len, u32 *sc_size)
{
	u32 avail = data_len;
	const u8 *cur = data;

	while (cur) {
		u32 v, bpos;
		u8 *next_zero = memchr(cur, 0, avail);
		if (!next_zero) return data_len;

		v = 0xffffff00;
		bpos = (u32)(next_zero - data) + 1;
		while (1) {
			u8 cval;
			if (bpos == (u32)data_len)
				return data_len;
			cval = data[bpos];
			v = ((v << 8) & 0xFFFFFF00) | ((u32)cval);
			bpos++;
			if (v == 0x00000001) {
				*sc_size = 4;
				return bpos - 4;
			}
			else if ((v & 0x00FFFFFF) == 0x00000001) {
				*sc_size = 3;
				return bpos - 3;
			}
			if (cval) break;
		}
		if (bpos >= data_len) break;
		cur = data + bpos;
		avail = data_len - bpos;
	}
	return data_len;
}

Bool gf_media_avc_slice_is_intra(AVCState *avc)
{
	switch (avc->s_info.slice_type) {
	case GF_AVC_TYPE_I:
	case GF_AVC_TYPE2_I:
	case GF_AVC_TYPE_SI:
	case GF_AVC_TYPE2_SI:
		return 1;
	default:
		return 0;
	}
}

#if 0 //unused
Bool gf_media_avc_slice_is_IDR(AVCState *avc)
{
	if (avc->sei.recovery_point.valid) {
		avc->sei.recovery_point.valid = 0;
		return 1;
	}
	if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE)
		return 0;
	return gf_media_avc_slice_is_intra(avc);
}
#endif

static const struct {
	u32 w, h;
} avc_hevc_sar[] = {
	{ 0,   0 }, { 1,   1 }, { 12, 11 }, { 10, 11 },
	{ 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 },
	{ 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 },
	{ 64, 33 }, { 160,99 }, { 4,   3 }, { 3,   2 },
	{ 2,   1 }
};

/*ISO 14496-10 (N11084) E.1.2*/
static void avc_parse_hrd_parameters(GF_BitStream *bs, AVC_HRD *hrd)
{
	int i, cpb_cnt_minus1;

	cpb_cnt_minus1 = gf_bs_read_ue_log(bs, "cpb_cnt_minus1");
	if (cpb_cnt_minus1 > 31)
		GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] invalid cpb_cnt_minus1 value: %d (expected in [0;31])\n", cpb_cnt_minus1));
	gf_bs_read_int_log(bs, 4, "bit_rate_scale");
	gf_bs_read_int_log(bs, 4, "cpb_size_scale");

	/*for( SchedSelIdx = 0; SchedSelIdx <= cpb_cnt_minus1; SchedSelIdx++ ) {*/
	for (i = 0; i <= cpb_cnt_minus1; i++) {
		gf_bs_read_ue_log_idx(bs, "bit_rate_value_minus1", i);
		gf_bs_read_ue_log_idx(bs, "cpb_size_value_minus1", i);
		gf_bs_read_int_log_idx(bs, 1, "cbr_flag", i);
	}
	gf_bs_read_int_log(bs, 5, "initial_cpb_removal_delay_length_minus1");
	hrd->cpb_removal_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "cpb_removal_delay_length_minus1");
	hrd->dpb_output_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "dpb_output_delay_length_minus1");
	hrd->time_offset_length = gf_bs_read_int_log(bs, 5, "time_offset_length");
	return;
}

/*returns the number of emulation prevention bytes that would have to be added to this payload*/
u32 gf_media_nalu_emulation_bytes_add_count(u8 *buffer, u32 nal_size)
{
	u32 i = 0, emulation_bytes_count = 0;
	u8 num_zero = 0;

	while (i < nal_size) {
		/*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003
		other than the following sequences shall not occur at any byte-aligned position:
		- 0x00000300
		- 0x00000301
		- 0x00000302
		- 0x00000303"
		*/
		if (num_zero == 2 && (u8)buffer[i] < 0x04) {
			/*emulation code found*/
			num_zero = 0;
			emulation_bytes_count++;
			if (!buffer[i])
				num_zero = 1;
		}
		else {
			if (!buffer[i])
				num_zero++;
			else
				num_zero = 0;
		}
		i++;
	}
	return emulation_bytes_count;
}

u32 gf_media_nalu_add_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size)
{
	u32 i = 0, emulation_bytes_count = 0;
	u8 num_zero = 0;

	while (i < nal_size) {
		/*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003
		other than the following sequences shall not occur at any byte-aligned position:
		0x00000300 0x00000301 0x00000302 0x00000303" */
		if (num_zero == 2 && (u8)buffer_src[i] < 0x04) {
			/*add emulation code*/
			num_zero = 0;
			buffer_dst[i + emulation_bytes_count] = 0x03;
			emulation_bytes_count++;
			if (!buffer_src[i])
				num_zero = 1;
		}
		else {
			if (!buffer_src[i])
				num_zero++;
			else
				num_zero = 0;
		}
		buffer_dst[i + emulation_bytes_count] = buffer_src[i];
		i++;
	}
	return nal_size + emulation_bytes_count;
}

/*returns the number of emulation prevention bytes found in this payload*/
u32 gf_media_nalu_emulation_bytes_remove_count(const u8 *buffer, u32 nal_size)
{
	u32 i = 0, emulation_bytes_count = 0;
	u8 num_zero = 0;
	if (!buffer || !nal_size) return 0;

	while (i < nal_size) {
		/*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003
		other than the following sequences shall not occur at any byte-aligned position:
		- 0x00000300
		- 0x00000301
		- 0x00000302
		- 0x00000303"
		*/
		if (num_zero == 2 && buffer[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer[i + 1] < 0x04) {
			/*emulation code found*/
			num_zero = 0;
			emulation_bytes_count++;
			i++;
		}
		if (!buffer[i])
			num_zero++;
		else
			num_zero = 0;
		i++;
	}
	return emulation_bytes_count;
}

/*nal_size is updated to allow better error detection*/
GF_EXPORT
u32 gf_media_nalu_remove_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size)
{
	u32 i = 0, emulation_bytes_count = 0;
	u8 num_zero = 0;

	while (i < nal_size) {
		/*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003
		other than the following sequences shall not occur at any byte-aligned position:
		0x00000300 0x00000301 0x00000302 0x00000303" */
		if (num_zero == 2 && buffer_src[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer_src[i + 1] < 0x04) {
			/*emulation code found*/
			num_zero = 0;
			emulation_bytes_count++;
			i++;
		}
		buffer_dst[i - emulation_bytes_count] = buffer_src[i];
		if (!buffer_src[i])
			num_zero++;
		else
			num_zero = 0;
		i++;
	}
	return nal_size - emulation_bytes_count;
}

static s32 gf_avc_read_sps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos, u32 nal_hdr)
{
	AVC_SPS *sps;
	s32 mb_width, mb_height, sps_id = -1;
	u32 profile_idc, level_idc, pcomp, i, chroma_format_idc, cl = 0, cr = 0, ct = 0, cb = 0, luma_bd, chroma_bd;
	u8 separate_colour_plane_flag = 0;

	if (!bs) {
		return -1;
	}
	if (!vui_flag_pos) {
		gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	}
	if (!nal_hdr) {
		gf_bs_read_int_log(bs, 1, "forbidden_zero_bit");
		gf_bs_read_int_log(bs, 2, "nal_ref_idc");
		gf_bs_read_int_log(bs, 5, "nal_unit_type");
	}
	profile_idc = gf_bs_read_int_log(bs, 8, "profile_idc");
	pcomp = gf_bs_read_int_log(bs, 8, "profile_compatibility");
	/*sanity checks*/
	if (pcomp & 0x3)
		return -1;
	level_idc = gf_bs_read_int_log(bs, 8, "level_idc");
	/*SubsetSps is used to be sure that AVC SPS are not going to be scratched
	by subset SPS. According to the SVC standard, a subset SPS can have the same sps_id
	as its base layer, but it does not refer to the same SPS. */
	sps_id = gf_bs_read_ue_log(bs, "sps_id") + GF_SVC_SSPS_ID_SHIFT * subseq_sps;
	if (sps_id >= 32) {
		return -1;
	}
	if (sps_id < 0) {
		return -1;
	}

	luma_bd = chroma_bd = 0;
	sps = &avc->sps[sps_id];
	chroma_format_idc = sps->ChromaArrayType = 1;
	sps->state |= subseq_sps ? 
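/* subset (SVC) SPS are flagged separately so they do not clash with base-layer SPS sharing the same id */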
AVC_SUBSPS_PARSED : AVC_SPS_PARSED;

	/*High Profile and SVC*/
	switch (profile_idc) {
	case 100:
	case 110:
	case 122:
	case 244:
	case 44:
		/*sanity checks: note1 from 7.4.2.1.1 of iso/iec 14496-10-N11084*/
		if (pcomp & 0xE0)
			return -1;
	case 83:
	case 86:
	case 118:
	case 128:
		chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc");
		sps->ChromaArrayType = chroma_format_idc;
		if (chroma_format_idc == 3) {
			separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag");
			/*
			Depending on the value of separate_colour_plane_flag, the value of the variable ChromaArrayType is assigned as follows.
			- If separate_colour_plane_flag is equal to 0, ChromaArrayType is set equal to chroma_format_idc.
			- Otherwise (separate_colour_plane_flag is equal to 1), ChromaArrayType is set equal to 0.
			*/
			if (separate_colour_plane_flag) sps->ChromaArrayType = 0;
		}
		luma_bd = gf_bs_read_ue_log(bs, "luma_bit_depth");
		chroma_bd = gf_bs_read_ue_log(bs, "chroma_bit_depth");
		/*qpprime_y_zero_transform_bypass_flag = */ gf_bs_read_int_log(bs, 1, "qpprime_y_zero_transform_bypass_flag");
		/*seq_scaling_matrix_present_flag*/
		if (gf_bs_read_int_log(bs, 1, "seq_scaling_matrix_present_flag")) {
			u32 k;
			for (k = 0; k < 8; k++) {
				if (gf_bs_read_int_log_idx(bs, 1, "seq_scaling_list_present_flag", k)) {
					u32 z, last = 8, next = 8;
					u32 sl = k < 6 ? 16 : 64;
					for (z = 0; z < sl; z++) {
						if (next) {
							s32 delta = gf_bs_read_se(bs);
							next = (last + delta + 256) % 256;
						}
						last = next ? next : last;
					}
				}
			}
		}
		break;
	}

	sps->profile_idc = profile_idc;
	sps->level_idc = level_idc;
	sps->prof_compat = pcomp;
	sps->log2_max_frame_num = gf_bs_read_ue_log(bs, "log2_max_frame_num") + 4;
	sps->poc_type = gf_bs_read_ue_log(bs, "poc_type");
	sps->chroma_format = chroma_format_idc;
	sps->luma_bit_depth_m8 = luma_bd;
	sps->chroma_bit_depth_m8 = chroma_bd;

	if (sps->poc_type == 0) {
		sps->log2_max_poc_lsb = gf_bs_read_ue_log(bs, "log2_max_poc_lsb") + 4;
	}
	else if (sps->poc_type == 1) {
		sps->delta_pic_order_always_zero_flag = gf_bs_read_int_log(bs, 1, "delta_pic_order_always_zero_flag");
		sps->offset_for_non_ref_pic = gf_bs_read_se_log(bs, "offset_for_non_ref_pic");
		sps->offset_for_top_to_bottom_field = gf_bs_read_se_log(bs, "offset_for_top_to_bottom_field");
		sps->poc_cycle_length = gf_bs_read_ue_log(bs, "poc_cycle_length");
		if (sps->poc_cycle_length > GF_ARRAY_LENGTH(sps->offset_for_ref_frame)) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] offset_for_ref_frame overflow from poc_cycle_length\n"));
			return -1;
		}
		for (i = 0; i < sps->poc_cycle_length; i++)
			sps->offset_for_ref_frame[i] = gf_bs_read_se_log_idx(bs, "offset_for_ref_frame", i);
	}
	if (sps->poc_type > 2) {
		return -1;
	}
	sps->max_num_ref_frames = gf_bs_read_ue_log(bs, "max_num_ref_frames");
	sps->gaps_in_frame_num_value_allowed_flag = gf_bs_read_int_log(bs, 1, "gaps_in_frame_num_value_allowed_flag");
	mb_width = gf_bs_read_ue_log(bs, "pic_width_in_mbs_minus1") + 1;
	mb_height = gf_bs_read_ue_log(bs, "pic_height_in_map_units_minus1") + 1;

	sps->frame_mbs_only_flag = gf_bs_read_int_log(bs, 1, "frame_mbs_only_flag");

	sps->width = mb_width * 16;
	sps->height = (2 - sps->frame_mbs_only_flag) * mb_height * 16;

	if (!sps->frame_mbs_only_flag) sps->mb_adaptive_frame_field_flag = gf_bs_read_int_log(bs, 1, "mb_adaptive_frame_field_flag");
	gf_bs_read_int_log(bs, 1, "direct_8x8_inference_flag");

	if (gf_bs_read_int_log(bs, 1, "frame_cropping_flag")) {
		int CropUnitX, CropUnitY, SubWidthC = -1, SubHeightC = -1;

		if (chroma_format_idc == 1) {
			SubWidthC = 2; SubHeightC = 2;
		}
		else if (chroma_format_idc == 2) {
			SubWidthC = 
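/* 4:2:2: chroma is subsampled horizontally only */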
2; SubHeightC = 1; } else if ((chroma_format_idc == 3) && (separate_colour_plane_flag == 0)) { SubWidthC = 1; SubHeightC = 1; } if (sps->ChromaArrayType == 0) { assert(SubWidthC == -1); CropUnitX = 1; CropUnitY = 2 - sps->frame_mbs_only_flag; } else { CropUnitX = SubWidthC; CropUnitY = SubHeightC * (2 - sps->frame_mbs_only_flag); } cl = gf_bs_read_ue_log(bs, "frame_crop_left_offset"); cr = gf_bs_read_ue_log(bs, "frame_crop_right_offset"); ct = gf_bs_read_ue_log(bs, "frame_crop_top_offset"); cb = gf_bs_read_ue_log(bs, "frame_crop_bottom_offset"); sps->width -= CropUnitX * (cl + cr); sps->height -= CropUnitY * (ct + cb); cl *= CropUnitX; cr *= CropUnitX; ct *= CropUnitY; cb *= CropUnitY; } sps->crop.left = cl; sps->crop.right = cr; sps->crop.top = ct; sps->crop.bottom = cb; if (vui_flag_pos) { *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); } /*vui_parameters_present_flag*/ sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag"); if (sps->vui_parameters_present_flag) { sps->vui.aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->vui.aspect_ratio_info_present_flag) { s32 aspect_ratio_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (aspect_ratio_idc == 255) { sps->vui.par_num = gf_bs_read_int_log(bs, 16, "aspect_ratio_num"); sps->vui.par_den = gf_bs_read_int_log(bs, 16, "aspect_ratio_den"); } else if (aspect_ratio_idc < GF_ARRAY_LENGTH(avc_hevc_sar) ) { sps->vui.par_num = avc_hevc_sar[aspect_ratio_idc].w; sps->vui.par_den = avc_hevc_sar[aspect_ratio_idc].h; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] Unknown aspect_ratio_idc: your video may have a wrong aspect ratio. Contact the GPAC team!\n")); } } sps->vui.overscan_info_present_flag = gf_bs_read_int_log(bs, 1, "overscan_info_present_flag"); if (sps->vui.overscan_info_present_flag) gf_bs_read_int_log(bs, 1, "overscan_appropriate_flag"); /* default values */ sps->vui.video_format = 5; sps->vui.colour_primaries = 2; sps->vui.transfer_characteristics = 2; sps->vui.matrix_coefficients = 2; /* now read values if possible */ sps->vui.video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->vui.video_signal_type_present_flag) { sps->vui.video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->vui.video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); sps->vui.colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"); if (sps->vui.colour_description_present_flag) { sps->vui.colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->vui.transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); sps->vui.matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if (gf_bs_read_int_log(bs, 1, "chroma_location_info_present_flag")) { gf_bs_read_ue_log(bs, "chroma_sample_location_type_top_field"); gf_bs_read_ue_log(bs, "chroma_sample_location_type_bottom_field"); } sps->vui.timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (sps->vui.timing_info_present_flag) { sps->vui.num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->vui.time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->vui.fixed_frame_rate_flag = gf_bs_read_int_log(bs, 1, "fixed_frame_rate_flag"); } sps->vui.nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "nal_hrd_parameters_present_flag"); if (sps->vui.nal_hrd_parameters_present_flag) 
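/* NAL and VCL HRD parameters share the same syntax, hence the same parsing helper */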
avc_parse_hrd_parameters(bs, &sps->vui.hrd); sps->vui.vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vcl_hrd_parameters_present_flag"); if (sps->vui.vcl_hrd_parameters_present_flag) avc_parse_hrd_parameters(bs, &sps->vui.hrd); if (sps->vui.nal_hrd_parameters_present_flag || sps->vui.vcl_hrd_parameters_present_flag) sps->vui.low_delay_hrd_flag = gf_bs_read_int_log(bs, 1, "low_delay_hrd_flag"); sps->vui.pic_struct_present_flag = gf_bs_read_int_log(bs, 1, "pic_struct_present_flag"); } /*end of seq_parameter_set_data*/ if (subseq_sps) { if ((profile_idc == 83) || (profile_idc == 86)) { u8 extended_spatial_scalability_idc; /*parsing seq_parameter_set_svc_extension*/ gf_bs_read_int_log(bs, 1, "inter_layer_deblocking_filter_control_present_flag"); extended_spatial_scalability_idc = gf_bs_read_int_log(bs, 2, "extended_spatial_scalability_idc"); if (sps->ChromaArrayType == 1 || sps->ChromaArrayType == 2) { gf_bs_read_int_log(bs, 1, "chroma_phase_x_plus1_flag"); } if (sps->ChromaArrayType == 1) { gf_bs_read_int_log(bs, 2, "chroma_phase_y_plus1"); } if (extended_spatial_scalability_idc == 1) { if (sps->ChromaArrayType > 0) { gf_bs_read_int_log(bs, 1, "seq_ref_layer_chroma_phase_x_plus1_flag"); gf_bs_read_int_log(bs, 2, "seq_ref_layer_chroma_phase_y_plus1"); } gf_bs_read_se_log(bs, "seq_scaled_ref_layer_left_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_top_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_right_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_bottom_offset"); } if (gf_bs_read_int_log(bs, 1, "seq_tcoeff_level_prediction_flag")) { gf_bs_read_int_log(bs, 1, "adaptive_tcoeff_level_prediction_flag"); } gf_bs_read_int_log(bs, 1, "slice_header_restriction_flag"); if (gf_bs_read_int_log(bs, 1, "svc_vui_parameters_present")) { u32 vui_ext_num_entries_minus1 = gf_bs_read_ue_log(bs, "vui_ext_num_entries_minus1"); for (i = 0; i <= vui_ext_num_entries_minus1; i++) { u8 vui_ext_nal_hrd_parameters_present_flag, vui_ext_vcl_hrd_parameters_present_flag, vui_ext_timing_info_present_flag; gf_bs_read_int_log(bs, 3, "vui_ext_dependency_id"); gf_bs_read_int_log(bs, 4, "vui_ext_quality_id"); gf_bs_read_int_log(bs, 3, "vui_ext_temporal_id"); vui_ext_timing_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_timing_info_present_flag"); if (vui_ext_timing_info_present_flag) { gf_bs_read_int_log(bs, 32, "vui_ext_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vui_ext_time_scale"); gf_bs_read_int_log(bs, 1, "vui_ext_fixed_frame_rate_flag"); } vui_ext_nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_nal_hrd_parameters_present_flag"); if (vui_ext_nal_hrd_parameters_present_flag) { //hrd_parameters( ) } vui_ext_vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_vcl_hrd_parameters_present_flag"); if (vui_ext_vcl_hrd_parameters_present_flag) { //hrd_parameters( ) } if (vui_ext_nal_hrd_parameters_present_flag || vui_ext_vcl_hrd_parameters_present_flag) { gf_bs_read_int_log(bs, 1, "vui_ext_low_delay_hrd_flag"); } gf_bs_read_int_log(bs, 1, "vui_ext_pic_struct_present_flag"); } } } else if ((profile_idc == 118) || (profile_idc == 128)) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[avc-h264] MVC parsing not implemented - skipping parsing end of Subset SPS\n")); return sps_id; } if (gf_bs_read_int_log(bs, 1, "additional_extension2")) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] skipping parsing end of Subset SPS (additional_extension2)\n")); return sps_id; } } return sps_id; } GF_EXPORT s32 gf_avc_read_sps_bs(GF_BitStream *bs, AVCState *avc, u32 
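/* set to 1 when parsing a subset (SVC) sequence parameter set */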
subseq_sps, u32 *vui_flag_pos)
{
	return gf_avc_read_sps_bs_internal(bs, avc, subseq_sps, vui_flag_pos, 0);
}

GF_EXPORT
s32 gf_avc_read_sps(const u8 *sps_data, u32 sps_size, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos)
{
	s32 sps_id = -1;
	GF_BitStream *bs;
	char *sps_data_without_emulation_bytes = NULL;
	u32 sps_data_without_emulation_bytes_size = 0;

	if (vui_flag_pos) {
		/*SPS still contains emulation bytes*/
		sps_data_without_emulation_bytes = gf_malloc(sps_size * sizeof(char));
		sps_data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(sps_data, sps_data_without_emulation_bytes, sps_size);
		bs = gf_bs_new(sps_data_without_emulation_bytes, sps_data_without_emulation_bytes_size, GF_BITSTREAM_READ);

		*vui_flag_pos = 0;
	}
	else {
		bs = gf_bs_new(sps_data, sps_size, GF_BITSTREAM_READ);
	}

	if (!bs) {
		sps_id = -1;
		goto exit;
	}

	sps_id = gf_avc_read_sps_bs(bs, avc, subseq_sps, vui_flag_pos);

exit:
	gf_bs_del(bs);
	if (sps_data_without_emulation_bytes) gf_free(sps_data_without_emulation_bytes);
	return sps_id;
}

static s32 gf_avc_read_pps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 nal_hdr)
{
	s32 pps_id;
	AVC_PPS *pps;

	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);

	if (!nal_hdr) {
		gf_bs_read_int_log(bs, 1, "forbidden_zero_bit");
		gf_bs_read_int_log(bs, 2, "nal_ref_idc");
		gf_bs_read_int_log(bs, 5, "nal_unit_type");
	}
	pps_id = gf_bs_read_ue_log(bs, "pps_id");
	if (pps_id >= 255) {
		return -1;
	}
	pps = &avc->pps[pps_id];
	pps->id = pps_id;

	if (!pps->status) pps->status = 1;
	pps->sps_id = gf_bs_read_ue_log(bs, "sps_id");
	if (pps->sps_id >= 32) {
		pps->sps_id = 0;
		return -1;
	}
	/*sps_id may refer to a regular SPS or a subseq SPS, depending on the coded slice referring to the pps*/
	if (!avc->sps[pps->sps_id].state && !avc->sps[pps->sps_id + GF_SVC_SSPS_ID_SHIFT].state) {
		return -1;
	}
	avc->pps_active_idx = pps->id; /*set active pps*/
	avc->sps_active_idx = pps->sps_id; /*set active sps*/
	pps->entropy_coding_mode_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_mode_flag");
	pps->pic_order_present = gf_bs_read_int_log(bs, 1, "pic_order_present");
	pps->slice_group_count = gf_bs_read_ue_log(bs, "slice_group_count_minus1") + 1;
	if (pps->slice_group_count > 1) {
		u32 iGroup;
		pps->mb_slice_group_map_type = gf_bs_read_ue_log(bs, "mb_slice_group_map_type");
		if (pps->mb_slice_group_map_type == 0) {
			for (iGroup = 0; iGroup <= pps->slice_group_count - 1; iGroup++)
				gf_bs_read_ue_log_idx(bs, "run_length_minus1", iGroup);
		}
		else if (pps->mb_slice_group_map_type == 2) {
			for (iGroup = 0; iGroup < pps->slice_group_count - 1; iGroup++) {
				gf_bs_read_ue_log_idx(bs, "top_left", iGroup);
				gf_bs_read_ue_log_idx(bs, "bottom_right", iGroup);
			}
		}
		else if (pps->mb_slice_group_map_type == 3 || pps->mb_slice_group_map_type == 4 || pps->mb_slice_group_map_type == 5) {
			gf_bs_read_int_log(bs, 1, "slice_group_change_direction_flag");
			gf_bs_read_ue_log(bs, "slice_group_change_rate_minus1");
		}
		else if (pps->mb_slice_group_map_type == 6) {
			u32 i;
			pps->pic_size_in_map_units_minus1 = gf_bs_read_ue_log(bs, "pic_size_in_map_units_minus1");
			for (i = 0; i <= pps->pic_size_in_map_units_minus1; i++) {
				gf_bs_read_int_log_idx(bs, (u32)ceil(log(pps->slice_group_count) / log(2)), "slice_group_id", i);
			}
		}
	}
	pps->num_ref_idx_l0_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active_minus1");
	pps->num_ref_idx_l1_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active_minus1");
	/*
	if ((pps->ref_count[0] > 32) || (pps->ref_count[1] > 32)) goto exit;
	*/
	pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, 
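/* explicit weighted prediction for P/SP slices */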
"weighted_pred_flag"); gf_bs_read_int_log(bs, 2, "weighted_bipred_idc"); gf_bs_read_se_log(bs, "init_qp_minus26"); gf_bs_read_se_log(bs, "init_qs_minus26"); gf_bs_read_se_log(bs, "chroma_qp_index_offset"); pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"); gf_bs_read_int_log(bs, 1, "constrained_intra_pred"); pps->redundant_pic_cnt_present = gf_bs_read_int_log(bs, 1, "redundant_pic_cnt_present"); return pps_id; } GF_EXPORT s32 gf_avc_read_pps_bs(GF_BitStream *bs, AVCState *avc) { return gf_avc_read_pps_bs_internal(bs, avc, 0); } GF_EXPORT s32 gf_avc_read_pps(const u8 *pps_data, u32 pps_size, AVCState *avc) { GF_BitStream *bs; s32 pps_id; /*PPS still contains emulation bytes*/ bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { return -1; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); pps_id = gf_avc_read_pps_bs(bs, avc); gf_bs_del(bs); return pps_id; } #if 0 //unused s32 gf_avc_read_sps_ext(const char *spse_data, u32 spse_size) { GF_BitStream *bs; s32 sps_id; bs = gf_bs_new(spse_data, spse_size, GF_BITSTREAM_READ); sps_id = gf_avc_read_sps_ext_bs(bs); gf_bs_del(bs); return sps_id; } #endif static s32 SVC_ReadNal_header_extension(GF_BitStream *bs, SVC_NALUHeader *NalHeader) { gf_bs_read_int_log(bs, 1, "reserved_one_bit"); NalHeader->idr_pic_flag = gf_bs_read_int_log(bs, 1, "idr_flag"); NalHeader->priority_id = gf_bs_read_int_log(bs, 6, "priority_id"); gf_bs_read_int_log(bs, 1, "no_inter_layer_pred_flag"); NalHeader->dependency_id = gf_bs_read_int_log(bs, 3, "DependencyId"); NalHeader->quality_id = gf_bs_read_int_log(bs, 4, "quality_id"); NalHeader->temporal_id = gf_bs_read_int_log(bs, 3, "temporal_id"); gf_bs_read_int_log(bs, 1, "use_ref_base_pic_flag"); gf_bs_read_int_log(bs, 1, "discardable_flag"); gf_bs_read_int_log(bs, 1, "output_flag"); gf_bs_read_int_log(bs, 2, "reserved_three_2bits"); return 1; } static void ref_pic_list_modification(GF_BitStream *bs, u32 slice_type) { if (slice_type % 5 != 2 && slice_type % 5 != 4) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } if (slice_type % 5 == 1) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } } static void pred_weight_table(GF_BitStream *bs, u32 slice_type, u32 ChromaArrayType, u32 num_ref_idx_l0_active_minus1, u32 num_ref_idx_l1_active_minus1) { u32 i, j; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) { gf_bs_read_ue_log(bs, "chroma_log2_weight_denom"); } for (i = 0; i <= num_ref_idx_l0_active_minus1; i++) { if (gf_bs_read_int_log_idx(bs, 1, "luma_weight_l0_flag", i)) { 
gf_bs_read_se_log_idx(bs, "luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l0_flag", i)) for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l0", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l0", i, j); } } } if (slice_type % 5 == 1) { for (i = 0; i <= num_ref_idx_l1_active_minus1; i++) { if (gf_bs_read_int_log_idx(bs, 1, "luma_weight_l1_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l1", i); gf_bs_read_se_log_idx(bs, "luma_offset_l1", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l1_flag", i)) { for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l1", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l1", i, j); } } } } } } static void dec_ref_pic_marking(GF_BitStream *bs, Bool IdrPicFlag) { if (IdrPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); gf_bs_read_int_log(bs, 1, "long_term_reference_flag"); } else { if (gf_bs_read_int_log(bs, 1, "adaptive_ref_pic_marking_mode_flag")) { u32 idx=0, memory_management_control_operation; do { memory_management_control_operation = gf_bs_read_ue_log_idx(bs, "memory_management_control_operation", idx); if (memory_management_control_operation == 1 || memory_management_control_operation == 3) gf_bs_read_ue_log_idx(bs, "difference_of_pic_nums_minus1", idx); if (memory_management_control_operation == 2) gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); if (memory_management_control_operation == 3 || memory_management_control_operation == 6) gf_bs_read_ue_log_idx(bs, "long_term_frame_idx", idx); if (memory_management_control_operation == 4) gf_bs_read_ue_log_idx(bs, "max_long_term_frame_idx_plus1", idx); idx++; } while (memory_management_control_operation != 0); } } } static s32 avc_parse_slice(GF_BitStream *bs, AVCState *avc, Bool svc_idr_flag, AVCSliceInfo *si) { s32 pps_id, num_ref_idx_l0_active_minus1 = 0, num_ref_idx_l1_active_minus1 = 0; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id]; if (!si->sps->log2_max_frame_num) return -2; avc->sps_active_idx = si->pps->sps_id; avc->pps_active_idx = pps_id; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; si->bottom_field_flag = 0; if (!si->sps->frame_mbs_only_flag) { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if ((si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) || svc_idr_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "poc_lsb"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } if 
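/* B slices carry the direct-mode flag and may override both reference list sizes */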
(si->slice_type % 5 == GF_AVC_TYPE_B) {
		gf_bs_read_int_log(bs, 1, "direct_spatial_mv_pred_flag");
	}

	num_ref_idx_l0_active_minus1 = si->pps->num_ref_idx_l0_default_active_minus1;
	num_ref_idx_l1_active_minus1 = si->pps->num_ref_idx_l1_default_active_minus1;

	if (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_B) {
		Bool num_ref_idx_active_override_flag = gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag");
		if (num_ref_idx_active_override_flag) {
			num_ref_idx_l0_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_active_minus1");
			if (si->slice_type % 5 == GF_AVC_TYPE_B) {
				num_ref_idx_l1_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_active_minus1");
			}
		}
	}

	if (si->nal_unit_type == 20 || si->nal_unit_type == 21) {
		//ref_pic_list_mvc_modification(); /* specified in Annex H */
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] unimplemented ref_pic_list_mvc_modification() in slice header\n"));
		assert(0);
		return -1;
	}
	else {
		ref_pic_list_modification(bs, si->slice_type);
	}

	if ((si->pps->weighted_pred_flag && (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP))
		|| (si->pps->weighted_bipred_idc == 1 && si->slice_type % 5 == GF_AVC_TYPE_B)) {
		pred_weight_table(bs, si->slice_type, si->sps->ChromaArrayType, num_ref_idx_l0_active_minus1, num_ref_idx_l1_active_minus1);
	}

	if (si->nal_ref_idc != 0) {
		dec_ref_pic_marking(bs, (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE));
	}

	if (si->pps->entropy_coding_mode_flag && si->slice_type % 5 != GF_AVC_TYPE_I && si->slice_type % 5 != GF_AVC_TYPE_SI) {
		gf_bs_read_ue_log(bs, "cabac_init_idc");
	}

	/*slice_qp_delta = */gf_bs_read_se(bs);
	if (si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_SI) {
		if (si->slice_type % 5 == GF_AVC_TYPE_SP) {
			gf_bs_read_int_log(bs, 1, "sp_for_switch_flag");
		}
		gf_bs_read_se_log(bs, "slice_qs_delta");
	}
	if (si->pps->deblocking_filter_control_present_flag) {
		if (gf_bs_read_ue_log(bs, "disable_deblocking_filter_idc") != 1) {
			gf_bs_read_se_log(bs, "slice_alpha_c0_offset_div2");
			gf_bs_read_se_log(bs, "slice_beta_offset_div2");
		}
	}
	if (si->pps->slice_group_count > 1 && si->pps->mb_slice_group_map_type >= 3 && si->pps->mb_slice_group_map_type <= 5) {
		gf_bs_read_int_log(bs, (u32)ceil(log1p((si->pps->pic_size_in_map_units_minus1 + 1) / (si->pps->slice_group_change_rate_minus1 + 1)) / log(2)), "slice_group_change_cycle");
	}
	return 0;
}

static s32 svc_parse_slice(GF_BitStream *bs, AVCState *avc, AVCSliceInfo *si)
{
	s32 pps_id;

	/*s->current_picture.reference= h->nal_ref_idc != 0;*/
	gf_bs_read_ue_log(bs, "first_mb_in_slice");
	si->slice_type = gf_bs_read_ue_log(bs, "slice_type");
	if (si->slice_type > 9) return -1;

	pps_id = gf_bs_read_ue_log(bs, "pps_id");
	if (pps_id > 255) return -1;
	si->pps = &avc->pps[pps_id];
	si->pps->id = pps_id;
	if (!si->pps->slice_group_count) return -2;
	si->sps = &avc->sps[si->pps->sps_id + GF_SVC_SSPS_ID_SHIFT];
	if (!si->sps->log2_max_frame_num) return -2;

	si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num");

	si->field_pic_flag = 0;
	if (si->sps->frame_mbs_only_flag) {
		/*s->picture_structure= PICT_FRAME;*/
	}
	else {
		si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag");
		if (si->field_pic_flag)
			si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag");
	}
	if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE || si->NalHeader.idr_pic_flag)
		si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id");

	if (si->sps->poc_type == 0) {
		si->poc_lsb = gf_bs_read_int_log(bs, 
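/* fixed-length POC LSBs, length signalled in the SPS */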
si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "delta_poc_bottom"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } return 0; } static s32 avc_parse_recovery_point_sei(GF_BitStream *bs, AVCState *avc) { AVCSeiRecoveryPoint *rp = &avc->sei.recovery_point; rp->frame_cnt = gf_bs_read_ue_log(bs, "frame_cnt"); rp->exact_match_flag = gf_bs_read_int_log(bs, 1, "exact_match_flag"); rp->broken_link_flag = gf_bs_read_int_log(bs, 1, "broken_link_flag"); rp->changing_slice_group_idc = gf_bs_read_int_log(bs, 2, "changing_slice_group_idc"); rp->valid = 1; return 0; } /*for interpretation see ISO 14496-10 N.11084, table D-1*/ static s32 avc_parse_pic_timing_sei(GF_BitStream *bs, AVCState *avc) { int sps_id = avc->sps_active_idx; const char NumClockTS[] = { 1, 1, 1, 2, 2, 3, 3, 2, 3 }; AVCSeiPicTiming *pt = &avc->sei.pic_timing; if (sps_id < 0) { /*sps_active_idx equals -1 when no sps has been detected. In this case SEI should not be decoded.*/ assert(0); return 1; } if (avc->sps[sps_id].vui.nal_hrd_parameters_present_flag || avc->sps[sps_id].vui.vcl_hrd_parameters_present_flag) { /*CpbDpbDelaysPresentFlag, see 14496-10(2003) E.11*/ gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.cpb_removal_delay_length_minus1, "cpb_removal_delay_minus1"); gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.dpb_output_delay_length_minus1, "dpb_output_delay_minus1"); } /*ISO 14496-10 (2003), D.8.2: we need to get pic_struct in order to know if we display top field first or bottom field first*/ if (avc->sps[sps_id].vui.pic_struct_present_flag) { int i; pt->pic_struct = gf_bs_read_int_log(bs, 4, "pic_struct"); if (pt->pic_struct > 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] invalid pic_struct value %d\n", pt->pic_struct)); return 1; } for (i = 0; i < NumClockTS[pt->pic_struct]; i++) { if (gf_bs_read_int_log_idx(bs, 1, "clock_timestamp_flag", i)) { Bool full_timestamp_flag; gf_bs_read_int_log_idx(bs, 2, "ct_type", i); gf_bs_read_int_log_idx(bs, 1, "nuit_field_based_flag", i); gf_bs_read_int_log_idx(bs, 5, "counting_type", i); full_timestamp_flag = gf_bs_read_int_log_idx(bs, 1, "full_timestamp_flag", i); gf_bs_read_int_log_idx(bs, 1, "discontinuity_flag", i); gf_bs_read_int_log_idx(bs, 1, "cnt_dropped_flag", i); gf_bs_read_int_log_idx(bs, 8, "n_frames", i); if (full_timestamp_flag) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } else { if (gf_bs_read_int_log_idx(bs, 1, "seconds_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); if (gf_bs_read_int_log_idx(bs, 1, "minutes_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); if (gf_bs_read_int_log_idx(bs, 1, "hours_flag", i)) { gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } } } if (avc->sps[sps_id].vui.hrd.time_offset_length > 0) gf_bs_read_int_log_idx(bs, avc->sps[sps_id].vui.hrd.time_offset_length, "time_offset", i); } } } } return 0; } #if !defined(GPAC_DISABLE_HEVC) static void avc_parse_itu_t_t35_sei(GF_BitStream* bs, AVCSeiItuTT35DolbyVision *dovi) { u8 itu_t_t35_country_code = gf_bs_read_u8(bs); u16 
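/* ITU-T T.35: provider code and user identifier follow the country code; the values tested below identify a Dolby Vision RPU */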
terminal_provider_code = gf_bs_read_u16(bs); u32 user_id = gf_bs_read_u32(bs); u8 data_type_code = gf_bs_read_u8(bs); if (itu_t_t35_country_code == 0xB5 && terminal_provider_code == 0x31 && user_id == 0x47413934 && (data_type_code == 0x8 || data_type_code == 0x9)) { dovi->rpu_flag = GF_TRUE; } } #endif static void avc_compute_poc(AVCSliceInfo *si) { enum { AVC_PIC_FRAME, AVC_PIC_FIELD_TOP, AVC_PIC_FIELD_BOTTOM, } pic_type; s32 field_poc[2] = { 0,0 }; s32 max_frame_num; if (!si->sps) return; max_frame_num = 1 << (si->sps->log2_max_frame_num); /* picture type */ if (si->sps->frame_mbs_only_flag || !si->field_pic_flag) pic_type = AVC_PIC_FRAME; else if (si->bottom_field_flag) pic_type = AVC_PIC_FIELD_BOTTOM; else pic_type = AVC_PIC_FIELD_TOP; /* frame_num_offset */ if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; si->frame_num_offset = 0; } else { if (si->frame_num < si->frame_num_prev) si->frame_num_offset = si->frame_num_offset_prev + max_frame_num; else si->frame_num_offset = si->frame_num_offset_prev; } /*ISO 14496-10 N.11084 8.2.1.1*/ if (si->sps->poc_type == 0) { const u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*ISO 14496-10 N.11084 eq (8-3)*/ if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; /*ISO 14496-10 N.11084 eq (8-4)*/ if (pic_type != AVC_PIC_FIELD_BOTTOM) field_poc[0] = si->poc_msb + si->poc_lsb; /*ISO 14496-10 N.11084 eq (8-5)*/ if (pic_type != AVC_PIC_FIELD_TOP) { if (!si->field_pic_flag) field_poc[1] = field_poc[0] + si->delta_poc_bottom; else field_poc[1] = si->poc_msb + si->poc_lsb; } } /*ISO 14496-10 N.11084 8.2.1.2*/ else if (si->sps->poc_type == 1) { u32 i; s32 abs_frame_num, expected_delta_per_poc_cycle, expected_poc; if (si->sps->poc_cycle_length) abs_frame_num = si->frame_num_offset + si->frame_num; else abs_frame_num = 0; if (!si->nal_ref_idc && (abs_frame_num > 0)) abs_frame_num--; expected_delta_per_poc_cycle = 0; for (i = 0; i < si->sps->poc_cycle_length; i++) expected_delta_per_poc_cycle += si->sps->offset_for_ref_frame[i]; if (abs_frame_num > 0) { const u32 poc_cycle_cnt = (abs_frame_num - 1) / si->sps->poc_cycle_length; const u32 frame_num_in_poc_cycle = (abs_frame_num - 1) % si->sps->poc_cycle_length; expected_poc = poc_cycle_cnt * expected_delta_per_poc_cycle; for (i = 0; i <= frame_num_in_poc_cycle; i++) expected_poc += si->sps->offset_for_ref_frame[i]; } else { expected_poc = 0; } if (!si->nal_ref_idc) expected_poc += si->sps->offset_for_non_ref_pic; field_poc[0] = expected_poc + si->delta_poc[0]; field_poc[1] = field_poc[0] + si->sps->offset_for_top_to_bottom_field; if (pic_type == AVC_PIC_FRAME) field_poc[1] += si->delta_poc[1]; } /*ISO 14496-10 N.11084 8.2.1.3*/ else if (si->sps->poc_type == 2) { int poc; if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { poc = 0; } else { const int abs_frame_num = si->frame_num_offset + si->frame_num; poc = 2 * abs_frame_num; if (!si->nal_ref_idc) poc -= 1; } field_poc[0] = poc; field_poc[1] = poc; } /*ISO 14496-10 N.11084 eq (8-1)*/ if (pic_type == AVC_PIC_FRAME) si->poc = MIN(field_poc[0], field_poc[1]); else if (pic_type == AVC_PIC_FIELD_TOP) si->poc = field_poc[0]; else si->poc = field_poc[1]; } GF_EXPORT s32 gf_avc_parse_nalu(GF_BitStream *bs, AVCState *avc) { u8 idr_flag; s32 slice, ret; u32 
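/* returns 1 when this NAL starts a new access unit, 0 otherwise, a negative value on parsing error */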
nal_hdr; AVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nal_hdr = gf_bs_read_u8(bs); slice = 0; memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo)); avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F; n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3; idr_flag = 0; switch (n_state.nal_unit_type) { case GF_AVC_NALU_ACCESS_UNIT: case GF_AVC_NALU_END_OF_SEQ: case GF_AVC_NALU_END_OF_STREAM: ret = 1; break; case GF_AVC_NALU_SVC_SLICE: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); // slice buffer - read the info and compare. /*ret = */svc_parse_slice(bs, avc, &n_state); if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } avc_compute_poc(&n_state); if (avc->s_info.poc != n_state.poc) { memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 1; } memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 0; case GF_AVC_NALU_SVC_PREFIX_NALU: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); return 0; case GF_AVC_NALU_IDR_SLICE: case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: slice = 1; /* slice buffer - read the info and compare.*/ ret = avc_parse_slice(bs, avc, idr_flag, &n_state); if (ret < 0) return ret; ret = 0; if ( ((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE)) && (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE) ) { break; } if (avc->s_info.frame_num != n_state.frame_num) { ret = 1; break; } if (avc->s_info.field_pic_flag != n_state.field_pic_flag) { ret = 1; break; } if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) && (!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) { ret = 1; break; } assert(avc->s_info.sps); if (avc->s_info.sps->poc_type == n_state.sps->poc_type) { if (!avc->s_info.sps->poc_type) { if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) { ret = 1; break; } if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) { ret = 1; break; } } else if (avc->s_info.sps->poc_type == 1) { if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) { ret = 1; break; } if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) { ret = 1; break; } } } if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) { if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/ ret = 1; break; } else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/ ret = 1; break; } } break; case GF_AVC_NALU_SEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_PIC_PARAM: avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SVC_SUBSEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEQ_PARAM_EXT: avc->last_ps_idx = (s32) gf_bs_read_ue(bs); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEI: case GF_AVC_NALU_FILLER_DATA: return 0; default: if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1; //To detect change of AU when multiple sps and pps in stream else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else ret 
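/* any other NAL type does not by itself start a new access unit */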
= 0; break; } /* save _prev values */ if (ret && avc->s_info.sps) { n_state.frame_num_offset_prev = avc->s_info.frame_num_offset; if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0)) n_state.frame_num_prev = avc->s_info.frame_num; if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } } if (slice) avc_compute_poc(&n_state); memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return ret; } u32 gf_media_avc_reformat_sei(u8 *buffer, u32 nal_size, Bool isobmf_rewrite, AVCState *avc) { u32 ptype, psize, hdr, var; u32 start; GF_BitStream *bs; GF_BitStream *bs_dest = NULL; u8 nhdr; Bool sei_removed = GF_FALSE; char store; hdr = buffer[0]; if ((hdr & 0x1F) != GF_AVC_NALU_SEI) return 0; if (isobmf_rewrite) bs_dest = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nhdr = gf_bs_read_int(bs, 8); if (bs_dest) gf_bs_write_int(bs_dest, nhdr, 8); /*parse SEI*/ while (gf_bs_available(bs)) { Bool do_copy; ptype = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); ptype += v; if (v != 0xFF) break; } psize = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); psize += v; if (v != 0xFF) break; } start = (u32)gf_bs_get_position(bs); do_copy = 1; if (start + psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message type %d size error (%d but %d remain), keeping full SEI untouched\n", ptype, psize, nal_size - start)); if (bs_dest) gf_bs_del(bs_dest); return nal_size; } switch (ptype) { /*remove SEI messages forbidden in MP4*/ case 3: /*filler data*/ case 10: /*sub_seq info*/ case 11: /*sub_seq_layer char*/ case 12: /*sub_seq char*/ do_copy = 0; sei_removed = GF_TRUE; break; case 5: /*user unregistered */ store = buffer[start + psize]; buffer[start + psize] = 0; GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[avc-h264] SEI user message %s\n", buffer + start + 16)); buffer[start + psize] = store; break; case 6: /*recovery point*/ avc_parse_recovery_point_sei(bs, avc); break; case 1: /*pic_timing*/ avc_parse_pic_timing_sei(bs, avc); break; case 0: /*buffering period*/ case 2: /*pan scan rect*/ case 4: /*user registered ITU t35*/ case 7: /*def_rec_pic_marking_repetition*/ case 8: /*spare_pic*/ case 9: /*scene info*/ case 13: /*full frame freeze*/ case 14: /*full frame freeze release*/ case 15: /*full frame snapshot*/ case 16: /*progressive refinement segment start*/ case 17: /*progressive refinement segment end*/ case 18: /*motion constrained slice group*/ default: /*add all unknown SEIs*/ break; } if (do_copy && bs_dest) { var = ptype; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); var = psize; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); gf_bs_seek(bs, start); //bs_read_data does not skip EPB, read byte per byte var = psize; while (var) { gf_bs_write_u8(bs_dest, gf_bs_read_u8(bs)); var--; } } else { gf_bs_seek(bs, start); //bs_skip_bytes does not skip EPB, skip byte per byte while (psize) { gf_bs_read_u8(bs); psize--; } } if (gf_bs_available(bs) <= 2) { var = gf_bs_read_int(bs, 8); if (var != 0x80) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message has less than 2 bytes remaining but no end of sei found\n")); } if (bs_dest) gf_bs_write_int(bs_dest, 0x80, 8); break; } } gf_bs_del(bs); //we cannot compare final size and original size since original may have EPB and final does not yet have them if 
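/* rebuild the NAL only if at least one SEI message was dropped, re-adding emulation prevention bytes */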
(bs_dest && sei_removed) { u8 *dst_no_epb = NULL; u32 dst_no_epb_size = 0; gf_bs_get_content(bs_dest, &dst_no_epb, &dst_no_epb_size); nal_size = gf_media_nalu_add_emulation_bytes(buffer, dst_no_epb, dst_no_epb_size); } if (bs_dest) gf_bs_del(bs_dest); return nal_size; } static u8 avc_hevc_get_sar_idx(u32 w, u32 h) { u32 i, count = GF_ARRAY_LENGTH(avc_hevc_sar); for (i = 0; i < count; i++) { if ((avc_hevc_sar[i].w == w) && (avc_hevc_sar[i].h == h)) return i; } return 0xFF; } static void avc_hevc_rewrite_vui(GF_VUIInfo *vui_info, GF_BitStream *orig, GF_BitStream *mod) { /* VUI present flag*/ Bool vui_present_flag = gf_bs_read_int(orig, 1); /*setup default values*/ Bool aspect_ratio_info_present_flag = 0; s32 aspect_ratio_idc = -1; u32 ar_n=0, ar_d=0; Bool overscan_info_present_flag = 0; u32 overscan_info=0; u32 video_signal_type_present_flag=0; u32 video_format = 5; u32 video_full_range_flag = 0; u32 colour_description_present_flag = 0; u32 colour_primaries = 2; u32 transfer_characteristics = 2; u32 matrix_coefficients = 2; //if VUI is present, read all SAR and overscan values if (vui_present_flag) { /* VUI found in input bitstream */ aspect_ratio_info_present_flag = gf_bs_read_int(orig, 1); if (aspect_ratio_info_present_flag) { aspect_ratio_idc = gf_bs_read_int(orig, 8); /*aspect_ratio_idc*/ if (aspect_ratio_idc == 255) { ar_n = gf_bs_read_int(orig, 16); /*sar_width*/ ar_d = gf_bs_read_int(orig, 16); /*sar_height*/ } } /*overscan_info_present_flag */ overscan_info_present_flag = gf_bs_read_int(orig, 1); if(overscan_info_present_flag) { overscan_info = gf_bs_read_int(orig, 1); } /* read all video signal related flags first */ video_signal_type_present_flag = gf_bs_read_int(orig, 1); if(video_signal_type_present_flag) { video_format = gf_bs_read_int(orig, 3); video_full_range_flag = gf_bs_read_int(orig, 1); colour_description_present_flag = gf_bs_read_int(orig, 1); if(colour_description_present_flag) { colour_primaries = gf_bs_read_int(orig, 8); transfer_characteristics = gf_bs_read_int(orig, 8); matrix_coefficients = gf_bs_read_int(orig, 8); } } } //recompute values //no change if ((vui_info->ar_num<0) || (vui_info->ar_den<0)) { } //remove par else if ((vui_info->ar_num==0) || (vui_info->ar_den==0)) { aspect_ratio_info_present_flag = 0; } //set par else { aspect_ratio_info_present_flag = 1; ar_n = vui_info->ar_num; ar_d = vui_info->ar_den; aspect_ratio_idc = avc_hevc_get_sar_idx((u32) ar_n, (u32) ar_d); } if (vui_info->remove_video_info) { video_signal_type_present_flag = 0; } /* correct the values of each flags */ else if ((vui_info->fullrange==0) && (vui_info->video_format==5) && (vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { video_signal_type_present_flag = 0; /* all default, nothing to write*/ } else { video_signal_type_present_flag = 1; video_format = (vui_info->video_format < 0) ? video_format : vui_info->video_format; video_full_range_flag = (vui_info->fullrange < 0) ? video_full_range_flag : vui_info->fullrange; if ((vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { colour_description_present_flag = 0; } else { colour_description_present_flag = 1; colour_primaries = (vui_info->color_prim < 0) ? colour_primaries : vui_info->color_prim; transfer_characteristics = (vui_info->color_tfc < 0) ? transfer_characteristics : vui_info->color_tfc; matrix_coefficients = (vui_info->color_matrix < 0) ? 
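/* negative values mean: keep the value coded in the input VUI */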
matrix_coefficients : vui_info->color_matrix;
		}
		if ((colour_primaries==2) && (transfer_characteristics==2) && (matrix_coefficients==2)) {
			colour_description_present_flag = 0;
			if ((video_format==5) && (video_full_range_flag==0))
				video_signal_type_present_flag = 0;
		}
	}

	//always rewrite VUI
	gf_bs_write_int(mod, 1, 1);

	gf_bs_write_int(mod, aspect_ratio_info_present_flag, 1);
	if (aspect_ratio_info_present_flag) {
		gf_bs_write_int(mod, aspect_ratio_idc, 8);
		if (aspect_ratio_idc == 255) {
			gf_bs_write_int(mod, ar_n, 16);
			gf_bs_write_int(mod, ar_d, 16);
		}
		if (vui_info->update) {
			vui_info->ar_num = ar_n;
			vui_info->ar_den = ar_d;
		}
	}
	gf_bs_write_int(mod, overscan_info_present_flag, 1);
	if (overscan_info_present_flag) {
		gf_bs_write_int(mod, overscan_info, 1);
	}
	gf_bs_write_int(mod, video_signal_type_present_flag, 1);
	if (video_signal_type_present_flag) {
		gf_bs_write_int(mod, video_format, 3);
		gf_bs_write_int(mod, video_full_range_flag, 1);
		gf_bs_write_int(mod, colour_description_present_flag, 1);
		if (colour_description_present_flag) {
			gf_bs_write_int(mod, colour_primaries, 8);
			gf_bs_write_int(mod, transfer_characteristics, 8);
			gf_bs_write_int(mod, matrix_coefficients, 8);
		}
		if (vui_info->update) {
			vui_info->video_format = video_format;
			vui_info->fullrange = video_full_range_flag;
			if (colour_description_present_flag) {
				vui_info->color_prim = colour_primaries;
				vui_info->color_tfc = transfer_characteristics;
				vui_info->color_matrix = matrix_coefficients;
			}
		}
	}

	/*no VUI in input bitstream but we just inserted one, set all remaining vui flags to 0*/
	if (!vui_present_flag) {
		gf_bs_write_int(mod, 0, 1);		/*chroma_location_info_present_flag */
		gf_bs_write_int(mod, 0, 1);		/*timing_info_present_flag*/
		gf_bs_write_int(mod, 0, 1);		/*nal_hrd_parameters_present*/
		gf_bs_write_int(mod, 0, 1);		/*vcl_hrd_parameters_present*/
		gf_bs_write_int(mod, 0, 1);		/*pic_struct_present*/
		gf_bs_write_int(mod, 0, 1);		/*bitstream_restriction*/
	}
	/*otherwise we copy over the remaining VUI bits from the input bitstream*/
}

GF_Err gf_avc_change_vui(GF_AVCConfig *avcc, GF_VUIInfo *vui_info)
{
	GF_BitStream *orig, *mod;
	AVCState avc;
	u32 i, bit_offset, flag;
	s32 idx;
	GF_AVCConfigSlot *slc;

	orig = NULL;

	memset(&avc, 0, sizeof(AVCState));
	avc.sps_active_idx = -1;

	i = 0;
	while ((slc = (GF_AVCConfigSlot *)gf_list_enum(avcc->sequenceParameterSets, &i))) {
		u8 *no_emulation_buf = NULL;
		u32 no_emulation_buf_size = 0, emulation_bytes = 0;
		idx = gf_avc_read_sps(slc->data, slc->size, &avc, 0, &bit_offset);
		if (idx < 0) {
			if (orig)
				gf_bs_del(orig);
			continue;
		}

		/*SPS still contains emulation bytes*/
		no_emulation_buf = gf_malloc((slc->size - 1) * sizeof(char));
		no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data + 1, no_emulation_buf, slc->size - 1);

		orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ);
		gf_bs_read_data(orig, no_emulation_buf, no_emulation_buf_size);
		gf_bs_seek(orig, 0);
		mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);

		/*copy over till vui flag*/
		assert(bit_offset >= 8);
		while (bit_offset - 8/*bit_offset doesn't take care of the first byte (NALU type)*/) {
			flag = gf_bs_read_int(orig, 1);
			gf_bs_write_int(mod, flag, 1);
			bit_offset--;
		}

		avc_hevc_rewrite_vui(vui_info, orig, mod);

		/*finally copy over remaining*/
		while (gf_bs_bits_available(orig)) {
			flag = gf_bs_read_int(orig, 1);
			gf_bs_write_int(mod, flag, 1);
		}
		gf_bs_del(orig);
		orig = NULL;
		gf_free(no_emulation_buf);

		/*set anti-emulation*/
		gf_bs_get_content(mod, &no_emulation_buf, &flag);
		emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, flag);
		if 
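/* grow the stored SPS if the rewritten payload plus emulation prevention bytes no longer fits */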
(flag+emulation_bytes+1>slc->size) slc->data = (char*)gf_realloc(slc->data, flag+emulation_bytes+1); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data + 1, flag) + 1; gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_media_avc_change_par(GF_AVCConfig *avcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_media_avc_change_color(GF_AVCConfig *avcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_avc_change_vui(avcc, &vuii); } GF_EXPORT GF_Err gf_avc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { AVCState avc; s32 idx; memset(&avc, 0, sizeof(AVCState)); avc.sps_active_idx = -1; idx = gf_avc_read_sps(sps_data, sps_size, &avc, 0, NULL); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = avc.sps[idx].width; if (height) *height = avc.sps[idx].height; if (par_n) *par_n = avc.sps[idx].vui.par_num ? avc.sps[idx].vui.par_num : (u32)-1; if (par_d) *par_d = avc.sps[idx].vui.par_den ? avc.sps[idx].vui.par_den : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_avc_get_pps_info(u8 *pps_data, u32 pps_size, u32 *pps_id, u32 *sps_id) { GF_BitStream *bs; GF_Err e = GF_OK; bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { e = GF_NON_COMPLIANT_BITSTREAM; goto exit; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); /*nal hdr*/ gf_bs_read_int(bs, 8); *pps_id = gf_bs_read_ue(bs); *sps_id = gf_bs_read_ue(bs); exit: gf_bs_del(bs); return e; } #ifndef GPAC_DISABLE_HEVC /********** HEVC parsing **********/ Bool gf_hevc_slice_is_intra(HEVCState *hevc) { switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: return GF_TRUE; default: return GF_FALSE; } } Bool gf_hevc_slice_is_IDR(HEVCState *hevc) { if (hevc->sei.recovery_point.valid) { hevc->sei.recovery_point.valid = 0; return GF_TRUE; } switch (hevc->s_info.nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: return GF_TRUE; default: return GF_FALSE; } } static Bool hevc_parse_short_term_ref_pic_set(GF_BitStream *bs, HEVC_SPS *sps, u32 idx_rps) { u32 i; Bool inter_ref_pic_set_prediction_flag = 0; if (idx_rps != 0) inter_ref_pic_set_prediction_flag = gf_bs_read_int_log_idx(bs, 1, "inter_ref_pic_set_prediction_flag", idx_rps); if (inter_ref_pic_set_prediction_flag) { HEVC_ReferencePictureSets *ref_ps, *rps; u32 delta_idx_minus1 = 0; u32 ref_idx; u32 delta_rps_sign; u32 abs_delta_rps_minus1, nb_ref_pics; s32 deltaRPS; u32 k = 0, k0 = 0, k1 = 0; if (idx_rps == sps->num_short_term_ref_pic_sets) delta_idx_minus1 = gf_bs_read_ue_log_idx(bs, "delta_idx_minus1", idx_rps); assert(delta_idx_minus1 <= idx_rps - 1); ref_idx = idx_rps - 1 - delta_idx_minus1; delta_rps_sign = gf_bs_read_int_log_idx(bs, 1, "delta_rps_sign", idx_rps); abs_delta_rps_minus1 = gf_bs_read_ue_log_idx(bs, 
"abs_delta_rps_minus1", idx_rps); deltaRPS = (1 - (delta_rps_sign << 1)) * (abs_delta_rps_minus1 + 1); rps = &sps->rps[idx_rps]; ref_ps = &sps->rps[ref_idx]; nb_ref_pics = ref_ps->num_negative_pics + ref_ps->num_positive_pics; for (i = 0; i <= nb_ref_pics; i++) { s32 ref_idc; s32 used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag ? 1 : 0; if (!used_by_curr_pic_flag) { used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag << 1; } if ((ref_idc == 1) || (ref_idc == 2)) { s32 deltaPOC = deltaRPS; if (i < nb_ref_pics) deltaPOC += ref_ps->delta_poc[i]; rps->delta_poc[k] = deltaPOC; if (deltaPOC < 0) k0++; else k1++; k++; } } rps->num_negative_pics = k0; rps->num_positive_pics = k1; } else { s32 prev = 0, poc; sps->rps[idx_rps].num_negative_pics = gf_bs_read_ue_log_idx(bs, "num_negative_pics", idx_rps); sps->rps[idx_rps].num_positive_pics = gf_bs_read_ue_log_idx(bs, "num_positive_pics", idx_rps); if (sps->rps[idx_rps].num_negative_pics > 16) return GF_FALSE; if (sps->rps[idx_rps].num_positive_pics > 16) return GF_FALSE; for (i = 0; i < sps->rps[idx_rps].num_negative_pics; i++) { u32 delta_poc_s0_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s0_minus1", idx_rps, i); poc = prev - delta_poc_s0_minus1 - 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "delta_poc_s0_minus1", idx_rps, i); } for (i = 0; i < sps->rps[idx_rps].num_positive_pics; i++) { u32 delta_poc_s1_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s1_minus1" , idx_rps, i); poc = prev + delta_poc_s1_minus1 + 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_s1_flag", idx_rps, i); } } return GF_TRUE; } void hevc_pred_weight_table(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si, HEVC_PPS *pps, HEVC_SPS *sps, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { u32 i, num_ref_idx; Bool first_pass = GF_TRUE; u8 luma_weights[20], chroma_weights[20]; u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
	num_ref_idx = num_ref_idx_l0_active;

	gf_bs_read_ue_log(bs, "luma_log2_weight_denom");
	if (ChromaArrayType != 0)
		gf_bs_read_se_log(bs, "delta_chroma_log2_weight_denom");

parse_weights:
	/*defensive check: the weight arrays above hold 20 entries and num_ref_idx comes from the bitstream*/
	if (num_ref_idx > 20) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] invalid number of reference pictures %d in pred weight table\n", num_ref_idx));
		return;
	}
	for (i = 0; i < num_ref_idx; i++) {
		luma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "luma_weights", i);
		//inferred to be 0 if not present
		chroma_weights[i] = 0;
	}
	if (ChromaArrayType != 0) {
		for (i = 0; i < num_ref_idx; i++) {
			chroma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "chroma_weights", i);
		}
	}
	for (i = 0; i < num_ref_idx; i++) {
		if (luma_weights[i]) {
			gf_bs_read_se_log_idx(bs, "delta_luma_weight_l0", i);
			gf_bs_read_se_log_idx(bs, "luma_offset_l0", i);
		}
		if (chroma_weights[i]) {
			gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_0", i);
			gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_0", i);

			gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_1", i);
			gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_1", i);
		}
	}

	if (si->slice_type == GF_HEVC_SLICE_TYPE_B) {
		if (!first_pass) return;
		first_pass = GF_FALSE;
		num_ref_idx = num_ref_idx_l1_active;
		goto parse_weights;
	}
}

static Bool ref_pic_lists_modification(GF_BitStream *bs, u32 slice_type, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active)
{
	//u32 i;
	Bool ref_pic_list_modification_flag_l0 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0");
	if (ref_pic_list_modification_flag_l0) {
		/*for (i=0; i<num_ref_idx_l0_active; i++) {
			list_entry_l0[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr())/log(2)));
		}*/
		return GF_FALSE;
	}
	if (slice_type == GF_HEVC_SLICE_TYPE_B) {
		Bool ref_pic_list_modification_flag_l1 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1");
		if (ref_pic_list_modification_flag_l1) {
			/*for (i=0; i<num_ref_idx_l1_active; i++) {
				list_entry_l1[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr()) / log(2)));
			}*/
			return GF_FALSE;
		}
	}
	return GF_TRUE;
}

static s32 hevc_parse_slice_segment(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si)
{
	u32 i, j;
	u32 num_ref_idx_l0_active = 0, num_ref_idx_l1_active = 0;
	HEVC_PPS *pps;
	HEVC_SPS *sps;
	s32 pps_id;
	Bool RapPicFlag = GF_FALSE;
	Bool IDRPicFlag = GF_FALSE;

	si->first_slice_segment_in_pic_flag = gf_bs_read_int_log(bs, 1, "first_slice_segment_in_pic_flag");

	switch (si->nal_unit_type) {
	case GF_HEVC_NALU_SLICE_IDR_W_DLP:
	case GF_HEVC_NALU_SLICE_IDR_N_LP:
		IDRPicFlag = GF_TRUE;
		RapPicFlag = GF_TRUE;
		break;
	case GF_HEVC_NALU_SLICE_BLA_W_LP:
	case GF_HEVC_NALU_SLICE_BLA_W_DLP:
	case GF_HEVC_NALU_SLICE_BLA_N_LP:
	case GF_HEVC_NALU_SLICE_CRA:
		RapPicFlag = GF_TRUE;
		break;
	}

	if (RapPicFlag) {
		gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag");
	}

	pps_id = gf_bs_read_ue_log(bs, "pps_id");
	if (pps_id >= 64)
		return -1;

	pps = &hevc->pps[pps_id];
	sps = &hevc->sps[pps->sps_id];
	si->sps = sps;
	si->pps = pps;

	if (!si->first_slice_segment_in_pic_flag && pps->dependent_slice_segments_enabled_flag) {
		si->dependent_slice_segment_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segment_flag");
	} else {
		si->dependent_slice_segment_flag = GF_FALSE;
	}
	if (!si->first_slice_segment_in_pic_flag) {
		si->slice_segment_address = gf_bs_read_int_log(bs, sps->bitsSliceSegmentAddress, "slice_segment_address");
	} else {
		si->slice_segment_address = 0;
	}

	if (!si->dependent_slice_segment_flag) {
		Bool deblocking_filter_override_flag = 0;
		Bool slice_temporal_mvp_enabled_flag = 0;
		Bool slice_sao_luma_flag = 0;
		Bool slice_sao_chroma_flag = 0;
		Bool slice_deblocking_filter_disabled_flag = 0;

		//"slice_reserved_undetermined_flag[]"
		gf_bs_read_int_log(bs, pps->num_extra_slice_header_bits, "slice_reserved_undetermined_flag");
		si->slice_type = gf_bs_read_ue_log(bs, "slice_type");

		if (pps->output_flag_present_flag)
			gf_bs_read_int_log(bs, 1, "pic_output_flag");

		if (sps->separate_colour_plane_flag == 1)
			gf_bs_read_int_log(bs, 2, "colour_plane_id");

		if (IDRPicFlag) {
			si->poc_lsb = 0;

			//if not asked to parse full header, abort since we know the poc
			if (!hevc->full_slice_header_parse) return 0;

		}
		else {
			si->poc_lsb = gf_bs_read_int_log(bs, sps->log2_max_pic_order_cnt_lsb, "poc_lsb");

			//if not asked to parse full header, abort once we have the poc
			if (!hevc->full_slice_header_parse) return 0;

			if (gf_bs_read_int_log(bs, 1, "short_term_ref_pic_set_sps_flag") == 0) {
				Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, sps->num_short_term_ref_pic_sets);
				if (!ret)
					return -1;
			}
			else if (sps->num_short_term_ref_pic_sets > 1) {
				u32 numbits = 0;

				while ((u32)(1 << numbits) < sps->num_short_term_ref_pic_sets)
					numbits++;
				if (numbits > 0)
					gf_bs_read_int_log(bs, numbits, "short_term_ref_pic_set_idx");
				/*else
					short_term_ref_pic_set_idx = 0;*/
			}
			if (sps->long_term_ref_pics_present_flag) {
				u8 DeltaPocMsbCycleLt[32];
				u32 num_long_term_sps = 0;
				u32 num_long_term_pics = 0;

				memset(DeltaPocMsbCycleLt, 0, sizeof(u8) * 32);

				if (sps->num_long_term_ref_pic_sps > 0) {
					num_long_term_sps = gf_bs_read_ue_log(bs, "num_long_term_sps");
				}
				num_long_term_pics = gf_bs_read_ue_log(bs, "num_long_term_pics");

				/*defensive check: DeltaPocMsbCycleLt holds 32 entries and both counts come from the bitstream*/
				if ((num_long_term_sps > 32) || (num_long_term_pics > 32) || (num_long_term_sps + num_long_term_pics > 32)) {
					GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] invalid number of long term pictures in slice header\n"));
					return -1;
				}

				for (i = 0; i < num_long_term_sps + num_long_term_pics; i++) {
					if (i < num_long_term_sps) {
						if (sps->num_long_term_ref_pic_sps > 1)
							gf_bs_read_int_log_idx(bs, gf_get_bit_size(sps->num_long_term_ref_pic_sps), "lt_idx_sps", i);
					}
					else {
						gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "PocLsbLt", i);
						gf_bs_read_int_log_idx(bs, 1, "UsedByCurrPicLt", i);
					}
					if (gf_bs_read_int_log_idx(bs, 1, "delta_poc_msb_present_flag", i)) {
						if (i == 0 || i == num_long_term_sps)
							DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i);
						else
							DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i) + DeltaPocMsbCycleLt[i - 1];
					}
				}
			}
			if (sps->temporal_mvp_enable_flag)
				slice_temporal_mvp_enabled_flag = gf_bs_read_int_log(bs, 1, "slice_temporal_mvp_enabled_flag");
		}
		if (sps->sample_adaptive_offset_enabled_flag) {
			u32 ChromaArrayType = sps->separate_colour_plane_flag ?
0 : sps->chroma_format_idc; slice_sao_luma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_luma_flag"); if (ChromaArrayType != 0) slice_sao_chroma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_chroma_flag"); } if (si->slice_type == GF_HEVC_SLICE_TYPE_P || si->slice_type == GF_HEVC_SLICE_TYPE_B) { //u32 NumPocTotalCurr; num_ref_idx_l0_active = pps->num_ref_idx_l0_default_active; num_ref_idx_l1_active = 0; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = pps->num_ref_idx_l1_default_active; if (gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag")) { num_ref_idx_l0_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_active"); if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_active"); } if (pps->lists_modification_present_flag /*TODO: && NumPicTotalCurr > 1*/) { if (!ref_pic_lists_modification(bs, si->slice_type, num_ref_idx_l0_active, num_ref_idx_l1_active)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[hevc] ref_pic_lists_modification( ) not implemented\n")); return -1; } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) gf_bs_read_int_log(bs, 1, "mvd_l1_zero_flag"); if (pps->cabac_init_present_flag) gf_bs_read_int_log(bs, 1, "cabac_init_flag"); if (slice_temporal_mvp_enabled_flag) { // When collocated_from_l0_flag is not present, it is inferred to be equal to 1. Bool collocated_from_l0_flag = 1; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) collocated_from_l0_flag = gf_bs_read_int_log(bs, 1, "collocated_from_l0_flag"); if ((collocated_from_l0_flag && (num_ref_idx_l0_active > 1)) || (!collocated_from_l0_flag && (num_ref_idx_l1_active > 1)) ) { gf_bs_read_ue_log(bs, "collocated_ref_idx"); } } if ((pps->weighted_pred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_P) || (pps->weighted_bipred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_B) ) { hevc_pred_weight_table(bs, hevc, si, pps, sps, num_ref_idx_l0_active, num_ref_idx_l1_active); } gf_bs_read_ue_log(bs, "five_minus_max_num_merge_cand"); } si->slice_qp_delta_start_bits = (s32) (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); si->slice_qp_delta = gf_bs_read_se_log(bs, "slice_qp_delta"); if (pps->slice_chroma_qp_offsets_present_flag) { gf_bs_read_se_log(bs, "slice_cb_qp_offset"); gf_bs_read_se_log(bs, "slice_cr_qp_offset"); } if (pps->deblocking_filter_override_enabled_flag) { deblocking_filter_override_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_flag"); } if (deblocking_filter_override_flag) { slice_deblocking_filter_disabled_flag = gf_bs_read_int_log(bs, 1, "slice_deblocking_filter_disabled_flag"); if (!slice_deblocking_filter_disabled_flag) { gf_bs_read_se_log(bs, "slice_beta_offset_div2"); gf_bs_read_se_log(bs, "slice_tc_offset_div2"); } } if (pps->loop_filter_across_slices_enabled_flag && (slice_sao_luma_flag || slice_sao_chroma_flag || !slice_deblocking_filter_disabled_flag) ) { gf_bs_read_int_log(bs, 1, "slice_loop_filter_across_slices_enabled_flag"); } } //dependent slice segment else { //if not asked to parse full header, abort if (!hevc->full_slice_header_parse) return 0; } si->entry_point_start_bits = ((u32)gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag) { u32 num_entry_point_offsets = gf_bs_read_ue_log(bs, "num_entry_point_offsets"); if (num_entry_point_offsets > 0) { u32 offset = gf_bs_read_ue_log(bs, "offset") + 1; u32 segments = offset >> 4; s32 remain = (offset & 15); for (i = 0; i < num_entry_point_offsets; i++) { //u32 res = 0; for (j = 0; j < 
segments; j++) { //res <<= 16; /*res +=*/ gf_bs_read_int(bs, 16); } if (remain) { //res <<= remain; /* res += */ gf_bs_read_int(bs, remain); } // entry_point_offset = val + 1; // +1; // +1 to get the size } } } if (pps->slice_segment_header_extension_present_flag) { u32 size_ext = gf_bs_read_ue_log(bs, "size_ext"); while (size_ext) { gf_bs_read_int(bs, 8); size_ext--; } } si->header_size_bits = (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); // av_parser.c modified on 16 jan. 2019 if (gf_bs_read_int_log(bs, 1, "byte_align") == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("Error parsing slice header: byte_align not found at end of header !\n")); } gf_bs_align(bs); si->payload_start_offset = (s32)gf_bs_get_position(bs); return 0; } static void gf_hevc_vvc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc, VVCState *vvc) { u32 ptype, psize, hdr; u64 start; GF_BitStream *bs; hdr = buffer[0]; if (((hdr & 0x7e) >> 1) != GF_HEVC_NALU_SEI_PREFIX) return; bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); gf_bs_read_int(bs, 16); /*parse SEI*/ while (gf_bs_available(bs)) { u32 consumed; ptype = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); ptype += 255; } ptype += gf_bs_read_int(bs, 8); psize = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); psize += 255; } psize += gf_bs_read_int(bs, 8); start = gf_bs_get_position(bs); if (start+psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] SEI user message type %d size error (%d but %d remain), skipping SEI message\n", hevc ? "HEVC" : "VVC", ptype, psize, nal_size-start)); break; } switch (ptype) { case 4: /*user registered ITU-T T35*/ if (hevc) { avc_parse_itu_t_t35_sei(bs, &hevc->sei.dovi); } break; default: break; } gf_bs_align(bs); consumed = (u32) (gf_bs_get_position(bs) - start); psize-=consumed; gf_bs_skip_bytes(bs, psize); if (gf_bs_available(bs) <= 2) break; } gf_bs_del(bs); } void gf_hevc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc) { gf_hevc_vvc_parse_sei(buffer, nal_size, hevc, NULL); } static void hevc_compute_poc(HEVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_pic_order_cnt_lsb); /*POC reset for IDR frames, NOT for CRA*/ switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: si->poc_lsb_prev = 0; si->poc_msb_prev = 0; break; } if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: si->poc_msb = 0; break; } si->poc = si->poc_msb + si->poc_lsb; } static Bool hevc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } void hevc_profile_tier_level(GF_BitStream *bs, Bool ProfilePresentFlag, u8 MaxNumSubLayersMinus1, HEVC_ProfileTierLevel *ptl, u32 idx) { u32 i; 
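	/*profile_tier_level() syntax (ISO/IEC 23008-2 clause 7.3.3): the general PTL fields are only
	present when ProfilePresentFlag is set, followed by level_idc, the per-sub-layer presence
	flags, alignment bits, and one PTL per sub-layer*/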
if (ProfilePresentFlag) { ptl->profile_space = gf_bs_read_int_log_idx(bs, 2, "profile_space", idx); ptl->tier_flag = gf_bs_read_int_log_idx(bs, 1, "tier_flag", idx); ptl->profile_idc = gf_bs_read_int_log_idx(bs, 5, "profile_idc", idx); ptl->profile_compatibility_flag = gf_bs_read_int_log_idx(bs, 32, "profile_compatibility_flag", idx); ptl->general_progressive_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_progressive_source_flag", idx); ptl->general_interlaced_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_interlaced_source_flag", idx); ptl->general_non_packed_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_non_packed_constraint_flag", idx); ptl->general_frame_only_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_frame_only_constraint_flag", idx); ptl->general_reserved_44bits = gf_bs_read_long_int(bs, 44); } ptl->level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); for (i = 0; i < MaxNumSubLayersMinus1; i++) { ptl->sub_ptl[i].profile_present_flag = gf_bs_read_int_log_idx2(bs, 1, "profile_present_flag", idx, i); ptl->sub_ptl[i].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } if (MaxNumSubLayersMinus1 > 0) { for (i = MaxNumSubLayersMinus1; i < 8; i++) { /*reserved_zero_2bits*/gf_bs_read_int(bs, 2); } } for (i = 0; i < MaxNumSubLayersMinus1; i++) { if (ptl->sub_ptl[i].profile_present_flag) { ptl->sub_ptl[i].profile_space = gf_bs_read_int_log_idx2(bs, 2, "sublayer_profile_space", idx, i); ptl->sub_ptl[i].tier_flag = gf_bs_read_int_log_idx2(bs, 1, "sublayer_tier_flag", idx, i); ptl->sub_ptl[i].profile_idc = gf_bs_read_int_log_idx2(bs, 5, "sublayer_profile_idc", idx, i); ptl->sub_ptl[i].profile_compatibility_flag = gf_bs_read_int_log_idx2(bs, 32, "sublayer_profile_compatibility_flag", idx, i); /*ptl->sub_ptl[i].progressive_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_progressive_source_flag", idx, i); /*ptl->sub_ptl[i].interlaced_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_interlaced_source_flag", idx, i); /*ptl->sub_ptl[i].non_packed_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_non_packed_constraint_flag", idx, i); /*ptl->sub_ptl[i].frame_only_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_frame_only_constraint_flag", idx, i); /*ptl->sub_ptl[i].reserved_44bits =*/ gf_bs_read_long_int(bs, 44); } if (ptl->sub_ptl[i].level_present_flag) ptl->sub_ptl[i].level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } } static u32 scalability_type_to_idx(HEVC_VPS *vps, u32 scalability_type) { u32 idx = 0, type; for (type = 0; type < scalability_type; type++) { idx += (vps->scalability_mask[type] ? 
1 : 0); } return idx; } #define LHVC_VIEW_ORDER_INDEX 1 #define LHVC_SCALABILITY_INDEX 2 static u32 lhvc_get_scalability_id(HEVC_VPS *vps, u32 layer_id_in_vps, u32 scalability_type) { u32 idx; if (!vps->scalability_mask[scalability_type]) return 0; idx = scalability_type_to_idx(vps, scalability_type); return vps->dimension_id[layer_id_in_vps][idx]; } static u32 lhvc_get_view_index(HEVC_VPS *vps, u32 id) { return lhvc_get_scalability_id(vps, vps->layer_id_in_vps[id], LHVC_VIEW_ORDER_INDEX); } static u32 lhvc_get_num_views(HEVC_VPS *vps) { u32 numViews = 1, i; for (i = 0; i < vps->max_layers; i++) { u32 layer_id = vps->layer_id_in_nuh[i]; if (i > 0 && (lhvc_get_view_index(vps, layer_id) != lhvc_get_scalability_id(vps, i - 1, LHVC_VIEW_ORDER_INDEX))) { numViews++; } } return numViews; } static void lhvc_parse_rep_format(HEVC_RepFormat *fmt, GF_BitStream *bs, u32 idx) { u8 chroma_bitdepth_present_flag; fmt->pic_width_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_width_luma_samples", idx); fmt->pic_height_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_height_luma_samples", idx); chroma_bitdepth_present_flag = gf_bs_read_int_log_idx(bs, 1, "chroma_bitdepth_present_flag", idx); if (chroma_bitdepth_present_flag) { fmt->chroma_format_idc = gf_bs_read_int_log_idx(bs, 2, "chroma_format_idc", idx); if (fmt->chroma_format_idc == 3) fmt->separate_colour_plane_flag = gf_bs_read_int_log_idx(bs, 1, "separate_colour_plane_flag", idx); fmt->bit_depth_luma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_luma_minus8", idx); fmt->bit_depth_chroma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_chroma_minus8", idx); } if (gf_bs_read_int_log_idx(bs, 1, "conformance_window_vps_flag", idx)) { gf_bs_read_ue_log_idx(bs, "conf_win_vps_left_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_right_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_top_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_bottom_offset", idx); } } static Bool hevc_parse_vps_extension(HEVC_VPS *vps, GF_BitStream *bs) { u8 splitting_flag, vps_nuh_layer_id_present_flag, view_id_len; u32 i, j, num_scalability_types, num_add_olss, num_add_layer_set, num_indepentdent_layers, nb_bits, default_output_layer_idc = 0; u8 dimension_id_len[16], dim_bit_offset[16]; u8 /*avc_base_layer_flag, */NumLayerSets, /*default_one_target_output_layer_flag, */rep_format_idx_present_flag, ols_ids_to_ls_idx; u8 layer_set_idx_for_ols_minus1[MAX_LHVC_LAYERS]; u8 nb_output_layers_in_output_layer_set[MAX_LHVC_LAYERS + 1]; u8 ols_highest_output_layer_id[MAX_LHVC_LAYERS + 1]; u32 k, d, r, p, iNuhLId, jNuhLId; u8 num_direct_ref_layers[64], num_pred_layers[64], num_layers_in_tree_partition[MAX_LHVC_LAYERS]; u8 dependency_flag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS], id_pred_layers[64][MAX_LHVC_LAYERS]; // u8 num_ref_layers[64]; // u8 tree_partition_layer_id[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; // u8 id_ref_layers[64][MAX_LHVC_LAYERS]; // u8 id_direct_ref_layers[64][MAX_LHVC_LAYERS]; u8 layer_id_in_list_flag[64]; Bool OutputLayerFlag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; vps->vps_extension_found = 1; if ((vps->max_layers > 1) && vps->base_layer_internal_flag) hevc_profile_tier_level(bs, 0, vps->max_sub_layers - 1, &vps->ext_ptl[0], 0); splitting_flag = gf_bs_read_int_log(bs, 1, "splitting_flag"); num_scalability_types = 0; for (i = 0; i < 16; i++) { vps->scalability_mask[i] = gf_bs_read_int_log_idx(bs, 1, "scalability_mask", i); num_scalability_types += vps->scalability_mask[i]; } if (num_scalability_types >= 16) { num_scalability_types = 16; } dimension_id_len[0] = 0; 
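	/*scalability dimensions: when splitting_flag is set the dimension_id values are later derived
	from bit fields of nuh_layer_id, otherwise each dimension_id is coded explicitly per layer*/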
for (i = 0; i < (num_scalability_types - splitting_flag); i++) { dimension_id_len[i] = 1 + gf_bs_read_int_log_idx(bs, 3, "dimension_id_len_minus1", i); } if (splitting_flag) { for (i = 0; i < num_scalability_types; i++) { dim_bit_offset[i] = 0; for (j = 0; j < i; j++) dim_bit_offset[i] += dimension_id_len[j]; } dimension_id_len[num_scalability_types - 1] = 1 + (5 - dim_bit_offset[num_scalability_types - 1]); dim_bit_offset[num_scalability_types] = 6; } vps_nuh_layer_id_present_flag = gf_bs_read_int_log(bs, 1, "vps_nuh_layer_id_present_flag"); vps->layer_id_in_nuh[0] = 0; vps->layer_id_in_vps[0] = 0; for (i = 1; i < vps->max_layers; i++) { if (vps_nuh_layer_id_present_flag) { vps->layer_id_in_nuh[i] = gf_bs_read_int_log_idx(bs, 6, "layer_id_in_nuh", i); } else { vps->layer_id_in_nuh[i] = i; } vps->layer_id_in_vps[vps->layer_id_in_nuh[i]] = i; if (!splitting_flag) { for (j = 0; j < num_scalability_types; j++) { vps->dimension_id[i][j] = gf_bs_read_int_log_idx2(bs, dimension_id_len[j], "dimension_id", i, j); } } } if (splitting_flag) { for (i = 0; i < vps->max_layers; i++) for (j = 0; j < num_scalability_types; j++) vps->dimension_id[i][j] = ((vps->layer_id_in_nuh[i] & ((1 << dim_bit_offset[j + 1]) - 1)) >> dim_bit_offset[j]); } else { for (j = 0; j < num_scalability_types; j++) vps->dimension_id[0][j] = 0; } view_id_len = gf_bs_read_int_log(bs, 4, "view_id_len"); if (view_id_len > 0) { for (i = 0; i < lhvc_get_num_views(vps); i++) { gf_bs_read_int_log_idx(bs, view_id_len, "view_id_val", i); } } for (i = 1; i < vps->max_layers; i++) { for (j = 0; j < i; j++) { vps->direct_dependency_flag[i][j] = gf_bs_read_int_log_idx(bs, 1, "direct_dependency_flag", i); } } //we do the test on MAX_LHVC_LAYERS and break in the loop to avoid a wrong GCC 4.8 warning on array bounds for (i = 0; i < MAX_LHVC_LAYERS; i++) { if (i >= vps->max_layers) break; for (j = 0; j < vps->max_layers; j++) { dependency_flag[i][j] = vps->direct_dependency_flag[i][j]; for (k = 0; k < i; k++) if (vps->direct_dependency_flag[i][k] && vps->direct_dependency_flag[k][j]) dependency_flag[i][j] = 1; } } for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; d = r = p = 0; for (j = 0; j < vps->max_layers; j++) { jNuhLId = vps->layer_id_in_nuh[j]; if (vps->direct_dependency_flag[i][j]) { // id_direct_ref_layers[iNuhLId][d] = jNuhLId; d++; } if (dependency_flag[i][j]) { // id_ref_layers[iNuhLId][r] = jNuhLId; r++; } if (dependency_flag[j][i]) id_pred_layers[iNuhLId][p++] = jNuhLId; } num_direct_ref_layers[iNuhLId] = d; // num_ref_layers[iNuhLId] = r; num_pred_layers[iNuhLId] = p; } memset(layer_id_in_list_flag, 0, 64 * sizeof(u8)); k = 0; //num_indepentdent_layers for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; if (!num_direct_ref_layers[iNuhLId]) { u32 h = 1; //tree_partition_layer_id[k][0] = iNuhLId; for (j = 0; j < num_pred_layers[iNuhLId]; j++) { u32 predLId = id_pred_layers[iNuhLId][j]; if (!layer_id_in_list_flag[predLId]) { //tree_partition_layer_id[k][h++] = predLId; layer_id_in_list_flag[predLId] = 1; } } num_layers_in_tree_partition[k++] = h; } } num_indepentdent_layers = k; num_add_layer_set = 0; if (num_indepentdent_layers > 1) num_add_layer_set = gf_bs_read_ue_log(bs, "num_add_layer_set"); for (i = 0; i < num_add_layer_set; i++) for (j = 1; j < num_indepentdent_layers; j++) { nb_bits = 1; while ((1 << nb_bits) < (num_layers_in_tree_partition[j] + 1)) nb_bits++; gf_bs_read_int_log_idx2(bs, nb_bits, "highest_layer_idx_plus1", i, j); } if (gf_bs_read_int_log(bs, 1, 
"vps_sub_layers_max_minus1_present_flag")) { for (i = 0; i < vps->max_layers; i++) { gf_bs_read_int_log_idx(bs, 3, "sub_layers_vps_max_minus1", i); } } if (gf_bs_read_int_log(bs, 1, "max_tid_ref_present_flag")) { for (i = 0; i < (vps->max_layers - 1); i++) { for (j = i + 1; j < vps->max_layers; j++) { if (vps->direct_dependency_flag[j][i]) gf_bs_read_int_log_idx2(bs, 3, "max_tid_il_ref_pics_plus1", i, j); } } } gf_bs_read_int_log(bs, 1, "default_ref_layers_active_flag"); vps->num_profile_tier_level = 1 + gf_bs_read_ue_log(bs, "num_profile_tier_level"); if (vps->num_profile_tier_level > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of PTLs in VPS %d\n", vps->num_profile_tier_level)); vps->num_profile_tier_level = 1; return GF_FALSE; } for (i = vps->base_layer_internal_flag ? 2 : 1; i < vps->num_profile_tier_level; i++) { Bool vps_profile_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_profile_present_flag", i); hevc_profile_tier_level(bs, vps_profile_present_flag, vps->max_sub_layers - 1, &vps->ext_ptl[i - 1], i-1); } NumLayerSets = vps->num_layer_sets + num_add_layer_set; num_add_olss = 0; if (NumLayerSets > 1) { num_add_olss = gf_bs_read_ue_log(bs, "num_add_olss"); default_output_layer_idc = gf_bs_read_int_log(bs, 2, "default_output_layer_idc"); default_output_layer_idc = default_output_layer_idc < 2 ? default_output_layer_idc : 2; } vps->num_output_layer_sets = num_add_olss + NumLayerSets; layer_set_idx_for_ols_minus1[0] = 1; vps->output_layer_flag[0][0] = 1; for (i = 0; i < vps->num_output_layer_sets; i++) { if ((NumLayerSets > 2) && (i >= NumLayerSets)) { nb_bits = 1; while ((1 << nb_bits) < (NumLayerSets - 1)) nb_bits++; layer_set_idx_for_ols_minus1[i] = gf_bs_read_int_log_idx(bs, nb_bits, "layer_set_idx_for_ols_minus1", i); } else layer_set_idx_for_ols_minus1[i] = 0; ols_ids_to_ls_idx = i < NumLayerSets ? 
i : layer_set_idx_for_ols_minus1[i] + 1; if ((i > (vps->num_layer_sets - 1)) || (default_output_layer_idc == 2)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) vps->output_layer_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "output_layer_flag", i, j); } if ((default_output_layer_idc == 0) || (default_output_layer_idc == 1)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if ((default_output_layer_idc == 0) || (vps->LayerSetLayerIdList[i][j] == vps->LayerSetLayerIdListMax[i])) OutputLayerFlag[i][j] = GF_TRUE; else OutputLayerFlag[i][j] = GF_FALSE; } } for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (OutputLayerFlag[i][j]) { u32 curLayerID; vps->necessary_layers_flag[i][j] = GF_TRUE; curLayerID = vps->LayerSetLayerIdList[i][j]; for (k = 0; k < j; k++) { u32 refLayerId = vps->LayerSetLayerIdList[i][k]; if (dependency_flag[vps->layer_id_in_vps[curLayerID]][vps->layer_id_in_vps[refLayerId]]) vps->necessary_layers_flag[i][k] = GF_TRUE; } } } vps->num_necessary_layers[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (vps->necessary_layers_flag[i][j]) vps->num_necessary_layers[i] += 1; } if (i == 0) { if (vps->base_layer_internal_flag) { if (vps->max_layers > 1) vps->profile_tier_level_idx[0][0] = 1; else vps->profile_tier_level_idx[0][0] = 0; } continue; } nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_profile_tier_level) nb_bits++; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) if (vps->necessary_layers_flag[i][j] && vps->num_profile_tier_level) vps->profile_tier_level_idx[i][j] = gf_bs_read_int_log_idx2(bs, nb_bits, "profile_tier_level_idx", i, j); else vps->profile_tier_level_idx[i][j] = 0; nb_output_layers_in_output_layer_set[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { nb_output_layers_in_output_layer_set[i] += OutputLayerFlag[i][j]; if (OutputLayerFlag[i][j]) { ols_highest_output_layer_id[i] = vps->LayerSetLayerIdList[ols_ids_to_ls_idx][j]; } } if (nb_output_layers_in_output_layer_set[i] == 1 && ols_highest_output_layer_id[i] > 0) vps->alt_output_layer_flag[i] = gf_bs_read_int_log_idx(bs, 1, "alt_output_layer_flag", i); } vps->num_rep_formats = 1 + gf_bs_read_ue_log(bs, "num_rep_formats_minus1"); if (vps->num_rep_formats > 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of rep formats in VPS %d\n", vps->num_rep_formats)); vps->num_rep_formats = 0; return GF_FALSE; } for (i = 0; i < vps->num_rep_formats; i++) { lhvc_parse_rep_format(&vps->rep_formats[i], bs, i); } if (vps->num_rep_formats > 1) rep_format_idx_present_flag = gf_bs_read_int_log(bs, 1, "rep_format_idx_present_flag"); else rep_format_idx_present_flag = 0; vps->rep_format_idx[0] = 0; nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_rep_formats) nb_bits++; for (i = vps->base_layer_internal_flag ? 1 : 0; i < vps->max_layers; i++) { if (rep_format_idx_present_flag) { vps->rep_format_idx[i] = gf_bs_read_int_log_idx(bs, nb_bits, "rep_format_idx", i); } else { vps->rep_format_idx[i] = i < vps->num_rep_formats - 1 ? i : vps->num_rep_formats - 1; } } //TODO - we don't use the rest ... 
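	//(the remaining VPS extension fields, e.g. DPB sizes and direct dependency types, are left unparsed)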
return GF_TRUE; } static void sub_layer_hrd_parameters(GF_BitStream *bs, int subLayerId, u32 cpb_cnt, Bool sub_pic_hrd_params_present_flag, u32 idx1, u32 idx2) { u32 i; if (!gf_bs_available(bs)) return; for (i = 0; i <= cpb_cnt; i++) { gf_bs_read_ue_log_idx3(bs, "bit_rate_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "cpb_size_value_minus1", idx1, idx2, i); if (sub_pic_hrd_params_present_flag) { gf_bs_read_ue_log_idx3(bs, "cpb_size_du_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "bit_rate_du_value_minus1", idx1, idx2, i); } gf_bs_read_int_log_idx3(bs, 1, "cbr_flag", idx1, idx2, i); } } static void hevc_parse_hrd_parameters(GF_BitStream *bs, Bool commonInfPresentFlag, int maxNumSubLayersMinus1, u32 idx) { int i; Bool nal_hrd_parameters_present_flag = GF_FALSE; Bool vcl_hrd_parameters_present_flag = GF_FALSE; Bool sub_pic_hrd_params_present_flag = GF_FALSE; if (commonInfPresentFlag) { nal_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "nal_hrd_parameters_present_flag", idx); vcl_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "vcl_hrd_parameters_present_flag", idx); if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) { sub_pic_hrd_params_present_flag = gf_bs_read_int_log_idx(bs, 1, "sub_pic_hrd_params_present_flag", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 8, "tick_divisor_minus2", idx); gf_bs_read_int_log_idx(bs, 5, "du_cpb_removal_delay_increment_length_minus1", idx); gf_bs_read_int_log_idx(bs, 1, "sub_pic_cpb_params_in_pic_timing_sei_flag", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_du_length_minus1", idx); } gf_bs_read_int_log_idx(bs, 4, "bit_rate_scale", idx); gf_bs_read_int_log_idx(bs, 4, "cpb_size_scale", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 4, "cpb_size_du_scale", idx); } gf_bs_read_int_log_idx(bs, 5, "initial_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "au_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_length_minus1", idx); } } for (i = 0; i <= maxNumSubLayersMinus1; i++) { Bool fixed_pic_rate_general_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_general_flag", idx); Bool fixed_pic_rate_within_cvs_flag_i = GF_TRUE; Bool low_delay_hrd_flag_i = GF_FALSE; u32 cpb_cnt_minus1_i = 0; if (!fixed_pic_rate_general_flag_i) { fixed_pic_rate_within_cvs_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_within_cvs_flag", idx); } if (fixed_pic_rate_within_cvs_flag_i) gf_bs_read_ue_log_idx(bs, "elemental_duration_in_tc_minus1", idx); else low_delay_hrd_flag_i = gf_bs_read_int_log_idx(bs, 1, "low_delay_hrd_flag", idx); if (!low_delay_hrd_flag_i) { cpb_cnt_minus1_i = gf_bs_read_ue_log_idx(bs, "cpb_cnt_minus1", idx); } if (nal_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } if (vcl_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } } } static s32 gf_hevc_read_vps_bs_internal(GF_BitStream *bs, HEVCState *hevc, Bool stop_at_vps_ext) { u8 vps_sub_layer_ordering_info_present_flag, vps_extension_flag; u32 i, j; s32 vps_id; HEVC_VPS *vps; u8 layer_id_included_flag[MAX_LHVC_LAYERS][64]; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if (vps_id >= 16) return -1; vps = &hevc->vps[vps_id]; vps->bit_pos_vps_extensions = -1; if (!vps->state) { vps->id = vps_id; vps->state = 1; } vps->base_layer_internal_flag = 
gf_bs_read_int_log(bs, 1, "base_layer_internal_flag"); vps->base_layer_available_flag = gf_bs_read_int_log(bs, 1, "base_layer_available_flag"); vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers_minus1"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; vps->temporal_id_nesting = gf_bs_read_int_log(bs, 1, "temporal_id_nesting"); gf_bs_read_int_log(bs, 16, "vps_reserved_ffff_16bits"); hevc_profile_tier_level(bs, 1, vps->max_sub_layers - 1, &vps->ptl, 0); vps_sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "vps_sub_layer_ordering_info_present_flag"); for (i = (vps_sub_layer_ordering_info_present_flag ? 0 : vps->max_sub_layers - 1); i < vps->max_sub_layers; i++) { gf_bs_read_ue_log_idx(bs, "vps_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "vps_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "vps_max_latency_increase_plus1", i); } vps->max_layer_id = gf_bs_read_int_log(bs, 6, "max_layer_id"); if (vps->max_layer_id > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] VPS max layer ID %u but GPAC only supports %u\n", vps->max_layer_id, MAX_LHVC_LAYERS)); return -1; } vps->num_layer_sets = gf_bs_read_ue_log(bs, "num_layer_sets_minus1") + 1; if (vps->num_layer_sets > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of layer sets in VPS %d\n", vps->num_layer_sets)); return -1; } for (i = 1; i < vps->num_layer_sets; i++) { for (j = 0; j <= vps->max_layer_id; j++) { layer_id_included_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "layer_id_included_flag", i, j); } } vps->num_layers_in_id_list[0] = 1; for (i = 1; i < vps->num_layer_sets; i++) { u32 n, m; n = 0; for (m = 0; m <= vps->max_layer_id; m++) { if (layer_id_included_flag[i][m]) { vps->LayerSetLayerIdList[i][n++] = m; if (vps->LayerSetLayerIdListMax[i] < m) vps->LayerSetLayerIdListMax[i] = m; } } vps->num_layers_in_id_list[i] = n; } if (gf_bs_read_int_log(bs, 1, "vps_timing_info_present_flag")) { u32 vps_num_hrd_parameters; gf_bs_read_int_log(bs, 32, "vps_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vps_time_scale"); if (gf_bs_read_int_log(bs, 1, "vps_poc_proportional_to_timing_flag")) { gf_bs_read_ue_log(bs, "vps_num_ticks_poc_diff_one_minus1"); } vps_num_hrd_parameters = gf_bs_read_ue_log(bs, "vps_num_hrd_parameters"); for (i = 0; i < vps_num_hrd_parameters; i++) { Bool cprms_present_flag = GF_TRUE; gf_bs_read_ue_log_idx(bs, "hrd_layer_set_idx", i); if (i > 0) cprms_present_flag = gf_bs_read_int_log(bs, 1, "cprms_present_flag"); hevc_parse_hrd_parameters(bs, cprms_present_flag, vps->max_sub_layers - 1, i); } } if (stop_at_vps_ext) { return vps_id; } vps_extension_flag = gf_bs_read_int_log(bs, 1, "vps_extension_flag"); if (vps_extension_flag) { Bool res; gf_bs_align(bs); res = hevc_parse_vps_extension(vps, bs); if (res != GF_TRUE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Failed to parse VPS extensions\n")); return -1; } if (gf_bs_read_int_log(bs, 1, "vps_extension2_flag")) { #if 0 while (gf_bs_available(bs)) { /*vps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } } return vps_id; } GF_EXPORT s32 gf_hevc_read_vps_ex(u8 *data, u32 *size, HEVCState *hevc, Bool remove_extensions) { GF_BitStream *bs; char *data_without_emulation_bytes = NULL; u32 data_without_emulation_bytes_size = 0; s32 vps_id = -1; /*still contains emulation bytes*/ 
	data_without_emulation_bytes_size = remove_extensions ? gf_media_nalu_emulation_bytes_remove_count(data, (*size)) : 0;

	if (!data_without_emulation_bytes_size) {
		bs = gf_bs_new(data, (*size), GF_BITSTREAM_READ);
		gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	}
	//when removing VPS ext, we have to get the full buffer without emulation prevention bytes because we do a bit-by-bit copy of the vps
	else {
		data_without_emulation_bytes = gf_malloc((*size) * sizeof(char));
		data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(data, data_without_emulation_bytes, (*size));
		bs = gf_bs_new(data_without_emulation_bytes, data_without_emulation_bytes_size, GF_BITSTREAM_READ);
	}
	if (!bs) goto exit;

	if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit;

	vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, remove_extensions);
	if (vps_id < 0) goto exit;

	if (remove_extensions) {
		u8 *new_vps;
		u32 new_vps_size, emulation_bytes;
		u32 bit_pos = gf_bs_get_bit_offset(bs);
		GF_BitStream *w_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
		gf_bs_seek(bs, 0);
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u16(w_bs, gf_bs_read_u16(bs) );
		bit_pos -= 48;
		while (bit_pos) {
			u32 v = gf_bs_read_int(bs, 1);
			gf_bs_write_int(w_bs, v, 1);
			bit_pos--;
		}
		/*vps extension flag*/
		gf_bs_write_int(w_bs, 0, 1);
		new_vps = NULL;
		gf_bs_get_content(w_bs, &new_vps, &new_vps_size);
		gf_bs_del(w_bs);

		emulation_bytes = gf_media_nalu_emulation_bytes_add_count(new_vps, new_vps_size);
		if (emulation_bytes + new_vps_size > *size) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("Buffer too small to rewrite VPS - skipping rewrite\n"));
		}
		else {
			*size = gf_media_nalu_add_emulation_bytes(new_vps, data, new_vps_size);
		}
		if (new_vps)
			gf_free(new_vps);
	}

exit:
	if (bs) gf_bs_del(bs);
	if (data_without_emulation_bytes) gf_free(data_without_emulation_bytes);
	return vps_id;
}

GF_EXPORT
s32 gf_hevc_read_vps(u8 *data, u32 size, HEVCState *hevc)
{
	return gf_hevc_read_vps_ex(data, &size, hevc, GF_FALSE);
}

GF_EXPORT
s32 gf_hevc_read_vps_bs(GF_BitStream *bs, HEVCState *hevc)
{
	if (!bs || !hevc) return -1;
	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1;
	return gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE);
}

static void hevc_scaling_list_data(GF_BitStream *bs)
{
	u32 i, sizeId, matrixId;
	for (sizeId = 0; sizeId < 4; sizeId++) {
		for (matrixId = 0; matrixId < 6; matrixId += (sizeId == 3) ?
3 : 1) { u32 idx = sizeId*100 + 10*matrixId; u32 scaling_list_pred_mode_flag_sizeId_matrixId = gf_bs_read_int_log_idx(bs, 1, "scaling_list_pred_mode_flag_sizeId_matrixId", idx); if (!scaling_list_pred_mode_flag_sizeId_matrixId) { gf_bs_read_ue_log_idx(bs, "scaling_list_pred_matrix_id_delta", idx); } else { //u32 nextCoef = 8; u32 coefNum = MIN(64, (1 << (4 + (sizeId << 1)))); if (sizeId > 1) { gf_bs_read_se_log_idx(bs, "scaling_list_dc_coef_minus8", idx); } for (i = 0; i < coefNum; i++) { gf_bs_read_se_log_idx2(bs, "scaling_list_delta_coef", idx, i); } } } } } static const struct { u32 w, h; } hevc_sar[17] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4,3}, { 3,2}, { 2,1} }; static s32 gf_hevc_read_sps_bs_internal(GF_BitStream *bs, HEVCState *hevc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id = -1; u32 i, nb_CTUs, depth; HEVC_SPS *sps; HEVC_VPS *vps; HEVC_ProfileTierLevel ptl; Bool multiLayerExtSpsFlag; u8 sps_ext_or_max_sub_layers_minus1, max_sub_layers_minus1; if (vui_flag_pos) *vui_flag_pos = 0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if (vps_id >= 16) { return -1; } memset(&ptl, 0, sizeof(ptl)); max_sub_layers_minus1 = 0; sps_ext_or_max_sub_layers_minus1 = 0; if (layer_id == 0) max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1"); else sps_ext_or_max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "sps_ext_or_max_sub_layers_minus1"); multiLayerExtSpsFlag = (layer_id != 0) && (sps_ext_or_max_sub_layers_minus1 == 7); if (!multiLayerExtSpsFlag) { gf_bs_read_int_log(bs, 1, "temporal_id_nesting_flag"); hevc_profile_tier_level(bs, 1, max_sub_layers_minus1, &ptl, 0); } sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((sps_id < 0) || (sps_id >= 16)) { return -1; } sps = &hevc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->ptl = ptl; vps = &hevc->vps[vps_id]; sps->max_sub_layers_minus1 = 0; sps->sps_ext_or_max_sub_layers_minus1 = 0; /* default values */ sps->colour_primaries = 2; sps->transfer_characteristic = 2; sps->matrix_coeffs = 2; //sps_rep_format_idx = 0; if (multiLayerExtSpsFlag) { sps->update_rep_format_flag = gf_bs_read_int_log(bs, 1, "update_rep_format_flag"); if (sps->update_rep_format_flag) { sps->rep_format_idx = gf_bs_read_int_log(bs, 8, "rep_format_idx"); } else { sps->rep_format_idx = vps->rep_format_idx[layer_id]; } sps->width = vps->rep_formats[sps->rep_format_idx].pic_width_luma_samples; sps->height = vps->rep_formats[sps->rep_format_idx].pic_height_luma_samples; sps->chroma_format_idc = vps->rep_formats[sps->rep_format_idx].chroma_format_idc; sps->bit_depth_luma = vps->rep_formats[sps->rep_format_idx].bit_depth_luma; sps->bit_depth_chroma = vps->rep_formats[sps->rep_format_idx].bit_depth_chroma; sps->separate_colour_plane_flag = vps->rep_formats[sps->rep_format_idx].separate_colour_plane_flag; //TODO this is crude ... 
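	//we blindly reuse the first extension PTL here instead of resolving the per-layer profile_tier_level_idx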
sps->ptl = vps->ext_ptl[0]; } else { sps->chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); if (sps->chroma_format_idc == 3) sps->separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); sps->width = gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); if ((sps->cw_flag = gf_bs_read_int_log(bs, 1, "conformance_window_flag"))) { u32 SubWidthC, SubHeightC; if (sps->chroma_format_idc == 1) { SubWidthC = SubHeightC = 2; } else if (sps->chroma_format_idc == 2) { SubWidthC = 2; SubHeightC = 1; } else { SubWidthC = SubHeightC = 1; } sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); sps->width -= SubWidthC * (sps->cw_left + sps->cw_right); sps->height -= SubHeightC * (sps->cw_top + sps->cw_bottom); } sps->bit_depth_luma = 8 + gf_bs_read_ue_log(bs, "bit_depth_luma_minus8"); sps->bit_depth_chroma = 8 + gf_bs_read_ue_log(bs, "bit_depth_chroma_minus8"); } sps->log2_max_pic_order_cnt_lsb = 4 + gf_bs_read_ue_log(bs, "log2_max_pic_order_cnt_lsb_minus4"); if (!multiLayerExtSpsFlag) { sps->sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "sub_layer_ordering_info_present_flag"); for (i = sps->sub_layer_ordering_info_present_flag ? 0 : sps->max_sub_layers_minus1; i <= sps->max_sub_layers_minus1; i++) { gf_bs_read_ue_log_idx(bs, "max_dec_pic_buffering", i); gf_bs_read_ue_log_idx(bs, "num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "max_latency_increase", i); } } sps->log2_min_luma_coding_block_size = 3 + gf_bs_read_ue_log(bs, "log2_min_luma_coding_block_size_minus3"); sps->log2_diff_max_min_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_luma_coding_block_size"); sps->max_CU_width = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->max_CU_height = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->log2_min_transform_block_size = 2 + gf_bs_read_ue_log(bs, "log2_min_transform_block_size_minus2"); sps->log2_max_transform_block_size = sps->log2_min_transform_block_size + gf_bs_read_ue_log(bs, "log2_max_transform_block_size"); depth = 0; sps->max_transform_hierarchy_depth_inter = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_inter"); sps->max_transform_hierarchy_depth_intra = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_intra"); while ((u32)(sps->max_CU_width >> sps->log2_diff_max_min_luma_coding_block_size) > (u32)(1 << (sps->log2_min_transform_block_size + depth))) { depth++; } sps->max_CU_depth = sps->log2_diff_max_min_luma_coding_block_size + depth; nb_CTUs = ((sps->width + sps->max_CU_width - 1) / sps->max_CU_width) * ((sps->height + sps->max_CU_height - 1) / sps->max_CU_height); sps->bitsSliceSegmentAddress = 0; while (nb_CTUs > (u32)(1 << sps->bitsSliceSegmentAddress)) { sps->bitsSliceSegmentAddress++; } sps->scaling_list_enable_flag = gf_bs_read_int_log(bs, 1, "scaling_list_enable_flag"); if (sps->scaling_list_enable_flag) { sps->infer_scaling_list_flag = 0; sps->scaling_list_ref_layer_id = 0; if (multiLayerExtSpsFlag) { sps->infer_scaling_list_flag = gf_bs_read_int_log(bs, 1, "infer_scaling_list_flag"); } if (sps->infer_scaling_list_flag) { sps->scaling_list_ref_layer_id = gf_bs_read_int_log(bs, 6, "scaling_list_ref_layer_id"); } else { sps->scaling_list_data_present_flag = 
gf_bs_read_int_log(bs, 1, "scaling_list_data_present_flag"); if (sps->scaling_list_data_present_flag) { hevc_scaling_list_data(bs); } } } sps->asymmetric_motion_partitions_enabled_flag = gf_bs_read_int_log(bs, 1, "asymmetric_motion_partitions_enabled_flag"); sps->sample_adaptive_offset_enabled_flag = gf_bs_read_int_log(bs, 1, "sample_adaptive_offset_enabled_flag"); if ( (sps->pcm_enabled_flag = gf_bs_read_int_log(bs, 1, "pcm_enabled_flag")) ) { sps->pcm_sample_bit_depth_luma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_luma_minus1"); sps->pcm_sample_bit_depth_chroma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_chroma_minus1"); sps->log2_min_pcm_luma_coding_block_size_minus3 = gf_bs_read_ue_log(bs, "log2_min_pcm_luma_coding_block_size_minus3"); sps->log2_diff_max_min_pcm_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_pcm_luma_coding_block_size"); sps->pcm_loop_filter_disable_flag = gf_bs_read_int_log(bs, 1, "pcm_loop_filter_disable_flag"); } sps->num_short_term_ref_pic_sets = gf_bs_read_ue_log(bs, "num_short_term_ref_pic_sets"); if (sps->num_short_term_ref_pic_sets > 64) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid number of short term reference picture sets %d\n", sps->num_short_term_ref_pic_sets)); return -1; } for (i = 0; i < sps->num_short_term_ref_pic_sets; i++) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, i); /*cannot parse short_term_ref_pic_set, skip VUI parsing*/ if (!ret) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid short_term_ref_pic_set\n")); return -1; } } sps->long_term_ref_pics_present_flag = gf_bs_read_int_log(bs, 1, "long_term_ref_pics_present_flag"); if (sps->long_term_ref_pics_present_flag) { sps->num_long_term_ref_pic_sps = gf_bs_read_ue_log(bs, "num_long_term_ref_pic_sps"); for (i = 0; i < sps->num_long_term_ref_pic_sps; i++) { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "lt_ref_pic_poc_lsb_sps", i); gf_bs_read_int_log_idx(bs, 1, "used_by_curr_pic_lt_sps_flag", i); } } sps->temporal_mvp_enable_flag = gf_bs_read_int_log(bs, 1, "temporal_mvp_enable_flag"); sps->strong_intra_smoothing_enable_flag = gf_bs_read_int_log(bs, 1, "strong_intra_smoothing_enable_flag"); if (vui_flag_pos) *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); if ((sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag")) ) { sps->aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->aspect_ratio_info_present_flag) { sps->sar_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (sps->sar_idc == 255) { sps->sar_width = gf_bs_read_int_log(bs, 16, "aspect_ratio_width"); sps->sar_height = gf_bs_read_int_log(bs, 16, "aspect_ratio_height"); } else if (sps->sar_idc < 17) { sps->sar_width = hevc_sar[sps->sar_idc].w; sps->sar_height = hevc_sar[sps->sar_idc].h; } } if ((sps->overscan_info_present = gf_bs_read_int_log(bs, 1, "overscan_info_present"))) sps->overscan_appropriate = gf_bs_read_int_log(bs, 1, "overscan_appropriate"); sps->video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->video_signal_type_present_flag) { sps->video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); if ((sps->colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"))) { sps->colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->transfer_characteristic = gf_bs_read_int_log(bs, 8, 
"transfer_characteristic"); sps->matrix_coeffs = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if ((sps->chroma_loc_info_present_flag = gf_bs_read_int_log(bs, 1, "chroma_loc_info_present_flag"))) { sps->chroma_sample_loc_type_top_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_top_field"); sps->chroma_sample_loc_type_bottom_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_bottom_field"); } sps->neutra_chroma_indication_flag = gf_bs_read_int_log(bs, 1, "neutra_chroma_indication_flag"); sps->field_seq_flag = gf_bs_read_int_log(bs, 1, "field_seq_flag"); sps->frame_field_info_present_flag = gf_bs_read_int_log(bs, 1, "frame_field_info_present_flag"); if ((sps->default_display_window_flag = gf_bs_read_int_log(bs, 1, "default_display_window_flag"))) { sps->left_offset = gf_bs_read_ue_log(bs, "display_window_left_offset"); sps->right_offset = gf_bs_read_ue_log(bs, "display_window_right_offset"); sps->top_offset = gf_bs_read_ue_log(bs, "display_window_top_offset"); sps->bottom_offset = gf_bs_read_ue_log(bs, "display_window_bottom_offset"); } sps->has_timing_info = gf_bs_read_int_log(bs, 1, "has_timing_info"); if (sps->has_timing_info) { sps->num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->poc_proportional_to_timing_flag = gf_bs_read_int_log(bs, 1, "poc_proportional_to_timing_flag"); if (sps->poc_proportional_to_timing_flag) sps->num_ticks_poc_diff_one_minus1 = gf_bs_read_ue_log(bs, "num_ticks_poc_diff_one_minus1"); if ((sps->hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "hrd_parameters_present_flag"))) { // GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[HEVC] HRD param parsing not implemented\n")); return sps_id; } } if (gf_bs_read_int_log(bs, 1, "bitstream_restriction_flag")) { gf_bs_read_int_log(bs, 1, "tiles_fixed_structure_flag"); gf_bs_read_int_log(bs, 1, "motion_vectors_over_pic_boundaries_flag"); gf_bs_read_int_log(bs, 1, "restricted_ref_pic_lists_flag"); gf_bs_read_ue_log(bs, "min_spatial_segmentation_idc"); gf_bs_read_ue_log(bs, "max_bytes_per_pic_denom"); gf_bs_read_ue_log(bs, "max_bits_per_min_cu_denom"); gf_bs_read_ue_log(bs, "log2_max_mv_length_horizontal"); gf_bs_read_ue_log(bs, "log2_max_mv_length_vertical"); } } if (gf_bs_read_int_log(bs, 1, "sps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*sps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return sps_id; } GF_EXPORT s32 gf_hevc_read_sps_ex(char *data, u32 size, HEVCState *hevc, u32 *vui_flag_pos) { GF_BitStream *bs; s32 sps_id = -1; u8 layer_id; if (vui_flag_pos) *vui_flag_pos = 0; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) goto exit; sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, vui_flag_pos); exit: if (bs) gf_bs_del(bs); return sps_id; } GF_EXPORT s32 gf_hevc_read_sps(u8 *data, u32 size, HEVCState *hevc) { return gf_hevc_read_sps_ex(data, size, hevc, NULL); } GF_EXPORT s32 gf_hevc_read_sps_bs(GF_BitStream *bs, HEVCState *hevc) { u8 layer_id; if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) return -1; return gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, NULL); } static s32 gf_hevc_read_pps_bs_internal(GF_BitStream *bs, HEVCState *hevc) { u32 i; s32 pps_id; HEVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { 
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &hevc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if (pps->sps_id >= 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } hevc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->dependent_slice_segments_enabled_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segments_enabled_flag"); pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->num_extra_slice_header_bits = gf_bs_read_int_log(bs, 3, "num_extra_slice_header_bits"); pps->sign_data_hiding_flag = gf_bs_read_int_log(bs, 1, "sign_data_hiding_flag"); pps->cabac_init_present_flag = gf_bs_read_int_log(bs, 1, "cabac_init_present_flag"); pps->num_ref_idx_l0_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active"); pps->num_ref_idx_l1_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active"); pps->pic_init_qp_minus26 = gf_bs_read_se_log(bs, "pic_init_qp_minus26"); pps->constrained_intra_pred_flag = gf_bs_read_int_log(bs, 1, "constrained_intra_pred_flag"); pps->transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "transform_skip_enabled_flag"); if ((pps->cu_qp_delta_enabled_flag = gf_bs_read_int_log(bs, 1, "cu_qp_delta_enabled_flag"))) pps->diff_cu_qp_delta_depth = gf_bs_read_ue_log(bs, "diff_cu_qp_delta_depth"); pps->pic_cb_qp_offset = gf_bs_read_se_log(bs, "pic_cb_qp_offset"); pps->pic_cr_qp_offset = gf_bs_read_se_log(bs, "pic_cr_qp_offset"); pps->slice_chroma_qp_offsets_present_flag = gf_bs_read_int_log(bs, 1, "slice_chroma_qp_offsets_present_flag"); pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); pps->weighted_bipred_flag = gf_bs_read_int_log(bs, 1, "weighted_bipred_flag"); pps->transquant_bypass_enable_flag = gf_bs_read_int_log(bs, 1, "transquant_bypass_enable_flag"); pps->tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "tiles_enabled_flag"); pps->entropy_coding_sync_enabled_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); if (pps->tiles_enabled_flag) { pps->num_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_tile_columns_minus1"); pps->num_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_tile_rows_minus1"); pps->uniform_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_spacing_flag"); if (!pps->uniform_spacing_flag) { for (i = 0; i < pps->num_tile_columns - 1; i++) { pps->column_width[i] = 1 + gf_bs_read_ue_log_idx(bs, "column_width_minus1", i); } for (i = 0; i < pps->num_tile_rows - 1; i++) { pps->row_height[i] = 1 + gf_bs_read_ue_log_idx(bs, "row_height_minus1", i); } } pps->loop_filter_across_tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_tiles_enabled_flag"); } pps->loop_filter_across_slices_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_slices_enabled_flag"); if ((pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"))) { pps->deblocking_filter_override_enabled_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_enabled_flag"); if (! 
(pps->pic_disable_deblocking_filter_flag = gf_bs_read_int_log(bs, 1, "pic_disable_deblocking_filter_flag"))) {
			pps->beta_offset_div2 = gf_bs_read_se_log(bs, "beta_offset_div2");
			pps->tc_offset_div2 = gf_bs_read_se_log(bs, "tc_offset_div2");
		}
	}
	if ((pps->pic_scaling_list_data_present_flag = gf_bs_read_int_log(bs, 1, "pic_scaling_list_data_present_flag"))) {
		hevc_scaling_list_data(bs);
	}
	pps->lists_modification_present_flag = gf_bs_read_int_log(bs, 1, "lists_modification_present_flag");
	pps->log2_parallel_merge_level_minus2 = gf_bs_read_ue_log(bs, "log2_parallel_merge_level_minus2");
	pps->slice_segment_header_extension_present_flag = gf_bs_read_int_log(bs, 1, "slice_segment_header_extension_present_flag");
	if (gf_bs_read_int_log(bs, 1, "pps_extension_flag")) {
#if 0
		while (gf_bs_available(bs)) {
			/*pps_extension_data_flag */ gf_bs_read_int(bs, 1);
		}
#endif
	}
	return pps_id;
}

GF_EXPORT
s32 gf_hevc_read_pps(u8 *data, u32 size, HEVCState *hevc)
{
	GF_BitStream *bs;
	s32 pps_id = -1;

	bs = gf_bs_new(data, size, GF_BITSTREAM_READ);
	if (!bs) goto exit;
	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);

	if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit;
	pps_id = gf_hevc_read_pps_bs_internal(bs, hevc);

exit:
	if (bs) gf_bs_del(bs);
	return pps_id;
}

GF_EXPORT
s32 gf_hevc_read_pps_bs(GF_BitStream *bs, HEVCState *hevc)
{
	if (!bs || !hevc) return -1;
	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1;
	return gf_hevc_read_pps_bs_internal(bs, hevc);
}

GF_EXPORT
s32 gf_hevc_parse_nalu_bs(GF_BitStream *bs, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id)
{
	Bool is_slice = GF_FALSE;
	s32 ret = -1;
	HEVCSliceInfo n_state;

	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	memcpy(&n_state, &hevc->s_info, sizeof(HEVCSliceInfo));
	if (!hevc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1;

	n_state.nal_unit_type = *nal_unit_type;

	switch (n_state.nal_unit_type) {
	case GF_HEVC_NALU_ACCESS_UNIT:
	case GF_HEVC_NALU_END_OF_SEQ:
	case GF_HEVC_NALU_END_OF_STREAM:
		ret = 1;
		break;

	/*slice_segment_layer_rbsp*/
	case GF_HEVC_NALU_SLICE_TRAIL_N:
	case GF_HEVC_NALU_SLICE_TRAIL_R:
	case GF_HEVC_NALU_SLICE_TSA_N:
	case GF_HEVC_NALU_SLICE_TSA_R:
	case GF_HEVC_NALU_SLICE_STSA_N:
	case GF_HEVC_NALU_SLICE_STSA_R:
	case GF_HEVC_NALU_SLICE_BLA_W_LP:
	case GF_HEVC_NALU_SLICE_BLA_W_DLP:
	case GF_HEVC_NALU_SLICE_BLA_N_LP:
	case GF_HEVC_NALU_SLICE_IDR_W_DLP:
	case GF_HEVC_NALU_SLICE_IDR_N_LP:
	case GF_HEVC_NALU_SLICE_CRA:
	case GF_HEVC_NALU_SLICE_RADL_N:
	case GF_HEVC_NALU_SLICE_RADL_R:
	case GF_HEVC_NALU_SLICE_RASL_N:
	case GF_HEVC_NALU_SLICE_RASL_R:
		is_slice = GF_TRUE;
		/* slice - read the info and compare.*/
		ret = hevc_parse_slice_segment(bs, hevc, &n_state);
		if (ret < 0) return ret;

		hevc_compute_poc(&n_state);
		ret = 0;
		if (hevc->s_info.poc != n_state.poc) {
			ret = 1;
			break;
		}
		if (n_state.first_slice_segment_in_pic_flag) {
			if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) {
				ret = 1;
				break;
			}
		}
		break;
	case GF_HEVC_NALU_SEQ_PARAM:
		hevc->last_parsed_sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, *layer_id, NULL);
		ret = (hevc->last_parsed_sps_id>=0) ? 0 : -1;
		break;
	case GF_HEVC_NALU_PIC_PARAM:
		hevc->last_parsed_pps_id = gf_hevc_read_pps_bs_internal(bs, hevc);
		ret = (hevc->last_parsed_pps_id>=0) ? 0 : -1;
		break;
	case GF_HEVC_NALU_VID_PARAM:
		hevc->last_parsed_vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE);
		ret = (hevc->last_parsed_vps_id>=0) ?
0 : -1; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && hevc->s_info.sps) { n_state.frame_num_offset_prev = hevc->s_info.frame_num_offset; n_state.frame_num_prev = hevc->s_info.frame_num; n_state.poc_lsb_prev = hevc->s_info.poc_lsb; n_state.poc_msb_prev = hevc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) hevc_compute_poc(&n_state); memcpy(&hevc->s_info, &n_state, sizeof(HEVCSliceInfo)); return ret; } GF_EXPORT s32 gf_hevc_parse_nalu(u8 *data, u32 size, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret = -1; if (!hevc) { if (nal_unit_type) (*nal_unit_type) = (data[0] & 0x7E) >> 1; if (layer_id) { u8 id = data[0] & 1; id <<= 5; id |= (data[1] >> 3) & 0x1F; (*layer_id) = id; } if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_hevc_parse_nalu_bs(bs, hevc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } GF_EXPORT GF_Err gf_hevc_change_vui(GF_HEVCConfig *hvcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; HEVCState hevc; u32 i, bit_offset, flag; s32 idx; GF_NALUFFParamArray *spss; GF_NALUFFParam *slc; orig = NULL; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; i = 0; spss = NULL; while ((spss = (GF_NALUFFParamArray *)gf_list_enum(hvcc->param_array, &i))) { if (spss->type == GF_HEVC_NALU_SEQ_PARAM) break; spss = NULL; } if (!spss) return GF_NON_COMPLIANT_BITSTREAM; i = 0; while ((slc = (GF_NALUFFParam *)gf_list_enum(spss->nalus, &i))) { u8 *no_emulation_buf; u32 no_emulation_buf_size, emulation_bytes; /*SPS may still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data, no_emulation_buf, slc->size); idx = gf_hevc_read_sps_ex(no_emulation_buf, no_emulation_buf_size, &hevc, &bit_offset); if (idx < 0) { if (orig) gf_bs_del(orig); gf_free(no_emulation_buf); continue; } orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 0); while (bit_offset) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &no_emulation_buf_size); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, no_emulation_buf_size); if (no_emulation_buf_size + emulation_bytes > slc->size) slc->data = (char*)gf_realloc(slc->data, no_emulation_buf_size + emulation_bytes); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data, no_emulation_buf_size); gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_hevc_change_par(GF_HEVCConfig *hvcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_change_color(GF_HEVCConfig *hvcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 
colmatrix)
{
	GF_VUIInfo vuii;
	memset(&vuii, 0, sizeof(GF_VUIInfo));
	vuii.ar_num = -1;
	vuii.ar_den = -1;
	vuii.fullrange = fullrange;
	vuii.video_format = vidformat;
	vuii.color_prim = colorprim;
	vuii.color_tfc = transfer;
	vuii.color_matrix = colmatrix;
	return gf_hevc_change_vui(hvcc, &vuii);
}

GF_EXPORT
GF_Err gf_hevc_get_sps_info_with_state(HEVCState *hevc, u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d)
{
	s32 idx;
	idx = gf_hevc_read_sps(sps_data, sps_size, hevc);
	if (idx < 0) {
		return GF_NON_COMPLIANT_BITSTREAM;
	}
	if (sps_id) *sps_id = idx;
	if (width) *width = hevc->sps[idx].width;
	if (height) *height = hevc->sps[idx].height;
	if (par_n) *par_n = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_width : (u32)-1;
	if (par_d) *par_d = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_height : (u32)-1;
	return GF_OK;
}

GF_EXPORT
GF_Err gf_hevc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d)
{
	HEVCState hevc;
	memset(&hevc, 0, sizeof(HEVCState));
	hevc.sps_active_idx = -1;
	return gf_hevc_get_sps_info_with_state(&hevc, sps_data, sps_size, sps_id, width, height, par_n, par_d);
}

#endif //GPAC_DISABLE_HEVC

static u32 AC3_FindSyncCode(u8 *buf, u32 buflen)
{
	u32 end = buflen - 6;
	u32 offset = 0;
	while (offset <= end) {
		if (buf[offset] == 0x0b && buf[offset + 1] == 0x77) {
			return offset;
		}
		offset++;
	}
	return buflen;
}

static Bool AC3_FindSyncCodeBS(GF_BitStream *bs)
{
	u8 b1;
	u64 pos = gf_bs_get_position(bs);
	u64 end = gf_bs_get_size(bs);

	pos += 1;
	b1 = gf_bs_read_u8(bs);
	while (pos + 1 <= end) {
		u8 b2 = gf_bs_read_u8(bs);
		if ((b1 == 0x0b) && (b2 == 0x77)) {
			gf_bs_seek(bs, pos - 1);
			return GF_TRUE;
		}
		pos++;
		b1 = b2;
	}
	return GF_FALSE;
}

static const u32 ac3_sizecod_to_bitrate[] = {
	32000, 40000, 48000, 56000, 64000, 80000, 96000,
	112000, 128000, 160000, 192000, 224000, 256000,
	320000, 384000, 448000, 512000, 576000, 640000
};

static const u32 ac3_sizecod2_to_framesize[] = {
	96, 120, 144, 168, 192, 240, 288, 336, 384, 480, 576, 672,
	768, 960, 1152, 1344, 1536, 1728, 1920
};

static const u32 ac3_sizecod1_to_framesize[] = {
	69, 87, 104, 121, 139, 174, 208, 243, 278, 348, 417, 487,
	557, 696, 835, 975, 1114, 1253, 1393
};

static const u32 ac3_sizecod0_to_framesize[] = {
	64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448,
	512, 640, 768, 896, 1024, 1152, 1280
};

static const u32 ac3_mod_to_chans[] = {
	2, 1, 2, 3, 3, 4, 4, 5
};

GF_EXPORT
u32 gf_ac3_get_channels(u32 acmod)
{
	u32 nb_ch;
	nb_ch = ac3_mod_to_chans[acmod];
	return nb_ch;
}

GF_EXPORT
u32 gf_ac3_get_bitrate(u32 brcode)
{
	return ac3_sizecod_to_bitrate[brcode];
}

Bool gf_ac3_parser(u8 *buf, u32 buflen, u32 *pos, GF_AC3Config *hdr, Bool full_parse)
{
	GF_BitStream *bs;
	Bool ret;

	if (buflen < 6) return GF_FALSE;
	(*pos) = AC3_FindSyncCode(buf, buflen);
	if (*pos >= buflen) return GF_FALSE;

	bs = gf_bs_new((const char*)(buf + *pos), buflen, GF_BITSTREAM_READ);
	ret = gf_ac3_parser_bs(bs, hdr, full_parse);
	gf_bs_del(bs);
	return ret;
}

GF_EXPORT
Bool gf_ac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse)
{
	u32 fscod, frmsizecod, bsid, ac3_mod, freq, framesize, bsmod, syncword;
	u64 pos;
	if (!hdr || (gf_bs_available(bs) < 6))
		return GF_FALSE;
	if (!AC3_FindSyncCodeBS(bs))
		return GF_FALSE;

	pos = gf_bs_get_position(bs);
	syncword = gf_bs_read_u16(bs);
	if (syncword != 0x0B77) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword));
		return GF_FALSE;
	}
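	/* Fixed AC-3 header read below: crc1(16) fscod(2) frmsizecod(6) bsid(5) bsmod(3) acmod(3).
	   Illustrative usage sketch (gf_ac3_parser is declared just above; buf/buflen are
	   hypothetical caller-side names, not part of this file):

		GF_AC3Config hdr;
		u32 pos;
		if (gf_ac3_parser(buf, buflen, &pos, &hdr, GF_TRUE)) {
			// hdr.sample_rate, hdr.channels and hdr.framesize are now filled in
		}
	*/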
	gf_bs_read_int_log(bs, 16, "crc1");
	fscod = gf_bs_read_int_log(bs, 2, "fscod");
	frmsizecod = gf_bs_read_int_log(bs, 6, "frmsizecod");
	bsid = gf_bs_read_int_log(bs, 5, "bsid");
	bsmod = gf_bs_read_int_log(bs, 3, "bsmod");
	ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod");

	if (frmsizecod >= 2 * sizeof(ac3_sizecod_to_bitrate) / sizeof(u32))
		return GF_FALSE;
	hdr->bitrate = ac3_sizecod_to_bitrate[frmsizecod / 2];
	if (bsid > 8) hdr->bitrate = hdr->bitrate >> (bsid - 8);

	switch (fscod) {
	case 0:
		if (frmsizecod >= 2 * sizeof(ac3_sizecod0_to_framesize) / sizeof(u32))
			return GF_FALSE;
		freq = 48000;
		framesize = ac3_sizecod0_to_framesize[frmsizecod / 2] * 2;
		break;
	case 1:
		if (frmsizecod >= 2 * sizeof(ac3_sizecod1_to_framesize) / sizeof(u32))
			return GF_FALSE;
		freq = 44100;
		framesize = (ac3_sizecod1_to_framesize[frmsizecod / 2] + (frmsizecod & 0x1)) * 2;
		break;
	case 2:
		if (frmsizecod >= 2 * sizeof(ac3_sizecod2_to_framesize) / sizeof(u32))
			return GF_FALSE;
		freq = 32000;
		framesize = ac3_sizecod2_to_framesize[frmsizecod / 2] * 2;
		break;
	default:
		return GF_FALSE;
	}
	hdr->sample_rate = freq;
	hdr->framesize = framesize;

	if (full_parse) {
		hdr->streams[0].bsid = bsid;
		hdr->streams[0].bsmod = bsmod;
		hdr->streams[0].acmod = ac3_mod;
		hdr->streams[0].lfon = 0;
		hdr->streams[0].fscod = fscod;
		hdr->brcode = frmsizecod / 2;
	}
	if (ac3_mod >= 2 * sizeof(ac3_mod_to_chans) / sizeof(u32))
		return GF_FALSE;

	hdr->channels = ac3_mod_to_chans[ac3_mod];
	if ((ac3_mod & 0x1) && (ac3_mod != 1)) gf_bs_read_int_log(bs, 2, "cmixlev");
	if (ac3_mod & 0x4) gf_bs_read_int_log(bs, 2, "surmixlev");
	if (ac3_mod == 0x2) gf_bs_read_int_log(bs, 2, "dsurmod");

	if (gf_bs_read_int_log(bs, 1, "lfeon")) {
		hdr->channels += 1;
		hdr->streams[0].lfon = 1;
	}
	gf_bs_seek(bs, pos);
	return GF_TRUE;
}

GF_EXPORT
Bool gf_eac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse)
{
	u32 fscod, bsid, ac3_mod, freq, framesize, syncword, substreamid, lfon, channels, numblkscod, strmtyp, frmsiz;
	u64 pos;
	u16 chanmap;
	static u32 numblks[4] = {1, 2, 3, 6};

	if (!hdr || (gf_bs_available(bs) < 6))
		return GF_FALSE;
	if (!AC3_FindSyncCodeBS(bs))
		return GF_FALSE;

	pos = gf_bs_get_position(bs);
	framesize = 0;
	numblkscod = 0;
	memset(hdr, 0, sizeof(GF_AC3Config));

block:
	syncword = gf_bs_read_u16(bs);
	if (syncword != 0x0B77) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[E-AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword));
		return GF_FALSE;
	}
	strmtyp = gf_bs_read_int_log(bs, 2, "strmtyp");
	substreamid = gf_bs_read_int_log(bs, 3, "substreamid");
	//next main (independent) AU, done with this frame
	if ((strmtyp!=0x1) && ((hdr->substreams >> substreamid) & 0x1)) {
		hdr->framesize = framesize;
		gf_bs_seek(bs, pos);
		return GF_TRUE;
	}

	frmsiz = gf_bs_read_int_log(bs, 11, "frmsiz");
	framesize += 2 * (1 + frmsiz);
	fscod = gf_bs_read_int_log(bs, 2, "fscod");
	if (fscod == 0x3) {
		fscod = gf_bs_read_int_log(bs, 2, "fscod2");
		numblkscod += 6;
	}
	else {
		numblkscod += gf_bs_read_int_log(bs, 2, "numblkscod");
	}
	assert(numblkscod <= 9);

	if ((hdr->substreams >> substreamid) & 0x1) {
		//we still have sync frames following
		if (substreamid) {
			if (gf_bs_seek(bs, pos + framesize) != GF_OK) {
				gf_bs_seek(bs, pos);
				return GF_FALSE;
			}
			if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) {
				gf_bs_seek(bs, pos);
				return GF_FALSE;
			}
			goto block;
		}
	}

	hdr->substreams |= (1 << substreamid);

	switch (fscod) {
	case 0:
		freq = 48000;
		break;
	case 1:
		freq = 44100;
		break;
	case 2:
		freq = 32000;
		break;
	default:
		return GF_FALSE;
	}

	ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod");
	lfon = gf_bs_read_int_log(bs, 1, "lfon");
	bsid = gf_bs_read_int_log(bs, 5, "bsid");
	if (!substreamid && (bsid != 16/*E-AC3*/))
		return GF_FALSE;

	gf_bs_read_int_log(bs, 5, "dialnorm");
	if (gf_bs_read_int_log(bs, 1, "compre")) {
		gf_bs_read_int_log(bs, 8, "compr");
	}
	if (ac3_mod==0) {
		gf_bs_read_int_log(bs, 5, "dialnorm2");
		if (gf_bs_read_int_log(bs, 1, "compr2e")) {
			gf_bs_read_int_log(bs, 8, "compr2");
		}
	}
	chanmap = 0;
	if (strmtyp==0x1) {
		if (gf_bs_read_int_log(bs, 1, "chanmape")) {
			chanmap = gf_bs_read_int_log(bs, 16, "chanmap");
		}
	}
	channels = ac3_mod_to_chans[ac3_mod];
	if (lfon)
		channels += 1;

	hdr->bitrate = 0;
	hdr->sample_rate = freq;
	hdr->framesize = framesize;
	if (strmtyp != 1) {
		hdr->channels = channels;
		hdr->streams[substreamid].lfon = lfon;
		if (full_parse) {
			hdr->streams[substreamid].bsid = bsid;
			hdr->streams[substreamid].bsmod = 0;
			hdr->streams[substreamid].acmod = ac3_mod;
			hdr->streams[substreamid].fscod = fscod;
			hdr->brcode = 0;
		}
		hdr->nb_streams++;
		//not clear if this is only for the independent streams
		hdr->brcode += ((frmsiz+1) * freq) / (numblks[numblkscod]*16) / 1000;

		if (lfon)
			hdr->channels += 1;
	}
	else {
		hdr->streams[substreamid].nb_dep_sub = substreamid;
		hdr->streams[substreamid].chan_loc |= chanmap;
	}

	if (numblkscod < 6) {
		//we need 6 blocks to make a sample
		if (gf_bs_seek(bs, pos + framesize) != GF_OK) {
			gf_bs_seek(bs, pos);
			return GF_FALSE;
		}
		if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs))
			return GF_FALSE;
		goto block;
	}

	gf_bs_seek(bs, pos);
	return GF_TRUE;
}

#endif /*GPAC_DISABLE_AV_PARSERS*/

u32 gf_id3_read_size(GF_BitStream *bs)
{
	u32 size = 0;
	gf_bs_read_int(bs, 1);
	size |= gf_bs_read_int(bs, 7);
	size<<=7;
	gf_bs_read_int(bs, 1);
	size |= gf_bs_read_int(bs, 7);
	size<<=7;
	gf_bs_read_int(bs, 1);
	size |= gf_bs_read_int(bs, 7);
	size<<=7;
	gf_bs_read_int(bs, 1);
	size |= gf_bs_read_int(bs, 7);
	return size;
}

#if !defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG)

/* Vorbis parser */

static u32 vorbis_book_maptype1_quantvals(u32 entries, u32 dim)
{
	u32 vals = (u32)floor(pow(entries, 1.0 / dim));
	while (1) {
		u32 acc = 1;
		u32 acc1 = 1;
		u32 i;
		for (i = 0; i < dim; i++) {
			acc *= vals;
			acc1 *= vals + 1;
		}
		if (acc <= entries && acc1 > entries) return (vals);
		else {
			if (acc > entries) vals--;
			else vals++;
		}
	}
}

static u32 ilog(u32 v, Bool dec)
{
	u32 ret = 0;
	if (dec && v) --v;
	while (v) {
		ret++;
		v >>= 1;
	}
	return (ret);
}

static u32 icount(u32 v)
{
	u32 ret = 0;
	while (v) {
		ret += v & 1;
		v >>= 1;
	}
	return(ret);
}

GF_EXPORT
Bool gf_vorbis_parse_header(GF_VorbisParser *vp, u8 *data, u32 data_len)
{
	u32 pack_type, i, j, k, times, nb_part, nb_books, nb_modes;
	u32 l;
	char szNAME[8];
	oggpack_buffer opb;

	oggpack_readinit(&opb, (u8*)data, data_len);
	pack_type = oggpack_read(&opb, 8);
	i = 0;
	while (i < 6) {
		szNAME[i] = oggpack_read(&opb, 8);
		i++;
	}
	szNAME[i] = 0;
	if (strcmp(szNAME, "vorbis")) {
		return GF_FALSE;
	}
	switch (pack_type) {
	case 0x01:
		vp->version = oggpack_read(&opb, 32);
		if (vp->version != 0) {
			return GF_FALSE;
		}
		vp->channels = oggpack_read(&opb, 8);
		vp->sample_rate = oggpack_read(&opb, 32);
		vp->max_r = oggpack_read(&opb, 32);
		vp->avg_r = oggpack_read(&opb, 32);
		vp->low_r = oggpack_read(&opb, 32);

		vp->min_block = 1<<oggpack_read(&opb, 4);
		vp->max_block = 1<<oggpack_read(&opb, 4);
		if (vp->sample_rate < 1 || vp->channels < 1 || vp->min_block < 8 || vp->max_block < vp->min_block
		        || oggpack_read(&opb, 1) != 1) {
			return GF_FALSE;
		}
		vp->nb_init=1;
		return GF_TRUE;

	case 0x03:
		/*trash comments*/
		vp->nb_init++;
		return GF_TRUE;
	case 0x05:
		/*need at least bitstream header to make sure we're parsing
the right thing*/ if (!vp->nb_init) return GF_FALSE; break; default: return GF_FALSE; } /*OK parse codebook*/ nb_books = oggpack_read(&opb, 8) + 1; /*skip vorbis static books*/ for (i = 0; i < nb_books; i++) { u32 map_type, qb, qq; u32 entries, dim; oggpack_read(&opb, 24); dim = oggpack_read(&opb, 16); entries = oggpack_read(&opb, 24); if ((s32)entries < 0) entries = 0; if (oggpack_read(&opb, 1) == 0) { if (oggpack_read(&opb, 1)) { for (j = 0; j < entries; j++) { if (oggpack_read(&opb, 1)) { oggpack_read(&opb, 5); } } } else { for (j = 0; j < entries; j++) oggpack_read(&opb, 5); } } else { oggpack_read(&opb, 5); for (j = 0; j < entries;) { u32 num = oggpack_read(&opb, ilog(entries - j, GF_FALSE)); for (k = 0; k < num && j < entries; k++, j++) { } } } switch ((map_type = oggpack_read(&opb, 4))) { case 0: break; case 1: case 2: oggpack_read(&opb, 32); oggpack_read(&opb, 32); qq = oggpack_read(&opb, 4) + 1; oggpack_read(&opb, 1); if (map_type == 1) qb = vorbis_book_maptype1_quantvals(entries, dim); else if (map_type == 2) qb = entries * dim; else qb = 0; for (j = 0; j < qb; j++) oggpack_read(&opb, qq); break; } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) oggpack_read(&opb, 16); times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 type = oggpack_read(&opb, 16); if (type) { u32 *parts, *class_dims, count, rangebits; u32 max_class = 0; nb_part = oggpack_read(&opb, 5); parts = (u32*)gf_malloc(sizeof(u32) * nb_part); for (j = 0; j < nb_part; j++) { parts[j] = oggpack_read(&opb, 4); if (max_class < parts[j]) max_class = parts[j]; } class_dims = (u32*)gf_malloc(sizeof(u32) * (max_class + 1)); for (j = 0; j < max_class + 1; j++) { u32 class_sub; class_dims[j] = oggpack_read(&opb, 3) + 1; class_sub = oggpack_read(&opb, 2); if (class_sub) oggpack_read(&opb, 8); for (k = 0; k < (u32)(1 << class_sub); k++) oggpack_read(&opb, 8); } oggpack_read(&opb, 2); rangebits = oggpack_read(&opb, 4); count = 0; for (j = 0, k = 0; j < nb_part; j++) { count += class_dims[parts[j]]; for (; k < count; k++) oggpack_read(&opb, rangebits); } gf_free(parts); gf_free(class_dims); } else { oggpack_read(&opb, 8 + 16 + 16 + 6 + 8); nb_books = oggpack_read(&opb, 4) + 1; for (j = 0; j < nb_books; j++) oggpack_read(&opb, 8); } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 acc = 0; oggpack_read(&opb, 16);/*type*/ oggpack_read(&opb, 24); oggpack_read(&opb, 24); oggpack_read(&opb, 24); nb_part = oggpack_read(&opb, 6) + 1; oggpack_read(&opb, 8); for (j = 0; j < nb_part; j++) { u32 cascade = oggpack_read(&opb, 3); if (oggpack_read(&opb, 1)) cascade |= (oggpack_read(&opb, 5) << 3); acc += icount(cascade); } for (j = 0; j < acc; j++) oggpack_read(&opb, 8); } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 sub_maps = 1; oggpack_read(&opb, 16); if (oggpack_read(&opb, 1)) sub_maps = oggpack_read(&opb, 4) + 1; if (oggpack_read(&opb, 1)) { u32 nb_steps = oggpack_read(&opb, 8) + 1; for (j = 0; j < nb_steps; j++) { oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); } } oggpack_read(&opb, 2); if (sub_maps>1) { for(l=0; l<vp->channels; l++) oggpack_read(&opb, 4); } for (j = 0; j < sub_maps; j++) { oggpack_read(&opb, 8); oggpack_read(&opb, 8); oggpack_read(&opb, 8); } } nb_modes = oggpack_read(&opb, 6) + 1; for (i = 0; i < nb_modes; i++) { vp->mode_flag[i] = oggpack_read(&opb, 1); oggpack_read(&opb, 16); oggpack_read(&opb, 16); oggpack_read(&opb, 8); } vp->modebits = 0; j = nb_modes; while (j > 1) { vp->modebits++; j >>= 
1;
	}
	return GF_TRUE;
}

GF_EXPORT
u32 gf_vorbis_check_frame(GF_VorbisParser *vp, u8 *data, u32 data_length)
{
	s32 block_size;
	oggpack_buffer opb;
	if (!vp) return 0;
	oggpack_readinit(&opb, (unsigned char*)data, data_length);
	/*not audio*/
	if (oggpack_read(&opb, 1) != 0) return 0;
	block_size = oggpack_read(&opb, vp->modebits);
	if (block_size == -1) return 0;
	return ((vp->mode_flag[block_size]) ? vp->max_block : vp->min_block) / (2);
}

/*call with vorbis header packets - initializes the parser on success, leave it to NULL otherwise
returns 1 if success, 0 if error.*/
Bool gf_opus_parse_header(GF_OpusParser *opus, u8 *data, u32 data_len)
{
	char tag[9];
	GF_BitStream *bs = gf_bs_new(data, data_len, GF_BITSTREAM_READ);
	gf_bs_read_data(bs, tag, 8);
	tag[8]=0;
	if (memcmp(data, "OpusHead", sizeof(char)*8)) {
		gf_bs_del(bs);
		return GF_FALSE;
	}
	/*Identification Header*/
	opus->version = gf_bs_read_u8(bs); /*version*/
	if (opus->version != 1) {
		gf_bs_del(bs);
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Opus] Unsupported version %d\n", opus->version));
		return GF_FALSE;
	}
	opus->OutputChannelCount = gf_bs_read_u8(bs);
	opus->PreSkip = gf_bs_read_u16_le(bs);
	opus->InputSampleRate = gf_bs_read_u32_le(bs);
	opus->OutputGain = gf_bs_read_u16_le(bs);
	opus->ChannelMappingFamily = gf_bs_read_u8(bs);
	if (opus->ChannelMappingFamily != 0) {
		opus->StreamCount = gf_bs_read_u8(bs);
		opus->CoupledCount = gf_bs_read_u8(bs);
		gf_bs_read_data(bs, (char *) opus->ChannelMapping, opus->OutputChannelCount);
	}
	gf_bs_del(bs);
	return GF_TRUE;
}

/*returns 0 if init error or not a vorbis frame, otherwise returns the number of audio samples
in this frame*/
u32 gf_opus_check_frame(GF_OpusParser *op, u8 *data, u32 data_length)
{
	u32 block_size;
	if (!memcmp(data, "OpusHead", sizeof(char)*8))
		return 0;
	if (!memcmp(data, "OpusTags", sizeof(char)*8))
		return 0;

	/*consider the whole packet as Ogg packets and ISOBMFF samples for Opus are framed similarly*/
	static const int OpusFrameDurIn48k[] = {
		480, 960, 1920, 2880,
		480, 960, 1920, 2880,
		480, 960, 1920, 2880,
		480, 960,
		480, 960,
		120, 240, 480, 960,
		120, 240, 480, 960,
		120, 240, 480, 960,
		120, 240, 480, 960,
	};
	int TOC_config = (data[0] & 0xf8) >> 3;
	//int s = (data[0] & 0x04) >> 2;
	block_size = OpusFrameDurIn48k[TOC_config];
	int c = data[0] & 0x03;
	if (c == 1 || c == 2) {
		block_size *= 2;
	} else if (c == 3) {
		/*unknown number of frames*/
		int num_frames = data[1] & 0x3f;
		block_size *= num_frames;
	}
	return block_size;
}

#endif /*!defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG)*/

u64 gf_mpegh_escaped_value(GF_BitStream *bs, u32 nBits1, u32 nBits2, u32 nBits3)
{
	u64 value = gf_bs_read_int(bs, nBits1);
	if (value == (1<<nBits1)-1) {
		u32 vadd = gf_bs_read_int(bs, nBits2);
		value += vadd;
		if (vadd == (1<<nBits2)-1) {
			vadd = gf_bs_read_int(bs, nBits3);
			value += vadd;
		}
	}
	return value;
}

GF_EXPORT
s32 gf_mpegh_get_mhas_pl(u8 *ptr, u32 size, u64 *ch_layout)
{
	s32 PL = -1;
	GF_BitStream *bs;
	u32 i;
	s32 sync_pos=-1;
	for (i=0; i<size-3; i++) {
		if ((ptr[i]==0xC0) && (ptr[i+1]== 0x01) && (ptr[i+2]==0xA5)) {
			sync_pos = i;
			break;
		}
	}
	if (sync_pos<0) return 0;
	if (ch_layout) *ch_layout = 0;
	bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ);
	gf_bs_skip_bytes(bs, sync_pos);
	while (gf_bs_available(bs)) {
		u32 type = (u32) gf_mpegh_escaped_value(bs, 3, 8, 8);
		/*u64 label = */gf_mpegh_escaped_value(bs, 2, 8, 32);
		u64 mh_size = gf_mpegh_escaped_value(bs, 11, 24, 24);
		if (mh_size > gf_bs_available(bs))
			break;
		//MHAS config
		if (type==1) {
			PL = gf_bs_read_int(bs, 8);
			if (ch_layout) {
				u32 idx = gf_bs_read_int(bs, 5);
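				/* assumption from the reads below, following the MPEG-H mpegh3daConfig layout:
				   a sampling-frequency index of 0x1f escapes to an explicit 24-bit rate */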
				if (idx==0x1f)
					gf_bs_read_int(bs, 24);
				/*idx = */gf_bs_read_int(bs, 3);
				gf_bs_read_int(bs, 1);
				gf_bs_read_int(bs, 1);
				//speaker config
				idx = gf_bs_read_int(bs, 2);
				if (idx == 0) {
					*ch_layout = gf_audio_fmt_get_layout_from_cicp( gf_bs_read_int(bs, 6) );
				}
			}
			break;
		}
		gf_bs_skip_bytes(bs, mh_size);
	}
	gf_bs_del(bs);
	return PL;
}

GF_EXPORT
void gf_media_vvc_parse_sei(char *buffer, u32 nal_size, VVCState *vvc)
{
	gf_hevc_vvc_parse_sei(buffer, nal_size, NULL, vvc);
}

static Bool vvc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id)
{
	u32 val;
	val = gf_bs_read_int_log(bs, 1, "forbidden_zero");
	if (val) return GF_FALSE;
	val = gf_bs_read_int_log(bs, 1, "reserved_zero");
	if (val) return GF_FALSE;

	val = gf_bs_read_int_log(bs, 6, "layerID");
	if (layer_id) *layer_id = val;

	val = gf_bs_read_int_log(bs, 5, "nuh_type");
	if (nal_unit_type) *nal_unit_type = val;

	val = gf_bs_read_int_log(bs, 3, "temporalID");
	if (!val) return GF_FALSE;
	val -= 1;
	if (temporal_id) *temporal_id = val;
	return GF_TRUE;
}

static void vvc_profile_tier_level(GF_BitStream *bs, VVC_ProfileTierLevel *ptl, u32 idx)
{
	u32 i;
	if (ptl->pt_present) {
		ptl->general_profile_idc = gf_bs_read_int_log_idx(bs, 7, "general_profile_idc", idx);
		ptl->general_tier_flag = gf_bs_read_int_log_idx(bs, 1, "general_tier_flag", idx);
	}
	ptl->general_level_idc = gf_bs_read_int_log_idx(bs, 8, "general_level_idc", idx);
	ptl->frame_only_constraint = gf_bs_read_int_log_idx(bs, 1, "frame_only_constraint", idx);
	ptl->multilayer_enabled = gf_bs_read_int_log_idx(bs, 1, "multilayer_enabled", idx);

	//general constraints info - max size is 1 + 81 + 8 + 255
	if (ptl->pt_present) {
		// general_constraints_info
		ptl->gci_present = gf_bs_read_int_log_idx(bs, 1, "gci_present", idx);
		if (ptl->gci_present) {
			u8 res;
			ptl->gci[0] = 0x80;
			ptl->gci[0] |= gf_bs_read_int(bs, 7);
			//81-7 = 74 bits till reserved
			gf_bs_read_data(bs, ptl->gci+1, 9);
			ptl->gci[10] = gf_bs_read_int(bs, 2)<<6;
			//skip extensions
			ptl->gci[11] = 0;
			res = gf_bs_read_int(bs, 8);
			gf_bs_read_int(bs, res);
		}
		gf_bs_align(bs);
	}
	for (i=ptl->ptl_max_tid; i>0; i--) {
		ptl->sub_ptl[i-1].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i);
	}
	gf_bs_align(bs);
	for (i=ptl->ptl_max_tid; i>0; i--) {
		if (ptl->sub_ptl[i-1].level_present_flag)
			ptl->sub_ptl[i-1].sublayer_level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i);
	}
	if (ptl->pt_present) {
		ptl->num_sub_profiles = gf_bs_read_int_log_idx(bs, 8, "num_sub_profiles", idx);
		for (i=0; i<ptl->num_sub_profiles; i++) {
			ptl->sub_profile_idc[i] = gf_bs_read_int_log_idx2(bs, 32, "sub_profile_idc", idx, i);
		}
	}
}

static s32 gf_media_vvc_read_vps_bs_internal(GF_BitStream *bs, VVCState *vvc, Bool stop_at_vps_ext)
{
	u32 i, j;
	s32 vps_id;
	VVC_VPS *vps;
	Bool vps_default_ptl_dpb_hrd_max_tid_flag=0;

	//nalu header already parsed
	vps_id = gf_bs_read_int_log(bs, 4, "vps_id");
	if (vps_id >= 16) return -1;
	if (!vps_id) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] VPS ID 0 is forbidden\n"));
		return -1;
	}
	vps = &vvc->vps[vps_id];
	if (!vps->state) {
		vps->id = vps_id;
		vps->state = 1;
	}
	vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers");
	if (vps->max_layers > MAX_LHVC_LAYERS) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS));
		return -1;
	}
	vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1;

	if ((vps->max_layers>1) && (vps->max_sub_layers>1))
		vps_default_ptl_dpb_hrd_max_tid_flag = gf_bs_read_int_log(bs, 1, "vps_default_ptl_dpb_hrd_max_tid_flag");

	if (vps->max_layers>1)
		vps->all_layers_independent = gf_bs_read_int_log(bs, 1, "all_layers_independent");

	for (i=0; i<vps->max_layers; i++) {
		u32 layer_id = gf_bs_read_int_log_idx(bs, 6, "layer_id", i);
		if (layer_id>vps->max_layer_id) vps->max_layer_id = layer_id;
		if (i && !vps->all_layers_independent) {
			Bool layer_indep = gf_bs_read_int_log_idx(bs, 1, "layer_independent", i);
			if (!layer_indep) {
				Bool vps_max_tid_ref_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_max_tid_ref_present_flag", i);
				for (j=0; j<i; j++) {
					Bool vps_direct_ref_layer_flag = gf_bs_read_int_log_idx2(bs, 1, "vps_direct_ref_layer_flag", i, j);
					if (vps_max_tid_ref_present_flag && vps_direct_ref_layer_flag) {
						gf_bs_read_int_log_idx2(bs, 3, "vps_max_tid_il_ref_pics_plus1", i, j);
					}
				}
			}
		}
	}
	vps->num_ptl = 1;
	if (vps->max_layers > 1) {
		if (vps->all_layers_independent) {
			vps->each_layer_is_ols = gf_bs_read_int_log(bs, 1, "each_layer_is_ols");
		}
		if (!vps->each_layer_is_ols) {
			u32 vps_ols_mode_idc = 2;
			if (!vps->all_layers_independent) {
				vps_ols_mode_idc = gf_bs_read_int_log(bs, 2, "vps_ols_mode_idc");
			}
			if (vps_ols_mode_idc==2) {
				u8 vps_num_output_layer_sets = 2 + gf_bs_read_int_log(bs, 8, "vps_num_output_layer_sets_minus2");
				for (i=0; i<vps_num_output_layer_sets; i++) {
					for (j=0; j<vps->max_layers; j++) {
						gf_bs_read_int_log_idx2(bs, 1, "vps_ols_output_layer_flag", i, j);
					}
				}
			}
		}
		vps->num_ptl = 1 + gf_bs_read_int_log(bs, 8, "num_ptl_minus1");
	}
	vps->ptl[0].pt_present = 1;
	for (i=0; i<vps->num_ptl; i++) {
		if (i)
			vps->ptl[i].pt_present = gf_bs_read_int_log_idx(bs, 1, "pt_present", i);
		if (!vps_default_ptl_dpb_hrd_max_tid_flag)
			vps->ptl[i].ptl_max_tid = gf_bs_read_int_log_idx(bs, 3, "ptl_max_tid", i);
		else
			vps->ptl[i].ptl_max_tid = vps->max_sub_layers - 1;
	}
	//align
	gf_bs_align(bs);
	for (i=0; i<vps->num_ptl; i++) {
		vvc_profile_tier_level(bs, &vps->ptl[i], i);
	}
	//TODO, parse multilayer stuff
	return vps_id;
}

static s32 gf_media_vvc_read_sps_bs_internal(GF_BitStream *bs, VVCState *vvc, u8 layer_id, u32 *vui_flag_pos)
{
	s32 vps_id, sps_id;
	u32 i, CtbSizeY;
	VVC_SPS *sps;
	u8 sps_ptl_dpb_hrd_params_present_flag;

	if (vui_flag_pos) *vui_flag_pos = 0;

	sps_id = gf_bs_read_int_log(bs, 4, "sps_id");
	if (sps_id >= 16) {
		return -1;
	}
	vps_id = gf_bs_read_int_log(bs, 4, "vps_id");
	if (vps_id >= 16) {
		return -1;
	}
	if (!vps_id && !vvc->vps[0].state) {
		vvc->vps[0].state = 1;
		vvc->vps[0].num_ptl = 1;
		vvc->vps[0].max_layers = 1;
		vvc->vps[0].all_layers_independent = 1;
	}
	sps = &vvc->sps[sps_id];
	if (!sps->state) {
		sps->state = 1;
		sps->id = sps_id;
		sps->vps_id = vps_id;
	}
	sps->max_sublayers = 1 + gf_bs_read_int_log(bs, 3, "max_sublayers_minus1");
	sps->chroma_format_idc = gf_bs_read_int_log(bs, 2, "chroma_format_idc");
	sps->log2_ctu_size = 5 + gf_bs_read_int_log(bs, 2, "log2_ctu_size_minus5");
	CtbSizeY = 1<<sps->log2_ctu_size;
	sps_ptl_dpb_hrd_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_ptl_dpb_hrd_params_present_flag");
	if (sps_ptl_dpb_hrd_params_present_flag) {
		VVC_ProfileTierLevel ptl, *p_ptl;
		if (sps->vps_id) {
			p_ptl = &ptl;
		} else {
			p_ptl = &vvc->vps[0].ptl[0];
		}
		memset(p_ptl, 0, sizeof(VVC_ProfileTierLevel));
		p_ptl->pt_present = 1;
		p_ptl->ptl_max_tid = sps->max_sublayers;
		vvc_profile_tier_level(bs, p_ptl, 0);
	}
	sps->gdr_enabled = gf_bs_read_int_log(bs, 1, "gdr_enabled");
	sps->ref_pic_resampling = gf_bs_read_int_log(bs, 1, "ref_pic_resampling");
	if (sps->ref_pic_resampling)
		sps->res_change_in_clvs = gf_bs_read_int_log(bs, 1, "res_change_in_clvs");
	sps->width = gf_bs_read_ue_log(bs, "width");
sps->height = gf_bs_read_ue_log(bs, "height"); sps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_present_flag"); if (sps->conf_window) { sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); } sps->subpic_info_present = gf_bs_read_int_log(bs, 1, "subpic_info_present"); if (sps->subpic_info_present) { sps->nb_subpics = 1 + gf_bs_read_ue_log(bs, "nb_subpics_minus1"); if (sps->nb_subpics>1) { u32 tmpWidthVal, tmpHeightVal; sps->independent_subpic_flags = gf_bs_read_int_log(bs, 1, "independent_subpic_flags"); sps->subpic_same_size = gf_bs_read_int_log(bs, 1, "subpic_same_size"); tmpWidthVal = (sps->width + CtbSizeY-1) / CtbSizeY; tmpWidthVal = gf_get_bit_size(tmpWidthVal); tmpHeightVal = (sps->height + CtbSizeY-1) / CtbSizeY; tmpHeightVal = gf_get_bit_size(tmpHeightVal); for (i=0; i<sps->nb_subpics; i++) { if( !sps->subpic_same_size || !i) { if (i && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_ctu_top_left_x"); if (i && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_ctu_top_left_y"); if ((i+1 < sps->nb_subpics) && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_width_minus1"); if ((i+1 < sps->nb_subpics) && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_height_minus1"); } if (!sps->independent_subpic_flags) { gf_bs_read_int_log(bs, 1, "subpic_treated_as_pic_flag"); gf_bs_read_int_log(bs, 1, "loop_filter_across_subpic_enabled_flag"); } } sps->subpicid_len = gf_bs_read_ue_log(bs, "subpic_id_len_minus1") + 1; sps->subpicid_mapping_explicit = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_explicitly_signalled_flag"); if (sps->subpicid_mapping_explicit) { sps->subpicid_mapping_present = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (sps->subpicid_mapping_present) { for (i=0; i<sps->nb_subpics; i++) { gf_bs_read_ue_log(bs, "subpic_id"); } } } } } sps->bitdepth = gf_bs_read_ue_log(bs, "bitdepth_minus8") + 8; gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); gf_bs_read_int_log(bs, 1, "entry_point_offsets_present_flag"); sps->log2_max_poc_lsb = 4 + gf_bs_read_int_log(bs, 4, "log2_max_poc_lsb_minus4"); if ((sps->poc_msb_cycle_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_flag"))) sps->poc_msb_cycle_len = 1 + gf_bs_read_ue_log(bs, "poc_msb_cycle_len_minus1"); u8 sps_num_extra_ph_bits = 8 * gf_bs_read_int_log(bs, 2, "sps_num_extra_ph_bytes"); for (i=0; i<sps_num_extra_ph_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_ph_bit_present_flag", 1)) sps->ph_num_extra_bits++; } u8 sps_num_extra_sh_bits = 8 * gf_bs_read_int_log(bs, 2, "num_extra_sh_bytes"); for (i=0; i<sps_num_extra_sh_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_sh_bit_present_flag", i)) sps->sh_num_extra_bits++; } if (sps_ptl_dpb_hrd_params_present_flag) { u8 sps_sublayer_dpb_params_flag = 0; if (sps->max_sublayers>1) { sps_sublayer_dpb_params_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_dpb_params_flag"); } for (i=(sps_sublayer_dpb_params_flag ? 
0 : sps->max_sublayers-1); i < sps->max_sublayers; i++ ) {
			gf_bs_read_ue_log_idx(bs, "dpb_max_dec_pic_buffering_minus1", i);
			gf_bs_read_ue_log_idx(bs, "dpb_max_num_reorder_pics", i);
			gf_bs_read_ue_log_idx(bs, "dpb_max_latency_increase_plus1", i);
		}
	}
	gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2");
	gf_bs_read_int_log(bs, 1, "sps_partition_constraints_override_enabled_flag");
	gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_luma");
	u8 sps_max_mtt_hierarchy_depth_intra_slice_luma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_luma");
	if (sps_max_mtt_hierarchy_depth_intra_slice_luma != 0) {
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_luma");
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_luma");
	}
	u8 sps_qtbtt_dual_tree_intra_flag = 0;
	if (sps->chroma_format_idc) {
		sps_qtbtt_dual_tree_intra_flag = gf_bs_read_int_log(bs, 1, "sps_qtbtt_dual_tree_intra_flag");
	}
	if (sps_qtbtt_dual_tree_intra_flag) {
		gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_chroma");
		u8 sps_max_mtt_hierarchy_depth_intra_slice_chroma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_chroma");
		if (sps_max_mtt_hierarchy_depth_intra_slice_chroma != 0) {
			gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_chroma");
			gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_chroma");
		}
	}
	gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_inter_slice");
	u8 sps_max_mtt_hierarchy_depth_inter_slice = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_inter_slice");
	if (sps_max_mtt_hierarchy_depth_inter_slice != 0) {
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_inter_slice");
		gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_inter_slice");
	}
	//u8 sps_max_luma_transform_size_64_flag = 0;
	if (CtbSizeY > 32) {
		/*sps_max_luma_transform_size_64_flag = */gf_bs_read_int_log(bs, 1, "sps_max_luma_transform_size_64_flag");
	}
	u8 sps_transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_transform_skip_enabled_flag");
	if (sps_transform_skip_enabled_flag) {
		gf_bs_read_ue_log(bs, "sps_log2_transform_skip_max_size_minus2");
		gf_bs_read_int_log(bs, 1, "sps_bdpcm_enabled_flag");
	}
	if (gf_bs_read_int_log(bs, 1, "sps_mts_enabled_flag")) {
		gf_bs_read_int_log(bs, 1, "sps_explicit_mts_intra_enabled_flag");
		gf_bs_read_int_log(bs, 1, "sps_explicit_mts_inter_enabled_flag");
	}
	gf_bs_read_int_log(bs, 1, "sps_lfnst_enabled_flag");
	if (sps->chroma_format_idc) {
		u8 sps_joint_cbcr_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_joint_cbcr_enabled_flag");
		u8 sps_same_qp_table_for_chroma_flag = gf_bs_read_int_log(bs, 1, "sps_same_qp_table_for_chroma_flag");
		u32 numQpTables = sps_same_qp_table_for_chroma_flag ? 1 : (sps_joint_cbcr_enabled_flag ? 3 : 2);
		for (i=0; i<numQpTables; i++) {
			gf_bs_read_se_log_idx(bs, "sps_qp_table_start_minus26", i);
			u32 j, sps_num_points_in_qp_table = 1 + gf_bs_read_ue_log_idx(bs, "sps_num_points_in_qp_table_minus1", i);
			for (j=0; j<sps_num_points_in_qp_table; j++) {
				gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_in_val_minus1", i, j);
				gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_diff_val", i, j);
			}
		}
	}
	gf_bs_read_int_log(bs, 1, "sps_sao_enabled_flag");
	sps->alf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_alf_enabled_flag");
	if (sps->alf_enabled_flag && sps->chroma_format_idc) {
		gf_bs_read_int_log(bs, 1, "sps_ccalf_enabled_flag");
	}

	/*! TODO parse the rest !*/
	return sps_id;
}
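/* note: unlike the HEVC PPS parser earlier in this file, VVC codes pps_id on 6 bits (0..63)
   and sps_id on 4 bits (0..15), which is what the bound checks below rely on */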
static s32 gf_media_vvc_read_pps_bs_internal(GF_BitStream *bs, VVCState *vvc)
{
	u32 i;
	s32 pps_id;
	VVC_PPS *pps;

	//NAL header already read
	pps_id = gf_bs_read_int_log(bs, 6, "pps_id");

	if ((pps_id < 0) || (pps_id >= 64)) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong PPS ID %d in PPS\n", pps_id));
		return -1;
	}
	pps = &vvc->pps[pps_id];

	if (!pps->state) {
		pps->id = pps_id;
		pps->state = 1;
	}
	pps->sps_id = gf_bs_read_int_log(bs, 4, "sps_id");
	if (pps->sps_id >= 16) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong SPS ID %d in PPS\n", pps->sps_id));
		pps->sps_id=0;
		return -1;
	}
	vvc->sps_active_idx = pps->sps_id; /*set active sps*/
	pps->mixed_nal_types = gf_bs_read_int_log(bs, 1, "mixed_nal_types");
	pps->width = gf_bs_read_ue_log(bs, "width");
	pps->height = gf_bs_read_ue_log(bs, "height");
	pps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_flag");
	if (pps->conf_window) {
		pps->cw_left = gf_bs_read_ue_log(bs, "conf_win_left_offset");
		pps->cw_right = gf_bs_read_ue_log(bs, "conf_win_right_offset");
		pps->cw_top = gf_bs_read_ue_log(bs, "conf_win_top_offset");
		pps->cw_bottom = gf_bs_read_ue_log(bs, "conf_win_bottom_offset");
	}
	//scaling window
	if (gf_bs_read_int_log(bs, 1, "scaling_window_explicit_signalling_flag")) {
		gf_bs_read_se_log(bs, "scaling_win_left_offset");
		gf_bs_read_se_log(bs, "scaling_win_right_offset");
		gf_bs_read_se_log(bs, "scaling_win_top_offset");
		gf_bs_read_se_log(bs, "scaling_win_bottom_offset");
	}
	pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag");
	pps->no_pic_partition_flag = gf_bs_read_int_log(bs, 1, "no_pic_partition_flag");
	pps->subpic_id_mapping_present_flag = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag");
	if (pps->subpic_id_mapping_present_flag) {
		u32 pps_subpic_id_len, pps_num_subpics=0;
		if (!pps->no_pic_partition_flag) {
			pps_num_subpics = 1+gf_bs_read_ue_log(bs, "pps_num_subpics_minus1");
		}
		pps_subpic_id_len = 1 + gf_bs_read_ue(bs);
		for (i=0; i<pps_num_subpics; i++) {
			gf_bs_read_int_log_idx(bs, pps_subpic_id_len, "subpic_id", i);
		}
	}
	if (!pps->no_pic_partition_flag) {
		gf_bs_read_int_log(bs, 2, "pps_log2_ctu_size_minus5");
		u32 num_exp_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_columns_minus1");
		u32 num_exp_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_rows_minus1");
		for (i=0; i<num_exp_tile_columns; i++)
			gf_bs_read_ue_log_idx(bs, "tile_column_width_minus1", i);
		for (i=0; i<num_exp_tile_rows; i++)
			gf_bs_read_ue_log_idx(bs, "tile_row_height_minus1", i);

		//todo parse the rest
		return pps_id;
	}
	//todo parse the rest
	return pps_id;
}

static s32 vvc_parse_picture_header(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si)
{
	u32 pps_id;
	si->irap_or_gdr_pic = gf_bs_read_int_log(bs, 1, "irap_or_gdr_pic");
	si->non_ref_pic = gf_bs_read_int_log(bs, 1, "non_ref_pic");
	if (si->irap_or_gdr_pic)
		si->gdr_pic = gf_bs_read_int_log(bs, 1, "gdr_pic");
	if ((si->inter_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "inter_slice_allowed_flag")))
		si->intra_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "intra_slice_allowed_flag");

	pps_id = gf_bs_read_ue_log(bs, "pps_id");
	if (pps_id >= 64) return -1;
	si->pps = &vvc->pps[pps_id];
	si->sps = &vvc->sps[si->pps->sps_id];
	si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb");

	si->recovery_point_valid = 0;
	si->gdr_recovery_count = 0;
	if (si->gdr_pic) {
		si->recovery_point_valid = 1;
		si->gdr_recovery_count = gf_bs_read_ue_log(bs, "gdr_recovery_count");
	}
	gf_bs_read_int_log(bs, si->sps->ph_num_extra_bits, "ph_extra_bits");

	if (si->sps->poc_msb_cycle_flag) {
		if ( (si->poc_msb_cycle_present_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_present_flag"))) {
			si->poc_msb_cycle = gf_bs_read_int_log(bs, si->sps->poc_msb_cycle_len, "poc_msb_cycle");
		}
	}
	return 0;
}

static s32 vvc_parse_slice(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si)
{
//	u32 CurrSubpicIdx = 0;

	si->picture_header_in_slice_header_flag = gf_bs_read_int_log(bs, 1, "picture_header_in_slice_header_flag");
	if (si->picture_header_in_slice_header_flag) {
		GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[VVC] Picture header in slice header incomplete support, cannot guess slice type\n"));
		si->slice_type = GF_VVC_SLICE_TYPE_UNKNOWN;
		return vvc_parse_picture_header(bs, vvc, si);
	}
	if (!si->sps) return -1;
	si->slice_type = GF_VVC_SLICE_TYPE_I;
	if (gf_bs_read_int_log(bs, 1, "sps_subpic_info_present_flag")) {
		gf_bs_read_int_log(bs, si->sps->subpicid_len, "subpic_id");
		//todo update CurrSubpicIdx
	}
	if (si->pps->rect_slice_flag) {
		GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[VVC] tiling parsing not supported - patch welcome\n"));
		return 0;
	}
	gf_bs_read_int_log(bs, si->sps->sh_num_extra_bits, "num_extra_bits");
	/* if( !pps_rect_slice_flag && NumTilesInPic - sh_slice_address > 1 )
		sh_num_tiles_in_slice_minus1 */
	if (si->inter_slice_allowed_flag)
		si->slice_type = gf_bs_read_int_log(bs, 2, "slice_type");
	return 0;
}

/*this needs further tests !*/
static void vvc_compute_poc(VVCSliceInfo *si)
{
	u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb);

	/*POC reset for IDR frames, NOT for CRA*/
	if (si->irap_or_gdr_pic && !si->gdr_pic) {
		si->poc_lsb_prev = 0;
		si->poc_msb_prev = 0;
	}
	if (si->poc_msb_cycle_present_flag) {
		si->poc_msb = si->poc_msb_cycle;
	} else {
		if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2))
			si->poc_msb = si->poc_msb_prev + max_poc_lsb;
		else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2))
			si->poc_msb = si->poc_msb_prev - max_poc_lsb;
		else
			si->poc_msb = si->poc_msb_prev;
	}
	si->poc = si->poc_msb + si->poc_lsb;
}

GF_EXPORT
s32 gf_media_vvc_parse_nalu_bs(GF_BitStream *bs, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id)
{
	Bool is_slice = GF_FALSE;
	s32 ret = -1;
	VVCSliceInfo n_state;

	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	memcpy(&n_state, &vvc->s_info, sizeof(VVCSliceInfo));
	if (!vvc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1;

	n_state.nal_unit_type = *nal_unit_type;

	switch (n_state.nal_unit_type) {
	case GF_VVC_NALU_ACCESS_UNIT:
	case GF_VVC_NALU_END_OF_SEQ:
	case GF_VVC_NALU_END_OF_STREAM:
		ret = 1;
		break;

	case GF_VVC_NALU_SLICE_TRAIL:
	case GF_VVC_NALU_SLICE_STSA:
	case GF_VVC_NALU_SLICE_RADL:
	case GF_VVC_NALU_SLICE_RASL:
	case GF_VVC_NALU_SLICE_IDR_W_RADL:
	case GF_VVC_NALU_SLICE_IDR_N_LP:
	case GF_VVC_NALU_SLICE_CRA:
	case GF_VVC_NALU_SLICE_GDR:
		/* slice - read the info and compare.*/
		ret = vvc_parse_slice(bs, vvc, &n_state);
		if (ret < 0) return ret;

		ret = 0;
		if (n_state.picture_header_in_slice_header_flag) {
			is_slice = GF_TRUE;
			vvc_compute_poc(&n_state);
			if (vvc->s_info.poc != n_state.poc) {
				ret = 1;
				break;
			}
			if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) {
				ret = 1;
				break;
			}
		}
		break;

	case GF_VVC_NALU_PIC_HEADER:
		if (vvc_parse_picture_header(bs, vvc, &n_state)<0) {
			ret = -1;
			break;
		}
		is_slice = GF_TRUE;
		vvc_compute_poc(&n_state);
		if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) {
			ret = 1;
		}
		break;
	case GF_VVC_NALU_SEQ_PARAM:
		vvc->last_parsed_sps_id = gf_media_vvc_read_sps_bs_internal(bs, vvc, *layer_id, NULL);
		ret = (vvc->last_parsed_sps_id>=0) ? 0 : -1;
		break;
	case GF_VVC_NALU_PIC_PARAM:
		vvc->last_parsed_pps_id = gf_media_vvc_read_pps_bs_internal(bs, vvc);
		ret = (vvc->last_parsed_pps_id>=0) ? 0 : -1;
		break;
	case GF_VVC_NALU_VID_PARAM:
		vvc->last_parsed_vps_id = gf_media_vvc_read_vps_bs_internal(bs, vvc, GF_FALSE);
		ret = (vvc->last_parsed_vps_id>=0) ? 0 : -1;
		break;
	case GF_VVC_NALU_DEC_PARAM:
		ret = 0;
		break;
	case GF_VVC_NALU_APS_PREFIX:
		//we use the mix aps type + aps id (first 8 bits) as unique identifier
		vvc->last_parsed_aps_id = gf_bs_read_int_log(bs, 8, "aps_id");
		ret = 0;
		break;
	default:
		ret = 0;
		break;
	}

	/* save _prev values */
	if ((ret>0) && vvc->s_info.sps) {
//		n_state.frame_num_offset_prev = vvc->s_info.frame_num_offset;
//		n_state.frame_num_prev = vvc->s_info.frame_num;
		n_state.poc_lsb_prev = vvc->s_info.poc_lsb;
		n_state.poc_msb_prev = vvc->s_info.poc_msb;
		if (is_slice)
			n_state.prev_layer_id_plus1 = *layer_id + 1;
	}
	if (is_slice) vvc_compute_poc(&n_state);
	memcpy(&vvc->s_info, &n_state, sizeof(VVCSliceInfo));
	return ret;
}

GF_EXPORT
s32 gf_media_vvc_parse_nalu(u8 *data, u32 size, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id)
{
	GF_BitStream *bs = NULL;
	s32 ret;

	if (!vvc) {
		if (nal_unit_type) (*nal_unit_type) = data[1] >> 3;
		if (layer_id) (*layer_id) = data[0] & 0x3f;
		if (temporal_id) (*temporal_id) = (data[1] & 0x7);
		return -1;
	}
	bs = gf_bs_new(data, size, GF_BITSTREAM_READ);
	if (!bs) return -1;
	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	ret = gf_media_vvc_parse_nalu_bs(bs, vvc, nal_unit_type, temporal_id, layer_id);
	gf_bs_del(bs);
	return ret;
}

Bool gf_media_vvc_slice_is_ref(VVCState *vvc)
{
	if (!vvc->s_info.irap_or_gdr_pic) {
		return GF_FALSE;
	}
	if (vvc->s_info.gdr_pic) {
		if (vvc->s_info.recovery_point_valid) {
			vvc->s_info.recovery_point_valid = 0;
			return GF_TRUE;
		}
		return GF_FALSE;
	}
	return GF_TRUE;
}
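/* Illustrative usage sketch for the VVC NALU walker above (a sketch only; `data`/`size`
   stand for a hypothetical single NAL unit payload without its 00 00 01 start code):

	VVCState vvc;
	u8 nut, tid, lid;
	memset(&vvc, 0, sizeof(VVCState));
	s32 res = gf_media_vvc_parse_nalu(data, size, &vvc, &nut, &tid, &lid);
	// res>0: a new access unit starts here; res==0: same AU; res<0: parse error
*/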
/*
 *			GPAC - Multimedia Framework C SDK
 *
 *			Authors: Jean Le Feuvre, Romain Bouqueau, Cyril Concolato
 *			Copyright (c) Telecom ParisTech 2000-2021
 *					All rights reserved
 *
 *  This file is part of GPAC / Media Tools sub-project
 *
 *  GPAC is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  GPAC is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; see the file COPYING. If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <gpac/internal/media_dev.h>
#include <gpac/constants.h>
#include <gpac/mpeg4_odf.h>
#include <gpac/maths.h>
#include <gpac/avparse.h>

#ifndef GPAC_DISABLE_OGG
#include <gpac/internal/ogg.h>
#endif

//uncomment/define globally to remove all bitstream parsing logging from code (this will break inspect mode analyze=bs)
//#define GPAC_DISABLE_AVPARSE_LOGS

#ifndef GPAC_DISABLE_AVPARSE_LOGS
void gf_bs_log_idx(GF_BitStream *bs, u32 nBits, const char *fname, s64 val, s32 idx1, s32 idx2, s32 idx3);

#define gf_bs_log(_bs, _nBits, _fname, _val) gf_bs_log_idx(_bs, _nBits, _fname, _val, -1, -1, -1)

u32 gf_bs_read_int_log_idx3(GF_BitStream *bs, u32 nBits, const char *fname, s32 idx1, s32 idx2, s32 idx3)
{
	u32 val = gf_bs_read_int(bs, nBits);
	gf_bs_log_idx(bs, nBits, fname, val, idx1, idx2, idx3);
	return val;
}

#define gf_bs_read_int_log(_bs, _nBits, _fname) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, -1, -1, -1)
#define gf_bs_read_int_log_idx(_bs, _nBits, _fname, _idx) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, _idx, -1, -1)
#define gf_bs_read_int_log_idx2(_bs, _nBits, _fname, _idx1, _idx2) gf_bs_read_int_log_idx3(_bs, _nBits, _fname, (s32) _idx1, (s32) _idx2, -1)

#else

#define gf_bs_log(_bs, _nBits, _fname, _val)
#define gf_bs_log_idx(_bs, _nBits, _fname, _val, _idx1, _idx2, _idx3)

#define gf_bs_read_int_log(_bs, _nbb, _f) gf_bs_read_int(_bs, _nbb)
#define gf_bs_read_int_log_idx(_bs, _nbb, _f, _idx) gf_bs_read_int(_bs, _nbb)
#define gf_bs_read_int_log_idx2(_bs, _nbb, _f, _idx1, _idx2) gf_bs_read_int(_bs, _nbb)
#define gf_bs_read_int_log_idx3(_bs, _nbb, _f, _idx1, _idx2, _idx3) gf_bs_read_int(_bs, _nbb)

#endif

static const struct {
	u32 w, h;
} std_par[] = {
	{ 4, 3}, {3, 2}, {16, 9}, {5, 3}, {5, 4}, {8, 5}, {2, 1}, {1, 1},
	{0, 0},
};

GF_EXPORT
void gf_media_reduce_aspect_ratio(u32 *width, u32 *height)
{
	u32 i = 0;
	u32 w = *width;
	u32 h = *height;
	while (std_par[i].w) {
		if (std_par[i].w * h == std_par[i].h * w) {
			*width = std_par[i].w;
			*height = std_par[i].h;
			return;
		}
		i++;
	}
	//not standard one, reduce by power of 2
	i = 2;
	while (1) {
		if (w <= i) return;
		if (h <= i) return;
		if (w % i) return;
		if (h % i) return;
		*width = w / i;
		*height = h / i;
		i *= 2;
	}
}

GF_EXPORT
void gf_media_get_reduced_frame_rate(u32 *timescale, u32 *sample_dur)
{
	u32 res;
	if (!*sample_dur) return;
	res = *timescale / *sample_dur;
	if (res * (*sample_dur) == *timescale) {
		*timescale = res;
		*sample_dur = 1;
	}
	else if ((double)(*timescale * 1001 - (res + 1) * *sample_dur * 1000) / ((res + 1) * *sample_dur * 1000) < 0.001) {
		*timescale = (res + 1) * 1000;
		*sample_dur = 1001;
	}
}

struct __m4v_profile {
	u32 value;
	const char *name;
} M4VProfiles[] = {
	{0x00, "Reserved (0x00) Profile"},
	{0x01, "Simple Profile @ Level 1"},
	{0x02, "Simple Profile @ Level 2"},
	{0x03, "Simple Profile @ Level 3"},
	{0x08, "Simple Profile @ Level 0"},
	{0x10, "Simple Scalable Profile @ Level 0"},
	{0x11, "Simple Scalable Profile @ Level 1"},
	{0x12, "Simple Scalable Profile @ Level 2"},
	{0x21, "Core Profile @ Level 1"},
	{0x22, "Core Profile @ Level 2"},
	{0x32, "Main Profile @ Level 2"},
	{0x33, "Main Profile @ Level 3"},
	{0x34, "Main Profile @ Level 4"},
	{0x42, "N-bit Profile @ Level 2"},
	{0x51, "Scalable Texture Profile @ Level 1"},
	{0x61, "Simple Face Animation Profile @ Level 1"},
	{0x62, "Simple Face Animation Profile @ Level 2"},
	{0x63, "Simple FBA Profile @ Level 1"},
	{0x64, "Simple FBA Profile @ Level 2"},
	{0x71, "Basic Animated Texture Profile @ Level 1"},
	{0x72, "Basic Animated Texture Profile @ Level 2"},
	{0x7F, "AVC/H264 Profile"},
	{0x81, "Hybrid Profile @ Level 1"},
	{0x82, "Hybrid Profile @ Level 2"},
	{0x91, "Advanced Real Time Simple Profile @ Level 1"},
	{0x92, "Advanced Real Time Simple Profile @ Level 2"},
	{0x93, "Advanced Real Time Simple Profile @ Level 3"},
	{0x94, "Advanced Real Time Simple Profile @ Level 4"},
	{0xA1, "Core Scalable Profile @ Level1"},
	{0xA2, "Core Scalable Profile @ Level2"},
	{0xA3, "Core Scalable Profile @ Level3"},
	{0xB1, "Advanced Coding Efficiency Profile @ Level 1"},
	{0xB2, "Advanced Coding Efficiency Profile @ Level 2"},
	{0xB3, "Advanced Coding Efficiency Profile @ Level 3"},
	{0xB4, "Advanced Coding Efficiency Profile @ Level 4"},
	{0xC1, "Advanced Core Profile @ Level 1"},
	{0xC2, "Advanced Core Profile @ Level 2"},
	{0xD1, "Advanced Scalable Texture @ Level1"},
	{0xD2, "Advanced Scalable Texture @ Level2"},
	{0xE1, "Simple Studio Profile @ Level 1"},
	{0xE2, "Simple Studio Profile @ Level 2"},
	{0xE3, "Simple Studio Profile @ Level 3"},
	{0xE4, "Simple Studio Profile @ Level 4"},
	{0xE5, "Core Studio Profile @ Level 1"},
	{0xE6, "Core Studio Profile @ Level 2"},
	{0xE7, "Core Studio Profile @ Level 3"},
	{0xE8, "Core Studio Profile @ Level 4"},
	{0xF0, "Advanced Simple Profile @ Level 0"},
	{0xF1, "Advanced Simple Profile @ Level 1"},
	{0xF2, "Advanced Simple Profile @ Level 2"},
	{0xF3, "Advanced Simple Profile @ Level 3"},
	{0xF4, "Advanced Simple Profile @ Level 4"},
	{0xF5, "Advanced Simple Profile @ Level 5"},
	{0xF7, "Advanced Simple Profile @ Level 3b"},
	{0xF8, "Fine Granularity Scalable Profile @ Level 0"},
	{0xF9, "Fine Granularity Scalable Profile @ Level 1"},
	{0xFA, "Fine Granularity Scalable Profile @ Level 2"},
	{0xFB, "Fine Granularity Scalable Profile @ Level 3"},
	{0xFC, "Fine Granularity Scalable Profile @ Level 4"},
	{0xFD, "Fine Granularity Scalable Profile @ Level 5"},
	{0xFE, "Not part of MPEG-4 Visual profiles"},
	{0xFF, "No visual capability required"}
};

GF_EXPORT
const char *gf_m4v_get_profile_name(u8 video_pl)
{
	u32 i, count = GF_ARRAY_LENGTH(M4VProfiles);
	for (i=0; i<count; i++) {
		if ((u32)video_pl == M4VProfiles[i].value)
			return M4VProfiles[i].name;
	}
	return "ISO Reserved Profile";
}

#ifndef GPAC_DISABLE_AV_PARSERS

#define MPEG12_START_CODE_PREFIX		0x000001
#define MPEG12_PICTURE_START_CODE		0x00000100
#define MPEG12_SLICE_MIN_START			0x00000101
#define MPEG12_SLICE_MAX_START			0x000001af
#define MPEG12_USER_DATA_START_CODE		0x000001b2
#define MPEG12_SEQUENCE_START_CODE		0x000001b3
#define MPEG12_SEQUENCE_ERR_START_CODE	0x000001b4
#define MPEG12_EXT_START_CODE			0x000001b5
#define MPEG12_SEQUENCE_END_START_CODE	0x000001b7
#define MPEG12_GOP_START_CODE			0x000001b8
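/* All MPEG-1/2 start codes share the 3-byte prefix 0x000001; the scanner below matches
   that prefix and returns the full 32-bit start code to the caller */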
s32 gf_mv12_next_start_code(unsigned char *pbuffer, u32 buflen, u32 *optr, u32 *scode)
{
	u32 value;
	u32 offset;

	if (buflen < 4) return -1;
	for (offset = 0; offset < buflen - 3; offset++, pbuffer++) {
#ifdef GPAC_BIG_ENDIAN
		value = *(u32 *)pbuffer >> 8;
#else
		value = (pbuffer[0] << 16) | (pbuffer[1] << 8) | (pbuffer[2] << 0);
#endif
		if (value == MPEG12_START_CODE_PREFIX) {
			*optr = offset;
			*scode = (value << 8) | pbuffer[3];
			return 0;
		}
	}
	return -1;
}

s32 gf_mv12_next_slice_start(unsigned char *pbuffer, u32 startoffset, u32 buflen, u32 *slice_offset)
{
	u32 slicestart, code;
	while (gf_mv12_next_start_code(pbuffer + startoffset, buflen - startoffset, &slicestart, &code) >= 0) {
		if ((code >= MPEG12_SLICE_MIN_START) && (code <= MPEG12_SLICE_MAX_START)) {
			*slice_offset = slicestart + startoffset;
			return 0;
		}
		startoffset += slicestart + 4;
	}
	return -1;
}

/* MPEG-4 video (14496-2) */

struct __tag_m4v_parser {
	GF_BitStream *bs;
	Bool mpeg12, step_mode;
	u32 current_object_type;
	u32 force_next_obj_type;
	u64 current_object_start;
	u32 tc_dec, prev_tc_dec, tc_disp, prev_tc_disp;
};

GF_EXPORT
GF_M4VParser *gf_m4v_parser_new(u8 *data, u64 data_size, Bool mpeg12video)
{
	GF_M4VParser *tmp;
	if (!data || !data_size) return NULL;
	GF_SAFEALLOC(tmp, GF_M4VParser);
	if (!tmp) return NULL;
	tmp->bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ);
	tmp->mpeg12 = mpeg12video;
	return tmp;
}

GF_M4VParser *gf_m4v_parser_bs_new(GF_BitStream *bs, Bool mpeg12video)
{
	GF_M4VParser *tmp;
	GF_SAFEALLOC(tmp, GF_M4VParser);
	if (!tmp) return NULL;
	tmp->bs = bs;
	tmp->mpeg12 = mpeg12video;
	return tmp;
}

GF_EXPORT
void gf_m4v_parser_del(GF_M4VParser *m4v)
{
	gf_bs_del(m4v->bs);
	gf_free(m4v);
}

GF_EXPORT
void gf_m4v_parser_del_no_bs(GF_M4VParser *m4v)
{
	gf_free(m4v);
}

GF_EXPORT
void gf_m4v_parser_set_inspect(GF_M4VParser *m4v)
{
	if (m4v) m4v->step_mode = 1;
}

GF_EXPORT
u32 gf_m4v_parser_get_obj_type(GF_M4VParser *m4v)
{
	if (m4v) return m4v->current_object_type;
	return 0;
}

#define M4V_CACHE_SIZE		4096

s32 M4V_LoadObject(GF_M4VParser *m4v)
{
	u32 v, bpos, found;
	char m4v_cache[M4V_CACHE_SIZE];
	u64 end, cache_start, load_size;
	if (!m4v) return 0;
	if (m4v->force_next_obj_type) {
		m4v->current_object_type = m4v->force_next_obj_type - 1;
		m4v->force_next_obj_type = 0;
		return (s32)m4v->current_object_type;
	}

	bpos = 0;
	found = 0;
	load_size = 0;
	end = 0;
	cache_start = 0;
	v = 0xffffffff;
	while (!end) {
		/*refill cache*/
		if (bpos == (u32)load_size) {
			if (!gf_bs_available(m4v->bs)) break;
			load_size = gf_bs_available(m4v->bs);
			if (load_size > M4V_CACHE_SIZE) load_size = M4V_CACHE_SIZE;
			bpos = 0;
			cache_start = gf_bs_get_position(m4v->bs);
			gf_bs_read_data(m4v->bs, m4v_cache, (u32)load_size);
		}
		v = ((v << 8) & 0xFFFFFF00) | ((u8)m4v_cache[bpos]);
		bpos++;
		if ((v & 0xFFFFFF00) == 0x00000100) {
			end = cache_start + bpos - 4;
			found = 1;
			break;
		}
	}
	if (!found) return -1;
	m4v->current_object_start = end;
	gf_bs_seek(m4v->bs, end + 3);
	m4v->current_object_type = gf_bs_read_u8(m4v->bs);
	return (s32)m4v->current_object_type;
}

GF_EXPORT
void gf_m4v_rewrite_pl(u8 **o_data, u32 *o_dataLen, u8 PL)
{
	u32 pos = 0;
	unsigned char *data = (unsigned char *)*o_data;
	u32 dataLen = *o_dataLen;

	while (pos + 4 < dataLen) {
		if (!data[pos] && !data[pos + 1] && (data[pos + 2] == 0x01) && (data[pos + 3] == M4V_VOS_START_CODE)) {
			data[pos + 4] = PL;
			return;
		}
		pos++;
	}
	/*emulate VOS at beginning*/
	(*o_data) = (char *)gf_malloc(sizeof(char)*(dataLen + 5));
	(*o_data)[0] = 0;
	(*o_data)[1] = 0;
	(*o_data)[2] = 1;
	(*o_data)[3] = (char)M4V_VOS_START_CODE;
	(*o_data)[4] = PL;
	memcpy((*o_data + 5), data, sizeof(char)*dataLen);
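	/* the original buffer was copied 5 bytes into the new allocation; release it
	   and report the grown size to the caller */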
gf_free(data); (*o_dataLen) = dataLen + 5; } static GF_Err M4V_Reset(GF_M4VParser *m4v, u64 start) { gf_bs_seek(m4v->bs, start); assert(start < (u64)1<<31); m4v->current_object_start = (u32)start; m4v->current_object_type = 0; return GF_OK; } void gf_m4v_parser_reset(GF_M4VParser *m4v, u8 sc_type) { m4v->current_object_start = 0; m4v->current_object_type = 0; m4v->force_next_obj_type = sc_type; } static GF_Err gf_m4v_parse_config_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { unsigned char p[4]; u32 ext_type; s32 o_type; u8 go, par; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = 0; go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_SEQ_START_CODE: dsi->RAP_stream = 1; gf_bs_read_data(m4v->bs, (char *)p, 4); dsi->width = (p[0] << 4) | ((p[1] >> 4) & 0xf); dsi->height = ((p[1] & 0xf) << 8) | p[2]; dsi->VideoPL = GF_CODECID_MPEG1; par = (p[3] >> 4) & 0xf; switch (par) { case 2: dsi->par_num = dsi->height / 3; dsi->par_den = dsi->width / 4; break; case 3: dsi->par_num = dsi->height / 9; dsi->par_den = dsi->width / 16; break; case 4: dsi->par_num = dsi->height / 2; dsi->par_den = dsi->width / 21; break; default: dsi->par_den = dsi->par_num = 0; break; } switch (p[3] & 0xf) { case 0: break; case 1: dsi->fps = 24000.0 / 1001.0; break; case 2: dsi->fps = 24.0; break; case 3: dsi->fps = 25.0; break; case 4: dsi->fps = 30000.0 / 1001.0; break; case 5: dsi->fps = 30.0; break; case 6: dsi->fps = 50.0; break; case 7: dsi->fps = ((60.0*1000.0) / 1001.0); break; case 8: dsi->fps = 60.0; break; case 9: dsi->fps = 1; break; case 10: dsi->fps = 5; break; case 11: dsi->fps = 10; break; case 12: dsi->fps = 12; break; case 13: dsi->fps = 15; break; } break; case M2V_EXT_START_CODE: gf_bs_read_data(m4v->bs, (char *)p, 4); ext_type = ((p[0] >> 4) & 0xf); if (ext_type == 1) { dsi->VideoPL = 0x65; dsi->height = ((p[1] & 0x1) << 13) | ((p[2] & 0x80) << 5) | (dsi->height & 0x0fff); dsi->width = (((p[2] >> 5) & 0x3) << 12) | (dsi->width & 0x0fff); } break; case M2V_PIC_START_CODE: if (dsi->width) go = 0; break; default: break; /*EOS*/ case -1: go = 0; m4v->current_object_start = gf_bs_get_position(m4v->bs); break; } } M4V_Reset(m4v, 0); return GF_OK; } static const struct { u32 w, h; } m4v_sar[6] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 } }; static u8 m4v_get_sar_idx(u32 w, u32 h) { u32 i; for (i = 0; i < 6; i++) { if ((m4v_sar[i].w == w) && (m4v_sar[i].h == h)) return i; } return 0xF; } static void gf_m4v_parse_vol(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { u8 verid, par; s32 clock_rate; u8 vpl = dsi->VideoPL; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); dsi->VideoPL = vpl; verid = 0; dsi->RAP_stream = gf_bs_read_int(m4v->bs, 1); dsi->objectType = gf_bs_read_int(m4v->bs, 8); if (gf_bs_read_int(m4v->bs, 1)) { verid = gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 3); } par = gf_bs_read_int(m4v->bs, 4); if (par == 0xF) { dsi->par_num = gf_bs_read_int(m4v->bs, 8); dsi->par_den = gf_bs_read_int(m4v->bs, 8); } else if (par<6) { dsi->par_num = m4v_sar[par].w; dsi->par_den = m4v_sar[par].h; } if (gf_bs_read_int(m4v->bs, 1)) { gf_bs_read_int(m4v->bs, 3); if (gf_bs_read_int(m4v->bs, 1)) gf_bs_read_int(m4v->bs, 79); } dsi->has_shape = gf_bs_read_int(m4v->bs, 2); if (dsi->has_shape && (verid!=1) ) gf_bs_read_int(m4v->bs, 4); gf_bs_read_int(m4v->bs, 1); /*clock rate*/ dsi->clock_rate = gf_bs_read_int(m4v->bs, 16); /*marker*/ gf_bs_read_int(m4v->bs, 1); clock_rate = dsi->clock_rate-1; if (clock_rate >= 65536) 
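/* defensive clamp: vop_time_increment_resolution was read from a 16-bit field */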
clock_rate = 65535; if (clock_rate > 0) { for (dsi->NumBitsTimeIncrement = 1; dsi->NumBitsTimeIncrement < 16; dsi->NumBitsTimeIncrement++) { if (clock_rate == 1) break; clock_rate = (clock_rate >> 1); } } else { /*fix from vivien for divX*/ dsi->NumBitsTimeIncrement = 1; } /*fixed FPS stream*/ dsi->time_increment = 0; if (gf_bs_read_int(m4v->bs, 1)) { dsi->time_increment = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); } if (!dsi->has_shape) { gf_bs_read_int(m4v->bs, 1); dsi->width = gf_bs_read_int(m4v->bs, 13); gf_bs_read_int(m4v->bs, 1); dsi->height = gf_bs_read_int(m4v->bs, 13); } else { dsi->width = dsi->height = 0; } gf_bs_align(m4v->bs); } static GF_Err gf_m4v_parse_config_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { s32 o_type; u8 go; if (!m4v || !dsi) return GF_BAD_PARAM; memset(dsi, 0, sizeof(GF_M4VDecSpecInfo)); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { /*vosh*/ case M4V_VOS_START_CODE: dsi->VideoPL = (u8)gf_bs_read_u8(m4v->bs); break; case M4V_VOL_START_CODE: gf_m4v_parse_vol(m4v, dsi); /*shape will be done later*/ gf_bs_align(m4v->bs); break; case M4V_VOP_START_CODE: case M4V_GOV_START_CODE: go = 0; break; /*EOS*/ case -1: m4v->current_object_start = gf_bs_get_position(m4v->bs); return GF_EOS; /*of no interest here*/ case M4V_UDTA_START_CODE: default: break; } } return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_config(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi) { if (m4v->mpeg12) { return gf_m4v_parse_config_mpeg12(m4v, dsi); } else { return gf_m4v_parse_config_mpeg4(m4v, dsi); } } static GF_Err gf_m4v_parse_frame_mpeg12(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, val; s32 o_type; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = GF_FALSE; m4v->current_object_type = (u32)-1; *frame_type = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M2V_PIC_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; *is_coded = 1; /*skip first byte of temporal_reference*/ gf_bs_read_u8(m4v->bs); val = gf_bs_read_u8(m4v->bs); /*low temporal_reference bits followed by picture_coding_type*/ *frame_type = ((val >> 3) & 0x7) - 1; break; case M2V_GOP_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M2V_SEQ_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) { go = 0; break; } break; default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } *size = m4v->current_object_start - *start; return GF_OK; } static GF_Err gf_m4v_parse_frame_mpeg4(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { u8 go, hasVOP, firstObj, secs; s32 o_type; u32 vop_inc = 0; if (!m4v || !size || !start || !frame_type) return GF_BAD_PARAM; *size = 0; firstObj = 1; hasVOP = 0; *is_coded = 0; m4v->current_object_type = (u32)-1; *frame_type = 0; *start = 0; if (!m4v->step_mode) M4V_Reset(m4v, m4v->current_object_start); go = 1; while (go) { o_type = M4V_LoadObject(m4v); switch (o_type) { case M4V_VOP_START_CODE: /*done*/ if (hasVOP) { go = 0; break; } if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } hasVOP = 1; /*coding type*/ *frame_type = gf_bs_read_int(m4v->bs, 2); /*modulo time base*/ secs = 0; while
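/* modulo_time_base: one '1' bit per elapsed second, terminated by a '0' */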
(gf_bs_read_int(m4v->bs, 1) != 0) secs++; /*no support for B frames in parsing*/ secs += (dsi->enh_layer || *frame_type!=2) ? m4v->tc_dec : m4v->tc_disp; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*vop_time_inc*/ if (dsi->NumBitsTimeIncrement) vop_inc = gf_bs_read_int(m4v->bs, dsi->NumBitsTimeIncrement); m4v->prev_tc_dec = m4v->tc_dec; m4v->prev_tc_disp = m4v->tc_disp; if (dsi->enh_layer || *frame_type!=2) { m4v->tc_disp = m4v->tc_dec; m4v->tc_dec = secs; } *time_inc = secs * dsi->clock_rate + vop_inc; /*marker*/ gf_bs_read_int(m4v->bs, 1); /*coded*/ *is_coded = gf_bs_read_int(m4v->bs, 1); gf_bs_align(m4v->bs); break; case M4V_GOV_START_CODE: if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } if (hasVOP) go = 0; break; case M4V_VOL_START_CODE: if (m4v->step_mode) gf_m4v_parse_vol(m4v, dsi); case M4V_VOS_START_CODE: if (hasVOP) { go = 0; } else if (firstObj) { *start = m4v->current_object_start; firstObj = 0; } break; case M4V_VO_START_CODE: default: break; case -1: *size = gf_bs_get_position(m4v->bs) - *start; return GF_EOS; } if (m4v->step_mode) return GF_OK; } assert(m4v->current_object_start >= *start); *size = m4v->current_object_start - *start; return GF_OK; } GF_EXPORT GF_Err gf_m4v_parse_frame(GF_M4VParser *m4v, GF_M4VDecSpecInfo *dsi, u8 *frame_type, u32 *time_inc, u64 *size, u64 *start, Bool *is_coded) { if (m4v->mpeg12) { return gf_m4v_parse_frame_mpeg12(m4v, dsi, frame_type, time_inc, size, start, is_coded); } else { return gf_m4v_parse_frame_mpeg4(m4v, dsi, frame_type, time_inc, size, start, is_coded); } } GF_Err gf_m4v_rewrite_par(u8 **o_data, u32 *o_dataLen, s32 par_n, s32 par_d) { u64 start, end, size; GF_BitStream *mod; GF_M4VParser *m4v; Bool go = 1; m4v = gf_m4v_parser_new(*o_data, *o_dataLen, 0); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); start = 0; while (go) { u32 type = M4V_LoadObject(m4v); end = gf_bs_get_position(m4v->bs) - 4; size = end - start; /*store previous object*/ if (size) { assert (size < (u64)1<<31); gf_bs_write_data(mod, *o_data + start, (u32)size); start = end; } switch (type) { case M4V_VOL_START_CODE: gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 0, 8); gf_bs_write_int(mod, 1, 8); gf_bs_write_int(mod, M4V_VOL_START_CODE, 8); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 1), 1); gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 8), 8); start = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, (u32)start, 1); if (start) { gf_bs_write_int(mod, gf_bs_read_int(m4v->bs, 7), 7); } start = gf_bs_read_int(m4v->bs, 4); if (start == 0xF) { gf_bs_read_int(m4v->bs, 8); gf_bs_read_int(m4v->bs, 8); } if ((par_n >= 0) && (par_d >= 0)) { u8 par = m4v_get_sar_idx(par_n, par_d); gf_bs_write_int(mod, par, 4); if (par == 0xF) { gf_bs_write_int(mod, par_n, 8); gf_bs_write_int(mod, par_d, 8); } } else { gf_bs_write_int(mod, 0x0, 4); } case -1: go = 0; break; default: break; } } while (gf_bs_bits_available(m4v->bs)) { u32 b = gf_bs_read_int(m4v->bs, 1); gf_bs_write_int(mod, b, 1); } gf_m4v_parser_del(m4v); gf_free(*o_data); gf_bs_get_content(mod, o_data, o_dataLen); gf_bs_del(mod); return GF_OK; } GF_EXPORT u64 gf_m4v_get_object_start(GF_M4VParser *m4v) { return m4v->current_object_start; } #if 0 //unused Bool gf_m4v_is_valid_object_type(GF_M4VParser *m4v) { return ((s32)m4v->current_object_type == -1) ? 
0 : 1; } #endif GF_EXPORT GF_Err gf_m4v_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, 0); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e < 0 ? e : GF_OK; } GF_EXPORT GF_Err gf_mpegv12_get_config(u8 *rawdsi, u32 rawdsi_size, GF_M4VDecSpecInfo *dsi) { GF_Err e; GF_M4VParser *vparse; if (!rawdsi || !rawdsi_size) return GF_NON_COMPLIANT_BITSTREAM; vparse = gf_m4v_parser_new(rawdsi, rawdsi_size, GF_TRUE); e = gf_m4v_parse_config(vparse, dsi); dsi->next_object_start = (u32)vparse->current_object_start; gf_m4v_parser_del(vparse); return e; } #endif /* AAC parser */ struct __m4a_oti { u32 type; const char *name; } M4AObjectTypes[] = { {0, "MPEG-4 Audio Reserved"}, {1, "MPEG-4 Audio AAC Main"}, {2, "MPEG-4 Audio AAC LC"}, {3, "MPEG-4 Audio AAC SSR"}, {4, "MPEG-4 Audio AAC LTP"}, {5, "MPEG-4 Audio SBR"}, {6, "MPEG-4 Audio AAC Scalable"}, {7, "MPEG-4 Audio TwinVQ"}, {8, "MPEG-4 Audio CELP"}, {9, "MPEG-4 Audio HVXC"}, {10, "MPEG-4 Audio Reserved"}, {11, "MPEG-4 Audio Reserved"}, {12, "MPEG-4 Audio TTSI"}, {13, "MPEG-4 Audio Main synthetic"}, {14, "MPEG-4 Audio Wavetable synthesis"}, {15, "MPEG-4 Audio General MIDI"}, {16, "MPEG-4 Audio Algorithmic Synthesis and Audio FX"}, {17, "MPEG-4 Audio ER AAC LC"}, {18, "MPEG-4 Audio Reserved"}, {19, "MPEG-4 Audio ER AAC LTP"}, {20, "MPEG-4 Audio ER AAC scalable"}, {21, "MPEG-4 Audio ER TwinVQ"}, {22, "MPEG-4 Audio ER BSAC"}, {23, "MPEG-4 Audio ER AAC LD"}, {24, "MPEG-4 Audio ER CELP"}, {25, "MPEG-4 Audio ER HVXC"}, {26, "MPEG-4 Audio ER HILN"}, {27, "MPEG-4 Audio ER Parametric"}, {28, "MPEG-4 Audio SSC"}, {29, "MPEG-4 Audio ParametricStereo"}, {30, "MPEG-4 Audio Reserved"}, {31, "MPEG-4 Audio Reserved"}, {32, "MPEG-1 Audio Layer-1"}, {33, "MPEG-1 Audio Layer-2"}, {34, "MPEG-1 Audio Layer-3"}, {35, "MPEG-4 Audio DST"}, {36, "MPEG-4 Audio ALS"}, {37, "MPEG-4 Audio SLS"}, {42, "MPEG Audio xHE-AAC"}, }; GF_EXPORT const char *gf_m4a_object_type_name(u32 objectType) { u32 i, count = GF_ARRAY_LENGTH(M4AObjectTypes); for (i=0; i<count; i++) { if (objectType==M4AObjectTypes[i].type) return M4AObjectTypes[i].name; } return "MPEG-4 Audio Unknown"; } struct __m4a_profile { u32 value; const char *name; } M4AProfiles[] = { {0x00, "ISO Reserved (0x00)"}, {0x01, "Main Audio Profile @ Level 1"}, {0x02, "Main Audio Profile @ Level 2"}, {0x03, "Main Audio Profile @ Level 3"}, {0x04, "Main Audio Profile @ Level 4"}, {0x05, "Scalable Audio Profile @ Level 1"}, {0x06, "Scalable Audio Profile @ Level 2"}, {0x07, "Scalable Audio Profile @ Level 3"}, {0x08, "Scalable Audio Profile @ Level 4"}, {0x09, "Speech Audio Profile @ Level 1"}, {0x0A, "Speech Audio Profile @ Level 2"}, {0x0B, "Synthetic Audio Profile @ Level 1"}, {0x0C, "Synthetic Audio Profile @ Level 2"}, {0x0D, "Synthetic Audio Profile @ Level 3"}, {0x0E, "High Quality Audio Profile @ Level 1"}, {0x0F, "High Quality Audio Profile @ Level 2"}, {0x10, "High Quality Audio Profile @ Level 3"}, {0x11, "High Quality Audio Profile @ Level 4"}, {0x12, "High Quality Audio Profile @ Level 5"}, {0x13, "High Quality Audio Profile @ Level 6"}, {0x14, "High Quality Audio Profile @ Level 7"}, {0x15, "High Quality Audio Profile @ Level 8"}, {0x16, "Low Delay Audio Profile @ Level 1"}, {0x17, "Low Delay Audio Profile @ Level 2"}, {0x18, "Low Delay Audio Profile @ Level 3"}, {0x19, "Low Delay Audio Profile @ Level 4"},
{0x1A, "Low Delay Audio Profile @ Level 5"}, {0x1B, "Low Delay Audio Profile @ Level 6"}, {0x1C, "Low Delay Audio Profile @ Level 7"}, {0x1D, "Low Delay Audio Profile @ Level 8"}, {0x1E, "Natural Audio Profile @ Level 1"}, {0x1F, "Natural Audio Profile @ Level 2"}, {0x20, "Natural Audio Profile @ Level 3"}, {0x21, "Natural Audio Profile @ Level 4"}, {0x22, "Mobile Audio Internetworking Profile @ Level 1"}, {0x23, "Mobile Audio Internetworking Profile @ Level 2"}, {0x24, "Mobile Audio Internetworking Profile @ Level 3"}, {0x25, "Mobile Audio Internetworking Profile @ Level 4"}, {0x26, "Mobile Audio Internetworking Profile @ Level 5"}, {0x27, "Mobile Audio Internetworking Profile @ Level 6"}, {0x28, "AAC Profile @ Level 1"}, {0x29, "AAC Profile @ Level 2"}, {0x2A, "AAC Profile @ Level 4"}, {0x2B, "AAC Profile @ Level 5"}, {0x2C, "High Efficiency AAC Profile @ Level 2"}, {0x2D, "High Efficiency AAC Profile @ Level 3"}, {0x2E, "High Efficiency AAC Profile @ Level 4"}, {0x2F, "High Efficiency AAC Profile @ Level 5"}, {0x30, "High Efficiency AAC v2 Profile @ Level 2"}, {0x31, "High Efficiency AAC v2 Profile @ Level 3"}, {0x32, "High Efficiency AAC v2 Profile @ Level 4"}, {0x33, "High Efficiency AAC v2 Profile @ Level 5"}, {0x34, "Low Delay AAC Profile"}, {0x35, "Baseline MPEG Surround Profile @ Level 1"}, {0x36, "Baseline MPEG Surround Profile @ Level 2"}, {0x37, "Baseline MPEG Surround Profile @ Level 3"}, {0x38, "Baseline MPEG Surround Profile @ Level 4"}, {0x39, "Baseline MPEG Surround Profile @ Level 5"}, {0x3A, "Baseline MPEG Surround Profile @ Level 6"}, {0x3B, "High Definition AAC Profile @ Level 1"}, {0x3C, "ALS Simple Profile @ Level 1"}, {0x50, "AAC Profile @ Level 6"}, {0x51, "AAC Profile @ Level 7"}, {0x52, "High Efficiency AAC Profile @ Level 6"}, {0x53, "High Efficiency AAC Profile @ Level 7"}, {0x54, "High Efficiency AAC v2 Profile @ Level 6"}, {0x55, "High Efficiency AAC v2 Profile @ Level 7"}, {0x56, "Extended High Efficiency AAC Profile @ Level 6"}, {0x57, "Extended High Efficiency AAC Profile @ Level 7"}, {0xFE, "Not part of MPEG-4 audio profiles"}, {0xFF, "No audio capability required"} }; GF_EXPORT const char *gf_m4a_get_profile_name(u8 audio_pl) { u32 i, count = GF_ARRAY_LENGTH(M4AProfiles); for (i=0; i<count; i++) { if ((u32) audio_pl==M4AProfiles[i].value) return M4AProfiles[i].name; } return "ISO Reserved / User Private"; } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u32 gf_m4a_get_profile(GF_M4ADecSpecInfo *cfg) { switch (cfg->base_object_type) { case 2: /*AAC LC*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x28 : 0x29; /*LC@L1 or LC@L2*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2A : 0x2B; /*LC@L4 or LC@L5*/ return (cfg->base_sr <= 48000) ? 0x50 : 0x51; /*LC@L6 or LC@L7*/ case 5: /*HE-AAC - SBR*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x2C : 0x2D; /*HE@L2 or HE@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x2E : 0x2F; /*HE@L4 or HE@L5*/ return (cfg->base_sr <= 48000) ? 0x52 : 0x53; /*HE@L6 or HE@L7*/ case 29: /*HE-AACv2 - SBR+PS*/ if (cfg->nb_chan <= 2) return (cfg->base_sr <= 24000) ? 0x30 : 0x31; /*HE-AACv2@L2 or HE-AACv2@L3*/ if (cfg->nb_chan <= 5) return (cfg->base_sr <= 48000) ? 0x32 : 0x33; /*HE-AACv2@L4 or HE-AACv2@L5*/ return (cfg->base_sr <= 48000) ? 0x54 : 0x55; /*HE-AACv2@L6 or HE-AACv2@L7*/ /*default to HQ*/ default: if (cfg->nb_chan <= 2) return (cfg->base_sr < 24000) ?
0x0E : 0x0F; /*HQ@L1 or HQ@L2*/ return 0x10; /*HQ@L3*/ } } GF_EXPORT GF_Err gf_m4a_parse_program_config_element(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; cfg->program_config_element_present = 1; cfg->cpe_channels = 0; cfg->element_instance_tag = gf_bs_read_int_log(bs, 4, "element_instance_tag"); cfg->object_type = gf_bs_read_int_log(bs, 2, "object_type"); cfg->sampling_frequency_index = gf_bs_read_int_log(bs, 4, "sampling_frequency_index"); cfg->num_front_channel_elements = gf_bs_read_int_log(bs, 4, "num_front_channel_elements"); cfg->num_side_channel_elements = gf_bs_read_int_log(bs, 4, "num_side_channel_elements"); cfg->num_back_channel_elements = gf_bs_read_int_log(bs, 4, "num_back_channel_elements"); cfg->num_lfe_channel_elements = gf_bs_read_int_log(bs, 2, "num_lfe_channel_elements"); cfg->num_assoc_data_elements = gf_bs_read_int_log(bs, 3, "num_assoc_data_elements"); cfg->num_valid_cc_elements = gf_bs_read_int_log(bs, 4, "num_valid_cc_elements"); cfg->mono_mixdown_present = (Bool)gf_bs_read_int_log(bs, 1, "mono_mixdown_present"); if (cfg->mono_mixdown_present) { cfg->mono_mixdown_element_number = gf_bs_read_int_log(bs, 4, "mono_mixdown_element_number"); } cfg->stereo_mixdown_present = gf_bs_read_int_log(bs, 1, "stereo_mixdown_present"); if (cfg->stereo_mixdown_present) { cfg->stereo_mixdown_element_number = gf_bs_read_int_log(bs, 4, "stereo_mixdown_element_number"); } cfg->matrix_mixdown_idx_present = gf_bs_read_int_log(bs, 1, "matrix_mixdown_idx_present"); if (cfg->matrix_mixdown_idx_present) { cfg->matrix_mixdown_idx = gf_bs_read_int_log(bs, 2, "matrix_mixdown_idx"); cfg->pseudo_surround_enable = gf_bs_read_int_log(bs, 1, "pseudo_surround_enable"); } for (i = 0; i < cfg->num_front_channel_elements; i++) { cfg->front_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "front_element_is_cpe", i); cfg->front_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "front_element_tag_select", i); if (cfg->front_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_side_channel_elements; i++) { cfg->side_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "side_element_is_cpe", i); cfg->side_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "side_element_tag_select", i); if (cfg->side_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_back_channel_elements; i++) { cfg->back_element_is_cpe[i] = gf_bs_read_int_log_idx(bs, 1, "back_element_is_cpe", i); cfg->back_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "back_element_tag_select", i); if (cfg->back_element_is_cpe[i]) cfg->cpe_channels++; } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { cfg->lfe_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "lfe_element_tag_select", i); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { cfg->assoc_data_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "assoc_data_element_tag_select", i); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { cfg->cc_element_is_ind_sw[i] = gf_bs_read_int_log_idx(bs, 1, "cc_element_is_ind_sw", i); cfg->valid_cc_element_tag_select[i] = gf_bs_read_int_log_idx(bs, 4, "valid_cc_element_tag_select", i); } gf_bs_align(bs); cfg->comment_field_bytes = gf_bs_read_int_log(bs, 8, "comment_field_bytes"); gf_bs_read_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); cfg->nb_chan = cfg->num_front_channel_elements + cfg->num_back_channel_elements + cfg->num_side_channel_elements + cfg->num_lfe_channel_elements; cfg->nb_chan += cfg->cpe_channels; return GF_OK; } GF_EXPORT GF_Err gf_m4a_parse_config(GF_BitStream *bs, 
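/* out: filled with the parsed AudioSpecificConfig */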
GF_M4ADecSpecInfo *cfg, Bool size_known) { u32 audio_obj_type; memset(cfg, 0, sizeof(GF_M4ADecSpecInfo)); cfg->base_object_type = gf_bs_read_int_log(bs, 5, "base_object_type"); /*extended object type*/ if (cfg->base_object_type == 31) { cfg->base_object_type = 32 + gf_bs_read_int_log(bs, 6, "extended_base_object_type"); } cfg->base_sr_index = gf_bs_read_int_log(bs, 4, "base_samplerate_index"); if (cfg->base_sr_index == 0x0F) { cfg->base_sr = gf_bs_read_int_log(bs, 24, "base_samplerate"); } else { cfg->base_sr = GF_M4ASampleRates[cfg->base_sr_index]; } cfg->chan_cfg = gf_bs_read_int_log(bs, 4, "channel_configuration"); if (cfg->chan_cfg) { cfg->nb_chan = GF_M4ANumChannels[cfg->chan_cfg - 1]; } audio_obj_type = cfg->base_object_type; if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = GF_TRUE; cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "sbr_samplerate_index"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "sbr_samplerate"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "sbr_object_type"); if (cfg->sbr_object_type==31) cfg->sbr_object_type = 32 + gf_bs_read_int_log(bs, 6, "audioObjectTypeExt"); audio_obj_type = cfg->sbr_object_type; if (cfg->sbr_object_type==22) { /*ext_chan_cfg = */gf_bs_read_int_log(bs, 4, "channel_configuration"); } } /*object cfg*/ switch (audio_obj_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { Bool ext_flag; gf_bs_read_int_log(bs, 1, "frame_length_flag"); if (gf_bs_read_int_log(bs, 1, "depends_on_core_coder")) gf_bs_read_int_log(bs, 14, "delay"); ext_flag = gf_bs_read_int_log(bs, 1, "extension_flag"); if (!cfg->chan_cfg) { gf_m4a_parse_program_config_element(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_read_int_log(bs, 3, "layerN"); } if (ext_flag) { if (cfg->base_object_type == 22) { gf_bs_read_int_log(bs, 5, "numOfSubFrame"); gf_bs_read_int_log(bs, 11, "layer_length"); } if ((cfg->base_object_type == 17) || (cfg->base_object_type == 19) || (cfg->base_object_type == 20) || (cfg->base_object_type == 23) ) { gf_bs_read_int_log(bs, 1, "aacSectionDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacScalefactorDataResilienceFlag"); gf_bs_read_int_log(bs, 1, "aacSpectralDataResilienceFlag"); } gf_bs_read_int_log(bs, 1, "extensionFlag3"); } } break; } /*ER cfg*/ switch (audio_obj_type) { case 17: case 19: case 20: case 21: case 22: case 23: case 24: case 25: case 26: case 27: { u32 epConfig = gf_bs_read_int_log(bs, 2, "epConfig"); if ((epConfig == 2) || (epConfig == 3)) { } if (epConfig == 3) { gf_bs_read_int_log(bs, 1, "directMapping"); } } break; } if (size_known && (cfg->base_object_type != 5) && (cfg->base_object_type != 29)) { while (gf_bs_available(bs) >= 2) { u32 sync = gf_bs_peek_bits(bs, 11, 0); if (sync == 0x2b7) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->sbr_object_type = gf_bs_read_int_log(bs, 5, "extensionAudioObjectType "); cfg->has_sbr = gf_bs_read_int_log(bs, 1, "sbrPresentFlag"); if (cfg->has_sbr) { cfg->sbr_sr_index = gf_bs_read_int_log(bs, 4, "extensionSamplingFrequencyIndex"); if (cfg->sbr_sr_index == 0x0F) { cfg->sbr_sr = gf_bs_read_int_log(bs, 24, "extensionSamplingFrequency"); } else { cfg->sbr_sr = GF_M4ASampleRates[cfg->sbr_sr_index]; } } } else if (sync == 0x548) { gf_bs_read_int_log(bs, 11, "syncExtensionType"); cfg->has_ps = 
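/* backward-compatible PS signaling via the 0x548 sync extension */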
gf_bs_read_int_log(bs, 1, "hasParametricStereo"); if (cfg->has_ps) cfg->nb_chan = 1; } else { break; } } } cfg->audioPL = gf_m4a_get_profile(cfg); return GF_OK; } GF_EXPORT GF_Err gf_m4a_get_config(u8 *dsi, u32 dsi_size, GF_M4ADecSpecInfo *cfg) { GF_BitStream *bs; if (!dsi || !dsi_size || (dsi_size < 2)) return GF_NON_COMPLIANT_BITSTREAM; bs = gf_bs_new(dsi, dsi_size, GF_BITSTREAM_READ); gf_m4a_parse_config(bs, cfg, GF_TRUE); gf_bs_del(bs); return GF_OK; } u32 gf_latm_get_value(GF_BitStream *bs) { u32 i, tmp, value = 0; u32 bytesForValue = gf_bs_read_int(bs, 2); for (i = 0; i <= bytesForValue; i++) { value <<= 8; tmp = gf_bs_read_int(bs, 8); value += tmp; } return value; } GF_EXPORT u32 gf_m4a_get_channel_cfg(u32 nb_chan) { u32 i, count = sizeof(GF_M4ANumChannels) / sizeof(u32); for (i = 0; i < count; i++) { if (GF_M4ANumChannels[i] == nb_chan) return i + 1; } return 0; } GF_EXPORT GF_Err gf_m4a_write_program_config_element_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { u32 i; gf_bs_write_int(bs, cfg->element_instance_tag, 4); gf_bs_write_int(bs, cfg->object_type, 2); gf_bs_write_int(bs, cfg->sampling_frequency_index, 4); gf_bs_write_int(bs, cfg->num_front_channel_elements, 4); gf_bs_write_int(bs, cfg->num_side_channel_elements, 4); gf_bs_write_int(bs, cfg->num_back_channel_elements, 4); gf_bs_write_int(bs, cfg->num_lfe_channel_elements, 2); gf_bs_write_int(bs, cfg->num_assoc_data_elements, 3); gf_bs_write_int(bs, cfg->num_valid_cc_elements, 4); gf_bs_write_int(bs, cfg->mono_mixdown_present, 1); if (cfg->mono_mixdown_present) { gf_bs_write_int(bs, cfg->mono_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->stereo_mixdown_present, 1); if (cfg->stereo_mixdown_present) { gf_bs_write_int(bs, cfg->stereo_mixdown_element_number, 4); } gf_bs_write_int(bs, cfg->matrix_mixdown_idx_present, 1); if (cfg->matrix_mixdown_idx_present) { gf_bs_write_int(bs, cfg->matrix_mixdown_idx, 2); gf_bs_write_int(bs, cfg->pseudo_surround_enable, 1); } for (i = 0; i < cfg->num_front_channel_elements; i++) { gf_bs_write_int(bs, cfg->front_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->front_element_tag_select[i], 4); } for (i = 0; i < cfg->num_side_channel_elements; i++) { gf_bs_write_int(bs, cfg->side_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->side_element_tag_select[i], 4); } for (i = 0; i < cfg->num_back_channel_elements; i++) { gf_bs_write_int(bs, cfg->back_element_is_cpe[i], 1); gf_bs_write_int(bs, cfg->back_element_tag_select[i], 4); } for (i = 0; i < cfg->num_lfe_channel_elements; i++) { gf_bs_write_int(bs, cfg->lfe_element_tag_select[i], 4); } for (i = 0; i < cfg->num_assoc_data_elements; i++) { gf_bs_write_int(bs, cfg->assoc_data_element_tag_select[i], 4); } for (i = 0; i < cfg->num_valid_cc_elements; i++) { gf_bs_write_int(bs, cfg->cc_element_is_ind_sw[i], 1); gf_bs_write_int(bs, cfg->valid_cc_element_tag_select[i], 4); } gf_bs_align(bs); gf_bs_write_int(bs, cfg->comment_field_bytes, 8); gf_bs_write_data(bs, (char *)cfg->comments, cfg->comment_field_bytes); return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config_bs(GF_BitStream *bs, GF_M4ADecSpecInfo *cfg) { if (!cfg->base_sr_index) { if (!cfg->base_sr) return GF_BAD_PARAM; while (GF_M4ASampleRates[cfg->base_sr_index]) { if (GF_M4ASampleRates[cfg->base_sr_index] == cfg->base_sr) break; cfg->base_sr_index++; } } if (cfg->sbr_sr && !cfg->sbr_sr_index) { while (GF_M4ASampleRates[cfg->sbr_sr_index]) { if (GF_M4ASampleRates[cfg->sbr_sr_index] == cfg->sbr_sr) break; cfg->sbr_sr_index++; } } /*extended object type*/ if (cfg->base_object_type >= 32) { 
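/* escape: object types >= 32 are written as 31 followed by (type - 32) on 6 bits */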
gf_bs_write_int(bs, 31, 5); gf_bs_write_int(bs, cfg->base_object_type - 32, 6); } else { gf_bs_write_int(bs, cfg->base_object_type, 5); } gf_bs_write_int(bs, cfg->base_sr_index, 4); if (cfg->base_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->base_sr, 24); } if (cfg->program_config_element_present) { gf_bs_write_int(bs, 0, 4); } else { cfg->chan_cfg = gf_m4a_get_channel_cfg(cfg->nb_chan); if (!cfg->chan_cfg) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AAC] Cannot write decoder config, ProgramConfigElement is missing and channel configuration is not a predefined one !\n")); return GF_BAD_PARAM; } gf_bs_write_int(bs, cfg->chan_cfg, 4); } if (cfg->base_object_type == 5 || cfg->base_object_type == 29) { if (cfg->base_object_type == 29) { cfg->has_ps = 1; cfg->nb_chan = 1; } cfg->has_sbr = 1; gf_bs_write_int(bs, cfg->sbr_sr_index, 4); if (cfg->sbr_sr_index == 0x0F) { gf_bs_write_int(bs, cfg->sbr_sr, 24); } gf_bs_write_int(bs, cfg->sbr_object_type, 5); } /*object cfg*/ switch (cfg->base_object_type) { case 1: case 2: case 3: case 4: case 6: case 7: case 17: case 19: case 20: case 21: case 22: case 23: case 42: { /*frame length flag*/ gf_bs_write_int(bs, 0, 1); /*depends on core coder*/ gf_bs_write_int(bs, 0, 1); /*ext flag*/ gf_bs_write_int(bs, 0, 1); if (cfg->program_config_element_present) { gf_m4a_write_program_config_element_bs(bs, cfg); } if ((cfg->base_object_type == 6) || (cfg->base_object_type == 20)) { gf_bs_write_int(bs, 0, 3); } } break; } /*ER cfg - not supported*/ /*implicit sbr/ps signaling not written here, cf reframe_adts*/ return GF_OK; } GF_EXPORT GF_Err gf_m4a_write_config(GF_M4ADecSpecInfo *cfg, u8 **dsi, u32 *dsi_size) { GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); gf_m4a_write_config_bs(bs, cfg); gf_bs_get_content(bs, dsi, dsi_size); gf_bs_del(bs); return GF_OK; } /*AV1 parsing*/ static u32 av1_read_ns(GF_BitStream *bs, u32 n, const char *fname) { u32 v, res; Bool extra_bit; int w = (u32)(log(n) / log(2)) + 1; u32 m = (1 << w) - n; assert(w < 32); v = gf_bs_read_int(bs, w - 1); if (v < m) { if (fname) { gf_bs_log(bs, w-1, fname, v); } return v; } extra_bit = gf_bs_read_int(bs, 1); res = (v << 1) - m + extra_bit; if (fname) { gf_bs_log(bs, w, fname, res); } return res; } static void av1_color_config(GF_BitStream *bs, AV1State *state) { state->config->high_bitdepth = gf_bs_read_int_log(bs, 1, "high_bitdepth"); state->bit_depth = 8; if (state->config->seq_profile == 2 && state->config->high_bitdepth) { state->config->twelve_bit = gf_bs_read_int_log(bs, 1, "twelve_bit"); state->bit_depth = state->config->twelve_bit ? 12 : 10; } else if (state->config->seq_profile <= 2) { state->bit_depth = state->config->high_bitdepth ? 10 : 8; } state->config->monochrome = GF_FALSE; if (state->config->seq_profile == 1) { state->config->monochrome = GF_FALSE; } else { state->config->monochrome = gf_bs_read_int_log(bs, 1, "monochrome"); } /*NumPlanes = mono_chrome ? 
1 : 3;*/ state->color_description_present_flag = gf_bs_read_int_log(bs, 1, "color_description_present_flag"); if (state->color_description_present_flag) { state->color_primaries = gf_bs_read_int_log(bs, 8, "color_primaries"); state->transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); state->matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } else { state->color_primaries = 2/*CP_UNSPECIFIED*/; state->transfer_characteristics = 2/*TC_UNSPECIFIED*/; state->matrix_coefficients = 2/*MC_UNSPECIFIED*/; } if (state->config->monochrome) { state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; state->config->chroma_sample_position = 0/*CSP_UNKNOWN*/; state->separate_uv_delta_q = 0; return; } else if (state->color_primaries == 0/*CP_BT_709*/ && state->transfer_characteristics == 13/*TC_SRGB*/ && state->matrix_coefficients == 0/*MC_IDENTITY*/) { state->color_range = GF_TRUE; state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; state->color_range = gf_bs_read_int_log(bs, 1, "color_range"); if (state->config->seq_profile == 0) { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_TRUE; } else if (state->config->seq_profile == 1) { state->config->chroma_subsampling_x = GF_FALSE; state->config->chroma_subsampling_y = GF_FALSE; } else { if (state->bit_depth == 12) { state->config->chroma_subsampling_x = gf_bs_read_int_log(bs, 1, "chroma_subsampling_x"); if (state->config->chroma_subsampling_x) state->config->chroma_subsampling_y = gf_bs_read_int_log(bs, 1, "chroma_subsampling_y"); else state->config->chroma_subsampling_y = GF_FALSE; } else { state->config->chroma_subsampling_x = GF_TRUE; state->config->chroma_subsampling_y = GF_FALSE; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y) { state->config->chroma_sample_position = gf_bs_read_int_log(bs, 2, "chroma_sample_position"); } } state->separate_uv_delta_q = gf_bs_read_int_log(bs, 1, "separate_uv_delta_q"); } static u32 av1_uvlc(GF_BitStream *bs, const char *fname) { u32 res; u8 leadingZeros = 0; while (1) { Bool done = gf_bs_read_int(bs, 1); if (done) break; leadingZeros++; } if (leadingZeros >= 32) { return 0xFFFFFFFF; } res = gf_bs_read_int(bs, leadingZeros) + (1 << leadingZeros) - 1; gf_bs_log(bs, 2*leadingZeros, fname, res); return res; } static void timing_info(GF_BitStream *bs, AV1State *state) { u32 time_scale = 0; u32 num_units_in_display_tick = gf_bs_read_int_log(bs, 32, "num_units_in_display_tick"); if (num_units_in_display_tick == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] num_units_in_display_tick must be greater than 0.\n")); } time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); if (time_scale == 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] time_scale must be greater than 0.\n")); } state->equal_picture_interval = gf_bs_read_int_log(bs, 1, "equal_picture_interval"); if (state->equal_picture_interval) { u32 num_ticks_per_picture_minus_1 = av1_uvlc(bs, "num_ticks_per_picture_minus_1"); state->tb_num = time_scale; state->tb_den = (num_ticks_per_picture_minus_1 + 1)*num_units_in_display_tick; } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] VFR not supported.\n")); //TODO: upload num_units_in_display_tick (eq. 
/*to the POC in H264), compute delta between frames, set it as dts_inc in gf_import_aom_av1()*/ } } static void decoder_model_info(AV1State *state, GF_BitStream *bs) { state->buffer_delay_length = 1 + gf_bs_read_int_log(bs, 5, "buffer_delay_length_minus1"); gf_bs_read_int_log(bs, 32, "num_units_in_decoding_tick"); state->buffer_removal_time_length = gf_bs_read_int_log(bs, 5, "buffer_removal_time_length"); state->frame_presentation_time_length = 1 + gf_bs_read_int_log(bs, 5, "frame_presentation_time_length_minus1"); } static void operating_parameters_info(GF_BitStream *bs, const u8 idx, const u8 buffer_delay_length_minus_1) { const u8 n = buffer_delay_length_minus_1 + 1; gf_bs_read_int_log(bs, n, "decoder_buffer_delay"); gf_bs_read_int_log(bs, n, "encoder_buffer_delay"); gf_bs_read_int_log(bs, 1, "low_delay_mode_flag"); } static void av1_parse_sequence_header_obu(GF_BitStream *bs, AV1State *state) { u8 buffer_delay_length_minus_1 = 0; state->frame_state.seen_seq_header = GF_TRUE; state->config->seq_profile = gf_bs_read_int_log(bs, 3, "seq_profile"); state->still_picture = gf_bs_read_int_log(bs, 1, "still_picture"); state->reduced_still_picture_header = gf_bs_read_int_log(bs, 1, "reduced_still_picture_header"); if (state->reduced_still_picture_header) { /*timing_info_present_flag = GF_FALSE;*/ /*initial_display_delay_present_flag = GF_FALSE;*/ state->operating_points_count = 1; state->config->seq_level_idx_0 = gf_bs_read_int_log(bs, 5, "seq_level_idx_0"); } else { u8 i = 0; Bool initial_display_delay_present_flag; Bool timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (timing_info_present_flag) { timing_info(bs, state); state->decoder_model_info_present_flag = gf_bs_read_int_log(bs, 1, "decoder_model_info_present_flag"); if (state->decoder_model_info_present_flag) { decoder_model_info(state, bs); } } else { state->decoder_model_info_present_flag = GF_FALSE; } initial_display_delay_present_flag = gf_bs_read_int_log(bs, 1, "initial_display_delay_present_flag"); state->operating_points_count = 1 + gf_bs_read_int_log(bs, 5, "operating_points_count_minus1"); for (i = 0; i < state->operating_points_count; i++) { u8 seq_level_idx_i, seq_tier = 0; state->operating_point_idc[i] = gf_bs_read_int_log_idx(bs, 12, "operating_point_idc", i); seq_level_idx_i = gf_bs_read_int_log_idx(bs, 5, "seq_level_idx", i); if (i == 0) state->config->seq_level_idx_0 = seq_level_idx_i; if (seq_level_idx_i > 7) { seq_tier = gf_bs_read_int_log_idx(bs, 1, "seq_tier", i); } if (i == 0) state->config->seq_tier_0 = seq_tier; if (state->decoder_model_info_present_flag) { state->decoder_model_present_for_this_op[i] = gf_bs_read_int_log_idx(bs, 1, "decoder_model_present_for_this_op", i); if (state->decoder_model_present_for_this_op[i]) { operating_parameters_info(bs, i, buffer_delay_length_minus_1); } } else { state->decoder_model_present_for_this_op[i] = 0; } if (initial_display_delay_present_flag) { if (gf_bs_read_int_log_idx(bs, 1, "initial_display_delay_present_for_this_op", i) ) { gf_bs_read_int_log_idx(bs, 4, "initial_display_delay_minus1", i); } } } } /*operatingPoint = av1_choose_operating_point(bs);*/ state->OperatingPointIdc = 0; /*TODO: operating_point_idc[operatingPoint]*/ state->frame_width_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_width_bits_minus1"); state->frame_height_bits_minus_1 = gf_bs_read_int_log(bs, 4, "frame_height_bits_minus1"); state->width = gf_bs_read_int_log(bs, state->frame_width_bits_minus_1 + 1, "width_minus1") + 1; state->height = gf_bs_read_int_log(bs,
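/* like the width, the height is coded on frame_height_bits_minus_1 + 1 bits */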
state->frame_height_bits_minus_1 + 1, "height_minus1") + 1; state->sequence_width = state->width; state->sequence_height = state->height; state->frame_id_numbers_present_flag = GF_FALSE; if (!state->reduced_still_picture_header) { state->frame_id_numbers_present_flag = gf_bs_read_int_log(bs, 1, "frame_id_numbers_present_flag"); } if (state->frame_id_numbers_present_flag) { state->delta_frame_id_length_minus_2 = gf_bs_read_int_log(bs, 4, "delta_frame_id_length_minus2"); state->additional_frame_id_length_minus_1 = gf_bs_read_int_log(bs, 3, "additional_frame_id_length_minus1"); } state->use_128x128_superblock = gf_bs_read_int_log(bs, 1, "use_128x128_superblock"); gf_bs_read_int_log(bs, 1, "enable_filter_intra"); gf_bs_read_int_log(bs, 1, "enable_intra_edge_filter"); if (state->reduced_still_picture_header) { /*enable_interintra_compound = 0; enable_masked_compound = 0; enable_dual_filter = 0; enable_jnt_comp = 0; enable_ref_frame_mvs = 0;*/ state->enable_warped_motion = 0; state->enable_order_hint = GF_FALSE; state->OrderHintBits = 0; state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { Bool seq_choose_screen_content_tools; gf_bs_read_int_log(bs, 1, "enable_interintra_compound"); gf_bs_read_int_log(bs, 1, "enable_masked_compound"); state->enable_warped_motion = gf_bs_read_int_log(bs, 1, "enable_warped_motion"); gf_bs_read_int_log(bs, 1, "enable_dual_filter"); state->enable_order_hint = gf_bs_read_int_log(bs, 1, "enable_order_hint"); if (state->enable_order_hint) { gf_bs_read_int_log(bs, 1, "enable_jnt_comp"); state->enable_ref_frame_mvs = gf_bs_read_int_log(bs, 1, "enable_ref_frame_mvs"); } else { /*enable_jnt_comp = 0*/; /*enable_ref_frame_mvs = 0*/; } seq_choose_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_choose_screen_content_tools"); state->seq_force_screen_content_tools = 0; if (seq_choose_screen_content_tools) { state->seq_force_screen_content_tools = 2/*SELECT_SCREEN_CONTENT_TOOLS*/; } else { state->seq_force_screen_content_tools = gf_bs_read_int_log(bs, 1, "seq_force_screen_content_tools"); } state->seq_force_integer_mv = 0; if (state->seq_force_screen_content_tools > 0) { const Bool seq_choose_integer_mv = gf_bs_read_int_log(bs, 1, "seq_choose_integer_mv"); if (seq_choose_integer_mv) { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } else { state->seq_force_integer_mv = gf_bs_read_int_log(bs, 1, "seq_force_integer_mv"); } } else { state->seq_force_integer_mv = 2/*SELECT_INTEGER_MV*/; } if (state->enable_order_hint) { u8 order_hint_bits_minus_1 = gf_bs_read_int_log(bs, 3, "order_hint_bits_minus1"); state->OrderHintBits = order_hint_bits_minus_1 + 1; } else { state->OrderHintBits = 0; } } state->enable_superres = gf_bs_read_int_log(bs, 1, "enable_superres"); state->enable_cdef = gf_bs_read_int_log(bs, 1, "enable_cdef"); state->enable_restoration = gf_bs_read_int_log(bs, 1, "enable_restoration"); av1_color_config(bs, state); state->film_grain_params_present = gf_bs_read_int_log(bs, 1, "film_grain_params_present"); } #define IVF_FILE_HEADER_SIZE 32 Bool gf_media_probe_ivf(GF_BitStream *bs) { u32 dw = 0; if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) return GF_FALSE; dw = gf_bs_peek_bits(bs, 32, 0); if (dw != GF_4CC('D', 'K', 'I', 'F')) { return GF_FALSE; } return GF_TRUE; } GF_Err gf_media_parse_ivf_file_header(GF_BitStream *bs, u32 *width, u32 *height, u32 *codec_fourcc, u32 *timebase_num, u32 *timebase_den, u32 *num_frames) { u32 dw = 0; if (!width || !height || !codec_fourcc || 
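/* all output pointers are mandatory */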
!timebase_den || !timebase_num || !num_frames) { assert(0); return GF_BAD_PARAM; } if (gf_bs_available(bs) < IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Not enough bytes available ("LLU").\n", gf_bs_available(bs))); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u32(bs); if (dw != GF_4CC('D', 'K', 'I', 'F')) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[IVF] Invalid signature\n")); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); if (dw != 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF version. 0 expected, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } dw = gf_bs_read_u16_le(bs); /*length of header in bytes*/ if (dw != IVF_FILE_HEADER_SIZE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong IVF header length. Expected 32 bytes, got %u\n", dw)); return GF_NON_COMPLIANT_BITSTREAM; } *codec_fourcc = gf_bs_read_u32(bs); *width = gf_bs_read_u16_le(bs); *height = gf_bs_read_u16_le(bs); *timebase_num = gf_bs_read_u32_le(bs); *timebase_den = gf_bs_read_u32_le(bs); *num_frames = gf_bs_read_u32_le(bs); gf_bs_read_u32_le(bs); /*skip unused*/ return GF_OK; } GF_Err gf_media_parse_ivf_frame_header(GF_BitStream *bs, u64 *frame_size, u64 *pts) { if (!frame_size) return GF_BAD_PARAM; *frame_size = gf_bs_read_u32_le(bs); if (*frame_size > 256 * 1024 * 1024) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[IVF] Wrong frame size "LLU"\n", *frame_size)); *frame_size = 0; return GF_NON_COMPLIANT_BITSTREAM; } *pts = gf_bs_read_u64_le(bs); return GF_OK; } GF_Err gf_media_vp9_parse_superframe(GF_BitStream *bs, u64 ivf_frame_size, u32 *num_frames_in_superframe, u32 frame_sizes[VP9_MAX_FRAMES_IN_SUPERFRAME], u32 *superframe_index_size) { u32 byte, bytes_per_framesize; u64 pos = gf_bs_get_position(bs), i = 0; GF_Err e; assert(bs && num_frames_in_superframe); /*initialize as if there is no superframe*/ memset(frame_sizes, 0, VP9_MAX_FRAMES_IN_SUPERFRAME * sizeof(frame_sizes[0])); *num_frames_in_superframe = 1; frame_sizes[0] = (u32)ivf_frame_size; *superframe_index_size = 0; e = gf_bs_seek(bs, pos + ivf_frame_size - 1); if (e) return e; byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ bytes_per_framesize = 1 + ((byte & 0x18) >> 3); *num_frames_in_superframe = (u32)(1 + (byte & 0x7)); /*superframe_index()*/ *superframe_index_size = 2 + bytes_per_framesize * *num_frames_in_superframe; gf_bs_seek(bs, pos + ivf_frame_size - *superframe_index_size); byte = gf_bs_read_u8(bs); if ((byte & 0xe0) != 0xc0) goto exit; /*no superframe*/ frame_sizes[0] = 0; for (i = 0; i < *num_frames_in_superframe; ++i) { gf_bs_read_data(bs, (char*)(frame_sizes + i), bytes_per_framesize); } exit: gf_bs_seek(bs, pos); return e; } static Bool vp9_frame_sync_code(GF_BitStream *bs) { u8 val = gf_bs_read_int_log(bs, 8, "syncbyte1"); if (val != 0x49) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte2"); if (val != 0x83) return GF_FALSE; val = gf_bs_read_int_log(bs, 8, "syncbyte3"); if (val != 0x42) return GF_FALSE; return GF_TRUE; } typedef enum { CS_UNKNOWN = 0, CS_BT_601 = 1, CS_BT_709 = 2, CS_SMPTE_170 = 3, CS_SMPTE_240 = 4, CS_BT_2020 = 5, CS_RESERVED = 6, CS_RGB = 7, } VP9_color_space; static const int VP9_CS_to_23001_8_colour_primaries[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 1 }; static const int VP9_CS_to_23001_8_transfer_characteristics[] = { -1/*undefined*/, 5, 1, 6, 7, 9, -1/*reserved*/, 13 }; static const int VP9_CS_to_23001_8_matrix_coefficients[] = { -1/*undefined*/, 6, 1, -1, -1, 9, -1/*reserved*/, 0 }; static GF_Err
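/* parses the VP9 color_config() syntax element and maps color_space to ISO/IEC 23001-8 code points via the tables above */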
vp9_color_config(GF_BitStream *bs, GF_VPConfig *vp9_cfg) { VP9_color_space color_space; if (vp9_cfg->profile >= 2) { Bool ten_or_twelve_bit = gf_bs_read_int_log(bs, 1, "ten_or_twelve_bit"); vp9_cfg->bit_depth = ten_or_twelve_bit ? 12 : 10; } else { vp9_cfg->bit_depth = 8; } color_space = gf_bs_read_int_log(bs, 3, "color_space"); vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = VP9_CS_to_23001_8_matrix_coefficients[color_space]; if (color_space != CS_RGB) { vp9_cfg->video_fullRange_flag = gf_bs_read_int_log(bs, 1, "video_fullRange_flag"); if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { u8 subsampling_x, subsampling_y, subsampling_xy_to_chroma_subsampling[2][2] = { {3, 0}, {2, 0} }; subsampling_x = gf_bs_read_int_log(bs, 1, "subsampling_x"); subsampling_y = gf_bs_read_int_log(bs, 1, "subsampling_y"); vp9_cfg->chroma_subsampling = subsampling_xy_to_chroma_subsampling[subsampling_x][subsampling_y]; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (1) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } else { vp9_cfg->chroma_subsampling = 0; } } else { vp9_cfg->video_fullRange_flag = GF_TRUE; if (vp9_cfg->profile == 1 || vp9_cfg->profile == 3) { vp9_cfg->chroma_subsampling = 3; Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] color config reserved zero (2) is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } } return GF_OK; } static void vp9_compute_image_size(int FrameWidth, int FrameHeight, int *Sb64Cols, int *Sb64Rows) { int MiCols = (FrameWidth + 7) >> 3; int MiRows = (FrameHeight + 7) >> 3; *Sb64Cols = (MiCols + 7) >> 3; *Sb64Rows = (MiRows + 7) >> 3; } static void vp9_frame_size(GF_BitStream *bs, int *FrameWidth, int *FrameHeight, int *Sb64Cols, int *Sb64Rows) { int frame_width_minus_1 = gf_bs_read_int_log(bs, 16, "frame_width_minus_1"); int frame_height_minus_1 = gf_bs_read_int_log(bs, 16, "frame_height_minus_1"); if (frame_width_minus_1 + 1 != *FrameWidth || frame_height_minus_1 + 1 != *FrameHeight) { if (*FrameWidth || *FrameHeight) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[VP9] inconsistent frame dimensions: previous was %dx%d, new one is %dx%d.\n", *FrameWidth, *FrameHeight, frame_width_minus_1 + 1, frame_height_minus_1 + 1)); } *FrameWidth = frame_width_minus_1 + 1; *FrameHeight = frame_height_minus_1 + 1; vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } static void vp9_render_size(GF_BitStream *bs, int FrameWidth, int FrameHeight, int *renderWidth, int *renderHeight) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different"); if (render_and_frame_size_different == 1) { int render_width_minus_1 = gf_bs_read_int_log(bs, 16, "render_width_minus_1"); int render_height_minus_1 = gf_bs_read_int_log(bs, 16, "render_height_minus_1"); *renderWidth = render_width_minus_1 + 1; *renderHeight = render_height_minus_1 + 1; } else { *renderWidth = FrameWidth; *renderHeight = FrameHeight; } } static s64 vp9_s(GF_BitStream *bs, int n, const char *fname, u32 idx) { s64 value = gf_bs_read_int(bs, n); Bool sign = gf_bs_read_int(bs, 1); if (sign) value = -value; gf_bs_log_idx(bs, n+1, fname, value, idx, -1, -1); return value; } static void vp9_loop_filter_params(GF_BitStream *bs) {
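/* level and sharpness are parsed only to advance the bitstream; the values are discarded */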
/*loop_filter_level = */gf_bs_read_int_log(bs, 6, "loop_filter_level"); /*loop_filter_sharpness = */gf_bs_read_int_log(bs, 3, "loop_filter_sharpness"); Bool loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled"); if (loop_filter_delta_enabled == 1) { Bool loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update"); if (loop_filter_delta_update == GF_TRUE) { int i; for (i = 0; i < 4; i++) { Bool update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i); if (update_ref_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_ref_deltas", i); } for (i = 0; i < 2; i++) { Bool update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i); if (update_mode_delta == GF_TRUE) vp9_s(bs, 6, "loop_filter_mode_deltas", i); } } } } static void vp9_quantization_params(GF_BitStream *bs) { /*base_q_idx = */gf_bs_read_int_log(bs, 8, "base_q_idx"); } #define VP9_MAX_SEGMENTS 8 #define VP9_SEG_LVL_MAX 4 static const int segmentation_feature_bits[VP9_SEG_LVL_MAX] = { 8, 6, 2, 0 }; static const int segmentation_feature_signed[VP9_SEG_LVL_MAX] = { 1, 1, 0, 0 }; #define VP9_MIN_TILE_WIDTH_B64 4 #define VP9_MAX_TILE_WIDTH_B64 64 static void vp9_segmentation_params(GF_BitStream *bs) { Bool segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled"); if (segmentation_enabled == 1) { int i; Bool segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map"); if (segmentation_update_map) { for (i = 0; i < 7; i++) /*segmentation_tree_probs[i] = read_prob()*/ /*segmentation_temporal_update = */gf_bs_read_int_log(bs, 1, "segmentation_temporal_update"); /*for (i = 0; i < 3; i++) segmentation_pred_prob[i] = segmentation_temporal_update ? read_prob() : 255*/ } Bool segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data"); if (segmentation_update_data == 1) { /*segmentation_abs_or_delta_update =*/ gf_bs_read_int_log(bs, 1, "segmentation_abs_or_delta_update"); for (i = 0; i < VP9_MAX_SEGMENTS; i++) { int j; for (j = 0; j < VP9_SEG_LVL_MAX; j++) { /*feature_value = 0*/ Bool feature_enabled = gf_bs_read_int_log(bs, 1, "feature_enabled"); /*FeatureEnabled[i][j] = feature_enabled*/ if (feature_enabled) { int bits_to_read = segmentation_feature_bits[j]; /*feature_value =*/ gf_bs_read_int_log(bs, bits_to_read, "feature_value"); if (segmentation_feature_signed[j] == 1) { /*Bool feature_sign = */gf_bs_read_int_log(bs, 1, "feature_sign"); /*if (feature_sign == 1) feature_value *= -1*/ } } /*FeatureData[i][j] = feature_value*/ } } } } } static int calc_min_log2_tile_cols(int Sb64Cols) { int minLog2 = 0; while ((VP9_MAX_TILE_WIDTH_B64 << minLog2) < Sb64Cols) minLog2++; return minLog2; } static int calc_max_log2_tile_cols(int Sb64Cols) { int maxLog2 = 1; while ((Sb64Cols >> maxLog2) >= VP9_MIN_TILE_WIDTH_B64) maxLog2++; return maxLog2 - 1; } static void vp9_tile_info(GF_BitStream *bs, int Sb64Cols) { Bool tile_rows_log2; int minLog2TileCols = calc_min_log2_tile_cols(Sb64Cols); int maxLog2TileCols = calc_max_log2_tile_cols(Sb64Cols); int tile_cols_log2 = minLog2TileCols; while (tile_cols_log2 < maxLog2TileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2) tile_cols_log2++; else break; } tile_rows_log2 = gf_bs_read_int_log(bs, 1, "tile_rows_log2"); if (tile_rows_log2) { /*Bool increment_tile_rows_log2 = */gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); //tile_rows_log2 += increment_tile_rows_log2; } } static void 
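/* when found_ref is set, frame dimensions are inherited from the selected reference frame rather than re-coded */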
vp9_frame_size_with_refs(GF_BitStream *bs, u8 refresh_frame_flags, u8 * ref_frame_idx, int * RefFrameWidth, int *RefFrameHeight, int *FrameWidth, int *FrameHeight, int *RenderWidth, int *RenderHeight, int *Sb64Cols, int *Sb64Rows) { Bool found_ref; int i; for (i = 0; i < 3; i++) { found_ref = gf_bs_read_int_log(bs, 1, "found_ref"); if (found_ref) { *FrameWidth = RefFrameWidth [ref_frame_idx[i]]; *FrameHeight = RefFrameHeight[ref_frame_idx[i]]; break; } } if (found_ref == 0) { vp9_frame_size(bs, FrameWidth, FrameHeight, Sb64Cols, Sb64Rows); } else { vp9_compute_image_size(*FrameWidth, *FrameHeight, Sb64Cols, Sb64Rows); } vp9_render_size(bs, *FrameWidth, *FrameHeight, RenderWidth, RenderHeight); } static void vp9_read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*raw_interpolation_filter = */gf_bs_read_int_log(bs, 2, "raw_interpolation_filter"); } } #define VP9_KEY_FRAME 0 GF_Err gf_media_vp9_parse_sample(GF_BitStream *bs, GF_VPConfig *vp9_cfg, Bool *key_frame, u32 *FrameWidth, u32 *FrameHeight, u32 *renderWidth, u32 *renderHeight) { Bool FrameIsIntra = GF_FALSE, profile_low_bit, profile_high_bit, show_existing_frame = GF_FALSE, frame_type = GF_FALSE, show_frame = GF_FALSE, error_resilient_mode = GF_FALSE; /*u8 frame_context_idx = 0, reset_frame_context = 0, frame_marker = 0*/; int Sb64Cols = 0, Sb64Rows = 0, i; u8 refresh_frame_flags = 0; assert(bs && key_frame); /*uncompressed header*/ /*frame_marker = */gf_bs_read_int_log(bs, 2, "frame_marker"); profile_low_bit = gf_bs_read_int_log(bs, 1, "profile_low_bit"); profile_high_bit = gf_bs_read_int_log(bs, 1, "profile_high_bit"); vp9_cfg->profile = (profile_high_bit << 1) + profile_low_bit; if (vp9_cfg->profile == 3) { Bool reserved_zero = gf_bs_read_int_log(bs, 1, "reserved_zero"); if (reserved_zero) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VP9] uncompressed header reserved zero is not zero.\n")); return GF_NON_COMPLIANT_BITSTREAM; } } show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (show_existing_frame == GF_TRUE) { /*frame_to_show_map_idx = */gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); return GF_OK; } frame_type = gf_bs_read_int_log(bs, 1, "frame_type"); show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); if (frame_type == VP9_KEY_FRAME) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); refresh_frame_flags = 0xFF; *key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; } else { Bool intra_only = GF_FALSE; *key_frame = GF_FALSE; if (show_frame == GF_FALSE) { intra_only = gf_bs_read_int_log(bs, 1, "intra_only"); } FrameIsIntra = intra_only; if (error_resilient_mode == GF_FALSE) { /*reset_frame_context = */gf_bs_read_int_log(bs, 2, "reset_frame_context"); } if (intra_only == GF_TRUE) { if (!vp9_frame_sync_code(bs)) return GF_NON_COMPLIANT_BITSTREAM; if (vp9_cfg->profile > 0) { if (vp9_color_config(bs, vp9_cfg) != GF_OK) return GF_NON_COMPLIANT_BITSTREAM; } else { u8 color_space = CS_BT_601; vp9_cfg->colour_primaries = VP9_CS_to_23001_8_colour_primaries[color_space]; vp9_cfg->transfer_characteristics = VP9_CS_to_23001_8_transfer_characteristics[color_space]; vp9_cfg->matrix_coefficients = 
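/* profile-0 intra-only frames carry no color_config(); BT.601 is implied */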
VP9_CS_to_23001_8_matrix_coefficients[color_space]; vp9_cfg->chroma_subsampling = 0; vp9_cfg->bit_depth = 8; } refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); vp9_frame_size(bs, FrameWidth, FrameHeight, &Sb64Cols, &Sb64Rows); vp9_render_size(bs, *FrameWidth, *FrameHeight, renderWidth, renderHeight); } else { refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags"); u8 ref_frame_idx[3]; for (i = 0; i < 3; i++) { ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i); /*ref_frame_sign_bias[LAST_FRAME + i] = */gf_bs_read_int_log_idx(bs, 1, "ref_frame_sign_bias", i); } vp9_frame_size_with_refs(bs, refresh_frame_flags, ref_frame_idx, vp9_cfg->RefFrameWidth, vp9_cfg->RefFrameHeight, FrameWidth, FrameHeight, renderWidth, renderHeight, &Sb64Cols, &Sb64Rows); /*allow_high_precision_mv = */gf_bs_read_int_log(bs, 1, "allow_high_precision_mv"); vp9_read_interpolation_filter(bs); } } if (error_resilient_mode == 0) { /*refresh_frame_context = */gf_bs_read_int_log(bs, 1, "refresh_frame_context"); /*frame_parallel_decoding_mode = */gf_bs_read_int_log(bs, 1, "frame_parallel_decoding_mode"); } /*frame_context_idx = */gf_bs_read_int_log(bs, 2, "frame_context_idx"); if (FrameIsIntra || error_resilient_mode) { /*setup_past_independence + save_probs ...*/ //frame_context_idx = 0; } vp9_loop_filter_params(bs); vp9_quantization_params(bs); vp9_segmentation_params(bs); vp9_tile_info(bs, Sb64Cols); /*header_size_in_bytes = */gf_bs_read_int_log(bs, 16, "header_size_in_bytes"); /*Reference frame update process (8.10 - partial)*/ for (i = 0; i < VP9_NUM_REF_FRAMES; i++) { if ((refresh_frame_flags >> i) & 1) { vp9_cfg->RefFrameWidth[i] = *FrameWidth; vp9_cfg->RefFrameHeight[i] = *FrameHeight; } } return GF_OK; } GF_Err gf_av1_parse_obu_header(GF_BitStream *bs, ObuType *obu_type, Bool *obu_extension_flag, Bool *obu_has_size_field, u8 *temporal_id, u8 *spatial_id) { Bool forbidden = gf_bs_read_int(bs, 1); if (forbidden) { return GF_NON_COMPLIANT_BITSTREAM; } *obu_type = gf_bs_read_int(bs, 4); *obu_extension_flag = gf_bs_read_int(bs, 1); *obu_has_size_field = gf_bs_read_int(bs, 1); if (gf_bs_read_int(bs, 1) /*obu_reserved_1bit*/) { return GF_NON_COMPLIANT_BITSTREAM; } if (*obu_extension_flag) { *temporal_id = gf_bs_read_int(bs, 3); *spatial_id = gf_bs_read_int(bs, 2); /*extension_header_reserved_3bits = */gf_bs_read_int(bs, 3); } return GF_OK; } #endif // GPAC_DISABLE_AV_PARSERS GF_EXPORT const char *gf_av1_get_obu_name(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: return "seq_header"; case OBU_TEMPORAL_DELIMITER: return "delimiter"; case OBU_FRAME_HEADER: return "frame_header"; case OBU_TILE_GROUP: return "tile_group"; case OBU_METADATA: return "metadata"; case OBU_FRAME: return "frame"; case OBU_REDUNDANT_FRAME_HEADER: return "redundant_frame_header"; case OBU_TILE_LIST: return "tile_list"; case OBU_PADDING: return "padding"; case OBU_RESERVED_0: case OBU_RESERVED_9: case OBU_RESERVED_10: case OBU_RESERVED_11: case OBU_RESERVED_12: case OBU_RESERVED_13: case OBU_RESERVED_14: return "reserved"; default: return "unknown"; } } Bool av1_is_obu_header(ObuType obu_type) { switch (obu_type) { case OBU_SEQUENCE_HEADER: case OBU_METADATA: // TODO add check based on the metadata type return GF_TRUE; default: return GF_FALSE; } } #ifndef GPAC_DISABLE_AV_PARSERS static Bool av1_is_obu_frame(AV1State *state, ObuType obu_type) { switch (obu_type) { case OBU_PADDING: case OBU_REDUNDANT_FRAME_HEADER: return GF_FALSE; case OBU_TEMPORAL_DELIMITER: return 
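/* temporal delimiters are only treated as frame data when explicitly requested */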
state->keep_temporal_delim ? GF_TRUE : GF_FALSE; default: return GF_TRUE; } } u64 gf_av1_leb128_read(GF_BitStream *bs, u8 *opt_Leb128Bytes) { u64 value = 0; u8 Leb128Bytes = 0, i = 0; for (i = 0; i < 8; i++) { u8 leb128_byte = gf_bs_read_u8(bs); value |= ( ((u64) (leb128_byte & 0x7f)) << (i * 7)); Leb128Bytes += 1; if (!(leb128_byte & 0x80)) { break; } } if (opt_Leb128Bytes) { *opt_Leb128Bytes = Leb128Bytes; } return value; } u32 gf_av1_leb128_size(u64 value) { u32 gf_av1_leb128_size = 0; do { ++gf_av1_leb128_size; } while ((value >>= 7) != 0); return gf_av1_leb128_size; } u64 gf_av1_leb128_write(GF_BitStream *bs, u64 value) { u32 i, leb_size = gf_av1_leb128_size(value); for (i = 0; i < leb_size; ++i) { u8 byte = value & 0x7f; value >>= 7; if (value != 0) byte |= 0x80; //more bytes follow gf_bs_write_u8(bs, byte); } return leb_size; } #define OBU_BLOCK_SIZE 4096 static void av1_add_obu_internal(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, GF_List **obu_list, AV1State *state) { char block[OBU_BLOCK_SIZE]; Bool has_size_field = 0, obu_extension_flag = 0; u8 temporal_id, spatial_id; GF_AV1_OBUArrayEntry *a = NULL; if (state && state->mem_mode) { if (!state->bs) state->bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); else gf_bs_reassign_buffer(state->bs, state->frame_obus, state->frame_obus_alloc); } else { GF_SAFEALLOC(a, GF_AV1_OBUArrayEntry); if (!a) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] Failed to allocate OBU\n")); return; } } gf_bs_seek(bs, pos); gf_av1_parse_obu_header(bs, &obu_type, &obu_extension_flag, &has_size_field, &temporal_id, &spatial_id); gf_bs_seek(bs, pos); if (has_size_field) { if (a) { a->obu = gf_malloc((size_t)obu_length); gf_bs_read_data(bs, a->obu, (u32)obu_length); a->obu_length = obu_length; } else { u32 remain = (u32)obu_length; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } return; } } else { u8 i, hdr_size = obu_extension_flag ? 
2 : 1; const u32 leb_size = (u32)gf_av1_leb128_size(obu_length); const u64 obu_size = obu_length - hdr_size; if (a) { a->obu = gf_malloc((size_t)obu_length + leb_size); a->obu_length = obu_length + leb_size; for (i = 0; i < hdr_size; ++i) { a->obu[i] = gf_bs_read_u8(bs); /*add size field flag*/ if (i == 0) a->obu[0] |= 0x02; } { u32 out_size = 0; u8 *output = NULL; GF_BitStream *bsLeb128 = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*write size field*/ gf_av1_leb128_write(bsLeb128, obu_size); assert(gf_bs_get_position(bsLeb128) == leb_size); gf_bs_get_content(bsLeb128, &output, &out_size); gf_bs_del(bsLeb128); memcpy(a->obu + hdr_size, output, out_size); gf_free(output); } gf_bs_read_data(bs, a->obu + hdr_size + leb_size, (u32)(obu_size)); assert(gf_bs_get_position(bs) == pos + obu_length); } else { u32 remain; for (i = 0; i < hdr_size; ++i) { u8 hdr_b = gf_bs_read_u8(bs); if (i == 0) hdr_b |= 0x02; /*add size field flag*/ gf_bs_write_u8(state->bs, hdr_b); } /*add size field */ gf_av1_leb128_write(state->bs, obu_size); remain = (u32)obu_length - hdr_size; while (remain) { u32 block_size = OBU_BLOCK_SIZE; if (block_size > remain) block_size = remain; gf_bs_read_data(bs, block, block_size); gf_bs_write_data(state->bs, block, block_size); remain -= block_size; } assert(gf_bs_get_position(bs) == pos + obu_length); return; } } if (!obu_list) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[AV1] internal error, no OBU list cannot add\n")); gf_free(a->obu); gf_free(a); return; } a->obu_type = obu_type; if (! *obu_list) *obu_list = gf_list_new(); gf_list_add(*obu_list, a); } static void av1_populate_state_from_obu(GF_BitStream *bs, u64 pos, u64 obu_length, ObuType obu_type, AV1State *state) { if (av1_is_obu_header(obu_type)) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.header_obus, NULL); } if (!state->skip_frames && av1_is_obu_frame(state, obu_type)) { if (!state->mem_mode) { av1_add_obu_internal(bs, pos, obu_length, obu_type, &state->frame_state.frame_obus, NULL); } else { av1_add_obu_internal(bs, pos, obu_length, obu_type, NULL, state); } } } GF_Err aom_av1_parse_temporal_unit_from_section5(GF_BitStream *bs, AV1State *state) { if (!state) return GF_BAD_PARAM; state->obu_type = -1; while (state->obu_type != OBU_TEMPORAL_DELIMITER) { GF_Err e; if (!gf_bs_available(bs)) return state->unframed ? 
GF_BUFFER_TOO_SMALL : GF_OK; u64 pos = gf_bs_get_position(bs), obu_length = 0; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] OBU (Section 5) frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Section5 OBU detected (size "LLU")\n", obu_length)); av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); } return GF_OK; } Bool gf_media_aom_probe_annexb(GF_BitStream *bs) { Bool res = GF_TRUE; u64 init_pos = gf_bs_get_position(bs); u64 sz = gf_av1_leb128_read(bs, NULL); if (!sz) res = GF_FALSE; while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (!frame_unit_size) { res = GF_FALSE; break; } if (sz < Leb128Bytes + frame_unit_size) { res = GF_FALSE; break; } sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { ObuType obu_type; u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (frame_unit_size < Leb128Bytes + obu_length) { res = GF_FALSE; break; } pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; u8 tid, sid; Bool extflag, has_size; GF_Err e = gf_av1_parse_obu_header(bs, &obu_type, &extflag, &has_size, &tid, &sid); if (e) { res = GF_FALSE; break; } if (has_size) { obu_length = (u32)gf_av1_leb128_read(bs, NULL); } else { if (obu_length >= 1 + extflag) { obu_length = obu_length - 1 - extflag; } else { res = GF_FALSE; break; } } u32 hdr_size = (u32)(gf_bs_get_position(bs) - pos); obu_length += hdr_size; if (frame_unit_size < obu_length) { res = GF_FALSE; break; } frame_unit_size -= obu_length; gf_bs_skip_bytes(bs, obu_length - hdr_size); } if (!res) break; } gf_bs_seek(bs, init_pos); return res; } GF_Err aom_av1_parse_temporal_unit_from_annexb(GF_BitStream *bs, AV1State *state) { GF_Err e; u64 tupos; u64 tusize, sz; if (!bs || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; tusize = sz = gf_av1_leb128_read(bs, NULL); tupos = gf_bs_get_position(bs); if (!sz) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] temporal unit size is 0, likely not annex B\n")); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B temporal unit detected (size "LLU") ***** \n", sz)); while (sz > 0) { u8 Leb128Bytes = 0; u64 frame_unit_size = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (sz < Leb128Bytes + frame_unit_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B sz("LLU") < Leb128Bytes("LLU") + frame_unit_size("LLU")\n", sz, Leb128Bytes, frame_unit_size)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B frame unit detected (size "LLU")\n", frame_unit_size)); sz -= Leb128Bytes + frame_unit_size; while (frame_unit_size > 0) { u64 pos, obu_length = gf_av1_leb128_read(bs, &Leb128Bytes); if (state->bs_overread) { return GF_BUFFER_TOO_SMALL; } if (frame_unit_size < Leb128Bytes + obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < Leb128Bytes("LLU") + obu_length("LLU")\n", frame_unit_size, Leb128Bytes, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] Annex B OBU detected (size "LLU")\n", obu_length)); pos = gf_bs_get_position(bs); frame_unit_size -= Leb128Bytes; e = gf_av1_parse_obu(bs, &state->obu_type, &obu_length, 
NULL, state); if (e) return e; if (obu_length != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B frame size "LLU" different from consumed bytes "LLU".\n", obu_length, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_length, state->obu_type, state); if (frame_unit_size < obu_length) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Annex B frame_unit_size("LLU") < OBU size ("LLU")\n", frame_unit_size, obu_length)); return GF_NON_COMPLIANT_BITSTREAM; } frame_unit_size -= obu_length; } } assert(sz == 0); if (tusize != gf_bs_get_position(bs) - tupos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] Annex B TU size "LLU" different from consumed bytes "LLU".\n", tusize, gf_bs_get_position(bs) - tupos)); return GF_NON_COMPLIANT_BITSTREAM; } return GF_OK; } GF_Err aom_av1_parse_temporal_unit_from_ivf(GF_BitStream *bs, AV1State *state) { u64 frame_size, pts_ignored; GF_Err e; if (gf_bs_available(bs)<12) return GF_EOS; e = gf_media_parse_ivf_frame_header(bs, &frame_size, &pts_ignored); if (e) return e; GF_LOG(GF_LOG_DEBUG, GF_LOG_CONTAINER, ("[AV1] IVF frame detected (size "LLU")\n", frame_size)); if (gf_bs_available(bs) < frame_size) return GF_EOS; while (frame_size > 0) { u64 obu_size = 0, pos = gf_bs_get_position(bs); e = gf_av1_parse_obu(bs, &state->obu_type, &obu_size, NULL, state); if (e != GF_OK) return e; if (obu_size != gf_bs_get_position(bs) - pos) { GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[AV1] IVF frame size "LLU" different from consumed bytes "LLU".\n", obu_size, gf_bs_get_position(bs) - pos)); return GF_NON_COMPLIANT_BITSTREAM; } av1_populate_state_from_obu(bs, pos, obu_size, state->obu_type, state); frame_size -= obu_size; } return GF_OK; } #define AV1_NUM_REF_FRAMES 8 #define AV1_ALL_FRAMES ((1 << AV1_NUM_REF_FRAMES) - 1) #define AV1_SUPERRES_DENOM_MIN 9 #define AV1_SUPERRES_DENOM_BITS 3 #define AV1_SUPERRES_NUM 8 #define AV1_REFS_PER_FRAME 7 #define AV1_PRIMARY_REF_NONE 7 #define MAX_TILE_WIDTH 4096 #define MAX_TILE_AREA (4096 * 2304) static u32 aom_av1_tile_log2(u32 blkSize, u32 target) { u32 k; for (k = 0; (blkSize << k) < target; k++) { } return k; } static u64 aom_av1_le(GF_BitStream *bs, u32 n, const char *name) { u32 i = 0; u64 t = 0; for (i = 0; i < n; i++) { u8 byte = gf_bs_read_int(bs, 8); t += (byte << (i * 8)); } gf_bs_log(bs, n*8, name, t); return t; } static void av1_parse_tile_info(GF_BitStream *bs, AV1State *state) { u32 i; u32 MiCols = 2 * ((state->width + 7) >> 3); u32 MiRows = 2 * ((state->height + 7) >> 3); u32 sbCols = state->use_128x128_superblock ? ((MiCols + 31) >> 5) : ((MiCols + 15) >> 4); u32 sbRows = state->use_128x128_superblock ? ((MiRows + 31) >> 5) : ((MiRows + 15) >> 4); u32 sbShift = state->use_128x128_superblock ? 
5 : 4; u32 sbSize = sbShift + 2; u32 maxTileWidthSb = MAX_TILE_WIDTH >> sbSize; u32 maxTileAreaSb = MAX_TILE_AREA >> (2 * sbSize); u32 minLog2tileCols = aom_av1_tile_log2(maxTileWidthSb, sbCols); u32 maxLog2tileCols = aom_av1_tile_log2(1, MIN(sbCols, AV1_MAX_TILE_COLS)); u32 maxLog2tileRows = aom_av1_tile_log2(1, MIN(sbRows, AV1_MAX_TILE_ROWS)); u32 minLog2Tiles = MAX(minLog2tileCols, aom_av1_tile_log2(maxTileAreaSb, sbRows * sbCols)); Bool uniform_tile_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_tile_spacing_flag"); if (uniform_tile_spacing_flag) { u32 startSb, tileWidthSb, tileHeightSb, minLog2tileRows; state->tileColsLog2 = minLog2tileCols; while (state->tileColsLog2 < maxLog2tileCols) { Bool increment_tile_cols_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_cols_log2"); if (increment_tile_cols_log2 == 1) state->tileColsLog2++; else break; } tileWidthSb = (sbCols + (1 << state->tileColsLog2) - 1) >> state->tileColsLog2; i = 0; for (startSb = 0; startSb < sbCols; startSb += tileWidthSb) { i += 1; } state->tileCols = i; minLog2tileRows = MAX((int)(minLog2Tiles - state->tileColsLog2), 0); state->tileRowsLog2 = minLog2tileRows; while (state->tileRowsLog2 < maxLog2tileRows) { Bool increment_tile_rows_log2 = gf_bs_read_int_log(bs, 1, "increment_tile_rows_log2"); if (increment_tile_rows_log2 == 1) state->tileRowsLog2++; else break; } tileHeightSb = (sbRows + (1 << state->tileRowsLog2) - 1) >> state->tileRowsLog2; i = 0; for (startSb = 0; startSb < sbRows; startSb += tileHeightSb) { i += 1; } state->tileRows = i; } else { u32 startSb, maxTileHeightSb, widestTileSb; widestTileSb = 0; startSb = 0; for (i = 0; startSb < sbCols; i++) { u32 maxWidth = MIN((int)(sbCols - startSb), maxTileWidthSb); u32 width_in_sbs_minus_1 = av1_read_ns(bs, maxWidth, "width_in_sbs_minus_1"); u32 sizeSb = width_in_sbs_minus_1 + 1; widestTileSb = MAX(sizeSb, widestTileSb); startSb += sizeSb; } if (!widestTileSb) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] widest tile is 0, broken bitstream\n")); return; } state->tileCols = i; state->tileColsLog2 = aom_av1_tile_log2(1, state->tileCols); if (minLog2Tiles > 0) maxTileAreaSb = (sbRows * sbCols) >> (minLog2Tiles + 1); else maxTileAreaSb = sbRows * sbCols; maxTileHeightSb = MAX(maxTileAreaSb / widestTileSb, 1); startSb = 0; for (i = 0; startSb < sbRows; i++) { u32 maxHeight = MIN((int)(sbRows - startSb), maxTileHeightSb); u32 height_in_sbs_minus_1 = av1_read_ns(bs, maxHeight, "height_in_sbs_minus_1"); u32 sizeSb = height_in_sbs_minus_1 + 1; startSb += sizeSb; } state->tileRows = i; state->tileRowsLog2 = aom_av1_tile_log2(1, state->tileRows); } if (state->tileColsLog2 > 0 || state->tileRowsLog2 > 0) { gf_bs_read_int_log(bs, state->tileRowsLog2 + state->tileColsLog2, "context_update_tile_id"); state->tile_size_bytes = gf_bs_read_int_log(bs, 2, "tile_size_bytes_minus1") + 1; } } static void superres_params(GF_BitStream *bs, AV1State *state) { u32 SuperresDenom; Bool use_superres; if (state->enable_superres) { use_superres = gf_bs_read_int_log(bs, 1, "use_superres"); } else { use_superres = GF_FALSE; } if (use_superres) { u8 coded_denom = gf_bs_read_int_log(bs, AV1_SUPERRES_DENOM_BITS, "coded_denom"); SuperresDenom = coded_denom + AV1_SUPERRES_DENOM_MIN; } else { SuperresDenom = AV1_SUPERRES_NUM; } state->UpscaledWidth = state->width; state->width = (state->UpscaledWidth * AV1_SUPERRES_NUM + (SuperresDenom / 2)) / SuperresDenom; } static void av1_frame_size(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { if (frame_size_override_flag) { u32 
frame_width_minus_1, frame_height_minus_1; u8 n = state->frame_width_bits_minus_1 + 1; frame_width_minus_1 = gf_bs_read_int_log(bs, n, "frame_width_minus_1"); n = state->frame_height_bits_minus_1 + 1; frame_height_minus_1 = gf_bs_read_int_log(bs, n, "frame_height_minus_1"); state->width = frame_width_minus_1 + 1; state->height = frame_height_minus_1 + 1; } else { state->width = state->sequence_width; state->height = state->sequence_height; } superres_params(bs, state); //compute_image_size(); //no bits } static void av1_render_size(GF_BitStream *bs) { Bool render_and_frame_size_different = gf_bs_read_int_log(bs, 1, "render_and_frame_size_different_flag"); if (render_and_frame_size_different == GF_TRUE) { gf_bs_read_int_log(bs, 16, "render_width_minus_1"); gf_bs_read_int_log(bs, 16, "render_height_minus_1"); //RenderWidth = render_width_minus_1 + 1; //RenderHeight = render_height_minus_1 + 1; } else { //RenderWidth = UpscaledWidth; //RenderHeight = FrameHeight; } } static void read_interpolation_filter(GF_BitStream *bs) { Bool is_filter_switchable = gf_bs_read_int_log(bs, 1, "is_filter_switchable"); if (!is_filter_switchable) { /*interpolation_filter =*/ gf_bs_read_int_log(bs, 2, "interpolation_filter"); } } static void frame_size_with_refs(GF_BitStream *bs, AV1State *state, Bool frame_size_override_flag) { Bool found_ref = GF_FALSE; u32 i = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { found_ref = gf_bs_read_int_log_idx(bs, 1, "found_ref", i); if (found_ref == 1) { #if 0 UpscaledWidth = RefUpscaledWidth[ref_frame_idx[i]]; FrameWidth = UpscaledWidth; FrameHeight = RefFrameHeight[ref_frame_idx[i]]; RenderWidth = RefRenderWidth[ref_frame_idx[i]]; RenderHeight = RefRenderHeight[ref_frame_idx[i]]; #endif break; } } if (found_ref == 0) { av1_frame_size(bs, state, frame_size_override_flag); av1_render_size(bs); } else { superres_params(bs, state); //compute_image_size(); } } static s32 av1_delta_q(GF_BitStream *bs, const char *name_flag, const char *name) { Bool delta_coded = gf_bs_read_int_log(bs, 1, name_flag); s32 delta_q = 0; if (delta_coded) { u32 signMask = 1 << (7 - 1); delta_q = gf_bs_read_int_log(bs, 7, name); if (delta_q & signMask) delta_q = delta_q - 2 * signMask; } return delta_q; } static u8 Segmentation_Feature_Bits[] = { 8,6,6,6,6,3,0,0 }; static u8 Segmentation_Feature_Signed[] = { 1, 1, 1, 1, 1, 0, 0, 0 }; static u8 av1_get_qindex(Bool ignoreDeltaQ, u32 segmentId, u32 base_q_idx, u32 delta_q_present, u32 CurrentQIndex, Bool segmentation_enabled, u8 *features_SEG_LVL_ALT_Q_enabled, s32 *features_SEG_LVL_ALT_Q) { //If seg_feature_active_idx( segmentId, SEG_LVL_ALT_Q ) is equal to 1 the following ordered steps apply: if (segmentation_enabled && features_SEG_LVL_ALT_Q_enabled[segmentId]) { //Set the variable data equal to FeatureData[ segmentId ][ SEG_LVL_ALT_Q ]. s32 data = features_SEG_LVL_ALT_Q[segmentId]; s32 qindex = base_q_idx + data; //If ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, set qindex equal to CurrentQIndex + data. if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) qindex = CurrentQIndex + data; //Return Clip3( 0, 255, qindex ). if (qindex < 0) return 0; else if (qindex > 255) return 255; else return (u8)qindex; } //Otherwise, if ignoreDeltaQ is equal to 0 and delta_q_present is equal to 1, return CurrentQIndex. 
if ((ignoreDeltaQ == 0) && (delta_q_present == 1)) return CurrentQIndex; //otherwise return base_q_idx; } enum { AV1_RESTORE_NONE = 0, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ }; #define AV1_GMC_IDENTITY 0 #define AV1_GMC_TRANSLATION 1 #define AV1_GMC_ROTZOOM 2 #define AV1_GMC_AFFINE 3 #define AV1_LAST_FRAME 1 #define AV1_LAST2_FRAME 2 #define AV1_LAST3_FRAME 3 #define AV1_GOLDEN_FRAME 4 #define AV1_BWDREF_FRAME 5 #define AV1_ALTREF2_FRAME 6 #define AV1_ALTREF_FRAME 7 #define GM_ABS_ALPHA_BITS 12 #define GM_ALPHA_PREC_BITS 15 #define GM_ABS_TRANS_ONLY_BITS 9 #define GM_TRANS_ONLY_PREC_BITS 3 #define GM_ABS_TRANS_BITS 12 #define GM_TRANS_PREC_BITS 6 #define WARPEDMODEL_PREC_BITS 16 static u32 av1_decode_subexp(GF_BitStream *bs, s32 numSyms) { s32 i = 0; s32 mk = 0; s32 k = 3; while (1) { s32 b2 = i ? k + i - 1 : k; s32 a = 1 << b2; if (numSyms <= mk + 3 * a) { s32 subexp_final_bits = av1_read_ns(bs, numSyms - mk, NULL); return subexp_final_bits + mk; } else { s32 subexp_more_bits = gf_bs_read_int(bs, 1); if (subexp_more_bits) { i++; mk += a; } else { s32 subexp_bits = gf_bs_read_int(bs, b2); return subexp_bits + mk; } } } } static GFINLINE s32 inverse_recenter(s32 r, u32 v) { if ((s64)v > (s64)(2 * r)) return v; else if (v & 1) return r - ((v + 1) >> 1); else return r + (v >> 1); } static s32 av1_decode_unsigned_subexp_with_ref(GF_BitStream *bs, s32 mx, s32 r) { u32 v = av1_decode_subexp(bs, mx); if ((r < 0) && (-(-r << 1) <= mx)) { return inverse_recenter(r, v); } else if ((r << 1) <= mx) { return inverse_recenter(r, v); } else { return mx - 1 - inverse_recenter(mx - 1 - r, v); } } static s16 av1_decode_signed_subexp_with_ref(GF_BitStream *bs, s32 low, s32 high, s32 r) { s16 x = av1_decode_unsigned_subexp_with_ref(bs, high - low, r - low); return x + low; } static void av1_read_global_param(AV1State *state, GF_BitStream *bs, u8 type, u8 ref, u8 idx) { u8 absBits = GM_ABS_ALPHA_BITS; u8 precBits = GM_ALPHA_PREC_BITS; if (idx < 2) { if (type == AV1_GMC_TRANSLATION) { absBits = GM_ABS_TRANS_ONLY_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); precBits = GM_TRANS_ONLY_PREC_BITS - (!state->frame_state.allow_high_precision_mv ? 1 : 0); } else { absBits = GM_ABS_TRANS_BITS; precBits = GM_TRANS_PREC_BITS; } } s32 precDiff = WARPEDMODEL_PREC_BITS - precBits; s32 round = (idx % 3) == 2 ? (1 << WARPEDMODEL_PREC_BITS) : 0; s32 sub = (idx % 3) == 2 ? (1 << precBits) : 0; s32 mx = (1 << absBits); s32 r = (state->PrevGmParams.coefs[ref][idx] >> precDiff) - sub; s32 val = av1_decode_signed_subexp_with_ref(bs, -mx, mx + 1, r); if (val < 0) { val = -val; state->GmParams.coefs[ref][idx] = (-(val << precDiff) + round); } else { state->GmParams.coefs[ref][idx] = (val << precDiff) + round; } } static s32 av1_get_relative_dist(s32 a, s32 b, AV1State *state) { if (!state->enable_order_hint) return 0; s32 diff = a - b; s32 m = 1 << (state->OrderHintBits - 1); diff = (diff & (m - 1)) - (diff & m); return diff; } static void av1_setup_past_independence(AV1State *state) { u32 ref, i; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { for (i = 0; i <= 5; i++) { state->PrevGmParams.coefs[ref][i] = ((i % 3 == 2) ? 
1 << WARPEDMODEL_PREC_BITS : 0); } } } static void av1_load_previous(AV1State *state, u8 primary_ref_frame, s8 *ref_frame_idx) { s8 prevFrame = ref_frame_idx[primary_ref_frame]; if (prevFrame < 0) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] load_previous: prevFrame reference index %d is invalid\n", prevFrame)); } else { state->PrevGmParams = state->SavedGmParams[prevFrame]; // load_loop_filter_params( prevFrame ) // load_segmentation_params( prevFrame ) } } static void av1_decode_frame_wrapup(AV1State *state) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { if ((state->frame_state.refresh_frame_flags >> i) & 1) { state->RefOrderHint[i] = state->frame_state.order_hint; state->SavedGmParams[i] = state->GmParams; state->RefFrameType[i] = state->frame_state.frame_type; } } state->frame_state.seen_frame_header = GF_FALSE; //Otherwise (show_existing_frame is equal to 1), if frame_type is equal to KEY_FRAME, the reference frame loading process as specified in section 7.21 is invoked if ((state->frame_state.show_existing_frame) && (state->frame_state.frame_type == AV1_KEY_FRAME)) { state->frame_state.order_hint = state->RefOrderHint[state->frame_state.frame_to_show_map_idx]; //OrderHints[ j + LAST_FRAME ] is set equal to SavedOrderHints[state->frame_to_show_map_idx ][ j + LAST_FRAME ] for j = 0..REFS_PER_FRAME-1. //gm_params[ ref ][ j ] is set equal to SavedGmParams[ frame_to_show_map_idx ][ ref ][ j ] for ref = LAST_FRAME..ALTREF_FRAME, for j = 0..5. state->GmParams = state->SavedGmParams[state->frame_state.frame_to_show_map_idx]; } } static s32 find_latest_forward(u32 curFrameHint, u8 *shiftedOrderHints, u8 *usedFrame) { u32 i; s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint < curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } return ref; } //see 7.8 of AV1 spec static void av1_set_frame_refs(AV1State *state, u8 last_frame_idx, u8 gold_frame_idx, s8 *ref_frame_idx) { u32 i; u8 usedFrame[AV1_NUM_REF_FRAMES]; u8 shiftedOrderHints[AV1_NUM_REF_FRAMES]; for (i = 0; i < AV1_REFS_PER_FRAME; i++) ref_frame_idx[i] = -1; ref_frame_idx[AV1_LAST_FRAME - AV1_LAST_FRAME] = last_frame_idx; ref_frame_idx[AV1_GOLDEN_FRAME - AV1_LAST_FRAME] = gold_frame_idx; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { usedFrame[i] = 0; } usedFrame[last_frame_idx] = 1; usedFrame[gold_frame_idx] = 1; u32 curFrameHint = 1 << (state->OrderHintBits - 1); for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { shiftedOrderHints[i] = curFrameHint + av1_get_relative_dist(state->RefOrderHint[i], state->frame_state.order_hint, state); } u8 lastOrderHint = shiftedOrderHints[last_frame_idx]; u8 goldOrderHint = shiftedOrderHints[gold_frame_idx]; //It is a requirement of bitstream conformance that lastOrderHint is strictly less than curFrameHint. if (lastOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: lastOrderHint(%d) shall be stricly less than curFrameHint(%d)\n", lastOrderHint, curFrameHint)); } //It is a requirement of bitstream conformance that goldOrderHint is strictly less than curFrameHint. 
if (goldOrderHint >= curFrameHint) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] non conformant bitstream detected while setting up frame refs: goldOrderHint(%d) shall be stricly less than curFrameHint(%d)\n", lastOrderHint, curFrameHint)); } //find_latest_backward() { s32 ref = -1; s32 latestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint >= latestOrderHint)) { ref = i; latestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //find_earliest_backward() for BWDREF_FRAME ref = -1; s32 earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_BWDREF_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //find_earliest_backward() for ALTREF2_FRAME ref = -1; earliestOrderHint = 0; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (!usedFrame[i] && ((u32)hint >= curFrameHint) && (ref < 0 || hint < earliestOrderHint)) { ref = i; earliestOrderHint = hint; } } if (ref >= 0) { ref_frame_idx[AV1_ALTREF2_FRAME - AV1_LAST_FRAME] = ref; usedFrame[ref] = 1; } //The remaining references are set to be forward references in anti-chronological order as follows: const u8 Ref_Frame_List[AV1_REFS_PER_FRAME - 2] = { AV1_LAST2_FRAME, AV1_LAST3_FRAME, AV1_BWDREF_FRAME, AV1_ALTREF2_FRAME, AV1_ALTREF_FRAME }; for (i = 0; i < AV1_REFS_PER_FRAME - 2; i++) { u8 refFrame = Ref_Frame_List[i]; if (ref_frame_idx[refFrame - AV1_LAST_FRAME] < 0) { s32 last_ref = find_latest_forward(curFrameHint, shiftedOrderHints, usedFrame); if (last_ref >= 0) { ref_frame_idx[refFrame - AV1_LAST_FRAME] = last_ref; usedFrame[last_ref] = 1; } } } //Finally, any remaining references are set to the reference frame with smallest output order as follows: ref = -1; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { s32 hint = shiftedOrderHints[i]; if (ref < 0 || hint < earliestOrderHint) { ref = i; earliestOrderHint = hint; } } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { if (ref_frame_idx[i] < 0) { ref_frame_idx[i] = ref; } } } static void av1_parse_uncompressed_header(GF_BitStream *bs, AV1State *state) { Bool error_resilient_mode = GF_FALSE, allow_screen_content_tools = GF_FALSE, force_integer_mv = GF_FALSE; Bool /*use_ref_frame_mvs = GF_FALSE,*/ FrameIsIntra = GF_FALSE, frame_size_override_flag = GF_FALSE; Bool disable_cdf_update = GF_FALSE; u8 showable_frame; u8 primary_ref_frame; u16 idLen = 0; u32 idx; s8 ref_frame_idx[AV1_REFS_PER_FRAME]; AV1StateFrame *frame_state = &state->frame_state; if (state->frame_id_numbers_present_flag) { idLen = (state->additional_frame_id_length_minus_1 + state->delta_frame_id_length_minus_2 + 3); } frame_state->refresh_frame_flags = 0; showable_frame = 0; if (state->reduced_still_picture_header) { frame_state->key_frame = GF_TRUE; FrameIsIntra = GF_TRUE; frame_state->frame_type = AV1_KEY_FRAME; frame_state->show_frame = GF_TRUE; frame_state->show_existing_frame = 0; } else { frame_state->show_existing_frame = gf_bs_read_int_log(bs, 1, "show_existing_frame"); if (frame_state->show_existing_frame == GF_TRUE) { frame_state->frame_to_show_map_idx = gf_bs_read_int_log(bs, 3, "frame_to_show_map_idx"); frame_state->frame_type = state->RefFrameType[frame_state->frame_to_show_map_idx]; if 
(state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } frame_state->refresh_frame_flags = 0; if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "display_frame_id"); } if (frame_state->frame_type == AV1_KEY_FRAME) { frame_state->refresh_frame_flags = AV1_ALL_FRAMES; } /* if (film_grain_params_present) { load_grain_params(frame_to_show_map_idx) }*/ return; } frame_state->frame_type = gf_bs_read_int_log(bs, 2, "frame_type"); FrameIsIntra = (frame_state->frame_type == AV1_INTRA_ONLY_FRAME || frame_state->frame_type == AV1_KEY_FRAME); frame_state->show_frame = gf_bs_read_int_log(bs, 1, "show_frame"); if (frame_state->is_first_frame) { frame_state->key_frame = frame_state->seen_seq_header && frame_state->show_frame && frame_state->frame_type == AV1_KEY_FRAME && frame_state->seen_frame_header; } if (frame_state->show_frame && state->decoder_model_info_present_flag && !state->equal_picture_interval) { gf_bs_read_int_log(bs, state->frame_presentation_time_length, "frame_presentation_time"); } if (frame_state->show_frame) { showable_frame = frame_state->frame_type != AV1_KEY_FRAME; } else { showable_frame = gf_bs_read_int_log(bs, 1, "showable_frame"); } if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) error_resilient_mode = GF_TRUE; else error_resilient_mode = gf_bs_read_int_log(bs, 1, "error_resilient_mode"); } if ((frame_state->frame_type == AV1_KEY_FRAME) && frame_state->show_frame) { u32 i; for (i = 0; i < AV1_NUM_REF_FRAMES; i++) { state->RefValid[i] = 0; state->RefOrderHint[i] = 0; } for (i = 0; i < AV1_REFS_PER_FRAME; i++) { state->OrderHints[AV1_LAST_FRAME + i] = 0; } } disable_cdf_update = gf_bs_read_int_log(bs, 1, "disable_cdf_update"); if (state->seq_force_screen_content_tools == 2/*SELECT_SCREEN_CONTENT_TOOLS*/) { allow_screen_content_tools = gf_bs_read_int_log(bs, 1, "allow_screen_content_tools"); } else { allow_screen_content_tools = state->seq_force_screen_content_tools; } if (allow_screen_content_tools) { if (state->seq_force_integer_mv == 2/*SELECT_INTEGER_MV*/) { force_integer_mv = gf_bs_read_int_log(bs, 1, "force_integer_mv"); } else { force_integer_mv = state->seq_force_integer_mv; } } else { force_integer_mv = 0; } if (FrameIsIntra) { force_integer_mv = 1; } if (state->frame_id_numbers_present_flag) { gf_bs_read_int_log(bs, idLen, "current_frame_id"); } if (frame_state->frame_type == AV1_SWITCH_FRAME) frame_size_override_flag = GF_TRUE; else if (state->reduced_still_picture_header) frame_size_override_flag = GF_FALSE; else frame_size_override_flag = gf_bs_read_int_log(bs, 1, "frame_size_override_flag"); frame_state->order_hint = gf_bs_read_int(bs, state->OrderHintBits); if (FrameIsIntra || error_resilient_mode) { primary_ref_frame = AV1_PRIMARY_REF_NONE; } else { primary_ref_frame = gf_bs_read_int_log(bs, 3, "primary_ref_frame"); } if (state->decoder_model_info_present_flag) { u8 buffer_removal_time_present_flag = gf_bs_read_int_log(bs, 1, "buffer_removal_time_present_flag"); if (buffer_removal_time_present_flag) { u32 opNum; for (opNum = 0; opNum < state->operating_points_count; opNum++) { if (state->decoder_model_present_for_this_op[opNum]) { u8 opPtIdc = state->operating_point_idc[opNum]; u8 inTemporalLayer = (opPtIdc >> state->temporal_id) & 1; u8 inSpatialLayer = (opPtIdc >> (state->spatial_id + 8)) & 1; if (opPtIdc == 0 || (inTemporalLayer && inSpatialLayer)) { 
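						/* Annotation: operating_point_idc is a 12-bit mask (AV1 spec 6.4.1):
						   bits 0..7 flag the temporal layers and bits 8..11 the spatial layers
						   an operating point covers, hence the shifts by temporal_id and by
						   (spatial_id + 8) above. */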
						gf_bs_read_int_log_idx(bs, state->buffer_removal_time_length, "buffer_removal_time", opNum);
					}
				}
			}
		}
	}

	if (frame_state->frame_type == AV1_SWITCH_FRAME || (frame_state->frame_type == AV1_KEY_FRAME && frame_state->show_frame)) {
		frame_state->refresh_frame_flags = AV1_ALL_FRAMES;
	} else {
		frame_state->refresh_frame_flags = gf_bs_read_int_log(bs, 8, "refresh_frame_flags");
	}

	if (!FrameIsIntra || frame_state->refresh_frame_flags != AV1_ALL_FRAMES) {
		if (error_resilient_mode && state->enable_order_hint) {
			u32 i = 0;
			for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
				u8 ref_order_hint = gf_bs_read_int_log_idx(bs, state->OrderHintBits, "ref_order_hint", i);
				if (ref_order_hint != state->RefOrderHint[i]) {
					state->RefValid[i] = 0;
				}
				state->RefOrderHint[i] = ref_order_hint;
			}
		}
	}

	u8 allow_intrabc = 0;
	if (frame_state->frame_type == AV1_KEY_FRAME) {
		av1_frame_size(bs, state, frame_size_override_flag);
		av1_render_size(bs);
		if (allow_screen_content_tools && state->UpscaledWidth == state->width) {
			allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc");
		}
	} else {
		if (frame_state->frame_type == AV1_INTRA_ONLY_FRAME) {
			av1_frame_size(bs, state, frame_size_override_flag);
			av1_render_size(bs);
			if (allow_screen_content_tools && state->UpscaledWidth == state->width) {
				allow_intrabc = gf_bs_read_int_log(bs, 1, "allow_intrabc");
			}
		} else {
			u32 i = 0;
			Bool frame_refs_short_signaling = GF_FALSE;
			if (state->enable_order_hint) {
				frame_refs_short_signaling = gf_bs_read_int_log(bs, 1, "frame_refs_short_signaling");
				if (frame_refs_short_signaling) {
					u8 last_frame_idx = gf_bs_read_int_log(bs, 3, "last_frame_idx");
					u8 gold_frame_idx = gf_bs_read_int_log(bs, 3, "gold_frame_idx");
					av1_set_frame_refs(state, last_frame_idx, gold_frame_idx, ref_frame_idx);
				}
			}
			for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
				if (!frame_refs_short_signaling)
					ref_frame_idx[i] = gf_bs_read_int_log_idx(bs, 3, "ref_frame_idx", i);
				if (state->frame_id_numbers_present_flag) {
					u32 n = state->delta_frame_id_length_minus_2 + 2;
					/*delta_frame_id_minus_1 =*/ gf_bs_read_int_log_idx(bs, n, "delta_frame_id_minus1", i);
					//DeltaFrameId = delta_frame_id_minus_1 + 1;
					//expectedFrameId[i] = ((current_frame_id + (1 << idLen) - DeltaFrameId) % (1 << idLen));
				}
			}
			if (frame_size_override_flag && !error_resilient_mode) {
				frame_size_with_refs(bs, state, frame_size_override_flag);
			} else {
				av1_frame_size(bs, state, frame_size_override_flag);
				av1_render_size(bs);
			}
			frame_state->allow_high_precision_mv = 0;
			if (!force_integer_mv) {
				frame_state->allow_high_precision_mv = gf_bs_read_int_log(bs, 1, "allow_high_precision_mv");
			}
			read_interpolation_filter(bs);
			gf_bs_read_int_log(bs, 1, "is_motion_mode_switchable");
			if (!(error_resilient_mode || !state->enable_ref_frame_mvs)) {
				gf_bs_read_int_log(bs, 1, "use_ref_frame_mvs");
			}
		}
	}

	if (!FrameIsIntra) {
		u32 i;
		for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
			u8 refFrame = AV1_LAST_FRAME + i;
			s8 ridx = ref_frame_idx[i]; /*was u8: ref_frame_idx entries can be -1, so the sign check below was always true*/
			if (ridx >= 0) {
				u8 hint = state->RefOrderHint[ridx];
				state->OrderHints[refFrame] = hint;
				/* if ( !enable_order_hint ) { RefFrameSignBias[ refFrame ] = 0; } else { RefFrameSignBias[ refFrame ] = get_relative_dist( hint, OrderHint) > 0; } */
			}
		}
	}

	if (!(state->reduced_still_picture_header || disable_cdf_update))
		gf_bs_read_int_log(bs, 1, "disable_frame_end_update_cdf");

	if (primary_ref_frame == AV1_PRIMARY_REF_NONE) {
		//init_non_coeff_cdfs();
		av1_setup_past_independence(state);
	} else {
		//load_cdfs(ref_frame_idx[primary_ref_frame]);
		av1_load_previous(state, primary_ref_frame, ref_frame_idx);
	}

	av1_parse_tile_info(bs, state);
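	/* The quantization fields below rely on av1_delta_q() above, which decodes an
	   su(7) value: read 7 bits, then subtract 2^7 when the sign bit (bit 6) is set.
	   A minimal illustrative sketch of that mapping, compiled out; the input values
	   are examples picked for this comment, not taken from a real stream: */
#if 0
	{
		u32 raw7 = 127;                           /* 0b1111111 as read from the bitstream */
		u32 signMask = 1 << 6;                    /* bit 6 is the sign bit of a 7-bit field */
		s32 val = (s32)raw7;
		if (val & signMask) val -= 2 * signMask;  /* 127 -> -1, 64 -> -64, 63 -> +63 */
	}
#endif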
	//quantization_params( ):
	u8 base_q_idx = gf_bs_read_int_log(bs, 8, "base_q_idx");
	s32 DeltaQUDc = 0;
	s32 DeltaQUAc = 0;
	s32 DeltaQVDc = 0;
	s32 DeltaQVAc = 0;
	s32 DeltaQYDc = av1_delta_q(bs, "DeltaQYDc_coded", "DeltaQYDc");
	if (!state->config->monochrome) {
		u8 diff_uv_delta = 0;
		if (state->separate_uv_delta_q)
			diff_uv_delta = gf_bs_read_int_log(bs, 1, "diff_uv_delta");
		DeltaQUDc = av1_delta_q(bs, "DeltaQUDc_coded", "DeltaQUDc");
		DeltaQUAc = av1_delta_q(bs, "DeltaQUAc_coded", "DeltaQUAc");
		if (diff_uv_delta) {
			DeltaQVDc = av1_delta_q(bs, "DeltaQVDc_coded", "DeltaQVDc");
			DeltaQVAc = av1_delta_q(bs, "DeltaQVAc_coded", "DeltaQVAc");
		}
	}
	if (gf_bs_read_int_log(bs, 1, "using_qmatrix")) {
		gf_bs_read_int_log(bs, 4, "qm_y");
		gf_bs_read_int_log(bs, 4, "qm_u");
		if (!state->separate_uv_delta_q) {
			gf_bs_read_int_log(bs, 4, "qm_v");
		}
	}

	u8 seg_features_SEG_LVL_ALT_Q_enabled[8] = { 0,0,0,0,0,0,0,0 };
	s32 seg_features_SEG_LVL_ALT_Q[8] = { 0,0,0,0,0,0,0,0 };

	//segmentation_params( ):
	u8 segmentation_enabled = gf_bs_read_int_log(bs, 1, "segmentation_enabled");
	if (segmentation_enabled) {
		/*u8 segmentation_temporal_update = 0;*/
		u8 segmentation_update_data = 1;
		if (primary_ref_frame != AV1_PRIMARY_REF_NONE) {
			u8 segmentation_update_map = gf_bs_read_int_log(bs, 1, "segmentation_update_map");
			if (segmentation_update_map == 1)
				gf_bs_read_int_log(bs, 1, "segmentation_temporal_update");
			segmentation_update_data = gf_bs_read_int_log(bs, 1, "segmentation_update_data");
		}
		if (segmentation_update_data == 1) {
			u32 i, j;
			for (i = 0; i < 8/*=MAX_SEGMENTS*/; i++) {
				for (j = 0; j < 8 /*=SEG_LVL_MAX*/; j++) {
					if (/*feature_enabled = */gf_bs_read_int_log_idx2(bs, 1, "feature_enabled", i, j) == 1) {
						s32 val;
						u32 bitsToRead = Segmentation_Feature_Bits[j];
						//this is SEG_LVL_ALT_Q
						if (!j) seg_features_SEG_LVL_ALT_Q_enabled[i] = 1;
						if (Segmentation_Feature_Signed[j] == 1) {
							val = gf_bs_read_int_log_idx2(bs, 1 + bitsToRead, "signed_feature_value", i, j);
						} else {
							val = gf_bs_read_int_log_idx2(bs, bitsToRead, "feature_value", i, j);
						}
						if (!j) seg_features_SEG_LVL_ALT_Q[i] = val;
					}
				}
			}
			//ignore all init steps
		}
	}

	//delta_q_params():
	/*u8 delta_q_res = 0;*/
	u8 delta_q_present = 0;
	if (base_q_idx > 0) {
		delta_q_present = gf_bs_read_int_log(bs, 1, "delta_q_present");
	}
	if (delta_q_present) {
		gf_bs_read_int_log(bs, 2, "delta_q_res");
	}

	//delta_lf_params():
	u8 delta_lf_present = 0;
	/*u8 delta_lf_res = 0; u8 delta_lf_multi = 0;*/
	if (delta_q_present) {
		if (!allow_intrabc) {
			delta_lf_present = gf_bs_read_int_log(bs, 1, "delta_lf_present");
		}
		if (delta_lf_present) {
			gf_bs_read_int_log(bs, 2, "delta_lf_res");
			gf_bs_read_int_log(bs, 1, "delta_lf_multi");
		}
	}

	//init lossless stuff!
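	/* Annotation: per the AV1 spec, the frame is "coded lossless" only when every
	   segment resolves to qindex == 0 with all delta-Q terms zero; AllLossless
	   additionally requires superres to be inactive (width == UpscaledWidth).
	   The loop filter, CDEF and loop restoration sections below are skipped
	   accordingly. */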
	u8 CodedLossless = 1;
	for (idx = 0; idx < 8; idx++) {
		u8 qindex = av1_get_qindex(GF_TRUE, idx, base_q_idx, delta_q_present, 0/*CurrentQIndex always ignored at this level of parsing*/, segmentation_enabled, seg_features_SEG_LVL_ALT_Q_enabled, seg_features_SEG_LVL_ALT_Q);
		Bool LosslessArray = (qindex == 0) && (DeltaQYDc == 0) && (DeltaQUAc == 0) && (DeltaQUDc == 0) && (DeltaQVAc == 0) && (DeltaQVDc == 0);
		if (!LosslessArray)
			CodedLossless = 0;
	}
	Bool AllLossless = CodedLossless && (state->width == state->UpscaledWidth);

	//loop_filter_params():
	if (!CodedLossless && !allow_intrabc) {
		u8 loop_filter_level_0 = gf_bs_read_int_log(bs, 6, "loop_filter_level_0");
		u8 loop_filter_level_1 = gf_bs_read_int_log(bs, 6, "loop_filter_level_1");
		if (!state->config->monochrome) {
			if (loop_filter_level_0 || loop_filter_level_1) {
				gf_bs_read_int_log(bs, 6, "loop_filter_level_2");
				gf_bs_read_int_log(bs, 6, "loop_filter_level_3");
			}
		}
		gf_bs_read_int_log(bs, 3, "loop_filter_sharpness");
		u8 loop_filter_delta_enabled = gf_bs_read_int_log(bs, 1, "loop_filter_delta_enabled");
		if (loop_filter_delta_enabled == 1) {
			u8 loop_filter_delta_update = gf_bs_read_int_log(bs, 1, "loop_filter_delta_update");
			if (loop_filter_delta_update) {
				u32 i;
				for (i = 0; i < 8/*TOTAL_REFS_PER_FRAME*/; i++) {
					u8 update_ref_delta = gf_bs_read_int_log_idx(bs, 1, "update_ref_delta", i);
					if (update_ref_delta == 1) {
						gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_ref_deltas", i);
					}
				}
				for (i = 0; i < 2; i++) {
					u8 update_mode_delta = gf_bs_read_int_log_idx(bs, 1, "update_mode_delta", i);
					if (update_mode_delta) {
						gf_bs_read_int_log_idx(bs, 1 + 6, "loop_filter_mode_deltas", i);
					}
				}
			}
		}
	}

	//cdef_params( ):
	if (!CodedLossless && !allow_intrabc && state->enable_cdef) {
		gf_bs_read_int_log(bs, 2, "cdef_damping_minus_3");
		u8 cdef_bits = gf_bs_read_int_log(bs, 2, "cdef_bits");
		u32 i, num_cd = 1 << cdef_bits;
		for (i = 0; i < num_cd; i++) {
			gf_bs_read_int_log_idx(bs, 4, "cdef_y_pri_strength", i);
			gf_bs_read_int_log_idx(bs, 2, "cdef_y_sec_strength", i);
			if (!state->config->monochrome) {
				gf_bs_read_int_log_idx(bs, 4, "cdef_uv_pri_strength", i);
				gf_bs_read_int_log_idx(bs, 2, "cdef_uv_sec_strength", i);
			}
		}
	}

	//lr_params( ) :
	if (!AllLossless && !allow_intrabc && state->enable_restoration) {
		u32 i, nb_planes = state->config->monochrome ?
1 : 3; u8 UsesLr = 0; u8 usesChromaLr = 0; for (i = 0; i < nb_planes; i++) { u8 lr_type = gf_bs_read_int_log_idx(bs, 2, "lr_type", i); //FrameRestorationType[i] = Remap_Lr_Type[lr_type] if (lr_type != AV1_RESTORE_NONE) { UsesLr = 1; if (i > 0) { usesChromaLr = 1; } } } if (UsesLr) { if (state->use_128x128_superblock) { gf_bs_read_int_log(bs, 1, "lr_unit_shift_minus_1"); } else { u8 lr_unit_shift = gf_bs_read_int_log(bs, 1, "lr_unit_shift"); if (lr_unit_shift) { gf_bs_read_int_log(bs, 1, "lr_unit_extra_shift"); //lr_unit_shift += lr_unit_extra_shift; } } if (state->config->chroma_subsampling_x && state->config->chroma_subsampling_y && usesChromaLr) { gf_bs_read_int_log(bs, 1, "lr_uv_shift"); } } } //read_tx_mode(): if (CodedLossless == 1) { } else { gf_bs_read_int_log(bs, 1, "tx_mode_select"); } //frame_reference_mode( ): u8 reference_select = 0; if (FrameIsIntra) { } else { reference_select = gf_bs_read_int_log(bs, 1, "reference_select"); } //skip_mode_params( ): u8 skipModeAllowed = 0; if (FrameIsIntra || !reference_select || !state->enable_order_hint) { } else { u32 i; s32 forwardIdx = -1; s32 backwardIdx = -1; s32 forwardHint = 0; s32 backwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, frame_state->order_hint, state) < 0) { if (forwardIdx < 0 || av1_get_relative_dist(refHint, forwardHint, state) > 0) { forwardIdx = i; forwardHint = refHint; } } else if (av1_get_relative_dist(refHint, frame_state->order_hint, state) > 0) { if (backwardIdx < 0 || av1_get_relative_dist(refHint, backwardHint, state) < 0) { backwardIdx = i; backwardHint = refHint; } } } if (forwardIdx < 0) { skipModeAllowed = 0; } else if (backwardIdx >= 0) { skipModeAllowed = 1; //SkipModeFrame[0] = AV1_LAST_FRAME + MIN(forwardIdx, backwardIdx); //SkipModeFrame[1] = AV1_LAST_FRAME + MAX(forwardIdx, backwardIdx); } else { s32 secondForwardIdx = -1; s32 secondForwardHint = 0; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { u8 refHint = state->RefOrderHint[ref_frame_idx[i]]; if (av1_get_relative_dist(refHint, forwardHint, state) < 0) { if (secondForwardIdx < 0 || av1_get_relative_dist(refHint, secondForwardHint, state) > 0) { secondForwardIdx = i; secondForwardHint = refHint; } } } if (secondForwardIdx < 0) { skipModeAllowed = 0; } else { skipModeAllowed = 1; //SkipModeFrame[ 0 ] = LAST_FRAME + Min(forwardIdx, secondForwardIdx) //SkipModeFrame[ 1 ] = LAST_FRAME + Max(forwardIdx, secondForwardIdx) } } } if (skipModeAllowed) { gf_bs_read_int_log(bs, 1, "skip_mode_present"); } if (FrameIsIntra || error_resilient_mode || !state->enable_warped_motion) { } else { gf_bs_read_int_log(bs, 1, "allow_warped_motion"); } gf_bs_read_int_log(bs, 1, "reduced_tx"); //global_motion_params( ) u32 ref; for (ref = AV1_LAST_FRAME; ref <= AV1_ALTREF_FRAME; ref++) { u32 i; for (i = 0; i < 6; i++) { state->GmParams.coefs[ref][i] = ((i % 3 == 2) ? 1 << WARPEDMODEL_PREC_BITS : 0); } } if (!FrameIsIntra) { u32 refs; for (refs = AV1_LAST_FRAME; refs <= AV1_ALTREF_FRAME; refs++) { u8 type = AV1_GMC_IDENTITY; Bool is_global = gf_bs_read_int_log_idx(bs, 1, "is_global", refs); if (is_global) { Bool is_rot_zoom = gf_bs_read_int_log_idx(bs, 1, "is_rot_zoom", refs); if (is_rot_zoom) { type = AV1_GMC_ROTZOOM; } else { Bool is_trans = gf_bs_read_int_log_idx(bs, 1, "is_translation", refs); type = is_trans ? 
AV1_GMC_TRANSLATION : AV1_GMC_AFFINE; } } if (type >= AV1_GMC_ROTZOOM) { av1_read_global_param(state, bs, type, refs, 2); av1_read_global_param(state, bs, type, refs, 3); if (type == AV1_GMC_AFFINE) { av1_read_global_param(state, bs, type, refs, 4); av1_read_global_param(state, bs, type, refs, 5); } else { state->GmParams.coefs[refs][4] = -state->GmParams.coefs[refs][3]; state->GmParams.coefs[refs][5] = state->GmParams.coefs[refs][2]; } } if (type >= AV1_GMC_TRANSLATION) { av1_read_global_param(state, bs, type, refs, 0); av1_read_global_param(state, bs, type, refs, 1); } } } //film_grain_params() if (!state->film_grain_params_present || (!state->frame_state.show_frame && !showable_frame)) { } else { u8 apply_grain = gf_bs_read_int_log(bs, 1, "apply_grain"); if (apply_grain) { gf_bs_read_int_log(bs, 16, "grain_seed"); u8 update_grain = 1; if (state->frame_state.frame_type == AV1_INTER_FRAME) { update_grain = gf_bs_read_int_log(bs, 1, "update_grain"); } if (!update_grain) { gf_bs_read_int_log(bs, 3, "film_grain_params_ref_idx"); } else { u32 i, num_y_points = gf_bs_read_int_log(bs, 4, "num_y_points"); for (i = 0; i < num_y_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_y_value", i); gf_bs_read_int_log_idx(bs, 8, "point_y_scaling", i); } u8 chroma_scaling_from_luma = 0; if (!state->config->monochrome) chroma_scaling_from_luma = gf_bs_read_int_log(bs, 1, "chroma_scaling_from_luma"); u8 num_cb_points = 0; u8 num_cr_points = 0; if (state->config->monochrome || chroma_scaling_from_luma || ((state->config->chroma_subsampling_x == 1) && (state->config->chroma_subsampling_y == 1) && (num_y_points == 0)) ) { } else { num_cb_points = gf_bs_read_int_log(bs, 4, "num_cb_points"); for (i = 0; i < num_cb_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cb_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cb_scaling", i); } num_cr_points = gf_bs_read_int_log(bs, 4, "num_cr_points"); for (i = 0; i < num_cr_points; i++) { gf_bs_read_int_log_idx(bs, 8, "point_cr_value", i); gf_bs_read_int_log_idx(bs, 8, "point_cr_scaling", i); } } gf_bs_read_int_log(bs, 2, "grain_scaling_minus_8"); u8 ar_coeff_lag = gf_bs_read_int_log(bs, 2, "ar_coeff_lag"); u16 numPosLuma = 2 * ar_coeff_lag * (ar_coeff_lag + 1); u16 numPosChroma = numPosLuma; if (num_y_points) { numPosChroma = numPosLuma + 1; for (i = 0; i < numPosLuma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_y_plus_128", i); } } if (chroma_scaling_from_luma || num_cb_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cb_plus_128", i); } } if (chroma_scaling_from_luma || num_cr_points) { for (i = 0; i < numPosChroma; i++) { gf_bs_read_int_log_idx(bs, 8, "ar_coeffs_cr_plus_128", i); } } gf_bs_read_int_log(bs, 2, "ar_coeff_shift_minus_6"); gf_bs_read_int_log(bs, 2, "grain_scale_shift"); if (num_cb_points) { gf_bs_read_int_log(bs, 8, "cb_mult"); gf_bs_read_int_log(bs, 8, "cb_luma_mult"); gf_bs_read_int_log(bs, 9, "cb_offset"); } if (num_cr_points) { gf_bs_read_int_log(bs, 8, "cr_mult"); gf_bs_read_int_log(bs, 8, "cr_luma_mult"); gf_bs_read_int_log(bs, 9, "cr_offset"); } gf_bs_read_int_log(bs, 1, "overlap_flag"); gf_bs_read_int_log(bs, 1, "clip_to_restricted_range"); } } } //end of uncompressed header !! 
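	/* Annotation: the film grain fields above are parsed only to keep the bitstream
	   position in sync; gf_bs_read_int_log() traces them but none of the values are
	   stored in AV1State. */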
}

GF_EXPORT
void gf_av1_init_state(AV1State *state)
{
	if (!state) return;
	memset(state, 0, sizeof(AV1State));
	state->color_primaries = 2;
	state->transfer_characteristics = 2;
	state->matrix_coefficients = 2;
}

GF_EXPORT
void gf_av1_reset_state(AV1State *state, Bool is_destroy)
{
	GF_List *l1, *l2;

	if (state->frame_state.header_obus) {
		while (gf_list_count(state->frame_state.header_obus)) {
			GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.header_obus);
			if (a->obu) gf_free(a->obu);
			gf_free(a);
		}
	}

	if (state->frame_state.frame_obus) {
		while (gf_list_count(state->frame_state.frame_obus)) {
			GF_AV1_OBUArrayEntry *a = (GF_AV1_OBUArrayEntry*)gf_list_pop_back(state->frame_state.frame_obus);
			if (a->obu) gf_free(a->obu);
			gf_free(a);
		}
	}

	l1 = state->frame_state.frame_obus;
	l2 = state->frame_state.header_obus;
	memset(&state->frame_state, 0, sizeof(AV1StateFrame));
	state->frame_state.is_first_frame = GF_TRUE;

	if (is_destroy) {
		gf_list_del(l1);
		gf_list_del(l2);
		if (state->bs) {
			if (gf_bs_get_position(state->bs)) {
				u32 size;
				gf_bs_get_content_no_truncate(state->bs, &state->frame_obus, &size, &state->frame_obus_alloc);
			}
			gf_bs_del(state->bs);
		}
		state->bs = NULL;
	} else {
		state->frame_state.frame_obus = l1;
		state->frame_state.header_obus = l2;
		if (state->bs)
			gf_bs_seek(state->bs, 0);
	}
}

static GF_Err av1_parse_tile_group(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size)
{
	u32 TileNum, tg_start = 0, tg_end = 0;
	u32 numTiles = state->tileCols * state->tileRows; /*was declared Bool, truncating the tile count*/
	Bool tile_start_and_end_present_flag = GF_FALSE;
	GF_Err e = GF_OK;
	if (numTiles > 1)
		tile_start_and_end_present_flag = gf_bs_read_int(bs, 1);

	if (numTiles == 1 || !tile_start_and_end_present_flag) {
		tg_start = 0;
		tg_end = numTiles - 1;
		/*state->frame_state.tg[0].start_idx = 0; state->frame_state.tg[0].end_idx = numTiles - 1;*/
	} else {
		u32 tileBits = state->tileColsLog2 + state->tileRowsLog2;
		/*state->frame_state.tg[state->frame_state.tg_idx].start_idx*/ tg_start = gf_bs_read_int(bs, tileBits);
		/*state->frame_state.tg[state->frame_state.tg_idx].end_idx*/ tg_end = gf_bs_read_int(bs, tileBits);
	}
	/*state->frame_state.tg_idx++;*/

	gf_bs_align(bs);

	if (tg_end >= GF_ARRAY_LENGTH(state->frame_state.tiles))
		return GF_NON_COMPLIANT_BITSTREAM;

	state->frame_state.nb_tiles_in_obu = 0;
	for (TileNum = tg_start; TileNum <= tg_end; TileNum++) {
		u32 tile_start_offset, tile_size;
		/*u32 tileRow = TileNum / state->tileCols; u32 tileCol = TileNum % state->tileCols;*/
		Bool lastTile = TileNum == tg_end;
		u64 pos = gf_bs_get_position(bs);
		if (lastTile) {
			tile_start_offset = (u32)(pos - obu_start);
			tile_size = (u32)(obu_size - (pos - obu_start));
		} else {
			u64 tile_size_minus_1 = aom_av1_le(bs, state->tile_size_bytes, "tile_size_minus_1");
			pos = gf_bs_get_position(bs);
			tile_start_offset = (u32)(pos - obu_start);
			tile_size = (u32)(tile_size_minus_1 + 1/* + state->tile_size_bytes*/);
		}

		if (tile_start_offset + tile_size > obu_size) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AV1] Error parsing tile group, tile %d start %d + size %d exceeds OBU length "LLU"\n", TileNum, tile_start_offset, tile_size, obu_size));
			e = GF_NON_COMPLIANT_BITSTREAM;
			break;
		}

		state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].obu_start_offset = tile_start_offset;
		state->frame_state.tiles[state->frame_state.nb_tiles_in_obu].size = tile_size;
		gf_bs_skip_bytes(bs, tile_size);
		state->frame_state.nb_tiles_in_obu++;
	}
	if (tg_end == numTiles - 1) {
		av1_decode_frame_wrapup(state);
	}
	return e;
}

static void av1_parse_frame_header(GF_BitStream *bs, AV1State
*state) { AV1StateFrame *frame_state = &state->frame_state; if (frame_state->seen_frame_header == GF_FALSE) { u64 pos = gf_bs_get_position(bs); state->frame_state.show_existing_frame = GF_FALSE; frame_state->seen_frame_header = GF_TRUE; av1_parse_uncompressed_header(bs, state); state->frame_state.is_first_frame = GF_FALSE; state->frame_state.uncompressed_header_bytes = (u32) (gf_bs_get_position(bs) - pos); if (state->frame_state.show_existing_frame) { av1_decode_frame_wrapup(state); frame_state->seen_frame_header = GF_FALSE; } else { //TileNum = 0; frame_state->seen_frame_header = GF_TRUE; } } } static GF_Err av1_parse_frame(GF_BitStream *bs, AV1State *state, u64 obu_start, u64 obu_size) { av1_parse_frame_header(bs, state); //byte alignment gf_bs_align(bs); return av1_parse_tile_group(bs, state, obu_start, obu_size); } static void on_aom_av1_eos(void *_state) { AV1State *state = (AV1State *)_state; state->bs_overread = GF_TRUE; } GF_EXPORT GF_Err gf_av1_parse_obu(GF_BitStream *bs, ObuType *obu_type, u64 *obu_size, u32 *obu_hdr_size, AV1State *state) { GF_Err e = GF_OK; u32 hdr_size; u64 pos = gf_bs_get_position(bs); if (!bs || !obu_type || !state) return GF_BAD_PARAM; state->bs_overread = GF_FALSE; gf_bs_set_eos_callback(bs, on_aom_av1_eos, state); state->obu_extension_flag = state->obu_has_size_field = 0; state->temporal_id = state->spatial_id = 0; state->frame_state.uncompressed_header_bytes = 0; e = gf_av1_parse_obu_header(bs, obu_type, &state->obu_extension_flag, &state->obu_has_size_field, &state->temporal_id, &state->spatial_id); if (e) return e; if (state->obu_has_size_field) { *obu_size = (u32)gf_av1_leb128_read(bs, NULL); } else { if (*obu_size >= 1 + state->obu_extension_flag) { *obu_size = *obu_size - 1 - state->obu_extension_flag; } else { GF_LOG(state->config ? GF_LOG_WARNING : GF_LOG_DEBUG, GF_LOG_CODING, ("[AV1] computed OBU size "LLD" (input value = "LLU"). 
Skipping.\n", *obu_size - 1 - state->obu_extension_flag, *obu_size)); return GF_NON_COMPLIANT_BITSTREAM; } } hdr_size = (u32)(gf_bs_get_position(bs) - pos); if ((gf_bs_available(bs) < *obu_size) || state->bs_overread) { gf_bs_seek(bs, pos); return GF_BUFFER_TOO_SMALL; } *obu_size += hdr_size; if (obu_hdr_size) *obu_hdr_size = hdr_size; if (*obu_type != OBU_SEQUENCE_HEADER && *obu_type != OBU_TEMPORAL_DELIMITER && state->OperatingPointIdc != 0 && state->obu_extension_flag == 1) { u32 inTemporalLayer = (state->OperatingPointIdc >> state->temporal_id) & 1; u32 inSpatialLayer = (state->OperatingPointIdc >> (state->spatial_id + 8)) & 1; if (!inTemporalLayer || !inSpatialLayer) { *obu_type = -1; gf_bs_seek(bs, pos + *obu_size); return GF_OK; } } e = GF_OK; switch (*obu_type) { case OBU_SEQUENCE_HEADER: av1_parse_sequence_header_obu(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Sequence header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_METADATA: #if 0 //TODO + sample groups const ObuMetadataType metadata_type = (u32)read_leb128(bs, NULL); we should check for 16 bits limit(AV1MetadataSampleGroupEntry) for ISOBMFF bindings, see https ://github.com/AOMediaCodec/av1-isobmff/pull/86#issuecomment-416659538 if (metadata_type == OBU_METADATA_TYPE_ITUT_T35) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_CLL) { } else if (metadata_type == OBU_METADATA_TYPE_HDR_MDCV) { } else if (metadata_type == OBU_METADATA_TYPE_SCALABILITY) { } else if (metadata_type == METADATA_TYPE_TIMECODE) { } #endif GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[AV1] parsing for metadata is not implemented. Forwarding.\n")); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Metadata parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME_HEADER: case OBU_REDUNDANT_FRAME_HEADER: if (state->config) { av1_parse_frame_header(bs, state); if (gf_bs_get_position(bs) > pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame header parsing consumed too many bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_FRAME: e = av1_parse_frame(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Frame parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TILE_GROUP: if (state->config) { e = av1_parse_tile_group(bs, state, pos, *obu_size); if (gf_bs_get_position(bs) != pos + *obu_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] Tile group parsing did not consume the right number of bytes !\n")); e = GF_NON_COMPLIANT_BITSTREAM; } } gf_bs_seek(bs, pos + *obu_size); break; case OBU_TEMPORAL_DELIMITER: state->frame_state.seen_frame_header = GF_FALSE; case OBU_PADDING: gf_bs_seek(bs, pos + *obu_size); break; default: GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[AV1] unknown OBU type %u (size "LLU"). 
Skipping.\n", *obu_type, *obu_size)); gf_bs_seek(bs, pos + *obu_size); break; } return e; } GF_EXPORT GF_Err gf_media_prores_parse_bs(GF_BitStream *bs, GF_ProResFrameInfo *prores_frame) { u32 i, j; u64 start, pos; memset(prores_frame, 0, sizeof(GF_ProResFrameInfo)); start = gf_bs_get_position(bs); if (gf_bs_available(bs) < 10) return GF_BUFFER_TOO_SMALL; prores_frame->frame_size = gf_bs_read_u32(bs); prores_frame->frame_identifier = gf_bs_read_u32(bs); if (prores_frame->frame_identifier != GF_4CC('i','c','p','f')) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame identifier, expected \"icpf\" got \"%s\"\n", gf_4cc_to_str(prores_frame->frame_identifier) )); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } /*parse frame header*/ pos = gf_bs_get_position(bs); prores_frame->frame_hdr_size = gf_bs_read_u16(bs); if (gf_bs_available(bs) + 2 < prores_frame->frame_hdr_size) { gf_bs_seek(bs, start); return GF_BUFFER_TOO_SMALL; } gf_bs_read_u8(bs); prores_frame->version = gf_bs_read_u8(bs); prores_frame->encoder_id = gf_bs_read_u32(bs); prores_frame->width = gf_bs_read_u16(bs); prores_frame->height = gf_bs_read_u16(bs); prores_frame->chroma_format = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->interlaced_mode = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 2); prores_frame->aspect_ratio_information = gf_bs_read_int(bs, 4); prores_frame->framerate_code = gf_bs_read_int(bs, 4); prores_frame->color_primaries = gf_bs_read_u8(bs); prores_frame->transfer_characteristics = gf_bs_read_u8(bs); prores_frame->matrix_coefficients = gf_bs_read_u8(bs); gf_bs_read_int(bs, 4); prores_frame->alpha_channel_type = gf_bs_read_int(bs, 4); gf_bs_read_int(bs, 14); prores_frame->load_luma_quant_matrix = gf_bs_read_int(bs, 1); prores_frame->load_chroma_quant_matrix = gf_bs_read_int(bs, 1); if (prores_frame->load_luma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->luma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } if (prores_frame->load_chroma_quant_matrix) { for (i=0; i<8; i++) { for (j=0; j<8; j++) { prores_frame->chroma_quant_matrix[i][j] = gf_bs_read_u8(bs); } } } pos = gf_bs_get_position(bs) - pos; if (pos != prores_frame->frame_hdr_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[ProRes] Invalid frame header size, expected %d got %d\n", prores_frame->frame_hdr_size, (u32) pos)); gf_bs_seek(bs, start); return GF_NON_COMPLIANT_BITSTREAM; } prores_frame->nb_pic = ((prores_frame->interlaced_mode==1) || (prores_frame->interlaced_mode==2)) ? 
2 : 1; gf_bs_seek(bs, start); return GF_OK; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT u8 gf_mp3_version(u32 hdr) { return ((hdr >> 19) & 0x3); } GF_EXPORT const char *gf_mp3_version_name(u32 hdr) { u32 v = gf_mp3_version(hdr); switch (v) { case 0: return "MPEG-2.5"; case 1: return "Reserved"; case 2: return "MPEG-2"; case 3: return "MPEG-1"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS GF_EXPORT u8 gf_mp3_layer(u32 hdr) { return 4 - (((hdr >> 17) & 0x3)); } GF_EXPORT u8 gf_mp3_num_channels(u32 hdr) { if (((hdr >> 6) & 0x3) == 3) return 1; return 2; } GF_EXPORT u16 gf_mp3_sampling_rate(u32 hdr) { u16 res; /* extract the necessary fields from the MP3 header */ u8 version = gf_mp3_version(hdr); u8 sampleRateIndex = (hdr >> 10) & 0x3; switch (sampleRateIndex) { case 0: res = 44100; break; case 1: res = 48000; break; case 2: res = 32000; break; default: GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] Samplerate index not valid\n")); return 0; } /*reserved or MPEG-1*/ if (version & 1) return res; /*MPEG-2*/ res /= 2; /*MPEG-2.5*/ if (version == 0) res /= 2; return res; } GF_EXPORT u16 gf_mp3_window_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); if (layer == 3) { if (version == 3) return 1152; return 576; } if (layer == 2) return 1152; return 384; } GF_EXPORT u8 gf_mp3_object_type_indication(u32 hdr) { switch (gf_mp3_version(hdr)) { case 3: return GF_CODECID_MPEG_AUDIO; case 2: case 0: return GF_CODECID_MPEG2_PART3; default: return 0x00; } } /*aligned bitrate parsing with libMAD*/ static u32 const bitrate_table[5][15] = { /* MPEG-1 */ { 0, 32000, 64000, 96000, 128000, 160000, 192000, 224000, /* Layer I */ 256000, 288000, 320000, 352000, 384000, 416000, 448000 }, { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer II */ 128000, 160000, 192000, 224000, 256000, 320000, 384000 }, { 0, 32000, 40000, 48000, 56000, 64000, 80000, 96000, /* Layer III */ 112000, 128000, 160000, 192000, 224000, 256000, 320000 }, /* MPEG-2 LSF */ { 0, 32000, 48000, 56000, 64000, 80000, 96000, 112000, /* Layer I */ 128000, 144000, 160000, 176000, 192000, 224000, 256000 }, { 0, 8000, 16000, 24000, 32000, 40000, 48000, 56000, /* Layers */ 64000, 80000, 96000, 112000, 128000, 144000, 160000 } /* II & III */ }; u32 gf_mp3_bit_rate(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u8 bitRateIndex = (hdr >> 12) & 0xF; u32 lidx; /*MPEG-1*/ if (version & 1) { if (!layer) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } lidx = layer - 1; } /*MPEG-2/2.5*/ else { lidx = 3 + (layer >> 1); } if (lidx>4) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[MPEG-1/2 Audio] layer index not valid\n")); return 0; } return bitrate_table[lidx][bitRateIndex]; } GF_EXPORT u16 gf_mp3_frame_size(u32 hdr) { u8 version = gf_mp3_version(hdr); u8 layer = gf_mp3_layer(hdr); u32 pad = ((hdr >> 9) & 0x1) ? 
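/* the padding bit adds one slot to the frame: one 4-byte slot for Layer I (hence the *4 below), one byte for Layers II and III */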
1 : 0; u32 bitrate = gf_mp3_bit_rate(hdr); u32 samplerate = gf_mp3_sampling_rate(hdr); u32 frameSize = 0; if (!samplerate || !bitrate) return 0; if (layer == 1) { frameSize = ((12 * bitrate / samplerate) + pad) * 4; } else { u32 slots_per_frame = 144; if ((layer == 3) && !(version & 1)) slots_per_frame = 72; frameSize = (slots_per_frame * bitrate / samplerate) + pad; } return (u16)frameSize; } GF_EXPORT u32 gf_mp3_get_next_header(FILE* in) { u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; while (1) { if (gf_fread(&b, 1, in) == 0) return 0; if (state == 3) { bytes[state] = b; return GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) state = 1; else state = 0; } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { if ((dropped == 0) && ((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[0] = (u8)0xFF; bytes[1] = b; state = 2; } else { dropped++; } } } } return 0; } GF_EXPORT u32 gf_mp3_get_next_header_mem(const u8 *buffer, u32 size, u32 *pos) { u32 cur; u8 b, state = 0; u32 dropped = 0; unsigned char bytes[4]; bytes[0] = bytes[1] = bytes[2] = bytes[3] = 0; cur = 0; *pos = 0; while (cur < size) { b = (u8)buffer[cur]; cur++; if (state == 3) { u32 val; bytes[state] = b; val = GF_4CC((u32)bytes[0], bytes[1], bytes[2], bytes[3]); if (gf_mp3_frame_size(val)) { *pos = dropped; return val; } state = 0; dropped = cur; } if (state == 2) { if (((b & 0xF0) == 0) || ((b & 0xF0) == 0xF0) || ((b & 0x0C) == 0x0C)) { if (bytes[1] == 0xFF) { state = 1; dropped += 1; } else { state = 0; dropped = cur; } } else { bytes[state] = b; state = 3; } } if (state == 1) { if (((b & 0xE0) == 0xE0) && ((b & 0x18) != 0x08) && ((b & 0x06) != 0)) { bytes[state] = b; state = 2; } else { state = 0; dropped = cur; } } if (state == 0) { if (b == 0xFF) { bytes[state] = b; state = 1; } else { dropped++; } } } return 0; } #endif /*GPAC_DISABLE_AV_PARSERS*/ GF_EXPORT Bool gf_avc_is_rext_profile(u8 profile_idc) { switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: case 83: case 86: case 118: case 128: case 138: case 139: case 134: case 135: return GF_TRUE; default: return GF_FALSE; } } GF_EXPORT const char *gf_avc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x42: return "Baseline"; case 0x4D: return "Main"; case 0x53: return "Scalable Baseline"; case 0x56: return "Scalable High"; case 0x58: return "Extended"; case 0x64: return "High"; case 0x6E: return "High 10"; case 0x7A: return "High 4:2:2"; case 0x90: case 0xF4: return "High 4:4:4"; default: return "Unknown"; } } GF_EXPORT const char *gf_hevc_get_profile_name(u8 video_prof) { switch (video_prof) { case 0x01: return "Main"; case 0x02: return "Main 10"; case 0x03: return "Main Still Picture"; default: return "Unknown"; } } GF_EXPORT const char *gf_avc_hevc_get_chroma_format_name(u8 chroma_format) { switch (chroma_format) { case 1: return "YUV 4:2:0"; case 2: return "YUV 4:2:2"; case 3: return "YUV 4:4:4"; default: return "Unknown"; } } #ifndef GPAC_DISABLE_AV_PARSERS u32 gf_bs_read_ue_log_idx3(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2, s32 idx3) { u32 val=0, code; s32 nb_lead = -1; u32 bits = 0; for (code=0; !code; nb_lead++) { if 
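/* ue(v) exp-Golomb: count N leading zero bits up to a '1' marker, then read N more info bits; e.g. '1'=>0, '010'=>1, '011'=>2, '00100'=>3 */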
(nb_lead>=32) { //gf_bs_read_int keeps returning 0 on EOS: if no more bits are available the rbsp was truncated, otherwise the exp-golomb code in the rbsp is broken //we only test once nb_lead>=32 to avoid testing at each bit read if (!gf_bs_available(bs)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] exp-golomb read failed, not enough bits in bitstream !\n")); } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Core] corrupted exp-golomb code, %d leading zeros, max 31 allowed !\n", nb_lead)); } return 0; } code = gf_bs_read_int(bs, 1); bits++; } if (nb_lead) { val = gf_bs_read_int(bs, nb_lead); val += (1 << nb_lead) - 1; bits += nb_lead; } if (fname) { gf_bs_log_idx(bs, bits, fname, val, idx1, idx2, idx3); } return val; } #define gf_bs_read_ue_log_idx2(_bs, _fname, _idx1, _idx2) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx1, (s32) _idx2, -1) #define gf_bs_read_ue_log_idx(_bs, _fname, _idx) gf_bs_read_ue_log_idx3(_bs, _fname, (s32) _idx, -1, -1) #define gf_bs_read_ue_log(_bs, _fname) gf_bs_read_ue_log_idx3(_bs, _fname, -1, -1, -1) u32 gf_bs_read_ue(GF_BitStream *bs) { return gf_bs_read_ue_log(bs, NULL); } s32 gf_bs_read_se(GF_BitStream *bs) { u32 v = gf_bs_read_ue(bs); if ((v & 0x1) == 0) return (s32)(0 - (v >> 1)); return (v + 1) >> 1; } s32 gf_bs_read_se_log_idx2(GF_BitStream *bs, const char *fname, s32 idx1, s32 idx2) { s32 res = gf_bs_read_se(bs); if (fname) gf_bs_log_idx(bs, -1, fname, res, idx1, idx2, -1); return res; } #define gf_bs_read_se_log_idx(_bs, _fname, _idx) gf_bs_read_se_log_idx2(_bs, _fname, (s32) _idx, -1) #define gf_bs_read_se_log(_bs, _fname) gf_bs_read_se_log_idx2(_bs, _fname, -1, -1) void gf_bs_write_ue(GF_BitStream *bs, u32 num) { s32 length = 1; s32 temp = ++num; while (temp != 1) { temp >>= 1; length += 2; } gf_bs_write_int(bs, 0, length >> 1); gf_bs_write_int(bs, num, (length + 1) >> 1); } void gf_bs_write_se(GF_BitStream *bs, s32 num) { u32 v; if (num <= 0) v = (-1 * num) << 1; else v = (num << 1) - 1; gf_bs_write_ue(bs, v); } u32 gf_media_nalu_is_start_code(GF_BitStream *bs) { u8 s1, s2, s3, s4; Bool is_sc = 0; u64 pos = gf_bs_get_position(bs); s1 = gf_bs_read_int(bs, 8); s2 = gf_bs_read_int(bs, 8); if (!s1 && !s2) { s3 = gf_bs_read_int(bs, 8); if (s3 == 0x01) is_sc = 3; else if (!s3) { s4 = gf_bs_read_int(bs, 8); if (s4 == 0x01) is_sc = 4; } } gf_bs_seek(bs, pos + is_sc); return is_sc; } /*read that amount of data at each IO access rather than fetching byte by byte...*/ #define AVC_CACHE_SIZE 4096 static u32 gf_media_nalu_locate_start_code_bs(GF_BitStream *bs, Bool locate_trailing) { u32 v, bpos, nb_cons_zeros = 0; char avc_cache[AVC_CACHE_SIZE]; u64 end, cache_start, load_size; u64 start = gf_bs_get_position(bs); if (start < 3) return 0; load_size = 0; bpos = 0; cache_start = 0; end = 0; v = 0xffffffff; while (!end) { /*refill cache*/ if (bpos == (u32)load_size) { if (!gf_bs_available(bs)) break; load_size = gf_bs_available(bs); if (load_size > AVC_CACHE_SIZE) load_size = AVC_CACHE_SIZE; bpos = 0; cache_start = gf_bs_get_position(bs); gf_bs_read_data(bs, avc_cache, (u32)load_size); } v = ( (v<<8) & 0xFFFFFF00) | ((u32) avc_cache[bpos]); bpos++; if (locate_trailing) { if ((v & 0x000000FF) == 0) nb_cons_zeros++; else nb_cons_zeros = 0; } if (v == 0x00000001) end = cache_start + bpos - 4; else if ((v & 0x00FFFFFF) == 0x00000001) end = cache_start + bpos - 3; } gf_bs_seek(bs, start); if (!end) end = gf_bs_get_size(bs); if (locate_trailing) { if (nb_cons_zeros >= 3) return (u32)(end - start - nb_cons_zeros); } return (u32)(end - start); } GF_EXPORT u32 
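/* returns the number of bytes from the current position to the next 00 00 01 / 00 00 00 01 start code (or to end of stream), restoring the read position */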
gf_media_nalu_next_start_code_bs(GF_BitStream *bs) { return gf_media_nalu_locate_start_code_bs(bs, 0); } GF_EXPORT u32 gf_media_nalu_next_start_code(const u8 *data, u32 data_len, u32 *sc_size) { u32 avail = data_len; const u8 *cur = data; while (cur) { u32 v, bpos; u8 *next_zero = memchr(cur, 0, avail); if (!next_zero) return data_len; v = 0xffffff00; bpos = (u32)(next_zero - data) + 1; while (1) { u8 cval; if (bpos == (u32)data_len) return data_len; cval = data[bpos]; v = ((v << 8) & 0xFFFFFF00) | ((u32)cval); bpos++; if (v == 0x00000001) { *sc_size = 4; return bpos - 4; } else if ((v & 0x00FFFFFF) == 0x00000001) { *sc_size = 3; return bpos - 3; } if (cval) break; } if (bpos >= data_len) break; cur = data + bpos; avail = data_len - bpos; } return data_len; } Bool gf_media_avc_slice_is_intra(AVCState *avc) { switch (avc->s_info.slice_type) { case GF_AVC_TYPE_I: case GF_AVC_TYPE2_I: case GF_AVC_TYPE_SI: case GF_AVC_TYPE2_SI: return 1; default: return 0; } } #if 0 //unused Bool gf_media_avc_slice_is_IDR(AVCState *avc) { if (avc->sei.recovery_point.valid) { avc->sei.recovery_point.valid = 0; return 1; } if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) return 0; return gf_media_avc_slice_is_intra(avc); } #endif static const struct { u32 w, h; } avc_hevc_sar[] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4, 3 }, { 3, 2 }, { 2, 1 } }; /*ISO 14496-10 (N11084) E.1.2*/ static void avc_parse_hrd_parameters(GF_BitStream *bs, AVC_HRD *hrd) { int i, cpb_cnt_minus1; cpb_cnt_minus1 = gf_bs_read_ue_log(bs, "cpb_cnt_minus1"); if (cpb_cnt_minus1 > 31) GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] invalid cpb_cnt_minus1 value: %d (expected in [0;31])\n", cpb_cnt_minus1)); gf_bs_read_int_log(bs, 4, "bit_rate_scale"); gf_bs_read_int_log(bs, 4, "cpb_size_scale"); /*for( SchedSelIdx = 0; SchedSelIdx <= cpb_cnt_minus1; SchedSelIdx++ ) {*/ for (i = 0; i <= cpb_cnt_minus1; i++) { gf_bs_read_ue_log_idx(bs, "bit_rate_value_minus1", i); gf_bs_read_ue_log_idx(bs, "cpb_size_value_minus1", i); gf_bs_read_int_log_idx(bs, 1, "cbr_flag", i); } gf_bs_read_int_log(bs, 5, "initial_cpb_removal_delay_length_minus1"); hrd->cpb_removal_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "cpb_removal_delay_length_minus1"); hrd->dpb_output_delay_length_minus1 = gf_bs_read_int_log(bs, 5, "dpb_output_delay_length_minus1"); hrd->time_offset_length = gf_bs_read_int_log(bs, 5, "time_offset_length"); return; } /*returns the number of emulation prevention bytes that would have to be added to the NAL*/ u32 gf_media_nalu_emulation_bytes_add_count(u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: - 0x00000300 - 0x00000301 - 0x00000302 - 0x00000303" */ if (num_zero == 2 && (u8)buffer[i] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; if (!buffer[i]) num_zero = 1; } else { if (!buffer[i]) num_zero++; else num_zero = 0; } i++; } return emulation_bytes_count; } u32 gf_media_nalu_add_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 
0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && (u8)buffer_src[i] < 0x04) { /*add emulation code*/ num_zero = 0; buffer_dst[i + emulation_bytes_count] = 0x03; emulation_bytes_count++; if (!buffer_src[i]) num_zero = 1; } else { if (!buffer_src[i]) num_zero++; else num_zero = 0; } buffer_dst[i + emulation_bytes_count] = buffer_src[i]; i++; } return nal_size + emulation_bytes_count; } /*returns the number of emulation prevention bytes found in the NAL*/ u32 gf_media_nalu_emulation_bytes_remove_count(const u8 *buffer, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; if (!buffer || !nal_size) return 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: - 0x00000300 - 0x00000301 - 0x00000302 - 0x00000303" */ if (num_zero == 2 && buffer[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } if (!buffer[i]) num_zero++; else num_zero = 0; i++; } return emulation_bytes_count; } /*nal_size is updated to allow better error detection*/ GF_EXPORT u32 gf_media_nalu_remove_emulation_bytes(const u8 *buffer_src, u8 *buffer_dst, u32 nal_size) { u32 i = 0, emulation_bytes_count = 0; u8 num_zero = 0; while (i < nal_size) { /*ISO 14496-10: "Within the NAL unit, any four-byte sequence that starts with 0x000003 other than the following sequences shall not occur at any byte-aligned position: 0x00000300 0x00000301 0x00000302 0x00000303" */ if (num_zero == 2 && buffer_src[i] == 0x03 && i + 1 < nal_size /*next byte is readable*/ && (u8)buffer_src[i + 1] < 0x04) { /*emulation code found*/ num_zero = 0; emulation_bytes_count++; i++; } buffer_dst[i - emulation_bytes_count] = buffer_src[i]; if (!buffer_src[i]) num_zero++; else num_zero = 0; i++; } return nal_size - emulation_bytes_count; } static s32 gf_avc_read_sps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos, u32 nal_hdr) { AVC_SPS *sps; s32 mb_width, mb_height, sps_id = -1; u32 profile_idc, level_idc, pcomp, i, chroma_format_idc, cl = 0, cr = 0, ct = 0, cb = 0, luma_bd, chroma_bd; u8 separate_colour_plane_flag = 0; if (!bs) { return -1; } if (!vui_flag_pos) { gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); } if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } profile_idc = gf_bs_read_int_log(bs, 8, "profile_idc"); pcomp = gf_bs_read_int_log(bs, 8, "profile_compatibility"); /*sanity checks*/ if (pcomp & 0x3) return -1; level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); /*SubsetSps is used to be sure that AVC SPS are not going to be scratched by subset SPS. According to the SVC standard, a subset SPS can have the same sps_id as its base layer, but it does not refer to the same SPS. */ sps_id = gf_bs_read_ue_log(bs, "sps_id") + GF_SVC_SSPS_ID_SHIFT * subseq_sps; if ((sps_id < 0) || (sps_id >= 32)) { return -1; } luma_bd = chroma_bd = 0; sps = &avc->sps[sps_id]; chroma_format_idc = sps->ChromaArrayType = 1; sps->state |= subseq_sps ? 
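/* subset SPS (SVC) state is tracked with a distinct flag since it may share its sps_id with the base-layer SPS: */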
AVC_SUBSPS_PARSED : AVC_SPS_PARSED; /*High Profile and SVC*/ switch (profile_idc) { case 100: case 110: case 122: case 244: case 44: /*sanity checks: note1 from 7.4.2.1.1 of iso/iec 14496-10-N11084*/ if (pcomp & 0xE0) return -1; /*fall through*/ case 83: case 86: case 118: case 128: chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); sps->ChromaArrayType = chroma_format_idc; if (chroma_format_idc == 3) { separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); /* Depending on the value of separate_colour_plane_flag, the value of the variable ChromaArrayType is assigned as follows. - If separate_colour_plane_flag is equal to 0, ChromaArrayType is set equal to chroma_format_idc. - Otherwise (separate_colour_plane_flag is equal to 1), ChromaArrayType is set equal to 0. */ if (separate_colour_plane_flag) sps->ChromaArrayType = 0; } luma_bd = gf_bs_read_ue_log(bs, "luma_bit_depth"); chroma_bd = gf_bs_read_ue_log(bs, "chroma_bit_depth"); /*qpprime_y_zero_transform_bypass_flag = */ gf_bs_read_int_log(bs, 1, "qpprime_y_zero_transform_bypass_flag"); /*seq_scaling_matrix_present_flag*/ if (gf_bs_read_int_log(bs, 1, "seq_scaling_matrix_present_flag")) { u32 k; for (k = 0; k < 8; k++) { if (gf_bs_read_int_log_idx(bs, 1, "seq_scaling_list_present_flag", k)) { u32 z, last = 8, next = 8; u32 sl = k < 6 ? 16 : 64; for (z = 0; z < sl; z++) { if (next) { s32 delta = gf_bs_read_se(bs); next = (last + delta + 256) % 256; } last = next ? next : last; } } } } break; } sps->profile_idc = profile_idc; sps->level_idc = level_idc; sps->prof_compat = pcomp; sps->log2_max_frame_num = gf_bs_read_ue_log(bs, "log2_max_frame_num") + 4; sps->poc_type = gf_bs_read_ue_log(bs, "poc_type"); sps->chroma_format = chroma_format_idc; sps->luma_bit_depth_m8 = luma_bd; sps->chroma_bit_depth_m8 = chroma_bd; if (sps->poc_type == 0) { sps->log2_max_poc_lsb = gf_bs_read_ue_log(bs, "log2_max_poc_lsb") + 4; } else if (sps->poc_type == 1) { sps->delta_pic_order_always_zero_flag = gf_bs_read_int_log(bs, 1, "delta_pic_order_always_zero_flag"); sps->offset_for_non_ref_pic = gf_bs_read_se_log(bs, "offset_for_non_ref_pic"); sps->offset_for_top_to_bottom_field = gf_bs_read_se_log(bs, "offset_for_top_to_bottom_field"); sps->poc_cycle_length = gf_bs_read_ue_log(bs, "poc_cycle_length"); if (sps->poc_cycle_length > GF_ARRAY_LENGTH(sps->offset_for_ref_frame)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] offset_for_ref_frame overflow from poc_cycle_length\n")); return -1; } for (i = 0; i < sps->poc_cycle_length; i++) sps->offset_for_ref_frame[i] = gf_bs_read_se_log_idx(bs, "offset_for_ref_frame", i); } if (sps->poc_type > 2) { return -1; } sps->max_num_ref_frames = gf_bs_read_ue_log(bs, "max_num_ref_frames"); sps->gaps_in_frame_num_value_allowed_flag = gf_bs_read_int_log(bs, 1, "gaps_in_frame_num_value_allowed_flag"); mb_width = gf_bs_read_ue_log(bs, "pic_width_in_mbs_minus1") + 1; mb_height = gf_bs_read_ue_log(bs, "pic_height_in_map_units_minus1") + 1; sps->frame_mbs_only_flag = gf_bs_read_int_log(bs, 1, "frame_mbs_only_flag"); sps->width = mb_width * 16; sps->height = (2 - sps->frame_mbs_only_flag) * mb_height * 16; if (!sps->frame_mbs_only_flag) sps->mb_adaptive_frame_field_flag = gf_bs_read_int_log(bs, 1, "mb_adaptive_frame_field_flag"); gf_bs_read_int_log(bs, 1, "direct_8x8_inference_flag"); if (gf_bs_read_int_log(bs, 1, "frame_cropping_flag")) { int CropUnitX, CropUnitY, SubWidthC = -1, SubHeightC = -1; if (chroma_format_idc == 1) { SubWidthC = 2; SubHeightC = 2; } else if (chroma_format_idc == 2) { SubWidthC = 
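/* 4:2:2: chroma subsampled horizontally only */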
2; SubHeightC = 1; } else if ((chroma_format_idc == 3) && (separate_colour_plane_flag == 0)) { SubWidthC = 1; SubHeightC = 1; } if (sps->ChromaArrayType == 0) { assert(SubWidthC == -1); CropUnitX = 1; CropUnitY = 2 - sps->frame_mbs_only_flag; } else { CropUnitX = SubWidthC; CropUnitY = SubHeightC * (2 - sps->frame_mbs_only_flag); } cl = gf_bs_read_ue_log(bs, "frame_crop_left_offset"); cr = gf_bs_read_ue_log(bs, "frame_crop_right_offset"); ct = gf_bs_read_ue_log(bs, "frame_crop_top_offset"); cb = gf_bs_read_ue_log(bs, "frame_crop_bottom_offset"); sps->width -= CropUnitX * (cl + cr); sps->height -= CropUnitY * (ct + cb); cl *= CropUnitX; cr *= CropUnitX; ct *= CropUnitY; cb *= CropUnitY; } sps->crop.left = cl; sps->crop.right = cr; sps->crop.top = ct; sps->crop.bottom = cb; if (vui_flag_pos) { *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); } /*vui_parameters_present_flag*/ sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag"); if (sps->vui_parameters_present_flag) { sps->vui.aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->vui.aspect_ratio_info_present_flag) { s32 aspect_ratio_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (aspect_ratio_idc == 255) { sps->vui.par_num = gf_bs_read_int_log(bs, 16, "aspect_ratio_num"); sps->vui.par_den = gf_bs_read_int_log(bs, 16, "aspect_ratio_den"); } else if (aspect_ratio_idc < GF_ARRAY_LENGTH(avc_hevc_sar) ) { sps->vui.par_num = avc_hevc_sar[aspect_ratio_idc].w; sps->vui.par_den = avc_hevc_sar[aspect_ratio_idc].h; } else { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] Unknown aspect_ratio_idc: your video may have a wrong aspect ratio. Contact the GPAC team!\n")); } } sps->vui.overscan_info_present_flag = gf_bs_read_int_log(bs, 1, "overscan_info_present_flag"); if (sps->vui.overscan_info_present_flag) gf_bs_read_int_log(bs, 1, "overscan_appropriate_flag"); /* default values */ sps->vui.video_format = 5; sps->vui.colour_primaries = 2; sps->vui.transfer_characteristics = 2; sps->vui.matrix_coefficients = 2; /* now read values if possible */ sps->vui.video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->vui.video_signal_type_present_flag) { sps->vui.video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->vui.video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); sps->vui.colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"); if (sps->vui.colour_description_present_flag) { sps->vui.colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->vui.transfer_characteristics = gf_bs_read_int_log(bs, 8, "transfer_characteristics"); sps->vui.matrix_coefficients = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if (gf_bs_read_int_log(bs, 1, "chroma_location_info_present_flag")) { gf_bs_read_ue_log(bs, "chroma_sample_location_type_top_field"); gf_bs_read_ue_log(bs, "chroma_sample_location_type_bottom_field"); } sps->vui.timing_info_present_flag = gf_bs_read_int_log(bs, 1, "timing_info_present_flag"); if (sps->vui.timing_info_present_flag) { sps->vui.num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->vui.time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->vui.fixed_frame_rate_flag = gf_bs_read_int_log(bs, 1, "fixed_frame_rate_flag"); } sps->vui.nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "nal_hrd_parameters_present_flag"); if (sps->vui.nal_hrd_parameters_present_flag) 
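/* NAL and VCL HRD parameters share the same hrd_parameters() syntax, hence the same parser is used for both: */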
avc_parse_hrd_parameters(bs, &sps->vui.hrd); sps->vui.vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vcl_hrd_parameters_present_flag"); if (sps->vui.vcl_hrd_parameters_present_flag) avc_parse_hrd_parameters(bs, &sps->vui.hrd); if (sps->vui.nal_hrd_parameters_present_flag || sps->vui.vcl_hrd_parameters_present_flag) sps->vui.low_delay_hrd_flag = gf_bs_read_int_log(bs, 1, "low_delay_hrd_flag"); sps->vui.pic_struct_present_flag = gf_bs_read_int_log(bs, 1, "pic_struct_present_flag"); } /*end of seq_parameter_set_data*/ if (subseq_sps) { if ((profile_idc == 83) || (profile_idc == 86)) { u8 extended_spatial_scalability_idc; /*parsing seq_parameter_set_svc_extension*/ gf_bs_read_int_log(bs, 1, "inter_layer_deblocking_filter_control_present_flag"); extended_spatial_scalability_idc = gf_bs_read_int_log(bs, 2, "extended_spatial_scalability_idc"); if (sps->ChromaArrayType == 1 || sps->ChromaArrayType == 2) { gf_bs_read_int_log(bs, 1, "chroma_phase_x_plus1_flag"); } if (sps->ChromaArrayType == 1) { gf_bs_read_int_log(bs, 2, "chroma_phase_y_plus1"); } if (extended_spatial_scalability_idc == 1) { if (sps->ChromaArrayType > 0) { gf_bs_read_int_log(bs, 1, "seq_ref_layer_chroma_phase_x_plus1_flag"); gf_bs_read_int_log(bs, 2, "seq_ref_layer_chroma_phase_y_plus1"); } gf_bs_read_se_log(bs, "seq_scaled_ref_layer_left_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_top_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_right_offset"); gf_bs_read_se_log(bs, "seq_scaled_ref_layer_bottom_offset"); } if (gf_bs_read_int_log(bs, 1, "seq_tcoeff_level_prediction_flag")) { gf_bs_read_int_log(bs, 1, "adaptive_tcoeff_level_prediction_flag"); } gf_bs_read_int_log(bs, 1, "slice_header_restriction_flag"); if (gf_bs_read_int_log(bs, 1, "svc_vui_parameters_present")) { u32 vui_ext_num_entries_minus1 = gf_bs_read_ue_log(bs, "vui_ext_num_entries_minus1"); for (i = 0; i <= vui_ext_num_entries_minus1; i++) { u8 vui_ext_nal_hrd_parameters_present_flag, vui_ext_vcl_hrd_parameters_present_flag, vui_ext_timing_info_present_flag; gf_bs_read_int_log(bs, 3, "vui_ext_dependency_id"); gf_bs_read_int_log(bs, 4, "vui_ext_quality_id"); gf_bs_read_int_log(bs, 3, "vui_ext_temporal_id"); vui_ext_timing_info_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_timing_info_present_flag"); if (vui_ext_timing_info_present_flag) { gf_bs_read_int_log(bs, 32, "vui_ext_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vui_ext_time_scale"); gf_bs_read_int_log(bs, 1, "vui_ext_fixed_frame_rate_flag"); } vui_ext_nal_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_nal_hrd_parameters_present_flag"); if (vui_ext_nal_hrd_parameters_present_flag) { //hrd_parameters( ) } vui_ext_vcl_hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_ext_vcl_hrd_parameters_present_flag"); if (vui_ext_vcl_hrd_parameters_present_flag) { //hrd_parameters( ) } if (vui_ext_nal_hrd_parameters_present_flag || vui_ext_vcl_hrd_parameters_present_flag) { gf_bs_read_int_log(bs, 1, "vui_ext_low_delay_hrd_flag"); } gf_bs_read_int_log(bs, 1, "vui_ext_pic_struct_present_flag"); } } } else if ((profile_idc == 118) || (profile_idc == 128)) { GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[avc-h264] MVC parsing not implemented - skipping parsing end of Subset SPS\n")); return sps_id; } if (gf_bs_read_int_log(bs, 1, "additional_extension2")) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] skipping parsing end of Subset SPS (additional_extension2)\n")); return sps_id; } } return sps_id; } GF_EXPORT s32 gf_avc_read_sps_bs(GF_BitStream *bs, AVCState *avc, u32 
subseq_sps, u32 *vui_flag_pos) { return gf_avc_read_sps_bs_internal(bs, avc, subseq_sps, vui_flag_pos, 0); } GF_EXPORT s32 gf_avc_read_sps(const u8 *sps_data, u32 sps_size, AVCState *avc, u32 subseq_sps, u32 *vui_flag_pos) { s32 sps_id = -1; GF_BitStream *bs; char *sps_data_without_emulation_bytes = NULL; u32 sps_data_without_emulation_bytes_size = 0; if (vui_flag_pos) { /*SPS still contains emulation bytes*/ sps_data_without_emulation_bytes = gf_malloc(sps_size * sizeof(char)); sps_data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(sps_data, sps_data_without_emulation_bytes, sps_size); bs = gf_bs_new(sps_data_without_emulation_bytes, sps_data_without_emulation_bytes_size, GF_BITSTREAM_READ); *vui_flag_pos = 0; } else { bs = gf_bs_new(sps_data, sps_size, GF_BITSTREAM_READ); } if (!bs) { sps_id = -1; goto exit; } sps_id = gf_avc_read_sps_bs(bs, avc, subseq_sps, vui_flag_pos); exit: gf_bs_del(bs); if (sps_data_without_emulation_bytes) gf_free(sps_data_without_emulation_bytes); return sps_id; } static s32 gf_avc_read_pps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 nal_hdr) { s32 pps_id; AVC_PPS *pps; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!nal_hdr) { gf_bs_read_int_log(bs, 1, "forbidden_zero_bit"); gf_bs_read_int_log(bs, 2, "nal_ref_idc"); gf_bs_read_int_log(bs, 5, "nal_unit_type"); } pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 255)) { return -1; } pps = &avc->pps[pps_id]; pps->id = pps_id; if (!pps->status) pps->status = 1; pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((pps->sps_id<0) || (pps->sps_id >= 32)) { pps->sps_id = 0; return -1; } /*sps_id may be refer to regular SPS or subseq sps, depending on the coded slice referring to the pps*/ if (!avc->sps[pps->sps_id].state && !avc->sps[pps->sps_id + GF_SVC_SSPS_ID_SHIFT].state) { return -1; } avc->pps_active_idx = pps->id; /*set active sps*/ avc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->entropy_coding_mode_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_mode_flag"); pps->pic_order_present = gf_bs_read_int_log(bs, 1, "pic_order_present"); pps->slice_group_count = gf_bs_read_ue_log(bs, "slice_group_count_minus1") + 1; if (pps->slice_group_count > 1) { u32 iGroup; pps->mb_slice_group_map_type = gf_bs_read_ue_log(bs, "mb_slice_group_map_type"); if (pps->mb_slice_group_map_type == 0) { for (iGroup = 0; iGroup <= pps->slice_group_count - 1; iGroup++) gf_bs_read_ue_log_idx(bs, "run_length_minus1", iGroup); } else if (pps->mb_slice_group_map_type == 2) { for (iGroup = 0; iGroup < pps->slice_group_count - 1; iGroup++) { gf_bs_read_ue_log_idx(bs, "top_left", iGroup); gf_bs_read_ue_log_idx(bs, "bottom_right", iGroup); } } else if (pps->mb_slice_group_map_type == 3 || pps->mb_slice_group_map_type == 4 || pps->mb_slice_group_map_type == 5) { gf_bs_read_int_log(bs, 1, "slice_group_change_direction_flag"); gf_bs_read_ue_log(bs, "slice_group_change_rate_minus1"); } else if (pps->mb_slice_group_map_type == 6) { u32 i; pps->pic_size_in_map_units_minus1 = gf_bs_read_ue_log(bs, "pic_size_in_map_units_minus1"); for (i = 0; i <= pps->pic_size_in_map_units_minus1; i++) { gf_bs_read_int_log_idx(bs, (u32)ceil(log(pps->slice_group_count) / log(2)), "slice_group_id", i); } } } pps->num_ref_idx_l0_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active_minus1"); pps->num_ref_idx_l1_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active_minus1"); /* if ((pps->ref_count[0] > 32) || (pps->ref_count[1] > 32)) goto exit; */ 
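/* weighted_pred_flag enables explicit weighted prediction for P/SP slices; weighted_bipred_idc below selects default (0), explicit (1) or implicit (2) weighting for B slices */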
pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); gf_bs_read_int_log(bs, 2, "weighted_bipred_idc"); gf_bs_read_se_log(bs, "init_qp_minus26"); gf_bs_read_se_log(bs, "init_qs_minus26"); gf_bs_read_se_log(bs, "chroma_qp_index_offset"); pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"); gf_bs_read_int_log(bs, 1, "constrained_intra_pred"); pps->redundant_pic_cnt_present = gf_bs_read_int_log(bs, 1, "redundant_pic_cnt_present"); return pps_id; } GF_EXPORT s32 gf_avc_read_pps_bs(GF_BitStream *bs, AVCState *avc) { return gf_avc_read_pps_bs_internal(bs, avc, 0); } GF_EXPORT s32 gf_avc_read_pps(const u8 *pps_data, u32 pps_size, AVCState *avc) { GF_BitStream *bs; s32 pps_id; /*PPS still contains emulation bytes*/ bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ); if (!bs) { return -1; } gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); pps_id = gf_avc_read_pps_bs(bs, avc); gf_bs_del(bs); return pps_id; } #if 0 //unused s32 gf_avc_read_sps_ext(const char *spse_data, u32 spse_size) { GF_BitStream *bs; s32 sps_id; bs = gf_bs_new(spse_data, spse_size, GF_BITSTREAM_READ); sps_id = gf_avc_read_sps_ext_bs(bs); gf_bs_del(bs); return sps_id; } #endif static s32 SVC_ReadNal_header_extension(GF_BitStream *bs, SVC_NALUHeader *NalHeader) { gf_bs_read_int_log(bs, 1, "reserved_one_bit"); NalHeader->idr_pic_flag = gf_bs_read_int_log(bs, 1, "idr_flag"); NalHeader->priority_id = gf_bs_read_int_log(bs, 6, "priority_id"); gf_bs_read_int_log(bs, 1, "no_inter_layer_pred_flag"); NalHeader->dependency_id = gf_bs_read_int_log(bs, 3, "DependencyId"); NalHeader->quality_id = gf_bs_read_int_log(bs, 4, "quality_id"); NalHeader->temporal_id = gf_bs_read_int_log(bs, 3, "temporal_id"); gf_bs_read_int_log(bs, 1, "use_ref_base_pic_flag"); gf_bs_read_int_log(bs, 1, "discardable_flag"); gf_bs_read_int_log(bs, 1, "output_flag"); gf_bs_read_int_log(bs, 2, "reserved_three_2bits"); return 1; } static void ref_pic_list_modification(GF_BitStream *bs, u32 slice_type) { if (slice_type % 5 != 2 && slice_type % 5 != 4) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } if (slice_type % 5 == 1) { if (gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1")) { u32 idx=0, modification_of_pic_nums_idc; do { modification_of_pic_nums_idc = gf_bs_read_ue_log_idx(bs, "modification_of_pic_nums_idc", idx); if (modification_of_pic_nums_idc == 0 || modification_of_pic_nums_idc == 1) { gf_bs_read_ue_log_idx(bs, "abs_diff_pic_num_minus1", idx); } else if (modification_of_pic_nums_idc == 2) { gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); } idx++; } while ((modification_of_pic_nums_idc != 3) && gf_bs_available(bs)); } } } static void pred_weight_table(GF_BitStream *bs, u32 slice_type, u32 ChromaArrayType, u32 num_ref_idx_l0_active_minus1, u32 num_ref_idx_l1_active_minus1) { u32 i, j; gf_bs_read_ue_log(bs, "luma_log2_weight_denom"); if (ChromaArrayType != 0) { gf_bs_read_ue_log(bs, "chroma_log2_weight_denom"); } for (i = 0; i <= num_ref_idx_l0_active_minus1; i++) { if 
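/* luma_weight_l0_flag: explicit luma weight and offset follow for this list-0 reference picture */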
(gf_bs_read_int_log_idx(bs, 1, "luma_weight_l0_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l0", i); gf_bs_read_se_log_idx(bs, "luma_offset_l0", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l0_flag", i)) for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l0", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l0", i, j); } } } if (slice_type % 5 == 1) { for (i = 0; i <= num_ref_idx_l1_active_minus1; i++) { if (gf_bs_read_int_log_idx(bs, 1, "luma_weight_l1_flag", i)) { gf_bs_read_se_log_idx(bs, "luma_weight_l1", i); gf_bs_read_se_log_idx(bs, "luma_offset_l1", i); } if (ChromaArrayType != 0) { if (gf_bs_read_int_log_idx(bs, 1, "chroma_weight_l1_flag", i)) { for (j = 0; j < 2; j++) { gf_bs_read_se_log_idx2(bs, "chroma_weight_l1", i, j); gf_bs_read_se_log_idx2(bs, "chroma_offset_l1", i, j); } } } } } } static void dec_ref_pic_marking(GF_BitStream *bs, Bool IdrPicFlag) { if (IdrPicFlag) { gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag"); gf_bs_read_int_log(bs, 1, "long_term_reference_flag"); } else { if (gf_bs_read_int_log(bs, 1, "adaptive_ref_pic_marking_mode_flag")) { u32 idx=0, memory_management_control_operation; do { memory_management_control_operation = gf_bs_read_ue_log_idx(bs, "memory_management_control_operation", idx); if (memory_management_control_operation == 1 || memory_management_control_operation == 3) gf_bs_read_ue_log_idx(bs, "difference_of_pic_nums_minus1", idx); if (memory_management_control_operation == 2) gf_bs_read_ue_log_idx(bs, "long_term_pic_num", idx); if (memory_management_control_operation == 3 || memory_management_control_operation == 6) gf_bs_read_ue_log_idx(bs, "long_term_frame_idx", idx); if (memory_management_control_operation == 4) gf_bs_read_ue_log_idx(bs, "max_long_term_frame_idx_plus1", idx); idx++; } while (memory_management_control_operation != 0); } } } static s32 avc_parse_slice(GF_BitStream *bs, AVCState *avc, Bool svc_idr_flag, AVCSliceInfo *si) { s32 pps_id, num_ref_idx_l0_active_minus1 = 0, num_ref_idx_l1_active_minus1 = 0; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id]; if (!si->sps->log2_max_frame_num) return -2; avc->sps_active_idx = si->pps->sps_id; avc->pps_active_idx = pps_id; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; si->bottom_field_flag = 0; if (!si->sps->frame_mbs_only_flag) { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if ((si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) || svc_idr_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "poc_lsb"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = 
gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } if (si->slice_type % 5 == GF_AVC_TYPE_B) { gf_bs_read_int_log(bs, 1, "direct_spatial_mv_pred_flag"); } num_ref_idx_l0_active_minus1 = si->pps->num_ref_idx_l0_default_active_minus1; num_ref_idx_l1_active_minus1 = si->pps->num_ref_idx_l1_default_active_minus1; if (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_B) { Bool num_ref_idx_active_override_flag = gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag"); if (num_ref_idx_active_override_flag) { num_ref_idx_l0_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_active_minus1"); if (si->slice_type % 5 == GF_AVC_TYPE_B) { num_ref_idx_l1_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_active_minus1"); } } } if (si->nal_unit_type == 20 || si->nal_unit_type == 21) { //ref_pic_list_mvc_modification(); /* specified in Annex H */ GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] unimplemented ref_pic_list_mvc_modification() in slide header\n")); assert(0); return -1; } else { ref_pic_list_modification(bs, si->slice_type); } if ((si->pps->weighted_pred_flag && (si->slice_type % 5 == GF_AVC_TYPE_P || si->slice_type % 5 == GF_AVC_TYPE_SP)) || (si->pps->weighted_bipred_idc == 1 && si->slice_type % 5 == GF_AVC_TYPE_B)) { pred_weight_table(bs, si->slice_type, si->sps->ChromaArrayType, num_ref_idx_l0_active_minus1, num_ref_idx_l1_active_minus1); } if (si->nal_ref_idc != 0) { dec_ref_pic_marking(bs, (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE)); } if (si->pps->entropy_coding_mode_flag && si->slice_type % 5 != GF_AVC_TYPE_I && si->slice_type % 5 != GF_AVC_TYPE_SI) { gf_bs_read_ue_log(bs, "cabac_init_idc"); } /*slice_qp_delta = */gf_bs_read_se(bs); if (si->slice_type % 5 == GF_AVC_TYPE_SP || si->slice_type % 5 == GF_AVC_TYPE_SI) { if (si->slice_type % 5 == GF_AVC_TYPE_SP) { gf_bs_read_int_log(bs, 1, "sp_for_switch_flag"); } gf_bs_read_se_log(bs, "slice_qs_delta"); } if (si->pps->deblocking_filter_control_present_flag) { if (gf_bs_read_ue_log(bs, "disable_deblocking_filter_idc") != 1) { gf_bs_read_se_log(bs, "slice_alpha_c0_offset_div2"); gf_bs_read_se_log(bs, "slice_beta_offset_div2"); } } if (si->pps->slice_group_count > 1 && si->pps->mb_slice_group_map_type >= 3 && si->pps->mb_slice_group_map_type <= 5) { gf_bs_read_int_log(bs, (u32)ceil(log1p((si->pps->pic_size_in_map_units_minus1 + 1) / (si->pps->slice_group_change_rate_minus1 + 1) ) / log(2)), "slice_group_change_cycle"); } return 0; } static s32 svc_parse_slice(GF_BitStream *bs, AVCState *avc, AVCSliceInfo *si) { s32 pps_id; /*s->current_picture.reference= h->nal_ref_idc != 0;*/ gf_bs_read_ue_log(bs, "first_mb_in_slice"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (si->slice_type > 9) return -1; pps_id = gf_bs_read_ue_log(bs, "pps_id"); if (pps_id > 255) return -1; si->pps = &avc->pps[pps_id]; si->pps->id = pps_id; if (!si->pps->slice_group_count) return -2; si->sps = &avc->sps[si->pps->sps_id + GF_SVC_SSPS_ID_SHIFT]; if (!si->sps->log2_max_frame_num) return -2; si->frame_num = gf_bs_read_int_log(bs, si->sps->log2_max_frame_num, "frame_num"); si->field_pic_flag = 0; if (si->sps->frame_mbs_only_flag) { /*s->picture_structure= PICT_FRAME;*/ } else { si->field_pic_flag = gf_bs_read_int_log(bs, 1, "field_pic_flag"); if (si->field_pic_flag) si->bottom_field_flag = gf_bs_read_int_log(bs, 1, "bottom_field_flag"); } if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE || si->NalHeader.idr_pic_flag) si->idr_pic_id = gf_bs_read_ue_log(bs, "idr_pic_id"); if (si->sps->poc_type == 
0) { si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); if (si->pps->pic_order_present && !si->field_pic_flag) { si->delta_poc_bottom = gf_bs_read_se_log(bs, "delta_poc_bottom"); } } else if ((si->sps->poc_type == 1) && !si->sps->delta_pic_order_always_zero_flag) { si->delta_poc[0] = gf_bs_read_se_log(bs, "delta_poc0"); if ((si->pps->pic_order_present == 1) && !si->field_pic_flag) si->delta_poc[1] = gf_bs_read_se_log(bs, "delta_poc1"); } if (si->pps->redundant_pic_cnt_present) { si->redundant_pic_cnt = gf_bs_read_ue_log(bs, "redundant_pic_cnt"); } return 0; } static s32 avc_parse_recovery_point_sei(GF_BitStream *bs, AVCState *avc) { AVCSeiRecoveryPoint *rp = &avc->sei.recovery_point; rp->frame_cnt = gf_bs_read_ue_log(bs, "frame_cnt"); rp->exact_match_flag = gf_bs_read_int_log(bs, 1, "exact_match_flag"); rp->broken_link_flag = gf_bs_read_int_log(bs, 1, "broken_link_flag"); rp->changing_slice_group_idc = gf_bs_read_int_log(bs, 2, "changing_slice_group_idc"); rp->valid = 1; return 0; } /*for interpretation see ISO 14496-10 N.11084, table D-1*/ static s32 avc_parse_pic_timing_sei(GF_BitStream *bs, AVCState *avc) { int sps_id = avc->sps_active_idx; const char NumClockTS[] = { 1, 1, 1, 2, 2, 3, 3, 2, 3 }; AVCSeiPicTiming *pt = &avc->sei.pic_timing; if (sps_id < 0) { /*sps_active_idx equals -1 when no sps has been detected. In this case SEI should not be decoded.*/ assert(0); return 1; } if (avc->sps[sps_id].vui.nal_hrd_parameters_present_flag || avc->sps[sps_id].vui.vcl_hrd_parameters_present_flag) { /*CpbDpbDelaysPresentFlag, see 14496-10(2003) E.11*/ gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.cpb_removal_delay_length_minus1, "cpb_removal_delay_minus1"); gf_bs_read_int_log(bs, 1 + avc->sps[sps_id].vui.hrd.dpb_output_delay_length_minus1, "dpb_output_delay_minus1"); } /*ISO 14496-10 (2003), D.8.2: we need to get pic_struct in order to know if we display top field first or bottom field first*/ if (avc->sps[sps_id].vui.pic_struct_present_flag) { int i; pt->pic_struct = gf_bs_read_int_log(bs, 4, "pic_struct"); if (pt->pic_struct > 8) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[avc-h264] invalid pic_struct value %d\n", pt->pic_struct)); return 1; } for (i = 0; i < NumClockTS[pt->pic_struct]; i++) { if (gf_bs_read_int_log_idx(bs, 1, "clock_timestamp_flag", i)) { Bool full_timestamp_flag; gf_bs_read_int_log_idx(bs, 2, "ct_type", i); gf_bs_read_int_log_idx(bs, 1, "nuit_field_based_flag", i); gf_bs_read_int_log_idx(bs, 5, "counting_type", i); full_timestamp_flag = gf_bs_read_int_log_idx(bs, 1, "full_timestamp_flag", i); gf_bs_read_int_log_idx(bs, 1, "discontinuity_flag", i); gf_bs_read_int_log_idx(bs, 1, "cnt_dropped_flag", i); gf_bs_read_int_log_idx(bs, 8, "n_frames", i); if (full_timestamp_flag) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } else { if (gf_bs_read_int_log_idx(bs, 1, "seconds_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "seconds_value", i); if (gf_bs_read_int_log_idx(bs, 1, "minutes_flag", i)) { gf_bs_read_int_log_idx(bs, 6, "minutes_value", i); if (gf_bs_read_int_log_idx(bs, 1, "hours_flag", i)) { gf_bs_read_int_log_idx(bs, 5, "hours_value", i); } } } if (avc->sps[sps_id].vui.hrd.time_offset_length > 0) gf_bs_read_int_log_idx(bs, avc->sps[sps_id].vui.hrd.time_offset_length, "time_offset", i); } } } } return 0; } #if !defined(GPAC_DISABLE_HEVC) static void avc_parse_itu_t_t35_sei(GF_BitStream* bs, AVCSeiItuTT35DolbyVision *dovi) { u8 
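/* flags DolbyVision RPU carriage when the registered ITU-T T.35 payload matches country code 0xB5, provider 0x31, user id 'GA94' and data_type_code 8 or 9: */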
itu_t_t35_country_code = gf_bs_read_u8(bs); u16 terminal_provider_code = gf_bs_read_u16(bs); u32 user_id = gf_bs_read_u32(bs); u8 data_type_code = gf_bs_read_u8(bs); if (itu_t_t35_country_code == 0xB5 && terminal_provider_code == 0x31 && user_id == 0x47413934 && (data_type_code == 0x8 || data_type_code == 0x9)) { dovi->rpu_flag = GF_TRUE; } } #endif static void avc_compute_poc(AVCSliceInfo *si) { enum { AVC_PIC_FRAME, AVC_PIC_FIELD_TOP, AVC_PIC_FIELD_BOTTOM, } pic_type; s32 field_poc[2] = { 0,0 }; s32 max_frame_num; if (!si->sps) return; max_frame_num = 1 << (si->sps->log2_max_frame_num); /* picture type */ if (si->sps->frame_mbs_only_flag || !si->field_pic_flag) pic_type = AVC_PIC_FRAME; else if (si->bottom_field_flag) pic_type = AVC_PIC_FIELD_BOTTOM; else pic_type = AVC_PIC_FIELD_TOP; /* frame_num_offset */ if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; si->frame_num_offset = 0; } else { if (si->frame_num < si->frame_num_prev) si->frame_num_offset = si->frame_num_offset_prev + max_frame_num; else si->frame_num_offset = si->frame_num_offset_prev; } /*ISO 14496-10 N.11084 8.2.1.1*/ if (si->sps->poc_type == 0) { const u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*ISO 14496-10 N.11084 eq (8-3)*/ if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; /*ISO 14496-10 N.11084 eq (8-4)*/ if (pic_type != AVC_PIC_FIELD_BOTTOM) field_poc[0] = si->poc_msb + si->poc_lsb; /*ISO 14496-10 N.11084 eq (8-5)*/ if (pic_type != AVC_PIC_FIELD_TOP) { if (!si->field_pic_flag) field_poc[1] = field_poc[0] + si->delta_poc_bottom; else field_poc[1] = si->poc_msb + si->poc_lsb; } } /*ISO 14496-10 N.11084 8.2.1.2*/ else if (si->sps->poc_type == 1) { u32 i; s32 abs_frame_num, expected_delta_per_poc_cycle, expected_poc; if (si->sps->poc_cycle_length) abs_frame_num = si->frame_num_offset + si->frame_num; else abs_frame_num = 0; if (!si->nal_ref_idc && (abs_frame_num > 0)) abs_frame_num--; expected_delta_per_poc_cycle = 0; for (i = 0; i < si->sps->poc_cycle_length; i++) expected_delta_per_poc_cycle += si->sps->offset_for_ref_frame[i]; if (abs_frame_num > 0) { const u32 poc_cycle_cnt = (abs_frame_num - 1) / si->sps->poc_cycle_length; const u32 frame_num_in_poc_cycle = (abs_frame_num - 1) % si->sps->poc_cycle_length; expected_poc = poc_cycle_cnt * expected_delta_per_poc_cycle; for (i = 0; i <= frame_num_in_poc_cycle; i++) expected_poc += si->sps->offset_for_ref_frame[i]; } else { expected_poc = 0; } if (!si->nal_ref_idc) expected_poc += si->sps->offset_for_non_ref_pic; field_poc[0] = expected_poc + si->delta_poc[0]; field_poc[1] = field_poc[0] + si->sps->offset_for_top_to_bottom_field; if (pic_type == AVC_PIC_FRAME) field_poc[1] += si->delta_poc[1]; } /*ISO 14496-10 N.11084 8.2.1.3*/ else if (si->sps->poc_type == 2) { int poc; if (si->nal_unit_type == GF_AVC_NALU_IDR_SLICE) { poc = 0; } else { const int abs_frame_num = si->frame_num_offset + si->frame_num; poc = 2 * abs_frame_num; if (!si->nal_ref_idc) poc -= 1; } field_poc[0] = poc; field_poc[1] = poc; } /*ISO 14496-10 N.11084 eq (8-1)*/ if (pic_type == AVC_PIC_FRAME) si->poc = MIN(field_poc[0], field_poc[1]); else if (pic_type == AVC_PIC_FIELD_TOP) si->poc = field_poc[0]; else si->poc = field_poc[1]; } GF_EXPORT s32 gf_avc_parse_nalu(GF_BitStream *bs, 
AVCState *avc) { u8 idr_flag; s32 slice, ret; u32 nal_hdr; AVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nal_hdr = gf_bs_read_u8(bs); slice = 0; memcpy(&n_state, &avc->s_info, sizeof(AVCSliceInfo)); avc->last_nal_type_parsed = n_state.nal_unit_type = nal_hdr & 0x1F; n_state.nal_ref_idc = (nal_hdr >> 5) & 0x3; idr_flag = 0; switch (n_state.nal_unit_type) { case GF_AVC_NALU_ACCESS_UNIT: case GF_AVC_NALU_END_OF_SEQ: case GF_AVC_NALU_END_OF_STREAM: ret = 1; break; case GF_AVC_NALU_SVC_SLICE: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); // slice buffer - read the info and compare. /*ret = */svc_parse_slice(bs, avc, &n_state); if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } avc_compute_poc(&n_state); if (avc->s_info.poc != n_state.poc) { memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 1; } memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return 0; case GF_AVC_NALU_SVC_PREFIX_NALU: SVC_ReadNal_header_extension(bs, &n_state.NalHeader); return 0; case GF_AVC_NALU_IDR_SLICE: case GF_AVC_NALU_NON_IDR_SLICE: case GF_AVC_NALU_DP_A_SLICE: case GF_AVC_NALU_DP_B_SLICE: case GF_AVC_NALU_DP_C_SLICE: slice = 1; /* slice buffer - read the info and compare.*/ ret = avc_parse_slice(bs, avc, idr_flag, &n_state); if (ret < 0) return ret; ret = 0; if ( ((avc->s_info.nal_unit_type > GF_AVC_NALU_IDR_SLICE) || (avc->s_info.nal_unit_type < GF_AVC_NALU_NON_IDR_SLICE)) && (avc->s_info.nal_unit_type != GF_AVC_NALU_SVC_SLICE) ) { break; } if (avc->s_info.frame_num != n_state.frame_num) { ret = 1; break; } if (avc->s_info.field_pic_flag != n_state.field_pic_flag) { ret = 1; break; } if ((avc->s_info.nal_ref_idc != n_state.nal_ref_idc) && (!avc->s_info.nal_ref_idc || !n_state.nal_ref_idc)) { ret = 1; break; } assert(avc->s_info.sps); if (avc->s_info.sps->poc_type == n_state.sps->poc_type) { if (!avc->s_info.sps->poc_type) { if (!n_state.bottom_field_flag && (avc->s_info.poc_lsb != n_state.poc_lsb)) { ret = 1; break; } if (avc->s_info.delta_poc_bottom != n_state.delta_poc_bottom) { ret = 1; break; } } else if (avc->s_info.sps->poc_type == 1) { if (avc->s_info.delta_poc[0] != n_state.delta_poc[0]) { ret = 1; break; } if (avc->s_info.delta_poc[1] != n_state.delta_poc[1]) { ret = 1; break; } } } if (n_state.nal_unit_type == GF_AVC_NALU_IDR_SLICE) { if (avc->s_info.nal_unit_type != GF_AVC_NALU_IDR_SLICE) { /*IdrPicFlag differs in value*/ ret = 1; break; } else if (avc->s_info.idr_pic_id != n_state.idr_pic_id) { /*both IDR and idr_pic_id differs*/ ret = 1; break; } } break; case GF_AVC_NALU_SEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 0, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_PIC_PARAM: avc->last_ps_idx = gf_avc_read_pps_bs_internal(bs, avc, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SVC_SUBSEQ_PARAM: avc->last_ps_idx = gf_avc_read_sps_bs_internal(bs, avc, 1, NULL, nal_hdr); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEQ_PARAM_EXT: avc->last_ps_idx = (s32) gf_bs_read_ue(bs); if (avc->last_ps_idx < 0) return -1; return 0; case GF_AVC_NALU_SEI: case GF_AVC_NALU_FILLER_DATA: return 0; default: if (avc->s_info.nal_unit_type <= GF_AVC_NALU_IDR_SLICE) ret = 1; //To detect change of AU when multiple sps and pps in stream else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEI && avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else if ((nal_hdr & 0x1F) == GF_AVC_NALU_SEQ_PARAM && 
avc->s_info.nal_unit_type == GF_AVC_NALU_SVC_SLICE) ret = 1; else ret = 0; break; } /* save _prev values */ if (ret && avc->s_info.sps) { n_state.frame_num_offset_prev = avc->s_info.frame_num_offset; if ((avc->s_info.sps->poc_type != 2) || (avc->s_info.nal_ref_idc != 0)) n_state.frame_num_prev = avc->s_info.frame_num; if (avc->s_info.nal_ref_idc) { n_state.poc_lsb_prev = avc->s_info.poc_lsb; n_state.poc_msb_prev = avc->s_info.poc_msb; } } if (slice) avc_compute_poc(&n_state); memcpy(&avc->s_info, &n_state, sizeof(AVCSliceInfo)); return ret; } u32 gf_media_avc_reformat_sei(u8 *buffer, u32 nal_size, Bool isobmf_rewrite, AVCState *avc) { u32 ptype, psize, hdr, var; u32 start; GF_BitStream *bs; GF_BitStream *bs_dest = NULL; u8 nhdr; Bool sei_removed = GF_FALSE; char store; hdr = buffer[0]; if ((hdr & 0x1F) != GF_AVC_NALU_SEI) return 0; if (isobmf_rewrite) bs_dest = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); nhdr = gf_bs_read_int(bs, 8); if (bs_dest) gf_bs_write_int(bs_dest, nhdr, 8); /*parse SEI*/ while (gf_bs_available(bs)) { Bool do_copy; ptype = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); ptype += v; if (v != 0xFF) break; } psize = 0; while (1) { u8 v = gf_bs_read_int(bs, 8); psize += v; if (v != 0xFF) break; } start = (u32)gf_bs_get_position(bs); do_copy = 1; if (start + psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message type %d size error (%d but %d remain), keeping full SEI untouched\n", ptype, psize, nal_size - start)); if (bs_dest) gf_bs_del(bs_dest); return nal_size; } switch (ptype) { /*remove SEI messages forbidden in MP4*/ case 3: /*filler data*/ case 10: /*sub_seq info*/ case 11: /*sub_seq_layer char*/ case 12: /*sub_seq char*/ do_copy = 0; sei_removed = GF_TRUE; break; case 5: /*user unregistered */ store = buffer[start + psize]; buffer[start + psize] = 0; GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[avc-h264] SEI user message %s\n", buffer + start + 16)); buffer[start + psize] = store; break; case 6: /*recovery point*/ avc_parse_recovery_point_sei(bs, avc); break; case 1: /*pic_timing*/ avc_parse_pic_timing_sei(bs, avc); break; case 0: /*buffering period*/ case 2: /*pan scan rect*/ case 4: /*user registered ITU t35*/ case 7: /*def_rec_pic_marking_repetition*/ case 8: /*spare_pic*/ case 9: /*scene info*/ case 13: /*full frame freeze*/ case 14: /*full frame freeze release*/ case 15: /*full frame snapshot*/ case 16: /*progressive refinement segment start*/ case 17: /*progressive refinement segment end*/ case 18: /*motion constrained slice group*/ default: /*add all unknown SEIs*/ break; } if (do_copy && bs_dest) { var = ptype; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); var = psize; while (var >= 255) { gf_bs_write_int(bs_dest, 0xFF, 8); var -= 255; } gf_bs_write_int(bs_dest, var, 8); gf_bs_seek(bs, start); //bs_read_data does not skip EPB, read byte per byte var = psize; while (var) { gf_bs_write_u8(bs_dest, gf_bs_read_u8(bs)); var--; } } else { gf_bs_seek(bs, start); //bs_skip_bytes does not skip EPB, skip byte per byte while (psize) { gf_bs_read_u8(bs); psize--; } } if (gf_bs_available(bs) <= 2) { var = gf_bs_read_int(bs, 8); if (var != 0x80) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[avc-h264] SEI user message has less than 2 bytes remaining but no end of sei found\n")); } if (bs_dest) gf_bs_write_int(bs_dest, 0x80, 8); break; } } gf_bs_del(bs); //we cannot compare final size and original 
size since original may have EPB and final does not yet have them if (bs_dest && sei_removed) { u8 *dst_no_epb = NULL; u32 dst_no_epb_size = 0; gf_bs_get_content(bs_dest, &dst_no_epb, &dst_no_epb_size); nal_size = gf_media_nalu_add_emulation_bytes(buffer, dst_no_epb, dst_no_epb_size); } if (bs_dest) gf_bs_del(bs_dest); return nal_size; } static u8 avc_hevc_get_sar_idx(u32 w, u32 h) { u32 i, count = GF_ARRAY_LENGTH(avc_hevc_sar); for (i = 0; i < count; i++) { if ((avc_hevc_sar[i].w == w) && (avc_hevc_sar[i].h == h)) return i; } return 0xFF; } static void avc_hevc_rewrite_vui(GF_VUIInfo *vui_info, GF_BitStream *orig, GF_BitStream *mod) { /* VUI present flag*/ Bool vui_present_flag = gf_bs_read_int(orig, 1); /*setup default values*/ Bool aspect_ratio_info_present_flag = 0; s32 aspect_ratio_idc = -1; u32 ar_n=0, ar_d=0; Bool overscan_info_present_flag = 0; u32 overscan_info=0; u32 video_signal_type_present_flag=0; u32 video_format = 5; u32 video_full_range_flag = 0; u32 colour_description_present_flag = 0; u32 colour_primaries = 2; u32 transfer_characteristics = 2; u32 matrix_coefficients = 2; //if VUI is present, read all SAR and overscan values if (vui_present_flag) { /* VUI found in input bitstream */ aspect_ratio_info_present_flag = gf_bs_read_int(orig, 1); if (aspect_ratio_info_present_flag) { aspect_ratio_idc = gf_bs_read_int(orig, 8); /*aspect_ratio_idc*/ if (aspect_ratio_idc == 255) { ar_n = gf_bs_read_int(orig, 16); /*sar_width*/ ar_d = gf_bs_read_int(orig, 16); /*sar_height*/ } } /*overscan_info_present_flag */ overscan_info_present_flag = gf_bs_read_int(orig, 1); if(overscan_info_present_flag) { overscan_info = gf_bs_read_int(orig, 1); } /* read all video signal related flags first */ video_signal_type_present_flag = gf_bs_read_int(orig, 1); if(video_signal_type_present_flag) { video_format = gf_bs_read_int(orig, 3); video_full_range_flag = gf_bs_read_int(orig, 1); colour_description_present_flag = gf_bs_read_int(orig, 1); if(colour_description_present_flag) { colour_primaries = gf_bs_read_int(orig, 8); transfer_characteristics = gf_bs_read_int(orig, 8); matrix_coefficients = gf_bs_read_int(orig, 8); } } } //recompute values //no change if ((vui_info->ar_num<0) || (vui_info->ar_den<0)) { } //remove par else if ((vui_info->ar_num==0) || (vui_info->ar_den==0)) { aspect_ratio_info_present_flag = 0; } //set par else { aspect_ratio_info_present_flag = 1; ar_n = vui_info->ar_num; ar_d = vui_info->ar_den; aspect_ratio_idc = avc_hevc_get_sar_idx((u32) ar_n, (u32) ar_d); } if (vui_info->remove_video_info) { video_signal_type_present_flag = 0; } /* correct the values of each flag */ else if ((vui_info->fullrange==0) && (vui_info->video_format==5) && (vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { video_signal_type_present_flag = 0; /* all default, nothing to write*/ } else { video_signal_type_present_flag = 1; video_format = (vui_info->video_format < 0) ? video_format : vui_info->video_format; video_full_range_flag = (vui_info->fullrange < 0) ? video_full_range_flag : vui_info->fullrange; if ((vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) { colour_description_present_flag = 0; } else { colour_description_present_flag = 1; colour_primaries = (vui_info->color_prim < 0) ? colour_primaries : vui_info->color_prim; transfer_characteristics = (vui_info->color_tfc < 0) ? transfer_characteristics : vui_info->color_tfc; matrix_coefficients = (vui_info->color_matrix < 0) ? 
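/* negative values in GF_VUIInfo mean "keep the value found in the bitstream": */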
static void avc_hevc_rewrite_vui(GF_VUIInfo *vui_info, GF_BitStream *orig, GF_BitStream *mod)
{
	/* VUI present flag*/
	Bool vui_present_flag = gf_bs_read_int(orig, 1);

	/*setup default values*/
	Bool aspect_ratio_info_present_flag = 0;
	s32 aspect_ratio_idc = -1;
	u32 ar_n=0, ar_d=0;
	Bool overscan_info_present_flag = 0;
	u32 overscan_info=0;
	u32 video_signal_type_present_flag=0;
	u32 video_format = 5;
	u32 video_full_range_flag = 0;
	u32 colour_description_present_flag = 0;
	u32 colour_primaries = 2;
	u32 transfer_characteristics = 2;
	u32 matrix_coefficients = 2;

	//if VUI is present, read all SAR and overscan values
	if (vui_present_flag) { /* VUI found in input bitstream */
		aspect_ratio_info_present_flag = gf_bs_read_int(orig, 1);
		if (aspect_ratio_info_present_flag) {
			aspect_ratio_idc = gf_bs_read_int(orig, 8); /*aspect_ratio_idc*/
			if (aspect_ratio_idc == 255) {
				ar_n = gf_bs_read_int(orig, 16); /*sar_width*/
				ar_d = gf_bs_read_int(orig, 16); /*sar_height*/
			}
		}
		/*overscan_info_present_flag */
		overscan_info_present_flag = gf_bs_read_int(orig, 1);
		if (overscan_info_present_flag) {
			overscan_info = gf_bs_read_int(orig, 1);
		}
		/* read all video signal related flags first */
		video_signal_type_present_flag = gf_bs_read_int(orig, 1);
		if (video_signal_type_present_flag) {
			video_format = gf_bs_read_int(orig, 3);
			video_full_range_flag = gf_bs_read_int(orig, 1);
			colour_description_present_flag = gf_bs_read_int(orig, 1);
			if (colour_description_present_flag) {
				colour_primaries = gf_bs_read_int(orig, 8);
				transfer_characteristics = gf_bs_read_int(orig, 8);
				matrix_coefficients = gf_bs_read_int(orig, 8);
			}
		}
	}

	//recompute values
	//no change
	if ((vui_info->ar_num<0) || (vui_info->ar_den<0)) {
	}
	//remove par
	else if ((vui_info->ar_num==0) || (vui_info->ar_den==0)) {
		aspect_ratio_info_present_flag = 0;
	}
	//set par
	else {
		aspect_ratio_info_present_flag = 1;
		ar_n = vui_info->ar_num;
		ar_d = vui_info->ar_den;
		aspect_ratio_idc = avc_hevc_get_sar_idx((u32) ar_n, (u32) ar_d);
	}

	if (vui_info->remove_video_info) {
		video_signal_type_present_flag = 0;
	}
	/* correct the values of each flag */
	else if ((vui_info->fullrange==0) && (vui_info->video_format==5) && (vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) {
		video_signal_type_present_flag = 0; /* all default, nothing to write*/
	}
	else {
		video_signal_type_present_flag = 1;
		video_format = (vui_info->video_format < 0) ? video_format : vui_info->video_format;
		video_full_range_flag = (vui_info->fullrange < 0) ? video_full_range_flag : vui_info->fullrange;
		if ((vui_info->color_prim==2) && (vui_info->color_tfc==2) && (vui_info->color_matrix==2)) {
			colour_description_present_flag = 0;
		} else {
			colour_description_present_flag = 1;
			colour_primaries = (vui_info->color_prim < 0) ? colour_primaries : vui_info->color_prim;
			transfer_characteristics = (vui_info->color_tfc < 0) ? transfer_characteristics : vui_info->color_tfc;
			matrix_coefficients = (vui_info->color_matrix < 0) ? matrix_coefficients : vui_info->color_matrix;
		}
		if ((colour_primaries==2) && (transfer_characteristics==2) && (matrix_coefficients==2)) {
			colour_description_present_flag = 0;
			if ((video_format==5) && (video_full_range_flag==0))
				video_signal_type_present_flag = 0;
		}
	}

	//always rewrite VUI
	gf_bs_write_int(mod, 1, 1);
	gf_bs_write_int(mod, aspect_ratio_info_present_flag, 1);
	if (aspect_ratio_info_present_flag) {
		gf_bs_write_int(mod, aspect_ratio_idc, 8);
		if (aspect_ratio_idc == 255) {
			gf_bs_write_int(mod, ar_n, 16);
			gf_bs_write_int(mod, ar_d, 16);
		}
		if (vui_info->update) {
			vui_info->ar_num = ar_n;
			vui_info->ar_den = ar_d;
		}
	}
	gf_bs_write_int(mod, overscan_info_present_flag, 1);
	if (overscan_info_present_flag) {
		gf_bs_write_int(mod, overscan_info, 1);
	}
	gf_bs_write_int(mod, video_signal_type_present_flag, 1);
	if (video_signal_type_present_flag) {
		gf_bs_write_int(mod, video_format, 3);
		gf_bs_write_int(mod, video_full_range_flag, 1);
		gf_bs_write_int(mod, colour_description_present_flag, 1);
		if (colour_description_present_flag) {
			gf_bs_write_int(mod, colour_primaries, 8);
			gf_bs_write_int(mod, transfer_characteristics, 8);
			gf_bs_write_int(mod, matrix_coefficients, 8);
		}
		if (vui_info->update) {
			vui_info->video_format = video_format;
			vui_info->fullrange = video_full_range_flag;
			if (colour_description_present_flag) {
				vui_info->color_prim = colour_primaries;
				vui_info->color_tfc = transfer_characteristics;
				vui_info->color_matrix = matrix_coefficients;
			}
		}
	}

	/*no VUI in input bitstream but we just inserted one, set all remaining vui flags to 0*/
	if (!vui_present_flag) {
		gf_bs_write_int(mod, 0, 1);	/*chroma_location_info_present_flag */
		gf_bs_write_int(mod, 0, 1);	/*timing_info_present_flag*/
		gf_bs_write_int(mod, 0, 1);	/*nal_hrd_parameters_present*/
		gf_bs_write_int(mod, 0, 1);	/*vcl_hrd_parameters_present*/
		gf_bs_write_int(mod, 0, 1);	/*pic_struct_present*/
		gf_bs_write_int(mod, 0, 1);	/*bitstream_restriction*/
	}
	/*otherwise we copy over the remaining bits from the input bitstream*/
}
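/* Usage sketch, not part of the original file: callers drive
   avc_hevc_rewrite_vui() through GF_VUIInfo with a three-way convention per
   field, as implemented above - negative keeps what the bitstream carries,
   zero removes the signalling (for the SAR fields), any other value overrides
   it. This mirrors what gf_media_avc_change_par() below does; kept under
   #if 0 so it is never compiled. */
#if 0
static void example_force_sar_16_9(GF_VUIInfo *vui)
{
	memset(vui, 0, sizeof(GF_VUIInfo));
	vui->ar_num = 16;	/* override the sample aspect ratio to 16:9 */
	vui->ar_den = 9;
	vui->fullrange = -1;	/* keep all video signal info untouched */
	vui->video_format = -1;
	vui->color_prim = -1;
	vui->color_tfc = -1;
	vui->color_matrix = -1;
}
#endif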
GF_Err gf_avc_change_vui(GF_AVCConfig *avcc, GF_VUIInfo *vui_info)
{
	GF_BitStream *orig, *mod;
	AVCState avc;
	u32 i, bit_offset, flag;
	s32 idx;
	GF_AVCConfigSlot *slc;

	orig = NULL;

	memset(&avc, 0, sizeof(AVCState));
	avc.sps_active_idx = -1;

	i=0;
	while ((slc = (GF_AVCConfigSlot *)gf_list_enum(avcc->sequenceParameterSets, &i))) {
		u8 *no_emulation_buf = NULL;
		u32 no_emulation_buf_size = 0, emulation_bytes = 0;
		idx = gf_avc_read_sps(slc->data, slc->size, &avc, 0, &bit_offset);
		if (idx<0) {
			if ( orig )
				gf_bs_del(orig);
			continue;
		}

		/*SPS still contains emulation bytes*/
		no_emulation_buf = gf_malloc((slc->size - 1) * sizeof(char));
		no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data + 1, no_emulation_buf, slc->size - 1);

		orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ);
		gf_bs_read_data(orig, no_emulation_buf, no_emulation_buf_size);
		gf_bs_seek(orig, 0);
		mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);

		/*copy over until the vui flag*/
		assert(bit_offset >= 8);
		while (bit_offset - 8/*bit_offset doesn't take care of the first byte (NALU type)*/) {
			flag = gf_bs_read_int(orig, 1);
			gf_bs_write_int(mod, flag, 1);
			bit_offset--;
		}

		avc_hevc_rewrite_vui(vui_info, orig, mod);

		/*finally copy over remaining*/
		while (gf_bs_bits_available(orig)) {
			flag = gf_bs_read_int(orig, 1);
			gf_bs_write_int(mod, flag, 1);
		}
		gf_bs_del(orig);
		orig = NULL;
		gf_free(no_emulation_buf);

		/*set anti-emulation*/
		gf_bs_get_content(mod, &no_emulation_buf, &flag);
		emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, flag);
		if (flag+emulation_bytes+1>slc->size)
			slc->data = (char*)gf_realloc(slc->data, flag+emulation_bytes+1);
		slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data + 1, flag) + 1;

		gf_bs_del(mod);
		gf_free(no_emulation_buf);
	}
	return GF_OK;
}

GF_EXPORT
GF_Err gf_media_avc_change_par(GF_AVCConfig *avcc, s32 ar_n, s32 ar_d)
{
	GF_VUIInfo vuii;
	memset(&vuii, 0, sizeof(GF_VUIInfo));
	vuii.ar_num = ar_n;
	vuii.ar_den = ar_d;
	vuii.fullrange = -1;
	vuii.video_format = -1;
	vuii.color_prim = -1;
	vuii.color_tfc = -1;
	vuii.color_matrix = -1;
	return gf_avc_change_vui(avcc, &vuii);
}

GF_EXPORT
GF_Err gf_media_avc_change_color(GF_AVCConfig *avcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 colmatrix)
{
	GF_VUIInfo vuii;
	memset(&vuii, 0, sizeof(GF_VUIInfo));
	vuii.ar_num = -1;
	vuii.ar_den = -1;
	vuii.fullrange = fullrange;
	vuii.video_format = vidformat;
	vuii.color_prim = colorprim;
	vuii.color_tfc = transfer;
	vuii.color_matrix = colmatrix;
	return gf_avc_change_vui(avcc, &vuii);
}

GF_EXPORT
GF_Err gf_avc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d)
{
	AVCState avc;
	s32 idx;
	memset(&avc, 0, sizeof(AVCState));
	avc.sps_active_idx = -1;

	idx = gf_avc_read_sps(sps_data, sps_size, &avc, 0, NULL);
	if (idx < 0) {
		return GF_NON_COMPLIANT_BITSTREAM;
	}
	if (sps_id) *sps_id = idx;

	if (width) *width = avc.sps[idx].width;
	if (height) *height = avc.sps[idx].height;
	if (par_n) *par_n = avc.sps[idx].vui.par_num ? avc.sps[idx].vui.par_num : (u32)-1;
	if (par_d) *par_d = avc.sps[idx].vui.par_den ? avc.sps[idx].vui.par_den : (u32)-1;
	return GF_OK;
}

GF_EXPORT
GF_Err gf_avc_get_pps_info(u8 *pps_data, u32 pps_size, u32 *pps_id, u32 *sps_id)
{
	GF_BitStream *bs;
	GF_Err e = GF_OK;
	bs = gf_bs_new(pps_data, pps_size, GF_BITSTREAM_READ);
	if (!bs) {
		e = GF_NON_COMPLIANT_BITSTREAM;
		goto exit;
	}
	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	/*nal hdr*/ gf_bs_read_int(bs, 8);
	*pps_id = gf_bs_read_ue(bs);
	*sps_id = gf_bs_read_ue(bs);
exit:
	gf_bs_del(bs);
	return e;
}

#ifndef GPAC_DISABLE_HEVC

/**********
HEVC parsing
**********/

Bool gf_hevc_slice_is_intra(HEVCState *hevc)
{
	switch (hevc->s_info.nal_unit_type) {
	case GF_HEVC_NALU_SLICE_BLA_W_LP:
	case GF_HEVC_NALU_SLICE_BLA_W_DLP:
	case GF_HEVC_NALU_SLICE_BLA_N_LP:
	case GF_HEVC_NALU_SLICE_IDR_W_DLP:
	case GF_HEVC_NALU_SLICE_IDR_N_LP:
	case GF_HEVC_NALU_SLICE_CRA:
		return GF_TRUE;
	default:
		return GF_FALSE;
	}
}

Bool gf_hevc_slice_is_IDR(HEVCState *hevc)
{
	if (hevc->sei.recovery_point.valid) {
		hevc->sei.recovery_point.valid = 0;
		return GF_TRUE;
	}
	switch (hevc->s_info.nal_unit_type) {
	case GF_HEVC_NALU_SLICE_IDR_W_DLP:
	case GF_HEVC_NALU_SLICE_IDR_N_LP:
		return GF_TRUE;
	default:
		return GF_FALSE;
	}
}
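/* Background sketch, not part of the original file: most of the RPS fields
   parsed below are Exp-Golomb ue(v) values, which is what the gf_bs_read_ue*
   helpers decode. Over a hypothetical read_bit()/read_bits() primitive the
   decoding would be; kept under #if 0 so it is never compiled. */
#if 0
static u32 example_read_ue(void *reader)
{
	u32 nb_zeros = 0;
	while (read_bit(reader) == 0)
		nb_zeros++;	/* count the leading zero prefix */
	/* codeword 0..01x..x decodes to 2^n - 1 + suffix, e.g. 00101 -> 4 */
	return (1u << nb_zeros) - 1 + read_bits(reader, nb_zeros);
}
#endif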
"abs_delta_rps_minus1", idx_rps); deltaRPS = (1 - (delta_rps_sign << 1)) * (abs_delta_rps_minus1 + 1); rps = &sps->rps[idx_rps]; ref_ps = &sps->rps[ref_idx]; nb_ref_pics = ref_ps->num_negative_pics + ref_ps->num_positive_pics; for (i = 0; i <= nb_ref_pics; i++) { s32 ref_idc; s32 used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag ? 1 : 0; if (!used_by_curr_pic_flag) { used_by_curr_pic_flag = gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_flag", idx_rps, i); ref_idc = used_by_curr_pic_flag << 1; } if ((ref_idc == 1) || (ref_idc == 2)) { s32 deltaPOC = deltaRPS; if (i < nb_ref_pics) deltaPOC += ref_ps->delta_poc[i]; rps->delta_poc[k] = deltaPOC; if (deltaPOC < 0) k0++; else k1++; k++; } } rps->num_negative_pics = k0; rps->num_positive_pics = k1; } else { s32 prev = 0, poc; sps->rps[idx_rps].num_negative_pics = gf_bs_read_ue_log_idx(bs, "num_negative_pics", idx_rps); sps->rps[idx_rps].num_positive_pics = gf_bs_read_ue_log_idx(bs, "num_positive_pics", idx_rps); if (sps->rps[idx_rps].num_negative_pics > 16) return GF_FALSE; if (sps->rps[idx_rps].num_positive_pics > 16) return GF_FALSE; for (i = 0; i < sps->rps[idx_rps].num_negative_pics; i++) { u32 delta_poc_s0_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s0_minus1", idx_rps, i); poc = prev - delta_poc_s0_minus1 - 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "delta_poc_s0_minus1", idx_rps, i); } for (i = 0; i < sps->rps[idx_rps].num_positive_pics; i++) { u32 delta_poc_s1_minus1 = gf_bs_read_ue_log_idx2(bs, "delta_poc_s1_minus1" , idx_rps, i); poc = prev + delta_poc_s1_minus1 + 1; prev = poc; sps->rps[idx_rps].delta_poc[i] = poc; gf_bs_read_int_log_idx2(bs, 1, "used_by_curr_pic_s1_flag", idx_rps, i); } } return GF_TRUE; } void hevc_pred_weight_table(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si, HEVC_PPS *pps, HEVC_SPS *sps, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active) { u32 i, num_ref_idx; Bool first_pass = GF_TRUE; u8 luma_weights[20], chroma_weights[20]; u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
void hevc_pred_weight_table(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si, HEVC_PPS *pps, HEVC_SPS *sps, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active)
{
	u32 i, num_ref_idx;
	Bool first_pass = GF_TRUE;
	u8 luma_weights[20], chroma_weights[20];
	u32 ChromaArrayType = sps->separate_colour_plane_flag ? 0 : sps->chroma_format_idc;

	num_ref_idx = num_ref_idx_l0_active;

	gf_bs_read_ue_log(bs, "luma_log2_weight_denom");
	if (ChromaArrayType != 0)
		gf_bs_read_se_log(bs, "delta_chroma_log2_weight_denom");

parse_weights:
	/*defensive bound: the weight arrays hold 20 entries while the active ref
	counts come from the bitstream*/
	if (num_ref_idx > 20) return;

	for (i = 0; i < num_ref_idx; i++) {
		luma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "luma_weights", i);
		//inferred to be 0 if not present
		chroma_weights[i] = 0;
	}
	if (ChromaArrayType != 0) {
		for (i = 0; i < num_ref_idx; i++) {
			chroma_weights[i] = gf_bs_read_int_log_idx(bs, 1, "chroma_weights", i);
		}
	}
	for (i = 0; i < num_ref_idx; i++) {
		if (luma_weights[i]) {
			gf_bs_read_se_log_idx(bs, "delta_luma_weight_l0", i);
			gf_bs_read_se_log_idx(bs, "luma_offset_l0", i);
		}
		if (chroma_weights[i]) {
			gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_0", i);
			gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_0", i);

			gf_bs_read_se_log_idx(bs, "delta_chroma_weight_l0_1", i);
			gf_bs_read_se_log_idx(bs, "delta_chroma_offset_l0_1", i);
		}
	}

	if (si->slice_type == GF_HEVC_SLICE_TYPE_B) {
		if (!first_pass) return;
		first_pass = GF_FALSE;
		num_ref_idx = num_ref_idx_l1_active;
		goto parse_weights;
	}
}

static Bool ref_pic_lists_modification(GF_BitStream *bs, u32 slice_type, u32 num_ref_idx_l0_active, u32 num_ref_idx_l1_active)
{
	//u32 i;
	Bool ref_pic_list_modification_flag_l0 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l0");
	if (ref_pic_list_modification_flag_l0) {
		/*for (i=0; i<num_ref_idx_l0_active; i++) {
			list_entry_l0[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr())/log(2)));
		}*/
		return GF_FALSE;
	}
	if (slice_type == GF_HEVC_SLICE_TYPE_B) {
		Bool ref_pic_list_modification_flag_l1 = gf_bs_read_int_log(bs, 1, "ref_pic_list_modification_flag_l1");
		if (ref_pic_list_modification_flag_l1) {
			/*for (i=0; i<num_ref_idx_l1_active; i++) {
				list_entry_l1[i] = *//*gf_bs_read_int(bs, (u32)ceil(log(getNumPicTotalCurr()) / log(2)));
			}*/
			return GF_FALSE;
		}
	}
	return GF_TRUE;
}

static s32 hevc_parse_slice_segment(GF_BitStream *bs, HEVCState *hevc, HEVCSliceInfo *si)
{
	u32 i, j;
	u32 num_ref_idx_l0_active = 0, num_ref_idx_l1_active = 0;
	HEVC_PPS *pps;
	HEVC_SPS *sps;
	s32 pps_id;
	Bool RapPicFlag = GF_FALSE;
	Bool IDRPicFlag = GF_FALSE;

	si->first_slice_segment_in_pic_flag = gf_bs_read_int_log(bs, 1, "first_slice_segment_in_pic_flag");

	switch (si->nal_unit_type) {
	case GF_HEVC_NALU_SLICE_IDR_W_DLP:
	case GF_HEVC_NALU_SLICE_IDR_N_LP:
		IDRPicFlag = GF_TRUE;
		RapPicFlag = GF_TRUE;
		break;
	case GF_HEVC_NALU_SLICE_BLA_W_LP:
	case GF_HEVC_NALU_SLICE_BLA_W_DLP:
	case GF_HEVC_NALU_SLICE_BLA_N_LP:
	case GF_HEVC_NALU_SLICE_CRA:
		RapPicFlag = GF_TRUE;
		break;
	}

	if (RapPicFlag) {
		gf_bs_read_int_log(bs, 1, "no_output_of_prior_pics_flag");
	}

	pps_id = gf_bs_read_ue_log(bs, "pps_id");
	if ((pps_id<0) || (pps_id >= 64))
		return -1;

	pps = &hevc->pps[pps_id];
	sps = &hevc->sps[pps->sps_id];
	si->sps = sps;
	si->pps = pps;

	if (!si->first_slice_segment_in_pic_flag && pps->dependent_slice_segments_enabled_flag) {
		si->dependent_slice_segment_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segment_flag");
	} else {
		si->dependent_slice_segment_flag = GF_FALSE;
	}
	if (!si->first_slice_segment_in_pic_flag) {
		si->slice_segment_address = gf_bs_read_int_log(bs, sps->bitsSliceSegmentAddress, "slice_segment_address");
	} else {
		si->slice_segment_address = 0;
	}

	if (!si->dependent_slice_segment_flag) {
		Bool deblocking_filter_override_flag = 0;
		Bool slice_temporal_mvp_enabled_flag = 0;
		Bool slice_sao_luma_flag = 0;
		Bool slice_sao_chroma_flag = 0;
		Bool slice_deblocking_filter_disabled_flag = 0;

		//"slice_reserved_undetermined_flag[]"
		gf_bs_read_int_log(bs,
pps->num_extra_slice_header_bits, "slice_reserved_undetermined_flag"); si->slice_type = gf_bs_read_ue_log(bs, "slice_type"); if (pps->output_flag_present_flag) gf_bs_read_int_log(bs, 1, "pic_output_flag"); if (sps->separate_colour_plane_flag == 1) gf_bs_read_int_log(bs, 2, "colour_plane_id"); if (IDRPicFlag) { si->poc_lsb = 0; //if not asked to parse full header, abort since we know the poc if (!hevc->full_slice_header_parse) return 0; } else { si->poc_lsb = gf_bs_read_int_log(bs, sps->log2_max_pic_order_cnt_lsb, "poc_lsb"); //if not asked to parse full header, abort once we have the poc if (!hevc->full_slice_header_parse) return 0; if (gf_bs_read_int_log(bs, 1, "short_term_ref_pic_set_sps_flag") == 0) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, sps->num_short_term_ref_pic_sets); if (!ret) return -1; } else if (sps->num_short_term_ref_pic_sets > 1) { u32 numbits = 0; while ((u32)(1 << numbits) < sps->num_short_term_ref_pic_sets) numbits++; if (numbits > 0) gf_bs_read_int_log(bs, numbits, "short_term_ref_pic_set_idx"); /*else short_term_ref_pic_set_idx = 0;*/ } if (sps->long_term_ref_pics_present_flag) { u8 DeltaPocMsbCycleLt[32]; u32 num_long_term_sps = 0; u32 num_long_term_pics = 0; memset(DeltaPocMsbCycleLt, 0, sizeof(u8) * 32); if (sps->num_long_term_ref_pic_sps > 0) { num_long_term_sps = gf_bs_read_ue_log(bs, "num_long_term_sps"); } num_long_term_pics = gf_bs_read_ue_log(bs, "num_long_term_pics"); for (i = 0; i < num_long_term_sps + num_long_term_pics; i++) { if (i < num_long_term_sps) { if (sps->num_long_term_ref_pic_sps > 1) gf_bs_read_int_log_idx(bs, gf_get_bit_size(sps->num_long_term_ref_pic_sps), "lt_idx_sps", i); } else { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "PocLsbLt", i); gf_bs_read_int_log_idx(bs, 1, "UsedByCurrPicLt", i); } if (gf_bs_read_int_log_idx(bs, 1, "delta_poc_msb_present_flag", i)) { if (i == 0 || i == num_long_term_sps) DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i); else DeltaPocMsbCycleLt[i] = gf_bs_read_ue_log_idx(bs, "DeltaPocMsbCycleLt", i) + DeltaPocMsbCycleLt[i - 1]; } } } if (sps->temporal_mvp_enable_flag) slice_temporal_mvp_enabled_flag = gf_bs_read_int_log(bs, 1, "slice_temporal_mvp_enabled_flag"); } if (sps->sample_adaptive_offset_enabled_flag) { u32 ChromaArrayType = sps->separate_colour_plane_flag ? 
0 : sps->chroma_format_idc; slice_sao_luma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_luma_flag"); if (ChromaArrayType != 0) slice_sao_chroma_flag = gf_bs_read_int_log(bs, 1, "slice_sao_chroma_flag"); } if (si->slice_type == GF_HEVC_SLICE_TYPE_P || si->slice_type == GF_HEVC_SLICE_TYPE_B) { //u32 NumPocTotalCurr; num_ref_idx_l0_active = pps->num_ref_idx_l0_default_active; num_ref_idx_l1_active = 0; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = pps->num_ref_idx_l1_default_active; if (gf_bs_read_int_log(bs, 1, "num_ref_idx_active_override_flag")) { num_ref_idx_l0_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_active"); if (si->slice_type == GF_HEVC_SLICE_TYPE_B) num_ref_idx_l1_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_active"); } if (pps->lists_modification_present_flag /*TODO: && NumPicTotalCurr > 1*/) { if (!ref_pic_lists_modification(bs, si->slice_type, num_ref_idx_l0_active, num_ref_idx_l1_active)) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[hevc] ref_pic_lists_modification( ) not implemented\n")); return -1; } } if (si->slice_type == GF_HEVC_SLICE_TYPE_B) gf_bs_read_int_log(bs, 1, "mvd_l1_zero_flag"); if (pps->cabac_init_present_flag) gf_bs_read_int_log(bs, 1, "cabac_init_flag"); if (slice_temporal_mvp_enabled_flag) { // When collocated_from_l0_flag is not present, it is inferred to be equal to 1. Bool collocated_from_l0_flag = 1; if (si->slice_type == GF_HEVC_SLICE_TYPE_B) collocated_from_l0_flag = gf_bs_read_int_log(bs, 1, "collocated_from_l0_flag"); if ((collocated_from_l0_flag && (num_ref_idx_l0_active > 1)) || (!collocated_from_l0_flag && (num_ref_idx_l1_active > 1)) ) { gf_bs_read_ue_log(bs, "collocated_ref_idx"); } } if ((pps->weighted_pred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_P) || (pps->weighted_bipred_flag && si->slice_type == GF_HEVC_SLICE_TYPE_B) ) { hevc_pred_weight_table(bs, hevc, si, pps, sps, num_ref_idx_l0_active, num_ref_idx_l1_active); } gf_bs_read_ue_log(bs, "five_minus_max_num_merge_cand"); } si->slice_qp_delta_start_bits = (s32) (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); si->slice_qp_delta = gf_bs_read_se_log(bs, "slice_qp_delta"); if (pps->slice_chroma_qp_offsets_present_flag) { gf_bs_read_se_log(bs, "slice_cb_qp_offset"); gf_bs_read_se_log(bs, "slice_cr_qp_offset"); } if (pps->deblocking_filter_override_enabled_flag) { deblocking_filter_override_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_flag"); } if (deblocking_filter_override_flag) { slice_deblocking_filter_disabled_flag = gf_bs_read_int_log(bs, 1, "slice_deblocking_filter_disabled_flag"); if (!slice_deblocking_filter_disabled_flag) { gf_bs_read_se_log(bs, "slice_beta_offset_div2"); gf_bs_read_se_log(bs, "slice_tc_offset_div2"); } } if (pps->loop_filter_across_slices_enabled_flag && (slice_sao_luma_flag || slice_sao_chroma_flag || !slice_deblocking_filter_disabled_flag) ) { gf_bs_read_int_log(bs, 1, "slice_loop_filter_across_slices_enabled_flag"); } } //dependent slice segment else { //if not asked to parse full header, abort if (!hevc->full_slice_header_parse) return 0; } si->entry_point_start_bits = ((u32)gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); if (pps->tiles_enabled_flag || pps->entropy_coding_sync_enabled_flag) { u32 num_entry_point_offsets = gf_bs_read_ue_log(bs, "num_entry_point_offsets"); if (num_entry_point_offsets > 0) { u32 offset = gf_bs_read_ue_log(bs, "offset") + 1; u32 segments = offset >> 4; s32 remain = (offset & 15); for (i = 0; i < num_entry_point_offsets; i++) { //u32 res = 0; for (j = 0; j < 
segments; j++) { //res <<= 16; /*res +=*/ gf_bs_read_int(bs, 16); } if (remain) { //res <<= remain; /* res += */ gf_bs_read_int(bs, remain); } // entry_point_offset = val + 1; // +1; // +1 to get the size } } } if (pps->slice_segment_header_extension_present_flag) { u32 size_ext = gf_bs_read_ue_log(bs, "size_ext"); while (size_ext) { gf_bs_read_int(bs, 8); size_ext--; } } si->header_size_bits = (gf_bs_get_position(bs) - 1) * 8 + gf_bs_get_bit_position(bs); // av_parser.c modified on 16 jan. 2019 if (gf_bs_read_int_log(bs, 1, "byte_align") == 0) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("Error parsing slice header: byte_align not found at end of header !\n")); } gf_bs_align(bs); si->payload_start_offset = (s32)gf_bs_get_position(bs); return 0; } static void gf_hevc_vvc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc, VVCState *vvc) { u32 ptype, psize, hdr; u64 start; GF_BitStream *bs; hdr = buffer[0]; if (((hdr & 0x7e) >> 1) != GF_HEVC_NALU_SEI_PREFIX) return; bs = gf_bs_new(buffer, nal_size, GF_BITSTREAM_READ); gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); gf_bs_read_int(bs, 16); /*parse SEI*/ while (gf_bs_available(bs)) { u32 consumed; ptype = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); ptype += 255; } ptype += gf_bs_read_int(bs, 8); psize = 0; while (gf_bs_peek_bits(bs, 8, 0)==0xFF) { gf_bs_read_int(bs, 8); psize += 255; } psize += gf_bs_read_int(bs, 8); start = gf_bs_get_position(bs); if (start+psize >= nal_size) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[%s] SEI user message type %d size error (%d but %d remain), skipping SEI message\n", hevc ? "HEVC" : "VVC", ptype, psize, nal_size-start)); break; } switch (ptype) { case 4: /*user registered ITU-T T35*/ if (hevc) { avc_parse_itu_t_t35_sei(bs, &hevc->sei.dovi); } break; default: break; } gf_bs_align(bs); consumed = (u32) (gf_bs_get_position(bs) - start); psize-=consumed; gf_bs_skip_bytes(bs, psize); if (gf_bs_available(bs) <= 2) break; } gf_bs_del(bs); } void gf_hevc_parse_sei(char *buffer, u32 nal_size, HEVCState *hevc) { gf_hevc_vvc_parse_sei(buffer, nal_size, hevc, NULL); } static void hevc_compute_poc(HEVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_pic_order_cnt_lsb); /*POC reset for IDR frames, NOT for CRA*/ switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: si->poc_lsb_prev = 0; si->poc_msb_prev = 0; break; } if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; switch (si->nal_unit_type) { case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: si->poc_msb = 0; break; } si->poc = si->poc_msb + si->poc_lsb; } static Bool hevc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } void hevc_profile_tier_level(GF_BitStream *bs, Bool ProfilePresentFlag, u8 MaxNumSubLayersMinus1, HEVC_ProfileTierLevel *ptl, u32 idx) { u32 i; 
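	/* Layout parsed below: an optional general PTL block (2-bit profile space,
	   tier flag, 5-bit profile idc, 32 compatibility flags, 4 source/constraint
	   flags, 44 reserved bits), an 8-bit general level, one pair of
	   profile/level presence flags per sub-layer padded with reserved 2-bit
	   fields up to 8 entries, then the sub-layer PTL blocks themselves. */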
if (ProfilePresentFlag) { ptl->profile_space = gf_bs_read_int_log_idx(bs, 2, "profile_space", idx); ptl->tier_flag = gf_bs_read_int_log_idx(bs, 1, "tier_flag", idx); ptl->profile_idc = gf_bs_read_int_log_idx(bs, 5, "profile_idc", idx); ptl->profile_compatibility_flag = gf_bs_read_int_log_idx(bs, 32, "profile_compatibility_flag", idx); ptl->general_progressive_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_progressive_source_flag", idx); ptl->general_interlaced_source_flag = gf_bs_read_int_log_idx(bs, 1, "general_interlaced_source_flag", idx); ptl->general_non_packed_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_non_packed_constraint_flag", idx); ptl->general_frame_only_constraint_flag = gf_bs_read_int_log_idx(bs, 1, "general_frame_only_constraint_flag", idx); ptl->general_reserved_44bits = gf_bs_read_long_int(bs, 44); } ptl->level_idc = gf_bs_read_int_log(bs, 8, "level_idc"); for (i = 0; i < MaxNumSubLayersMinus1; i++) { ptl->sub_ptl[i].profile_present_flag = gf_bs_read_int_log_idx2(bs, 1, "profile_present_flag", idx, i); ptl->sub_ptl[i].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } if (MaxNumSubLayersMinus1 > 0) { for (i = MaxNumSubLayersMinus1; i < 8; i++) { /*reserved_zero_2bits*/gf_bs_read_int(bs, 2); } } for (i = 0; i < MaxNumSubLayersMinus1; i++) { if (ptl->sub_ptl[i].profile_present_flag) { ptl->sub_ptl[i].profile_space = gf_bs_read_int_log_idx2(bs, 2, "sublayer_profile_space", idx, i); ptl->sub_ptl[i].tier_flag = gf_bs_read_int_log_idx2(bs, 1, "sublayer_tier_flag", idx, i); ptl->sub_ptl[i].profile_idc = gf_bs_read_int_log_idx2(bs, 5, "sublayer_profile_idc", idx, i); ptl->sub_ptl[i].profile_compatibility_flag = gf_bs_read_int_log_idx2(bs, 32, "sublayer_profile_compatibility_flag", idx, i); /*ptl->sub_ptl[i].progressive_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_progressive_source_flag", idx, i); /*ptl->sub_ptl[i].interlaced_source_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_interlaced_source_flag", idx, i); /*ptl->sub_ptl[i].non_packed_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_non_packed_constraint_flag", idx, i); /*ptl->sub_ptl[i].frame_only_constraint_flag =*/ gf_bs_read_int_log_idx2(bs, 1, "sublayer_frame_only_constraint_flag", idx, i); /*ptl->sub_ptl[i].reserved_44bits =*/ gf_bs_read_long_int(bs, 44); } if (ptl->sub_ptl[i].level_present_flag) ptl->sub_ptl[i].level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } } static u32 scalability_type_to_idx(HEVC_VPS *vps, u32 scalability_type) { u32 idx = 0, type; for (type = 0; type < scalability_type; type++) { idx += (vps->scalability_mask[type] ? 
1 : 0); } return idx; } #define LHVC_VIEW_ORDER_INDEX 1 #define LHVC_SCALABILITY_INDEX 2 static u32 lhvc_get_scalability_id(HEVC_VPS *vps, u32 layer_id_in_vps, u32 scalability_type) { u32 idx; if (!vps->scalability_mask[scalability_type]) return 0; idx = scalability_type_to_idx(vps, scalability_type); return vps->dimension_id[layer_id_in_vps][idx]; } static u32 lhvc_get_view_index(HEVC_VPS *vps, u32 id) { return lhvc_get_scalability_id(vps, vps->layer_id_in_vps[id], LHVC_VIEW_ORDER_INDEX); } static u32 lhvc_get_num_views(HEVC_VPS *vps) { u32 numViews = 1, i; for (i = 0; i < vps->max_layers; i++) { u32 layer_id = vps->layer_id_in_nuh[i]; if (i > 0 && (lhvc_get_view_index(vps, layer_id) != lhvc_get_scalability_id(vps, i - 1, LHVC_VIEW_ORDER_INDEX))) { numViews++; } } return numViews; } static void lhvc_parse_rep_format(HEVC_RepFormat *fmt, GF_BitStream *bs, u32 idx) { u8 chroma_bitdepth_present_flag; fmt->pic_width_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_width_luma_samples", idx); fmt->pic_height_luma_samples = gf_bs_read_int_log_idx(bs, 16, "pic_height_luma_samples", idx); chroma_bitdepth_present_flag = gf_bs_read_int_log_idx(bs, 1, "chroma_bitdepth_present_flag", idx); if (chroma_bitdepth_present_flag) { fmt->chroma_format_idc = gf_bs_read_int_log_idx(bs, 2, "chroma_format_idc", idx); if (fmt->chroma_format_idc == 3) fmt->separate_colour_plane_flag = gf_bs_read_int_log_idx(bs, 1, "separate_colour_plane_flag", idx); fmt->bit_depth_luma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_luma_minus8", idx); fmt->bit_depth_chroma = 8 + gf_bs_read_int_log_idx(bs, 4, "bit_depth_chroma_minus8", idx); } if (gf_bs_read_int_log_idx(bs, 1, "conformance_window_vps_flag", idx)) { gf_bs_read_ue_log_idx(bs, "conf_win_vps_left_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_right_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_top_offset", idx); gf_bs_read_ue_log_idx(bs, "conf_win_vps_bottom_offset", idx); } } static Bool hevc_parse_vps_extension(HEVC_VPS *vps, GF_BitStream *bs) { u8 splitting_flag, vps_nuh_layer_id_present_flag, view_id_len; u32 i, j, num_scalability_types, num_add_olss, num_add_layer_set, num_indepentdent_layers, nb_bits, default_output_layer_idc = 0; u8 dimension_id_len[16], dim_bit_offset[16]; u8 /*avc_base_layer_flag, */NumLayerSets, /*default_one_target_output_layer_flag, */rep_format_idx_present_flag, ols_ids_to_ls_idx; u8 layer_set_idx_for_ols_minus1[MAX_LHVC_LAYERS]; u8 nb_output_layers_in_output_layer_set[MAX_LHVC_LAYERS + 1]; u8 ols_highest_output_layer_id[MAX_LHVC_LAYERS + 1]; u32 k, d, r, p, iNuhLId, jNuhLId; u8 num_direct_ref_layers[64], num_pred_layers[64], num_layers_in_tree_partition[MAX_LHVC_LAYERS]; u8 dependency_flag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS], id_pred_layers[64][MAX_LHVC_LAYERS]; // u8 num_ref_layers[64]; // u8 tree_partition_layer_id[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; // u8 id_ref_layers[64][MAX_LHVC_LAYERS]; // u8 id_direct_ref_layers[64][MAX_LHVC_LAYERS]; u8 layer_id_in_list_flag[64]; Bool OutputLayerFlag[MAX_LHVC_LAYERS][MAX_LHVC_LAYERS]; vps->vps_extension_found = 1; if ((vps->max_layers > 1) && vps->base_layer_internal_flag) hevc_profile_tier_level(bs, 0, vps->max_sub_layers - 1, &vps->ext_ptl[0], 0); splitting_flag = gf_bs_read_int_log(bs, 1, "splitting_flag"); num_scalability_types = 0; for (i = 0; i < 16; i++) { vps->scalability_mask[i] = gf_bs_read_int_log_idx(bs, 1, "scalability_mask", i); num_scalability_types += vps->scalability_mask[i]; } if (num_scalability_types >= 16) { num_scalability_types = 16; } dimension_id_len[0] = 0; 
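	/* Worked example, not part of the original file: with two scalability
	   types and splitting_flag set, the 6-bit nuh_layer_id is partitioned
	   among the dimensions. If dimension_id_len[0]=3, then dim_bit_offset[1]=3
	   and the last length is derived below as 1 + (5 - 3) = 3; a
	   layer_id_in_nuh of 0b101011 then maps to dimension_id 0b011=3
	   (bits 0-2) and 0b101=5 (bits 3-5) via the masking further down. */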
for (i = 0; i < (num_scalability_types - splitting_flag); i++) { dimension_id_len[i] = 1 + gf_bs_read_int_log_idx(bs, 3, "dimension_id_len_minus1", i); } if (splitting_flag) { for (i = 0; i < num_scalability_types; i++) { dim_bit_offset[i] = 0; for (j = 0; j < i; j++) dim_bit_offset[i] += dimension_id_len[j]; } dimension_id_len[num_scalability_types - 1] = 1 + (5 - dim_bit_offset[num_scalability_types - 1]); dim_bit_offset[num_scalability_types] = 6; } vps_nuh_layer_id_present_flag = gf_bs_read_int_log(bs, 1, "vps_nuh_layer_id_present_flag"); vps->layer_id_in_nuh[0] = 0; vps->layer_id_in_vps[0] = 0; for (i = 1; i < vps->max_layers; i++) { if (vps_nuh_layer_id_present_flag) { vps->layer_id_in_nuh[i] = gf_bs_read_int_log_idx(bs, 6, "layer_id_in_nuh", i); } else { vps->layer_id_in_nuh[i] = i; } vps->layer_id_in_vps[vps->layer_id_in_nuh[i]] = i; if (!splitting_flag) { for (j = 0; j < num_scalability_types; j++) { vps->dimension_id[i][j] = gf_bs_read_int_log_idx2(bs, dimension_id_len[j], "dimension_id", i, j); } } } if (splitting_flag) { for (i = 0; i < vps->max_layers; i++) for (j = 0; j < num_scalability_types; j++) vps->dimension_id[i][j] = ((vps->layer_id_in_nuh[i] & ((1 << dim_bit_offset[j + 1]) - 1)) >> dim_bit_offset[j]); } else { for (j = 0; j < num_scalability_types; j++) vps->dimension_id[0][j] = 0; } view_id_len = gf_bs_read_int_log(bs, 4, "view_id_len"); if (view_id_len > 0) { for (i = 0; i < lhvc_get_num_views(vps); i++) { gf_bs_read_int_log_idx(bs, view_id_len, "view_id_val", i); } } for (i = 1; i < vps->max_layers; i++) { for (j = 0; j < i; j++) { vps->direct_dependency_flag[i][j] = gf_bs_read_int_log_idx(bs, 1, "direct_dependency_flag", i); } } //we do the test on MAX_LHVC_LAYERS and break in the loop to avoid a wrong GCC 4.8 warning on array bounds for (i = 0; i < MAX_LHVC_LAYERS; i++) { if (i >= vps->max_layers) break; for (j = 0; j < vps->max_layers; j++) { dependency_flag[i][j] = vps->direct_dependency_flag[i][j]; for (k = 0; k < i; k++) if (vps->direct_dependency_flag[i][k] && vps->direct_dependency_flag[k][j]) dependency_flag[i][j] = 1; } } for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; d = r = p = 0; for (j = 0; j < vps->max_layers; j++) { jNuhLId = vps->layer_id_in_nuh[j]; if (vps->direct_dependency_flag[i][j]) { // id_direct_ref_layers[iNuhLId][d] = jNuhLId; d++; } if (dependency_flag[i][j]) { // id_ref_layers[iNuhLId][r] = jNuhLId; r++; } if (dependency_flag[j][i]) id_pred_layers[iNuhLId][p++] = jNuhLId; } num_direct_ref_layers[iNuhLId] = d; // num_ref_layers[iNuhLId] = r; num_pred_layers[iNuhLId] = p; } memset(layer_id_in_list_flag, 0, 64 * sizeof(u8)); k = 0; //num_indepentdent_layers for (i = 0; i < vps->max_layers; i++) { iNuhLId = vps->layer_id_in_nuh[i]; if (!num_direct_ref_layers[iNuhLId]) { u32 h = 1; //tree_partition_layer_id[k][0] = iNuhLId; for (j = 0; j < num_pred_layers[iNuhLId]; j++) { u32 predLId = id_pred_layers[iNuhLId][j]; if (!layer_id_in_list_flag[predLId]) { //tree_partition_layer_id[k][h++] = predLId; layer_id_in_list_flag[predLId] = 1; } } num_layers_in_tree_partition[k++] = h; } } num_indepentdent_layers = k; num_add_layer_set = 0; if (num_indepentdent_layers > 1) num_add_layer_set = gf_bs_read_ue_log(bs, "num_add_layer_set"); for (i = 0; i < num_add_layer_set; i++) for (j = 1; j < num_indepentdent_layers; j++) { nb_bits = 1; while ((1 << nb_bits) < (num_layers_in_tree_partition[j] + 1)) nb_bits++; gf_bs_read_int_log_idx2(bs, nb_bits, "highest_layer_idx_plus1", i, j); } if (gf_bs_read_int_log(bs, 1, 
"vps_sub_layers_max_minus1_present_flag")) { for (i = 0; i < vps->max_layers; i++) { gf_bs_read_int_log_idx(bs, 3, "sub_layers_vps_max_minus1", i); } } if (gf_bs_read_int_log(bs, 1, "max_tid_ref_present_flag")) { for (i = 0; i < (vps->max_layers - 1); i++) { for (j = i + 1; j < vps->max_layers; j++) { if (vps->direct_dependency_flag[j][i]) gf_bs_read_int_log_idx2(bs, 3, "max_tid_il_ref_pics_plus1", i, j); } } } gf_bs_read_int_log(bs, 1, "default_ref_layers_active_flag"); vps->num_profile_tier_level = 1 + gf_bs_read_ue_log(bs, "num_profile_tier_level"); if (vps->num_profile_tier_level > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of PTLs in VPS %d\n", vps->num_profile_tier_level)); vps->num_profile_tier_level = 1; return GF_FALSE; } for (i = vps->base_layer_internal_flag ? 2 : 1; i < vps->num_profile_tier_level; i++) { Bool vps_profile_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_profile_present_flag", i); hevc_profile_tier_level(bs, vps_profile_present_flag, vps->max_sub_layers - 1, &vps->ext_ptl[i - 1], i-1); } NumLayerSets = vps->num_layer_sets + num_add_layer_set; num_add_olss = 0; if (NumLayerSets > 1) { num_add_olss = gf_bs_read_ue_log(bs, "num_add_olss"); default_output_layer_idc = gf_bs_read_int_log(bs, 2, "default_output_layer_idc"); default_output_layer_idc = default_output_layer_idc < 2 ? default_output_layer_idc : 2; } vps->num_output_layer_sets = num_add_olss + NumLayerSets; layer_set_idx_for_ols_minus1[0] = 1; vps->output_layer_flag[0][0] = 1; for (i = 0; i < vps->num_output_layer_sets; i++) { if ((NumLayerSets > 2) && (i >= NumLayerSets)) { nb_bits = 1; while ((1 << nb_bits) < (NumLayerSets - 1)) nb_bits++; layer_set_idx_for_ols_minus1[i] = gf_bs_read_int_log_idx(bs, nb_bits, "layer_set_idx_for_ols_minus1", i); } else layer_set_idx_for_ols_minus1[i] = 0; ols_ids_to_ls_idx = i < NumLayerSets ? 
i : layer_set_idx_for_ols_minus1[i] + 1; if ((i > (vps->num_layer_sets - 1)) || (default_output_layer_idc == 2)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) vps->output_layer_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "output_layer_flag", i, j); } if ((default_output_layer_idc == 0) || (default_output_layer_idc == 1)) { for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if ((default_output_layer_idc == 0) || (vps->LayerSetLayerIdList[i][j] == vps->LayerSetLayerIdListMax[i])) OutputLayerFlag[i][j] = GF_TRUE; else OutputLayerFlag[i][j] = GF_FALSE; } } for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (OutputLayerFlag[i][j]) { u32 curLayerID; vps->necessary_layers_flag[i][j] = GF_TRUE; curLayerID = vps->LayerSetLayerIdList[i][j]; for (k = 0; k < j; k++) { u32 refLayerId = vps->LayerSetLayerIdList[i][k]; if (dependency_flag[vps->layer_id_in_vps[curLayerID]][vps->layer_id_in_vps[refLayerId]]) vps->necessary_layers_flag[i][k] = GF_TRUE; } } } vps->num_necessary_layers[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { if (vps->necessary_layers_flag[i][j]) vps->num_necessary_layers[i] += 1; } if (i == 0) { if (vps->base_layer_internal_flag) { if (vps->max_layers > 1) vps->profile_tier_level_idx[0][0] = 1; else vps->profile_tier_level_idx[0][0] = 0; } continue; } nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_profile_tier_level) nb_bits++; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) if (vps->necessary_layers_flag[i][j] && vps->num_profile_tier_level) vps->profile_tier_level_idx[i][j] = gf_bs_read_int_log_idx2(bs, nb_bits, "profile_tier_level_idx", i, j); else vps->profile_tier_level_idx[i][j] = 0; nb_output_layers_in_output_layer_set[i] = 0; for (j = 0; j < vps->num_layers_in_id_list[ols_ids_to_ls_idx]; j++) { nb_output_layers_in_output_layer_set[i] += OutputLayerFlag[i][j]; if (OutputLayerFlag[i][j]) { ols_highest_output_layer_id[i] = vps->LayerSetLayerIdList[ols_ids_to_ls_idx][j]; } } if (nb_output_layers_in_output_layer_set[i] == 1 && ols_highest_output_layer_id[i] > 0) vps->alt_output_layer_flag[i] = gf_bs_read_int_log_idx(bs, 1, "alt_output_layer_flag", i); } vps->num_rep_formats = 1 + gf_bs_read_ue_log(bs, "num_rep_formats_minus1"); if (vps->num_rep_formats > 16) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of rep formats in VPS %d\n", vps->num_rep_formats)); vps->num_rep_formats = 0; return GF_FALSE; } for (i = 0; i < vps->num_rep_formats; i++) { lhvc_parse_rep_format(&vps->rep_formats[i], bs, i); } if (vps->num_rep_formats > 1) rep_format_idx_present_flag = gf_bs_read_int_log(bs, 1, "rep_format_idx_present_flag"); else rep_format_idx_present_flag = 0; vps->rep_format_idx[0] = 0; nb_bits = 1; while ((u32)(1 << nb_bits) < vps->num_rep_formats) nb_bits++; for (i = vps->base_layer_internal_flag ? 1 : 0; i < vps->max_layers; i++) { if (rep_format_idx_present_flag) { vps->rep_format_idx[i] = gf_bs_read_int_log_idx(bs, nb_bits, "rep_format_idx", i); } else { vps->rep_format_idx[i] = i < vps->num_rep_formats - 1 ? i : vps->num_rep_formats - 1; } } //TODO - we don't use the rest ... 
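	/* Roughly, what is skipped here (assumption based on ISO/IEC 23008-2
	   Annex F, not on the skipped bits themselves): cross-layer POC alignment
	   flags, the DPB size table, direct-dependency types and the optional VPS
	   VUI; none of it is needed for the layer/OLS signalling extracted above. */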
return GF_TRUE; } static void sub_layer_hrd_parameters(GF_BitStream *bs, int subLayerId, u32 cpb_cnt, Bool sub_pic_hrd_params_present_flag, u32 idx1, u32 idx2) { u32 i; if (!gf_bs_available(bs)) return; for (i = 0; i <= cpb_cnt; i++) { gf_bs_read_ue_log_idx3(bs, "bit_rate_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "cpb_size_value_minus1", idx1, idx2, i); if (sub_pic_hrd_params_present_flag) { gf_bs_read_ue_log_idx3(bs, "cpb_size_du_value_minus1", idx1, idx2, i); gf_bs_read_ue_log_idx3(bs, "bit_rate_du_value_minus1", idx1, idx2, i); } gf_bs_read_int_log_idx3(bs, 1, "cbr_flag", idx1, idx2, i); } } static void hevc_parse_hrd_parameters(GF_BitStream *bs, Bool commonInfPresentFlag, int maxNumSubLayersMinus1, u32 idx) { int i; Bool nal_hrd_parameters_present_flag = GF_FALSE; Bool vcl_hrd_parameters_present_flag = GF_FALSE; Bool sub_pic_hrd_params_present_flag = GF_FALSE; if (commonInfPresentFlag) { nal_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "nal_hrd_parameters_present_flag", idx); vcl_hrd_parameters_present_flag = gf_bs_read_int_log_idx(bs, 1, "vcl_hrd_parameters_present_flag", idx); if (nal_hrd_parameters_present_flag || vcl_hrd_parameters_present_flag) { sub_pic_hrd_params_present_flag = gf_bs_read_int_log_idx(bs, 1, "sub_pic_hrd_params_present_flag", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 8, "tick_divisor_minus2", idx); gf_bs_read_int_log_idx(bs, 5, "du_cpb_removal_delay_increment_length_minus1", idx); gf_bs_read_int_log_idx(bs, 1, "sub_pic_cpb_params_in_pic_timing_sei_flag", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_du_length_minus1", idx); } gf_bs_read_int_log_idx(bs, 4, "bit_rate_scale", idx); gf_bs_read_int_log_idx(bs, 4, "cpb_size_scale", idx); if (sub_pic_hrd_params_present_flag) { gf_bs_read_int_log_idx(bs, 4, "cpb_size_du_scale", idx); } gf_bs_read_int_log_idx(bs, 5, "initial_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "au_cpb_removal_delay_length_minus1", idx); gf_bs_read_int_log_idx(bs, 5, "dpb_output_delay_length_minus1", idx); } } for (i = 0; i <= maxNumSubLayersMinus1; i++) { Bool fixed_pic_rate_general_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_general_flag", idx); Bool fixed_pic_rate_within_cvs_flag_i = GF_TRUE; Bool low_delay_hrd_flag_i = GF_FALSE; u32 cpb_cnt_minus1_i = 0; if (!fixed_pic_rate_general_flag_i) { fixed_pic_rate_within_cvs_flag_i = gf_bs_read_int_log_idx(bs, 1, "fixed_pic_rate_within_cvs_flag", idx); } if (fixed_pic_rate_within_cvs_flag_i) gf_bs_read_ue_log_idx(bs, "elemental_duration_in_tc_minus1", idx); else low_delay_hrd_flag_i = gf_bs_read_int_log_idx(bs, 1, "low_delay_hrd_flag", idx); if (!low_delay_hrd_flag_i) { cpb_cnt_minus1_i = gf_bs_read_ue_log_idx(bs, "cpb_cnt_minus1", idx); } if (nal_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } if (vcl_hrd_parameters_present_flag) { sub_layer_hrd_parameters(bs, i, cpb_cnt_minus1_i, sub_pic_hrd_params_present_flag, idx, i); } } } static s32 gf_hevc_read_vps_bs_internal(GF_BitStream *bs, HEVCState *hevc, Bool stop_at_vps_ext) { u8 vps_sub_layer_ordering_info_present_flag, vps_extension_flag; u32 i, j; s32 vps_id; HEVC_VPS *vps; u8 layer_id_included_flag[MAX_LHVC_LAYERS][64]; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) return -1; vps = &hevc->vps[vps_id]; vps->bit_pos_vps_extensions = -1; if (!vps->state) { vps->id = vps_id; vps->state = 1; } 
vps->base_layer_internal_flag = gf_bs_read_int_log(bs, 1, "base_layer_internal_flag"); vps->base_layer_available_flag = gf_bs_read_int_log(bs, 1, "base_layer_available_flag"); vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers_minus1"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; vps->temporal_id_nesting = gf_bs_read_int_log(bs, 1, "temporal_id_nesting"); gf_bs_read_int_log(bs, 16, "vps_reserved_ffff_16bits"); hevc_profile_tier_level(bs, 1, vps->max_sub_layers - 1, &vps->ptl, 0); vps_sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "vps_sub_layer_ordering_info_present_flag"); for (i = (vps_sub_layer_ordering_info_present_flag ? 0 : vps->max_sub_layers - 1); i < vps->max_sub_layers; i++) { gf_bs_read_ue_log_idx(bs, "vps_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "vps_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "vps_max_latency_increase_plus1", i); } vps->max_layer_id = gf_bs_read_int_log(bs, 6, "max_layer_id"); if (vps->max_layer_id > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] VPS max layer ID %u but GPAC only supports %u\n", vps->max_layer_id, MAX_LHVC_LAYERS)); return -1; } vps->num_layer_sets = gf_bs_read_ue_log(bs, "num_layer_sets_minus1") + 1; if (vps->num_layer_sets > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Wrong number of layer sets in VPS %d\n", vps->num_layer_sets)); return -1; } for (i = 1; i < vps->num_layer_sets; i++) { for (j = 0; j <= vps->max_layer_id; j++) { layer_id_included_flag[i][j] = gf_bs_read_int_log_idx2(bs, 1, "layer_id_included_flag", i, j); } } vps->num_layers_in_id_list[0] = 1; for (i = 1; i < vps->num_layer_sets; i++) { u32 n, m; n = 0; for (m = 0; m <= vps->max_layer_id; m++) { if (layer_id_included_flag[i][m]) { vps->LayerSetLayerIdList[i][n++] = m; if (vps->LayerSetLayerIdListMax[i] < m) vps->LayerSetLayerIdListMax[i] = m; } } vps->num_layers_in_id_list[i] = n; } if (gf_bs_read_int_log(bs, 1, "vps_timing_info_present_flag")) { u32 vps_num_hrd_parameters; gf_bs_read_int_log(bs, 32, "vps_num_units_in_tick"); gf_bs_read_int_log(bs, 32, "vps_time_scale"); if (gf_bs_read_int_log(bs, 1, "vps_poc_proportional_to_timing_flag")) { gf_bs_read_ue_log(bs, "vps_num_ticks_poc_diff_one_minus1"); } vps_num_hrd_parameters = gf_bs_read_ue_log(bs, "vps_num_hrd_parameters"); for (i = 0; i < vps_num_hrd_parameters; i++) { Bool cprms_present_flag = GF_TRUE; gf_bs_read_ue_log_idx(bs, "hrd_layer_set_idx", i); if (i > 0) cprms_present_flag = gf_bs_read_int_log(bs, 1, "cprms_present_flag"); hevc_parse_hrd_parameters(bs, cprms_present_flag, vps->max_sub_layers - 1, i); } } if (stop_at_vps_ext) { return vps_id; } vps_extension_flag = gf_bs_read_int_log(bs, 1, "vps_extension_flag"); if (vps_extension_flag) { Bool res; gf_bs_align(bs); res = hevc_parse_vps_extension(vps, bs); if (res != GF_TRUE) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Failed to parse VPS extensions\n")); return -1; } if (gf_bs_read_int_log(bs, 1, "vps_extension2_flag")) { #if 0 while (gf_bs_available(bs)) { /*vps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } } return vps_id; } GF_EXPORT s32 gf_hevc_read_vps_ex(u8 *data, u32 *size, HEVCState *hevc, Bool remove_extensions) { GF_BitStream *bs; char *data_without_emulation_bytes = NULL; u32 data_without_emulation_bytes_size = 0; s32 vps_id = -1; 
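	/* Reminder, not part of the original file: emulation prevention inserts
	   0x03 after any 00 00 pair when the next byte is 00, 01, 02 or 03,
	   e.g. 00 00 01 -> 00 00 03 01. The bit-by-bit VPS copy below must
	   therefore operate on an EPB-free buffer and re-insert the 0x03 bytes
	   at the end. */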
	/*still contains emulation bytes*/
	data_without_emulation_bytes_size = remove_extensions ? gf_media_nalu_emulation_bytes_remove_count(data, (*size)) : 0;
	if (!data_without_emulation_bytes_size) {
		bs = gf_bs_new(data, (*size), GF_BITSTREAM_READ);
		gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	}
	//when removing VPS ext, we have to get the full buffer without emulation prevention bytes because we do a bit-by-bit copy of the vps
	else {
		data_without_emulation_bytes = gf_malloc((*size) * sizeof(char));
		data_without_emulation_bytes_size = gf_media_nalu_remove_emulation_bytes(data, data_without_emulation_bytes, (*size));
		bs = gf_bs_new(data_without_emulation_bytes, data_without_emulation_bytes_size, GF_BITSTREAM_READ);
	}
	if (!bs) goto exit;

	if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit;

	vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, remove_extensions);
	if (vps_id < 0) goto exit;

	if (remove_extensions) {
		u8 *new_vps;
		u32 new_vps_size, emulation_bytes;
		u32 bit_pos = gf_bs_get_bit_offset(bs);
		GF_BitStream *w_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
		gf_bs_seek(bs, 0);
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u8(w_bs, gf_bs_read_u8(bs) );
		gf_bs_write_u16(w_bs, gf_bs_read_u16(bs) );
		bit_pos -= 48;
		while (bit_pos) {
			u32 v = gf_bs_read_int(bs, 1);
			gf_bs_write_int(w_bs, v, 1);
			bit_pos--;
		}
		/*vps extension flag*/
		gf_bs_write_int(w_bs, 0, 1);
		new_vps = NULL;
		gf_bs_get_content(w_bs, &new_vps, &new_vps_size);
		gf_bs_del(w_bs);

		emulation_bytes = gf_media_nalu_emulation_bytes_add_count(new_vps, new_vps_size);
		if (emulation_bytes + new_vps_size > *size) {
			GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("Buffer too small to rewrite VPS - skipping rewrite\n"));
		}
		else {
			*size = gf_media_nalu_add_emulation_bytes(new_vps, data, new_vps_size);
		}
		if (new_vps)
			gf_free(new_vps);
	}

exit:
	if (bs) gf_bs_del(bs);
	if (data_without_emulation_bytes) gf_free(data_without_emulation_bytes);
	return vps_id;
}

GF_EXPORT
s32 gf_hevc_read_vps(u8 *data, u32 size, HEVCState *hevc)
{
	return gf_hevc_read_vps_ex(data, &size, hevc, GF_FALSE);
}

GF_EXPORT
s32 gf_hevc_read_vps_bs(GF_BitStream *bs, HEVCState *hevc)
{
	if (!bs || !hevc) return -1;
	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1;
	return gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE);
}

static void hevc_scaling_list_data(GF_BitStream *bs)
{
	u32 i, sizeId, matrixId;
	for (sizeId = 0; sizeId < 4; sizeId++) {
		for (matrixId = 0; matrixId < 6; matrixId += (sizeId == 3) ?
3 : 1) { u32 idx = sizeId*100 + 10*matrixId; u32 scaling_list_pred_mode_flag_sizeId_matrixId = gf_bs_read_int_log_idx(bs, 1, "scaling_list_pred_mode_flag_sizeId_matrixId", idx); if (!scaling_list_pred_mode_flag_sizeId_matrixId) { gf_bs_read_ue_log_idx(bs, "scaling_list_pred_matrix_id_delta", idx); } else { //u32 nextCoef = 8; u32 coefNum = MIN(64, (1 << (4 + (sizeId << 1)))); if (sizeId > 1) { gf_bs_read_se_log_idx(bs, "scaling_list_dc_coef_minus8", idx); } for (i = 0; i < coefNum; i++) { gf_bs_read_se_log_idx2(bs, "scaling_list_delta_coef", idx, i); } } } } } static const struct { u32 w, h; } hevc_sar[17] = { { 0, 0 }, { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, { 160,99 }, { 4,3}, { 3,2}, { 2,1} }; static s32 gf_hevc_read_sps_bs_internal(GF_BitStream *bs, HEVCState *hevc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id = -1; u32 i, nb_CTUs, depth; HEVC_SPS *sps; HEVC_VPS *vps; HEVC_ProfileTierLevel ptl; Bool multiLayerExtSpsFlag; u8 sps_ext_or_max_sub_layers_minus1, max_sub_layers_minus1; if (vui_flag_pos) *vui_flag_pos = 0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) { return -1; } memset(&ptl, 0, sizeof(ptl)); max_sub_layers_minus1 = 0; sps_ext_or_max_sub_layers_minus1 = 0; if (layer_id == 0) max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1"); else sps_ext_or_max_sub_layers_minus1 = gf_bs_read_int_log(bs, 3, "sps_ext_or_max_sub_layers_minus1"); multiLayerExtSpsFlag = (layer_id != 0) && (sps_ext_or_max_sub_layers_minus1 == 7); if (!multiLayerExtSpsFlag) { gf_bs_read_int_log(bs, 1, "temporal_id_nesting_flag"); hevc_profile_tier_level(bs, 1, max_sub_layers_minus1, &ptl, 0); } sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((sps_id < 0) || (sps_id >= 16)) { return -1; } sps = &hevc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->ptl = ptl; vps = &hevc->vps[vps_id]; sps->max_sub_layers_minus1 = 0; sps->sps_ext_or_max_sub_layers_minus1 = 0; /* default values */ sps->colour_primaries = 2; sps->transfer_characteristic = 2; sps->matrix_coeffs = 2; //sps_rep_format_idx = 0; if (multiLayerExtSpsFlag) { sps->update_rep_format_flag = gf_bs_read_int_log(bs, 1, "update_rep_format_flag"); if (sps->update_rep_format_flag) { sps->rep_format_idx = gf_bs_read_int_log(bs, 8, "rep_format_idx"); } else { sps->rep_format_idx = vps->rep_format_idx[layer_id]; } sps->width = vps->rep_formats[sps->rep_format_idx].pic_width_luma_samples; sps->height = vps->rep_formats[sps->rep_format_idx].pic_height_luma_samples; sps->chroma_format_idc = vps->rep_formats[sps->rep_format_idx].chroma_format_idc; sps->bit_depth_luma = vps->rep_formats[sps->rep_format_idx].bit_depth_luma; sps->bit_depth_chroma = vps->rep_formats[sps->rep_format_idx].bit_depth_chroma; sps->separate_colour_plane_flag = vps->rep_formats[sps->rep_format_idx].separate_colour_plane_flag; //TODO this is crude ... 
sps->ptl = vps->ext_ptl[0]; } else { sps->chroma_format_idc = gf_bs_read_ue_log(bs, "chroma_format_idc"); if (sps->chroma_format_idc == 3) sps->separate_colour_plane_flag = gf_bs_read_int_log(bs, 1, "separate_colour_plane_flag"); sps->width = gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); if ((sps->cw_flag = gf_bs_read_int_log(bs, 1, "conformance_window_flag"))) { u32 SubWidthC, SubHeightC; if (sps->chroma_format_idc == 1) { SubWidthC = SubHeightC = 2; } else if (sps->chroma_format_idc == 2) { SubWidthC = 2; SubHeightC = 1; } else { SubWidthC = SubHeightC = 1; } sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); sps->width -= SubWidthC * (sps->cw_left + sps->cw_right); sps->height -= SubHeightC * (sps->cw_top + sps->cw_bottom); } sps->bit_depth_luma = 8 + gf_bs_read_ue_log(bs, "bit_depth_luma_minus8"); sps->bit_depth_chroma = 8 + gf_bs_read_ue_log(bs, "bit_depth_chroma_minus8"); } sps->log2_max_pic_order_cnt_lsb = 4 + gf_bs_read_ue_log(bs, "log2_max_pic_order_cnt_lsb_minus4"); if (!multiLayerExtSpsFlag) { sps->sub_layer_ordering_info_present_flag = gf_bs_read_int_log(bs, 1, "sub_layer_ordering_info_present_flag"); for (i = sps->sub_layer_ordering_info_present_flag ? 0 : sps->max_sub_layers_minus1; i <= sps->max_sub_layers_minus1; i++) { gf_bs_read_ue_log_idx(bs, "max_dec_pic_buffering", i); gf_bs_read_ue_log_idx(bs, "num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "max_latency_increase", i); } } sps->log2_min_luma_coding_block_size = 3 + gf_bs_read_ue_log(bs, "log2_min_luma_coding_block_size_minus3"); sps->log2_diff_max_min_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_luma_coding_block_size"); sps->max_CU_width = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->max_CU_height = (1 << (sps->log2_min_luma_coding_block_size + sps->log2_diff_max_min_luma_coding_block_size)); sps->log2_min_transform_block_size = 2 + gf_bs_read_ue_log(bs, "log2_min_transform_block_size_minus2"); sps->log2_max_transform_block_size = sps->log2_min_transform_block_size + gf_bs_read_ue_log(bs, "log2_max_transform_block_size"); depth = 0; sps->max_transform_hierarchy_depth_inter = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_inter"); sps->max_transform_hierarchy_depth_intra = gf_bs_read_ue_log(bs, "max_transform_hierarchy_depth_intra"); while ((u32)(sps->max_CU_width >> sps->log2_diff_max_min_luma_coding_block_size) > (u32)(1 << (sps->log2_min_transform_block_size + depth))) { depth++; } sps->max_CU_depth = sps->log2_diff_max_min_luma_coding_block_size + depth; nb_CTUs = ((sps->width + sps->max_CU_width - 1) / sps->max_CU_width) * ((sps->height + sps->max_CU_height - 1) / sps->max_CU_height); sps->bitsSliceSegmentAddress = 0; while (nb_CTUs > (u32)(1 << sps->bitsSliceSegmentAddress)) { sps->bitsSliceSegmentAddress++; } sps->scaling_list_enable_flag = gf_bs_read_int_log(bs, 1, "scaling_list_enable_flag"); if (sps->scaling_list_enable_flag) { sps->infer_scaling_list_flag = 0; sps->scaling_list_ref_layer_id = 0; if (multiLayerExtSpsFlag) { sps->infer_scaling_list_flag = gf_bs_read_int_log(bs, 1, "infer_scaling_list_flag"); } if (sps->infer_scaling_list_flag) { sps->scaling_list_ref_layer_id = gf_bs_read_int_log(bs, 6, "scaling_list_ref_layer_id"); } else { sps->scaling_list_data_present_flag = 
gf_bs_read_int_log(bs, 1, "scaling_list_data_present_flag"); if (sps->scaling_list_data_present_flag) { hevc_scaling_list_data(bs); } } } sps->asymmetric_motion_partitions_enabled_flag = gf_bs_read_int_log(bs, 1, "asymmetric_motion_partitions_enabled_flag"); sps->sample_adaptive_offset_enabled_flag = gf_bs_read_int_log(bs, 1, "sample_adaptive_offset_enabled_flag"); if ( (sps->pcm_enabled_flag = gf_bs_read_int_log(bs, 1, "pcm_enabled_flag")) ) { sps->pcm_sample_bit_depth_luma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_luma_minus1"); sps->pcm_sample_bit_depth_chroma_minus1 = gf_bs_read_int_log(bs, 4, "pcm_sample_bit_depth_chroma_minus1"); sps->log2_min_pcm_luma_coding_block_size_minus3 = gf_bs_read_ue_log(bs, "log2_min_pcm_luma_coding_block_size_minus3"); sps->log2_diff_max_min_pcm_luma_coding_block_size = gf_bs_read_ue_log(bs, "log2_diff_max_min_pcm_luma_coding_block_size"); sps->pcm_loop_filter_disable_flag = gf_bs_read_int_log(bs, 1, "pcm_loop_filter_disable_flag"); } sps->num_short_term_ref_pic_sets = gf_bs_read_ue_log(bs, "num_short_term_ref_pic_sets"); if (sps->num_short_term_ref_pic_sets > 64) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid number of short term reference picture sets %d\n", sps->num_short_term_ref_pic_sets)); return -1; } for (i = 0; i < sps->num_short_term_ref_pic_sets; i++) { Bool ret = hevc_parse_short_term_ref_pic_set(bs, sps, i); /*cannot parse short_term_ref_pic_set, skip VUI parsing*/ if (!ret) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] Invalid short_term_ref_pic_set\n")); return -1; } } sps->long_term_ref_pics_present_flag = gf_bs_read_int_log(bs, 1, "long_term_ref_pics_present_flag"); if (sps->long_term_ref_pics_present_flag) { sps->num_long_term_ref_pic_sps = gf_bs_read_ue_log(bs, "num_long_term_ref_pic_sps"); for (i = 0; i < sps->num_long_term_ref_pic_sps; i++) { gf_bs_read_int_log_idx(bs, sps->log2_max_pic_order_cnt_lsb, "lt_ref_pic_poc_lsb_sps", i); gf_bs_read_int_log_idx(bs, 1, "used_by_curr_pic_lt_sps_flag", i); } } sps->temporal_mvp_enable_flag = gf_bs_read_int_log(bs, 1, "temporal_mvp_enable_flag"); sps->strong_intra_smoothing_enable_flag = gf_bs_read_int_log(bs, 1, "strong_intra_smoothing_enable_flag"); if (vui_flag_pos) *vui_flag_pos = (u32)gf_bs_get_bit_offset(bs); if ((sps->vui_parameters_present_flag = gf_bs_read_int_log(bs, 1, "vui_parameters_present_flag")) ) { sps->aspect_ratio_info_present_flag = gf_bs_read_int_log(bs, 1, "aspect_ratio_info_present_flag"); if (sps->aspect_ratio_info_present_flag) { sps->sar_idc = gf_bs_read_int_log(bs, 8, "aspect_ratio_idc"); if (sps->sar_idc == 255) { sps->sar_width = gf_bs_read_int_log(bs, 16, "aspect_ratio_width"); sps->sar_height = gf_bs_read_int_log(bs, 16, "aspect_ratio_height"); } else if (sps->sar_idc < 17) { sps->sar_width = hevc_sar[sps->sar_idc].w; sps->sar_height = hevc_sar[sps->sar_idc].h; } } if ((sps->overscan_info_present = gf_bs_read_int_log(bs, 1, "overscan_info_present"))) sps->overscan_appropriate = gf_bs_read_int_log(bs, 1, "overscan_appropriate"); sps->video_signal_type_present_flag = gf_bs_read_int_log(bs, 1, "video_signal_type_present_flag"); if (sps->video_signal_type_present_flag) { sps->video_format = gf_bs_read_int_log(bs, 3, "video_format"); sps->video_full_range_flag = gf_bs_read_int_log(bs, 1, "video_full_range_flag"); if ((sps->colour_description_present_flag = gf_bs_read_int_log(bs, 1, "colour_description_present_flag"))) { sps->colour_primaries = gf_bs_read_int_log(bs, 8, "colour_primaries"); sps->transfer_characteristic = gf_bs_read_int_log(bs, 8, 
"transfer_characteristic"); sps->matrix_coeffs = gf_bs_read_int_log(bs, 8, "matrix_coefficients"); } } if ((sps->chroma_loc_info_present_flag = gf_bs_read_int_log(bs, 1, "chroma_loc_info_present_flag"))) { sps->chroma_sample_loc_type_top_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_top_field"); sps->chroma_sample_loc_type_bottom_field = gf_bs_read_ue_log(bs, "chroma_sample_loc_type_bottom_field"); } sps->neutra_chroma_indication_flag = gf_bs_read_int_log(bs, 1, "neutra_chroma_indication_flag"); sps->field_seq_flag = gf_bs_read_int_log(bs, 1, "field_seq_flag"); sps->frame_field_info_present_flag = gf_bs_read_int_log(bs, 1, "frame_field_info_present_flag"); if ((sps->default_display_window_flag = gf_bs_read_int_log(bs, 1, "default_display_window_flag"))) { sps->left_offset = gf_bs_read_ue_log(bs, "display_window_left_offset"); sps->right_offset = gf_bs_read_ue_log(bs, "display_window_right_offset"); sps->top_offset = gf_bs_read_ue_log(bs, "display_window_top_offset"); sps->bottom_offset = gf_bs_read_ue_log(bs, "display_window_bottom_offset"); } sps->has_timing_info = gf_bs_read_int_log(bs, 1, "has_timing_info"); if (sps->has_timing_info) { sps->num_units_in_tick = gf_bs_read_int_log(bs, 32, "num_units_in_tick"); sps->time_scale = gf_bs_read_int_log(bs, 32, "time_scale"); sps->poc_proportional_to_timing_flag = gf_bs_read_int_log(bs, 1, "poc_proportional_to_timing_flag"); if (sps->poc_proportional_to_timing_flag) sps->num_ticks_poc_diff_one_minus1 = gf_bs_read_ue_log(bs, "num_ticks_poc_diff_one_minus1"); if ((sps->hrd_parameters_present_flag = gf_bs_read_int_log(bs, 1, "hrd_parameters_present_flag"))) { // GF_LOG(GF_LOG_INFO, GF_LOG_CODING, ("[HEVC] HRD param parsing not implemented\n")); return sps_id; } } if (gf_bs_read_int_log(bs, 1, "bitstream_restriction_flag")) { gf_bs_read_int_log(bs, 1, "tiles_fixed_structure_flag"); gf_bs_read_int_log(bs, 1, "motion_vectors_over_pic_boundaries_flag"); gf_bs_read_int_log(bs, 1, "restricted_ref_pic_lists_flag"); gf_bs_read_ue_log(bs, "min_spatial_segmentation_idc"); gf_bs_read_ue_log(bs, "max_bytes_per_pic_denom"); gf_bs_read_ue_log(bs, "max_bits_per_min_cu_denom"); gf_bs_read_ue_log(bs, "log2_max_mv_length_horizontal"); gf_bs_read_ue_log(bs, "log2_max_mv_length_vertical"); } } if (gf_bs_read_int_log(bs, 1, "sps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*sps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return sps_id; } GF_EXPORT s32 gf_hevc_read_sps_ex(char *data, u32 size, HEVCState *hevc, u32 *vui_flag_pos) { GF_BitStream *bs; s32 sps_id = -1; u8 layer_id; if (vui_flag_pos) *vui_flag_pos = 0; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) goto exit; sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, vui_flag_pos); exit: if (bs) gf_bs_del(bs); return sps_id; } GF_EXPORT s32 gf_hevc_read_sps(u8 *data, u32 size, HEVCState *hevc) { return gf_hevc_read_sps_ex(data, size, hevc, NULL); } GF_EXPORT s32 gf_hevc_read_sps_bs(GF_BitStream *bs, HEVCState *hevc) { u8 layer_id; if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, &layer_id)) return -1; return gf_hevc_read_sps_bs_internal(bs, hevc, layer_id, NULL); } static s32 gf_hevc_read_pps_bs_internal(GF_BitStream *bs, HEVCState *hevc) { u32 i; s32 pps_id; HEVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { 
GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &hevc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_ue_log(bs, "sps_id"); if ((pps->sps_id<0) || (pps->sps_id >= 16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[HEVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } hevc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->dependent_slice_segments_enabled_flag = gf_bs_read_int_log(bs, 1, "dependent_slice_segments_enabled_flag"); pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->num_extra_slice_header_bits = gf_bs_read_int_log(bs, 3, "num_extra_slice_header_bits"); pps->sign_data_hiding_flag = gf_bs_read_int_log(bs, 1, "sign_data_hiding_flag"); pps->cabac_init_present_flag = gf_bs_read_int_log(bs, 1, "cabac_init_present_flag"); pps->num_ref_idx_l0_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active"); pps->num_ref_idx_l1_default_active = 1 + gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active"); pps->pic_init_qp_minus26 = gf_bs_read_se_log(bs, "pic_init_qp_minus26"); pps->constrained_intra_pred_flag = gf_bs_read_int_log(bs, 1, "constrained_intra_pred_flag"); pps->transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "transform_skip_enabled_flag"); if ((pps->cu_qp_delta_enabled_flag = gf_bs_read_int_log(bs, 1, "cu_qp_delta_enabled_flag"))) pps->diff_cu_qp_delta_depth = gf_bs_read_ue_log(bs, "diff_cu_qp_delta_depth"); pps->pic_cb_qp_offset = gf_bs_read_se_log(bs, "pic_cb_qp_offset"); pps->pic_cr_qp_offset = gf_bs_read_se_log(bs, "pic_cr_qp_offset"); pps->slice_chroma_qp_offsets_present_flag = gf_bs_read_int_log(bs, 1, "slice_chroma_qp_offsets_present_flag"); pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag"); pps->weighted_bipred_flag = gf_bs_read_int_log(bs, 1, "weighted_bipred_flag"); pps->transquant_bypass_enable_flag = gf_bs_read_int_log(bs, 1, "transquant_bypass_enable_flag"); pps->tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "tiles_enabled_flag"); pps->entropy_coding_sync_enabled_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); if (pps->tiles_enabled_flag) { pps->num_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_tile_columns_minus1"); pps->num_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_tile_rows_minus1"); pps->uniform_spacing_flag = gf_bs_read_int_log(bs, 1, "uniform_spacing_flag"); if (!pps->uniform_spacing_flag) { for (i = 0; i < pps->num_tile_columns - 1; i++) { pps->column_width[i] = 1 + gf_bs_read_ue_log_idx(bs, "column_width_minus1", i); } for (i = 0; i < pps->num_tile_rows - 1; i++) { pps->row_height[i] = 1 + gf_bs_read_ue_log_idx(bs, "row_height_minus1", i); } } pps->loop_filter_across_tiles_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_tiles_enabled_flag"); } pps->loop_filter_across_slices_enabled_flag = gf_bs_read_int_log(bs, 1, "loop_filter_across_slices_enabled_flag"); if ((pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag"))) { pps->deblocking_filter_override_enabled_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_override_enabled_flag"); if (! 
(pps->pic_disable_deblocking_filter_flag = gf_bs_read_int_log(bs, 1, "pic_disable_deblocking_filter_flag"))) { pps->beta_offset_div2 = gf_bs_read_se_log(bs, "beta_offset_div2"); pps->tc_offset_div2 = gf_bs_read_se_log(bs, "tc_offset_div2"); } } if ((pps->pic_scaling_list_data_present_flag = gf_bs_read_int_log(bs, 1, "pic_scaling_list_data_present_flag"))) { hevc_scaling_list_data(bs); } pps->lists_modification_present_flag = gf_bs_read_int_log(bs, 1, "lists_modification_present_flag"); pps->log2_parallel_merge_level_minus2 = gf_bs_read_ue_log(bs, "log2_parallel_merge_level_minus2"); pps->slice_segment_header_extension_present_flag = gf_bs_read_int_log(bs, 1, "slice_segment_header_extension_present_flag"); if (gf_bs_read_int_log(bs, 1, "pps_extension_flag")) { #if 0 while (gf_bs_available(bs)) { /*pps_extension_data_flag */ gf_bs_read_int(bs, 1); } #endif } return pps_id; } GF_EXPORT s32 gf_hevc_read_pps(u8 *data, u32 size, HEVCState *hevc) { GF_BitStream *bs; s32 pps_id = -1; bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) goto exit; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) goto exit; pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); exit: if (bs) gf_bs_del(bs); return pps_id; } GF_EXPORT s32 gf_hevc_read_pps_bs(GF_BitStream *bs, HEVCState *hevc) { if (!bs || !hevc) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); if (!hevc_parse_nal_header(bs, NULL, NULL, NULL)) return -1; return gf_hevc_read_pps_bs_internal(bs, hevc); } GF_EXPORT s32 gf_hevc_parse_nalu_bs(GF_BitStream *bs, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; HEVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &hevc->s_info, sizeof(HEVCSliceInfo)); if (!hevc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_HEVC_NALU_ACCESS_UNIT: case GF_HEVC_NALU_END_OF_SEQ: case GF_HEVC_NALU_END_OF_STREAM: ret = 1; break; /*slice_segment_layer_rbsp*/ case GF_HEVC_NALU_SLICE_TRAIL_N: case GF_HEVC_NALU_SLICE_TRAIL_R: case GF_HEVC_NALU_SLICE_TSA_N: case GF_HEVC_NALU_SLICE_TSA_R: case GF_HEVC_NALU_SLICE_STSA_N: case GF_HEVC_NALU_SLICE_STSA_R: case GF_HEVC_NALU_SLICE_BLA_W_LP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_N_LP: case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_CRA: case GF_HEVC_NALU_SLICE_RADL_N: case GF_HEVC_NALU_SLICE_RADL_R: case GF_HEVC_NALU_SLICE_RASL_N: case GF_HEVC_NALU_SLICE_RASL_R: is_slice = GF_TRUE; /* slice - read the info and compare.*/ ret = hevc_parse_slice_segment(bs, hevc, &n_state); if (ret < 0) return ret; hevc_compute_poc(&n_state); ret = 0; if (hevc->s_info.poc != n_state.poc) { ret = 1; break; } if (n_state.first_slice_segment_in_pic_flag) { if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_HEVC_NALU_SEQ_PARAM: hevc->last_parsed_sps_id = gf_hevc_read_sps_bs_internal(bs, hevc, *layer_id, NULL); ret = (hevc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_PIC_PARAM: hevc->last_parsed_pps_id = gf_hevc_read_pps_bs_internal(bs, hevc); ret = (hevc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_HEVC_NALU_VID_PARAM: hevc->last_parsed_vps_id = gf_hevc_read_vps_bs_internal(bs, hevc, GF_FALSE); ret = (hevc->last_parsed_vps_id>=0) ? 
0 : -1; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && hevc->s_info.sps) { n_state.frame_num_offset_prev = hevc->s_info.frame_num_offset; n_state.frame_num_prev = hevc->s_info.frame_num; n_state.poc_lsb_prev = hevc->s_info.poc_lsb; n_state.poc_msb_prev = hevc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) hevc_compute_poc(&n_state); memcpy(&hevc->s_info, &n_state, sizeof(HEVCSliceInfo)); return ret; } GF_EXPORT s32 gf_hevc_parse_nalu(u8 *data, u32 size, HEVCState *hevc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret = -1; if (!hevc) { if (nal_unit_type) (*nal_unit_type) = (data[0] & 0x7E) >> 1; if (layer_id) { u8 id = data[0] & 1; id <<= 5; id |= (data[1] >> 3) & 0x1F; (*layer_id) = id; } if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_hevc_parse_nalu_bs(bs, hevc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } GF_EXPORT GF_Err gf_hevc_change_vui(GF_HEVCConfig *hvcc, GF_VUIInfo *vui_info) { GF_BitStream *orig, *mod; HEVCState hevc; u32 i, bit_offset, flag; s32 idx; GF_NALUFFParamArray *spss; GF_NALUFFParam *slc; orig = NULL; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; i = 0; spss = NULL; while ((spss = (GF_NALUFFParamArray *)gf_list_enum(hvcc->param_array, &i))) { if (spss->type == GF_HEVC_NALU_SEQ_PARAM) break; spss = NULL; } if (!spss) return GF_NON_COMPLIANT_BITSTREAM; i = 0; while ((slc = (GF_NALUFFParam *)gf_list_enum(spss->nalus, &i))) { u8 *no_emulation_buf; u32 no_emulation_buf_size, emulation_bytes; /*SPS may still contains emulation bytes*/ no_emulation_buf = gf_malloc((slc->size) * sizeof(char)); no_emulation_buf_size = gf_media_nalu_remove_emulation_bytes(slc->data, no_emulation_buf, slc->size); idx = gf_hevc_read_sps_ex(no_emulation_buf, no_emulation_buf_size, &hevc, &bit_offset); if (idx < 0) { if (orig) gf_bs_del(orig); gf_free(no_emulation_buf); continue; } orig = gf_bs_new(no_emulation_buf, no_emulation_buf_size, GF_BITSTREAM_READ); mod = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); /*copy over till vui flag*/ assert(bit_offset >= 0); while (bit_offset) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); bit_offset--; } avc_hevc_rewrite_vui(vui_info, orig, mod); /*finally copy over remaining*/ while (gf_bs_bits_available(orig)) { flag = gf_bs_read_int(orig, 1); gf_bs_write_int(mod, flag, 1); } gf_bs_del(orig); orig = NULL; gf_free(no_emulation_buf); /*set anti-emulation*/ gf_bs_get_content(mod, &no_emulation_buf, &no_emulation_buf_size); emulation_bytes = gf_media_nalu_emulation_bytes_add_count(no_emulation_buf, no_emulation_buf_size); if (no_emulation_buf_size + emulation_bytes > slc->size) slc->data = (char*)gf_realloc(slc->data, no_emulation_buf_size + emulation_bytes); slc->size = gf_media_nalu_add_emulation_bytes(no_emulation_buf, slc->data, no_emulation_buf_size); gf_bs_del(mod); gf_free(no_emulation_buf); } return GF_OK; } GF_EXPORT GF_Err gf_hevc_change_par(GF_HEVCConfig *hvcc, s32 ar_n, s32 ar_d) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = ar_n; vuii.ar_den = ar_d; vuii.fullrange = -1; vuii.video_format = -1; vuii.color_prim = -1; vuii.color_tfc = -1; vuii.color_matrix = -1; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_change_color(GF_HEVCConfig *hvcc, s32 fullrange, s32 vidformat, s32 colorprim, s32 transfer, s32 
colmatrix) { GF_VUIInfo vuii; memset(&vuii, 0, sizeof(GF_VUIInfo)); vuii.ar_num = -1; vuii.ar_den = -1; vuii.fullrange = fullrange; vuii.video_format = vidformat; vuii.color_prim = colorprim; vuii.color_tfc = transfer; vuii.color_matrix = colmatrix; return gf_hevc_change_vui(hvcc, &vuii); } GF_EXPORT GF_Err gf_hevc_get_sps_info_with_state(HEVCState *hevc, u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { s32 idx; idx = gf_hevc_read_sps(sps_data, sps_size, hevc); if (idx < 0) { return GF_NON_COMPLIANT_BITSTREAM; } if (sps_id) *sps_id = idx; if (width) *width = hevc->sps[idx].width; if (height) *height = hevc->sps[idx].height; if (par_n) *par_n = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_width : (u32)-1; if (par_d) *par_d = hevc->sps[idx].aspect_ratio_info_present_flag ? hevc->sps[idx].sar_height : (u32)-1; return GF_OK; } GF_EXPORT GF_Err gf_hevc_get_sps_info(u8 *sps_data, u32 sps_size, u32 *sps_id, u32 *width, u32 *height, s32 *par_n, s32 *par_d) { HEVCState hevc; memset(&hevc, 0, sizeof(HEVCState)); hevc.sps_active_idx = -1; return gf_hevc_get_sps_info_with_state(&hevc, sps_data, sps_size, sps_id, width, height, par_n, par_d); } #endif //GPAC_DISABLE_HEVC static u32 AC3_FindSyncCode(u8 *buf, u32 buflen) { u32 end = buflen - 6; u32 offset = 0; while (offset <= end) { if (buf[offset] == 0x0b && buf[offset + 1] == 0x77) { return offset; } offset++; } return buflen; } static Bool AC3_FindSyncCodeBS(GF_BitStream *bs) { u8 b1; u64 pos = gf_bs_get_position(bs); u64 end = gf_bs_get_size(bs); pos += 1; b1 = gf_bs_read_u8(bs); while (pos + 1 <= end) { u8 b2 = gf_bs_read_u8(bs); if ((b1 == 0x0b) && (b2 == 0x77)) { gf_bs_seek(bs, pos - 1); return GF_TRUE; } pos++; b1 = b2; } return GF_FALSE; } static const u32 ac3_sizecod_to_bitrate[] = { 32000, 40000, 48000, 56000, 64000, 80000, 96000, 112000, 128000, 160000, 192000, 224000, 256000, 320000, 384000, 448000, 512000, 576000, 640000 }; static const u32 ac3_sizecod2_to_framesize[] = { 96, 120, 144, 168, 192, 240, 288, 336, 384, 480, 576, 672, 768, 960, 1152, 1344, 1536, 1728, 1920 }; static const u32 ac3_sizecod1_to_framesize[] = { 69, 87, 104, 121, 139, 174, 208, 243, 278, 348, 417, 487, 557, 696, 835, 975, 1114, 1253, 1393 }; static const u32 ac3_sizecod0_to_framesize[] = { 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384, 448, 512, 640, 768, 896, 1024, 1152, 1280 }; static const u32 ac3_mod_to_chans[] = { 2, 1, 2, 3, 3, 4, 4, 5 }; GF_EXPORT u32 gf_ac3_get_channels(u32 acmod) { u32 nb_ch; nb_ch = ac3_mod_to_chans[acmod]; return nb_ch; } GF_EXPORT u32 gf_ac3_get_bitrate(u32 brcode) { return ac3_sizecod_to_bitrate[brcode]; } Bool gf_ac3_parser(u8 *buf, u32 buflen, u32 *pos, GF_AC3Config *hdr, Bool full_parse) { GF_BitStream *bs; Bool ret; if (buflen < 6) return GF_FALSE; (*pos) = AC3_FindSyncCode(buf, buflen); if (*pos >= buflen) return GF_FALSE; bs = gf_bs_new((const char*)(buf + *pos), buflen, GF_BITSTREAM_READ); ret = gf_ac3_parser_bs(bs, hdr, full_parse); gf_bs_del(bs); return ret; } GF_EXPORT Bool gf_ac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, frmsizecod, bsid, ac3_mod, freq, framesize, bsmod, syncword; u64 pos; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } 
gf_bs_read_int_log(bs, 16, "crc1"); fscod = gf_bs_read_int_log(bs, 2, "fscod"); frmsizecod = gf_bs_read_int_log(bs, 6, "frmsizecod"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); bsmod = gf_bs_read_int_log(bs, 3, "bsmod"); ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); if (frmsizecod >= 2 * sizeof(ac3_sizecod_to_bitrate) / sizeof(u32)) return GF_FALSE; hdr->bitrate = ac3_sizecod_to_bitrate[frmsizecod / 2]; if (bsid > 8) hdr->bitrate = hdr->bitrate >> (bsid - 8); switch (fscod) { case 0: if (frmsizecod >= 2 * sizeof(ac3_sizecod0_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 48000; framesize = ac3_sizecod0_to_framesize[frmsizecod / 2] * 2; break; case 1: if (frmsizecod >= 2 * sizeof(ac3_sizecod1_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 44100; framesize = (ac3_sizecod1_to_framesize[frmsizecod / 2] + (frmsizecod & 0x1)) * 2; break; case 2: if (frmsizecod >= 2 * sizeof(ac3_sizecod2_to_framesize) / sizeof(u32)) return GF_FALSE; freq = 32000; framesize = ac3_sizecod2_to_framesize[frmsizecod / 2] * 2; break; default: return GF_FALSE; } hdr->sample_rate = freq; hdr->framesize = framesize; if (full_parse) { hdr->streams[0].bsid = bsid; hdr->streams[0].bsmod = bsmod; hdr->streams[0].acmod = ac3_mod; hdr->streams[0].lfon = 0; hdr->streams[0].fscod = fscod; hdr->brcode = frmsizecod / 2; } if (ac3_mod >= 2 * sizeof(ac3_mod_to_chans) / sizeof(u32)) return GF_FALSE; hdr->channels = ac3_mod_to_chans[ac3_mod]; if ((ac3_mod & 0x1) && (ac3_mod != 1)) gf_bs_read_int_log(bs, 2, "cmixlev"); if (ac3_mod & 0x4) gf_bs_read_int_log(bs, 2, "surmixlev"); if (ac3_mod == 0x2) gf_bs_read_int_log(bs, 2, "dsurmod"); if (gf_bs_read_int_log(bs, 1, "lfeon")) { hdr->channels += 1; hdr->streams[0].lfon = 1; } gf_bs_seek(bs, pos); return GF_TRUE; } GF_EXPORT Bool gf_eac3_parser_bs(GF_BitStream *bs, GF_AC3Config *hdr, Bool full_parse) { u32 fscod, bsid, ac3_mod, freq, framesize, syncword, substreamid, lfon, channels, numblkscod, strmtyp, frmsiz; u64 pos; u16 chanmap; static u32 numblks[4] = {1, 2, 3, 6}; if (!hdr || (gf_bs_available(bs) < 6)) return GF_FALSE; if (!AC3_FindSyncCodeBS(bs)) return GF_FALSE; pos = gf_bs_get_position(bs); framesize = 0; numblkscod = 0; memset(hdr, 0, sizeof(GF_AC3Config)); block: syncword = gf_bs_read_u16(bs); if (syncword != 0x0B77) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[E-AC3] Wrong sync word detected (0x%X - expecting 0x0B77).\n", syncword)); return GF_FALSE; } strmtyp = gf_bs_read_int_log(bs, 2, "strmtyp"); substreamid = gf_bs_read_int_log(bs, 3, "substreamid"); //next main (independent) AU, done with this frame if ((strmtyp!=0x1) && ((hdr->substreams >> substreamid) & 0x1)) { hdr->framesize = framesize; gf_bs_seek(bs, pos); return GF_TRUE; } frmsiz = gf_bs_read_int_log(bs, 11, "frmsiz"); framesize += 2 * (1 + frmsiz); fscod = gf_bs_read_int_log(bs, 2, "fscod"); if (fscod == 0x3) { fscod = gf_bs_read_int_log(bs, 2, "fscod2"); numblkscod += 6; } else { numblkscod += gf_bs_read_int_log(bs, 2, "numblkscod"); } assert(numblkscod <= 9); if ((hdr->substreams >> substreamid) & 0x1) { //we still have sync frames following if (substreamid) { if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) { gf_bs_seek(bs, pos); return GF_FALSE; } goto block; } } hdr->substreams |= (1 << substreamid); switch (fscod) { case 0: freq = 48000; break; case 1: freq = 44100; break; case 2: freq = 32000; break; default: return GF_FALSE; } ac3_mod = gf_bs_read_int_log(bs, 3, "ac3_mod"); lfon = gf_bs_read_int_log(bs, 
1, "lfon"); bsid = gf_bs_read_int_log(bs, 5, "bsid"); if (!substreamid && (bsid != 16/*E-AC3*/)) return GF_FALSE; gf_bs_read_int_log(bs, 5, "dialnorm"); if (gf_bs_read_int_log(bs, 1, "compre")) { gf_bs_read_int_log(bs, 8, "compr"); } if (ac3_mod==0) { gf_bs_read_int_log(bs, 5, "dialnorm2"); if (gf_bs_read_int_log(bs, 1, "compr2e")) { gf_bs_read_int_log(bs, 8, "compr2"); } } chanmap = 0; if (strmtyp==0x1) { if (gf_bs_read_int_log(bs, 1, "chanmape")) { chanmap = gf_bs_read_int_log(bs, 16, "chanmap"); } } channels = ac3_mod_to_chans[ac3_mod]; if (lfon) channels += 1; hdr->bitrate = 0; hdr->sample_rate = freq; hdr->framesize = framesize; if (strmtyp != 1) { hdr->channels = channels; hdr->streams[substreamid].lfon = lfon; if (full_parse) { hdr->streams[substreamid].bsid = bsid; hdr->streams[substreamid].bsmod = 0; hdr->streams[substreamid].acmod = ac3_mod; hdr->streams[substreamid].fscod = fscod; hdr->brcode = 0; } hdr->nb_streams++; //not clear if this is only for the independent streams hdr->brcode += ((frmsiz+1) * freq) / (numblks[numblkscod]*16) / 1000; if (lfon) hdr->channels += 1; } else { hdr->streams[substreamid].nb_dep_sub = substreamid; hdr->streams[substreamid].chan_loc |= chanmap; } if (numblkscod < 6) { //we need 6 blocks to make a sample if (gf_bs_seek(bs, pos + framesize) != GF_OK) { gf_bs_seek(bs, pos); return GF_FALSE; } if ((gf_bs_available(bs) < 6) || !AC3_FindSyncCodeBS(bs)) return GF_FALSE; goto block; } gf_bs_seek(bs, pos); return GF_TRUE; } #endif /*GPAC_DISABLE_AV_PARSERS*/ u32 gf_id3_read_size(GF_BitStream *bs) { u32 size = 0; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); size<<=7; gf_bs_read_int(bs, 1); size |= gf_bs_read_int(bs, 7); return size; } #if !defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG) /* Vorbis parser */ static u32 vorbis_book_maptype1_quantvals(u32 entries, u32 dim) { u32 vals = (u32)floor(pow(entries, 1.0 / dim)); while (1) { u32 acc = 1; u32 acc1 = 1; u32 i; for (i = 0; i < dim; i++) { acc *= vals; acc1 *= vals + 1; } if (acc <= entries && acc1 > entries) return (vals); else { if (acc > entries) vals--; else vals++; } } } static u32 ilog(u32 v, Bool dec) { u32 ret = 0; if (dec && v) --v; while (v) { ret++; v >>= 1; } return (ret); } static u32 icount(u32 v) { u32 ret = 0; while (v) { ret += v & 1; v >>= 1; } return(ret); } GF_EXPORT Bool gf_vorbis_parse_header(GF_VorbisParser *vp, u8 *data, u32 data_len) { u32 pack_type, i, j, k, times, nb_part, nb_books, nb_modes; u32 l; char szNAME[8]; oggpack_buffer opb; oggpack_readinit(&opb, (u8*)data, data_len); pack_type = oggpack_read(&opb, 8); i = 0; while (i < 6) { szNAME[i] = oggpack_read(&opb, 8); i++; } szNAME[i] = 0; if (strcmp(szNAME, "vorbis")) { return GF_FALSE; } switch (pack_type) { case 0x01: vp->version = oggpack_read(&opb, 32); if (vp->version != 0) { return GF_FALSE; } vp->channels = oggpack_read(&opb, 8); vp->sample_rate = oggpack_read(&opb, 32); vp->max_r = oggpack_read(&opb, 32); vp->avg_r = oggpack_read(&opb, 32); vp->low_r = oggpack_read(&opb, 32); vp->min_block = 1<<oggpack_read(&opb, 4); vp->max_block = 1<<oggpack_read(&opb, 4); if (vp->sample_rate < 1 || vp->channels < 1 || vp->min_block < 8 || vp->max_block < vp->min_block || oggpack_read(&opb, 1) != 1) { return GF_FALSE; } vp->nb_init=1; return GF_TRUE; case 0x03: /*trash comments*/ vp->nb_init++; return GF_TRUE; case 0x05: /*need at least bitstream header to make sure we're parsing 
the right thing*/ if (!vp->nb_init) return GF_FALSE; break; default: return GF_FALSE; } /*OK parse codebook*/ nb_books = oggpack_read(&opb, 8) + 1; /*skip vorbis static books*/ for (i = 0; i < nb_books; i++) { u32 map_type, qb, qq; u32 entries, dim; oggpack_read(&opb, 24); dim = oggpack_read(&opb, 16); entries = oggpack_read(&opb, 24); if ((s32)entries < 0) entries = 0; if (oggpack_read(&opb, 1) == 0) { if (oggpack_read(&opb, 1)) { for (j = 0; j < entries; j++) { if (oggpack_read(&opb, 1)) { oggpack_read(&opb, 5); } } } else { for (j = 0; j < entries; j++) oggpack_read(&opb, 5); } } else { oggpack_read(&opb, 5); for (j = 0; j < entries;) { u32 num = oggpack_read(&opb, ilog(entries - j, GF_FALSE)); for (k = 0; k < num && j < entries; k++, j++) { } } } switch ((map_type = oggpack_read(&opb, 4))) { case 0: break; case 1: case 2: oggpack_read(&opb, 32); oggpack_read(&opb, 32); qq = oggpack_read(&opb, 4) + 1; oggpack_read(&opb, 1); if (map_type == 1) qb = vorbis_book_maptype1_quantvals(entries, dim); else if (map_type == 2) qb = entries * dim; else qb = 0; for (j = 0; j < qb; j++) oggpack_read(&opb, qq); break; } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) oggpack_read(&opb, 16); times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 type = oggpack_read(&opb, 16); if (type) { u32 *parts, *class_dims, count, rangebits; u32 max_class = 0; nb_part = oggpack_read(&opb, 5); parts = (u32*)gf_malloc(sizeof(u32) * nb_part); for (j = 0; j < nb_part; j++) { parts[j] = oggpack_read(&opb, 4); if (max_class < parts[j]) max_class = parts[j]; } class_dims = (u32*)gf_malloc(sizeof(u32) * (max_class + 1)); for (j = 0; j < max_class + 1; j++) { u32 class_sub; class_dims[j] = oggpack_read(&opb, 3) + 1; class_sub = oggpack_read(&opb, 2); if (class_sub) oggpack_read(&opb, 8); for (k = 0; k < (u32)(1 << class_sub); k++) oggpack_read(&opb, 8); } oggpack_read(&opb, 2); rangebits = oggpack_read(&opb, 4); count = 0; for (j = 0, k = 0; j < nb_part; j++) { count += class_dims[parts[j]]; for (; k < count; k++) oggpack_read(&opb, rangebits); } gf_free(parts); gf_free(class_dims); } else { oggpack_read(&opb, 8 + 16 + 16 + 6 + 8); nb_books = oggpack_read(&opb, 4) + 1; for (j = 0; j < nb_books; j++) oggpack_read(&opb, 8); } } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 acc = 0; oggpack_read(&opb, 16);/*type*/ oggpack_read(&opb, 24); oggpack_read(&opb, 24); oggpack_read(&opb, 24); nb_part = oggpack_read(&opb, 6) + 1; oggpack_read(&opb, 8); for (j = 0; j < nb_part; j++) { u32 cascade = oggpack_read(&opb, 3); if (oggpack_read(&opb, 1)) cascade |= (oggpack_read(&opb, 5) << 3); acc += icount(cascade); } for (j = 0; j < acc; j++) oggpack_read(&opb, 8); } times = oggpack_read(&opb, 6) + 1; for (i = 0; i < times; i++) { u32 sub_maps = 1; oggpack_read(&opb, 16); if (oggpack_read(&opb, 1)) sub_maps = oggpack_read(&opb, 4) + 1; if (oggpack_read(&opb, 1)) { u32 nb_steps = oggpack_read(&opb, 8) + 1; for (j = 0; j < nb_steps; j++) { oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); oggpack_read(&opb, ilog(vp->channels, GF_TRUE)); } } oggpack_read(&opb, 2); if (sub_maps>1) { for(l=0; l<vp->channels; l++) oggpack_read(&opb, 4); } for (j = 0; j < sub_maps; j++) { oggpack_read(&opb, 8); oggpack_read(&opb, 8); oggpack_read(&opb, 8); } } nb_modes = oggpack_read(&opb, 6) + 1; for (i = 0; i < nb_modes; i++) { vp->mode_flag[i] = oggpack_read(&opb, 1); oggpack_read(&opb, 16); oggpack_read(&opb, 16); oggpack_read(&opb, 8); } vp->modebits = 0; j = nb_modes; while (j > 1) { vp->modebits++; j >>= 
1; } return GF_TRUE; } GF_EXPORT u32 gf_vorbis_check_frame(GF_VorbisParser *vp, u8 *data, u32 data_length) { s32 block_size; oggpack_buffer opb; if (!vp) return 0; oggpack_readinit(&opb, (unsigned char*)data, data_length); /*not audio*/ if (oggpack_read(&opb, 1) != 0) return 0; block_size = oggpack_read(&opb, vp->modebits); if (block_size == -1) return 0; return ((vp->mode_flag[block_size]) ? vp->max_block : vp->min_block) / (2); } /*call with vorbis header packets - initializes the parser on success, leave it to NULL otherwise returns 1 if success, 0 if error.*/ Bool gf_opus_parse_header(GF_OpusParser *opus, u8 *data, u32 data_len) { char tag[9]; GF_BitStream *bs = gf_bs_new(data, data_len, GF_BITSTREAM_READ); gf_bs_read_data(bs, tag, 8); tag[8]=0; if (memcmp(data, "OpusHead", sizeof(char)*8)) { gf_bs_del(bs); return GF_FALSE; } /*Identification Header*/ opus->version = gf_bs_read_u8(bs); /*version*/ if (opus->version != 1) { gf_bs_del(bs); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[Opus] Unsupported version %d\n", opus->version)); return GF_FALSE; } opus->OutputChannelCount = gf_bs_read_u8(bs); opus->PreSkip = gf_bs_read_u16_le(bs); opus->InputSampleRate = gf_bs_read_u32_le(bs); opus->OutputGain = gf_bs_read_u16_le(bs); opus->ChannelMappingFamily = gf_bs_read_u8(bs); if (opus->ChannelMappingFamily != 0) { opus->StreamCount = gf_bs_read_u8(bs); opus->CoupledCount = gf_bs_read_u8(bs); gf_bs_read_data(bs, (char *) opus->ChannelMapping, opus->OutputChannelCount); } gf_bs_del(bs); return GF_TRUE; } /*returns 0 if init error or not a vorbis frame, otherwise returns the number of audio samples in this frame*/ u32 gf_opus_check_frame(GF_OpusParser *op, u8 *data, u32 data_length) { u32 block_size; if (!memcmp(data, "OpusHead", sizeof(char)*8)) return 0; if (!memcmp(data, "OpusTags", sizeof(char)*8)) return 0; /*consider the whole packet as Ogg packets and ISOBMFF samples for Opus are framed similarly*/ static const int OpusFrameDurIn48k[] = { 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 1920, 2880, 480, 960, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, 120, 240, 480, 960, }; int TOC_config = (data[0] & 0xf8) >> 3; //int s = (data[0] & 0x04) >> 2; block_size = OpusFrameDurIn48k[TOC_config]; int c = data[0] & 0x03; if (c == 1 || c == 2) { block_size *= 2; } else if (c == 3) { /*unknown number of frames*/ int num_frames = data[1] & 0x3f; block_size *= num_frames; } return block_size; } #endif /*!defined(GPAC_DISABLE_AV_PARSERS) && !defined (GPAC_DISABLE_OGG)*/ u64 gf_mpegh_escaped_value(GF_BitStream *bs, u32 nBits1, u32 nBits2, u32 nBits3) { u64 value = gf_bs_read_int(bs, nBits1); if (value == (1<<nBits1)-1) { u32 vadd = gf_bs_read_int(bs, nBits2); value += vadd; if (vadd == (1<<nBits2)-1) { vadd = gf_bs_read_int(bs, nBits3); value += vadd; } } return value; } GF_EXPORT s32 gf_mpegh_get_mhas_pl(u8 *ptr, u32 size, u64 *ch_layout) { s32 PL = -1; GF_BitStream *bs; u32 i; s32 sync_pos=-1; for (i=0; i<size-3; i++) { if ((ptr[i]==0xC0) && (ptr[i+1]== 0x01) && (ptr[i+2]==0xA5)) { sync_pos = i; break; } } if (sync_pos<0) return 0; if (ch_layout) *ch_layout = 0; bs = gf_bs_new(ptr, size, GF_BITSTREAM_READ); gf_bs_skip_bytes(bs, sync_pos); while (gf_bs_available(bs)) { u32 type = (u32) gf_mpegh_escaped_value(bs, 3, 8, 8); /*u64 label = */gf_mpegh_escaped_value(bs, 2, 8, 32); u64 mh_size = gf_mpegh_escaped_value(bs, 11, 24, 24); if (mh_size > gf_bs_available(bs)) break; //MHAS config if (type==1) { PL = gf_bs_read_int(bs, 8); if (ch_layout) { u32 idx = gf_bs_read_int(bs, 5); if 
(idx==0x1f) gf_bs_read_int(bs, 24); /*idx = */gf_bs_read_int(bs, 3); gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 1); //speaker config idx = gf_bs_read_int(bs, 2); if (idx == 0) { *ch_layout = gf_audio_fmt_get_layout_from_cicp( gf_bs_read_int(bs, 6) ); } } break; } gf_bs_skip_bytes(bs, mh_size); } gf_bs_del(bs); return PL; } GF_EXPORT void gf_media_vvc_parse_sei(char *buffer, u32 nal_size, VVCState *vvc) { gf_hevc_vvc_parse_sei(buffer, nal_size, NULL, vvc); } static Bool vvc_parse_nal_header(GF_BitStream *bs, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { u32 val; val = gf_bs_read_int_log(bs, 1, "forbidden_zero"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 1, "resevred0"); if (val) return GF_FALSE; val = gf_bs_read_int_log(bs, 6, "layerID"); if (layer_id) *layer_id = val; val = gf_bs_read_int_log(bs, 5, "nuh_type"); if (nal_unit_type) *nal_unit_type = val; val = gf_bs_read_int_log(bs, 3, "temporalID"); if (!val) return GF_FALSE; val -= 1; if (temporal_id) *temporal_id = val; return GF_TRUE; } static void vvc_profile_tier_level(GF_BitStream *bs, VVC_ProfileTierLevel *ptl, u32 idx) { u32 i; if (ptl->pt_present) { ptl->general_profile_idc = gf_bs_read_int_log_idx(bs, 7, "general_profile_idc", idx); ptl->general_tier_flag = gf_bs_read_int_log_idx(bs, 1, "general_tier_flag", idx); } ptl->general_level_idc = gf_bs_read_int_log_idx(bs, 8, "general_level_idc", idx); ptl->frame_only_constraint = gf_bs_read_int_log_idx(bs, 1, "frame_only_constraint", idx); ptl->multilayer_enabled = gf_bs_read_int_log_idx(bs, 1, "multilayer_enabled", idx); //general constraints info - max size if 1 + 81 + 8 + 255 if (ptl->pt_present) { // general_constraints_info ptl->gci_present = gf_bs_read_int_log_idx(bs, 1, "gci_present", idx); if (ptl->gci_present) { u8 res; ptl->gci[0] = 0x80; ptl->gci[0] |= gf_bs_read_int(bs, 7); //81-7 = 74 bits till reserved gf_bs_read_data(bs, ptl->gci+1, 9); ptl->gci[10] = gf_bs_read_int(bs, 2)<<6; //skip extensions ptl->gci[11] = 0; res = gf_bs_read_int(bs, 8); gf_bs_read_int(bs, res); } gf_bs_align(bs); } for (i=ptl->ptl_max_tid; i>0; i--) { ptl->sub_ptl[i-1].level_present_flag = gf_bs_read_int_log_idx2(bs, 1, "level_present_flag", idx, i); } gf_bs_align(bs); for (i=ptl->ptl_max_tid; i>0; i--) { if (ptl->sub_ptl[i-1].level_present_flag) ptl->sub_ptl[i-1].sublayer_level_idc = gf_bs_read_int_log_idx2(bs, 8, "sublayer_level_idc", idx, i); } if (ptl->pt_present) { ptl->num_sub_profiles = gf_bs_read_int_log_idx(bs, 8, "num_sub_profiles", idx); for (i=0; i<ptl->num_sub_profiles; i++) { ptl->sub_profile_idc[i] = gf_bs_read_int_log_idx2(bs, 32, "sub_profile_idc", idx, i); } } } static s32 gf_media_vvc_read_vps_bs_internal(GF_BitStream *bs, VVCState *vvc, Bool stop_at_vps_ext) { u32 i, j; s32 vps_id; VVC_VPS *vps; Bool vps_default_ptl_dpb_hrd_max_tid_flag=0; //nalu header already parsed vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) return -1; if (!vps_id) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] VPS ID 0 is forbidden\n")); return -1; } vps = &vvc->vps[vps_id]; if (!vps->state) { vps->id = vps_id; vps->state = 1; } vps->max_layers = 1 + gf_bs_read_int_log(bs, 6, "max_layers"); if (vps->max_layers > MAX_LHVC_LAYERS) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] sorry, %d layers in VPS but only %d supported\n", vps->max_layers, MAX_LHVC_LAYERS)); return -1; } vps->max_sub_layers = gf_bs_read_int_log(bs, 3, "max_sub_layers_minus1") + 1; if ((vps->max_layers>1) && (vps->max_sub_layers>1)) vps_default_ptl_dpb_hrd_max_tid_flag = gf_bs_read_int_log(bs, 
1, "vps_default_ptl_dpb_hrd_max_tid_flag"); if (vps->max_layers>1) vps->all_layers_independent = gf_bs_read_int_log(bs, 1, "all_layers_independent"); for (i=0; i<vps->max_layers; i++) { u32 layer_id = gf_bs_read_int_log_idx(bs, 6, "layer_id", i); if (layer_id>vps->max_layer_id) vps->max_layer_id = layer_id; if (i && !vps->all_layers_independent) { Bool layer_indep = gf_bs_read_int_log_idx(bs, 1, "layer_independent", i); if (!layer_indep) { Bool vps_max_tid_ref_present_flag = gf_bs_read_int_log_idx(bs, 1, "vps_max_tid_ref_present_flag", i); for (j=0; j<i; j++) { Bool vps_direct_ref_layer_flag = gf_bs_read_int_log_idx2(bs, 1, "vps_direct_ref_layer_flag", i, j); if (vps_max_tid_ref_present_flag && vps_direct_ref_layer_flag) { gf_bs_read_int_log_idx2(bs, 3, "vps_max_tid_il_ref_pics_plus1", i, j); } } } } } vps->num_ptl = 1; if (vps->max_layers > 1) { if (vps->all_layers_independent) { vps->each_layer_is_ols = gf_bs_read_int_log(bs, 1, "each_layer_is_ols"); } if (!vps->each_layer_is_ols) { u32 vps_ols_mode_idc = 2; if (!vps->all_layers_independent) { vps_ols_mode_idc = gf_bs_read_int_log(bs, 2, "vps_ols_mode_idc"); } if (vps_ols_mode_idc==2) { u8 vps_num_output_layer_sets = 2 + gf_bs_read_int_log(bs, 8, "vps_num_output_layer_sets_minus2"); for (i=0; i<vps_num_output_layer_sets; i++) { for (j=0; j<vps->max_layers; j++) { gf_bs_read_int_log_idx2(bs, 1, "vps_ols_output_layer_flag", i, j); } } } } vps->num_ptl = 1 + gf_bs_read_int_log(bs, 8, "num_ptl_minus1"); } vps->ptl[0].pt_present = 1; for (i=0; i<vps->num_ptl; i++) { if (i) vps->ptl[i].pt_present = gf_bs_read_int_log_idx(bs, 1, "pt_present", i); if (!vps_default_ptl_dpb_hrd_max_tid_flag) vps->ptl[i].ptl_max_tid = gf_bs_read_int_log_idx(bs, 3, "ptl_max_tid", i); else vps->ptl[i].ptl_max_tid = vps->max_sub_layers - 1;; } //align gf_bs_align(bs); for (i=0; i<vps->num_ptl; i++) { vvc_profile_tier_level(bs, &vps->ptl[i], i); } //TODO, parse multilayer stuff return vps_id; } static s32 gf_media_vvc_read_sps_bs_internal(GF_BitStream *bs, VVCState *vvc, u8 layer_id, u32 *vui_flag_pos) { s32 vps_id, sps_id; u32 i, CtbSizeY; VVC_SPS *sps; u8 sps_ptl_dpb_hrd_params_present_flag; if (vui_flag_pos) *vui_flag_pos = 0; sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if ((sps_id<0) || (sps_id >= 16)) { return -1; } vps_id = gf_bs_read_int_log(bs, 4, "vps_id"); if ((vps_id<0) || (vps_id >= 16)) { return -1; } if (!vps_id && !vvc->vps[0].state) { vvc->vps[0].state = 1; vvc->vps[0].num_ptl = 1; vvc->vps[0].max_layers = 1; vvc->vps[0].all_layers_independent = 1; } sps = &vvc->sps[sps_id]; if (!sps->state) { sps->state = 1; sps->id = sps_id; sps->vps_id = vps_id; } sps->max_sublayers = 1 + gf_bs_read_int_log(bs, 3, "max_sublayers_minus1"); sps->chroma_format_idc = gf_bs_read_int_log(bs, 2, "chroma_format_idc"); sps->log2_ctu_size = 5 + gf_bs_read_int_log(bs, 2, "log2_ctu_size_minus5"); CtbSizeY = 1<<sps->log2_ctu_size; sps_ptl_dpb_hrd_params_present_flag = gf_bs_read_int_log(bs, 1, "sps_ptl_dpb_hrd_params_present_flag"); if (sps_ptl_dpb_hrd_params_present_flag) { VVC_ProfileTierLevel ptl, *p_ptl; if (sps->vps_id) { p_ptl = &ptl; } else { p_ptl = &vvc->vps[0].ptl[0]; } memset(p_ptl, 0, sizeof(VVC_ProfileTierLevel)); p_ptl->pt_present = 1; p_ptl->ptl_max_tid = sps->max_sublayers; vvc_profile_tier_level(bs, p_ptl, 0); } sps->gdr_enabled = gf_bs_read_int_log(bs, 1, "gdr_enabled"); sps->ref_pic_resampling = gf_bs_read_int_log(bs, 1, "ref_pic_resampling"); if (sps->ref_pic_resampling) sps->res_change_in_clvs = gf_bs_read_int_log(bs, 1, "res_change_in_clvs"); sps->width = 
gf_bs_read_ue_log(bs, "width"); sps->height = gf_bs_read_ue_log(bs, "height"); sps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_present_flag"); if (sps->conf_window) { sps->cw_left = gf_bs_read_ue_log(bs, "conformance_window_left"); sps->cw_right = gf_bs_read_ue_log(bs, "conformance_window_right"); sps->cw_top = gf_bs_read_ue_log(bs, "conformance_window_top"); sps->cw_bottom = gf_bs_read_ue_log(bs, "conformance_window_bottom"); } sps->subpic_info_present = gf_bs_read_int_log(bs, 1, "subpic_info_present"); if (sps->subpic_info_present) { sps->nb_subpics = 1 + gf_bs_read_ue_log(bs, "nb_subpics_minus1"); if (sps->nb_subpics>1) { u32 tmpWidthVal, tmpHeightVal; sps->independent_subpic_flags = gf_bs_read_int_log(bs, 1, "independent_subpic_flags"); sps->subpic_same_size = gf_bs_read_int_log(bs, 1, "subpic_same_size"); tmpWidthVal = (sps->width + CtbSizeY-1) / CtbSizeY; tmpWidthVal = gf_get_bit_size(tmpWidthVal); tmpHeightVal = (sps->height + CtbSizeY-1) / CtbSizeY; tmpHeightVal = gf_get_bit_size(tmpHeightVal); for (i=0; i<sps->nb_subpics; i++) { if( !sps->subpic_same_size || !i) { if (i && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_ctu_top_left_x"); if (i && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_ctu_top_left_y"); if ((i+1 < sps->nb_subpics) && (sps->width > CtbSizeY)) gf_bs_read_int_log(bs, tmpWidthVal, "subpic_width_minus1"); if ((i+1 < sps->nb_subpics) && (sps->height > CtbSizeY)) gf_bs_read_int_log(bs, tmpHeightVal, "subpic_height_minus1"); } if (!sps->independent_subpic_flags) { gf_bs_read_int_log(bs, 1, "subpic_treated_as_pic_flag"); gf_bs_read_int_log(bs, 1, "loop_filter_across_subpic_enabled_flag"); } } sps->subpicid_len = gf_bs_read_ue_log(bs, "subpic_id_len_minus1") + 1; sps->subpicid_mapping_explicit = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_explicitly_signalled_flag"); if (sps->subpicid_mapping_explicit) { sps->subpicid_mapping_present = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (sps->subpicid_mapping_present) { for (i=0; i<sps->nb_subpics; i++) { gf_bs_read_ue_log(bs, "subpic_id"); } } } } } sps->bitdepth = gf_bs_read_ue_log(bs, "bitdepth_minus8") + 8; gf_bs_read_int_log(bs, 1, "entropy_coding_sync_enabled_flag"); gf_bs_read_int_log(bs, 1, "entry_point_offsets_present_flag"); sps->log2_max_poc_lsb = 4 + gf_bs_read_int_log(bs, 4, "log2_max_poc_lsb_minus4"); if ((sps->poc_msb_cycle_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_flag"))) sps->poc_msb_cycle_len = 1 + gf_bs_read_ue_log(bs, "poc_msb_cycle_len_minus1"); u8 sps_num_extra_ph_bits = 8 * gf_bs_read_int_log(bs, 2, "sps_num_extra_ph_bytes"); for (i=0; i<sps_num_extra_ph_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_ph_bit_present_flag", 1)) sps->ph_num_extra_bits++; } u8 sps_num_extra_sh_bits = 8 * gf_bs_read_int_log(bs, 2, "num_extra_sh_bytes"); for (i=0; i<sps_num_extra_sh_bits; i++) { if (gf_bs_read_int_log_idx(bs, 1, "extra_sh_bit_present_flag", i)) sps->sh_num_extra_bits++; } if (sps_ptl_dpb_hrd_params_present_flag) { u8 sps_sublayer_dpb_params_flag = 0; if (sps->max_sublayers>1) { sps_sublayer_dpb_params_flag = gf_bs_read_int_log(bs, 1, "sps_sublayer_dpb_params_flag"); } for (i=(sps_sublayer_dpb_params_flag ? 
0 : sps->max_sublayers-1); i < sps->max_sublayers; i++ ) { gf_bs_read_ue_log_idx(bs, "dpb_max_dec_pic_buffering_minus1", i); gf_bs_read_ue_log_idx(bs, "dpb_max_num_reorder_pics", i); gf_bs_read_ue_log_idx(bs, "dpb_max_latency_increase_plus1", i); } } gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_partition_constraints_override_enabled_flag"); gf_bs_read_ue_log(bs, "sps_log2_min_luma_coding_block_size_minus2"); u8 sps_max_mtt_hierarchy_depth_intra_slice_luma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_luma"); if (sps_max_mtt_hierarchy_depth_intra_slice_luma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_luma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_luma"); } u8 sps_qtbtt_dual_tree_intra_flag = 0; if (sps->chroma_format_idc) { sps_qtbtt_dual_tree_intra_flag = gf_bs_read_int_log(bs, 1, "sps_qtbtt_dual_tree_intra_flag"); } if (sps_qtbtt_dual_tree_intra_flag) { gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_intra_slice_chroma"); u8 sps_max_mtt_hierarchy_depth_intra_slice_chroma = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_intra_slice_chroma"); if( sps_max_mtt_hierarchy_depth_intra_slice_chroma != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_intra_slice_chroma"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_intra_slice_chroma"); } } gf_bs_read_ue_log(bs, "sps_log2_diff_min_qt_min_cb_inter_slice"); u8 sps_max_mtt_hierarchy_depth_inter_slice = gf_bs_read_ue_log(bs, "sps_max_mtt_hierarchy_depth_inter_slice"); if (sps_max_mtt_hierarchy_depth_inter_slice != 0) { gf_bs_read_ue_log(bs, "sps_log2_diff_max_bt_min_qt_inter_slice"); gf_bs_read_ue_log(bs, "sps_log2_diff_max_tt_min_qt_inter_slice"); } //u8 sps_max_luma_transform_size_64_flag = 0; if (CtbSizeY > 32) { /*sps_max_luma_transform_size_64_flag = */gf_bs_read_int_log(bs, 1, "sps_max_luma_transform_size_64_flag"); } u8 sps_transform_skip_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_transform_skip_enabled_flag"); if (sps_transform_skip_enabled_flag) { gf_bs_read_ue_log(bs, "sps_log2_transform_skip_max_size_minus2"); gf_bs_read_int_log(bs, 1, "sps_bdpcm_enabled_flag"); } if (gf_bs_read_int_log(bs, 1, "sps_mts_enabled_flag")) { gf_bs_read_int_log(bs, 1, "sps_explicit_mts_intra_enabled_flag"); gf_bs_read_int_log(bs, 1, "sps_explicit_mts_inter_enabled_flag"); } gf_bs_read_int_log(bs, 1, "sps_lfnst_enabled_flag"); if (sps->chroma_format_idc) { u8 sps_joint_cbcr_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_joint_cbcr_enabled_flag"); u8 sps_same_qp_table_for_chroma_flag = gf_bs_read_int_log(bs, 1, "sps_same_qp_table_for_chroma_flag"); u32 numQpTables = sps_same_qp_table_for_chroma_flag ? 1 : (sps_joint_cbcr_enabled_flag ? 3 : 2); for (i=0; i<numQpTables; i++) { gf_bs_read_se_log_idx(bs, "sps_qp_table_start_minus26", i); u32 j, sps_num_points_in_qp_table = 1 + gf_bs_read_ue_log_idx(bs, "sps_num_points_in_qp_table_minus1", i); for (j=0; j<sps_num_points_in_qp_table; j++) { gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_in_val_minus1", i, j); gf_bs_read_ue_log_idx2(bs, "sps_delta_qp_diff_val", i, j); } } } gf_bs_read_int_log(bs, 1, "sps_sao_enabled_flag"); sps->alf_enabled_flag = gf_bs_read_int_log(bs, 1, "sps_alf_enabled_flag"); if (sps->alf_enabled_flag && sps->chroma_format_idc) { gf_bs_read_int_log(bs, 1, "sps_ccalf_enabled_flag"); } /*! 
TODO parse the rest !*/ return sps_id; } static s32 gf_media_vvc_read_pps_bs_internal(GF_BitStream *bs, VVCState *vvc) { u32 i; s32 pps_id; VVC_PPS *pps; //NAL header already read pps_id = gf_bs_read_int_log(bs, 6, "pps_id"); if ((pps_id < 0) || (pps_id >= 64)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong PPS ID %d in PPS\n", pps_id)); return -1; } pps = &vvc->pps[pps_id]; if (!pps->state) { pps->id = pps_id; pps->state = 1; } pps->sps_id = gf_bs_read_int_log(bs, 4, "sps_id"); if ((pps->sps_id<0) || (pps->sps_id >= 16)) { GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("[VVC] wrong SPS ID %d in PPS\n", pps->sps_id)); pps->sps_id=0; return -1; } vvc->sps_active_idx = pps->sps_id; /*set active sps*/ pps->mixed_nal_types = gf_bs_read_int_log(bs, 1, "mixed_nal_types"); pps->width = gf_bs_read_ue_log(bs, "width"); pps->height = gf_bs_read_ue_log(bs, "height"); pps->conf_window = gf_bs_read_int_log(bs, 1, "conformance_window_flag"); if (pps->conf_window) { pps->cw_left = gf_bs_read_ue_log(bs, "conf_win_left_offset"); pps->cw_right = gf_bs_read_ue_log(bs, "conf_win_right_offset"); pps->cw_top = gf_bs_read_ue_log(bs, "conf_win_top_offset"); pps->cw_bottom = gf_bs_read_ue_log(bs, "conf_win_bottom_offset"); } //scaling window if (gf_bs_read_int_log(bs, 1, "scaling_window_explicit_signalling_flag")) { gf_bs_read_se_log(bs, "scaling_win_left_offset"); gf_bs_read_se_log(bs, "scaling_win_right_offset"); gf_bs_read_se_log(bs, "scaling_win_top_offset"); gf_bs_read_se_log(bs, "scaling_win_bottom_offset"); } pps->output_flag_present_flag = gf_bs_read_int_log(bs, 1, "output_flag_present_flag"); pps->no_pic_partition_flag = gf_bs_read_int_log(bs, 1, "no_pic_partition_flag"); pps->subpic_id_mapping_present_flag = gf_bs_read_int_log(bs, 1, "subpic_id_mapping_present_flag"); if (pps->subpic_id_mapping_present_flag) { u32 pps_subpic_id_len, pps_num_subpics=0; if (!pps->no_pic_partition_flag) { pps_num_subpics = 1+gf_bs_read_ue_log(bs, "pps_num_subpics_minus1"); } pps_subpic_id_len = 1 + gf_bs_read_ue(bs); for (i=0; i<pps_num_subpics; i++) { gf_bs_read_int_log_idx(bs, pps_subpic_id_len, "subpic_id", i); } } if (!pps->no_pic_partition_flag) { gf_bs_read_int_log(bs, 2, "pps_log2_ctu_size_minus5"); u32 num_exp_tile_columns = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_columns_minus1"); u32 num_exp_tile_rows = 1 + gf_bs_read_ue_log(bs, "num_exp_tile_rows_minus1"); for (i=0; i<num_exp_tile_columns; i++) gf_bs_read_ue_log_idx(bs, "tile_column_width_minus1", i); for (i=0; i<num_exp_tile_rows; i++) gf_bs_read_ue_log_idx(bs, "tile_row_height_minus1", i); //todo parse the rest return pps_id; } //todo parse the rest return pps_id; } static s32 vvc_parse_picture_header(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { u32 pps_id; si->irap_or_gdr_pic = gf_bs_read_int_log(bs, 1, "irap_or_gdr_pic"); si->non_ref_pic = gf_bs_read_int_log(bs, 1, "non_ref_pic"); if (si->irap_or_gdr_pic) si->gdr_pic = gf_bs_read_int_log(bs, 1, "gdr_pic"); if ((si->inter_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "inter_slice_allowed_flag"))) si->intra_slice_allowed_flag = gf_bs_read_int_log(bs, 1, "intra_slice_allowed_flag"); pps_id = gf_bs_read_ue_log(bs, "pps_id"); if ((pps_id<0) || (pps_id >= 64)) return -1; si->pps = &vvc->pps[pps_id]; si->sps = &vvc->sps[si->pps->sps_id]; si->poc_lsb = gf_bs_read_int_log(bs, si->sps->log2_max_poc_lsb, "poc_lsb"); si->recovery_point_valid = 0; si->gdr_recovery_count = 0; if (si->gdr_pic) { si->recovery_point_valid = 1; si->gdr_recovery_count = gf_bs_read_ue_log(bs, "gdr_recovery_count"); } gf_bs_read_int_log(bs, 
si->sps->ph_num_extra_bits, "ph_extra_bits"); if (si->sps->poc_msb_cycle_flag) { if ( (si->poc_msb_cycle_present_flag = gf_bs_read_int_log(bs, 1, "poc_msb_cycle_present_flag"))) { si->poc_msb_cycle = gf_bs_read_int_log(bs, si->sps->poc_msb_cycle_len, "poc_msb_cycle"); } } return 0; } static s32 vvc_parse_slice(GF_BitStream *bs, VVCState *vvc, VVCSliceInfo *si) { // u32 CurrSubpicIdx = 0; si->picture_header_in_slice_header_flag = gf_bs_read_int_log(bs, 1, "picture_header_in_slice_header_flag"); if (si->picture_header_in_slice_header_flag) { GF_LOG(GF_LOG_DEBUG, GF_LOG_CODING, ("[VVC] Picture header in slice header incomplete support, cannot guess slice type\n")); si->slice_type = GF_VVC_SLICE_TYPE_UNKNOWN; return vvc_parse_picture_header(bs, vvc, si); } if (!si->sps) return -1; si->slice_type = GF_VVC_SLICE_TYPE_I; if (gf_bs_read_int_log(bs, 1, "sps_subpic_info_present_flag")) { gf_bs_read_int_log(bs, si->sps->subpicid_len, "subpic_id"); //todo update CurrSubpicIdx } if (si->pps->rect_slice_flag ) { GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("[VVC] tiling parsing not supported - patch welcome\n")); return 0; } gf_bs_read_int_log(bs, si->sps->sh_num_extra_bits, "num_extra_bits"); /* if( !pps_rect_slice_flag && NumTilesInPic − sh_slice_address > 1 ) sh_num_tiles_in_slice_minus1 */ if (si->inter_slice_allowed_flag ) si->slice_type = gf_bs_read_int_log(bs, 2, "slice_type"); return 0; } /*this needs further tests !*/ static void vvc_compute_poc(VVCSliceInfo *si) { u32 max_poc_lsb = 1 << (si->sps->log2_max_poc_lsb); /*POC reset for IDR frames, NOT for CRA*/ if (si->irap_or_gdr_pic && !si->gdr_pic) { si->poc_lsb_prev = 0; si->poc_msb_prev = 0; } if (si->poc_msb_cycle_present_flag) { si->poc_msb = si->poc_msb_cycle; } else { if ((si->poc_lsb < si->poc_lsb_prev) && (si->poc_lsb_prev - si->poc_lsb >= max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev + max_poc_lsb; else if ((si->poc_lsb > si->poc_lsb_prev) && (si->poc_lsb - si->poc_lsb_prev > max_poc_lsb / 2)) si->poc_msb = si->poc_msb_prev - max_poc_lsb; else si->poc_msb = si->poc_msb_prev; } si->poc = si->poc_msb + si->poc_lsb; } GF_EXPORT s32 gf_media_vvc_parse_nalu_bs(GF_BitStream *bs, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { Bool is_slice = GF_FALSE; s32 ret = -1; VVCSliceInfo n_state; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); memcpy(&n_state, &vvc->s_info, sizeof(VVCSliceInfo)); if (!vvc_parse_nal_header(bs, nal_unit_type, temporal_id, layer_id)) return -1; n_state.nal_unit_type = *nal_unit_type; switch (n_state.nal_unit_type) { case GF_VVC_NALU_ACCESS_UNIT: case GF_VVC_NALU_END_OF_SEQ: case GF_VVC_NALU_END_OF_STREAM: ret = 1; break; case GF_VVC_NALU_SLICE_TRAIL: case GF_VVC_NALU_SLICE_STSA: case GF_VVC_NALU_SLICE_RADL: case GF_VVC_NALU_SLICE_RASL: case GF_VVC_NALU_SLICE_IDR_W_RADL: case GF_VVC_NALU_SLICE_IDR_N_LP: case GF_VVC_NALU_SLICE_CRA: case GF_VVC_NALU_SLICE_GDR: /* slice - read the info and compare.*/ ret = vvc_parse_slice(bs, vvc, &n_state); if (ret < 0) return ret; ret = 0; if (n_state.picture_header_in_slice_header_flag) { is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (vvc->s_info.poc != n_state.poc) { ret = 1; break; } if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret = 1; break; } } break; case GF_VVC_NALU_PIC_HEADER: if (vvc_parse_picture_header(bs, vvc, &n_state)<0) { ret = -1; break; } is_slice = GF_TRUE; vvc_compute_poc(&n_state); if (!(*layer_id) || (n_state.prev_layer_id_plus1 && ((*layer_id) <= n_state.prev_layer_id_plus1 - 1))) { ret 
= 1; } break; case GF_VVC_NALU_SEQ_PARAM: vvc->last_parsed_sps_id = gf_media_vvc_read_sps_bs_internal(bs, vvc, *layer_id, NULL); ret = (vvc->last_parsed_sps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_PIC_PARAM: vvc->last_parsed_pps_id = gf_media_vvc_read_pps_bs_internal(bs, vvc); ret = (vvc->last_parsed_pps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_VID_PARAM: vvc->last_parsed_vps_id = gf_media_vvc_read_vps_bs_internal(bs, vvc, GF_FALSE); ret = (vvc->last_parsed_vps_id>=0) ? 0 : -1; break; case GF_VVC_NALU_DEC_PARAM: ret = 0; break; case GF_VVC_NALU_APS_PREFIX: //we use the mix aps type + aps id (first 8 bits) as unique identifier vvc->last_parsed_aps_id = gf_bs_read_int_log(bs, 8, "aps_id"); ret = 0; break; default: ret = 0; break; } /* save _prev values */ if ((ret>0) && vvc->s_info.sps) { // n_state.frame_num_offset_prev = vvc->s_info.frame_num_offset; // n_state.frame_num_prev = vvc->s_info.frame_num; n_state.poc_lsb_prev = vvc->s_info.poc_lsb; n_state.poc_msb_prev = vvc->s_info.poc_msb; if (is_slice) n_state.prev_layer_id_plus1 = *layer_id + 1; } if (is_slice) vvc_compute_poc(&n_state); memcpy(&vvc->s_info, &n_state, sizeof(VVCSliceInfo)); return ret; } GF_EXPORT s32 gf_media_vvc_parse_nalu(u8 *data, u32 size, VVCState *vvc, u8 *nal_unit_type, u8 *temporal_id, u8 *layer_id) { GF_BitStream *bs = NULL; s32 ret; if (!vvc) { if (nal_unit_type) (*nal_unit_type) = data[1] >> 3; if (layer_id) (*layer_id) = data[0] & 0x3f; if (temporal_id) (*temporal_id) = (data[1] & 0x7); return -1; } bs = gf_bs_new(data, size, GF_BITSTREAM_READ); if (!bs) return -1; gf_bs_enable_emulation_byte_removal(bs, GF_TRUE); ret = gf_media_vvc_parse_nalu_bs(bs, vvc, nal_unit_type, temporal_id, layer_id); gf_bs_del(bs); return ret; } Bool gf_media_vvc_slice_is_ref(VVCState *vvc) { if (!vvc->s_info.irap_or_gdr_pic) { return GF_FALSE; } if (vvc->s_info.gdr_pic) { if (vvc->s_info.recovery_point_valid) { vvc->s_info.recovery_point_valid = 0; return GF_TRUE; } return GF_FALSE; } return GF_TRUE; }
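Before the func_before/func_after pair below, one routine in the file snapshot above is worth a note: vvc_compute_poc (like hevc_compute_poc earlier in the file, and flagged in the source itself with "this needs further tests!") derives the full picture order count from its transmitted LSB by detecting wraparound against the previous picture. The following standalone sketch restates that derivation; the derive_poc name and the 4-bit test values are illustrative and not part of GPAC's API.

#include <stdio.h>

/* Standalone rework of the POC wraparound derivation used by
 * vvc_compute_poc above: when the transmitted LSB moves past half its
 * range relative to the previous picture, the MSB steps by one full
 * period (1 << log2_max_poc_lsb). */
static int derive_poc(int log2_max_poc_lsb, int poc_lsb, int prev_lsb, int prev_msb)
{
	int max_poc_lsb = 1 << log2_max_poc_lsb;
	int poc_msb;

	if ((poc_lsb < prev_lsb) && (prev_lsb - poc_lsb >= max_poc_lsb / 2))
		poc_msb = prev_msb + max_poc_lsb;   /* LSB wrapped forward */
	else if ((poc_lsb > prev_lsb) && (poc_lsb - prev_lsb > max_poc_lsb / 2))
		poc_msb = prev_msb - max_poc_lsb;   /* LSB stepped backward across a wrap */
	else
		poc_msb = prev_msb;

	return poc_msb + poc_lsb;
}

int main(void)
{
	/* With a 4-bit LSB (period 16), going from LSB 14 to LSB 1 is a
	 * forward wrap: the derived POC is 17, not 1. */
	printf("%d\n", derive_poc(4, 1, 14, 0));
	return 0;
}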
func_before:

static s32 gf_avc_read_pps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 nal_hdr)
{
	s32 pps_id;
	AVC_PPS *pps;

	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	if (!nal_hdr) {
		gf_bs_read_int_log(bs, 1, "forbidden_zero_bit");
		gf_bs_read_int_log(bs, 2, "nal_ref_idc");
		gf_bs_read_int_log(bs, 5, "nal_unit_type");
	}
	pps_id = gf_bs_read_ue_log(bs, "pps_id");
	if (pps_id >= 255) {
		return -1;
	}
	pps = &avc->pps[pps_id];
	pps->id = pps_id;

	if (!pps->status) pps->status = 1;
	pps->sps_id = gf_bs_read_ue_log(bs, "sps_id");
	if (pps->sps_id >= 32) {
		pps->sps_id = 0;
		return -1;
	}
	/*sps_id may be refer to regular SPS or subseq sps, depending on the coded slice referring to the pps*/
	if (!avc->sps[pps->sps_id].state && !avc->sps[pps->sps_id + GF_SVC_SSPS_ID_SHIFT].state) {
		return -1;
	}
	avc->pps_active_idx = pps->id; /*set active sps*/
	avc->sps_active_idx = pps->sps_id; /*set active sps*/
	pps->entropy_coding_mode_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_mode_flag");
	pps->pic_order_present = gf_bs_read_int_log(bs, 1, "pic_order_present");
	pps->slice_group_count = gf_bs_read_ue_log(bs, "slice_group_count_minus1") + 1;
	if (pps->slice_group_count > 1) {
		u32 iGroup;
		pps->mb_slice_group_map_type = gf_bs_read_ue_log(bs, "mb_slice_group_map_type");
		if (pps->mb_slice_group_map_type == 0) {
			for (iGroup = 0; iGroup <= pps->slice_group_count - 1; iGroup++)
				gf_bs_read_ue_log_idx(bs, "run_length_minus1", iGroup);
		}
		else if (pps->mb_slice_group_map_type == 2) {
			for (iGroup = 0; iGroup < pps->slice_group_count - 1; iGroup++) {
				gf_bs_read_ue_log_idx(bs, "top_left", iGroup);
				gf_bs_read_ue_log_idx(bs, "bottom_right", iGroup);
			}
		}
		else if (pps->mb_slice_group_map_type == 3 || pps->mb_slice_group_map_type == 4 || pps->mb_slice_group_map_type == 5) {
			gf_bs_read_int_log(bs, 1, "slice_group_change_direction_flag");
			gf_bs_read_ue_log(bs, "slice_group_change_rate_minus1");
		}
		else if (pps->mb_slice_group_map_type == 6) {
			u32 i;
			pps->pic_size_in_map_units_minus1 = gf_bs_read_ue_log(bs, "pic_size_in_map_units_minus1");
			for (i = 0; i <= pps->pic_size_in_map_units_minus1; i++) {
				gf_bs_read_int_log_idx(bs, (u32)ceil(log(pps->slice_group_count) / log(2)), "slice_group_id", i);
			}
		}
	}
	pps->num_ref_idx_l0_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active_minus1");
	pps->num_ref_idx_l1_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active_minus1");
	/*
	if ((pps->ref_count[0] > 32) || (pps->ref_count[1] > 32)) goto exit;
	*/
	pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag");
	gf_bs_read_int_log(bs, 2, "weighted_bipred_idc");
	gf_bs_read_se_log(bs, "init_qp_minus26");
	gf_bs_read_se_log(bs, "init_qs_minus26");
	gf_bs_read_se_log(bs, "chroma_qp_index_offset");
	pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag");
	gf_bs_read_int_log(bs, 1, "constrained_intra_pred");
	pps->redundant_pic_cnt_present = gf_bs_read_int_log(bs, 1, "redundant_pic_cnt_present");
	return pps_id;
}
func_after:

static s32 gf_avc_read_pps_bs_internal(GF_BitStream *bs, AVCState *avc, u32 nal_hdr)
{
	s32 pps_id;
	AVC_PPS *pps;

	gf_bs_enable_emulation_byte_removal(bs, GF_TRUE);
	if (!nal_hdr) {
		gf_bs_read_int_log(bs, 1, "forbidden_zero_bit");
		gf_bs_read_int_log(bs, 2, "nal_ref_idc");
		gf_bs_read_int_log(bs, 5, "nal_unit_type");
	}
	pps_id = gf_bs_read_ue_log(bs, "pps_id");
	if ((pps_id<0) || (pps_id >= 255)) {
		return -1;
	}
	pps = &avc->pps[pps_id];
	pps->id = pps_id;

	if (!pps->status) pps->status = 1;
	pps->sps_id = gf_bs_read_ue_log(bs, "sps_id");
	if ((pps->sps_id<0) || (pps->sps_id >= 32)) {
		pps->sps_id = 0;
		return -1;
	}
	/*sps_id may be refer to regular SPS or subseq sps, depending on the coded slice referring to the pps*/
	if (!avc->sps[pps->sps_id].state && !avc->sps[pps->sps_id + GF_SVC_SSPS_ID_SHIFT].state) {
		return -1;
	}
	avc->pps_active_idx = pps->id; /*set active sps*/
	avc->sps_active_idx = pps->sps_id; /*set active sps*/
	pps->entropy_coding_mode_flag = gf_bs_read_int_log(bs, 1, "entropy_coding_mode_flag");
	pps->pic_order_present = gf_bs_read_int_log(bs, 1, "pic_order_present");
	pps->slice_group_count = gf_bs_read_ue_log(bs, "slice_group_count_minus1") + 1;
	if (pps->slice_group_count > 1) {
		u32 iGroup;
		pps->mb_slice_group_map_type = gf_bs_read_ue_log(bs, "mb_slice_group_map_type");
		if (pps->mb_slice_group_map_type == 0) {
			for (iGroup = 0; iGroup <= pps->slice_group_count - 1; iGroup++)
				gf_bs_read_ue_log_idx(bs, "run_length_minus1", iGroup);
		}
		else if (pps->mb_slice_group_map_type == 2) {
			for (iGroup = 0; iGroup < pps->slice_group_count - 1; iGroup++) {
				gf_bs_read_ue_log_idx(bs, "top_left", iGroup);
				gf_bs_read_ue_log_idx(bs, "bottom_right", iGroup);
			}
		}
		else if (pps->mb_slice_group_map_type == 3 || pps->mb_slice_group_map_type == 4 || pps->mb_slice_group_map_type == 5) {
			gf_bs_read_int_log(bs, 1, "slice_group_change_direction_flag");
			gf_bs_read_ue_log(bs, "slice_group_change_rate_minus1");
		}
		else if (pps->mb_slice_group_map_type == 6) {
			u32 i;
			pps->pic_size_in_map_units_minus1 = gf_bs_read_ue_log(bs, "pic_size_in_map_units_minus1");
			for (i = 0; i <= pps->pic_size_in_map_units_minus1; i++) {
				gf_bs_read_int_log_idx(bs, (u32)ceil(log(pps->slice_group_count) / log(2)), "slice_group_id", i);
			}
		}
	}
	pps->num_ref_idx_l0_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l0_default_active_minus1");
	pps->num_ref_idx_l1_default_active_minus1 = gf_bs_read_ue_log(bs, "num_ref_idx_l1_default_active_minus1");
	/*
	if ((pps->ref_count[0] > 32) || (pps->ref_count[1] > 32)) goto exit;
	*/
	pps->weighted_pred_flag = gf_bs_read_int_log(bs, 1, "weighted_pred_flag");
	gf_bs_read_int_log(bs, 2, "weighted_bipred_idc");
	gf_bs_read_se_log(bs, "init_qp_minus26");
	gf_bs_read_se_log(bs, "init_qs_minus26");
	gf_bs_read_se_log(bs, "chroma_qp_index_offset");
	pps->deblocking_filter_control_present_flag = gf_bs_read_int_log(bs, 1, "deblocking_filter_control_present_flag");
	gf_bs_read_int_log(bs, 1, "constrained_intra_pred");
	pps->redundant_pic_cnt_present = gf_bs_read_int_log(bs, 1, "redundant_pic_cnt_present");
	return pps_id;
}
{'added': [(5118, '\tif ((sps_id < 0) || (sps_id >= 32)) {'), (5447, '\tif ((pps_id<0) || (pps_id >= 255)) {'), (5455, '\tif ((pps->sps_id<0) || (pps->sps_id >= 32)) {'), (6843, '\tif ((pps_id<0) || (pps_id >= 64))'), (7666, '\tif ((vps_id<0) || (vps_id >= 16)) return -1;'), (7895, '\tif ((vps_id<0) || (vps_id >= 16)) {'), (8210, '\tif ((pps->sps_id<0) || (pps->sps_id >= 16)) {'), (9360, '\tif ((vps_id<0) || (vps_id >= 16)) return -1;'), (9451, '\tif ((sps_id<0) || (sps_id >= 16)) {'), (9455, '\tif ((vps_id<0) || (vps_id >= 16)) {'), (9657, '\tif ((pps->sps_id<0) || (pps->sps_id >= 16)) {'), (9725, '\tif ((pps_id<0) || (pps_id >= 64))')], 'deleted': [(5118, '\tif (sps_id >= 32) {'), (5119, '\t\treturn -1;'), (5120, '\t}'), (5121, '\tif (sps_id < 0) {'), (5450, '\tif (pps_id >= 255) {'), (5458, '\tif (pps->sps_id >= 32) {'), (6846, '\tif (pps_id >= 64)'), (7669, '\tif (vps_id >= 16) return -1;'), (7898, '\tif (vps_id >= 16) {'), (8213, '\tif (pps->sps_id >= 16) {'), (9363, '\tif (vps_id >= 16) return -1;'), (9454, '\tif (sps_id >= 16) {'), (9458, '\tif (vps_id >= 16) {'), (9660, '\tif (pps->sps_id >= 16) {'), (9728, '\tif (pps_id >= 64)')]}
num_lines_added: 12
num_lines_deleted: 15
num_lines_in_file: 8,400
num_tokens_in_file: 60,423
num_lines_in_method: 67
num_tokens_in_method: 552
method_complexity: 17
repo: https://github.com/gpac/gpac
cve_id: CVE-2021-30014
cwe_id: CWE-190
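The CWE-190 fix in this record replaces one-sided checks such as sps_id >= 32 with two-sided checks: gf_bs_read_ue_log returns a signed 32-bit value, a crafted bitstream can drive it negative, and a negative ID would index before the avc->pps / avc->sps arrays. A minimal standalone sketch of that pattern, under assumed names (MAX_PPS, checked_pps_id, pps_table are illustrative, not GPAC's actual API):

    #include <stdint.h>

    #define MAX_PPS 255

    /* Validate an Exp-Golomb-decoded ID before using it as an array index.
     * Both bounds matter: a wrapped (negative) value passes an upper-bound
     * check alone and then indexes memory before the table. */
    static int32_t checked_pps_id(int32_t raw_ue, int32_t *pps_table)
    {
        if ((raw_ue < 0) || (raw_ue >= MAX_PPS))
            return -1;              /* reject the stream, as the patch does */
        pps_table[raw_ue] = raw_ue; /* safe: 0 <= raw_ue < MAX_PPS */
        return raw_ue;
    }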
id: 3,008
file_name: nsc_encode.c
programming_language: C
method_name: nsc_encode_argb_to_aycocg
/** * FreeRDP: A Remote Desktop Protocol Implementation * NSCodec Encoder * * Copyright 2012 Vic Lee * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <winpr/crt.h> #include <freerdp/codec/nsc.h> #include <freerdp/codec/color.h> #include "nsc_types.h" #include "nsc_encode.h" static BOOL nsc_context_initialize_encode(NSC_CONTEXT* context) { int i; UINT32 length; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); /* The maximum length a decoded plane can reach in all cases */ length = tempWidth * tempHeight + 16; if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) { BYTE* tmp = (BYTE*) realloc(context->priv->PlaneBuffers[i], length); if (!tmp) goto fail; context->priv->PlaneBuffers[i] = tmp; } context->priv->PlaneBuffersLength = length; } if (context->ChromaSubsamplingLevel) { context->OrgByteCount[0] = tempWidth * context->height; context->OrgByteCount[1] = tempWidth * tempHeight / 4; context->OrgByteCount[2] = tempWidth * tempHeight / 4; context->OrgByteCount[3] = context->width * context->height; } else { context->OrgByteCount[0] = context->width * context->height; context->OrgByteCount[1] = context->width * context->height; context->OrgByteCount[2] = context->width * context->height; context->OrgByteCount[3] = context->width * context->height; } return TRUE; fail: if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) free(context->priv->PlaneBuffers[i]); } return FALSE; } static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? 
tempWidth : context->width); ccl = context->ColorLossLevel; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } } static void nsc_encode_subsampling(NSC_CONTEXT* context) { UINT16 x; UINT16 y; BYTE* co_dst; BYTE* cg_dst; INT8* co_src0; INT8* co_src1; INT8* cg_src0; INT8* cg_src1; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); for (y = 0; y < tempHeight >> 1; y++) { co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1); cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1); co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth; co_src1 = co_src0 + tempWidth; cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth; cg_src1 = cg_src0 
+ tempWidth; for (x = 0; x < tempWidth >> 1; x++) { *co_dst++ = (BYTE)(((INT16) * co_src0 + (INT16) * (co_src0 + 1) + (INT16) * co_src1 + (INT16) * (co_src1 + 1)) >> 2); *cg_dst++ = (BYTE)(((INT16) * cg_src0 + (INT16) * (cg_src0 + 1) + (INT16) * cg_src1 + (INT16) * (cg_src1 + 1)) >> 2); co_src0 += 2; co_src1 += 2; cg_src0 += 2; cg_src1 += 2; } } } void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { nsc_encode_argb_to_aycocg(context, bmpdata, rowstride); if (context->ChromaSubsamplingLevel) { nsc_encode_subsampling(context); } } static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 left; UINT32 runlength = 1; UINT32 planeSize = 0; left = originalSize; /** * We quit the loop if the running compressed size is larger than the original. * In such cases data will be sent uncompressed. */ while (left > 4 && planeSize < originalSize - 4) { if (left > 5 && *in == *(in + 1)) { runlength++; } else if (runlength == 1) { *out++ = *in; planeSize++; } else if (runlength < 256) { *out++ = *in; *out++ = *in; *out++ = runlength - 2; runlength = 1; planeSize += 3; } else { *out++ = *in; *out++ = *in; *out++ = 0xFF; *out++ = (runlength & 0x000000FF); *out++ = (runlength & 0x0000FF00) >> 8; *out++ = (runlength & 0x00FF0000) >> 16; *out++ = (runlength & 0xFF000000) >> 24; runlength = 1; planeSize += 7; } in++; left--; } if (planeSize < originalSize - 4) CopyMemory(out, in, 4); planeSize += 4; return planeSize; } static void nsc_rle_compress_data(NSC_CONTEXT* context) { UINT16 i; UINT32 planeSize; UINT32 originalSize; for (i = 0; i < 4; i++) { originalSize = context->OrgByteCount[i]; if (originalSize == 0) { planeSize = 0; } else { planeSize = nsc_rle_encode(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], originalSize); if (planeSize < originalSize) CopyMemory(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], planeSize); else planeSize = originalSize; } context->PlaneByteCount[i] = planeSize; } } UINT32 nsc_compute_byte_count(NSC_CONTEXT* context, UINT32* ByteCount, UINT32 width, UINT32 height) { UINT32 tempWidth; UINT32 tempHeight; UINT32 maxPlaneSize; tempWidth = ROUND_UP_TO(width, 8); tempHeight = ROUND_UP_TO(height, 2); maxPlaneSize = tempWidth * tempHeight + 16; if (context->ChromaSubsamplingLevel) { ByteCount[0] = tempWidth * height; ByteCount[1] = tempWidth * tempHeight / 4; ByteCount[2] = tempWidth * tempHeight / 4; ByteCount[3] = width * height; } else { ByteCount[0] = width * height; ByteCount[1] = width * height; ByteCount[2] = width * height; ByteCount[3] = width * height; } return maxPlaneSize; } NSC_MESSAGE* nsc_encode_messages(NSC_CONTEXT* context, const BYTE* data, UINT32 x, UINT32 y, UINT32 width, UINT32 height, UINT32 scanline, UINT32* numMessages, UINT32 maxDataSize) { UINT32 i, j, k; UINT32 dataOffset; UINT32 rows, cols; UINT32 BytesPerPixel; UINT32 MaxRegionWidth; UINT32 MaxRegionHeight; UINT32 ByteCount[4]; UINT32 MaxPlaneSize; UINT32 MaxMessageSize; NSC_MESSAGE* messages; UINT32 PaddedMaxPlaneSize; k = 0; MaxRegionWidth = 64 * 4; MaxRegionHeight = 64 * 2; BytesPerPixel = GetBytesPerPixel(context->format); rows = (width + (MaxRegionWidth - (width % MaxRegionWidth))) / MaxRegionWidth; cols = (height + (MaxRegionHeight - (height % MaxRegionHeight))) / MaxRegionHeight; *numMessages = rows * cols; MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) ByteCount, width, height); MaxMessageSize = ByteCount[0] + ByteCount[1] + ByteCount[2] + ByteCount[3] + 20; maxDataSize -= 1024; /* reserve enough space for 
headers */ messages = (NSC_MESSAGE*) calloc(*numMessages, sizeof(NSC_MESSAGE)); if (!messages) return NULL; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { messages[k].x = x + (i * MaxRegionWidth); messages[k].y = y + (j * MaxRegionHeight); messages[k].width = (i < (rows - 1)) ? MaxRegionWidth : width - (i * MaxRegionWidth); messages[k].height = (j < (cols - 1)) ? MaxRegionHeight : height - (j * MaxRegionHeight); messages[k].data = data; messages[k].scanline = scanline; messages[k].MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) messages[k].OrgByteCount, messages[k].width, messages[k].height); k++; } } *numMessages = k; for (i = 0; i < *numMessages; i++) { PaddedMaxPlaneSize = messages[i].MaxPlaneSize + 32; messages[i].PlaneBuffer = (BYTE*) BufferPool_Take(context->priv->PlanePool, PaddedMaxPlaneSize * 5); if (!messages[i].PlaneBuffer) goto fail; messages[i].PlaneBuffers[0] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 0) + 16]); messages[i].PlaneBuffers[1] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 1) + 16]); messages[i].PlaneBuffers[2] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 2) + 16]); messages[i].PlaneBuffers[3] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 3) + 16]); messages[i].PlaneBuffers[4] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 4) + 16]); } for (i = 0; i < *numMessages; i++) { context->width = messages[i].width; context->height = messages[i].height; context->OrgByteCount[0] = messages[i].OrgByteCount[0]; context->OrgByteCount[1] = messages[i].OrgByteCount[1]; context->OrgByteCount[2] = messages[i].OrgByteCount[2]; context->OrgByteCount[3] = messages[i].OrgByteCount[3]; context->priv->PlaneBuffersLength = messages[i].MaxPlaneSize; context->priv->PlaneBuffers[0] = messages[i].PlaneBuffers[0]; context->priv->PlaneBuffers[1] = messages[i].PlaneBuffers[1]; context->priv->PlaneBuffers[2] = messages[i].PlaneBuffers[2]; context->priv->PlaneBuffers[3] = messages[i].PlaneBuffers[3]; context->priv->PlaneBuffers[4] = messages[i].PlaneBuffers[4]; dataOffset = (messages[i].y * messages[i].scanline) + (messages[i].x * BytesPerPixel); PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, &data[dataOffset], scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) messages[i].LumaPlaneByteCount = context->PlaneByteCount[0]; messages[i].OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; messages[i].GreenChromaPlaneByteCount = context->PlaneByteCount[2]; messages[i].AlphaPlaneByteCount = context->PlaneByteCount[3]; messages[i].ColorLossLevel = context->ColorLossLevel; messages[i].ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; } context->priv->PlaneBuffers[0] = NULL; context->priv->PlaneBuffers[1] = NULL; context->priv->PlaneBuffers[2] = NULL; context->priv->PlaneBuffers[3] = NULL; context->priv->PlaneBuffers[4] = NULL; return messages; fail: for (i = 0; i < *numMessages; i++) BufferPool_Return(context->priv->PlanePool, messages[i].PlaneBuffer); free(messages); return NULL; } BOOL nsc_write_message(NSC_CONTEXT* context, wStream* s, NSC_MESSAGE* message) { UINT32 totalPlaneByteCount; totalPlaneByteCount = message->LumaPlaneByteCount + message->OrangeChromaPlaneByteCount + message->GreenChromaPlaneByteCount + message->AlphaPlaneByteCount; if (!Stream_EnsureRemainingCapacity(s, 20 + totalPlaneByteCount)) return -1; 
Stream_Write_UINT32(s, message->LumaPlaneByteCount); /* LumaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->OrangeChromaPlaneByteCount); /* OrangeChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->GreenChromaPlaneByteCount); /* GreenChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->AlphaPlaneByteCount); /* AlphaPlaneByteCount (4 bytes) */ Stream_Write_UINT8(s, message->ColorLossLevel); /* ColorLossLevel (1 byte) */ Stream_Write_UINT8(s, message->ChromaSubsamplingLevel); /* ChromaSubsamplingLevel (1 byte) */ Stream_Write_UINT16(s, 0); /* Reserved (2 bytes) */ if (message->LumaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[0], message->LumaPlaneByteCount); /* LumaPlane */ if (message->OrangeChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[1], message->OrangeChromaPlaneByteCount); /* OrangeChromaPlane */ if (message->GreenChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[2], message->GreenChromaPlaneByteCount); /* GreenChromaPlane */ if (message->AlphaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[3], message->AlphaPlaneByteCount); /* AlphaPlane */ return TRUE; } void nsc_message_free(NSC_CONTEXT* context, NSC_MESSAGE* message) { BufferPool_Return(context->priv->PlanePool, message->PlaneBuffer); } BOOL nsc_compose_message(NSC_CONTEXT* context, wStream* s, const BYTE* data, UINT32 width, UINT32 height, UINT32 scanline) { NSC_MESSAGE s_message = { 0 }; NSC_MESSAGE* message = &s_message; context->width = width; context->height = height; if (!nsc_context_initialize_encode(context)) return FALSE; /* ARGB to AYCoCg conversion, chroma subsampling and colorloss reduction */ PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, data, scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) /* RLE encode */ PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) message->PlaneBuffers[0] = context->priv->PlaneBuffers[0]; message->PlaneBuffers[1] = context->priv->PlaneBuffers[1]; message->PlaneBuffers[2] = context->priv->PlaneBuffers[2]; message->PlaneBuffers[3] = context->priv->PlaneBuffers[3]; message->LumaPlaneByteCount = context->PlaneByteCount[0]; message->OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; message->GreenChromaPlaneByteCount = context->PlaneByteCount[2]; message->AlphaPlaneByteCount = context->PlaneByteCount[3]; message->ColorLossLevel = context->ColorLossLevel; message->ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; return nsc_write_message(context, s, message); }
/** * FreeRDP: A Remote Desktop Protocol Implementation * NSCodec Encoder * * Copyright 2012 Vic Lee * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <winpr/crt.h> #include <freerdp/codec/nsc.h> #include <freerdp/codec/color.h> #include "nsc_types.h" #include "nsc_encode.h" static BOOL nsc_context_initialize_encode(NSC_CONTEXT* context) { int i; UINT32 length; UINT32 tempWidth; UINT32 tempHeight; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); /* The maximum length a decoded plane can reach in all cases */ length = tempWidth * tempHeight + 16; if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) { BYTE* tmp = (BYTE*) realloc(context->priv->PlaneBuffers[i], length); if (!tmp) goto fail; context->priv->PlaneBuffers[i] = tmp; } context->priv->PlaneBuffersLength = length; } if (context->ChromaSubsamplingLevel) { context->OrgByteCount[0] = tempWidth * context->height; context->OrgByteCount[1] = tempWidth * tempHeight / 4; context->OrgByteCount[2] = tempWidth * tempHeight / 4; context->OrgByteCount[3] = context->width * context->height; } else { context->OrgByteCount[0] = context->width * context->height; context->OrgByteCount[1] = context->width * context->height; context->OrgByteCount[2] = context->width * context->height; context->OrgByteCount[3] = context->width * context->height; } return TRUE; fail: if (length > context->priv->PlaneBuffersLength) { for (i = 0; i < 5; i++) free(context->priv->PlaneBuffers[i]); } return FALSE; } static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; if (!context || data || (scanline == 0)) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? 
tempWidth : context->width); ccl = context->ColorLossLevel; if (context->priv->PlaneBuffersLength < rw * scanline) return FALSE; if (rw < scanline * 2) return FALSE; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } return TRUE; } static BOOL nsc_encode_subsampling(NSC_CONTEXT* context) { UINT16 x; UINT16 y; UINT32 tempWidth; UINT32 tempHeight; if (!context) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); tempHeight = ROUND_UP_TO(context->height, 2); if (tempHeight == 0) return FALSE; if (tempWidth > context->priv->PlaneBuffersLength / tempHeight) return FALSE; for (y = 0; y < tempHeight >> 1; y++) { BYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1); BYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1); const INT8* 
co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth; const INT8* co_src1 = co_src0 + tempWidth; const INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth; const INT8* cg_src1 = cg_src0 + tempWidth; for (x = 0; x < tempWidth >> 1; x++) { *co_dst++ = (BYTE)(((INT16) * co_src0 + (INT16) * (co_src0 + 1) + (INT16) * co_src1 + (INT16) * (co_src1 + 1)) >> 2); *cg_dst++ = (BYTE)(((INT16) * cg_src0 + (INT16) * (cg_src0 + 1) + (INT16) * cg_src1 + (INT16) * (cg_src1 + 1)) >> 2); co_src0 += 2; co_src1 += 2; cg_src0 += 2; cg_src1 += 2; } } return TRUE; } BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride) { if (!context || !bmpdata || (rowstride == 0)) return FALSE; if (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride)) return FALSE; if (context->ChromaSubsamplingLevel) { if (!nsc_encode_subsampling(context)) return FALSE; } return TRUE; } static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize) { UINT32 left; UINT32 runlength = 1; UINT32 planeSize = 0; left = originalSize; /** * We quit the loop if the running compressed size is larger than the original. * In such cases data will be sent uncompressed. */ while (left > 4 && planeSize < originalSize - 4) { if (left > 5 && *in == *(in + 1)) { runlength++; } else if (runlength == 1) { *out++ = *in; planeSize++; } else if (runlength < 256) { *out++ = *in; *out++ = *in; *out++ = runlength - 2; runlength = 1; planeSize += 3; } else { *out++ = *in; *out++ = *in; *out++ = 0xFF; *out++ = (runlength & 0x000000FF); *out++ = (runlength & 0x0000FF00) >> 8; *out++ = (runlength & 0x00FF0000) >> 16; *out++ = (runlength & 0xFF000000) >> 24; runlength = 1; planeSize += 7; } in++; left--; } if (planeSize < originalSize - 4) CopyMemory(out, in, 4); planeSize += 4; return planeSize; } static void nsc_rle_compress_data(NSC_CONTEXT* context) { UINT16 i; UINT32 planeSize; UINT32 originalSize; for (i = 0; i < 4; i++) { originalSize = context->OrgByteCount[i]; if (originalSize == 0) { planeSize = 0; } else { planeSize = nsc_rle_encode(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], originalSize); if (planeSize < originalSize) CopyMemory(context->priv->PlaneBuffers[i], context->priv->PlaneBuffers[4], planeSize); else planeSize = originalSize; } context->PlaneByteCount[i] = planeSize; } } UINT32 nsc_compute_byte_count(NSC_CONTEXT* context, UINT32* ByteCount, UINT32 width, UINT32 height) { UINT32 tempWidth; UINT32 tempHeight; UINT32 maxPlaneSize; tempWidth = ROUND_UP_TO(width, 8); tempHeight = ROUND_UP_TO(height, 2); maxPlaneSize = tempWidth * tempHeight + 16; if (context->ChromaSubsamplingLevel) { ByteCount[0] = tempWidth * height; ByteCount[1] = tempWidth * tempHeight / 4; ByteCount[2] = tempWidth * tempHeight / 4; ByteCount[3] = width * height; } else { ByteCount[0] = width * height; ByteCount[1] = width * height; ByteCount[2] = width * height; ByteCount[3] = width * height; } return maxPlaneSize; } NSC_MESSAGE* nsc_encode_messages(NSC_CONTEXT* context, const BYTE* data, UINT32 x, UINT32 y, UINT32 width, UINT32 height, UINT32 scanline, UINT32* numMessages, UINT32 maxDataSize) { UINT32 i, j, k; UINT32 dataOffset; UINT32 rows, cols; UINT32 BytesPerPixel; UINT32 MaxRegionWidth; UINT32 MaxRegionHeight; UINT32 ByteCount[4]; UINT32 MaxPlaneSize; UINT32 MaxMessageSize; NSC_MESSAGE* messages; UINT32 PaddedMaxPlaneSize; k = 0; MaxRegionWidth = 64 * 4; MaxRegionHeight = 64 * 2; BytesPerPixel = GetBytesPerPixel(context->format); rows = (width + (MaxRegionWidth - (width % 
MaxRegionWidth))) / MaxRegionWidth; cols = (height + (MaxRegionHeight - (height % MaxRegionHeight))) / MaxRegionHeight; *numMessages = rows * cols; MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) ByteCount, width, height); MaxMessageSize = ByteCount[0] + ByteCount[1] + ByteCount[2] + ByteCount[3] + 20; maxDataSize -= 1024; /* reserve enough space for headers */ messages = (NSC_MESSAGE*) calloc(*numMessages, sizeof(NSC_MESSAGE)); if (!messages) return NULL; for (i = 0; i < rows; i++) { for (j = 0; j < cols; j++) { messages[k].x = x + (i * MaxRegionWidth); messages[k].y = y + (j * MaxRegionHeight); messages[k].width = (i < (rows - 1)) ? MaxRegionWidth : width - (i * MaxRegionWidth); messages[k].height = (j < (cols - 1)) ? MaxRegionHeight : height - (j * MaxRegionHeight); messages[k].data = data; messages[k].scanline = scanline; messages[k].MaxPlaneSize = nsc_compute_byte_count(context, (UINT32*) messages[k].OrgByteCount, messages[k].width, messages[k].height); k++; } } *numMessages = k; for (i = 0; i < *numMessages; i++) { PaddedMaxPlaneSize = messages[i].MaxPlaneSize + 32; messages[i].PlaneBuffer = (BYTE*) BufferPool_Take(context->priv->PlanePool, PaddedMaxPlaneSize * 5); if (!messages[i].PlaneBuffer) goto fail; messages[i].PlaneBuffers[0] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 0) + 16]); messages[i].PlaneBuffers[1] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 1) + 16]); messages[i].PlaneBuffers[2] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 2) + 16]); messages[i].PlaneBuffers[3] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 3) + 16]); messages[i].PlaneBuffers[4] = (BYTE*) & (messages[i].PlaneBuffer[(PaddedMaxPlaneSize * 4) + 16]); } for (i = 0; i < *numMessages; i++) { context->width = messages[i].width; context->height = messages[i].height; context->OrgByteCount[0] = messages[i].OrgByteCount[0]; context->OrgByteCount[1] = messages[i].OrgByteCount[1]; context->OrgByteCount[2] = messages[i].OrgByteCount[2]; context->OrgByteCount[3] = messages[i].OrgByteCount[3]; context->priv->PlaneBuffersLength = messages[i].MaxPlaneSize; context->priv->PlaneBuffers[0] = messages[i].PlaneBuffers[0]; context->priv->PlaneBuffers[1] = messages[i].PlaneBuffers[1]; context->priv->PlaneBuffers[2] = messages[i].PlaneBuffers[2]; context->priv->PlaneBuffers[3] = messages[i].PlaneBuffers[3]; context->priv->PlaneBuffers[4] = messages[i].PlaneBuffers[4]; dataOffset = (messages[i].y * messages[i].scanline) + (messages[i].x * BytesPerPixel); PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, &data[dataOffset], scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) messages[i].LumaPlaneByteCount = context->PlaneByteCount[0]; messages[i].OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; messages[i].GreenChromaPlaneByteCount = context->PlaneByteCount[2]; messages[i].AlphaPlaneByteCount = context->PlaneByteCount[3]; messages[i].ColorLossLevel = context->ColorLossLevel; messages[i].ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; } context->priv->PlaneBuffers[0] = NULL; context->priv->PlaneBuffers[1] = NULL; context->priv->PlaneBuffers[2] = NULL; context->priv->PlaneBuffers[3] = NULL; context->priv->PlaneBuffers[4] = NULL; return messages; fail: for (i = 0; i < *numMessages; i++) BufferPool_Return(context->priv->PlanePool, messages[i].PlaneBuffer); free(messages); return 
NULL; } BOOL nsc_write_message(NSC_CONTEXT* context, wStream* s, NSC_MESSAGE* message) { UINT32 totalPlaneByteCount; totalPlaneByteCount = message->LumaPlaneByteCount + message->OrangeChromaPlaneByteCount + message->GreenChromaPlaneByteCount + message->AlphaPlaneByteCount; if (!Stream_EnsureRemainingCapacity(s, 20 + totalPlaneByteCount)) return -1; Stream_Write_UINT32(s, message->LumaPlaneByteCount); /* LumaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->OrangeChromaPlaneByteCount); /* OrangeChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->GreenChromaPlaneByteCount); /* GreenChromaPlaneByteCount (4 bytes) */ Stream_Write_UINT32(s, message->AlphaPlaneByteCount); /* AlphaPlaneByteCount (4 bytes) */ Stream_Write_UINT8(s, message->ColorLossLevel); /* ColorLossLevel (1 byte) */ Stream_Write_UINT8(s, message->ChromaSubsamplingLevel); /* ChromaSubsamplingLevel (1 byte) */ Stream_Write_UINT16(s, 0); /* Reserved (2 bytes) */ if (message->LumaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[0], message->LumaPlaneByteCount); /* LumaPlane */ if (message->OrangeChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[1], message->OrangeChromaPlaneByteCount); /* OrangeChromaPlane */ if (message->GreenChromaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[2], message->GreenChromaPlaneByteCount); /* GreenChromaPlane */ if (message->AlphaPlaneByteCount) Stream_Write(s, message->PlaneBuffers[3], message->AlphaPlaneByteCount); /* AlphaPlane */ return TRUE; } void nsc_message_free(NSC_CONTEXT* context, NSC_MESSAGE* message) { BufferPool_Return(context->priv->PlanePool, message->PlaneBuffer); } BOOL nsc_compose_message(NSC_CONTEXT* context, wStream* s, const BYTE* data, UINT32 width, UINT32 height, UINT32 scanline) { NSC_MESSAGE s_message = { 0 }; NSC_MESSAGE* message = &s_message; context->width = width; context->height = height; if (!nsc_context_initialize_encode(context)) return FALSE; /* ARGB to AYCoCg conversion, chroma subsampling and colorloss reduction */ PROFILER_ENTER(context->priv->prof_nsc_encode) context->encode(context, data, scanline); PROFILER_EXIT(context->priv->prof_nsc_encode) /* RLE encode */ PROFILER_ENTER(context->priv->prof_nsc_rle_compress_data) nsc_rle_compress_data(context); PROFILER_EXIT(context->priv->prof_nsc_rle_compress_data) message->PlaneBuffers[0] = context->priv->PlaneBuffers[0]; message->PlaneBuffers[1] = context->priv->PlaneBuffers[1]; message->PlaneBuffers[2] = context->priv->PlaneBuffers[2]; message->PlaneBuffers[3] = context->priv->PlaneBuffers[3]; message->LumaPlaneByteCount = context->PlaneByteCount[0]; message->OrangeChromaPlaneByteCount = context->PlaneByteCount[1]; message->GreenChromaPlaneByteCount = context->PlaneByteCount[2]; message->AlphaPlaneByteCount = context->PlaneByteCount[3]; message->ColorLossLevel = context->ColorLossLevel; message->ChromaSubsamplingLevel = context->ChromaSubsamplingLevel; return nsc_write_message(context, s, message); }
static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? tempWidth : context->width); ccl = context->ColorLossLevel; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } }
static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data, UINT32 scanline) { UINT16 x; UINT16 y; UINT16 rw; BYTE ccl; const BYTE* src; BYTE* yplane = NULL; BYTE* coplane = NULL; BYTE* cgplane = NULL; BYTE* aplane = NULL; INT16 r_val; INT16 g_val; INT16 b_val; BYTE a_val; UINT32 tempWidth; if (!context || data || (scanline == 0)) return FALSE; tempWidth = ROUND_UP_TO(context->width, 8); rw = (context->ChromaSubsamplingLevel ? tempWidth : context->width); ccl = context->ColorLossLevel; if (context->priv->PlaneBuffersLength < rw * scanline) return FALSE; if (rw < scanline * 2) return FALSE; for (y = 0; y < context->height; y++) { src = data + (context->height - 1 - y) * scanline; yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; aplane = context->priv->PlaneBuffers[3] + y * context->width; for (x = 0; x < context->width; x++) { switch (context->format) { case PIXEL_FORMAT_BGRX32: b_val = *src++; g_val = *src++; r_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGRA32: b_val = *src++; g_val = *src++; r_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_RGBX32: r_val = *src++; g_val = *src++; b_val = *src++; src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGBA32: r_val = *src++; g_val = *src++; b_val = *src++; a_val = *src++; break; case PIXEL_FORMAT_BGR24: b_val = *src++; g_val = *src++; r_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_RGB24: r_val = *src++; g_val = *src++; b_val = *src++; a_val = 0xFF; break; case PIXEL_FORMAT_BGR16: b_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); r_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_RGB16: r_val = (INT16)(((*(src + 1)) & 0xF8) | ((*(src + 1)) >> 5)); g_val = (INT16)((((*(src + 1)) & 0x07) << 5) | (((*src) & 0xE0) >> 3)); b_val = (INT16)((((*src) & 0x1F) << 3) | (((*src) >> 2) & 0x07)); a_val = 0xFF; src += 2; break; case PIXEL_FORMAT_A4: { int shift; BYTE idx; shift = (7 - (x % 8)); idx = ((*src) >> shift) & 1; idx |= (((*(src + 1)) >> shift) & 1) << 1; idx |= (((*(src + 2)) >> shift) & 1) << 2; idx |= (((*(src + 3)) >> shift) & 1) << 3; idx *= 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; if (shift == 0) src += 4; } a_val = 0xFF; break; case PIXEL_FORMAT_RGB8: { int idx = (*src) * 3; r_val = (INT16) context->palette[idx]; g_val = (INT16) context->palette[idx + 1]; b_val = (INT16) context->palette[idx + 2]; src++; } a_val = 0xFF; break; default: r_val = g_val = b_val = a_val = 0; break; } *yplane++ = (BYTE)((r_val >> 2) + (g_val >> 1) + (b_val >> 2)); /* Perform color loss reduction here */ *coplane++ = (BYTE)((r_val - b_val) >> ccl); *cgplane++ = (BYTE)((-(r_val >> 1) + g_val - (b_val >> 1)) >> ccl); *aplane++ = a_val; } if (context->ChromaSubsamplingLevel && (x % 2) == 1) { *yplane = *(yplane - 1); *coplane = *(coplane - 1); *cgplane = *(cgplane - 1); } } if (context->ChromaSubsamplingLevel && (y % 2) == 1) { yplane = context->priv->PlaneBuffers[0] + y * rw; coplane = context->priv->PlaneBuffers[1] + y * rw; cgplane = context->priv->PlaneBuffers[2] + y * rw; CopyMemory(yplane, yplane - rw, rw); CopyMemory(coplane, coplane - rw, rw); CopyMemory(cgplane, cgplane - rw, rw); } return TRUE; }
{'added': [(54, ''), (91, 'static BOOL nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (108, ''), (109, '\tif (!context || data || (scanline == 0))'), (110, '\t\treturn FALSE;'), (111, ''), (116, '\tif (context->priv->PlaneBuffersLength < rw * scanline)'), (117, '\t\treturn FALSE;'), (118, ''), (119, '\tif (rw < scanline * 2)'), (120, '\t\treturn FALSE;'), (121, ''), (256, ''), (257, '\treturn TRUE;'), (260, 'static BOOL nsc_encode_subsampling(NSC_CONTEXT* context)'), (266, ''), (267, '\tif (!context)'), (268, '\t\treturn FALSE;'), (269, ''), (273, '\tif (tempHeight == 0)'), (274, '\t\treturn FALSE;'), (275, ''), (276, '\tif (tempWidth > context->priv->PlaneBuffersLength / tempHeight)'), (277, '\t\treturn FALSE;'), (278, ''), (281, '\t\tBYTE* co_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (282, '\t\tBYTE* cg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (283, '\t\tconst INT8* co_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (284, '\t\tconst INT8* co_src1 = co_src0 + tempWidth;'), (285, '\t\tconst INT8* cg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (286, '\t\tconst INT8* cg_src1 = cg_src0 + tempWidth;'), (300, ''), (301, '\treturn TRUE;'), (304, 'BOOL nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (306, '\tif (!context || !bmpdata || (rowstride == 0))'), (307, '\t\treturn FALSE;'), (308, ''), (309, '\tif (!nsc_encode_argb_to_aycocg(context, bmpdata, rowstride))'), (310, '\t\treturn FALSE;'), (314, '\t\tif (!nsc_encode_subsampling(context))'), (315, '\t\t\treturn FALSE;'), (317, ''), (318, '\treturn TRUE;'), (321, 'static UINT32 nsc_rle_encode(const BYTE* in, BYTE* out, UINT32 originalSize)')], 'deleted': [(90, 'static void nsc_encode_argb_to_aycocg(NSC_CONTEXT* context, const BYTE* data,'), (247, 'static void nsc_encode_subsampling(NSC_CONTEXT* context)'), (251, '\tBYTE* co_dst;'), (252, '\tBYTE* cg_dst;'), (253, '\tINT8* co_src0;'), (254, '\tINT8* co_src1;'), (255, '\tINT8* cg_src0;'), (256, '\tINT8* cg_src1;'), (264, '\t\tco_dst = context->priv->PlaneBuffers[1] + y * (tempWidth >> 1);'), (265, '\t\tcg_dst = context->priv->PlaneBuffers[2] + y * (tempWidth >> 1);'), (266, '\t\tco_src0 = (INT8*) context->priv->PlaneBuffers[1] + (y << 1) * tempWidth;'), (267, '\t\tco_src1 = co_src0 + tempWidth;'), (268, '\t\tcg_src0 = (INT8*) context->priv->PlaneBuffers[2] + (y << 1) * tempWidth;'), (269, '\t\tcg_src1 = cg_src0 + tempWidth;'), (285, 'void nsc_encode(NSC_CONTEXT* context, const BYTE* bmpdata, UINT32 rowstride)'), (287, '\tnsc_encode_argb_to_aycocg(context, bmpdata, rowstride);'), (291, '\t\tnsc_encode_subsampling(context);'), (295, 'static UINT32 nsc_rle_encode(BYTE* in, BYTE* out, UINT32 originalSize)')]}
num_lines_added: 44
num_lines_deleted: 18
num_lines_in_file: 513
num_tokens_in_file: 3,961
num_lines_in_method: 137
num_tokens_in_method: 1,107
method_complexity: 19
repo: https://github.com/FreeRDP/FreeRDP
cve_id: CVE-2018-8788
cwe_id: CWE-787
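The CWE-787 change in this record converts void encoders into BOOL-returning functions that validate their inputs and the plane-buffer capacity before any row is written. A compact illustration of that guard pattern under assumed names (encode_rows and plane_len are not FreeRDP's API; this is a sketch of the idea, not the actual patch):

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* Refuse to encode unless the destination plane can hold every row.
     * Dividing avoids the integer overflow a width * height product
     * could hit on hostile dimensions. */
    static bool encode_rows(unsigned char *plane, size_t plane_len,
                            const unsigned char *src,
                            size_t width, size_t height)
    {
        if (!plane || !src || width == 0 || height == 0)
            return false;
        if (plane_len / height < width)
            return false;
        for (size_t y = 0; y < height; y++)  /* every row write in bounds */
            memcpy(&plane[y * width], &src[y * width], width);
        return true;
    }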
id: 2,454
file_name: HexOutStream.cxx
programming_language: C++
method_name: HexOutStream::HexOutStream
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #include <rdr/HexOutStream.h> #include <rdr/Exception.h> using namespace rdr; const int DEFAULT_BUF_LEN = 16384; static inline int min(int a, int b) {return a<b ? a : b;} HexOutStream::HexOutStream(OutStream& os, int buflen) : out_stream(os), offset(0), bufSize(buflen ? buflen : DEFAULT_BUF_LEN) { if (bufSize % 2) bufSize--; ptr = start = new U8[bufSize]; end = start + bufSize; } HexOutStream::~HexOutStream() { delete [] start; } char HexOutStream::intToHex(int i) { if ((i>=0) && (i<=9)) return '0'+i; else if ((i>=10) && (i<=15)) return 'a'+(i-10); else throw rdr::Exception("intToHex failed"); } char* HexOutStream::binToHexStr(const char* data, int length) { char* buffer = new char[length*2+1]; for (int i=0; i<length; i++) { buffer[i*2] = intToHex((data[i] >> 4) & 15); buffer[i*2+1] = intToHex((data[i] & 15)); if (!buffer[i*2] || !buffer[i*2+1]) { delete [] buffer; return 0; } } buffer[length*2] = 0; return buffer; } void HexOutStream::writeBuffer() { U8* pos = start; while (pos != ptr) { out_stream.check(2); U8* optr = out_stream.getptr(); U8* oend = out_stream.getend(); int length = min(ptr-pos, (oend-optr)/2); for (int i=0; i<length; i++) { optr[i*2] = intToHex((pos[i] >> 4) & 0xf); optr[i*2+1] = intToHex(pos[i] & 0xf); } out_stream.setptr(optr + length*2); pos += length; } offset += ptr - start; ptr = start; } int HexOutStream::length() { return offset + ptr - start; } void HexOutStream::flush() { writeBuffer(); out_stream.flush(); } int HexOutStream::overrun(int itemSize, int nItems) { if (itemSize > bufSize) throw Exception("HexOutStream overrun: max itemSize exceeded"); writeBuffer(); if (itemSize * nItems > end - ptr) nItems = (end - ptr) / itemSize; return nItems; }
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #include <rdr/HexOutStream.h> #include <rdr/Exception.h> using namespace rdr; const int DEFAULT_BUF_LEN = 16384; static inline size_t min(size_t a, size_t b) {return a<b ? a : b;} HexOutStream::HexOutStream(OutStream& os, size_t buflen) : out_stream(os), offset(0), bufSize(buflen ? buflen : DEFAULT_BUF_LEN) { if (bufSize % 2) bufSize--; ptr = start = new U8[bufSize]; end = start + bufSize; } HexOutStream::~HexOutStream() { delete [] start; } char HexOutStream::intToHex(int i) { if ((i>=0) && (i<=9)) return '0'+i; else if ((i>=10) && (i<=15)) return 'a'+(i-10); else throw rdr::Exception("intToHex failed"); } char* HexOutStream::binToHexStr(const char* data, size_t length) { char* buffer = new char[length*2+1]; for (size_t i=0; i<length; i++) { buffer[i*2] = intToHex((data[i] >> 4) & 15); buffer[i*2+1] = intToHex((data[i] & 15)); if (!buffer[i*2] || !buffer[i*2+1]) { delete [] buffer; return 0; } } buffer[length*2] = 0; return buffer; } void HexOutStream::writeBuffer() { U8* pos = start; while (pos != ptr) { out_stream.check(2); U8* optr = out_stream.getptr(); U8* oend = out_stream.getend(); size_t length = min(ptr-pos, (oend-optr)/2); for (size_t i=0; i<length; i++) { optr[i*2] = intToHex((pos[i] >> 4) & 0xf); optr[i*2+1] = intToHex(pos[i] & 0xf); } out_stream.setptr(optr + length*2); pos += length; } offset += ptr - start; ptr = start; } size_t HexOutStream::length() { return offset + ptr - start; } void HexOutStream::flush() { writeBuffer(); out_stream.flush(); } size_t HexOutStream::overrun(size_t itemSize, size_t nItems) { if (itemSize > bufSize) throw Exception("HexOutStream overrun: max itemSize exceeded"); writeBuffer(); if (itemSize * nItems > (size_t)(end - ptr)) nItems = (end - ptr) / itemSize; return nItems; }
HexOutStream::HexOutStream(OutStream& os, int buflen) : out_stream(os), offset(0), bufSize(buflen ? buflen : DEFAULT_BUF_LEN) { if (bufSize % 2) bufSize--; ptr = start = new U8[bufSize]; end = start + bufSize; }
HexOutStream::HexOutStream(OutStream& os, size_t buflen) : out_stream(os), offset(0), bufSize(buflen ? buflen : DEFAULT_BUF_LEN) { if (bufSize % 2) bufSize--; ptr = start = new U8[bufSize]; end = start + bufSize; }
{'added': [(26, 'static inline size_t min(size_t a, size_t b) {return a<b ? a : b;}'), (28, 'HexOutStream::HexOutStream(OutStream& os, size_t buflen)'), (51, 'char* HexOutStream::binToHexStr(const char* data, size_t length) {'), (53, ' for (size_t i=0; i<length; i++) {'), (73, ' size_t length = min(ptr-pos, (oend-optr)/2);'), (75, ' for (size_t i=0; i<length; i++) {'), (87, 'size_t HexOutStream::length()'), (98, 'size_t'), (99, 'HexOutStream::overrun(size_t itemSize, size_t nItems) {'), (105, ' if (itemSize * nItems > (size_t)(end - ptr))')], 'deleted': [(26, 'static inline int min(int a, int b) {return a<b ? a : b;}'), (28, 'HexOutStream::HexOutStream(OutStream& os, int buflen)'), (51, 'char* HexOutStream::binToHexStr(const char* data, int length) {'), (53, ' for (int i=0; i<length; i++) {'), (73, ' int length = min(ptr-pos, (oend-optr)/2);'), (75, ' for (int i=0; i<length; i++) {'), (87, 'int HexOutStream::length()'), (98, 'int'), (99, 'HexOutStream::overrun(int itemSize, int nItems) {'), (105, ' if (itemSize * nItems > end - ptr)')]}
num_lines_added: 10
num_lines_deleted: 10
num_lines_in_file: 73
num_tokens_in_file: 537
num_lines_in_method: 8
num_tokens_in_method: 57
method_complexity: 2
repo: https://github.com/CendioOssman/tigervnc
cve_id: CVE-2019-15694
cwe_id: CWE-787
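The TigerVNC patch in this record is mostly an int-to-size_t migration, and the decisive line is the overrun clamp: with int arithmetic, itemSize * nItems can wrap negative, so the comparison against end - ptr never fires and the clamp is skipped. A self-contained sketch of the safe form (clamp_items and buf_avail are made-up names for illustration):

    #include <stddef.h>

    /* Clamp a request of n_items items of item_size bytes to what the
     * buffer can hold; comparing via division keeps even size_t
     * arithmetic from wrapping. */
    static size_t clamp_items(size_t item_size, size_t n_items,
                              size_t buf_avail)
    {
        if (item_size != 0 && n_items > buf_avail / item_size)
            n_items = buf_avail / item_size;
        return n_items;
    }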
id: 3,026
file_name: cipso_ipv4.c
programming_language: C
method_name: cipso_v4_sock_getattr
/* * CIPSO - Commercial IP Security Option * * This is an implementation of the CIPSO 2.2 protocol as specified in * draft-ietf-cipso-ipsecurity-01.txt with additional tag types as found in * FIPS-188. While CIPSO never became a full IETF RFC standard many vendors * have chosen to adopt the protocol and over the years it has become a * de-facto standard for labeled networking. * * The CIPSO draft specification can be found in the kernel's Documentation * directory as well as the following URL: * http://tools.ietf.org/id/draft-ietf-cipso-ipsecurity-01.txt * The FIPS-188 specification can be found at the following URL: * http://www.itl.nist.gov/fipspubs/fip188.htm * * Author: Paul Moore <paul.moore@hp.com> * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2006, 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/jhash.h> #include <linux/audit.h> #include <linux/slab.h> #include <net/ip.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <asm/atomic.h> #include <asm/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ /* XXX - This currently assumes a minimal number of different DOIs in use, * if in practice there are a lot of different DOIs this list should * probably be turned into a hash table or something similar so we * can do quick lookups. */ static DEFINE_SPINLOCK(cipso_v4_doi_list_lock); static LIST_HEAD(cipso_v4_doi_list); /* Label mapping cache */ int cipso_v4_cache_enabled = 1; int cipso_v4_cache_bucketsize = 10; #define CIPSO_V4_CACHE_BUCKETBITS 7 #define CIPSO_V4_CACHE_BUCKETS (1 << CIPSO_V4_CACHE_BUCKETBITS) #define CIPSO_V4_CACHE_REORDERLIMIT 10 struct cipso_v4_map_cache_bkt { spinlock_t lock; u32 size; struct list_head list; }; struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; size_t key_len; struct netlbl_lsm_cache *lsm_data; u32 activity; struct list_head list; }; static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; int cipso_v4_rbm_strictvalid = 1; /* * Protocol Constants */ /* Maximum size of the CIPSO IP option, derived from the fact that the maximum * IPv4 header size is 60 bytes and the base IPv4 header is 20 bytes long. */ #define CIPSO_V4_OPT_LEN_MAX 40 /* Length of the base CIPSO option, this includes the option type (1 byte), the * option length (1 byte), and the DOI (4 bytes). */ #define CIPSO_V4_HDR_LEN 6 /* Base length of the restrictive category bitmap tag (tag #1). */ #define CIPSO_V4_TAG_RBM_BLEN 4 /* Base length of the enumerated category tag (tag #2). */ #define CIPSO_V4_TAG_ENUM_BLEN 4 /* Base length of the ranged categories bitmap tag (tag #5). 
*/ #define CIPSO_V4_TAG_RNG_BLEN 4 /* The maximum number of category ranges permitted in the ranged category tag * (tag #5). You may note that the IETF draft states that the maximum number * of category ranges is 7, but if the low end of the last category range is * zero then it is possible to fit 8 category ranges because the zero should * be omitted. */ #define CIPSO_V4_TAG_RNG_CAT_MAX 8 /* Base length of the local tag (non-standard tag). * Tag definition (may change between kernel versions) * * 0 8 16 24 32 * +----------+----------+----------+----------+ * | 10000000 | 00000110 | 32-bit secid value | * +----------+----------+----------+----------+ * | in (host byte order)| * +----------+----------+ * */ #define CIPSO_V4_TAG_LOC_BLEN 6 /* * Helper Functions */ /** * cipso_v4_bitmap_walk - Walk a bitmap looking for a bit * @bitmap: the bitmap * @bitmap_len: length in bits * @offset: starting offset * @state: if non-zero, look for a set (1) bit else look for a cleared (0) bit * * Description: * Starting at @offset, walk the bitmap from left to right until either the * desired bit is found or we reach the end. Return the bit offset, -1 if * not found, or -2 if error. */ static int cipso_v4_bitmap_walk(const unsigned char *bitmap, u32 bitmap_len, u32 offset, u8 state) { u32 bit_spot; u32 byte_offset; unsigned char bitmask; unsigned char byte; /* gcc always rounds to zero when doing integer division */ byte_offset = offset / 8; byte = bitmap[byte_offset]; bit_spot = offset; bitmask = 0x80 >> (offset % 8); while (bit_spot < bitmap_len) { if ((state && (byte & bitmask) == bitmask) || (state == 0 && (byte & bitmask) == 0)) return bit_spot; bit_spot++; bitmask >>= 1; if (bitmask == 0) { byte = bitmap[++byte_offset]; bitmask = 0x80; } } return -1; } /** * cipso_v4_bitmap_setbit - Sets a single bit in a bitmap * @bitmap: the bitmap * @bit: the bit * @state: if non-zero, set the bit (1) else clear the bit (0) * * Description: * Set a single bit in the bitmask. Returns zero on success, negative values * on error. */ static void cipso_v4_bitmap_setbit(unsigned char *bitmap, u32 bit, u8 state) { u32 byte_spot; u8 bitmask; /* gcc always rounds to zero when doing integer division */ byte_spot = bit / 8; bitmask = 0x80 >> (bit % 8); if (state) bitmap[byte_spot] |= bitmask; else bitmap[byte_spot] &= ~bitmask; } /** * cipso_v4_cache_entry_free - Frees a cache entry * @entry: the entry to free * * Description: * This function frees the memory associated with a cache entry including the * LSM cache data if there are no longer any users, i.e. reference count == 0. * */ static void cipso_v4_cache_entry_free(struct cipso_v4_map_cache_entry *entry) { if (entry->lsm_data) netlbl_secattr_cache_free(entry->lsm_data); kfree(entry->key); kfree(entry); } /** * cipso_v4_map_cache_hash - Hashing function for the CIPSO cache * @key: the hash key * @key_len: the length of the key in bytes * * Description: * The CIPSO tag hashing function. Returns a 32-bit hash value. * */ static u32 cipso_v4_map_cache_hash(const unsigned char *key, u32 key_len) { return jhash(key, key_len, 0); } /* * Label Mapping Cache Functions */ /** * cipso_v4_cache_init - Initialize the CIPSO cache * * Description: * Initializes the CIPSO label mapping cache, this function should be called * before any of the other functions defined in this file. Returns zero on * success, negative values on error. 
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
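 *
 * Illustration only: a minimal pass-through DOI definition might be built
 * and registered roughly as below (error handling omitted; the DOI value 3
 * and the audit_info pointer are assumptions of this sketch, and kzalloc()
 * leaves the unused tag slots at CIPSO_V4_TAG_INVALID):
 *
 *	struct cipso_v4_doi *doi_def;
 *
 *	doi_def = kzalloc(sizeof(*doi_def), GFP_KERNEL);
 *	doi_def->doi = 3;
 *	doi_def->type = CIPSO_V4_MAP_PASS;
 *	doi_def->tags[0] = CIPSO_V4_TAG_RBITMAP;
 *	ret = cipso_v4_doi_add(doi_def, audit_info);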
 *
 */
int cipso_v4_doi_add(struct cipso_v4_doi *doi_def,
		     struct netlbl_audit *audit_info)
{
	int ret_val = -EINVAL;
	u32 iter;
	u32 doi;
	u32 doi_type;
	struct audit_buffer *audit_buf;

	/* verify the DOI pointer before dereferencing it; a NULL definition
	 * can not be described in the audit record so return directly */
	if (doi_def == NULL)
		return -EINVAL;

	doi = doi_def->doi;
	doi_type = doi_def->type;

	if (doi_def->doi == CIPSO_V4_DOI_UNKNOWN)
		goto doi_add_return;
	for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) {
		switch (doi_def->tags[iter]) {
		case CIPSO_V4_TAG_RBITMAP:
			break;
		case CIPSO_V4_TAG_RANGE:
		case CIPSO_V4_TAG_ENUM:
			if (doi_def->type != CIPSO_V4_MAP_PASS)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_LOCAL:
			if (doi_def->type != CIPSO_V4_MAP_LOCAL)
				goto doi_add_return;
			break;
		case CIPSO_V4_TAG_INVALID:
			if (iter == 0)
				goto doi_add_return;
			break;
		default:
			goto doi_add_return;
		}
	}

	atomic_set(&doi_def->refcount, 1);

	spin_lock(&cipso_v4_doi_list_lock);
	if (cipso_v4_doi_search(doi_def->doi) != NULL) {
		spin_unlock(&cipso_v4_doi_list_lock);
		ret_val = -EEXIST;
		goto doi_add_return;
	}
	list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list);
	spin_unlock(&cipso_v4_doi_list_lock);
	ret_val = 0;

doi_add_return:
	audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info);
	if (audit_buf != NULL) {
		const char *type_str;
		switch (doi_type) {
		case CIPSO_V4_MAP_TRANS:
			type_str = "trans";
			break;
		case CIPSO_V4_MAP_PASS:
			type_str = "pass";
			break;
		case CIPSO_V4_MAP_LOCAL:
			type_str = "local";
			break;
		default:
			type_str = "(unknown)";
		}
		audit_log_format(audit_buf,
				 " cipso_doi=%u cipso_type=%s res=%u",
				 doi, type_str, ret_val == 0 ? 1 : 0);
		audit_log_end(audit_buf);
	}

	return ret_val;
}

/**
 * cipso_v4_doi_free - Frees a DOI definition
 * @doi_def: the DOI definition
 *
 * Description:
 * This function frees all of the memory associated with a DOI definition.
 *
 */
void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
{
	if (doi_def == NULL)
		return;

	switch (doi_def->type) {
	case CIPSO_V4_MAP_TRANS:
		kfree(doi_def->map.std->lvl.cipso);
		kfree(doi_def->map.std->lvl.local);
		kfree(doi_def->map.std->cat.cipso);
		kfree(doi_def->map.std->cat.local);
		break;
	}
	kfree(doi_def);
}

/**
 * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer
 * @entry: the entry's RCU field
 *
 * Description:
 * This function is designed to be used as a callback to the call_rcu()
 * function so that the memory allocated to the DOI definition can be released
 * safely.
 *
 */
static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
{
	struct cipso_v4_doi *doi_def;

	doi_def = container_of(entry, struct cipso_v4_doi, rcu);
	cipso_v4_doi_free(doi_def);
}

/**
 * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine
 * @doi: the DOI value
 * @audit_info: NetLabel audit information
 *
 * Description:
 * Removes a DOI definition from the CIPSO engine. The NetLabel routines will
 * be called to release their own LSM domain mappings as well as our own
 * domain list. Returns zero on success and negative values on failure.
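 *
 * For example, a management path could drop the definition registered in
 * the sketch above with cipso_v4_doi_remove(3, audit_info); a return value
 * of -EBUSY means the DOI is still referenced and was left in place.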
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
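 *
 * As a worked example (values invented for illustration): with a
 * CIPSO_V4_MAP_TRANS table where lvl.cipso[5] == 2, network level 5 is
 * considered understood, while a slot still set to CIPSO_V4_INV_LVL makes
 * the function return -EFAULT.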
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
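 *
 * For example, if the highest translated category maps to bit 11 then bits
 * 0 through 11 span two octets and the function returns 2, i.e. (11 + 1)
 * divided by 8 and rounded up.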
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
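 *
 * The enumerated tag carries the categories as a strictly increasing list
 * of 16-bit network byte order values; e.g. the category set {1, 5, 9} is
 * encoded as the six octets 0x00 0x01 0x00 0x05 0x00 0x09.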
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
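 *
 * Ranges are carried as "top, bottom" pairs of 16-bit values sorted in
 * descending order, and a trailing bottom of zero is omitted; e.g. the
 * category ranges 10-12 and 0-3 could appear on the wire as the values
 * 12, 10, 3 with the final zero dropped.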
 *
 */
static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def,
				      const unsigned char *rngcat,
				      u32 rngcat_len)
{
	u16 cat_high;
	u16 cat_low;
	u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1;
	u32 iter;

	if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01)
		return -EFAULT;

	for (iter = 0; iter < rngcat_len; iter += 4) {
		cat_high = get_unaligned_be16(&rngcat[iter]);
		if ((iter + 4) <= rngcat_len)
			cat_low = get_unaligned_be16(&rngcat[iter + 2]);
		else
			cat_low = 0;

		if (cat_high > cat_prev)
			return -EFAULT;

		cat_prev = cat_low;
	}

	return 0;
}

/**
 * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @net_cat: the zero'd out category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO category list in bytes
 *
 * Description:
 * Perform a label mapping to translate a local MLS category bitmap to the
 * correct CIPSO category list using the given DOI definition. Returns the
 * size in bytes of the network category bitmap on success, negative values
 * otherwise.
 *
 */
static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def,
				     const struct netlbl_lsm_secattr *secattr,
				     unsigned char *net_cat,
				     u32 net_cat_len)
{
	int iter = -1;
	u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2];
	u32 array_cnt = 0;
	u32 cat_size = 0;

	/* make sure we don't overflow the 'array[]' variable */
	if (net_cat_len >
	    (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN))
		return -ENOSPC;

	for (;;) {
		iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat,
						  iter + 1);
		if (iter < 0)
			break;
		cat_size += (iter == 0 ? 0 : sizeof(u16));
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;

		iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat,
						      iter);
		if (iter < 0)
			return -EFAULT;
		cat_size += sizeof(u16);
		if (cat_size > net_cat_len)
			return -ENOSPC;
		array[array_cnt++] = iter;
	}

	for (iter = 0; array_cnt > 0;) {
		*((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]);
		iter += 2;
		array_cnt--;
		if (array[array_cnt] != 0) {
			*((__be16 *)&net_cat[iter]) = htons(array[array_cnt]);
			iter += 2;
		}
	}

	return cat_size;
}

/**
 * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host
 * @doi_def: the DOI definition
 * @net_cat: the category list in network/CIPSO format
 * @net_cat_len: the length of the CIPSO bitmap in bytes
 * @secattr: the security attributes
 *
 * Description:
 * Perform a label mapping to translate a CIPSO category list to the correct
 * local MLS category bitmap using the given DOI definition. Returns zero on
 * success, negative values on failure.
 *
 */
static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def,
				     const unsigned char *net_cat,
				     u32 net_cat_len,
				     struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u32 net_iter;
	u16 cat_low;
	u16 cat_high;

	for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) {
		cat_high = get_unaligned_be16(&net_cat[net_iter]);
		if ((net_iter + 4) <= net_cat_len)
			cat_low = get_unaligned_be16(&net_cat[net_iter + 2]);
		else
			cat_low = 0;

		ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat,
						       cat_low,
						       cat_high,
						       GFP_ATOMIC);
		if (ret_val != 0)
			return ret_val;
	}

	return 0;
}

/*
 * Protocol Handling Functions
 */

/**
 * cipso_v4_gentag_hdr - Generate a CIPSO option header
 * @doi_def: the DOI definition
 * @buf: the CIPSO option buffer
 * @len: the total tag length in bytes, not including this header
 *
 * Description:
 * Write a CIPSO header into the beginning of @buf.
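 *
 * For example, with DOI 3 and a 10 byte tag the option starts with the six
 * octets 0x86 0x10 0x00 0x00 0x00 0x03 (type IPOPT_CIPSO, total length 16,
 * then the DOI in network byte order).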
 *
 */
static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def,
				unsigned char *buf,
				u32 len)
{
	buf[0] = IPOPT_CIPSO;
	buf[1] = CIPSO_V4_HDR_LEN + len;
	*(__be32 *)&buf[2] = htonl(doi_def->doi);
}

/**
 * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The
 * actual buffer length may be larger than the indicated size due to
 * translation between host and network category bitmaps. Returns the size of
 * the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0)
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rbm_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		/* This will send packets using the "optimized" format when
		 * possible as specified in section 3.4.2.6 of the
		 * CIPSO draft. */
		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
			tag_len = 14;
		else
			tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RBITMAP;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security
 * attributes in @secattr. Return zero on success, negative values on
 * failure.
 *
 */
static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	if (tag_len > 4) {
		secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the
 * size of the tag on success, negative values on failure.
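 *
 * As an illustration, level 1 with the categories {2, 7} would produce the
 * eight tag octets 0x02 0x08 0x00 0x01 0x00 0x02 0x00 0x07 (tag type, tag
 * length, alignment octet, level, then the category list).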
 *
 */
static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def,
				const struct netlbl_lsm_secattr *secattr,
				unsigned char *buffer,
				u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_enum_hton(doi_def,
						     secattr,
						     &buffer[4],
						     buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_ENUM;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO enumerated tag (tag type #2) and return the security
 * attributes in @secattr. Return zero on success, negative values on
 * failure.
 *
 */
static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def,
				  const unsigned char *tag,
				  struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	if (tag_len > 4) {
		secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_enum_ntoh(doi_def,
						     &tag[4],
						     tag_len - 4,
						     secattr);
		if (ret_val != 0) {
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the ranged tag, tag type #5. Returns the
 * size of the tag on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	int ret_val;
	u32 tag_len;
	u32 level;

	if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL))
		return -EPERM;

	ret_val = cipso_v4_map_lvl_hton(doi_def,
					secattr->attr.mls.lvl,
					&level);
	if (ret_val != 0)
		return ret_val;

	if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
		ret_val = cipso_v4_map_cat_rng_hton(doi_def,
						    secattr,
						    &buffer[4],
						    buffer_len - 4);
		if (ret_val < 0)
			return ret_val;

		tag_len = 4 + ret_val;
	} else
		tag_len = 4;

	buffer[0] = CIPSO_V4_TAG_RANGE;
	buffer[1] = tag_len;
	buffer[3] = level;

	return tag_len;
}

/**
 * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO ranged tag (tag type #5) and return the security attributes
 * in @secattr. Return zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	int ret_val;
	u8 tag_len = tag[1];
	u32 level;

	ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level);
	if (ret_val != 0)
		return ret_val;
	secattr->attr.mls.lvl = level;
	secattr->flags |= NETLBL_SECATTR_MLS_LVL;

	if (tag_len > 4) {
		secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
		if (secattr->attr.mls.cat == NULL)
			return -ENOMEM;

		ret_val = cipso_v4_map_cat_rng_ntoh(doi_def,
						    &tag[4],
						    tag_len - 4,
						    secattr);
		if (ret_val != 0) {
			netlbl_secattr_catmap_free(secattr->attr.mls.cat);
			return ret_val;
		}

		secattr->flags |= NETLBL_SECATTR_MLS_CAT;
	}

	return 0;
}

/**
 * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard)
 * @doi_def: the DOI definition
 * @secattr: the security attributes
 * @buffer: the option buffer
 * @buffer_len: length of buffer in bytes
 *
 * Description:
 * Generate a CIPSO option using the local tag. Returns the size of the tag
 * on success, negative values on failure.
 *
 */
static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def,
			       const struct netlbl_lsm_secattr *secattr,
			       unsigned char *buffer,
			       u32 buffer_len)
{
	if (!(secattr->flags & NETLBL_SECATTR_SECID))
		return -EPERM;

	buffer[0] = CIPSO_V4_TAG_LOCAL;
	buffer[1] = CIPSO_V4_TAG_LOC_BLEN;
	*(u32 *)&buffer[2] = secattr->attr.secid;

	return CIPSO_V4_TAG_LOC_BLEN;
}

/**
 * cipso_v4_parsetag_loc - Parse a CIPSO local tag
 * @doi_def: the DOI definition
 * @tag: the CIPSO tag
 * @secattr: the security attributes
 *
 * Description:
 * Parse a CIPSO local tag and return the security attributes in @secattr.
 * Return zero on success, negative values on failure.
 *
 */
static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
				 const unsigned char *tag,
				 struct netlbl_lsm_secattr *secattr)
{
	secattr->attr.secid = *(u32 *)&tag[2];
	secattr->flags |= NETLBL_SECATTR_SECID;

	return 0;
}

/**
 * cipso_v4_validate - Validate a CIPSO option
 * @skb: the packet (may be NULL when validating userspace supplied options)
 * @option: the start of the option, on error it is set to point to the error
 *
 * Description:
 * This routine is called to validate a CIPSO option, it checks all of the
 * fields to ensure that they are at least valid, see the draft snippet below
 * for details. If the option is valid then a zero value is returned and
 * the value of @option is unchanged. If the option is invalid then a
 * non-zero value is returned and @option is adjusted to point to the
 * offending portion of the option. From the IETF draft ...
 *
 * "If any field within the CIPSO options, such as the DOI identifier, is not
 * recognized the IP datagram is discarded and an ICMP 'parameter problem'
 * (type 12) is generated and returned. The ICMP code field is set to 'bad
 * parameter' (code 0) and the pointer is set to the start of the CIPSO field
 * that is unrecognized."
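 *
 * In terms of this implementation: an option shorter than the 8 octet
 * minimum is reported with @option left pointing at the length octet
 * (offset 1), and an unknown DOI is reported at the start of the DOI field
 * (offset 2).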
 *
 */
int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
{
	unsigned char *opt = *option;
	unsigned char *tag;
	unsigned char opt_iter;
	unsigned char err_offset = 0;
	u8 opt_len;
	u8 tag_len;
	struct cipso_v4_doi *doi_def = NULL;
	u32 tag_iter;

	/* caller already checks for length values that are too large */
	opt_len = opt[1];
	if (opt_len < 8) {
		err_offset = 1;
		goto validate_return;
	}

	rcu_read_lock();
	doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2]));
	if (doi_def == NULL) {
		err_offset = 2;
		goto validate_return_locked;
	}

	opt_iter = CIPSO_V4_HDR_LEN;
	tag = opt + opt_iter;
	while (opt_iter < opt_len) {
		for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];)
			if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID ||
			    ++tag_iter == CIPSO_V4_TAG_MAXCNT) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}

		tag_len = tag[1];
		if (tag_len > (opt_len - opt_iter)) {
			err_offset = opt_iter + 1;
			goto validate_return_locked;
		}

		switch (tag[0]) {
		case CIPSO_V4_TAG_RBITMAP:
			if (tag_len < CIPSO_V4_TAG_RBM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			/* We are already going to do all the verification
			 * necessary at the socket layer so from our point of
			 * view it is safe to turn these checks off (and less
			 * work), however, the CIPSO draft says we should do
			 * all the CIPSO validations here but it doesn't
			 * really specify _exactly_ what we need to validate
			 * ... so, just make it a sysctl tunable. */
			if (cipso_v4_rbm_strictvalid) {
				if (cipso_v4_map_lvl_valid(doi_def,
							   tag[3]) < 0) {
					err_offset = opt_iter + 3;
					goto validate_return_locked;
				}
				if (tag_len > CIPSO_V4_TAG_RBM_BLEN &&
				    cipso_v4_map_cat_rbm_valid(doi_def,
							    &tag[4],
							    tag_len - 4) < 0) {
					err_offset = opt_iter + 4;
					goto validate_return_locked;
				}
			}
			break;
		case CIPSO_V4_TAG_ENUM:
			if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_ENUM_BLEN &&
			    cipso_v4_map_cat_enum_valid(doi_def,
							&tag[4],
							tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_RANGE:
			if (tag_len < CIPSO_V4_TAG_RNG_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}

			if (cipso_v4_map_lvl_valid(doi_def,
						   tag[3]) < 0) {
				err_offset = opt_iter + 3;
				goto validate_return_locked;
			}
			if (tag_len > CIPSO_V4_TAG_RNG_BLEN &&
			    cipso_v4_map_cat_rng_valid(doi_def,
						       &tag[4],
						       tag_len - 4) < 0) {
				err_offset = opt_iter + 4;
				goto validate_return_locked;
			}
			break;
		case CIPSO_V4_TAG_LOCAL:
			/* This is a non-standard tag that we only allow for
			 * local connections, so if the incoming interface is
			 * not the loopback device drop the packet. Further,
			 * there is no legitimate reason for setting this from
			 * userspace so reject it if skb is NULL. */
			if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
				err_offset = opt_iter;
				goto validate_return_locked;
			}
			if (tag_len != CIPSO_V4_TAG_LOC_BLEN) {
				err_offset = opt_iter + 1;
				goto validate_return_locked;
			}
			break;
		default:
			err_offset = opt_iter;
			goto validate_return_locked;
		}

		tag += tag_len;
		opt_iter += tag_len;
	}

validate_return_locked:
	rcu_read_unlock();
validate_return:
	*option = opt + err_offset;
	return err_offset;
}

/**
 * cipso_v4_error - Send the correct response for a bad packet
 * @skb: the packet
 * @error: the error code
 * @gateway: CIPSO gateway flag
 *
 * Description:
 * Based on the error code given in @error, send an ICMP error message back to
 * the originating host. From the IETF draft ...
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->__data, buf, buf_len); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (sk_inet->opt) sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; sk_conn->icsk_ext_hdr_len += opt->optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } opt = xchg(&sk_inet->opt, opt); kfree(opt); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
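	 * The option length is rounded up to a multiple of four below
	 * because the IP options area is sized in 32-bit words; for example
	 * a 14 byte CIPSO option becomes a 16 byte, zero padded options
	 * block since (14 + 3) & ~3 == 16.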
	 */
	opt_len = (buf_len + 3) & ~3;
	opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC);
	if (opt == NULL) {
		ret_val = -ENOMEM;
		goto req_setattr_failure;
	}
	memcpy(opt->__data, buf, buf_len);
	opt->optlen = opt_len;
	opt->cipso = sizeof(struct iphdr);
	kfree(buf);
	buf = NULL;

	req_inet = inet_rsk(req);
	opt = xchg(&req_inet->opt, opt);
	kfree(opt);

	return 0;

req_setattr_failure:
	kfree(buf);
	kfree(opt);
	return ret_val;
}

/**
 * cipso_v4_delopt - Delete the CIPSO option from a set of IP options
 * @opt_ptr: IP option pointer
 *
 * Description:
 * Deletes the CIPSO IP option from a set of IP options and makes the necessary
 * adjustments to the IP option structure. Returns zero on success, negative
 * values on failure.
 *
 */
static int cipso_v4_delopt(struct ip_options **opt_ptr)
{
	int hdr_delta = 0;
	struct ip_options *opt = *opt_ptr;

	if (opt->srr || opt->rr || opt->ts || opt->router_alert) {
		u8 cipso_len;
		u8 cipso_off;
		unsigned char *cipso_ptr;
		int iter;
		int optlen_new;

		cipso_off = opt->cipso - sizeof(struct iphdr);
		cipso_ptr = &opt->__data[cipso_off];
		cipso_len = cipso_ptr[1];

		if (opt->srr > opt->cipso)
			opt->srr -= cipso_len;
		if (opt->rr > opt->cipso)
			opt->rr -= cipso_len;
		if (opt->ts > opt->cipso)
			opt->ts -= cipso_len;
		if (opt->router_alert > opt->cipso)
			opt->router_alert -= cipso_len;
		opt->cipso = 0;

		memmove(cipso_ptr, cipso_ptr + cipso_len,
			opt->optlen - cipso_off - cipso_len);

		/* determining the new total option length is tricky because of
		 * the padding necessary, the only thing I can think to do at
		 * this point is walk the options one-by-one, skipping the
		 * padding at the end to determine the actual option size and
		 * from there we can determine the new total option length */
		iter = 0;
		optlen_new = 0;
		while (iter < opt->optlen)
			if (opt->__data[iter] != IPOPT_NOP) {
				iter += opt->__data[iter + 1];
				optlen_new = iter;
			} else
				iter++;
		hdr_delta = opt->optlen;
		opt->optlen = (optlen_new + 3) & ~3;
		hdr_delta -= opt->optlen;
	} else {
		/* only the cipso option was present on the socket so we can
		 * remove the entire option struct */
		*opt_ptr = NULL;
		hdr_delta = opt->optlen;
		kfree(opt);
	}

	return hdr_delta;
}

/**
 * cipso_v4_sock_delattr - Delete the CIPSO option from a socket
 * @sk: the socket
 *
 * Description:
 * Removes the CIPSO option from a socket, if present.
 *
 */
void cipso_v4_sock_delattr(struct sock *sk)
{
	int hdr_delta;
	struct ip_options *opt;
	struct inet_sock *sk_inet;

	sk_inet = inet_sk(sk);
	opt = sk_inet->opt;
	if (opt == NULL || opt->cipso == 0)
		return;

	hdr_delta = cipso_v4_delopt(&sk_inet->opt);
	if (sk_inet->is_icsk && hdr_delta > 0) {
		struct inet_connection_sock *sk_conn = inet_csk(sk);
		sk_conn->icsk_ext_hdr_len -= hdr_delta;
		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
	}
}

/**
 * cipso_v4_req_delattr - Delete the CIPSO option from a request socket
 * @req: the request socket
 *
 * Description:
 * Removes the CIPSO option from a request socket, if present.
 *
 */
void cipso_v4_req_delattr(struct request_sock *req)
{
	struct ip_options *opt;
	struct inet_request_sock *req_inet;

	req_inet = inet_rsk(req);
	opt = req_inet->opt;
	if (opt == NULL || opt->cipso == 0)
		return;

	cipso_v4_delopt(&req_inet->opt);
}

/**
 * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions
 * @cipso: the CIPSO v4 option
 * @secattr: the security attributes
 *
 * Description:
 * Inspect @cipso and return the security attributes in @secattr. Returns zero
 * on success and negative values on failure.
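 *
 * Note that @cipso points at the first octet of the option itself, so
 * cipso[0] is the IPOPT_CIPSO type octet and cipso[1] is the option length
 * used for the cache lookup.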
 *
 */
static int cipso_v4_getattr(const unsigned char *cipso,
			    struct netlbl_lsm_secattr *secattr)
{
	int ret_val = -ENOMSG;
	u32 doi;
	struct cipso_v4_doi *doi_def;

	if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0)
		return 0;

	doi = get_unaligned_be32(&cipso[2]);
	rcu_read_lock();
	doi_def = cipso_v4_doi_search(doi);
	if (doi_def == NULL)
		goto getattr_return;
	/* XXX - This code assumes only one tag per CIPSO option which isn't
	 * really a good assumption to make but since we only support the MAC
	 * tags right now it is a safe assumption. */
	switch (cipso[6]) {
	case CIPSO_V4_TAG_RBITMAP:
		ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_ENUM:
		ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_RANGE:
		ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr);
		break;
	case CIPSO_V4_TAG_LOCAL:
		ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr);
		break;
	}
	if (ret_val == 0)
		secattr->type = NETLBL_NLTYPE_CIPSOV4;

getattr_return:
	rcu_read_unlock();
	return ret_val;
}

/**
 * cipso_v4_sock_getattr - Get the security attributes from a sock
 * @sk: the sock
 * @secattr: the security attributes
 *
 * Description:
 * Query @sk to see if there is a CIPSO option attached to the sock and if
 * there is return the CIPSO security attributes in @secattr. This function
 * requires that @sk be locked, or privately held, but it does not do any
 * locking itself. Returns zero on success and negative values on failure.
 *
 */
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
{
	struct ip_options *opt;

	opt = inet_sk(sk)->opt;
	if (opt == NULL || opt->cipso == 0)
		return -ENOMSG;

	return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),
				secattr);
}

/**
 * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet
 * @skb: the packet
 * @doi_def: the CIPSO DOI to use
 * @secattr: the security attributes
 *
 * Description:
 * Set the CIPSO option on the given packet based on the security attributes.
 * Returns zero on success and negative values on failure.
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
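
/*
 * Informal sketch of how the pieces above fit together (not a strict call
 * graph): on receive ip_options_compile() calls cipso_v4_validate(), the
 * NetLabel core then recovers the security attributes through
 * cipso_v4_skbuff_getattr()/cipso_v4_getattr(), consulting the mapping
 * cache first, and the LSM may call cipso_v4_cache_add() so that later
 * packets carrying the same label are resolved by the cache alone.
 */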
* */ static int cipso_v4_cache_init(void) { u32 iter; cipso_v4_cache = kcalloc(CIPSO_V4_CACHE_BUCKETS, sizeof(struct cipso_v4_map_cache_bkt), GFP_KERNEL); if (cipso_v4_cache == NULL) return -ENOMEM; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_init(&cipso_v4_cache[iter].lock); cipso_v4_cache[iter].size = 0; INIT_LIST_HEAD(&cipso_v4_cache[iter].list); } return 0; } /** * cipso_v4_cache_invalidate - Invalidates the current CIPSO cache * * Description: * Invalidates and frees any entries in the CIPSO cache. Returns zero on * success and negative values on failure. * */ void cipso_v4_cache_invalidate(void) { struct cipso_v4_map_cache_entry *entry, *tmp_entry; u32 iter; for (iter = 0; iter < CIPSO_V4_CACHE_BUCKETS; iter++) { spin_lock_bh(&cipso_v4_cache[iter].lock); list_for_each_entry_safe(entry, tmp_entry, &cipso_v4_cache[iter].list, list) { list_del(&entry->list); cipso_v4_cache_entry_free(entry); } cipso_v4_cache[iter].size = 0; spin_unlock_bh(&cipso_v4_cache[iter].lock); } } /** * cipso_v4_cache_check - Check the CIPSO cache for a label mapping * @key: the buffer to check * @key_len: buffer length in bytes * @secattr: the security attribute struct to use * * Description: * This function checks the cache to see if a label mapping already exists for * the given key. If there is a match then the cache is adjusted and the * @secattr struct is populated with the correct LSM security attributes. The * cache is adjusted in the following manner if the entry is not already the * first in the cache bucket: * * 1. The cache entry's activity counter is incremented * 2. The previous (higher ranking) entry's activity counter is decremented * 3. If the difference between the two activity counters is geater than * CIPSO_V4_CACHE_REORDERLIMIT the two entries are swapped * * Returns zero on success, -ENOENT for a cache miss, and other negative values * on error. * */ static int cipso_v4_cache_check(const unsigned char *key, u32 key_len, struct netlbl_lsm_secattr *secattr) { u32 bkt; struct cipso_v4_map_cache_entry *entry; struct cipso_v4_map_cache_entry *prev_entry = NULL; u32 hash; if (!cipso_v4_cache_enabled) return -ENOENT; hash = cipso_v4_map_cache_hash(key, key_len); bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) { if (entry->hash == hash && entry->key_len == key_len && memcmp(entry->key, key, key_len) == 0) { entry->activity += 1; atomic_inc(&entry->lsm_data->refcount); secattr->cache = entry->lsm_data; secattr->flags |= NETLBL_SECATTR_CACHE; secattr->type = NETLBL_NLTYPE_CIPSOV4; if (prev_entry == NULL) { spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } if (prev_entry->activity > 0) prev_entry->activity -= 1; if (entry->activity > prev_entry->activity && entry->activity - prev_entry->activity > CIPSO_V4_CACHE_REORDERLIMIT) { __list_del(entry->list.prev, entry->list.next); __list_add(&entry->list, prev_entry->list.prev, &prev_entry->list); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; } prev_entry = entry; } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return -ENOENT; } /** * cipso_v4_cache_add - Add an entry to the CIPSO cache * @skb: the packet * @secattr: the packet's security attributes * * Description: * Add a new entry into the CIPSO label mapping cache. Add the new entry to * head of the cache bucket's list, if the cache bucket is out of room remove * the last entry in the list first. It is important to note that there is * currently no checking for duplicate keys. 
Returns zero on success, * negative values on failure. * */ int cipso_v4_cache_add(const struct sk_buff *skb, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; u32 bkt; struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL; unsigned char *cipso_ptr; u32 cipso_ptr_len; if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) return 0; cipso_ptr = CIPSO_V4_OPTPTR(skb); cipso_ptr_len = cipso_ptr[1]; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; entry->key = kmemdup(cipso_ptr, cipso_ptr_len, GFP_ATOMIC); if (entry->key == NULL) { ret_val = -ENOMEM; goto cache_add_failure; } entry->key_len = cipso_ptr_len; entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len); atomic_inc(&secattr->cache->refcount); entry->lsm_data = secattr->cache; bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); spin_lock_bh(&cipso_v4_cache[bkt].lock); if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache[bkt].size += 1; } else { old_entry = list_entry(cipso_v4_cache[bkt].list.prev, struct cipso_v4_map_cache_entry, list); list_del(&old_entry->list); list_add(&entry->list, &cipso_v4_cache[bkt].list); cipso_v4_cache_entry_free(old_entry); } spin_unlock_bh(&cipso_v4_cache[bkt].lock); return 0; cache_add_failure: if (entry) cipso_v4_cache_entry_free(entry); return ret_val; } /* * DOI List Functions */ /** * cipso_v4_doi_search - Searches for a DOI definition * @doi: the DOI to search for * * Description: * Search the DOI definition list for a DOI definition with a DOI value that * matches @doi. The caller is responsible for calling rcu_read_[un]lock(). * Returns a pointer to the DOI definition on success and NULL on failure. */ static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) { struct cipso_v4_doi *iter; list_for_each_entry_rcu(iter, &cipso_v4_doi_list, list) if (iter->doi == doi && atomic_read(&iter->refcount)) return iter; return NULL; } /** * cipso_v4_doi_add - Add a new DOI to the CIPSO protocol engine * @doi_def: the DOI structure * @audit_info: NetLabel audit information * * Description: * The caller defines a new DOI for use by the CIPSO engine and calls this * function to add it to the list of acceptable domains. The caller must * ensure that the mapping table specified in @doi_def->map meets all of the * requirements of the mapping type (see cipso_ipv4.h for details). Returns * zero on success and non-zero on failure. 
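 *
 * A minimal sketch of a pass-through DOI setup (hypothetical values; the
 * caller is assumed to have allocated and zeroed @doi_def beforehand):
 *
 *	doi_def->doi = 16;
 *	doi_def->type = CIPSO_V4_MAP_PASS;
 *	doi_def->tags[0] = CIPSO_V4_TAG_ENUM;
 *	for (iter = 1; iter < CIPSO_V4_TAG_MAXCNT; iter++)
 *		doi_def->tags[iter] = CIPSO_V4_TAG_INVALID;
 *	ret_val = cipso_v4_doi_add(doi_def, audit_info);
 *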
* */ int cipso_v4_doi_add(struct cipso_v4_doi *doi_def, struct netlbl_audit *audit_info) { int ret_val = -EINVAL; u32 iter; u32 doi; u32 doi_type; struct audit_buffer *audit_buf; /* reject a NULL definition before reading any of its fields */ if (doi_def == NULL) return -EINVAL; doi = doi_def->doi; doi_type = doi_def->type; if (doi == CIPSO_V4_DOI_UNKNOWN) goto doi_add_return; for (iter = 0; iter < CIPSO_V4_TAG_MAXCNT; iter++) { switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: break; case CIPSO_V4_TAG_RANGE: case CIPSO_V4_TAG_ENUM: if (doi_def->type != CIPSO_V4_MAP_PASS) goto doi_add_return; break; case CIPSO_V4_TAG_LOCAL: if (doi_def->type != CIPSO_V4_MAP_LOCAL) goto doi_add_return; break; case CIPSO_V4_TAG_INVALID: if (iter == 0) goto doi_add_return; break; default: goto doi_add_return; } } atomic_set(&doi_def->refcount, 1); spin_lock(&cipso_v4_doi_list_lock); if (cipso_v4_doi_search(doi_def->doi) != NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EEXIST; goto doi_add_return; } list_add_tail_rcu(&doi_def->list, &cipso_v4_doi_list); spin_unlock(&cipso_v4_doi_list_lock); ret_val = 0; doi_add_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_ADD, audit_info); if (audit_buf != NULL) { const char *type_str; switch (doi_type) { case CIPSO_V4_MAP_TRANS: type_str = "trans"; break; case CIPSO_V4_MAP_PASS: type_str = "pass"; break; case CIPSO_V4_MAP_LOCAL: type_str = "local"; break; default: type_str = "(unknown)"; } audit_log_format(audit_buf, " cipso_doi=%u cipso_type=%s res=%u", doi, type_str, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_free - Frees a DOI definition * @doi_def: the DOI definition * * Description: * This function frees all of the memory associated with a DOI definition. * */ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; switch (doi_def->type) { case CIPSO_V4_MAP_TRANS: kfree(doi_def->map.std->lvl.cipso); kfree(doi_def->map.std->lvl.local); kfree(doi_def->map.std->cat.cipso); kfree(doi_def->map.std->cat.local); break; } kfree(doi_def); } /** * cipso_v4_doi_free_rcu - Frees a DOI definition via the RCU pointer * @entry: the entry's RCU field * * Description: * This function is designed to be used as a callback to the call_rcu() * function so that the memory allocated to the DOI definition can be released * safely. * */ static void cipso_v4_doi_free_rcu(struct rcu_head *entry) { struct cipso_v4_doi *doi_def; doi_def = container_of(entry, struct cipso_v4_doi, rcu); cipso_v4_doi_free(doi_def); } /** * cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine * @doi: the DOI value * @audit_info: NetLabel audit information * * Description: * Removes a DOI definition from the CIPSO engine. The NetLabel routines will * be called to release their own LSM domain mappings as well as our own * domain list. Returns zero on success and negative values on failure.
* */ int cipso_v4_doi_remove(u32 doi, struct netlbl_audit *audit_info) { int ret_val; struct cipso_v4_doi *doi_def; struct audit_buffer *audit_buf; spin_lock(&cipso_v4_doi_list_lock); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -ENOENT; goto doi_remove_return; } if (!atomic_dec_and_test(&doi_def->refcount)) { spin_unlock(&cipso_v4_doi_list_lock); ret_val = -EBUSY; goto doi_remove_return; } list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); ret_val = 0; doi_remove_return: audit_buf = netlbl_audit_start(AUDIT_MAC_CIPSOV4_DEL, audit_info); if (audit_buf != NULL) { audit_log_format(audit_buf, " cipso_doi=%u res=%u", doi, ret_val == 0 ? 1 : 0); audit_log_end(audit_buf); } return ret_val; } /** * cipso_v4_doi_getdef - Returns a reference to a valid DOI definition * @doi: the DOI value * * Description: * Searches for a valid DOI definition and if one is found it is returned to * the caller. Otherwise NULL is returned. The caller must ensure that * rcu_read_lock() is held while accessing the returned definition and the DOI * definition reference count is decremented when the caller is done. * */ struct cipso_v4_doi *cipso_v4_doi_getdef(u32 doi) { struct cipso_v4_doi *doi_def; rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto doi_getdef_return; if (!atomic_inc_not_zero(&doi_def->refcount)) doi_def = NULL; doi_getdef_return: rcu_read_unlock(); return doi_def; } /** * cipso_v4_doi_putdef - Releases a reference for the given DOI definition * @doi_def: the DOI definition * * Description: * Releases a DOI definition reference obtained from cipso_v4_doi_getdef(). * */ void cipso_v4_doi_putdef(struct cipso_v4_doi *doi_def) { if (doi_def == NULL) return; if (!atomic_dec_and_test(&doi_def->refcount)) return; spin_lock(&cipso_v4_doi_list_lock); list_del_rcu(&doi_def->list); spin_unlock(&cipso_v4_doi_list_lock); cipso_v4_cache_invalidate(); call_rcu(&doi_def->rcu, cipso_v4_doi_free_rcu); } /** * cipso_v4_doi_walk - Iterate through the DOI definitions * @skip_cnt: skip past this number of DOI definitions, updated * @callback: callback for each DOI definition * @cb_arg: argument for the callback function * * Description: * Iterate over the DOI definition list, skipping the first @skip_cnt entries. * For each entry call @callback, if @callback returns a negative value stop * 'walking' through the list and return. Updates the value in @skip_cnt upon * return. Returns zero on success, negative values on failure. * */ int cipso_v4_doi_walk(u32 *skip_cnt, int (*callback) (struct cipso_v4_doi *doi_def, void *arg), void *cb_arg) { int ret_val = -ENOENT; u32 doi_cnt = 0; struct cipso_v4_doi *iter_doi; rcu_read_lock(); list_for_each_entry_rcu(iter_doi, &cipso_v4_doi_list, list) if (atomic_read(&iter_doi->refcount) > 0) { if (doi_cnt++ < *skip_cnt) continue; ret_val = callback(iter_doi, cb_arg); if (ret_val < 0) { doi_cnt--; goto doi_walk_return; } } doi_walk_return: rcu_read_unlock(); *skip_cnt = doi_cnt; return ret_val; } /* * Label Mapping Functions */ /** * cipso_v4_map_lvl_valid - Checks to see if the given level is understood * @doi_def: the DOI definition * @level: the level to check * * Description: * Checks the given level against the given DOI definition and returns a * negative value if the level does not have a valid mapping and a zero value * if the level is defined by the DOI. 
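 *
 * For example (illustrative values): with a CIPSO_V4_MAP_TRANS DOI, a
 * network level of 3 is valid only when lvl.cipso[3] holds a translation,
 * i.e. a value below CIPSO_V4_INV_LVL; a CIPSO_V4_MAP_PASS DOI accepts
 * every level.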
* */ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL) return 0; break; } return -EFAULT; } /** * cipso_v4_map_lvl_hton - Perform a level mapping from the host to the network * @doi_def: the DOI definition * @host_lvl: the host MLS level * @net_lvl: the network/CIPSO MLS level * * Description: * Perform a label mapping to translate a local MLS level to the correct * CIPSO level using the given DOI definition. Returns zero on success, * negative values otherwise. * */ static int cipso_v4_map_lvl_hton(const struct cipso_v4_doi *doi_def, u32 host_lvl, u32 *net_lvl) { switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *net_lvl = host_lvl; return 0; case CIPSO_V4_MAP_TRANS: if (host_lvl < doi_def->map.std->lvl.local_size && doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { *net_lvl = doi_def->map.std->lvl.local[host_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_lvl_ntoh - Perform a level mapping from the network to the host * @doi_def: the DOI definition * @net_lvl: the network/CIPSO MLS level * @host_lvl: the host MLS level * * Description: * Perform a label mapping to translate a CIPSO level to the correct local MLS * level using the given DOI definition. Returns zero on success, negative * values otherwise. * */ static int cipso_v4_map_lvl_ntoh(const struct cipso_v4_doi *doi_def, u32 net_lvl, u32 *host_lvl) { struct cipso_v4_std_map_tbl *map_tbl; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: *host_lvl = net_lvl; return 0; case CIPSO_V4_MAP_TRANS: map_tbl = doi_def->map.std; if (net_lvl < map_tbl->lvl.cipso_size && map_tbl->lvl.cipso[net_lvl] < CIPSO_V4_INV_LVL) { *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; return 0; } return -EPERM; } return -EINVAL; } /** * cipso_v4_map_cat_rbm_valid - Checks to see if the category bitmap is valid * @doi_def: the DOI definition * @bitmap: category bitmap * @bitmap_len: bitmap length in bytes * * Description: * Checks the given category bitmap against the given DOI definition and * returns a negative value if any of the categories in the bitmap do not have * a valid mapping and a zero value if all of the categories are valid. * */ static int cipso_v4_map_cat_rbm_valid(const struct cipso_v4_doi *doi_def, const unsigned char *bitmap, u32 bitmap_len) { int cat = -1; u32 bitmap_len_bits = bitmap_len * 8; u32 cipso_cat_size; u32 *cipso_array; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: return 0; case CIPSO_V4_MAP_TRANS: cipso_cat_size = doi_def->map.std->cat.cipso_size; cipso_array = doi_def->map.std->cat.cipso; for (;;) { cat = cipso_v4_bitmap_walk(bitmap, bitmap_len_bits, cat + 1, 1); if (cat < 0) break; if (cat >= cipso_cat_size || cipso_array[cat] >= CIPSO_V4_INV_CAT) return -EFAULT; } if (cat == -1) return 0; break; } return -EFAULT; } /** * cipso_v4_map_cat_rbm_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO bitmap using the given DOI definition. Returns the minimum * size in bytes of the network bitmap on success, negative values otherwise. 
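 *
 * Worked example (illustrative): if the highest network category bit set
 * is 11, net_spot_max ends up as 11; the final length computation below
 * rounds 12 bits up to 12 / 8 + 1 = 2 bytes.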
* */ static int cipso_v4_map_cat_rbm_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int host_spot = -1; u32 net_spot = CIPSO_V4_INV_CAT; u32 net_spot_max = 0; u32 net_clen_bits = net_cat_len * 8; u32 host_cat_size = 0; u32 *host_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { host_cat_size = doi_def->map.std->cat.local_size; host_cat_array = doi_def->map.std->cat.local; } for (;;) { host_spot = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, host_spot + 1); if (host_spot < 0) break; switch (doi_def->type) { case CIPSO_V4_MAP_PASS: net_spot = host_spot; break; case CIPSO_V4_MAP_TRANS: if (host_spot >= host_cat_size) return -EPERM; net_spot = host_cat_array[host_spot]; if (net_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } if (net_spot >= net_clen_bits) return -ENOSPC; cipso_v4_bitmap_setbit(net_cat, net_spot, 1); if (net_spot > net_spot_max) net_spot_max = net_spot; } if (++net_spot_max % 8) return net_spot_max / 8 + 1; return net_spot_max / 8; } /** * cipso_v4_map_cat_rbm_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category bitmap in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO bitmap to the correct local * MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rbm_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; int net_spot = -1; u32 host_spot = CIPSO_V4_INV_CAT; u32 net_clen_bits = net_cat_len * 8; u32 net_cat_size = 0; u32 *net_cat_array = NULL; if (doi_def->type == CIPSO_V4_MAP_TRANS) { net_cat_size = doi_def->map.std->cat.cipso_size; net_cat_array = doi_def->map.std->cat.cipso; } for (;;) { net_spot = cipso_v4_bitmap_walk(net_cat, net_clen_bits, net_spot + 1, 1); if (net_spot < 0) { if (net_spot == -2) return -EFAULT; return 0; } switch (doi_def->type) { case CIPSO_V4_MAP_PASS: host_spot = net_spot; break; case CIPSO_V4_MAP_TRANS: if (net_spot >= net_cat_size) return -EPERM; host_spot = net_cat_array[net_spot]; if (host_spot >= CIPSO_V4_INV_CAT) return -EPERM; break; } ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, host_spot, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return -EINVAL; } /** * cipso_v4_map_cat_enum_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @enumcat: category list * @enumcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
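 *
 * For example (illustrative): the payload 0x00 0x02 0x00 0x05 encodes the
 * strictly increasing category list {2, 5} and passes; a repeated or
 * decreasing value fails the cat <= cat_prev check below.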
* */ static int cipso_v4_map_cat_enum_valid(const struct cipso_v4_doi *doi_def, const unsigned char *enumcat, u32 enumcat_len) { u16 cat; int cat_prev = -1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || enumcat_len & 0x01) return -EFAULT; for (iter = 0; iter < enumcat_len; iter += 2) { cat = get_unaligned_be16(&enumcat[iter]); if (cat <= cat_prev) return -EFAULT; cat_prev = cat; } return 0; } /** * cipso_v4_map_cat_enum_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_enum_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int cat = -1; u32 cat_iter = 0; for (;;) { cat = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, cat + 1); if (cat < 0) break; if ((cat_iter + 2) > net_cat_len) return -ENOSPC; *((__be16 *)&net_cat[cat_iter]) = htons(cat); cat_iter += 2; } return cat_iter; } /** * cipso_v4_map_cat_enum_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_enum_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; for (iter = 0; iter < net_cat_len; iter += 2) { ret_val = netlbl_secattr_catmap_setbit(secattr->attr.mls.cat, get_unaligned_be16(&net_cat[iter]), GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /** * cipso_v4_map_cat_rng_valid - Checks to see if the categories are valid * @doi_def: the DOI definition * @rngcat: category list * @rngcat_len: length of the category list in bytes * * Description: * Checks the given categories against the given DOI definition and returns a * negative value if any of the categories do not have a valid mapping and a * zero value if all of the categories are valid. 
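 *
 * For example (illustrative): a payload of 0x00 0x09 0x00 0x03 encodes the
 * single range of categories 3 through 9 (high value first); multiple
 * ranges must appear in descending order or the cat_high > cat_prev check
 * below fails.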
* */ static int cipso_v4_map_cat_rng_valid(const struct cipso_v4_doi *doi_def, const unsigned char *rngcat, u32 rngcat_len) { u16 cat_high; u16 cat_low; u32 cat_prev = CIPSO_V4_MAX_REM_CATS + 1; u32 iter; if (doi_def->type != CIPSO_V4_MAP_PASS || rngcat_len & 0x01) return -EFAULT; for (iter = 0; iter < rngcat_len; iter += 4) { cat_high = get_unaligned_be16(&rngcat[iter]); if ((iter + 4) <= rngcat_len) cat_low = get_unaligned_be16(&rngcat[iter + 2]); else cat_low = 0; if (cat_high > cat_prev) return -EFAULT; cat_prev = cat_low; } return 0; } /** * cipso_v4_map_cat_rng_hton - Perform a category mapping from host to network * @doi_def: the DOI definition * @secattr: the security attributes * @net_cat: the zero'd out category list in network/CIPSO format * @net_cat_len: the length of the CIPSO category list in bytes * * Description: * Perform a label mapping to translate a local MLS category bitmap to the * correct CIPSO category list using the given DOI definition. Returns the * size in bytes of the network category bitmap on success, negative values * otherwise. * */ static int cipso_v4_map_cat_rng_hton(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *net_cat, u32 net_cat_len) { int iter = -1; u16 array[CIPSO_V4_TAG_RNG_CAT_MAX * 2]; u32 array_cnt = 0; u32 cat_size = 0; /* make sure we don't overflow the 'array[]' variable */ if (net_cat_len > (CIPSO_V4_OPT_LEN_MAX - CIPSO_V4_HDR_LEN - CIPSO_V4_TAG_RNG_BLEN)) return -ENOSPC; for (;;) { iter = netlbl_secattr_catmap_walk(secattr->attr.mls.cat, iter + 1); if (iter < 0) break; cat_size += (iter == 0 ? 0 : sizeof(u16)); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; iter = netlbl_secattr_catmap_walk_rng(secattr->attr.mls.cat, iter); if (iter < 0) return -EFAULT; cat_size += sizeof(u16); if (cat_size > net_cat_len) return -ENOSPC; array[array_cnt++] = iter; } for (iter = 0; array_cnt > 0;) { *((__be16 *)&net_cat[iter]) = htons(array[--array_cnt]); iter += 2; array_cnt--; if (array[array_cnt] != 0) { *((__be16 *)&net_cat[iter]) = htons(array[array_cnt]); iter += 2; } } return cat_size; } /** * cipso_v4_map_cat_rng_ntoh - Perform a category mapping from network to host * @doi_def: the DOI definition * @net_cat: the category list in network/CIPSO format * @net_cat_len: the length of the CIPSO bitmap in bytes * @secattr: the security attributes * * Description: * Perform a label mapping to translate a CIPSO category list to the correct * local MLS category bitmap using the given DOI definition. Returns zero on * success, negative values on failure. * */ static int cipso_v4_map_cat_rng_ntoh(const struct cipso_v4_doi *doi_def, const unsigned char *net_cat, u32 net_cat_len, struct netlbl_lsm_secattr *secattr) { int ret_val; u32 net_iter; u16 cat_low; u16 cat_high; for (net_iter = 0; net_iter < net_cat_len; net_iter += 4) { cat_high = get_unaligned_be16(&net_cat[net_iter]); if ((net_iter + 4) <= net_cat_len) cat_low = get_unaligned_be16(&net_cat[net_iter + 2]); else cat_low = 0; ret_val = netlbl_secattr_catmap_setrng(secattr->attr.mls.cat, cat_low, cat_high, GFP_ATOMIC); if (ret_val != 0) return ret_val; } return 0; } /* * Protocol Handling Functions */ /** * cipso_v4_gentag_hdr - Generate a CIPSO option header * @doi_def: the DOI definition * @buf: the CIPSO option buffer * @len: the total tag length in bytes, not including this header * * Description: * Write a CIPSO header into the beginning of @buf.
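 *
 * The resulting six byte header looks like this (illustrative layout):
 *
 *	buf[0]		IPOPT_CIPSO
 *	buf[1]		total option length, CIPSO_V4_HDR_LEN + len
 *	buf[2]-buf[5]	the DOI in network byte order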
* */ static void cipso_v4_gentag_hdr(const struct cipso_v4_doi *doi_def, unsigned char *buf, u32 len) { buf[0] = IPOPT_CIPSO; buf[1] = CIPSO_V4_HDR_LEN + len; *(__be32 *)&buf[2] = htonl(doi_def->doi); } /** * cipso_v4_gentag_rbm - Generate a CIPSO restricted bitmap tag (type #1) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the restricted bitmap tag, tag type #1. The * actual buffer length may be larger than the indicated size due to * translation between host and network category bitmaps. Returns the size of * the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if ((secattr->flags & NETLBL_SECATTR_MLS_LVL) == 0) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rbm_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; /* This will send packets using the "optimized" format when * possible as specified in section 3.4.2.6 of the * CIPSO draft. */ if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) tag_len = 14; else tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RBITMAP; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rbm - Parse a CIPSO restricted bitmap tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO restricted bitmap tag (tag type #1) and return the security * attributes in @secattr. Return zero on success, negative values on * failure. * */ static int cipso_v4_parsetag_rbm(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rbm_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_enum - Generate a CIPSO enumerated tag (type #2) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the enumerated tag, tag type #2. Returns the * size of the tag on success, negative values on failure.
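 *
 * Tag layout (illustrative): buffer[0] is the tag type (2), buffer[1] the
 * tag length, buffer[3] the mapped level, followed by 16-bit category
 * values; buffer[2] is the alignment octet and is left zero by the
 * memset() in cipso_v4_genopt().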
* */ static int cipso_v4_gentag_enum(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_enum_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_ENUM; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_enum - Parse a CIPSO enumerated tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO enumerated tag (tag type #2) and return the security * attributes in @secattr. Return zero on success, negative values on * failure. * */ static int cipso_v4_parsetag_enum(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_enum_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_rng - Generate a CIPSO ranged tag (type #5) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the ranged tag, tag type #5. Returns the * size of the tag on success, negative values on failure. * */ static int cipso_v4_gentag_rng(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { int ret_val; u32 tag_len; u32 level; if (!(secattr->flags & NETLBL_SECATTR_MLS_LVL)) return -EPERM; ret_val = cipso_v4_map_lvl_hton(doi_def, secattr->attr.mls.lvl, &level); if (ret_val != 0) return ret_val; if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { ret_val = cipso_v4_map_cat_rng_hton(doi_def, secattr, &buffer[4], buffer_len - 4); if (ret_val < 0) return ret_val; tag_len = 4 + ret_val; } else tag_len = 4; buffer[0] = CIPSO_V4_TAG_RANGE; buffer[1] = tag_len; buffer[3] = level; return tag_len; } /** * cipso_v4_parsetag_rng - Parse a CIPSO ranged tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO ranged tag (tag type #5) and return the security attributes * in @secattr. Return zero on success, negative values on failure.
* */ static int cipso_v4_parsetag_rng(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { int ret_val; u8 tag_len = tag[1]; u32 level; ret_val = cipso_v4_map_lvl_ntoh(doi_def, tag[3], &level); if (ret_val != 0) return ret_val; secattr->attr.mls.lvl = level; secattr->flags |= NETLBL_SECATTR_MLS_LVL; if (tag_len > 4) { secattr->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); if (secattr->attr.mls.cat == NULL) return -ENOMEM; ret_val = cipso_v4_map_cat_rng_ntoh(doi_def, &tag[4], tag_len - 4, secattr); if (ret_val != 0) { netlbl_secattr_catmap_free(secattr->attr.mls.cat); return ret_val; } secattr->flags |= NETLBL_SECATTR_MLS_CAT; } return 0; } /** * cipso_v4_gentag_loc - Generate a CIPSO local tag (non-standard) * @doi_def: the DOI definition * @secattr: the security attributes * @buffer: the option buffer * @buffer_len: length of buffer in bytes * * Description: * Generate a CIPSO option using the local tag. Returns the size of the tag * on success, negative values on failure. * */ static int cipso_v4_gentag_loc(const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr, unsigned char *buffer, u32 buffer_len) { if (!(secattr->flags & NETLBL_SECATTR_SECID)) return -EPERM; buffer[0] = CIPSO_V4_TAG_LOCAL; buffer[1] = CIPSO_V4_TAG_LOC_BLEN; *(u32 *)&buffer[2] = secattr->attr.secid; return CIPSO_V4_TAG_LOC_BLEN; } /** * cipso_v4_parsetag_loc - Parse a CIPSO local tag * @doi_def: the DOI definition * @tag: the CIPSO tag * @secattr: the security attributes * * Description: * Parse a CIPSO local tag and return the security attributes in @secattr. * Return zero on success, negative values on failure. * */ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def, const unsigned char *tag, struct netlbl_lsm_secattr *secattr) { secattr->attr.secid = *(u32 *)&tag[2]; secattr->flags |= NETLBL_SECATTR_SECID; return 0; } /** * cipso_v4_validate - Validate a CIPSO option * @skb: the packet * @option: the start of the option, on error it is set to point to the error * * Description: * This routine is called to validate a CIPSO option, it checks all of the * fields to ensure that they are at least valid, see the draft snippet below * for details. If the option is valid then a zero value is returned and * the value of @option is unchanged. If the option is invalid then a * non-zero value is returned and @option is adjusted to point to the * offending portion of the option. From the IETF draft ... * * "If any field within the CIPSO options, such as the DOI identifier, is not * recognized the IP datagram is discarded and an ICMP 'parameter problem' * (type 12) is generated and returned. The ICMP code field is set to 'bad * parameter' (code 0) and the pointer is set to the start of the CIPSO field * that is unrecognized."
* */ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) { unsigned char *opt = *option; unsigned char *tag; unsigned char opt_iter; unsigned char err_offset = 0; u8 opt_len; u8 tag_len; struct cipso_v4_doi *doi_def = NULL; u32 tag_iter; /* caller already checks for length values that are too large */ opt_len = opt[1]; if (opt_len < 8) { err_offset = 1; goto validate_return; } rcu_read_lock(); doi_def = cipso_v4_doi_search(get_unaligned_be32(&opt[2])); if (doi_def == NULL) { err_offset = 2; goto validate_return_locked; } opt_iter = CIPSO_V4_HDR_LEN; tag = opt + opt_iter; while (opt_iter < opt_len) { for (tag_iter = 0; doi_def->tags[tag_iter] != tag[0];) if (doi_def->tags[tag_iter] == CIPSO_V4_TAG_INVALID || ++tag_iter == CIPSO_V4_TAG_MAXCNT) { err_offset = opt_iter; goto validate_return_locked; } tag_len = tag[1]; if (tag_len > (opt_len - opt_iter)) { err_offset = opt_iter + 1; goto validate_return_locked; } switch (tag[0]) { case CIPSO_V4_TAG_RBITMAP: if (tag_len < CIPSO_V4_TAG_RBM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } /* We are already going to do all the verification * necessary at the socket layer so from our point of * view it is safe to turn these checks off (and less * work), however, the CIPSO draft says we should do * all the CIPSO validations here but it doesn't * really specify _exactly_ what we need to validate * ... so, just make it a sysctl tunable. */ if (cipso_v4_rbm_strictvalid) { if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RBM_BLEN && cipso_v4_map_cat_rbm_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } } break; case CIPSO_V4_TAG_ENUM: if (tag_len < CIPSO_V4_TAG_ENUM_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_ENUM_BLEN && cipso_v4_map_cat_enum_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_RANGE: if (tag_len < CIPSO_V4_TAG_RNG_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } if (cipso_v4_map_lvl_valid(doi_def, tag[3]) < 0) { err_offset = opt_iter + 3; goto validate_return_locked; } if (tag_len > CIPSO_V4_TAG_RNG_BLEN && cipso_v4_map_cat_rng_valid(doi_def, &tag[4], tag_len - 4) < 0) { err_offset = opt_iter + 4; goto validate_return_locked; } break; case CIPSO_V4_TAG_LOCAL: /* This is a non-standard tag that we only allow for * local connections, so if the incoming interface is * not the loopback device drop the packet. */ if (!(skb->dev->flags & IFF_LOOPBACK)) { err_offset = opt_iter; goto validate_return_locked; } if (tag_len != CIPSO_V4_TAG_LOC_BLEN) { err_offset = opt_iter + 1; goto validate_return_locked; } break; default: err_offset = opt_iter; goto validate_return_locked; } tag += tag_len; opt_iter += tag_len; } validate_return_locked: rcu_read_unlock(); validate_return: *option = opt + err_offset; return err_offset; } /** * cipso_v4_error - Send the correct response for a bad packet * @skb: the packet * @error: the error code * @gateway: CIPSO gateway flag * * Description: * Based on the error code given in @error, send an ICMP error message back to * the originating host. From the IETF draft ... 
* * "If the contents of the CIPSO [option] are valid but the security label is * outside of the configured host or port label range, the datagram is * discarded and an ICMP 'destination unreachable' (type 3) is generated and * returned. The code field of the ICMP is set to 'communication with * destination network administratively prohibited' (code 9) or to * 'communication with destination host administratively prohibited' * (code 10). The value of the code is dependent on whether the originator * of the ICMP message is acting as a CIPSO host or a CIPSO gateway. The * recipient of the ICMP message MUST be able to handle either value. The * same procedure is performed if a CIPSO [option] can not be added to an * IP packet because it is too large to fit in the IP options area." * * "If the error is triggered by receipt of an ICMP message, the message is * discarded and no response is permitted (consistent with general ICMP * processing rules)." * */ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway) { if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES) return; if (gateway) icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0); else icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0); } /** * cipso_v4_genopt - Generate a CIPSO option * @buf: the option buffer * @buf_len: the size of opt_buf * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Generate a CIPSO option using the DOI definition and security attributes * passed to the function. Returns the length of the option on success and * negative values on failure. * */ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; u32 iter; if (buf_len <= CIPSO_V4_HDR_LEN) return -ENOSPC; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ iter = 0; do { memset(buf, 0, buf_len); switch (doi_def->tags[iter]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_gentag_rbm(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_gentag_enum(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_gentag_rng(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_gentag_loc(doi_def, secattr, &buf[CIPSO_V4_HDR_LEN], buf_len - CIPSO_V4_HDR_LEN); break; default: return -EPERM; } iter++; } while (ret_val < 0 && iter < CIPSO_V4_TAG_MAXCNT && doi_def->tags[iter] != CIPSO_V4_TAG_INVALID); if (ret_val < 0) return ret_val; cipso_v4_gentag_hdr(doi_def, buf, ret_val); return CIPSO_V4_HDR_LEN + ret_val; } static void opt_kfree_rcu(struct rcu_head *head) { kfree(container_of(head, struct ip_options_rcu, rcu)); } /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. This function requires * exclusive access to @sk, which means it either needs to be in the * process of being created or locked. Returns zero on success and negative * values on failure. 
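 *
 * A hypothetical caller (sketch; the surrounding error handling is elided)
 * pairs this with the DOI reference helpers above:
 *
 *	doi_def = cipso_v4_doi_getdef(doi);
 *	if (doi_def == NULL)
 *		return -ENOENT;
 *	ret_val = cipso_v4_sock_setattr(sk, doi_def, secattr);
 *	cipso_v4_doi_putdef(doi_def);
 *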
* */ int cipso_v4_sock_setattr(struct sock *sk, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *old, *opt = NULL; struct inet_sock *sk_inet; struct inet_connection_sock *sk_conn; /* In the case of sock_create_lite(), the sock->sk field is not * defined yet but it is not a problem as the only users of these * "lite" PF_INET sockets are functions which do an accept() call * afterwards so we will label the socket as part of the accept(). */ if (sk == NULL) return 0; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto socket_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. */ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto socket_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; sk_inet = inet_sk(sk); old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk)); if (sk_inet->is_icsk) { sk_conn = inet_csk(sk); if (old) sk_conn->icsk_ext_hdr_len -= old->opt.optlen; sk_conn->icsk_ext_hdr_len += opt->opt.optlen; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) call_rcu(&old->rcu, opt_kfree_rcu); return 0; socket_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_req_setattr - Add a CIPSO option to a connection request socket * @req: the connection request socket * @doi_def: the CIPSO DOI to use * @secattr: the specific security attributes of the socket * * Description: * Set the CIPSO option on the given socket using the DOI definition and * security attributes passed to the function. Returns zero on success and * negative values on failure. * */ int cipso_v4_req_setattr(struct request_sock *req, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val = -EPERM; unsigned char *buf = NULL; u32 buf_len; u32 opt_len; struct ip_options_rcu *opt = NULL; struct inet_request_sock *req_inet; /* We allocate the maximum CIPSO option size here so we are probably * being a little wasteful, but it makes our life _much_ easier later * on and after all we are only talking about 40 bytes. */ buf_len = CIPSO_V4_OPT_LEN_MAX; buf = kmalloc(buf_len, GFP_ATOMIC); if (buf == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) goto req_setattr_failure; buf_len = ret_val; /* We can't use ip_options_get() directly because it makes a call to * ip_options_get_alloc() which allocates memory with GFP_KERNEL and * we won't always have CAP_NET_RAW even though we _always_ want to * set the IPOPT_CIPSO option. 
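	 * (The rounding below mirrors cipso_v4_sock_setattr(): opt_len is
	 * buf_len rounded up to the next multiple of four, e.g. a 10 byte
	 * option becomes 12 bytes of option space, keeping the IP header
	 * length word aligned.)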
*/ opt_len = (buf_len + 3) & ~3; opt = kzalloc(sizeof(*opt) + opt_len, GFP_ATOMIC); if (opt == NULL) { ret_val = -ENOMEM; goto req_setattr_failure; } memcpy(opt->opt.__data, buf, buf_len); opt->opt.optlen = opt_len; opt->opt.cipso = sizeof(struct iphdr); kfree(buf); buf = NULL; req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) call_rcu(&opt->rcu, opt_kfree_rcu); return 0; req_setattr_failure: kfree(buf); kfree(opt); return ret_val; } /** * cipso_v4_delopt - Delete the CIPSO option from a set of IP options * @opt_ptr: IP option pointer * * Description: * Deletes the CIPSO IP option from a set of IP options and makes the necessary * adjustments to the IP option structure. Returns zero on success, negative * values on failure. * */ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) { int hdr_delta = 0; struct ip_options_rcu *opt = *opt_ptr; if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { u8 cipso_len; u8 cipso_off; unsigned char *cipso_ptr; int iter; int optlen_new; cipso_off = opt->opt.cipso - sizeof(struct iphdr); cipso_ptr = &opt->opt.__data[cipso_off]; cipso_len = cipso_ptr[1]; if (opt->opt.srr > opt->opt.cipso) opt->opt.srr -= cipso_len; if (opt->opt.rr > opt->opt.cipso) opt->opt.rr -= cipso_len; if (opt->opt.ts > opt->opt.cipso) opt->opt.ts -= cipso_len; if (opt->opt.router_alert > opt->opt.cipso) opt->opt.router_alert -= cipso_len; opt->opt.cipso = 0; memmove(cipso_ptr, cipso_ptr + cipso_len, opt->opt.optlen - cipso_off - cipso_len); /* determining the new total option length is tricky because of * the padding necessary, the only thing I can think to do at * this point is walk the options one-by-one, skipping the * padding at the end to determine the actual option size and * from there we can determine the new total option length */ iter = 0; optlen_new = 0; while (iter < opt->opt.optlen) if (opt->opt.__data[iter] != IPOPT_NOP) { iter += opt->opt.__data[iter + 1]; optlen_new = iter; } else iter++; hdr_delta = opt->opt.optlen; opt->opt.optlen = (optlen_new + 3) & ~3; hdr_delta -= opt->opt.optlen; } else { /* only the cipso option was present on the socket so we can * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; call_rcu(&opt->rcu, opt_kfree_rcu); } return hdr_delta; } /** * cipso_v4_sock_delattr - Delete the CIPSO option from a socket * @sk: the socket * * Description: * Removes the CIPSO option from a socket, if present. * */ void cipso_v4_sock_delattr(struct sock *sk) { int hdr_delta; struct ip_options_rcu *opt; struct inet_sock *sk_inet; sk_inet = inet_sk(sk); opt = rcu_dereference_protected(sk_inet->inet_opt, 1); if (opt == NULL || opt->opt.cipso == 0) return; hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); if (sk_inet->is_icsk && hdr_delta > 0) { struct inet_connection_sock *sk_conn = inet_csk(sk); sk_conn->icsk_ext_hdr_len -= hdr_delta; sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); } } /** * cipso_v4_req_delattr - Delete the CIPSO option from a request socket * @req: the request socket * * Description: * Removes the CIPSO option from a request socket, if present.
* */ void cipso_v4_req_delattr(struct request_sock *req) { struct ip_options_rcu *opt; struct inet_request_sock *req_inet; req_inet = inet_rsk(req); opt = req_inet->opt; if (opt == NULL || opt->opt.cipso == 0) return; cipso_v4_delopt(&req_inet->opt); } /** * cipso_v4_getattr - Helper function for the cipso_v4_*_getattr functions * @cipso: the CIPSO v4 option * @secattr: the security attributes * * Description: * Inspect @cipso and return the security attributes in @secattr. Returns zero * on success and negative values on failure. * */ static int cipso_v4_getattr(const unsigned char *cipso, struct netlbl_lsm_secattr *secattr) { int ret_val = -ENOMSG; u32 doi; struct cipso_v4_doi *doi_def; if (cipso_v4_cache_check(cipso, cipso[1], secattr) == 0) return 0; doi = get_unaligned_be32(&cipso[2]); rcu_read_lock(); doi_def = cipso_v4_doi_search(doi); if (doi_def == NULL) goto getattr_return; /* XXX - This code assumes only one tag per CIPSO option which isn't * really a good assumption to make but since we only support the MAC * tags right now it is a safe assumption. */ switch (cipso[6]) { case CIPSO_V4_TAG_RBITMAP: ret_val = cipso_v4_parsetag_rbm(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_ENUM: ret_val = cipso_v4_parsetag_enum(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_RANGE: ret_val = cipso_v4_parsetag_rng(doi_def, &cipso[6], secattr); break; case CIPSO_V4_TAG_LOCAL: ret_val = cipso_v4_parsetag_loc(doi_def, &cipso[6], secattr); break; } if (ret_val == 0) secattr->type = NETLBL_NLTYPE_CIPSOV4; getattr_return: rcu_read_unlock(); return ret_val; } /** * cipso_v4_sock_getattr - Get the security attributes from a sock * @sk: the sock * @secattr: the security attributes * * Description: * Query @sk to see if there is a CIPSO option attached to the sock and if * there is return the CIPSO security attributes in @secattr. This function * requires that @sk be locked, or privately held, but it does not do any * locking itself. Returns zero on success and negative values on failure. * */ int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; } /** * cipso_v4_skbuff_setattr - Set the CIPSO option on a packet * @skb: the packet * @doi_def: the CIPSO DOI to use * @secattr: the security attributes * * Description: * Set the CIPSO option on the given packet based on the security attributes. * Returns zero on success and negative values on failure.
* */ int cipso_v4_skbuff_setattr(struct sk_buff *skb, const struct cipso_v4_doi *doi_def, const struct netlbl_lsm_secattr *secattr) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char buf[CIPSO_V4_OPT_LEN_MAX]; u32 buf_len = CIPSO_V4_OPT_LEN_MAX; u32 opt_len; int len_delta; ret_val = cipso_v4_genopt(buf, buf_len, doi_def, secattr); if (ret_val < 0) return ret_val; buf_len = ret_val; opt_len = (buf_len + 3) & ~3; /* we overwrite any existing options to ensure that we have enough * room for the CIPSO option, the reason is that we _need_ to guarantee * that the security label is applied to the packet - we do the same * thing when using the socket options and it hasn't caused a problem, * if we need to we can always revisit this choice later */ len_delta = opt_len - opt->optlen; /* if we don't ensure enough headroom we could panic on the skb_push() * call below so make sure we have enough, we are also "mangling" the * packet so we should probably do a copy-on-write call anyway */ ret_val = skb_cow(skb, skb_headroom(skb) + len_delta); if (ret_val < 0) return ret_val; if (len_delta > 0) { /* we assume that the header + opt->optlen have already been * "pushed" in ip_options_build() or similar */ iph = ip_hdr(skb); skb_push(skb, len_delta); memmove((char *)iph - len_delta, iph, iph->ihl << 2); skb_reset_network_header(skb); iph = ip_hdr(skb); } else if (len_delta < 0) { iph = ip_hdr(skb); memset(iph + 1, IPOPT_NOP, opt->optlen); } else iph = ip_hdr(skb); if (opt->optlen > 0) memset(opt, 0, sizeof(*opt)); opt->optlen = opt_len; opt->cipso = sizeof(struct iphdr); opt->is_changed = 1; /* we have to do the following because we are being called from a * netfilter hook which means the packet already has had the header * fields populated and the checksum calculated - yes this means we * are doing more work than needed but we do it to keep the core * stack clean and tidy */ memcpy(iph + 1, buf, buf_len); if (opt_len > buf_len) memset((char *)(iph + 1) + buf_len, 0, opt_len - buf_len); if (len_delta != 0) { iph->ihl = 5 + (opt_len >> 2); iph->tot_len = htons(skb->len); } ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_delattr - Delete any CIPSO options from a packet * @skb: the packet * * Description: * Removes any and all CIPSO options from the given packet. Returns zero on * success, negative values on failure. * */ int cipso_v4_skbuff_delattr(struct sk_buff *skb) { int ret_val; struct iphdr *iph; struct ip_options *opt = &IPCB(skb)->opt; unsigned char *cipso_ptr; if (opt->cipso == 0) return 0; /* since we are changing the packet we should make a copy */ ret_val = skb_cow(skb, skb_headroom(skb)); if (ret_val < 0) return ret_val; /* the easiest thing to do is just replace the cipso option with noop * options since we don't change the size of the packet, although we * still need to recalculate the checksum */ iph = ip_hdr(skb); cipso_ptr = (unsigned char *)iph + opt->cipso; memset(cipso_ptr, IPOPT_NOOP, cipso_ptr[1]); opt->cipso = 0; opt->is_changed = 1; ip_send_check(iph); return 0; } /** * cipso_v4_skbuff_getattr - Get the security attributes from the CIPSO option * @skb: the packet * @secattr: the security attributes * * Description: * Parse the given packet's CIPSO option and return the security attributes. * Returns zero on success and negative values on failure. 
* */ int cipso_v4_skbuff_getattr(const struct sk_buff *skb, struct netlbl_lsm_secattr *secattr) { return cipso_v4_getattr(CIPSO_V4_OPTPTR(skb), secattr); } /* * Setup Functions */ /** * cipso_v4_init - Initialize the CIPSO module * * Description: * Initialize the CIPSO module and prepare it for use. Returns zero on success * and negative values on failure. * */ static int __init cipso_v4_init(void) { int ret_val; ret_val = cipso_v4_cache_init(); if (ret_val != 0) panic("Failed to initialize the CIPSO/IPv4 cache (%d)\n", ret_val); return 0; } subsys_initcall(cipso_v4_init);
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options *opt; opt = inet_sk(sk)->opt; if (opt == NULL || opt->cipso == 0) return -ENOMSG; return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr), secattr); }
int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) { struct ip_options_rcu *opt; int res = -ENOMSG; rcu_read_lock(); opt = rcu_dereference(inet_sk(sk)->inet_opt); if (opt && opt->opt.cipso) res = cipso_v4_getattr(opt->opt.__data + opt->opt.cipso - sizeof(struct iphdr), secattr); rcu_read_unlock(); return res; }
{'added': [(1860, 'static void opt_kfree_rcu(struct rcu_head *head)'), (1861, '{'), (1862, '\tkfree(container_of(head, struct ip_options_rcu, rcu));'), (1863, '}'), (1864, ''), (1887, '\tstruct ip_options_rcu *old, *opt = NULL;'), (1923, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (1924, '\topt->opt.optlen = opt_len;'), (1925, '\topt->opt.cipso = sizeof(struct iphdr);'), (1930, ''), (1931, '\told = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));'), (1934, '\t\tif (old)'), (1935, '\t\t\tsk_conn->icsk_ext_hdr_len -= old->opt.optlen;'), (1936, '\t\tsk_conn->icsk_ext_hdr_len += opt->opt.optlen;'), (1939, '\trcu_assign_pointer(sk_inet->inet_opt, opt);'), (1940, '\tif (old)'), (1941, '\t\tcall_rcu(&old->rcu, opt_kfree_rcu);'), (1971, '\tstruct ip_options_rcu *opt = NULL;'), (1999, '\tmemcpy(opt->opt.__data, buf, buf_len);'), (2000, '\topt->opt.optlen = opt_len;'), (2001, '\topt->opt.cipso = sizeof(struct iphdr);'), (2007, '\tif (opt)'), (2008, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2028, 'static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)'), (2031, '\tstruct ip_options_rcu *opt = *opt_ptr;'), (2033, '\tif (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {'), (2040, '\t\tcipso_off = opt->opt.cipso - sizeof(struct iphdr);'), (2041, '\t\tcipso_ptr = &opt->opt.__data[cipso_off];'), (2044, '\t\tif (opt->opt.srr > opt->opt.cipso)'), (2045, '\t\t\topt->opt.srr -= cipso_len;'), (2046, '\t\tif (opt->opt.rr > opt->opt.cipso)'), (2047, '\t\t\topt->opt.rr -= cipso_len;'), (2048, '\t\tif (opt->opt.ts > opt->opt.cipso)'), (2049, '\t\t\topt->opt.ts -= cipso_len;'), (2050, '\t\tif (opt->opt.router_alert > opt->opt.cipso)'), (2051, '\t\t\topt->opt.router_alert -= cipso_len;'), (2052, '\t\topt->opt.cipso = 0;'), (2055, '\t\t\topt->opt.optlen - cipso_off - cipso_len);'), (2064, '\t\twhile (iter < opt->opt.optlen)'), (2065, '\t\t\tif (opt->opt.__data[iter] != IPOPT_NOP) {'), (2066, '\t\t\t\titer += opt->opt.__data[iter + 1];'), (2070, '\t\thdr_delta = opt->opt.optlen;'), (2071, '\t\topt->opt.optlen = (optlen_new + 3) & ~3;'), (2072, '\t\thdr_delta -= opt->opt.optlen;'), (2077, '\t\thdr_delta = opt->opt.optlen;'), (2078, '\t\tcall_rcu(&opt->rcu, opt_kfree_rcu);'), (2095, '\tstruct ip_options_rcu *opt;'), (2099, '\topt = rcu_dereference_protected(sk_inet->inet_opt, 1);'), (2100, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2103, '\thdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);'), (2121, '\tstruct ip_options_rcu *opt;'), (2126, '\tif (opt == NULL || opt->opt.cipso == 0)'), (2196, '\tstruct ip_options_rcu *opt;'), (2197, '\tint res = -ENOMSG;'), (2199, '\trcu_read_lock();'), (2200, '\topt = rcu_dereference(inet_sk(sk)->inet_opt);'), (2201, '\tif (opt && opt->opt.cipso)'), (2202, '\t\tres = cipso_v4_getattr(opt->opt.__data +'), (2203, '\t\t\t\t\t\topt->opt.cipso -'), (2204, '\t\t\t\t\t\tsizeof(struct iphdr),'), (2205, '\t\t\t\t secattr);'), (2206, '\trcu_read_unlock();'), (2207, '\treturn res;')], 'deleted': [(1882, '\tstruct ip_options *opt = NULL;'), (1918, '\tmemcpy(opt->__data, buf, buf_len);'), (1919, '\topt->optlen = opt_len;'), (1920, '\topt->cipso = sizeof(struct iphdr);'), (1927, '\t\tif (sk_inet->opt)'), (1928, '\t\t\tsk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen;'), (1929, '\t\tsk_conn->icsk_ext_hdr_len += opt->optlen;'), (1932, '\topt = xchg(&sk_inet->opt, opt);'), (1933, '\tkfree(opt);'), (1963, '\tstruct ip_options *opt = NULL;'), (1991, '\tmemcpy(opt->__data, buf, buf_len);'), (1992, '\topt->optlen = opt_len;'), (1993, '\topt->cipso = 
sizeof(struct iphdr);'), (1999, '\tkfree(opt);'), (2019, 'static int cipso_v4_delopt(struct ip_options **opt_ptr)'), (2022, '\tstruct ip_options *opt = *opt_ptr;'), (2024, '\tif (opt->srr || opt->rr || opt->ts || opt->router_alert) {'), (2031, '\t\tcipso_off = opt->cipso - sizeof(struct iphdr);'), (2032, '\t\tcipso_ptr = &opt->__data[cipso_off];'), (2035, '\t\tif (opt->srr > opt->cipso)'), (2036, '\t\t\topt->srr -= cipso_len;'), (2037, '\t\tif (opt->rr > opt->cipso)'), (2038, '\t\t\topt->rr -= cipso_len;'), (2039, '\t\tif (opt->ts > opt->cipso)'), (2040, '\t\t\topt->ts -= cipso_len;'), (2041, '\t\tif (opt->router_alert > opt->cipso)'), (2042, '\t\t\topt->router_alert -= cipso_len;'), (2043, '\t\topt->cipso = 0;'), (2046, '\t\t\topt->optlen - cipso_off - cipso_len);'), (2055, '\t\twhile (iter < opt->optlen)'), (2056, '\t\t\tif (opt->__data[iter] != IPOPT_NOP) {'), (2057, '\t\t\t\titer += opt->__data[iter + 1];'), (2061, '\t\thdr_delta = opt->optlen;'), (2062, '\t\topt->optlen = (optlen_new + 3) & ~3;'), (2063, '\t\thdr_delta -= opt->optlen;'), (2068, '\t\thdr_delta = opt->optlen;'), (2069, '\t\tkfree(opt);'), (2086, '\tstruct ip_options *opt;'), (2090, '\topt = sk_inet->opt;'), (2091, '\tif (opt == NULL || opt->cipso == 0)'), (2094, '\thdr_delta = cipso_v4_delopt(&sk_inet->opt);'), (2112, '\tstruct ip_options *opt;'), (2117, '\tif (opt == NULL || opt->cipso == 0)'), (2187, '\tstruct ip_options *opt;'), (2189, '\topt = inet_sk(sk)->opt;'), (2190, '\tif (opt == NULL || opt->cipso == 0)'), (2191, '\t\treturn -ENOMSG;'), (2192, ''), (2193, '\treturn cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),'), (2194, '\t\t\t\tsecattr);')]}
63
50
1,360
7,485
9
64
3
https://github.com/torvalds/linux
CVE-2012-3552
CWE-362
1,525
unistr.cpp
C++
UnicodeString::doAppend
// © 2016 and later: Unicode, Inc. and others. // License & terms of use: http://www.unicode.org/copyright.html /* ****************************************************************************** * Copyright (C) 1999-2016, International Business Machines Corporation and * others. All Rights Reserved. ****************************************************************************** * * File unistr.cpp * * Modification History: * * Date Name Description * 09/25/98 stephen Creation. * 04/20/99 stephen Overhauled per 4/16 code review. * 07/09/99 stephen Renamed {hi,lo},{byte,word} to icu_X for HP/UX * 11/18/99 aliu Added handleReplaceBetween() to make inherit from * Replaceable. * 06/25/01 grhoten Removed the dependency on iostream ****************************************************************************** */ #include "unicode/utypes.h" #include "unicode/appendable.h" #include "unicode/putil.h" #include "cstring.h" #include "cmemory.h" #include "unicode/ustring.h" #include "unicode/unistr.h" #include "unicode/utf.h" #include "unicode/utf16.h" #include "uelement.h" #include "ustr_imp.h" #include "umutex.h" #include "uassert.h" #if 0 #include <iostream> using namespace std; //DEBUGGING void print(const UnicodeString& s, const char *name) { UChar c; cout << name << ":|"; for(int i = 0; i < s.length(); ++i) { c = s[i]; if(c>= 0x007E || c < 0x0020) cout << "[0x" << hex << s[i] << "]"; else cout << (char) s[i]; } cout << '|' << endl; } void print(const UChar *s, int32_t len, const char *name) { UChar c; cout << name << ":|"; for(int i = 0; i < len; ++i) { c = s[i]; if(c>= 0x007E || c < 0x0020) cout << "[0x" << hex << s[i] << "]"; else cout << (char) s[i]; } cout << '|' << endl; } // END DEBUGGING #endif // Local function definitions for now // need to copy areas that may overlap static inline void us_arrayCopy(const UChar *src, int32_t srcStart, UChar *dst, int32_t dstStart, int32_t count) { if(count>0) { uprv_memmove(dst+dstStart, src+srcStart, (size_t)count*sizeof(*src)); } } // u_unescapeAt() callback to get a UChar from a UnicodeString U_CDECL_BEGIN static UChar U_CALLCONV UnicodeString_charAt(int32_t offset, void *context) { return ((icu::UnicodeString*) context)->charAt(offset); } U_CDECL_END U_NAMESPACE_BEGIN /* The Replaceable virtual destructor can't be defined in the header due to how AIX works with multiple definitions of virtual functions. */ Replaceable::~Replaceable() {} UOBJECT_DEFINE_RTTI_IMPLEMENTATION(UnicodeString) UnicodeString U_EXPORT2 operator+ (const UnicodeString &s1, const UnicodeString &s2) { return UnicodeString(s1.length()+s2.length()+1, (UChar32)0, 0). append(s1). append(s2); } //======================================== // Reference Counting functions, put at top of file so that optimizing compilers // have a chance to automatically inline. //======================================== void UnicodeString::addRef() { umtx_atomic_inc((u_atomic_int32_t *)fUnion.fFields.fArray - 1); } int32_t UnicodeString::removeRef() { return umtx_atomic_dec((u_atomic_int32_t *)fUnion.fFields.fArray - 1); } int32_t UnicodeString::refCount() const { return umtx_loadAcquire(*((u_atomic_int32_t *)fUnion.fFields.fArray - 1)); } void UnicodeString::releaseArray() { if((fUnion.fFields.fLengthAndFlags & kRefCounted) && removeRef() == 0) { uprv_free((int32_t *)fUnion.fFields.fArray - 1); } } //======================================== // Constructors //======================================== // The default constructor is inline in unistr.h. 
UnicodeString::UnicodeString(int32_t capacity, UChar32 c, int32_t count) { fUnion.fFields.fLengthAndFlags = 0; if(count <= 0 || (uint32_t)c > 0x10ffff) { // just allocate and do not do anything else allocate(capacity); } else if(c <= 0xffff) { int32_t length = count; if(capacity < length) { capacity = length; } if(allocate(capacity)) { UChar *array = getArrayStart(); UChar unit = (UChar)c; for(int32_t i = 0; i < length; ++i) { array[i] = unit; } setLength(length); } } else { // supplementary code point, write surrogate pairs if(count > (INT32_MAX / 2)) { // We would get more than 2G UChars. allocate(capacity); return; } int32_t length = count * 2; if(capacity < length) { capacity = length; } if(allocate(capacity)) { UChar *array = getArrayStart(); UChar lead = U16_LEAD(c); UChar trail = U16_TRAIL(c); for(int32_t i = 0; i < length; i += 2) { array[i] = lead; array[i + 1] = trail; } setLength(length); } } } UnicodeString::UnicodeString(UChar ch) { fUnion.fFields.fLengthAndFlags = kLength1 | kShortString; fUnion.fStackFields.fBuffer[0] = ch; } UnicodeString::UnicodeString(UChar32 ch) { fUnion.fFields.fLengthAndFlags = kShortString; int32_t i = 0; UBool isError = FALSE; U16_APPEND(fUnion.fStackFields.fBuffer, i, US_STACKBUF_SIZE, ch, isError); // We test isError so that the compiler does not complain that we don't. // If isError then i==0 which is what we want anyway. if(!isError) { setShortLength(i); } } UnicodeString::UnicodeString(const UChar *text) { fUnion.fFields.fLengthAndFlags = kShortString; doAppend(text, 0, -1); } UnicodeString::UnicodeString(const UChar *text, int32_t textLength) { fUnion.fFields.fLengthAndFlags = kShortString; doAppend(text, 0, textLength); } UnicodeString::UnicodeString(UBool isTerminated, ConstChar16Ptr textPtr, int32_t textLength) { fUnion.fFields.fLengthAndFlags = kReadonlyAlias; const UChar *text = textPtr; if(text == NULL) { // treat as an empty string, do not alias setToEmpty(); } else if(textLength < -1 || (textLength == -1 && !isTerminated) || (textLength >= 0 && isTerminated && text[textLength] != 0) ) { setToBogus(); } else { if(textLength == -1) { // text is terminated, or else it would have failed the above test textLength = u_strlen(text); } setArray(const_cast<UChar *>(text), textLength, isTerminated ? 
textLength + 1 : textLength); } } UnicodeString::UnicodeString(UChar *buff, int32_t buffLength, int32_t buffCapacity) { fUnion.fFields.fLengthAndFlags = kWritableAlias; if(buff == NULL) { // treat as an empty string, do not alias setToEmpty(); } else if(buffLength < -1 || buffCapacity < 0 || buffLength > buffCapacity) { setToBogus(); } else { if(buffLength == -1) { // fLength = u_strlen(buff); but do not look beyond buffCapacity const UChar *p = buff, *limit = buff + buffCapacity; while(p != limit && *p != 0) { ++p; } buffLength = (int32_t)(p - buff); } setArray(buff, buffLength, buffCapacity); } } UnicodeString::UnicodeString(const char *src, int32_t length, EInvariant) { fUnion.fFields.fLengthAndFlags = kShortString; if(src==NULL) { // treat as an empty string } else { if(length<0) { length=(int32_t)uprv_strlen(src); } if(cloneArrayIfNeeded(length, length, FALSE)) { u_charsToUChars(src, getArrayStart(), length); setLength(length); } else { setToBogus(); } } } #if U_CHARSET_IS_UTF8 UnicodeString::UnicodeString(const char *codepageData) { fUnion.fFields.fLengthAndFlags = kShortString; if(codepageData != 0) { setToUTF8(codepageData); } } UnicodeString::UnicodeString(const char *codepageData, int32_t dataLength) { fUnion.fFields.fLengthAndFlags = kShortString; // if there's nothing to convert, do nothing if(codepageData == 0 || dataLength == 0 || dataLength < -1) { return; } if(dataLength == -1) { dataLength = (int32_t)uprv_strlen(codepageData); } setToUTF8(StringPiece(codepageData, dataLength)); } // else see unistr_cnv.cpp #endif UnicodeString::UnicodeString(const UnicodeString& that) { fUnion.fFields.fLengthAndFlags = kShortString; copyFrom(that); } UnicodeString::UnicodeString(UnicodeString &&src) U_NOEXCEPT { copyFieldsFrom(src, TRUE); } UnicodeString::UnicodeString(const UnicodeString& that, int32_t srcStart) { fUnion.fFields.fLengthAndFlags = kShortString; setTo(that, srcStart); } UnicodeString::UnicodeString(const UnicodeString& that, int32_t srcStart, int32_t srcLength) { fUnion.fFields.fLengthAndFlags = kShortString; setTo(that, srcStart, srcLength); } // Replaceable base class clone() default implementation, does not clone Replaceable * Replaceable::clone() const { return NULL; } // UnicodeString overrides clone() with a real implementation UnicodeString * UnicodeString::clone() const { return new UnicodeString(*this); } //======================================== // array allocation //======================================== namespace { const int32_t kGrowSize = 128; // The number of bytes for one int32_t reference counter and capacity UChars // must fit into a 32-bit size_t (at least when on a 32-bit platform). // We also add one for the NUL terminator, to avoid reallocation in getTerminatedBuffer(), // and round up to a multiple of 16 bytes. // This means that capacity must be at most (0xfffffff0 - 4) / 2 - 1 = 0x7ffffff5. // (With more complicated checks we could go up to 0x7ffffffd without rounding up, // but that does not seem worth it.) const int32_t kMaxCapacity = 0x7ffffff5; int32_t getGrowCapacity(int32_t newLength) { int32_t growSize = (newLength >> 2) + kGrowSize; if(growSize <= (kMaxCapacity - newLength)) { return newLength + growSize; } else { return kMaxCapacity; } } } // namespace UBool UnicodeString::allocate(int32_t capacity) { if(capacity <= US_STACKBUF_SIZE) { fUnion.fFields.fLengthAndFlags = kShortString; return TRUE; } if(capacity <= kMaxCapacity) { ++capacity; // for the NUL // Switch to size_t which is unsigned so that we can allocate up to 4GB. 
// Reference counter + UChars. size_t numBytes = sizeof(int32_t) + (size_t)capacity * U_SIZEOF_UCHAR; // Round up to a multiple of 16. numBytes = (numBytes + 15) & ~15; int32_t *array = (int32_t *) uprv_malloc(numBytes); if(array != NULL) { // set initial refCount and point behind the refCount *array++ = 1; numBytes -= sizeof(int32_t); // have fArray point to the first UChar fUnion.fFields.fArray = (UChar *)array; fUnion.fFields.fCapacity = (int32_t)(numBytes / U_SIZEOF_UCHAR); fUnion.fFields.fLengthAndFlags = kLongString; return TRUE; } } fUnion.fFields.fLengthAndFlags = kIsBogus; fUnion.fFields.fArray = 0; fUnion.fFields.fCapacity = 0; return FALSE; } //======================================== // Destructor //======================================== #ifdef UNISTR_COUNT_FINAL_STRING_LENGTHS static u_atomic_int32_t finalLengthCounts[0x400]; // UnicodeString::kMaxShortLength+1 static u_atomic_int32_t beyondCount(0); U_CAPI void unistr_printLengths() { int32_t i; for(i = 0; i <= 59; ++i) { printf("%2d, %9d\n", i, (int32_t)finalLengthCounts[i]); } int32_t beyond = beyondCount; for(; i < UPRV_LENGTHOF(finalLengthCounts); ++i) { beyond += finalLengthCounts[i]; } printf(">59, %9d\n", beyond); } #endif UnicodeString::~UnicodeString() { #ifdef UNISTR_COUNT_FINAL_STRING_LENGTHS // Count lengths of strings at the end of their lifetime. // Useful for discussion of a desirable stack buffer size. // Count the contents length, not the optional NUL terminator nor further capacity. // Ignore open-buffer strings and strings which alias external storage. if((fUnion.fFields.fLengthAndFlags&(kOpenGetBuffer|kReadonlyAlias|kWritableAlias)) == 0) { if(hasShortLength()) { umtx_atomic_inc(finalLengthCounts + getShortLength()); } else { umtx_atomic_inc(&beyondCount); } } #endif releaseArray(); } //======================================== // Factory methods //======================================== UnicodeString UnicodeString::fromUTF8(StringPiece utf8) { UnicodeString result; result.setToUTF8(utf8); return result; } UnicodeString UnicodeString::fromUTF32(const UChar32 *utf32, int32_t length) { UnicodeString result; int32_t capacity; // Most UTF-32 strings will be BMP-only and result in a same-length // UTF-16 string. We overestimate the capacity just slightly, // just in case there are a few supplementary characters. if(length <= US_STACKBUF_SIZE) { capacity = US_STACKBUF_SIZE; } else { capacity = length + (length >> 4) + 4; } do { UChar *utf16 = result.getBuffer(capacity); int32_t length16; UErrorCode errorCode = U_ZERO_ERROR; u_strFromUTF32WithSub(utf16, result.getCapacity(), &length16, utf32, length, 0xfffd, // Substitution character. NULL, // Don't care about number of substitutions. &errorCode); result.releaseBuffer(length16); if(errorCode == U_BUFFER_OVERFLOW_ERROR) { capacity = length16 + 1; // +1 for the terminating NUL. continue; } else if(U_FAILURE(errorCode)) { result.setToBogus(); } break; } while(TRUE); return result; } //======================================== // Assignment //======================================== UnicodeString & UnicodeString::operator=(const UnicodeString &src) { return copyFrom(src); } UnicodeString & UnicodeString::fastCopyFrom(const UnicodeString &src) { return copyFrom(src, TRUE); } UnicodeString & UnicodeString::copyFrom(const UnicodeString &src, UBool fastCopy) { // if assigning to ourselves, do nothing if(this == &src) { return *this; } // is the right side bogus? 
if(src.isBogus()) { setToBogus(); return *this; } // delete the current contents releaseArray(); if(src.isEmpty()) { // empty string - use the stack buffer setToEmpty(); return *this; } // fLength>0 and not an "open" src.getBuffer(minCapacity) fUnion.fFields.fLengthAndFlags = src.fUnion.fFields.fLengthAndFlags; switch(src.fUnion.fFields.fLengthAndFlags & kAllStorageFlags) { case kShortString: // short string using the stack buffer, do the same uprv_memcpy(fUnion.fStackFields.fBuffer, src.fUnion.fStackFields.fBuffer, getShortLength() * U_SIZEOF_UCHAR); break; case kLongString: // src uses a refCounted string buffer, use that buffer with refCount // src is const, use a cast - we don't actually change it ((UnicodeString &)src).addRef(); // copy all fields, share the reference-counted buffer fUnion.fFields.fArray = src.fUnion.fFields.fArray; fUnion.fFields.fCapacity = src.fUnion.fFields.fCapacity; if(!hasShortLength()) { fUnion.fFields.fLength = src.fUnion.fFields.fLength; } break; case kReadonlyAlias: if(fastCopy) { // src is a readonly alias, do the same // -> maintain the readonly alias as such fUnion.fFields.fArray = src.fUnion.fFields.fArray; fUnion.fFields.fCapacity = src.fUnion.fFields.fCapacity; if(!hasShortLength()) { fUnion.fFields.fLength = src.fUnion.fFields.fLength; } break; } // else if(!fastCopy) fall through to case kWritableAlias // -> allocate a new buffer and copy the contents U_FALLTHROUGH; case kWritableAlias: { // src is a writable alias; we make a copy of that instead int32_t srcLength = src.length(); if(allocate(srcLength)) { u_memcpy(getArrayStart(), src.getArrayStart(), srcLength); setLength(srcLength); break; } // if there is not enough memory, then fall through to setting to bogus U_FALLTHROUGH; } default: // if src is bogus, set ourselves to bogus // do not call setToBogus() here because fArray and flags are not consistent here fUnion.fFields.fLengthAndFlags = kIsBogus; fUnion.fFields.fArray = 0; fUnion.fFields.fCapacity = 0; break; } return *this; } UnicodeString &UnicodeString::operator=(UnicodeString &&src) U_NOEXCEPT { // No explicit check for self move assignment, consistent with standard library. // Self move assignment causes no crash nor leak but might make the object bogus. releaseArray(); copyFieldsFrom(src, TRUE); return *this; } // Same as move assignment except without memory management. void UnicodeString::copyFieldsFrom(UnicodeString &src, UBool setSrcToBogus) U_NOEXCEPT { int16_t lengthAndFlags = fUnion.fFields.fLengthAndFlags = src.fUnion.fFields.fLengthAndFlags; if(lengthAndFlags & kUsingStackBuffer) { // Short string using the stack buffer, copy the contents. // Check for self assignment to prevent "overlap in memcpy" warnings, // although it should be harmless to copy a buffer to itself exactly. if(this != &src) { uprv_memcpy(fUnion.fStackFields.fBuffer, src.fUnion.fStackFields.fBuffer, getShortLength() * U_SIZEOF_UCHAR); } } else { // In all other cases, copy all fields. fUnion.fFields.fArray = src.fUnion.fFields.fArray; fUnion.fFields.fCapacity = src.fUnion.fFields.fCapacity; if(!hasShortLength()) { fUnion.fFields.fLength = src.fUnion.fFields.fLength; } if(setSrcToBogus) { // Set src to bogus without releasing any memory. src.fUnion.fFields.fLengthAndFlags = kIsBogus; src.fUnion.fFields.fArray = NULL; src.fUnion.fFields.fCapacity = 0; } } } void UnicodeString::swap(UnicodeString &other) U_NOEXCEPT { UnicodeString temp; // Empty short string: Known not to need releaseArray(). // Copy fields without resetting source values in between. 
temp.copyFieldsFrom(*this, FALSE); this->copyFieldsFrom(other, FALSE); other.copyFieldsFrom(temp, FALSE); // Set temp to an empty string so that other's memory is not released twice. temp.fUnion.fFields.fLengthAndFlags = kShortString; } //======================================== // Miscellaneous operations //======================================== UnicodeString UnicodeString::unescape() const { UnicodeString result(length(), (UChar32)0, (int32_t)0); // construct with capacity if (result.isBogus()) { return result; } const UChar *array = getBuffer(); int32_t len = length(); int32_t prev = 0; for (int32_t i=0;;) { if (i == len) { result.append(array, prev, len - prev); break; } if (array[i++] == 0x5C /*'\\'*/) { result.append(array, prev, (i - 1) - prev); UChar32 c = unescapeAt(i); // advances i if (c < 0) { result.remove(); // return empty string break; // invalid escape sequence } result.append(c); prev = i; } } return result; } UChar32 UnicodeString::unescapeAt(int32_t &offset) const { return u_unescapeAt(UnicodeString_charAt, &offset, length(), (void*)this); } //======================================== // Read-only implementation //======================================== UBool UnicodeString::doEquals(const UnicodeString &text, int32_t len) const { // Requires: this & text not bogus and have same lengths. // Byte-wise comparison works for equality regardless of endianness. return uprv_memcmp(getArrayStart(), text.getArrayStart(), len * U_SIZEOF_UCHAR) == 0; } int8_t UnicodeString::doCompare( int32_t start, int32_t length, const UChar *srcChars, int32_t srcStart, int32_t srcLength) const { // compare illegal string values if(isBogus()) { return -1; } // pin indices to legal values pinIndices(start, length); if(srcChars == NULL) { // treat const UChar *srcChars==NULL as an empty string return length == 0 ? 0 : 1; } // get the correct pointer const UChar *chars = getArrayStart(); chars += start; srcChars += srcStart; int32_t minLength; int8_t lengthResult; // get the srcLength if necessary if(srcLength < 0) { srcLength = u_strlen(srcChars + srcStart); } // are we comparing different lengths? if(length != srcLength) { if(length < srcLength) { minLength = length; lengthResult = -1; } else { minLength = srcLength; lengthResult = 1; } } else { minLength = length; lengthResult = 0; } /* * note that uprv_memcmp() returns an int but we return an int8_t; * we need to take care not to truncate the result - * one way to do this is to right-shift the value to * move the sign bit into the lower 8 bits and making sure that this * does not become 0 itself */ if(minLength > 0 && chars != srcChars) { int32_t result; # if U_IS_BIG_ENDIAN // big-endian: byte comparison works result = uprv_memcmp(chars, srcChars, minLength * sizeof(UChar)); if(result != 0) { return (int8_t)(result >> 15 | 1); } # else // little-endian: compare UChar units do { result = ((int32_t)*(chars++) - (int32_t)*(srcChars++)); if(result != 0) { return (int8_t)(result >> 15 | 1); } } while(--minLength > 0); # endif } return lengthResult; } /* String compare in code point order - doCompare() compares in code unit order. 
*/ int8_t UnicodeString::doCompareCodePointOrder(int32_t start, int32_t length, const UChar *srcChars, int32_t srcStart, int32_t srcLength) const { // compare illegal string values // treat const UChar *srcChars==NULL as an empty string if(isBogus()) { return -1; } // pin indices to legal values pinIndices(start, length); if(srcChars == NULL) { srcStart = srcLength = 0; } int32_t diff = uprv_strCompare(getArrayStart() + start, length, (srcChars!=NULL)?(srcChars + srcStart):NULL, srcLength, FALSE, TRUE); /* translate the 32-bit result into an 8-bit one */ if(diff!=0) { return (int8_t)(diff >> 15 | 1); } else { return 0; } } int32_t UnicodeString::getLength() const { return length(); } UChar UnicodeString::getCharAt(int32_t offset) const { return charAt(offset); } UChar32 UnicodeString::getChar32At(int32_t offset) const { return char32At(offset); } UChar32 UnicodeString::char32At(int32_t offset) const { int32_t len = length(); if((uint32_t)offset < (uint32_t)len) { const UChar *array = getArrayStart(); UChar32 c; U16_GET(array, 0, offset, len, c); return c; } else { return kInvalidUChar; } } int32_t UnicodeString::getChar32Start(int32_t offset) const { if((uint32_t)offset < (uint32_t)length()) { const UChar *array = getArrayStart(); U16_SET_CP_START(array, 0, offset); return offset; } else { return 0; } } int32_t UnicodeString::getChar32Limit(int32_t offset) const { int32_t len = length(); if((uint32_t)offset < (uint32_t)len) { const UChar *array = getArrayStart(); U16_SET_CP_LIMIT(array, 0, offset, len); return offset; } else { return len; } } int32_t UnicodeString::countChar32(int32_t start, int32_t length) const { pinIndices(start, length); // if(isBogus()) then fArray==0 and start==0 - u_countChar32() checks for NULL return u_countChar32(getArrayStart()+start, length); } UBool UnicodeString::hasMoreChar32Than(int32_t start, int32_t length, int32_t number) const { pinIndices(start, length); // if(isBogus()) then fArray==0 and start==0 - u_strHasMoreChar32Than() checks for NULL return u_strHasMoreChar32Than(getArrayStart()+start, length, number); } int32_t UnicodeString::moveIndex32(int32_t index, int32_t delta) const { // pin index int32_t len = length(); if(index<0) { index=0; } else if(index>len) { index=len; } const UChar *array = getArrayStart(); if(delta>0) { U16_FWD_N(array, index, len, delta); } else { U16_BACK_N(array, 0, index, -delta); } return index; } void UnicodeString::doExtract(int32_t start, int32_t length, UChar *dst, int32_t dstStart) const { // pin indices to legal values pinIndices(start, length); // do not copy anything if we alias dst itself const UChar *array = getArrayStart(); if(array + start != dst + dstStart) { us_arrayCopy(array, start, dst, dstStart, length); } } int32_t UnicodeString::extract(Char16Ptr dest, int32_t destCapacity, UErrorCode &errorCode) const { int32_t len = length(); if(U_SUCCESS(errorCode)) { if(isBogus() || destCapacity<0 || (destCapacity>0 && dest==0)) { errorCode=U_ILLEGAL_ARGUMENT_ERROR; } else { const UChar *array = getArrayStart(); if(len>0 && len<=destCapacity && array!=dest) { u_memcpy(dest, array, len); } return u_terminateUChars(dest, destCapacity, len, &errorCode); } } return len; } int32_t UnicodeString::extract(int32_t start, int32_t length, char *target, int32_t targetCapacity, enum EInvariant) const { // if the arguments are illegal, then do nothing if(targetCapacity < 0 || (targetCapacity > 0 && target == NULL)) { return 0; } // pin the indices to legal values pinIndices(start, length); if(length <= targetCapacity) { 
u_UCharsToChars(getArrayStart() + start, target, length); } UErrorCode status = U_ZERO_ERROR; return u_terminateChars(target, targetCapacity, length, &status); } UnicodeString UnicodeString::tempSubString(int32_t start, int32_t len) const { pinIndices(start, len); const UChar *array = getBuffer(); // not getArrayStart() to check kIsBogus & kOpenGetBuffer if(array==NULL) { array=fUnion.fStackFields.fBuffer; // anything not NULL because that would make an empty string len=-2; // bogus result string } return UnicodeString(FALSE, array + start, len); } int32_t UnicodeString::toUTF8(int32_t start, int32_t len, char *target, int32_t capacity) const { pinIndices(start, len); int32_t length8; UErrorCode errorCode = U_ZERO_ERROR; u_strToUTF8WithSub(target, capacity, &length8, getBuffer() + start, len, 0xFFFD, // Standard substitution character. NULL, // Don't care about number of substitutions. &errorCode); return length8; } #if U_CHARSET_IS_UTF8 int32_t UnicodeString::extract(int32_t start, int32_t len, char *target, uint32_t dstSize) const { // if the arguments are illegal, then do nothing if(/*dstSize < 0 || */(dstSize > 0 && target == 0)) { return 0; } return toUTF8(start, len, target, dstSize <= 0x7fffffff ? (int32_t)dstSize : 0x7fffffff); } // else see unistr_cnv.cpp #endif void UnicodeString::extractBetween(int32_t start, int32_t limit, UnicodeString& target) const { pinIndex(start); pinIndex(limit); doExtract(start, limit - start, target); } // When converting from UTF-16 to UTF-8, the result will have at most 3 times // as many bytes as the source has UChars. // The "worst cases" are writing systems like Indic, Thai and CJK with // 3:1 bytes:UChars. void UnicodeString::toUTF8(ByteSink &sink) const { int32_t length16 = length(); if(length16 != 0) { char stackBuffer[1024]; int32_t capacity = (int32_t)sizeof(stackBuffer); UBool utf8IsOwned = FALSE; char *utf8 = sink.GetAppendBuffer(length16 < capacity ? length16 : capacity, 3*length16, stackBuffer, capacity, &capacity); int32_t length8 = 0; UErrorCode errorCode = U_ZERO_ERROR; u_strToUTF8WithSub(utf8, capacity, &length8, getBuffer(), length16, 0xFFFD, // Standard substitution character. NULL, // Don't care about number of substitutions. &errorCode); if(errorCode == U_BUFFER_OVERFLOW_ERROR) { utf8 = (char *)uprv_malloc(length8); if(utf8 != NULL) { utf8IsOwned = TRUE; errorCode = U_ZERO_ERROR; u_strToUTF8WithSub(utf8, length8, &length8, getBuffer(), length16, 0xFFFD, // Standard substitution character. NULL, // Don't care about number of substitutions. &errorCode); } else { errorCode = U_MEMORY_ALLOCATION_ERROR; } } if(U_SUCCESS(errorCode)) { sink.Append(utf8, length8); sink.Flush(); } if(utf8IsOwned) { uprv_free(utf8); } } } int32_t UnicodeString::toUTF32(UChar32 *utf32, int32_t capacity, UErrorCode &errorCode) const { int32_t length32=0; if(U_SUCCESS(errorCode)) { // getBuffer() and u_strToUTF32WithSub() check for illegal arguments. u_strToUTF32WithSub(utf32, capacity, &length32, getBuffer(), length(), 0xfffd, // Substitution character. NULL, // Don't care about number of substitutions. 
&errorCode); } return length32; } int32_t UnicodeString::indexOf(const UChar *srcChars, int32_t srcStart, int32_t srcLength, int32_t start, int32_t length) const { if(isBogus() || srcChars == 0 || srcStart < 0 || srcLength == 0) { return -1; } // UnicodeString does not find empty substrings if(srcLength < 0 && srcChars[srcStart] == 0) { return -1; } // get the indices within bounds pinIndices(start, length); // find the first occurrence of the substring const UChar *array = getArrayStart(); const UChar *match = u_strFindFirst(array + start, length, srcChars + srcStart, srcLength); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::doIndexOf(UChar c, int32_t start, int32_t length) const { // pin indices pinIndices(start, length); // find the first occurrence of c const UChar *array = getArrayStart(); const UChar *match = u_memchr(array + start, c, length); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::doIndexOf(UChar32 c, int32_t start, int32_t length) const { // pin indices pinIndices(start, length); // find the first occurrence of c const UChar *array = getArrayStart(); const UChar *match = u_memchr32(array + start, c, length); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::lastIndexOf(const UChar *srcChars, int32_t srcStart, int32_t srcLength, int32_t start, int32_t length) const { if(isBogus() || srcChars == 0 || srcStart < 0 || srcLength == 0) { return -1; } // UnicodeString does not find empty substrings if(srcLength < 0 && srcChars[srcStart] == 0) { return -1; } // get the indices within bounds pinIndices(start, length); // find the last occurrence of the substring const UChar *array = getArrayStart(); const UChar *match = u_strFindLast(array + start, length, srcChars + srcStart, srcLength); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::doLastIndexOf(UChar c, int32_t start, int32_t length) const { if(isBogus()) { return -1; } // pin indices pinIndices(start, length); // find the last occurrence of c const UChar *array = getArrayStart(); const UChar *match = u_memrchr(array + start, c, length); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::doLastIndexOf(UChar32 c, int32_t start, int32_t length) const { // pin indices pinIndices(start, length); // find the last occurrence of c const UChar *array = getArrayStart(); const UChar *match = u_memrchr32(array + start, c, length); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } //======================================== // Write implementation //======================================== UnicodeString& UnicodeString::findAndReplace(int32_t start, int32_t length, const UnicodeString& oldText, int32_t oldStart, int32_t oldLength, const UnicodeString& newText, int32_t newStart, int32_t newLength) { if(isBogus() || oldText.isBogus() || newText.isBogus()) { return *this; } pinIndices(start, length); oldText.pinIndices(oldStart, oldLength); newText.pinIndices(newStart, newLength); if(oldLength == 0) { return *this; } while(length > 0 && length >= oldLength) { int32_t pos = indexOf(oldText, oldStart, oldLength, start, length); if(pos < 0) { // no more oldText's here: done break; } else { // we found oldText, replace it by newText and go beyond it replace(pos, oldLength, newText, newStart, newLength); length -= pos + oldLength - start; start = pos + newLength; } } 
return *this; } void UnicodeString::setToBogus() { releaseArray(); fUnion.fFields.fLengthAndFlags = kIsBogus; fUnion.fFields.fArray = 0; fUnion.fFields.fCapacity = 0; } // turn a bogus string into an empty one void UnicodeString::unBogus() { if(fUnion.fFields.fLengthAndFlags & kIsBogus) { setToEmpty(); } } const char16_t * UnicodeString::getTerminatedBuffer() { if(!isWritable()) { return nullptr; } UChar *array = getArrayStart(); int32_t len = length(); if(len < getCapacity()) { if(fUnion.fFields.fLengthAndFlags & kBufferIsReadonly) { // If len<capacity on a read-only alias, then array[len] is // either the original NUL (if constructed with (TRUE, s, length)) // or one of the original string contents characters (if later truncated), // therefore we can assume that array[len] is initialized memory. if(array[len] == 0) { return array; } } else if(((fUnion.fFields.fLengthAndFlags & kRefCounted) == 0 || refCount() == 1)) { // kRefCounted: Do not write the NUL if the buffer is shared. // That is mostly safe, except when the length of one copy was modified // without copy-on-write, e.g., via truncate(newLength) or remove(void). // Then the NUL would be written into the middle of another copy's string. // Otherwise, the buffer is fully writable and it is anyway safe to write the NUL. // Do not test if there is a NUL already because it might be uninitialized memory. // (That would be safe, but tools like valgrind & Purify would complain.) array[len] = 0; return array; } } if(len<INT32_MAX && cloneArrayIfNeeded(len+1)) { array = getArrayStart(); array[len] = 0; return array; } else { return nullptr; } } // setTo() analogous to the readonly-aliasing constructor with the same signature UnicodeString & UnicodeString::setTo(UBool isTerminated, ConstChar16Ptr textPtr, int32_t textLength) { if(fUnion.fFields.fLengthAndFlags & kOpenGetBuffer) { // do not modify a string that has an "open" getBuffer(minCapacity) return *this; } const UChar *text = textPtr; if(text == NULL) { // treat as an empty string, do not alias releaseArray(); setToEmpty(); return *this; } if( textLength < -1 || (textLength == -1 && !isTerminated) || (textLength >= 0 && isTerminated && text[textLength] != 0) ) { setToBogus(); return *this; } releaseArray(); if(textLength == -1) { // text is terminated, or else it would have failed the above test textLength = u_strlen(text); } fUnion.fFields.fLengthAndFlags = kReadonlyAlias; setArray((UChar *)text, textLength, isTerminated ? 
textLength + 1 : textLength); return *this; } // setTo() analogous to the writable-aliasing constructor with the same signature UnicodeString & UnicodeString::setTo(UChar *buffer, int32_t buffLength, int32_t buffCapacity) { if(fUnion.fFields.fLengthAndFlags & kOpenGetBuffer) { // do not modify a string that has an "open" getBuffer(minCapacity) return *this; } if(buffer == NULL) { // treat as an empty string, do not alias releaseArray(); setToEmpty(); return *this; } if(buffLength < -1 || buffCapacity < 0 || buffLength > buffCapacity) { setToBogus(); return *this; } else if(buffLength == -1) { // buffLength = u_strlen(buff); but do not look beyond buffCapacity const UChar *p = buffer, *limit = buffer + buffCapacity; while(p != limit && *p != 0) { ++p; } buffLength = (int32_t)(p - buffer); } releaseArray(); fUnion.fFields.fLengthAndFlags = kWritableAlias; setArray(buffer, buffLength, buffCapacity); return *this; } UnicodeString &UnicodeString::setToUTF8(StringPiece utf8) { unBogus(); int32_t length = utf8.length(); int32_t capacity; // The UTF-16 string will be at most as long as the UTF-8 string. if(length <= US_STACKBUF_SIZE) { capacity = US_STACKBUF_SIZE; } else { capacity = length + 1; // +1 for the terminating NUL. } UChar *utf16 = getBuffer(capacity); int32_t length16; UErrorCode errorCode = U_ZERO_ERROR; u_strFromUTF8WithSub(utf16, getCapacity(), &length16, utf8.data(), length, 0xfffd, // Substitution character. NULL, // Don't care about number of substitutions. &errorCode); releaseBuffer(length16); if(U_FAILURE(errorCode)) { setToBogus(); } return *this; } UnicodeString& UnicodeString::setCharAt(int32_t offset, UChar c) { int32_t len = length(); if(cloneArrayIfNeeded() && len > 0) { if(offset < 0) { offset = 0; } else if(offset >= len) { offset = len - 1; } getArrayStart()[offset] = c; } return *this; } UnicodeString& UnicodeString::replace(int32_t start, int32_t _length, UChar32 srcChar) { UChar buffer[U16_MAX_LENGTH]; int32_t count = 0; UBool isError = FALSE; U16_APPEND(buffer, count, U16_MAX_LENGTH, srcChar, isError); // We test isError so that the compiler does not complain that we don't. // If isError (srcChar is not a valid code point) then count==0 which means // we remove the source segment rather than replacing it with srcChar. return doReplace(start, _length, buffer, 0, isError ? 0 : count); } UnicodeString& UnicodeString::append(UChar32 srcChar) { UChar buffer[U16_MAX_LENGTH]; int32_t _length = 0; UBool isError = FALSE; U16_APPEND(buffer, _length, U16_MAX_LENGTH, srcChar, isError); // We test isError so that the compiler does not complain that we don't. // If isError then _length==0 which turns the doAppend() into a no-op anyway. return isError ? 
*this : doAppend(buffer, 0, _length); } UnicodeString& UnicodeString::doReplace( int32_t start, int32_t length, const UnicodeString& src, int32_t srcStart, int32_t srcLength) { // pin the indices to legal values src.pinIndices(srcStart, srcLength); // get the characters from src // and replace the range in ourselves with them return doReplace(start, length, src.getArrayStart(), srcStart, srcLength); } UnicodeString& UnicodeString::doReplace(int32_t start, int32_t length, const UChar *srcChars, int32_t srcStart, int32_t srcLength) { if(!isWritable()) { return *this; } int32_t oldLength = this->length(); // optimize (read-only alias).remove(0, start) and .remove(start, end) if((fUnion.fFields.fLengthAndFlags&kBufferIsReadonly) && srcLength == 0) { if(start == 0) { // remove prefix by adjusting the array pointer pinIndex(length); fUnion.fFields.fArray += length; fUnion.fFields.fCapacity -= length; setLength(oldLength - length); return *this; } else { pinIndex(start); if(length >= (oldLength - start)) { // remove suffix by reducing the length (like truncate()) setLength(start); fUnion.fFields.fCapacity = start; // not NUL-terminated any more return *this; } } } if(start == oldLength) { return doAppend(srcChars, srcStart, srcLength); } if(srcChars == 0) { srcLength = 0; } else { // Perform all remaining operations relative to srcChars + srcStart. // From this point forward, do not use srcStart. srcChars += srcStart; if (srcLength < 0) { // get the srcLength if necessary srcLength = u_strlen(srcChars); } } // pin the indices to legal values pinIndices(start, length); // Calculate the size of the string after the replace. // Avoid int32_t overflow. int32_t newLength = oldLength - length; if(srcLength > (INT32_MAX - newLength)) { setToBogus(); return *this; } newLength += srcLength; // Check for insertion into ourself const UChar *oldArray = getArrayStart(); if (isBufferWritable() && oldArray < srcChars + srcLength && srcChars < oldArray + oldLength) { // Copy into a new UnicodeString and start over UnicodeString copy(srcChars, srcLength); if (copy.isBogus()) { setToBogus(); return *this; } return doReplace(start, length, copy.getArrayStart(), 0, srcLength); } // cloneArrayIfNeeded(doCopyArray=FALSE) may change fArray but will not copy the current contents; // therefore we need to keep the current fArray UChar oldStackBuffer[US_STACKBUF_SIZE]; if((fUnion.fFields.fLengthAndFlags&kUsingStackBuffer) && (newLength > US_STACKBUF_SIZE)) { // copy the stack buffer contents because it will be overwritten with // fUnion.fFields values u_memcpy(oldStackBuffer, oldArray, oldLength); oldArray = oldStackBuffer; } // clone our array and allocate a bigger array if needed int32_t *bufferToDelete = 0; if(!cloneArrayIfNeeded(newLength, getGrowCapacity(newLength), FALSE, &bufferToDelete) ) { return *this; } // now do the replace UChar *newArray = getArrayStart(); if(newArray != oldArray) { // if fArray changed, then we need to copy everything except what will change us_arrayCopy(oldArray, 0, newArray, 0, start); us_arrayCopy(oldArray, start + length, newArray, start + srcLength, oldLength - (start + length)); } else if(length != srcLength) { // fArray did not change; copy only the portion that isn't changing, leaving a hole us_arrayCopy(oldArray, start + length, newArray, start + srcLength, oldLength - (start + length)); } // now fill in the hole with the new string us_arrayCopy(srcChars, 0, newArray, start, srcLength); setLength(newLength); // delayed delete in case srcChars == fArray when we started, and // to 
keep oldArray alive for the above operations if (bufferToDelete) { uprv_free(bufferToDelete); } return *this; } // Versions of doReplace() only for append() variants. // doReplace() and doAppend() optimize for different cases. UnicodeString& UnicodeString::doAppend(const UnicodeString& src, int32_t srcStart, int32_t srcLength) { if(srcLength == 0) { return *this; } // pin the indices to legal values src.pinIndices(srcStart, srcLength); return doAppend(src.getArrayStart(), srcStart, srcLength); } UnicodeString& UnicodeString::doAppend(const UChar *srcChars, int32_t srcStart, int32_t srcLength) { if(!isWritable() || srcLength == 0 || srcChars == NULL) { return *this; } // Perform all remaining operations relative to srcChars + srcStart. // From this point forward, do not use srcStart. srcChars += srcStart; if(srcLength < 0) { // get the srcLength if necessary if((srcLength = u_strlen(srcChars)) == 0) { return *this; } } int32_t oldLength = length(); int32_t newLength = oldLength + srcLength; // Check for append onto ourself const UChar* oldArray = getArrayStart(); if (isBufferWritable() && oldArray < srcChars + srcLength && srcChars < oldArray + oldLength) { // Copy into a new UnicodeString and start over UnicodeString copy(srcChars, srcLength); if (copy.isBogus()) { setToBogus(); return *this; } return doAppend(copy.getArrayStart(), 0, srcLength); } // optimize append() onto a large-enough, owned string if((newLength <= getCapacity() && isBufferWritable()) || cloneArrayIfNeeded(newLength, getGrowCapacity(newLength))) { UChar *newArray = getArrayStart(); // Do not copy characters when // UChar *buffer=str.getAppendBuffer(...); // is followed by // str.append(buffer, length); // or // str.appendString(buffer, length) // or similar. if(srcChars != newArray + oldLength) { us_arrayCopy(srcChars, 0, newArray, oldLength, srcLength); } setLength(newLength); } return *this; } /** * Replaceable API */ void UnicodeString::handleReplaceBetween(int32_t start, int32_t limit, const UnicodeString& text) { replaceBetween(start, limit, text); } /** * Replaceable API */ void UnicodeString::copy(int32_t start, int32_t limit, int32_t dest) { if (limit <= start) { return; // Nothing to do; avoid bogus malloc call } UChar* text = (UChar*) uprv_malloc( sizeof(UChar) * (limit - start) ); // Check to make sure text is not null. if (text != NULL) { extractBetween(start, limit, text, 0); insert(dest, text, 0, limit - start); uprv_free(text); } } /** * Replaceable API * * NOTE: This is for the Replaceable class. There is no rep.cpp, * so we implement this function here. */ UBool Replaceable::hasMetaData() const { return TRUE; } /** * Replaceable API */ UBool UnicodeString::hasMetaData() const { return FALSE; } UnicodeString& UnicodeString::doReverse(int32_t start, int32_t length) { if(length <= 1 || !cloneArrayIfNeeded()) { return *this; } // pin the indices to legal values pinIndices(start, length); if(length <= 1) { // pinIndices() might have shrunk the length return *this; } UChar *left = getArrayStart() + start; UChar *right = left + length - 1; // -1 for inclusive boundary (length>=2) UChar swap; UBool hasSupplementary = FALSE; // Before the loop we know left<right because length>=2. do { hasSupplementary |= (UBool)U16_IS_LEAD(swap = *left); hasSupplementary |= (UBool)U16_IS_LEAD(*left++ = *right); *right-- = swap; } while(left < right); // Make sure to test the middle code unit of an odd-length string. // Redundant if the length is even. 
hasSupplementary |= (UBool)U16_IS_LEAD(*left); /* if there are supplementary code points in the reversed range, then re-swap their surrogates */ if(hasSupplementary) { UChar swap2; left = getArrayStart() + start; right = left + length - 1; // -1 so that we can look at *(left+1) if left<right while(left < right) { if(U16_IS_TRAIL(swap = *left) && U16_IS_LEAD(swap2 = *(left + 1))) { *left++ = swap2; *left++ = swap; } else { ++left; } } } return *this; } UBool UnicodeString::padLeading(int32_t targetLength, UChar padChar) { int32_t oldLength = length(); if(oldLength >= targetLength || !cloneArrayIfNeeded(targetLength)) { return FALSE; } else { // move contents up by padding width UChar *array = getArrayStart(); int32_t start = targetLength - oldLength; us_arrayCopy(array, 0, array, start, oldLength); // fill in padding character while(--start >= 0) { array[start] = padChar; } setLength(targetLength); return TRUE; } } UBool UnicodeString::padTrailing(int32_t targetLength, UChar padChar) { int32_t oldLength = length(); if(oldLength >= targetLength || !cloneArrayIfNeeded(targetLength)) { return FALSE; } else { // fill in padding character UChar *array = getArrayStart(); int32_t length = targetLength; while(--length >= oldLength) { array[length] = padChar; } setLength(targetLength); return TRUE; } } //======================================== // Hashing //======================================== int32_t UnicodeString::doHashCode() const { /* Delegate hash computation to uhash. This makes UnicodeString * hashing consistent with UChar* hashing. */ int32_t hashCode = ustr_hashUCharsN(getArrayStart(), length()); if (hashCode == kInvalidHashCode) { hashCode = kEmptyHashCode; } return hashCode; } //======================================== // External Buffer //======================================== char16_t * UnicodeString::getBuffer(int32_t minCapacity) { if(minCapacity>=-1 && cloneArrayIfNeeded(minCapacity)) { fUnion.fFields.fLengthAndFlags|=kOpenGetBuffer; setZeroLength(); return getArrayStart(); } else { return nullptr; } } void UnicodeString::releaseBuffer(int32_t newLength) { if(fUnion.fFields.fLengthAndFlags&kOpenGetBuffer && newLength>=-1) { // set the new fLength int32_t capacity=getCapacity(); if(newLength==-1) { // the new length is the string length, capped by fCapacity const UChar *array=getArrayStart(), *p=array, *limit=array+capacity; while(p<limit && *p!=0) { ++p; } newLength=(int32_t)(p-array); } else if(newLength>capacity) { newLength=capacity; } setLength(newLength); fUnion.fFields.fLengthAndFlags&=~kOpenGetBuffer; } } //======================================== // Miscellaneous //======================================== UBool UnicodeString::cloneArrayIfNeeded(int32_t newCapacity, int32_t growCapacity, UBool doCopyArray, int32_t **pBufferToDelete, UBool forceClone) { // default parameters need to be static, therefore // the defaults are -1 to have convenience defaults if(newCapacity == -1) { newCapacity = getCapacity(); } // while a getBuffer(minCapacity) is "open", // prevent any modifications of the string by returning FALSE here // if the string is bogus, then only an assignment or similar can revive it if(!isWritable()) { return FALSE; } /* * We need to make a copy of the array if * the buffer is read-only, or * the buffer is refCounted (shared), and refCount>1, or * the buffer is too small. * Return FALSE if memory could not be allocated. 
*/ if(forceClone || fUnion.fFields.fLengthAndFlags & kBufferIsReadonly || (fUnion.fFields.fLengthAndFlags & kRefCounted && refCount() > 1) || newCapacity > getCapacity() ) { // check growCapacity for default value and use of the stack buffer if(growCapacity < 0) { growCapacity = newCapacity; } else if(newCapacity <= US_STACKBUF_SIZE && growCapacity > US_STACKBUF_SIZE) { growCapacity = US_STACKBUF_SIZE; } // save old values UChar oldStackBuffer[US_STACKBUF_SIZE]; UChar *oldArray; int32_t oldLength = length(); int16_t flags = fUnion.fFields.fLengthAndFlags; if(flags&kUsingStackBuffer) { U_ASSERT(!(flags&kRefCounted)); /* kRefCounted and kUsingStackBuffer are mutually exclusive */ if(doCopyArray && growCapacity > US_STACKBUF_SIZE) { // copy the stack buffer contents because it will be overwritten with // fUnion.fFields values us_arrayCopy(fUnion.fStackFields.fBuffer, 0, oldStackBuffer, 0, oldLength); oldArray = oldStackBuffer; } else { oldArray = NULL; // no need to copy from the stack buffer to itself } } else { oldArray = fUnion.fFields.fArray; U_ASSERT(oldArray!=NULL); /* when stack buffer is not used, oldArray must have a non-NULL reference */ } // allocate a new array if(allocate(growCapacity) || (newCapacity < growCapacity && allocate(newCapacity)) ) { if(doCopyArray) { // copy the contents // do not copy more than what fits - it may be smaller than before int32_t minLength = oldLength; newCapacity = getCapacity(); if(newCapacity < minLength) { minLength = newCapacity; } if(oldArray != NULL) { us_arrayCopy(oldArray, 0, getArrayStart(), 0, minLength); } setLength(minLength); } else { setZeroLength(); } // release the old array if(flags & kRefCounted) { // the array is refCounted; decrement and release if 0 u_atomic_int32_t *pRefCount = ((u_atomic_int32_t *)oldArray - 1); if(umtx_atomic_dec(pRefCount) == 0) { if(pBufferToDelete == 0) { // Note: cast to (void *) is needed with MSVC, where u_atomic_int32_t // is defined as volatile. (Volatile has useful non-standard behavior // with this compiler.) 
uprv_free((void *)pRefCount); } else { // the caller requested to delete it himself *pBufferToDelete = (int32_t *)pRefCount; } } } } else { // not enough memory for growCapacity and not even for the smaller newCapacity // reset the old values for setToBogus() to release the array if(!(flags&kUsingStackBuffer)) { fUnion.fFields.fArray = oldArray; } fUnion.fFields.fLengthAndFlags = flags; setToBogus(); return FALSE; } } return TRUE; } // UnicodeStringAppendable ------------------------------------------------- *** UnicodeStringAppendable::~UnicodeStringAppendable() {} UBool UnicodeStringAppendable::appendCodeUnit(UChar c) { return str.doAppend(&c, 0, 1).isWritable(); } UBool UnicodeStringAppendable::appendCodePoint(UChar32 c) { UChar buffer[U16_MAX_LENGTH]; int32_t cLength = 0; UBool isError = FALSE; U16_APPEND(buffer, cLength, U16_MAX_LENGTH, c, isError); return !isError && str.doAppend(buffer, 0, cLength).isWritable(); } UBool UnicodeStringAppendable::appendString(const UChar *s, int32_t length) { return str.doAppend(s, 0, length).isWritable(); } UBool UnicodeStringAppendable::reserveAppendCapacity(int32_t appendCapacity) { return str.cloneArrayIfNeeded(str.length() + appendCapacity); } UChar * UnicodeStringAppendable::getAppendBuffer(int32_t minCapacity, int32_t desiredCapacityHint, UChar *scratch, int32_t scratchCapacity, int32_t *resultCapacity) { if(minCapacity < 1 || scratchCapacity < minCapacity) { *resultCapacity = 0; return NULL; } int32_t oldLength = str.length(); if(minCapacity <= (kMaxCapacity - oldLength) && desiredCapacityHint <= (kMaxCapacity - oldLength) && str.cloneArrayIfNeeded(oldLength + minCapacity, oldLength + desiredCapacityHint)) { *resultCapacity = str.getCapacity() - oldLength; return str.getArrayStart() + oldLength; } *resultCapacity = scratchCapacity; return scratch; } U_NAMESPACE_END U_NAMESPACE_USE U_CAPI int32_t U_EXPORT2 uhash_hashUnicodeString(const UElement key) { const UnicodeString *str = (const UnicodeString*) key.pointer; return (str == NULL) ? 0 : str->hashCode(); } // Moved here from uhash_us.cpp so that using a UVector of UnicodeString* // does not depend on hashtable code. U_CAPI UBool U_EXPORT2 uhash_compareUnicodeString(const UElement key1, const UElement key2) { const UnicodeString *str1 = (const UnicodeString*) key1.pointer; const UnicodeString *str2 = (const UnicodeString*) key2.pointer; if (str1 == str2) { return TRUE; } if (str1 == NULL || str2 == NULL) { return FALSE; } return *str1 == *str2; } #ifdef U_STATIC_IMPLEMENTATION /* This should never be called. It is defined here to make sure that the virtual vector deleting destructor is defined within unistr.cpp. The vector deleting destructor is already a part of UObject, but defining it here makes sure that it is included with this object file. This makes sure that static library dependencies are kept to a minimum. */ static void uprv_UnicodeStringDummy(void) { delete [] (new UnicodeString[2]); } #endif
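The listing above contains the method this record names, UnicodeString::doAppend(), including its "Check for append onto ourself" branch, which copies the source whenever it aliases the destination buffer. A minimal usage sketch of that behavior follows; it assumes an installed ICU that provides <unicode/unistr.h>, and it is an illustration rather than part of the record.

#include <unicode/unistr.h>
#include <iostream>
#include <string>

int main() {
    icu::UnicodeString s(u"abc");
    s.append(s);                 // self-append: doAppend() detects the overlap and
                                 // copies the source first, so the result is well defined
    std::string out;
    s.toUTF8String(out);         // ICU's documented UTF-8 convenience conversion
    std::cout << out << '\n';    // prints "abcabc"
    return 0;
}

The getTerminatedBuffer() comments in the same listing, which forbid writing a NUL terminator into a shared, reference-counted buffer, describe the same class of shared-state hazard that the CWE-362 label points at; the record's actual change is whatever differs between this code_before field and the code_after field that follows.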
// © 2016 and later: Unicode, Inc. and others. // License & terms of use: http://www.unicode.org/copyright.html /* ****************************************************************************** * Copyright (C) 1999-2016, International Business Machines Corporation and * others. All Rights Reserved. ****************************************************************************** * * File unistr.cpp * * Modification History: * * Date Name Description * 09/25/98 stephen Creation. * 04/20/99 stephen Overhauled per 4/16 code review. * 07/09/99 stephen Renamed {hi,lo},{byte,word} to icu_X for HP/UX * 11/18/99 aliu Added handleReplaceBetween() to make inherit from * Replaceable. * 06/25/01 grhoten Removed the dependency on iostream ****************************************************************************** */ #include "unicode/utypes.h" #include "unicode/appendable.h" #include "unicode/putil.h" #include "cstring.h" #include "cmemory.h" #include "unicode/ustring.h" #include "unicode/unistr.h" #include "unicode/utf.h" #include "unicode/utf16.h" #include "uelement.h" #include "ustr_imp.h" #include "umutex.h" #include "uassert.h" #if 0 #include <iostream> using namespace std; //DEBUGGING void print(const UnicodeString& s, const char *name) { UChar c; cout << name << ":|"; for(int i = 0; i < s.length(); ++i) { c = s[i]; if(c>= 0x007E || c < 0x0020) cout << "[0x" << hex << s[i] << "]"; else cout << (char) s[i]; } cout << '|' << endl; } void print(const UChar *s, int32_t len, const char *name) { UChar c; cout << name << ":|"; for(int i = 0; i < len; ++i) { c = s[i]; if(c>= 0x007E || c < 0x0020) cout << "[0x" << hex << s[i] << "]"; else cout << (char) s[i]; } cout << '|' << endl; } // END DEBUGGING #endif // Local function definitions for now // need to copy areas that may overlap static inline void us_arrayCopy(const UChar *src, int32_t srcStart, UChar *dst, int32_t dstStart, int32_t count) { if(count>0) { uprv_memmove(dst+dstStart, src+srcStart, (size_t)count*sizeof(*src)); } } // u_unescapeAt() callback to get a UChar from a UnicodeString U_CDECL_BEGIN static UChar U_CALLCONV UnicodeString_charAt(int32_t offset, void *context) { return ((icu::UnicodeString*) context)->charAt(offset); } U_CDECL_END U_NAMESPACE_BEGIN /* The Replaceable virtual destructor can't be defined in the header due to how AIX works with multiple definitions of virtual functions. */ Replaceable::~Replaceable() {} UOBJECT_DEFINE_RTTI_IMPLEMENTATION(UnicodeString) UnicodeString U_EXPORT2 operator+ (const UnicodeString &s1, const UnicodeString &s2) { return UnicodeString(s1.length()+s2.length()+1, (UChar32)0, 0). append(s1). append(s2); } //======================================== // Reference Counting functions, put at top of file so that optimizing compilers // have a chance to automatically inline. //======================================== void UnicodeString::addRef() { umtx_atomic_inc((u_atomic_int32_t *)fUnion.fFields.fArray - 1); } int32_t UnicodeString::removeRef() { return umtx_atomic_dec((u_atomic_int32_t *)fUnion.fFields.fArray - 1); } int32_t UnicodeString::refCount() const { return umtx_loadAcquire(*((u_atomic_int32_t *)fUnion.fFields.fArray - 1)); } void UnicodeString::releaseArray() { if((fUnion.fFields.fLengthAndFlags & kRefCounted) && removeRef() == 0) { uprv_free((int32_t *)fUnion.fFields.fArray - 1); } } //======================================== // Constructors //======================================== // The default constructor is inline in unistr.h. 
UnicodeString::UnicodeString(int32_t capacity, UChar32 c, int32_t count) { fUnion.fFields.fLengthAndFlags = 0; if(count <= 0 || (uint32_t)c > 0x10ffff) { // just allocate and do not do anything else allocate(capacity); } else if(c <= 0xffff) { int32_t length = count; if(capacity < length) { capacity = length; } if(allocate(capacity)) { UChar *array = getArrayStart(); UChar unit = (UChar)c; for(int32_t i = 0; i < length; ++i) { array[i] = unit; } setLength(length); } } else { // supplementary code point, write surrogate pairs if(count > (INT32_MAX / 2)) { // We would get more than 2G UChars. allocate(capacity); return; } int32_t length = count * 2; if(capacity < length) { capacity = length; } if(allocate(capacity)) { UChar *array = getArrayStart(); UChar lead = U16_LEAD(c); UChar trail = U16_TRAIL(c); for(int32_t i = 0; i < length; i += 2) { array[i] = lead; array[i + 1] = trail; } setLength(length); } } } UnicodeString::UnicodeString(UChar ch) { fUnion.fFields.fLengthAndFlags = kLength1 | kShortString; fUnion.fStackFields.fBuffer[0] = ch; } UnicodeString::UnicodeString(UChar32 ch) { fUnion.fFields.fLengthAndFlags = kShortString; int32_t i = 0; UBool isError = FALSE; U16_APPEND(fUnion.fStackFields.fBuffer, i, US_STACKBUF_SIZE, ch, isError); // We test isError so that the compiler does not complain that we don't. // If isError then i==0 which is what we want anyway. if(!isError) { setShortLength(i); } } UnicodeString::UnicodeString(const UChar *text) { fUnion.fFields.fLengthAndFlags = kShortString; doAppend(text, 0, -1); } UnicodeString::UnicodeString(const UChar *text, int32_t textLength) { fUnion.fFields.fLengthAndFlags = kShortString; doAppend(text, 0, textLength); } UnicodeString::UnicodeString(UBool isTerminated, ConstChar16Ptr textPtr, int32_t textLength) { fUnion.fFields.fLengthAndFlags = kReadonlyAlias; const UChar *text = textPtr; if(text == NULL) { // treat as an empty string, do not alias setToEmpty(); } else if(textLength < -1 || (textLength == -1 && !isTerminated) || (textLength >= 0 && isTerminated && text[textLength] != 0) ) { setToBogus(); } else { if(textLength == -1) { // text is terminated, or else it would have failed the above test textLength = u_strlen(text); } setArray(const_cast<UChar *>(text), textLength, isTerminated ? 
textLength + 1 : textLength); } } UnicodeString::UnicodeString(UChar *buff, int32_t buffLength, int32_t buffCapacity) { fUnion.fFields.fLengthAndFlags = kWritableAlias; if(buff == NULL) { // treat as an empty string, do not alias setToEmpty(); } else if(buffLength < -1 || buffCapacity < 0 || buffLength > buffCapacity) { setToBogus(); } else { if(buffLength == -1) { // fLength = u_strlen(buff); but do not look beyond buffCapacity const UChar *p = buff, *limit = buff + buffCapacity; while(p != limit && *p != 0) { ++p; } buffLength = (int32_t)(p - buff); } setArray(buff, buffLength, buffCapacity); } } UnicodeString::UnicodeString(const char *src, int32_t length, EInvariant) { fUnion.fFields.fLengthAndFlags = kShortString; if(src==NULL) { // treat as an empty string } else { if(length<0) { length=(int32_t)uprv_strlen(src); } if(cloneArrayIfNeeded(length, length, FALSE)) { u_charsToUChars(src, getArrayStart(), length); setLength(length); } else { setToBogus(); } } } #if U_CHARSET_IS_UTF8 UnicodeString::UnicodeString(const char *codepageData) { fUnion.fFields.fLengthAndFlags = kShortString; if(codepageData != 0) { setToUTF8(codepageData); } } UnicodeString::UnicodeString(const char *codepageData, int32_t dataLength) { fUnion.fFields.fLengthAndFlags = kShortString; // if there's nothing to convert, do nothing if(codepageData == 0 || dataLength == 0 || dataLength < -1) { return; } if(dataLength == -1) { dataLength = (int32_t)uprv_strlen(codepageData); } setToUTF8(StringPiece(codepageData, dataLength)); } // else see unistr_cnv.cpp #endif UnicodeString::UnicodeString(const UnicodeString& that) { fUnion.fFields.fLengthAndFlags = kShortString; copyFrom(that); } UnicodeString::UnicodeString(UnicodeString &&src) U_NOEXCEPT { copyFieldsFrom(src, TRUE); } UnicodeString::UnicodeString(const UnicodeString& that, int32_t srcStart) { fUnion.fFields.fLengthAndFlags = kShortString; setTo(that, srcStart); } UnicodeString::UnicodeString(const UnicodeString& that, int32_t srcStart, int32_t srcLength) { fUnion.fFields.fLengthAndFlags = kShortString; setTo(that, srcStart, srcLength); } // Replaceable base class clone() default implementation, does not clone Replaceable * Replaceable::clone() const { return NULL; } // UnicodeString overrides clone() with a real implementation UnicodeString * UnicodeString::clone() const { return new UnicodeString(*this); } //======================================== // array allocation //======================================== namespace { const int32_t kGrowSize = 128; // The number of bytes for one int32_t reference counter and capacity UChars // must fit into a 32-bit size_t (at least when on a 32-bit platform). // We also add one for the NUL terminator, to avoid reallocation in getTerminatedBuffer(), // and round up to a multiple of 16 bytes. // This means that capacity must be at most (0xfffffff0 - 4) / 2 - 1 = 0x7ffffff5. // (With more complicated checks we could go up to 0x7ffffffd without rounding up, // but that does not seem worth it.) const int32_t kMaxCapacity = 0x7ffffff5; int32_t getGrowCapacity(int32_t newLength) { int32_t growSize = (newLength >> 2) + kGrowSize; if(growSize <= (kMaxCapacity - newLength)) { return newLength + growSize; } else { return kMaxCapacity; } } } // namespace UBool UnicodeString::allocate(int32_t capacity) { if(capacity <= US_STACKBUF_SIZE) { fUnion.fFields.fLengthAndFlags = kShortString; return TRUE; } if(capacity <= kMaxCapacity) { ++capacity; // for the NUL // Switch to size_t which is unsigned so that we can allocate up to 4GB. 
// Reference counter + UChars. size_t numBytes = sizeof(int32_t) + (size_t)capacity * U_SIZEOF_UCHAR; // Round up to a multiple of 16. numBytes = (numBytes + 15) & ~15; int32_t *array = (int32_t *) uprv_malloc(numBytes); if(array != NULL) { // set initial refCount and point behind the refCount *array++ = 1; numBytes -= sizeof(int32_t); // have fArray point to the first UChar fUnion.fFields.fArray = (UChar *)array; fUnion.fFields.fCapacity = (int32_t)(numBytes / U_SIZEOF_UCHAR); fUnion.fFields.fLengthAndFlags = kLongString; return TRUE; } } fUnion.fFields.fLengthAndFlags = kIsBogus; fUnion.fFields.fArray = 0; fUnion.fFields.fCapacity = 0; return FALSE; } //======================================== // Destructor //======================================== #ifdef UNISTR_COUNT_FINAL_STRING_LENGTHS static u_atomic_int32_t finalLengthCounts[0x400]; // UnicodeString::kMaxShortLength+1 static u_atomic_int32_t beyondCount(0); U_CAPI void unistr_printLengths() { int32_t i; for(i = 0; i <= 59; ++i) { printf("%2d, %9d\n", i, (int32_t)finalLengthCounts[i]); } int32_t beyond = beyondCount; for(; i < UPRV_LENGTHOF(finalLengthCounts); ++i) { beyond += finalLengthCounts[i]; } printf(">59, %9d\n", beyond); } #endif UnicodeString::~UnicodeString() { #ifdef UNISTR_COUNT_FINAL_STRING_LENGTHS // Count lengths of strings at the end of their lifetime. // Useful for discussion of a desirable stack buffer size. // Count the contents length, not the optional NUL terminator nor further capacity. // Ignore open-buffer strings and strings which alias external storage. if((fUnion.fFields.fLengthAndFlags&(kOpenGetBuffer|kReadonlyAlias|kWritableAlias)) == 0) { if(hasShortLength()) { umtx_atomic_inc(finalLengthCounts + getShortLength()); } else { umtx_atomic_inc(&beyondCount); } } #endif releaseArray(); } //======================================== // Factory methods //======================================== UnicodeString UnicodeString::fromUTF8(StringPiece utf8) { UnicodeString result; result.setToUTF8(utf8); return result; } UnicodeString UnicodeString::fromUTF32(const UChar32 *utf32, int32_t length) { UnicodeString result; int32_t capacity; // Most UTF-32 strings will be BMP-only and result in a same-length // UTF-16 string. We overestimate the capacity just slightly, // just in case there are a few supplementary characters. if(length <= US_STACKBUF_SIZE) { capacity = US_STACKBUF_SIZE; } else { capacity = length + (length >> 4) + 4; } do { UChar *utf16 = result.getBuffer(capacity); int32_t length16; UErrorCode errorCode = U_ZERO_ERROR; u_strFromUTF32WithSub(utf16, result.getCapacity(), &length16, utf32, length, 0xfffd, // Substitution character. NULL, // Don't care about number of substitutions. &errorCode); result.releaseBuffer(length16); if(errorCode == U_BUFFER_OVERFLOW_ERROR) { capacity = length16 + 1; // +1 for the terminating NUL. continue; } else if(U_FAILURE(errorCode)) { result.setToBogus(); } break; } while(TRUE); return result; } //======================================== // Assignment //======================================== UnicodeString & UnicodeString::operator=(const UnicodeString &src) { return copyFrom(src); } UnicodeString & UnicodeString::fastCopyFrom(const UnicodeString &src) { return copyFrom(src, TRUE); } UnicodeString & UnicodeString::copyFrom(const UnicodeString &src, UBool fastCopy) { // if assigning to ourselves, do nothing if(this == &src) { return *this; } // is the right side bogus? 
if(src.isBogus()) { setToBogus(); return *this; } // delete the current contents releaseArray(); if(src.isEmpty()) { // empty string - use the stack buffer setToEmpty(); return *this; } // fLength>0 and not an "open" src.getBuffer(minCapacity) fUnion.fFields.fLengthAndFlags = src.fUnion.fFields.fLengthAndFlags; switch(src.fUnion.fFields.fLengthAndFlags & kAllStorageFlags) { case kShortString: // short string using the stack buffer, do the same uprv_memcpy(fUnion.fStackFields.fBuffer, src.fUnion.fStackFields.fBuffer, getShortLength() * U_SIZEOF_UCHAR); break; case kLongString: // src uses a refCounted string buffer, use that buffer with refCount // src is const, use a cast - we don't actually change it ((UnicodeString &)src).addRef(); // copy all fields, share the reference-counted buffer fUnion.fFields.fArray = src.fUnion.fFields.fArray; fUnion.fFields.fCapacity = src.fUnion.fFields.fCapacity; if(!hasShortLength()) { fUnion.fFields.fLength = src.fUnion.fFields.fLength; } break; case kReadonlyAlias: if(fastCopy) { // src is a readonly alias, do the same // -> maintain the readonly alias as such fUnion.fFields.fArray = src.fUnion.fFields.fArray; fUnion.fFields.fCapacity = src.fUnion.fFields.fCapacity; if(!hasShortLength()) { fUnion.fFields.fLength = src.fUnion.fFields.fLength; } break; } // else if(!fastCopy) fall through to case kWritableAlias // -> allocate a new buffer and copy the contents U_FALLTHROUGH; case kWritableAlias: { // src is a writable alias; we make a copy of that instead int32_t srcLength = src.length(); if(allocate(srcLength)) { u_memcpy(getArrayStart(), src.getArrayStart(), srcLength); setLength(srcLength); break; } // if there is not enough memory, then fall through to setting to bogus U_FALLTHROUGH; } default: // if src is bogus, set ourselves to bogus // do not call setToBogus() here because fArray and flags are not consistent here fUnion.fFields.fLengthAndFlags = kIsBogus; fUnion.fFields.fArray = 0; fUnion.fFields.fCapacity = 0; break; } return *this; } UnicodeString &UnicodeString::operator=(UnicodeString &&src) U_NOEXCEPT { // No explicit check for self move assignment, consistent with standard library. // Self move assignment causes no crash nor leak but might make the object bogus. releaseArray(); copyFieldsFrom(src, TRUE); return *this; } // Same as move assignment except without memory management. void UnicodeString::copyFieldsFrom(UnicodeString &src, UBool setSrcToBogus) U_NOEXCEPT { int16_t lengthAndFlags = fUnion.fFields.fLengthAndFlags = src.fUnion.fFields.fLengthAndFlags; if(lengthAndFlags & kUsingStackBuffer) { // Short string using the stack buffer, copy the contents. // Check for self assignment to prevent "overlap in memcpy" warnings, // although it should be harmless to copy a buffer to itself exactly. if(this != &src) { uprv_memcpy(fUnion.fStackFields.fBuffer, src.fUnion.fStackFields.fBuffer, getShortLength() * U_SIZEOF_UCHAR); } } else { // In all other cases, copy all fields. fUnion.fFields.fArray = src.fUnion.fFields.fArray; fUnion.fFields.fCapacity = src.fUnion.fFields.fCapacity; if(!hasShortLength()) { fUnion.fFields.fLength = src.fUnion.fFields.fLength; } if(setSrcToBogus) { // Set src to bogus without releasing any memory. src.fUnion.fFields.fLengthAndFlags = kIsBogus; src.fUnion.fFields.fArray = NULL; src.fUnion.fFields.fCapacity = 0; } } } void UnicodeString::swap(UnicodeString &other) U_NOEXCEPT { UnicodeString temp; // Empty short string: Known not to need releaseArray(). // Copy fields without resetting source values in between. 
temp.copyFieldsFrom(*this, FALSE); this->copyFieldsFrom(other, FALSE); other.copyFieldsFrom(temp, FALSE); // Set temp to an empty string so that other's memory is not released twice. temp.fUnion.fFields.fLengthAndFlags = kShortString; } //======================================== // Miscellaneous operations //======================================== UnicodeString UnicodeString::unescape() const { UnicodeString result(length(), (UChar32)0, (int32_t)0); // construct with capacity if (result.isBogus()) { return result; } const UChar *array = getBuffer(); int32_t len = length(); int32_t prev = 0; for (int32_t i=0;;) { if (i == len) { result.append(array, prev, len - prev); break; } if (array[i++] == 0x5C /*'\\'*/) { result.append(array, prev, (i - 1) - prev); UChar32 c = unescapeAt(i); // advances i if (c < 0) { result.remove(); // return empty string break; // invalid escape sequence } result.append(c); prev = i; } } return result; } UChar32 UnicodeString::unescapeAt(int32_t &offset) const { return u_unescapeAt(UnicodeString_charAt, &offset, length(), (void*)this); } //======================================== // Read-only implementation //======================================== UBool UnicodeString::doEquals(const UnicodeString &text, int32_t len) const { // Requires: this & text not bogus and have same lengths. // Byte-wise comparison works for equality regardless of endianness. return uprv_memcmp(getArrayStart(), text.getArrayStart(), len * U_SIZEOF_UCHAR) == 0; } int8_t UnicodeString::doCompare( int32_t start, int32_t length, const UChar *srcChars, int32_t srcStart, int32_t srcLength) const { // compare illegal string values if(isBogus()) { return -1; } // pin indices to legal values pinIndices(start, length); if(srcChars == NULL) { // treat const UChar *srcChars==NULL as an empty string return length == 0 ? 0 : 1; } // get the correct pointer const UChar *chars = getArrayStart(); chars += start; srcChars += srcStart; int32_t minLength; int8_t lengthResult; // get the srcLength if necessary if(srcLength < 0) { srcLength = u_strlen(srcChars + srcStart); } // are we comparing different lengths? if(length != srcLength) { if(length < srcLength) { minLength = length; lengthResult = -1; } else { minLength = srcLength; lengthResult = 1; } } else { minLength = length; lengthResult = 0; } /* * note that uprv_memcmp() returns an int but we return an int8_t; * we need to take care not to truncate the result - * one way to do this is to right-shift the value to * move the sign bit into the lower 8 bits and making sure that this * does not become 0 itself */ if(minLength > 0 && chars != srcChars) { int32_t result; # if U_IS_BIG_ENDIAN // big-endian: byte comparison works result = uprv_memcmp(chars, srcChars, minLength * sizeof(UChar)); if(result != 0) { return (int8_t)(result >> 15 | 1); } # else // little-endian: compare UChar units do { result = ((int32_t)*(chars++) - (int32_t)*(srcChars++)); if(result != 0) { return (int8_t)(result >> 15 | 1); } } while(--minLength > 0); # endif } return lengthResult; } /* String compare in code point order - doCompare() compares in code unit order. 
*/ int8_t UnicodeString::doCompareCodePointOrder(int32_t start, int32_t length, const UChar *srcChars, int32_t srcStart, int32_t srcLength) const { // compare illegal string values // treat const UChar *srcChars==NULL as an empty string if(isBogus()) { return -1; } // pin indices to legal values pinIndices(start, length); if(srcChars == NULL) { srcStart = srcLength = 0; } int32_t diff = uprv_strCompare(getArrayStart() + start, length, (srcChars!=NULL)?(srcChars + srcStart):NULL, srcLength, FALSE, TRUE); /* translate the 32-bit result into an 8-bit one */ if(diff!=0) { return (int8_t)(diff >> 15 | 1); } else { return 0; } } int32_t UnicodeString::getLength() const { return length(); } UChar UnicodeString::getCharAt(int32_t offset) const { return charAt(offset); } UChar32 UnicodeString::getChar32At(int32_t offset) const { return char32At(offset); } UChar32 UnicodeString::char32At(int32_t offset) const { int32_t len = length(); if((uint32_t)offset < (uint32_t)len) { const UChar *array = getArrayStart(); UChar32 c; U16_GET(array, 0, offset, len, c); return c; } else { return kInvalidUChar; } } int32_t UnicodeString::getChar32Start(int32_t offset) const { if((uint32_t)offset < (uint32_t)length()) { const UChar *array = getArrayStart(); U16_SET_CP_START(array, 0, offset); return offset; } else { return 0; } } int32_t UnicodeString::getChar32Limit(int32_t offset) const { int32_t len = length(); if((uint32_t)offset < (uint32_t)len) { const UChar *array = getArrayStart(); U16_SET_CP_LIMIT(array, 0, offset, len); return offset; } else { return len; } } int32_t UnicodeString::countChar32(int32_t start, int32_t length) const { pinIndices(start, length); // if(isBogus()) then fArray==0 and start==0 - u_countChar32() checks for NULL return u_countChar32(getArrayStart()+start, length); } UBool UnicodeString::hasMoreChar32Than(int32_t start, int32_t length, int32_t number) const { pinIndices(start, length); // if(isBogus()) then fArray==0 and start==0 - u_strHasMoreChar32Than() checks for NULL return u_strHasMoreChar32Than(getArrayStart()+start, length, number); } int32_t UnicodeString::moveIndex32(int32_t index, int32_t delta) const { // pin index int32_t len = length(); if(index<0) { index=0; } else if(index>len) { index=len; } const UChar *array = getArrayStart(); if(delta>0) { U16_FWD_N(array, index, len, delta); } else { U16_BACK_N(array, 0, index, -delta); } return index; } void UnicodeString::doExtract(int32_t start, int32_t length, UChar *dst, int32_t dstStart) const { // pin indices to legal values pinIndices(start, length); // do not copy anything if we alias dst itself const UChar *array = getArrayStart(); if(array + start != dst + dstStart) { us_arrayCopy(array, start, dst, dstStart, length); } } int32_t UnicodeString::extract(Char16Ptr dest, int32_t destCapacity, UErrorCode &errorCode) const { int32_t len = length(); if(U_SUCCESS(errorCode)) { if(isBogus() || destCapacity<0 || (destCapacity>0 && dest==0)) { errorCode=U_ILLEGAL_ARGUMENT_ERROR; } else { const UChar *array = getArrayStart(); if(len>0 && len<=destCapacity && array!=dest) { u_memcpy(dest, array, len); } return u_terminateUChars(dest, destCapacity, len, &errorCode); } } return len; } int32_t UnicodeString::extract(int32_t start, int32_t length, char *target, int32_t targetCapacity, enum EInvariant) const { // if the arguments are illegal, then do nothing if(targetCapacity < 0 || (targetCapacity > 0 && target == NULL)) { return 0; } // pin the indices to legal values pinIndices(start, length); if(length <= targetCapacity) { 
u_UCharsToChars(getArrayStart() + start, target, length); } UErrorCode status = U_ZERO_ERROR; return u_terminateChars(target, targetCapacity, length, &status); } UnicodeString UnicodeString::tempSubString(int32_t start, int32_t len) const { pinIndices(start, len); const UChar *array = getBuffer(); // not getArrayStart() to check kIsBogus & kOpenGetBuffer if(array==NULL) { array=fUnion.fStackFields.fBuffer; // anything not NULL because that would make an empty string len=-2; // bogus result string } return UnicodeString(FALSE, array + start, len); } int32_t UnicodeString::toUTF8(int32_t start, int32_t len, char *target, int32_t capacity) const { pinIndices(start, len); int32_t length8; UErrorCode errorCode = U_ZERO_ERROR; u_strToUTF8WithSub(target, capacity, &length8, getBuffer() + start, len, 0xFFFD, // Standard substitution character. NULL, // Don't care about number of substitutions. &errorCode); return length8; } #if U_CHARSET_IS_UTF8 int32_t UnicodeString::extract(int32_t start, int32_t len, char *target, uint32_t dstSize) const { // if the arguments are illegal, then do nothing if(/*dstSize < 0 || */(dstSize > 0 && target == 0)) { return 0; } return toUTF8(start, len, target, dstSize <= 0x7fffffff ? (int32_t)dstSize : 0x7fffffff); } // else see unistr_cnv.cpp #endif void UnicodeString::extractBetween(int32_t start, int32_t limit, UnicodeString& target) const { pinIndex(start); pinIndex(limit); doExtract(start, limit - start, target); } // When converting from UTF-16 to UTF-8, the result will have at most 3 times // as many bytes as the source has UChars. // The "worst cases" are writing systems like Indic, Thai and CJK with // 3:1 bytes:UChars. void UnicodeString::toUTF8(ByteSink &sink) const { int32_t length16 = length(); if(length16 != 0) { char stackBuffer[1024]; int32_t capacity = (int32_t)sizeof(stackBuffer); UBool utf8IsOwned = FALSE; char *utf8 = sink.GetAppendBuffer(length16 < capacity ? length16 : capacity, 3*length16, stackBuffer, capacity, &capacity); int32_t length8 = 0; UErrorCode errorCode = U_ZERO_ERROR; u_strToUTF8WithSub(utf8, capacity, &length8, getBuffer(), length16, 0xFFFD, // Standard substitution character. NULL, // Don't care about number of substitutions. &errorCode); if(errorCode == U_BUFFER_OVERFLOW_ERROR) { utf8 = (char *)uprv_malloc(length8); if(utf8 != NULL) { utf8IsOwned = TRUE; errorCode = U_ZERO_ERROR; u_strToUTF8WithSub(utf8, length8, &length8, getBuffer(), length16, 0xFFFD, // Standard substitution character. NULL, // Don't care about number of substitutions. &errorCode); } else { errorCode = U_MEMORY_ALLOCATION_ERROR; } } if(U_SUCCESS(errorCode)) { sink.Append(utf8, length8); sink.Flush(); } if(utf8IsOwned) { uprv_free(utf8); } } } int32_t UnicodeString::toUTF32(UChar32 *utf32, int32_t capacity, UErrorCode &errorCode) const { int32_t length32=0; if(U_SUCCESS(errorCode)) { // getBuffer() and u_strToUTF32WithSub() check for illegal arguments. u_strToUTF32WithSub(utf32, capacity, &length32, getBuffer(), length(), 0xfffd, // Substitution character. NULL, // Don't care about number of substitutions. 
&errorCode); } return length32; } int32_t UnicodeString::indexOf(const UChar *srcChars, int32_t srcStart, int32_t srcLength, int32_t start, int32_t length) const { if(isBogus() || srcChars == 0 || srcStart < 0 || srcLength == 0) { return -1; } // UnicodeString does not find empty substrings if(srcLength < 0 && srcChars[srcStart] == 0) { return -1; } // get the indices within bounds pinIndices(start, length); // find the first occurrence of the substring const UChar *array = getArrayStart(); const UChar *match = u_strFindFirst(array + start, length, srcChars + srcStart, srcLength); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::doIndexOf(UChar c, int32_t start, int32_t length) const { // pin indices pinIndices(start, length); // find the first occurrence of c const UChar *array = getArrayStart(); const UChar *match = u_memchr(array + start, c, length); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::doIndexOf(UChar32 c, int32_t start, int32_t length) const { // pin indices pinIndices(start, length); // find the first occurrence of c const UChar *array = getArrayStart(); const UChar *match = u_memchr32(array + start, c, length); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::lastIndexOf(const UChar *srcChars, int32_t srcStart, int32_t srcLength, int32_t start, int32_t length) const { if(isBogus() || srcChars == 0 || srcStart < 0 || srcLength == 0) { return -1; } // UnicodeString does not find empty substrings if(srcLength < 0 && srcChars[srcStart] == 0) { return -1; } // get the indices within bounds pinIndices(start, length); // find the last occurrence of the substring const UChar *array = getArrayStart(); const UChar *match = u_strFindLast(array + start, length, srcChars + srcStart, srcLength); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::doLastIndexOf(UChar c, int32_t start, int32_t length) const { if(isBogus()) { return -1; } // pin indices pinIndices(start, length); // find the last occurrence of c const UChar *array = getArrayStart(); const UChar *match = u_memrchr(array + start, c, length); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } int32_t UnicodeString::doLastIndexOf(UChar32 c, int32_t start, int32_t length) const { // pin indices pinIndices(start, length); // find the last occurrence of c const UChar *array = getArrayStart(); const UChar *match = u_memrchr32(array + start, c, length); if(match == NULL) { return -1; } else { return (int32_t)(match - array); } } //======================================== // Write implementation //======================================== UnicodeString& UnicodeString::findAndReplace(int32_t start, int32_t length, const UnicodeString& oldText, int32_t oldStart, int32_t oldLength, const UnicodeString& newText, int32_t newStart, int32_t newLength) { if(isBogus() || oldText.isBogus() || newText.isBogus()) { return *this; } pinIndices(start, length); oldText.pinIndices(oldStart, oldLength); newText.pinIndices(newStart, newLength); if(oldLength == 0) { return *this; } while(length > 0 && length >= oldLength) { int32_t pos = indexOf(oldText, oldStart, oldLength, start, length); if(pos < 0) { // no more oldText's here: done break; } else { // we found oldText, replace it by newText and go beyond it replace(pos, oldLength, newText, newStart, newLength); length -= pos + oldLength - start; start = pos + newLength; } } 
return *this; } void UnicodeString::setToBogus() { releaseArray(); fUnion.fFields.fLengthAndFlags = kIsBogus; fUnion.fFields.fArray = 0; fUnion.fFields.fCapacity = 0; } // turn a bogus string into an empty one void UnicodeString::unBogus() { if(fUnion.fFields.fLengthAndFlags & kIsBogus) { setToEmpty(); } } const char16_t * UnicodeString::getTerminatedBuffer() { if(!isWritable()) { return nullptr; } UChar *array = getArrayStart(); int32_t len = length(); if(len < getCapacity()) { if(fUnion.fFields.fLengthAndFlags & kBufferIsReadonly) { // If len<capacity on a read-only alias, then array[len] is // either the original NUL (if constructed with (TRUE, s, length)) // or one of the original string contents characters (if later truncated), // therefore we can assume that array[len] is initialized memory. if(array[len] == 0) { return array; } } else if(((fUnion.fFields.fLengthAndFlags & kRefCounted) == 0 || refCount() == 1)) { // kRefCounted: Do not write the NUL if the buffer is shared. // That is mostly safe, except when the length of one copy was modified // without copy-on-write, e.g., via truncate(newLength) or remove(void). // Then the NUL would be written into the middle of another copy's string. // Otherwise, the buffer is fully writable and it is anyway safe to write the NUL. // Do not test if there is a NUL already because it might be uninitialized memory. // (That would be safe, but tools like valgrind & Purify would complain.) array[len] = 0; return array; } } if(len<INT32_MAX && cloneArrayIfNeeded(len+1)) { array = getArrayStart(); array[len] = 0; return array; } else { return nullptr; } } // setTo() analogous to the readonly-aliasing constructor with the same signature UnicodeString & UnicodeString::setTo(UBool isTerminated, ConstChar16Ptr textPtr, int32_t textLength) { if(fUnion.fFields.fLengthAndFlags & kOpenGetBuffer) { // do not modify a string that has an "open" getBuffer(minCapacity) return *this; } const UChar *text = textPtr; if(text == NULL) { // treat as an empty string, do not alias releaseArray(); setToEmpty(); return *this; } if( textLength < -1 || (textLength == -1 && !isTerminated) || (textLength >= 0 && isTerminated && text[textLength] != 0) ) { setToBogus(); return *this; } releaseArray(); if(textLength == -1) { // text is terminated, or else it would have failed the above test textLength = u_strlen(text); } fUnion.fFields.fLengthAndFlags = kReadonlyAlias; setArray((UChar *)text, textLength, isTerminated ? 
textLength + 1 : textLength); return *this; } // setTo() analogous to the writable-aliasing constructor with the same signature UnicodeString & UnicodeString::setTo(UChar *buffer, int32_t buffLength, int32_t buffCapacity) { if(fUnion.fFields.fLengthAndFlags & kOpenGetBuffer) { // do not modify a string that has an "open" getBuffer(minCapacity) return *this; } if(buffer == NULL) { // treat as an empty string, do not alias releaseArray(); setToEmpty(); return *this; } if(buffLength < -1 || buffCapacity < 0 || buffLength > buffCapacity) { setToBogus(); return *this; } else if(buffLength == -1) { // buffLength = u_strlen(buff); but do not look beyond buffCapacity const UChar *p = buffer, *limit = buffer + buffCapacity; while(p != limit && *p != 0) { ++p; } buffLength = (int32_t)(p - buffer); } releaseArray(); fUnion.fFields.fLengthAndFlags = kWritableAlias; setArray(buffer, buffLength, buffCapacity); return *this; } UnicodeString &UnicodeString::setToUTF8(StringPiece utf8) { unBogus(); int32_t length = utf8.length(); int32_t capacity; // The UTF-16 string will be at most as long as the UTF-8 string. if(length <= US_STACKBUF_SIZE) { capacity = US_STACKBUF_SIZE; } else { capacity = length + 1; // +1 for the terminating NUL. } UChar *utf16 = getBuffer(capacity); int32_t length16; UErrorCode errorCode = U_ZERO_ERROR; u_strFromUTF8WithSub(utf16, getCapacity(), &length16, utf8.data(), length, 0xfffd, // Substitution character. NULL, // Don't care about number of substitutions. &errorCode); releaseBuffer(length16); if(U_FAILURE(errorCode)) { setToBogus(); } return *this; } UnicodeString& UnicodeString::setCharAt(int32_t offset, UChar c) { int32_t len = length(); if(cloneArrayIfNeeded() && len > 0) { if(offset < 0) { offset = 0; } else if(offset >= len) { offset = len - 1; } getArrayStart()[offset] = c; } return *this; } UnicodeString& UnicodeString::replace(int32_t start, int32_t _length, UChar32 srcChar) { UChar buffer[U16_MAX_LENGTH]; int32_t count = 0; UBool isError = FALSE; U16_APPEND(buffer, count, U16_MAX_LENGTH, srcChar, isError); // We test isError so that the compiler does not complain that we don't. // If isError (srcChar is not a valid code point) then count==0 which means // we remove the source segment rather than replacing it with srcChar. return doReplace(start, _length, buffer, 0, isError ? 0 : count); } UnicodeString& UnicodeString::append(UChar32 srcChar) { UChar buffer[U16_MAX_LENGTH]; int32_t _length = 0; UBool isError = FALSE; U16_APPEND(buffer, _length, U16_MAX_LENGTH, srcChar, isError); // We test isError so that the compiler does not complain that we don't. // If isError then _length==0 which turns the doAppend() into a no-op anyway. return isError ? 
*this : doAppend(buffer, 0, _length); } UnicodeString& UnicodeString::doReplace( int32_t start, int32_t length, const UnicodeString& src, int32_t srcStart, int32_t srcLength) { // pin the indices to legal values src.pinIndices(srcStart, srcLength); // get the characters from src // and replace the range in ourselves with them return doReplace(start, length, src.getArrayStart(), srcStart, srcLength); } UnicodeString& UnicodeString::doReplace(int32_t start, int32_t length, const UChar *srcChars, int32_t srcStart, int32_t srcLength) { if(!isWritable()) { return *this; } int32_t oldLength = this->length(); // optimize (read-only alias).remove(0, start) and .remove(start, end) if((fUnion.fFields.fLengthAndFlags&kBufferIsReadonly) && srcLength == 0) { if(start == 0) { // remove prefix by adjusting the array pointer pinIndex(length); fUnion.fFields.fArray += length; fUnion.fFields.fCapacity -= length; setLength(oldLength - length); return *this; } else { pinIndex(start); if(length >= (oldLength - start)) { // remove suffix by reducing the length (like truncate()) setLength(start); fUnion.fFields.fCapacity = start; // not NUL-terminated any more return *this; } } } if(start == oldLength) { return doAppend(srcChars, srcStart, srcLength); } if(srcChars == 0) { srcLength = 0; } else { // Perform all remaining operations relative to srcChars + srcStart. // From this point forward, do not use srcStart. srcChars += srcStart; if (srcLength < 0) { // get the srcLength if necessary srcLength = u_strlen(srcChars); } } // pin the indices to legal values pinIndices(start, length); // Calculate the size of the string after the replace. // Avoid int32_t overflow. int32_t newLength = oldLength - length; if(srcLength > (INT32_MAX - newLength)) { setToBogus(); return *this; } newLength += srcLength; // Check for insertion into ourself const UChar *oldArray = getArrayStart(); if (isBufferWritable() && oldArray < srcChars + srcLength && srcChars < oldArray + oldLength) { // Copy into a new UnicodeString and start over UnicodeString copy(srcChars, srcLength); if (copy.isBogus()) { setToBogus(); return *this; } return doReplace(start, length, copy.getArrayStart(), 0, srcLength); } // cloneArrayIfNeeded(doCopyArray=FALSE) may change fArray but will not copy the current contents; // therefore we need to keep the current fArray UChar oldStackBuffer[US_STACKBUF_SIZE]; if((fUnion.fFields.fLengthAndFlags&kUsingStackBuffer) && (newLength > US_STACKBUF_SIZE)) { // copy the stack buffer contents because it will be overwritten with // fUnion.fFields values u_memcpy(oldStackBuffer, oldArray, oldLength); oldArray = oldStackBuffer; } // clone our array and allocate a bigger array if needed int32_t *bufferToDelete = 0; if(!cloneArrayIfNeeded(newLength, getGrowCapacity(newLength), FALSE, &bufferToDelete) ) { return *this; } // now do the replace UChar *newArray = getArrayStart(); if(newArray != oldArray) { // if fArray changed, then we need to copy everything except what will change us_arrayCopy(oldArray, 0, newArray, 0, start); us_arrayCopy(oldArray, start + length, newArray, start + srcLength, oldLength - (start + length)); } else if(length != srcLength) { // fArray did not change; copy only the portion that isn't changing, leaving a hole us_arrayCopy(oldArray, start + length, newArray, start + srcLength, oldLength - (start + length)); } // now fill in the hole with the new string us_arrayCopy(srcChars, 0, newArray, start, srcLength); setLength(newLength); // delayed delete in case srcChars == fArray when we started, and // to 
keep oldArray alive for the above operations if (bufferToDelete) { uprv_free(bufferToDelete); } return *this; } // Versions of doReplace() only for append() variants. // doReplace() and doAppend() optimize for different cases. UnicodeString& UnicodeString::doAppend(const UnicodeString& src, int32_t srcStart, int32_t srcLength) { if(srcLength == 0) { return *this; } // pin the indices to legal values src.pinIndices(srcStart, srcLength); return doAppend(src.getArrayStart(), srcStart, srcLength); } UnicodeString& UnicodeString::doAppend(const UChar *srcChars, int32_t srcStart, int32_t srcLength) { if(!isWritable() || srcLength == 0 || srcChars == NULL) { return *this; } // Perform all remaining operations relative to srcChars + srcStart. // From this point forward, do not use srcStart. srcChars += srcStart; if(srcLength < 0) { // get the srcLength if necessary if((srcLength = u_strlen(srcChars)) == 0) { return *this; } } int32_t oldLength = length(); int32_t newLength; if (uprv_add32_overflow(oldLength, srcLength, &newLength)) { setToBogus(); return *this; } // Check for append onto ourself const UChar* oldArray = getArrayStart(); if (isBufferWritable() && oldArray < srcChars + srcLength && srcChars < oldArray + oldLength) { // Copy into a new UnicodeString and start over UnicodeString copy(srcChars, srcLength); if (copy.isBogus()) { setToBogus(); return *this; } return doAppend(copy.getArrayStart(), 0, srcLength); } // optimize append() onto a large-enough, owned string if((newLength <= getCapacity() && isBufferWritable()) || cloneArrayIfNeeded(newLength, getGrowCapacity(newLength))) { UChar *newArray = getArrayStart(); // Do not copy characters when // UChar *buffer=str.getAppendBuffer(...); // is followed by // str.append(buffer, length); // or // str.appendString(buffer, length) // or similar. if(srcChars != newArray + oldLength) { us_arrayCopy(srcChars, 0, newArray, oldLength, srcLength); } setLength(newLength); } return *this; } /** * Replaceable API */ void UnicodeString::handleReplaceBetween(int32_t start, int32_t limit, const UnicodeString& text) { replaceBetween(start, limit, text); } /** * Replaceable API */ void UnicodeString::copy(int32_t start, int32_t limit, int32_t dest) { if (limit <= start) { return; // Nothing to do; avoid bogus malloc call } UChar* text = (UChar*) uprv_malloc( sizeof(UChar) * (limit - start) ); // Check to make sure text is not null. if (text != NULL) { extractBetween(start, limit, text, 0); insert(dest, text, 0, limit - start); uprv_free(text); } } /** * Replaceable API * * NOTE: This is for the Replaceable class. There is no rep.cpp, * so we implement this function here. */ UBool Replaceable::hasMetaData() const { return TRUE; } /** * Replaceable API */ UBool UnicodeString::hasMetaData() const { return FALSE; } UnicodeString& UnicodeString::doReverse(int32_t start, int32_t length) { if(length <= 1 || !cloneArrayIfNeeded()) { return *this; } // pin the indices to legal values pinIndices(start, length); if(length <= 1) { // pinIndices() might have shrunk the length return *this; } UChar *left = getArrayStart() + start; UChar *right = left + length - 1; // -1 for inclusive boundary (length>=2) UChar swap; UBool hasSupplementary = FALSE; // Before the loop we know left<right because length>=2. do { hasSupplementary |= (UBool)U16_IS_LEAD(swap = *left); hasSupplementary |= (UBool)U16_IS_LEAD(*left++ = *right); *right-- = swap; } while(left < right); // Make sure to test the middle code unit of an odd-length string. // Redundant if the length is even. 
hasSupplementary |= (UBool)U16_IS_LEAD(*left); /* if there are supplementary code points in the reversed range, then re-swap their surrogates */ if(hasSupplementary) { UChar swap2; left = getArrayStart() + start; right = left + length - 1; // -1 so that we can look at *(left+1) if left<right while(left < right) { if(U16_IS_TRAIL(swap = *left) && U16_IS_LEAD(swap2 = *(left + 1))) { *left++ = swap2; *left++ = swap; } else { ++left; } } } return *this; } UBool UnicodeString::padLeading(int32_t targetLength, UChar padChar) { int32_t oldLength = length(); if(oldLength >= targetLength || !cloneArrayIfNeeded(targetLength)) { return FALSE; } else { // move contents up by padding width UChar *array = getArrayStart(); int32_t start = targetLength - oldLength; us_arrayCopy(array, 0, array, start, oldLength); // fill in padding character while(--start >= 0) { array[start] = padChar; } setLength(targetLength); return TRUE; } } UBool UnicodeString::padTrailing(int32_t targetLength, UChar padChar) { int32_t oldLength = length(); if(oldLength >= targetLength || !cloneArrayIfNeeded(targetLength)) { return FALSE; } else { // fill in padding character UChar *array = getArrayStart(); int32_t length = targetLength; while(--length >= oldLength) { array[length] = padChar; } setLength(targetLength); return TRUE; } } //======================================== // Hashing //======================================== int32_t UnicodeString::doHashCode() const { /* Delegate hash computation to uhash. This makes UnicodeString * hashing consistent with UChar* hashing. */ int32_t hashCode = ustr_hashUCharsN(getArrayStart(), length()); if (hashCode == kInvalidHashCode) { hashCode = kEmptyHashCode; } return hashCode; } //======================================== // External Buffer //======================================== char16_t * UnicodeString::getBuffer(int32_t minCapacity) { if(minCapacity>=-1 && cloneArrayIfNeeded(minCapacity)) { fUnion.fFields.fLengthAndFlags|=kOpenGetBuffer; setZeroLength(); return getArrayStart(); } else { return nullptr; } } void UnicodeString::releaseBuffer(int32_t newLength) { if(fUnion.fFields.fLengthAndFlags&kOpenGetBuffer && newLength>=-1) { // set the new fLength int32_t capacity=getCapacity(); if(newLength==-1) { // the new length is the string length, capped by fCapacity const UChar *array=getArrayStart(), *p=array, *limit=array+capacity; while(p<limit && *p!=0) { ++p; } newLength=(int32_t)(p-array); } else if(newLength>capacity) { newLength=capacity; } setLength(newLength); fUnion.fFields.fLengthAndFlags&=~kOpenGetBuffer; } } //======================================== // Miscellaneous //======================================== UBool UnicodeString::cloneArrayIfNeeded(int32_t newCapacity, int32_t growCapacity, UBool doCopyArray, int32_t **pBufferToDelete, UBool forceClone) { // default parameters need to be static, therefore // the defaults are -1 to have convenience defaults if(newCapacity == -1) { newCapacity = getCapacity(); } // while a getBuffer(minCapacity) is "open", // prevent any modifications of the string by returning FALSE here // if the string is bogus, then only an assignment or similar can revive it if(!isWritable()) { return FALSE; } /* * We need to make a copy of the array if * the buffer is read-only, or * the buffer is refCounted (shared), and refCount>1, or * the buffer is too small. * Return FALSE if memory could not be allocated. 
*/ if(forceClone || fUnion.fFields.fLengthAndFlags & kBufferIsReadonly || (fUnion.fFields.fLengthAndFlags & kRefCounted && refCount() > 1) || newCapacity > getCapacity() ) { // check growCapacity for default value and use of the stack buffer if(growCapacity < 0) { growCapacity = newCapacity; } else if(newCapacity <= US_STACKBUF_SIZE && growCapacity > US_STACKBUF_SIZE) { growCapacity = US_STACKBUF_SIZE; } // save old values UChar oldStackBuffer[US_STACKBUF_SIZE]; UChar *oldArray; int32_t oldLength = length(); int16_t flags = fUnion.fFields.fLengthAndFlags; if(flags&kUsingStackBuffer) { U_ASSERT(!(flags&kRefCounted)); /* kRefCounted and kUsingStackBuffer are mutually exclusive */ if(doCopyArray && growCapacity > US_STACKBUF_SIZE) { // copy the stack buffer contents because it will be overwritten with // fUnion.fFields values us_arrayCopy(fUnion.fStackFields.fBuffer, 0, oldStackBuffer, 0, oldLength); oldArray = oldStackBuffer; } else { oldArray = NULL; // no need to copy from the stack buffer to itself } } else { oldArray = fUnion.fFields.fArray; U_ASSERT(oldArray!=NULL); /* when stack buffer is not used, oldArray must have a non-NULL reference */ } // allocate a new array if(allocate(growCapacity) || (newCapacity < growCapacity && allocate(newCapacity)) ) { if(doCopyArray) { // copy the contents // do not copy more than what fits - it may be smaller than before int32_t minLength = oldLength; newCapacity = getCapacity(); if(newCapacity < minLength) { minLength = newCapacity; } if(oldArray != NULL) { us_arrayCopy(oldArray, 0, getArrayStart(), 0, minLength); } setLength(minLength); } else { setZeroLength(); } // release the old array if(flags & kRefCounted) { // the array is refCounted; decrement and release if 0 u_atomic_int32_t *pRefCount = ((u_atomic_int32_t *)oldArray - 1); if(umtx_atomic_dec(pRefCount) == 0) { if(pBufferToDelete == 0) { // Note: cast to (void *) is needed with MSVC, where u_atomic_int32_t // is defined as volatile. (Volatile has useful non-standard behavior // with this compiler.) 
uprv_free((void *)pRefCount); } else { // the caller requested to delete it himself *pBufferToDelete = (int32_t *)pRefCount; } } } } else { // not enough memory for growCapacity and not even for the smaller newCapacity // reset the old values for setToBogus() to release the array if(!(flags&kUsingStackBuffer)) { fUnion.fFields.fArray = oldArray; } fUnion.fFields.fLengthAndFlags = flags; setToBogus(); return FALSE; } } return TRUE; } // UnicodeStringAppendable ------------------------------------------------- *** UnicodeStringAppendable::~UnicodeStringAppendable() {} UBool UnicodeStringAppendable::appendCodeUnit(UChar c) { return str.doAppend(&c, 0, 1).isWritable(); } UBool UnicodeStringAppendable::appendCodePoint(UChar32 c) { UChar buffer[U16_MAX_LENGTH]; int32_t cLength = 0; UBool isError = FALSE; U16_APPEND(buffer, cLength, U16_MAX_LENGTH, c, isError); return !isError && str.doAppend(buffer, 0, cLength).isWritable(); } UBool UnicodeStringAppendable::appendString(const UChar *s, int32_t length) { return str.doAppend(s, 0, length).isWritable(); } UBool UnicodeStringAppendable::reserveAppendCapacity(int32_t appendCapacity) { return str.cloneArrayIfNeeded(str.length() + appendCapacity); } UChar * UnicodeStringAppendable::getAppendBuffer(int32_t minCapacity, int32_t desiredCapacityHint, UChar *scratch, int32_t scratchCapacity, int32_t *resultCapacity) { if(minCapacity < 1 || scratchCapacity < minCapacity) { *resultCapacity = 0; return NULL; } int32_t oldLength = str.length(); if(minCapacity <= (kMaxCapacity - oldLength) && desiredCapacityHint <= (kMaxCapacity - oldLength) && str.cloneArrayIfNeeded(oldLength + minCapacity, oldLength + desiredCapacityHint)) { *resultCapacity = str.getCapacity() - oldLength; return str.getArrayStart() + oldLength; } *resultCapacity = scratchCapacity; return scratch; } U_NAMESPACE_END U_NAMESPACE_USE U_CAPI int32_t U_EXPORT2 uhash_hashUnicodeString(const UElement key) { const UnicodeString *str = (const UnicodeString*) key.pointer; return (str == NULL) ? 0 : str->hashCode(); } // Moved here from uhash_us.cpp so that using a UVector of UnicodeString* // does not depend on hashtable code. U_CAPI UBool U_EXPORT2 uhash_compareUnicodeString(const UElement key1, const UElement key2) { const UnicodeString *str1 = (const UnicodeString*) key1.pointer; const UnicodeString *str2 = (const UnicodeString*) key2.pointer; if (str1 == str2) { return TRUE; } if (str1 == NULL || str2 == NULL) { return FALSE; } return *str1 == *str2; } #ifdef U_STATIC_IMPLEMENTATION /* This should never be called. It is defined here to make sure that the virtual vector deleting destructor is defined within unistr.cpp. The vector deleting destructor is already a part of UObject, but defining it here makes sure that it is included with this object file. This makes sure that static library dependencies are kept to a minimum. */ static void uprv_UnicodeStringDummy(void) { delete [] (new UnicodeString[2]); } #endif
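As a usage note on the UnicodeStringAppendable block above: getAppendBuffer() hands the caller writable space, preferably inside the string's own (grown) array and otherwise in the caller's scratch buffer, and the caller then commits what it wrote with appendString(). The sketch below is illustrative only, assuming just the public unicode/unistr.h and unicode/appendable.h APIs; appendRepeated is a hypothetical helper name, not an ICU function.

#include <unicode/unistr.h>
#include <unicode/appendable.h>

using icu::UnicodeString;
using icu::UnicodeStringAppendable;

// Append n copies of the code unit c through the Appendable protocol.
// When getAppendBuffer() returns a pointer into the string's own array,
// the appendString() call reduces to setLength() (see the "Do not copy
// characters when ..." comment in doAppend() above).
static void appendRepeated(UnicodeStringAppendable &dest, UChar c, int32_t n) {
    UChar scratch[32];
    while (n > 0) {
        int32_t capacity = 0;
        UChar *buf = dest.getAppendBuffer(1, n, scratch, 32, &capacity);
        if (buf == nullptr || capacity < 1) {
            return;  // no append space available (e.g. allocation failed)
        }
        int32_t chunk = n < capacity ? n : capacity;
        for (int32_t i = 0; i < chunk; ++i) {
            buf[i] = c;
        }
        if (!dest.appendString(buf, chunk)) {
            return;  // destination string is no longer writable
        }
        n -= chunk;
    }
}

// Possible call site:
//   UnicodeString s;
//   UnicodeStringAppendable app(s);
//   appendRepeated(app, 0x2d, 1000);  // appends 1000 '-' code units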
UnicodeString::doAppend(const UChar *srcChars, int32_t srcStart, int32_t srcLength) { if(!isWritable() || srcLength == 0 || srcChars == NULL) { return *this; } // Perform all remaining operations relative to srcChars + srcStart. // From this point forward, do not use srcStart. srcChars += srcStart; if(srcLength < 0) { // get the srcLength if necessary if((srcLength = u_strlen(srcChars)) == 0) { return *this; } } int32_t oldLength = length(); int32_t newLength = oldLength + srcLength; // Check for append onto ourself const UChar* oldArray = getArrayStart(); if (isBufferWritable() && oldArray < srcChars + srcLength && srcChars < oldArray + oldLength) { // Copy into a new UnicodeString and start over UnicodeString copy(srcChars, srcLength); if (copy.isBogus()) { setToBogus(); return *this; } return doAppend(copy.getArrayStart(), 0, srcLength); } // optimize append() onto a large-enough, owned string if((newLength <= getCapacity() && isBufferWritable()) || cloneArrayIfNeeded(newLength, getGrowCapacity(newLength))) { UChar *newArray = getArrayStart(); // Do not copy characters when // UChar *buffer=str.getAppendBuffer(...); // is followed by // str.append(buffer, length); // or // str.appendString(buffer, length) // or similar. if(srcChars != newArray + oldLength) { us_arrayCopy(srcChars, 0, newArray, oldLength, srcLength); } setLength(newLength); } return *this; }
UnicodeString::doAppend(const UChar *srcChars, int32_t srcStart, int32_t srcLength) { if(!isWritable() || srcLength == 0 || srcChars == NULL) { return *this; } // Perform all remaining operations relative to srcChars + srcStart. // From this point forward, do not use srcStart. srcChars += srcStart; if(srcLength < 0) { // get the srcLength if necessary if((srcLength = u_strlen(srcChars)) == 0) { return *this; } } int32_t oldLength = length(); int32_t newLength; if (uprv_add32_overflow(oldLength, srcLength, &newLength)) { setToBogus(); return *this; } // Check for append onto ourself const UChar* oldArray = getArrayStart(); if (isBufferWritable() && oldArray < srcChars + srcLength && srcChars < oldArray + oldLength) { // Copy into a new UnicodeString and start over UnicodeString copy(srcChars, srcLength); if (copy.isBogus()) { setToBogus(); return *this; } return doAppend(copy.getArrayStart(), 0, srcLength); } // optimize append() onto a large-enough, owned string if((newLength <= getCapacity() && isBufferWritable()) || cloneArrayIfNeeded(newLength, getGrowCapacity(newLength))) { UChar *newArray = getArrayStart(); // Do not copy characters when // UChar *buffer=str.getAppendBuffer(...); // is followed by // str.append(buffer, length); // or // str.appendString(buffer, length) // or similar. if(srcChars != newArray + oldLength) { us_arrayCopy(srcChars, 0, newArray, oldLength, srcLength); } setLength(newLength); } return *this; }
{'added': [(1566, ' int32_t newLength;'), (1567, ' if (uprv_add32_overflow(oldLength, srcLength, &newLength)) {'), (1568, ' setToBogus();'), (1569, ' return *this;'), (1570, ' }')], 'deleted': [(1566, ' int32_t newLength = oldLength + srcLength;')]}
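The func_before/func_after pair and the diff above isolate the CVE-2020-10531 fix: the unchecked signed addition oldLength + srcLength can wrap to a negative newLength, which then trivially passes the newLength <= getCapacity() test and lets us_arrayCopy() write past the end of the allocation (an out-of-bounds write, matching CWE-787). The sketch below is a stand-alone illustration of the checked-add pattern, not ICU code; add32_overflow is a hypothetical helper written against the same contract uprv_add32_overflow has in func_after (return true on overflow, otherwise store the sum).

#include <cstdint>
#include <limits>

// Hypothetical stand-in for an add-with-overflow-check helper:
// returns true if a + b would overflow int32_t, otherwise stores
// the sum in *res and returns false.
static bool add32_overflow(int32_t a, int32_t b, int32_t *res) {
#if defined(__GNUC__) || defined(__clang__)
    return __builtin_add_overflow(a, b, res);
#else
    // Portable fallback; assumes the non-negative lengths used here,
    // so the only failure mode is exceeding INT32_MAX.
    if (a > std::numeric_limits<int32_t>::max() - b) {
        return true;
    }
    *res = a + b;
    return false;
#endif
}

// Mirroring the patched doAppend():
//   int32_t newLength;
//   if (add32_overflow(oldLength, srcLength, &newLength)) {
//     /* reject the append, e.g. setToBogus(); */
//   }

For a concrete failing input on the pre-fix code: oldLength = 0x7fffff00 and srcLength = 0x1000 sum to 0x80000f00, above INT32_MAX, so with wrapping two's-complement arithmetic newLength comes out negative, the capacity check passes, and us_arrayCopy() then copies srcLength units starting at offset oldLength, far past the actual allocation.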
num_lines_added: 5
num_lines_deleted: 1
num_lines_in_file: 1,437
num_tokens_in_file: 8,275
num_lines_in_method: 33
num_tokens_in_method: 218
method_complexity: 14
repo: https://github.com/unicode-org/icu
cve_id: CVE-2020-10531
cwe_id: CWE-787
id: 3,167
file_name: filter_manager.cc
programming_language: C++
method_name: Envoy::Http::ActiveStreamDecoderFilter::complete
#include "source/common/http/filter_manager.h" #include <functional> #include "envoy/http/header_map.h" #include "envoy/matcher/matcher.h" #include "source/common/common/enum_to_int.h" #include "source/common/common/scope_tracked_object_stack.h" #include "source/common/common/scope_tracker.h" #include "source/common/http/codes.h" #include "source/common/http/header_map_impl.h" #include "source/common/http/header_utility.h" #include "source/common/http/utility.h" #include "matching/data_impl.h" namespace Envoy { namespace Http { namespace { REGISTER_FACTORY(SkipActionFactory, Matcher::ActionFactory<Matching::HttpFilterActionContext>); template <class T> using FilterList = std::list<std::unique_ptr<T>>; // Shared helper for recording the latest filter used. template <class T> void recordLatestDataFilter(const typename FilterList<T>::iterator current_filter, T*& latest_filter, const FilterList<T>& filters) { // If this is the first time we're calling onData, just record the current filter. if (latest_filter == nullptr) { latest_filter = current_filter->get(); return; } // We want to keep this pointing at the latest filter in the filter list that has received the // onData callback. To do so, we compare the current latest with the *previous* filter. If they // match, then we must be processing a new filter for the first time. We omit this check if we're // the first filter, since the above check handles that case. // // We compare against the previous filter to avoid multiple filter iterations from resetting the // pointer: If we just set latest to current, then the first onData filter iteration would // correctly iterate over the filters and set latest, but on subsequent onData iterations // we'd start from the beginning again, potentially allowing filter N to modify the buffer even // though filter M > N was the filter that inserted data into the buffer. if (current_filter != filters.begin() && latest_filter == std::prev(current_filter)->get()) { latest_filter = current_filter->get(); } } } // namespace void ActiveStreamFilterBase::commonContinue() { // TODO(mattklein123): Raise an error if this is called during a callback. if (!canContinue()) { ENVOY_STREAM_LOG(trace, "cannot continue filter chain: filter={}", *this, static_cast<const void*>(this)); return; } // Set ScopeTrackerScopeState if there's no existing crash context. ScopeTrackedObjectStack encapsulated_object; absl::optional<ScopeTrackerScopeState> state; if (parent_.dispatcher_.trackedObjectStackIsEmpty()) { restoreContextOnContinue(encapsulated_object); state.emplace(&encapsulated_object, parent_.dispatcher_); } ENVOY_STREAM_LOG(trace, "continuing filter chain: filter={}", *this, static_cast<const void*>(this)); ASSERT(!canIterate(), "Attempting to continue iteration while the IterationState is already Continue"); // If iteration has stopped for all frame types, set iterate_from_current_filter_ to true so the // filter iteration starts with the current filter instead of the next one. if (stoppedAll()) { iterate_from_current_filter_ = true; } allowIteration(); // Only resume with do1xxHeaders() if we've actually seen 1xx headers. if (has1xxheaders()) { continued_1xx_headers_ = true; do1xxHeaders(); // If the response headers have not yet come in, don't continue on with // headers and body. doHeaders expects request headers to exist. if (!parent_.filter_manager_callbacks_.responseHeaders()) { return; } } // Make sure that we handle the zero byte data frame case. 
We make no effort to optimize this // case in terms of merging it into a header only request/response. This could be done in the // future. if (!headers_continued_) { headers_continued_ = true; doHeaders(complete() && !bufferedData() && !hasTrailers()); } doMetadata(); if (bufferedData()) { doData(complete() && !hasTrailers()); } if (hasTrailers()) { doTrailers(); } iterate_from_current_filter_ = false; } bool ActiveStreamFilterBase::commonHandleAfter1xxHeadersCallback(FilterHeadersStatus status) { ASSERT(parent_.state_.has_1xx_headers_); ASSERT(!continued_1xx_headers_); ASSERT(canIterate()); if (status == FilterHeadersStatus::StopIteration) { iteration_state_ = IterationState::StopSingleIteration; return false; } else { ASSERT(status == FilterHeadersStatus::Continue); continued_1xx_headers_ = true; return true; } } bool ActiveStreamFilterBase::commonHandleAfterHeadersCallback(FilterHeadersStatus status, bool& end_stream) { ASSERT(!headers_continued_); ASSERT(canIterate()); switch (status) { case FilterHeadersStatus::StopIteration: iteration_state_ = IterationState::StopSingleIteration; break; case FilterHeadersStatus::StopAllIterationAndBuffer: iteration_state_ = IterationState::StopAllBuffer; break; case FilterHeadersStatus::StopAllIterationAndWatermark: iteration_state_ = IterationState::StopAllWatermark; break; case FilterHeadersStatus::ContinueAndDontEndStream: end_stream = false; headers_continued_ = true; ENVOY_STREAM_LOG(debug, "converting to headers and body (body not available yet)", parent_); break; case FilterHeadersStatus::Continue: headers_continued_ = true; break; } handleMetadataAfterHeadersCallback(); if (stoppedAll() || status == FilterHeadersStatus::StopIteration) { return false; } else { return true; } } void ActiveStreamFilterBase::commonHandleBufferData(Buffer::Instance& provided_data) { // The way we do buffering is a little complicated which is why we have this common function // which is used for both encoding and decoding. When data first comes into our filter pipeline, // we send it through. Any filter can choose to stop iteration and buffer or not. If we then // continue iteration in the future, we use the buffered data. A future filter can stop and // buffer again. In this case, since we are already operating on buffered data, we don't // rebuffer, because we assume the filter has modified the buffer as it wishes in place. if (bufferedData().get() != &provided_data) { if (!bufferedData()) { bufferedData() = createBuffer(); } bufferedData()->move(provided_data); } } bool ActiveStreamFilterBase::commonHandleAfterDataCallback(FilterDataStatus status, Buffer::Instance& provided_data, bool& buffer_was_streaming) { if (status == FilterDataStatus::Continue) { if (iteration_state_ == IterationState::StopSingleIteration) { commonHandleBufferData(provided_data); commonContinue(); return false; } else { ASSERT(headers_continued_); } } else { iteration_state_ = IterationState::StopSingleIteration; if (status == FilterDataStatus::StopIterationAndBuffer || status == FilterDataStatus::StopIterationAndWatermark) { buffer_was_streaming = status == FilterDataStatus::StopIterationAndWatermark; commonHandleBufferData(provided_data); } else if (complete() && !hasTrailers() && !bufferedData() && // If the stream is destroyed, no need to handle the data buffer or trailers. // This can occur if the filter calls sendLocalReply. 
!parent_.state_.destroyed_) { // If this filter is doing StopIterationNoBuffer and this stream is terminated with a zero // byte data frame, we need to create an empty buffer to make sure that when commonContinue // is called, the pipeline resumes with an empty data frame with end_stream = true ASSERT(end_stream_); bufferedData() = createBuffer(); } return false; } return true; } bool ActiveStreamFilterBase::commonHandleAfterTrailersCallback(FilterTrailersStatus status) { if (status == FilterTrailersStatus::Continue) { if (iteration_state_ == IterationState::StopSingleIteration) { commonContinue(); return false; } else { ASSERT(headers_continued_); } } else if (status == FilterTrailersStatus::StopIteration) { if (canIterate()) { iteration_state_ = IterationState::StopSingleIteration; } return false; } return true; } const Network::Connection* ActiveStreamFilterBase::connection() { return parent_.connection(); } Event::Dispatcher& ActiveStreamFilterBase::dispatcher() { return parent_.dispatcher_; } StreamInfo::StreamInfo& ActiveStreamFilterBase::streamInfo() { return parent_.stream_info_; } Tracing::Span& ActiveStreamFilterBase::activeSpan() { return parent_.filter_manager_callbacks_.activeSpan(); } const ScopeTrackedObject& ActiveStreamFilterBase::scope() { return parent_.filter_manager_callbacks_.scope(); } void ActiveStreamFilterBase::restoreContextOnContinue( ScopeTrackedObjectStack& tracked_object_stack) { parent_.contextOnContinue(tracked_object_stack); } Tracing::Config& ActiveStreamFilterBase::tracingConfig() { return parent_.filter_manager_callbacks_.tracingConfig(); } Upstream::ClusterInfoConstSharedPtr ActiveStreamFilterBase::clusterInfo() { return parent_.filter_manager_callbacks_.clusterInfo(); } Router::RouteConstSharedPtr ActiveStreamFilterBase::route() { return route(nullptr); } Router::RouteConstSharedPtr ActiveStreamFilterBase::route(const Router::RouteCallback& cb) { return parent_.filter_manager_callbacks_.route(cb); } void ActiveStreamFilterBase::setRoute(Router::RouteConstSharedPtr route) { parent_.filter_manager_callbacks_.setRoute(std::move(route)); } void ActiveStreamFilterBase::clearRouteCache() { parent_.filter_manager_callbacks_.clearRouteCache(); } void ActiveStreamFilterBase::resetIdleTimer() { parent_.filter_manager_callbacks_.resetIdleTimer(); } void FilterMatchState::evaluateMatchTreeWithNewData(MatchDataUpdateFunc update_func) { if (match_tree_evaluated_ || !matching_data_) { return; } update_func(*matching_data_); const auto match_result = Matcher::evaluateMatch<HttpMatchingData>(*match_tree_, *matching_data_); match_tree_evaluated_ = match_result.match_state_ == Matcher::MatchState::MatchComplete; if (match_tree_evaluated_ && match_result.result_) { const auto result = match_result.result_(); if (SkipAction().typeUrl() == result->typeUrl()) { skip_filter_ = true; } else { filter_->onMatchCallback(*result); } } } bool ActiveStreamDecoderFilter::canContinue() { // It is possible for the connection manager to respond directly to a request even while // a filter is trying to continue. If a response has already happened, we should not // continue to further filters. A concrete example of this is a filter buffering data, the // last data frame comes in and the filter continues, but the final buffering takes the stream // over the high watermark such that a 413 is returned. 
return !parent_.state_.local_complete_; } Buffer::InstancePtr ActiveStreamDecoderFilter::createBuffer() { auto buffer = dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { this->requestDataDrained(); }, [this]() -> void { this->requestDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); buffer->setWatermarks(parent_.buffer_limit_); return buffer; } Buffer::InstancePtr& ActiveStreamDecoderFilter::bufferedData() { return parent_.buffered_request_data_; } bool ActiveStreamDecoderFilter::complete() { return parent_.state_.remote_complete_; } void ActiveStreamDecoderFilter::doHeaders(bool end_stream) { parent_.decodeHeaders(this, *parent_.filter_manager_callbacks_.requestHeaders(), end_stream); } void ActiveStreamDecoderFilter::doData(bool end_stream) { parent_.decodeData(this, *parent_.buffered_request_data_, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ActiveStreamDecoderFilter::doTrailers() { parent_.decodeTrailers(this, *parent_.filter_manager_callbacks_.requestTrailers()); } bool ActiveStreamDecoderFilter::hasTrailers() { return parent_.filter_manager_callbacks_.requestTrailers().has_value(); } void ActiveStreamDecoderFilter::drainSavedRequestMetadata() { ASSERT(saved_request_metadata_ != nullptr); for (auto& metadata_map : *getSavedRequestMetadata()) { parent_.decodeMetadata(this, *metadata_map); } getSavedRequestMetadata()->clear(); } void ActiveStreamDecoderFilter::handleMetadataAfterHeadersCallback() { // If we drain accumulated metadata, the iteration must start with the current filter. const bool saved_state = iterate_from_current_filter_; iterate_from_current_filter_ = true; // If decodeHeaders() returns StopAllIteration, we should skip draining metadata, and wait // for doMetadata() to drain the metadata after iteration continues. if (!stoppedAll() && saved_request_metadata_ != nullptr && !getSavedRequestMetadata()->empty()) { drainSavedRequestMetadata(); } // Restores the original value of iterate_from_current_filter_. 
iterate_from_current_filter_ = saved_state; } RequestTrailerMap& ActiveStreamDecoderFilter::addDecodedTrailers() { return parent_.addDecodedTrailers(); } void ActiveStreamDecoderFilter::addDecodedData(Buffer::Instance& data, bool streaming) { parent_.addDecodedData(*this, data, streaming); } MetadataMapVector& ActiveStreamDecoderFilter::addDecodedMetadata() { return parent_.addDecodedMetadata(); } void ActiveStreamDecoderFilter::injectDecodedDataToFilterChain(Buffer::Instance& data, bool end_stream) { if (!headers_continued_) { headers_continued_ = true; doHeaders(false); } parent_.decodeData(this, data, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ActiveStreamDecoderFilter::continueDecoding() { commonContinue(); } const Buffer::Instance* ActiveStreamDecoderFilter::decodingBuffer() { return parent_.buffered_request_data_.get(); } void ActiveStreamDecoderFilter::modifyDecodingBuffer( std::function<void(Buffer::Instance&)> callback) { ASSERT(parent_.state_.latest_data_decoding_filter_ == this); callback(*parent_.buffered_request_data_.get()); } void ActiveStreamDecoderFilter::sendLocalReply( Code code, absl::string_view body, std::function<void(ResponseHeaderMap& headers)> modify_headers, const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) { parent_.sendLocalReply(code, body, modify_headers, grpc_status, details); } void ActiveStreamDecoderFilter::encode1xxHeaders(ResponseHeaderMapPtr&& headers) { // If Envoy is not configured to proxy 100-Continue responses, swallow the 100 Continue // here. This avoids the potential situation where Envoy strips Expect: 100-Continue and sends a // 100-Continue, then proxies a duplicate 100 Continue from upstream. if (parent_.proxy_100_continue_) { parent_.filter_manager_callbacks_.setInformationalHeaders(std::move(headers)); parent_.encode1xxHeaders(nullptr, *parent_.filter_manager_callbacks_.informationalHeaders()); } } ResponseHeaderMapOptRef ActiveStreamDecoderFilter::informationalHeaders() const { return parent_.filter_manager_callbacks_.informationalHeaders(); } void ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, absl::string_view details) { parent_.stream_info_.setResponseCodeDetails(details); parent_.filter_manager_callbacks_.setResponseHeaders(std::move(headers)); parent_.encodeHeaders(nullptr, *parent_.filter_manager_callbacks_.responseHeaders(), end_stream); } ResponseHeaderMapOptRef ActiveStreamDecoderFilter::responseHeaders() const { return parent_.filter_manager_callbacks_.responseHeaders(); } void ActiveStreamDecoderFilter::encodeData(Buffer::Instance& data, bool end_stream) { parent_.encodeData(nullptr, data, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ActiveStreamDecoderFilter::encodeTrailers(ResponseTrailerMapPtr&& trailers) { parent_.filter_manager_callbacks_.setResponseTrailers(std::move(trailers)); parent_.encodeTrailers(nullptr, *parent_.filter_manager_callbacks_.responseTrailers()); } ResponseTrailerMapOptRef ActiveStreamDecoderFilter::responseTrailers() const { return parent_.filter_manager_callbacks_.responseTrailers(); } void ActiveStreamDecoderFilter::encodeMetadata(MetadataMapPtr&& metadata_map_ptr) { parent_.encodeMetadata(nullptr, std::move(metadata_map_ptr)); } void ActiveStreamDecoderFilter::onDecoderFilterAboveWriteBufferHighWatermark() { parent_.filter_manager_callbacks_.onDecoderFilterAboveWriteBufferHighWatermark(); } void ActiveStreamDecoderFilter::requestDataTooLarge() 
{ ENVOY_STREAM_LOG(debug, "request data too large watermark exceeded", parent_); if (parent_.state_.decoder_filters_streaming_) { onDecoderFilterAboveWriteBufferHighWatermark(); } else { parent_.filter_manager_callbacks_.onRequestDataTooLarge(); sendLocalReply(Code::PayloadTooLarge, CodeUtility::toString(Code::PayloadTooLarge), nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestPayloadTooLarge); } } void FilterManager::addStreamDecoderFilterWorker(StreamDecoderFilterSharedPtr filter, FilterMatchStateSharedPtr match_state, bool dual_filter) { ActiveStreamDecoderFilterPtr wrapper( new ActiveStreamDecoderFilter(*this, filter, match_state, dual_filter)); // If we're a dual handling filter, have the encoding wrapper be the only thing registering itself // as the handling filter. if (match_state) { match_state->filter_ = filter.get(); } filter->setDecoderFilterCallbacks(*wrapper); // Note: configured decoder filters are appended to decoder_filters_. // This means that if filters are configured in the following order (assume all three filters are // both decoder/encoder filters): // http_filters: // - A // - B // - C // The decoder filter chain will iterate through filters A, B, C. LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_); } void FilterManager::addStreamEncoderFilterWorker(StreamEncoderFilterSharedPtr filter, FilterMatchStateSharedPtr match_state, bool dual_filter) { ActiveStreamEncoderFilterPtr wrapper( new ActiveStreamEncoderFilter(*this, filter, match_state, dual_filter)); if (match_state) { match_state->filter_ = filter.get(); } filter->setEncoderFilterCallbacks(*wrapper); // Note: configured encoder filters are prepended to encoder_filters_. // This means that if filters are configured in the following order (assume all three filters are // both decoder/encoder filters): // http_filters: // - A // - B // - C // The encoder filter chain will iterate through filters C, B, A. LinkedList::moveIntoList(std::move(wrapper), encoder_filters_); } void FilterManager::addAccessLogHandler(AccessLog::InstanceSharedPtr handler) { access_log_handlers_.push_back(handler); } void FilterManager::maybeContinueDecoding( const std::list<ActiveStreamDecoderFilterPtr>::iterator& continue_data_entry) { if (continue_data_entry != decoder_filters_.end()) { // We use the continueDecoding() code since it will correctly handle not calling // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code // expects it. ASSERT(buffered_request_data_); (*continue_data_entry)->iteration_state_ = ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueDecoding(); } } void FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHeaderMap& headers, bool end_stream) { // Headers filter iteration should always start with the next filter if available. 
  std::list<ActiveStreamDecoderFilterPtr>::iterator entry =
      commonDecodePrefix(filter, FilterIterationStartState::AlwaysStartFromNext);
  std::list<ActiveStreamDecoderFilterPtr>::iterator continue_data_entry = decoder_filters_.end();

  for (; entry != decoder_filters_.end(); entry++) {
    (*entry)->maybeEvaluateMatchTreeWithNewData(
        [&](auto& matching_data) { matching_data.onRequestHeaders(headers); });
    if ((*entry)->skipFilter()) {
      continue;
    }
    ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeHeaders));
    state_.filter_call_state_ |= FilterCallState::DecodeHeaders;
    (*entry)->end_stream_ = (end_stream && continue_data_entry == decoder_filters_.end());
    FilterHeadersStatus status = (*entry)->decodeHeaders(headers, (*entry)->end_stream_);
    if (state_.decoder_filter_chain_aborted_) {
      ENVOY_STREAM_LOG(trace,
                       "decodeHeaders filter iteration aborted due to local reply: filter={}",
                       *this, static_cast<const void*>((*entry).get()));
      status = FilterHeadersStatus::StopIteration;
    }
    ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_),
           "Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from "
           "decodeHeaders when end_stream is already false");
    state_.filter_call_state_ &= ~FilterCallState::DecodeHeaders;
    ENVOY_STREAM_LOG(trace, "decode headers called: filter={} status={}", *this,
                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));
    (*entry)->decode_headers_called_ = true;

    const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback(status, end_stream);
    ENVOY_BUG(!continue_iteration || !state_.local_complete_,
              "Filter did not return StopAll or StopIteration after sending a local reply.");

    // If this filter ended the stream, decodeComplete() should be called for it.
    if ((*entry)->end_stream_) {
      (*entry)->handle_->decodeComplete();
    }

    // Skip processing metadata after sending a local reply.
    if (state_.local_complete_ && std::next(entry) != decoder_filters_.end()) {
      maybeContinueDecoding(continue_data_entry);
      return;
    }

    const bool new_metadata_added = processNewlyAddedMetadata();
    // If end_stream is set in headers, and a filter adds new metadata, we need to delay end_stream
    // in headers by inserting an empty data frame with end_stream set. The empty data frame is
    // sent after the new metadata.
    if ((*entry)->end_stream_ && new_metadata_added && !buffered_request_data_) {
      Buffer::OwnedImpl empty_data("");
      ENVOY_STREAM_LOG(
          trace, "inserting an empty data frame for end_stream due to metadata being added.",
          *this);
      // A metadata frame doesn't carry the end of stream bit. We need an empty data frame to end
      // the stream.
      addDecodedData(*((*entry).get()), empty_data, true);
    }

    if (!continue_iteration && std::next(entry) != decoder_filters_.end()) {
      // Stop iteration IFF this is not the last filter. If it is the last filter, continue with
      // processing since we need to handle the case where a terminal filter wants to buffer, but
      // a previous filter has added a body.
      maybeContinueDecoding(continue_data_entry);
      return;
    }

    // Here we handle the case where we have a header only request, but a filter adds a body
    // to it. We must not raise end_stream = true for further filters during inline iteration.
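    // Illustrative trace (hypothetical filters A, B, C): A receives decodeHeaders(_, true) and
    // calls addDecodedData(); continue_data_entry then records A so that, once header iteration
    // finishes, maybeContinueDecoding() re-drives the chain and the added body reaches B and C
    // as a data frame with end_stream set.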
    if (end_stream && buffered_request_data_ && continue_data_entry == decoder_filters_.end()) {
      continue_data_entry = entry;
    }
  }

  maybeContinueDecoding(continue_data_entry);

  if (end_stream) {
    disarmRequestTimeout();
  }
}

void FilterManager::decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instance& data,
                               bool end_stream,
                               FilterIterationStartState filter_iteration_start_state) {
  ScopeTrackerScopeState scope(&*this, dispatcher_);
  filter_manager_callbacks_.resetIdleTimer();
  const bool fix_added_trailers =
      Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fix_added_trailers");

  // If a response is complete or a reset has been sent, filters do not care about further body
  // data. Just drop it.
  if (state_.local_complete_) {
    return;
  }

  auto trailers_added_entry = decoder_filters_.end();
  const bool trailers_exists_at_start = filter_manager_callbacks_.requestTrailers().has_value();
  // Filter iteration may start at the current filter.
  std::list<ActiveStreamDecoderFilterPtr>::iterator entry =
      commonDecodePrefix(filter, filter_iteration_start_state);

  for (; entry != decoder_filters_.end(); entry++) {
    if ((*entry)->skipFilter()) {
      continue;
    }
    // If the filter pointed to by entry has stopped for all frame types, return now.
    if (handleDataIfStopAll(**entry, data, state_.decoder_filters_streaming_)) {
      return;
    }
    // If end_stream_ is marked for a filter, the data is not for this filter and the filters
    // after it.
    //
    // In the following case, ActiveStreamFilterBase::commonContinue() could be called recursively
    // and its doData() would be called with the wrong data.
    //
    //  There are 3 decode filters and "wrapper" refers to the ActiveStreamFilter object.
    //
    //  filter0->decodeHeaders(_, true)
    //    return STOP
    //  filter0->continueDecoding()
    //    wrapper0->commonContinue()
    //      wrapper0->decodeHeaders(_, _, true)
    //        filter1->decodeHeaders(_, true)
    //          filter1->addDecodeData()
    //          return CONTINUE
    //        filter2->decodeHeaders(_, false)
    //          return CONTINUE
    //        wrapper1->commonContinue() // Detects data is added.
    //        wrapper1->doData()
    //          wrapper1->decodeData()
    //            filter2->decodeData(_, true)
    //              return CONTINUE
    //      wrapper0->doData() // This should not be called
    //        wrapper0->decodeData()
    //          filter1->decodeData(_, true)  // It will cause assertions.
    //
    // One way to solve this problem is to mark end_stream_ for each filter.
    // If a filter is already marked with end_stream_ when decodeData() is called, bail out of the
    // whole function. If we merely skipped the filter, the code after the loop would be called
    // with the wrong data; for encodeData, response_encoder->encode() would be called.
    if ((*entry)->end_stream_) {
      return;
    }
    ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeData));

    // We check the request_trailers_ pointer here in case addDecodedTrailers
    // is called in decodeData during a previous filter invocation, at which point we communicate
    // to the current and future filters that the stream has not yet ended.
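    // Illustrative trace: if filter A calls addDecodedTrailers() while handling the last data
    // frame, requestTrailers() acquires a value, so the filters after A observe
    // end_stream == false for this data frame and receive the trailers through the
    // decodeTrailers() pass triggered after the loop below.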
    if (end_stream) {
      state_.filter_call_state_ |= FilterCallState::LastDataFrame;
    }

    recordLatestDataFilter(entry, state_.latest_data_decoding_filter_, decoder_filters_);

    state_.filter_call_state_ |= FilterCallState::DecodeData;
    (*entry)->end_stream_ = end_stream && !filter_manager_callbacks_.requestTrailers();
    FilterDataStatus status = (*entry)->handle_->decodeData(data, (*entry)->end_stream_);
    if ((*entry)->end_stream_) {
      (*entry)->handle_->decodeComplete();
    }
    state_.filter_call_state_ &= ~FilterCallState::DecodeData;
    if (end_stream) {
      state_.filter_call_state_ &= ~FilterCallState::LastDataFrame;
    }
    ENVOY_STREAM_LOG(trace, "decode data called: filter={} status={}", *this,
                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));
    if (state_.decoder_filter_chain_aborted_) {
      ENVOY_STREAM_LOG(trace, "decodeData filter iteration aborted due to local reply: filter={}",
                       *this, static_cast<const void*>((*entry).get()));
      return;
    }

    processNewlyAddedMetadata();

    if (!trailers_exists_at_start && filter_manager_callbacks_.requestTrailers() &&
        trailers_added_entry == decoder_filters_.end()) {
      if (fix_added_trailers) {
        end_stream = false;
      }
      trailers_added_entry = entry;
    }

    if (!(*entry)->commonHandleAfterDataCallback(status, data,
                                                 state_.decoder_filters_streaming_) &&
        std::next(entry) != decoder_filters_.end()) {
      // Stop iteration IFF this is not the last filter. If it is the last filter, continue with
      // processing since we need to handle the case where a terminal filter wants to buffer, but
      // a previous filter has added trailers.
      if (fix_added_trailers) {
        break;
      } else {
        return;
      }
    }
  }

  // If trailers were added during decodeData we need to trigger decodeTrailers in order
  // to allow filters to process the trailers.
  if (trailers_added_entry != decoder_filters_.end()) {
    decodeTrailers(trailers_added_entry->get(), *filter_manager_callbacks_.requestTrailers());
  }
  if (end_stream) {
    disarmRequestTimeout();
  }
}

RequestTrailerMap& FilterManager::addDecodedTrailers() {
  // Trailers can only be added during the last data frame (i.e. end_stream = true).
  ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame);
  filter_manager_callbacks_.setRequestTrailers(RequestTrailerMapImpl::create());
  return *filter_manager_callbacks_.requestTrailers();
}

void FilterManager::addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data,
                                   bool streaming) {
  if (state_.filter_call_state_ == 0 ||
      (state_.filter_call_state_ & FilterCallState::DecodeHeaders) ||
      (state_.filter_call_state_ & FilterCallState::DecodeData) ||
      ((state_.filter_call_state_ & FilterCallState::DecodeTrailers) && !filter.canIterate())) {
    // Make sure that if this triggers watermarks, the correct action is taken.
    state_.decoder_filters_streaming_ = streaming;
    // If no call is happening or we are in the decode headers/data callback, buffer the data.
    // Inline processing happens in the decodeHeaders() callback if necessary.
    filter.commonHandleBufferData(data);
  } else if (state_.filter_call_state_ & FilterCallState::DecodeTrailers) {
    // In this case we need to inline dispatch the data to further filters. If those filters
    // choose to buffer/stop iteration that's fine.
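    // Illustrative: filter A calls addDecodedData() from decodeTrailers() while still able to
    // iterate, so the added body is inline-dispatched to the filters after A with
    // end_stream == false; the trailers themselves still end the stream.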
    decodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext);
  } else {
    IS_ENVOY_BUG("Invalid request data");
    sendLocalReply(Http::Code::BadGateway, "Filter error", nullptr, absl::nullopt,
                   StreamInfo::ResponseCodeDetails::get().FilterAddedInvalidRequestData);
  }
}

MetadataMapVector& FilterManager::addDecodedMetadata() { return *getRequestMetadataMapVector(); }

void FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, RequestTrailerMap& trailers) {
  // See decodeData() above for why we check local_complete_ here.
  if (state_.local_complete_) {
    return;
  }

  // Filter iteration may start at the current filter.
  std::list<ActiveStreamDecoderFilterPtr>::iterator entry =
      commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent);

  for (; entry != decoder_filters_.end(); entry++) {
    (*entry)->maybeEvaluateMatchTreeWithNewData(
        [&](auto& matching_data) { matching_data.onRequestTrailers(trailers); });
    if ((*entry)->skipFilter()) {
      continue;
    }
    // If the filter pointed to by entry has stopped for all frame types, return now.
    if ((*entry)->stoppedAll()) {
      return;
    }
    ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeTrailers));
    state_.filter_call_state_ |= FilterCallState::DecodeTrailers;
    FilterTrailersStatus status = (*entry)->handle_->decodeTrailers(trailers);
    (*entry)->handle_->decodeComplete();
    (*entry)->end_stream_ = true;
    state_.filter_call_state_ &= ~FilterCallState::DecodeTrailers;
    ENVOY_STREAM_LOG(trace, "decode trailers called: filter={} status={}", *this,
                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));
    if (state_.decoder_filter_chain_aborted_) {
      ENVOY_STREAM_LOG(trace,
                       "decodeTrailers filter iteration aborted due to local reply: filter={}",
                       *this, static_cast<const void*>((*entry).get()));
      status = FilterTrailersStatus::StopIteration;
    }

    processNewlyAddedMetadata();

    if (!(*entry)->commonHandleAfterTrailersCallback(status)) {
      return;
    }
  }
  disarmRequestTimeout();
}

void FilterManager::decodeMetadata(ActiveStreamDecoderFilter* filter, MetadataMap& metadata_map) {
  // Filter iteration may start at the current filter.
  std::list<ActiveStreamDecoderFilterPtr>::iterator entry =
      commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent);

  for (; entry != decoder_filters_.end(); entry++) {
    if ((*entry)->skipFilter()) {
      continue;
    }
    // If the filter pointed to by entry has stopped for all frame types, store the metadata and
    // return. If the filter pointed to by entry hasn't returned from decodeHeaders, store the
    // newly added metadata in case decodeHeaders returns StopAllIteration. The latter can happen
    // when headers callbacks generate new metadata.
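    // Illustrative trace: filter A returns StopAllIterationAndBuffer from decodeHeaders(); a
    // metadata frame arriving afterwards is saved on A via getSavedRequestMetadata() and is
    // drained via doMetadata()/drainSavedRequestMetadata() once A resumes iteration.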
    if (!(*entry)->decode_headers_called_ || (*entry)->stoppedAll()) {
      Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map);
      (*entry)->getSavedRequestMetadata()->emplace_back(std::move(metadata_map_ptr));
      return;
    }

    FilterMetadataStatus status = (*entry)->handle_->decodeMetadata(metadata_map);
    ENVOY_STREAM_LOG(trace, "decode metadata called: filter={} status={}, metadata: {}", *this,
                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status),
                     metadata_map);
  }
}

void FilterManager::maybeEndDecode(bool end_stream) {
  ASSERT(!state_.remote_complete_);
  state_.remote_complete_ = end_stream;
  if (end_stream) {
    stream_info_.downstreamTiming().onLastDownstreamRxByteReceived(dispatcher().timeSource());
    ENVOY_STREAM_LOG(debug, "request end stream", *this);
  }
}

void FilterManager::disarmRequestTimeout() { filter_manager_callbacks_.disarmRequestTimeout(); }

std::list<ActiveStreamEncoderFilterPtr>::iterator
FilterManager::commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream,
                                  FilterIterationStartState filter_iteration_start_state) {
  // Only do base state setting on the initial call. Subsequent calls for filtering do not touch
  // the base state.
  if (filter == nullptr) {
    ASSERT(!state_.local_complete_);
    state_.local_complete_ = end_stream;
    return encoder_filters_.begin();
  }

  if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent &&
      (*(filter->entry()))->iterate_from_current_filter_) {
    // The filter iteration has been stopped for all frame types, and now the iteration continues.
    // The current filter's encoding callback has not been called. Call it now.
    return filter->entry();
  }
  return std::next(filter->entry());
}

std::list<ActiveStreamDecoderFilterPtr>::iterator
FilterManager::commonDecodePrefix(ActiveStreamDecoderFilter* filter,
                                  FilterIterationStartState filter_iteration_start_state) {
  if (!filter) {
    return decoder_filters_.begin();
  }
  if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent &&
      (*(filter->entry()))->iterate_from_current_filter_) {
    // The filter iteration has been stopped for all frame types, and now the iteration continues.
    // The current filter's callback function has not been called. Call it now.
    return filter->entry();
  }
  return std::next(filter->entry());
}

void FilterManager::onLocalReply(StreamFilterBase::LocalReplyData& data) {
  state_.under_on_local_reply_ = true;
  filter_manager_callbacks_.onLocalReply(data.code_);

  for (auto entry : filters_) {
    if (entry->onLocalReply(data) == LocalErrorStatus::ContinueAndResetStream) {
      data.reset_imminent_ = true;
    }
  }
  state_.under_on_local_reply_ = false;
}

void FilterManager::sendLocalReply(
    Code code, absl::string_view body,
    const std::function<void(ResponseHeaderMap& headers)>& modify_headers,
    const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) {
  ASSERT(!state_.under_on_local_reply_);
  const bool is_head_request = state_.is_head_request_;
  const bool is_grpc_request = state_.is_grpc_request_;

  // Stop filter chain iteration if a local reply was sent while filter decoding or encoding
  // callbacks are running.
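  // e.g. if a filter calls sendLocalReply() from within its decodeHeaders() callback, the
  // decoder_filter_chain_aborted_ flag set below makes the decodeHeaders() loop rewrite that
  // filter's returned status to StopIteration, so no further decoder filters run on the
  // original request.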
  if (state_.filter_call_state_ & (FilterCallState::DecodeHeaders | FilterCallState::DecodeData |
                                   FilterCallState::DecodeTrailers)) {
    state_.decoder_filter_chain_aborted_ = true;
  } else if (state_.filter_call_state_ &
             (FilterCallState::EncodeHeaders | FilterCallState::EncodeData |
              FilterCallState::EncodeTrailers)) {
    state_.encoder_filter_chain_aborted_ = true;
  }

  stream_info_.setResponseCodeDetails(details);
  StreamFilterBase::LocalReplyData data{code, details, false};
  FilterManager::onLocalReply(data);
  if (data.reset_imminent_) {
    ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. onLocalReply requested reset.", *this,
                     details);
    filter_manager_callbacks_.resetStream();
    return;
  }

  if (!filter_manager_callbacks_.responseHeaders().has_value()) {
    // If the response has not started at all, send the response through the filter chain.
    sendLocalReplyViaFilterChain(is_grpc_request, code, body, modify_headers, is_head_request,
                                 grpc_status, details);
  } else if (!state_.non_100_response_headers_encoded_) {
    ENVOY_STREAM_LOG(debug, "Sending local reply with details {} directly to the encoder", *this,
                     details);
    // In this case, at least the headers and possibly the body have started
    // processing through the filter chain, but no non-informational headers
    // have been sent downstream. To ensure that filters don't get their
    // state machine screwed up, bypass the filter chain and send the local
    // reply directly to the codec.
    sendDirectLocalReply(code, body, modify_headers, state_.is_head_request_, grpc_status);
  } else {
    // If we land in this branch, response headers have already been sent to the client.
    // All we can do at this point is reset the stream.
    ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. Prior headers have already been sent",
                     *this, details);
    // TODO(snowp): This means we increment the tx_reset stat which we weren't doing previously.
    // Intended?
    filter_manager_callbacks_.resetStream();
  }
}

void FilterManager::sendLocalReplyViaFilterChain(
    bool is_grpc_request, Code code, absl::string_view body,
    const std::function<void(ResponseHeaderMap& headers)>& modify_headers, bool is_head_request,
    const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) {
  ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", *this, details);
  ASSERT(!filter_manager_callbacks_.responseHeaders().has_value());
  // For early error handling, do a best-effort attempt to create a filter chain
  // to ensure access logging. If the filter chain already exists this will be
  // a no-op.
  createFilterChain();

  Utility::sendLocalReply(
      state_.destroyed_,
      Utility::EncodeFunctions{
          [this, modify_headers](ResponseHeaderMap& headers) -> void {
            if (streamInfo().route() && streamInfo().route()->routeEntry()) {
              streamInfo().route()->routeEntry()->finalizeResponseHeaders(headers, streamInfo());
            }
            if (modify_headers) {
              modify_headers(headers);
            }
          },
          [this](ResponseHeaderMap& response_headers, Code& code, std::string& body,
                 absl::string_view& content_type) -> void {
            // TODO(snowp): This &get() business isn't nice, rework LocalReply and others to
            // accept opt refs.
            local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().ptr(),
                                 response_headers, stream_info_, code, body, content_type);
          },
          [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void {
            filter_manager_callbacks_.setResponseHeaders(std::move(headers));
            // TODO: Start encoding from the last decoder filter that saw the
            // request instead.
encodeHeaders(nullptr, filter_manager_callbacks_.responseHeaders().ref(), end_stream); }, [this](Buffer::Instance& data, bool end_stream) -> void { // TODO: Start encoding from the last decoder filter that saw the // request instead. encodeData(nullptr, data, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); }}, Utility::LocalReplyData{is_grpc_request, code, body, grpc_status, is_head_request}); } void FilterManager::sendDirectLocalReply( Code code, absl::string_view body, const std::function<void(ResponseHeaderMap&)>& modify_headers, bool is_head_request, const absl::optional<Grpc::Status::GrpcStatus> grpc_status) { // Make sure we won't end up with nested watermark calls from the body buffer. state_.encoder_filters_streaming_ = true; Http::Utility::sendLocalReply( state_.destroyed_, Utility::EncodeFunctions{ [this, modify_headers](ResponseHeaderMap& headers) -> void { if (streamInfo().route() && streamInfo().route()->routeEntry()) { streamInfo().route()->routeEntry()->finalizeResponseHeaders(headers, streamInfo()); } if (modify_headers) { modify_headers(headers); } }, [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().ptr(), response_headers, stream_info_, code, body, content_type); }, [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { // Move the response headers into the FilterManager to make sure they're visible to // access logs. filter_manager_callbacks_.setResponseHeaders(std::move(response_headers)); state_.non_100_response_headers_encoded_ = true; filter_manager_callbacks_.encodeHeaders(*filter_manager_callbacks_.responseHeaders(), end_stream); if (state_.saw_downstream_reset_) { return; } maybeEndEncode(end_stream); }, [&](Buffer::Instance& data, bool end_stream) -> void { filter_manager_callbacks_.encodeData(data, end_stream); if (state_.saw_downstream_reset_) { return; } maybeEndEncode(end_stream); }}, Utility::LocalReplyData{state_.is_grpc_request_, code, body, grpc_status, is_head_request}); } void FilterManager::encode1xxHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers) { filter_manager_callbacks_.resetIdleTimer(); ASSERT(proxy_100_continue_); // The caller must guarantee that encode1xxHeaders() is invoked at most once. ASSERT(!state_.has_1xx_headers_ || filter != nullptr); // Make sure commonContinue continues encode1xxHeaders. state_.has_1xx_headers_ = true; // Similar to the block in encodeHeaders, run encode1xxHeaders on each // filter. This is simpler than that case because 100 continue implies no // end-stream, and because there are normal headers coming there's no need for // complex continuation logic. // 100-continue filter iteration should always start with the next filter if available. 
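  // e.g. when the upstream sends a "100 Continue", each filter's encode1xxHeaders() runs exactly
  // once before the informational headers are handed to the codec below; a filter returning
  // StopIteration here is resumed later via commonContinue()/do1xxHeaders().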
std::list<ActiveStreamEncoderFilterPtr>::iterator entry = commonEncodePrefix(filter, false, FilterIterationStartState::AlwaysStartFromNext); for (; entry != encoder_filters_.end(); entry++) { if ((*entry)->skipFilter()) { continue; } ASSERT(!(state_.filter_call_state_ & FilterCallState::Encode1xxHeaders)); state_.filter_call_state_ |= FilterCallState::Encode1xxHeaders; FilterHeadersStatus status = (*entry)->handle_->encode1xxHeaders(headers); state_.filter_call_state_ &= ~FilterCallState::Encode1xxHeaders; ENVOY_STREAM_LOG(trace, "encode 1xx continue headers called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); if (!(*entry)->commonHandleAfter1xxHeadersCallback(status)) { return; } } filter_manager_callbacks_.encode1xxHeaders(headers); } void FilterManager::maybeContinueEncoding( const std::list<ActiveStreamEncoderFilterPtr>::iterator& continue_data_entry) { if (continue_data_entry != encoder_filters_.end()) { // We use the continueEncoding() code since it will correctly handle not calling // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code // expects it. ASSERT(buffered_response_data_); (*continue_data_entry)->iteration_state_ = ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueEncoding(); } } void FilterManager::encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers, bool end_stream) { // See encodeHeaders() comments in include/envoy/http/filter.h for why the 1xx precondition holds. ASSERT(!CodeUtility::is1xx(Utility::getResponseStatus(headers)) || Utility::getResponseStatus(headers) == enumToInt(Http::Code::SwitchingProtocols)); filter_manager_callbacks_.resetIdleTimer(); disarmRequestTimeout(); // Headers filter iteration should always start with the next filter if available. std::list<ActiveStreamEncoderFilterPtr>::iterator entry = commonEncodePrefix(filter, end_stream, FilterIterationStartState::AlwaysStartFromNext); std::list<ActiveStreamEncoderFilterPtr>::iterator continue_data_entry = encoder_filters_.end(); for (; entry != encoder_filters_.end(); entry++) { (*entry)->maybeEvaluateMatchTreeWithNewData( [&headers](auto& matching_data) { matching_data.onResponseHeaders(headers); }); if ((*entry)->skipFilter()) { continue; } ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeHeaders)); state_.filter_call_state_ |= FilterCallState::EncodeHeaders; (*entry)->end_stream_ = (end_stream && continue_data_entry == encoder_filters_.end()); FilterHeadersStatus status = (*entry)->handle_->encodeHeaders(headers, (*entry)->end_stream_); if (state_.encoder_filter_chain_aborted_) { ENVOY_STREAM_LOG(trace, "encodeHeaders filter iteration aborted due to local reply: filter={}", *this, static_cast<const void*>((*entry).get())); status = FilterHeadersStatus::StopIteration; } ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_), "Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from " "encodeHeaders when end_stream is already false"); state_.filter_call_state_ &= ~FilterCallState::EncodeHeaders; ENVOY_STREAM_LOG(trace, "encode headers called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); (*entry)->encode_headers_called_ = true; const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback(status, end_stream); // If this filter ended the stream, encodeComplete() should be called for it. 
    if ((*entry)->end_stream_) {
      (*entry)->handle_->encodeComplete();
    }

    if (!continue_iteration) {
      if (!(*entry)->end_stream_) {
        maybeContinueEncoding(continue_data_entry);
      }
      return;
    }

    // Here we handle the case where we have a header only response, but a filter adds a body
    // to it. We must not raise end_stream = true for further filters during inline iteration.
    if (end_stream && buffered_response_data_ && continue_data_entry == encoder_filters_.end()) {
      continue_data_entry = entry;
    }
  }

  // Check that the filter chain above did not remove critical headers or set malformed header
  // values. We could do this at the codec in order to prevent places other than the filter chain
  // from removing critical headers, but that would come with additional implementation
  // complexity. See the previous attempt (#15658) for details; for now we choose to protect only
  // against filter chains.
  const auto status = HeaderUtility::checkRequiredResponseHeaders(headers);
  if (!status.ok()) {
    // If the check failed, reply with BadGateway and stop further processing.
    sendLocalReply(
        Http::Code::BadGateway, status.message(), nullptr, absl::nullopt,
        absl::StrCat(StreamInfo::ResponseCodeDetails::get().FilterRemovedRequiredResponseHeaders,
                     "{", StringUtil::replaceAllEmptySpace(status.message()), "}"));
    return;
  }

  const bool modified_end_stream = (end_stream && continue_data_entry == encoder_filters_.end());
  state_.non_100_response_headers_encoded_ = true;
  filter_manager_callbacks_.encodeHeaders(headers, modified_end_stream);
  if (state_.saw_downstream_reset_) {
    return;
  }
  maybeEndEncode(modified_end_stream);

  if (!modified_end_stream) {
    maybeContinueEncoding(continue_data_entry);
  }
}

void FilterManager::encodeMetadata(ActiveStreamEncoderFilter* filter,
                                   MetadataMapPtr&& metadata_map_ptr) {
  filter_manager_callbacks_.resetIdleTimer();

  std::list<ActiveStreamEncoderFilterPtr>::iterator entry =
      commonEncodePrefix(filter, false, FilterIterationStartState::CanStartFromCurrent);

  for (; entry != encoder_filters_.end(); entry++) {
    if ((*entry)->skipFilter()) {
      continue;
    }
    // If the filter pointed to by entry has stopped for all frame types, store the metadata and
    // return. If the filter pointed to by entry hasn't returned from encodeHeaders, store the
    // newly added metadata in case encodeHeaders returns StopAllIteration. The latter can happen
    // when headers callbacks generate new metadata.
    if (!(*entry)->encode_headers_called_ || (*entry)->stoppedAll()) {
      (*entry)->getSavedResponseMetadata()->emplace_back(std::move(metadata_map_ptr));
      return;
    }

    FilterMetadataStatus status = (*entry)->handle_->encodeMetadata(*metadata_map_ptr);
    ENVOY_STREAM_LOG(trace, "encode metadata called: filter={} status={}", *this,
                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));
  }
  // TODO(soya3129): update stats with metadata.

  // Now encode metadata via the codec.
  if (!metadata_map_ptr->empty()) {
    MetadataMapVector metadata_map_vector;
    metadata_map_vector.emplace_back(std::move(metadata_map_ptr));
    filter_manager_callbacks_.encodeMetadata(metadata_map_vector);
  }
}

ResponseTrailerMap& FilterManager::addEncodedTrailers() {
  // Trailers can only be added during the last data frame (i.e. end_stream = true).
  ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame);

  // Trailers can only be added once.
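  // Illustrative use from a hypothetical filter's encodeData() (not part of this file; assumes
  // the standard StreamEncoderFilterCallbacks API):
  //
  //   Http::FilterDataStatus MyFilter::encodeData(Buffer::Instance&, bool end_stream) {
  //     if (end_stream) {
  //       Http::ResponseTrailerMap& trailers = encoder_callbacks_->addEncodedTrailers();
  //       trailers.addCopy(Http::LowerCaseString("x-bytes-seen"), bytes_seen_);
  //     }
  //     return Http::FilterDataStatus::Continue;
  //   }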
  ASSERT(!filter_manager_callbacks_.responseTrailers());
  filter_manager_callbacks_.setResponseTrailers(ResponseTrailerMapImpl::create());
  return *filter_manager_callbacks_.responseTrailers();
}

void FilterManager::addEncodedData(ActiveStreamEncoderFilter& filter, Buffer::Instance& data,
                                   bool streaming) {
  if (state_.filter_call_state_ == 0 ||
      (state_.filter_call_state_ & FilterCallState::EncodeHeaders) ||
      (state_.filter_call_state_ & FilterCallState::EncodeData) ||
      ((state_.filter_call_state_ & FilterCallState::EncodeTrailers) && !filter.canIterate())) {
    // Make sure that if this triggers watermarks, the correct action is taken.
    state_.encoder_filters_streaming_ = streaming;
    // If no call is happening or we are in the encode headers/data callback, buffer the data.
    // Inline processing happens in the encodeHeaders() callback if necessary.
    filter.commonHandleBufferData(data);
  } else if (state_.filter_call_state_ & FilterCallState::EncodeTrailers) {
    // In this case we need to inline dispatch the data to further filters. If those filters
    // choose to buffer/stop iteration that's fine.
    encodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext);
  } else {
    IS_ENVOY_BUG("Invalid response data");
    sendLocalReply(Http::Code::BadGateway, "Filter error", nullptr, absl::nullopt,
                   StreamInfo::ResponseCodeDetails::get().FilterAddedInvalidResponseData);
  }
}

void FilterManager::encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instance& data,
                               bool end_stream,
                               FilterIterationStartState filter_iteration_start_state) {
  filter_manager_callbacks_.resetIdleTimer();

  // Filter iteration may start at the current filter.
  std::list<ActiveStreamEncoderFilterPtr>::iterator entry =
      commonEncodePrefix(filter, end_stream, filter_iteration_start_state);
  auto trailers_added_entry = encoder_filters_.end();

  const bool trailers_exists_at_start = filter_manager_callbacks_.responseTrailers().has_value();
  for (; entry != encoder_filters_.end(); entry++) {
    if ((*entry)->skipFilter()) {
      continue;
    }
    // If the filter pointed to by entry has stopped for all frame types, return now.
    if (handleDataIfStopAll(**entry, data, state_.encoder_filters_streaming_)) {
      return;
    }
    // If end_stream_ is marked for a filter, the data is not for this filter and the filters
    // after it. For details, please see the comment in the FilterManager::decodeData() function.
    if ((*entry)->end_stream_) {
      return;
    }
    ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeData));

    // We check the response_trailers_ pointer here in case addEncodedTrailers
    // is called in encodeData during a previous filter invocation, at which point we communicate
    // to the current and future filters that the stream has not yet ended.
    state_.filter_call_state_ |= FilterCallState::EncodeData;
    if (end_stream) {
      state_.filter_call_state_ |= FilterCallState::LastDataFrame;
    }

    recordLatestDataFilter(entry, state_.latest_data_encoding_filter_, encoder_filters_);

    (*entry)->end_stream_ = end_stream && !filter_manager_callbacks_.responseTrailers();
    FilterDataStatus status = (*entry)->handle_->encodeData(data, (*entry)->end_stream_);
    if (state_.encoder_filter_chain_aborted_) {
      ENVOY_STREAM_LOG(trace, "encodeData filter iteration aborted due to local reply: filter={}",
                       *this, static_cast<const void*>((*entry).get()));
      status = FilterDataStatus::StopIterationNoBuffer;
    }
    if ((*entry)->end_stream_) {
      (*entry)->handle_->encodeComplete();
    }
    state_.filter_call_state_ &= ~FilterCallState::EncodeData;
    if (end_stream) {
      state_.filter_call_state_ &= ~FilterCallState::LastDataFrame;
    }
    ENVOY_STREAM_LOG(trace, "encode data called: filter={} status={}", *this,
                     static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status));

    if (!trailers_exists_at_start && filter_manager_callbacks_.responseTrailers() &&
        trailers_added_entry == encoder_filters_.end()) {
      trailers_added_entry = entry;
    }

    if (!(*entry)->commonHandleAfterDataCallback(status, data,
                                                 state_.encoder_filters_streaming_)) {
      return;
    }
  }

  const bool modified_end_stream = end_stream && trailers_added_entry == encoder_filters_.end();
  filter_manager_callbacks_.encodeData(data, modified_end_stream);
  if (state_.saw_downstream_reset_) {
    return;
  }
  maybeEndEncode(modified_end_stream);

  // If trailers were added during encodeData we need to trigger encodeTrailers in order
  // to allow filters to process the trailers.
  if (trailers_added_entry != encoder_filters_.end()) {
    encodeTrailers(trailers_added_entry->get(), *filter_manager_callbacks_.responseTrailers());
  }
}

void FilterManager::encodeTrailers(ActiveStreamEncoderFilter* filter,
                                   ResponseTrailerMap& trailers) {
  filter_manager_callbacks_.resetIdleTimer();

  // Filter iteration may start at the current filter.
  std::list<ActiveStreamEncoderFilterPtr>::iterator entry =
      commonEncodePrefix(filter, true, FilterIterationStartState::CanStartFromCurrent);
  for (; entry != encoder_filters_.end(); entry++) {
    (*entry)->maybeEvaluateMatchTreeWithNewData(
        [&](auto& matching_data) { matching_data.onResponseTrailers(trailers); });
    if ((*entry)->skipFilter()) {
      continue;
    }
    // If the filter pointed to by entry has stopped for all frame types, return now.
if ((*entry)->stoppedAll()) { return; } ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeTrailers)); state_.filter_call_state_ |= FilterCallState::EncodeTrailers; FilterTrailersStatus status = (*entry)->handle_->encodeTrailers(trailers); (*entry)->handle_->encodeComplete(); (*entry)->end_stream_ = true; state_.filter_call_state_ &= ~FilterCallState::EncodeTrailers; ENVOY_STREAM_LOG(trace, "encode trailers called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); if (!(*entry)->commonHandleAfterTrailersCallback(status)) { return; } } filter_manager_callbacks_.encodeTrailers(trailers); if (state_.saw_downstream_reset_) { return; } maybeEndEncode(true); } void FilterManager::maybeEndEncode(bool end_stream) { if (end_stream) { filter_manager_callbacks_.endStream(); } } bool FilterManager::processNewlyAddedMetadata() { if (request_metadata_map_vector_ == nullptr) { return false; } for (const auto& metadata_map : *getRequestMetadataMapVector()) { decodeMetadata(nullptr, *metadata_map); } getRequestMetadataMapVector()->clear(); return true; } bool FilterManager::handleDataIfStopAll(ActiveStreamFilterBase& filter, Buffer::Instance& data, bool& filter_streaming) { if (filter.stoppedAll()) { ASSERT(!filter.canIterate()); filter_streaming = filter.iteration_state_ == ActiveStreamFilterBase::IterationState::StopAllWatermark; filter.commonHandleBufferData(data); return true; } return false; } void FilterManager::callHighWatermarkCallbacks() { ++high_watermark_count_; for (auto watermark_callbacks : watermark_callbacks_) { watermark_callbacks->onAboveWriteBufferHighWatermark(); } } void FilterManager::callLowWatermarkCallbacks() { ASSERT(high_watermark_count_ > 0); --high_watermark_count_; for (auto watermark_callbacks : watermark_callbacks_) { watermark_callbacks->onBelowWriteBufferLowWatermark(); } } void FilterManager::setBufferLimit(uint32_t new_limit) { ENVOY_STREAM_LOG(debug, "setting buffer limit to {}", *this, new_limit); buffer_limit_ = new_limit; if (buffered_request_data_) { buffered_request_data_->setWatermarks(buffer_limit_); } if (buffered_response_data_) { buffered_response_data_->setWatermarks(buffer_limit_); } } void FilterManager::contextOnContinue(ScopeTrackedObjectStack& tracked_object_stack) { tracked_object_stack.add(connection_); tracked_object_stack.add(filter_manager_callbacks_.scope()); } bool FilterManager::createFilterChain() { if (state_.created_filter_chain_) { return false; } bool upgrade_rejected = false; const HeaderEntry* upgrade = nullptr; if (filter_manager_callbacks_.requestHeaders()) { upgrade = filter_manager_callbacks_.requestHeaders()->Upgrade(); // Treat CONNECT requests as a special upgrade case. if (!upgrade && HeaderUtility::isConnect(*filter_manager_callbacks_.requestHeaders())) { upgrade = filter_manager_callbacks_.requestHeaders()->Method(); } } state_.created_filter_chain_ = true; if (upgrade != nullptr) { const Router::RouteEntry::UpgradeMap* upgrade_map = filter_manager_callbacks_.upgradeMap(); if (filter_chain_factory_.createUpgradeFilterChain(upgrade->value().getStringView(), upgrade_map, *this)) { filter_manager_callbacks_.upgradeFilterChainCreated(); return true; } else { upgrade_rejected = true; // Fall through to the default filter chain. The function calling this // will send a local reply indicating that the upgrade failed. 
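      // e.g. a request with "Upgrade: websocket" looks for a "websocket" upgrade filter chain
      // (optionally per-route via the UpgradeMap); when none is configured, upgrade_rejected is
      // set and the default chain below is still created so the failure can be access-logged.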
    }
  }

  filter_chain_factory_.createFilterChain(*this);
  return !upgrade_rejected;
}

void ActiveStreamDecoderFilter::requestDataDrained() {
  // If this is called it means the call to requestDataTooLarge() was a
  // streaming call, or a 413 would have been sent.
  onDecoderFilterBelowWriteBufferLowWatermark();
}

void ActiveStreamDecoderFilter::onDecoderFilterBelowWriteBufferLowWatermark() {
  parent_.filter_manager_callbacks_.onDecoderFilterBelowWriteBufferLowWatermark();
}

void ActiveStreamDecoderFilter::addDownstreamWatermarkCallbacks(
    DownstreamWatermarkCallbacks& watermark_callbacks) {
  // This is called exactly once per upstream-stream, by the router filter. Therefore, we
  // expect the same callbacks to not be registered twice.
  ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(),
                   &watermark_callbacks) == parent_.watermark_callbacks_.end());
  parent_.watermark_callbacks_.emplace(parent_.watermark_callbacks_.end(), &watermark_callbacks);
  for (uint32_t i = 0; i < parent_.high_watermark_count_; ++i) {
    watermark_callbacks.onAboveWriteBufferHighWatermark();
  }
}

void ActiveStreamDecoderFilter::removeDownstreamWatermarkCallbacks(
    DownstreamWatermarkCallbacks& watermark_callbacks) {
  ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(),
                   &watermark_callbacks) != parent_.watermark_callbacks_.end());
  parent_.watermark_callbacks_.remove(&watermark_callbacks);
}

void ActiveStreamDecoderFilter::setDecoderBufferLimit(uint32_t limit) {
  parent_.setBufferLimit(limit);
}

uint32_t ActiveStreamDecoderFilter::decoderBufferLimit() { return parent_.buffer_limit_; }

bool ActiveStreamDecoderFilter::recreateStream(const ResponseHeaderMap* headers) {
  // Because the filter's and the HCM's views of whether the stream has a body and whether the
  // stream is complete may differ, re-check completeness here to make sure the stream really
  // ended from the HCM's point of view.
  if (!complete()) {
    return false;
  }

  parent_.stream_info_.setResponseCodeDetails(
      StreamInfo::ResponseCodeDetails::get().InternalRedirect);

  if (headers != nullptr) {
    // The call to setResponseHeaders is needed to ensure that the headers are properly logged in
    // access logs before the stream is destroyed. Since the function expects a
    // ResponseHeaderMapPtr&&, ownership of the headers must be passed. This cannot happen earlier
    // in the flow (such as in the call to setupRedirect) because at that point it is still
    // possible for the headers to be used in a different logical branch. We work around this by
    // creating a copy and passing ownership of the copy instead.
ResponseHeaderMapPtr headers_copy = createHeaderMap<ResponseHeaderMapImpl>(*headers); parent_.filter_manager_callbacks_.setResponseHeaders(std::move(headers_copy)); parent_.filter_manager_callbacks_.chargeStats(*headers); } parent_.filter_manager_callbacks_.recreateStream(parent_.stream_info_.filter_state_); return true; } void ActiveStreamDecoderFilter::addUpstreamSocketOptions( const Network::Socket::OptionsSharedPtr& options) { Network::Socket::appendOptions(parent_.upstream_options_, options); } Network::Socket::OptionsSharedPtr ActiveStreamDecoderFilter::getUpstreamSocketOptions() const { return parent_.upstream_options_; } void ActiveStreamDecoderFilter::requestRouteConfigUpdate( Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) { parent_.filter_manager_callbacks_.requestRouteConfigUpdate(std::move(route_config_updated_cb)); } absl::optional<Router::ConfigConstSharedPtr> ActiveStreamDecoderFilter::routeConfig() { return parent_.filter_manager_callbacks_.routeConfig(); } Buffer::InstancePtr ActiveStreamEncoderFilter::createBuffer() { auto buffer = dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { this->responseDataDrained(); }, [this]() -> void { this->responseDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); buffer->setWatermarks(parent_.buffer_limit_); return buffer; } Buffer::InstancePtr& ActiveStreamEncoderFilter::bufferedData() { return parent_.buffered_response_data_; } bool ActiveStreamEncoderFilter::complete() { return parent_.state_.local_complete_; } bool ActiveStreamEncoderFilter::has1xxheaders() { return parent_.state_.has_1xx_headers_ && !continued_1xx_headers_; } void ActiveStreamEncoderFilter::do1xxHeaders() { parent_.encode1xxHeaders(this, *parent_.filter_manager_callbacks_.informationalHeaders()); } void ActiveStreamEncoderFilter::doHeaders(bool end_stream) { parent_.encodeHeaders(this, *parent_.filter_manager_callbacks_.responseHeaders(), end_stream); } void ActiveStreamEncoderFilter::doData(bool end_stream) { parent_.encodeData(this, *parent_.buffered_response_data_, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ActiveStreamEncoderFilter::drainSavedResponseMetadata() { ASSERT(saved_response_metadata_ != nullptr); for (auto& metadata_map : *getSavedResponseMetadata()) { parent_.encodeMetadata(this, std::move(metadata_map)); } getSavedResponseMetadata()->clear(); } void ActiveStreamEncoderFilter::handleMetadataAfterHeadersCallback() { // If we drain accumulated metadata, the iteration must start with the current filter. const bool saved_state = iterate_from_current_filter_; iterate_from_current_filter_ = true; // If encodeHeaders() returns StopAllIteration, we should skip draining metadata, and wait // for doMetadata() to drain the metadata after iteration continues. if (!stoppedAll() && saved_response_metadata_ != nullptr && !getSavedResponseMetadata()->empty()) { drainSavedResponseMetadata(); } // Restores the original value of iterate_from_current_filter_. 
iterate_from_current_filter_ = saved_state; } void ActiveStreamEncoderFilter::doTrailers() { parent_.encodeTrailers(this, *parent_.filter_manager_callbacks_.responseTrailers()); } bool ActiveStreamEncoderFilter::hasTrailers() { return parent_.filter_manager_callbacks_.responseTrailers().has_value(); } void ActiveStreamEncoderFilter::addEncodedData(Buffer::Instance& data, bool streaming) { return parent_.addEncodedData(*this, data, streaming); } void ActiveStreamEncoderFilter::injectEncodedDataToFilterChain(Buffer::Instance& data, bool end_stream) { if (!headers_continued_) { headers_continued_ = true; doHeaders(false); } parent_.encodeData(this, data, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } ResponseTrailerMap& ActiveStreamEncoderFilter::addEncodedTrailers() { return parent_.addEncodedTrailers(); } void ActiveStreamEncoderFilter::addEncodedMetadata(MetadataMapPtr&& metadata_map_ptr) { return parent_.encodeMetadata(this, std::move(metadata_map_ptr)); } void ActiveStreamEncoderFilter::onEncoderFilterAboveWriteBufferHighWatermark() { ENVOY_STREAM_LOG(debug, "Disabling upstream stream due to filter callbacks.", parent_); parent_.callHighWatermarkCallbacks(); } void ActiveStreamEncoderFilter::onEncoderFilterBelowWriteBufferLowWatermark() { ENVOY_STREAM_LOG(debug, "Enabling upstream stream due to filter callbacks.", parent_); parent_.callLowWatermarkCallbacks(); } void ActiveStreamEncoderFilter::setEncoderBufferLimit(uint32_t limit) { parent_.setBufferLimit(limit); } uint32_t ActiveStreamEncoderFilter::encoderBufferLimit() { return parent_.buffer_limit_; } void ActiveStreamEncoderFilter::continueEncoding() { commonContinue(); } const Buffer::Instance* ActiveStreamEncoderFilter::encodingBuffer() { return parent_.buffered_response_data_.get(); } void ActiveStreamEncoderFilter::modifyEncodingBuffer( std::function<void(Buffer::Instance&)> callback) { ASSERT(parent_.state_.latest_data_encoding_filter_ == this); callback(*parent_.buffered_response_data_.get()); } void ActiveStreamEncoderFilter::sendLocalReply( Code code, absl::string_view body, std::function<void(ResponseHeaderMap& headers)> modify_headers, const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) { parent_.sendLocalReply(code, body, modify_headers, grpc_status, details); } Http1StreamEncoderOptionsOptRef ActiveStreamEncoderFilter::http1StreamEncoderOptions() { // TODO(mattklein123): At some point we might want to actually wrap this interface but for now // we give the filter direct access to the encoder options. return parent_.filter_manager_callbacks_.http1StreamEncoderOptions(); } void ActiveStreamEncoderFilter::responseDataTooLarge() { if (parent_.state_.encoder_filters_streaming_) { onEncoderFilterAboveWriteBufferHighWatermark(); } else { parent_.filter_manager_callbacks_.onResponseDataTooLarge(); // In this case, sendLocalReply will either send a response directly to the encoder, or // reset the stream. 
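    // Note: the encode side replies with 500 Internal Server Error below, whereas the analogous
    // requestDataTooLarge() path replies with 413 Payload Too Large, since this overflow happens
    // while buffering the upstream response rather than the downstream request.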
parent_.sendLocalReply( Http::Code::InternalServerError, CodeUtility::toString(Http::Code::InternalServerError), nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); } } void ActiveStreamEncoderFilter::responseDataDrained() { onEncoderFilterBelowWriteBufferLowWatermark(); } void ActiveStreamFilterBase::resetStream() { parent_.filter_manager_callbacks_.resetStream(); } uint64_t ActiveStreamFilterBase::streamId() const { return parent_.streamId(); } Buffer::BufferMemoryAccountSharedPtr ActiveStreamDecoderFilter::account() const { return parent_.account(); } void ActiveStreamDecoderFilter::setUpstreamOverrideHost(absl::string_view host) { parent_.upstream_override_host_.emplace(std::move(host)); } absl::optional<absl::string_view> ActiveStreamDecoderFilter::upstreamOverrideHost() const { return parent_.upstream_override_host_; } } // namespace Http } // namespace Envoy
#include "source/common/http/filter_manager.h" #include <functional> #include "envoy/http/header_map.h" #include "envoy/matcher/matcher.h" #include "source/common/common/enum_to_int.h" #include "source/common/common/scope_tracked_object_stack.h" #include "source/common/common/scope_tracker.h" #include "source/common/http/codes.h" #include "source/common/http/header_map_impl.h" #include "source/common/http/header_utility.h" #include "source/common/http/utility.h" #include "matching/data_impl.h" namespace Envoy { namespace Http { namespace { REGISTER_FACTORY(SkipActionFactory, Matcher::ActionFactory<Matching::HttpFilterActionContext>); template <class T> using FilterList = std::list<std::unique_ptr<T>>; // Shared helper for recording the latest filter used. template <class T> void recordLatestDataFilter(const typename FilterList<T>::iterator current_filter, T*& latest_filter, const FilterList<T>& filters) { // If this is the first time we're calling onData, just record the current filter. if (latest_filter == nullptr) { latest_filter = current_filter->get(); return; } // We want to keep this pointing at the latest filter in the filter list that has received the // onData callback. To do so, we compare the current latest with the *previous* filter. If they // match, then we must be processing a new filter for the first time. We omit this check if we're // the first filter, since the above check handles that case. // // We compare against the previous filter to avoid multiple filter iterations from resetting the // pointer: If we just set latest to current, then the first onData filter iteration would // correctly iterate over the filters and set latest, but on subsequent onData iterations // we'd start from the beginning again, potentially allowing filter N to modify the buffer even // though filter M > N was the filter that inserted data into the buffer. if (current_filter != filters.begin() && latest_filter == std::prev(current_filter)->get()) { latest_filter = current_filter->get(); } } } // namespace void ActiveStreamFilterBase::commonContinue() { // TODO(mattklein123): Raise an error if this is called during a callback. if (!canContinue()) { ENVOY_STREAM_LOG(trace, "cannot continue filter chain: filter={}", *this, static_cast<const void*>(this)); return; } // Set ScopeTrackerScopeState if there's no existing crash context. ScopeTrackedObjectStack encapsulated_object; absl::optional<ScopeTrackerScopeState> state; if (parent_.dispatcher_.trackedObjectStackIsEmpty()) { restoreContextOnContinue(encapsulated_object); state.emplace(&encapsulated_object, parent_.dispatcher_); } ENVOY_STREAM_LOG(trace, "continuing filter chain: filter={}", *this, static_cast<const void*>(this)); ASSERT(!canIterate(), "Attempting to continue iteration while the IterationState is already Continue"); // If iteration has stopped for all frame types, set iterate_from_current_filter_ to true so the // filter iteration starts with the current filter instead of the next one. if (stoppedAll()) { iterate_from_current_filter_ = true; } allowIteration(); // Only resume with do1xxHeaders() if we've actually seen 1xx headers. if (has1xxheaders()) { continued_1xx_headers_ = true; do1xxHeaders(); // If the response headers have not yet come in, don't continue on with // headers and body. doHeaders expects request headers to exist. if (!parent_.filter_manager_callbacks_.responseHeaders()) { return; } } // Make sure that we handle the zero byte data frame case. 
We make no effort to optimize this // case in terms of merging it into a header only request/response. This could be done in the // future. if (!headers_continued_) { headers_continued_ = true; doHeaders(complete() && !bufferedData() && !hasTrailers()); } doMetadata(); if (bufferedData()) { doData(complete() && !hasTrailers()); } if (hasTrailers()) { doTrailers(); } iterate_from_current_filter_ = false; } bool ActiveStreamFilterBase::commonHandleAfter1xxHeadersCallback(FilterHeadersStatus status) { ASSERT(parent_.state_.has_1xx_headers_); ASSERT(!continued_1xx_headers_); ASSERT(canIterate()); if (status == FilterHeadersStatus::StopIteration) { iteration_state_ = IterationState::StopSingleIteration; return false; } else { ASSERT(status == FilterHeadersStatus::Continue); continued_1xx_headers_ = true; return true; } } bool ActiveStreamFilterBase::commonHandleAfterHeadersCallback(FilterHeadersStatus status, bool& end_stream) { ASSERT(!headers_continued_); ASSERT(canIterate()); switch (status) { case FilterHeadersStatus::StopIteration: iteration_state_ = IterationState::StopSingleIteration; break; case FilterHeadersStatus::StopAllIterationAndBuffer: iteration_state_ = IterationState::StopAllBuffer; break; case FilterHeadersStatus::StopAllIterationAndWatermark: iteration_state_ = IterationState::StopAllWatermark; break; case FilterHeadersStatus::ContinueAndDontEndStream: end_stream = false; headers_continued_ = true; ENVOY_STREAM_LOG(debug, "converting to headers and body (body not available yet)", parent_); break; case FilterHeadersStatus::Continue: headers_continued_ = true; break; } handleMetadataAfterHeadersCallback(); if (stoppedAll() || status == FilterHeadersStatus::StopIteration) { return false; } else { return true; } } void ActiveStreamFilterBase::commonHandleBufferData(Buffer::Instance& provided_data) { // The way we do buffering is a little complicated which is why we have this common function // which is used for both encoding and decoding. When data first comes into our filter pipeline, // we send it through. Any filter can choose to stop iteration and buffer or not. If we then // continue iteration in the future, we use the buffered data. A future filter can stop and // buffer again. In this case, since we are already operating on buffered data, we don't // rebuffer, because we assume the filter has modified the buffer as it wishes in place. if (bufferedData().get() != &provided_data) { if (!bufferedData()) { bufferedData() = createBuffer(); } bufferedData()->move(provided_data); } } bool ActiveStreamFilterBase::commonHandleAfterDataCallback(FilterDataStatus status, Buffer::Instance& provided_data, bool& buffer_was_streaming) { if (status == FilterDataStatus::Continue) { if (iteration_state_ == IterationState::StopSingleIteration) { commonHandleBufferData(provided_data); commonContinue(); return false; } else { ASSERT(headers_continued_); } } else { iteration_state_ = IterationState::StopSingleIteration; if (status == FilterDataStatus::StopIterationAndBuffer || status == FilterDataStatus::StopIterationAndWatermark) { buffer_was_streaming = status == FilterDataStatus::StopIterationAndWatermark; commonHandleBufferData(provided_data); } else if (complete() && !hasTrailers() && !bufferedData() && // If the stream is destroyed, no need to handle the data buffer or trailers. // This can occur if the filter calls sendLocalReply. 
!parent_.state_.destroyed_) { // If this filter is doing StopIterationNoBuffer and this stream is terminated with a zero // byte data frame, we need to create an empty buffer to make sure that when commonContinue // is called, the pipeline resumes with an empty data frame with end_stream = true ASSERT(end_stream_); bufferedData() = createBuffer(); } return false; } return true; } bool ActiveStreamFilterBase::commonHandleAfterTrailersCallback(FilterTrailersStatus status) { if (status == FilterTrailersStatus::Continue) { if (iteration_state_ == IterationState::StopSingleIteration) { commonContinue(); return false; } else { ASSERT(headers_continued_); } } else if (status == FilterTrailersStatus::StopIteration) { if (canIterate()) { iteration_state_ = IterationState::StopSingleIteration; } return false; } return true; } const Network::Connection* ActiveStreamFilterBase::connection() { return parent_.connection(); } Event::Dispatcher& ActiveStreamFilterBase::dispatcher() { return parent_.dispatcher_; } StreamInfo::StreamInfo& ActiveStreamFilterBase::streamInfo() { return parent_.stream_info_; } Tracing::Span& ActiveStreamFilterBase::activeSpan() { return parent_.filter_manager_callbacks_.activeSpan(); } const ScopeTrackedObject& ActiveStreamFilterBase::scope() { return parent_.filter_manager_callbacks_.scope(); } void ActiveStreamFilterBase::restoreContextOnContinue( ScopeTrackedObjectStack& tracked_object_stack) { parent_.contextOnContinue(tracked_object_stack); } Tracing::Config& ActiveStreamFilterBase::tracingConfig() { return parent_.filter_manager_callbacks_.tracingConfig(); } Upstream::ClusterInfoConstSharedPtr ActiveStreamFilterBase::clusterInfo() { return parent_.filter_manager_callbacks_.clusterInfo(); } Router::RouteConstSharedPtr ActiveStreamFilterBase::route() { return route(nullptr); } Router::RouteConstSharedPtr ActiveStreamFilterBase::route(const Router::RouteCallback& cb) { return parent_.filter_manager_callbacks_.route(cb); } void ActiveStreamFilterBase::setRoute(Router::RouteConstSharedPtr route) { parent_.filter_manager_callbacks_.setRoute(std::move(route)); } void ActiveStreamFilterBase::clearRouteCache() { parent_.filter_manager_callbacks_.clearRouteCache(); } void ActiveStreamFilterBase::resetIdleTimer() { parent_.filter_manager_callbacks_.resetIdleTimer(); } void FilterMatchState::evaluateMatchTreeWithNewData(MatchDataUpdateFunc update_func) { if (match_tree_evaluated_ || !matching_data_) { return; } update_func(*matching_data_); const auto match_result = Matcher::evaluateMatch<HttpMatchingData>(*match_tree_, *matching_data_); match_tree_evaluated_ = match_result.match_state_ == Matcher::MatchState::MatchComplete; if (match_tree_evaluated_ && match_result.result_) { const auto result = match_result.result_(); if (SkipAction().typeUrl() == result->typeUrl()) { skip_filter_ = true; } else { filter_->onMatchCallback(*result); } } } bool ActiveStreamDecoderFilter::canContinue() { // It is possible for the connection manager to respond directly to a request even while // a filter is trying to continue. If a response has already happened, we should not // continue to further filters. A concrete example of this is a filter buffering data, the // last data frame comes in and the filter continues, but the final buffering takes the stream // over the high watermark such that a 413 is returned. 
return !parent_.state_.local_complete_; } bool ActiveStreamEncoderFilter::canContinue() { // As with ActiveStreamDecoderFilter::canContinue() make sure we do not // continue if a local reply has been sent. return !parent_.state_.remote_encode_complete_; } Buffer::InstancePtr ActiveStreamDecoderFilter::createBuffer() { auto buffer = dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { this->requestDataDrained(); }, [this]() -> void { this->requestDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); buffer->setWatermarks(parent_.buffer_limit_); return buffer; } Buffer::InstancePtr& ActiveStreamDecoderFilter::bufferedData() { return parent_.buffered_request_data_; } bool ActiveStreamDecoderFilter::complete() { return parent_.state_.remote_decode_complete_; } void ActiveStreamDecoderFilter::doHeaders(bool end_stream) { parent_.decodeHeaders(this, *parent_.filter_manager_callbacks_.requestHeaders(), end_stream); } void ActiveStreamDecoderFilter::doData(bool end_stream) { parent_.decodeData(this, *parent_.buffered_request_data_, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ActiveStreamDecoderFilter::doTrailers() { parent_.decodeTrailers(this, *parent_.filter_manager_callbacks_.requestTrailers()); } bool ActiveStreamDecoderFilter::hasTrailers() { return parent_.filter_manager_callbacks_.requestTrailers().has_value(); } void ActiveStreamDecoderFilter::drainSavedRequestMetadata() { ASSERT(saved_request_metadata_ != nullptr); for (auto& metadata_map : *getSavedRequestMetadata()) { parent_.decodeMetadata(this, *metadata_map); } getSavedRequestMetadata()->clear(); } void ActiveStreamDecoderFilter::handleMetadataAfterHeadersCallback() { // If we drain accumulated metadata, the iteration must start with the current filter. const bool saved_state = iterate_from_current_filter_; iterate_from_current_filter_ = true; // If decodeHeaders() returns StopAllIteration, we should skip draining metadata, and wait // for doMetadata() to drain the metadata after iteration continues. if (!stoppedAll() && saved_request_metadata_ != nullptr && !getSavedRequestMetadata()->empty()) { drainSavedRequestMetadata(); } // Restores the original value of iterate_from_current_filter_. 
iterate_from_current_filter_ = saved_state; } RequestTrailerMap& ActiveStreamDecoderFilter::addDecodedTrailers() { return parent_.addDecodedTrailers(); } void ActiveStreamDecoderFilter::addDecodedData(Buffer::Instance& data, bool streaming) { parent_.addDecodedData(*this, data, streaming); } MetadataMapVector& ActiveStreamDecoderFilter::addDecodedMetadata() { return parent_.addDecodedMetadata(); } void ActiveStreamDecoderFilter::injectDecodedDataToFilterChain(Buffer::Instance& data, bool end_stream) { if (!headers_continued_) { headers_continued_ = true; doHeaders(false); } parent_.decodeData(this, data, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ActiveStreamDecoderFilter::continueDecoding() { commonContinue(); } const Buffer::Instance* ActiveStreamDecoderFilter::decodingBuffer() { return parent_.buffered_request_data_.get(); } void ActiveStreamDecoderFilter::modifyDecodingBuffer( std::function<void(Buffer::Instance&)> callback) { ASSERT(parent_.state_.latest_data_decoding_filter_ == this); callback(*parent_.buffered_request_data_.get()); } void ActiveStreamDecoderFilter::sendLocalReply( Code code, absl::string_view body, std::function<void(ResponseHeaderMap& headers)> modify_headers, const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) { parent_.sendLocalReply(code, body, modify_headers, grpc_status, details); } void ActiveStreamDecoderFilter::encode1xxHeaders(ResponseHeaderMapPtr&& headers) { // If Envoy is not configured to proxy 100-Continue responses, swallow the 100 Continue // here. This avoids the potential situation where Envoy strips Expect: 100-Continue and sends a // 100-Continue, then proxies a duplicate 100 Continue from upstream. if (parent_.proxy_100_continue_) { parent_.filter_manager_callbacks_.setInformationalHeaders(std::move(headers)); parent_.encode1xxHeaders(nullptr, *parent_.filter_manager_callbacks_.informationalHeaders()); } } ResponseHeaderMapOptRef ActiveStreamDecoderFilter::informationalHeaders() const { return parent_.filter_manager_callbacks_.informationalHeaders(); } void ActiveStreamDecoderFilter::encodeHeaders(ResponseHeaderMapPtr&& headers, bool end_stream, absl::string_view details) { parent_.stream_info_.setResponseCodeDetails(details); parent_.filter_manager_callbacks_.setResponseHeaders(std::move(headers)); parent_.encodeHeaders(nullptr, *parent_.filter_manager_callbacks_.responseHeaders(), end_stream); } ResponseHeaderMapOptRef ActiveStreamDecoderFilter::responseHeaders() const { return parent_.filter_manager_callbacks_.responseHeaders(); } void ActiveStreamDecoderFilter::encodeData(Buffer::Instance& data, bool end_stream) { parent_.encodeData(nullptr, data, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ActiveStreamDecoderFilter::encodeTrailers(ResponseTrailerMapPtr&& trailers) { parent_.filter_manager_callbacks_.setResponseTrailers(std::move(trailers)); parent_.encodeTrailers(nullptr, *parent_.filter_manager_callbacks_.responseTrailers()); } ResponseTrailerMapOptRef ActiveStreamDecoderFilter::responseTrailers() const { return parent_.filter_manager_callbacks_.responseTrailers(); } void ActiveStreamDecoderFilter::encodeMetadata(MetadataMapPtr&& metadata_map_ptr) { parent_.encodeMetadata(nullptr, std::move(metadata_map_ptr)); } void ActiveStreamDecoderFilter::onDecoderFilterAboveWriteBufferHighWatermark() { parent_.filter_manager_callbacks_.onDecoderFilterAboveWriteBufferHighWatermark(); } void ActiveStreamDecoderFilter::requestDataTooLarge() 
{ ENVOY_STREAM_LOG(debug, "request data too large watermark exceeded", parent_); if (parent_.state_.decoder_filters_streaming_) { onDecoderFilterAboveWriteBufferHighWatermark(); } else { parent_.filter_manager_callbacks_.onRequestDataTooLarge(); sendLocalReply(Code::PayloadTooLarge, CodeUtility::toString(Code::PayloadTooLarge), nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().RequestPayloadTooLarge); } } void FilterManager::addStreamDecoderFilterWorker(StreamDecoderFilterSharedPtr filter, FilterMatchStateSharedPtr match_state, bool dual_filter) { ActiveStreamDecoderFilterPtr wrapper( new ActiveStreamDecoderFilter(*this, filter, match_state, dual_filter)); // If we're a dual handling filter, have the encoding wrapper be the only thing registering itself // as the handling filter. if (match_state) { match_state->filter_ = filter.get(); } filter->setDecoderFilterCallbacks(*wrapper); // Note: configured decoder filters are appended to decoder_filters_. // This means that if filters are configured in the following order (assume all three filters are // both decoder/encoder filters): // http_filters: // - A // - B // - C // The decoder filter chain will iterate through filters A, B, C. LinkedList::moveIntoListBack(std::move(wrapper), decoder_filters_); } void FilterManager::addStreamEncoderFilterWorker(StreamEncoderFilterSharedPtr filter, FilterMatchStateSharedPtr match_state, bool dual_filter) { ActiveStreamEncoderFilterPtr wrapper( new ActiveStreamEncoderFilter(*this, filter, match_state, dual_filter)); if (match_state) { match_state->filter_ = filter.get(); } filter->setEncoderFilterCallbacks(*wrapper); // Note: configured encoder filters are prepended to encoder_filters_. // This means that if filters are configured in the following order (assume all three filters are // both decoder/encoder filters): // http_filters: // - A // - B // - C // The encoder filter chain will iterate through filters C, B, A. LinkedList::moveIntoList(std::move(wrapper), encoder_filters_); } void FilterManager::addAccessLogHandler(AccessLog::InstanceSharedPtr handler) { access_log_handlers_.push_back(handler); } void FilterManager::maybeContinueDecoding( const std::list<ActiveStreamDecoderFilterPtr>::iterator& continue_data_entry) { if (continue_data_entry != decoder_filters_.end()) { // We use the continueDecoding() code since it will correctly handle not calling // decodeHeaders() again. Fake setting StopSingleIteration since the continueDecoding() code // expects it. ASSERT(buffered_request_data_); (*continue_data_entry)->iteration_state_ = ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueDecoding(); } } void FilterManager::decodeHeaders(ActiveStreamDecoderFilter* filter, RequestHeaderMap& headers, bool end_stream) { // Headers filter iteration should always start with the next filter if available. 
std::list<ActiveStreamDecoderFilterPtr>::iterator entry = commonDecodePrefix(filter, FilterIterationStartState::AlwaysStartFromNext); std::list<ActiveStreamDecoderFilterPtr>::iterator continue_data_entry = decoder_filters_.end(); for (; entry != decoder_filters_.end(); entry++) { (*entry)->maybeEvaluateMatchTreeWithNewData( [&](auto& matching_data) { matching_data.onRequestHeaders(headers); }); if ((*entry)->skipFilter()) { continue; } ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeHeaders)); state_.filter_call_state_ |= FilterCallState::DecodeHeaders; (*entry)->end_stream_ = (end_stream && continue_data_entry == decoder_filters_.end()); FilterHeadersStatus status = (*entry)->decodeHeaders(headers, (*entry)->end_stream_); if (state_.decoder_filter_chain_aborted_) { ENVOY_STREAM_LOG(trace, "decodeHeaders filter iteration aborted due to local reply: filter={}", *this, static_cast<const void*>((*entry).get())); status = FilterHeadersStatus::StopIteration; } ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_), "Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from " "decodeHeaders when end_stream is already false"); state_.filter_call_state_ &= ~FilterCallState::DecodeHeaders; ENVOY_STREAM_LOG(trace, "decode headers called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); (*entry)->decode_headers_called_ = true; const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback(status, end_stream); ENVOY_BUG(!continue_iteration || !state_.local_complete_, "Filter did not return StopAll or StopIteration after sending a local reply."); // If this filter ended the stream, decodeComplete() should be called for it. if ((*entry)->end_stream_) { (*entry)->handle_->decodeComplete(); } // Skip processing metadata after sending local reply if (state_.local_complete_ && std::next(entry) != decoder_filters_.end()) { maybeContinueDecoding(continue_data_entry); return; } const bool new_metadata_added = processNewlyAddedMetadata(); // If end_stream is set in headers, and a filter adds new metadata, we need to delay end_stream // in headers by inserting an empty data frame with end_stream set. The empty data frame is sent // after the new metadata. if ((*entry)->end_stream_ && new_metadata_added && !buffered_request_data_) { Buffer::OwnedImpl empty_data(""); ENVOY_STREAM_LOG( trace, "inserting an empty data frame for end_stream due metadata being added.", *this); // Metadata frame doesn't carry end of stream bit. We need an empty data frame to end the // stream. addDecodedData(*((*entry).get()), empty_data, true); } if (!continue_iteration && std::next(entry) != decoder_filters_.end()) { // Stop iteration IFF this is not the last filter. If it is the last filter, continue with // processing since we need to handle the case where a terminal filter wants to buffer, but // a previous filter has added body. maybeContinueDecoding(continue_data_entry); return; } // Here we handle the case where we have a header only request, but a filter adds a body // to it. We need to not raise end_stream = true to further filters during inline iteration. 
if (end_stream && buffered_request_data_ && continue_data_entry == decoder_filters_.end()) { continue_data_entry = entry; } } maybeContinueDecoding(continue_data_entry); if (end_stream) { disarmRequestTimeout(); } } void FilterManager::decodeData(ActiveStreamDecoderFilter* filter, Buffer::Instance& data, bool end_stream, FilterIterationStartState filter_iteration_start_state) { ScopeTrackerScopeState scope(&*this, dispatcher_); filter_manager_callbacks_.resetIdleTimer(); const bool fix_added_trailers = Runtime::runtimeFeatureEnabled("envoy.reloadable_features.fix_added_trailers"); // If a response is complete or a reset has been sent, filters do not care about further body // data. Just drop it. if (state_.local_complete_) { return; } auto trailers_added_entry = decoder_filters_.end(); const bool trailers_exists_at_start = filter_manager_callbacks_.requestTrailers().has_value(); // Filter iteration may start at the current filter. std::list<ActiveStreamDecoderFilterPtr>::iterator entry = commonDecodePrefix(filter, filter_iteration_start_state); for (; entry != decoder_filters_.end(); entry++) { if ((*entry)->skipFilter()) { continue; } // If the filter pointed by entry has stopped for all frame types, return now. if (handleDataIfStopAll(**entry, data, state_.decoder_filters_streaming_)) { return; } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. // // In following case, ActiveStreamFilterBase::commonContinue() could be called recursively and // its doData() is called with wrong data. // // There are 3 decode filters and "wrapper" refers to ActiveStreamFilter object. // // filter0->decodeHeaders(_, true) // return STOP // filter0->continueDecoding() // wrapper0->commonContinue() // wrapper0->decodeHeaders(_, _, true) // filter1->decodeHeaders(_, true) // filter1->addDecodeData() // return CONTINUE // filter2->decodeHeaders(_, false) // return CONTINUE // wrapper1->commonContinue() // Detects data is added. // wrapper1->doData() // wrapper1->decodeData() // filter2->decodeData(_, true) // return CONTINUE // wrapper0->doData() // This should not be called // wrapper0->decodeData() // filter1->decodeData(_, true) // It will cause assertions. // // One way to solve this problem is to mark end_stream_ for each filter. // If a filter is already marked as end_stream_ when decodeData() is called, bails out the // whole function. If just skip the filter, the codes after the loop will be called with // wrong data. For encodeData, the response_encoder->encode() will be called. if ((*entry)->end_stream_) { return; } ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeData)); // We check the request_trailers_ pointer here in case addDecodedTrailers // is called in decodeData during a previous filter invocation, at which point we communicate to // the current and future filters that the stream has not yet ended. 
if (end_stream) { state_.filter_call_state_ |= FilterCallState::LastDataFrame; } recordLatestDataFilter(entry, state_.latest_data_decoding_filter_, decoder_filters_); state_.filter_call_state_ |= FilterCallState::DecodeData; (*entry)->end_stream_ = end_stream && !filter_manager_callbacks_.requestTrailers(); FilterDataStatus status = (*entry)->handle_->decodeData(data, (*entry)->end_stream_); if ((*entry)->end_stream_) { (*entry)->handle_->decodeComplete(); } state_.filter_call_state_ &= ~FilterCallState::DecodeData; if (end_stream) { state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; } ENVOY_STREAM_LOG(trace, "decode data called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); if (state_.decoder_filter_chain_aborted_) { ENVOY_STREAM_LOG(trace, "decodeData filter iteration aborted due to local reply: filter={}", *this, static_cast<const void*>((*entry).get())); return; } processNewlyAddedMetadata(); if (!trailers_exists_at_start && filter_manager_callbacks_.requestTrailers() && trailers_added_entry == decoder_filters_.end()) { if (fix_added_trailers) { end_stream = false; } trailers_added_entry = entry; } if (!(*entry)->commonHandleAfterDataCallback(status, data, state_.decoder_filters_streaming_) && std::next(entry) != decoder_filters_.end()) { // Stop iteration IFF this is not the last filter. If it is the last filter, continue with // processing since we need to handle the case where a terminal filter wants to buffer, but // a previous filter has added trailers. if (fix_added_trailers) { break; } else { return; } } } // If trailers were adding during decodeData we need to trigger decodeTrailers in order // to allow filters to process the trailers. if (trailers_added_entry != decoder_filters_.end()) { decodeTrailers(trailers_added_entry->get(), *filter_manager_callbacks_.requestTrailers()); } if (end_stream) { disarmRequestTimeout(); } } RequestTrailerMap& FilterManager::addDecodedTrailers() { // Trailers can only be added during the last data frame (i.e. end_stream = true). ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame); filter_manager_callbacks_.setRequestTrailers(RequestTrailerMapImpl::create()); return *filter_manager_callbacks_.requestTrailers(); } void FilterManager::addDecodedData(ActiveStreamDecoderFilter& filter, Buffer::Instance& data, bool streaming) { if (state_.filter_call_state_ == 0 || (state_.filter_call_state_ & FilterCallState::DecodeHeaders) || (state_.filter_call_state_ & FilterCallState::DecodeData) || ((state_.filter_call_state_ & FilterCallState::DecodeTrailers) && !filter.canIterate())) { // Make sure if this triggers watermarks, the correct action is taken. state_.decoder_filters_streaming_ = streaming; // If no call is happening or we are in the decode headers/data callback, buffer the data. // Inline processing happens in the decodeHeaders() callback if necessary. filter.commonHandleBufferData(data); } else if (state_.filter_call_state_ & FilterCallState::DecodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. 
decodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); } else { IS_ENVOY_BUG("Invalid request data"); sendLocalReply(Http::Code::BadGateway, "Filter error", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().FilterAddedInvalidRequestData); } } MetadataMapVector& FilterManager::addDecodedMetadata() { return *getRequestMetadataMapVector(); } void FilterManager::decodeTrailers(ActiveStreamDecoderFilter* filter, RequestTrailerMap& trailers) { // See decodeData() above for why we check local_complete_ here. if (state_.local_complete_) { return; } // Filter iteration may start at the current filter. std::list<ActiveStreamDecoderFilterPtr>::iterator entry = commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent); for (; entry != decoder_filters_.end(); entry++) { (*entry)->maybeEvaluateMatchTreeWithNewData( [&](auto& matching_data) { matching_data.onRequestTrailers(trailers); }); if ((*entry)->skipFilter()) { continue; } // If the filter pointed by entry has stopped for all frame type, return now. if ((*entry)->stoppedAll()) { return; } ASSERT(!(state_.filter_call_state_ & FilterCallState::DecodeTrailers)); state_.filter_call_state_ |= FilterCallState::DecodeTrailers; FilterTrailersStatus status = (*entry)->handle_->decodeTrailers(trailers); (*entry)->handle_->decodeComplete(); (*entry)->end_stream_ = true; state_.filter_call_state_ &= ~FilterCallState::DecodeTrailers; ENVOY_STREAM_LOG(trace, "decode trailers called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); if (state_.decoder_filter_chain_aborted_) { ENVOY_STREAM_LOG(trace, "decodeTrailers filter iteration aborted due to local reply: filter={}", *this, static_cast<const void*>((*entry).get())); status = FilterTrailersStatus::StopIteration; } processNewlyAddedMetadata(); if (!(*entry)->commonHandleAfterTrailersCallback(status)) { return; } } disarmRequestTimeout(); } void FilterManager::decodeMetadata(ActiveStreamDecoderFilter* filter, MetadataMap& metadata_map) { // Filter iteration may start at the current filter. std::list<ActiveStreamDecoderFilterPtr>::iterator entry = commonDecodePrefix(filter, FilterIterationStartState::CanStartFromCurrent); for (; entry != decoder_filters_.end(); entry++) { if ((*entry)->skipFilter()) { continue; } // If the filter pointed by entry has stopped for all frame type, stores metadata and returns. // If the filter pointed by entry hasn't returned from decodeHeaders, stores newly added // metadata in case decodeHeaders returns StopAllIteration. The latter can happen when headers // callbacks generate new metadata. 
if (!(*entry)->decode_headers_called_ || (*entry)->stoppedAll()) { Http::MetadataMapPtr metadata_map_ptr = std::make_unique<Http::MetadataMap>(metadata_map); (*entry)->getSavedRequestMetadata()->emplace_back(std::move(metadata_map_ptr)); return; } FilterMetadataStatus status = (*entry)->handle_->decodeMetadata(metadata_map); ENVOY_STREAM_LOG(trace, "decode metadata called: filter={} status={}, metadata: {}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status), metadata_map); } } void FilterManager::maybeEndDecode(bool end_stream) { ASSERT(!state_.remote_decode_complete_); state_.remote_decode_complete_ = end_stream; if (end_stream) { stream_info_.downstreamTiming().onLastDownstreamRxByteReceived(dispatcher().timeSource()); ENVOY_STREAM_LOG(debug, "request end stream", *this); } } void FilterManager::disarmRequestTimeout() { filter_manager_callbacks_.disarmRequestTimeout(); } std::list<ActiveStreamEncoderFilterPtr>::iterator FilterManager::commonEncodePrefix(ActiveStreamEncoderFilter* filter, bool end_stream, FilterIterationStartState filter_iteration_start_state) { // Only do base state setting on the initial call. Subsequent calls for filtering do not touch // the base state. if (filter == nullptr) { ASSERT(!state_.local_complete_); state_.local_complete_ = end_stream; return encoder_filters_.begin(); } if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent && (*(filter->entry()))->iterate_from_current_filter_) { // The filter iteration has been stopped for all frame types, and now the iteration continues. // The current filter's encoding callback has not be called. Call it now. return filter->entry(); } return std::next(filter->entry()); } std::list<ActiveStreamDecoderFilterPtr>::iterator FilterManager::commonDecodePrefix(ActiveStreamDecoderFilter* filter, FilterIterationStartState filter_iteration_start_state) { if (!filter) { return decoder_filters_.begin(); } if (filter_iteration_start_state == FilterIterationStartState::CanStartFromCurrent && (*(filter->entry()))->iterate_from_current_filter_) { // The filter iteration has been stopped for all frame types, and now the iteration continues. // The current filter's callback function has not been called. Call it now. return filter->entry(); } return std::next(filter->entry()); } void FilterManager::onLocalReply(StreamFilterBase::LocalReplyData& data) { state_.under_on_local_reply_ = true; filter_manager_callbacks_.onLocalReply(data.code_); for (auto entry : filters_) { if (entry->onLocalReply(data) == LocalErrorStatus::ContinueAndResetStream) { data.reset_imminent_ = true; } } state_.under_on_local_reply_ = false; } void FilterManager::sendLocalReply( Code code, absl::string_view body, const std::function<void(ResponseHeaderMap& headers)>& modify_headers, const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) { ASSERT(!state_.under_on_local_reply_); const bool is_head_request = state_.is_head_request_; const bool is_grpc_request = state_.is_grpc_request_; // Stop filter chain iteration if local reply was sent while filter decoding or encoding callbacks // are running. 
if (state_.filter_call_state_ & (FilterCallState::DecodeHeaders | FilterCallState::DecodeData | FilterCallState::DecodeTrailers)) { state_.decoder_filter_chain_aborted_ = true; } else if (state_.filter_call_state_ & (FilterCallState::EncodeHeaders | FilterCallState::EncodeData | FilterCallState::EncodeTrailers)) { state_.encoder_filter_chain_aborted_ = true; } stream_info_.setResponseCodeDetails(details); StreamFilterBase::LocalReplyData data{code, details, false}; FilterManager::onLocalReply(data); if (data.reset_imminent_) { ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. onLocalReply requested reset.", *this, details); filter_manager_callbacks_.resetStream(); return; } if (!filter_manager_callbacks_.responseHeaders().has_value()) { // If the response has not started at all, send the response through the filter chain. sendLocalReplyViaFilterChain(is_grpc_request, code, body, modify_headers, is_head_request, grpc_status, details); } else if (!state_.non_100_response_headers_encoded_) { ENVOY_STREAM_LOG(debug, "Sending local reply with details {} directly to the encoder", *this, details); // In this case, at least the header and possibly the body has started // processing through the filter chain, but no non-informational headers // have been sent downstream. To ensure that filters don't get their // state machine screwed up, bypass the filter chain and send the local // reply directly to the codec. // sendDirectLocalReply(code, body, modify_headers, state_.is_head_request_, grpc_status); } else { // If we land in this branch, response headers have already been sent to the client. // All we can do at this point is reset the stream. ENVOY_STREAM_LOG(debug, "Resetting stream due to {}. Prior headers have already been sent", *this, details); // TODO(snowp): This means we increment the tx_reset stat which we weren't doing previously. // Intended? filter_manager_callbacks_.resetStream(); } } void FilterManager::sendLocalReplyViaFilterChain( bool is_grpc_request, Code code, absl::string_view body, const std::function<void(ResponseHeaderMap& headers)>& modify_headers, bool is_head_request, const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) { ENVOY_STREAM_LOG(debug, "Sending local reply with details {}", *this, details); ASSERT(!filter_manager_callbacks_.responseHeaders().has_value()); // For early error handling, do a best-effort attempt to create a filter chain // to ensure access logging. If the filter chain already exists this will be // a no-op. createFilterChain(); Utility::sendLocalReply( state_.destroyed_, Utility::EncodeFunctions{ [this, modify_headers](ResponseHeaderMap& headers) -> void { if (streamInfo().route() && streamInfo().route()->routeEntry()) { streamInfo().route()->routeEntry()->finalizeResponseHeaders(headers, streamInfo()); } if (modify_headers) { modify_headers(headers); } }, [this](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { // TODO(snowp): This &get() business isn't nice, rework LocalReply and others to accept // opt refs. local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().ptr(), response_headers, stream_info_, code, body, content_type); }, [this, modify_headers](ResponseHeaderMapPtr&& headers, bool end_stream) -> void { filter_manager_callbacks_.setResponseHeaders(std::move(headers)); // TODO: Start encoding from the last decoder filter that saw the // request instead. 
encodeHeaders(nullptr, filter_manager_callbacks_.responseHeaders().ref(), end_stream); }, [this](Buffer::Instance& data, bool end_stream) -> void { // TODO: Start encoding from the last decoder filter that saw the // request instead. encodeData(nullptr, data, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); }}, Utility::LocalReplyData{is_grpc_request, code, body, grpc_status, is_head_request}); } void FilterManager::sendDirectLocalReply( Code code, absl::string_view body, const std::function<void(ResponseHeaderMap&)>& modify_headers, bool is_head_request, const absl::optional<Grpc::Status::GrpcStatus> grpc_status) { // Make sure we won't end up with nested watermark calls from the body buffer. state_.encoder_filters_streaming_ = true; Http::Utility::sendLocalReply( state_.destroyed_, Utility::EncodeFunctions{ [this, modify_headers](ResponseHeaderMap& headers) -> void { if (streamInfo().route() && streamInfo().route()->routeEntry()) { streamInfo().route()->routeEntry()->finalizeResponseHeaders(headers, streamInfo()); } if (modify_headers) { modify_headers(headers); } }, [&](ResponseHeaderMap& response_headers, Code& code, std::string& body, absl::string_view& content_type) -> void { local_reply_.rewrite(filter_manager_callbacks_.requestHeaders().ptr(), response_headers, stream_info_, code, body, content_type); }, [&](ResponseHeaderMapPtr&& response_headers, bool end_stream) -> void { // Move the response headers into the FilterManager to make sure they're visible to // access logs. filter_manager_callbacks_.setResponseHeaders(std::move(response_headers)); state_.non_100_response_headers_encoded_ = true; filter_manager_callbacks_.encodeHeaders(*filter_manager_callbacks_.responseHeaders(), end_stream); if (state_.saw_downstream_reset_) { return; } maybeEndEncode(end_stream); }, [&](Buffer::Instance& data, bool end_stream) -> void { filter_manager_callbacks_.encodeData(data, end_stream); if (state_.saw_downstream_reset_) { return; } maybeEndEncode(end_stream); }}, Utility::LocalReplyData{state_.is_grpc_request_, code, body, grpc_status, is_head_request}); } void FilterManager::encode1xxHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers) { filter_manager_callbacks_.resetIdleTimer(); ASSERT(proxy_100_continue_); // The caller must guarantee that encode1xxHeaders() is invoked at most once. ASSERT(!state_.has_1xx_headers_ || filter != nullptr); // Make sure commonContinue continues encode1xxHeaders. state_.has_1xx_headers_ = true; // Similar to the block in encodeHeaders, run encode1xxHeaders on each // filter. This is simpler than that case because 100 continue implies no // end-stream, and because there are normal headers coming there's no need for // complex continuation logic. // 100-continue filter iteration should always start with the next filter if available. 
std::list<ActiveStreamEncoderFilterPtr>::iterator entry = commonEncodePrefix(filter, false, FilterIterationStartState::AlwaysStartFromNext); for (; entry != encoder_filters_.end(); entry++) { if ((*entry)->skipFilter()) { continue; } ASSERT(!(state_.filter_call_state_ & FilterCallState::Encode1xxHeaders)); state_.filter_call_state_ |= FilterCallState::Encode1xxHeaders; FilterHeadersStatus status = (*entry)->handle_->encode1xxHeaders(headers); state_.filter_call_state_ &= ~FilterCallState::Encode1xxHeaders; ENVOY_STREAM_LOG(trace, "encode 1xx continue headers called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); if (!(*entry)->commonHandleAfter1xxHeadersCallback(status)) { return; } } filter_manager_callbacks_.encode1xxHeaders(headers); } void FilterManager::maybeContinueEncoding( const std::list<ActiveStreamEncoderFilterPtr>::iterator& continue_data_entry) { if (continue_data_entry != encoder_filters_.end()) { // We use the continueEncoding() code since it will correctly handle not calling // encodeHeaders() again. Fake setting StopSingleIteration since the continueEncoding() code // expects it. ASSERT(buffered_response_data_); (*continue_data_entry)->iteration_state_ = ActiveStreamFilterBase::IterationState::StopSingleIteration; (*continue_data_entry)->continueEncoding(); } } void FilterManager::encodeHeaders(ActiveStreamEncoderFilter* filter, ResponseHeaderMap& headers, bool end_stream) { // See encodeHeaders() comments in include/envoy/http/filter.h for why the 1xx precondition holds. ASSERT(!CodeUtility::is1xx(Utility::getResponseStatus(headers)) || Utility::getResponseStatus(headers) == enumToInt(Http::Code::SwitchingProtocols)); filter_manager_callbacks_.resetIdleTimer(); disarmRequestTimeout(); // Headers filter iteration should always start with the next filter if available. std::list<ActiveStreamEncoderFilterPtr>::iterator entry = commonEncodePrefix(filter, end_stream, FilterIterationStartState::AlwaysStartFromNext); std::list<ActiveStreamEncoderFilterPtr>::iterator continue_data_entry = encoder_filters_.end(); for (; entry != encoder_filters_.end(); entry++) { (*entry)->maybeEvaluateMatchTreeWithNewData( [&headers](auto& matching_data) { matching_data.onResponseHeaders(headers); }); if ((*entry)->skipFilter()) { continue; } ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeHeaders)); state_.filter_call_state_ |= FilterCallState::EncodeHeaders; (*entry)->end_stream_ = (end_stream && continue_data_entry == encoder_filters_.end()); FilterHeadersStatus status = (*entry)->handle_->encodeHeaders(headers, (*entry)->end_stream_); if (state_.encoder_filter_chain_aborted_) { ENVOY_STREAM_LOG(trace, "encodeHeaders filter iteration aborted due to local reply: filter={}", *this, static_cast<const void*>((*entry).get())); status = FilterHeadersStatus::StopIteration; } ASSERT(!(status == FilterHeadersStatus::ContinueAndDontEndStream && !(*entry)->end_stream_), "Filters should not return FilterHeadersStatus::ContinueAndDontEndStream from " "encodeHeaders when end_stream is already false"); state_.filter_call_state_ &= ~FilterCallState::EncodeHeaders; ENVOY_STREAM_LOG(trace, "encode headers called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); (*entry)->encode_headers_called_ = true; const auto continue_iteration = (*entry)->commonHandleAfterHeadersCallback(status, end_stream); // If this filter ended the stream, encodeComplete() should be called for it. 
if ((*entry)->end_stream_) { (*entry)->handle_->encodeComplete(); } if (!continue_iteration) { if (!(*entry)->end_stream_) { maybeContinueEncoding(continue_data_entry); } return; } // Here we handle the case where we have a header only response, but a filter adds a body // to it. We need to not raise end_stream = true to further filters during inline iteration. if (end_stream && buffered_response_data_ && continue_data_entry == encoder_filters_.end()) { continue_data_entry = entry; } } // Check if the filter chain above did not remove critical headers or set malformed header values. // We could do this at the codec in order to prevent other places than the filter chain from // removing critical headers, but it will come with the implementation complexity. // See the previous attempt (#15658) for detail, and for now we choose to protect only against // filter chains. const auto status = HeaderUtility::checkRequiredResponseHeaders(headers); if (!status.ok()) { // If the check failed, then we reply with BadGateway, and stop the further processing. sendLocalReply( Http::Code::BadGateway, status.message(), nullptr, absl::nullopt, absl::StrCat(StreamInfo::ResponseCodeDetails::get().FilterRemovedRequiredResponseHeaders, "{", StringUtil::replaceAllEmptySpace(status.message()), "}")); return; } const bool modified_end_stream = (end_stream && continue_data_entry == encoder_filters_.end()); state_.non_100_response_headers_encoded_ = true; filter_manager_callbacks_.encodeHeaders(headers, modified_end_stream); if (state_.saw_downstream_reset_) { return; } maybeEndEncode(modified_end_stream); if (!modified_end_stream) { maybeContinueEncoding(continue_data_entry); } } void FilterManager::encodeMetadata(ActiveStreamEncoderFilter* filter, MetadataMapPtr&& metadata_map_ptr) { filter_manager_callbacks_.resetIdleTimer(); std::list<ActiveStreamEncoderFilterPtr>::iterator entry = commonEncodePrefix(filter, false, FilterIterationStartState::CanStartFromCurrent); for (; entry != encoder_filters_.end(); entry++) { if ((*entry)->skipFilter()) { continue; } // If the filter pointed by entry has stopped for all frame type, stores metadata and returns. // If the filter pointed by entry hasn't returned from encodeHeaders, stores newly added // metadata in case encodeHeaders returns StopAllIteration. The latter can happen when headers // callbacks generate new metadata. if (!(*entry)->encode_headers_called_ || (*entry)->stoppedAll()) { (*entry)->getSavedResponseMetadata()->emplace_back(std::move(metadata_map_ptr)); return; } FilterMetadataStatus status = (*entry)->handle_->encodeMetadata(*metadata_map_ptr); ENVOY_STREAM_LOG(trace, "encode metadata called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); } // TODO(soya3129): update stats with metadata. // Now encode metadata via the codec. if (!metadata_map_ptr->empty()) { MetadataMapVector metadata_map_vector; metadata_map_vector.emplace_back(std::move(metadata_map_ptr)); filter_manager_callbacks_.encodeMetadata(metadata_map_vector); } } ResponseTrailerMap& FilterManager::addEncodedTrailers() { // Trailers can only be added during the last data frame (i.e. end_stream = true). ASSERT(state_.filter_call_state_ & FilterCallState::LastDataFrame); // Trailers can only be added once. 
ASSERT(!filter_manager_callbacks_.responseTrailers()); filter_manager_callbacks_.setResponseTrailers(ResponseTrailerMapImpl::create()); return *filter_manager_callbacks_.responseTrailers(); } void FilterManager::addEncodedData(ActiveStreamEncoderFilter& filter, Buffer::Instance& data, bool streaming) { if (state_.filter_call_state_ == 0 || (state_.filter_call_state_ & FilterCallState::EncodeHeaders) || (state_.filter_call_state_ & FilterCallState::EncodeData) || ((state_.filter_call_state_ & FilterCallState::EncodeTrailers) && !filter.canIterate())) { // Make sure if this triggers watermarks, the correct action is taken. state_.encoder_filters_streaming_ = streaming; // If no call is happening or we are in the decode headers/data callback, buffer the data. // Inline processing happens in the decodeHeaders() callback if necessary. filter.commonHandleBufferData(data); } else if (state_.filter_call_state_ & FilterCallState::EncodeTrailers) { // In this case we need to inline dispatch the data to further filters. If those filters // choose to buffer/stop iteration that's fine. encodeData(&filter, data, false, FilterIterationStartState::AlwaysStartFromNext); } else { IS_ENVOY_BUG("Invalid response data"); sendLocalReply(Http::Code::BadGateway, "Filter error", nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().FilterAddedInvalidResponseData); } } void FilterManager::encodeData(ActiveStreamEncoderFilter* filter, Buffer::Instance& data, bool end_stream, FilterIterationStartState filter_iteration_start_state) { filter_manager_callbacks_.resetIdleTimer(); // Filter iteration may start at the current filter. std::list<ActiveStreamEncoderFilterPtr>::iterator entry = commonEncodePrefix(filter, end_stream, filter_iteration_start_state); auto trailers_added_entry = encoder_filters_.end(); const bool trailers_exists_at_start = filter_manager_callbacks_.responseTrailers().has_value(); for (; entry != encoder_filters_.end(); entry++) { if ((*entry)->skipFilter()) { continue; } // If the filter pointed by entry has stopped for all frame type, return now. if (handleDataIfStopAll(**entry, data, state_.encoder_filters_streaming_)) { return; } // If end_stream_ is marked for a filter, the data is not for this filter and filters after. // For details, please see the comment in the ActiveStream::decodeData() function. if ((*entry)->end_stream_) { return; } ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeData)); // We check the response_trailers_ pointer here in case addEncodedTrailers // is called in encodeData during a previous filter invocation, at which point we communicate to // the current and future filters that the stream has not yet ended. 
state_.filter_call_state_ |= FilterCallState::EncodeData; if (end_stream) { state_.filter_call_state_ |= FilterCallState::LastDataFrame; } recordLatestDataFilter(entry, state_.latest_data_encoding_filter_, encoder_filters_); (*entry)->end_stream_ = end_stream && !filter_manager_callbacks_.responseTrailers(); FilterDataStatus status = (*entry)->handle_->encodeData(data, (*entry)->end_stream_); if (state_.encoder_filter_chain_aborted_) { ENVOY_STREAM_LOG(trace, "encodeData filter iteration aborted due to local reply: filter={}", *this, static_cast<const void*>((*entry).get())); status = FilterDataStatus::StopIterationNoBuffer; } if ((*entry)->end_stream_) { (*entry)->handle_->encodeComplete(); } state_.filter_call_state_ &= ~FilterCallState::EncodeData; if (end_stream) { state_.filter_call_state_ &= ~FilterCallState::LastDataFrame; } ENVOY_STREAM_LOG(trace, "encode data called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); if (!trailers_exists_at_start && filter_manager_callbacks_.responseTrailers() && trailers_added_entry == encoder_filters_.end()) { trailers_added_entry = entry; } if (!(*entry)->commonHandleAfterDataCallback(status, data, state_.encoder_filters_streaming_)) { return; } } const bool modified_end_stream = end_stream && trailers_added_entry == encoder_filters_.end(); filter_manager_callbacks_.encodeData(data, modified_end_stream); if (state_.saw_downstream_reset_) { return; } maybeEndEncode(modified_end_stream); // If trailers were adding during encodeData we need to trigger decodeTrailers in order // to allow filters to process the trailers. if (trailers_added_entry != encoder_filters_.end()) { encodeTrailers(trailers_added_entry->get(), *filter_manager_callbacks_.responseTrailers()); } } void FilterManager::encodeTrailers(ActiveStreamEncoderFilter* filter, ResponseTrailerMap& trailers) { filter_manager_callbacks_.resetIdleTimer(); // Filter iteration may start at the current filter. std::list<ActiveStreamEncoderFilterPtr>::iterator entry = commonEncodePrefix(filter, true, FilterIterationStartState::CanStartFromCurrent); for (; entry != encoder_filters_.end(); entry++) { (*entry)->maybeEvaluateMatchTreeWithNewData( [&](auto& matching_data) { matching_data.onResponseTrailers(trailers); }); if ((*entry)->skipFilter()) { continue; } // If the filter pointed by entry has stopped for all frame type, return now. 
if ((*entry)->stoppedAll()) { return; } ASSERT(!(state_.filter_call_state_ & FilterCallState::EncodeTrailers)); state_.filter_call_state_ |= FilterCallState::EncodeTrailers; FilterTrailersStatus status = (*entry)->handle_->encodeTrailers(trailers); (*entry)->handle_->encodeComplete(); (*entry)->end_stream_ = true; state_.filter_call_state_ &= ~FilterCallState::EncodeTrailers; ENVOY_STREAM_LOG(trace, "encode trailers called: filter={} status={}", *this, static_cast<const void*>((*entry).get()), static_cast<uint64_t>(status)); if (!(*entry)->commonHandleAfterTrailersCallback(status)) { return; } } filter_manager_callbacks_.encodeTrailers(trailers); if (state_.saw_downstream_reset_) { return; } maybeEndEncode(true); } void FilterManager::maybeEndEncode(bool end_stream) { if (end_stream) { ASSERT(!state_.remote_encode_complete_); state_.remote_encode_complete_ = true; filter_manager_callbacks_.endStream(); } } bool FilterManager::processNewlyAddedMetadata() { if (request_metadata_map_vector_ == nullptr) { return false; } for (const auto& metadata_map : *getRequestMetadataMapVector()) { decodeMetadata(nullptr, *metadata_map); } getRequestMetadataMapVector()->clear(); return true; } bool FilterManager::handleDataIfStopAll(ActiveStreamFilterBase& filter, Buffer::Instance& data, bool& filter_streaming) { if (filter.stoppedAll()) { ASSERT(!filter.canIterate()); filter_streaming = filter.iteration_state_ == ActiveStreamFilterBase::IterationState::StopAllWatermark; filter.commonHandleBufferData(data); return true; } return false; } void FilterManager::callHighWatermarkCallbacks() { ++high_watermark_count_; for (auto watermark_callbacks : watermark_callbacks_) { watermark_callbacks->onAboveWriteBufferHighWatermark(); } } void FilterManager::callLowWatermarkCallbacks() { ASSERT(high_watermark_count_ > 0); --high_watermark_count_; for (auto watermark_callbacks : watermark_callbacks_) { watermark_callbacks->onBelowWriteBufferLowWatermark(); } } void FilterManager::setBufferLimit(uint32_t new_limit) { ENVOY_STREAM_LOG(debug, "setting buffer limit to {}", *this, new_limit); buffer_limit_ = new_limit; if (buffered_request_data_) { buffered_request_data_->setWatermarks(buffer_limit_); } if (buffered_response_data_) { buffered_response_data_->setWatermarks(buffer_limit_); } } void FilterManager::contextOnContinue(ScopeTrackedObjectStack& tracked_object_stack) { tracked_object_stack.add(connection_); tracked_object_stack.add(filter_manager_callbacks_.scope()); } bool FilterManager::createFilterChain() { if (state_.created_filter_chain_) { return false; } bool upgrade_rejected = false; const HeaderEntry* upgrade = nullptr; if (filter_manager_callbacks_.requestHeaders()) { upgrade = filter_manager_callbacks_.requestHeaders()->Upgrade(); // Treat CONNECT requests as a special upgrade case. if (!upgrade && HeaderUtility::isConnect(*filter_manager_callbacks_.requestHeaders())) { upgrade = filter_manager_callbacks_.requestHeaders()->Method(); } } state_.created_filter_chain_ = true; if (upgrade != nullptr) { const Router::RouteEntry::UpgradeMap* upgrade_map = filter_manager_callbacks_.upgradeMap(); if (filter_chain_factory_.createUpgradeFilterChain(upgrade->value().getStringView(), upgrade_map, *this)) { filter_manager_callbacks_.upgradeFilterChainCreated(); return true; } else { upgrade_rejected = true; // Fall through to the default filter chain. The function calling this // will send a local reply indicating that the upgrade failed. 
} } filter_chain_factory_.createFilterChain(*this); return !upgrade_rejected; } void ActiveStreamDecoderFilter::requestDataDrained() { // If this is called it means the call to requestDataTooLarge() was a // streaming call, or a 413 would have been sent. onDecoderFilterBelowWriteBufferLowWatermark(); } void ActiveStreamDecoderFilter::onDecoderFilterBelowWriteBufferLowWatermark() { parent_.filter_manager_callbacks_.onDecoderFilterBelowWriteBufferLowWatermark(); } void ActiveStreamDecoderFilter::addDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { // This is called exactly once per upstream-stream, by the router filter. Therefore, we // expect the same callbacks to not be registered twice. ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), &watermark_callbacks) == parent_.watermark_callbacks_.end()); parent_.watermark_callbacks_.emplace(parent_.watermark_callbacks_.end(), &watermark_callbacks); for (uint32_t i = 0; i < parent_.high_watermark_count_; ++i) { watermark_callbacks.onAboveWriteBufferHighWatermark(); } } void ActiveStreamDecoderFilter::removeDownstreamWatermarkCallbacks( DownstreamWatermarkCallbacks& watermark_callbacks) { ASSERT(std::find(parent_.watermark_callbacks_.begin(), parent_.watermark_callbacks_.end(), &watermark_callbacks) != parent_.watermark_callbacks_.end()); parent_.watermark_callbacks_.remove(&watermark_callbacks); } void ActiveStreamDecoderFilter::setDecoderBufferLimit(uint32_t limit) { parent_.setBufferLimit(limit); } uint32_t ActiveStreamDecoderFilter::decoderBufferLimit() { return parent_.buffer_limit_; } bool ActiveStreamDecoderFilter::recreateStream(const ResponseHeaderMap* headers) { // Because the filter's and the HCM view of if the stream has a body and if // the stream is complete may differ, re-check bytesReceived() to make sure // there was no body from the HCM's point of view. if (!complete()) { return false; } parent_.stream_info_.setResponseCodeDetails( StreamInfo::ResponseCodeDetails::get().InternalRedirect); if (headers != nullptr) { // The call to setResponseHeaders is needed to ensure that the headers are properly logged in // access logs before the stream is destroyed. Since the function expects a ResponseHeaderPtr&&, // ownership of the headers must be passed. This cannot happen earlier in the flow (such as in // the call to setupRedirect) because at that point it is still possible for the headers to be // used in a different logical branch. We work around this by creating a copy and passing // ownership of the copy instead. 
ResponseHeaderMapPtr headers_copy = createHeaderMap<ResponseHeaderMapImpl>(*headers); parent_.filter_manager_callbacks_.setResponseHeaders(std::move(headers_copy)); parent_.filter_manager_callbacks_.chargeStats(*headers); } parent_.filter_manager_callbacks_.recreateStream(parent_.stream_info_.filter_state_); return true; } void ActiveStreamDecoderFilter::addUpstreamSocketOptions( const Network::Socket::OptionsSharedPtr& options) { Network::Socket::appendOptions(parent_.upstream_options_, options); } Network::Socket::OptionsSharedPtr ActiveStreamDecoderFilter::getUpstreamSocketOptions() const { return parent_.upstream_options_; } void ActiveStreamDecoderFilter::requestRouteConfigUpdate( Http::RouteConfigUpdatedCallbackSharedPtr route_config_updated_cb) { parent_.filter_manager_callbacks_.requestRouteConfigUpdate(std::move(route_config_updated_cb)); } absl::optional<Router::ConfigConstSharedPtr> ActiveStreamDecoderFilter::routeConfig() { return parent_.filter_manager_callbacks_.routeConfig(); } Buffer::InstancePtr ActiveStreamEncoderFilter::createBuffer() { auto buffer = dispatcher().getWatermarkFactory().createBuffer( [this]() -> void { this->responseDataDrained(); }, [this]() -> void { this->responseDataTooLarge(); }, []() -> void { /* TODO(adisuissa): Handle overflow watermark */ }); buffer->setWatermarks(parent_.buffer_limit_); return buffer; } Buffer::InstancePtr& ActiveStreamEncoderFilter::bufferedData() { return parent_.buffered_response_data_; } bool ActiveStreamEncoderFilter::complete() { return parent_.state_.local_complete_; } bool ActiveStreamEncoderFilter::has1xxheaders() { return parent_.state_.has_1xx_headers_ && !continued_1xx_headers_; } void ActiveStreamEncoderFilter::do1xxHeaders() { parent_.encode1xxHeaders(this, *parent_.filter_manager_callbacks_.informationalHeaders()); } void ActiveStreamEncoderFilter::doHeaders(bool end_stream) { parent_.encodeHeaders(this, *parent_.filter_manager_callbacks_.responseHeaders(), end_stream); } void ActiveStreamEncoderFilter::doData(bool end_stream) { parent_.encodeData(this, *parent_.buffered_response_data_, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } void ActiveStreamEncoderFilter::drainSavedResponseMetadata() { ASSERT(saved_response_metadata_ != nullptr); for (auto& metadata_map : *getSavedResponseMetadata()) { parent_.encodeMetadata(this, std::move(metadata_map)); } getSavedResponseMetadata()->clear(); } void ActiveStreamEncoderFilter::handleMetadataAfterHeadersCallback() { // If we drain accumulated metadata, the iteration must start with the current filter. const bool saved_state = iterate_from_current_filter_; iterate_from_current_filter_ = true; // If encodeHeaders() returns StopAllIteration, we should skip draining metadata, and wait // for doMetadata() to drain the metadata after iteration continues. if (!stoppedAll() && saved_response_metadata_ != nullptr && !getSavedResponseMetadata()->empty()) { drainSavedResponseMetadata(); } // Restores the original value of iterate_from_current_filter_. 
iterate_from_current_filter_ = saved_state; } void ActiveStreamEncoderFilter::doTrailers() { parent_.encodeTrailers(this, *parent_.filter_manager_callbacks_.responseTrailers()); } bool ActiveStreamEncoderFilter::hasTrailers() { return parent_.filter_manager_callbacks_.responseTrailers().has_value(); } void ActiveStreamEncoderFilter::addEncodedData(Buffer::Instance& data, bool streaming) { return parent_.addEncodedData(*this, data, streaming); } void ActiveStreamEncoderFilter::injectEncodedDataToFilterChain(Buffer::Instance& data, bool end_stream) { if (!headers_continued_) { headers_continued_ = true; doHeaders(false); } parent_.encodeData(this, data, end_stream, FilterManager::FilterIterationStartState::CanStartFromCurrent); } ResponseTrailerMap& ActiveStreamEncoderFilter::addEncodedTrailers() { return parent_.addEncodedTrailers(); } void ActiveStreamEncoderFilter::addEncodedMetadata(MetadataMapPtr&& metadata_map_ptr) { return parent_.encodeMetadata(this, std::move(metadata_map_ptr)); } void ActiveStreamEncoderFilter::onEncoderFilterAboveWriteBufferHighWatermark() { ENVOY_STREAM_LOG(debug, "Disabling upstream stream due to filter callbacks.", parent_); parent_.callHighWatermarkCallbacks(); } void ActiveStreamEncoderFilter::onEncoderFilterBelowWriteBufferLowWatermark() { ENVOY_STREAM_LOG(debug, "Enabling upstream stream due to filter callbacks.", parent_); parent_.callLowWatermarkCallbacks(); } void ActiveStreamEncoderFilter::setEncoderBufferLimit(uint32_t limit) { parent_.setBufferLimit(limit); } uint32_t ActiveStreamEncoderFilter::encoderBufferLimit() { return parent_.buffer_limit_; } void ActiveStreamEncoderFilter::continueEncoding() { commonContinue(); } const Buffer::Instance* ActiveStreamEncoderFilter::encodingBuffer() { return parent_.buffered_response_data_.get(); } void ActiveStreamEncoderFilter::modifyEncodingBuffer( std::function<void(Buffer::Instance&)> callback) { ASSERT(parent_.state_.latest_data_encoding_filter_ == this); callback(*parent_.buffered_response_data_.get()); } void ActiveStreamEncoderFilter::sendLocalReply( Code code, absl::string_view body, std::function<void(ResponseHeaderMap& headers)> modify_headers, const absl::optional<Grpc::Status::GrpcStatus> grpc_status, absl::string_view details) { parent_.sendLocalReply(code, body, modify_headers, grpc_status, details); } Http1StreamEncoderOptionsOptRef ActiveStreamEncoderFilter::http1StreamEncoderOptions() { // TODO(mattklein123): At some point we might want to actually wrap this interface but for now // we give the filter direct access to the encoder options. return parent_.filter_manager_callbacks_.http1StreamEncoderOptions(); } void ActiveStreamEncoderFilter::responseDataTooLarge() { ENVOY_STREAM_LOG(debug, "response data too large watermark exceeded", parent_); if (parent_.state_.encoder_filters_streaming_) { onEncoderFilterAboveWriteBufferHighWatermark(); } else { parent_.filter_manager_callbacks_.onResponseDataTooLarge(); // In this case, sendLocalReply will either send a response directly to the encoder, or // reset the stream. 
parent_.sendLocalReply( Http::Code::InternalServerError, CodeUtility::toString(Http::Code::InternalServerError), nullptr, absl::nullopt, StreamInfo::ResponseCodeDetails::get().ResponsePayloadTooLarge); } } void ActiveStreamEncoderFilter::responseDataDrained() { onEncoderFilterBelowWriteBufferLowWatermark(); } void ActiveStreamFilterBase::resetStream() { parent_.filter_manager_callbacks_.resetStream(); } uint64_t ActiveStreamFilterBase::streamId() const { return parent_.streamId(); } Buffer::BufferMemoryAccountSharedPtr ActiveStreamDecoderFilter::account() const { return parent_.account(); } void ActiveStreamDecoderFilter::setUpstreamOverrideHost(absl::string_view host) { parent_.upstream_override_host_.emplace(std::move(host)); } absl::optional<absl::string_view> ActiveStreamDecoderFilter::upstreamOverrideHost() const { return parent_.upstream_override_host_; } } // namespace Http } // namespace Envoy
func_before: bool ActiveStreamDecoderFilter::complete() { return parent_.state_.remote_complete_; }
func_after: bool ActiveStreamDecoderFilter::complete() { return parent_.state_.remote_decode_complete_; }
diff: {'added': [(306, 'bool ActiveStreamEncoderFilter::canContinue() {'), (307, ' // As with ActiveStreamDecoderFilter::canContinue() make sure we do not'), (308, ' // continue if a local reply has been sent.'), (309, ' return !parent_.state_.remote_encode_complete_;'), (310, '}'), (311, ''), (325, 'bool ActiveStreamDecoderFilter::complete() { return parent_.state_.remote_decode_complete_; }'), (841, ' ASSERT(!state_.remote_decode_complete_);'), (842, ' state_.remote_decode_complete_ = end_stream;'), (1365, ' ASSERT(!state_.remote_encode_complete_);'), (1366, ' state_.remote_encode_complete_ = true;'), (1657, ' ENVOY_STREAM_LOG(debug, "response data too large watermark exceeded", parent_);')], 'deleted': [(319, 'bool ActiveStreamDecoderFilter::complete() { return parent_.state_.remote_complete_; }'), (835, ' ASSERT(!state_.remote_complete_);'), (836, ' state_.remote_complete_ = end_stream;')]}
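The diff above captures the whole fix: the stream's single remote_complete_ flag is split into remote_decode_complete_ and remote_encode_complete_, and ActiveStreamEncoderFilter::canContinue() is added so that a continuation arriving after the response has finished encoding (for example, after a local reply) becomes a no-op. A minimal sketch of that guard follows; StreamState, DecoderFilter, and EncoderFilter are simplified stand-ins for illustration, not the real FilterManager types.

#include <cassert>

// Simplified per-stream state; the real flags live on FilterManager's State.
struct StreamState {
  bool local_complete_{false};          // response fully generated locally
  bool remote_decode_complete_{false};  // request (decode) side has ended
  bool remote_encode_complete_{false};  // response (encode) side has ended
};

struct DecoderFilter {
  StreamState& state_;
  // After the fix, decode-side completion consults its own flag instead of a
  // flag shared with the encode side.
  bool complete() const { return state_.remote_decode_complete_; }
  bool canContinue() const { return !state_.local_complete_; }
};

struct EncoderFilter {
  StreamState& state_;
  // Added by the fix: once the encode side has completed (e.g. a local reply
  // already went out), a deferred continueEncoding() must do nothing.
  bool canContinue() const { return !state_.remote_encode_complete_; }
};

int main() {
  StreamState state;
  EncoderFilter encoder{state};
  assert(encoder.canContinue());         // response still in flight

  state.remote_encode_complete_ = true;  // maybeEndEncode(true) ran
  assert(!encoder.canContinue());        // late continuation is suppressed
}

Splitting the flag lets each direction finish independently: in the code above, maybeEndDecode() marks only the decode side and maybeEndEncode() only the encode side, so completion of one direction can no longer masquerade as completion of the other.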
num_lines_added: 12
num_lines_deleted: 3
num_lines_in_file: 1,216
num_tokens_in_file: 8,866
num_lines_in_method: 1
num_tokens_in_method: 14
method_complexity: 1
repo: https://github.com/envoyproxy/envoy
cve_id: CVE-2021-43825
cwe_id: CWE-416
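The cwe_id above tags this record as a use-after-free: without the encode-side guard, a deferred callback that continues filter iteration after the stream has completed can re-enter per-stream state that is being, or has been, torn down. The hypothetical sketch below shows that defensive shape in isolation, a weak reference plus the completion check; it is not how Envoy itself schedules continuations, and Stream, encodeMore(), and the event_loop vector are invented for illustration.

#include <functional>
#include <memory>
#include <vector>

// Illustrative stream object; encodeMore() stands in for re-entering the
// filter chain.
struct Stream {
  bool remote_encode_complete_{false};
  int bytes_encoded_{0};
  void encodeMore() { ++bytes_encoded_; }
};

int main() {
  auto stream = std::make_shared<Stream>();
  std::vector<std::function<void()>> event_loop;

  // A filter defers a continuation, holding only a weak reference so the
  // callback cannot dangle if the stream object is freed first.
  event_loop.push_back([w = std::weak_ptr<Stream>(stream)] {
    auto s = w.lock();
    if (!s || s->remote_encode_complete_) {
      return;  // the canContinue()-style check: do nothing after completion
    }
    s->encodeMore();
  });

  // A local reply finishes the response before the continuation runs...
  stream->remote_encode_complete_ = true;

  // ...so the deferred callback is now a safe no-op.
  for (auto& cb : event_loop) {
    cb();
  }
}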
id: 2,974
file_name: rtadv.c
programming_language: C
method_name: rtadv_read
/* Router advertisement * Copyright (C) 2005 6WIND <jean-mickael.guerin@6wind.com> * Copyright (C) 1999 Kunihiro Ishiguro * * This file is part of GNU Zebra. * * GNU Zebra is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * GNU Zebra is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU Zebra; see the file COPYING. If not, write to the Free * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <zebra.h> #include "memory.h" #include "sockopt.h" #include "thread.h" #include "if.h" #include "log.h" #include "prefix.h" #include "linklist.h" #include "command.h" #include "privs.h" #include "vrf.h" #include "zebra/interface.h" #include "zebra/rtadv.h" #include "zebra/debug.h" #include "zebra/rib.h" #include "zebra/zserv.h" extern struct zebra_privs_t zserv_privs; #if defined (HAVE_IPV6) && defined (HAVE_RTADV) #ifdef OPEN_BSD #include <netinet/icmp6.h> #endif /* If RFC2133 definition is used. */ #ifndef IPV6_JOIN_GROUP #define IPV6_JOIN_GROUP IPV6_ADD_MEMBERSHIP #endif #ifndef IPV6_LEAVE_GROUP #define IPV6_LEAVE_GROUP IPV6_DROP_MEMBERSHIP #endif #define ALLNODE "ff02::1" #define ALLROUTER "ff02::2" extern struct zebra_t zebrad; enum rtadv_event {RTADV_START, RTADV_STOP, RTADV_TIMER, RTADV_TIMER_MSEC, RTADV_READ}; static void rtadv_event (struct zebra_vrf *, enum rtadv_event, int); static int if_join_all_router (int, struct interface *); static int if_leave_all_router (int, struct interface *); static int rtadv_recv_packet (int sock, u_char *buf, int buflen, struct sockaddr_in6 *from, ifindex_t *ifindex, int *hoplimit) { int ret; struct msghdr msg; struct iovec iov; struct cmsghdr *cmsgptr; struct in6_addr dst; char adata[1024]; /* Fill in message and iovec. */ msg.msg_name = (void *) from; msg.msg_namelen = sizeof (struct sockaddr_in6); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = (void *) adata; msg.msg_controllen = sizeof adata; iov.iov_base = buf; iov.iov_len = buflen; /* If recvmsg fail return minus value. */ ret = recvmsg (sock, &msg, 0); if (ret < 0) return ret; for (cmsgptr = ZCMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg, cmsgptr)) { /* I want interface index which this packet comes from. */ if (cmsgptr->cmsg_level == IPPROTO_IPV6 && cmsgptr->cmsg_type == IPV6_PKTINFO) { struct in6_pktinfo *ptr; ptr = (struct in6_pktinfo *) CMSG_DATA (cmsgptr); *ifindex = ptr->ipi6_ifindex; memcpy(&dst, &ptr->ipi6_addr, sizeof(ptr->ipi6_addr)); } /* Incoming packet's hop limit. */ if (cmsgptr->cmsg_level == IPPROTO_IPV6 && cmsgptr->cmsg_type == IPV6_HOPLIMIT) { int *hoptr = (int *) CMSG_DATA (cmsgptr); *hoplimit = *hoptr; } } return ret; } #define RTADV_MSG_SIZE 4096 /* Send router advertisement packet. 
*/ static void rtadv_send_packet (int sock, struct interface *ifp) { struct msghdr msg; struct iovec iov; struct cmsghdr *cmsgptr; struct in6_pktinfo *pkt; struct sockaddr_in6 addr; #ifdef HAVE_STRUCT_SOCKADDR_DL struct sockaddr_dl *sdl; #endif /* HAVE_STRUCT_SOCKADDR_DL */ static void *adata = NULL; unsigned char buf[RTADV_MSG_SIZE]; struct nd_router_advert *rtadv; int ret; int len = 0; struct zebra_if *zif; struct rtadv_prefix *rprefix; u_char all_nodes_addr[] = {0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,1}; struct listnode *node; u_int16_t pkt_RouterLifetime; /* * Allocate control message bufffer. This is dynamic because * CMSG_SPACE is not guaranteed not to call a function. Note that * the size will be different on different architectures due to * differing alignment rules. */ if (adata == NULL) { /* XXX Free on shutdown. */ adata = malloc(CMSG_SPACE(sizeof(struct in6_pktinfo))); if (adata == NULL) zlog_err("rtadv_send_packet: can't malloc control data\n"); } /* Logging of packet. */ if (IS_ZEBRA_DEBUG_PACKET) zlog_debug ("Router advertisement send to %s", ifp->name); /* Fill in sockaddr_in6. */ memset (&addr, 0, sizeof (struct sockaddr_in6)); addr.sin6_family = AF_INET6; #ifdef SIN6_LEN addr.sin6_len = sizeof (struct sockaddr_in6); #endif /* SIN6_LEN */ addr.sin6_port = htons (IPPROTO_ICMPV6); IPV6_ADDR_COPY (&addr.sin6_addr, all_nodes_addr); /* Fetch interface information. */ zif = ifp->info; /* Make router advertisement message. */ rtadv = (struct nd_router_advert *) buf; rtadv->nd_ra_type = ND_ROUTER_ADVERT; rtadv->nd_ra_code = 0; rtadv->nd_ra_cksum = 0; rtadv->nd_ra_curhoplimit = 64; /* RFC4191: Default Router Preference is 0 if Router Lifetime is 0. */ rtadv->nd_ra_flags_reserved = zif->rtadv.AdvDefaultLifetime == 0 ? 0 : zif->rtadv.DefaultPreference; rtadv->nd_ra_flags_reserved <<= 3; if (zif->rtadv.AdvManagedFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_MANAGED; if (zif->rtadv.AdvOtherConfigFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_OTHER; if (zif->rtadv.AdvHomeAgentFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_HOME_AGENT; /* Note that according to Neighbor Discovery (RFC 4861 [18]), * AdvDefaultLifetime is by default based on the value of * MaxRtrAdvInterval. AdvDefaultLifetime is used in the Router Lifetime * field of Router Advertisements. Given that this field is expressed * in seconds, a small MaxRtrAdvInterval value can result in a zero * value for this field. To prevent this, routers SHOULD keep * AdvDefaultLifetime in at least one second, even if the use of * MaxRtrAdvInterval would result in a smaller value. -- RFC6275, 7.5 */ pkt_RouterLifetime = zif->rtadv.AdvDefaultLifetime != -1 ? zif->rtadv.AdvDefaultLifetime : MAX (1, 0.003 * zif->rtadv.MaxRtrAdvInterval); rtadv->nd_ra_router_lifetime = htons (pkt_RouterLifetime); rtadv->nd_ra_reachable = htonl (zif->rtadv.AdvReachableTime); rtadv->nd_ra_retransmit = htonl (0); len = sizeof (struct nd_router_advert); /* If both the Home Agent Preference and Home Agent Lifetime are set to * their default values specified above, this option SHOULD NOT be * included in the Router Advertisement messages sent by this home * agent. 
-- RFC6275, 7.4 */ if ( zif->rtadv.AdvHomeAgentFlag && (zif->rtadv.HomeAgentPreference || zif->rtadv.HomeAgentLifetime != -1) ) { struct nd_opt_homeagent_info *ndopt_hai = (struct nd_opt_homeagent_info *)(buf + len); ndopt_hai->nd_opt_hai_type = ND_OPT_HA_INFORMATION; ndopt_hai->nd_opt_hai_len = 1; ndopt_hai->nd_opt_hai_reserved = 0; ndopt_hai->nd_opt_hai_preference = htons(zif->rtadv.HomeAgentPreference); /* 16-bit unsigned integer. The lifetime associated with the home * agent in units of seconds. The default value is the same as the * Router Lifetime, as specified in the main body of the Router * Advertisement. The maximum value corresponds to 18.2 hours. A * value of 0 MUST NOT be used. -- RFC6275, 7.5 */ ndopt_hai->nd_opt_hai_lifetime = htons ( zif->rtadv.HomeAgentLifetime != -1 ? zif->rtadv.HomeAgentLifetime : MAX (1, pkt_RouterLifetime) /* 0 is OK for RL, but not for HAL*/ ); len += sizeof(struct nd_opt_homeagent_info); } if (zif->rtadv.AdvIntervalOption) { struct nd_opt_adv_interval *ndopt_adv = (struct nd_opt_adv_interval *)(buf + len); ndopt_adv->nd_opt_ai_type = ND_OPT_ADV_INTERVAL; ndopt_adv->nd_opt_ai_len = 1; ndopt_adv->nd_opt_ai_reserved = 0; ndopt_adv->nd_opt_ai_interval = htonl(zif->rtadv.MaxRtrAdvInterval); len += sizeof(struct nd_opt_adv_interval); } /* Fill in prefix. */ for (ALL_LIST_ELEMENTS_RO (zif->rtadv.AdvPrefixList, node, rprefix)) { struct nd_opt_prefix_info *pinfo; pinfo = (struct nd_opt_prefix_info *) (buf + len); pinfo->nd_opt_pi_type = ND_OPT_PREFIX_INFORMATION; pinfo->nd_opt_pi_len = 4; pinfo->nd_opt_pi_prefix_len = rprefix->prefix.prefixlen; pinfo->nd_opt_pi_flags_reserved = 0; if (rprefix->AdvOnLinkFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_ONLINK; if (rprefix->AdvAutonomousFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_AUTO; if (rprefix->AdvRouterAddressFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_RADDR; pinfo->nd_opt_pi_valid_time = htonl (rprefix->AdvValidLifetime); pinfo->nd_opt_pi_preferred_time = htonl (rprefix->AdvPreferredLifetime); pinfo->nd_opt_pi_reserved2 = 0; IPV6_ADDR_COPY (&pinfo->nd_opt_pi_prefix, &rprefix->prefix.prefix); #ifdef DEBUG { u_char buf[INET6_ADDRSTRLEN]; zlog_debug ("DEBUG %s", inet_ntop (AF_INET6, &pinfo->nd_opt_pi_prefix, buf, INET6_ADDRSTRLEN)); } #endif /* DEBUG */ len += sizeof (struct nd_opt_prefix_info); } /* Hardware address. */ if (ifp->hw_addr_len != 0) { buf[len++] = ND_OPT_SOURCE_LINKADDR; /* Option length should be rounded up to next octet if the link address does not end on an octet boundary. */ buf[len++] = (ifp->hw_addr_len + 9) >> 3; memcpy (buf + len, ifp->hw_addr, ifp->hw_addr_len); len += ifp->hw_addr_len; /* Pad option to end on an octet boundary. 
*/ memset (buf + len, 0, -(ifp->hw_addr_len + 2) & 0x7); len += -(ifp->hw_addr_len + 2) & 0x7; } /* MTU */ if (zif->rtadv.AdvLinkMTU) { struct nd_opt_mtu * opt = (struct nd_opt_mtu *) (buf + len); opt->nd_opt_mtu_type = ND_OPT_MTU; opt->nd_opt_mtu_len = 1; opt->nd_opt_mtu_reserved = 0; opt->nd_opt_mtu_mtu = htonl (zif->rtadv.AdvLinkMTU); len += sizeof (struct nd_opt_mtu); } msg.msg_name = (void *) &addr; msg.msg_namelen = sizeof (struct sockaddr_in6); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = (void *) adata; msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo)); msg.msg_flags = 0; iov.iov_base = buf; iov.iov_len = len; cmsgptr = ZCMSG_FIRSTHDR(&msg); cmsgptr->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo)); cmsgptr->cmsg_level = IPPROTO_IPV6; cmsgptr->cmsg_type = IPV6_PKTINFO; pkt = (struct in6_pktinfo *) CMSG_DATA (cmsgptr); memset (&pkt->ipi6_addr, 0, sizeof (struct in6_addr)); pkt->ipi6_ifindex = ifp->ifindex; ret = sendmsg (sock, &msg, 0); if (ret < 0) { zlog_err ("rtadv_send_packet: sendmsg %d (%s)\n", errno, safe_strerror(errno)); } } static int rtadv_timer (struct thread *thread) { struct zebra_vrf *zvrf = THREAD_ARG (thread); struct listnode *node, *nnode; struct interface *ifp; struct zebra_if *zif; int period; zvrf->rtadv.ra_timer = NULL; if (zvrf->rtadv.adv_msec_if_count == 0) { period = 1000; /* 1 s */ rtadv_event (zvrf, RTADV_TIMER, 1 /* 1 s */); } else { period = 10; /* 10 ms */ rtadv_event (zvrf, RTADV_TIMER_MSEC, 10 /* 10 ms */); } for (ALL_LIST_ELEMENTS (vrf_iflist (zvrf->vrf_id), node, nnode, ifp)) { if (if_is_loopback (ifp) || ! if_is_operative (ifp)) continue; zif = ifp->info; if (zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvIntervalTimer -= period; if (zif->rtadv.AdvIntervalTimer <= 0) { /* FIXME: using MaxRtrAdvInterval each time isn't what section 6.2.4 of RFC4861 tells to do. */ zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval; rtadv_send_packet (zvrf->rtadv.sock, ifp); } } } return 0; } static void rtadv_process_solicit (struct interface *ifp) { struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); zlog_info ("Router solicitation received on %s vrf %u", ifp->name, zvrf->vrf_id); rtadv_send_packet (zvrf->rtadv.sock, ifp); } static void rtadv_process_advert (void) { zlog_info ("Router advertisement received"); } static void rtadv_process_packet (u_char *buf, unsigned int len, ifindex_t ifindex, int hoplimit, vrf_id_t vrf_id) { struct icmp6_hdr *icmph; struct interface *ifp; struct zebra_if *zif; /* Interface search. */ ifp = if_lookup_by_index_vrf (ifindex, vrf_id); if (ifp == NULL) { zlog_warn ("Unknown interface index: %d, vrf %u", ifindex, vrf_id); return; } if (if_is_loopback (ifp)) return; /* Check interface configuration. */ zif = ifp->info; if (! zif->rtadv.AdvSendAdvertisements) return; /* ICMP message length check. */ if (len < sizeof (struct icmp6_hdr)) { zlog_warn ("Invalid ICMPV6 packet length: %d", len); return; } icmph = (struct icmp6_hdr *) buf; /* ICMP message type check. */ if (icmph->icmp6_type != ND_ROUTER_SOLICIT && icmph->icmp6_type != ND_ROUTER_ADVERT) { zlog_warn ("Unwanted ICMPV6 message type: %d", icmph->icmp6_type); return; } /* Hoplimit check. */ if (hoplimit >= 0 && hoplimit != 255) { zlog_warn ("Invalid hoplimit %d for router advertisement ICMP packet", hoplimit); return; } /* Check ICMP message type. 
*/ if (icmph->icmp6_type == ND_ROUTER_SOLICIT) rtadv_process_solicit (ifp); else if (icmph->icmp6_type == ND_ROUTER_ADVERT) rtadv_process_advert (); return; } static int rtadv_read (struct thread *thread) { int sock; int len; u_char buf[RTADV_MSG_SIZE]; struct sockaddr_in6 from; ifindex_t ifindex = 0; int hoplimit = -1; struct zebra_vrf *zvrf = THREAD_ARG (thread); sock = THREAD_FD (thread); zvrf->rtadv.ra_read = NULL; /* Register myself. */ rtadv_event (zvrf, RTADV_READ, sock); len = rtadv_recv_packet (sock, buf, BUFSIZ, &from, &ifindex, &hoplimit); if (len < 0) { zlog_warn ("router solicitation recv failed: %s.", safe_strerror (errno)); return len; } rtadv_process_packet (buf, (unsigned)len, ifindex, hoplimit, zvrf->vrf_id); return 0; } static int rtadv_make_socket (vrf_id_t vrf_id) { int sock; int ret; struct icmp6_filter filter; if ( zserv_privs.change (ZPRIVS_RAISE) ) zlog_err ("rtadv_make_socket: could not raise privs, %s", safe_strerror (errno) ); sock = vrf_socket (AF_INET6, SOCK_RAW, IPPROTO_ICMPV6, vrf_id); if ( zserv_privs.change (ZPRIVS_LOWER) ) zlog_err ("rtadv_make_socket: could not lower privs, %s", safe_strerror (errno) ); /* When we can't make ICMPV6 socket simply back. Router advertisement feature will not be supported. */ if (sock < 0) { close (sock); return -1; } ret = setsockopt_ipv6_pktinfo (sock, 1); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_multicast_loop (sock, 0); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_unicast_hops (sock, 255); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_multicast_hops (sock, 255); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_hoplimit (sock, 1); if (ret < 0) { close (sock); return ret; } ICMP6_FILTER_SETBLOCKALL(&filter); ICMP6_FILTER_SETPASS (ND_ROUTER_SOLICIT, &filter); ICMP6_FILTER_SETPASS (ND_ROUTER_ADVERT, &filter); ret = setsockopt (sock, IPPROTO_ICMPV6, ICMP6_FILTER, &filter, sizeof (struct icmp6_filter)); if (ret < 0) { zlog_info ("ICMP6_FILTER set fail: %s", safe_strerror (errno)); return ret; } return sock; } static struct rtadv_prefix * rtadv_prefix_new (void) { return XCALLOC (MTYPE_RTADV_PREFIX, sizeof (struct rtadv_prefix)); } static void rtadv_prefix_free (struct rtadv_prefix *rtadv_prefix) { XFREE (MTYPE_RTADV_PREFIX, rtadv_prefix); } static struct rtadv_prefix * rtadv_prefix_lookup (struct list *rplist, struct prefix_ipv6 *p) { struct listnode *node; struct rtadv_prefix *rprefix; for (ALL_LIST_ELEMENTS_RO (rplist, node, rprefix)) if (prefix_same ((struct prefix *) &rprefix->prefix, (struct prefix *) p)) return rprefix; return NULL; } static struct rtadv_prefix * rtadv_prefix_get (struct list *rplist, struct prefix_ipv6 *p) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_lookup (rplist, p); if (rprefix) return rprefix; rprefix = rtadv_prefix_new (); memcpy (&rprefix->prefix, p, sizeof (struct prefix_ipv6)); listnode_add (rplist, rprefix); return rprefix; } static void rtadv_prefix_set (struct zebra_if *zif, struct rtadv_prefix *rp) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_get (zif->rtadv.AdvPrefixList, &rp->prefix); /* Set parameters. 
*/ rprefix->AdvValidLifetime = rp->AdvValidLifetime; rprefix->AdvPreferredLifetime = rp->AdvPreferredLifetime; rprefix->AdvOnLinkFlag = rp->AdvOnLinkFlag; rprefix->AdvAutonomousFlag = rp->AdvAutonomousFlag; rprefix->AdvRouterAddressFlag = rp->AdvRouterAddressFlag; } static int rtadv_prefix_reset (struct zebra_if *zif, struct rtadv_prefix *rp) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_lookup (zif->rtadv.AdvPrefixList, &rp->prefix); if (rprefix != NULL) { listnode_delete (zif->rtadv.AdvPrefixList, (void *) rprefix); rtadv_prefix_free (rprefix); return 1; } else return 0; } DEFUN (ipv6_nd_suppress_ra, ipv6_nd_suppress_ra_cmd, "ipv6 nd suppress-ra", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Suppress Router Advertisement\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (if_is_loopback (ifp)) { vty_out (vty, "Invalid interface%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvSendAdvertisements = 0; zif->rtadv.AdvIntervalTimer = 0; zvrf->rtadv.adv_if_count--; if_leave_all_router (zvrf->rtadv.sock, ifp); if (zvrf->rtadv.adv_if_count == 0) rtadv_event (zvrf, RTADV_STOP, 0); } return CMD_SUCCESS; } DEFUN (no_ipv6_nd_suppress_ra, no_ipv6_nd_suppress_ra_cmd, "no ipv6 nd suppress-ra", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Suppress Router Advertisement\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (if_is_loopback (ifp)) { vty_out (vty, "Invalid interface%s", VTY_NEWLINE); return CMD_WARNING; } if (! zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvSendAdvertisements = 1; zif->rtadv.AdvIntervalTimer = 0; zvrf->rtadv.adv_if_count++; if_join_all_router (zvrf->rtadv.sock, ifp); if (zvrf->rtadv.adv_if_count == 1) rtadv_event (zvrf, RTADV_START, zvrf->rtadv.sock); } return CMD_SUCCESS; } DEFUN (ipv6_nd_ra_interval_msec, ipv6_nd_ra_interval_msec_cmd, "ipv6 nd ra-interval msec <70-1800000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in milliseconds\n") { unsigned interval; struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); VTY_GET_INTEGER_RANGE ("router advertisement interval", interval, argv[0], 70, 1800000); if ((zif->rtadv.AdvDefaultLifetime != -1 && interval > (unsigned)zif->rtadv.AdvDefaultLifetime * 1000)) { vty_out (vty, "This ra-interval would conflict with configured ra-lifetime!%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; if (interval % 1000) zvrf->rtadv.adv_msec_if_count++; zif->rtadv.MaxRtrAdvInterval = interval; zif->rtadv.MinRtrAdvInterval = 0.33 * interval; zif->rtadv.AdvIntervalTimer = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_ra_interval, ipv6_nd_ra_interval_cmd, "ipv6 nd ra-interval <1-1800>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in seconds\n") { unsigned interval; struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); VTY_GET_INTEGER_RANGE ("router advertisement interval", interval, argv[0], 1, 1800); if ((zif->rtadv.AdvDefaultLifetime != -1 && interval > 
(unsigned)zif->rtadv.AdvDefaultLifetime)) { vty_out (vty, "This ra-interval would conflict with configured ra-lifetime!%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; /* convert to milliseconds */ interval = interval * 1000; zif->rtadv.MaxRtrAdvInterval = interval; zif->rtadv.MinRtrAdvInterval = 0.33 * interval; zif->rtadv.AdvIntervalTimer = 0; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_cmd, "no ipv6 nd ra-interval", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = (struct interface *) vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; zif->rtadv.MaxRtrAdvInterval = RTADV_MAX_RTR_ADV_INTERVAL; zif->rtadv.MinRtrAdvInterval = RTADV_MIN_RTR_ADV_INTERVAL; zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_val_cmd, "no ipv6 nd ra-interval <1-1800>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n") ALIAS (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_msec_val_cmd, "no ipv6 nd ra-interval msec <1-1800000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in milliseconds\n") DEFUN (ipv6_nd_ra_lifetime, ipv6_nd_ra_lifetime_cmd, "ipv6 nd ra-lifetime <0-9000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n" "Router lifetime in seconds (0 stands for a non-default gw)\n") { int lifetime; struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; VTY_GET_INTEGER_RANGE ("router lifetime", lifetime, argv[0], 0, 9000); /* The value to be placed in the Router Lifetime field * of Router Advertisements sent from the interface, * in seconds. MUST be either zero or between * MaxRtrAdvInterval and 9000 seconds. 
-- RFC4861, 6.2.1 */ if ((lifetime != 0 && lifetime * 1000 < zif->rtadv.MaxRtrAdvInterval)) { vty_out (vty, "This ra-lifetime would conflict with configured ra-interval%s", VTY_NEWLINE); return CMD_WARNING; } zif->rtadv.AdvDefaultLifetime = lifetime; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_ra_lifetime, no_ipv6_nd_ra_lifetime_cmd, "no ipv6 nd ra-lifetime", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvDefaultLifetime = -1; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_ra_lifetime, no_ipv6_nd_ra_lifetime_val_cmd, "no ipv6 nd ra-lifetime <0-9000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n" "Router lifetime in seconds (0 stands for a non-default gw)\n") DEFUN (ipv6_nd_reachable_time, ipv6_nd_reachable_time_cmd, "ipv6 nd reachable-time <1-3600000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n" "Reachable time in milliseconds\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("reachable time", zif->rtadv.AdvReachableTime, argv[0], 1, RTADV_MAX_REACHABLE_TIME); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_reachable_time, no_ipv6_nd_reachable_time_cmd, "no ipv6 nd reachable-time", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvReachableTime = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_reachable_time, no_ipv6_nd_reachable_time_val_cmd, "no ipv6 nd reachable-time <1-3600000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n" "Reachable time in milliseconds\n") DEFUN (ipv6_nd_homeagent_preference, ipv6_nd_homeagent_preference_cmd, "ipv6 nd home-agent-preference <0-65535>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n" "preference value (default is 0, least preferred)\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent preference", zif->rtadv.HomeAgentPreference, argv[0], 0, 65535); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_preference, no_ipv6_nd_homeagent_preference_cmd, "no ipv6 nd home-agent-preference", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.HomeAgentPreference = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_homeagent_preference, no_ipv6_nd_homeagent_preference_val_cmd, "no ipv6 nd home-agent-preference <0-65535>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n" "preference value (default is 0, least preferred)\n") DEFUN (ipv6_nd_homeagent_lifetime, ipv6_nd_homeagent_lifetime_cmd, "ipv6 nd home-agent-lifetime <0-65520>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n" "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent lifetime", zif->rtadv.HomeAgentLifetime, argv[0], 0, RTADV_MAX_HALIFETIME); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_lifetime, no_ipv6_nd_homeagent_lifetime_cmd, "no ipv6 nd home-agent-lifetime", NO_STR 
"Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.HomeAgentLifetime = -1; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_homeagent_lifetime, no_ipv6_nd_homeagent_lifetime_val_cmd, "no ipv6 nd home-agent-lifetime <0-65520>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n" "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") DEFUN (ipv6_nd_managed_config_flag, ipv6_nd_managed_config_flag_cmd, "ipv6 nd managed-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Managed address configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvManagedFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_managed_config_flag, no_ipv6_nd_managed_config_flag_cmd, "no ipv6 nd managed-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Managed address configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvManagedFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_homeagent_config_flag, ipv6_nd_homeagent_config_flag_cmd, "ipv6 nd home-agent-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_config_flag, no_ipv6_nd_homeagent_config_flag_cmd, "no ipv6 nd home-agent-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_adv_interval_config_option, ipv6_nd_adv_interval_config_option_cmd, "ipv6 nd adv-interval-option", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertisement Interval Option\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvIntervalOption = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_adv_interval_config_option, no_ipv6_nd_adv_interval_config_option_cmd, "no ipv6 nd adv-interval-option", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertisement Interval Option\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvIntervalOption = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_other_config_flag, ipv6_nd_other_config_flag_cmd, "ipv6 nd other-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Other statefull configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_other_config_flag, no_ipv6_nd_other_config_flag_cmd, "no ipv6 nd other-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Other statefull configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_prefix, ipv6_nd_prefix_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " 
"(<0-4294967295>|infinite) (off-link|) (no-autoconfig|) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n" "Set Router Address flag\n") { int i; int ret; int cursor = 1; struct interface *ifp; struct zebra_if *zebra_if; struct rtadv_prefix rp; ifp = (struct interface *) vty->index; zebra_if = ifp->info; ret = str2prefix_ipv6 (argv[0], &rp.prefix); if (!ret) { vty_out (vty, "Malformed IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } apply_mask_ipv6 (&rp.prefix); /* RFC4861 4.6.2 */ rp.AdvOnLinkFlag = 1; rp.AdvAutonomousFlag = 1; rp.AdvRouterAddressFlag = 0; rp.AdvValidLifetime = RTADV_VALID_LIFETIME; rp.AdvPreferredLifetime = RTADV_PREFERRED_LIFETIME; if (argc > 1) { if ((isdigit((unsigned char)argv[1][0])) || strncmp (argv[1], "i", 1) == 0) { if ( strncmp (argv[1], "i", 1) == 0) rp.AdvValidLifetime = UINT32_MAX; else rp.AdvValidLifetime = (u_int32_t) strtoll (argv[1], (char **)NULL, 10); if ( strncmp (argv[2], "i", 1) == 0) rp.AdvPreferredLifetime = UINT32_MAX; else rp.AdvPreferredLifetime = (u_int32_t) strtoll (argv[2], (char **)NULL, 10); if (rp.AdvPreferredLifetime > rp.AdvValidLifetime) { vty_out (vty, "Invalid preferred lifetime%s", VTY_NEWLINE); return CMD_WARNING; } cursor = cursor + 2; } if (argc > cursor) { for (i = cursor; i < argc; i++) { if (strncmp (argv[i], "of", 2) == 0) rp.AdvOnLinkFlag = 0; if (strncmp (argv[i], "no", 2) == 0) rp.AdvAutonomousFlag = 0; if (strncmp (argv[i], "ro", 2) == 0) rp.AdvRouterAddressFlag = 1; } } } rtadv_prefix_set (zebra_if, &rp); return CMD_SUCCESS; } ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_nortaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (off-link|) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rev_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rev_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|) (off-link|) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_noauto_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid 
lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_offlink_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_cmd, "ipv6 nd prefix X:X::X:X/M (no-autoconfig|) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_rev_cmd, "ipv6 nd prefix X:X::X:X/M (off-link|) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_noauto_cmd, "ipv6 nd prefix X:X::X:X/M (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_offlink_cmd, "ipv6 nd prefix X:X::X:X/M (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_prefix_cmd, "ipv6 nd prefix X:X::X:X/M", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n") DEFUN (no_ipv6_nd_prefix, no_ipv6_nd_prefix_cmd, "no ipv6 nd prefix IPV6PREFIX", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n") { int ret; struct interface *ifp; struct zebra_if *zebra_if; struct rtadv_prefix rp; ifp = (struct interface *) vty->index; zebra_if = ifp->info; ret = str2prefix_ipv6 (argv[0], &rp.prefix); if (!ret) { vty_out (vty, "Malformed IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } apply_mask_ipv6 (&rp.prefix); /* RFC4861 4.6.2 */ ret = rtadv_prefix_reset (zebra_if, &rp); if (!ret) { vty_out (vty, "Non-exist IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } return CMD_SUCCESS; } DEFUN (ipv6_nd_router_preference, 
ipv6_nd_router_preference_cmd, "ipv6 nd router-preference (high|medium|low)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n" "High default router preference\n" "Low default router preference\n" "Medium default router preference (default)\n") { struct interface *ifp; struct zebra_if *zif; int i = 0; ifp = (struct interface *) vty->index; zif = ifp->info; while (0 != rtadv_pref_strs[i]) { if (strncmp (argv[0], rtadv_pref_strs[i], 1) == 0) { zif->rtadv.DefaultPreference = i; return CMD_SUCCESS; } i++; } return CMD_ERR_NO_MATCH; } DEFUN (no_ipv6_nd_router_preference, no_ipv6_nd_router_preference_cmd, "no ipv6 nd router-preference", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.DefaultPreference = RTADV_PREF_MEDIUM; /* Default per RFC4191. */ return CMD_SUCCESS; } ALIAS (no_ipv6_nd_router_preference, no_ipv6_nd_router_preference_val_cmd, "no ipv6 nd router-preference (high|medium|low)", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n" "High default router preference\n" "Low default router preference\n" "Medium default router preference (default)\n") DEFUN (ipv6_nd_mtu, ipv6_nd_mtu_cmd, "ipv6 nd mtu <1-65535>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n" "MTU in bytes\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("MTU", zif->rtadv.AdvLinkMTU, argv[0], 1, 65535); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_mtu, no_ipv6_nd_mtu_cmd, "no ipv6 nd mtu", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; zif->rtadv.AdvLinkMTU = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_mtu, no_ipv6_nd_mtu_val_cmd, "no ipv6 nd mtu <1-65535>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n" "MTU in bytes\n") /* Write configuration about router advertisement. */ void rtadv_config_write (struct vty *vty, struct interface *ifp) { struct zebra_if *zif; struct listnode *node; struct rtadv_prefix *rprefix; char buf[PREFIX_STRLEN]; int interval; zif = ifp->info; if (! 
if_is_loopback (ifp)) { if (zif->rtadv.AdvSendAdvertisements) vty_out (vty, " no ipv6 nd suppress-ra%s", VTY_NEWLINE); } interval = zif->rtadv.MaxRtrAdvInterval; if (interval % 1000) vty_out (vty, " ipv6 nd ra-interval msec %d%s", interval, VTY_NEWLINE); else if (interval != RTADV_MAX_RTR_ADV_INTERVAL) vty_out (vty, " ipv6 nd ra-interval %d%s", interval / 1000, VTY_NEWLINE); if (zif->rtadv.AdvIntervalOption) vty_out (vty, " ipv6 nd adv-interval-option%s", VTY_NEWLINE); if (zif->rtadv.AdvDefaultLifetime != -1) vty_out (vty, " ipv6 nd ra-lifetime %d%s", zif->rtadv.AdvDefaultLifetime, VTY_NEWLINE); if (zif->rtadv.HomeAgentPreference) vty_out (vty, " ipv6 nd home-agent-preference %u%s", zif->rtadv.HomeAgentPreference, VTY_NEWLINE); if (zif->rtadv.HomeAgentLifetime != -1) vty_out (vty, " ipv6 nd home-agent-lifetime %u%s", zif->rtadv.HomeAgentLifetime, VTY_NEWLINE); if (zif->rtadv.AdvHomeAgentFlag) vty_out (vty, " ipv6 nd home-agent-config-flag%s", VTY_NEWLINE); if (zif->rtadv.AdvReachableTime) vty_out (vty, " ipv6 nd reachable-time %d%s", zif->rtadv.AdvReachableTime, VTY_NEWLINE); if (zif->rtadv.AdvManagedFlag) vty_out (vty, " ipv6 nd managed-config-flag%s", VTY_NEWLINE); if (zif->rtadv.AdvOtherConfigFlag) vty_out (vty, " ipv6 nd other-config-flag%s", VTY_NEWLINE); if (zif->rtadv.DefaultPreference != RTADV_PREF_MEDIUM) vty_out (vty, " ipv6 nd router-preference %s%s", rtadv_pref_strs[zif->rtadv.DefaultPreference], VTY_NEWLINE); if (zif->rtadv.AdvLinkMTU) vty_out (vty, " ipv6 nd mtu %d%s", zif->rtadv.AdvLinkMTU, VTY_NEWLINE); for (ALL_LIST_ELEMENTS_RO (zif->rtadv.AdvPrefixList, node, rprefix)) { vty_out (vty, " ipv6 nd prefix %s", prefix2str (&rprefix->prefix, buf, sizeof(buf))); if ((rprefix->AdvValidLifetime != RTADV_VALID_LIFETIME) || (rprefix->AdvPreferredLifetime != RTADV_PREFERRED_LIFETIME)) { if (rprefix->AdvValidLifetime == UINT32_MAX) vty_out (vty, " infinite"); else vty_out (vty, " %u", rprefix->AdvValidLifetime); if (rprefix->AdvPreferredLifetime == UINT32_MAX) vty_out (vty, " infinite"); else vty_out (vty, " %u", rprefix->AdvPreferredLifetime); } if (!rprefix->AdvOnLinkFlag) vty_out (vty, " off-link"); if (!rprefix->AdvAutonomousFlag) vty_out (vty, " no-autoconfig"); if (rprefix->AdvRouterAddressFlag) vty_out (vty, " router-address"); vty_out (vty, "%s", VTY_NEWLINE); } } static void rtadv_event (struct zebra_vrf *zvrf, enum rtadv_event event, int val) { struct rtadv *rtadv = &zvrf->rtadv; switch (event) { case RTADV_START: if (! rtadv->ra_read) rtadv->ra_read = thread_add_read (zebrad.master, rtadv_read, zvrf, val); if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_event (zebrad.master, rtadv_timer, zvrf, 0); break; case RTADV_STOP: if (rtadv->ra_timer) { thread_cancel (rtadv->ra_timer); rtadv->ra_timer = NULL; } if (rtadv->ra_read) { thread_cancel (rtadv->ra_read); rtadv->ra_read = NULL; } break; case RTADV_TIMER: if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_timer (zebrad.master, rtadv_timer, zvrf, val); break; case RTADV_TIMER_MSEC: if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_timer_msec (zebrad.master, rtadv_timer, zvrf, val); break; case RTADV_READ: if (! 
rtadv->ra_read) rtadv->ra_read = thread_add_read (zebrad.master, rtadv_read, zvrf, val); break; default: break; } return; } void rtadv_init (struct zebra_vrf *zvrf) { zvrf->rtadv.sock = rtadv_make_socket (zvrf->vrf_id); } void rtadv_terminate (struct zebra_vrf *zvrf) { rtadv_event (zvrf, RTADV_STOP, 0); if (zvrf->rtadv.sock >= 0) { close (zvrf->rtadv.sock); zvrf->rtadv.sock = -1; } zvrf->rtadv.adv_if_count = 0; zvrf->rtadv.adv_msec_if_count = 0; } void rtadv_cmd_init (void) { install_element (INTERFACE_NODE, &ipv6_nd_suppress_ra_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_suppress_ra_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_interval_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_interval_msec_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_val_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_msec_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_lifetime_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_reachable_time_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_reachable_time_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_reachable_time_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_managed_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_managed_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_other_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_other_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_preference_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_lifetime_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_adv_interval_config_option_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_adv_interval_config_option_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rev_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_nortaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rev_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_noauto_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_offlink_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_rev_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_noauto_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_offlink_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_prefix_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_prefix_cmd); install_element (INTERFACE_NODE, &ipv6_nd_router_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_router_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_router_preference_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_mtu_cmd); 
install_element (INTERFACE_NODE, &no_ipv6_nd_mtu_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_mtu_val_cmd); } static int if_join_all_router (int sock, struct interface *ifp) { int ret; struct ipv6_mreq mreq; memset (&mreq, 0, sizeof (struct ipv6_mreq)); inet_pton (AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr); mreq.ipv6mr_interface = ifp->ifindex; ret = setsockopt (sock, IPPROTO_IPV6, IPV6_JOIN_GROUP, (char *) &mreq, sizeof mreq); if (ret < 0) zlog_warn ("can't setsockopt IPV6_JOIN_GROUP: %s", safe_strerror (errno)); zlog_info ("rtadv: %s join to all-routers multicast group", ifp->name); return 0; } static int if_leave_all_router (int sock, struct interface *ifp) { int ret; struct ipv6_mreq mreq; memset (&mreq, 0, sizeof (struct ipv6_mreq)); inet_pton (AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr); mreq.ipv6mr_interface = ifp->ifindex; ret = setsockopt (sock, IPPROTO_IPV6, IPV6_LEAVE_GROUP, (char *) &mreq, sizeof mreq); if (ret < 0) zlog_warn ("can't setsockopt IPV6_LEAVE_GROUP: %s", safe_strerror (errno)); zlog_info ("rtadv: %s leave from all-routers multicast group", ifp->name); return 0; } #else void rtadv_init (struct zebra_vrf *zvrf) { /* Empty.*/; } void rtadv_terminate (struct zebra_vrf *zvrf) { /* Empty.*/; } void rtadv_cmd_init (void) { /* Empty.*/; } #endif /* HAVE_RTADV && HAVE_IPV6 */
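Record note for the rtadv_read entry: in the code_before blob above, rtadv_read() receives into u_char buf[RTADV_MSG_SIZE] (4096 bytes) but passes BUFSIZ as the buffer length to rtadv_recv_packet(), which uses it as the iov_len for recvmsg(). BUFSIZ is a stdio constant unrelated to the buffer and can exceed 4096 on some platforms, so an oversized router advertisement can overflow the stack buffer. The code_after blob below changes the call to pass sizeof (buf) instead. A minimal standalone C sketch of the bug and its fix, with rtadv_recv_packet()'s buflen parameter collapsed into the call site for brevity:

/* Standalone sketch of the rtadv_read() receive path.  The buffer is
 * RTADV_MSG_SIZE bytes, but the vulnerable version passes BUFSIZ, a
 * stdio constant with no relation to the buffer, as the receive
 * length, so recvmsg() can write past the end of the stack buffer. */
#include <stdio.h>       /* BUFSIZ (the wrong constant to use here) */
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

#define RTADV_MSG_SIZE 4096

static ssize_t
recv_ra (int sock)
{
  unsigned char buf[RTADV_MSG_SIZE];
  struct msghdr msg;
  struct iovec iov;

  memset (&msg, 0, sizeof (msg));
  iov.iov_base = buf;
  iov.iov_len = BUFSIZ;          /* BUG: may exceed sizeof (buf) */
  /* Fixed version: iov.iov_len = sizeof (buf); the length must be
   * derived from the buffer itself, never from an unrelated constant. */
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;

  return recvmsg (sock, &msg, 0);
}

code_after: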
/* Router advertisement * Copyright (C) 2005 6WIND <jean-mickael.guerin@6wind.com> * Copyright (C) 1999 Kunihiro Ishiguro * * This file is part of GNU Zebra. * * GNU Zebra is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * GNU Zebra is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU Zebra; see the file COPYING. If not, write to the Free * Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. */ #include <zebra.h> #include "memory.h" #include "sockopt.h" #include "thread.h" #include "if.h" #include "log.h" #include "prefix.h" #include "linklist.h" #include "command.h" #include "privs.h" #include "vrf.h" #include "zebra/interface.h" #include "zebra/rtadv.h" #include "zebra/debug.h" #include "zebra/rib.h" #include "zebra/zserv.h" extern struct zebra_privs_t zserv_privs; #if defined (HAVE_IPV6) && defined (HAVE_RTADV) #ifdef OPEN_BSD #include <netinet/icmp6.h> #endif /* If RFC2133 definition is used. */ #ifndef IPV6_JOIN_GROUP #define IPV6_JOIN_GROUP IPV6_ADD_MEMBERSHIP #endif #ifndef IPV6_LEAVE_GROUP #define IPV6_LEAVE_GROUP IPV6_DROP_MEMBERSHIP #endif #define ALLNODE "ff02::1" #define ALLROUTER "ff02::2" extern struct zebra_t zebrad; enum rtadv_event {RTADV_START, RTADV_STOP, RTADV_TIMER, RTADV_TIMER_MSEC, RTADV_READ}; static void rtadv_event (struct zebra_vrf *, enum rtadv_event, int); static int if_join_all_router (int, struct interface *); static int if_leave_all_router (int, struct interface *); static int rtadv_recv_packet (int sock, u_char *buf, int buflen, struct sockaddr_in6 *from, ifindex_t *ifindex, int *hoplimit) { int ret; struct msghdr msg; struct iovec iov; struct cmsghdr *cmsgptr; struct in6_addr dst; char adata[1024]; /* Fill in message and iovec. */ msg.msg_name = (void *) from; msg.msg_namelen = sizeof (struct sockaddr_in6); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = (void *) adata; msg.msg_controllen = sizeof adata; iov.iov_base = buf; iov.iov_len = buflen; /* If recvmsg fail return minus value. */ ret = recvmsg (sock, &msg, 0); if (ret < 0) return ret; for (cmsgptr = ZCMSG_FIRSTHDR(&msg); cmsgptr != NULL; cmsgptr = CMSG_NXTHDR(&msg, cmsgptr)) { /* I want interface index which this packet comes from. */ if (cmsgptr->cmsg_level == IPPROTO_IPV6 && cmsgptr->cmsg_type == IPV6_PKTINFO) { struct in6_pktinfo *ptr; ptr = (struct in6_pktinfo *) CMSG_DATA (cmsgptr); *ifindex = ptr->ipi6_ifindex; memcpy(&dst, &ptr->ipi6_addr, sizeof(ptr->ipi6_addr)); } /* Incoming packet's hop limit. */ if (cmsgptr->cmsg_level == IPPROTO_IPV6 && cmsgptr->cmsg_type == IPV6_HOPLIMIT) { int *hoptr = (int *) CMSG_DATA (cmsgptr); *hoplimit = *hoptr; } } return ret; } #define RTADV_MSG_SIZE 4096 /* Send router advertisement packet. 
*/ static void rtadv_send_packet (int sock, struct interface *ifp) { struct msghdr msg; struct iovec iov; struct cmsghdr *cmsgptr; struct in6_pktinfo *pkt; struct sockaddr_in6 addr; #ifdef HAVE_STRUCT_SOCKADDR_DL struct sockaddr_dl *sdl; #endif /* HAVE_STRUCT_SOCKADDR_DL */ static void *adata = NULL; unsigned char buf[RTADV_MSG_SIZE]; struct nd_router_advert *rtadv; int ret; int len = 0; struct zebra_if *zif; struct rtadv_prefix *rprefix; u_char all_nodes_addr[] = {0xff,0x02,0,0,0,0,0,0,0,0,0,0,0,0,0,1}; struct listnode *node; u_int16_t pkt_RouterLifetime; /* * Allocate control message bufffer. This is dynamic because * CMSG_SPACE is not guaranteed not to call a function. Note that * the size will be different on different architectures due to * differing alignment rules. */ if (adata == NULL) { /* XXX Free on shutdown. */ adata = malloc(CMSG_SPACE(sizeof(struct in6_pktinfo))); if (adata == NULL) zlog_err("rtadv_send_packet: can't malloc control data\n"); } /* Logging of packet. */ if (IS_ZEBRA_DEBUG_PACKET) zlog_debug ("Router advertisement send to %s", ifp->name); /* Fill in sockaddr_in6. */ memset (&addr, 0, sizeof (struct sockaddr_in6)); addr.sin6_family = AF_INET6; #ifdef SIN6_LEN addr.sin6_len = sizeof (struct sockaddr_in6); #endif /* SIN6_LEN */ addr.sin6_port = htons (IPPROTO_ICMPV6); IPV6_ADDR_COPY (&addr.sin6_addr, all_nodes_addr); /* Fetch interface information. */ zif = ifp->info; /* Make router advertisement message. */ rtadv = (struct nd_router_advert *) buf; rtadv->nd_ra_type = ND_ROUTER_ADVERT; rtadv->nd_ra_code = 0; rtadv->nd_ra_cksum = 0; rtadv->nd_ra_curhoplimit = 64; /* RFC4191: Default Router Preference is 0 if Router Lifetime is 0. */ rtadv->nd_ra_flags_reserved = zif->rtadv.AdvDefaultLifetime == 0 ? 0 : zif->rtadv.DefaultPreference; rtadv->nd_ra_flags_reserved <<= 3; if (zif->rtadv.AdvManagedFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_MANAGED; if (zif->rtadv.AdvOtherConfigFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_OTHER; if (zif->rtadv.AdvHomeAgentFlag) rtadv->nd_ra_flags_reserved |= ND_RA_FLAG_HOME_AGENT; /* Note that according to Neighbor Discovery (RFC 4861 [18]), * AdvDefaultLifetime is by default based on the value of * MaxRtrAdvInterval. AdvDefaultLifetime is used in the Router Lifetime * field of Router Advertisements. Given that this field is expressed * in seconds, a small MaxRtrAdvInterval value can result in a zero * value for this field. To prevent this, routers SHOULD keep * AdvDefaultLifetime in at least one second, even if the use of * MaxRtrAdvInterval would result in a smaller value. -- RFC6275, 7.5 */ pkt_RouterLifetime = zif->rtadv.AdvDefaultLifetime != -1 ? zif->rtadv.AdvDefaultLifetime : MAX (1, 0.003 * zif->rtadv.MaxRtrAdvInterval); rtadv->nd_ra_router_lifetime = htons (pkt_RouterLifetime); rtadv->nd_ra_reachable = htonl (zif->rtadv.AdvReachableTime); rtadv->nd_ra_retransmit = htonl (0); len = sizeof (struct nd_router_advert); /* If both the Home Agent Preference and Home Agent Lifetime are set to * their default values specified above, this option SHOULD NOT be * included in the Router Advertisement messages sent by this home * agent. 
-- RFC6275, 7.4 */ if ( zif->rtadv.AdvHomeAgentFlag && (zif->rtadv.HomeAgentPreference || zif->rtadv.HomeAgentLifetime != -1) ) { struct nd_opt_homeagent_info *ndopt_hai = (struct nd_opt_homeagent_info *)(buf + len); ndopt_hai->nd_opt_hai_type = ND_OPT_HA_INFORMATION; ndopt_hai->nd_opt_hai_len = 1; ndopt_hai->nd_opt_hai_reserved = 0; ndopt_hai->nd_opt_hai_preference = htons(zif->rtadv.HomeAgentPreference); /* 16-bit unsigned integer. The lifetime associated with the home * agent in units of seconds. The default value is the same as the * Router Lifetime, as specified in the main body of the Router * Advertisement. The maximum value corresponds to 18.2 hours. A * value of 0 MUST NOT be used. -- RFC6275, 7.5 */ ndopt_hai->nd_opt_hai_lifetime = htons ( zif->rtadv.HomeAgentLifetime != -1 ? zif->rtadv.HomeAgentLifetime : MAX (1, pkt_RouterLifetime) /* 0 is OK for RL, but not for HAL*/ ); len += sizeof(struct nd_opt_homeagent_info); } if (zif->rtadv.AdvIntervalOption) { struct nd_opt_adv_interval *ndopt_adv = (struct nd_opt_adv_interval *)(buf + len); ndopt_adv->nd_opt_ai_type = ND_OPT_ADV_INTERVAL; ndopt_adv->nd_opt_ai_len = 1; ndopt_adv->nd_opt_ai_reserved = 0; ndopt_adv->nd_opt_ai_interval = htonl(zif->rtadv.MaxRtrAdvInterval); len += sizeof(struct nd_opt_adv_interval); } /* Fill in prefix. */ for (ALL_LIST_ELEMENTS_RO (zif->rtadv.AdvPrefixList, node, rprefix)) { struct nd_opt_prefix_info *pinfo; pinfo = (struct nd_opt_prefix_info *) (buf + len); pinfo->nd_opt_pi_type = ND_OPT_PREFIX_INFORMATION; pinfo->nd_opt_pi_len = 4; pinfo->nd_opt_pi_prefix_len = rprefix->prefix.prefixlen; pinfo->nd_opt_pi_flags_reserved = 0; if (rprefix->AdvOnLinkFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_ONLINK; if (rprefix->AdvAutonomousFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_AUTO; if (rprefix->AdvRouterAddressFlag) pinfo->nd_opt_pi_flags_reserved |= ND_OPT_PI_FLAG_RADDR; pinfo->nd_opt_pi_valid_time = htonl (rprefix->AdvValidLifetime); pinfo->nd_opt_pi_preferred_time = htonl (rprefix->AdvPreferredLifetime); pinfo->nd_opt_pi_reserved2 = 0; IPV6_ADDR_COPY (&pinfo->nd_opt_pi_prefix, &rprefix->prefix.prefix); #ifdef DEBUG { u_char buf[INET6_ADDRSTRLEN]; zlog_debug ("DEBUG %s", inet_ntop (AF_INET6, &pinfo->nd_opt_pi_prefix, buf, INET6_ADDRSTRLEN)); } #endif /* DEBUG */ len += sizeof (struct nd_opt_prefix_info); } /* Hardware address. */ if (ifp->hw_addr_len != 0) { buf[len++] = ND_OPT_SOURCE_LINKADDR; /* Option length should be rounded up to next octet if the link address does not end on an octet boundary. */ buf[len++] = (ifp->hw_addr_len + 9) >> 3; memcpy (buf + len, ifp->hw_addr, ifp->hw_addr_len); len += ifp->hw_addr_len; /* Pad option to end on an octet boundary. 
*/ memset (buf + len, 0, -(ifp->hw_addr_len + 2) & 0x7); len += -(ifp->hw_addr_len + 2) & 0x7; } /* MTU */ if (zif->rtadv.AdvLinkMTU) { struct nd_opt_mtu * opt = (struct nd_opt_mtu *) (buf + len); opt->nd_opt_mtu_type = ND_OPT_MTU; opt->nd_opt_mtu_len = 1; opt->nd_opt_mtu_reserved = 0; opt->nd_opt_mtu_mtu = htonl (zif->rtadv.AdvLinkMTU); len += sizeof (struct nd_opt_mtu); } msg.msg_name = (void *) &addr; msg.msg_namelen = sizeof (struct sockaddr_in6); msg.msg_iov = &iov; msg.msg_iovlen = 1; msg.msg_control = (void *) adata; msg.msg_controllen = CMSG_SPACE(sizeof(struct in6_pktinfo)); msg.msg_flags = 0; iov.iov_base = buf; iov.iov_len = len; cmsgptr = ZCMSG_FIRSTHDR(&msg); cmsgptr->cmsg_len = CMSG_LEN(sizeof(struct in6_pktinfo)); cmsgptr->cmsg_level = IPPROTO_IPV6; cmsgptr->cmsg_type = IPV6_PKTINFO; pkt = (struct in6_pktinfo *) CMSG_DATA (cmsgptr); memset (&pkt->ipi6_addr, 0, sizeof (struct in6_addr)); pkt->ipi6_ifindex = ifp->ifindex; ret = sendmsg (sock, &msg, 0); if (ret < 0) { zlog_err ("rtadv_send_packet: sendmsg %d (%s)\n", errno, safe_strerror(errno)); } } static int rtadv_timer (struct thread *thread) { struct zebra_vrf *zvrf = THREAD_ARG (thread); struct listnode *node, *nnode; struct interface *ifp; struct zebra_if *zif; int period; zvrf->rtadv.ra_timer = NULL; if (zvrf->rtadv.adv_msec_if_count == 0) { period = 1000; /* 1 s */ rtadv_event (zvrf, RTADV_TIMER, 1 /* 1 s */); } else { period = 10; /* 10 ms */ rtadv_event (zvrf, RTADV_TIMER_MSEC, 10 /* 10 ms */); } for (ALL_LIST_ELEMENTS (vrf_iflist (zvrf->vrf_id), node, nnode, ifp)) { if (if_is_loopback (ifp) || ! if_is_operative (ifp)) continue; zif = ifp->info; if (zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvIntervalTimer -= period; if (zif->rtadv.AdvIntervalTimer <= 0) { /* FIXME: using MaxRtrAdvInterval each time isn't what section 6.2.4 of RFC4861 tells to do. */ zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval; rtadv_send_packet (zvrf->rtadv.sock, ifp); } } } return 0; } static void rtadv_process_solicit (struct interface *ifp) { struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); zlog_info ("Router solicitation received on %s vrf %u", ifp->name, zvrf->vrf_id); rtadv_send_packet (zvrf->rtadv.sock, ifp); } static void rtadv_process_advert (void) { zlog_info ("Router advertisement received"); } static void rtadv_process_packet (u_char *buf, unsigned int len, ifindex_t ifindex, int hoplimit, vrf_id_t vrf_id) { struct icmp6_hdr *icmph; struct interface *ifp; struct zebra_if *zif; /* Interface search. */ ifp = if_lookup_by_index_vrf (ifindex, vrf_id); if (ifp == NULL) { zlog_warn ("Unknown interface index: %d, vrf %u", ifindex, vrf_id); return; } if (if_is_loopback (ifp)) return; /* Check interface configuration. */ zif = ifp->info; if (! zif->rtadv.AdvSendAdvertisements) return; /* ICMP message length check. */ if (len < sizeof (struct icmp6_hdr)) { zlog_warn ("Invalid ICMPV6 packet length: %d", len); return; } icmph = (struct icmp6_hdr *) buf; /* ICMP message type check. */ if (icmph->icmp6_type != ND_ROUTER_SOLICIT && icmph->icmp6_type != ND_ROUTER_ADVERT) { zlog_warn ("Unwanted ICMPV6 message type: %d", icmph->icmp6_type); return; } /* Hoplimit check. */ if (hoplimit >= 0 && hoplimit != 255) { zlog_warn ("Invalid hoplimit %d for router advertisement ICMP packet", hoplimit); return; } /* Check ICMP message type. 
*/ if (icmph->icmp6_type == ND_ROUTER_SOLICIT) rtadv_process_solicit (ifp); else if (icmph->icmp6_type == ND_ROUTER_ADVERT) rtadv_process_advert (); return; } static int rtadv_read (struct thread *thread) { int sock; int len; u_char buf[RTADV_MSG_SIZE]; struct sockaddr_in6 from; ifindex_t ifindex = 0; int hoplimit = -1; struct zebra_vrf *zvrf = THREAD_ARG (thread); sock = THREAD_FD (thread); zvrf->rtadv.ra_read = NULL; /* Register myself. */ rtadv_event (zvrf, RTADV_READ, sock); len = rtadv_recv_packet (sock, buf, sizeof (buf), &from, &ifindex, &hoplimit); if (len < 0) { zlog_warn ("router solicitation recv failed: %s.", safe_strerror (errno)); return len; } rtadv_process_packet (buf, (unsigned)len, ifindex, hoplimit, zvrf->vrf_id); return 0; } static int rtadv_make_socket (vrf_id_t vrf_id) { int sock; int ret; struct icmp6_filter filter; if ( zserv_privs.change (ZPRIVS_RAISE) ) zlog_err ("rtadv_make_socket: could not raise privs, %s", safe_strerror (errno) ); sock = vrf_socket (AF_INET6, SOCK_RAW, IPPROTO_ICMPV6, vrf_id); if ( zserv_privs.change (ZPRIVS_LOWER) ) zlog_err ("rtadv_make_socket: could not lower privs, %s", safe_strerror (errno) ); /* When we can't make ICMPV6 socket simply back. Router advertisement feature will not be supported. */ if (sock < 0) { close (sock); return -1; } ret = setsockopt_ipv6_pktinfo (sock, 1); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_multicast_loop (sock, 0); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_unicast_hops (sock, 255); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_multicast_hops (sock, 255); if (ret < 0) { close (sock); return ret; } ret = setsockopt_ipv6_hoplimit (sock, 1); if (ret < 0) { close (sock); return ret; } ICMP6_FILTER_SETBLOCKALL(&filter); ICMP6_FILTER_SETPASS (ND_ROUTER_SOLICIT, &filter); ICMP6_FILTER_SETPASS (ND_ROUTER_ADVERT, &filter); ret = setsockopt (sock, IPPROTO_ICMPV6, ICMP6_FILTER, &filter, sizeof (struct icmp6_filter)); if (ret < 0) { zlog_info ("ICMP6_FILTER set fail: %s", safe_strerror (errno)); return ret; } return sock; } static struct rtadv_prefix * rtadv_prefix_new (void) { return XCALLOC (MTYPE_RTADV_PREFIX, sizeof (struct rtadv_prefix)); } static void rtadv_prefix_free (struct rtadv_prefix *rtadv_prefix) { XFREE (MTYPE_RTADV_PREFIX, rtadv_prefix); } static struct rtadv_prefix * rtadv_prefix_lookup (struct list *rplist, struct prefix_ipv6 *p) { struct listnode *node; struct rtadv_prefix *rprefix; for (ALL_LIST_ELEMENTS_RO (rplist, node, rprefix)) if (prefix_same ((struct prefix *) &rprefix->prefix, (struct prefix *) p)) return rprefix; return NULL; } static struct rtadv_prefix * rtadv_prefix_get (struct list *rplist, struct prefix_ipv6 *p) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_lookup (rplist, p); if (rprefix) return rprefix; rprefix = rtadv_prefix_new (); memcpy (&rprefix->prefix, p, sizeof (struct prefix_ipv6)); listnode_add (rplist, rprefix); return rprefix; } static void rtadv_prefix_set (struct zebra_if *zif, struct rtadv_prefix *rp) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_get (zif->rtadv.AdvPrefixList, &rp->prefix); /* Set parameters. 
*/ rprefix->AdvValidLifetime = rp->AdvValidLifetime; rprefix->AdvPreferredLifetime = rp->AdvPreferredLifetime; rprefix->AdvOnLinkFlag = rp->AdvOnLinkFlag; rprefix->AdvAutonomousFlag = rp->AdvAutonomousFlag; rprefix->AdvRouterAddressFlag = rp->AdvRouterAddressFlag; } static int rtadv_prefix_reset (struct zebra_if *zif, struct rtadv_prefix *rp) { struct rtadv_prefix *rprefix; rprefix = rtadv_prefix_lookup (zif->rtadv.AdvPrefixList, &rp->prefix); if (rprefix != NULL) { listnode_delete (zif->rtadv.AdvPrefixList, (void *) rprefix); rtadv_prefix_free (rprefix); return 1; } else return 0; } DEFUN (ipv6_nd_suppress_ra, ipv6_nd_suppress_ra_cmd, "ipv6 nd suppress-ra", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Suppress Router Advertisement\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (if_is_loopback (ifp)) { vty_out (vty, "Invalid interface%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvSendAdvertisements = 0; zif->rtadv.AdvIntervalTimer = 0; zvrf->rtadv.adv_if_count--; if_leave_all_router (zvrf->rtadv.sock, ifp); if (zvrf->rtadv.adv_if_count == 0) rtadv_event (zvrf, RTADV_STOP, 0); } return CMD_SUCCESS; } DEFUN (no_ipv6_nd_suppress_ra, no_ipv6_nd_suppress_ra_cmd, "no ipv6 nd suppress-ra", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Suppress Router Advertisement\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (if_is_loopback (ifp)) { vty_out (vty, "Invalid interface%s", VTY_NEWLINE); return CMD_WARNING; } if (! zif->rtadv.AdvSendAdvertisements) { zif->rtadv.AdvSendAdvertisements = 1; zif->rtadv.AdvIntervalTimer = 0; zvrf->rtadv.adv_if_count++; if_join_all_router (zvrf->rtadv.sock, ifp); if (zvrf->rtadv.adv_if_count == 1) rtadv_event (zvrf, RTADV_START, zvrf->rtadv.sock); } return CMD_SUCCESS; } DEFUN (ipv6_nd_ra_interval_msec, ipv6_nd_ra_interval_msec_cmd, "ipv6 nd ra-interval msec <70-1800000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in milliseconds\n") { unsigned interval; struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); VTY_GET_INTEGER_RANGE ("router advertisement interval", interval, argv[0], 70, 1800000); if ((zif->rtadv.AdvDefaultLifetime != -1 && interval > (unsigned)zif->rtadv.AdvDefaultLifetime * 1000)) { vty_out (vty, "This ra-interval would conflict with configured ra-lifetime!%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; if (interval % 1000) zvrf->rtadv.adv_msec_if_count++; zif->rtadv.MaxRtrAdvInterval = interval; zif->rtadv.MinRtrAdvInterval = 0.33 * interval; zif->rtadv.AdvIntervalTimer = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_ra_interval, ipv6_nd_ra_interval_cmd, "ipv6 nd ra-interval <1-1800>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in seconds\n") { unsigned interval; struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; struct zebra_vrf *zvrf = vrf_info_lookup (ifp->vrf_id); VTY_GET_INTEGER_RANGE ("router advertisement interval", interval, argv[0], 1, 1800); if ((zif->rtadv.AdvDefaultLifetime != -1 && interval > 
(unsigned)zif->rtadv.AdvDefaultLifetime)) { vty_out (vty, "This ra-interval would conflict with configured ra-lifetime!%s", VTY_NEWLINE); return CMD_WARNING; } if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; /* convert to milliseconds */ interval = interval * 1000; zif->rtadv.MaxRtrAdvInterval = interval; zif->rtadv.MinRtrAdvInterval = 0.33 * interval; zif->rtadv.AdvIntervalTimer = 0; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_cmd, "no ipv6 nd ra-interval", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n") { struct interface *ifp; struct zebra_if *zif; struct zebra_vrf *zvrf; ifp = (struct interface *) vty->index; zif = ifp->info; zvrf = vrf_info_lookup (ifp->vrf_id); if (zif->rtadv.MaxRtrAdvInterval % 1000) zvrf->rtadv.adv_msec_if_count--; zif->rtadv.MaxRtrAdvInterval = RTADV_MAX_RTR_ADV_INTERVAL; zif->rtadv.MinRtrAdvInterval = RTADV_MIN_RTR_ADV_INTERVAL; zif->rtadv.AdvIntervalTimer = zif->rtadv.MaxRtrAdvInterval; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_val_cmd, "no ipv6 nd ra-interval <1-1800>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n") ALIAS (no_ipv6_nd_ra_interval, no_ipv6_nd_ra_interval_msec_val_cmd, "no ipv6 nd ra-interval msec <1-1800000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router Advertisement interval\n" "Router Advertisement interval in milliseconds\n") DEFUN (ipv6_nd_ra_lifetime, ipv6_nd_ra_lifetime_cmd, "ipv6 nd ra-lifetime <0-9000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n" "Router lifetime in seconds (0 stands for a non-default gw)\n") { int lifetime; struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; VTY_GET_INTEGER_RANGE ("router lifetime", lifetime, argv[0], 0, 9000); /* The value to be placed in the Router Lifetime field * of Router Advertisements sent from the interface, * in seconds. MUST be either zero or between * MaxRtrAdvInterval and 9000 seconds. 
-- RFC4861, 6.2.1 */ if ((lifetime != 0 && lifetime * 1000 < zif->rtadv.MaxRtrAdvInterval)) { vty_out (vty, "This ra-lifetime would conflict with configured ra-interval%s", VTY_NEWLINE); return CMD_WARNING; } zif->rtadv.AdvDefaultLifetime = lifetime; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_ra_lifetime, no_ipv6_nd_ra_lifetime_cmd, "no ipv6 nd ra-lifetime", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvDefaultLifetime = -1; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_ra_lifetime, no_ipv6_nd_ra_lifetime_val_cmd, "no ipv6 nd ra-lifetime <0-9000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Router lifetime\n" "Router lifetime in seconds (0 stands for a non-default gw)\n") DEFUN (ipv6_nd_reachable_time, ipv6_nd_reachable_time_cmd, "ipv6 nd reachable-time <1-3600000>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n" "Reachable time in milliseconds\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("reachable time", zif->rtadv.AdvReachableTime, argv[0], 1, RTADV_MAX_REACHABLE_TIME); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_reachable_time, no_ipv6_nd_reachable_time_cmd, "no ipv6 nd reachable-time", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvReachableTime = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_reachable_time, no_ipv6_nd_reachable_time_val_cmd, "no ipv6 nd reachable-time <1-3600000>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Reachable time\n" "Reachable time in milliseconds\n") DEFUN (ipv6_nd_homeagent_preference, ipv6_nd_homeagent_preference_cmd, "ipv6 nd home-agent-preference <0-65535>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n" "preference value (default is 0, least preferred)\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent preference", zif->rtadv.HomeAgentPreference, argv[0], 0, 65535); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_preference, no_ipv6_nd_homeagent_preference_cmd, "no ipv6 nd home-agent-preference", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.HomeAgentPreference = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_homeagent_preference, no_ipv6_nd_homeagent_preference_val_cmd, "no ipv6 nd home-agent-preference <0-65535>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent preference\n" "preference value (default is 0, least preferred)\n") DEFUN (ipv6_nd_homeagent_lifetime, ipv6_nd_homeagent_lifetime_cmd, "ipv6 nd home-agent-lifetime <0-65520>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n" "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("home agent lifetime", zif->rtadv.HomeAgentLifetime, argv[0], 0, RTADV_MAX_HALIFETIME); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_lifetime, no_ipv6_nd_homeagent_lifetime_cmd, "no ipv6 nd home-agent-lifetime", NO_STR 
"Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.HomeAgentLifetime = -1; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_homeagent_lifetime, no_ipv6_nd_homeagent_lifetime_val_cmd, "no ipv6 nd home-agent-lifetime <0-65520>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent lifetime\n" "Home Agent lifetime in seconds (0 to track ra-lifetime)\n") DEFUN (ipv6_nd_managed_config_flag, ipv6_nd_managed_config_flag_cmd, "ipv6 nd managed-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Managed address configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvManagedFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_managed_config_flag, no_ipv6_nd_managed_config_flag_cmd, "no ipv6 nd managed-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Managed address configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvManagedFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_homeagent_config_flag, ipv6_nd_homeagent_config_flag_cmd, "ipv6 nd home-agent-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_homeagent_config_flag, no_ipv6_nd_homeagent_config_flag_cmd, "no ipv6 nd home-agent-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Home Agent configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvHomeAgentFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_adv_interval_config_option, ipv6_nd_adv_interval_config_option_cmd, "ipv6 nd adv-interval-option", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertisement Interval Option\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvIntervalOption = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_adv_interval_config_option, no_ipv6_nd_adv_interval_config_option_cmd, "no ipv6 nd adv-interval-option", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertisement Interval Option\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvIntervalOption = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_other_config_flag, ipv6_nd_other_config_flag_cmd, "ipv6 nd other-config-flag", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Other statefull configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 1; return CMD_SUCCESS; } DEFUN (no_ipv6_nd_other_config_flag, no_ipv6_nd_other_config_flag_cmd, "no ipv6 nd other-config-flag", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Other statefull configuration flag\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.AdvOtherConfigFlag = 0; return CMD_SUCCESS; } DEFUN (ipv6_nd_prefix, ipv6_nd_prefix_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " 
"(<0-4294967295>|infinite) (off-link|) (no-autoconfig|) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n" "Set Router Address flag\n") { int i; int ret; int cursor = 1; struct interface *ifp; struct zebra_if *zebra_if; struct rtadv_prefix rp; ifp = (struct interface *) vty->index; zebra_if = ifp->info; ret = str2prefix_ipv6 (argv[0], &rp.prefix); if (!ret) { vty_out (vty, "Malformed IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } apply_mask_ipv6 (&rp.prefix); /* RFC4861 4.6.2 */ rp.AdvOnLinkFlag = 1; rp.AdvAutonomousFlag = 1; rp.AdvRouterAddressFlag = 0; rp.AdvValidLifetime = RTADV_VALID_LIFETIME; rp.AdvPreferredLifetime = RTADV_PREFERRED_LIFETIME; if (argc > 1) { if ((isdigit((unsigned char)argv[1][0])) || strncmp (argv[1], "i", 1) == 0) { if ( strncmp (argv[1], "i", 1) == 0) rp.AdvValidLifetime = UINT32_MAX; else rp.AdvValidLifetime = (u_int32_t) strtoll (argv[1], (char **)NULL, 10); if ( strncmp (argv[2], "i", 1) == 0) rp.AdvPreferredLifetime = UINT32_MAX; else rp.AdvPreferredLifetime = (u_int32_t) strtoll (argv[2], (char **)NULL, 10); if (rp.AdvPreferredLifetime > rp.AdvValidLifetime) { vty_out (vty, "Invalid preferred lifetime%s", VTY_NEWLINE); return CMD_WARNING; } cursor = cursor + 2; } if (argc > cursor) { for (i = cursor; i < argc; i++) { if (strncmp (argv[i], "of", 2) == 0) rp.AdvOnLinkFlag = 0; if (strncmp (argv[i], "no", 2) == 0) rp.AdvAutonomousFlag = 0; if (strncmp (argv[i], "ro", 2) == 0) rp.AdvRouterAddressFlag = 1; } } } rtadv_prefix_set (zebra_if, &rp); return CMD_SUCCESS; } ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_nortaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (off-link|) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rev_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rev_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|) (off-link|) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_noauto_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid 
lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for autoconfiguration") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_offlink_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite) (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_val_cmd, "ipv6 nd prefix X:X::X:X/M (<0-4294967295>|infinite) " "(<0-4294967295>|infinite)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Valid lifetime in seconds\n" "Infinite valid lifetime\n" "Preferred lifetime in seconds\n" "Infinite preferred lifetime\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_cmd, "ipv6 nd prefix X:X::X:X/M (no-autoconfig|) (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for autoconfiguration\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_rev_cmd, "ipv6 nd prefix X:X::X:X/M (off-link|) (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for onlink determination\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_noauto_cmd, "ipv6 nd prefix X:X::X:X/M (no-autoconfig|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for autoconfiguration\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_offlink_cmd, "ipv6 nd prefix X:X::X:X/M (off-link|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Do not use prefix for onlink determination\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_noval_rtaddr_cmd, "ipv6 nd prefix X:X::X:X/M (router-address|)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n" "Set Router Address flag\n") ALIAS (ipv6_nd_prefix, ipv6_nd_prefix_prefix_cmd, "ipv6 nd prefix X:X::X:X/M", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n") DEFUN (no_ipv6_nd_prefix, no_ipv6_nd_prefix_cmd, "no ipv6 nd prefix IPV6PREFIX", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Prefix information\n" "IPv6 prefix\n") { int ret; struct interface *ifp; struct zebra_if *zebra_if; struct rtadv_prefix rp; ifp = (struct interface *) vty->index; zebra_if = ifp->info; ret = str2prefix_ipv6 (argv[0], &rp.prefix); if (!ret) { vty_out (vty, "Malformed IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } apply_mask_ipv6 (&rp.prefix); /* RFC4861 4.6.2 */ ret = rtadv_prefix_reset (zebra_if, &rp); if (!ret) { vty_out (vty, "Non-exist IPv6 prefix%s", VTY_NEWLINE); return CMD_WARNING; } return CMD_SUCCESS; } DEFUN (ipv6_nd_router_preference, 
ipv6_nd_router_preference_cmd, "ipv6 nd router-preference (high|medium|low)", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n" "High default router preference\n" "Low default router preference\n" "Medium default router preference (default)\n") { struct interface *ifp; struct zebra_if *zif; int i = 0; ifp = (struct interface *) vty->index; zif = ifp->info; while (0 != rtadv_pref_strs[i]) { if (strncmp (argv[0], rtadv_pref_strs[i], 1) == 0) { zif->rtadv.DefaultPreference = i; return CMD_SUCCESS; } i++; } return CMD_ERR_NO_MATCH; } DEFUN (no_ipv6_nd_router_preference, no_ipv6_nd_router_preference_cmd, "no ipv6 nd router-preference", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n") { struct interface *ifp; struct zebra_if *zif; ifp = (struct interface *) vty->index; zif = ifp->info; zif->rtadv.DefaultPreference = RTADV_PREF_MEDIUM; /* Default per RFC4191. */ return CMD_SUCCESS; } ALIAS (no_ipv6_nd_router_preference, no_ipv6_nd_router_preference_val_cmd, "no ipv6 nd router-preference (high|medium|low)", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Default router preference\n" "High default router preference\n" "Low default router preference\n" "Medium default router preference (default)\n") DEFUN (ipv6_nd_mtu, ipv6_nd_mtu_cmd, "ipv6 nd mtu <1-65535>", "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n" "MTU in bytes\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; VTY_GET_INTEGER_RANGE ("MTU", zif->rtadv.AdvLinkMTU, argv[0], 1, 65535); return CMD_SUCCESS; } DEFUN (no_ipv6_nd_mtu, no_ipv6_nd_mtu_cmd, "no ipv6 nd mtu", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n") { struct interface *ifp = (struct interface *) vty->index; struct zebra_if *zif = ifp->info; zif->rtadv.AdvLinkMTU = 0; return CMD_SUCCESS; } ALIAS (no_ipv6_nd_mtu, no_ipv6_nd_mtu_val_cmd, "no ipv6 nd mtu <1-65535>", NO_STR "Interface IPv6 config commands\n" "Neighbor discovery\n" "Advertised MTU\n" "MTU in bytes\n") /* Write configuration about router advertisement. */ void rtadv_config_write (struct vty *vty, struct interface *ifp) { struct zebra_if *zif; struct listnode *node; struct rtadv_prefix *rprefix; char buf[PREFIX_STRLEN]; int interval; zif = ifp->info; if (! 
if_is_loopback (ifp)) { if (zif->rtadv.AdvSendAdvertisements) vty_out (vty, " no ipv6 nd suppress-ra%s", VTY_NEWLINE); } interval = zif->rtadv.MaxRtrAdvInterval; if (interval % 1000) vty_out (vty, " ipv6 nd ra-interval msec %d%s", interval, VTY_NEWLINE); else if (interval != RTADV_MAX_RTR_ADV_INTERVAL) vty_out (vty, " ipv6 nd ra-interval %d%s", interval / 1000, VTY_NEWLINE); if (zif->rtadv.AdvIntervalOption) vty_out (vty, " ipv6 nd adv-interval-option%s", VTY_NEWLINE); if (zif->rtadv.AdvDefaultLifetime != -1) vty_out (vty, " ipv6 nd ra-lifetime %d%s", zif->rtadv.AdvDefaultLifetime, VTY_NEWLINE); if (zif->rtadv.HomeAgentPreference) vty_out (vty, " ipv6 nd home-agent-preference %u%s", zif->rtadv.HomeAgentPreference, VTY_NEWLINE); if (zif->rtadv.HomeAgentLifetime != -1) vty_out (vty, " ipv6 nd home-agent-lifetime %u%s", zif->rtadv.HomeAgentLifetime, VTY_NEWLINE); if (zif->rtadv.AdvHomeAgentFlag) vty_out (vty, " ipv6 nd home-agent-config-flag%s", VTY_NEWLINE); if (zif->rtadv.AdvReachableTime) vty_out (vty, " ipv6 nd reachable-time %d%s", zif->rtadv.AdvReachableTime, VTY_NEWLINE); if (zif->rtadv.AdvManagedFlag) vty_out (vty, " ipv6 nd managed-config-flag%s", VTY_NEWLINE); if (zif->rtadv.AdvOtherConfigFlag) vty_out (vty, " ipv6 nd other-config-flag%s", VTY_NEWLINE); if (zif->rtadv.DefaultPreference != RTADV_PREF_MEDIUM) vty_out (vty, " ipv6 nd router-preference %s%s", rtadv_pref_strs[zif->rtadv.DefaultPreference], VTY_NEWLINE); if (zif->rtadv.AdvLinkMTU) vty_out (vty, " ipv6 nd mtu %d%s", zif->rtadv.AdvLinkMTU, VTY_NEWLINE); for (ALL_LIST_ELEMENTS_RO (zif->rtadv.AdvPrefixList, node, rprefix)) { vty_out (vty, " ipv6 nd prefix %s", prefix2str (&rprefix->prefix, buf, sizeof(buf))); if ((rprefix->AdvValidLifetime != RTADV_VALID_LIFETIME) || (rprefix->AdvPreferredLifetime != RTADV_PREFERRED_LIFETIME)) { if (rprefix->AdvValidLifetime == UINT32_MAX) vty_out (vty, " infinite"); else vty_out (vty, " %u", rprefix->AdvValidLifetime); if (rprefix->AdvPreferredLifetime == UINT32_MAX) vty_out (vty, " infinite"); else vty_out (vty, " %u", rprefix->AdvPreferredLifetime); } if (!rprefix->AdvOnLinkFlag) vty_out (vty, " off-link"); if (!rprefix->AdvAutonomousFlag) vty_out (vty, " no-autoconfig"); if (rprefix->AdvRouterAddressFlag) vty_out (vty, " router-address"); vty_out (vty, "%s", VTY_NEWLINE); } } static void rtadv_event (struct zebra_vrf *zvrf, enum rtadv_event event, int val) { struct rtadv *rtadv = &zvrf->rtadv; switch (event) { case RTADV_START: if (! rtadv->ra_read) rtadv->ra_read = thread_add_read (zebrad.master, rtadv_read, zvrf, val); if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_event (zebrad.master, rtadv_timer, zvrf, 0); break; case RTADV_STOP: if (rtadv->ra_timer) { thread_cancel (rtadv->ra_timer); rtadv->ra_timer = NULL; } if (rtadv->ra_read) { thread_cancel (rtadv->ra_read); rtadv->ra_read = NULL; } break; case RTADV_TIMER: if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_timer (zebrad.master, rtadv_timer, zvrf, val); break; case RTADV_TIMER_MSEC: if (! rtadv->ra_timer) rtadv->ra_timer = thread_add_timer_msec (zebrad.master, rtadv_timer, zvrf, val); break; case RTADV_READ: if (! 
rtadv->ra_read) rtadv->ra_read = thread_add_read (zebrad.master, rtadv_read, zvrf, val); break; default: break; } return; } void rtadv_init (struct zebra_vrf *zvrf) { zvrf->rtadv.sock = rtadv_make_socket (zvrf->vrf_id); } void rtadv_terminate (struct zebra_vrf *zvrf) { rtadv_event (zvrf, RTADV_STOP, 0); if (zvrf->rtadv.sock >= 0) { close (zvrf->rtadv.sock); zvrf->rtadv.sock = -1; } zvrf->rtadv.adv_if_count = 0; zvrf->rtadv.adv_msec_if_count = 0; } void rtadv_cmd_init (void) { install_element (INTERFACE_NODE, &ipv6_nd_suppress_ra_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_suppress_ra_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_interval_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_interval_msec_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_val_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_interval_msec_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_ra_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_ra_lifetime_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_reachable_time_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_reachable_time_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_reachable_time_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_managed_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_managed_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_other_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_other_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_config_flag_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_config_flag_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_preference_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_homeagent_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_lifetime_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_homeagent_lifetime_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_adv_interval_config_option_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_adv_interval_config_option_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rev_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_nortaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rev_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_noauto_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_offlink_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_rev_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_noauto_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_offlink_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_noval_rtaddr_cmd); install_element (INTERFACE_NODE, &ipv6_nd_prefix_prefix_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_prefix_cmd); install_element (INTERFACE_NODE, &ipv6_nd_router_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_router_preference_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_router_preference_val_cmd); install_element (INTERFACE_NODE, &ipv6_nd_mtu_cmd); 
install_element (INTERFACE_NODE, &no_ipv6_nd_mtu_cmd); install_element (INTERFACE_NODE, &no_ipv6_nd_mtu_val_cmd); } static int if_join_all_router (int sock, struct interface *ifp) { int ret; struct ipv6_mreq mreq; memset (&mreq, 0, sizeof (struct ipv6_mreq)); inet_pton (AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr); mreq.ipv6mr_interface = ifp->ifindex; ret = setsockopt (sock, IPPROTO_IPV6, IPV6_JOIN_GROUP, (char *) &mreq, sizeof mreq); if (ret < 0) zlog_warn ("can't setsockopt IPV6_JOIN_GROUP: %s", safe_strerror (errno)); zlog_info ("rtadv: %s join to all-routers multicast group", ifp->name); return 0; } static int if_leave_all_router (int sock, struct interface *ifp) { int ret; struct ipv6_mreq mreq; memset (&mreq, 0, sizeof (struct ipv6_mreq)); inet_pton (AF_INET6, ALLROUTER, &mreq.ipv6mr_multiaddr); mreq.ipv6mr_interface = ifp->ifindex; ret = setsockopt (sock, IPPROTO_IPV6, IPV6_LEAVE_GROUP, (char *) &mreq, sizeof mreq); if (ret < 0) zlog_warn ("can't setsockopt IPV6_LEAVE_GROUP: %s", safe_strerror (errno)); zlog_info ("rtadv: %s leave from all-routers multicast group", ifp->name); return 0; } #else void rtadv_init (struct zebra_vrf *zvrf) { /* Empty.*/; } void rtadv_terminate (struct zebra_vrf *zvrf) { /* Empty.*/; } void rtadv_cmd_init (void) { /* Empty.*/; } #endif /* HAVE_RTADV && HAVE_IPV6 */
rtadv_read (struct thread *thread) { int sock; int len; u_char buf[RTADV_MSG_SIZE]; struct sockaddr_in6 from; ifindex_t ifindex = 0; int hoplimit = -1; struct zebra_vrf *zvrf = THREAD_ARG (thread); sock = THREAD_FD (thread); zvrf->rtadv.ra_read = NULL; /* Register myself. */ rtadv_event (zvrf, RTADV_READ, sock); len = rtadv_recv_packet (sock, buf, BUFSIZ, &from, &ifindex, &hoplimit); if (len < 0) { zlog_warn ("router solicitation recv failed: %s.", safe_strerror (errno)); return len; } rtadv_process_packet (buf, (unsigned)len, ifindex, hoplimit, zvrf->vrf_id); return 0; }
rtadv_read (struct thread *thread) { int sock; int len; u_char buf[RTADV_MSG_SIZE]; struct sockaddr_in6 from; ifindex_t ifindex = 0; int hoplimit = -1; struct zebra_vrf *zvrf = THREAD_ARG (thread); sock = THREAD_FD (thread); zvrf->rtadv.ra_read = NULL; /* Register myself. */ rtadv_event (zvrf, RTADV_READ, sock); len = rtadv_recv_packet (sock, buf, sizeof (buf), &from, &ifindex, &hoplimit); if (len < 0) { zlog_warn ("router solicitation recv failed: %s.", safe_strerror (errno)); return len; } rtadv_process_packet (buf, (unsigned)len, ifindex, hoplimit, zvrf->vrf_id); return 0; }
{'added': [(485, ' len = rtadv_recv_packet (sock, buf, sizeof (buf), &from, &ifindex, &hoplimit);')], 'deleted': [(485, ' len = rtadv_recv_packet (sock, buf, BUFSIZ, &from, &ifindex, &hoplimit);')]}
1
1
1,425
6,821
21
132
2
https://github.com/Quagga/quagga
CVE-2016-1245
CWE-119
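The one-line diff in this record is the entire fix for CVE-2016-1245: `rtadv_read()` receives into `u_char buf[RTADV_MSG_SIZE]` but told `rtadv_recv_packet()` the buffer held `BUFSIZ` bytes, a stdio constant unrelated to the array; where `BUFSIZ` is larger (commonly 8192), an oversized router advertisement can write past the stack buffer (CWE-119). Below is a minimal sketch of that pattern and the patched call shape, assuming a 4096-byte `RTADV_MSG_SIZE`; it is not Quagga code, and `fill_packet` is a hypothetical stand-in for the receive path.

#include <stdio.h>
#include <string.h>

#define RTADV_MSG_SIZE 4096            /* assumed buffer size, for illustration */

/* Stand-in for a recv()-style routine: it may write up to 'cap' bytes. */
static size_t fill_packet(unsigned char *dst, size_t cap)
{
    memset(dst, 0x41, cap);            /* the sender may use every byte we claim */
    return cap;
}

int main(void)
{
    unsigned char buf[RTADV_MSG_SIZE];

    /* BAD (pre-fix pattern):  fill_packet(buf, BUFSIZ);
     * BUFSIZ has no relation to sizeof(buf); when it is larger, the
     * call writes past the end of the stack buffer.                  */

    /* GOOD (the patched call): bound the write by the real capacity. */
    size_t n = fill_packet(buf, sizeof buf);

    printf("received %zu bytes into a %zu-byte buffer\n", n, sizeof buf);
    return 0;
}

Passing `sizeof buf` rather than a named constant also keeps the bound tied to the declaration, so the call stays correct even if the array size changes later.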
3,044
bus-polkit.c
C
bus_verify_polkit_async
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! */ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_message_handler_t callback; void *userdata; sd_bus_slot *slot; Hashmap *registry; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); free(q); } static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); q->slot = sd_bus_slot_unref(q->slot); 
q->reply = sd_bus_message_ref(reply); r = sd_bus_message_rewind(q->request, true); if (r < 0) { r = sd_bus_reply_method_errno(q->request, r, NULL); goto finish; } r = q->callback(q->request, q->userdata, &error_buffer); r = bus_maybe_reply_error(q->request, r, &error_buffer); finish: async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; const char *sender; sd_bus_message_handler_t callback; void *userdata; int c; #endif int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. */ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT if (sd_bus_get_current_message(call->bus) != call) return -EINVAL; callback = sd_bus_get_current_handler(call->bus); if (!callback) return -EINVAL; userdata = sd_bus_get_current_userdata(call->bus); sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), .callback = callback, .userdata = userdata, }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); 
if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
/* SPDX-License-Identifier: LGPL-2.1+ */ #include "bus-internal.h" #include "bus-message.h" #include "bus-polkit.h" #include "strv.h" #include "user-util.h" static int check_good_user(sd_bus_message *m, uid_t good_user) { _cleanup_(sd_bus_creds_unrefp) sd_bus_creds *creds = NULL; uid_t sender_uid; int r; assert(m); if (good_user == UID_INVALID) return 0; r = sd_bus_query_sender_creds(m, SD_BUS_CREDS_EUID, &creds); if (r < 0) return r; /* Don't trust augmented credentials for authorization */ assert_return((sd_bus_creds_get_augmented_mask(creds) & SD_BUS_CREDS_EUID) == 0, -EPERM); r = sd_bus_creds_get_euid(creds, &sender_uid); if (r < 0) return r; return sender_uid == good_user; } #if ENABLE_POLKIT static int bus_message_append_strv_key_value( sd_bus_message *m, const char **l) { const char **k, **v; int r; assert(m); r = sd_bus_message_open_container(m, 'a', "{ss}"); if (r < 0) return r; STRV_FOREACH_PAIR(k, v, l) { r = sd_bus_message_append(m, "{ss}", *k, *v); if (r < 0) return r; } r = sd_bus_message_close_container(m); if (r < 0) return r; return r; } #endif int bus_test_polkit( sd_bus_message *call, int capability, const char *action, const char **details, uid_t good_user, bool *_challenge, sd_bus_error *ret_error) { int r; assert(call); assert(action); /* Tests non-interactively! */ r = check_good_user(call, good_user); if (r != 0) return r; r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT else { _cleanup_(sd_bus_message_unrefp) sd_bus_message *request = NULL; _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL; int authorized = false, challenge = false; const char *sender; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; r = sd_bus_message_new_method_call( call->bus, &request, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( request, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(request, details); if (r < 0) return r; r = sd_bus_message_append(request, "us", 0, NULL); if (r < 0) return r; r = sd_bus_call(call->bus, request, 0, ret_error, &reply); if (r < 0) { /* Treat no PK available as access denied */ if (sd_bus_error_has_name(ret_error, SD_BUS_ERROR_SERVICE_UNKNOWN)) { sd_bus_error_free(ret_error); return -EACCES; } return r; } r = sd_bus_message_enter_container(reply, 'r', "bba{ss}"); if (r < 0) return r; r = sd_bus_message_read(reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (_challenge) { *_challenge = challenge; return 0; } } #endif return -EACCES; } #if ENABLE_POLKIT typedef struct AsyncPolkitQuery { char *action; char **details; sd_bus_message *request, *reply; sd_bus_slot *slot; Hashmap *registry; sd_event_source *defer_event_source; } AsyncPolkitQuery; static void async_polkit_query_free(AsyncPolkitQuery *q) { if (!q) return; sd_bus_slot_unref(q->slot); if (q->registry && q->request) hashmap_remove(q->registry, q->request); sd_bus_message_unref(q->request); sd_bus_message_unref(q->reply); free(q->action); strv_free(q->details); sd_event_source_disable_unref(q->defer_event_source); free(q); } static int async_polkit_defer(sd_event_source *s, void *userdata) { AsyncPolkitQuery *q = userdata; assert(s); /* This is called as idle event source after we processed the async polkit reply, hopefully after the * method call we 
re-enqueued has been properly processed. */ async_polkit_query_free(q); return 0; } static int async_polkit_callback(sd_bus_message *reply, void *userdata, sd_bus_error *error) { _cleanup_(sd_bus_error_free) sd_bus_error error_buffer = SD_BUS_ERROR_NULL; AsyncPolkitQuery *q = userdata; int r; assert(reply); assert(q); assert(q->slot); q->slot = sd_bus_slot_unref(q->slot); assert(!q->reply); q->reply = sd_bus_message_ref(reply); /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the * whole message processing again, and thus re-validating and re-retrieving the "userdata" field * again. * * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again, * i.e. after the second time the message is processed is complete. */ assert(!q->defer_event_source); r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q); if (r < 0) goto fail; r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE); if (r < 0) goto fail; r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT); if (r < 0) goto fail; r = sd_bus_message_rewind(q->request, true); if (r < 0) goto fail; r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request); if (r < 0) goto fail; return 1; fail: log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m"); (void) sd_bus_reply_method_errno(q->request, r, NULL); async_polkit_query_free(q); return r; } #endif int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; int c; #endif const char *sender; int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. 
*/ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; #if ENABLE_POLKIT c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; } void bus_verify_polkit_async_registry_free(Hashmap *registry) { #if ENABLE_POLKIT hashmap_free_with_destructor(registry, async_polkit_query_free); #endif }
int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; const char *sender; sd_bus_message_handler_t callback; void *userdata; int c; #endif int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. */ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; #if ENABLE_POLKIT if (sd_bus_get_current_message(call->bus) != call) return -EINVAL; callback = sd_bus_get_current_handler(call->bus); if (!callback) return -EINVAL; userdata = sd_bus_get_current_userdata(call->bus); sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), .callback = callback, .userdata = userdata, }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; }
int bus_verify_polkit_async( sd_bus_message *call, int capability, const char *action, const char **details, bool interactive, uid_t good_user, Hashmap **registry, sd_bus_error *ret_error) { #if ENABLE_POLKIT _cleanup_(sd_bus_message_unrefp) sd_bus_message *pk = NULL; AsyncPolkitQuery *q; int c; #endif const char *sender; int r; assert(call); assert(action); assert(registry); r = check_good_user(call, good_user); if (r != 0) return r; #if ENABLE_POLKIT q = hashmap_get(*registry, call); if (q) { int authorized, challenge; /* This is the second invocation of this function, and there's already a response from * polkit, let's process it */ assert(q->reply); /* If the operation we want to authenticate changed between the first and the second time, * let's not use this authentication, it might be out of date as the object and context we * operate on might have changed. */ if (!streq(q->action, action) || !strv_equal(q->details, (char**) details)) return -ESTALE; if (sd_bus_message_is_method_error(q->reply, NULL)) { const sd_bus_error *e; e = sd_bus_message_get_error(q->reply); /* Treat no PK available as access denied */ if (sd_bus_error_has_name(e, SD_BUS_ERROR_SERVICE_UNKNOWN) || sd_bus_error_has_name(e, SD_BUS_ERROR_NAME_HAS_NO_OWNER)) return -EACCES; /* Copy error from polkit reply */ sd_bus_error_copy(ret_error, e); return -sd_bus_error_get_errno(e); } r = sd_bus_message_enter_container(q->reply, 'r', "bba{ss}"); if (r >= 0) r = sd_bus_message_read(q->reply, "bb", &authorized, &challenge); if (r < 0) return r; if (authorized) return 1; if (challenge) return sd_bus_error_set(ret_error, SD_BUS_ERROR_INTERACTIVE_AUTHORIZATION_REQUIRED, "Interactive authentication required."); return -EACCES; } #endif r = sd_bus_query_sender_privilege(call, capability); if (r < 0) return r; else if (r > 0) return 1; sender = sd_bus_message_get_sender(call); if (!sender) return -EBADMSG; #if ENABLE_POLKIT c = sd_bus_message_get_allow_interactive_authorization(call); if (c < 0) return c; if (c > 0) interactive = true; r = hashmap_ensure_allocated(registry, NULL); if (r < 0) return r; r = sd_bus_message_new_method_call( call->bus, &pk, "org.freedesktop.PolicyKit1", "/org/freedesktop/PolicyKit1/Authority", "org.freedesktop.PolicyKit1.Authority", "CheckAuthorization"); if (r < 0) return r; r = sd_bus_message_append( pk, "(sa{sv})s", "system-bus-name", 1, "name", "s", sender, action); if (r < 0) return r; r = bus_message_append_strv_key_value(pk, details); if (r < 0) return r; r = sd_bus_message_append(pk, "us", interactive, NULL); if (r < 0) return r; q = new(AsyncPolkitQuery, 1); if (!q) return -ENOMEM; *q = (AsyncPolkitQuery) { .request = sd_bus_message_ref(call), }; q->action = strdup(action); if (!q->action) { async_polkit_query_free(q); return -ENOMEM; } q->details = strv_copy((char**) details); if (!q->details) { async_polkit_query_free(q); return -ENOMEM; } r = hashmap_put(*registry, call, q); if (r < 0) { async_polkit_query_free(q); return r; } q->registry = *registry; r = sd_bus_call_async(call->bus, &q->slot, pk, async_polkit_callback, q, 0); if (r < 0) { async_polkit_query_free(q); return r; } return 0; #endif return -EACCES; }
{'added': [(163, ''), (165, ' sd_event_source *defer_event_source;'), (183, ' sd_event_source_disable_unref(q->defer_event_source);'), (187, 'static int async_polkit_defer(sd_event_source *s, void *userdata) {'), (188, ' AsyncPolkitQuery *q = userdata;'), (189, ''), (190, ' assert(s);'), (191, ''), (192, ' /* This is called as idle event source after we processed the async polkit reply, hopefully after the'), (193, ' * method call we re-enqueued has been properly processed. */'), (194, ''), (195, ' async_polkit_query_free(q);'), (196, ' return 0;'), (197, '}'), (198, ''), (207, ' assert(q->slot);'), (209, ''), (210, ' assert(!q->reply);'), (213, " /* Now, let's dispatch the original message a second time be re-enqueing. This will then traverse the"), (214, ' * whole message processing again, and thus re-validating and re-retrieving the "userdata" field'), (215, ' * again.'), (216, ' *'), (217, ' * We install an idle event loop event to clean-up the PolicyKit request data when we are idle again,'), (218, ' * i.e. after the second time the message is processed is complete. */'), (219, ''), (220, ' assert(!q->defer_event_source);'), (221, ' r = sd_event_add_defer(sd_bus_get_event(sd_bus_message_get_bus(reply)), &q->defer_event_source, async_polkit_defer, q);'), (222, ' if (r < 0)'), (223, ' goto fail;'), (224, ''), (225, ' r = sd_event_source_set_priority(q->defer_event_source, SD_EVENT_PRIORITY_IDLE);'), (226, ' if (r < 0)'), (227, ' goto fail;'), (228, ''), (229, ' r = sd_event_source_set_enabled(q->defer_event_source, SD_EVENT_ONESHOT);'), (230, ' if (r < 0)'), (231, ' goto fail;'), (232, ''), (234, ' if (r < 0)'), (235, ' goto fail;'), (236, ''), (237, ' r = sd_bus_enqeue_for_read(sd_bus_message_get_bus(q->request), q->request);'), (238, ' if (r < 0)'), (239, ' goto fail;'), (241, ' return 1;'), (243, 'fail:'), (244, ' log_debug_errno(r, "Processing asynchronous PolicyKit reply failed, ignoring: %m");'), (245, ' (void) sd_bus_reply_method_errno(q->request, r, NULL);'), (267, ' const char *sender;'), (335, '#if ENABLE_POLKIT')], 'deleted': [(162, ' sd_bus_message_handler_t callback;'), (163, ' void *userdata;'), (169, ''), (199, ' if (r < 0) {'), (200, ' r = sd_bus_reply_method_errno(q->request, r, NULL);'), (201, ' goto finish;'), (202, ' }'), (204, ' r = q->callback(q->request, q->userdata, &error_buffer);'), (205, ' r = bus_maybe_reply_error(q->request, r, &error_buffer);'), (207, 'finish:'), (209, ''), (228, ' const char *sender;'), (229, ' sd_bus_message_handler_t callback;'), (230, ' void *userdata;'), (296, '#if ENABLE_POLKIT'), (297, ' if (sd_bus_get_current_message(call->bus) != call)'), (298, ' return -EINVAL;'), (299, ''), (300, ' callback = sd_bus_get_current_handler(call->bus);'), (301, ' if (!callback)'), (302, ' return -EINVAL;'), (303, ''), (304, ' userdata = sd_bus_get_current_userdata(call->bus);'), (305, ''), (352, ' .callback = callback,'), (353, ' .userdata = userdata,')]}
50
26
290
1,631
125
701
32
https://github.com/systemd/systemd
CVE-2020-1712
CWE-416
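CVE-2020-1712 is a heap use-after-free (CWE-416) in the asynchronous polkit query handling shown above: per the deleted lines in the diff, the pre-fix code invoked the stored callback and freed the AsyncPolkitQuery directly from the reply path, while the original bus message could still be dispatched. The added lines move cleanup into a one-shot defer event source at SD_EVENT_PRIORITY_IDLE, so the query is freed only after the re-enqueued message has been fully processed. Below is a minimal self-contained sketch of that defer-at-idle cleanup pattern, assuming libsystemd's sd-event API (build with -lsystemd); the Query struct and handler names are illustrative stand-ins, not systemd's code.

```cpp
// Sketch of the defer-at-idle cleanup pattern the fix introduces: instead of
// freeing per-request state synchronously (which allowed a use-after-free),
// schedule the free as a one-shot idle event, dispatched only once nothing
// else is pending. Query and cleanup_cb are hypothetical names.
#include <systemd/sd-event.h>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct Query {
    char *action;  // state that must outlive the current dispatch
};

// Runs only when the loop is idle, i.e. after everything already queued
// (such as a re-enqueued bus message) has been fully processed.
static int cleanup_cb(sd_event_source *s, void *userdata) {
    Query *q = static_cast<Query *>(userdata);
    std::printf("idle: freeing query for action '%s'\n", q->action);
    free(q->action);
    delete q;
    sd_event_source_unref(s);  // safe: the loop holds a ref during dispatch
    return 0;
}

int main() {
    sd_event *e = nullptr;
    if (sd_event_default(&e) < 0)
        return 1;

    Query *q = new Query{strdup("org.example.manage-units")};

    // The pre-fix pattern would free q right here; the fix instead installs
    // a one-shot defer source at idle priority, mirroring the added lines.
    sd_event_source *src = nullptr;
    if (sd_event_add_defer(e, &src, cleanup_cb, q) < 0 ||
        sd_event_source_set_priority(src, SD_EVENT_PRIORITY_IDLE) < 0 ||
        sd_event_source_set_enabled(src, SD_EVENT_ONESHOT) < 0)
        return 1;

    sd_event_run(e, UINT64_MAX);  // one loop iteration dispatches cleanup_cb
    sd_event_unref(e);
    return 0;
}
```

The real fix additionally disables and unrefs the defer source from the query destructor (sd_event_source_disable_unref in the added lines), covering the case where the query is torn down before the idle callback ever runs.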
2,435
activations.cc
C++
tflite::ops::builtin::activations::SoftmaxEval
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stddef.h> #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" #include "tensorflow/lite/kernels/internal/reference/logistic.h" #include "tensorflow/lite/kernels/internal/reference/prelu.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/reference/tanh.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #if __aarch64__ && __clang__ #include <arm_neon.h> #endif namespace tflite { namespace ops { namespace builtin { namespace activations { // TODO(b/142762739): We should figure out a multi-threading plan for most of // the activation ops below. 
enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized, }; struct OpData { int32_t input_multiplier = 0; int input_left_shift = 0; int32_t input_range_radius = 0; int diff_min = 0; uint8_t table[256] = {0}; }; struct SoftmaxOpData { struct SoftmaxParams params = {}; float table[256]; #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT uint8_t uint8_table1[256]; uint8_t uint8_table2[256]; #endif static constexpr int kInt16LUTArraySize = 513; int16_t exp_lut[kInt16LUTArraySize]; // int16 LUT for exp(x), where x uniform // distributed between [-10.0 , 0.0] int16_t one_over_one_plus_x_lut[kInt16LUTArraySize]; // int16 LUT for 1 / // (1 + x), where x // uniform distributed // between [0.0 , 1.0] }; struct LogSoftmaxOpData : public OpData { int32_t reverse_scaling_divisor = 0; int32_t reverse_scaling_right_shift = 0; struct SoftmaxParams params = {}; float f_table[256]; }; struct LeakyReluOpData : public OpData { int32_t output_multiplier_alpha = 0; int32_t output_shift_alpha = 0; int32_t output_multiplier_identity = 0; int32_t output_shift_identity = 0; }; struct PreluOpData : public OpData { int32_t output_multiplier_1 = 0; int32_t output_shift_1 = 0; int32_t output_multiplier_2 = 0; int32_t output_shift_2 = 0; bool requires_broadcast; }; struct HardSwishData { HardSwishParams params; }; struct ReluOpData : public OpData { int32_t output_multiplier = 0; int output_shift = 0; }; namespace { TfLiteStatus CheckOutputQuantParams(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* output) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); } return kTfLiteOk; } template <typename T> void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output, const std::function<float(float)>& transform) { static_assert(sizeof(T) == 1, "Lookup table valid only for 8bit"); const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits<T>::max(); int32_t minval = std::numeric_limits<T>::min(); for (int32_t val = minval; val <= maxval; ++val) { const float dequantized = input->params.scale * (val - input->params.zero_point); const float transformed = transform(dequantized); const float rescaled = std::round(transformed * inverse_scale); const int32_t quantized = static_cast<int32_t>(rescaled + output->params.zero_point); data->table[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>( static_cast<T>(std::max(std::min(maxval, quantized), minval))); } } // TODO(b/143696793): move this to optimized_ops. void EvalUsingLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); uint8_t* output_data = GetTensorData<uint8_t>(output); const uint8_t* input_data = GetTensorData<uint8_t>(input); int i = 0; #if __aarch64__ && __clang__ // This code uses ARM64-only instructions. // TODO(b/143709993): Port to ARMv7 // Load the tables into registers. (4*4 128-bit registers) uint8x16x4_t table[4]; table[0] = vld1q_u8_x4(data->table + 16 * 4 * 0); table[1] = vld1q_u8_x4(data->table + 16 * 4 * 1); table[2] = vld1q_u8_x4(data->table + 16 * 4 * 2); table[3] = vld1q_u8_x4(data->table + 16 * 4 * 3); // Vectorized loop; process uint8x16_t (16 elements) at a time. 
constexpr int vectorized_16_loop_step = 16; const int vectorized_16_loop_end = size / vectorized_16_loop_step * vectorized_16_loop_step; for (; i < vectorized_16_loop_end; i += vectorized_16_loop_step) { uint8x16_t input = vld1q_u8(input_data + i); uint8x16_t output = optimized_ops::aarch64_lookup_vector(table, input); vst1q_u8(output_data + i, output); } // Postamble and non-ARM64 code: simple for loop. #endif for (; i < size; ++i) { output_data[i] = data->table[input_data[i]]; } } template <typename T> void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input, TfLiteTensor* output, const ReluOpData* data) { ReluParams params; params.quantized_activation_min = std::max(static_cast<int32_t>(std::numeric_limits<T>::min()), output->params.zero_point + static_cast<int32>(roundf(act_min / output->params.scale))); params.quantized_activation_max = act_max == std::numeric_limits<float>::infinity() ? static_cast<int32_t>(std::numeric_limits<T>::max()) : std::min( static_cast<int32_t>(std::numeric_limits<T>::max()), output->params.zero_point + static_cast<int32>(roundf(act_max / output->params.scale))); params.input_offset = input->params.zero_point; params.output_offset = output->params.zero_point; params.output_multiplier = data->output_multiplier; params.output_shift = data->output_shift; optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } } // namespace void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). return new OpData; } void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new SoftmaxOpData; } void SoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<SoftmaxOpData*>(buffer); } void* LogSoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new LogSoftmaxOpData; } void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { return new PreluOpData; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } void LogSoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LogSoftmaxOpData*>(buffer); } void PreluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<PreluOpData*>(buffer); } void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { return new HardSwishData; } TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new ReluOpData; } void ReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<ReluOpData*>(buffer); } TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); 
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new LeakyReluOpData; } void LeakyReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LeakyReluOpData*>(buffer); } void HardSwishFree(TfLiteContext* context, void* buffer) { delete static_cast<HardSwishData*>(buffer); } TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_STATUS(GenericPrepare(context, node)); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); HardSwishParams* params = &data->params; const TfLiteTensor* input = GetInput(context, node, 0); params->input_zero_point = input->params.zero_point; params->output_zero_point = output->params.zero_point; const float input_scale = input->params.scale; const float hires_input_scale = (1.0f / 128.0f) * input_scale; const float reluish_scale = 3.0f / 32768.0f; const float output_scale = output->params.scale; const float output_multiplier = hires_input_scale / output_scale; int32_t output_multiplier_fixedpoint_int32; QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32, &params->output_multiplier_exponent); DownScaleInt32ToInt16Multiplier( output_multiplier_fixedpoint_int32, &params->output_multiplier_fixedpoint_int16); TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0); const float reluish_multiplier = hires_input_scale / reluish_scale; int32_t reluish_multiplier_fixedpoint_int32; QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_exponent); DownScaleInt32ToInt16Multiplier( reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_fixedpoint_int16); } return kTfLiteOk; } TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); double alpha_multiplier = input->params.scale * params->alpha / output->params.scale; QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, &data->output_shift_alpha); double identity_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, &data->output_shift_identity); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); 
TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { PopulateLookupTable<uint8_t>( data, input, output, [](float value) { return std::tanh(value); }); } else if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return std::tanh(value); }); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // These operators are implemented in fixed-point arithmetic, // which intrinsically wants symmetric ranges (zero_point==0) // and power-of-two scales (power-of-two is abbreviated below as POT). // While more general support would be possible by means of rescaling, // that would add some overhead and some loss of accuracy and wouldn't // be used at the moment as current quantized LSTM applications are // happy with symmetric, power-of-two-scales quantization. So we just // implement that narrow case only for now. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0 || data->input_left_shift == 1); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. / 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. data->params.uint8_table1 = data->uint8_table1; data->params.uint8_table2 = data->uint8_table2; optimized_ops::PopulateSoftmaxUInt8LookupTable( &data->params, input->params.scale, params->beta); break; #endif case kTfLiteInt16: default: data->params.table = data->table; optimized_ops::PopulateSoftmaxLookupTable( &data->params, input->params.scale, params->beta); } data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); data->params.exp_lut = data->exp_lut; // exp LUT only used on nagative values // we consider exp(-10.0) is insignificant to accumulation gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0, data->params.exp_lut, data->kInt16LUTArraySize); data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut; gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0, data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; double input_scale_beta_rescale = input->params.scale * params->beta / (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] // correspond to [-10.0, 0.0] QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier, &data->params.input_left_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256); static const double kBeta = 1.0; if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255); data->params.table = data->f_table; 
optimized_ops::PopulateSoftmaxLookupTable(&data->params, input->params.scale, kBeta); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127); static const int kScaledDiffIntegerBits = 5; tflite::PreprocessLogSoftmaxScalingExp( kBeta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift, &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift); data->reverse_scaling_right_shift *= -1; data->diff_min = -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, data->input_left_shift); } } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type); output->type = input->type; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { // prelu(x) = x if x >= 0 else x * alpha. // So if we translate that for quantized computation: // // input_float = (input_q - input_zp) * input_scale // output_float = (output_q - output_zp) * output_scale // alpha_float = (alpha_q - alpha_zp) * alpha_scale // // When input_q - input_zp >= 0: // ouput_q = (input_q - input_zp) * input_scale / output_scale + output_q // else: // output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale // * alpha_scale / output_scale + output_q // // So for input_q - input_zp >= 0: // output real multiplier 1 is input_scale / output_scale; // for input_q - input_zp < 0: // output real multiplier 2 is input_scale * alpha_scale/ output_scale. double real_multiplier_1 = input->params.scale / output->params.scale; double real_multiplier_2 = input->params.scale * alpha->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1, &data->output_shift_1); QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2, &data->output_shift_2); } data->requires_broadcast = !HaveSameShapes(input, alpha); // PRelu (parameteric Relu) shares the same alpha value on "shared axis". // This means it's always required to "broadcast" alpha values in PRelu. TfLiteIntArray* output_size = nullptr; TF_LITE_ENSURE_OK( context, CalculateShapeForBroadcast(context, input, alpha, &output_size)); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // After broadcasting, the output shape should always be the same as the // input shape. TF_LITE_ENSURE(context, HaveSameShapes(input, output)); return kTfLiteOk; } TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. 
case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(-1, 1, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int8 supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f); return kTfLiteOk; } break; case kTfLiteUInt8: QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; 
case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteInt16: { TanhParams params; params.input_left_shift = data->input_left_shift; if (kernel_type == kReference || (data->input_multiplier > 0)) { reference_integer_ops::Tanh( data->input_multiplier, data->input_left_shift, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { optimized_ops::Tanh( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } // Sigmoid is also know as "Logistic". 
template <KernelType kernel_type> TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, TfLiteSoftmaxParams* params) { SoftmaxParams op_params; op_params.beta = params->beta; optimized_ops::Softmax(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } template <typename In, typename Out> TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<In>(input), GetTensorShape(output), GetTensorData<Out>(output)); return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return 
kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) { reference_ops::SoftmaxInt16( data->params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Only 1D, 2D, 3D and 4D tensors supported for int16 " "input with int16 output, got %dD.", NumDimensions(input)); return kTfLiteError; } } TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), 
GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); TfLiteTensor* output = GetOutput(context, node, 0); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), 
GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and uint8 and int8 are supported currently, got %d.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output, const LeakyReluOpData* data) { LeakyReluParams op_params; op_params.input_offset = input->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_alpha = data->output_multiplier_alpha; op_params.output_shift_alpha = data->output_shift_alpha; op_params.output_multiplier_identity = data->output_multiplier_identity; op_params.output_shift_identity = data->output_shift_identity; reference_ops::QuantizeLeakyRelu( op_params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); const LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); LeakyReluParams op_params; switch (input->type) { case kTfLiteFloat32: { op_params.alpha = params->alpha; optimized_ops::LeakyRelu( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizeLeakyRelu<uint8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizeLeakyRelu<int8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt16: { QuantizeLeakyRelu<int16_t>(input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, int8, int16 and uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); OpData* data = reinterpret_cast<OpData*>(node->user_data); // Use LUT to handle quantized elu path. if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return value < 0.0 ? 
std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); } TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } } // namespace activations TfLiteRegistration* Register_ELU() { static TfLiteRegistration r = {activations::Init, activations::Free, activations::EluPrepare, activations::EluEval}; return &r; } TfLiteRegistration* Register_RELU() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::ReluEval}; return &r; } TfLiteRegistration* Register_RELU_N1_TO_1() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu1Eval}; return &r; } TfLiteRegistration* Register_RELU6() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu6Eval}; return &r; } TfLiteRegistration* Register_TANH_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kReference>, activations::TanhEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_TANH_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kGenericOptimized>, activations::TanhEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kFixedPointOptimized>, activations::TanhEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_TANH() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. return Register_TANH_GENERIC_OPT(); } TfLiteRegistration* Register_LOGISTIC_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kReference>, activations::SigmoidEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kGenericOptimized>, activations::SigmoidEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kFixedPointOptimized>, activations::SigmoidEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
return Register_LOGISTIC_GENERIC_OPT(); } TfLiteRegistration* Register_SOFTMAX() { static TfLiteRegistration r = { activations::SoftmaxInit, activations::SoftmaxFree, activations::SoftmaxPrepare, activations::SoftmaxEval}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX_REF() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_PRELU_REF() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_PRELU() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LEAKY_RELU() { static TfLiteRegistration r = { activations::LeakyReluInit, activations::LeakyReluFree, activations::LeakyReluPrepare, activations::LeakyReluEval}; return &r; } TfLiteRegistration* Register_HARD_SWISH() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_HARD_SWISH_REF() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kReference>}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
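The quantized tanh, sigmoid, and elu paths in the file above share one technique: Prepare() precomputes a 256-entry table mapping every possible 8-bit input through the activation (PopulateLookupTable), and Eval() reduces to a single table lookup per element (EvalUsingLookupTable). A standalone sketch of that idea follows; the scale and zero-point values are made-up examples, not taken from a real model.

```cpp
// Standalone sketch of the 8-bit lookup-table activation used above:
// precompute requantize(f(dequantize(v))) for all 256 int8 values once,
// then evaluation is one table lookup per element.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
    const float in_scale = 0.05f, out_scale = 1.0f / 128.0f;  // example values
    const int in_zp = 0, out_zp = 0;

    // Prepare()-time work: build the table once.
    uint8_t table[256];
    for (int32_t v = -128; v <= 127; ++v) {
        const float x = in_scale * static_cast<float>(v - in_zp);
        const float y = std::tanh(x);  // the activation being approximated
        const int32_t q =
            static_cast<int32_t>(std::round(y / out_scale)) + out_zp;
        const int32_t clamped = std::min<int32_t>(127, std::max<int32_t>(-128, q));
        // Index by the raw byte pattern of the int8 value, as the kernel does.
        table[static_cast<uint8_t>(static_cast<int8_t>(v))] =
            static_cast<uint8_t>(static_cast<int8_t>(clamped));
    }

    // Eval()-time work: one lookup per element.
    const int8_t input[4] = {-128, -10, 0, 127};
    int8_t output[4];
    for (int i = 0; i < 4; ++i)
        output[i] = static_cast<int8_t>(table[static_cast<uint8_t>(input[i])]);

    for (int i = 0; i < 4; ++i)
        std::printf("tanh_q(%4d) = %4d\n", input[i], output[i]);
    return 0;
}
```

The aarch64 path in EvalUsingLookupTable vectorizes exactly this lookup, loading the table into registers with vld1q_u8_x4 and translating 16 elements per iteration.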
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stddef.h> #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" #include "tensorflow/lite/kernels/internal/reference/logistic.h" #include "tensorflow/lite/kernels/internal/reference/prelu.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/reference/tanh.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #if __aarch64__ && __clang__ #include <arm_neon.h> #endif namespace tflite { namespace ops { namespace builtin { namespace activations { // TODO(b/142762739): We should figure out a multi-threading plan for most of // the activation ops below. 
enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized, }; struct OpData { int32_t input_multiplier = 0; int input_left_shift = 0; int32_t input_range_radius = 0; int diff_min = 0; uint8_t table[256] = {0}; }; struct SoftmaxOpData { struct SoftmaxParams params = {}; float table[256]; #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT uint8_t uint8_table1[256]; uint8_t uint8_table2[256]; #endif static constexpr int kInt16LUTArraySize = 513; int16_t exp_lut[kInt16LUTArraySize]; // int16 LUT for exp(x), where x uniform // distributed between [-10.0 , 0.0] int16_t one_over_one_plus_x_lut[kInt16LUTArraySize]; // int16 LUT for 1 / // (1 + x), where x // uniform distributed // between [0.0 , 1.0] }; struct LogSoftmaxOpData : public OpData { int32_t reverse_scaling_divisor = 0; int32_t reverse_scaling_right_shift = 0; struct SoftmaxParams params = {}; float f_table[256]; }; struct LeakyReluOpData : public OpData { int32_t output_multiplier_alpha = 0; int32_t output_shift_alpha = 0; int32_t output_multiplier_identity = 0; int32_t output_shift_identity = 0; }; struct PreluOpData : public OpData { int32_t output_multiplier_1 = 0; int32_t output_shift_1 = 0; int32_t output_multiplier_2 = 0; int32_t output_shift_2 = 0; bool requires_broadcast; }; struct HardSwishData { HardSwishParams params; }; struct ReluOpData : public OpData { int32_t output_multiplier = 0; int output_shift = 0; }; namespace { TfLiteStatus CheckOutputQuantParams(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* output) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); } return kTfLiteOk; } template <typename T> void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output, const std::function<float(float)>& transform) { static_assert(sizeof(T) == 1, "Lookup table valid only for 8bit"); const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits<T>::max(); int32_t minval = std::numeric_limits<T>::min(); for (int32_t val = minval; val <= maxval; ++val) { const float dequantized = input->params.scale * (val - input->params.zero_point); const float transformed = transform(dequantized); const float rescaled = std::round(transformed * inverse_scale); const int32_t quantized = static_cast<int32_t>(rescaled + output->params.zero_point); data->table[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>( static_cast<T>(std::max(std::min(maxval, quantized), minval))); } } // TODO(b/143696793): move this to optimized_ops. void EvalUsingLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); uint8_t* output_data = GetTensorData<uint8_t>(output); const uint8_t* input_data = GetTensorData<uint8_t>(input); int i = 0; #if __aarch64__ && __clang__ // This code uses ARM64-only instructions. // TODO(b/143709993): Port to ARMv7 // Load the tables into registers. (4*4 128-bit registers) uint8x16x4_t table[4]; table[0] = vld1q_u8_x4(data->table + 16 * 4 * 0); table[1] = vld1q_u8_x4(data->table + 16 * 4 * 1); table[2] = vld1q_u8_x4(data->table + 16 * 4 * 2); table[3] = vld1q_u8_x4(data->table + 16 * 4 * 3); // Vectorized loop; process uint8x16_t (16 elements) at a time. 
constexpr int vectorized_16_loop_step = 16; const int vectorized_16_loop_end = size / vectorized_16_loop_step * vectorized_16_loop_step; for (; i < vectorized_16_loop_end; i += vectorized_16_loop_step) { uint8x16_t input = vld1q_u8(input_data + i); uint8x16_t output = optimized_ops::aarch64_lookup_vector(table, input); vst1q_u8(output_data + i, output); } // Postamble and non-ARM64 code: simple for loop. #endif for (; i < size; ++i) { output_data[i] = data->table[input_data[i]]; } } template <typename T> void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input, TfLiteTensor* output, const ReluOpData* data) { ReluParams params; params.quantized_activation_min = std::max(static_cast<int32_t>(std::numeric_limits<T>::min()), output->params.zero_point + static_cast<int32>(roundf(act_min / output->params.scale))); params.quantized_activation_max = act_max == std::numeric_limits<float>::infinity() ? static_cast<int32_t>(std::numeric_limits<T>::max()) : std::min( static_cast<int32_t>(std::numeric_limits<T>::max()), output->params.zero_point + static_cast<int32>(roundf(act_max / output->params.scale))); params.input_offset = input->params.zero_point; params.output_offset = output->params.zero_point; params.output_multiplier = data->output_multiplier; params.output_shift = data->output_shift; optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } } // namespace void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). return new OpData; } void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new SoftmaxOpData; } void SoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<SoftmaxOpData*>(buffer); } void* LogSoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new LogSoftmaxOpData; } void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { return new PreluOpData; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } void LogSoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LogSoftmaxOpData*>(buffer); } void PreluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<PreluOpData*>(buffer); } void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { return new HardSwishData; } TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new ReluOpData; } void ReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<ReluOpData*>(buffer); } TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; 
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new LeakyReluOpData; } void LeakyReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LeakyReluOpData*>(buffer); } void HardSwishFree(TfLiteContext* context, void* buffer) { delete static_cast<HardSwishData*>(buffer); } TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_STATUS(GenericPrepare(context, node)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); HardSwishParams* params = &data->params; const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); params->input_zero_point = input->params.zero_point; params->output_zero_point = output->params.zero_point; const float input_scale = input->params.scale; const float hires_input_scale = (1.0f / 128.0f) * input_scale; const float reluish_scale = 3.0f / 32768.0f; const float output_scale = output->params.scale; const float output_multiplier = hires_input_scale / output_scale; int32_t output_multiplier_fixedpoint_int32; QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32, &params->output_multiplier_exponent); DownScaleInt32ToInt16Multiplier( output_multiplier_fixedpoint_int32, &params->output_multiplier_fixedpoint_int16); TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0); const float reluish_multiplier = hires_input_scale / reluish_scale; int32_t reluish_multiplier_fixedpoint_int32; QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_exponent); DownScaleInt32ToInt16Multiplier( reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_fixedpoint_int16); } return kTfLiteOk; } TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); double alpha_multiplier = input->params.scale * params->alpha / output->params.scale; QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, &data->output_shift_alpha); double identity_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, &data->output_shift_identity); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } 
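// Background note on the QuantizeMultiplier() calls used throughout these Prepare functions: the real-valued multiplier is decomposed into a Q31 fixed-point mantissa and a power-of-two exponent, so that real_multiplier ~= multiplier * 2^(shift - 31). For example, a real multiplier of 0.75 yields multiplier = 0x60000000 (0.75 in Q31) and shift = 0; the kernels can then apply it with an integer multiply-and-shift instead of floating-point math.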
template <KernelType kernel_type> TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { PopulateLookupTable<uint8_t>( data, input, output, [](float value) { return std::tanh(value); }); } else if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return std::tanh(value); }); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // These operators are implemented in fixed-point arithmetic, // which intrinsically wants symmetric ranges (zero_point==0) // and power-of-two scales (power-of-two is abbreviated below as POT). // While more general support would be possible by means of rescaling, // that would add some overhead and some loss of accuracy and wouldn't // be used at the moment as current quantized LSTM applications are // happy with symmetric, power-of-two-scales quantization. So we just // implement that narrow case only for now. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0 || data->input_left_shift == 1); if (!param_scale_pot) { // In the case of a general scale parameter, we need to do a rescaling. // Magic constant 4096: we need to scale down from the 16-bit interval // (-2^15, 2^15) to (-2^3, 2^3), where 3 is kInputIntegerBits, so we // multiply by 2^(15 - kInputIntegerBits) = 2^12 = 4096.
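// For example, an input scale of 1.0f / 1024 passes the CheckedLog2 test but gives input_left_shift == 2, so this rescaling path is taken and input_multiplier becomes 4096 / 1024 = 4.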
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. / 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In the case of a general scale parameter, we need to do a rescaling. // Magic constant 4096: we need to scale down from the 16-bit interval // (-2^15, 2^15) to (-2^3, 2^3), where 3 is kInputIntegerBits, so we // multiply by 2^(15 - kInputIntegerBits) = 2^12 = 4096.
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. data->params.uint8_table1 = data->uint8_table1; data->params.uint8_table2 = data->uint8_table2; optimized_ops::PopulateSoftmaxUInt8LookupTable( &data->params, input->params.scale, params->beta); break; #endif case kTfLiteInt16: default: data->params.table = data->table; optimized_ops::PopulateSoftmaxLookupTable( &data->params, input->params.scale, params->beta); } data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); data->params.exp_lut = data->exp_lut; // The exp LUT is only used on negative values; // we consider exp(-10.0) insignificant to the accumulation. gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0, data->params.exp_lut, data->kInt16LUTArraySize); data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut; gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0, data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; double input_scale_beta_rescale = input->params.scale * params->beta / (10.0 / 65535.0); // Scale the input_diff such that [-65535, 0] // corresponds to [-10.0, 0.0]. QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier, &data->params.input_left_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256); static const
double kBeta = 1.0; if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255); data->params.table = data->f_table; optimized_ops::PopulateSoftmaxLookupTable(&data->params, input->params.scale, kBeta); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127); static const int kScaledDiffIntegerBits = 5; tflite::PreprocessLogSoftmaxScalingExp( kBeta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift, &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift); data->reverse_scaling_right_shift *= -1; data->diff_min = -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, data->input_left_shift); } } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* alpha; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha)); PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type); output->type = input->type; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { // prelu(x) = x if x >= 0 else x * alpha. // So if we translate that for quantized computation: // // input_float = (input_q - input_zp) * input_scale // output_float = (output_q - output_zp) * output_scale // alpha_float = (alpha_q - alpha_zp) * alpha_scale // // When input_q - input_zp >= 0: // output_q = (input_q - input_zp) * input_scale / output_scale + output_zp // else: // output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale // * alpha_scale / output_scale + output_zp // // So for input_q - input_zp >= 0: // output real multiplier 1 is input_scale / output_scale; // for input_q - input_zp < 0: // output real multiplier 2 is input_scale * alpha_scale / output_scale. // For example (illustrative values): input_scale = 0.5, alpha_scale = 0.25 // and output_scale = 0.5 give real multipliers of 1.0 and 0.25. double real_multiplier_1 = input->params.scale / output->params.scale; double real_multiplier_2 = input->params.scale * alpha->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1, &data->output_shift_1); QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2, &data->output_shift_2); } data->requires_broadcast = !HaveSameShapes(input, alpha); // PRelu (parametric Relu) shares the same alpha value on the "shared axis". // This means it's always required to "broadcast" alpha values in PRelu. TfLiteIntArray* output_size = nullptr; TF_LITE_ENSURE_OK( context, CalculateShapeForBroadcast(context, input, alpha, &output_size)); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // After broadcasting, the output shape should always be the same as the // input shape.
TF_LITE_ENSURE(context, HaveSameShapes(input, output)); return kTfLiteOk; } TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(-1, 1, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int8 supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), 
GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f); return kTfLiteOk; } break; case kTfLiteUInt8: QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteInt16: { TanhParams params; params.input_left_shift = data->input_left_shift; if (kernel_type == kReference || (data->input_multiplier > 0)) { reference_integer_ops::Tanh( data->input_multiplier, data->input_left_shift, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { optimized_ops::Tanh( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only 
float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } // Sigmoid is also known as "Logistic". template <KernelType kernel_type> TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, TfLiteSoftmaxParams* params) { SoftmaxParams op_params; op_params.beta = params->beta; optimized_ops::Softmax(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } template <typename In, typename Out> TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<In>(input), GetTensorShape(output), GetTensorData<Out>(output)); return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input),
GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) { reference_ops::SoftmaxInt16( data->params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Only 1D, 2D, 3D and 4D tensors supported for int16 " "input with int16 output, got %dD.", NumDimensions(input)); return kTfLiteError; } } TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), 
GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); const TfLiteTensor* alpha; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; 
op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output, const LeakyReluOpData* data) { LeakyReluParams op_params; op_params.input_offset = input->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_alpha = data->output_multiplier_alpha; op_params.output_shift_alpha = data->output_shift_alpha; op_params.output_multiplier_identity = data->output_multiplier_identity; op_params.output_shift_identity = data->output_shift_identity; reference_ops::QuantizeLeakyRelu( op_params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); const LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); LeakyReluParams op_params; switch (input->type) { case kTfLiteFloat32: { op_params.alpha = params->alpha; optimized_ops::LeakyRelu( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizeLeakyRelu<uint8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizeLeakyRelu<int8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt16: { QuantizeLeakyRelu<int16_t>(input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, int8, int16 and uint8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return
kTfLiteError; } } TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // Use LUT to handle quantized elu path. if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return value < 0.0 ? std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); } TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } } // namespace activations TfLiteRegistration* Register_ELU() { static TfLiteRegistration r = {activations::Init, activations::Free, activations::EluPrepare, activations::EluEval}; return &r; } TfLiteRegistration* Register_RELU() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::ReluEval}; return &r; } TfLiteRegistration* Register_RELU_N1_TO_1() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu1Eval}; return &r; } TfLiteRegistration* Register_RELU6() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu6Eval}; return &r; } TfLiteRegistration* Register_TANH_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kReference>, activations::TanhEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_TANH_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kGenericOptimized>, activations::TanhEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kFixedPointOptimized>, activations::TanhEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_TANH() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
return Register_TANH_GENERIC_OPT(); } TfLiteRegistration* Register_LOGISTIC_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kReference>, activations::SigmoidEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kGenericOptimized>, activations::SigmoidEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kFixedPointOptimized>, activations::SigmoidEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. return Register_LOGISTIC_GENERIC_OPT(); } TfLiteRegistration* Register_SOFTMAX() { static TfLiteRegistration r = { activations::SoftmaxInit, activations::SoftmaxFree, activations::SoftmaxPrepare, activations::SoftmaxEval}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX_REF() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_PRELU_REF() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_PRELU() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LEAKY_RELU() { static TfLiteRegistration r = { activations::LeakyReluInit, activations::LeakyReluFree, activations::LeakyReluPrepare, activations::LeakyReluEval}; return &r; } TfLiteRegistration* Register_HARD_SWISH() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_HARD_SWISH_REF() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kReference>}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
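The two function snapshots below show the method this record tracks, before and after the patch. The only difference between them is the accessor pattern that the fix for CVE-2020-15211 applies throughout the file: the unchecked GetInput()/GetOutput() calls hand back a tensor pointer without validating the tensor index, while GetInputSafe()/GetOutputSafe() report failure through a TfLiteStatus, so a malformed model fails cleanly instead of triggering an out-of-bounds read (CWE-125). A minimal sketch of the pattern follows; the Prepare function names are hypothetical, but the accessor calls are the ones used in the file above.

// Before: the returned pointer is used without any validation.
TfLiteStatus UncheckedPrepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  // input/output may be invalid if the model's tensor indices are malformed.
  return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
}

// After: failures surface as kTfLiteError instead of a bad memory access.
TfLiteStatus CheckedPrepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims));
}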
TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
{'added': [(255, ' const TfLiteTensor* input;'), (256, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (257, ' TfLiteTensor* output;'), (258, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (277, ' const TfLiteTensor* input;'), (278, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (279, ' TfLiteTensor* output;'), (280, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (307, ' TfLiteTensor* output;'), (308, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (313, ' const TfLiteTensor* input;'), (314, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (346, ' const TfLiteTensor* input;'), (347, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (348, ' TfLiteTensor* output;'), (349, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (377, ' const TfLiteTensor* input;'), (378, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (379, ' TfLiteTensor* output;'), (380, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (464, ' const TfLiteTensor* input;'), (465, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (466, ' TfLiteTensor* output;'), (467, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (561, ' const TfLiteTensor* input;'), (562, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (563, ' TfLiteTensor* output;'), (564, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (631, ' const TfLiteTensor* input;'), (632, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (633, ' TfLiteTensor* output;'), (634, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (669, ' const TfLiteTensor* input;'), (670, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (671, ' TfLiteTensor* output;'), (672, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (673, ' const TfLiteTensor* alpha;'), (674, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'), (726, ' const TfLiteTensor* input;'), (727, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (728, ' TfLiteTensor* output;'), (729, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (756, ' const TfLiteTensor* input;'), (757, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (758, ' TfLiteTensor* output;'), (759, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (789, ' const TfLiteTensor* input;'), (790, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (791, ' TfLiteTensor* output;'), (792, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (842, ' const TfLiteTensor* input;'), (843, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (844, ' TfLiteTensor* output;'), (845, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (875, ' const TfLiteTensor* input;'), (876, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (877, ' TfLiteTensor* output;'), (878, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (951, ' const TfLiteTensor* input;'), (952, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (953, ' TfLiteTensor* output;'), (954, ' TF_LITE_ENSURE_OK(context, 
GetOutputSafe(context, node, 0, &output));'), (1101, ' const TfLiteTensor* input;'), (1102, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1103, ' TfLiteTensor* output;'), (1104, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1158, ' const TfLiteTensor* input;'), (1159, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1160, ' TfLiteTensor* output;'), (1161, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1221, ' const TfLiteTensor* input;'), (1222, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1223, ' const TfLiteTensor* alpha;'), (1224, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'), (1225, ' TfLiteTensor* output;'), (1226, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1335, ' const TfLiteTensor* input;'), (1336, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1337, ' TfLiteTensor* output;'), (1338, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1375, ' const TfLiteTensor* input;'), (1376, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1377, ' TfLiteTensor* output;'), (1378, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1391, ' const TfLiteTensor* input;'), (1392, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1393, ' TfLiteTensor* output;'), (1394, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));')], 'deleted': [(255, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (256, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (275, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (276, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (303, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (308, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (340, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (341, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (369, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (370, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (454, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (455, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (549, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (550, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (617, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (618, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (653, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (654, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (655, ' const TfLiteTensor* alpha = GetInput(context, node, 1);'), (707, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (708, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (735, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (736, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (766, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (767, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (817, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (818, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (848, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (849, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (922, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (923, ' 
TfLiteTensor* output = GetOutput(context, node, 0);'), (1070, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1071, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1125, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1126, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1186, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1187, ' const TfLiteTensor* alpha = GetInput(context, node, 1);'), (1188, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1297, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1298, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1335, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1336, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1349, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1350, ' TfLiteTensor* output = GetOutput(context, node, 0);')]}
num_lines_added: 88
num_lines_deleted: 44
num_lines_in_file: 1,316
num_tokens_in_file: 9,729
num_lines_in_method: 51
num_tokens_in_method: 282
method_complexity: 9
repo: https://github.com/tensorflow/tensorflow
cve_id: CVE-2020-15211
cwe_id: CWE-125
id: 3,259
file_name: rdppm.c
programming_language: C
method_name: get_word_rgb_row
/* * rdppm.c * * This file was part of the Independent JPEG Group's software: * Copyright (C) 1991-1997, Thomas G. Lane. * Modified 2009 by Bill Allombert, Guido Vollbeding. * libjpeg-turbo Modifications: * Copyright (C) 2015-2017, D. R. Commander. * For conditions of distribution and use, see the accompanying README.ijg * file. * * This file contains routines to read input images in PPM/PGM format. * The extended 2-byte-per-sample raw PPM/PGM formats are supported. * The PBMPLUS library is NOT required to compile this software * (but it is highly useful as a set of PPM image manipulation programs). * * These routines may need modification for non-Unix environments or * specialized applications. As they stand, they assume input from * an ordinary stdio stream. They further assume that reading begins * at the start of the file; start_input may need work if the * user interface has already read some data (e.g., to determine that * the file is indeed PPM format). */ #include "cmyk.h" #include "cdjpeg.h" /* Common decls for cjpeg/djpeg applications */ #ifdef PPM_SUPPORTED /* Portions of this code are based on the PBMPLUS library, which is: ** ** Copyright (C) 1988 by Jef Poskanzer. ** ** Permission to use, copy, modify, and distribute this software and its ** documentation for any purpose and without fee is hereby granted, provided ** that the above copyright notice appear in all copies and that both that ** copyright notice and this permission notice appear in supporting ** documentation. This software is provided "as is" without express or ** implied warranty. */ /* Macros to deal with unsigned chars as efficiently as compiler allows */ #ifdef HAVE_UNSIGNED_CHAR typedef unsigned char U_CHAR; #define UCH(x) ((int)(x)) #else /* !HAVE_UNSIGNED_CHAR */ #ifdef __CHAR_UNSIGNED__ typedef char U_CHAR; #define UCH(x) ((int)(x)) #else typedef char U_CHAR; #define UCH(x) ((int)(x) & 0xFF) #endif #endif /* HAVE_UNSIGNED_CHAR */ #define ReadOK(file, buffer, len) \ (JFREAD(file, buffer, len) == ((size_t)(len))) static int alpha_index[JPEG_NUMCS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 3, 0, 0, -1 }; /* Private version of data source object */ typedef struct { struct cjpeg_source_struct pub; /* public fields */ /* Usually these two pointers point to the same place: */ U_CHAR *iobuffer; /* fread's I/O buffer */ JSAMPROW pixrow; /* compressor input buffer */ size_t buffer_width; /* width of I/O buffer */ JSAMPLE *rescale; /* => maxval-remapping array, or NULL */ int maxval; } ppm_source_struct; typedef ppm_source_struct *ppm_source_ptr; LOCAL(int) pbm_getc(FILE *infile) /* Read next char, skipping over any comments */ /* A comment/newline sequence is returned as a newline */ { register int ch; ch = getc(infile); if (ch == '#') { do { ch = getc(infile); } while (ch != '\n' && ch != EOF); } return ch; } LOCAL(unsigned int) read_pbm_integer(j_compress_ptr cinfo, FILE *infile, unsigned int maxval) /* Read an unsigned decimal integer from the PPM file */ /* Swallows one trailing character after the integer */ /* Note that on a 16-bit-int machine, only values up to 64k can be read. */ /* This should not be a problem in practice. 
*/ { register int ch; register unsigned int val; /* Skip any leading whitespace */ do { ch = pbm_getc(infile); if (ch == EOF) ERREXIT(cinfo, JERR_INPUT_EOF); } while (ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'); if (ch < '0' || ch > '9') ERREXIT(cinfo, JERR_PPM_NONNUMERIC); val = ch - '0'; while ((ch = pbm_getc(infile)) >= '0' && ch <= '9') { val *= 10; val += ch - '0'; } if (val > maxval) ERREXIT(cinfo, JERR_PPM_TOOLARGE); return val; } /* * Read one row of pixels. * * We provide several different versions depending on input file format. * In all cases, input is scaled to the size of JSAMPLE. * * A really fast path is provided for reading byte/sample raw files with * maxval = MAXJSAMPLE, which is the normal case for 8-bit data. */ METHODDEF(JDIMENSION) get_text_gray_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PGM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; ptr = source->pub.buffer[0]; for (col = cinfo->image_width; col > 0; col--) { *ptr++ = rescale[read_pbm_integer(cinfo, infile, maxval)]; } return 1; } #define GRAY_RGB_READ_LOOP(read_op, alpha_set_op) { \ for (col = cinfo->image_width; col > 0; col--) { \ ptr[rindex] = ptr[gindex] = ptr[bindex] = read_op; \ alpha_set_op \ ptr += ps; \ } \ } METHODDEF(JDIMENSION) get_text_gray_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PGM files with any maxval and converting to extended RGB */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; register int rindex = rgb_red[cinfo->in_color_space]; register int gindex = rgb_green[cinfo->in_color_space]; register int bindex = rgb_blue[cinfo->in_color_space]; register int aindex = alpha_index[cinfo->in_color_space]; register int ps = rgb_pixelsize[cinfo->in_color_space]; ptr = source->pub.buffer[0]; if (maxval == MAXJSAMPLE) { if (aindex >= 0) GRAY_RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval), ptr[aindex] = 0xFF;) else GRAY_RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval),) } else { if (aindex >= 0) GRAY_RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)], ptr[aindex] = 0xFF;) else GRAY_RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)],) } return 1; } METHODDEF(JDIMENSION) get_text_gray_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PGM files with any maxval and converting to CMYK */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; ptr = source->pub.buffer[0]; if (maxval == MAXJSAMPLE) { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE gray = read_pbm_integer(cinfo, infile, maxval); rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } else { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE gray = rescale[read_pbm_integer(cinfo, infile, maxval)]; rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } return 1; } #define RGB_READ_LOOP(read_op, alpha_set_op) { \ for (col = cinfo->image_width; col > 0; col--) { \ ptr[rindex] = read_op; \ ptr[gindex] = 
read_op; \ ptr[bindex] = read_op; \ alpha_set_op \ ptr += ps; \ } \ } METHODDEF(JDIMENSION) get_text_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PPM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; register int rindex = rgb_red[cinfo->in_color_space]; register int gindex = rgb_green[cinfo->in_color_space]; register int bindex = rgb_blue[cinfo->in_color_space]; register int aindex = alpha_index[cinfo->in_color_space]; register int ps = rgb_pixelsize[cinfo->in_color_space]; ptr = source->pub.buffer[0]; if (maxval == MAXJSAMPLE) { if (aindex >= 0) RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval), ptr[aindex] = 0xFF;) else RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval),) } else { if (aindex >= 0) RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)], ptr[aindex] = 0xFF;) else RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)],) } return 1; } METHODDEF(JDIMENSION) get_text_rgb_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PPM files with any maxval and converting to CMYK */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; ptr = source->pub.buffer[0]; if (maxval == MAXJSAMPLE) { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE r = read_pbm_integer(cinfo, infile, maxval); JSAMPLE g = read_pbm_integer(cinfo, infile, maxval); JSAMPLE b = read_pbm_integer(cinfo, infile, maxval); rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } else { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE r = rescale[read_pbm_integer(cinfo, infile, maxval)]; JSAMPLE g = rescale[read_pbm_integer(cinfo, infile, maxval)]; JSAMPLE b = rescale[read_pbm_integer(cinfo, infile, maxval)]; rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } return 1; } METHODDEF(JDIMENSION) get_scaled_gray_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PGM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; for (col = cinfo->image_width; col > 0; col--) { *ptr++ = rescale[UCH(*bufferptr++)]; } return 1; } METHODDEF(JDIMENSION) get_gray_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PGM files with any maxval and converting to extended RGB */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; register int rindex = rgb_red[cinfo->in_color_space]; register int gindex = rgb_green[cinfo->in_color_space]; register int bindex = rgb_blue[cinfo->in_color_space]; register int aindex = alpha_index[cinfo->in_color_space]; register int ps = rgb_pixelsize[cinfo->in_color_space]; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = 
source->pub.buffer[0]; bufferptr = source->iobuffer; if (maxval == MAXJSAMPLE) { if (aindex >= 0) GRAY_RGB_READ_LOOP(*bufferptr++, ptr[aindex] = 0xFF;) else GRAY_RGB_READ_LOOP(*bufferptr++,) } else { if (aindex >= 0) GRAY_RGB_READ_LOOP(rescale[UCH(*bufferptr++)], ptr[aindex] = 0xFF;) else GRAY_RGB_READ_LOOP(rescale[UCH(*bufferptr++)],) } return 1; } METHODDEF(JDIMENSION) get_gray_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PGM files with any maxval and converting to CMYK */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; if (maxval == MAXJSAMPLE) { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE gray = *bufferptr++; rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } else { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE gray = rescale[UCH(*bufferptr++)]; rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } return 1; } METHODDEF(JDIMENSION) get_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PPM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; register int rindex = rgb_red[cinfo->in_color_space]; register int gindex = rgb_green[cinfo->in_color_space]; register int bindex = rgb_blue[cinfo->in_color_space]; register int aindex = alpha_index[cinfo->in_color_space]; register int ps = rgb_pixelsize[cinfo->in_color_space]; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; if (maxval == MAXJSAMPLE) { if (aindex >= 0) RGB_READ_LOOP(*bufferptr++, ptr[aindex] = 0xFF;) else RGB_READ_LOOP(*bufferptr++,) } else { if (aindex >= 0) RGB_READ_LOOP(rescale[UCH(*bufferptr++)], ptr[aindex] = 0xFF;) else RGB_READ_LOOP(rescale[UCH(*bufferptr++)],) } return 1; } METHODDEF(JDIMENSION) get_rgb_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PPM files with any maxval and converting to CMYK */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; if (maxval == MAXJSAMPLE) { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE r = *bufferptr++; JSAMPLE g = *bufferptr++; JSAMPLE b = *bufferptr++; rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } else { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE r = rescale[UCH(*bufferptr++)]; JSAMPLE g = rescale[UCH(*bufferptr++)]; JSAMPLE b = rescale[UCH(*bufferptr++)]; rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } return 1; } METHODDEF(JDIMENSION) get_raw_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format files with maxval = MAXJSAMPLE. 
* In this case we just read right into the JSAMPLE buffer! * Note that same code works for PPM and PGM files. */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); return 1; } METHODDEF(JDIMENSION) get_word_gray_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-word-format PGM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; for (col = cinfo->image_width; col > 0; col--) { register unsigned int temp; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_TOOLARGE); *ptr++ = rescale[temp]; } return 1; } METHODDEF(JDIMENSION) get_word_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-word-format PPM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; for (col = cinfo->image_width; col > 0; col--) { register unsigned int temp; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_TOOLARGE); *ptr++ = rescale[temp]; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_TOOLARGE); *ptr++ = rescale[temp]; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_TOOLARGE); *ptr++ = rescale[temp]; } return 1; } /* * Read the file header; return image size and component count. */ METHODDEF(void) start_input_ppm(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) { ppm_source_ptr source = (ppm_source_ptr)sinfo; int c; unsigned int w, h, maxval; boolean need_iobuffer, use_raw_buffer, need_rescale; if (getc(source->pub.input_file) != 'P') ERREXIT(cinfo, JERR_PPM_NOT); c = getc(source->pub.input_file); /* subformat discriminator character */ /* detect unsupported variants (ie, PBM) before trying to read header */ switch (c) { case '2': /* it's a text-format PGM file */ case '3': /* it's a text-format PPM file */ case '5': /* it's a raw-format PGM file */ case '6': /* it's a raw-format PPM file */ break; default: ERREXIT(cinfo, JERR_PPM_NOT); break; } /* fetch the remaining header info */ w = read_pbm_integer(cinfo, source->pub.input_file, 65535); h = read_pbm_integer(cinfo, source->pub.input_file, 65535); maxval = read_pbm_integer(cinfo, source->pub.input_file, 65535); if (w <= 0 || h <= 0 || maxval <= 0) /* error check */ ERREXIT(cinfo, JERR_PPM_NOT); cinfo->data_precision = BITS_IN_JSAMPLE; /* we always rescale data to this */ cinfo->image_width = (JDIMENSION)w; cinfo->image_height = (JDIMENSION)h; source->maxval = maxval; /* initialize flags to most common settings */ need_iobuffer = TRUE; /* do we need an I/O buffer? */ use_raw_buffer = FALSE; /* do we map input buffer onto I/O buffer? */ need_rescale = TRUE; /* do we need a rescale array? 
*/ switch (c) { case '2': /* it's a text-format PGM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_GRAYSCALE; TRACEMS2(cinfo, 1, JTRC_PGM_TEXT, w, h); if (cinfo->in_color_space == JCS_GRAYSCALE) source->pub.get_pixel_rows = get_text_gray_row; else if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_text_gray_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_text_gray_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); need_iobuffer = FALSE; break; case '3': /* it's a text-format PPM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_EXT_RGB; TRACEMS2(cinfo, 1, JTRC_PPM_TEXT, w, h); if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_text_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_text_rgb_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); need_iobuffer = FALSE; break; case '5': /* it's a raw-format PGM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_GRAYSCALE; TRACEMS2(cinfo, 1, JTRC_PGM, w, h); if (maxval > 255) { source->pub.get_pixel_rows = get_word_gray_row; } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) && cinfo->in_color_space == JCS_GRAYSCALE) { source->pub.get_pixel_rows = get_raw_row; use_raw_buffer = TRUE; need_rescale = FALSE; } else { if (cinfo->in_color_space == JCS_GRAYSCALE) source->pub.get_pixel_rows = get_scaled_gray_row; else if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_gray_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_gray_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); } break; case '6': /* it's a raw-format PPM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_EXT_RGB; TRACEMS2(cinfo, 1, JTRC_PPM, w, h); if (maxval > 255) { source->pub.get_pixel_rows = get_word_rgb_row; } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) && (cinfo->in_color_space == JCS_EXT_RGB #if RGB_RED == 0 && RGB_GREEN == 1 && RGB_BLUE == 2 && RGB_PIXELSIZE == 3 || cinfo->in_color_space == JCS_RGB #endif )) { source->pub.get_pixel_rows = get_raw_row; use_raw_buffer = TRUE; need_rescale = FALSE; } else { if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_rgb_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); } break; } if (IsExtRGB(cinfo->in_color_space)) cinfo->input_components = rgb_pixelsize[cinfo->in_color_space]; else if (cinfo->in_color_space == JCS_GRAYSCALE) cinfo->input_components = 1; else if (cinfo->in_color_space == JCS_CMYK) cinfo->input_components = 4; /* Allocate space for I/O buffer: 1 or 3 bytes or words/pixel. */ if (need_iobuffer) { if (c == '6') source->buffer_width = (size_t)w * 3 * ((maxval <= 255) ? sizeof(U_CHAR) : (2 * sizeof(U_CHAR))); else source->buffer_width = (size_t)w * ((maxval <= 255) ? sizeof(U_CHAR) : (2 * sizeof(U_CHAR))); source->iobuffer = (U_CHAR *) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, source->buffer_width); } /* Create compressor input buffer. */ if (use_raw_buffer) { /* For unscaled raw-input case, we can just map it onto the I/O buffer. */ /* Synthesize a JSAMPARRAY pointer structure */ source->pixrow = (JSAMPROW)source->iobuffer; source->pub.buffer = &source->pixrow; source->pub.buffer_height = 1; } else { /* Need to translate anyway, so make a separate sample buffer. 
*/ source->pub.buffer = (*cinfo->mem->alloc_sarray) ((j_common_ptr)cinfo, JPOOL_IMAGE, (JDIMENSION)w * cinfo->input_components, (JDIMENSION)1); source->pub.buffer_height = 1; } /* Compute the rescaling array if required. */ if (need_rescale) { long val, half_maxval; /* On 16-bit-int machines we have to be careful of maxval = 65535 */ source->rescale = (JSAMPLE *) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, (size_t)(((long)maxval + 1L) * sizeof(JSAMPLE))); half_maxval = maxval / 2; for (val = 0; val <= (long)maxval; val++) { /* The multiplication here must be done in 32 bits to avoid overflow */ source->rescale[val] = (JSAMPLE)((val * MAXJSAMPLE + half_maxval) / maxval); } } } /* * Finish up at the end of the file. */ METHODDEF(void) finish_input_ppm(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) { /* no work */ } /* * The module selection routine for PPM format input. */ GLOBAL(cjpeg_source_ptr) jinit_read_ppm(j_compress_ptr cinfo) { ppm_source_ptr source; /* Create module interface object */ source = (ppm_source_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, sizeof(ppm_source_struct)); /* Fill in method ptrs, except get_pixel_rows which start_input sets */ source->pub.start_input = start_input_ppm; source->pub.finish_input = finish_input_ppm; return (cjpeg_source_ptr)source; } #endif /* PPM_SUPPORTED */
/* * rdppm.c * * This file was part of the Independent JPEG Group's software: * Copyright (C) 1991-1997, Thomas G. Lane. * Modified 2009 by Bill Allombert, Guido Vollbeding. * libjpeg-turbo Modifications: * Copyright (C) 2015-2017, D. R. Commander. * For conditions of distribution and use, see the accompanying README.ijg * file. * * This file contains routines to read input images in PPM/PGM format. * The extended 2-byte-per-sample raw PPM/PGM formats are supported. * The PBMPLUS library is NOT required to compile this software * (but it is highly useful as a set of PPM image manipulation programs). * * These routines may need modification for non-Unix environments or * specialized applications. As they stand, they assume input from * an ordinary stdio stream. They further assume that reading begins * at the start of the file; start_input may need work if the * user interface has already read some data (e.g., to determine that * the file is indeed PPM format). */ #include "cmyk.h" #include "cdjpeg.h" /* Common decls for cjpeg/djpeg applications */ #ifdef PPM_SUPPORTED /* Portions of this code are based on the PBMPLUS library, which is: ** ** Copyright (C) 1988 by Jef Poskanzer. ** ** Permission to use, copy, modify, and distribute this software and its ** documentation for any purpose and without fee is hereby granted, provided ** that the above copyright notice appear in all copies and that both that ** copyright notice and this permission notice appear in supporting ** documentation. This software is provided "as is" without express or ** implied warranty. */ /* Macros to deal with unsigned chars as efficiently as compiler allows */ #ifdef HAVE_UNSIGNED_CHAR typedef unsigned char U_CHAR; #define UCH(x) ((int)(x)) #else /* !HAVE_UNSIGNED_CHAR */ #ifdef __CHAR_UNSIGNED__ typedef char U_CHAR; #define UCH(x) ((int)(x)) #else typedef char U_CHAR; #define UCH(x) ((int)(x) & 0xFF) #endif #endif /* HAVE_UNSIGNED_CHAR */ #define ReadOK(file, buffer, len) \ (JFREAD(file, buffer, len) == ((size_t)(len))) static int alpha_index[JPEG_NUMCS] = { -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, 3, 0, 0, -1 }; /* Private version of data source object */ typedef struct { struct cjpeg_source_struct pub; /* public fields */ /* Usually these two pointers point to the same place: */ U_CHAR *iobuffer; /* fread's I/O buffer */ JSAMPROW pixrow; /* compressor input buffer */ size_t buffer_width; /* width of I/O buffer */ JSAMPLE *rescale; /* => maxval-remapping array, or NULL */ unsigned int maxval; } ppm_source_struct; typedef ppm_source_struct *ppm_source_ptr; LOCAL(int) pbm_getc(FILE *infile) /* Read next char, skipping over any comments */ /* A comment/newline sequence is returned as a newline */ { register int ch; ch = getc(infile); if (ch == '#') { do { ch = getc(infile); } while (ch != '\n' && ch != EOF); } return ch; } LOCAL(unsigned int) read_pbm_integer(j_compress_ptr cinfo, FILE *infile, unsigned int maxval) /* Read an unsigned decimal integer from the PPM file */ /* Swallows one trailing character after the integer */ /* Note that on a 16-bit-int machine, only values up to 64k can be read. */ /* This should not be a problem in practice. 
*/ { register int ch; register unsigned int val; /* Skip any leading whitespace */ do { ch = pbm_getc(infile); if (ch == EOF) ERREXIT(cinfo, JERR_INPUT_EOF); } while (ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'); if (ch < '0' || ch > '9') ERREXIT(cinfo, JERR_PPM_NONNUMERIC); val = ch - '0'; while ((ch = pbm_getc(infile)) >= '0' && ch <= '9') { val *= 10; val += ch - '0'; } if (val > maxval) ERREXIT(cinfo, JERR_PPM_OUTOFRANGE); return val; } /* * Read one row of pixels. * * We provide several different versions depending on input file format. * In all cases, input is scaled to the size of JSAMPLE. * * A really fast path is provided for reading byte/sample raw files with * maxval = MAXJSAMPLE, which is the normal case for 8-bit data. */ METHODDEF(JDIMENSION) get_text_gray_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PGM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; ptr = source->pub.buffer[0]; for (col = cinfo->image_width; col > 0; col--) { *ptr++ = rescale[read_pbm_integer(cinfo, infile, maxval)]; } return 1; } #define GRAY_RGB_READ_LOOP(read_op, alpha_set_op) { \ for (col = cinfo->image_width; col > 0; col--) { \ ptr[rindex] = ptr[gindex] = ptr[bindex] = read_op; \ alpha_set_op \ ptr += ps; \ } \ } METHODDEF(JDIMENSION) get_text_gray_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PGM files with any maxval and converting to extended RGB */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; register int rindex = rgb_red[cinfo->in_color_space]; register int gindex = rgb_green[cinfo->in_color_space]; register int bindex = rgb_blue[cinfo->in_color_space]; register int aindex = alpha_index[cinfo->in_color_space]; register int ps = rgb_pixelsize[cinfo->in_color_space]; ptr = source->pub.buffer[0]; if (maxval == MAXJSAMPLE) { if (aindex >= 0) GRAY_RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval), ptr[aindex] = 0xFF;) else GRAY_RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval),) } else { if (aindex >= 0) GRAY_RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)], ptr[aindex] = 0xFF;) else GRAY_RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)],) } return 1; } METHODDEF(JDIMENSION) get_text_gray_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PGM files with any maxval and converting to CMYK */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; ptr = source->pub.buffer[0]; if (maxval == MAXJSAMPLE) { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE gray = read_pbm_integer(cinfo, infile, maxval); rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } else { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE gray = rescale[read_pbm_integer(cinfo, infile, maxval)]; rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } return 1; } #define RGB_READ_LOOP(read_op, alpha_set_op) { \ for (col = cinfo->image_width; col > 0; col--) { \ ptr[rindex] = read_op; \ ptr[gindex] 
= read_op; \ ptr[bindex] = read_op; \ alpha_set_op \ ptr += ps; \ } \ } METHODDEF(JDIMENSION) get_text_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PPM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; register int rindex = rgb_red[cinfo->in_color_space]; register int gindex = rgb_green[cinfo->in_color_space]; register int bindex = rgb_blue[cinfo->in_color_space]; register int aindex = alpha_index[cinfo->in_color_space]; register int ps = rgb_pixelsize[cinfo->in_color_space]; ptr = source->pub.buffer[0]; if (maxval == MAXJSAMPLE) { if (aindex >= 0) RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval), ptr[aindex] = 0xFF;) else RGB_READ_LOOP(read_pbm_integer(cinfo, infile, maxval),) } else { if (aindex >= 0) RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)], ptr[aindex] = 0xFF;) else RGB_READ_LOOP(rescale[read_pbm_integer(cinfo, infile, maxval)],) } return 1; } METHODDEF(JDIMENSION) get_text_rgb_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading text-format PPM files with any maxval and converting to CMYK */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; FILE *infile = source->pub.input_file; register JSAMPROW ptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; ptr = source->pub.buffer[0]; if (maxval == MAXJSAMPLE) { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE r = read_pbm_integer(cinfo, infile, maxval); JSAMPLE g = read_pbm_integer(cinfo, infile, maxval); JSAMPLE b = read_pbm_integer(cinfo, infile, maxval); rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } else { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE r = rescale[read_pbm_integer(cinfo, infile, maxval)]; JSAMPLE g = rescale[read_pbm_integer(cinfo, infile, maxval)]; JSAMPLE b = rescale[read_pbm_integer(cinfo, infile, maxval)]; rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } return 1; } METHODDEF(JDIMENSION) get_scaled_gray_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PGM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; for (col = cinfo->image_width; col > 0; col--) { *ptr++ = rescale[UCH(*bufferptr++)]; } return 1; } METHODDEF(JDIMENSION) get_gray_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PGM files with any maxval and converting to extended RGB */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; register int rindex = rgb_red[cinfo->in_color_space]; register int gindex = rgb_green[cinfo->in_color_space]; register int bindex = rgb_blue[cinfo->in_color_space]; register int aindex = alpha_index[cinfo->in_color_space]; register int ps = rgb_pixelsize[cinfo->in_color_space]; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = 
source->pub.buffer[0]; bufferptr = source->iobuffer; if (maxval == MAXJSAMPLE) { if (aindex >= 0) GRAY_RGB_READ_LOOP(*bufferptr++, ptr[aindex] = 0xFF;) else GRAY_RGB_READ_LOOP(*bufferptr++,) } else { if (aindex >= 0) GRAY_RGB_READ_LOOP(rescale[UCH(*bufferptr++)], ptr[aindex] = 0xFF;) else GRAY_RGB_READ_LOOP(rescale[UCH(*bufferptr++)],) } return 1; } METHODDEF(JDIMENSION) get_gray_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PGM files with any maxval and converting to CMYK */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; if (maxval == MAXJSAMPLE) { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE gray = *bufferptr++; rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } else { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE gray = rescale[UCH(*bufferptr++)]; rgb_to_cmyk(gray, gray, gray, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } return 1; } METHODDEF(JDIMENSION) get_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PPM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; register int rindex = rgb_red[cinfo->in_color_space]; register int gindex = rgb_green[cinfo->in_color_space]; register int bindex = rgb_blue[cinfo->in_color_space]; register int aindex = alpha_index[cinfo->in_color_space]; register int ps = rgb_pixelsize[cinfo->in_color_space]; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; if (maxval == MAXJSAMPLE) { if (aindex >= 0) RGB_READ_LOOP(*bufferptr++, ptr[aindex] = 0xFF;) else RGB_READ_LOOP(*bufferptr++,) } else { if (aindex >= 0) RGB_READ_LOOP(rescale[UCH(*bufferptr++)], ptr[aindex] = 0xFF;) else RGB_READ_LOOP(rescale[UCH(*bufferptr++)],) } return 1; } METHODDEF(JDIMENSION) get_rgb_cmyk_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format PPM files with any maxval and converting to CMYK */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; if (maxval == MAXJSAMPLE) { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE r = *bufferptr++; JSAMPLE g = *bufferptr++; JSAMPLE b = *bufferptr++; rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } else { for (col = cinfo->image_width; col > 0; col--) { JSAMPLE r = rescale[UCH(*bufferptr++)]; JSAMPLE g = rescale[UCH(*bufferptr++)]; JSAMPLE b = rescale[UCH(*bufferptr++)]; rgb_to_cmyk(r, g, b, ptr, ptr + 1, ptr + 2, ptr + 3); ptr += 4; } } return 1; } METHODDEF(JDIMENSION) get_raw_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-byte-format files with maxval = MAXJSAMPLE. 
* In this case we just read right into the JSAMPLE buffer! * Note that same code works for PPM and PGM files. */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); return 1; } METHODDEF(JDIMENSION) get_word_gray_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-word-format PGM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; for (col = cinfo->image_width; col > 0; col--) { register unsigned int temp; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_OUTOFRANGE); *ptr++ = rescale[temp]; } return 1; } METHODDEF(JDIMENSION) get_word_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-word-format PPM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; for (col = cinfo->image_width; col > 0; col--) { register unsigned int temp; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_OUTOFRANGE); *ptr++ = rescale[temp]; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_OUTOFRANGE); *ptr++ = rescale[temp]; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_OUTOFRANGE); *ptr++ = rescale[temp]; } return 1; } /* * Read the file header; return image size and component count. */ METHODDEF(void) start_input_ppm(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) { ppm_source_ptr source = (ppm_source_ptr)sinfo; int c; unsigned int w, h, maxval; boolean need_iobuffer, use_raw_buffer, need_rescale; if (getc(source->pub.input_file) != 'P') ERREXIT(cinfo, JERR_PPM_NOT); c = getc(source->pub.input_file); /* subformat discriminator character */ /* detect unsupported variants (ie, PBM) before trying to read header */ switch (c) { case '2': /* it's a text-format PGM file */ case '3': /* it's a text-format PPM file */ case '5': /* it's a raw-format PGM file */ case '6': /* it's a raw-format PPM file */ break; default: ERREXIT(cinfo, JERR_PPM_NOT); break; } /* fetch the remaining header info */ w = read_pbm_integer(cinfo, source->pub.input_file, 65535); h = read_pbm_integer(cinfo, source->pub.input_file, 65535); maxval = read_pbm_integer(cinfo, source->pub.input_file, 65535); if (w <= 0 || h <= 0 || maxval <= 0) /* error check */ ERREXIT(cinfo, JERR_PPM_NOT); cinfo->data_precision = BITS_IN_JSAMPLE; /* we always rescale data to this */ cinfo->image_width = (JDIMENSION)w; cinfo->image_height = (JDIMENSION)h; source->maxval = maxval; /* initialize flags to most common settings */ need_iobuffer = TRUE; /* do we need an I/O buffer? */ use_raw_buffer = FALSE; /* do we map input buffer onto I/O buffer? */ need_rescale = TRUE; /* do we need a rescale array? 
*/ switch (c) { case '2': /* it's a text-format PGM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_GRAYSCALE; TRACEMS2(cinfo, 1, JTRC_PGM_TEXT, w, h); if (cinfo->in_color_space == JCS_GRAYSCALE) source->pub.get_pixel_rows = get_text_gray_row; else if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_text_gray_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_text_gray_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); need_iobuffer = FALSE; break; case '3': /* it's a text-format PPM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_EXT_RGB; TRACEMS2(cinfo, 1, JTRC_PPM_TEXT, w, h); if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_text_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_text_rgb_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); need_iobuffer = FALSE; break; case '5': /* it's a raw-format PGM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_GRAYSCALE; TRACEMS2(cinfo, 1, JTRC_PGM, w, h); if (maxval > 255) { source->pub.get_pixel_rows = get_word_gray_row; } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) && cinfo->in_color_space == JCS_GRAYSCALE) { source->pub.get_pixel_rows = get_raw_row; use_raw_buffer = TRUE; need_rescale = FALSE; } else { if (cinfo->in_color_space == JCS_GRAYSCALE) source->pub.get_pixel_rows = get_scaled_gray_row; else if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_gray_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_gray_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); } break; case '6': /* it's a raw-format PPM file */ if (cinfo->in_color_space == JCS_UNKNOWN) cinfo->in_color_space = JCS_EXT_RGB; TRACEMS2(cinfo, 1, JTRC_PPM, w, h); if (maxval > 255) { source->pub.get_pixel_rows = get_word_rgb_row; } else if (maxval == MAXJSAMPLE && sizeof(JSAMPLE) == sizeof(U_CHAR) && (cinfo->in_color_space == JCS_EXT_RGB #if RGB_RED == 0 && RGB_GREEN == 1 && RGB_BLUE == 2 && RGB_PIXELSIZE == 3 || cinfo->in_color_space == JCS_RGB #endif )) { source->pub.get_pixel_rows = get_raw_row; use_raw_buffer = TRUE; need_rescale = FALSE; } else { if (IsExtRGB(cinfo->in_color_space)) source->pub.get_pixel_rows = get_rgb_row; else if (cinfo->in_color_space == JCS_CMYK) source->pub.get_pixel_rows = get_rgb_cmyk_row; else ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE); } break; } if (IsExtRGB(cinfo->in_color_space)) cinfo->input_components = rgb_pixelsize[cinfo->in_color_space]; else if (cinfo->in_color_space == JCS_GRAYSCALE) cinfo->input_components = 1; else if (cinfo->in_color_space == JCS_CMYK) cinfo->input_components = 4; /* Allocate space for I/O buffer: 1 or 3 bytes or words/pixel. */ if (need_iobuffer) { if (c == '6') source->buffer_width = (size_t)w * 3 * ((maxval <= 255) ? sizeof(U_CHAR) : (2 * sizeof(U_CHAR))); else source->buffer_width = (size_t)w * ((maxval <= 255) ? sizeof(U_CHAR) : (2 * sizeof(U_CHAR))); source->iobuffer = (U_CHAR *) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, source->buffer_width); } /* Create compressor input buffer. */ if (use_raw_buffer) { /* For unscaled raw-input case, we can just map it onto the I/O buffer. */ /* Synthesize a JSAMPARRAY pointer structure */ source->pixrow = (JSAMPROW)source->iobuffer; source->pub.buffer = &source->pixrow; source->pub.buffer_height = 1; } else { /* Need to translate anyway, so make a separate sample buffer. 
*/ source->pub.buffer = (*cinfo->mem->alloc_sarray) ((j_common_ptr)cinfo, JPOOL_IMAGE, (JDIMENSION)w * cinfo->input_components, (JDIMENSION)1); source->pub.buffer_height = 1; } /* Compute the rescaling array if required. */ if (need_rescale) { long val, half_maxval; /* On 16-bit-int machines we have to be careful of maxval = 65535 */ source->rescale = (JSAMPLE *) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, (size_t)(((long)maxval + 1L) * sizeof(JSAMPLE))); half_maxval = maxval / 2; for (val = 0; val <= (long)maxval; val++) { /* The multiplication here must be done in 32 bits to avoid overflow */ source->rescale[val] = (JSAMPLE)((val * MAXJSAMPLE + half_maxval) / maxval); } } } /* * Finish up at the end of the file. */ METHODDEF(void) finish_input_ppm(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) { /* no work */ } /* * The module selection routine for PPM format input. */ GLOBAL(cjpeg_source_ptr) jinit_read_ppm(j_compress_ptr cinfo) { ppm_source_ptr source; /* Create module interface object */ source = (ppm_source_ptr) (*cinfo->mem->alloc_small) ((j_common_ptr)cinfo, JPOOL_IMAGE, sizeof(ppm_source_struct)); /* Fill in method ptrs, except get_pixel_rows which start_input sets */ source->pub.start_input = start_input_ppm; source->pub.finish_input = finish_input_ppm; return (cjpeg_source_ptr)source; } #endif /* PPM_SUPPORTED */
get_word_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-word-format PPM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; for (col = cinfo->image_width; col > 0; col--) { register unsigned int temp; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_TOOLARGE); *ptr++ = rescale[temp]; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_TOOLARGE); *ptr++ = rescale[temp]; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_TOOLARGE); *ptr++ = rescale[temp]; } return 1; }
get_word_rgb_row(j_compress_ptr cinfo, cjpeg_source_ptr sinfo) /* This version is for reading raw-word-format PPM files with any maxval */ { ppm_source_ptr source = (ppm_source_ptr)sinfo; register JSAMPROW ptr; register U_CHAR *bufferptr; register JSAMPLE *rescale = source->rescale; JDIMENSION col; unsigned int maxval = source->maxval; if (!ReadOK(source->pub.input_file, source->iobuffer, source->buffer_width)) ERREXIT(cinfo, JERR_INPUT_EOF); ptr = source->pub.buffer[0]; bufferptr = source->iobuffer; for (col = cinfo->image_width; col > 0; col--) { register unsigned int temp; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_OUTOFRANGE); *ptr++ = rescale[temp]; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_OUTOFRANGE); *ptr++ = rescale[temp]; temp = UCH(*bufferptr++) << 8; temp |= UCH(*bufferptr++); if (temp > maxval) ERREXIT(cinfo, JERR_PPM_OUTOFRANGE); *ptr++ = rescale[temp]; } return 1; }
{'added': [(78, ' unsigned int maxval;'), (128, ' ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);'), (512, ' ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);'), (539, ' ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);'), (544, ' ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);'), (549, ' ERREXIT(cinfo, JERR_PPM_OUTOFRANGE);')], 'deleted': [(78, ' int maxval;'), (128, ' ERREXIT(cinfo, JERR_PPM_TOOLARGE);'), (512, ' ERREXIT(cinfo, JERR_PPM_TOOLARGE);'), (539, ' ERREXIT(cinfo, JERR_PPM_TOOLARGE);'), (544, ' ERREXIT(cinfo, JERR_PPM_TOOLARGE);'), (549, ' ERREXIT(cinfo, JERR_PPM_TOOLARGE);')]}
num_lines_added: 6
num_lines_deleted: 6
num_lines_in_file: 541
num_tokens_in_file: 3,909
num_lines_in_method: 32
num_tokens_in_method: 244
method_complexity: 6
repo: https://github.com/libjpeg-turbo/libjpeg-turbo
cve_id: CVE-2018-14498
cwe_id: CWE-125
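This record's entire fix is visible in its diff: the maxval field of ppm_source_struct changes from int to unsigned int, and the five range-check failures raise JERR_PPM_OUTOFRANGE instead of JERR_PPM_TOOLARGE (a rename of the error code). The type change removes a signed/unsigned comparison hazard: in an expression like temp > maxval, where temp is unsigned, a signed maxval would be converted to unsigned under C's usual arithmetic conversions, so any negative bound would silently pass the check and let rescale[temp] read past the end of the (maxval + 1)-entry table, the kind of out-of-bounds read this record's CWE-125 label describes. A minimal, runnable illustration of that conversion rule (the -1 below is a deliberately hypothetical corrupt bound, not a value read_pbm_integer produces):

#include <stdio.h>

int main(void)
{
    unsigned int temp = 70000;  /* sample value being range-checked */
    int maxval = -1;            /* hypothetical corrupt bound */

    /* The signed operand converts to unsigned, so -1 becomes UINT_MAX
     * and this test is false: the bad sample is "accepted". */
    if (temp > maxval)
        printf("rejected\n");
    else
        printf("accepted: rescale[%u] would be read out of bounds\n", temp);
    return 0;
}

Declaring maxval unsigned, as the diff does, keeps both operands of every such comparison in the same domain, so the range checks in get_word_gray_row and get_word_rgb_row mean exactly what they say.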
id: 767
file_name: print-ip.c
programming_language: C
method_name: ip_printroute
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: IP printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ipproto.h" static const char tstr[] = "[|ip]"; static const struct tok ip_option_values[] = { { IPOPT_EOL, "EOL" }, { IPOPT_NOP, "NOP" }, { IPOPT_TS, "timestamp" }, { IPOPT_SECURITY, "security" }, { IPOPT_RR, "RR" }, { IPOPT_SSRR, "SSRR" }, { IPOPT_LSRR, "LSRR" }, { IPOPT_RA, "RA" }, { IPOPT_RFC1393, "traceroute" }, { 0, NULL } }; /* * print the recorded route in an IP RR, LSRR or SSRR option. */ static void ip_printroute(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int ptr; register u_int len; if (length < 3) { ND_PRINT((ndo, " [bad length %u]", length)); return; } if ((length + 1) & 3) ND_PRINT((ndo, " [bad length %u]", length)); ptr = cp[2] - 1; if (ptr < 3 || ((ptr + 1) & 3) || ptr > length + 1) ND_PRINT((ndo, " [bad ptr %u]", cp[2])); for (len = 3; len < length; len += 4) { ND_PRINT((ndo, " %s", ipaddr_string(ndo, &cp[len]))); if (ptr > len) ND_PRINT((ndo, ",")); } } /* * If source-routing is present and valid, return the final destination. * Otherwise, return IP destination. * * This is used for UDP and TCP pseudo-header in the checksum * calculation. */ static uint32_t ip_finddst(netdissect_options *ndo, const struct ip *ip) { int length; int len; const u_char *cp; uint32_t retval; cp = (const u_char *)(ip + 1); length = (IP_HL(ip) << 2) - sizeof(struct ip); for (; length > 0; cp += len, length -= len) { int tt; ND_TCHECK(*cp); tt = *cp; if (tt == IPOPT_EOL) break; else if (tt == IPOPT_NOP) len = 1; else { ND_TCHECK(cp[1]); len = cp[1]; if (len < 2) break; } ND_TCHECK2(*cp, len); switch (tt) { case IPOPT_SSRR: case IPOPT_LSRR: if (len < 7) break; UNALIGNED_MEMCPY(&retval, cp + len - 4, 4); return retval; } } trunc: UNALIGNED_MEMCPY(&retval, &ip->ip_dst, sizeof(uint32_t)); return retval; } /* * Compute a V4-style checksum by building a pseudoheader. */ int nextproto4_cksum(netdissect_options *ndo, const struct ip *ip, const uint8_t *data, u_int len, u_int covlen, u_int next_proto) { struct phdr { uint32_t src; uint32_t dst; u_char mbz; u_char proto; uint16_t len; } ph; struct cksum_vec vec[2]; /* pseudo-header.. 
*/ ph.len = htons((uint16_t)len); ph.mbz = 0; ph.proto = next_proto; UNALIGNED_MEMCPY(&ph.src, &ip->ip_src, sizeof(uint32_t)); if (IP_HL(ip) == 5) UNALIGNED_MEMCPY(&ph.dst, &ip->ip_dst, sizeof(uint32_t)); else ph.dst = ip_finddst(ndo, ip); vec[0].ptr = (const uint8_t *)(void *)&ph; vec[0].len = sizeof(ph); vec[1].ptr = data; vec[1].len = covlen; return (in_cksum(vec, 2)); } static void ip_printts(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int ptr; register u_int len; int hoplen; const char *type; if (length < 4) { ND_PRINT((ndo, "[bad length %u]", length)); return; } ND_PRINT((ndo, " TS{")); hoplen = ((cp[3]&0xF) != IPOPT_TS_TSONLY) ? 8 : 4; if ((length - 4) & (hoplen-1)) ND_PRINT((ndo, "[bad length %u]", length)); ptr = cp[2] - 1; len = 0; if (ptr < 4 || ((ptr - 4) & (hoplen-1)) || ptr > length + 1) ND_PRINT((ndo, "[bad ptr %u]", cp[2])); switch (cp[3]&0xF) { case IPOPT_TS_TSONLY: ND_PRINT((ndo, "TSONLY")); break; case IPOPT_TS_TSANDADDR: ND_PRINT((ndo, "TS+ADDR")); break; /* * prespecified should really be 3, but some ones might send 2 * instead, and the IPOPT_TS_PRESPEC constant can apparently * have both values, so we have to hard-code it here. */ case 2: ND_PRINT((ndo, "PRESPEC2.0")); break; case 3: /* IPOPT_TS_PRESPEC */ ND_PRINT((ndo, "PRESPEC")); break; default: ND_PRINT((ndo, "[bad ts type %d]", cp[3]&0xF)); goto done; } type = " "; for (len = 4; len < length; len += hoplen) { if (ptr == len) type = " ^ "; ND_PRINT((ndo, "%s%d@%s", type, EXTRACT_32BITS(&cp[len+hoplen-4]), hoplen!=8 ? "" : ipaddr_string(ndo, &cp[len]))); type = " "; } done: ND_PRINT((ndo, "%s", ptr == len ? " ^ " : "")); if (cp[3]>>4) ND_PRINT((ndo, " [%d hops not recorded]} ", cp[3]>>4)); else ND_PRINT((ndo, "}")); } /* * print IP options. 
*/ static void ip_optprint(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int option_len; const char *sep = ""; for (; length > 0; cp += option_len, length -= option_len) { u_int option_code; ND_PRINT((ndo, "%s", sep)); sep = ","; ND_TCHECK(*cp); option_code = *cp; ND_PRINT((ndo, "%s", tok2str(ip_option_values,"unknown %u",option_code))); if (option_code == IPOPT_NOP || option_code == IPOPT_EOL) option_len = 1; else { ND_TCHECK(cp[1]); option_len = cp[1]; if (option_len < 2) { ND_PRINT((ndo, " [bad length %u]", option_len)); return; } } if (option_len > length) { ND_PRINT((ndo, " [bad length %u]", option_len)); return; } ND_TCHECK2(*cp, option_len); switch (option_code) { case IPOPT_EOL: return; case IPOPT_TS: ip_printts(ndo, cp, option_len); break; case IPOPT_RR: /* fall through */ case IPOPT_SSRR: case IPOPT_LSRR: ip_printroute(ndo, cp, option_len); break; case IPOPT_RA: if (option_len < 4) { ND_PRINT((ndo, " [bad length %u]", option_len)); break; } ND_TCHECK(cp[3]); if (EXTRACT_16BITS(&cp[2]) != 0) ND_PRINT((ndo, " value %u", EXTRACT_16BITS(&cp[2]))); break; case IPOPT_NOP: /* nothing to print - fall through */ case IPOPT_SECURITY: default: break; } } return; trunc: ND_PRINT((ndo, "%s", tstr)); } #define IP_RES 0x8000 static const struct tok ip_frag_values[] = { { IP_MF, "+" }, { IP_DF, "DF" }, { IP_RES, "rsvd" }, /* The RFC3514 evil ;-) bit */ { 0, NULL } }; struct ip_print_demux_state { const struct ip *ip; const u_char *cp; u_int len, off; u_char nh; int advance; }; static void ip_print_demux(netdissect_options *ndo, struct ip_print_demux_state *ipds) { const char *p_name; again: switch (ipds->nh) { case IPPROTO_AH: if (!ND_TTEST(*ipds->cp)) { ND_PRINT((ndo, "[|AH]")); break; } ipds->nh = *ipds->cp; ipds->advance = ah_print(ndo, ipds->cp); if (ipds->advance <= 0) break; ipds->cp += ipds->advance; ipds->len -= ipds->advance; goto again; case IPPROTO_ESP: { int enh, padlen; ipds->advance = esp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, &enh, &padlen); if (ipds->advance <= 0) break; ipds->cp += ipds->advance; ipds->len -= ipds->advance + padlen; ipds->nh = enh & 0xff; goto again; } case IPPROTO_IPCOMP: { ipcomp_print(ndo, ipds->cp); /* * Either this has decompressed the payload and * printed it, in which case there's nothing more * to do, or it hasn't, in which case there's * nothing more to do. */ break; } case IPPROTO_SCTP: sctp_print(ndo, ipds->cp, (const u_char *)ipds->ip, ipds->len); break; case IPPROTO_DCCP: dccp_print(ndo, ipds->cp, (const u_char *)ipds->ip, ipds->len); break; case IPPROTO_TCP: /* pass on the MF bit plus the offset to detect fragments */ tcp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, ipds->off & (IP_MF|IP_OFFMASK)); break; case IPPROTO_UDP: /* pass on the MF bit plus the offset to detect fragments */ udp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, ipds->off & (IP_MF|IP_OFFMASK)); break; case IPPROTO_ICMP: /* pass on the MF bit plus the offset to detect fragments */ icmp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, ipds->off & (IP_MF|IP_OFFMASK)); break; case IPPROTO_PIGP: /* * XXX - the current IANA protocol number assignments * page lists 9 as "any private interior gateway * (used by Cisco for their IGRP)" and 88 as * "EIGRP" from Cisco. * * Recent BSD <netinet/in.h> headers define * IP_PROTO_PIGP as 9 and IP_PROTO_IGRP as 88. * We define IP_PROTO_PIGP as 9 and * IP_PROTO_EIGRP as 88; those names better * match was the current protocol number * assignments say. 
*/ igrp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_EIGRP: eigrp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_ND: ND_PRINT((ndo, " nd %d", ipds->len)); break; case IPPROTO_EGP: egp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_OSPF: ospf_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip); break; case IPPROTO_IGMP: igmp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_IPV4: /* DVMRP multicast tunnel (ip-in-ip encapsulation) */ ip_print(ndo, ipds->cp, ipds->len); if (! ndo->ndo_vflag) { ND_PRINT((ndo, " (ipip-proto-4)")); return; } break; case IPPROTO_IPV6: /* ip6-in-ip encapsulation */ ip6_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_RSVP: rsvp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_GRE: /* do it */ gre_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_MOBILE: mobile_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_PIM: pim_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip); break; case IPPROTO_VRRP: if (ndo->ndo_packettype == PT_CARP) { if (ndo->ndo_vflag) ND_PRINT((ndo, "carp %s > %s: ", ipaddr_string(ndo, &ipds->ip->ip_src), ipaddr_string(ndo, &ipds->ip->ip_dst))); carp_print(ndo, ipds->cp, ipds->len, ipds->ip->ip_ttl); } else { if (ndo->ndo_vflag) ND_PRINT((ndo, "vrrp %s > %s: ", ipaddr_string(ndo, &ipds->ip->ip_src), ipaddr_string(ndo, &ipds->ip->ip_dst))); vrrp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, ipds->ip->ip_ttl); } break; case IPPROTO_PGM: pgm_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip); break; default: if (ndo->ndo_nflag==0 && (p_name = netdb_protoname(ipds->nh)) != NULL) ND_PRINT((ndo, " %s", p_name)); else ND_PRINT((ndo, " ip-proto-%d", ipds->nh)); ND_PRINT((ndo, " %d", ipds->len)); break; } } void ip_print_inner(netdissect_options *ndo, const u_char *bp, u_int length, u_int nh, const u_char *bp2) { struct ip_print_demux_state ipd; ipd.ip = (const struct ip *)bp2; ipd.cp = bp; ipd.len = length; ipd.off = 0; ipd.nh = nh; ipd.advance = 0; ip_print_demux(ndo, &ipd); } /* * print an IP datagram. */ void ip_print(netdissect_options *ndo, const u_char *bp, u_int length) { struct ip_print_demux_state ipd; struct ip_print_demux_state *ipds=&ipd; const u_char *ipend; u_int hlen; struct cksum_vec vec[1]; uint16_t sum, ip_sum; const char *p_name; ipds->ip = (const struct ip *)bp; ND_TCHECK(ipds->ip->ip_vhl); if (IP_V(ipds->ip) != 4) { /* print version and fail if != 4 */ if (IP_V(ipds->ip) == 6) ND_PRINT((ndo, "IP6, wrong link-layer encapsulation ")); else ND_PRINT((ndo, "IP%u ", IP_V(ipds->ip))); return; } if (!ndo->ndo_eflag) ND_PRINT((ndo, "IP ")); ND_TCHECK(*ipds->ip); if (length < sizeof (struct ip)) { ND_PRINT((ndo, "truncated-ip %u", length)); return; } hlen = IP_HL(ipds->ip) * 4; if (hlen < sizeof (struct ip)) { ND_PRINT((ndo, "bad-hlen %u", hlen)); return; } ipds->len = EXTRACT_16BITS(&ipds->ip->ip_len); if (length < ipds->len) ND_PRINT((ndo, "truncated-ip - %u bytes missing! ", ipds->len - length)); if (ipds->len < hlen) { #ifdef GUESS_TSO if (ipds->len) { ND_PRINT((ndo, "bad-len %u", ipds->len)); return; } else { /* we guess that it is a TSO send */ ipds->len = length; } #else ND_PRINT((ndo, "bad-len %u", ipds->len)); return; #endif /* GUESS_TSO */ } /* * Cut off the snapshot length to the end of the IP payload. 
*/ ipend = bp + ipds->len; if (ipend < ndo->ndo_snapend) ndo->ndo_snapend = ipend; ipds->len -= hlen; ipds->off = EXTRACT_16BITS(&ipds->ip->ip_off); if (ndo->ndo_vflag) { ND_PRINT((ndo, "(tos 0x%x", (int)ipds->ip->ip_tos)); /* ECN bits */ switch (ipds->ip->ip_tos & 0x03) { case 0: break; case 1: ND_PRINT((ndo, ",ECT(1)")); break; case 2: ND_PRINT((ndo, ",ECT(0)")); break; case 3: ND_PRINT((ndo, ",CE")); break; } if (ipds->ip->ip_ttl >= 1) ND_PRINT((ndo, ", ttl %u", ipds->ip->ip_ttl)); /* * for the firewall guys, print id, offset. * On all but the last stick a "+" in the flags portion. * For unfragmented datagrams, note the don't fragment flag. */ ND_PRINT((ndo, ", id %u, offset %u, flags [%s], proto %s (%u)", EXTRACT_16BITS(&ipds->ip->ip_id), (ipds->off & 0x1fff) * 8, bittok2str(ip_frag_values, "none", ipds->off&0xe000), tok2str(ipproto_values,"unknown",ipds->ip->ip_p), ipds->ip->ip_p)); ND_PRINT((ndo, ", length %u", EXTRACT_16BITS(&ipds->ip->ip_len))); if ((hlen - sizeof(struct ip)) > 0) { ND_PRINT((ndo, ", options (")); ip_optprint(ndo, (const u_char *)(ipds->ip + 1), hlen - sizeof(struct ip)); ND_PRINT((ndo, ")")); } if (!ndo->ndo_Kflag && (const u_char *)ipds->ip + hlen <= ndo->ndo_snapend) { vec[0].ptr = (const uint8_t *)(const void *)ipds->ip; vec[0].len = hlen; sum = in_cksum(vec, 1); if (sum != 0) { ip_sum = EXTRACT_16BITS(&ipds->ip->ip_sum); ND_PRINT((ndo, ", bad cksum %x (->%x)!", ip_sum, in_cksum_shouldbe(ip_sum, sum))); } } ND_PRINT((ndo, ")\n ")); } /* * If this is fragment zero, hand it to the next higher * level protocol. */ if ((ipds->off & 0x1fff) == 0) { ipds->cp = (const u_char *)ipds->ip + hlen; ipds->nh = ipds->ip->ip_p; if (ipds->nh != IPPROTO_TCP && ipds->nh != IPPROTO_UDP && ipds->nh != IPPROTO_SCTP && ipds->nh != IPPROTO_DCCP) { ND_PRINT((ndo, "%s > %s: ", ipaddr_string(ndo, &ipds->ip->ip_src), ipaddr_string(ndo, &ipds->ip->ip_dst))); } ip_print_demux(ndo, ipds); } else { /* * Ultra quiet now means that all this stuff should be * suppressed. */ if (ndo->ndo_qflag > 1) return; /* * This isn't the first frag, so we're missing the * next level protocol header. print the ip addr * and the protocol. */ ND_PRINT((ndo, "%s > %s:", ipaddr_string(ndo, &ipds->ip->ip_src), ipaddr_string(ndo, &ipds->ip->ip_dst))); if (!ndo->ndo_nflag && (p_name = netdb_protoname(ipds->ip->ip_p)) != NULL) ND_PRINT((ndo, " %s", p_name)); else ND_PRINT((ndo, " ip-proto-%d", ipds->ip->ip_p)); } return; trunc: ND_PRINT((ndo, "%s", tstr)); return; } void ipN_print(netdissect_options *ndo, register const u_char *bp, register u_int length) { if (length < 1) { ND_PRINT((ndo, "truncated-ip %d", length)); return; } ND_TCHECK(*bp); switch (*bp & 0xF0) { case 0x40: ip_print (ndo, bp, length); break; case 0x60: ip6_print (ndo, bp, length); break; default: ND_PRINT((ndo, "unknown ip %d", (*bp & 0xF0) >> 4)); break; } return; trunc: ND_PRINT((ndo, "%s", tstr)); return; } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
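A note on the pseudo-header construction in nextproto4_cksum() above: it works because in_cksum() computes the plain RFC 1071 one's-complement sum, in which each 16-bit word contributes independently, so checksumming the 12-byte phdr vector followed by the payload vector is equivalent to checksumming one contiguous buffer (this relies on the pseudo-header having even length). Below is a minimal, self-contained sketch of that sum with an assumed flat-buffer interface in place of tcpdump's cksum_vec list; the helper name is illustrative.

#include <stddef.h>
#include <stdint.h>

static uint16_t
rfc1071_cksum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {		/* sum big-endian 16-bit words */
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len == 1)			/* zero-pad a trailing odd byte */
		sum += (uint32_t)buf[0] << 8;
	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;		/* one's complement of the sum */
}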
/* * Copyright (c) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ /* \summary: IP printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "extract.h" #include "ip.h" #include "ipproto.h" static const char tstr[] = "[|ip]"; static const struct tok ip_option_values[] = { { IPOPT_EOL, "EOL" }, { IPOPT_NOP, "NOP" }, { IPOPT_TS, "timestamp" }, { IPOPT_SECURITY, "security" }, { IPOPT_RR, "RR" }, { IPOPT_SSRR, "SSRR" }, { IPOPT_LSRR, "LSRR" }, { IPOPT_RA, "RA" }, { IPOPT_RFC1393, "traceroute" }, { 0, NULL } }; /* * print the recorded route in an IP RR, LSRR or SSRR option. */ static int ip_printroute(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int ptr; register u_int len; if (length < 3) { ND_PRINT((ndo, " [bad length %u]", length)); return (0); } if ((length + 1) & 3) ND_PRINT((ndo, " [bad length %u]", length)); ND_TCHECK(cp[2]); ptr = cp[2] - 1; if (ptr < 3 || ((ptr + 1) & 3) || ptr > length + 1) ND_PRINT((ndo, " [bad ptr %u]", cp[2])); for (len = 3; len < length; len += 4) { ND_TCHECK2(cp[len], 4); ND_PRINT((ndo, " %s", ipaddr_string(ndo, &cp[len]))); if (ptr > len) ND_PRINT((ndo, ",")); } return (0); trunc: return (-1); } /* * If source-routing is present and valid, return the final destination. * Otherwise, return IP destination. * * This is used for UDP and TCP pseudo-header in the checksum * calculation. */ static uint32_t ip_finddst(netdissect_options *ndo, const struct ip *ip) { int length; int len; const u_char *cp; uint32_t retval; cp = (const u_char *)(ip + 1); length = (IP_HL(ip) << 2) - sizeof(struct ip); for (; length > 0; cp += len, length -= len) { int tt; ND_TCHECK(*cp); tt = *cp; if (tt == IPOPT_EOL) break; else if (tt == IPOPT_NOP) len = 1; else { ND_TCHECK(cp[1]); len = cp[1]; if (len < 2) break; } ND_TCHECK2(*cp, len); switch (tt) { case IPOPT_SSRR: case IPOPT_LSRR: if (len < 7) break; UNALIGNED_MEMCPY(&retval, cp + len - 4, 4); return retval; } } trunc: UNALIGNED_MEMCPY(&retval, &ip->ip_dst, sizeof(uint32_t)); return retval; } /* * Compute a V4-style checksum by building a pseudoheader. 
*/ int nextproto4_cksum(netdissect_options *ndo, const struct ip *ip, const uint8_t *data, u_int len, u_int covlen, u_int next_proto) { struct phdr { uint32_t src; uint32_t dst; u_char mbz; u_char proto; uint16_t len; } ph; struct cksum_vec vec[2]; /* pseudo-header.. */ ph.len = htons((uint16_t)len); ph.mbz = 0; ph.proto = next_proto; UNALIGNED_MEMCPY(&ph.src, &ip->ip_src, sizeof(uint32_t)); if (IP_HL(ip) == 5) UNALIGNED_MEMCPY(&ph.dst, &ip->ip_dst, sizeof(uint32_t)); else ph.dst = ip_finddst(ndo, ip); vec[0].ptr = (const uint8_t *)(void *)&ph; vec[0].len = sizeof(ph); vec[1].ptr = data; vec[1].len = covlen; return (in_cksum(vec, 2)); } static void ip_printts(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int ptr; register u_int len; int hoplen; const char *type; if (length < 4) { ND_PRINT((ndo, "[bad length %u]", length)); return; } ND_PRINT((ndo, " TS{")); hoplen = ((cp[3]&0xF) != IPOPT_TS_TSONLY) ? 8 : 4; if ((length - 4) & (hoplen-1)) ND_PRINT((ndo, "[bad length %u]", length)); ptr = cp[2] - 1; len = 0; if (ptr < 4 || ((ptr - 4) & (hoplen-1)) || ptr > length + 1) ND_PRINT((ndo, "[bad ptr %u]", cp[2])); switch (cp[3]&0xF) { case IPOPT_TS_TSONLY: ND_PRINT((ndo, "TSONLY")); break; case IPOPT_TS_TSANDADDR: ND_PRINT((ndo, "TS+ADDR")); break; /* * prespecified should really be 3, but some ones might send 2 * instead, and the IPOPT_TS_PRESPEC constant can apparently * have both values, so we have to hard-code it here. */ case 2: ND_PRINT((ndo, "PRESPEC2.0")); break; case 3: /* IPOPT_TS_PRESPEC */ ND_PRINT((ndo, "PRESPEC")); break; default: ND_PRINT((ndo, "[bad ts type %d]", cp[3]&0xF)); goto done; } type = " "; for (len = 4; len < length; len += hoplen) { if (ptr == len) type = " ^ "; ND_PRINT((ndo, "%s%d@%s", type, EXTRACT_32BITS(&cp[len+hoplen-4]), hoplen!=8 ? "" : ipaddr_string(ndo, &cp[len]))); type = " "; } done: ND_PRINT((ndo, "%s", ptr == len ? " ^ " : "")); if (cp[3]>>4) ND_PRINT((ndo, " [%d hops not recorded]} ", cp[3]>>4)); else ND_PRINT((ndo, "}")); } /* * print IP options. 
*/ static void ip_optprint(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int option_len; const char *sep = ""; for (; length > 0; cp += option_len, length -= option_len) { u_int option_code; ND_PRINT((ndo, "%s", sep)); sep = ","; ND_TCHECK(*cp); option_code = *cp; ND_PRINT((ndo, "%s", tok2str(ip_option_values,"unknown %u",option_code))); if (option_code == IPOPT_NOP || option_code == IPOPT_EOL) option_len = 1; else { ND_TCHECK(cp[1]); option_len = cp[1]; if (option_len < 2) { ND_PRINT((ndo, " [bad length %u]", option_len)); return; } } if (option_len > length) { ND_PRINT((ndo, " [bad length %u]", option_len)); return; } ND_TCHECK2(*cp, option_len); switch (option_code) { case IPOPT_EOL: return; case IPOPT_TS: ip_printts(ndo, cp, option_len); break; case IPOPT_RR: /* fall through */ case IPOPT_SSRR: case IPOPT_LSRR: if (ip_printroute(ndo, cp, option_len) == -1) goto trunc; break; case IPOPT_RA: if (option_len < 4) { ND_PRINT((ndo, " [bad length %u]", option_len)); break; } ND_TCHECK(cp[3]); if (EXTRACT_16BITS(&cp[2]) != 0) ND_PRINT((ndo, " value %u", EXTRACT_16BITS(&cp[2]))); break; case IPOPT_NOP: /* nothing to print - fall through */ case IPOPT_SECURITY: default: break; } } return; trunc: ND_PRINT((ndo, "%s", tstr)); } #define IP_RES 0x8000 static const struct tok ip_frag_values[] = { { IP_MF, "+" }, { IP_DF, "DF" }, { IP_RES, "rsvd" }, /* The RFC3514 evil ;-) bit */ { 0, NULL } }; struct ip_print_demux_state { const struct ip *ip; const u_char *cp; u_int len, off; u_char nh; int advance; }; static void ip_print_demux(netdissect_options *ndo, struct ip_print_demux_state *ipds) { const char *p_name; again: switch (ipds->nh) { case IPPROTO_AH: if (!ND_TTEST(*ipds->cp)) { ND_PRINT((ndo, "[|AH]")); break; } ipds->nh = *ipds->cp; ipds->advance = ah_print(ndo, ipds->cp); if (ipds->advance <= 0) break; ipds->cp += ipds->advance; ipds->len -= ipds->advance; goto again; case IPPROTO_ESP: { int enh, padlen; ipds->advance = esp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, &enh, &padlen); if (ipds->advance <= 0) break; ipds->cp += ipds->advance; ipds->len -= ipds->advance + padlen; ipds->nh = enh & 0xff; goto again; } case IPPROTO_IPCOMP: { ipcomp_print(ndo, ipds->cp); /* * Either this has decompressed the payload and * printed it, in which case there's nothing more * to do, or it hasn't, in which case there's * nothing more to do. */ break; } case IPPROTO_SCTP: sctp_print(ndo, ipds->cp, (const u_char *)ipds->ip, ipds->len); break; case IPPROTO_DCCP: dccp_print(ndo, ipds->cp, (const u_char *)ipds->ip, ipds->len); break; case IPPROTO_TCP: /* pass on the MF bit plus the offset to detect fragments */ tcp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, ipds->off & (IP_MF|IP_OFFMASK)); break; case IPPROTO_UDP: /* pass on the MF bit plus the offset to detect fragments */ udp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, ipds->off & (IP_MF|IP_OFFMASK)); break; case IPPROTO_ICMP: /* pass on the MF bit plus the offset to detect fragments */ icmp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, ipds->off & (IP_MF|IP_OFFMASK)); break; case IPPROTO_PIGP: /* * XXX - the current IANA protocol number assignments * page lists 9 as "any private interior gateway * (used by Cisco for their IGRP)" and 88 as * "EIGRP" from Cisco. * * Recent BSD <netinet/in.h> headers define * IP_PROTO_PIGP as 9 and IP_PROTO_IGRP as 88. 
* We define IP_PROTO_PIGP as 9 and * IP_PROTO_EIGRP as 88; those names better * match was the current protocol number * assignments say. */ igrp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_EIGRP: eigrp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_ND: ND_PRINT((ndo, " nd %d", ipds->len)); break; case IPPROTO_EGP: egp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_OSPF: ospf_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip); break; case IPPROTO_IGMP: igmp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_IPV4: /* DVMRP multicast tunnel (ip-in-ip encapsulation) */ ip_print(ndo, ipds->cp, ipds->len); if (! ndo->ndo_vflag) { ND_PRINT((ndo, " (ipip-proto-4)")); return; } break; case IPPROTO_IPV6: /* ip6-in-ip encapsulation */ ip6_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_RSVP: rsvp_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_GRE: /* do it */ gre_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_MOBILE: mobile_print(ndo, ipds->cp, ipds->len); break; case IPPROTO_PIM: pim_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip); break; case IPPROTO_VRRP: if (ndo->ndo_packettype == PT_CARP) { if (ndo->ndo_vflag) ND_PRINT((ndo, "carp %s > %s: ", ipaddr_string(ndo, &ipds->ip->ip_src), ipaddr_string(ndo, &ipds->ip->ip_dst))); carp_print(ndo, ipds->cp, ipds->len, ipds->ip->ip_ttl); } else { if (ndo->ndo_vflag) ND_PRINT((ndo, "vrrp %s > %s: ", ipaddr_string(ndo, &ipds->ip->ip_src), ipaddr_string(ndo, &ipds->ip->ip_dst))); vrrp_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip, ipds->ip->ip_ttl); } break; case IPPROTO_PGM: pgm_print(ndo, ipds->cp, ipds->len, (const u_char *)ipds->ip); break; default: if (ndo->ndo_nflag==0 && (p_name = netdb_protoname(ipds->nh)) != NULL) ND_PRINT((ndo, " %s", p_name)); else ND_PRINT((ndo, " ip-proto-%d", ipds->nh)); ND_PRINT((ndo, " %d", ipds->len)); break; } } void ip_print_inner(netdissect_options *ndo, const u_char *bp, u_int length, u_int nh, const u_char *bp2) { struct ip_print_demux_state ipd; ipd.ip = (const struct ip *)bp2; ipd.cp = bp; ipd.len = length; ipd.off = 0; ipd.nh = nh; ipd.advance = 0; ip_print_demux(ndo, &ipd); } /* * print an IP datagram. */ void ip_print(netdissect_options *ndo, const u_char *bp, u_int length) { struct ip_print_demux_state ipd; struct ip_print_demux_state *ipds=&ipd; const u_char *ipend; u_int hlen; struct cksum_vec vec[1]; uint16_t sum, ip_sum; const char *p_name; ipds->ip = (const struct ip *)bp; ND_TCHECK(ipds->ip->ip_vhl); if (IP_V(ipds->ip) != 4) { /* print version and fail if != 4 */ if (IP_V(ipds->ip) == 6) ND_PRINT((ndo, "IP6, wrong link-layer encapsulation ")); else ND_PRINT((ndo, "IP%u ", IP_V(ipds->ip))); return; } if (!ndo->ndo_eflag) ND_PRINT((ndo, "IP ")); ND_TCHECK(*ipds->ip); if (length < sizeof (struct ip)) { ND_PRINT((ndo, "truncated-ip %u", length)); return; } hlen = IP_HL(ipds->ip) * 4; if (hlen < sizeof (struct ip)) { ND_PRINT((ndo, "bad-hlen %u", hlen)); return; } ipds->len = EXTRACT_16BITS(&ipds->ip->ip_len); if (length < ipds->len) ND_PRINT((ndo, "truncated-ip - %u bytes missing! ", ipds->len - length)); if (ipds->len < hlen) { #ifdef GUESS_TSO if (ipds->len) { ND_PRINT((ndo, "bad-len %u", ipds->len)); return; } else { /* we guess that it is a TSO send */ ipds->len = length; } #else ND_PRINT((ndo, "bad-len %u", ipds->len)); return; #endif /* GUESS_TSO */ } /* * Cut off the snapshot length to the end of the IP payload. 
*/ ipend = bp + ipds->len; if (ipend < ndo->ndo_snapend) ndo->ndo_snapend = ipend; ipds->len -= hlen; ipds->off = EXTRACT_16BITS(&ipds->ip->ip_off); if (ndo->ndo_vflag) { ND_PRINT((ndo, "(tos 0x%x", (int)ipds->ip->ip_tos)); /* ECN bits */ switch (ipds->ip->ip_tos & 0x03) { case 0: break; case 1: ND_PRINT((ndo, ",ECT(1)")); break; case 2: ND_PRINT((ndo, ",ECT(0)")); break; case 3: ND_PRINT((ndo, ",CE")); break; } if (ipds->ip->ip_ttl >= 1) ND_PRINT((ndo, ", ttl %u", ipds->ip->ip_ttl)); /* * for the firewall guys, print id, offset. * On all but the last stick a "+" in the flags portion. * For unfragmented datagrams, note the don't fragment flag. */ ND_PRINT((ndo, ", id %u, offset %u, flags [%s], proto %s (%u)", EXTRACT_16BITS(&ipds->ip->ip_id), (ipds->off & 0x1fff) * 8, bittok2str(ip_frag_values, "none", ipds->off&0xe000), tok2str(ipproto_values,"unknown",ipds->ip->ip_p), ipds->ip->ip_p)); ND_PRINT((ndo, ", length %u", EXTRACT_16BITS(&ipds->ip->ip_len))); if ((hlen - sizeof(struct ip)) > 0) { ND_PRINT((ndo, ", options (")); ip_optprint(ndo, (const u_char *)(ipds->ip + 1), hlen - sizeof(struct ip)); ND_PRINT((ndo, ")")); } if (!ndo->ndo_Kflag && (const u_char *)ipds->ip + hlen <= ndo->ndo_snapend) { vec[0].ptr = (const uint8_t *)(const void *)ipds->ip; vec[0].len = hlen; sum = in_cksum(vec, 1); if (sum != 0) { ip_sum = EXTRACT_16BITS(&ipds->ip->ip_sum); ND_PRINT((ndo, ", bad cksum %x (->%x)!", ip_sum, in_cksum_shouldbe(ip_sum, sum))); } } ND_PRINT((ndo, ")\n ")); } /* * If this is fragment zero, hand it to the next higher * level protocol. */ if ((ipds->off & 0x1fff) == 0) { ipds->cp = (const u_char *)ipds->ip + hlen; ipds->nh = ipds->ip->ip_p; if (ipds->nh != IPPROTO_TCP && ipds->nh != IPPROTO_UDP && ipds->nh != IPPROTO_SCTP && ipds->nh != IPPROTO_DCCP) { ND_PRINT((ndo, "%s > %s: ", ipaddr_string(ndo, &ipds->ip->ip_src), ipaddr_string(ndo, &ipds->ip->ip_dst))); } ip_print_demux(ndo, ipds); } else { /* * Ultra quiet now means that all this stuff should be * suppressed. */ if (ndo->ndo_qflag > 1) return; /* * This isn't the first frag, so we're missing the * next level protocol header. print the ip addr * and the protocol. */ ND_PRINT((ndo, "%s > %s:", ipaddr_string(ndo, &ipds->ip->ip_src), ipaddr_string(ndo, &ipds->ip->ip_dst))); if (!ndo->ndo_nflag && (p_name = netdb_protoname(ipds->ip->ip_p)) != NULL) ND_PRINT((ndo, " %s", p_name)); else ND_PRINT((ndo, " ip-proto-%d", ipds->ip->ip_p)); } return; trunc: ND_PRINT((ndo, "%s", tstr)); return; } void ipN_print(netdissect_options *ndo, register const u_char *bp, register u_int length) { if (length < 1) { ND_PRINT((ndo, "truncated-ip %d", length)); return; } ND_TCHECK(*bp); switch (*bp & 0xF0) { case 0x40: ip_print (ndo, bp, length); break; case 0x60: ip6_print (ndo, bp, length); break; default: ND_PRINT((ndo, "unknown ip %d", (*bp & 0xF0) >> 4)); break; } return; trunc: ND_PRINT((ndo, "%s", tstr)); return; } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
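Both copies of ip_optprint() above walk the IP options as a classic TLV list, and its two length checks are what keep the walk safe: an option_len below 2 could never advance the cursor, and an option_len larger than the remaining length would overrun the option space. Here is a self-contained sketch of that loop shape, assuming the caller guarantees that `length` bytes are readable at `cp` (the real code instead guards each read against the capture end with ND_TCHECK); all names are illustrative.

#include <stdint.h>
#include <stdio.h>

static void
walk_options(const uint8_t *cp, unsigned length)
{
	unsigned option_len;

	for (; length > 0; cp += option_len, length -= option_len) {
		unsigned option_code = cp[0];

		if (option_code == 0 || option_code == 1) {
			/* EOL and NOP are single bytes with no length field */
			option_len = 1;
		} else {
			if (length < 2 || (option_len = cp[1]) < 2) {
				printf(" [bad length]\n");
				return;	/* too short to ever make progress */
			}
		}
		if (option_len > length) {
			printf(" [bad length %u]\n", option_len);
			return;		/* option claims more than remains */
		}
		printf(" option %u (%u bytes)\n", option_code, option_len);
		if (option_code == 0)
			return;		/* EOL terminates the list */
	}
}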
ip_printroute(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int ptr; register u_int len; if (length < 3) { ND_PRINT((ndo, " [bad length %u]", length)); return; } if ((length + 1) & 3) ND_PRINT((ndo, " [bad length %u]", length)); ptr = cp[2] - 1; if (ptr < 3 || ((ptr + 1) & 3) || ptr > length + 1) ND_PRINT((ndo, " [bad ptr %u]", cp[2])); for (len = 3; len < length; len += 4) { ND_PRINT((ndo, " %s", ipaddr_string(ndo, &cp[len]))); if (ptr > len) ND_PRINT((ndo, ",")); } }
ip_printroute(netdissect_options *ndo, register const u_char *cp, u_int length) { register u_int ptr; register u_int len; if (length < 3) { ND_PRINT((ndo, " [bad length %u]", length)); return (0); } if ((length + 1) & 3) ND_PRINT((ndo, " [bad length %u]", length)); ND_TCHECK(cp[2]); ptr = cp[2] - 1; if (ptr < 3 || ((ptr + 1) & 3) || ptr > length + 1) ND_PRINT((ndo, " [bad ptr %u]", cp[2])); for (len = 3; len < length; len += 4) { ND_TCHECK2(cp[len], 4); ND_PRINT((ndo, " %s", ipaddr_string(ndo, &cp[len]))); if (ptr > len) ND_PRINT((ndo, ",")); } return (0); trunc: return (-1); }
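Read together, the two function bodies above show the entire fix for CVE-2017-13022 (see the metadata below): the route-printing loop used to dereference cp[2] and then four bytes at cp[len] without confirming they were actually captured, an out-of-bounds read (CWE-125) when a crafted option's declared length runs past the snapshot. The patch guards both reads with ND_TCHECK/ND_TCHECK2 and changes the return type from void to int so truncation can be reported. A hedged, self-contained analogue of that shape follows; the explicit snapend parameter and the function name are invented for illustration, where tcpdump's macros instead jump to a trunc: label, and cp <= snapend is assumed.

#include <stddef.h>
#include <stdint.h>

static int
printroute_guarded(const uint8_t *cp, unsigned length, const uint8_t *snapend)
{
	unsigned len;

	if ((size_t)(snapend - cp) < 3)	/* guard the cp[2] pointer byte */
		return (-1);		/* truncated; caller prints "[|ip]" */
	for (len = 3; len < length; len += 4) {
		/* guard the 4-byte address before reading cp[len..len+3] */
		if ((size_t)(snapend - cp) < (size_t)len + 4)
			return (-1);
		/* ... cp[len] through cp[len+3] are now safe to read ... */
	}
	return (0);
}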
{'added': [(57, 'static int'), (66, '\t\treturn (0);'), (70, '\tND_TCHECK(cp[2]);'), (76, '\t\tND_TCHECK2(cp[len], 4);'), (81, '\treturn (0);'), (82, ''), (83, 'trunc:'), (84, '\treturn (-1);'), (287, '\t\t\tif (ip_printroute(ndo, cp, option_len) == -1)'), (288, '\t\t\t\tgoto trunc;')], 'deleted': [(57, 'static void'), (66, '\t\treturn;'), (281, '\t\t\tip_printroute(ndo, cp, option_len);')]}
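The diff's two added call-site lines (287-288) are the other half of the change: since ip_printroute() can now fail, ip_optprint() checks its result and jumps to the shared trunc: handler. The same propagation pattern in isolation, under invented names:

#include <stdio.h>

/* Stand-in for a sub-printer that may run out of captured data. */
static int
print_subobject(int truncated)
{
	return (truncated ? -1 : 0);
}

static void
print_options(void)
{
	if (print_subobject(0) == -1)
		goto trunc;
	if (print_subobject(1) == -1)	/* fails and falls into the handler */
		goto trunc;
	return;
trunc:
	printf("[|ip]\n");	/* one shared truncation message */
}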
10
3
519
3441
20
163
8
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-13022
CWE-125
564
locale_methods.c
C
get_icu_value_src_php
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Kirti Velankar <kirtig@yahoo-inc.com> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <unicode/ustring.h> #include <unicode/udata.h> #include <unicode/putil.h> #include <unicode/ures.h> #include "php_intl.h" #include "locale.h" #include "locale_class.h" #include "locale_methods.h" #include "intl_convert.h" #include "intl_data.h" #include <zend_API.h> #include <zend.h> #include <php.h> #include "main/php_ini.h" #include "ext/standard/php_smart_str.h" ZEND_EXTERN_MODULE_GLOBALS( intl ) /* Sizes required for the strings "variant15" , "extlang11", "private12" etc. */ #define SEPARATOR "_" #define SEPARATOR1 "-" #define DELIMITER "-_" #define EXTLANG_PREFIX "a" #define PRIVATE_PREFIX "x" #define DISP_NAME "name" #define MAX_NO_VARIANT 15 #define MAX_NO_EXTLANG 3 #define MAX_NO_PRIVATE 15 #define MAX_NO_LOOKUP_LANG_TAG 100 #define LOC_NOT_FOUND 1 /* Sizes required for the strings "variant15" , "extlang3", "private12" etc. */ #define VARIANT_KEYNAME_LEN 11 #define EXTLANG_KEYNAME_LEN 10 #define PRIVATE_KEYNAME_LEN 11 /* Based on IANA registry at the time of writing this code * */ static const char * const LOC_GRANDFATHERED[] = { "art-lojban", "i-klingon", "i-lux", "i-navajo", "no-bok", "no-nyn", "cel-gaulish", "en-GB-oed", "i-ami", "i-bnn", "i-default", "i-enochian", "i-mingo", "i-pwn", "i-tao", "i-tay", "i-tsu", "sgn-BE-fr", "sgn-BE-nl", "sgn-CH-de", "zh-cmn", "zh-cmn-Hans", "zh-cmn-Hant", "zh-gan" , "zh-guoyu", "zh-hakka", "zh-min", "zh-min-nan", "zh-wuu", "zh-xiang", "zh-yue", NULL }; /* Based on IANA registry at the time of writing this code * This array lists the preferred values for the grandfathered tags if applicable * This is in sync with the array LOC_GRANDFATHERED * e.g. the offsets of the grandfathered tags match the offset of the preferred value */ static const int LOC_PREFERRED_GRANDFATHERED_LEN = 6; static const char * const LOC_PREFERRED_GRANDFATHERED[] = { "jbo", "tlh", "lb", "nv", "nb", "nn", NULL }; /*returns TRUE if a is an ID separator FALSE otherwise*/ #define isIDSeparator(a) (a == '_' || a == '-') #define isKeywordSeparator(a) (a == '@' ) #define isEndOfTag(a) (a == '\0' ) #define isPrefixLetter(a) ((a=='x')||(a=='X')||(a=='i')||(a=='I')) /*returns TRUE if one of the special prefixes is here (s=string) 'x-' or 'i-' */ #define isIDPrefix(s) (isPrefixLetter(s[0])&&isIDSeparator(s[1])) #define isKeywordPrefix(s) ( isKeywordSeparator(s[0]) ) /* Dot terminates it because of POSIX form where dot precedes the codepage * except for variant */ #define isTerminator(a) ((a==0)||(a=='.')||(a=='@')) /* {{{ return the offset of 'key' in the array 'list'. 
* returns -1 if not present */ static int16_t findOffset(const char* const* list, const char* key) { const char* const* anchor = list; while (*list != NULL) { if (strcmp(key, *list) == 0) { return (int16_t)(list - anchor); } list++; } return -1; } /*}}}*/ static char* getPreferredTag(const char* gf_tag) { char* result = NULL; int grOffset = 0; grOffset = findOffset( LOC_GRANDFATHERED ,gf_tag); if(grOffset < 0) { return NULL; } if( grOffset < LOC_PREFERRED_GRANDFATHERED_LEN ){ /* return preferred tag */ result = estrdup( LOC_PREFERRED_GRANDFATHERED[grOffset] ); } else { /* Return correct grandfathered language tag */ result = estrdup( LOC_GRANDFATHERED[grOffset] ); } return result; } /* {{{ * returns the position of next token for lookup * or -1 if no token * strtokr equivalent search for token in reverse direction */ static int getStrrtokenPos(char* str, int savedPos) { int result =-1; int i; for(i=savedPos-1; i>=0; i--) { if(isIDSeparator(*(str+i)) ){ /* delimiter found; check for singleton */ if(i>=2 && isIDSeparator(*(str+i-2)) ){ /* a singleton; so send the position of token before the singleton */ result = i-2; } else { result = i; } break; } } if(result < 1){ /* Just in case inavlid locale e.g. '-x-xyz' or '-sl_Latn' */ result =-1; } return result; } /* }}} */ /* {{{ * returns the position of a singleton if present * returns -1 if no singleton * strtok equivalent search for singleton */ static int getSingletonPos(const char* str) { int result =-1; int i=0; int len = 0; if( str && ((len=strlen(str))>0) ){ for( i=0; i<len ; i++){ if( isIDSeparator(*(str+i)) ){ if( i==1){ /* string is of the form x-avy or a-prv1 */ result =0; break; } else { /* delimiter found; check for singleton */ if( isIDSeparator(*(str+i+2)) ){ /* a singleton; so send the position of separator before singleton */ result = i+1; break; } } } }/* end of for */ } return result; } /* }}} */ /* {{{ proto static string Locale::getDefault( ) Get default locale */ /* }}} */ /* {{{ proto static string locale_get_default( ) Get default locale */ PHP_NAMED_FUNCTION(zif_locale_get_default) { RETURN_STRING( intl_locale_get_default( TSRMLS_C ), TRUE ); } /* }}} */ /* {{{ proto static string Locale::setDefault( string $locale ) Set default locale */ /* }}} */ /* {{{ proto static string locale_set_default( string $locale ) Set default locale */ PHP_NAMED_FUNCTION(zif_locale_set_default) { char* locale_name = NULL; int len=0; if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &locale_name ,&len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_set_default: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(len == 0) { locale_name = (char *)uloc_getDefault() ; len = strlen(locale_name); } zend_alter_ini_entry(LOCALE_INI_NAME, sizeof(LOCALE_INI_NAME), locale_name, len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); RETURN_TRUE; } /* }}} */ /* {{{ * Gets the value from ICU * common code shared by get_primary_language,get_script or get_region or get_variant * result = 0 if error, 1 if successful , -1 if no value */ static char* get_icu_value_internal( const char* loc_name , char* tag_name, int* result , int fromParseLocale) { char* tag_value = NULL; int32_t tag_value_len = 512; int singletonPos = 0; char* mod_loc_name = NULL; int grOffset = 0; int32_t buflen = 512; UErrorCode status = U_ZERO_ERROR; if( strcmp(tag_name, LOC_CANONICALIZE_TAG) != 0 ){ /* Handle grandfathered languages */ grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ 
return estrdup(loc_name); } else { /* Since Grandfathered , no value , do nothing , retutn NULL */ return NULL; } } if( fromParseLocale==1 ){ /* Handle singletons */ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ if( strlen(loc_name)>1 && (isIDPrefix(loc_name) == 1) ){ return estrdup(loc_name); } } singletonPos = getSingletonPos( loc_name ); if( singletonPos == 0){ /* singleton at start of script, region , variant etc. * or invalid singleton at start of language */ return NULL; } else if( singletonPos > 0 ){ /* singleton at some position except at start * strip off the singleton and rest of the loc_name */ mod_loc_name = estrndup ( loc_name , singletonPos-1); } } /* end of if fromParse */ } /* end of if != LOC_CANONICAL_TAG */ if( mod_loc_name == NULL){ mod_loc_name = estrdup(loc_name ); } /* Proceed to ICU */ do{ tag_value = erealloc( tag_value , buflen ); tag_value_len = buflen; if( strcmp(tag_name , LOC_SCRIPT_TAG)==0 ){ buflen = uloc_getScript ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_LANG_TAG )==0 ){ buflen = uloc_getLanguage ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_REGION_TAG)==0 ){ buflen = uloc_getCountry ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_VARIANT_TAG)==0 ){ buflen = uloc_getVariant ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_CANONICALIZE_TAG)==0 ){ buflen = uloc_canonicalize ( mod_loc_name ,tag_value , tag_value_len , &status); } if( U_FAILURE( status ) ) { if( status == U_BUFFER_OVERFLOW_ERROR ) { status = U_ZERO_ERROR; continue; } /* Error in retriving data */ *result = 0; if( tag_value ){ efree( tag_value ); } if( mod_loc_name ){ efree( mod_loc_name); } return NULL; } } while( buflen > tag_value_len ); if( buflen ==0 ){ /* No value found */ *result = -1; if( tag_value ){ efree( tag_value ); } if( mod_loc_name ){ efree( mod_loc_name); } return NULL; } else { *result = 1; } if( mod_loc_name ){ efree( mod_loc_name); } return tag_value; } /* }}} */ /* {{{ * Gets the value from ICU , called when PHP userspace function is called * common code shared by get_primary_language,get_script or get_region or get_variant */ static void get_icu_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS) { const char* loc_name = NULL; int loc_name_len = 0; char* tag_value = NULL; char* empty_result = ""; int result = 0; char* msg = NULL; UErrorCode status = U_ZERO_ERROR; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &loc_name ,&loc_name_len ) == FAILURE) { spprintf(&msg , 0, "locale_get_%s : unable to parse input params", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } /* Call ICU get */ tag_value = get_icu_value_internal( loc_name , tag_name , &result ,0); /* No value found */ if( result == -1 ) { if( tag_value){ efree( tag_value); } RETURN_STRING( empty_result , TRUE); } /* value found */ if( tag_value){ RETURN_STRING( tag_value , FALSE); } /* Error encountered while fetching the value */ if( result ==0) { spprintf(&msg , 0, "locale_get_%s : unable to get locale %s", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); RETURN_NULL(); } } /* }}} */ /* {{{ proto static string Locale::getScript($locale) * gets the script for the $locale }}} */ /* {{{ proto static string locale_get_script($locale) * gets the script for the 
$locale */ PHP_FUNCTION( locale_get_script ) { get_icu_value_src_php( LOC_SCRIPT_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getRegion($locale) * gets the region for the $locale }}} */ /* {{{ proto static string locale_get_region($locale) * gets the region for the $locale */ PHP_FUNCTION( locale_get_region ) { get_icu_value_src_php( LOC_REGION_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getPrimaryLanguage($locale) * gets the primary language for the $locale }}} */ /* {{{ proto static string locale_get_primary_language($locale) * gets the primary language for the $locale */ PHP_FUNCTION(locale_get_primary_language ) { get_icu_value_src_php( LOC_LANG_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ * common code shared by display_xyz functions to get the value from ICU }}} */ static void get_icu_disp_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS) { const char* loc_name = NULL; int loc_name_len = 0; const char* disp_loc_name = NULL; int disp_loc_name_len = 0; int free_loc_name = 0; UChar* disp_name = NULL; int32_t disp_name_len = 0; char* mod_loc_name = NULL; int32_t buflen = 512; UErrorCode status = U_ZERO_ERROR; char* utf8value = NULL; int utf8value_len = 0; char* msg = NULL; int grOffset = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s|s", &loc_name, &loc_name_len , &disp_loc_name ,&disp_loc_name_len ) == FAILURE) { spprintf(&msg , 0, "locale_get_display_%s : unable to parse input params", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len > ULOC_FULLNAME_CAPACITY) { /* See bug 67397: overlong locale names cause trouble in uloc_getDisplayName */ spprintf(&msg , 0, "locale_get_display_%s : name too long", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } if( strcmp(tag_name, DISP_NAME) != 0 ){ /* Handle grandfathered languages */ grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ mod_loc_name = getPreferredTag( loc_name ); } else { /* Since Grandfathered, no value, do nothing, retutn NULL */ RETURN_FALSE; } } } /* end of if != LOC_CANONICAL_TAG */ if( mod_loc_name==NULL ){ mod_loc_name = estrdup( loc_name ); } /* Check if disp_loc_name passed , if not use default locale */ if( !disp_loc_name){ disp_loc_name = estrdup(intl_locale_get_default(TSRMLS_C)); free_loc_name = 1; } /* Get the disp_value for the given locale */ do{ disp_name = erealloc( disp_name , buflen * sizeof(UChar) ); disp_name_len = buflen; if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ buflen = uloc_getDisplayLanguage ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_SCRIPT_TAG)==0 ){ buflen = uloc_getDisplayScript ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_REGION_TAG)==0 ){ buflen = uloc_getDisplayCountry ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_VARIANT_TAG)==0 ){ buflen = uloc_getDisplayVariant ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , DISP_NAME)==0 ){ buflen = uloc_getDisplayName ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } /* 
U_STRING_NOT_TERMINATED_WARNING is admissible here; don't look for it */ if( U_FAILURE( status ) ) { if( status == U_BUFFER_OVERFLOW_ERROR ) { status = U_ZERO_ERROR; continue; } spprintf(&msg, 0, "locale_get_display_%s : unable to get locale %s", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); if( disp_name){ efree( disp_name ); } if( mod_loc_name){ efree( mod_loc_name ); } if (free_loc_name) { efree((void *)disp_loc_name); disp_loc_name = NULL; } RETURN_FALSE; } } while( buflen > disp_name_len ); if( mod_loc_name){ efree( mod_loc_name ); } if (free_loc_name) { efree((void *)disp_loc_name); disp_loc_name = NULL; } /* Convert display locale name from UTF-16 to UTF-8. */ intl_convert_utf16_to_utf8( &utf8value, &utf8value_len, disp_name, buflen, &status ); efree( disp_name ); if( U_FAILURE( status ) ) { spprintf(&msg, 0, "locale_get_display_%s :error converting display name for %s to UTF-8", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } RETVAL_STRINGL( utf8value, utf8value_len , FALSE); } /* }}} */ /* {{{ proto static string Locale::getDisplayName($locale[, $in_locale = null]) * gets the name for the $locale in $in_locale or default_locale }}} */ /* {{{ proto static string get_display_name($locale[, $in_locale = null]) * gets the name for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_name) { get_icu_disp_value_src_php( DISP_NAME , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getDisplayLanguage($locale[, $in_locale = null]) * gets the language for the $locale in $in_locale or default_locale }}} */ /* {{{ proto static string get_display_language($locale[, $in_locale = null]) * gets the language for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_language) { get_icu_disp_value_src_php( LOC_LANG_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getDisplayScript($locale, $in_locale = null) * gets the script for the $locale in $in_locale or default_locale }}} */ /* {{{ proto static string get_display_script($locale, $in_locale = null) * gets the script for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_script) { get_icu_disp_value_src_php( LOC_SCRIPT_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getDisplayRegion($locale, $in_locale = null) * gets the region for the $locale in $in_locale or default_locale }}} */ /* {{{ proto static string get_display_region($locale, $in_locale = null) * gets the region for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_region) { get_icu_disp_value_src_php( LOC_REGION_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ * proto static string Locale::getDisplayVariant($locale, $in_locale = null) * gets the variant for the $locale in $in_locale or default_locale }}} */ /* {{{ * proto static string get_display_variant($locale, $in_locale = null) * gets the variant for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_variant) { get_icu_disp_value_src_php( LOC_VARIANT_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static array getKeywords(string $locale) { * return an associative array containing keyword-value * pairs for this locale. The keys are keys to the array (doh!) 
* }}}*/ /* {{{ proto static array locale_get_keywords(string $locale) { * return an associative array containing keyword-value * pairs for this locale. The keys are keys to the array (doh!) */ PHP_FUNCTION( locale_get_keywords ) { UEnumeration* e = NULL; UErrorCode status = U_ZERO_ERROR; const char* kw_key = NULL; int32_t kw_key_len = 0; const char* loc_name = NULL; int loc_name_len = 0; /* ICU expects the buffer to be allocated before calling the function and so the buffer size has been explicitly specified ICU uloc.h #define ULOC_KEYWORD_AND_VALUES_CAPACITY 100 hence the kw_value buffer size is 100 */ char* kw_value = NULL; int32_t kw_value_len = 100; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &loc_name, &loc_name_len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_get_keywords: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } /* Get the keywords */ e = uloc_openKeywords( loc_name, &status ); if( e != NULL ) { /* Traverse it, filling the return array. */ array_init( return_value ); while( ( kw_key = uenum_next( e, &kw_key_len, &status ) ) != NULL ){ kw_value = ecalloc( 1 , kw_value_len ); /* Get the keyword value for each keyword */ kw_value_len=uloc_getKeywordValue( loc_name,kw_key, kw_value, kw_value_len , &status ); if (status == U_BUFFER_OVERFLOW_ERROR) { status = U_ZERO_ERROR; kw_value = erealloc( kw_value , kw_value_len+1); kw_value_len=uloc_getKeywordValue( loc_name,kw_key, kw_value, kw_value_len+1 , &status ); } else if(!U_FAILURE(status)) { kw_value = erealloc( kw_value , kw_value_len+1); } if (U_FAILURE(status)) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_get_keywords: Error encountered while getting the keyword value for the keyword", 0 TSRMLS_CC ); if( kw_value){ efree( kw_value ); } zval_dtor(return_value); RETURN_FALSE; } add_assoc_stringl( return_value, (char *)kw_key, kw_value , kw_value_len, 0); } /* end of while */ } /* end of if e!=NULL */ uenum_close( e ); } /* }}} */ /* {{{ proto static string Locale::canonicalize($locale) * @return string the canonicalized locale * }}} */ /* {{{ proto static string locale_canonicalize(Locale $loc, string $locale) * @param string $locale The locale string to canonicalize */ PHP_FUNCTION(locale_canonicalize) { get_icu_value_src_php( LOC_CANONICALIZE_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ append_key_value * Internal function which is called from locale_compose * gets the value for the key_name and appends to the loc_name * returns 1 if successful , -1 if not found , * 0 if array element is not a string , -2 if buffer-overflow */ static int append_key_value(smart_str* loc_name, HashTable* hash_arr, char* key_name) { zval** ele_value = NULL; if(zend_hash_find(hash_arr , key_name , strlen(key_name) + 1 ,(void **)&ele_value ) == SUCCESS ) { if(Z_TYPE_PP(ele_value)!= IS_STRING ){ /* element value is not a string */ return FAILURE; } if(strcmp(key_name, LOC_LANG_TAG) != 0 && strcmp(key_name, LOC_GRANDFATHERED_LANG_TAG)!=0 ) { /* not lang or grandfathered tag */ smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); } smart_str_appendl(loc_name, Z_STRVAL_PP(ele_value) , Z_STRLEN_PP(ele_value)); return SUCCESS; } return LOC_NOT_FOUND; } /* }}} */ /* {{{ append_prefix , appends the prefix needed * e.g. 
private adds 'x' */ static void add_prefix(smart_str* loc_name, char* key_name) { if( strncmp(key_name , LOC_PRIVATE_TAG , 7) == 0 ){ smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); smart_str_appendl(loc_name, PRIVATE_PREFIX , sizeof(PRIVATE_PREFIX)-1); } } /* }}} */ /* {{{ append_multiple_key_values * Internal function which is called from locale_compose * gets the multiple values for the key_name and appends to the loc_name * used for 'variant','extlang','private' * returns 1 if successful , -1 if not found , * 0 if array element is not a string , -2 if buffer-overflow */ static int append_multiple_key_values(smart_str* loc_name, HashTable* hash_arr, char* key_name TSRMLS_DC) { zval** ele_value = NULL; int i = 0; int isFirstSubtag = 0; int max_value = 0; /* Variant/ Extlang/Private etc. */ if( zend_hash_find( hash_arr , key_name , strlen(key_name) + 1 ,(void **)&ele_value ) == SUCCESS ) { if( Z_TYPE_PP(ele_value) == IS_STRING ){ add_prefix( loc_name , key_name); smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); smart_str_appendl(loc_name, Z_STRVAL_PP(ele_value) , Z_STRLEN_PP(ele_value)); return SUCCESS; } else if(Z_TYPE_PP(ele_value) == IS_ARRAY ) { HashPosition pos; HashTable *arr = HASH_OF(*ele_value); zval **data = NULL; zend_hash_internal_pointer_reset_ex(arr, &pos); while(zend_hash_get_current_data_ex(arr, (void **)&data, &pos) != FAILURE) { if(Z_TYPE_PP(data) != IS_STRING) { return FAILURE; } if (isFirstSubtag++ == 0){ add_prefix(loc_name , key_name); } smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); smart_str_appendl(loc_name, Z_STRVAL_PP(data) , Z_STRLEN_PP(data)); zend_hash_move_forward_ex(arr, &pos); } return SUCCESS; } else { return FAILURE; } } else { char cur_key_name[31]; /* Decide the max_value: the max. no. 
of elements allowed */ if( strcmp(key_name , LOC_VARIANT_TAG) ==0 ){ max_value = MAX_NO_VARIANT; } if( strcmp(key_name , LOC_EXTLANG_TAG) ==0 ){ max_value = MAX_NO_EXTLANG; } if( strcmp(key_name , LOC_PRIVATE_TAG) ==0 ){ max_value = MAX_NO_PRIVATE; } /* Multiple variant values as variant0, variant1 ,variant2 */ isFirstSubtag = 0; for( i=0 ; i< max_value; i++ ){ snprintf( cur_key_name , 30, "%s%d", key_name , i); if( zend_hash_find( hash_arr , cur_key_name , strlen(cur_key_name) + 1,(void **)&ele_value ) == SUCCESS ){ if( Z_TYPE_PP(ele_value)!= IS_STRING ){ /* variant is not a string */ return FAILURE; } /* Add the contents */ if (isFirstSubtag++ == 0){ add_prefix(loc_name , cur_key_name); } smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); smart_str_appendl(loc_name, Z_STRVAL_PP(ele_value) , Z_STRLEN_PP(ele_value)); } } /* end of for */ } /* end of else */ return SUCCESS; } /* }}} */ /*{{{ * If applicable sets error message and aborts locale_compose gracefully * returns 0 if locale_compose needs to be aborted * otherwise returns 1 */ static int handleAppendResult( int result, smart_str* loc_name TSRMLS_DC) { intl_error_reset( NULL TSRMLS_CC ); if( result == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: parameter array element is not a string", 0 TSRMLS_CC ); smart_str_free(loc_name); return 0; } return 1; } /* }}} */ #define RETURN_SMART_STR(s) smart_str_0((s)); RETURN_STRINGL((s)->c, (s)->len, 0) /* {{{ proto static string Locale::composeLocale($array) * Creates a locale by combining the parts of locale-ID passed * }}} */ /* {{{ proto static string compose_locale($array) * Creates a locale by combining the parts of locale-ID passed * }}} */ PHP_FUNCTION(locale_compose) { smart_str loc_name_s = {0}; smart_str *loc_name = &loc_name_s; zval* arr = NULL; HashTable* hash_arr = NULL; int result = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "a", &arr) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } hash_arr = HASH_OF( arr ); if( !hash_arr || zend_hash_num_elements( hash_arr ) == 0 ) RETURN_FALSE; /* Check for grandfathered first */ result = append_key_value(loc_name, hash_arr, LOC_GRANDFATHERED_LANG_TAG); if( result == SUCCESS){ RETURN_SMART_STR(loc_name); } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Not grandfathered */ result = append_key_value(loc_name, hash_arr , LOC_LANG_TAG); if( result == LOC_NOT_FOUND ){ intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: parameter array does not contain 'language' tag.", 0 TSRMLS_CC ); smart_str_free(loc_name); RETURN_FALSE; } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Extlang */ result = append_multiple_key_values(loc_name, hash_arr , LOC_EXTLANG_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Script */ result = append_key_value(loc_name, hash_arr , LOC_SCRIPT_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Region */ result = append_key_value( loc_name, hash_arr , LOC_REGION_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Variant */ result = append_multiple_key_values( loc_name, hash_arr , LOC_VARIANT_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Private */ result = append_multiple_key_values( loc_name, hash_arr , LOC_PRIVATE_TAG TSRMLS_CC); if( 
!handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } RETURN_SMART_STR(loc_name); } /* }}} */ /*{{{ * Parses the locale and returns private subtags if existing * else returns NULL * e.g. for locale='en_US-x-prv1-prv2-prv3' * returns a pointer to the string 'prv1-prv2-prv3' */ static char* get_private_subtags(const char* loc_name) { char* result =NULL; int singletonPos = 0; int len =0; const char* mod_loc_name =NULL; if( loc_name && (len = strlen(loc_name)>0 ) ){ mod_loc_name = loc_name ; len = strlen(mod_loc_name); while( (singletonPos = getSingletonPos(mod_loc_name))!= -1){ if( singletonPos!=-1){ if( (*(mod_loc_name+singletonPos)=='x') || (*(mod_loc_name+singletonPos)=='X') ){ /* private subtag start found */ if( singletonPos + 2 == len){ /* loc_name ends with '-x-' ; return NULL */ } else{ /* result = mod_loc_name + singletonPos +2; */ result = estrndup(mod_loc_name + singletonPos+2 , (len -( singletonPos +2) ) ); } break; } else{ if( singletonPos + 1 >= len){ /* String end */ break; } else { /* singleton found but not a private subtag , hence check further in the string for the private subtag */ mod_loc_name = mod_loc_name + singletonPos +1; len = strlen(mod_loc_name); } } } } /* end of while */ } return result; } /* }}} */ /* {{{ code used by locale_parse */ static int add_array_entry(const char* loc_name, zval* hash_arr, char* key_name TSRMLS_DC) { char* key_value = NULL; char* cur_key_name = NULL; char* token = NULL; char* last_ptr = NULL; int result = 0; int cur_result = 0; int cnt = 0; if( strcmp(key_name , LOC_PRIVATE_TAG)==0 ){ key_value = get_private_subtags( loc_name ); result = 1; } else { key_value = get_icu_value_internal( loc_name , key_name , &result,1 ); } if( (strcmp(key_name , LOC_PRIVATE_TAG)==0) || ( strcmp(key_name , LOC_VARIANT_TAG)==0) ){ if( result > 0 && key_value){ /* Tokenize on the "_" or "-" */ token = php_strtok_r( key_value , DELIMITER ,&last_ptr); if( cur_key_name ){ efree( cur_key_name); } cur_key_name = (char*)ecalloc( 25, 25); sprintf( cur_key_name , "%s%d", key_name , cnt++); add_assoc_string( hash_arr, cur_key_name , token ,TRUE ); /* tokenize on the "_" or "-" and stop at singleton if any */ while( (token = php_strtok_r(NULL , DELIMITER , &last_ptr)) && (strlen(token)>1) ){ sprintf( cur_key_name , "%s%d", key_name , cnt++); add_assoc_string( hash_arr, cur_key_name , token , TRUE ); } /* if( strcmp(key_name, LOC_PRIVATE_TAG) == 0 ){ } */ } } else { if( result == 1 ){ add_assoc_string( hash_arr, key_name , key_value , TRUE ); cur_result = 1; } } if( cur_key_name ){ efree( cur_key_name); } /*if( key_name != LOC_PRIVATE_TAG && key_value){*/ if( key_value){ efree(key_value); } return cur_result; } /* }}} */ /* {{{ proto static array Locale::parseLocale($locale) * parses a locale-id into an array the different parts of it }}} */ /* {{{ proto static array parse_locale($locale) * parses a locale-id into an array the different parts of it */ PHP_FUNCTION(locale_parse) { const char* loc_name = NULL; int loc_name_len = 0; int grOffset = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &loc_name, &loc_name_len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_parse: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } array_init( return_value ); grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ add_assoc_string( return_value , LOC_GRANDFATHERED_LANG_TAG , estrdup(loc_name) 
,FALSE ); } else{ /* Not grandfathered */ add_array_entry( loc_name , return_value , LOC_LANG_TAG TSRMLS_CC); add_array_entry( loc_name , return_value , LOC_SCRIPT_TAG TSRMLS_CC); add_array_entry( loc_name , return_value , LOC_REGION_TAG TSRMLS_CC); add_array_entry( loc_name , return_value , LOC_VARIANT_TAG TSRMLS_CC); add_array_entry( loc_name , return_value , LOC_PRIVATE_TAG TSRMLS_CC); } } /* }}} */ /* {{{ proto static array Locale::getAllVariants($locale) * gets an array containing the list of variants, or null }}} */ /* {{{ proto static array locale_get_all_variants($locale) * gets an array containing the list of variants, or null */ PHP_FUNCTION(locale_get_all_variants) { const char* loc_name = NULL; int loc_name_len = 0; int result = 0; char* token = NULL; char* variant = NULL; char* saved_ptr = NULL; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &loc_name, &loc_name_len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_parse: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } array_init( return_value ); /* If the locale is grandfathered, stop, no variants */ if( findOffset( LOC_GRANDFATHERED , loc_name ) >= 0 ){ /* ("Grandfathered Tag. No variants."); */ } else { /* Call ICU variant */ variant = get_icu_value_internal( loc_name , LOC_VARIANT_TAG , &result ,0); if( result > 0 && variant){ /* Tokenize on the "_" or "-" */ token = php_strtok_r( variant , DELIMITER , &saved_ptr); add_next_index_stringl( return_value, token , strlen(token) ,TRUE ); /* tokenize on the "_" or "-" and stop at singleton if any */ while( (token = php_strtok_r(NULL , DELIMITER, &saved_ptr)) && (strlen(token)>1) ){ add_next_index_stringl( return_value, token , strlen(token) ,TRUE ); } } if( variant ){ efree( variant ); } } } /* }}} */ /*{{{ * Converts to lower case and also replaces all hyphens with the underscore */ static int strToMatch(const char* str ,char *retstr) { char* anchor = NULL; const char* anchor1 = NULL; int result = 0; if( (!str) || str[0] == '\0'){ return result; } else { anchor = retstr; anchor1 = str; while( (*str)!='\0' ){ if( *str == '-' ){ *retstr = '_'; } else { *retstr = tolower(*str); } str++; retstr++; } *retstr = '\0'; retstr= anchor; str= anchor1; result = 1; } return(result); } /* }}} */ /* {{{ proto static boolean Locale::filterMatches(string $langtag, string $locale[, bool $canonicalize]) * Checks if a $langtag filter matches with $locale according to RFC 4647's basic filtering algorithm */ /* }}} */ /* {{{ proto boolean locale_filter_matches(string $langtag, string $locale[, bool $canonicalize]) * Checks if a $langtag filter matches with $locale according to RFC 4647's basic filtering algorithm */ PHP_FUNCTION(locale_filter_matches) { char* lang_tag = NULL; int lang_tag_len = 0; const char* loc_range = NULL; int loc_range_len = 0; int result = 0; char* token = 0; char* chrcheck = NULL; char* can_lang_tag = NULL; char* can_loc_range = NULL; char* cur_lang_tag = NULL; char* cur_loc_range = NULL; zend_bool boolCanonical = 0; UErrorCode status = U_ZERO_ERROR; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "ss|b", &lang_tag, &lang_tag_len , &loc_range , &loc_range_len , &boolCanonical) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_filter_matches: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_range_len == 0) { loc_range = 
intl_locale_get_default(TSRMLS_C); } if( strcmp(loc_range,"*")==0){ RETURN_TRUE; } if( boolCanonical ){ /* canonicalize loc_range */ can_loc_range=get_icu_value_internal( loc_range , LOC_CANONICALIZE_TAG , &result , 0); if( result ==0) { intl_error_set( NULL, status, "locale_filter_matches : unable to canonicalize loc_range" , 0 TSRMLS_CC ); RETURN_FALSE; } /* canonicalize lang_tag */ can_lang_tag = get_icu_value_internal( lang_tag , LOC_CANONICALIZE_TAG , &result , 0); if( result ==0) { intl_error_set( NULL, status, "locale_filter_matches : unable to canonicalize lang_tag" , 0 TSRMLS_CC ); RETURN_FALSE; } /* Convert to lower case for case-insensitive comparison */ cur_lang_tag = ecalloc( 1, strlen(can_lang_tag) + 1); /* Convert to lower case for case-insensitive comparison */ result = strToMatch( can_lang_tag , cur_lang_tag); if( result == 0) { efree( cur_lang_tag ); efree( can_lang_tag ); RETURN_FALSE; } cur_loc_range = ecalloc( 1, strlen(can_loc_range) + 1); result = strToMatch( can_loc_range , cur_loc_range ); if( result == 0) { efree( cur_lang_tag ); efree( can_lang_tag ); efree( cur_loc_range ); efree( can_loc_range ); RETURN_FALSE; } /* check if prefix */ token = strstr( cur_lang_tag , cur_loc_range ); if( token && (token==cur_lang_tag) ){ /* check if the char. after match is SEPARATOR */ chrcheck = token + (strlen(cur_loc_range)); if( isIDSeparator(*chrcheck) || isEndOfTag(*chrcheck) ){ if( cur_lang_tag){ efree( cur_lang_tag ); } if( cur_loc_range){ efree( cur_loc_range ); } if( can_lang_tag){ efree( can_lang_tag ); } if( can_loc_range){ efree( can_loc_range ); } RETURN_TRUE; } } /* No prefix as loc_range */ if( cur_lang_tag){ efree( cur_lang_tag ); } if( cur_loc_range){ efree( cur_loc_range ); } if( can_lang_tag){ efree( can_lang_tag ); } if( can_loc_range){ efree( can_loc_range ); } RETURN_FALSE; } /* end of if isCanonical */ else{ /* Convert to lower case for case-insensitive comparison */ cur_lang_tag = ecalloc( 1, strlen(lang_tag ) + 1); result = strToMatch( lang_tag , cur_lang_tag); if( result == 0) { efree( cur_lang_tag ); RETURN_FALSE; } cur_loc_range = ecalloc( 1, strlen(loc_range ) + 1); result = strToMatch( loc_range , cur_loc_range ); if( result == 0) { efree( cur_lang_tag ); efree( cur_loc_range ); RETURN_FALSE; } /* check if prefix */ token = strstr( cur_lang_tag , cur_loc_range ); if( token && (token==cur_lang_tag) ){ /* check if the char. 
after match is SEPARATOR */ chrcheck = token + (strlen(cur_loc_range)); if( isIDSeparator(*chrcheck) || isEndOfTag(*chrcheck) ){ if( cur_lang_tag){ efree( cur_lang_tag ); } if( cur_loc_range){ efree( cur_loc_range ); } RETURN_TRUE; } } /* No prefix as loc_range */ if( cur_lang_tag){ efree( cur_lang_tag ); } if( cur_loc_range){ efree( cur_loc_range ); } RETURN_FALSE; } } /* }}} */ static void array_cleanup( char* arr[] , int arr_size) { int i=0; for( i=0; i< arr_size; i++ ){ if( arr[i*2] ){ efree( arr[i*2]); } } efree(arr); } #define LOOKUP_CLEAN_RETURN(value) array_cleanup(cur_arr, cur_arr_len); return (value) /* {{{ * returns the lookup result to lookup_loc_range_src_php * internal function */ static char* lookup_loc_range(const char* loc_range, HashTable* hash_arr, int canonicalize TSRMLS_DC) { int i = 0; int cur_arr_len = 0; int result = 0; char* lang_tag = NULL; zval** ele_value = NULL; char** cur_arr = NULL; char* cur_loc_range = NULL; char* can_loc_range = NULL; int saved_pos = 0; char* return_value = NULL; cur_arr = ecalloc(zend_hash_num_elements(hash_arr)*2, sizeof(char *)); /* convert the array to lowercase , also replace hyphens with the underscore and store it in cur_arr */ for(zend_hash_internal_pointer_reset(hash_arr); zend_hash_has_more_elements(hash_arr) == SUCCESS; zend_hash_move_forward(hash_arr)) { if (zend_hash_get_current_data(hash_arr, (void**)&ele_value) == FAILURE) { /* Should never actually fail since the key is known to exist.*/ continue; } if(Z_TYPE_PP(ele_value)!= IS_STRING) { /* element value is not a string */ intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: locale array element is not a string", 0 TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } cur_arr[cur_arr_len*2] = estrndup(Z_STRVAL_PP(ele_value), Z_STRLEN_PP(ele_value)); result = strToMatch(Z_STRVAL_PP(ele_value), cur_arr[cur_arr_len*2]); if(result == 0) { intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize lang_tag", 0 TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } cur_arr[cur_arr_len*2+1] = Z_STRVAL_PP(ele_value); cur_arr_len++ ; } /* end of for */ /* Canonicalize array elements */ if(canonicalize) { for(i=0; i<cur_arr_len; i++) { lang_tag = get_icu_value_internal(cur_arr[i*2], LOC_CANONICALIZE_TAG, &result, 0); if(result != 1 || lang_tag == NULL || !lang_tag[0]) { if(lang_tag) { efree(lang_tag); } intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize lang_tag" , 0 TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } cur_arr[i*2] = erealloc(cur_arr[i*2], strlen(lang_tag)+1); result = strToMatch(lang_tag, cur_arr[i*2]); efree(lang_tag); if(result == 0) { intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize lang_tag" , 0 TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } } } if(canonicalize) { /* Canonicalize the loc_range */ can_loc_range = get_icu_value_internal(loc_range, LOC_CANONICALIZE_TAG, &result , 0); if( result != 1 || can_loc_range == NULL || !can_loc_range[0]) { /* Error */ intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize loc_range" , 0 TSRMLS_CC ); if(can_loc_range) { efree(can_loc_range); } LOOKUP_CLEAN_RETURN(NULL); } else { loc_range = can_loc_range; } } cur_loc_range = ecalloc(1, strlen(loc_range)+1); /* convert to lower and replace hyphens */ result = strToMatch(loc_range, cur_loc_range); if(can_loc_range) { efree(can_loc_range); } if(result == 0) { intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize lang_tag" , 0 
TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } /* Lookup for the lang_tag match */ saved_pos = strlen(cur_loc_range); while(saved_pos > 0) { for(i=0; i< cur_arr_len; i++){ if(cur_arr[i*2] != NULL && strlen(cur_arr[i*2]) == saved_pos && strncmp(cur_loc_range, cur_arr[i*2], saved_pos) == 0) { /* Match found */ return_value = estrdup(canonicalize?cur_arr[i*2]:cur_arr[i*2+1]); efree(cur_loc_range); LOOKUP_CLEAN_RETURN(return_value); } } saved_pos = getStrrtokenPos(cur_loc_range, saved_pos); } /* Match not found */ efree(cur_loc_range); LOOKUP_CLEAN_RETURN(NULL); } /* }}} */ /* {{{ proto string Locale::lookup(array $langtag, string $locale[, bool $canonicalize[, string $default = null]]) * Searchs the items in $langtag for the best match to the language * range */ /* }}} */ /* {{{ proto string locale_lookup(array $langtag, string $locale[, bool $canonicalize[, string $default = null]]) * Searchs the items in $langtag for the best match to the language * range */ PHP_FUNCTION(locale_lookup) { char* fallback_loc = NULL; int fallback_loc_len = 0; const char* loc_range = NULL; int loc_range_len = 0; zval* arr = NULL; HashTable* hash_arr = NULL; zend_bool boolCanonical = 0; char* result =NULL; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "as|bs", &arr, &loc_range, &loc_range_len, &boolCanonical, &fallback_loc, &fallback_loc_len) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_lookup: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_range_len == 0) { loc_range = intl_locale_get_default(TSRMLS_C); } hash_arr = HASH_OF(arr); if( !hash_arr || zend_hash_num_elements( hash_arr ) == 0 ) { RETURN_EMPTY_STRING(); } result = lookup_loc_range(loc_range, hash_arr, boolCanonical TSRMLS_CC); if(result == NULL || result[0] == '\0') { if( fallback_loc ) { result = estrndup(fallback_loc, fallback_loc_len); } else { RETURN_EMPTY_STRING(); } } RETVAL_STRINGL(result, strlen(result), 0); } /* }}} */ /* {{{ proto string Locale::acceptFromHttp(string $http_accept) * Tries to find out best available locale based on HTTP �Accept-Language� header */ /* }}} */ /* {{{ proto string locale_accept_from_http(string $http_accept) * Tries to find out best available locale based on HTTP �Accept-Language� header */ PHP_FUNCTION(locale_accept_from_http) { UEnumeration *available; char *http_accept = NULL; int http_accept_len; UErrorCode status = 0; int len; char resultLocale[INTL_MAX_LOCALE_LEN+1]; UAcceptResult outResult; if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &http_accept, &http_accept_len) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_accept_from_http: unable to parse input parameters", 0 TSRMLS_CC ); RETURN_FALSE; } available = ures_openAvailableLocales(NULL, &status); INTL_CHECK_STATUS(status, "locale_accept_from_http: failed to retrieve locale list"); len = uloc_acceptLanguageFromHTTP(resultLocale, INTL_MAX_LOCALE_LEN, &outResult, http_accept, available, &status); uenum_close(available); INTL_CHECK_STATUS(status, "locale_accept_from_http: failed to find acceptable locale"); if (len < 0 || outResult == ULOC_ACCEPT_FAILED) { RETURN_FALSE; } RETURN_STRINGL(resultLocale, len, 1); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 *can_loc_len */
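/* {{{ illustrative sketch (not part of the original source)
 * locale_filter_matches() above reduces RFC 4647 "basic filtering" to a
 * prefix test on a canonical match form: lowercase everything, map '-'
 * to '_', then accept when the range is a prefix of the tag that ends
 * exactly at a subtag boundary. A minimal standalone rendering follows;
 * the helper names to_match_form/basic_filter_matches are hypothetical,
 * and the fixed-size buffers simplify the ecalloc'd copies the real
 * function allocates.
 */
#include <ctype.h>
#include <string.h>

static void to_match_form(const char *src, char *dst)
{
	for (; *src != '\0'; src++, dst++)
		*dst = (*src == '-') ? '_' : (char)tolower((unsigned char)*src);
	*dst = '\0';
}

static int basic_filter_matches(const char *lang_tag, const char *loc_range)
{
	char tag[64], range[64];

	if (strlen(lang_tag) >= sizeof tag || strlen(loc_range) >= sizeof range)
		return 0;                 /* sketch only: refuse oversized input */
	if (strcmp(loc_range, "*") == 0)
		return 1;                 /* the wildcard range matches any tag */

	to_match_form(lang_tag, tag);
	to_match_form(loc_range, range);

	if (strncmp(tag, range, strlen(range)) != 0)
		return 0;                 /* range is not a prefix of the tag */

	/* the character after the match must end the tag or start a subtag */
	return tag[strlen(range)] == '\0' || tag[strlen(range)] == '_';
}
/* }}} */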
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Kirti Velankar <kirtig@yahoo-inc.com> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <unicode/ustring.h> #include <unicode/udata.h> #include <unicode/putil.h> #include <unicode/ures.h> #include "php_intl.h" #include "locale.h" #include "locale_class.h" #include "locale_methods.h" #include "intl_convert.h" #include "intl_data.h" #include <zend_API.h> #include <zend.h> #include <php.h> #include "main/php_ini.h" #include "ext/standard/php_smart_str.h" ZEND_EXTERN_MODULE_GLOBALS( intl ) /* Sizes required for the strings "variant15" , "extlang11", "private12" etc. */ #define SEPARATOR "_" #define SEPARATOR1 "-" #define DELIMITER "-_" #define EXTLANG_PREFIX "a" #define PRIVATE_PREFIX "x" #define DISP_NAME "name" #define MAX_NO_VARIANT 15 #define MAX_NO_EXTLANG 3 #define MAX_NO_PRIVATE 15 #define MAX_NO_LOOKUP_LANG_TAG 100 #define LOC_NOT_FOUND 1 /* Sizes required for the strings "variant15" , "extlang3", "private12" etc. */ #define VARIANT_KEYNAME_LEN 11 #define EXTLANG_KEYNAME_LEN 10 #define PRIVATE_KEYNAME_LEN 11 /* Based on IANA registry at the time of writing this code * */ static const char * const LOC_GRANDFATHERED[] = { "art-lojban", "i-klingon", "i-lux", "i-navajo", "no-bok", "no-nyn", "cel-gaulish", "en-GB-oed", "i-ami", "i-bnn", "i-default", "i-enochian", "i-mingo", "i-pwn", "i-tao", "i-tay", "i-tsu", "sgn-BE-fr", "sgn-BE-nl", "sgn-CH-de", "zh-cmn", "zh-cmn-Hans", "zh-cmn-Hant", "zh-gan" , "zh-guoyu", "zh-hakka", "zh-min", "zh-min-nan", "zh-wuu", "zh-xiang", "zh-yue", NULL }; /* Based on IANA registry at the time of writing this code * This array lists the preferred values for the grandfathered tags if applicable * This is in sync with the array LOC_GRANDFATHERED * e.g. the offsets of the grandfathered tags match the offset of the preferred value */ static const int LOC_PREFERRED_GRANDFATHERED_LEN = 6; static const char * const LOC_PREFERRED_GRANDFATHERED[] = { "jbo", "tlh", "lb", "nv", "nb", "nn", NULL }; /*returns TRUE if a is an ID separator FALSE otherwise*/ #define isIDSeparator(a) (a == '_' || a == '-') #define isKeywordSeparator(a) (a == '@' ) #define isEndOfTag(a) (a == '\0' ) #define isPrefixLetter(a) ((a=='x')||(a=='X')||(a=='i')||(a=='I')) /*returns TRUE if one of the special prefixes is here (s=string) 'x-' or 'i-' */ #define isIDPrefix(s) (isPrefixLetter(s[0])&&isIDSeparator(s[1])) #define isKeywordPrefix(s) ( isKeywordSeparator(s[0]) ) /* Dot terminates it because of POSIX form where dot precedes the codepage * except for variant */ #define isTerminator(a) ((a==0)||(a=='.')||(a=='@')) /* {{{ return the offset of 'key' in the array 'list'. 
* returns -1 if not present */ static int16_t findOffset(const char* const* list, const char* key) { const char* const* anchor = list; while (*list != NULL) { if (strcmp(key, *list) == 0) { return (int16_t)(list - anchor); } list++; } return -1; } /*}}}*/ static char* getPreferredTag(const char* gf_tag) { char* result = NULL; int grOffset = 0; grOffset = findOffset( LOC_GRANDFATHERED ,gf_tag); if(grOffset < 0) { return NULL; } if( grOffset < LOC_PREFERRED_GRANDFATHERED_LEN ){ /* return preferred tag */ result = estrdup( LOC_PREFERRED_GRANDFATHERED[grOffset] ); } else { /* Return correct grandfathered language tag */ result = estrdup( LOC_GRANDFATHERED[grOffset] ); } return result; } /* {{{ * returns the position of next token for lookup * or -1 if no token * strtokr equivalent search for token in reverse direction */ static int getStrrtokenPos(char* str, int savedPos) { int result =-1; int i; for(i=savedPos-1; i>=0; i--) { if(isIDSeparator(*(str+i)) ){ /* delimiter found; check for singleton */ if(i>=2 && isIDSeparator(*(str+i-2)) ){ /* a singleton; so send the position of token before the singleton */ result = i-2; } else { result = i; } break; } } if(result < 1){ /* Just in case inavlid locale e.g. '-x-xyz' or '-sl_Latn' */ result =-1; } return result; } /* }}} */ /* {{{ * returns the position of a singleton if present * returns -1 if no singleton * strtok equivalent search for singleton */ static int getSingletonPos(const char* str) { int result =-1; int i=0; int len = 0; if( str && ((len=strlen(str))>0) ){ for( i=0; i<len ; i++){ if( isIDSeparator(*(str+i)) ){ if( i==1){ /* string is of the form x-avy or a-prv1 */ result =0; break; } else { /* delimiter found; check for singleton */ if( isIDSeparator(*(str+i+2)) ){ /* a singleton; so send the position of separator before singleton */ result = i+1; break; } } } }/* end of for */ } return result; } /* }}} */ /* {{{ proto static string Locale::getDefault( ) Get default locale */ /* }}} */ /* {{{ proto static string locale_get_default( ) Get default locale */ PHP_NAMED_FUNCTION(zif_locale_get_default) { RETURN_STRING( intl_locale_get_default( TSRMLS_C ), TRUE ); } /* }}} */ /* {{{ proto static string Locale::setDefault( string $locale ) Set default locale */ /* }}} */ /* {{{ proto static string locale_set_default( string $locale ) Set default locale */ PHP_NAMED_FUNCTION(zif_locale_set_default) { char* locale_name = NULL; int len=0; if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &locale_name ,&len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_set_default: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(len == 0) { locale_name = (char *)uloc_getDefault() ; len = strlen(locale_name); } zend_alter_ini_entry(LOCALE_INI_NAME, sizeof(LOCALE_INI_NAME), locale_name, len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME); RETURN_TRUE; } /* }}} */ /* {{{ * Gets the value from ICU * common code shared by get_primary_language,get_script or get_region or get_variant * result = 0 if error, 1 if successful , -1 if no value */ static char* get_icu_value_internal( const char* loc_name , char* tag_name, int* result , int fromParseLocale) { char* tag_value = NULL; int32_t tag_value_len = 512; int singletonPos = 0; char* mod_loc_name = NULL; int grOffset = 0; int32_t buflen = 512; UErrorCode status = U_ZERO_ERROR; if( strcmp(tag_name, LOC_CANONICALIZE_TAG) != 0 ){ /* Handle grandfathered languages */ grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ 
return estrdup(loc_name); } else { /* Since Grandfathered , no value , do nothing , retutn NULL */ return NULL; } } if( fromParseLocale==1 ){ /* Handle singletons */ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ if( strlen(loc_name)>1 && (isIDPrefix(loc_name) == 1) ){ return estrdup(loc_name); } } singletonPos = getSingletonPos( loc_name ); if( singletonPos == 0){ /* singleton at start of script, region , variant etc. * or invalid singleton at start of language */ return NULL; } else if( singletonPos > 0 ){ /* singleton at some position except at start * strip off the singleton and rest of the loc_name */ mod_loc_name = estrndup ( loc_name , singletonPos-1); } } /* end of if fromParse */ } /* end of if != LOC_CANONICAL_TAG */ if( mod_loc_name == NULL){ mod_loc_name = estrdup(loc_name ); } /* Proceed to ICU */ do{ tag_value = erealloc( tag_value , buflen ); tag_value_len = buflen; if( strcmp(tag_name , LOC_SCRIPT_TAG)==0 ){ buflen = uloc_getScript ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_LANG_TAG )==0 ){ buflen = uloc_getLanguage ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_REGION_TAG)==0 ){ buflen = uloc_getCountry ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_VARIANT_TAG)==0 ){ buflen = uloc_getVariant ( mod_loc_name ,tag_value , tag_value_len , &status); } if( strcmp(tag_name , LOC_CANONICALIZE_TAG)==0 ){ buflen = uloc_canonicalize ( mod_loc_name ,tag_value , tag_value_len , &status); } if( U_FAILURE( status ) ) { if( status == U_BUFFER_OVERFLOW_ERROR ) { status = U_ZERO_ERROR; buflen++; /* add space for \0 */ continue; } /* Error in retriving data */ *result = 0; if( tag_value ){ efree( tag_value ); } if( mod_loc_name ){ efree( mod_loc_name); } return NULL; } } while( buflen > tag_value_len ); if( buflen ==0 ){ /* No value found */ *result = -1; if( tag_value ){ efree( tag_value ); } if( mod_loc_name ){ efree( mod_loc_name); } return NULL; } else { *result = 1; } if( mod_loc_name ){ efree( mod_loc_name); } return tag_value; } /* }}} */ /* {{{ * Gets the value from ICU , called when PHP userspace function is called * common code shared by get_primary_language,get_script or get_region or get_variant */ static void get_icu_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS) { const char* loc_name = NULL; int loc_name_len = 0; char* tag_value = NULL; char* empty_result = ""; int result = 0; char* msg = NULL; UErrorCode status = U_ZERO_ERROR; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &loc_name ,&loc_name_len ) == FAILURE) { spprintf(&msg , 0, "locale_get_%s : unable to parse input params", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } /* Call ICU get */ tag_value = get_icu_value_internal( loc_name , tag_name , &result ,0); /* No value found */ if( result == -1 ) { if( tag_value){ efree( tag_value); } RETURN_STRING( empty_result , TRUE); } /* value found */ if( tag_value){ RETURN_STRING( tag_value , FALSE); } /* Error encountered while fetching the value */ if( result ==0) { spprintf(&msg , 0, "locale_get_%s : unable to get locale %s", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); RETURN_NULL(); } } /* }}} */ /* {{{ proto static string Locale::getScript($locale) * gets the script for the $locale }}} */ /* {{{ proto static string 
locale_get_script($locale) * gets the script for the $locale */ PHP_FUNCTION( locale_get_script ) { get_icu_value_src_php( LOC_SCRIPT_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getRegion($locale) * gets the region for the $locale }}} */ /* {{{ proto static string locale_get_region($locale) * gets the region for the $locale */ PHP_FUNCTION( locale_get_region ) { get_icu_value_src_php( LOC_REGION_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getPrimaryLanguage($locale) * gets the primary language for the $locale }}} */ /* {{{ proto static string locale_get_primary_language($locale) * gets the primary language for the $locale */ PHP_FUNCTION(locale_get_primary_language ) { get_icu_value_src_php( LOC_LANG_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ * common code shared by display_xyz functions to get the value from ICU }}} */ static void get_icu_disp_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS) { const char* loc_name = NULL; int loc_name_len = 0; const char* disp_loc_name = NULL; int disp_loc_name_len = 0; int free_loc_name = 0; UChar* disp_name = NULL; int32_t disp_name_len = 0; char* mod_loc_name = NULL; int32_t buflen = 512; UErrorCode status = U_ZERO_ERROR; char* utf8value = NULL; int utf8value_len = 0; char* msg = NULL; int grOffset = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s|s", &loc_name, &loc_name_len , &disp_loc_name ,&disp_loc_name_len ) == FAILURE) { spprintf(&msg , 0, "locale_get_display_%s : unable to parse input params", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len > ULOC_FULLNAME_CAPACITY) { /* See bug 67397: overlong locale names cause trouble in uloc_getDisplayName */ spprintf(&msg , 0, "locale_get_display_%s : name too long", tag_name ); intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } if( strcmp(tag_name, DISP_NAME) != 0 ){ /* Handle grandfathered languages */ grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ mod_loc_name = getPreferredTag( loc_name ); } else { /* Since Grandfathered, no value, do nothing, retutn NULL */ RETURN_FALSE; } } } /* end of if != LOC_CANONICAL_TAG */ if( mod_loc_name==NULL ){ mod_loc_name = estrdup( loc_name ); } /* Check if disp_loc_name passed , if not use default locale */ if( !disp_loc_name){ disp_loc_name = estrdup(intl_locale_get_default(TSRMLS_C)); free_loc_name = 1; } /* Get the disp_value for the given locale */ do{ disp_name = erealloc( disp_name , buflen * sizeof(UChar) ); disp_name_len = buflen; if( strcmp(tag_name , LOC_LANG_TAG)==0 ){ buflen = uloc_getDisplayLanguage ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_SCRIPT_TAG)==0 ){ buflen = uloc_getDisplayScript ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_REGION_TAG)==0 ){ buflen = uloc_getDisplayCountry ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , LOC_VARIANT_TAG)==0 ){ buflen = uloc_getDisplayVariant ( mod_loc_name , disp_loc_name , disp_name , disp_name_len , &status); } else if( strcmp(tag_name , DISP_NAME)==0 ){ buflen = uloc_getDisplayName ( mod_loc_name , disp_loc_name , 
disp_name , disp_name_len , &status); } /* U_STRING_NOT_TERMINATED_WARNING is admissible here; don't look for it */ if( U_FAILURE( status ) ) { if( status == U_BUFFER_OVERFLOW_ERROR ) { status = U_ZERO_ERROR; continue; } spprintf(&msg, 0, "locale_get_display_%s : unable to get locale %s", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); if( disp_name){ efree( disp_name ); } if( mod_loc_name){ efree( mod_loc_name ); } if (free_loc_name) { efree((void *)disp_loc_name); disp_loc_name = NULL; } RETURN_FALSE; } } while( buflen > disp_name_len ); if( mod_loc_name){ efree( mod_loc_name ); } if (free_loc_name) { efree((void *)disp_loc_name); disp_loc_name = NULL; } /* Convert display locale name from UTF-16 to UTF-8. */ intl_convert_utf16_to_utf8( &utf8value, &utf8value_len, disp_name, buflen, &status ); efree( disp_name ); if( U_FAILURE( status ) ) { spprintf(&msg, 0, "locale_get_display_%s :error converting display name for %s to UTF-8", tag_name , tag_name ); intl_error_set( NULL, status, msg , 1 TSRMLS_CC ); efree(msg); RETURN_FALSE; } RETVAL_STRINGL( utf8value, utf8value_len , FALSE); } /* }}} */ /* {{{ proto static string Locale::getDisplayName($locale[, $in_locale = null]) * gets the name for the $locale in $in_locale or default_locale }}} */ /* {{{ proto static string get_display_name($locale[, $in_locale = null]) * gets the name for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_name) { get_icu_disp_value_src_php( DISP_NAME , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getDisplayLanguage($locale[, $in_locale = null]) * gets the language for the $locale in $in_locale or default_locale }}} */ /* {{{ proto static string get_display_language($locale[, $in_locale = null]) * gets the language for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_language) { get_icu_disp_value_src_php( LOC_LANG_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getDisplayScript($locale, $in_locale = null) * gets the script for the $locale in $in_locale or default_locale }}} */ /* {{{ proto static string get_display_script($locale, $in_locale = null) * gets the script for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_script) { get_icu_disp_value_src_php( LOC_SCRIPT_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static string Locale::getDisplayRegion($locale, $in_locale = null) * gets the region for the $locale in $in_locale or default_locale }}} */ /* {{{ proto static string get_display_region($locale, $in_locale = null) * gets the region for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_region) { get_icu_disp_value_src_php( LOC_REGION_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ * proto static string Locale::getDisplayVariant($locale, $in_locale = null) * gets the variant for the $locale in $in_locale or default_locale }}} */ /* {{{ * proto static string get_display_variant($locale, $in_locale = null) * gets the variant for the $locale in $in_locale or default_locale */ PHP_FUNCTION(locale_get_display_variant) { get_icu_disp_value_src_php( LOC_VARIANT_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ proto static array getKeywords(string $locale) { * return an associative array containing keyword-value * pairs for this locale. The keys are keys to the array (doh!) 
* }}}*/ /* {{{ proto static array locale_get_keywords(string $locale) { * return an associative array containing keyword-value * pairs for this locale. The keys are keys to the array (doh!) */ PHP_FUNCTION( locale_get_keywords ) { UEnumeration* e = NULL; UErrorCode status = U_ZERO_ERROR; const char* kw_key = NULL; int32_t kw_key_len = 0; const char* loc_name = NULL; int loc_name_len = 0; /* ICU expects the buffer to be allocated before calling the function and so the buffer size has been explicitly specified ICU uloc.h #define ULOC_KEYWORD_AND_VALUES_CAPACITY 100 hence the kw_value buffer size is 100 */ char* kw_value = NULL; int32_t kw_value_len = 100; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &loc_name, &loc_name_len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_get_keywords: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } /* Get the keywords */ e = uloc_openKeywords( loc_name, &status ); if( e != NULL ) { /* Traverse it, filling the return array. */ array_init( return_value ); while( ( kw_key = uenum_next( e, &kw_key_len, &status ) ) != NULL ){ kw_value = ecalloc( 1 , kw_value_len ); /* Get the keyword value for each keyword */ kw_value_len=uloc_getKeywordValue( loc_name,kw_key, kw_value, kw_value_len , &status ); if (status == U_BUFFER_OVERFLOW_ERROR) { status = U_ZERO_ERROR; kw_value = erealloc( kw_value , kw_value_len+1); kw_value_len=uloc_getKeywordValue( loc_name,kw_key, kw_value, kw_value_len+1 , &status ); } else if(!U_FAILURE(status)) { kw_value = erealloc( kw_value , kw_value_len+1); } if (U_FAILURE(status)) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_get_keywords: Error encountered while getting the keyword value for the keyword", 0 TSRMLS_CC ); if( kw_value){ efree( kw_value ); } zval_dtor(return_value); RETURN_FALSE; } add_assoc_stringl( return_value, (char *)kw_key, kw_value , kw_value_len, 0); } /* end of while */ } /* end of if e!=NULL */ uenum_close( e ); } /* }}} */ /* {{{ proto static string Locale::canonicalize($locale) * @return string the canonicalized locale * }}} */ /* {{{ proto static string locale_canonicalize(Locale $loc, string $locale) * @param string $locale The locale string to canonicalize */ PHP_FUNCTION(locale_canonicalize) { get_icu_value_src_php( LOC_CANONICALIZE_TAG , INTERNAL_FUNCTION_PARAM_PASSTHRU ); } /* }}} */ /* {{{ append_key_value * Internal function which is called from locale_compose * gets the value for the key_name and appends to the loc_name * returns 1 if successful , -1 if not found , * 0 if array element is not a string , -2 if buffer-overflow */ static int append_key_value(smart_str* loc_name, HashTable* hash_arr, char* key_name) { zval** ele_value = NULL; if(zend_hash_find(hash_arr , key_name , strlen(key_name) + 1 ,(void **)&ele_value ) == SUCCESS ) { if(Z_TYPE_PP(ele_value)!= IS_STRING ){ /* element value is not a string */ return FAILURE; } if(strcmp(key_name, LOC_LANG_TAG) != 0 && strcmp(key_name, LOC_GRANDFATHERED_LANG_TAG)!=0 ) { /* not lang or grandfathered tag */ smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); } smart_str_appendl(loc_name, Z_STRVAL_PP(ele_value) , Z_STRLEN_PP(ele_value)); return SUCCESS; } return LOC_NOT_FOUND; } /* }}} */ /* {{{ append_prefix , appends the prefix needed * e.g. 
private adds 'x' */ static void add_prefix(smart_str* loc_name, char* key_name) { if( strncmp(key_name , LOC_PRIVATE_TAG , 7) == 0 ){ smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); smart_str_appendl(loc_name, PRIVATE_PREFIX , sizeof(PRIVATE_PREFIX)-1); } } /* }}} */ /* {{{ append_multiple_key_values * Internal function which is called from locale_compose * gets the multiple values for the key_name and appends to the loc_name * used for 'variant','extlang','private' * returns 1 if successful , -1 if not found , * 0 if array element is not a string , -2 if buffer-overflow */ static int append_multiple_key_values(smart_str* loc_name, HashTable* hash_arr, char* key_name TSRMLS_DC) { zval** ele_value = NULL; int i = 0; int isFirstSubtag = 0; int max_value = 0; /* Variant/ Extlang/Private etc. */ if( zend_hash_find( hash_arr , key_name , strlen(key_name) + 1 ,(void **)&ele_value ) == SUCCESS ) { if( Z_TYPE_PP(ele_value) == IS_STRING ){ add_prefix( loc_name , key_name); smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); smart_str_appendl(loc_name, Z_STRVAL_PP(ele_value) , Z_STRLEN_PP(ele_value)); return SUCCESS; } else if(Z_TYPE_PP(ele_value) == IS_ARRAY ) { HashPosition pos; HashTable *arr = HASH_OF(*ele_value); zval **data = NULL; zend_hash_internal_pointer_reset_ex(arr, &pos); while(zend_hash_get_current_data_ex(arr, (void **)&data, &pos) != FAILURE) { if(Z_TYPE_PP(data) != IS_STRING) { return FAILURE; } if (isFirstSubtag++ == 0){ add_prefix(loc_name , key_name); } smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); smart_str_appendl(loc_name, Z_STRVAL_PP(data) , Z_STRLEN_PP(data)); zend_hash_move_forward_ex(arr, &pos); } return SUCCESS; } else { return FAILURE; } } else { char cur_key_name[31]; /* Decide the max_value: the max. no. 
of elements allowed */ if( strcmp(key_name , LOC_VARIANT_TAG) ==0 ){ max_value = MAX_NO_VARIANT; } if( strcmp(key_name , LOC_EXTLANG_TAG) ==0 ){ max_value = MAX_NO_EXTLANG; } if( strcmp(key_name , LOC_PRIVATE_TAG) ==0 ){ max_value = MAX_NO_PRIVATE; } /* Multiple variant values as variant0, variant1 ,variant2 */ isFirstSubtag = 0; for( i=0 ; i< max_value; i++ ){ snprintf( cur_key_name , 30, "%s%d", key_name , i); if( zend_hash_find( hash_arr , cur_key_name , strlen(cur_key_name) + 1,(void **)&ele_value ) == SUCCESS ){ if( Z_TYPE_PP(ele_value)!= IS_STRING ){ /* variant is not a string */ return FAILURE; } /* Add the contents */ if (isFirstSubtag++ == 0){ add_prefix(loc_name , cur_key_name); } smart_str_appendl(loc_name, SEPARATOR , sizeof(SEPARATOR)-1); smart_str_appendl(loc_name, Z_STRVAL_PP(ele_value) , Z_STRLEN_PP(ele_value)); } } /* end of for */ } /* end of else */ return SUCCESS; } /* }}} */ /*{{{ * If applicable sets error message and aborts locale_compose gracefully * returns 0 if locale_compose needs to be aborted * otherwise returns 1 */ static int handleAppendResult( int result, smart_str* loc_name TSRMLS_DC) { intl_error_reset( NULL TSRMLS_CC ); if( result == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: parameter array element is not a string", 0 TSRMLS_CC ); smart_str_free(loc_name); return 0; } return 1; } /* }}} */ #define RETURN_SMART_STR(s) smart_str_0((s)); RETURN_STRINGL((s)->c, (s)->len, 0) /* {{{ proto static string Locale::composeLocale($array) * Creates a locale by combining the parts of locale-ID passed * }}} */ /* {{{ proto static string compose_locale($array) * Creates a locale by combining the parts of locale-ID passed * }}} */ PHP_FUNCTION(locale_compose) { smart_str loc_name_s = {0}; smart_str *loc_name = &loc_name_s; zval* arr = NULL; HashTable* hash_arr = NULL; int result = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "a", &arr) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } hash_arr = HASH_OF( arr ); if( !hash_arr || zend_hash_num_elements( hash_arr ) == 0 ) RETURN_FALSE; /* Check for grandfathered first */ result = append_key_value(loc_name, hash_arr, LOC_GRANDFATHERED_LANG_TAG); if( result == SUCCESS){ RETURN_SMART_STR(loc_name); } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Not grandfathered */ result = append_key_value(loc_name, hash_arr , LOC_LANG_TAG); if( result == LOC_NOT_FOUND ){ intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_compose: parameter array does not contain 'language' tag.", 0 TSRMLS_CC ); smart_str_free(loc_name); RETURN_FALSE; } if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Extlang */ result = append_multiple_key_values(loc_name, hash_arr , LOC_EXTLANG_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Script */ result = append_key_value(loc_name, hash_arr , LOC_SCRIPT_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Region */ result = append_key_value( loc_name, hash_arr , LOC_REGION_TAG); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Variant */ result = append_multiple_key_values( loc_name, hash_arr , LOC_VARIANT_TAG TSRMLS_CC); if( !handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } /* Private */ result = append_multiple_key_values( loc_name, hash_arr , LOC_PRIVATE_TAG TSRMLS_CC); if( 
!handleAppendResult( result, loc_name TSRMLS_CC)){ RETURN_FALSE; } RETURN_SMART_STR(loc_name); } /* }}} */ /*{{{ * Parses the locale and returns private subtags if existing * else returns NULL * e.g. for locale='en_US-x-prv1-prv2-prv3' * returns a pointer to the string 'prv1-prv2-prv3' */ static char* get_private_subtags(const char* loc_name) { char* result =NULL; int singletonPos = 0; int len =0; const char* mod_loc_name =NULL; if( loc_name && (len = strlen(loc_name)>0 ) ){ mod_loc_name = loc_name ; len = strlen(mod_loc_name); while( (singletonPos = getSingletonPos(mod_loc_name))!= -1){ if( singletonPos!=-1){ if( (*(mod_loc_name+singletonPos)=='x') || (*(mod_loc_name+singletonPos)=='X') ){ /* private subtag start found */ if( singletonPos + 2 == len){ /* loc_name ends with '-x-' ; return NULL */ } else{ /* result = mod_loc_name + singletonPos +2; */ result = estrndup(mod_loc_name + singletonPos+2 , (len -( singletonPos +2) ) ); } break; } else{ if( singletonPos + 1 >= len){ /* String end */ break; } else { /* singleton found but not a private subtag , hence check further in the string for the private subtag */ mod_loc_name = mod_loc_name + singletonPos +1; len = strlen(mod_loc_name); } } } } /* end of while */ } return result; } /* }}} */ /* {{{ code used by locale_parse */ static int add_array_entry(const char* loc_name, zval* hash_arr, char* key_name TSRMLS_DC) { char* key_value = NULL; char* cur_key_name = NULL; char* token = NULL; char* last_ptr = NULL; int result = 0; int cur_result = 0; int cnt = 0; if( strcmp(key_name , LOC_PRIVATE_TAG)==0 ){ key_value = get_private_subtags( loc_name ); result = 1; } else { key_value = get_icu_value_internal( loc_name , key_name , &result,1 ); } if( (strcmp(key_name , LOC_PRIVATE_TAG)==0) || ( strcmp(key_name , LOC_VARIANT_TAG)==0) ){ if( result > 0 && key_value){ /* Tokenize on the "_" or "-" */ token = php_strtok_r( key_value , DELIMITER ,&last_ptr); if( cur_key_name ){ efree( cur_key_name); } cur_key_name = (char*)ecalloc( 25, 25); sprintf( cur_key_name , "%s%d", key_name , cnt++); add_assoc_string( hash_arr, cur_key_name , token ,TRUE ); /* tokenize on the "_" or "-" and stop at singleton if any */ while( (token = php_strtok_r(NULL , DELIMITER , &last_ptr)) && (strlen(token)>1) ){ sprintf( cur_key_name , "%s%d", key_name , cnt++); add_assoc_string( hash_arr, cur_key_name , token , TRUE ); } /* if( strcmp(key_name, LOC_PRIVATE_TAG) == 0 ){ } */ } } else { if( result == 1 ){ add_assoc_string( hash_arr, key_name , key_value , TRUE ); cur_result = 1; } } if( cur_key_name ){ efree( cur_key_name); } /*if( key_name != LOC_PRIVATE_TAG && key_value){*/ if( key_value){ efree(key_value); } return cur_result; } /* }}} */ /* {{{ proto static array Locale::parseLocale($locale) * parses a locale-id into an array the different parts of it }}} */ /* {{{ proto static array parse_locale($locale) * parses a locale-id into an array the different parts of it */ PHP_FUNCTION(locale_parse) { const char* loc_name = NULL; int loc_name_len = 0; int grOffset = 0; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &loc_name, &loc_name_len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_parse: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } array_init( return_value ); grOffset = findOffset( LOC_GRANDFATHERED , loc_name ); if( grOffset >= 0 ){ add_assoc_string( return_value , LOC_GRANDFATHERED_LANG_TAG , estrdup(loc_name) 
,FALSE ); } else{ /* Not grandfathered */ add_array_entry( loc_name , return_value , LOC_LANG_TAG TSRMLS_CC); add_array_entry( loc_name , return_value , LOC_SCRIPT_TAG TSRMLS_CC); add_array_entry( loc_name , return_value , LOC_REGION_TAG TSRMLS_CC); add_array_entry( loc_name , return_value , LOC_VARIANT_TAG TSRMLS_CC); add_array_entry( loc_name , return_value , LOC_PRIVATE_TAG TSRMLS_CC); } } /* }}} */ /* {{{ proto static array Locale::getAllVariants($locale) * gets an array containing the list of variants, or null }}} */ /* {{{ proto static array locale_get_all_variants($locale) * gets an array containing the list of variants, or null */ PHP_FUNCTION(locale_get_all_variants) { const char* loc_name = NULL; int loc_name_len = 0; int result = 0; char* token = NULL; char* variant = NULL; char* saved_ptr = NULL; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &loc_name, &loc_name_len ) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_parse: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_name_len == 0) { loc_name = intl_locale_get_default(TSRMLS_C); } array_init( return_value ); /* If the locale is grandfathered, stop, no variants */ if( findOffset( LOC_GRANDFATHERED , loc_name ) >= 0 ){ /* ("Grandfathered Tag. No variants."); */ } else { /* Call ICU variant */ variant = get_icu_value_internal( loc_name , LOC_VARIANT_TAG , &result ,0); if( result > 0 && variant){ /* Tokenize on the "_" or "-" */ token = php_strtok_r( variant , DELIMITER , &saved_ptr); add_next_index_stringl( return_value, token , strlen(token) ,TRUE ); /* tokenize on the "_" or "-" and stop at singleton if any */ while( (token = php_strtok_r(NULL , DELIMITER, &saved_ptr)) && (strlen(token)>1) ){ add_next_index_stringl( return_value, token , strlen(token) ,TRUE ); } } if( variant ){ efree( variant ); } } } /* }}} */ /*{{{ * Converts to lower case and also replaces all hyphens with the underscore */ static int strToMatch(const char* str ,char *retstr) { char* anchor = NULL; const char* anchor1 = NULL; int result = 0; if( (!str) || str[0] == '\0'){ return result; } else { anchor = retstr; anchor1 = str; while( (*str)!='\0' ){ if( *str == '-' ){ *retstr = '_'; } else { *retstr = tolower(*str); } str++; retstr++; } *retstr = '\0'; retstr= anchor; str= anchor1; result = 1; } return(result); } /* }}} */ /* {{{ proto static boolean Locale::filterMatches(string $langtag, string $locale[, bool $canonicalize]) * Checks if a $langtag filter matches with $locale according to RFC 4647's basic filtering algorithm */ /* }}} */ /* {{{ proto boolean locale_filter_matches(string $langtag, string $locale[, bool $canonicalize]) * Checks if a $langtag filter matches with $locale according to RFC 4647's basic filtering algorithm */ PHP_FUNCTION(locale_filter_matches) { char* lang_tag = NULL; int lang_tag_len = 0; const char* loc_range = NULL; int loc_range_len = 0; int result = 0; char* token = 0; char* chrcheck = NULL; char* can_lang_tag = NULL; char* can_loc_range = NULL; char* cur_lang_tag = NULL; char* cur_loc_range = NULL; zend_bool boolCanonical = 0; UErrorCode status = U_ZERO_ERROR; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "ss|b", &lang_tag, &lang_tag_len , &loc_range , &loc_range_len , &boolCanonical) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_filter_matches: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_range_len == 0) { loc_range = 
intl_locale_get_default(TSRMLS_C); } if( strcmp(loc_range,"*")==0){ RETURN_TRUE; } if( boolCanonical ){ /* canonicalize loc_range */ can_loc_range=get_icu_value_internal( loc_range , LOC_CANONICALIZE_TAG , &result , 0); if( result ==0) { intl_error_set( NULL, status, "locale_filter_matches : unable to canonicalize loc_range" , 0 TSRMLS_CC ); RETURN_FALSE; } /* canonicalize lang_tag */ can_lang_tag = get_icu_value_internal( lang_tag , LOC_CANONICALIZE_TAG , &result , 0); if( result ==0) { intl_error_set( NULL, status, "locale_filter_matches : unable to canonicalize lang_tag" , 0 TSRMLS_CC ); RETURN_FALSE; } /* Convert to lower case for case-insensitive comparison */ cur_lang_tag = ecalloc( 1, strlen(can_lang_tag) + 1); /* Convert to lower case for case-insensitive comparison */ result = strToMatch( can_lang_tag , cur_lang_tag); if( result == 0) { efree( cur_lang_tag ); efree( can_lang_tag ); RETURN_FALSE; } cur_loc_range = ecalloc( 1, strlen(can_loc_range) + 1); result = strToMatch( can_loc_range , cur_loc_range ); if( result == 0) { efree( cur_lang_tag ); efree( can_lang_tag ); efree( cur_loc_range ); efree( can_loc_range ); RETURN_FALSE; } /* check if prefix */ token = strstr( cur_lang_tag , cur_loc_range ); if( token && (token==cur_lang_tag) ){ /* check if the char. after match is SEPARATOR */ chrcheck = token + (strlen(cur_loc_range)); if( isIDSeparator(*chrcheck) || isEndOfTag(*chrcheck) ){ if( cur_lang_tag){ efree( cur_lang_tag ); } if( cur_loc_range){ efree( cur_loc_range ); } if( can_lang_tag){ efree( can_lang_tag ); } if( can_loc_range){ efree( can_loc_range ); } RETURN_TRUE; } } /* No prefix as loc_range */ if( cur_lang_tag){ efree( cur_lang_tag ); } if( cur_loc_range){ efree( cur_loc_range ); } if( can_lang_tag){ efree( can_lang_tag ); } if( can_loc_range){ efree( can_loc_range ); } RETURN_FALSE; } /* end of if isCanonical */ else{ /* Convert to lower case for case-insensitive comparison */ cur_lang_tag = ecalloc( 1, strlen(lang_tag ) + 1); result = strToMatch( lang_tag , cur_lang_tag); if( result == 0) { efree( cur_lang_tag ); RETURN_FALSE; } cur_loc_range = ecalloc( 1, strlen(loc_range ) + 1); result = strToMatch( loc_range , cur_loc_range ); if( result == 0) { efree( cur_lang_tag ); efree( cur_loc_range ); RETURN_FALSE; } /* check if prefix */ token = strstr( cur_lang_tag , cur_loc_range ); if( token && (token==cur_lang_tag) ){ /* check if the char. 
after match is SEPARATOR */ chrcheck = token + (strlen(cur_loc_range)); if( isIDSeparator(*chrcheck) || isEndOfTag(*chrcheck) ){ if( cur_lang_tag){ efree( cur_lang_tag ); } if( cur_loc_range){ efree( cur_loc_range ); } RETURN_TRUE; } } /* No prefix as loc_range */ if( cur_lang_tag){ efree( cur_lang_tag ); } if( cur_loc_range){ efree( cur_loc_range ); } RETURN_FALSE; } } /* }}} */ static void array_cleanup( char* arr[] , int arr_size) { int i=0; for( i=0; i< arr_size; i++ ){ if( arr[i*2] ){ efree( arr[i*2]); } } efree(arr); } #define LOOKUP_CLEAN_RETURN(value) array_cleanup(cur_arr, cur_arr_len); return (value) /* {{{ * returns the lookup result to lookup_loc_range_src_php * internal function */ static char* lookup_loc_range(const char* loc_range, HashTable* hash_arr, int canonicalize TSRMLS_DC) { int i = 0; int cur_arr_len = 0; int result = 0; char* lang_tag = NULL; zval** ele_value = NULL; char** cur_arr = NULL; char* cur_loc_range = NULL; char* can_loc_range = NULL; int saved_pos = 0; char* return_value = NULL; cur_arr = ecalloc(zend_hash_num_elements(hash_arr)*2, sizeof(char *)); /* convert the array to lowercase , also replace hyphens with the underscore and store it in cur_arr */ for(zend_hash_internal_pointer_reset(hash_arr); zend_hash_has_more_elements(hash_arr) == SUCCESS; zend_hash_move_forward(hash_arr)) { if (zend_hash_get_current_data(hash_arr, (void**)&ele_value) == FAILURE) { /* Should never actually fail since the key is known to exist.*/ continue; } if(Z_TYPE_PP(ele_value)!= IS_STRING) { /* element value is not a string */ intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: locale array element is not a string", 0 TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } cur_arr[cur_arr_len*2] = estrndup(Z_STRVAL_PP(ele_value), Z_STRLEN_PP(ele_value)); result = strToMatch(Z_STRVAL_PP(ele_value), cur_arr[cur_arr_len*2]); if(result == 0) { intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize lang_tag", 0 TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } cur_arr[cur_arr_len*2+1] = Z_STRVAL_PP(ele_value); cur_arr_len++ ; } /* end of for */ /* Canonicalize array elements */ if(canonicalize) { for(i=0; i<cur_arr_len; i++) { lang_tag = get_icu_value_internal(cur_arr[i*2], LOC_CANONICALIZE_TAG, &result, 0); if(result != 1 || lang_tag == NULL || !lang_tag[0]) { if(lang_tag) { efree(lang_tag); } intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize lang_tag" , 0 TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } cur_arr[i*2] = erealloc(cur_arr[i*2], strlen(lang_tag)+1); result = strToMatch(lang_tag, cur_arr[i*2]); efree(lang_tag); if(result == 0) { intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize lang_tag" , 0 TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } } } if(canonicalize) { /* Canonicalize the loc_range */ can_loc_range = get_icu_value_internal(loc_range, LOC_CANONICALIZE_TAG, &result , 0); if( result != 1 || can_loc_range == NULL || !can_loc_range[0]) { /* Error */ intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize loc_range" , 0 TSRMLS_CC ); if(can_loc_range) { efree(can_loc_range); } LOOKUP_CLEAN_RETURN(NULL); } else { loc_range = can_loc_range; } } cur_loc_range = ecalloc(1, strlen(loc_range)+1); /* convert to lower and replace hyphens */ result = strToMatch(loc_range, cur_loc_range); if(can_loc_range) { efree(can_loc_range); } if(result == 0) { intl_error_set(NULL, U_ILLEGAL_ARGUMENT_ERROR, "lookup_loc_range: unable to canonicalize lang_tag" , 0 
TSRMLS_CC); LOOKUP_CLEAN_RETURN(NULL); } /* Lookup for the lang_tag match */ saved_pos = strlen(cur_loc_range); while(saved_pos > 0) { for(i=0; i< cur_arr_len; i++){ if(cur_arr[i*2] != NULL && strlen(cur_arr[i*2]) == saved_pos && strncmp(cur_loc_range, cur_arr[i*2], saved_pos) == 0) { /* Match found */ return_value = estrdup(canonicalize?cur_arr[i*2]:cur_arr[i*2+1]); efree(cur_loc_range); LOOKUP_CLEAN_RETURN(return_value); } } saved_pos = getStrrtokenPos(cur_loc_range, saved_pos); } /* Match not found */ efree(cur_loc_range); LOOKUP_CLEAN_RETURN(NULL); } /* }}} */ /* {{{ proto string Locale::lookup(array $langtag, string $locale[, bool $canonicalize[, string $default = null]]) * Searchs the items in $langtag for the best match to the language * range */ /* }}} */ /* {{{ proto string locale_lookup(array $langtag, string $locale[, bool $canonicalize[, string $default = null]]) * Searchs the items in $langtag for the best match to the language * range */ PHP_FUNCTION(locale_lookup) { char* fallback_loc = NULL; int fallback_loc_len = 0; const char* loc_range = NULL; int loc_range_len = 0; zval* arr = NULL; HashTable* hash_arr = NULL; zend_bool boolCanonical = 0; char* result =NULL; intl_error_reset( NULL TSRMLS_CC ); if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "as|bs", &arr, &loc_range, &loc_range_len, &boolCanonical, &fallback_loc, &fallback_loc_len) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_lookup: unable to parse input params", 0 TSRMLS_CC ); RETURN_FALSE; } if(loc_range_len == 0) { loc_range = intl_locale_get_default(TSRMLS_C); } hash_arr = HASH_OF(arr); if( !hash_arr || zend_hash_num_elements( hash_arr ) == 0 ) { RETURN_EMPTY_STRING(); } result = lookup_loc_range(loc_range, hash_arr, boolCanonical TSRMLS_CC); if(result == NULL || result[0] == '\0') { if( fallback_loc ) { result = estrndup(fallback_loc, fallback_loc_len); } else { RETURN_EMPTY_STRING(); } } RETVAL_STRINGL(result, strlen(result), 0); } /* }}} */ /* {{{ proto string Locale::acceptFromHttp(string $http_accept) * Tries to find out best available locale based on HTTP �Accept-Language� header */ /* }}} */ /* {{{ proto string locale_accept_from_http(string $http_accept) * Tries to find out best available locale based on HTTP �Accept-Language� header */ PHP_FUNCTION(locale_accept_from_http) { UEnumeration *available; char *http_accept = NULL; int http_accept_len; UErrorCode status = 0; int len; char resultLocale[INTL_MAX_LOCALE_LEN+1]; UAcceptResult outResult; if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s", &http_accept, &http_accept_len) == FAILURE) { intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, "locale_accept_from_http: unable to parse input parameters", 0 TSRMLS_CC ); RETURN_FALSE; } available = ures_openAvailableLocales(NULL, &status); INTL_CHECK_STATUS(status, "locale_accept_from_http: failed to retrieve locale list"); len = uloc_acceptLanguageFromHTTP(resultLocale, INTL_MAX_LOCALE_LEN, &outResult, http_accept, available, &status); uenum_close(available); INTL_CHECK_STATUS(status, "locale_accept_from_http: failed to find acceptable locale"); if (len < 0 || outResult == ULOC_ACCEPT_FAILED) { RETURN_FALSE; } RETURN_STRINGL(resultLocale, len, 1); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 *can_loc_len */
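/* {{{ illustrative sketch (not part of the original source)
 * Per the diff record below, the one functional change between the two
 * copies of this file appears to be a single added line inside
 * get_icu_value_internal()'s overflow branch: "buflen++;" with the
 * comment "add space for \0". ICU preflighting calls such as
 * uloc_getLanguage() report the value length excluding the terminating
 * NUL, so retrying a U_BUFFER_OVERFLOW_ERROR with a buffer of exactly
 * that length leaves no room for the terminator. A standalone version
 * of the corrected grow-and-retry loop, using plain realloc instead of
 * the erealloc used above; get_language_tag is a hypothetical name.
 */
#include <stdlib.h>
#include <unicode/uloc.h>

static char *get_language_tag(const char *loc_name)
{
	int32_t    cap    = 8;        /* deliberately small to force a retry */
	char      *buf    = NULL;
	UErrorCode status = U_ZERO_ERROR;

	for (;;) {
		buf = realloc(buf, (size_t)cap);          /* sketch: unchecked */
		int32_t len = uloc_getLanguage(loc_name, buf, cap, &status);
		if (status == U_BUFFER_OVERFLOW_ERROR) {
			status = U_ZERO_ERROR;
			cap = len + 1;        /* add space for '\0' -- the fix */
			continue;
		}
		if (U_FAILURE(status)) {  /* hard ICU error: give up */
			free(buf);
			return NULL;
		}
		return buf;               /* NUL-terminated language subtag */
	}
}
/* }}} */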
static void get_icu_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS)
{
	const char* loc_name     = NULL;
	int         loc_name_len = 0;

	char*       tag_value    = NULL;
	char*       empty_result = "";

	int         result       = 0;
	char*       msg          = NULL;

	UErrorCode  status       = U_ZERO_ERROR;

	intl_error_reset( NULL TSRMLS_CC );

	if(zend_parse_parameters( ZEND_NUM_ARGS() TSRMLS_CC, "s",
		&loc_name ,&loc_name_len ) == FAILURE) {
		spprintf(&msg , 0, "locale_get_%s : unable to parse input params", tag_name );
		intl_error_set( NULL, U_ILLEGAL_ARGUMENT_ERROR, msg , 1 TSRMLS_CC );
		efree(msg);
		RETURN_FALSE;
	}

	if(loc_name_len == 0) {
		loc_name = intl_locale_get_default(TSRMLS_C);
	}

	/* Call ICU get */
	tag_value = get_icu_value_internal( loc_name , tag_name , &result , 0);

	/* No value found */
	if( result == -1 ) {
		if( tag_value){
			efree( tag_value);
		}
		RETURN_STRING( empty_result , TRUE);
	}

	/* value found */
	if( tag_value){
		RETURN_STRING( tag_value , FALSE);
	}

	/* Error encountered while fetching the value */
	if( result == 0) {
		spprintf(&msg , 0, "locale_get_%s : unable to get locale %s", tag_name , tag_name );
		intl_error_set( NULL, status, msg , 1 TSRMLS_CC );
		efree(msg);
		RETURN_NULL();
	}
}
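/* {{{ illustrative usage note (not part of the original source)
 * The tri-state result convention that the wrapper above translates into
 * PHP return values is easy to misread, so a hedged caller sketch; it
 * relies on this file's own helpers, and use_value() is hypothetical:
 *   result ==  1  -> tag_value holds the value (caller must efree it)
 *   result == -1  -> tag absent from the locale ID (PHP sees "")
 *   result ==  0  -> ICU error (PHP sees NULL plus an intl error)
 */
int   result    = 0;
char *tag_value = get_icu_value_internal(loc_name, LOC_LANG_TAG, &result, 0);
if (result == 1) {
	use_value(tag_value);     /* hypothetical consumer of the subtag */
	efree(tag_value);
} else if (result == -1) {
	/* no such subtag; tag_value is NULL, already freed internally */
} else {
	/* result == 0: lookup failed inside ICU; report via intl_error_set() */
}
/* }}} */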
{'added': [(68, '\t"cel-gaulish",\t\t"en-GB-oed",\t\t"i-ami",'), (69, '\t"i-bnn",\t\t"i-default",\t\t"i-enochian",'), (70, '\t"i-mingo",\t\t"i-pwn", \t\t"i-tao",'), (75, '\t"zh-min-nan", \t\t"zh-wuu", \t\t"zh-xiang",'), (81, '* This is in sync with the array LOC_GRANDFATHERED'), (87, '\t"nv", \t\t\t"nb",\t\t\t"nn",'), (125, '{'), (144, '* returns the position of next token for lookup'), (146, '* strtokr equivalent search for token in reverse direction'), (152, ''), (174, '* returns the position of a singleton if present'), (183, ''), (201, ''), (227, '\tint len=0;'), (243, '\tzend_alter_ini_entry(LOCALE_INI_NAME, sizeof(LOCALE_INI_NAME), locale_name, len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME);'), (250, '* Gets the value from ICU'), (287, '\t\tsingletonPos = getSingletonPos( loc_name );'), (302, '\t\tmod_loc_name = estrdup(loc_name );'), (329, '\t\t\t\tbuflen++; /* add space for \\0 */'), (370, 'static void get_icu_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS)'), (426, '/* {{{ proto static string Locale::getScript($locale)'), (427, ' * gets the script for the $locale'), (429, '/* {{{ proto static string locale_get_script($locale)'), (430, ' * gets the script for the $locale'), (432, 'PHP_FUNCTION( locale_get_script )'), (438, '/* {{{ proto static string Locale::getRegion($locale)'), (439, ' * gets the region for the $locale'), (441, '/* {{{ proto static string locale_get_region($locale)'), (442, ' * gets the region for the $locale'), (444, 'PHP_FUNCTION( locale_get_region )'), (450, '/* {{{ proto static string Locale::getPrimaryLanguage($locale)'), (451, ' * gets the primary language for the $locale'), (453, '/* {{{ proto static string locale_get_primary_language($locale)'), (454, ' * gets the primary language for the $locale'), (456, 'PHP_FUNCTION(locale_get_primary_language )'), (464, ' * common code shared by display_xyz functions to get the value from ICU'), (466, 'static void get_icu_disp_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS)'), (492, '\t\t&loc_name, &loc_name_len ,'), (529, ''), (608, 'PHP_FUNCTION(locale_get_display_name)'), (620, 'PHP_FUNCTION(locale_get_display_language)'), (632, 'PHP_FUNCTION(locale_get_display_script)'), (644, 'PHP_FUNCTION(locale_get_display_region)'), (658, 'PHP_FUNCTION(locale_get_display_variant)'), (671, ' */'), (683, '/*'), (684, '\tICU expects the buffer to be allocated before calling the function'), (685, '\tand so the buffer size has been explicitly specified'), (686, '\tICU uloc.h #define \tULOC_KEYWORD_AND_VALUES_CAPACITY 100'), (725, '\t\t\t}'), (744, ' /* {{{ proto static string Locale::canonicalize($locale)'), (745, ' * @return string the canonicalized locale'), (747, ' /* {{{ proto static string locale_canonicalize(Locale $loc, string $locale)'), (756, '/* {{{ append_key_value'), (759, '* returns 1 if successful , -1 if not found ,'), (771, '\t\tif(strcmp(key_name, LOC_LANG_TAG) != 0 &&'), (796, '/* {{{ append_multiple_key_values'), (799, "* used for 'variant','extlang','private'"), (800, '* returns 1 if successful , -1 if not found ,'), (854, '\t\tfor( i=0 ; i< max_value; i++ ){'), (855, '\t\t\tsnprintf( cur_key_name , 30, "%s%d", key_name , i);'), (877, '* returns 0 if locale_compose needs to be aborted'), (894, '/* {{{ proto static string Locale::composeLocale($array)'), (895, '* Creates a locale by combining the parts of locale-ID passed'), (897, '/* {{{ proto static string compose_locale($array)'), (898, '* Creates a locale by combining the parts of locale-ID passed'), (924, '\tresult = 
append_key_value(loc_name, hash_arr, LOC_GRANDFATHERED_LANG_TAG);'), (933, '\tresult = append_key_value(loc_name, hash_arr , LOC_LANG_TAG);'), (951, '\tresult = append_key_value(loc_name, hash_arr , LOC_SCRIPT_TAG);'), (955, ''), (963, '\tresult = append_multiple_key_values( loc_name, hash_arr , LOC_VARIANT_TAG TSRMLS_CC);'), (989, '\tint \tlen =0;'), (993, '\t\tmod_loc_name = loc_name ;'), (997, '\t\t\tif( singletonPos!=-1){'), (998, "\t\t\t\tif( (*(mod_loc_name+singletonPos)=='x') || (*(mod_loc_name+singletonPos)=='X') ){"), (1023, ''), (1048, '\tif( (strcmp(key_name , LOC_PRIVATE_TAG)==0) ||'), (1052, '\t\t\ttoken = php_strtok_r( key_value , DELIMITER ,&last_ptr);'), (1057, '\t\t\tsprintf( cur_key_name , "%s%d", key_name , cnt++);'), (1061, '\t\t\t\tsprintf( cur_key_name , "%s%d", key_name , cnt++);'), (1081, '\t\tefree(key_value);'), (1087, '/* {{{ proto static array Locale::parseLocale($locale)'), (1090, '/* {{{ proto static array parse_locale($locale)'), (1148, ''), (1166, '\tif( findOffset( LOC_GRANDFATHERED , loc_name ) >= 0 ){'), (1169, '\telse {'), (1174, '\t\t\ttoken = php_strtok_r( variant , DELIMITER , &saved_ptr);'), (1185, ''), (1224, "* Checks if a $langtag filter matches with $locale according to RFC 4647's basic filtering algorithm"), (1228, "* Checks if a $langtag filter matches with $locale according to RFC 4647's basic filtering algorithm"), (1247, '\tzend_bool \tboolCanonical \t= 0;'), (1251, ''), (1253, '\t\t&lang_tag, &lang_tag_len , &loc_range , &loc_range_len ,'), (1274, '\t\t\tintl_error_set( NULL, status,'), (1282, '\t\t\tintl_error_set( NULL, status,'), (1310, ''), (1314, '\t\t\tif( isIDSeparator(*chrcheck) || isEndOfTag(*chrcheck) ){'), (1350, ''), (1366, ''), (1370, '\t\t\tif( isIDSeparator(*chrcheck) || isEndOfTag(*chrcheck) ){'), (1397, '\tfor( i=0; i< arr_size; i++ ){'), (1407, '* returns the lookup result to lookup_loc_range_src_php'), (1431, ''), (1440, '\t\t}'), (1448, '\t\tcur_arr_len++ ;'), (1453, '\t\tfor(i=0; i<cur_arr_len; i++) {'), (1463, '\t\t\tresult = strToMatch(lang_tag, cur_arr[i*2]);'), (1486, '\t}'), (1490, '\tresult = strToMatch(loc_range, cur_loc_range);'), (1502, '\t\tfor(i=0; i< cur_arr_len; i++){'), (1503, '\t\t\tif(cur_arr[i*2] != NULL && strlen(cur_arr[i*2]) == saved_pos && strncmp(cur_loc_range, cur_arr[i*2], saved_pos) == 0) {'), (1519, '/* {{{ proto string Locale::lookup(array $langtag, string $locale[, bool $canonicalize[, string $default = null]])'), (1521, '* range'), (1526, '* range'), (1556, '\t}'), (1557, ''), (1594, ''), (1597, '\tlen = uloc_acceptLanguageFromHTTP(resultLocale, INTL_MAX_LOCALE_LEN,')], 'deleted': [(68, '\t"cel-gaulish",\t\t"en-GB-oed",\t\t"i-ami",'), (69, '\t"i-bnn",\t\t"i-default",\t\t"i-enochian",'), (70, '\t"i-mingo",\t\t"i-pwn", \t\t"i-tao",'), (75, '\t"zh-min-nan", \t\t"zh-wuu", \t\t"zh-xiang",'), (81, '* This is in sync with the array LOC_GRANDFATHERED'), (87, '\t"nv", \t\t\t"nb",\t\t\t"nn",'), (125, '{'), (144, '* returns the position of next token for lookup'), (146, '* strtokr equivalent search for token in reverse direction'), (152, ''), (174, '* returns the position of a singleton if present'), (183, ''), (201, ''), (227, '\tint len=0;'), (243, '\tzend_alter_ini_entry(LOCALE_INI_NAME, sizeof(LOCALE_INI_NAME), locale_name, len, PHP_INI_USER, PHP_INI_STAGE_RUNTIME);'), (250, '* Gets the value from ICU'), (287, '\t\tsingletonPos = getSingletonPos( loc_name );'), (302, '\t\tmod_loc_name = estrdup(loc_name );'), (369, 'static void get_icu_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS)'), 
(425, '/* {{{ proto static string Locale::getScript($locale)'), (426, ' * gets the script for the $locale'), (428, '/* {{{ proto static string locale_get_script($locale)'), (429, ' * gets the script for the $locale'), (431, 'PHP_FUNCTION( locale_get_script )'), (437, '/* {{{ proto static string Locale::getRegion($locale)'), (438, ' * gets the region for the $locale'), (440, '/* {{{ proto static string locale_get_region($locale)'), (441, ' * gets the region for the $locale'), (443, 'PHP_FUNCTION( locale_get_region )'), (449, '/* {{{ proto static string Locale::getPrimaryLanguage($locale)'), (450, ' * gets the primary language for the $locale'), (452, '/* {{{ proto static string locale_get_primary_language($locale)'), (453, ' * gets the primary language for the $locale'), (455, 'PHP_FUNCTION(locale_get_primary_language )'), (463, ' * common code shared by display_xyz functions to get the value from ICU'), (465, 'static void get_icu_disp_value_src_php( char* tag_name, INTERNAL_FUNCTION_PARAMETERS)'), (491, '\t\t&loc_name, &loc_name_len ,'), (528, ''), (607, 'PHP_FUNCTION(locale_get_display_name)'), (619, 'PHP_FUNCTION(locale_get_display_language)'), (631, 'PHP_FUNCTION(locale_get_display_script)'), (643, 'PHP_FUNCTION(locale_get_display_region)'), (657, 'PHP_FUNCTION(locale_get_display_variant)'), (670, ' */'), (682, '/*'), (683, '\tICU expects the buffer to be allocated before calling the function'), (684, '\tand so the buffer size has been explicitly specified'), (685, '\tICU uloc.h #define \tULOC_KEYWORD_AND_VALUES_CAPACITY 100'), (724, '\t\t\t}'), (743, ' /* {{{ proto static string Locale::canonicalize($locale)'), (744, ' * @return string the canonicalized locale'), (746, ' /* {{{ proto static string locale_canonicalize(Locale $loc, string $locale)'), (755, '/* {{{ append_key_value'), (758, '* returns 1 if successful , -1 if not found ,'), (770, '\t\tif(strcmp(key_name, LOC_LANG_TAG) != 0 &&'), (795, '/* {{{ append_multiple_key_values'), (798, "* used for 'variant','extlang','private'"), (799, '* returns 1 if successful , -1 if not found ,'), (853, '\t\tfor( i=0 ; i< max_value; i++ ){'), (854, '\t\t\tsnprintf( cur_key_name , 30, "%s%d", key_name , i);'), (876, '* returns 0 if locale_compose needs to be aborted'), (893, '/* {{{ proto static string Locale::composeLocale($array)'), (894, '* Creates a locale by combining the parts of locale-ID passed'), (896, '/* {{{ proto static string compose_locale($array)'), (897, '* Creates a locale by combining the parts of locale-ID passed'), (923, '\tresult = append_key_value(loc_name, hash_arr, LOC_GRANDFATHERED_LANG_TAG);'), (932, '\tresult = append_key_value(loc_name, hash_arr , LOC_LANG_TAG);'), (950, '\tresult = append_key_value(loc_name, hash_arr , LOC_SCRIPT_TAG);'), (954, ''), (962, '\tresult = append_multiple_key_values( loc_name, hash_arr , LOC_VARIANT_TAG TSRMLS_CC);'), (988, '\tint \tlen =0;'), (992, '\t\tmod_loc_name = loc_name ;'), (996, '\t\t\tif( singletonPos!=-1){'), (997, "\t\t\t\tif( (*(mod_loc_name+singletonPos)=='x') || (*(mod_loc_name+singletonPos)=='X') ){"), (1022, ''), (1047, '\tif( (strcmp(key_name , LOC_PRIVATE_TAG)==0) ||'), (1051, '\t\t\ttoken = php_strtok_r( key_value , DELIMITER ,&last_ptr);'), (1056, '\t\t\tsprintf( cur_key_name , "%s%d", key_name , cnt++);'), (1060, '\t\t\t\tsprintf( cur_key_name , "%s%d", key_name , cnt++);'), (1080, '\t\tefree(key_value);'), (1086, '/* {{{ proto static array Locale::parseLocale($locale)'), (1089, '/* {{{ proto static array parse_locale($locale)'), (1147, ''), (1165, '\tif( 
findOffset( LOC_GRANDFATHERED , loc_name ) >= 0 ){'), (1168, '\telse {'), (1173, '\t\t\ttoken = php_strtok_r( variant , DELIMITER , &saved_ptr);'), (1184, ''), (1223, "* Checks if a $langtag filter matches with $locale according to RFC 4647's basic filtering algorithm"), (1227, "* Checks if a $langtag filter matches with $locale according to RFC 4647's basic filtering algorithm"), (1246, '\tzend_bool \tboolCanonical \t= 0;'), (1250, ''), (1252, '\t\t&lang_tag, &lang_tag_len , &loc_range , &loc_range_len ,'), (1273, '\t\t\tintl_error_set( NULL, status,'), (1281, '\t\t\tintl_error_set( NULL, status,'), (1309, ''), (1313, '\t\t\tif( isIDSeparator(*chrcheck) || isEndOfTag(*chrcheck) ){'), (1349, ''), (1365, ''), (1369, '\t\t\tif( isIDSeparator(*chrcheck) || isEndOfTag(*chrcheck) ){'), (1396, '\tfor( i=0; i< arr_size; i++ ){'), (1406, '* returns the lookup result to lookup_loc_range_src_php'), (1430, ''), (1439, '\t\t}'), (1447, '\t\tcur_arr_len++ ;'), (1452, '\t\tfor(i=0; i<cur_arr_len; i++) {'), (1462, '\t\t\tresult = strToMatch(lang_tag, cur_arr[i*2]);'), (1485, '\t}'), (1489, '\tresult = strToMatch(loc_range, cur_loc_range);'), (1501, '\t\tfor(i=0; i< cur_arr_len; i++){'), (1502, '\t\t\tif(cur_arr[i*2] != NULL && strlen(cur_arr[i*2]) == saved_pos && strncmp(cur_loc_range, cur_arr[i*2], saved_pos) == 0) {'), (1518, '/* {{{ proto string Locale::lookup(array $langtag, string $locale[, bool $canonicalize[, string $default = null]])'), (1520, '* range'), (1525, '* range'), (1555, '\t}'), (1556, ''), (1593, ''), (1596, '\tlen = uloc_acceptLanguageFromHTTP(resultLocale, INTL_MAX_LOCALE_LEN,')]}
118
117
1,039
6,142
37
221
7
https://github.com/php/php-src
CVE-2016-5093
CWE-125
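The diff above (CVE-2016-5093, CWE-125) touches PHP intl's locale parsing, including helpers such as getSingletonPos() that scan a locale string for a single-letter singleton subtag. As a self-contained illustration of the bounds-checked scanning this class of out-of-bounds-read fix calls for (a sketch only, not the PHP source; find_singleton_pos and SEPARATOR are made-up names):

#include <stdio.h>
#include <string.h>

#define SEPARATOR '-'

/* Return the index of a singleton subtag (a single ASCII letter
 * bounded by separators or the string ends), or -1 if absent.
 * Every neighbor access is guarded by the precomputed length,
 * which is the property a CWE-125 fix is about. */
static int find_singleton_pos(const char *tag)
{
    size_t len = strlen(tag);
    size_t i;

    for (i = 0; i < len; i++) {
        int at_start = (i == 0) || (tag[i - 1] == SEPARATOR);
        int at_end   = (i + 1 == len) || (tag[i + 1] == SEPARATOR);

        if (at_start && at_end &&
            ((tag[i] >= 'a' && tag[i] <= 'z') ||
             (tag[i] >= 'A' && tag[i] <= 'Z')))
            return (int)i;
    }
    return -1;
}

int main(void)
{
    printf("%d\n", find_singleton_pos("en-x-private")); /* prints 3 */
    printf("%d\n", find_singleton_pos("de-DE"));        /* prints -1 */
    return 0;
}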
1,272
uas-detect.h
C
uas_find_uas_alt_setting
#include <linux/usb.h> #include <linux/usb/hcd.h> #include "usb.h" static int uas_is_interface(struct usb_host_interface *intf) { return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE && intf->desc.bInterfaceSubClass == USB_SC_SCSI && intf->desc.bInterfaceProtocol == USB_PR_UAS); } static int uas_find_uas_alt_setting(struct usb_interface *intf) { int i; for (i = 0; i < intf->num_altsetting; i++) { struct usb_host_interface *alt = &intf->altsetting[i]; if (uas_is_interface(alt)) return alt->desc.bAlternateSetting; } return -ENODEV; } static int uas_find_endpoints(struct usb_host_interface *alt, struct usb_host_endpoint *eps[]) { struct usb_host_endpoint *endpoint = alt->endpoint; unsigned i, n_endpoints = alt->desc.bNumEndpoints; for (i = 0; i < n_endpoints; i++) { unsigned char *extra = endpoint[i].extra; int len = endpoint[i].extralen; while (len >= 3) { if (extra[1] == USB_DT_PIPE_USAGE) { unsigned pipe_id = extra[2]; if (pipe_id > 0 && pipe_id < 5) eps[pipe_id - 1] = &endpoint[i]; break; } len -= extra[0]; extra += extra[0]; } } if (!eps[0] || !eps[1] || !eps[2] || !eps[3]) return -ENODEV; return 0; } static int uas_use_uas_driver(struct usb_interface *intf, const struct usb_device_id *id, unsigned long *flags_ret) { struct usb_host_endpoint *eps[4] = { }; struct usb_device *udev = interface_to_usbdev(intf); struct usb_hcd *hcd = bus_to_hcd(udev->bus); unsigned long flags = id->driver_info; int r, alt; alt = uas_find_uas_alt_setting(intf); if (alt < 0) return 0; r = uas_find_endpoints(&intf->altsetting[alt], eps); if (r < 0) return 0; /* * ASMedia has a number of usb3 to sata bridge chips, at the time of * this writing the following versions exist: * ASM1051 - no uas support version * ASM1051 - with broken (*) uas support * ASM1053 - with working uas support, but problems with large xfers * ASM1153 - with working uas support * * Devices with these chips re-use a number of device-ids over the * entire line, so the device-id is useless to determine if we're * dealing with an ASM1051 (which we want to avoid). * * The ASM1153 can be identified by config.MaxPower == 0, * where as the ASM105x models have config.MaxPower == 36. * * Differentiating between the ASM1053 and ASM1051 is trickier, when * connected over USB-3 we can look at the number of streams supported, * ASM1051 supports 32 streams, where as early ASM1053 versions support * 16 streams, newer ASM1053-s also support 32 streams, but have a * different prod-id. 
* * (*) ASM1051 chips do work with UAS with some disks (with the * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 || le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) { if (udev->actconfig->desc.bMaxPower == 0) { /* ASM1153, do nothing */ } else if (udev->speed < USB_SPEED_SUPER) { /* No streams info, assume ASM1051 */ flags |= US_FL_IGNORE_UAS; } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { /* Possibly an ASM1051, disable uas */ flags |= US_FL_IGNORE_UAS; } else { /* ASM1053, these have issues with large transfers */ flags |= US_FL_MAX_SECTORS_240; } } usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { dev_warn(&udev->dev, "UAS is blacklisted for this device, using usb-storage instead\n"); return 0; } if (udev->bus->sg_tablesize == 0) { dev_warn(&udev->dev, "The driver for the USB controller %s does not support scatter-gather which is\n", hcd->driver->description); dev_warn(&udev->dev, "required by the UAS driver. Please try an other USB controller if you wish to use UAS.\n"); return 0; } if (udev->speed >= USB_SPEED_SUPER && !hcd->can_do_streams) { dev_warn(&udev->dev, "USB controller %s does not support streams, which are required by the UAS driver.\n", hcd_to_bus(hcd)->bus_name); dev_warn(&udev->dev, "Please try an other USB controller if you wish to use UAS.\n"); return 0; } if (flags_ret) *flags_ret = flags; return 1; }
#include <linux/usb.h> #include <linux/usb/hcd.h> #include "usb.h" static int uas_is_interface(struct usb_host_interface *intf) { return (intf->desc.bInterfaceClass == USB_CLASS_MASS_STORAGE && intf->desc.bInterfaceSubClass == USB_SC_SCSI && intf->desc.bInterfaceProtocol == USB_PR_UAS); } static struct usb_host_interface *uas_find_uas_alt_setting( struct usb_interface *intf) { int i; for (i = 0; i < intf->num_altsetting; i++) { struct usb_host_interface *alt = &intf->altsetting[i]; if (uas_is_interface(alt)) return alt; } return NULL; } static int uas_find_endpoints(struct usb_host_interface *alt, struct usb_host_endpoint *eps[]) { struct usb_host_endpoint *endpoint = alt->endpoint; unsigned i, n_endpoints = alt->desc.bNumEndpoints; for (i = 0; i < n_endpoints; i++) { unsigned char *extra = endpoint[i].extra; int len = endpoint[i].extralen; while (len >= 3) { if (extra[1] == USB_DT_PIPE_USAGE) { unsigned pipe_id = extra[2]; if (pipe_id > 0 && pipe_id < 5) eps[pipe_id - 1] = &endpoint[i]; break; } len -= extra[0]; extra += extra[0]; } } if (!eps[0] || !eps[1] || !eps[2] || !eps[3]) return -ENODEV; return 0; } static int uas_use_uas_driver(struct usb_interface *intf, const struct usb_device_id *id, unsigned long *flags_ret) { struct usb_host_endpoint *eps[4] = { }; struct usb_device *udev = interface_to_usbdev(intf); struct usb_hcd *hcd = bus_to_hcd(udev->bus); unsigned long flags = id->driver_info; struct usb_host_interface *alt; int r; alt = uas_find_uas_alt_setting(intf); if (!alt) return 0; r = uas_find_endpoints(alt, eps); if (r < 0) return 0; /* * ASMedia has a number of usb3 to sata bridge chips, at the time of * this writing the following versions exist: * ASM1051 - no uas support version * ASM1051 - with broken (*) uas support * ASM1053 - with working uas support, but problems with large xfers * ASM1153 - with working uas support * * Devices with these chips re-use a number of device-ids over the * entire line, so the device-id is useless to determine if we're * dealing with an ASM1051 (which we want to avoid). * * The ASM1153 can be identified by config.MaxPower == 0, * where as the ASM105x models have config.MaxPower == 36. * * Differentiating between the ASM1053 and ASM1051 is trickier, when * connected over USB-3 we can look at the number of streams supported, * ASM1051 supports 32 streams, where as early ASM1053 versions support * 16 streams, newer ASM1053-s also support 32 streams, but have a * different prod-id. 
* * (*) ASM1051 chips do work with UAS with some disks (with the * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 || le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) { if (udev->actconfig->desc.bMaxPower == 0) { /* ASM1153, do nothing */ } else if (udev->speed < USB_SPEED_SUPER) { /* No streams info, assume ASM1051 */ flags |= US_FL_IGNORE_UAS; } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { /* Possibly an ASM1051, disable uas */ flags |= US_FL_IGNORE_UAS; } else { /* ASM1053, these have issues with large transfers */ flags |= US_FL_MAX_SECTORS_240; } } usb_stor_adjust_quirks(udev, &flags); if (flags & US_FL_IGNORE_UAS) { dev_warn(&udev->dev, "UAS is blacklisted for this device, using usb-storage instead\n"); return 0; } if (udev->bus->sg_tablesize == 0) { dev_warn(&udev->dev, "The driver for the USB controller %s does not support scatter-gather which is\n", hcd->driver->description); dev_warn(&udev->dev, "required by the UAS driver. Please try an other USB controller if you wish to use UAS.\n"); return 0; } if (udev->speed >= USB_SPEED_SUPER && !hcd->can_do_streams) { dev_warn(&udev->dev, "USB controller %s does not support streams, which are required by the UAS driver.\n", hcd_to_bus(hcd)->bus_name); dev_warn(&udev->dev, "Please try an other USB controller if you wish to use UAS.\n"); return 0; } if (flags_ret) *flags_ret = flags; return 1; }
static int uas_find_uas_alt_setting(struct usb_interface *intf)
{
	int i;

	for (i = 0; i < intf->num_altsetting; i++) {
		struct usb_host_interface *alt = &intf->altsetting[i];

		if (uas_is_interface(alt))
			return alt->desc.bAlternateSetting;
	}

	return -ENODEV;
}
static struct usb_host_interface *uas_find_uas_alt_setting(
		struct usb_interface *intf)
{
	int i;

	for (i = 0; i < intf->num_altsetting; i++) {
		struct usb_host_interface *alt = &intf->altsetting[i];

		if (uas_is_interface(alt))
			return alt;
	}

	return NULL;
}
{'added': [(12, 'static struct usb_host_interface *uas_find_uas_alt_setting('), (13, '\t\tstruct usb_interface *intf)'), (21, '\t\t\treturn alt;'), (24, '\treturn NULL;'), (62, '\tstruct usb_host_interface *alt;'), (63, '\tint r;'), (66, '\tif (!alt)'), (69, '\tr = uas_find_endpoints(alt, eps);')], 'deleted': [(12, 'static int uas_find_uas_alt_setting(struct usb_interface *intf)'), (20, '\t\t\treturn alt->desc.bAlternateSetting;'), (23, '\treturn -ENODEV;'), (61, '\tint r, alt;'), (62, ''), (65, '\tif (alt < 0)'), (68, '\tr = uas_find_endpoints(&intf->altsetting[alt], eps);')]}
8
7
97
605
10
60
3
https://github.com/torvalds/linux
CVE-2017-16530
CWE-125
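The CVE-2017-16530 fix above changes uas_find_uas_alt_setting() to hand back the matching usb_host_interface pointer instead of its bAlternateSetting number: the caller had been indexing intf->altsetting[alt] with that number, but an alternate-setting number is not required to equal its position in the altsetting[] array, so a crafted descriptor could push the read out of bounds (CWE-125). A toy sketch of the two shapes, with made-up stand-in types for the USB structures:

#include <stdio.h>

/* Toy stand-ins for struct usb_host_interface / usb_interface. */
struct alt_setting {
    int number;          /* like desc.bAlternateSetting: device-chosen */
    const char *label;
};

struct interface {
    int num_altsetting;
    struct alt_setting *altsetting;
};

/* Pre-fix shape: return the device-supplied number. */
static int find_by_number(const struct interface *intf)
{
    return intf->altsetting[0].number;   /* could be, say, 200 */
}

/* Post-fix shape: return the element itself, so no caller can
 * turn an untrusted number into an array index. */
static const struct alt_setting *find_by_pointer(const struct interface *intf)
{
    return intf->num_altsetting > 0 ? &intf->altsetting[0] : NULL;
}

int main(void)
{
    struct alt_setting alts[1] = { { 200, "uas" } };  /* number != index */
    struct interface intf = { 1, alts };

    int n = find_by_number(&intf);
    /* intf.altsetting[n] would read far past the 1-element array: CWE-125. */
    printf("device-supplied number: %d (array has %d entries)\n",
           n, intf.num_altsetting);

    const struct alt_setting *alt = find_by_pointer(&intf);
    if (alt)
        printf("safe access via pointer: %s\n", alt->label);
    return 0;
}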
1,913
libmspack.c
C
mspack_fmap_free
/* * Glue code for libmspack handling. * Author: 웃 Sebastian Andrzej Siewior * ✉ sebastian @ breakpoint ̣cc */ #include <stdio.h> #include <stdlib.h> #include <sys/stat.h> #include <fcntl.h> #include <mspack.h> #include "clamav.h" #include "fmap.h" #include "scanners.h" #include "others.h" enum mspack_type { FILETYPE_DUNNO, FILETYPE_FMAP, FILETYPE_FILENAME, }; struct mspack_name { fmap_t *fmap; off_t org; }; struct mspack_system_ex { struct mspack_system ops; off_t max_size; }; struct mspack_handle { enum mspack_type type; fmap_t *fmap; off_t org; off_t offset; FILE *f; off_t max_size; }; static struct mspack_file *mspack_fmap_open(struct mspack_system *self, const char *filename, int mode) { struct mspack_name *mspack_name; struct mspack_handle *mspack_handle; struct mspack_system_ex *self_ex; const char *fmode; const struct mspack_system *mptr = self; if (!filename) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return NULL; } mspack_handle = malloc(sizeof(*mspack_handle)); if (!mspack_handle) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return NULL; } switch (mode) { case MSPACK_SYS_OPEN_READ: mspack_handle->type = FILETYPE_FMAP; mspack_name = (struct mspack_name *)filename; mspack_handle->fmap = mspack_name->fmap; mspack_handle->org = mspack_name->org; mspack_handle->offset = 0; return (struct mspack_file *)mspack_handle; case MSPACK_SYS_OPEN_WRITE: fmode = "wb"; break; case MSPACK_SYS_OPEN_UPDATE: fmode = "r+b"; break; case MSPACK_SYS_OPEN_APPEND: fmode = "ab"; break; default: cli_dbgmsg("%s() wrong mode\n", __func__); goto out_err; } mspack_handle->type = FILETYPE_FILENAME; mspack_handle->f = fopen(filename, fmode); if (!mspack_handle->f) { cli_dbgmsg("%s() failed %d\n", __func__, __LINE__); goto out_err; } self_ex = (struct mspack_system_ex *)((char *)mptr - offsetof(struct mspack_system_ex,ops)); mspack_handle->max_size = self_ex->max_size; return (struct mspack_file *)mspack_handle; out_err: free(mspack_handle); return NULL; } static void mspack_fmap_close(struct mspack_file *file) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; if (!mspack_handle) return; if (mspack_handle->type == FILETYPE_FILENAME) fclose(mspack_handle->f); free(mspack_handle); } static int mspack_fmap_read(struct mspack_file *file, void *buffer, int bytes) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; off_t offset; size_t count; int ret; if (bytes < 0) { cli_dbgmsg("%s() %d\n", __func__, __LINE__); return -1; } if (!mspack_handle) { cli_dbgmsg("%s() %d\n", __func__, __LINE__); return -1; } if (mspack_handle->type == FILETYPE_FMAP) { offset = mspack_handle->offset + mspack_handle->org; ret = fmap_readn(mspack_handle->fmap, buffer, offset, bytes); if (ret != bytes) { cli_dbgmsg("%s() %d %d, %d\n", __func__, __LINE__, bytes, ret); return ret; } mspack_handle->offset += bytes; return bytes; } count = fread(buffer, bytes, 1, mspack_handle->f); if (count < 1) { cli_dbgmsg("%s() %d %d, %zd\n", __func__, __LINE__, bytes, count); return -1; } return bytes; } static int mspack_fmap_write(struct mspack_file *file, void *buffer, int bytes) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; size_t count; off_t max_size; if (bytes < 0 || !mspack_handle) { cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } if (mspack_handle->type == FILETYPE_FMAP) { cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } if (!bytes) return 0; max_size = mspack_handle->max_size; if (!max_size) return bytes; max_size = max_size < (off_t) 
bytes ? max_size : (off_t) bytes; mspack_handle->max_size -= max_size; count = fwrite(buffer, max_size, 1, mspack_handle->f); if (count < 1) { cli_dbgmsg("%s() err %m <%zd %d>\n", __func__, count, bytes); return -1; } return bytes; } static int mspack_fmap_seek(struct mspack_file *file, off_t offset, int mode) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; if (!mspack_handle) { cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } if (mspack_handle->type == FILETYPE_FMAP) { off_t new_pos; switch (mode) { case MSPACK_SYS_SEEK_START: new_pos = offset; break; case MSPACK_SYS_SEEK_CUR: new_pos = mspack_handle->offset + offset; break; case MSPACK_SYS_SEEK_END: new_pos = mspack_handle->fmap->len + offset; break; default: cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } if (new_pos < 0 || new_pos > mspack_handle->fmap->len) { cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } mspack_handle->offset = new_pos; return 0; } switch (mode) { case MSPACK_SYS_SEEK_START: mode = SEEK_SET; break; case MSPACK_SYS_SEEK_CUR: mode = SEEK_CUR; break; case MSPACK_SYS_SEEK_END: mode = SEEK_END; break; default: cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } return fseek(mspack_handle->f, offset, mode); } static off_t mspack_fmap_tell(struct mspack_file *file) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; if (!mspack_handle) return -1; if (mspack_handle->type == FILETYPE_FMAP) return mspack_handle->offset; return (off_t) ftell(mspack_handle->f); } static void mspack_fmap_message(struct mspack_file *file, const char *fmt, ...) { cli_dbgmsg("%s() %s\n", __func__, fmt); } static void *mspack_fmap_alloc(struct mspack_system *self, size_t num) { return malloc(num); } static void mspack_fmap_free(void *mem) { free(mem); } static void mspack_fmap_copy(void *src, void *dst, size_t num) { memcpy(dst, src, num); } static struct mspack_system mspack_sys_fmap_ops = { .open = mspack_fmap_open, .close = mspack_fmap_close, .read = mspack_fmap_read, .write = mspack_fmap_write, .seek = mspack_fmap_seek, .tell = mspack_fmap_tell, .message = mspack_fmap_message, .alloc = mspack_fmap_alloc, .free = mspack_fmap_free, .copy = mspack_fmap_copy, }; static int cli_scanfile(const char *filename, cli_ctx *ctx) { int fd, ret; /* internal version of cl_scanfile with arec/mrec preserved */ fd = safe_open(filename, O_RDONLY|O_BINARY); if (fd < 0) return CL_EOPEN; ret = cli_magic_scandesc(fd, ctx); close(fd); return ret; } int cli_scanmscab(cli_ctx *ctx, off_t sfx_offset) { struct mscab_decompressor *cab_d; struct mscabd_cabinet *cab_h; struct mscabd_file *cab_f; int ret; int files; int virus_num = 0; struct mspack_name mspack_fmap = { .fmap = *ctx->fmap, .org = sfx_offset, }; struct mspack_system_ex ops_ex; memset(&ops_ex, 0, sizeof(struct mspack_system_ex)); ops_ex.ops = mspack_sys_fmap_ops; MSPACK_SYS_SELFTEST(ret); if (ret) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return CL_EUNPACK; } cab_d = mspack_create_cab_decompressor(&ops_ex.ops); if (!cab_d) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return CL_EUNPACK; } cab_h = cab_d->open(cab_d, (char *)&mspack_fmap); if (!cab_h) { ret = CL_EFORMAT; cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); goto out_dest; } files = 0; for (cab_f = cab_h->files; cab_f; cab_f = cab_f->next) { off_t max_size; char *tmp_fname; ret = cli_matchmeta(ctx, cab_f->filename, 0, cab_f->length, 0, files, 0, NULL); if (ret) { if (ret == CL_VIRUS) { virus_num++; if (!SCAN_ALL) break; } goto 
out_close; } if (ctx->engine->maxscansize) { if (ctx->scansize >= ctx->engine->maxscansize) { ret = CL_CLEAN; break; } } if (ctx->engine->maxscansize && ctx->scansize + ctx->engine->maxfilesize >= ctx->engine->maxscansize) max_size = ctx->engine->maxscansize - ctx->scansize; else max_size = ctx->engine->maxfilesize ? ctx->engine->maxfilesize : 0xffffffff; tmp_fname = cli_gentemp(ctx->engine->tmpdir); if (!tmp_fname) { ret = CL_EMEM; break; } ops_ex.max_size = max_size; /* scan */ ret = cab_d->extract(cab_d, cab_f, tmp_fname); if (ret) /* Failed to extract. Try to scan what is there */ cli_dbgmsg("%s() failed to extract %d\n", __func__, ret); ret = cli_scanfile(tmp_fname, ctx); if (ret == CL_VIRUS) virus_num++; if (!ctx->engine->keeptmp) { if (!access(tmp_fname, R_OK) && cli_unlink(tmp_fname)) { free(tmp_fname); ret = CL_EUNLINK; break; } } free(tmp_fname); files++; if (ret == CL_VIRUS && SCAN_ALL) continue; if (ret) break; } out_close: cab_d->close(cab_d, cab_h); out_dest: mspack_destroy_cab_decompressor(cab_d); if (virus_num) return CL_VIRUS; return ret; } int cli_scanmschm(cli_ctx *ctx) { struct mschm_decompressor *mschm_d; struct mschmd_header *mschm_h; struct mschmd_file *mschm_f; int ret; int files; int virus_num = 0; struct mspack_name mspack_fmap = { .fmap = *ctx->fmap, }; struct mspack_system_ex ops_ex; memset(&ops_ex, 0, sizeof(struct mspack_system_ex)); ops_ex.ops = mspack_sys_fmap_ops; MSPACK_SYS_SELFTEST(ret); if (ret) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return CL_EUNPACK; } mschm_d = mspack_create_chm_decompressor(&ops_ex.ops); if (!mschm_d) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return CL_EUNPACK; } mschm_h = mschm_d->open(mschm_d, (char *)&mspack_fmap); if (!mschm_h) { ret = CL_EFORMAT; cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); goto out_dest; } files = 0; for (mschm_f = mschm_h->files; mschm_f; mschm_f = mschm_f->next) { off_t max_size; char *tmp_fname; ret = cli_matchmeta(ctx, mschm_f->filename, 0, mschm_f->length, 0, files, 0, NULL); if (ret) { if (ret == CL_VIRUS) { virus_num++; if (!SCAN_ALL) break; } goto out_close; } if (ctx->engine->maxscansize) { if (ctx->scansize >= ctx->engine->maxscansize) { ret = CL_CLEAN; break; } } if (ctx->engine->maxscansize && ctx->scansize + ctx->engine->maxfilesize >= ctx->engine->maxscansize) max_size = ctx->engine->maxscansize - ctx->scansize; else max_size = ctx->engine->maxfilesize ? ctx->engine->maxfilesize : 0xffffffff; ops_ex.max_size = max_size; tmp_fname = cli_gentemp(ctx->engine->tmpdir); if (!tmp_fname) { ret = CL_EMEM; break; } /* scan */ ret = mschm_d->extract(mschm_d, mschm_f, tmp_fname); if (ret) /* Failed to extract. Try to scan what is there */ cli_dbgmsg("%s() failed to extract %d\n", __func__, ret); ret = cli_scanfile(tmp_fname, ctx); if (ret == CL_VIRUS) virus_num++; if (!ctx->engine->keeptmp) { if (!access(tmp_fname, R_OK) && cli_unlink(tmp_fname)) { free(tmp_fname); ret = CL_EUNLINK; break; } } free(tmp_fname); files++; if (ret == CL_VIRUS && SCAN_ALL) continue; if (ret) break; } out_close: mschm_d->close(mschm_d, mschm_h); out_dest: mspack_destroy_chm_decompressor(mschm_d); if (virus_num) return CL_VIRUS; return ret; return 0; }
/* * Glue code for libmspack handling. * Author: 웃 Sebastian Andrzej Siewior * ✉ sebastian @ breakpoint ̣cc */ #include <stdio.h> #include <stdlib.h> #include <sys/stat.h> #include <fcntl.h> #include <mspack.h> #include "clamav.h" #include "fmap.h" #include "scanners.h" #include "others.h" enum mspack_type { FILETYPE_DUNNO, FILETYPE_FMAP, FILETYPE_FILENAME, }; struct mspack_name { fmap_t *fmap; off_t org; }; struct mspack_system_ex { struct mspack_system ops; off_t max_size; }; struct mspack_handle { enum mspack_type type; fmap_t *fmap; off_t org; off_t offset; FILE *f; off_t max_size; }; static struct mspack_file *mspack_fmap_open(struct mspack_system *self, const char *filename, int mode) { struct mspack_name *mspack_name; struct mspack_handle *mspack_handle; struct mspack_system_ex *self_ex; const char *fmode; const struct mspack_system *mptr = self; if (!filename) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return NULL; } mspack_handle = malloc(sizeof(*mspack_handle)); if (!mspack_handle) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return NULL; } switch (mode) { case MSPACK_SYS_OPEN_READ: mspack_handle->type = FILETYPE_FMAP; mspack_name = (struct mspack_name *)filename; mspack_handle->fmap = mspack_name->fmap; mspack_handle->org = mspack_name->org; mspack_handle->offset = 0; return (struct mspack_file *)mspack_handle; case MSPACK_SYS_OPEN_WRITE: fmode = "wb"; break; case MSPACK_SYS_OPEN_UPDATE: fmode = "r+b"; break; case MSPACK_SYS_OPEN_APPEND: fmode = "ab"; break; default: cli_dbgmsg("%s() wrong mode\n", __func__); goto out_err; } mspack_handle->type = FILETYPE_FILENAME; mspack_handle->f = fopen(filename, fmode); if (!mspack_handle->f) { cli_dbgmsg("%s() failed %d\n", __func__, __LINE__); goto out_err; } self_ex = (struct mspack_system_ex *)((char *)mptr - offsetof(struct mspack_system_ex,ops)); mspack_handle->max_size = self_ex->max_size; return (struct mspack_file *)mspack_handle; out_err: free(mspack_handle); return NULL; } static void mspack_fmap_close(struct mspack_file *file) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; if (!mspack_handle) return; if (mspack_handle->type == FILETYPE_FILENAME) fclose(mspack_handle->f); free(mspack_handle); } static int mspack_fmap_read(struct mspack_file *file, void *buffer, int bytes) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; off_t offset; size_t count; int ret; if (bytes < 0) { cli_dbgmsg("%s() %d\n", __func__, __LINE__); return -1; } if (!mspack_handle) { cli_dbgmsg("%s() %d\n", __func__, __LINE__); return -1; } if (mspack_handle->type == FILETYPE_FMAP) { offset = mspack_handle->offset + mspack_handle->org; ret = fmap_readn(mspack_handle->fmap, buffer, offset, bytes); if (ret != bytes) { cli_dbgmsg("%s() %d %d, %d\n", __func__, __LINE__, bytes, ret); return ret; } mspack_handle->offset += bytes; return bytes; } count = fread(buffer, bytes, 1, mspack_handle->f); if (count < 1) { cli_dbgmsg("%s() %d %d, %zd\n", __func__, __LINE__, bytes, count); return -1; } return bytes; } static int mspack_fmap_write(struct mspack_file *file, void *buffer, int bytes) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; size_t count; off_t max_size; if (bytes < 0 || !mspack_handle) { cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } if (mspack_handle->type == FILETYPE_FMAP) { cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } if (!bytes) return 0; max_size = mspack_handle->max_size; if (!max_size) return bytes; max_size = max_size < (off_t) 
bytes ? max_size : (off_t) bytes; mspack_handle->max_size -= max_size; count = fwrite(buffer, max_size, 1, mspack_handle->f); if (count < 1) { cli_dbgmsg("%s() err %m <%zd %d>\n", __func__, count, bytes); return -1; } return bytes; } static int mspack_fmap_seek(struct mspack_file *file, off_t offset, int mode) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; if (!mspack_handle) { cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } if (mspack_handle->type == FILETYPE_FMAP) { off_t new_pos; switch (mode) { case MSPACK_SYS_SEEK_START: new_pos = offset; break; case MSPACK_SYS_SEEK_CUR: new_pos = mspack_handle->offset + offset; break; case MSPACK_SYS_SEEK_END: new_pos = mspack_handle->fmap->len + offset; break; default: cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } if (new_pos < 0 || new_pos > mspack_handle->fmap->len) { cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } mspack_handle->offset = new_pos; return 0; } switch (mode) { case MSPACK_SYS_SEEK_START: mode = SEEK_SET; break; case MSPACK_SYS_SEEK_CUR: mode = SEEK_CUR; break; case MSPACK_SYS_SEEK_END: mode = SEEK_END; break; default: cli_dbgmsg("%s() err %d\n", __func__, __LINE__); return -1; } return fseek(mspack_handle->f, offset, mode); } static off_t mspack_fmap_tell(struct mspack_file *file) { struct mspack_handle *mspack_handle = (struct mspack_handle *)file; if (!mspack_handle) return -1; if (mspack_handle->type == FILETYPE_FMAP) return mspack_handle->offset; return (off_t) ftell(mspack_handle->f); } static void mspack_fmap_message(struct mspack_file *file, const char *fmt, ...) { cli_dbgmsg("%s() %s\n", __func__, fmt); } static void *mspack_fmap_alloc(struct mspack_system *self, size_t num) { return malloc(num); } static void mspack_fmap_free(void *mem) { if(mem) { free(mem); mem = NULL; } return; } static void mspack_fmap_copy(void *src, void *dst, size_t num) { memcpy(dst, src, num); } static struct mspack_system mspack_sys_fmap_ops = { .open = mspack_fmap_open, .close = mspack_fmap_close, .read = mspack_fmap_read, .write = mspack_fmap_write, .seek = mspack_fmap_seek, .tell = mspack_fmap_tell, .message = mspack_fmap_message, .alloc = mspack_fmap_alloc, .free = mspack_fmap_free, .copy = mspack_fmap_copy, }; static int cli_scanfile(const char *filename, cli_ctx *ctx) { int fd, ret; /* internal version of cl_scanfile with arec/mrec preserved */ fd = safe_open(filename, O_RDONLY|O_BINARY); if (fd < 0) return CL_EOPEN; ret = cli_magic_scandesc(fd, ctx); close(fd); return ret; } int cli_scanmscab(cli_ctx *ctx, off_t sfx_offset) { struct mscab_decompressor *cab_d; struct mscabd_cabinet *cab_h; struct mscabd_file *cab_f; int ret; int files; int virus_num = 0; struct mspack_name mspack_fmap = { .fmap = *ctx->fmap, .org = sfx_offset, }; struct mspack_system_ex ops_ex; memset(&ops_ex, 0, sizeof(struct mspack_system_ex)); ops_ex.ops = mspack_sys_fmap_ops; MSPACK_SYS_SELFTEST(ret); if (ret) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return CL_EUNPACK; } cab_d = mspack_create_cab_decompressor(&ops_ex.ops); if (!cab_d) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return CL_EUNPACK; } cab_h = cab_d->open(cab_d, (char *)&mspack_fmap); if (!cab_h) { ret = CL_EFORMAT; cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); goto out_dest; } files = 0; for (cab_f = cab_h->files; cab_f; cab_f = cab_f->next) { off_t max_size; char *tmp_fname; ret = cli_matchmeta(ctx, cab_f->filename, 0, cab_f->length, 0, files, 0, NULL); if (ret) { if (ret == CL_VIRUS) { virus_num++; if 
(!SCAN_ALL) break; } goto out_close; } if (ctx->engine->maxscansize) { if (ctx->scansize >= ctx->engine->maxscansize) { ret = CL_CLEAN; break; } } if (ctx->engine->maxscansize && ctx->scansize + ctx->engine->maxfilesize >= ctx->engine->maxscansize) max_size = ctx->engine->maxscansize - ctx->scansize; else max_size = ctx->engine->maxfilesize ? ctx->engine->maxfilesize : 0xffffffff; tmp_fname = cli_gentemp(ctx->engine->tmpdir); if (!tmp_fname) { ret = CL_EMEM; break; } ops_ex.max_size = max_size; /* scan */ ret = cab_d->extract(cab_d, cab_f, tmp_fname); if (ret) /* Failed to extract. Try to scan what is there */ cli_dbgmsg("%s() failed to extract %d\n", __func__, ret); ret = cli_scanfile(tmp_fname, ctx); if (ret == CL_VIRUS) virus_num++; if (!ctx->engine->keeptmp) { if (!access(tmp_fname, R_OK) && cli_unlink(tmp_fname)) { free(tmp_fname); ret = CL_EUNLINK; break; } } free(tmp_fname); files++; if (ret == CL_VIRUS && SCAN_ALL) continue; if (ret) break; } out_close: cab_d->close(cab_d, cab_h); out_dest: mspack_destroy_cab_decompressor(cab_d); if (virus_num) return CL_VIRUS; return ret; } int cli_scanmschm(cli_ctx *ctx) { struct mschm_decompressor *mschm_d; struct mschmd_header *mschm_h; struct mschmd_file *mschm_f; int ret; int files; int virus_num = 0; struct mspack_name mspack_fmap = { .fmap = *ctx->fmap, }; struct mspack_system_ex ops_ex; memset(&ops_ex, 0, sizeof(struct mspack_system_ex)); ops_ex.ops = mspack_sys_fmap_ops; MSPACK_SYS_SELFTEST(ret); if (ret) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return CL_EUNPACK; } mschm_d = mspack_create_chm_decompressor(&ops_ex.ops); if (!mschm_d) { cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); return CL_EUNPACK; } mschm_h = mschm_d->open(mschm_d, (char *)&mspack_fmap); if (!mschm_h) { ret = CL_EFORMAT; cli_dbgmsg("%s() failed at %d\n", __func__, __LINE__); goto out_dest; } files = 0; for (mschm_f = mschm_h->files; mschm_f; mschm_f = mschm_f->next) { off_t max_size; char *tmp_fname; ret = cli_matchmeta(ctx, mschm_f->filename, 0, mschm_f->length, 0, files, 0, NULL); if (ret) { if (ret == CL_VIRUS) { virus_num++; if (!SCAN_ALL) break; } goto out_close; } if (ctx->engine->maxscansize) { if (ctx->scansize >= ctx->engine->maxscansize) { ret = CL_CLEAN; break; } } if (ctx->engine->maxscansize && ctx->scansize + ctx->engine->maxfilesize >= ctx->engine->maxscansize) max_size = ctx->engine->maxscansize - ctx->scansize; else max_size = ctx->engine->maxfilesize ? ctx->engine->maxfilesize : 0xffffffff; ops_ex.max_size = max_size; tmp_fname = cli_gentemp(ctx->engine->tmpdir); if (!tmp_fname) { ret = CL_EMEM; break; } /* scan */ ret = mschm_d->extract(mschm_d, mschm_f, tmp_fname); if (ret) /* Failed to extract. Try to scan what is there */ cli_dbgmsg("%s() failed to extract %d\n", __func__, ret); ret = cli_scanfile(tmp_fname, ctx); if (ret == CL_VIRUS) virus_num++; if (!ctx->engine->keeptmp) { if (!access(tmp_fname, R_OK) && cli_unlink(tmp_fname)) { free(tmp_fname); ret = CL_EUNLINK; break; } } free(tmp_fname); files++; if (ret == CL_VIRUS && SCAN_ALL) continue; if (ret) break; } out_close: mschm_d->close(mschm_d, mschm_h); out_dest: mspack_destroy_chm_decompressor(mschm_d); if (virus_num) return CL_VIRUS; return ret; return 0; }
static void mspack_fmap_free(void *mem)
{
	free(mem);
}
static void mspack_fmap_free(void *mem)
{
	if(mem) {
		free(mem);
		mem = NULL;
	}
	return;
}
{'added': [(267, ' if(mem) {'), (268, ' free(mem);'), (269, ' mem = NULL;'), (270, ' }'), (271, ' return;')], 'deleted': [(267, '\tfree(mem);')]}
5
1
433
2,255
4
13
1
https://github.com/vrtadmin/clamav-devel
CVE-2017-6419
CWE-119
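The CVE-2017-6419 change above wraps free(mem) in a NULL test and nulls mem afterwards. Since free(NULL) is already defined to do nothing in standard C, and mem is a by-value parameter whose reassignment never reaches the caller, the caller's pointer still dangles after the call; the common hardening for this invalid-free/double-free class is a helper the caller assigns back from. A minimal sketch (mfree is an illustrative name, not part of libmspack or clamav):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Free p and return NULL so the caller can write `p = mfree(p);`,
 * leaving no dangling pointer behind. */
static void *mfree(void *p)
{
    free(p);              /* free(NULL) is defined to be a no-op */
    return NULL;
}

int main(void)
{
    char *buf = malloc(16);
    if (!buf)
        return 1;
    strcpy(buf, "hello");
    printf("%s\n", buf);

    buf = mfree(buf);     /* frees and nulls in one statement */
    buf = mfree(buf);     /* second call is harmless: free(NULL) */

    return buf == NULL ? 0 : 1;
}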
1,767
ptabasic.c
C
ptaReadStream
/*====================================================================* - Copyright (C) 2001 Leptonica. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *====================================================================*/ /*! * \file ptabasic.c * <pre> * * Pta creation, destruction, copy, clone, empty * PTA *ptaCreate() * PTA *ptaCreateFromNuma() * void ptaDestroy() * PTA *ptaCopy() * PTA *ptaCopyRange() * PTA *ptaClone() * l_int32 ptaEmpty() * * Pta array extension * l_int32 ptaAddPt() * static l_int32 ptaExtendArrays() * * Pta insertion and removal * l_int32 ptaInsertPt() * l_int32 ptaRemovePt() * * Pta accessors * l_int32 ptaGetRefcount() * l_int32 ptaChangeRefcount() * l_int32 ptaGetCount() * l_int32 ptaGetPt() * l_int32 ptaGetIPt() * l_int32 ptaSetPt() * l_int32 ptaGetArrays() * * Pta serialized for I/O * PTA *ptaRead() * PTA *ptaReadStream() * PTA *ptaReadMem() * l_int32 ptaWrite() * l_int32 ptaWriteStream() * l_int32 ptaWriteMem() * * Ptaa creation, destruction * PTAA *ptaaCreate() * void ptaaDestroy() * * Ptaa array extension * l_int32 ptaaAddPta() * static l_int32 ptaaExtendArray() * * Ptaa accessors * l_int32 ptaaGetCount() * l_int32 ptaaGetPta() * l_int32 ptaaGetPt() * * Ptaa array modifiers * l_int32 ptaaInitFull() * l_int32 ptaaReplacePta() * l_int32 ptaaAddPt() * l_int32 ptaaTruncate() * * Ptaa serialized for I/O * PTAA *ptaaRead() * PTAA *ptaaReadStream() * PTAA *ptaaReadMem() * l_int32 ptaaWrite() * l_int32 ptaaWriteStream() * l_int32 ptaaWriteMem() * </pre> */ #include <string.h> #include "allheaders.h" static const l_int32 INITIAL_PTR_ARRAYSIZE = 20; /* n'import quoi */ /* Static functions */ static l_int32 ptaExtendArrays(PTA *pta); static l_int32 ptaaExtendArray(PTAA *ptaa); /*---------------------------------------------------------------------* * Pta creation, destruction, copy, clone * *---------------------------------------------------------------------*/ /*! * \brief ptaCreate() * * \param[in] n initial array sizes * \return pta, or NULL on error. 
*/ PTA * ptaCreate(l_int32 n) { PTA *pta; PROCNAME("ptaCreate"); if (n <= 0) n = INITIAL_PTR_ARRAYSIZE; pta = (PTA *)LEPT_CALLOC(1, sizeof(PTA)); pta->n = 0; pta->nalloc = n; ptaChangeRefcount(pta, 1); /* sets to 1 */ pta->x = (l_float32 *)LEPT_CALLOC(n, sizeof(l_float32)); pta->y = (l_float32 *)LEPT_CALLOC(n, sizeof(l_float32)); if (!pta->x || !pta->y) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("x and y arrays not both made", procName, NULL); } return pta; } /*! * \brief ptaCreateFromNuma() * * \param[in] nax [optional] can be null * \param[in] nay * \return pta, or NULL on error. */ PTA * ptaCreateFromNuma(NUMA *nax, NUMA *nay) { l_int32 i, n; l_float32 startx, delx, xval, yval; PTA *pta; PROCNAME("ptaCreateFromNuma"); if (!nay) return (PTA *)ERROR_PTR("nay not defined", procName, NULL); n = numaGetCount(nay); if (nax && numaGetCount(nax) != n) return (PTA *)ERROR_PTR("nax and nay sizes differ", procName, NULL); pta = ptaCreate(n); numaGetParameters(nay, &startx, &delx); for (i = 0; i < n; i++) { if (nax) numaGetFValue(nax, i, &xval); else /* use implicit x values from nay */ xval = startx + i * delx; numaGetFValue(nay, i, &yval); ptaAddPt(pta, xval, yval); } return pta; } /*! * \brief ptaDestroy() * * \param[in,out] ppta to be nulled * \return void * * <pre> * Notes: * (1) Decrements the ref count and, if 0, destroys the pta. * (2) Always nulls the input ptr. * </pre> */ void ptaDestroy(PTA **ppta) { PTA *pta; PROCNAME("ptaDestroy"); if (ppta == NULL) { L_WARNING("ptr address is NULL!\n", procName); return; } if ((pta = *ppta) == NULL) return; ptaChangeRefcount(pta, -1); if (ptaGetRefcount(pta) <= 0) { LEPT_FREE(pta->x); LEPT_FREE(pta->y); LEPT_FREE(pta); } *ppta = NULL; return; } /*! * \brief ptaCopy() * * \param[in] pta * \return copy of pta, or NULL on error */ PTA * ptaCopy(PTA *pta) { l_int32 i; l_float32 x, y; PTA *npta; PROCNAME("ptaCopy"); if (!pta) return (PTA *)ERROR_PTR("pta not defined", procName, NULL); if ((npta = ptaCreate(pta->nalloc)) == NULL) return (PTA *)ERROR_PTR("npta not made", procName, NULL); for (i = 0; i < pta->n; i++) { ptaGetPt(pta, i, &x, &y); ptaAddPt(npta, x, y); } return npta; } /*! * \brief ptaCopyRange() * * \param[in] ptas * \param[in] istart starting index in ptas * \param[in] iend ending index in ptas; use 0 to copy to end * \return 0 if OK, 1 on error */ PTA * ptaCopyRange(PTA *ptas, l_int32 istart, l_int32 iend) { l_int32 n, i, x, y; PTA *ptad; PROCNAME("ptaCopyRange"); if (!ptas) return (PTA *)ERROR_PTR("ptas not defined", procName, NULL); n = ptaGetCount(ptas); if (istart < 0) istart = 0; if (istart >= n) return (PTA *)ERROR_PTR("istart out of bounds", procName, NULL); if (iend <= 0 || iend >= n) iend = n - 1; if (istart > iend) return (PTA *)ERROR_PTR("istart > iend; no pts", procName, NULL); if ((ptad = ptaCreate(iend - istart + 1)) == NULL) return (PTA *)ERROR_PTR("ptad not made", procName, NULL); for (i = istart; i <= iend; i++) { ptaGetIPt(ptas, i, &x, &y); ptaAddPt(ptad, x, y); } return ptad; } /*! * \brief ptaClone() * * \param[in] pta * \return ptr to same pta, or NULL on error */ PTA * ptaClone(PTA *pta) { PROCNAME("ptaClone"); if (!pta) return (PTA *)ERROR_PTR("pta not defined", procName, NULL); ptaChangeRefcount(pta, 1); return pta; } /*! 
* \brief ptaEmpty() * * \param[in] pta * \return 0 if OK, 1 on error * * <pre> * Notes: * This only resets the Pta::n field, for reuse * </pre> */ l_int32 ptaEmpty(PTA *pta) { PROCNAME("ptaEmpty"); if (!pta) return ERROR_INT("ptad not defined", procName, 1); pta->n = 0; return 0; } /*---------------------------------------------------------------------* * Pta array extension * *---------------------------------------------------------------------*/ /*! * \brief ptaAddPt() * * \param[in] pta * \param[in] x, y * \return 0 if OK, 1 on error */ l_int32 ptaAddPt(PTA *pta, l_float32 x, l_float32 y) { l_int32 n; PROCNAME("ptaAddPt"); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = pta->n; if (n >= pta->nalloc) ptaExtendArrays(pta); pta->x[n] = x; pta->y[n] = y; pta->n++; return 0; } /*! * \brief ptaExtendArrays() * * \param[in] pta * \return 0 if OK; 1 on error */ static l_int32 ptaExtendArrays(PTA *pta) { PROCNAME("ptaExtendArrays"); if (!pta) return ERROR_INT("pta not defined", procName, 1); if ((pta->x = (l_float32 *)reallocNew((void **)&pta->x, sizeof(l_float32) * pta->nalloc, 2 * sizeof(l_float32) * pta->nalloc)) == NULL) return ERROR_INT("new x array not returned", procName, 1); if ((pta->y = (l_float32 *)reallocNew((void **)&pta->y, sizeof(l_float32) * pta->nalloc, 2 * sizeof(l_float32) * pta->nalloc)) == NULL) return ERROR_INT("new y array not returned", procName, 1); pta->nalloc = 2 * pta->nalloc; return 0; } /*---------------------------------------------------------------------* * Pta insertion and removal * *---------------------------------------------------------------------*/ /*! * \brief ptaInsertPt() * * \param[in] pta * \param[in] index at which pt is to be inserted * \param[in] x, y point values * \return 0 if OK; 1 on error */ l_int32 ptaInsertPt(PTA *pta, l_int32 index, l_int32 x, l_int32 y) { l_int32 i, n; PROCNAME("ptaInsertPt"); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaGetCount(pta); if (index < 0 || index > n) return ERROR_INT("index not in {0...n}", procName, 1); if (n > pta->nalloc) ptaExtendArrays(pta); pta->n++; for (i = n; i > index; i--) { pta->x[i] = pta->x[i - 1]; pta->y[i] = pta->y[i - 1]; } pta->x[index] = x; pta->y[index] = y; return 0; } /*! * \brief ptaRemovePt() * * \param[in] pta * \param[in] index of point to be removed * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) This shifts pta[i] --> pta[i - 1] for all i > index. * (2) It should not be used repeatedly on large arrays, * because the function is O(n). * </pre> */ l_int32 ptaRemovePt(PTA *pta, l_int32 index) { l_int32 i, n; PROCNAME("ptaRemovePt"); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaGetCount(pta); if (index < 0 || index >= n) return ERROR_INT("index not in {0...n - 1}", procName, 1); /* Remove the point */ for (i = index + 1; i < n; i++) { pta->x[i - 1] = pta->x[i]; pta->y[i - 1] = pta->y[i]; } pta->n--; return 0; } /*---------------------------------------------------------------------* * Pta accessors * *---------------------------------------------------------------------*/ l_int32 ptaGetRefcount(PTA *pta) { PROCNAME("ptaGetRefcount"); if (!pta) return ERROR_INT("pta not defined", procName, 1); return pta->refcount; } l_int32 ptaChangeRefcount(PTA *pta, l_int32 delta) { PROCNAME("ptaChangeRefcount"); if (!pta) return ERROR_INT("pta not defined", procName, 1); pta->refcount += delta; return 0; } /*! 
* \brief ptaGetCount() * * \param[in] pta * \return count, or 0 if no pta */ l_int32 ptaGetCount(PTA *pta) { PROCNAME("ptaGetCount"); if (!pta) return ERROR_INT("pta not defined", procName, 0); return pta->n; } /*! * \brief ptaGetPt() * * \param[in] pta * \param[in] index into arrays * \param[out] px [optional] float x value * \param[out] py [optional] float y value * \return 0 if OK; 1 on error */ l_int32 ptaGetPt(PTA *pta, l_int32 index, l_float32 *px, l_float32 *py) { PROCNAME("ptaGetPt"); if (px) *px = 0; if (py) *py = 0; if (!pta) return ERROR_INT("pta not defined", procName, 1); if (index < 0 || index >= pta->n) return ERROR_INT("invalid index", procName, 1); if (px) *px = pta->x[index]; if (py) *py = pta->y[index]; return 0; } /*! * \brief ptaGetIPt() * * \param[in] pta * \param[in] index into arrays * \param[out] px [optional] integer x value * \param[out] py [optional] integer y value * \return 0 if OK; 1 on error */ l_int32 ptaGetIPt(PTA *pta, l_int32 index, l_int32 *px, l_int32 *py) { PROCNAME("ptaGetIPt"); if (px) *px = 0; if (py) *py = 0; if (!pta) return ERROR_INT("pta not defined", procName, 1); if (index < 0 || index >= pta->n) return ERROR_INT("invalid index", procName, 1); if (px) *px = (l_int32)(pta->x[index] + 0.5); if (py) *py = (l_int32)(pta->y[index] + 0.5); return 0; } /*! * \brief ptaSetPt() * * \param[in] pta * \param[in] index into arrays * \param[in] x, y * \return 0 if OK; 1 on error */ l_int32 ptaSetPt(PTA *pta, l_int32 index, l_float32 x, l_float32 y) { PROCNAME("ptaSetPt"); if (!pta) return ERROR_INT("pta not defined", procName, 1); if (index < 0 || index >= pta->n) return ERROR_INT("invalid index", procName, 1); pta->x[index] = x; pta->y[index] = y; return 0; } /*! * \brief ptaGetArrays() * * \param[in] pta * \param[out] pnax [optional] numa of x array * \param[out] pnay [optional] numa of y array * \return 0 if OK; 1 on error or if pta is empty * * <pre> * Notes: * (1) This copies the internal arrays into new Numas. * </pre> */ l_int32 ptaGetArrays(PTA *pta, NUMA **pnax, NUMA **pnay) { l_int32 i, n; NUMA *nax, *nay; PROCNAME("ptaGetArrays"); if (!pnax && !pnay) return ERROR_INT("no output requested", procName, 1); if (pnax) *pnax = NULL; if (pnay) *pnay = NULL; if (!pta) return ERROR_INT("pta not defined", procName, 1); if ((n = ptaGetCount(pta)) == 0) return ERROR_INT("pta is empty", procName, 1); if (pnax) { if ((nax = numaCreate(n)) == NULL) return ERROR_INT("nax not made", procName, 1); *pnax = nax; for (i = 0; i < n; i++) nax->array[i] = pta->x[i]; nax->n = n; } if (pnay) { if ((nay = numaCreate(n)) == NULL) return ERROR_INT("nay not made", procName, 1); *pnay = nay; for (i = 0; i < n; i++) nay->array[i] = pta->y[i]; nay->n = n; } return 0; } /*---------------------------------------------------------------------* * Pta serialized for I/O * *---------------------------------------------------------------------*/ /*! * \brief ptaRead() * * \param[in] filename * \return pta, or NULL on error */ PTA * ptaRead(const char *filename) { FILE *fp; PTA *pta; PROCNAME("ptaRead"); if (!filename) return (PTA *)ERROR_PTR("filename not defined", procName, NULL); if ((fp = fopenReadStream(filename)) == NULL) return (PTA *)ERROR_PTR("stream not opened", procName, NULL); pta = ptaReadStream(fp); fclose(fp); if (!pta) return (PTA *)ERROR_PTR("pta not read", procName, NULL); return pta; } /*! 
* \brief ptaReadStream() * * \param[in] fp file stream * \return pta, or NULL on error */ PTA * ptaReadStream(FILE *fp) { char typestr[128]; l_int32 i, n, ix, iy, type, version; l_float32 x, y; PTA *pta; PROCNAME("ptaReadStream"); if (!fp) return (PTA *)ERROR_PTR("stream not defined", procName, NULL); if (fscanf(fp, "\n Pta Version %d\n", &version) != 1) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (version != PTA_VERSION_NUMBER) return (PTA *)ERROR_PTR("invalid pta version", procName, NULL); if (fscanf(fp, " Number of pts = %d; format = %s\n", &n, typestr) != 2) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (!strcmp(typestr, "float")) type = 0; else /* typestr is "integer" */ type = 1; if ((pta = ptaCreate(n)) == NULL) return (PTA *)ERROR_PTR("pta not made", procName, NULL); for (i = 0; i < n; i++) { if (type == 0) { /* data is float */ if (fscanf(fp, " (%f, %f)\n", &x, &y) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading floats", procName, NULL); } ptaAddPt(pta, x, y); } else { /* data is integer */ if (fscanf(fp, " (%d, %d)\n", &ix, &iy) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading ints", procName, NULL); } ptaAddPt(pta, ix, iy); } } return pta; } /*! * \brief ptaReadMem() * * \param[in] data serialization in ascii * \param[in] size of data in bytes; can use strlen to get it * \return pta, or NULL on error */ PTA * ptaReadMem(const l_uint8 *data, size_t size) { FILE *fp; PTA *pta; PROCNAME("ptaReadMem"); if (!data) return (PTA *)ERROR_PTR("data not defined", procName, NULL); if ((fp = fopenReadFromMemory(data, size)) == NULL) return (PTA *)ERROR_PTR("stream not opened", procName, NULL); pta = ptaReadStream(fp); fclose(fp); if (!pta) L_ERROR("pta not read\n", procName); return pta; } /*! * \brief ptaWrite() * * \param[in] filename * \param[in] pta * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK, 1 on error */ l_int32 ptaWrite(const char *filename, PTA *pta, l_int32 type) { l_int32 ret; FILE *fp; PROCNAME("ptaWrite"); if (!filename) return ERROR_INT("filename not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); if ((fp = fopenWriteStream(filename, "w")) == NULL) return ERROR_INT("stream not opened", procName, 1); ret = ptaWriteStream(fp, pta, type); fclose(fp); if (ret) return ERROR_INT("pta not written to stream", procName, 1); return 0; } /*! * \brief ptaWriteStream() * * \param[in] fp file stream * \param[in] pta * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK; 1 on error */ l_int32 ptaWriteStream(FILE *fp, PTA *pta, l_int32 type) { l_int32 i, n, ix, iy; l_float32 x, y; PROCNAME("ptaWriteStream"); if (!fp) return ERROR_INT("stream not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaGetCount(pta); fprintf(fp, "\n Pta Version %d\n", PTA_VERSION_NUMBER); if (type == 0) fprintf(fp, " Number of pts = %d; format = float\n", n); else /* type == 1 */ fprintf(fp, " Number of pts = %d; format = integer\n", n); for (i = 0; i < n; i++) { if (type == 0) { /* data is float */ ptaGetPt(pta, i, &x, &y); fprintf(fp, " (%f, %f)\n", x, y); } else { /* data is integer */ ptaGetIPt(pta, i, &ix, &iy); fprintf(fp, " (%d, %d)\n", ix, iy); } } return 0; } /*! 
* \brief ptaWriteMem() * * \param[out] pdata data of serialized pta; ascii * \param[out] psize size of returned data * \param[in] pta * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) Serializes a pta in memory and puts the result in a buffer. * </pre> */ l_int32 ptaWriteMem(l_uint8 **pdata, size_t *psize, PTA *pta, l_int32 type) { l_int32 ret; FILE *fp; PROCNAME("ptaWriteMem"); if (pdata) *pdata = NULL; if (psize) *psize = 0; if (!pdata) return ERROR_INT("&data not defined", procName, 1); if (!psize) return ERROR_INT("&size not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); #if HAVE_FMEMOPEN if ((fp = open_memstream((char **)pdata, psize)) == NULL) return ERROR_INT("stream not opened", procName, 1); ret = ptaWriteStream(fp, pta, type); #else L_INFO("work-around: writing to a temp file\n", procName); #ifdef _WIN32 if ((fp = fopenWriteWinTempfile()) == NULL) return ERROR_INT("tmpfile stream not opened", procName, 1); #else if ((fp = tmpfile()) == NULL) return ERROR_INT("tmpfile stream not opened", procName, 1); #endif /* _WIN32 */ ret = ptaWriteStream(fp, pta, type); rewind(fp); *pdata = l_binaryReadStream(fp, psize); #endif /* HAVE_FMEMOPEN */ fclose(fp); return ret; } /*---------------------------------------------------------------------* * PTAA creation, destruction * *---------------------------------------------------------------------*/ /*! * \brief ptaaCreate() * * \param[in] n initial number of ptrs * \return ptaa, or NULL on error */ PTAA * ptaaCreate(l_int32 n) { PTAA *ptaa; PROCNAME("ptaaCreate"); if (n <= 0) n = INITIAL_PTR_ARRAYSIZE; if ((ptaa = (PTAA *)LEPT_CALLOC(1, sizeof(PTAA))) == NULL) return (PTAA *)ERROR_PTR("ptaa not made", procName, NULL); ptaa->n = 0; ptaa->nalloc = n; if ((ptaa->pta = (PTA **)LEPT_CALLOC(n, sizeof(PTA *))) == NULL) { ptaaDestroy(&ptaa); return (PTAA *)ERROR_PTR("pta ptrs not made", procName, NULL); } return ptaa; } /*! * \brief ptaaDestroy() * * \param[in,out] pptaa to be nulled * \return void */ void ptaaDestroy(PTAA **pptaa) { l_int32 i; PTAA *ptaa; PROCNAME("ptaaDestroy"); if (pptaa == NULL) { L_WARNING("ptr address is NULL!\n", procName); return; } if ((ptaa = *pptaa) == NULL) return; for (i = 0; i < ptaa->n; i++) ptaDestroy(&ptaa->pta[i]); LEPT_FREE(ptaa->pta); LEPT_FREE(ptaa); *pptaa = NULL; return; } /*---------------------------------------------------------------------* * PTAA array extension * *---------------------------------------------------------------------*/ /*! * \brief ptaaAddPta() * * \param[in] ptaa * \param[in] pta to be added * \param[in] copyflag L_INSERT, L_COPY, L_CLONE * \return 0 if OK, 1 on error */ l_int32 ptaaAddPta(PTAA *ptaa, PTA *pta, l_int32 copyflag) { l_int32 n; PTA *ptac; PROCNAME("ptaaAddPta"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); if (copyflag == L_INSERT) { ptac = pta; } else if (copyflag == L_COPY) { if ((ptac = ptaCopy(pta)) == NULL) return ERROR_INT("ptac not made", procName, 1); } else if (copyflag == L_CLONE) { if ((ptac = ptaClone(pta)) == NULL) return ERROR_INT("pta clone not made", procName, 1); } else { return ERROR_INT("invalid copyflag", procName, 1); } n = ptaaGetCount(ptaa); if (n >= ptaa->nalloc) ptaaExtendArray(ptaa); ptaa->pta[n] = ptac; ptaa->n++; return 0; } /*! 
* \brief ptaaExtendArray() * * \param[in] ptaa * \return 0 if OK, 1 on error */ static l_int32 ptaaExtendArray(PTAA *ptaa) { PROCNAME("ptaaExtendArray"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if ((ptaa->pta = (PTA **)reallocNew((void **)&ptaa->pta, sizeof(PTA *) * ptaa->nalloc, 2 * sizeof(PTA *) * ptaa->nalloc)) == NULL) return ERROR_INT("new ptr array not returned", procName, 1); ptaa->nalloc = 2 * ptaa->nalloc; return 0; } /*---------------------------------------------------------------------* * Ptaa accessors * *---------------------------------------------------------------------*/ /*! * \brief ptaaGetCount() * * \param[in] ptaa * \return count, or 0 if no ptaa */ l_int32 ptaaGetCount(PTAA *ptaa) { PROCNAME("ptaaGetCount"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 0); return ptaa->n; } /*! * \brief ptaaGetPta() * * \param[in] ptaa * \param[in] index to the i-th pta * \param[in] accessflag L_COPY or L_CLONE * \return pta, or NULL on error */ PTA * ptaaGetPta(PTAA *ptaa, l_int32 index, l_int32 accessflag) { PROCNAME("ptaaGetPta"); if (!ptaa) return (PTA *)ERROR_PTR("ptaa not defined", procName, NULL); if (index < 0 || index >= ptaa->n) return (PTA *)ERROR_PTR("index not valid", procName, NULL); if (accessflag == L_COPY) return ptaCopy(ptaa->pta[index]); else if (accessflag == L_CLONE) return ptaClone(ptaa->pta[index]); else return (PTA *)ERROR_PTR("invalid accessflag", procName, NULL); } /*! * \brief ptaaGetPt() * * \param[in] ptaa * \param[in] ipta to the i-th pta * \param[in] jpt index to the j-th pt in the pta * \param[out] px [optional] float x value * \param[out] py [optional] float y value * \return 0 if OK; 1 on error */ l_int32 ptaaGetPt(PTAA *ptaa, l_int32 ipta, l_int32 jpt, l_float32 *px, l_float32 *py) { PTA *pta; PROCNAME("ptaaGetPt"); if (px) *px = 0; if (py) *py = 0; if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (ipta < 0 || ipta >= ptaa->n) return ERROR_INT("index ipta not valid", procName, 1); pta = ptaaGetPta(ptaa, ipta, L_CLONE); if (jpt < 0 || jpt >= pta->n) { ptaDestroy(&pta); return ERROR_INT("index jpt not valid", procName, 1); } ptaGetPt(pta, jpt, px, py); ptaDestroy(&pta); return 0; } /*---------------------------------------------------------------------* * Ptaa array modifiers * *---------------------------------------------------------------------*/ /*! * \brief ptaaInitFull() * * \param[in] ptaa can have non-null ptrs in the ptr array * \param[in] pta to be replicated into the entire ptr array * \return 0 if OK; 1 on error */ l_int32 ptaaInitFull(PTAA *ptaa, PTA *pta) { l_int32 n, i; PTA *ptat; PROCNAME("ptaaInitFull"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaa->nalloc; ptaa->n = n; for (i = 0; i < n; i++) { ptat = ptaCopy(pta); ptaaReplacePta(ptaa, i, ptat); } return 0; } /*! * \brief ptaaReplacePta() * * \param[in] ptaa * \param[in] index to the index-th pta * \param[in] pta insert and replace any existing one * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) Any existing pta is destroyed, and the input one * is inserted in its place. 
* (2) If the index is invalid, return 1 (error) * </pre> */ l_int32 ptaaReplacePta(PTAA *ptaa, l_int32 index, PTA *pta) { l_int32 n; PROCNAME("ptaaReplacePta"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaaGetCount(ptaa); if (index < 0 || index >= n) return ERROR_INT("index not valid", procName, 1); ptaDestroy(&ptaa->pta[index]); ptaa->pta[index] = pta; return 0; } /*! * \brief ptaaAddPt() * * \param[in] ptaa * \param[in] ipta to the i-th pta * \param[in] x,y point coordinates * \return 0 if OK; 1 on error */ l_int32 ptaaAddPt(PTAA *ptaa, l_int32 ipta, l_float32 x, l_float32 y) { PTA *pta; PROCNAME("ptaaAddPt"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (ipta < 0 || ipta >= ptaa->n) return ERROR_INT("index ipta not valid", procName, 1); pta = ptaaGetPta(ptaa, ipta, L_CLONE); ptaAddPt(pta, x, y); ptaDestroy(&pta); return 0; } /*! * \brief ptaaTruncate() * * \param[in] ptaa * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) This identifies the largest index containing a pta that * has any points within it, destroys all pta above that index, * and resets the count. * </pre> */ l_int32 ptaaTruncate(PTAA *ptaa) { l_int32 i, n, np; PTA *pta; PROCNAME("ptaaTruncate"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); n = ptaaGetCount(ptaa); for (i = n - 1; i >= 0; i--) { pta = ptaaGetPta(ptaa, i, L_CLONE); if (!pta) { ptaa->n--; continue; } np = ptaGetCount(pta); ptaDestroy(&pta); if (np == 0) { ptaDestroy(&ptaa->pta[i]); ptaa->n--; } else { break; } } return 0; } /*---------------------------------------------------------------------* * Ptaa serialized for I/O * *---------------------------------------------------------------------*/ /*! * \brief ptaaRead() * * \param[in] filename * \return ptaa, or NULL on error */ PTAA * ptaaRead(const char *filename) { FILE *fp; PTAA *ptaa; PROCNAME("ptaaRead"); if (!filename) return (PTAA *)ERROR_PTR("filename not defined", procName, NULL); if ((fp = fopenReadStream(filename)) == NULL) return (PTAA *)ERROR_PTR("stream not opened", procName, NULL); ptaa = ptaaReadStream(fp); fclose(fp); if (!ptaa) return (PTAA *)ERROR_PTR("ptaa not read", procName, NULL); return ptaa; } /*! * \brief ptaaReadStream() * * \param[in] fp file stream * \return ptaa, or NULL on error */ PTAA * ptaaReadStream(FILE *fp) { l_int32 i, n, version; PTA *pta; PTAA *ptaa; PROCNAME("ptaaReadStream"); if (!fp) return (PTAA *)ERROR_PTR("stream not defined", procName, NULL); if (fscanf(fp, "\nPtaa Version %d\n", &version) != 1) return (PTAA *)ERROR_PTR("not a ptaa file", procName, NULL); if (version != PTA_VERSION_NUMBER) return (PTAA *)ERROR_PTR("invalid ptaa version", procName, NULL); if (fscanf(fp, "Number of Pta = %d\n", &n) != 1) return (PTAA *)ERROR_PTR("not a ptaa file", procName, NULL); if ((ptaa = ptaaCreate(n)) == NULL) return (PTAA *)ERROR_PTR("ptaa not made", procName, NULL); for (i = 0; i < n; i++) { if ((pta = ptaReadStream(fp)) == NULL) { ptaaDestroy(&ptaa); return (PTAA *)ERROR_PTR("error reading pta", procName, NULL); } ptaaAddPta(ptaa, pta, L_INSERT); } return ptaa; } /*! 
* \brief ptaaReadMem() * * \param[in] data serialization in ascii * \param[in] size of data in bytes; can use strlen to get it * \return ptaa, or NULL on error */ PTAA * ptaaReadMem(const l_uint8 *data, size_t size) { FILE *fp; PTAA *ptaa; PROCNAME("ptaaReadMem"); if (!data) return (PTAA *)ERROR_PTR("data not defined", procName, NULL); if ((fp = fopenReadFromMemory(data, size)) == NULL) return (PTAA *)ERROR_PTR("stream not opened", procName, NULL); ptaa = ptaaReadStream(fp); fclose(fp); if (!ptaa) L_ERROR("ptaa not read\n", procName); return ptaa; } /*! * \brief ptaaWrite() * * \param[in] filename * \param[in] ptaa * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK, 1 on error */ l_int32 ptaaWrite(const char *filename, PTAA *ptaa, l_int32 type) { l_int32 ret; FILE *fp; PROCNAME("ptaaWrite"); if (!filename) return ERROR_INT("filename not defined", procName, 1); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if ((fp = fopenWriteStream(filename, "w")) == NULL) return ERROR_INT("stream not opened", procName, 1); ret = ptaaWriteStream(fp, ptaa, type); fclose(fp); if (ret) return ERROR_INT("ptaa not written to stream", procName, 1); return 0; } /*! * \brief ptaaWriteStream() * * \param[in] fp file stream * \param[in] ptaa * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK; 1 on error */ l_int32 ptaaWriteStream(FILE *fp, PTAA *ptaa, l_int32 type) { l_int32 i, n; PTA *pta; PROCNAME("ptaaWriteStream"); if (!fp) return ERROR_INT("stream not defined", procName, 1); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); n = ptaaGetCount(ptaa); fprintf(fp, "\nPtaa Version %d\n", PTA_VERSION_NUMBER); fprintf(fp, "Number of Pta = %d\n", n); for (i = 0; i < n; i++) { pta = ptaaGetPta(ptaa, i, L_CLONE); ptaWriteStream(fp, pta, type); ptaDestroy(&pta); } return 0; } /*! * \brief ptaaWriteMem() * * \param[out] pdata data of serialized ptaa; ascii * \param[out] psize size of returned data * \param[in] ptaa * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) Serializes a ptaa in memory and puts the result in a buffer. * </pre> */ l_int32 ptaaWriteMem(l_uint8 **pdata, size_t *psize, PTAA *ptaa, l_int32 type) { l_int32 ret; FILE *fp; PROCNAME("ptaaWriteMem"); if (pdata) *pdata = NULL; if (psize) *psize = 0; if (!pdata) return ERROR_INT("&data not defined", procName, 1); if (!psize) return ERROR_INT("&size not defined", procName, 1); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); #if HAVE_FMEMOPEN if ((fp = open_memstream((char **)pdata, psize)) == NULL) return ERROR_INT("stream not opened", procName, 1); ret = ptaaWriteStream(fp, ptaa, type); #else L_INFO("work-around: writing to a temp file\n", procName); #ifdef _WIN32 if ((fp = fopenWriteWinTempfile()) == NULL) return ERROR_INT("tmpfile stream not opened", procName, 1); #else if ((fp = tmpfile()) == NULL) return ERROR_INT("tmpfile stream not opened", procName, 1); #endif /* _WIN32 */ ret = ptaaWriteStream(fp, ptaa, type); rewind(fp); *pdata = l_binaryReadStream(fp, psize); #endif /* HAVE_FMEMOPEN */ fclose(fp); return ret; }
/*====================================================================* - Copyright (C) 2001 Leptonica. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - 2. Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials - provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ANY - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING - NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *====================================================================*/ /*! * \file ptabasic.c * <pre> * * Pta creation, destruction, copy, clone, empty * PTA *ptaCreate() * PTA *ptaCreateFromNuma() * void ptaDestroy() * PTA *ptaCopy() * PTA *ptaCopyRange() * PTA *ptaClone() * l_int32 ptaEmpty() * * Pta array extension * l_int32 ptaAddPt() * static l_int32 ptaExtendArrays() * * Pta insertion and removal * l_int32 ptaInsertPt() * l_int32 ptaRemovePt() * * Pta accessors * l_int32 ptaGetRefcount() * l_int32 ptaChangeRefcount() * l_int32 ptaGetCount() * l_int32 ptaGetPt() * l_int32 ptaGetIPt() * l_int32 ptaSetPt() * l_int32 ptaGetArrays() * * Pta serialized for I/O * PTA *ptaRead() * PTA *ptaReadStream() * PTA *ptaReadMem() * l_int32 ptaWrite() * l_int32 ptaWriteStream() * l_int32 ptaWriteMem() * * Ptaa creation, destruction * PTAA *ptaaCreate() * void ptaaDestroy() * * Ptaa array extension * l_int32 ptaaAddPta() * static l_int32 ptaaExtendArray() * * Ptaa accessors * l_int32 ptaaGetCount() * l_int32 ptaaGetPta() * l_int32 ptaaGetPt() * * Ptaa array modifiers * l_int32 ptaaInitFull() * l_int32 ptaaReplacePta() * l_int32 ptaaAddPt() * l_int32 ptaaTruncate() * * Ptaa serialized for I/O * PTAA *ptaaRead() * PTAA *ptaaReadStream() * PTAA *ptaaReadMem() * l_int32 ptaaWrite() * l_int32 ptaaWriteStream() * l_int32 ptaaWriteMem() * </pre> */ #include <string.h> #include "allheaders.h" static const l_int32 INITIAL_PTR_ARRAYSIZE = 20; /* n'import quoi */ /* Static functions */ static l_int32 ptaExtendArrays(PTA *pta); static l_int32 ptaaExtendArray(PTAA *ptaa); /*---------------------------------------------------------------------* * Pta creation, destruction, copy, clone * *---------------------------------------------------------------------*/ /*! * \brief ptaCreate() * * \param[in] n initial array sizes * \return pta, or NULL on error. 
*/ PTA * ptaCreate(l_int32 n) { PTA *pta; PROCNAME("ptaCreate"); if (n <= 0) n = INITIAL_PTR_ARRAYSIZE; pta = (PTA *)LEPT_CALLOC(1, sizeof(PTA)); pta->n = 0; pta->nalloc = n; ptaChangeRefcount(pta, 1); /* sets to 1 */ pta->x = (l_float32 *)LEPT_CALLOC(n, sizeof(l_float32)); pta->y = (l_float32 *)LEPT_CALLOC(n, sizeof(l_float32)); if (!pta->x || !pta->y) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("x and y arrays not both made", procName, NULL); } return pta; } /*! * \brief ptaCreateFromNuma() * * \param[in] nax [optional] can be null * \param[in] nay * \return pta, or NULL on error. */ PTA * ptaCreateFromNuma(NUMA *nax, NUMA *nay) { l_int32 i, n; l_float32 startx, delx, xval, yval; PTA *pta; PROCNAME("ptaCreateFromNuma"); if (!nay) return (PTA *)ERROR_PTR("nay not defined", procName, NULL); n = numaGetCount(nay); if (nax && numaGetCount(nax) != n) return (PTA *)ERROR_PTR("nax and nay sizes differ", procName, NULL); pta = ptaCreate(n); numaGetParameters(nay, &startx, &delx); for (i = 0; i < n; i++) { if (nax) numaGetFValue(nax, i, &xval); else /* use implicit x values from nay */ xval = startx + i * delx; numaGetFValue(nay, i, &yval); ptaAddPt(pta, xval, yval); } return pta; } /*! * \brief ptaDestroy() * * \param[in,out] ppta to be nulled * \return void * * <pre> * Notes: * (1) Decrements the ref count and, if 0, destroys the pta. * (2) Always nulls the input ptr. * </pre> */ void ptaDestroy(PTA **ppta) { PTA *pta; PROCNAME("ptaDestroy"); if (ppta == NULL) { L_WARNING("ptr address is NULL!\n", procName); return; } if ((pta = *ppta) == NULL) return; ptaChangeRefcount(pta, -1); if (ptaGetRefcount(pta) <= 0) { LEPT_FREE(pta->x); LEPT_FREE(pta->y); LEPT_FREE(pta); } *ppta = NULL; return; } /*! * \brief ptaCopy() * * \param[in] pta * \return copy of pta, or NULL on error */ PTA * ptaCopy(PTA *pta) { l_int32 i; l_float32 x, y; PTA *npta; PROCNAME("ptaCopy"); if (!pta) return (PTA *)ERROR_PTR("pta not defined", procName, NULL); if ((npta = ptaCreate(pta->nalloc)) == NULL) return (PTA *)ERROR_PTR("npta not made", procName, NULL); for (i = 0; i < pta->n; i++) { ptaGetPt(pta, i, &x, &y); ptaAddPt(npta, x, y); } return npta; } /*! * \brief ptaCopyRange() * * \param[in] ptas * \param[in] istart starting index in ptas * \param[in] iend ending index in ptas; use 0 to copy to end * \return 0 if OK, 1 on error */ PTA * ptaCopyRange(PTA *ptas, l_int32 istart, l_int32 iend) { l_int32 n, i, x, y; PTA *ptad; PROCNAME("ptaCopyRange"); if (!ptas) return (PTA *)ERROR_PTR("ptas not defined", procName, NULL); n = ptaGetCount(ptas); if (istart < 0) istart = 0; if (istart >= n) return (PTA *)ERROR_PTR("istart out of bounds", procName, NULL); if (iend <= 0 || iend >= n) iend = n - 1; if (istart > iend) return (PTA *)ERROR_PTR("istart > iend; no pts", procName, NULL); if ((ptad = ptaCreate(iend - istart + 1)) == NULL) return (PTA *)ERROR_PTR("ptad not made", procName, NULL); for (i = istart; i <= iend; i++) { ptaGetIPt(ptas, i, &x, &y); ptaAddPt(ptad, x, y); } return ptad; } /*! * \brief ptaClone() * * \param[in] pta * \return ptr to same pta, or NULL on error */ PTA * ptaClone(PTA *pta) { PROCNAME("ptaClone"); if (!pta) return (PTA *)ERROR_PTR("pta not defined", procName, NULL); ptaChangeRefcount(pta, 1); return pta; } /*! 
* \brief ptaEmpty() * * \param[in] pta * \return 0 if OK, 1 on error * * <pre> * Notes: * This only resets the Pta::n field, for reuse * </pre> */ l_int32 ptaEmpty(PTA *pta) { PROCNAME("ptaEmpty"); if (!pta) return ERROR_INT("ptad not defined", procName, 1); pta->n = 0; return 0; } /*---------------------------------------------------------------------* * Pta array extension * *---------------------------------------------------------------------*/ /*! * \brief ptaAddPt() * * \param[in] pta * \param[in] x, y * \return 0 if OK, 1 on error */ l_int32 ptaAddPt(PTA *pta, l_float32 x, l_float32 y) { l_int32 n; PROCNAME("ptaAddPt"); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = pta->n; if (n >= pta->nalloc) ptaExtendArrays(pta); pta->x[n] = x; pta->y[n] = y; pta->n++; return 0; } /*! * \brief ptaExtendArrays() * * \param[in] pta * \return 0 if OK; 1 on error */ static l_int32 ptaExtendArrays(PTA *pta) { PROCNAME("ptaExtendArrays"); if (!pta) return ERROR_INT("pta not defined", procName, 1); if ((pta->x = (l_float32 *)reallocNew((void **)&pta->x, sizeof(l_float32) * pta->nalloc, 2 * sizeof(l_float32) * pta->nalloc)) == NULL) return ERROR_INT("new x array not returned", procName, 1); if ((pta->y = (l_float32 *)reallocNew((void **)&pta->y, sizeof(l_float32) * pta->nalloc, 2 * sizeof(l_float32) * pta->nalloc)) == NULL) return ERROR_INT("new y array not returned", procName, 1); pta->nalloc = 2 * pta->nalloc; return 0; } /*---------------------------------------------------------------------* * Pta insertion and removal * *---------------------------------------------------------------------*/ /*! * \brief ptaInsertPt() * * \param[in] pta * \param[in] index at which pt is to be inserted * \param[in] x, y point values * \return 0 if OK; 1 on error */ l_int32 ptaInsertPt(PTA *pta, l_int32 index, l_int32 x, l_int32 y) { l_int32 i, n; PROCNAME("ptaInsertPt"); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaGetCount(pta); if (index < 0 || index > n) return ERROR_INT("index not in {0...n}", procName, 1); if (n > pta->nalloc) ptaExtendArrays(pta); pta->n++; for (i = n; i > index; i--) { pta->x[i] = pta->x[i - 1]; pta->y[i] = pta->y[i - 1]; } pta->x[index] = x; pta->y[index] = y; return 0; } /*! * \brief ptaRemovePt() * * \param[in] pta * \param[in] index of point to be removed * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) This shifts pta[i] --> pta[i - 1] for all i > index. * (2) It should not be used repeatedly on large arrays, * because the function is O(n). * </pre> */ l_int32 ptaRemovePt(PTA *pta, l_int32 index) { l_int32 i, n; PROCNAME("ptaRemovePt"); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaGetCount(pta); if (index < 0 || index >= n) return ERROR_INT("index not in {0...n - 1}", procName, 1); /* Remove the point */ for (i = index + 1; i < n; i++) { pta->x[i - 1] = pta->x[i]; pta->y[i - 1] = pta->y[i]; } pta->n--; return 0; } /*---------------------------------------------------------------------* * Pta accessors * *---------------------------------------------------------------------*/ l_int32 ptaGetRefcount(PTA *pta) { PROCNAME("ptaGetRefcount"); if (!pta) return ERROR_INT("pta not defined", procName, 1); return pta->refcount; } l_int32 ptaChangeRefcount(PTA *pta, l_int32 delta) { PROCNAME("ptaChangeRefcount"); if (!pta) return ERROR_INT("pta not defined", procName, 1); pta->refcount += delta; return 0; } /*! 
* \brief ptaGetCount() * * \param[in] pta * \return count, or 0 if no pta */ l_int32 ptaGetCount(PTA *pta) { PROCNAME("ptaGetCount"); if (!pta) return ERROR_INT("pta not defined", procName, 0); return pta->n; } /*! * \brief ptaGetPt() * * \param[in] pta * \param[in] index into arrays * \param[out] px [optional] float x value * \param[out] py [optional] float y value * \return 0 if OK; 1 on error */ l_int32 ptaGetPt(PTA *pta, l_int32 index, l_float32 *px, l_float32 *py) { PROCNAME("ptaGetPt"); if (px) *px = 0; if (py) *py = 0; if (!pta) return ERROR_INT("pta not defined", procName, 1); if (index < 0 || index >= pta->n) return ERROR_INT("invalid index", procName, 1); if (px) *px = pta->x[index]; if (py) *py = pta->y[index]; return 0; } /*! * \brief ptaGetIPt() * * \param[in] pta * \param[in] index into arrays * \param[out] px [optional] integer x value * \param[out] py [optional] integer y value * \return 0 if OK; 1 on error */ l_int32 ptaGetIPt(PTA *pta, l_int32 index, l_int32 *px, l_int32 *py) { PROCNAME("ptaGetIPt"); if (px) *px = 0; if (py) *py = 0; if (!pta) return ERROR_INT("pta not defined", procName, 1); if (index < 0 || index >= pta->n) return ERROR_INT("invalid index", procName, 1); if (px) *px = (l_int32)(pta->x[index] + 0.5); if (py) *py = (l_int32)(pta->y[index] + 0.5); return 0; } /*! * \brief ptaSetPt() * * \param[in] pta * \param[in] index into arrays * \param[in] x, y * \return 0 if OK; 1 on error */ l_int32 ptaSetPt(PTA *pta, l_int32 index, l_float32 x, l_float32 y) { PROCNAME("ptaSetPt"); if (!pta) return ERROR_INT("pta not defined", procName, 1); if (index < 0 || index >= pta->n) return ERROR_INT("invalid index", procName, 1); pta->x[index] = x; pta->y[index] = y; return 0; } /*! * \brief ptaGetArrays() * * \param[in] pta * \param[out] pnax [optional] numa of x array * \param[out] pnay [optional] numa of y array * \return 0 if OK; 1 on error or if pta is empty * * <pre> * Notes: * (1) This copies the internal arrays into new Numas. * </pre> */ l_int32 ptaGetArrays(PTA *pta, NUMA **pnax, NUMA **pnay) { l_int32 i, n; NUMA *nax, *nay; PROCNAME("ptaGetArrays"); if (!pnax && !pnay) return ERROR_INT("no output requested", procName, 1); if (pnax) *pnax = NULL; if (pnay) *pnay = NULL; if (!pta) return ERROR_INT("pta not defined", procName, 1); if ((n = ptaGetCount(pta)) == 0) return ERROR_INT("pta is empty", procName, 1); if (pnax) { if ((nax = numaCreate(n)) == NULL) return ERROR_INT("nax not made", procName, 1); *pnax = nax; for (i = 0; i < n; i++) nax->array[i] = pta->x[i]; nax->n = n; } if (pnay) { if ((nay = numaCreate(n)) == NULL) return ERROR_INT("nay not made", procName, 1); *pnay = nay; for (i = 0; i < n; i++) nay->array[i] = pta->y[i]; nay->n = n; } return 0; } /*---------------------------------------------------------------------* * Pta serialized for I/O * *---------------------------------------------------------------------*/ /*! * \brief ptaRead() * * \param[in] filename * \return pta, or NULL on error */ PTA * ptaRead(const char *filename) { FILE *fp; PTA *pta; PROCNAME("ptaRead"); if (!filename) return (PTA *)ERROR_PTR("filename not defined", procName, NULL); if ((fp = fopenReadStream(filename)) == NULL) return (PTA *)ERROR_PTR("stream not opened", procName, NULL); pta = ptaReadStream(fp); fclose(fp); if (!pta) return (PTA *)ERROR_PTR("pta not read", procName, NULL); return pta; } /*! 
* \brief ptaReadStream() * * \param[in] fp file stream * \return pta, or NULL on error */ PTA * ptaReadStream(FILE *fp) { char typestr[128]; /* hardcoded below in fscanf */ l_int32 i, n, ix, iy, type, version; l_float32 x, y; PTA *pta; PROCNAME("ptaReadStream"); if (!fp) return (PTA *)ERROR_PTR("stream not defined", procName, NULL); if (fscanf(fp, "\n Pta Version %d\n", &version) != 1) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (version != PTA_VERSION_NUMBER) return (PTA *)ERROR_PTR("invalid pta version", procName, NULL); if (fscanf(fp, " Number of pts = %d; format = %127s\n", &n, typestr) != 2) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (!strcmp(typestr, "float")) type = 0; else /* typestr is "integer" */ type = 1; if ((pta = ptaCreate(n)) == NULL) return (PTA *)ERROR_PTR("pta not made", procName, NULL); for (i = 0; i < n; i++) { if (type == 0) { /* data is float */ if (fscanf(fp, " (%f, %f)\n", &x, &y) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading floats", procName, NULL); } ptaAddPt(pta, x, y); } else { /* data is integer */ if (fscanf(fp, " (%d, %d)\n", &ix, &iy) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading ints", procName, NULL); } ptaAddPt(pta, ix, iy); } } return pta; } /*! * \brief ptaReadMem() * * \param[in] data serialization in ascii * \param[in] size of data in bytes; can use strlen to get it * \return pta, or NULL on error */ PTA * ptaReadMem(const l_uint8 *data, size_t size) { FILE *fp; PTA *pta; PROCNAME("ptaReadMem"); if (!data) return (PTA *)ERROR_PTR("data not defined", procName, NULL); if ((fp = fopenReadFromMemory(data, size)) == NULL) return (PTA *)ERROR_PTR("stream not opened", procName, NULL); pta = ptaReadStream(fp); fclose(fp); if (!pta) L_ERROR("pta not read\n", procName); return pta; } /*! * \brief ptaWrite() * * \param[in] filename * \param[in] pta * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK, 1 on error */ l_int32 ptaWrite(const char *filename, PTA *pta, l_int32 type) { l_int32 ret; FILE *fp; PROCNAME("ptaWrite"); if (!filename) return ERROR_INT("filename not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); if ((fp = fopenWriteStream(filename, "w")) == NULL) return ERROR_INT("stream not opened", procName, 1); ret = ptaWriteStream(fp, pta, type); fclose(fp); if (ret) return ERROR_INT("pta not written to stream", procName, 1); return 0; } /*! * \brief ptaWriteStream() * * \param[in] fp file stream * \param[in] pta * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK; 1 on error */ l_int32 ptaWriteStream(FILE *fp, PTA *pta, l_int32 type) { l_int32 i, n, ix, iy; l_float32 x, y; PROCNAME("ptaWriteStream"); if (!fp) return ERROR_INT("stream not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaGetCount(pta); fprintf(fp, "\n Pta Version %d\n", PTA_VERSION_NUMBER); if (type == 0) fprintf(fp, " Number of pts = %d; format = float\n", n); else /* type == 1 */ fprintf(fp, " Number of pts = %d; format = integer\n", n); for (i = 0; i < n; i++) { if (type == 0) { /* data is float */ ptaGetPt(pta, i, &x, &y); fprintf(fp, " (%f, %f)\n", x, y); } else { /* data is integer */ ptaGetIPt(pta, i, &ix, &iy); fprintf(fp, " (%d, %d)\n", ix, iy); } } return 0; } /*! 
* \brief ptaWriteMem() * * \param[out] pdata data of serialized pta; ascii * \param[out] psize size of returned data * \param[in] pta * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) Serializes a pta in memory and puts the result in a buffer. * </pre> */ l_int32 ptaWriteMem(l_uint8 **pdata, size_t *psize, PTA *pta, l_int32 type) { l_int32 ret; FILE *fp; PROCNAME("ptaWriteMem"); if (pdata) *pdata = NULL; if (psize) *psize = 0; if (!pdata) return ERROR_INT("&data not defined", procName, 1); if (!psize) return ERROR_INT("&size not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); #if HAVE_FMEMOPEN if ((fp = open_memstream((char **)pdata, psize)) == NULL) return ERROR_INT("stream not opened", procName, 1); ret = ptaWriteStream(fp, pta, type); #else L_INFO("work-around: writing to a temp file\n", procName); #ifdef _WIN32 if ((fp = fopenWriteWinTempfile()) == NULL) return ERROR_INT("tmpfile stream not opened", procName, 1); #else if ((fp = tmpfile()) == NULL) return ERROR_INT("tmpfile stream not opened", procName, 1); #endif /* _WIN32 */ ret = ptaWriteStream(fp, pta, type); rewind(fp); *pdata = l_binaryReadStream(fp, psize); #endif /* HAVE_FMEMOPEN */ fclose(fp); return ret; } /*---------------------------------------------------------------------* * PTAA creation, destruction * *---------------------------------------------------------------------*/ /*! * \brief ptaaCreate() * * \param[in] n initial number of ptrs * \return ptaa, or NULL on error */ PTAA * ptaaCreate(l_int32 n) { PTAA *ptaa; PROCNAME("ptaaCreate"); if (n <= 0) n = INITIAL_PTR_ARRAYSIZE; if ((ptaa = (PTAA *)LEPT_CALLOC(1, sizeof(PTAA))) == NULL) return (PTAA *)ERROR_PTR("ptaa not made", procName, NULL); ptaa->n = 0; ptaa->nalloc = n; if ((ptaa->pta = (PTA **)LEPT_CALLOC(n, sizeof(PTA *))) == NULL) { ptaaDestroy(&ptaa); return (PTAA *)ERROR_PTR("pta ptrs not made", procName, NULL); } return ptaa; } /*! * \brief ptaaDestroy() * * \param[in,out] pptaa to be nulled * \return void */ void ptaaDestroy(PTAA **pptaa) { l_int32 i; PTAA *ptaa; PROCNAME("ptaaDestroy"); if (pptaa == NULL) { L_WARNING("ptr address is NULL!\n", procName); return; } if ((ptaa = *pptaa) == NULL) return; for (i = 0; i < ptaa->n; i++) ptaDestroy(&ptaa->pta[i]); LEPT_FREE(ptaa->pta); LEPT_FREE(ptaa); *pptaa = NULL; return; } /*---------------------------------------------------------------------* * PTAA array extension * *---------------------------------------------------------------------*/ /*! * \brief ptaaAddPta() * * \param[in] ptaa * \param[in] pta to be added * \param[in] copyflag L_INSERT, L_COPY, L_CLONE * \return 0 if OK, 1 on error */ l_int32 ptaaAddPta(PTAA *ptaa, PTA *pta, l_int32 copyflag) { l_int32 n; PTA *ptac; PROCNAME("ptaaAddPta"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); if (copyflag == L_INSERT) { ptac = pta; } else if (copyflag == L_COPY) { if ((ptac = ptaCopy(pta)) == NULL) return ERROR_INT("ptac not made", procName, 1); } else if (copyflag == L_CLONE) { if ((ptac = ptaClone(pta)) == NULL) return ERROR_INT("pta clone not made", procName, 1); } else { return ERROR_INT("invalid copyflag", procName, 1); } n = ptaaGetCount(ptaa); if (n >= ptaa->nalloc) ptaaExtendArray(ptaa); ptaa->pta[n] = ptac; ptaa->n++; return 0; } /*! 
* \brief ptaaExtendArray() * * \param[in] ptaa * \return 0 if OK, 1 on error */ static l_int32 ptaaExtendArray(PTAA *ptaa) { PROCNAME("ptaaExtendArray"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if ((ptaa->pta = (PTA **)reallocNew((void **)&ptaa->pta, sizeof(PTA *) * ptaa->nalloc, 2 * sizeof(PTA *) * ptaa->nalloc)) == NULL) return ERROR_INT("new ptr array not returned", procName, 1); ptaa->nalloc = 2 * ptaa->nalloc; return 0; } /*---------------------------------------------------------------------* * Ptaa accessors * *---------------------------------------------------------------------*/ /*! * \brief ptaaGetCount() * * \param[in] ptaa * \return count, or 0 if no ptaa */ l_int32 ptaaGetCount(PTAA *ptaa) { PROCNAME("ptaaGetCount"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 0); return ptaa->n; } /*! * \brief ptaaGetPta() * * \param[in] ptaa * \param[in] index to the i-th pta * \param[in] accessflag L_COPY or L_CLONE * \return pta, or NULL on error */ PTA * ptaaGetPta(PTAA *ptaa, l_int32 index, l_int32 accessflag) { PROCNAME("ptaaGetPta"); if (!ptaa) return (PTA *)ERROR_PTR("ptaa not defined", procName, NULL); if (index < 0 || index >= ptaa->n) return (PTA *)ERROR_PTR("index not valid", procName, NULL); if (accessflag == L_COPY) return ptaCopy(ptaa->pta[index]); else if (accessflag == L_CLONE) return ptaClone(ptaa->pta[index]); else return (PTA *)ERROR_PTR("invalid accessflag", procName, NULL); } /*! * \brief ptaaGetPt() * * \param[in] ptaa * \param[in] ipta to the i-th pta * \param[in] jpt index to the j-th pt in the pta * \param[out] px [optional] float x value * \param[out] py [optional] float y value * \return 0 if OK; 1 on error */ l_int32 ptaaGetPt(PTAA *ptaa, l_int32 ipta, l_int32 jpt, l_float32 *px, l_float32 *py) { PTA *pta; PROCNAME("ptaaGetPt"); if (px) *px = 0; if (py) *py = 0; if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (ipta < 0 || ipta >= ptaa->n) return ERROR_INT("index ipta not valid", procName, 1); pta = ptaaGetPta(ptaa, ipta, L_CLONE); if (jpt < 0 || jpt >= pta->n) { ptaDestroy(&pta); return ERROR_INT("index jpt not valid", procName, 1); } ptaGetPt(pta, jpt, px, py); ptaDestroy(&pta); return 0; } /*---------------------------------------------------------------------* * Ptaa array modifiers * *---------------------------------------------------------------------*/ /*! * \brief ptaaInitFull() * * \param[in] ptaa can have non-null ptrs in the ptr array * \param[in] pta to be replicated into the entire ptr array * \return 0 if OK; 1 on error */ l_int32 ptaaInitFull(PTAA *ptaa, PTA *pta) { l_int32 n, i; PTA *ptat; PROCNAME("ptaaInitFull"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaa->nalloc; ptaa->n = n; for (i = 0; i < n; i++) { ptat = ptaCopy(pta); ptaaReplacePta(ptaa, i, ptat); } return 0; } /*! * \brief ptaaReplacePta() * * \param[in] ptaa * \param[in] index to the index-th pta * \param[in] pta insert and replace any existing one * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) Any existing pta is destroyed, and the input one * is inserted in its place. 
* (2) If the index is invalid, return 1 (error) * </pre> */ l_int32 ptaaReplacePta(PTAA *ptaa, l_int32 index, PTA *pta) { l_int32 n; PROCNAME("ptaaReplacePta"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (!pta) return ERROR_INT("pta not defined", procName, 1); n = ptaaGetCount(ptaa); if (index < 0 || index >= n) return ERROR_INT("index not valid", procName, 1); ptaDestroy(&ptaa->pta[index]); ptaa->pta[index] = pta; return 0; } /*! * \brief ptaaAddPt() * * \param[in] ptaa * \param[in] ipta to the i-th pta * \param[in] x,y point coordinates * \return 0 if OK; 1 on error */ l_int32 ptaaAddPt(PTAA *ptaa, l_int32 ipta, l_float32 x, l_float32 y) { PTA *pta; PROCNAME("ptaaAddPt"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if (ipta < 0 || ipta >= ptaa->n) return ERROR_INT("index ipta not valid", procName, 1); pta = ptaaGetPta(ptaa, ipta, L_CLONE); ptaAddPt(pta, x, y); ptaDestroy(&pta); return 0; } /*! * \brief ptaaTruncate() * * \param[in] ptaa * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) This identifies the largest index containing a pta that * has any points within it, destroys all pta above that index, * and resets the count. * </pre> */ l_int32 ptaaTruncate(PTAA *ptaa) { l_int32 i, n, np; PTA *pta; PROCNAME("ptaaTruncate"); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); n = ptaaGetCount(ptaa); for (i = n - 1; i >= 0; i--) { pta = ptaaGetPta(ptaa, i, L_CLONE); if (!pta) { ptaa->n--; continue; } np = ptaGetCount(pta); ptaDestroy(&pta); if (np == 0) { ptaDestroy(&ptaa->pta[i]); ptaa->n--; } else { break; } } return 0; } /*---------------------------------------------------------------------* * Ptaa serialized for I/O * *---------------------------------------------------------------------*/ /*! * \brief ptaaRead() * * \param[in] filename * \return ptaa, or NULL on error */ PTAA * ptaaRead(const char *filename) { FILE *fp; PTAA *ptaa; PROCNAME("ptaaRead"); if (!filename) return (PTAA *)ERROR_PTR("filename not defined", procName, NULL); if ((fp = fopenReadStream(filename)) == NULL) return (PTAA *)ERROR_PTR("stream not opened", procName, NULL); ptaa = ptaaReadStream(fp); fclose(fp); if (!ptaa) return (PTAA *)ERROR_PTR("ptaa not read", procName, NULL); return ptaa; } /*! * \brief ptaaReadStream() * * \param[in] fp file stream * \return ptaa, or NULL on error */ PTAA * ptaaReadStream(FILE *fp) { l_int32 i, n, version; PTA *pta; PTAA *ptaa; PROCNAME("ptaaReadStream"); if (!fp) return (PTAA *)ERROR_PTR("stream not defined", procName, NULL); if (fscanf(fp, "\nPtaa Version %d\n", &version) != 1) return (PTAA *)ERROR_PTR("not a ptaa file", procName, NULL); if (version != PTA_VERSION_NUMBER) return (PTAA *)ERROR_PTR("invalid ptaa version", procName, NULL); if (fscanf(fp, "Number of Pta = %d\n", &n) != 1) return (PTAA *)ERROR_PTR("not a ptaa file", procName, NULL); if ((ptaa = ptaaCreate(n)) == NULL) return (PTAA *)ERROR_PTR("ptaa not made", procName, NULL); for (i = 0; i < n; i++) { if ((pta = ptaReadStream(fp)) == NULL) { ptaaDestroy(&ptaa); return (PTAA *)ERROR_PTR("error reading pta", procName, NULL); } ptaaAddPta(ptaa, pta, L_INSERT); } return ptaa; } /*! 
* \brief ptaaReadMem() * * \param[in] data serialization in ascii * \param[in] size of data in bytes; can use strlen to get it * \return ptaa, or NULL on error */ PTAA * ptaaReadMem(const l_uint8 *data, size_t size) { FILE *fp; PTAA *ptaa; PROCNAME("ptaaReadMem"); if (!data) return (PTAA *)ERROR_PTR("data not defined", procName, NULL); if ((fp = fopenReadFromMemory(data, size)) == NULL) return (PTAA *)ERROR_PTR("stream not opened", procName, NULL); ptaa = ptaaReadStream(fp); fclose(fp); if (!ptaa) L_ERROR("ptaa not read\n", procName); return ptaa; } /*! * \brief ptaaWrite() * * \param[in] filename * \param[in] ptaa * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK, 1 on error */ l_int32 ptaaWrite(const char *filename, PTAA *ptaa, l_int32 type) { l_int32 ret; FILE *fp; PROCNAME("ptaaWrite"); if (!filename) return ERROR_INT("filename not defined", procName, 1); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); if ((fp = fopenWriteStream(filename, "w")) == NULL) return ERROR_INT("stream not opened", procName, 1); ret = ptaaWriteStream(fp, ptaa, type); fclose(fp); if (ret) return ERROR_INT("ptaa not written to stream", procName, 1); return 0; } /*! * \brief ptaaWriteStream() * * \param[in] fp file stream * \param[in] ptaa * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK; 1 on error */ l_int32 ptaaWriteStream(FILE *fp, PTAA *ptaa, l_int32 type) { l_int32 i, n; PTA *pta; PROCNAME("ptaaWriteStream"); if (!fp) return ERROR_INT("stream not defined", procName, 1); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); n = ptaaGetCount(ptaa); fprintf(fp, "\nPtaa Version %d\n", PTA_VERSION_NUMBER); fprintf(fp, "Number of Pta = %d\n", n); for (i = 0; i < n; i++) { pta = ptaaGetPta(ptaa, i, L_CLONE); ptaWriteStream(fp, pta, type); ptaDestroy(&pta); } return 0; } /*! * \brief ptaaWriteMem() * * \param[out] pdata data of serialized ptaa; ascii * \param[out] psize size of returned data * \param[in] ptaa * \param[in] type 0 for float values; 1 for integer values * \return 0 if OK, 1 on error * * <pre> * Notes: * (1) Serializes a ptaa in memory and puts the result in a buffer. * </pre> */ l_int32 ptaaWriteMem(l_uint8 **pdata, size_t *psize, PTAA *ptaa, l_int32 type) { l_int32 ret; FILE *fp; PROCNAME("ptaaWriteMem"); if (pdata) *pdata = NULL; if (psize) *psize = 0; if (!pdata) return ERROR_INT("&data not defined", procName, 1); if (!psize) return ERROR_INT("&size not defined", procName, 1); if (!ptaa) return ERROR_INT("ptaa not defined", procName, 1); #if HAVE_FMEMOPEN if ((fp = open_memstream((char **)pdata, psize)) == NULL) return ERROR_INT("stream not opened", procName, 1); ret = ptaaWriteStream(fp, ptaa, type); #else L_INFO("work-around: writing to a temp file\n", procName); #ifdef _WIN32 if ((fp = fopenWriteWinTempfile()) == NULL) return ERROR_INT("tmpfile stream not opened", procName, 1); #else if ((fp = tmpfile()) == NULL) return ERROR_INT("tmpfile stream not opened", procName, 1); #endif /* _WIN32 */ ret = ptaaWriteStream(fp, ptaa, type); rewind(fp); *pdata = l_binaryReadStream(fp, psize); #endif /* HAVE_FMEMOPEN */ fclose(fp); return ret; }
ptaReadStream(FILE *fp) { char typestr[128]; l_int32 i, n, ix, iy, type, version; l_float32 x, y; PTA *pta; PROCNAME("ptaReadStream"); if (!fp) return (PTA *)ERROR_PTR("stream not defined", procName, NULL); if (fscanf(fp, "\n Pta Version %d\n", &version) != 1) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (version != PTA_VERSION_NUMBER) return (PTA *)ERROR_PTR("invalid pta version", procName, NULL); if (fscanf(fp, " Number of pts = %d; format = %s\n", &n, typestr) != 2) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (!strcmp(typestr, "float")) type = 0; else /* typestr is "integer" */ type = 1; if ((pta = ptaCreate(n)) == NULL) return (PTA *)ERROR_PTR("pta not made", procName, NULL); for (i = 0; i < n; i++) { if (type == 0) { /* data is float */ if (fscanf(fp, " (%f, %f)\n", &x, &y) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading floats", procName, NULL); } ptaAddPt(pta, x, y); } else { /* data is integer */ if (fscanf(fp, " (%d, %d)\n", &ix, &iy) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading ints", procName, NULL); } ptaAddPt(pta, ix, iy); } } return pta; }
ptaReadStream(FILE *fp) { char typestr[128]; /* hardcoded below in fscanf */ l_int32 i, n, ix, iy, type, version; l_float32 x, y; PTA *pta; PROCNAME("ptaReadStream"); if (!fp) return (PTA *)ERROR_PTR("stream not defined", procName, NULL); if (fscanf(fp, "\n Pta Version %d\n", &version) != 1) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (version != PTA_VERSION_NUMBER) return (PTA *)ERROR_PTR("invalid pta version", procName, NULL); if (fscanf(fp, " Number of pts = %d; format = %127s\n", &n, typestr) != 2) return (PTA *)ERROR_PTR("not a pta file", procName, NULL); if (!strcmp(typestr, "float")) type = 0; else /* typestr is "integer" */ type = 1; if ((pta = ptaCreate(n)) == NULL) return (PTA *)ERROR_PTR("pta not made", procName, NULL); for (i = 0; i < n; i++) { if (type == 0) { /* data is float */ if (fscanf(fp, " (%f, %f)\n", &x, &y) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading floats", procName, NULL); } ptaAddPt(pta, x, y); } else { /* data is integer */ if (fscanf(fp, " (%d, %d)\n", &ix, &iy) != 2) { ptaDestroy(&pta); return (PTA *)ERROR_PTR("error reading ints", procName, NULL); } ptaAddPt(pta, ix, iy); } } return pta; }
diff: {'added': [(692, 'char typestr[128]; /* hardcoded below in fscanf */'), (706, ' if (fscanf(fp, " Number of pts = %d; format = %127s\\n", &n, typestr) != 2)')], 'deleted': [(692, 'char typestr[128];'), (706, ' if (fscanf(fp, " Number of pts = %d; format = %s\\n", &n, typestr) != 2)')]}
num_lines_added: 2
num_lines_deleted: 2
num_lines_in_file: 806
num_tokens_in_file: 5,172
num_lines_in_method: 38
num_tokens_in_method: 309
method_complexity: 11
repo: https://github.com/DanBloomberg/leptonica
cve_id: CVE-2018-7186
cwe_id: CWE-787
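Note on this record: the two-line diff is the entire CVE-2018-7186 fix. ptaReadStream() scanned an attacker-controlled "format" token with an unbounded %s conversion into the 128-byte typestr stack buffer; the patch bounds the conversion with %127s, reserving one byte for the terminating NUL (CWE-787, out-of-bounds write). A minimal self-contained sketch of the pattern follows; the file name and format line are hypothetical stand-ins, not the real pta serialization, and it compiles as either C or C++:

#include <stdio.h>

int main(void)
{
    char typestr[128];
    int  n = 0;
    FILE *fp = fopen("points.txt", "r");   /* hypothetical input file */
    if (!fp)
        return 1;
    /* Unsafe form: "%s" writes as many bytes as the file supplies and
     * overflows typestr on hostile input.  Bounded form: "%127s" stores
     * at most 127 bytes plus the NUL that fscanf appends. */
    if (fscanf(fp, " Number of pts = %d; format = %127s", &n, typestr) != 2) {
        fclose(fp);
        return 1;
    }
    printf("n = %d, format = %s\n", n, typestr);
    fclose(fp);
    return 0;
}

The same width rule applies to any fscanf/sscanf %s or %[ conversion into a fixed buffer: the width must be at most sizeof(buffer) - 1.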
id: 2,801
file_name: hashtable_lookup.cc
programming_language: C++
method_name: tflite::ops::builtin::Prepare
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Op that looks up items from hashtable. // // Input: // Tensor[0]: Hash key to lookup, dim.size == 1, int32 // Tensor[1]: Key of hashtable, dim.size == 1, int32 // *MUST* be sorted in ascending order. // Tensor[2]: Value of hashtable, dim.size >= 1 // Tensor[1].Dim[0] == Tensor[2].Dim[0] // // Output: // Output[0].dim[0] == Tensor[0].dim[0], num of lookups // Each item in output is a raw bytes copy of corresponding item in input. // When key does not exist in hashtable, the returned bytes are all 0s. // // Output[1].dim = { Tensor[0].dim[0] }, num of lookups // Each item indicates whether the corresponding lookup has a returned value. // 0 for missing key, 1 for found key. #include <stdint.h> #include <cstdlib> #include <cstring> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/string_util.h" namespace tflite { namespace ops { namespace builtin { namespace { int greater(const void* a, const void* b) { return *static_cast<const int*>(a) - *static_cast<const int*>(b); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); const TfLiteTensor* lookup = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1); TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32); const TfLiteTensor* key = GetInput(context, node, 1); TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1); TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32); const TfLiteTensor* value = GetInput(context, node, 2); TF_LITE_ENSURE(context, NumDimensions(value) >= 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0), SizeOfDimension(value, 0)); if (value->type == kTfLiteString) { TF_LITE_ENSURE_EQ(context, NumDimensions(value), 1); } TfLiteTensor* hits = GetOutput(context, node, 1); TF_LITE_ENSURE_EQ(context, hits->type, kTfLiteUInt8); TfLiteIntArray* hitSize = TfLiteIntArrayCreate(1); hitSize->data[0] = SizeOfDimension(lookup, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, value->type, output->type); TfLiteStatus status = kTfLiteOk; if (output->type != kTfLiteString) { TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value)); outputSize->data[0] = SizeOfDimension(lookup, 0); for (int i = 1; i < NumDimensions(value); i++) { outputSize->data[i] = SizeOfDimension(value, i); } status = context->ResizeTensor(context, output, outputSize); } if (context->ResizeTensor(context, hits, hitSize) != kTfLiteOk) { status = kTfLiteError; } return status; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = GetOutput(context, node, 0); TfLiteTensor* hits = GetOutput(context, node, 1); const TfLiteTensor* lookup = GetInput(context, node, 0); const TfLiteTensor* key = GetInput(context, node, 1); const TfLiteTensor* value = 
GetInput(context, node, 2); const int num_rows = SizeOfDimension(value, 0); const int row_bytes = value->bytes / num_rows; void* pointer = nullptr; DynamicBuffer buf; for (int i = 0; i < SizeOfDimension(lookup, 0); i++) { int idx = -1; pointer = bsearch(&(lookup->data.i32[i]), key->data.i32, num_rows, sizeof(int32_t), greater); if (pointer != nullptr) { idx = (reinterpret_cast<char*>(pointer) - (key->data.raw)) / sizeof(int32_t); } if (idx >= num_rows || idx < 0) { if (output->type == kTfLiteString) { buf.AddString(nullptr, 0); } else { memset(output->data.raw + i * row_bytes, 0, row_bytes); } hits->data.uint8[i] = 0; } else { if (output->type == kTfLiteString) { buf.AddString(GetString(value, idx)); } else { memcpy(output->data.raw + i * row_bytes, value->data.raw + idx * row_bytes, row_bytes); } hits->data.uint8[i] = 1; } } if (output->type == kTfLiteString) { buf.WriteToTensorAsVector(output); } return kTfLiteOk; } } // namespace TfLiteRegistration* Register_HASHTABLE_LOOKUP() { static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // Op that looks up items from hashtable. // // Input: // Tensor[0]: Hash key to lookup, dim.size == 1, int32 // Tensor[1]: Key of hashtable, dim.size == 1, int32 // *MUST* be sorted in ascending order. // Tensor[2]: Value of hashtable, dim.size >= 1 // Tensor[1].Dim[0] == Tensor[2].Dim[0] // // Output: // Output[0].dim[0] == Tensor[0].dim[0], num of lookups // Each item in output is a raw bytes copy of corresponding item in input. // When key does not exist in hashtable, the returned bytes are all 0s. // // Output[1].dim = { Tensor[0].dim[0] }, num of lookups // Each item indicates whether the corresponding lookup has a returned value. // 0 for missing key, 1 for found key. #include <stdint.h> #include <cstdlib> #include <cstring> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/string_util.h" namespace tflite { namespace ops { namespace builtin { namespace { int greater(const void* a, const void* b) { return *static_cast<const int*>(a) - *static_cast<const int*>(b); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); const TfLiteTensor* lookup; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup)); TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1); TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32); const TfLiteTensor* key; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key)); TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1); TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32); const TfLiteTensor* value; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value)); TF_LITE_ENSURE(context, NumDimensions(value) >= 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0), SizeOfDimension(value, 0)); if (value->type == kTfLiteString) { TF_LITE_ENSURE_EQ(context, NumDimensions(value), 1); } TfLiteTensor* hits; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits)); TF_LITE_ENSURE_EQ(context, hits->type, kTfLiteUInt8); TfLiteIntArray* hitSize = TfLiteIntArrayCreate(1); hitSize->data[0] = SizeOfDimension(lookup, 0); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_EQ(context, value->type, output->type); TfLiteStatus status = kTfLiteOk; if (output->type != kTfLiteString) { TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value)); outputSize->data[0] = SizeOfDimension(lookup, 0); for (int i = 1; i < NumDimensions(value); i++) { outputSize->data[i] = SizeOfDimension(value, i); } status = context->ResizeTensor(context, output, outputSize); } if (context->ResizeTensor(context, hits, hitSize) != kTfLiteOk) { status = kTfLiteError; } return status; } TfLiteStatus Eval(TfLiteContext* context, 
TfLiteNode* node) { TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TfLiteTensor* hits; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits)); const TfLiteTensor* lookup; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup)); const TfLiteTensor* key; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key)); const TfLiteTensor* value; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value)); const int num_rows = SizeOfDimension(value, 0); const int row_bytes = value->bytes / num_rows; void* pointer = nullptr; DynamicBuffer buf; for (int i = 0; i < SizeOfDimension(lookup, 0); i++) { int idx = -1; pointer = bsearch(&(lookup->data.i32[i]), key->data.i32, num_rows, sizeof(int32_t), greater); if (pointer != nullptr) { idx = (reinterpret_cast<char*>(pointer) - (key->data.raw)) / sizeof(int32_t); } if (idx >= num_rows || idx < 0) { if (output->type == kTfLiteString) { buf.AddString(nullptr, 0); } else { memset(output->data.raw + i * row_bytes, 0, row_bytes); } hits->data.uint8[i] = 0; } else { if (output->type == kTfLiteString) { buf.AddString(GetString(value, idx)); } else { memcpy(output->data.raw + i * row_bytes, value->data.raw + idx * row_bytes, row_bytes); } hits->data.uint8[i] = 1; } } if (output->type == kTfLiteString) { buf.WriteToTensorAsVector(output); } return kTfLiteOk; } } // namespace TfLiteRegistration* Register_HASHTABLE_LOOKUP() { static TfLiteRegistration r = {nullptr, nullptr, Prepare, Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); const TfLiteTensor* lookup = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1); TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32); const TfLiteTensor* key = GetInput(context, node, 1); TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1); TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32); const TfLiteTensor* value = GetInput(context, node, 2); TF_LITE_ENSURE(context, NumDimensions(value) >= 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0), SizeOfDimension(value, 0)); if (value->type == kTfLiteString) { TF_LITE_ENSURE_EQ(context, NumDimensions(value), 1); } TfLiteTensor* hits = GetOutput(context, node, 1); TF_LITE_ENSURE_EQ(context, hits->type, kTfLiteUInt8); TfLiteIntArray* hitSize = TfLiteIntArrayCreate(1); hitSize->data[0] = SizeOfDimension(lookup, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, value->type, output->type); TfLiteStatus status = kTfLiteOk; if (output->type != kTfLiteString) { TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value)); outputSize->data[0] = SizeOfDimension(lookup, 0); for (int i = 1; i < NumDimensions(value); i++) { outputSize->data[i] = SizeOfDimension(value, i); } status = context->ResizeTensor(context, output, outputSize); } if (context->ResizeTensor(context, hits, hitSize) != kTfLiteOk) { status = kTfLiteError; } return status; }
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 2); const TfLiteTensor* lookup; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup)); TF_LITE_ENSURE_EQ(context, NumDimensions(lookup), 1); TF_LITE_ENSURE_EQ(context, lookup->type, kTfLiteInt32); const TfLiteTensor* key; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key)); TF_LITE_ENSURE_EQ(context, NumDimensions(key), 1); TF_LITE_ENSURE_EQ(context, key->type, kTfLiteInt32); const TfLiteTensor* value; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value)); TF_LITE_ENSURE(context, NumDimensions(value) >= 1); TF_LITE_ENSURE_EQ(context, SizeOfDimension(key, 0), SizeOfDimension(value, 0)); if (value->type == kTfLiteString) { TF_LITE_ENSURE_EQ(context, NumDimensions(value), 1); } TfLiteTensor* hits; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits)); TF_LITE_ENSURE_EQ(context, hits->type, kTfLiteUInt8); TfLiteIntArray* hitSize = TfLiteIntArrayCreate(1); hitSize->data[0] = SizeOfDimension(lookup, 0); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_EQ(context, value->type, output->type); TfLiteStatus status = kTfLiteOk; if (output->type != kTfLiteString) { TfLiteIntArray* outputSize = TfLiteIntArrayCreate(NumDimensions(value)); outputSize->data[0] = SizeOfDimension(lookup, 0); for (int i = 1; i < NumDimensions(value); i++) { outputSize->data[i] = SizeOfDimension(value, i); } status = context->ResizeTensor(context, output, outputSize); } if (context->ResizeTensor(context, hits, hitSize) != kTfLiteOk) { status = kTfLiteError; } return status; }
diff: {'added': [(40, '#include "tensorflow/lite/kernels/internal/compatibility.h"'), (58, ' const TfLiteTensor* lookup;'), (59, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));'), (63, ' const TfLiteTensor* key;'), (64, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key));'), (68, ' const TfLiteTensor* value;'), (69, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value));'), (77, ' TfLiteTensor* hits;'), (78, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits));'), (83, ' TfLiteTensor* output;'), (84, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (103, ' TfLiteTensor* output;'), (104, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (105, ' TfLiteTensor* hits;'), (106, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 1, &hits));'), (107, ' const TfLiteTensor* lookup;'), (108, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &lookup));'), (109, ' const TfLiteTensor* key;'), (110, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &key));'), (111, ' const TfLiteTensor* value;'), (112, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &value));')], 'deleted': [(57, ' const TfLiteTensor* lookup = GetInput(context, node, 0);'), (61, ' const TfLiteTensor* key = GetInput(context, node, 1);'), (65, ' const TfLiteTensor* value = GetInput(context, node, 2);'), (73, ' TfLiteTensor* hits = GetOutput(context, node, 1);'), (78, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (97, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (98, ' TfLiteTensor* hits = GetOutput(context, node, 1);'), (99, ' const TfLiteTensor* lookup = GetInput(context, node, 0);'), (100, ' const TfLiteTensor* key = GetInput(context, node, 1);'), (101, ' const TfLiteTensor* value = GetInput(context, node, 2);')]}
num_lines_added: 21
num_lines_deleted: 10
num_lines_in_file: 108
num_tokens_in_file: 884
num_lines_in_method: 36
num_tokens_in_method: 361
method_complexity: 5
repo: https://github.com/tensorflow/tensorflow
cve_id: CVE-2020-15211
cwe_id: CWE-125
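Note on this record: the diff replaces every unchecked GetInput()/GetOutput() call in Prepare() and Eval() with the status-returning GetInputSafe()/GetOutputSafe(), wrapped in TF_LITE_ENSURE_OK so that a malformed model with an out-of-range tensor index fails the op cleanly instead of dereferencing a null pointer and reading out of bounds (CVE-2020-15211, CWE-125). A self-contained analogue of the pattern, in which Tensor, GetInput, and GetInputSafe are simplified stand-ins for the TFLite types rather than the real API:

// Analogue of the GetInput -> GetInputSafe change in the diff above.
// Tensor, GetInput, and GetInputSafe are hypothetical stand-ins; only
// the control-flow pattern mirrors the real fix.
#include <cstdio>
#include <vector>

struct Tensor { int bytes; };

// Before: an out-of-range index yields nullptr, which callers may
// dereference without checking.
const Tensor* GetInput(const std::vector<Tensor>& inputs, int i) {
    return (i >= 0 && i < static_cast<int>(inputs.size())) ? &inputs[i]
                                                           : nullptr;
}

// After: failure becomes a status the caller must route, mirroring
// TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &tensor)).
bool GetInputSafe(const std::vector<Tensor>& inputs, int i,
                  const Tensor** out) {
    if (i < 0 || i >= static_cast<int>(inputs.size())) return false;
    *out = &inputs[i];
    return true;
}

int main() {
    std::vector<Tensor> inputs = {{16}, {32}};
    const Tensor* t = nullptr;
    if (!GetInputSafe(inputs, 2, &t)) {   // index 2 is out of range
        std::puts("bad tensor index rejected; nothing dereferenced");
        return 1;
    }
    std::printf("bytes = %d\n", t->bytes);
    return 0;
}

The design point mirrored here is that the safe accessor forces the failure path through the caller's status handling, whereas a nullptr return is easy to leave unchecked.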
id: 2,420
file_name: activations.cc
programming_language: C++
method_name: tflite::ops::builtin::activations::GenericPrepare
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stddef.h> #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" #include "tensorflow/lite/kernels/internal/reference/logistic.h" #include "tensorflow/lite/kernels/internal/reference/prelu.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/reference/tanh.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #if __aarch64__ && __clang__ #include <arm_neon.h> #endif namespace tflite { namespace ops { namespace builtin { namespace activations { // TODO(b/142762739): We should figure out a multi-threading plan for most of // the activation ops below. 
enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized, }; struct OpData { int32_t input_multiplier = 0; int input_left_shift = 0; int32_t input_range_radius = 0; int diff_min = 0; uint8_t table[256] = {0}; }; struct SoftmaxOpData { struct SoftmaxParams params = {}; float table[256]; #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT uint8_t uint8_table1[256]; uint8_t uint8_table2[256]; #endif static constexpr int kInt16LUTArraySize = 513; int16_t exp_lut[kInt16LUTArraySize]; // int16 LUT for exp(x), where x uniform // distributed between [-10.0 , 0.0] int16_t one_over_one_plus_x_lut[kInt16LUTArraySize]; // int16 LUT for 1 / // (1 + x), where x // uniform distributed // between [0.0 , 1.0] }; struct LogSoftmaxOpData : public OpData { int32_t reverse_scaling_divisor = 0; int32_t reverse_scaling_right_shift = 0; struct SoftmaxParams params = {}; float f_table[256]; }; struct LeakyReluOpData : public OpData { int32_t output_multiplier_alpha = 0; int32_t output_shift_alpha = 0; int32_t output_multiplier_identity = 0; int32_t output_shift_identity = 0; }; struct PreluOpData : public OpData { int32_t output_multiplier_1 = 0; int32_t output_shift_1 = 0; int32_t output_multiplier_2 = 0; int32_t output_shift_2 = 0; bool requires_broadcast; }; struct HardSwishData { HardSwishParams params; }; struct ReluOpData : public OpData { int32_t output_multiplier = 0; int output_shift = 0; }; namespace { TfLiteStatus CheckOutputQuantParams(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* output) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); } return kTfLiteOk; } template <typename T> void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output, const std::function<float(float)>& transform) { static_assert(sizeof(T) == 1, "Lookup table valid only for 8bit"); const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits<T>::max(); int32_t minval = std::numeric_limits<T>::min(); for (int32_t val = minval; val <= maxval; ++val) { const float dequantized = input->params.scale * (val - input->params.zero_point); const float transformed = transform(dequantized); const float rescaled = std::round(transformed * inverse_scale); const int32_t quantized = static_cast<int32_t>(rescaled + output->params.zero_point); data->table[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>( static_cast<T>(std::max(std::min(maxval, quantized), minval))); } } // TODO(b/143696793): move this to optimized_ops. void EvalUsingLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); uint8_t* output_data = GetTensorData<uint8_t>(output); const uint8_t* input_data = GetTensorData<uint8_t>(input); int i = 0; #if __aarch64__ && __clang__ // This code uses ARM64-only instructions. // TODO(b/143709993): Port to ARMv7 // Load the tables into registers. (4*4 128-bit registers) uint8x16x4_t table[4]; table[0] = vld1q_u8_x4(data->table + 16 * 4 * 0); table[1] = vld1q_u8_x4(data->table + 16 * 4 * 1); table[2] = vld1q_u8_x4(data->table + 16 * 4 * 2); table[3] = vld1q_u8_x4(data->table + 16 * 4 * 3); // Vectorized loop; process uint8x16_t (16 elements) at a time. 
  constexpr int vectorized_16_loop_step = 16;
  const int vectorized_16_loop_end =
      size / vectorized_16_loop_step * vectorized_16_loop_step;
  for (; i < vectorized_16_loop_end; i += vectorized_16_loop_step) {
    uint8x16_t input = vld1q_u8(input_data + i);
    uint8x16_t output = optimized_ops::aarch64_lookup_vector(table, input);
    vst1q_u8(output_data + i, output);
  }
  // Postamble and non-ARM64 code: simple for loop.
#endif
  for (; i < size; ++i) {
    output_data[i] = data->table[input_data[i]];
  }
}

template <typename T>
void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input,
                    TfLiteTensor* output, const ReluOpData* data) {
  ReluParams params;
  params.quantized_activation_min =
      std::max(static_cast<int32_t>(std::numeric_limits<T>::min()),
               output->params.zero_point +
                   static_cast<int32>(roundf(act_min / output->params.scale)));
  params.quantized_activation_max =
      act_max == std::numeric_limits<float>::infinity()
          ? static_cast<int32_t>(std::numeric_limits<T>::max())
          : std::min(
                static_cast<int32_t>(std::numeric_limits<T>::max()),
                output->params.zero_point +
                    static_cast<int32>(roundf(act_max / output->params.scale)));
  params.input_offset = input->params.zero_point;
  params.output_offset = output->params.zero_point;
  params.output_multiplier = data->output_multiplier;
  params.output_shift = data->output_shift;
  optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input),
                       GetTensorShape(output), GetTensorData<T>(output));
}

}  // namespace

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  // This is a builtin op, so we don't use the contents in 'buffer', if any.
  // Instead, we allocate a new object to carry information from Prepare() to
  // Eval().
  return new OpData;
}

void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) {
  return new SoftmaxOpData;
}

void SoftmaxFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<SoftmaxOpData*>(buffer);
}

void* LogSoftmaxInit(TfLiteContext* context, const char* buffer,
                     size_t length) {
  return new LogSoftmaxOpData;
}

void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) {
  return new PreluOpData;
}

void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}

void LogSoftmaxFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<LogSoftmaxOpData*>(buffer);
}

void PreluFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<PreluOpData*>(buffer);
}

void* HardSwishInit(TfLiteContext* context, const char* buffer,
                    size_t length) {
  return new HardSwishData;
}

TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) {
  return new ReluOpData;
}

void ReluFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<ReluOpData*>(buffer);
}

TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) {
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
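  // For quantized Relu the requantization below folds the input and output
  // scales into one fixed-point multiplier. A worked example (illustrative
  // values, not from any particular model): with input scale 0.5 and output
  // scale 0.25, real_multiplier = 2.0. QuantizeMultiplier represents this as
  // output_multiplier * 2^(output_shift - 31), so it returns
  // output_multiplier = 1 << 30 and output_shift = 2, since
  // (2^30 / 2^31) * 2^2 == 2.0.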
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) {
    double real_multiplier = input->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier, &data->output_multiplier,
                       &data->output_shift);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

void* LeakyReluInit(TfLiteContext* context, const char* buffer,
                    size_t length) {
  return new LeakyReluOpData;
}

void LeakyReluFree(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<LeakyReluOpData*>(buffer);
}

void HardSwishFree(TfLiteContext* context, void* buffer) {
  delete static_cast<HardSwishData*>(buffer);
}

TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_STATUS(GenericPrepare(context, node));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) {
    HardSwishData* data = static_cast<HardSwishData*>(node->user_data);
    HardSwishParams* params = &data->params;
    const TfLiteTensor* input;
    TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
    params->input_zero_point = input->params.zero_point;
    params->output_zero_point = output->params.zero_point;
    const float input_scale = input->params.scale;
    const float hires_input_scale = (1.0f / 128.0f) * input_scale;
    const float reluish_scale = 3.0f / 32768.0f;
    const float output_scale = output->params.scale;
    const float output_multiplier = hires_input_scale / output_scale;

    int32_t output_multiplier_fixedpoint_int32;
    QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32,
                       &params->output_multiplier_exponent);
    DownScaleInt32ToInt16Multiplier(
        output_multiplier_fixedpoint_int32,
        &params->output_multiplier_fixedpoint_int16);
    TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0);

    const float reluish_multiplier = hires_input_scale / reluish_scale;
    int32_t reluish_multiplier_fixedpoint_int32;
    QuantizeMultiplier(reluish_multiplier,
                       &reluish_multiplier_fixedpoint_int32,
                       &params->reluish_multiplier_exponent);
    DownScaleInt32ToInt16Multiplier(
        reluish_multiplier_fixedpoint_int32,
        &params->reluish_multiplier_fixedpoint_int16);
  }
  return kTfLiteOk;
}

TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data);

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      output->type == kTfLiteInt16) {
    const auto* params =
        reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);

    double alpha_multiplier =
        input->params.scale * params->alpha / output->params.scale;
    QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha,
                       &data->output_shift_alpha);
    double identity_multiplier = input->params.scale / output->params.scale;
    QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity,
                       &data->output_shift_identity);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

template <KernelType kernel_type>
TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (kernel_type == kFixedPointOptimized) {
    if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
      static constexpr int kInputIntegerBits = 4;

      const double input_real_multiplier =
          input->params.scale *
          static_cast<double>(1 << (15 - kInputIntegerBits));

      const double q =
          std::frexp(input_real_multiplier, &data->input_left_shift);
      auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15)));
      data->input_multiplier = static_cast<int16_t>(q_fixed);

      int16_t input_range_radius =
          CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
      data->input_range_radius = input_range_radius;
    }
  }

  if (kernel_type == kGenericOptimized || kernel_type == kReference) {
    if (input->type == kTfLiteUInt8) {
      PopulateLookupTable<uint8_t>(
          data, input, output, [](float value) { return std::tanh(value); });
    } else if (input->type == kTfLiteInt8) {
      PopulateLookupTable<int8_t>(
          data, input, output, [](float value) { return std::tanh(value); });
    }
  }

  if (input->type == kTfLiteInt16) {
    static constexpr int kInputIntegerBits = 3;
    static constexpr int kOutputFractionalBits = 15;

    // These operators are implemented in fixed-point arithmetic,
    // which intrinsically wants symmetric ranges (zero_point==0)
    // and power-of-two scales (power-of-two is abbreviated below as POT).
    // While more general support would be possible by means of rescaling,
    // that would add some overhead and some loss of accuracy and wouldn't
    // be used at the moment as current quantized LSTM applications are
    // happy with symmetric, power-of-two-scales quantization. So we just
    // implement that narrow case only for now.
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

    int input_scale_log2_rounded;
    bool param_scale_pot =
        CheckedLog2(input->params.scale, &input_scale_log2_rounded);

    data->input_left_shift =
        (15 - kInputIntegerBits) + input_scale_log2_rounded;
    param_scale_pot &=
        (data->input_left_shift == 0 || data->input_left_shift == 1);

    if (!param_scale_pot) {
      // In case of a general (non-power-of-two) scale parameter we need to
      // do a rescaling. Magic constant 4096: the fixed-point tanh wants its
      // input in Q3.12 (kInputIntegerBits == 3 integer bits, 12 fractional
      // bits), so a real value must be multiplied by
      // 2^(15 - kInputIntegerBits) = 2^12 = 4096; folding that into the
      // input scale gives the multiplier below.
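      // Worked example (illustrative scale, not from a specific model): for
      // input scale s = 0.01, input_multiplier = (int32_t)(0.01 * 4096) = 40.
      // A raw int16 input v represents the real value v * 0.01; multiplying
      // by 40 gives roughly v * 0.01 * 4096, i.e. the same real value in
      // Q3.12 (one unit == 2^-12), which is what the fixed-point tanh
      // expects.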
      data->input_multiplier =
          static_cast<int32_t>(input->params.scale * 4096);
    }

    int output_scale_log2_rounded;
    TF_LITE_ENSURE(
        context,
        CheckedLog2(output->params.scale, &output_scale_log2_rounded));
    TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
                      -kOutputFractionalBits);
  }

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

template <KernelType kernel_type>
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (kernel_type == kFixedPointOptimized) {
    if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
      if (input->type == kTfLiteUInt8) {
        TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                          std::numeric_limits<uint8_t>::min());
      }
      if (input->type == kTfLiteInt8) {
        TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                          std::numeric_limits<int8_t>::min());
      }
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);

      static constexpr int kInputIntegerBits = 4;

      const double input_real_multiplier =
          input->params.scale *
          static_cast<double>(1 << (15 - kInputIntegerBits));

      const double q =
          std::frexp(input_real_multiplier, &data->input_left_shift);
      auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15)));
      data->input_multiplier = static_cast<int16_t>(q_fixed);

      int16_t input_range_radius =
          CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
      data->input_range_radius = input_range_radius;
    }
  }

  if (kernel_type == kGenericOptimized || kernel_type == kReference) {
    if (input->type == kTfLiteUInt8) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
      PopulateLookupTable<uint8_t>(data, input, output, [](float value) {
        return 1.0f / (1.0f + std::exp(-value));
      });
    } else if (input->type == kTfLiteInt8) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
      PopulateLookupTable<int8_t>(data, input, output, [](float value) {
        return 1.0f / (1.0f + std::exp(-value));
      });
    } else if (input->type == kTfLiteInt16) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 32768);
      TF_LITE_ENSURE(context, output->params.zero_point == 0);
    }
  }

  if (input->type == kTfLiteInt16) {
    static constexpr int kInputIntegerBits = 3;
    static constexpr int kOutputFractionalBits = 15;

    // See comments in TanhPrepare about requiring zero_point==0
    // and a power-of-two ("POT") scale.
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

    int input_scale_log2_rounded;
    bool param_scale_pot =
        CheckedLog2(input->params.scale, &input_scale_log2_rounded);

    data->input_left_shift =
        (15 - kInputIntegerBits) + input_scale_log2_rounded;
    param_scale_pot &= (data->input_left_shift == 0);

    if (!param_scale_pot) {
      // In case of a general (non-power-of-two) scale parameter we need to
      // do a rescaling. The magic constant 4096 is the same
      // 2^(15 - kInputIntegerBits) = 2^12 conversion to Q3.12 described
      // (with a worked example) in TanhPrepare above.
      data->input_multiplier =
          static_cast<int32_t>(input->params.scale * 4096);
    }

    int output_scale_log2_rounded;
    TF_LITE_ENSURE(
        context,
        CheckedLog2(output->params.scale, &output_scale_log2_rounded));
    TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
                      -kOutputFractionalBits);
  }

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data);
  SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  if (output->type == kTfLiteInt16) {
    TF_LITE_ENSURE(context, input->type == kTfLiteInt8 ||
                                input->type == kTfLiteUInt8 ||
                                input->type == kTfLiteInt16);
  } else {
    TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  }

  TF_LITE_ENSURE(context, NumDimensions(input) >= 1);

  if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
    switch (output->type) {
      case kTfLiteUInt8:
      case kTfLiteInt8:
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
        // Only apply when both input & output are uint8/int8 & build with
        // clang on aarch64.
        // TODO(b/143709993): Port to ARMv7 and other platforms.
        data->params.uint8_table1 = data->uint8_table1;
        data->params.uint8_table2 = data->uint8_table2;
        optimized_ops::PopulateSoftmaxUInt8LookupTable(
            &data->params, input->params.scale, params->beta);
        break;
#endif
      case kTfLiteInt16:
      default:
        data->params.table = data->table;
        optimized_ops::PopulateSoftmaxLookupTable(
            &data->params, input->params.scale, params->beta);
    }

    data->params.zero_point = output->params.zero_point;
    data->params.scale = output->params.scale;
  }

  if (input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

    data->params.exp_lut = data->exp_lut;
    // The exp LUT is only used on negative values; we consider exp(-10.0)
    // insignificant to the accumulation.
    gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0,
            data->params.exp_lut, data->kInt16LUTArraySize);
    data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut;
    gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0,
            data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize);
    data->params.zero_point = output->params.zero_point;
    data->params.scale = output->params.scale;

    double input_scale_beta_rescale =
        input->params.scale * params->beta /
        (10.0 / 65535.0);  // scale the input_diff such that [-65535, 0]
                           // correspond to [-10.0, 0.0]
    QuantizeMultiplier(input_scale_beta_rescale,
                       &data->params.input_multiplier,
                       &data->params.input_left_shift);
  }

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
  LogSoftmaxOpData* data =
      reinterpret_cast<LogSoftmaxOpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
    TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256);
    static const double kBeta = 1.0;
    if (input->type == kTfLiteUInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255);
      data->params.table = data->f_table;
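      // Why scale 16/256 and zero_point 255: log-softmax outputs lie in
      // (-inf, 0], and this fixed quantization maps the representable uint8
      // range onto roughly [-16, 0]. For example, q = 255 decodes to
      // (255 - 255) * 16/256 = 0.0, while q = 0 decodes to
      // (0 - 255) * 16/256 = -15.94, so anything below about -16 saturates.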
      optimized_ops::PopulateSoftmaxLookupTable(&data->params,
                                                input->params.scale, kBeta);
      data->params.zero_point = output->params.zero_point;
      data->params.scale = output->params.scale;
    }
    if (input->type == kTfLiteInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127);
      static const int kScaledDiffIntegerBits = 5;
      tflite::PreprocessLogSoftmaxScalingExp(
          kBeta, input->params.scale, kScaledDiffIntegerBits,
          &data->input_multiplier, &data->input_left_shift,
          &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift);
      data->reverse_scaling_right_shift *= -1;
      data->diff_min =
          -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits,
                                              data->input_left_shift);
    }
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const TfLiteTensor* alpha;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));
  PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type);
  output->type = input->type;

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      output->type == kTfLiteInt16) {
    // prelu(x) = x if x >= 0 else x * alpha.
    // So if we translate that for quantized computation:
    //
    // input_float = (input_q - input_zp) * input_scale
    // output_float = (output_q - output_zp) * output_scale
    // alpha_float = (alpha_q - alpha_zp) * alpha_scale
    //
    // When input_q - input_zp >= 0:
    //   output_q = (input_q - input_zp) * input_scale / output_scale
    //              + output_zp
    // else:
    //   output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale
    //              * alpha_scale / output_scale + output_zp
    //
    // So for input_q - input_zp >= 0:
    //   output real multiplier 1 is input_scale / output_scale;
    // for input_q - input_zp < 0:
    //   output real multiplier 2 is input_scale * alpha_scale / output_scale.
    double real_multiplier_1 = input->params.scale / output->params.scale;
    double real_multiplier_2 =
        input->params.scale * alpha->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1,
                       &data->output_shift_1);
    QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2,
                       &data->output_shift_2);
  }

  data->requires_broadcast = !HaveSameShapes(input, alpha);
  // PRelu (parametric Relu) shares the same alpha value on "shared axis".
  // This means it's always required to "broadcast" alpha values in PRelu.
  TfLiteIntArray* output_size = nullptr;
  TF_LITE_ENSURE_OK(
      context,
      CalculateShapeForBroadcast(context, input, alpha, &output_size));

  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_size));
  // After broadcasting, the output shape should always be the same as the
  // input shape.
  TF_LITE_ENSURE(context, HaveSameShapes(input, output));

  return kTfLiteOk;
}

TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input),
                          GetTensorShape(output),
                          GetTensorData<float>(output));
    } break;
    // TODO(renjieliu): We may revisit the quantization calculation logic,
    // the unbounded upper limit is actually hard to quantize.
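    // QuantizedReluX (above) turns the float clamp range into quantized
    // bounds. Illustrative numbers (not from a specific model): for uint8
    // output with zero_point = 0 and scale = 0.5, act_min = 0 gives
    // quantized_activation_min = max(0, 0 + round(0 / 0.5)) = 0, and
    // act_max = +inf saturates quantized_activation_max at 255 (for Relu6,
    // act_max = 6 would instead give min(255, 0 + round(6 / 0.5)) = 12).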
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(),
                              input, output, data);
    } break;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(),
                             input, output, data);
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8 and int8 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}

TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input),
                           GetTensorShape(output),
                           GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(-1.0f, 1.0f, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8 and int8 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

template <KernelType kernel_type>
TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) {
  HardSwishData* data = static_cast<HardSwishData*>(node->user_data);

  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        optimized_ops::HardSwish(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      HardSwishParams& params = data->params;
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        optimized_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      HardSwishParams& params = data->params;
      if (kernel_type == kReference) {
        reference_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        optimized_ops::HardSwish(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      size_t elements = input->bytes / sizeof(float);
      const float* in = GetTensorData<float>(input);
      const float* in_end = in + elements;
      float* out = GetTensorData<float>(output);
      for (; in < in_end; in++, out++)
        *out = std::min(std::max(0.f, *in), 6.f);
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8:
      QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

template <KernelType kernel_type>
TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      } else {
        optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      TanhParams params;
      params.input_left_shift = data->input_left_shift;
      if (kernel_type == kReference || (data->input_multiplier > 0)) {
        reference_integer_ops::Tanh(
            data->input_multiplier, data->input_left_shift,
            GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      } else {
        optimized_ops::Tanh(
            params, GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      if (kernel_type == kFixedPointOptimized) {
        TanhParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Tanh16bitPrecision(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        EvalUsingLookupTable(data, input, output);
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      if (kernel_type == kFixedPointOptimized) {
        TanhParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Tanh16bitPrecision(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        EvalUsingLookupTable(data, input, output);
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int16 and int8 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

// Sigmoid is also known as "Logistic".
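// In float, logistic(x) = 1 / (1 + exp(-x)), which is the transform the 8-bit
// LUT in SigmoidPrepare encodes. For the int16 path, the checks enforced in
// SigmoidPrepare amount to a Q3.12 input and Q0.15 output convention. A
// worked example (hypothetical scale): with input scale 2^-12, CheckedLog2
// yields -12, so input_left_shift = (15 - 3) + (-12) = 0 and the pure
// power-of-two (rescaling-free) path is taken; output scale 1/32768 means
// one output unit is 2^-15, so the (0, 1) sigmoid range maps onto
// (0, 32768), saturating at the int16 max of 32767.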
template <KernelType kernel_type> TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, TfLiteSoftmaxParams* params) { SoftmaxParams op_params; op_params.beta = params->beta; optimized_ops::Softmax(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } template <typename In, typename Out> TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<In>(input), GetTensorShape(output), GetTensorData<Out>(output)); return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return 
      kTfLiteOk;
}

template <>
TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context,
                                                const TfLiteTensor* input,
                                                TfLiteTensor* output,
                                                SoftmaxOpData* data) {
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
  optimized_ops::SoftmaxInt8LUT(
      data->params, GetTensorShape(input), GetTensorData<uint8_t>(input),
      GetTensorShape(output), GetTensorData<uint8_t>(output));
#else
  optimized_ops::Softmax(data->params, GetTensorShape(input),
                         GetTensorData<uint8_t>(input), GetTensorShape(output),
                         GetTensorData<uint8_t>(output));
#endif
  return kTfLiteOk;
}

template <>
TfLiteStatus SoftmaxQuantized<int16_t, int16_t>(TfLiteContext* context,
                                                const TfLiteTensor* input,
                                                TfLiteTensor* output,
                                                SoftmaxOpData* data) {
  if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) {
    reference_ops::SoftmaxInt16(
        data->params, GetTensorShape(input), GetTensorData<int16_t>(input),
        GetTensorShape(output), GetTensorData<int16_t>(output));
    return kTfLiteOk;
  } else {
    TF_LITE_KERNEL_LOG(context,
                       "Only 1D, 2D, 3D and 4D tensors supported for int16 "
                       "input with int16 output, got %dD.",
                       NumDimensions(input));
    return kTfLiteError;
  }
}

TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data);
  SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (input->type) {
    case kTfLiteFloat32: {
      return SoftmaxFloat(context, input, output, params);
    }
    case kTfLiteUInt8: {
      switch (output->type) {
        case kTfLiteUInt8:
          return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output,
                                                    data);
        case kTfLiteInt16:
          return SoftmaxQuantized<uint8_t, int16_t>(context, input, output,
                                                    data);
        default:
          TF_LITE_KERNEL_LOG(context,
                             "Only uint8_t and int16_t outputs are supported "
                             "with uint8_t inputs currently, got %s.",
                             TfLiteTypeGetName(output->type));
          return kTfLiteError;
      }
    }
    case kTfLiteInt8: {
      switch (output->type) {
        case kTfLiteInt8:
          return SoftmaxQuantized<int8_t, int8_t>(context, input, output,
                                                  data);
        case kTfLiteInt16:
          return SoftmaxQuantized<int8_t, int16_t>(context, input, output,
                                                   data);
        default:
          TF_LITE_KERNEL_LOG(context,
                             "Only int8_t and int16_t outputs are supported "
                             "with int8_t inputs currently, got %s.",
                             TfLiteTypeGetName(output->type));
          return kTfLiteError;
      }
    }
    case kTfLiteInt16: {
      return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data);
    }
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8_t, int8_t and int16_t are "
                         "supported currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

template <KernelType kernel_type>
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
  const LogSoftmaxOpData* data =
      reinterpret_cast<LogSoftmaxOpData*>(node->user_data);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (input->type) {
    case kTfLiteFloat32: {
      SoftmaxParams op_params;
      if (kernel_type == kGenericOptimized) {
        optimized_ops::LogSoftmax(
            op_params, GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        reference_ops::LogSoftmax(
            op_params, GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      return kTfLiteOk;
    }
    case kTfLiteUInt8: {
      SoftmaxParams op_params = data->params;
      if (kernel_type == kGenericOptimized) {
        optimized_ops::LogSoftmax(
            op_params, input->params.scale, GetTensorShape(input),
GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); TfLiteTensor* output = GetOutput(context, node, 0); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), 
            GetTensorData<uint8_t>(input), GetTensorShape(alpha),
            GetTensorData<uint8_t>(alpha), GetTensorShape(output),
            GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      PreluParams op_params;
      op_params.input_offset = -input->params.zero_point;
      op_params.alpha_offset = -alpha->params.zero_point;
      op_params.output_offset = output->params.zero_point;
      op_params.output_multiplier_1 = data->output_multiplier_1;
      op_params.output_shift_1 = data->output_shift_1;
      op_params.output_multiplier_2 = data->output_multiplier_2;
      op_params.output_shift_2 = data->output_shift_2;
      if (data->requires_broadcast) {
        reference_ops::BroadcastPrelu4DSlow(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        reference_ops::Prelu(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

template <typename T>
void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output,
                       const LeakyReluOpData* data) {
  LeakyReluParams op_params;
  op_params.input_offset = input->params.zero_point;
  op_params.output_offset = output->params.zero_point;
  op_params.output_multiplier_alpha = data->output_multiplier_alpha;
  op_params.output_shift_alpha = data->output_shift_alpha;
  op_params.output_multiplier_identity = data->output_multiplier_identity;
  op_params.output_shift_identity = data->output_shift_identity;
  reference_ops::QuantizeLeakyRelu(
      op_params, GetTensorShape(input), GetTensorData<T>(input),
      GetTensorShape(output), GetTensorData<T>(output));
}

TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const auto* params =
      reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
  const LeakyReluOpData* data =
      reinterpret_cast<LeakyReluOpData*>(node->user_data);

  LeakyReluParams op_params;
  switch (input->type) {
    case kTfLiteFloat32: {
      op_params.alpha = params->alpha;
      optimized_ops::LeakyRelu(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      QuantizeLeakyRelu<uint8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      QuantizeLeakyRelu<int8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      QuantizeLeakyRelu<int16_t>(input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, int8, int16 and uint8 are supported currently, "
          "got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  // Use LUT to handle quantized elu path.
  if (input->type == kTfLiteInt8) {
    PopulateLookupTable<int8_t>(data, input, output, [](float value) {
      return value < 0.0f ? std::exp(value) - 1.0f : value;
    });
  }
  return GenericPrepare(context, node);
}

TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input),
                         GetTensorShape(output),
                         GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      OpData* data = reinterpret_cast<OpData*>(node->user_data);
      EvalUsingLookupTable(data, input, output);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context, "Only float32 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

}  // namespace activations

TfLiteRegistration* Register_ELU() {
  static TfLiteRegistration r = {activations::Init, activations::Free,
                                 activations::EluPrepare,
                                 activations::EluEval};
  return &r;
}

TfLiteRegistration* Register_RELU() {
  static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
                                 activations::ReluPrepare,
                                 activations::ReluEval};
  return &r;
}

TfLiteRegistration* Register_RELU_N1_TO_1() {
  static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
                                 activations::ReluPrepare,
                                 activations::Relu1Eval};
  return &r;
}

TfLiteRegistration* Register_RELU6() {
  static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree,
                                 activations::ReluPrepare,
                                 activations::Relu6Eval};
  return &r;
}

TfLiteRegistration* Register_TANH_REF() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::TanhPrepare<activations::kReference>,
      activations::TanhEval<activations::kReference>};
  return &r;
}

TfLiteRegistration* Register_TANH_GENERIC_OPT() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::TanhPrepare<activations::kGenericOptimized>,
      activations::TanhEval<activations::kGenericOptimized>};
  return &r;
}

TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::TanhPrepare<activations::kFixedPointOptimized>,
      activations::TanhEval<activations::kFixedPointOptimized>};
  return &r;
}

TfLiteRegistration* Register_TANH() {
  // TODO(b/134622898): Switch over from the LUT optimized method to the fixed
  // point optimized method when typical Android hardware performs better on
  // the latter one.
  return Register_TANH_GENERIC_OPT();
}

TfLiteRegistration* Register_LOGISTIC_REF() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::SigmoidPrepare<activations::kReference>,
      activations::SigmoidEval<activations::kReference>};
  return &r;
}

TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::SigmoidPrepare<activations::kGenericOptimized>,
      activations::SigmoidEval<activations::kGenericOptimized>};
  return &r;
}

TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() {
  static TfLiteRegistration r = {
      activations::Init, activations::Free,
      activations::SigmoidPrepare<activations::kFixedPointOptimized>,
      activations::SigmoidEval<activations::kFixedPointOptimized>};
  return &r;
}

TfLiteRegistration* Register_LOGISTIC() {
  // TODO(b/134622898): Switch over from the LUT optimized method to the fixed
  // point optimized method when typical Android hardware performs better on
  // the latter one.
return Register_LOGISTIC_GENERIC_OPT(); } TfLiteRegistration* Register_SOFTMAX() { static TfLiteRegistration r = { activations::SoftmaxInit, activations::SoftmaxFree, activations::SoftmaxPrepare, activations::SoftmaxEval}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX_REF() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_PRELU_REF() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_PRELU() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LEAKY_RELU() { static TfLiteRegistration r = { activations::LeakyReluInit, activations::LeakyReluFree, activations::LeakyReluPrepare, activations::LeakyReluEval}; return &r; } TfLiteRegistration* Register_HARD_SWISH() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_HARD_SWISH_REF() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kReference>}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. data->params.uint8_table1 = data->uint8_table1; data->params.uint8_table2 = data->uint8_table2; optimized_ops::PopulateSoftmaxUInt8LookupTable( &data->params, input->params.scale, params->beta); break; #endif case kTfLiteInt16: default: data->params.table = data->table; optimized_ops::PopulateSoftmaxLookupTable( &data->params, input->params.scale, params->beta); } data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); data->params.exp_lut = data->exp_lut; // exp LUT only used on nagative values // we consider exp(-10.0) is insignificant to accumulation gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0, data->params.exp_lut, data->kInt16LUTArraySize); data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut; gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0, data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; double input_scale_beta_rescale = input->params.scale * params->beta / (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] // correspond to [-10.0, 0.0] QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier, &data->params.input_left_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256); static const 
double kBeta = 1.0; if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255); data->params.table = data->f_table; optimized_ops::PopulateSoftmaxLookupTable(&data->params, input->params.scale, kBeta); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127); static const int kScaledDiffIntegerBits = 5; tflite::PreprocessLogSoftmaxScalingExp( kBeta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift, &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift); data->reverse_scaling_right_shift *= -1; data->diff_min = -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, data->input_left_shift); } } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* alpha; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha)); PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type); output->type = input->type; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { // prelu(x) = x if x >= 0 else x * alpha. // So if we translate that for quantized computation: // // input_float = (input_q - input_zp) * input_scale // output_float = (output_q - output_zp) * output_scale // alpha_float = (alpha_q - alpha_zp) * alpha_scale // // When input_q - input_zp >= 0: // ouput_q = (input_q - input_zp) * input_scale / output_scale + output_q // else: // output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale // * alpha_scale / output_scale + output_q // // So for input_q - input_zp >= 0: // output real multiplier 1 is input_scale / output_scale; // for input_q - input_zp < 0: // output real multiplier 2 is input_scale * alpha_scale/ output_scale. double real_multiplier_1 = input->params.scale / output->params.scale; double real_multiplier_2 = input->params.scale * alpha->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1, &data->output_shift_1); QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2, &data->output_shift_2); } data->requires_broadcast = !HaveSameShapes(input, alpha); // PRelu (parameteric Relu) shares the same alpha value on "shared axis". // This means it's always required to "broadcast" alpha values in PRelu. TfLiteIntArray* output_size = nullptr; TF_LITE_ENSURE_OK( context, CalculateShapeForBroadcast(context, input, alpha, &output_size)); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // After broadcasting, the output shape should always be the same as the // input shape. 
TF_LITE_ENSURE(context, HaveSameShapes(input, output)); return kTfLiteOk; } TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(-1, 1, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int8 supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), 
GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f); return kTfLiteOk; } break; case kTfLiteUInt8: QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteInt16: { TanhParams params; params.input_left_shift = data->input_left_shift; if (kernel_type == kReference || (data->input_multiplier > 0)) { reference_integer_ops::Tanh( data->input_multiplier, data->input_left_shift, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { optimized_ops::Tanh( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only 
float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } // Sigmoid is also know as "Logistic". template <KernelType kernel_type> TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, TfLiteSoftmaxParams* params) { SoftmaxParams op_params; op_params.beta = params->beta; optimized_ops::Softmax(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } template <typename In, typename Out> TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<In>(input), GetTensorShape(output), GetTensorData<Out>(output)); return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), 
GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) { reference_ops::SoftmaxInt16( data->params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Only 1D, 2D, 3D and 4D tensors supported for int16 " "input with int16 output, got %dD.", NumDimensions(input)); return kTfLiteError; } } TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), 
GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); const TfLiteTensor* alpha; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; 
op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and uint8 and int8 are supported currently, got %d.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output, const LeakyReluOpData* data) { LeakyReluParams op_params; op_params.input_offset = input->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_alpha = data->output_multiplier_alpha; op_params.output_shift_alpha = data->output_shift_alpha; op_params.output_multiplier_identity = data->output_multiplier_identity; op_params.output_shift_identity = data->output_shift_identity; reference_ops::QuantizeLeakyRelu( op_params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); const LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); LeakyReluParams op_params; switch (input->type) { case kTfLiteFloat32: { op_params.alpha = params->alpha; optimized_ops::LeakyRelu( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizeLeakyRelu<uint8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizeLeakyRelu<int8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt16: { QuantizeLeakyRelu<int16_t>(input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, int8, int16 and uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return 
kTfLiteError; } } TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // Use LUT to handle quantized elu path. if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return value < 0.0 ? std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); } TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } } // namespace activations TfLiteRegistration* Register_ELU() { static TfLiteRegistration r = {activations::Init, activations::Free, activations::EluPrepare, activations::EluEval}; return &r; } TfLiteRegistration* Register_RELU() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::ReluEval}; return &r; } TfLiteRegistration* Register_RELU_N1_TO_1() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu1Eval}; return &r; } TfLiteRegistration* Register_RELU6() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu6Eval}; return &r; } TfLiteRegistration* Register_TANH_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kReference>, activations::TanhEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_TANH_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kGenericOptimized>, activations::TanhEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kFixedPointOptimized>, activations::TanhEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_TANH() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
return Register_TANH_GENERIC_OPT(); } TfLiteRegistration* Register_LOGISTIC_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kReference>, activations::SigmoidEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kGenericOptimized>, activations::SigmoidEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kFixedPointOptimized>, activations::SigmoidEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. return Register_LOGISTIC_GENERIC_OPT(); } TfLiteRegistration* Register_SOFTMAX() { static TfLiteRegistration r = { activations::SoftmaxInit, activations::SoftmaxFree, activations::SoftmaxPrepare, activations::SoftmaxEval}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX_REF() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_PRELU_REF() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_PRELU() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LEAKY_RELU() { static TfLiteRegistration r = { activations::LeakyReluInit, activations::LeakyReluFree, activations::LeakyReluPrepare, activations::LeakyReluEval}; return &r; } TfLiteRegistration* Register_HARD_SWISH() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_HARD_SWISH_REF() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kReference>}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
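A recurring pattern in the listing above is the 8-bit lookup-table path: for uint8/int8 inputs, Prepare() precomputes fn(x) for all 256 possible quantized values via PopulateLookupTable, so TanhEval/SigmoidEval/EluEval reduce to one table index per element (EvalUsingLookupTable, plus the NEON-vectorized aarch64_lookup_vector variant at the top of this excerpt). A minimal sketch of that precomputation under standard affine quantization (real = scale * (q - zero_point)); BuildInt8Lut is a hypothetical name, not the TFLite helper:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>

// Precompute out_q = quantize(fn(dequantize(in_q))) for every int8 value.
// The table is indexed by the byte pattern of the input, as in the kernel.
void BuildInt8Lut(float in_scale, int32_t in_zp,
                  float out_scale, int32_t out_zp,
                  const std::function<float(float)>& fn,
                  uint8_t table[256]) {
  for (int q_in = -128; q_in <= 127; ++q_in) {
    const float real_in = in_scale * (q_in - in_zp);   // dequantize
    const float real_out = fn(real_in);                // e.g. std::tanh
    int32_t q_out =
        out_zp + static_cast<int32_t>(std::lround(real_out / out_scale));
    q_out = std::min(127, std::max(-128, q_out));      // clamp to int8 range
    table[static_cast<uint8_t>(q_in)] = static_cast<uint8_t>(q_out);
  }
}

Eval() then amounts to output_data[i] = table[input_data[i]] over the flat buffer, which is exactly the scalar postamble loop visible at the start of this excerpt.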
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
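The func_before/func_after pair above isolates the fix this row records: GetInput()/GetOutput() return a tensor pointer with no way to signal failure, whereas GetInputSafe()/GetOutputSafe() return a TfLiteStatus and let Prepare() bail out before touching the tensor. A minimal sketch of why the checked variant matters when a malformed model carries an out-of-range tensor index; the Graph/Tensor types and helpers below are illustrative stand-ins, not the real TfLiteContext layout:

#include <cstdint>

struct Tensor { float* data; int bytes; };
struct Graph  { Tensor* tensors; int num_tensors; };

// Unchecked, mirroring GetInput(): a hostile index silently yields a
// pointer past the tensor array, and later reads through it become
// CWE-125 out-of-bounds reads.
Tensor* GetTensorUnchecked(Graph* g, int index) {
  return &g->tensors[index];
}

// Checked, mirroring GetInputSafe(): validate first, report failure via
// the return value, and only then hand out the pointer.
bool GetTensorSafe(Graph* g, int index, Tensor** out) {
  if (index < 0 || index >= g->num_tensors) return false;
  *out = &g->tensors[index];
  return true;
}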
{'added': [(255, ' const TfLiteTensor* input;'), (256, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (257, ' TfLiteTensor* output;'), (258, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (277, ' const TfLiteTensor* input;'), (278, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (279, ' TfLiteTensor* output;'), (280, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (307, ' TfLiteTensor* output;'), (308, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (313, ' const TfLiteTensor* input;'), (314, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (346, ' const TfLiteTensor* input;'), (347, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (348, ' TfLiteTensor* output;'), (349, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (377, ' const TfLiteTensor* input;'), (378, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (379, ' TfLiteTensor* output;'), (380, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (464, ' const TfLiteTensor* input;'), (465, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (466, ' TfLiteTensor* output;'), (467, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (561, ' const TfLiteTensor* input;'), (562, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (563, ' TfLiteTensor* output;'), (564, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (631, ' const TfLiteTensor* input;'), (632, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (633, ' TfLiteTensor* output;'), (634, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (669, ' const TfLiteTensor* input;'), (670, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (671, ' TfLiteTensor* output;'), (672, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (673, ' const TfLiteTensor* alpha;'), (674, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'), (726, ' const TfLiteTensor* input;'), (727, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (728, ' TfLiteTensor* output;'), (729, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (756, ' const TfLiteTensor* input;'), (757, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (758, ' TfLiteTensor* output;'), (759, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (789, ' const TfLiteTensor* input;'), (790, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (791, ' TfLiteTensor* output;'), (792, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (842, ' const TfLiteTensor* input;'), (843, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (844, ' TfLiteTensor* output;'), (845, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (875, ' const TfLiteTensor* input;'), (876, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (877, ' TfLiteTensor* output;'), (878, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (951, ' const TfLiteTensor* input;'), (952, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (953, ' TfLiteTensor* output;'), (954, ' TF_LITE_ENSURE_OK(context, 
GetOutputSafe(context, node, 0, &output));'), (1101, ' const TfLiteTensor* input;'), (1102, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1103, ' TfLiteTensor* output;'), (1104, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1158, ' const TfLiteTensor* input;'), (1159, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1160, ' TfLiteTensor* output;'), (1161, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1221, ' const TfLiteTensor* input;'), (1222, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1223, ' const TfLiteTensor* alpha;'), (1224, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'), (1225, ' TfLiteTensor* output;'), (1226, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1335, ' const TfLiteTensor* input;'), (1336, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1337, ' TfLiteTensor* output;'), (1338, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1375, ' const TfLiteTensor* input;'), (1376, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1377, ' TfLiteTensor* output;'), (1378, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1391, ' const TfLiteTensor* input;'), (1392, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1393, ' TfLiteTensor* output;'), (1394, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));')], 'deleted': [(255, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (256, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (275, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (276, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (303, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (308, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (340, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (341, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (369, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (370, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (454, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (455, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (549, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (550, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (617, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (618, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (653, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (654, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (655, ' const TfLiteTensor* alpha = GetInput(context, node, 1);'), (707, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (708, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (735, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (736, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (766, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (767, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (817, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (818, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (848, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (849, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (922, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (923, ' 
TfLiteTensor* output = GetOutput(context, node, 0);'), (1070, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1071, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1125, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1126, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1186, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1187, ' const TfLiteTensor* alpha = GetInput(context, node, 1);'), (1188, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1297, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1298, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1335, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1336, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1349, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1350, ' TfLiteTensor* output = GetOutput(context, node, 0);')]}
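The diff field above encodes the change as two lists of (line number, text) pairs; the 'added' positions evidently index into code_after and the 'deleted' positions into code_before. Every one of the 44 deleted lines is a one-line GetInput()/GetOutput() assignment, and each is replaced by a two-line declaration plus TF_LITE_ENSURE_OK(GetInputSafe/GetOutputSafe) pair, which is why num_lines_added (88) is exactly twice num_lines_deleted (44).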
num_lines_added: 88
num_lines_deleted: 44
num_lines_in_file: 1,316
num_tokens_in_file: 9,729
num_lines_in_method: 9
num_tokens_in_method: 93
method_complexity: 1
repo: https://github.com/tensorflow/tensorflow
cve_id: CVE-2020-15211
cwe_id: CWE-125

id: 869
file_name: print-wb.c
programming_language: C
method_name: wb_prep
/* * Copyright (c) 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <tcpdump-stdinc.h> #include "interface.h" #include "addrtoname.h" #include "extract.h" static const char tstr[] = "[|wb]"; /* XXX need to add byte-swapping macros! */ /* XXX - you mean like the ones in "extract.h"? */ /* * Largest packet size. Everything should fit within this space. * For instance, multiline objects are sent piecewise. */ #define MAXFRAMESIZE 1024 /* * Multiple drawing ops can be sent in one packet. Each one starts on a * an even multiple of DOP_ALIGN bytes, which must be a power of two. */ #define DOP_ALIGN 4 #define DOP_ROUNDUP(x) ((((int)(x)) + (DOP_ALIGN - 1)) & ~(DOP_ALIGN - 1)) #define DOP_NEXT(d)\ ((struct dophdr *)((u_char *)(d) + \ DOP_ROUNDUP(EXTRACT_16BITS(&(d)->dh_len) + sizeof(*(d))))) /* * Format of the whiteboard packet header. * The transport level header. */ struct pkt_hdr { uint32_t ph_src; /* site id of source */ uint32_t ph_ts; /* time stamp (for skew computation) */ uint16_t ph_version; /* version number */ u_char ph_type; /* message type */ u_char ph_flags; /* message flags */ }; /* Packet types */ #define PT_DRAWOP 0 /* drawing operation */ #define PT_ID 1 /* announcement packet */ #define PT_RREQ 2 /* repair request */ #define PT_RREP 3 /* repair reply */ #define PT_KILL 4 /* terminate participation */ #define PT_PREQ 5 /* page vector request */ #define PT_PREP 7 /* page vector reply */ #ifdef PF_USER #undef PF_USER /* {Digital,Tru64} UNIX define this, alas */ #endif /* flags */ #define PF_USER 0x01 /* hint that packet has interactive data */ #define PF_VIS 0x02 /* only visible ops wanted */ struct PageID { uint32_t p_sid; /* session id of initiator */ uint32_t p_uid; /* page number */ }; struct dophdr { uint32_t dh_ts; /* sender's timestamp */ uint16_t dh_len; /* body length */ u_char dh_flags; u_char dh_type; /* body type */ /* body follows */ }; /* * Drawing op sub-types. */ #define DT_RECT 2 #define DT_LINE 3 #define DT_ML 4 #define DT_DEL 5 #define DT_XFORM 6 #define DT_ELL 7 #define DT_CHAR 8 #define DT_STR 9 #define DT_NOP 10 #define DT_PSCODE 11 #define DT_PSCOMP 12 #define DT_REF 13 #define DT_SKIP 14 #define DT_HOLE 15 #define DT_MAXTYPE 15 /* * A drawing operation. 
*/ struct pkt_dop { struct PageID pd_page; /* page that operations apply to */ uint32_t pd_sseq; /* start sequence number */ uint32_t pd_eseq; /* end sequence number */ /* drawing ops follow */ }; /* * A repair request. */ struct pkt_rreq { uint32_t pr_id; /* source id of drawops to be repaired */ struct PageID pr_page; /* page of drawops */ uint32_t pr_sseq; /* start seqno */ uint32_t pr_eseq; /* end seqno */ }; /* * A repair reply. */ struct pkt_rrep { uint32_t pr_id; /* original site id of ops */ struct pkt_dop pr_dop; /* drawing ops follow */ }; struct id_off { uint32_t id; uint32_t off; }; struct pgstate { uint32_t slot; struct PageID page; uint16_t nid; uint16_t rsvd; /* seqptr's */ }; /* * An announcement packet. */ struct pkt_id { uint32_t pi_mslot; struct PageID pi_mpage; /* current page */ struct pgstate pi_ps; /* seqptr's */ /* null-terminated site name */ }; struct pkt_preq { struct PageID pp_page; uint32_t pp_low; uint32_t pp_high; }; struct pkt_prep { uint32_t pp_n; /* size of pageid array */ /* pgstate's follow */ }; static int wb_id(netdissect_options *ndo, const struct pkt_id *id, u_int len) { int i; const char *cp; const struct id_off *io; char c; int nid; ND_PRINT((ndo, " wb-id:")); if (len < sizeof(*id) || !ND_TTEST(*id)) return (-1); len -= sizeof(*id); ND_PRINT((ndo, " %u/%s:%u (max %u/%s:%u) ", EXTRACT_32BITS(&id->pi_ps.slot), ipaddr_string(ndo, &id->pi_ps.page.p_sid), EXTRACT_32BITS(&id->pi_ps.page.p_uid), EXTRACT_32BITS(&id->pi_mslot), ipaddr_string(ndo, &id->pi_mpage.p_sid), EXTRACT_32BITS(&id->pi_mpage.p_uid))); nid = EXTRACT_16BITS(&id->pi_ps.nid); len -= sizeof(*io) * nid; io = (struct id_off *)(id + 1); cp = (char *)(io + nid); if (!ND_TTEST2(cp, len)) { ND_PRINT((ndo, "\"")); fn_print(ndo, (u_char *)cp, (u_char *)cp + len); ND_PRINT((ndo, "\"")); } c = '<'; for (i = 0; i < nid && ND_TTEST(*io); ++io, ++i) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } if (i >= nid) { ND_PRINT((ndo, ">")); return (0); } return (-1); } static int wb_rreq(netdissect_options *ndo, const struct pkt_rreq *rreq, u_int len) { ND_PRINT((ndo, " wb-rreq:")); if (len < sizeof(*rreq) || !ND_TTEST(*rreq)) return (-1); ND_PRINT((ndo, " please repair %s %s:%u<%u:%u>", ipaddr_string(ndo, &rreq->pr_id), ipaddr_string(ndo, &rreq->pr_page.p_sid), EXTRACT_32BITS(&rreq->pr_page.p_uid), EXTRACT_32BITS(&rreq->pr_sseq), EXTRACT_32BITS(&rreq->pr_eseq))); return (0); } static int wb_preq(netdissect_options *ndo, const struct pkt_preq *preq, u_int len) { ND_PRINT((ndo, " wb-preq:")); if (len < sizeof(*preq) || !ND_TTEST(*preq)) return (-1); ND_PRINT((ndo, " need %u/%s:%u", EXTRACT_32BITS(&preq->pp_low), ipaddr_string(ndo, &preq->pp_page.p_sid), EXTRACT_32BITS(&preq->pp_page.p_uid))); return (0); } static int wb_prep(netdissect_options *ndo, const struct pkt_prep *prep, u_int len) { int n; const struct pgstate *ps; const u_char *ep = ndo->ndo_snapend; ND_PRINT((ndo, " wb-prep:")); if (len < sizeof(*prep)) { return (-1); } n = EXTRACT_32BITS(&prep->pp_n); ps = (const struct pgstate *)(prep + 1); while (--n >= 0 && !ND_TTEST(*ps)) { const struct id_off *io, *ie; char c = '<'; ND_PRINT((ndo, " %u/%s:%u", EXTRACT_32BITS(&ps->slot), ipaddr_string(ndo, &ps->page.p_sid), EXTRACT_32BITS(&ps->page.p_uid))); io = (struct id_off *)(ps + 1); for (ie = io + ps->nid; io < ie && !ND_TTEST(*io); ++io) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } ND_PRINT((ndo, ">")); ps = (struct pgstate *)io; } return ((u_char *)ps 
<= ep? 0 : -1); } static const char *dopstr[] = { "dop-0!", "dop-1!", "RECT", "LINE", "ML", "DEL", "XFORM", "ELL", "CHAR", "STR", "NOP", "PSCODE", "PSCOMP", "REF", "SKIP", "HOLE", }; static int wb_dops(netdissect_options *ndo, const struct pkt_dop *dop, uint32_t ss, uint32_t es) { const struct dophdr *dh = (const struct dophdr *)((const u_char *)dop + sizeof(*dop)); ND_PRINT((ndo, " <")); for ( ; ss <= es; ++ss) { int t; if (!ND_TTEST(*dh)) { ND_PRINT((ndo, "%s", tstr)); break; } t = dh->dh_type; if (t > DT_MAXTYPE) ND_PRINT((ndo, " dop-%d!", t)); else { ND_PRINT((ndo, " %s", dopstr[t])); if (t == DT_SKIP || t == DT_HOLE) { uint32_t ts = EXTRACT_32BITS(&dh->dh_ts); ND_PRINT((ndo, "%d", ts - ss + 1)); if (ss > ts || ts > es) { ND_PRINT((ndo, "[|]")); if (ts < ss) return (0); } ss = ts; } } dh = DOP_NEXT(dh); } ND_PRINT((ndo, " >")); return (0); } static int wb_rrep(netdissect_options *ndo, const struct pkt_rrep *rrep, u_int len) { const struct pkt_dop *dop = &rrep->pr_dop; ND_PRINT((ndo, " wb-rrep:")); if (len < sizeof(*rrep) || !ND_TTEST(*rrep)) return (-1); len -= sizeof(*rrep); ND_PRINT((ndo, " for %s %s:%u<%u:%u>", ipaddr_string(ndo, &rrep->pr_id), ipaddr_string(ndo, &dop->pd_page.p_sid), EXTRACT_32BITS(&dop->pd_page.p_uid), EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); if (ndo->ndo_vflag) return (wb_dops(ndo, dop, EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); return (0); } static int wb_drawop(netdissect_options *ndo, const struct pkt_dop *dop, u_int len) { ND_PRINT((ndo, " wb-dop:")); if (len < sizeof(*dop) || !ND_TTEST(*dop)) return (-1); len -= sizeof(*dop); ND_PRINT((ndo, " %s:%u<%u:%u>", ipaddr_string(ndo, &dop->pd_page.p_sid), EXTRACT_32BITS(&dop->pd_page.p_uid), EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); if (ndo->ndo_vflag) return (wb_dops(ndo, dop, EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); return (0); } /* * Print whiteboard multicast packets. */ void wb_print(netdissect_options *ndo, register const void *hdr, register u_int len) { register const struct pkt_hdr *ph; ph = (const struct pkt_hdr *)hdr; if (len < sizeof(*ph) || !ND_TTEST(*ph)) { ND_PRINT((ndo, "%s", tstr)); return; } len -= sizeof(*ph); if (ph->ph_flags) ND_PRINT((ndo, "*")); switch (ph->ph_type) { case PT_KILL: ND_PRINT((ndo, " wb-kill")); return; case PT_ID: if (wb_id(ndo, (struct pkt_id *)(ph + 1), len) >= 0) return; break; case PT_RREQ: if (wb_rreq(ndo, (struct pkt_rreq *)(ph + 1), len) >= 0) return; break; case PT_RREP: if (wb_rrep(ndo, (struct pkt_rrep *)(ph + 1), len) >= 0) return; break; case PT_DRAWOP: if (wb_drawop(ndo, (struct pkt_dop *)(ph + 1), len) >= 0) return; break; case PT_PREQ: if (wb_preq(ndo, (struct pkt_preq *)(ph + 1), len) >= 0) return; break; case PT_PREP: if (wb_prep(ndo, (struct pkt_prep *)(ph + 1), len) >= 0) return; break; default: ND_PRINT((ndo, " wb-%d!", ph->ph_type)); return; } }
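The code_before listing above contains the defect this row records: the capture-bounds predicates are negated. ND_TTEST(x) is true when x lies entirely within the captured packet data, so a loop such as while (--n >= 0 && !ND_TTEST(*ps)) iterates precisely over records that are NOT known to be in bounds, and the EXTRACT_32BITS/ipaddr_string calls in the body read past the end of the snapshot (CWE-125); wb_id's !ND_TTEST2(cp, len) inverts the same check before fn_print. The code_after below simply drops the negations. A minimal sketch of the inverted guard, with hypothetical names standing in for the tcpdump macros:

#include <cstddef>
#include <cstdint>

// True when [p, p + n) lies inside the captured buffer [buf, buf + caplen).
static bool InBounds(const uint8_t* buf, size_t caplen,
                     const uint8_t* p, size_t n) {
  return p >= buf && n <= caplen &&
         static_cast<size_t>(p - buf) <= caplen - n;
}

void PrintRecords(const uint8_t* buf, size_t caplen, const uint8_t* p,
                  size_t record_size) {
  // Buggy shape (mirrors code_before): the '!' admits exactly the records
  // whose bytes were NOT verified, so the body reads unverified memory:
  //     while (!InBounds(buf, caplen, p, record_size)) { ... }
  // Fixed shape (mirrors code_after): process only verified records.
  while (InBounds(buf, caplen, p, record_size)) {
    // ... decode and print the record at p, then advance ...
    p += record_size;
  }
}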
/* * Copyright (c) 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <tcpdump-stdinc.h> #include "interface.h" #include "addrtoname.h" #include "extract.h" static const char tstr[] = "[|wb]"; /* XXX need to add byte-swapping macros! */ /* XXX - you mean like the ones in "extract.h"? */ /* * Largest packet size. Everything should fit within this space. * For instance, multiline objects are sent piecewise. */ #define MAXFRAMESIZE 1024 /* * Multiple drawing ops can be sent in one packet. Each one starts on a * an even multiple of DOP_ALIGN bytes, which must be a power of two. */ #define DOP_ALIGN 4 #define DOP_ROUNDUP(x) ((((int)(x)) + (DOP_ALIGN - 1)) & ~(DOP_ALIGN - 1)) #define DOP_NEXT(d)\ ((struct dophdr *)((u_char *)(d) + \ DOP_ROUNDUP(EXTRACT_16BITS(&(d)->dh_len) + sizeof(*(d))))) /* * Format of the whiteboard packet header. * The transport level header. */ struct pkt_hdr { uint32_t ph_src; /* site id of source */ uint32_t ph_ts; /* time stamp (for skew computation) */ uint16_t ph_version; /* version number */ u_char ph_type; /* message type */ u_char ph_flags; /* message flags */ }; /* Packet types */ #define PT_DRAWOP 0 /* drawing operation */ #define PT_ID 1 /* announcement packet */ #define PT_RREQ 2 /* repair request */ #define PT_RREP 3 /* repair reply */ #define PT_KILL 4 /* terminate participation */ #define PT_PREQ 5 /* page vector request */ #define PT_PREP 7 /* page vector reply */ #ifdef PF_USER #undef PF_USER /* {Digital,Tru64} UNIX define this, alas */ #endif /* flags */ #define PF_USER 0x01 /* hint that packet has interactive data */ #define PF_VIS 0x02 /* only visible ops wanted */ struct PageID { uint32_t p_sid; /* session id of initiator */ uint32_t p_uid; /* page number */ }; struct dophdr { uint32_t dh_ts; /* sender's timestamp */ uint16_t dh_len; /* body length */ u_char dh_flags; u_char dh_type; /* body type */ /* body follows */ }; /* * Drawing op sub-types. */ #define DT_RECT 2 #define DT_LINE 3 #define DT_ML 4 #define DT_DEL 5 #define DT_XFORM 6 #define DT_ELL 7 #define DT_CHAR 8 #define DT_STR 9 #define DT_NOP 10 #define DT_PSCODE 11 #define DT_PSCOMP 12 #define DT_REF 13 #define DT_SKIP 14 #define DT_HOLE 15 #define DT_MAXTYPE 15 /* * A drawing operation. 
*/ struct pkt_dop { struct PageID pd_page; /* page that operations apply to */ uint32_t pd_sseq; /* start sequence number */ uint32_t pd_eseq; /* end sequence number */ /* drawing ops follow */ }; /* * A repair request. */ struct pkt_rreq { uint32_t pr_id; /* source id of drawops to be repaired */ struct PageID pr_page; /* page of drawops */ uint32_t pr_sseq; /* start seqno */ uint32_t pr_eseq; /* end seqno */ }; /* * A repair reply. */ struct pkt_rrep { uint32_t pr_id; /* original site id of ops */ struct pkt_dop pr_dop; /* drawing ops follow */ }; struct id_off { uint32_t id; uint32_t off; }; struct pgstate { uint32_t slot; struct PageID page; uint16_t nid; uint16_t rsvd; /* seqptr's */ }; /* * An announcement packet. */ struct pkt_id { uint32_t pi_mslot; struct PageID pi_mpage; /* current page */ struct pgstate pi_ps; /* seqptr's */ /* null-terminated site name */ }; struct pkt_preq { struct PageID pp_page; uint32_t pp_low; uint32_t pp_high; }; struct pkt_prep { uint32_t pp_n; /* size of pageid array */ /* pgstate's follow */ }; static int wb_id(netdissect_options *ndo, const struct pkt_id *id, u_int len) { int i; const char *cp; const struct id_off *io; char c; int nid; ND_PRINT((ndo, " wb-id:")); if (len < sizeof(*id) || !ND_TTEST(*id)) return (-1); len -= sizeof(*id); ND_PRINT((ndo, " %u/%s:%u (max %u/%s:%u) ", EXTRACT_32BITS(&id->pi_ps.slot), ipaddr_string(ndo, &id->pi_ps.page.p_sid), EXTRACT_32BITS(&id->pi_ps.page.p_uid), EXTRACT_32BITS(&id->pi_mslot), ipaddr_string(ndo, &id->pi_mpage.p_sid), EXTRACT_32BITS(&id->pi_mpage.p_uid))); nid = EXTRACT_16BITS(&id->pi_ps.nid); len -= sizeof(*io) * nid; io = (struct id_off *)(id + 1); cp = (char *)(io + nid); if (ND_TTEST2(cp, len)) { ND_PRINT((ndo, "\"")); fn_print(ndo, (u_char *)cp, (u_char *)cp + len); ND_PRINT((ndo, "\"")); } c = '<'; for (i = 0; i < nid && ND_TTEST(*io); ++io, ++i) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } if (i >= nid) { ND_PRINT((ndo, ">")); return (0); } return (-1); } static int wb_rreq(netdissect_options *ndo, const struct pkt_rreq *rreq, u_int len) { ND_PRINT((ndo, " wb-rreq:")); if (len < sizeof(*rreq) || !ND_TTEST(*rreq)) return (-1); ND_PRINT((ndo, " please repair %s %s:%u<%u:%u>", ipaddr_string(ndo, &rreq->pr_id), ipaddr_string(ndo, &rreq->pr_page.p_sid), EXTRACT_32BITS(&rreq->pr_page.p_uid), EXTRACT_32BITS(&rreq->pr_sseq), EXTRACT_32BITS(&rreq->pr_eseq))); return (0); } static int wb_preq(netdissect_options *ndo, const struct pkt_preq *preq, u_int len) { ND_PRINT((ndo, " wb-preq:")); if (len < sizeof(*preq) || !ND_TTEST(*preq)) return (-1); ND_PRINT((ndo, " need %u/%s:%u", EXTRACT_32BITS(&preq->pp_low), ipaddr_string(ndo, &preq->pp_page.p_sid), EXTRACT_32BITS(&preq->pp_page.p_uid))); return (0); } static int wb_prep(netdissect_options *ndo, const struct pkt_prep *prep, u_int len) { int n; const struct pgstate *ps; const u_char *ep = ndo->ndo_snapend; ND_PRINT((ndo, " wb-prep:")); if (len < sizeof(*prep)) { return (-1); } n = EXTRACT_32BITS(&prep->pp_n); ps = (const struct pgstate *)(prep + 1); while (--n >= 0 && ND_TTEST(*ps)) { const struct id_off *io, *ie; char c = '<'; ND_PRINT((ndo, " %u/%s:%u", EXTRACT_32BITS(&ps->slot), ipaddr_string(ndo, &ps->page.p_sid), EXTRACT_32BITS(&ps->page.p_uid))); io = (struct id_off *)(ps + 1); for (ie = io + ps->nid; io < ie && ND_TTEST(*io); ++io) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } ND_PRINT((ndo, ">")); ps = (struct pgstate *)io; } return ((u_char *)ps <= 
ep? 0 : -1); } static const char *dopstr[] = { "dop-0!", "dop-1!", "RECT", "LINE", "ML", "DEL", "XFORM", "ELL", "CHAR", "STR", "NOP", "PSCODE", "PSCOMP", "REF", "SKIP", "HOLE", }; static int wb_dops(netdissect_options *ndo, const struct pkt_dop *dop, uint32_t ss, uint32_t es) { const struct dophdr *dh = (const struct dophdr *)((const u_char *)dop + sizeof(*dop)); ND_PRINT((ndo, " <")); for ( ; ss <= es; ++ss) { int t; if (!ND_TTEST(*dh)) { ND_PRINT((ndo, "%s", tstr)); break; } t = dh->dh_type; if (t > DT_MAXTYPE) ND_PRINT((ndo, " dop-%d!", t)); else { ND_PRINT((ndo, " %s", dopstr[t])); if (t == DT_SKIP || t == DT_HOLE) { uint32_t ts = EXTRACT_32BITS(&dh->dh_ts); ND_PRINT((ndo, "%d", ts - ss + 1)); if (ss > ts || ts > es) { ND_PRINT((ndo, "[|]")); if (ts < ss) return (0); } ss = ts; } } dh = DOP_NEXT(dh); } ND_PRINT((ndo, " >")); return (0); } static int wb_rrep(netdissect_options *ndo, const struct pkt_rrep *rrep, u_int len) { const struct pkt_dop *dop = &rrep->pr_dop; ND_PRINT((ndo, " wb-rrep:")); if (len < sizeof(*rrep) || !ND_TTEST(*rrep)) return (-1); len -= sizeof(*rrep); ND_PRINT((ndo, " for %s %s:%u<%u:%u>", ipaddr_string(ndo, &rrep->pr_id), ipaddr_string(ndo, &dop->pd_page.p_sid), EXTRACT_32BITS(&dop->pd_page.p_uid), EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); if (ndo->ndo_vflag) return (wb_dops(ndo, dop, EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); return (0); } static int wb_drawop(netdissect_options *ndo, const struct pkt_dop *dop, u_int len) { ND_PRINT((ndo, " wb-dop:")); if (len < sizeof(*dop) || !ND_TTEST(*dop)) return (-1); len -= sizeof(*dop); ND_PRINT((ndo, " %s:%u<%u:%u>", ipaddr_string(ndo, &dop->pd_page.p_sid), EXTRACT_32BITS(&dop->pd_page.p_uid), EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); if (ndo->ndo_vflag) return (wb_dops(ndo, dop, EXTRACT_32BITS(&dop->pd_sseq), EXTRACT_32BITS(&dop->pd_eseq))); return (0); } /* * Print whiteboard multicast packets. */ void wb_print(netdissect_options *ndo, register const void *hdr, register u_int len) { register const struct pkt_hdr *ph; ph = (const struct pkt_hdr *)hdr; if (len < sizeof(*ph) || !ND_TTEST(*ph)) { ND_PRINT((ndo, "%s", tstr)); return; } len -= sizeof(*ph); if (ph->ph_flags) ND_PRINT((ndo, "*")); switch (ph->ph_type) { case PT_KILL: ND_PRINT((ndo, " wb-kill")); return; case PT_ID: if (wb_id(ndo, (struct pkt_id *)(ph + 1), len) >= 0) return; break; case PT_RREQ: if (wb_rreq(ndo, (struct pkt_rreq *)(ph + 1), len) >= 0) return; break; case PT_RREP: if (wb_rrep(ndo, (struct pkt_rrep *)(ph + 1), len) >= 0) return; break; case PT_DRAWOP: if (wb_drawop(ndo, (struct pkt_dop *)(ph + 1), len) >= 0) return; break; case PT_PREQ: if (wb_preq(ndo, (struct pkt_preq *)(ph + 1), len) >= 0) return; break; case PT_PREP: if (wb_prep(ndo, (struct pkt_prep *)(ph + 1), len) >= 0) return; break; default: ND_PRINT((ndo, " wb-%d!", ph->ph_type)); return; } }
wb_prep(netdissect_options *ndo, const struct pkt_prep *prep, u_int len) { int n; const struct pgstate *ps; const u_char *ep = ndo->ndo_snapend; ND_PRINT((ndo, " wb-prep:")); if (len < sizeof(*prep)) { return (-1); } n = EXTRACT_32BITS(&prep->pp_n); ps = (const struct pgstate *)(prep + 1); while (--n >= 0 && !ND_TTEST(*ps)) { const struct id_off *io, *ie; char c = '<'; ND_PRINT((ndo, " %u/%s:%u", EXTRACT_32BITS(&ps->slot), ipaddr_string(ndo, &ps->page.p_sid), EXTRACT_32BITS(&ps->page.p_uid))); io = (struct id_off *)(ps + 1); for (ie = io + ps->nid; io < ie && !ND_TTEST(*io); ++io) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } ND_PRINT((ndo, ">")); ps = (struct pgstate *)io; } return ((u_char *)ps <= ep? 0 : -1); }
wb_prep(netdissect_options *ndo, const struct pkt_prep *prep, u_int len) { int n; const struct pgstate *ps; const u_char *ep = ndo->ndo_snapend; ND_PRINT((ndo, " wb-prep:")); if (len < sizeof(*prep)) { return (-1); } n = EXTRACT_32BITS(&prep->pp_n); ps = (const struct pgstate *)(prep + 1); while (--n >= 0 && ND_TTEST(*ps)) { const struct id_off *io, *ie; char c = '<'; ND_PRINT((ndo, " %u/%s:%u", EXTRACT_32BITS(&ps->slot), ipaddr_string(ndo, &ps->page.p_sid), EXTRACT_32BITS(&ps->page.p_uid))); io = (struct id_off *)(ps + 1); for (ie = io + ps->nid; io < ie && ND_TTEST(*io); ++io) { ND_PRINT((ndo, "%c%s:%u", c, ipaddr_string(ndo, &io->id), EXTRACT_32BITS(&io->off))); c = ','; } ND_PRINT((ndo, ">")); ps = (struct pgstate *)io; } return ((u_char *)ps <= ep? 0 : -1); }
{'added': [(204, '\tif (ND_TTEST2(cp, len)) {'), (269, '\twhile (--n >= 0 && ND_TTEST(*ps)) {'), (278, '\t\tfor (ie = io + ps->nid; io < ie && ND_TTEST(*io); ++io) {')], 'deleted': [(204, '\tif (!ND_TTEST2(cp, len)) {'), (269, '\twhile (--n >= 0 && !ND_TTEST(*ps)) {'), (278, '\t\tfor (ie = io + ps->nid; io < ie && !ND_TTEST(*io); ++io) {')]}
3
3
298
1,873
30
261
7
https://github.com/the-tcpdump-group/tcpdump
CVE-2015-3138
CWE-20
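The record above is the CVE-2015-3138 fix in tcpdump's print-wb.c, and the diff field is the whole of it: three truncation checks had their sense inverted (`!ND_TTEST2(cp, len)`, `!ND_TTEST(*ps)` and `!ND_TTEST(*io)` became the positive tests), so before the fix the dissector printed and walked data exactly when it was not fully captured. Below is a minimal standalone sketch of the corrected idiom, assuming only that the ND_TTEST-style macros test whether bytes lie within the captured snapshot; the helper and variable names are illustrative, not taken from the source.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for tcpdump's snapshot bound: bytes at or past `snapend`
 * were not captured and must never be dereferenced. */
static int
within_capture(const unsigned char *p, size_t len, const unsigned char *snapend)
{
    return p <= snapend && (size_t)(snapend - p) >= len;
}

int
main(void)
{
    unsigned char pkt[16] = "abcdefghijklmno";
    const unsigned char *snapend = pkt + 8;     /* only 8 bytes captured */

    /* Fixed logic: dissect a field only when it is fully captured. */
    if (within_capture(pkt, 8, snapend))
        printf("first field: %.8s\n", (const char *)pkt);

    /* The pre-fix code effectively tested `!within_capture(...)`, i.e. it
     * selected the printing/walking path exactly when the capture was
     * truncated, reading past the end of the captured data. */
    if (!within_capture(pkt + 4, 8, snapend))
        printf("second field truncated; stop dissecting\n");
    return 0;
}

The design point the fix restores is that a bounds test must gate the read positively: failing the test should stop dissection, never select it.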
2,266
array.c
C
mrb_ary_shift_m
/* ** array.c - Array class ** ** See Copyright Notice in mruby.h */ #include <mruby.h> #include <mruby/array.h> #include <mruby/class.h> #include <mruby/string.h> #include <mruby/range.h> #include <mruby/proc.h> #include <mruby/presym.h> #include "value_array.h" #define ARY_DEFAULT_LEN 4 #define ARY_SHRINK_RATIO 5 /* must be larger than 2 */ #define ARY_C_MAX_SIZE (SIZE_MAX / sizeof(mrb_value)) #define ARY_MAX_SIZE ((mrb_int)((ARY_C_MAX_SIZE < (size_t)MRB_INT_MAX) ? ARY_C_MAX_SIZE : MRB_INT_MAX-1)) static struct RArray* ary_new_capa(mrb_state *mrb, mrb_int capa) { struct RArray *a; size_t blen; if (capa > ARY_MAX_SIZE) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } blen = capa * sizeof(mrb_value); a = MRB_OBJ_ALLOC(mrb, MRB_TT_ARRAY, mrb->array_class); if (capa <= MRB_ARY_EMBED_LEN_MAX) { ARY_SET_EMBED_LEN(a, 0); } else { a->as.heap.ptr = (mrb_value *)mrb_malloc(mrb, blen); a->as.heap.aux.capa = capa; a->as.heap.len = 0; } return a; } MRB_API mrb_value mrb_ary_new_capa(mrb_state *mrb, mrb_int capa) { struct RArray *a = ary_new_capa(mrb, capa); return mrb_obj_value(a); } MRB_API mrb_value mrb_ary_new(mrb_state *mrb) { return mrb_ary_new_capa(mrb, 0); } /* * To copy array, use this instead of memcpy because of portability * * gcc on ARM may fail optimization of memcpy * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56620 * * gcc on MIPS also fail * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=39755 * * memcpy doesn't exist on freestanding environment * * If you optimize for binary size, use memcpy instead of this at your own risk * of above portability issue. * * See also https://togetter.com/li/462898 (Japanese) */ static inline void array_copy(mrb_value *dst, const mrb_value *src, mrb_int size) { mrb_int i; for (i = 0; i < size; i++) { dst[i] = src[i]; } } static struct RArray* ary_new_from_values(mrb_state *mrb, mrb_int size, const mrb_value *vals) { struct RArray *a = ary_new_capa(mrb, size); array_copy(ARY_PTR(a), vals, size); ARY_SET_LEN(a, size); return a; } MRB_API mrb_value mrb_ary_new_from_values(mrb_state *mrb, mrb_int size, const mrb_value *vals) { struct RArray *a = ary_new_from_values(mrb, size, vals); return mrb_obj_value(a); } MRB_API mrb_value mrb_assoc_new(mrb_state *mrb, mrb_value car, mrb_value cdr) { struct RArray *a; a = ary_new_capa(mrb, 2); ARY_PTR(a)[0] = car; ARY_PTR(a)[1] = cdr; ARY_SET_LEN(a, 2); return mrb_obj_value(a); } static void ary_fill_with_nil(mrb_value *ptr, mrb_int size) { mrb_value nil = mrb_nil_value(); while (size--) { *ptr++ = nil; } } static void ary_modify_check(mrb_state *mrb, struct RArray *a) { mrb_check_frozen(mrb, a); } static void ary_modify(mrb_state *mrb, struct RArray *a) { ary_modify_check(mrb, a); if (ARY_SHARED_P(a)) { mrb_shared_array *shared = a->as.heap.aux.shared; if (shared->refcnt == 1 && a->as.heap.ptr == shared->ptr) { a->as.heap.ptr = shared->ptr; a->as.heap.aux.capa = a->as.heap.len; mrb_free(mrb, shared); } else { mrb_value *ptr, *p; mrb_int len; p = a->as.heap.ptr; len = a->as.heap.len * sizeof(mrb_value); ptr = (mrb_value *)mrb_malloc(mrb, len); if (p) { array_copy(ptr, p, a->as.heap.len); } a->as.heap.ptr = ptr; a->as.heap.aux.capa = a->as.heap.len; mrb_ary_decref(mrb, shared); } ARY_UNSET_SHARED_FLAG(a); } } MRB_API void mrb_ary_modify(mrb_state *mrb, struct RArray* a) { mrb_write_barrier(mrb, (struct RBasic*)a); ary_modify(mrb, a); } static void ary_make_shared(mrb_state *mrb, struct RArray *a) { if (!ARY_SHARED_P(a) && !ARY_EMBED_P(a)) { mrb_shared_array *shared = (mrb_shared_array *)mrb_malloc(mrb, 
sizeof(mrb_shared_array)); mrb_value *ptr = a->as.heap.ptr; mrb_int len = a->as.heap.len; shared->refcnt = 1; if (a->as.heap.aux.capa > len) { a->as.heap.ptr = shared->ptr = (mrb_value *)mrb_realloc(mrb, ptr, sizeof(mrb_value)*len+1); } else { shared->ptr = ptr; } shared->len = len; a->as.heap.aux.shared = shared; ARY_SET_SHARED_FLAG(a); } } static void ary_expand_capa(mrb_state *mrb, struct RArray *a, mrb_int len) { mrb_int capa = ARY_CAPA(a); if (len > ARY_MAX_SIZE || len < 0) { size_error: mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } if (capa < ARY_DEFAULT_LEN) { capa = ARY_DEFAULT_LEN; } while (capa < len) { if (capa <= ARY_MAX_SIZE / 2) { capa *= 2; } else { capa = len; } } if (capa < len || capa > ARY_MAX_SIZE) { goto size_error; } if (ARY_EMBED_P(a)) { mrb_value *ptr = ARY_EMBED_PTR(a); mrb_int len = ARY_EMBED_LEN(a); mrb_value *expanded_ptr = (mrb_value *)mrb_malloc(mrb, sizeof(mrb_value)*capa); ARY_UNSET_EMBED_FLAG(a); array_copy(expanded_ptr, ptr, len); a->as.heap.len = len; a->as.heap.aux.capa = capa; a->as.heap.ptr = expanded_ptr; } else if (capa > a->as.heap.aux.capa) { mrb_value *expanded_ptr = (mrb_value *)mrb_realloc(mrb, a->as.heap.ptr, sizeof(mrb_value)*capa); a->as.heap.aux.capa = capa; a->as.heap.ptr = expanded_ptr; } } static void ary_shrink_capa(mrb_state *mrb, struct RArray *a) { mrb_int capa; if (ARY_EMBED_P(a)) return; capa = a->as.heap.aux.capa; if (capa < ARY_DEFAULT_LEN * 2) return; if (capa <= a->as.heap.len * ARY_SHRINK_RATIO) return; do { capa /= 2; if (capa < ARY_DEFAULT_LEN) { capa = ARY_DEFAULT_LEN; break; } } while (capa > a->as.heap.len * ARY_SHRINK_RATIO); if (capa > a->as.heap.len && capa < a->as.heap.aux.capa) { a->as.heap.aux.capa = capa; a->as.heap.ptr = (mrb_value *)mrb_realloc(mrb, a->as.heap.ptr, sizeof(mrb_value)*capa); } } MRB_API mrb_value mrb_ary_resize(mrb_state *mrb, mrb_value ary, mrb_int new_len) { mrb_int old_len; struct RArray *a = mrb_ary_ptr(ary); ary_modify(mrb, a); old_len = RARRAY_LEN(ary); if (old_len != new_len) { if (new_len < old_len) { ary_shrink_capa(mrb, a); } else { ary_expand_capa(mrb, a, new_len); ary_fill_with_nil(ARY_PTR(a) + old_len, new_len - old_len); } ARY_SET_LEN(a, new_len); } return ary; } static mrb_value mrb_ary_s_create(mrb_state *mrb, mrb_value klass) { mrb_value ary; const mrb_value *vals; mrb_int len; struct RArray *a; mrb_get_args(mrb, "*!", &vals, &len); ary = mrb_ary_new_from_values(mrb, len, vals); a = mrb_ary_ptr(ary); a->c = mrb_class_ptr(klass); return ary; } static void ary_replace(mrb_state*, struct RArray*, struct RArray*); static void ary_concat(mrb_state *mrb, struct RArray *a, struct RArray *a2) { mrb_int len; if (ARY_LEN(a) == 0) { ary_replace(mrb, a, a2); return; } if (ARY_LEN(a2) > ARY_MAX_SIZE - ARY_LEN(a)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } len = ARY_LEN(a) + ARY_LEN(a2); ary_modify(mrb, a); if (ARY_CAPA(a) < len) { ary_expand_capa(mrb, a, len); } array_copy(ARY_PTR(a)+ARY_LEN(a), ARY_PTR(a2), ARY_LEN(a2)); mrb_write_barrier(mrb, (struct RBasic*)a); ARY_SET_LEN(a, len); } MRB_API void mrb_ary_concat(mrb_state *mrb, mrb_value self, mrb_value other) { struct RArray *a2 = mrb_ary_ptr(other); ary_concat(mrb, mrb_ary_ptr(self), a2); } static mrb_value mrb_ary_concat_m(mrb_state *mrb, mrb_value self) { mrb_value ary; mrb_get_args(mrb, "A", &ary); mrb_ary_concat(mrb, self, ary); return self; } static mrb_value mrb_ary_plus(mrb_state *mrb, mrb_value self) { struct RArray *a1 = mrb_ary_ptr(self); struct RArray *a2; const mrb_value *ptr; mrb_int blen, len1; 
mrb_get_args(mrb, "a", &ptr, &blen); if (ARY_MAX_SIZE - blen < ARY_LEN(a1)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } len1 = ARY_LEN(a1); a2 = ary_new_capa(mrb, len1 + blen); array_copy(ARY_PTR(a2), ARY_PTR(a1), len1); array_copy(ARY_PTR(a2) + len1, ptr, blen); ARY_SET_LEN(a2, len1+blen); return mrb_obj_value(a2); } #define ARY_REPLACE_SHARED_MIN 20 static void ary_replace(mrb_state *mrb, struct RArray *a, struct RArray *b) { mrb_int len = ARY_LEN(b); ary_modify_check(mrb, a); if (a == b) return; if (ARY_SHARED_P(a)) { mrb_ary_decref(mrb, a->as.heap.aux.shared); a->as.heap.aux.capa = 0; a->as.heap.len = 0; a->as.heap.ptr = NULL; ARY_UNSET_SHARED_FLAG(a); } if (ARY_SHARED_P(b)) { shared_b: if (ARY_EMBED_P(a)) { ARY_UNSET_EMBED_FLAG(a); } else { mrb_free(mrb, a->as.heap.ptr); } a->as.heap.ptr = b->as.heap.ptr; a->as.heap.len = len; a->as.heap.aux.shared = b->as.heap.aux.shared; a->as.heap.aux.shared->refcnt++; ARY_SET_SHARED_FLAG(a); mrb_write_barrier(mrb, (struct RBasic*)a); return; } if (!mrb_frozen_p(b) && len > ARY_REPLACE_SHARED_MIN) { ary_make_shared(mrb, b); goto shared_b; } if (ARY_CAPA(a) < len) ary_expand_capa(mrb, a, len); array_copy(ARY_PTR(a), ARY_PTR(b), len); mrb_write_barrier(mrb, (struct RBasic*)a); ARY_SET_LEN(a, len); } MRB_API void mrb_ary_replace(mrb_state *mrb, mrb_value self, mrb_value other) { struct RArray *a1 = mrb_ary_ptr(self); struct RArray *a2 = mrb_ary_ptr(other); if (a1 != a2) { ary_replace(mrb, a1, a2); } } static mrb_value mrb_ary_replace_m(mrb_state *mrb, mrb_value self) { mrb_value other; mrb_get_args(mrb, "A", &other); mrb_ary_replace(mrb, self, other); return self; } static mrb_value mrb_ary_times(mrb_state *mrb, mrb_value self) { struct RArray *a1 = mrb_ary_ptr(self); struct RArray *a2; mrb_value *ptr; mrb_int times, len1; mrb_get_args(mrb, "i", &times); if (times < 0) { mrb_raise(mrb, E_ARGUMENT_ERROR, "negative argument"); } if (times == 0) return mrb_ary_new(mrb); if (ARY_MAX_SIZE / times < ARY_LEN(a1)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } len1 = ARY_LEN(a1); a2 = ary_new_capa(mrb, len1 * times); ARY_SET_LEN(a2, len1 * times); ptr = ARY_PTR(a2); while (times--) { array_copy(ptr, ARY_PTR(a1), len1); ptr += len1; } return mrb_obj_value(a2); } static mrb_value mrb_ary_reverse_bang(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); if (len > 1) { mrb_value *p1, *p2; ary_modify(mrb, a); p1 = ARY_PTR(a); p2 = p1 + len - 1; while (p1 < p2) { mrb_value tmp = *p1; *p1++ = *p2; *p2-- = tmp; } } return self; } static mrb_value mrb_ary_reverse(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self), *b = ary_new_capa(mrb, ARY_LEN(a)); mrb_int len = ARY_LEN(a); if (len > 0) { mrb_value *p1, *p2, *e; p1 = ARY_PTR(a); e = p1 + len; p2 = ARY_PTR(b) + len - 1; while (p1 < e) { *p2-- = *p1++; } ARY_SET_LEN(b, len); } return mrb_obj_value(b); } MRB_API void mrb_ary_push(mrb_state *mrb, mrb_value ary, mrb_value elem) { struct RArray *a = mrb_ary_ptr(ary); mrb_int len = ARY_LEN(a); ary_modify(mrb, a); if (len == ARY_CAPA(a)) ary_expand_capa(mrb, a, len + 1); ARY_PTR(a)[len] = elem; ARY_SET_LEN(a, len+1); mrb_field_write_barrier_value(mrb, (struct RBasic*)a, elem); } static mrb_value mrb_ary_push_m(mrb_state *mrb, mrb_value self) { mrb_int argc; const mrb_value *argv; mrb_int len, len2; struct RArray *a; argc = mrb_get_argc(mrb); argv = mrb_get_argv(mrb); a = mrb_ary_ptr(self); ary_modify(mrb, a); len = ARY_LEN(a); len2 = len + argc; if (ARY_CAPA(a) < len2) { 
ary_expand_capa(mrb, a, len2); } array_copy(ARY_PTR(a)+len, argv, argc); ARY_SET_LEN(a, len2); while (argc--) { mrb_field_write_barrier_value(mrb, (struct RBasic*)a, *argv); argv++; } return self; } MRB_API mrb_value mrb_ary_pop(mrb_state *mrb, mrb_value ary) { struct RArray *a = mrb_ary_ptr(ary); mrb_int len = ARY_LEN(a); ary_modify_check(mrb, a); if (len == 0) return mrb_nil_value(); ARY_SET_LEN(a, len-1); return ARY_PTR(a)[len-1]; } #define ARY_SHIFT_SHARED_MIN 10 MRB_API mrb_value mrb_ary_shift(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); mrb_value val; ary_modify_check(mrb, a); if (len == 0) return mrb_nil_value(); if (ARY_SHARED_P(a)) { L_SHIFT: val = a->as.heap.ptr[0]; a->as.heap.ptr++; a->as.heap.len--; return val; } if (len > ARY_SHIFT_SHARED_MIN) { ary_make_shared(mrb, a); goto L_SHIFT; } else { mrb_value *ptr = ARY_PTR(a); mrb_int size = len; val = *ptr; while (--size) { *ptr = *(ptr+1); ++ptr; } ARY_SET_LEN(a, len-1); } return val; } static mrb_value mrb_ary_shift_m(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); mrb_int n; mrb_value val; if (mrb_get_args(mrb, "|i", &n) == 0) { return mrb_ary_shift(mrb, self); }; ary_modify_check(mrb, a); if (len == 0 || n == 0) return mrb_ary_new(mrb); if (n < 0) mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array shift"); if (n > len) n = len; val = mrb_ary_new_from_values(mrb, n, ARY_PTR(a)); if (ARY_SHARED_P(a)) { L_SHIFT: a->as.heap.ptr+=n; a->as.heap.len-=n; return val; } if (len > ARY_SHIFT_SHARED_MIN) { ary_make_shared(mrb, a); goto L_SHIFT; } else if (len == n) { ARY_SET_LEN(a, 0); } else { mrb_value *ptr = ARY_PTR(a); mrb_int size = len-n; while (size--) { *ptr = *(ptr+n); ++ptr; } ARY_SET_LEN(a, len-n); } return val; } /* self = [1,2,3] item = 0 self.unshift item p self #=> [0, 1, 2, 3] */ MRB_API mrb_value mrb_ary_unshift(mrb_state *mrb, mrb_value self, mrb_value item) { struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); if (ARY_SHARED_P(a) && a->as.heap.aux.shared->refcnt == 1 /* shared only referenced from this array */ && a->as.heap.ptr - a->as.heap.aux.shared->ptr >= 1) /* there's room for unshifted item */ { a->as.heap.ptr--; a->as.heap.ptr[0] = item; } else { mrb_value *ptr; ary_modify(mrb, a); if (ARY_CAPA(a) < len + 1) ary_expand_capa(mrb, a, len + 1); ptr = ARY_PTR(a); value_move(ptr + 1, ptr, len); ptr[0] = item; } ARY_SET_LEN(a, len+1); mrb_field_write_barrier_value(mrb, (struct RBasic*)a, item); return self; } static mrb_value mrb_ary_unshift_m(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); const mrb_value *vals; mrb_value *ptr; mrb_int alen, len; mrb_get_args(mrb, "*!", &vals, &alen); if (alen == 0) { ary_modify_check(mrb, a); return self; } len = ARY_LEN(a); if (alen > ARY_MAX_SIZE - len) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } if (ARY_SHARED_P(a) && a->as.heap.aux.shared->refcnt == 1 /* shared only referenced from this array */ && a->as.heap.ptr - a->as.heap.aux.shared->ptr >= alen) /* there's room for unshifted item */ { ary_modify_check(mrb, a); a->as.heap.ptr -= alen; ptr = a->as.heap.ptr; } else { mrb_bool same = vals == ARY_PTR(a); ary_modify(mrb, a); if (ARY_CAPA(a) < len + alen) ary_expand_capa(mrb, a, len + alen); ptr = ARY_PTR(a); value_move(ptr + alen, ptr, len); if (same) vals = ptr; } array_copy(ptr, vals, alen); ARY_SET_LEN(a, len+alen); while (alen--) { mrb_field_write_barrier_value(mrb, (struct RBasic*)a, vals[alen]); } return self; } MRB_API void 
mrb_ary_set(mrb_state *mrb, mrb_value ary, mrb_int n, mrb_value val) { struct RArray *a = mrb_ary_ptr(ary); mrb_int len = ARY_LEN(a); ary_modify(mrb, a); /* range check */ if (n < 0) { n += len; if (n < 0) { mrb_raisef(mrb, E_INDEX_ERROR, "index %i out of array", n - len); } } if (len <= n) { if (ARY_CAPA(a) <= n) ary_expand_capa(mrb, a, n + 1); ary_fill_with_nil(ARY_PTR(a) + len, n + 1 - len); ARY_SET_LEN(a, n+1); } ARY_PTR(a)[n] = val; mrb_field_write_barrier_value(mrb, (struct RBasic*)a, val); } static struct RArray* ary_dup(mrb_state *mrb, struct RArray *a) { return ary_new_from_values(mrb, ARY_LEN(a), ARY_PTR(a)); } MRB_API mrb_value mrb_ary_splice(mrb_state *mrb, mrb_value ary, mrb_int head, mrb_int len, mrb_value rpl) { struct RArray *a = mrb_ary_ptr(ary); mrb_int alen = ARY_LEN(a); const mrb_value *argv; mrb_int argc; mrb_int tail; ary_modify(mrb, a); /* len check */ if (len < 0) mrb_raisef(mrb, E_INDEX_ERROR, "negative length (%i)", len); /* range check */ if (head < 0) { head += alen; if (head < 0) goto out_of_range; } if (head > ARY_MAX_SIZE - len) { out_of_range: mrb_raisef(mrb, E_INDEX_ERROR, "index %i is out of array", head); } tail = head + len; if (alen < len || alen < tail) { len = alen - head; tail = head + len; } /* size check */ if (mrb_array_p(rpl)) { argc = RARRAY_LEN(rpl); argv = RARRAY_PTR(rpl); if (argv == ARY_PTR(a)) { struct RArray *r; if (argc > 32767) { mrb_raise(mrb, E_ARGUMENT_ERROR, "too big recursive splice"); } r = ary_dup(mrb, a); argv = ARY_PTR(r); } } else if (mrb_undef_p(rpl)) { argc = 0; argv = NULL; } else { argc = 1; argv = &rpl; } if (head >= alen) { if (head > ARY_MAX_SIZE - argc) goto out_of_range; len = head + argc; if (len > ARY_CAPA(a)) { ary_expand_capa(mrb, a, len); } ary_fill_with_nil(ARY_PTR(a) + alen, head - alen); if (argc > 0) { array_copy(ARY_PTR(a) + head, argv, argc); } ARY_SET_LEN(a, len); } else { mrb_int newlen; if (alen - len > ARY_MAX_SIZE - argc) { head = alen + argc - len; goto out_of_range; } newlen = alen + argc - len; if (newlen > ARY_CAPA(a)) { ary_expand_capa(mrb, a, newlen); } if (len != argc) { mrb_value *ptr = ARY_PTR(a); value_move(ptr + head + argc, ptr + tail, alen - tail); ARY_SET_LEN(a, newlen); } if (argc > 0) { value_move(ARY_PTR(a) + head, argv, argc); } } mrb_write_barrier(mrb, (struct RBasic*)a); return ary; } void mrb_ary_decref(mrb_state *mrb, mrb_shared_array *shared) { shared->refcnt--; if (shared->refcnt == 0) { mrb_free(mrb, shared->ptr); mrb_free(mrb, shared); } } static mrb_value ary_subseq(mrb_state *mrb, struct RArray *a, mrb_int beg, mrb_int len) { struct RArray *b; if (!ARY_SHARED_P(a) && len <= ARY_SHIFT_SHARED_MIN) { return mrb_ary_new_from_values(mrb, len, ARY_PTR(a)+beg); } ary_make_shared(mrb, a); b = MRB_OBJ_ALLOC(mrb, MRB_TT_ARRAY, mrb->array_class); b->as.heap.ptr = a->as.heap.ptr + beg; b->as.heap.len = len; b->as.heap.aux.shared = a->as.heap.aux.shared; b->as.heap.aux.shared->refcnt++; ARY_SET_SHARED_FLAG(b); return mrb_obj_value(b); } mrb_value mrb_ary_subseq(mrb_state *mrb, mrb_value ary, mrb_int beg, mrb_int len) { struct RArray *a = mrb_ary_ptr(ary); return ary_subseq(mrb, a, beg, len); } static mrb_int aget_index(mrb_state *mrb, mrb_value index) { if (mrb_integer_p(index)) { return mrb_integer(index); } #ifndef MRB_NO_FLOAT else if (mrb_float_p(index)) { return (mrb_int)mrb_float(index); } #endif else { mrb_int i, argc; const mrb_value *argv; mrb_get_args(mrb, "i*!", &i, &argv, &argc); return i; } } /* * call-seq: * ary[index] -> obj or nil * ary[start, length] -> new_ary or nil * 
ary[range] -> new_ary or nil * ary.slice(index) -> obj or nil * ary.slice(start, length) -> new_ary or nil * ary.slice(range) -> new_ary or nil * * Element Reference --- Returns the element at +index+, or returns a * subarray starting at the +start+ index and continuing for +length+ * elements, or returns a subarray specified by +range+ of indices. * * Negative indices count backward from the end of the array (-1 is the last * element). For +start+ and +range+ cases the starting index is just before * an element. Additionally, an empty array is returned when the starting * index for an element range is at the end of the array. * * Returns +nil+ if the index (or starting index) are out of range. * * a = [ "a", "b", "c", "d", "e" ] * a[1] => "b" * a[1,2] => ["b", "c"] * a[1..-2] => ["b", "c", "d"] * */ static mrb_value mrb_ary_aget(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int i; mrb_int len, alen; mrb_value index; if (mrb_get_argc(mrb) == 1) { index = mrb_get_arg1(mrb); switch (mrb_type(index)) { /* a[n..m] */ case MRB_TT_RANGE: if (mrb_range_beg_len(mrb, index, &i, &len, ARY_LEN(a), TRUE) == MRB_RANGE_OK) { return ary_subseq(mrb, a, i, len); } else { return mrb_nil_value(); } case MRB_TT_INTEGER: return mrb_ary_ref(mrb, self, mrb_integer(index)); default: return mrb_ary_ref(mrb, self, aget_index(mrb, index)); } } mrb_get_args(mrb, "oi", &index, &len); i = aget_index(mrb, index); alen = ARY_LEN(a); if (i < 0) i += alen; if (i < 0 || alen < i) return mrb_nil_value(); if (len < 0) return mrb_nil_value(); if (alen == i) return mrb_ary_new(mrb); if (len > alen - i) len = alen - i; return ary_subseq(mrb, a, i, len); } /* * call-seq: * ary[index] = obj -> obj * ary[start, length] = obj or other_ary or nil -> obj or other_ary or nil * ary[range] = obj or other_ary or nil -> obj or other_ary or nil * * Element Assignment --- Sets the element at +index+, or replaces a subarray * from the +start+ index for +length+ elements, or replaces a subarray * specified by the +range+ of indices. * * If indices are greater than the current capacity of the array, the array * grows automatically. Elements are inserted into the array at +start+ if * +length+ is zero. * * Negative indices will count backward from the end of the array. For * +start+ and +range+ cases the starting index is just before an element. * * An IndexError is raised if a negative index points past the beginning of * the array. * * See also Array#push, and Array#unshift. * * a = Array.new * a[4] = "4"; #=> [nil, nil, nil, nil, "4"] * a[0, 3] = [ 'a', 'b', 'c' ] #=> ["a", "b", "c", nil, "4"] * a[1..2] = [ 1, 2 ] #=> ["a", 1, 2, nil, "4"] * a[0, 2] = "?" 
#=> ["?", 2, nil, "4"] * a[0..2] = "A" #=> ["A", "4"] * a[-1] = "Z" #=> ["A", "Z"] * a[1..-1] = nil #=> ["A", nil] * a[1..-1] = [] #=> ["A"] * a[0, 0] = [ 1, 2 ] #=> [1, 2, "A"] * a[3, 0] = "B" #=> [1, 2, "A", "B"] */ static mrb_value mrb_ary_aset(mrb_state *mrb, mrb_value self) { mrb_value v1, v2, v3; mrb_int i, len; ary_modify(mrb, mrb_ary_ptr(self)); if (mrb_get_argc(mrb) == 2) { const mrb_value *vs = mrb_get_argv(mrb); v1 = vs[0]; v2 = vs[1]; /* a[n..m] = v */ switch (mrb_range_beg_len(mrb, v1, &i, &len, RARRAY_LEN(self), FALSE)) { case MRB_RANGE_TYPE_MISMATCH: mrb_ary_set(mrb, self, aget_index(mrb, v1), v2); break; case MRB_RANGE_OK: mrb_ary_splice(mrb, self, i, len, v2); break; case MRB_RANGE_OUT: mrb_raisef(mrb, E_RANGE_ERROR, "%v out of range", v1); break; } return v2; } mrb_get_args(mrb, "ooo", &v1, &v2, &v3); /* a[n,m] = v */ mrb_ary_splice(mrb, self, aget_index(mrb, v1), aget_index(mrb, v2), v3); return v3; } static mrb_value mrb_ary_delete_at(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int index; mrb_value val; mrb_value *ptr; mrb_int len, alen; mrb_get_args(mrb, "i", &index); alen = ARY_LEN(a); if (index < 0) index += alen; if (index < 0 || alen <= index) return mrb_nil_value(); ary_modify(mrb, a); ptr = ARY_PTR(a); val = ptr[index]; ptr += index; len = alen - index; while (--len) { *ptr = *(ptr+1); ++ptr; } ARY_SET_LEN(a, alen-1); ary_shrink_capa(mrb, a); return val; } static mrb_value mrb_ary_first(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int size, alen; if (mrb_get_argc(mrb) == 0) { return (ARY_LEN(a) > 0)? ARY_PTR(a)[0]: mrb_nil_value(); } mrb_get_args(mrb, "|i", &size); if (size < 0) { mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array size"); } alen = ARY_LEN(a); if (size > alen) size = alen; if (ARY_SHARED_P(a)) { return ary_subseq(mrb, a, 0, size); } return mrb_ary_new_from_values(mrb, size, ARY_PTR(a)); } static mrb_value mrb_ary_last(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int n, size, alen; n = mrb_get_args(mrb, "|i", &size); alen = ARY_LEN(a); if (n == 0) { return (alen > 0) ? 
ARY_PTR(a)[alen - 1]: mrb_nil_value(); } if (size < 0) { mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array size"); } if (size > alen) size = alen; if (ARY_SHARED_P(a) || size > ARY_DEFAULT_LEN) { return ary_subseq(mrb, a, alen - size, size); } return mrb_ary_new_from_values(mrb, size, ARY_PTR(a) + alen - size); } static mrb_value mrb_ary_index_m(mrb_state *mrb, mrb_value self) { mrb_value obj = mrb_get_arg1(mrb); mrb_int i; for (i = 0; i < RARRAY_LEN(self); i++) { if (mrb_equal(mrb, RARRAY_PTR(self)[i], obj)) { return mrb_int_value(mrb, i); } } return mrb_nil_value(); } static mrb_value mrb_ary_rindex_m(mrb_state *mrb, mrb_value self) { mrb_value obj = mrb_get_arg1(mrb); mrb_int i, len; for (i = RARRAY_LEN(self) - 1; i >= 0; i--) { if (mrb_equal(mrb, RARRAY_PTR(self)[i], obj)) { return mrb_int_value(mrb, i); } if (i > (len = RARRAY_LEN(self))) { i = len; } } return mrb_nil_value(); } MRB_API mrb_value mrb_ary_splat(mrb_state *mrb, mrb_value v) { mrb_value ary; struct RArray *a; if (mrb_array_p(v)) { a = ary_dup(mrb, mrb_ary_ptr(v)); return mrb_obj_value(a); } if (!mrb_respond_to(mrb, v, MRB_SYM(to_a))) { return mrb_ary_new_from_values(mrb, 1, &v); } ary = mrb_funcall_id(mrb, v, MRB_SYM(to_a), 0); if (mrb_nil_p(ary)) { return mrb_ary_new_from_values(mrb, 1, &v); } mrb_ensure_array_type(mrb, ary); a = mrb_ary_ptr(ary); a = ary_dup(mrb, a); return mrb_obj_value(a); } static mrb_value mrb_ary_size(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); return mrb_int_value(mrb, ARY_LEN(a)); } MRB_API mrb_value mrb_ary_clear(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); ary_modify(mrb, a); if (ARY_SHARED_P(a)) { mrb_ary_decref(mrb, a->as.heap.aux.shared); ARY_UNSET_SHARED_FLAG(a); } else if (!ARY_EMBED_P(a)){ mrb_free(mrb, a->as.heap.ptr); } if (MRB_ARY_EMBED_LEN_MAX > 0) { ARY_SET_EMBED_LEN(a, 0); } else { a->as.heap.ptr = NULL; a->as.heap.aux.capa = 0; ARY_SET_LEN(a, 0); } return self; } static mrb_value mrb_ary_clear_m(mrb_state *mrb, mrb_value self) { return mrb_ary_clear(mrb, self); } static mrb_value mrb_ary_empty_p(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); return mrb_bool_value(ARY_LEN(a) == 0); } MRB_API mrb_value mrb_ary_entry(mrb_value ary, mrb_int n) { struct RArray *a = mrb_ary_ptr(ary); mrb_int len = ARY_LEN(a); /* range check */ if (n < 0) n += len; if (n < 0 || len <= n) return mrb_nil_value(); return ARY_PTR(a)[n]; } static mrb_value join_ary(mrb_state *mrb, mrb_value ary, mrb_value sep, mrb_value list) { mrb_int i; mrb_value result, val, tmp; /* check recursive */ for (i=0; i<RARRAY_LEN(list); i++) { if (mrb_obj_equal(mrb, ary, RARRAY_PTR(list)[i])) { mrb_raise(mrb, E_ARGUMENT_ERROR, "recursive array join"); } } mrb_ary_push(mrb, list, ary); result = mrb_str_new_capa(mrb, 64); for (i=0; i<RARRAY_LEN(ary); i++) { if (i > 0 && !mrb_nil_p(sep)) { mrb_str_cat_str(mrb, result, sep); } val = RARRAY_PTR(ary)[i]; switch (mrb_type(val)) { case MRB_TT_ARRAY: ary_join: val = join_ary(mrb, val, sep, list); /* fall through */ case MRB_TT_STRING: str_join: mrb_str_cat_str(mrb, result, val); break; default: if (!mrb_immediate_p(val)) { tmp = mrb_check_string_type(mrb, val); if (!mrb_nil_p(tmp)) { val = tmp; goto str_join; } tmp = mrb_check_array_type(mrb, val); if (!mrb_nil_p(tmp)) { val = tmp; goto ary_join; } } val = mrb_obj_as_string(mrb, val); goto str_join; } } mrb_ary_pop(mrb, list); return result; } MRB_API mrb_value mrb_ary_join(mrb_state *mrb, mrb_value ary, mrb_value sep) { if (!mrb_nil_p(sep)) { sep = 
mrb_obj_as_string(mrb, sep); } return join_ary(mrb, ary, sep, mrb_ary_new(mrb)); } /* * call-seq: * ary.join(sep="") -> str * * Returns a string created by converting each element of the array to * a string, separated by <i>sep</i>. * * [ "a", "b", "c" ].join #=> "abc" * [ "a", "b", "c" ].join("-") #=> "a-b-c" */ static mrb_value mrb_ary_join_m(mrb_state *mrb, mrb_value ary) { mrb_value sep = mrb_nil_value(); mrb_get_args(mrb, "|S!", &sep); return mrb_ary_join(mrb, ary, sep); } static mrb_value mrb_ary_eq(mrb_state *mrb, mrb_value ary1) { mrb_value ary2 = mrb_get_arg1(mrb); mrb->c->ci->mid = 0; if (mrb_obj_equal(mrb, ary1, ary2)) return mrb_true_value(); if (!mrb_array_p(ary2)) { return mrb_false_value(); } if (RARRAY_LEN(ary1) != RARRAY_LEN(ary2)) return mrb_false_value(); return ary2; } static mrb_value mrb_ary_cmp(mrb_state *mrb, mrb_value ary1) { mrb_value ary2 = mrb_get_arg1(mrb); mrb->c->ci->mid = 0; if (mrb_obj_equal(mrb, ary1, ary2)) return mrb_fixnum_value(0); if (!mrb_array_p(ary2)) { return mrb_nil_value(); } return ary2; } /* internal method to convert multi-value to single value */ static mrb_value mrb_ary_svalue(mrb_state *mrb, mrb_value ary) { switch (RARRAY_LEN(ary)) { case 0: return mrb_nil_value(); case 1: return RARRAY_PTR(ary)[0]; default: return ary; } } void mrb_init_array(mrb_state *mrb) { struct RClass *a; mrb->array_class = a = mrb_define_class(mrb, "Array", mrb->object_class); /* 15.2.12 */ MRB_SET_INSTANCE_TT(a, MRB_TT_ARRAY); mrb_define_class_method(mrb, a, "[]", mrb_ary_s_create, MRB_ARGS_ANY()); /* 15.2.12.4.1 */ mrb_define_method(mrb, a, "+", mrb_ary_plus, MRB_ARGS_REQ(1)); /* 15.2.12.5.1 */ mrb_define_method(mrb, a, "*", mrb_ary_times, MRB_ARGS_REQ(1)); /* 15.2.12.5.2 */ mrb_define_method(mrb, a, "<<", mrb_ary_push_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.3 */ mrb_define_method(mrb, a, "[]", mrb_ary_aget, MRB_ARGS_ARG(1,1)); /* 15.2.12.5.4 */ mrb_define_method(mrb, a, "[]=", mrb_ary_aset, MRB_ARGS_ARG(2,1)); /* 15.2.12.5.5 */ mrb_define_method(mrb, a, "clear", mrb_ary_clear_m, MRB_ARGS_NONE()); /* 15.2.12.5.6 */ mrb_define_method(mrb, a, "concat", mrb_ary_concat_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.8 */ mrb_define_method(mrb, a, "delete_at", mrb_ary_delete_at, MRB_ARGS_REQ(1)); /* 15.2.12.5.9 */ mrb_define_method(mrb, a, "empty?", mrb_ary_empty_p, MRB_ARGS_NONE()); /* 15.2.12.5.12 */ mrb_define_method(mrb, a, "first", mrb_ary_first, MRB_ARGS_OPT(1)); /* 15.2.12.5.13 */ mrb_define_method(mrb, a, "index", mrb_ary_index_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.14 */ mrb_define_method(mrb, a, "initialize_copy", mrb_ary_replace_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.16 */ mrb_define_method(mrb, a, "join", mrb_ary_join_m, MRB_ARGS_OPT(1)); /* 15.2.12.5.17 */ mrb_define_method(mrb, a, "last", mrb_ary_last, MRB_ARGS_OPT(1)); /* 15.2.12.5.18 */ mrb_define_method(mrb, a, "length", mrb_ary_size, MRB_ARGS_NONE()); /* 15.2.12.5.19 */ mrb_define_method(mrb, a, "pop", mrb_ary_pop, MRB_ARGS_NONE()); /* 15.2.12.5.21 */ mrb_define_method(mrb, a, "push", mrb_ary_push_m, MRB_ARGS_ANY()); /* 15.2.12.5.22 */ mrb_define_method(mrb, a, "replace", mrb_ary_replace_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.23 */ mrb_define_method(mrb, a, "reverse", mrb_ary_reverse, MRB_ARGS_NONE()); /* 15.2.12.5.24 */ mrb_define_method(mrb, a, "reverse!", mrb_ary_reverse_bang, MRB_ARGS_NONE()); /* 15.2.12.5.25 */ mrb_define_method(mrb, a, "rindex", mrb_ary_rindex_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.26 */ mrb_define_method(mrb, a, "shift", mrb_ary_shift_m, MRB_ARGS_OPT(1)); /* 15.2.12.5.27 */ mrb_define_method(mrb, a, "size", 
mrb_ary_size, MRB_ARGS_NONE()); /* 15.2.12.5.28 */ mrb_define_method(mrb, a, "slice", mrb_ary_aget, MRB_ARGS_ARG(1,1)); /* 15.2.12.5.29 */ mrb_define_method(mrb, a, "unshift", mrb_ary_unshift_m, MRB_ARGS_ANY()); /* 15.2.12.5.30 */ mrb_define_method(mrb, a, "__ary_eq", mrb_ary_eq, MRB_ARGS_REQ(1)); mrb_define_method(mrb, a, "__ary_cmp", mrb_ary_cmp, MRB_ARGS_REQ(1)); mrb_define_method(mrb, a, "__ary_index", mrb_ary_index_m, MRB_ARGS_REQ(1)); /* kept for mruby-array-ext */ mrb_define_method(mrb, a, "__svalue", mrb_ary_svalue, MRB_ARGS_NONE()); }
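Between the code_before above and the code_after that follows, the substantive change is inside mrb_ary_shift_m: the `struct RArray *a = mrb_ary_ptr(self);` and `mrb_int len = ARY_LEN(a);` fetches move below the `mrb_get_args(mrb, "|i", &n)` call (the stray `;` after the early-return block is also dropped). Argument conversion can call back into Ruby code that mutates or reallocates the receiver, so a pointer and length cached before that call may be stale by the time they are used. Here is a standalone sketch of that stale-cache hazard, with a plain callback standing in for mruby's argument parsing; all names below are illustrative, not mruby API.

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int *ptr;
    size_t len;
} array_t;

/* Stand-in for mrb_get_args(): converting arguments can run user code
 * that modifies the receiver, modeled here as growth that may move
 * the underlying buffer. */
static void
parse_args(array_t *a)
{
    int *p = realloc(a->ptr, 64 * sizeof(int));
    if (p == NULL) { free(a->ptr); exit(EXIT_FAILURE); }
    a->ptr = p;           /* any previously cached pointer may now dangle */
    a->len = 64;
}

int
main(void)
{
    array_t a = { calloc(4, sizeof(int)), 4 };

    /* code_before's order: cache first, parse second. */
    int *cached = a.ptr;
    size_t cached_len = a.len;
    parse_args(&a);
    /* `cached`/`cached_len` are stale here; dereferencing `cached` would
     * be a use-after-free whenever realloc moved the block. */
    (void)cached; (void)cached_len;

    /* code_after's order: parse first, then read the live fields. */
    parse_args(&a);
    printf("live len: %zu\n", a.len);

    free(a.ptr);
    return 0;
}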
/* ** array.c - Array class ** ** See Copyright Notice in mruby.h */ #include <mruby.h> #include <mruby/array.h> #include <mruby/class.h> #include <mruby/string.h> #include <mruby/range.h> #include <mruby/proc.h> #include <mruby/presym.h> #include "value_array.h" #define ARY_DEFAULT_LEN 4 #define ARY_SHRINK_RATIO 5 /* must be larger than 2 */ #define ARY_C_MAX_SIZE (SIZE_MAX / sizeof(mrb_value)) #define ARY_MAX_SIZE ((mrb_int)((ARY_C_MAX_SIZE < (size_t)MRB_INT_MAX) ? ARY_C_MAX_SIZE : MRB_INT_MAX-1)) static struct RArray* ary_new_capa(mrb_state *mrb, mrb_int capa) { struct RArray *a; size_t blen; if (capa > ARY_MAX_SIZE) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } blen = capa * sizeof(mrb_value); a = MRB_OBJ_ALLOC(mrb, MRB_TT_ARRAY, mrb->array_class); if (capa <= MRB_ARY_EMBED_LEN_MAX) { ARY_SET_EMBED_LEN(a, 0); } else { a->as.heap.ptr = (mrb_value *)mrb_malloc(mrb, blen); a->as.heap.aux.capa = capa; a->as.heap.len = 0; } return a; } MRB_API mrb_value mrb_ary_new_capa(mrb_state *mrb, mrb_int capa) { struct RArray *a = ary_new_capa(mrb, capa); return mrb_obj_value(a); } MRB_API mrb_value mrb_ary_new(mrb_state *mrb) { return mrb_ary_new_capa(mrb, 0); } /* * To copy array, use this instead of memcpy because of portability * * gcc on ARM may fail optimization of memcpy * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56620 * * gcc on MIPS also fail * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=39755 * * memcpy doesn't exist on freestanding environment * * If you optimize for binary size, use memcpy instead of this at your own risk * of above portability issue. * * See also https://togetter.com/li/462898 (Japanese) */ static inline void array_copy(mrb_value *dst, const mrb_value *src, mrb_int size) { mrb_int i; for (i = 0; i < size; i++) { dst[i] = src[i]; } } static struct RArray* ary_new_from_values(mrb_state *mrb, mrb_int size, const mrb_value *vals) { struct RArray *a = ary_new_capa(mrb, size); array_copy(ARY_PTR(a), vals, size); ARY_SET_LEN(a, size); return a; } MRB_API mrb_value mrb_ary_new_from_values(mrb_state *mrb, mrb_int size, const mrb_value *vals) { struct RArray *a = ary_new_from_values(mrb, size, vals); return mrb_obj_value(a); } MRB_API mrb_value mrb_assoc_new(mrb_state *mrb, mrb_value car, mrb_value cdr) { struct RArray *a; a = ary_new_capa(mrb, 2); ARY_PTR(a)[0] = car; ARY_PTR(a)[1] = cdr; ARY_SET_LEN(a, 2); return mrb_obj_value(a); } static void ary_fill_with_nil(mrb_value *ptr, mrb_int size) { mrb_value nil = mrb_nil_value(); while (size--) { *ptr++ = nil; } } static void ary_modify_check(mrb_state *mrb, struct RArray *a) { mrb_check_frozen(mrb, a); } static void ary_modify(mrb_state *mrb, struct RArray *a) { ary_modify_check(mrb, a); if (ARY_SHARED_P(a)) { mrb_shared_array *shared = a->as.heap.aux.shared; if (shared->refcnt == 1 && a->as.heap.ptr == shared->ptr) { a->as.heap.ptr = shared->ptr; a->as.heap.aux.capa = a->as.heap.len; mrb_free(mrb, shared); } else { mrb_value *ptr, *p; mrb_int len; p = a->as.heap.ptr; len = a->as.heap.len * sizeof(mrb_value); ptr = (mrb_value *)mrb_malloc(mrb, len); if (p) { array_copy(ptr, p, a->as.heap.len); } a->as.heap.ptr = ptr; a->as.heap.aux.capa = a->as.heap.len; mrb_ary_decref(mrb, shared); } ARY_UNSET_SHARED_FLAG(a); } } MRB_API void mrb_ary_modify(mrb_state *mrb, struct RArray* a) { mrb_write_barrier(mrb, (struct RBasic*)a); ary_modify(mrb, a); } static void ary_make_shared(mrb_state *mrb, struct RArray *a) { if (!ARY_SHARED_P(a) && !ARY_EMBED_P(a)) { mrb_shared_array *shared = (mrb_shared_array *)mrb_malloc(mrb, 
sizeof(mrb_shared_array)); mrb_value *ptr = a->as.heap.ptr; mrb_int len = a->as.heap.len; shared->refcnt = 1; if (a->as.heap.aux.capa > len) { a->as.heap.ptr = shared->ptr = (mrb_value *)mrb_realloc(mrb, ptr, sizeof(mrb_value)*len+1); } else { shared->ptr = ptr; } shared->len = len; a->as.heap.aux.shared = shared; ARY_SET_SHARED_FLAG(a); } } static void ary_expand_capa(mrb_state *mrb, struct RArray *a, mrb_int len) { mrb_int capa = ARY_CAPA(a); if (len > ARY_MAX_SIZE || len < 0) { size_error: mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } if (capa < ARY_DEFAULT_LEN) { capa = ARY_DEFAULT_LEN; } while (capa < len) { if (capa <= ARY_MAX_SIZE / 2) { capa *= 2; } else { capa = len; } } if (capa < len || capa > ARY_MAX_SIZE) { goto size_error; } if (ARY_EMBED_P(a)) { mrb_value *ptr = ARY_EMBED_PTR(a); mrb_int len = ARY_EMBED_LEN(a); mrb_value *expanded_ptr = (mrb_value *)mrb_malloc(mrb, sizeof(mrb_value)*capa); ARY_UNSET_EMBED_FLAG(a); array_copy(expanded_ptr, ptr, len); a->as.heap.len = len; a->as.heap.aux.capa = capa; a->as.heap.ptr = expanded_ptr; } else if (capa > a->as.heap.aux.capa) { mrb_value *expanded_ptr = (mrb_value *)mrb_realloc(mrb, a->as.heap.ptr, sizeof(mrb_value)*capa); a->as.heap.aux.capa = capa; a->as.heap.ptr = expanded_ptr; } } static void ary_shrink_capa(mrb_state *mrb, struct RArray *a) { mrb_int capa; if (ARY_EMBED_P(a)) return; capa = a->as.heap.aux.capa; if (capa < ARY_DEFAULT_LEN * 2) return; if (capa <= a->as.heap.len * ARY_SHRINK_RATIO) return; do { capa /= 2; if (capa < ARY_DEFAULT_LEN) { capa = ARY_DEFAULT_LEN; break; } } while (capa > a->as.heap.len * ARY_SHRINK_RATIO); if (capa > a->as.heap.len && capa < a->as.heap.aux.capa) { a->as.heap.aux.capa = capa; a->as.heap.ptr = (mrb_value *)mrb_realloc(mrb, a->as.heap.ptr, sizeof(mrb_value)*capa); } } MRB_API mrb_value mrb_ary_resize(mrb_state *mrb, mrb_value ary, mrb_int new_len) { mrb_int old_len; struct RArray *a = mrb_ary_ptr(ary); ary_modify(mrb, a); old_len = RARRAY_LEN(ary); if (old_len != new_len) { if (new_len < old_len) { ary_shrink_capa(mrb, a); } else { ary_expand_capa(mrb, a, new_len); ary_fill_with_nil(ARY_PTR(a) + old_len, new_len - old_len); } ARY_SET_LEN(a, new_len); } return ary; } static mrb_value mrb_ary_s_create(mrb_state *mrb, mrb_value klass) { mrb_value ary; const mrb_value *vals; mrb_int len; struct RArray *a; mrb_get_args(mrb, "*!", &vals, &len); ary = mrb_ary_new_from_values(mrb, len, vals); a = mrb_ary_ptr(ary); a->c = mrb_class_ptr(klass); return ary; } static void ary_replace(mrb_state*, struct RArray*, struct RArray*); static void ary_concat(mrb_state *mrb, struct RArray *a, struct RArray *a2) { mrb_int len; if (ARY_LEN(a) == 0) { ary_replace(mrb, a, a2); return; } if (ARY_LEN(a2) > ARY_MAX_SIZE - ARY_LEN(a)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } len = ARY_LEN(a) + ARY_LEN(a2); ary_modify(mrb, a); if (ARY_CAPA(a) < len) { ary_expand_capa(mrb, a, len); } array_copy(ARY_PTR(a)+ARY_LEN(a), ARY_PTR(a2), ARY_LEN(a2)); mrb_write_barrier(mrb, (struct RBasic*)a); ARY_SET_LEN(a, len); } MRB_API void mrb_ary_concat(mrb_state *mrb, mrb_value self, mrb_value other) { struct RArray *a2 = mrb_ary_ptr(other); ary_concat(mrb, mrb_ary_ptr(self), a2); } static mrb_value mrb_ary_concat_m(mrb_state *mrb, mrb_value self) { mrb_value ary; mrb_get_args(mrb, "A", &ary); mrb_ary_concat(mrb, self, ary); return self; } static mrb_value mrb_ary_plus(mrb_state *mrb, mrb_value self) { struct RArray *a1 = mrb_ary_ptr(self); struct RArray *a2; const mrb_value *ptr; mrb_int blen, len1; 
mrb_get_args(mrb, "a", &ptr, &blen); if (ARY_MAX_SIZE - blen < ARY_LEN(a1)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } len1 = ARY_LEN(a1); a2 = ary_new_capa(mrb, len1 + blen); array_copy(ARY_PTR(a2), ARY_PTR(a1), len1); array_copy(ARY_PTR(a2) + len1, ptr, blen); ARY_SET_LEN(a2, len1+blen); return mrb_obj_value(a2); } #define ARY_REPLACE_SHARED_MIN 20 static void ary_replace(mrb_state *mrb, struct RArray *a, struct RArray *b) { mrb_int len = ARY_LEN(b); ary_modify_check(mrb, a); if (a == b) return; if (ARY_SHARED_P(a)) { mrb_ary_decref(mrb, a->as.heap.aux.shared); a->as.heap.aux.capa = 0; a->as.heap.len = 0; a->as.heap.ptr = NULL; ARY_UNSET_SHARED_FLAG(a); } if (ARY_SHARED_P(b)) { shared_b: if (ARY_EMBED_P(a)) { ARY_UNSET_EMBED_FLAG(a); } else { mrb_free(mrb, a->as.heap.ptr); } a->as.heap.ptr = b->as.heap.ptr; a->as.heap.len = len; a->as.heap.aux.shared = b->as.heap.aux.shared; a->as.heap.aux.shared->refcnt++; ARY_SET_SHARED_FLAG(a); mrb_write_barrier(mrb, (struct RBasic*)a); return; } if (!mrb_frozen_p(b) && len > ARY_REPLACE_SHARED_MIN) { ary_make_shared(mrb, b); goto shared_b; } if (ARY_CAPA(a) < len) ary_expand_capa(mrb, a, len); array_copy(ARY_PTR(a), ARY_PTR(b), len); mrb_write_barrier(mrb, (struct RBasic*)a); ARY_SET_LEN(a, len); } MRB_API void mrb_ary_replace(mrb_state *mrb, mrb_value self, mrb_value other) { struct RArray *a1 = mrb_ary_ptr(self); struct RArray *a2 = mrb_ary_ptr(other); if (a1 != a2) { ary_replace(mrb, a1, a2); } } static mrb_value mrb_ary_replace_m(mrb_state *mrb, mrb_value self) { mrb_value other; mrb_get_args(mrb, "A", &other); mrb_ary_replace(mrb, self, other); return self; } static mrb_value mrb_ary_times(mrb_state *mrb, mrb_value self) { struct RArray *a1 = mrb_ary_ptr(self); struct RArray *a2; mrb_value *ptr; mrb_int times, len1; mrb_get_args(mrb, "i", &times); if (times < 0) { mrb_raise(mrb, E_ARGUMENT_ERROR, "negative argument"); } if (times == 0) return mrb_ary_new(mrb); if (ARY_MAX_SIZE / times < ARY_LEN(a1)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } len1 = ARY_LEN(a1); a2 = ary_new_capa(mrb, len1 * times); ARY_SET_LEN(a2, len1 * times); ptr = ARY_PTR(a2); while (times--) { array_copy(ptr, ARY_PTR(a1), len1); ptr += len1; } return mrb_obj_value(a2); } static mrb_value mrb_ary_reverse_bang(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); if (len > 1) { mrb_value *p1, *p2; ary_modify(mrb, a); p1 = ARY_PTR(a); p2 = p1 + len - 1; while (p1 < p2) { mrb_value tmp = *p1; *p1++ = *p2; *p2-- = tmp; } } return self; } static mrb_value mrb_ary_reverse(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self), *b = ary_new_capa(mrb, ARY_LEN(a)); mrb_int len = ARY_LEN(a); if (len > 0) { mrb_value *p1, *p2, *e; p1 = ARY_PTR(a); e = p1 + len; p2 = ARY_PTR(b) + len - 1; while (p1 < e) { *p2-- = *p1++; } ARY_SET_LEN(b, len); } return mrb_obj_value(b); } MRB_API void mrb_ary_push(mrb_state *mrb, mrb_value ary, mrb_value elem) { struct RArray *a = mrb_ary_ptr(ary); mrb_int len = ARY_LEN(a); ary_modify(mrb, a); if (len == ARY_CAPA(a)) ary_expand_capa(mrb, a, len + 1); ARY_PTR(a)[len] = elem; ARY_SET_LEN(a, len+1); mrb_field_write_barrier_value(mrb, (struct RBasic*)a, elem); } static mrb_value mrb_ary_push_m(mrb_state *mrb, mrb_value self) { mrb_int argc; const mrb_value *argv; mrb_int len, len2; struct RArray *a; argc = mrb_get_argc(mrb); argv = mrb_get_argv(mrb); a = mrb_ary_ptr(self); ary_modify(mrb, a); len = ARY_LEN(a); len2 = len + argc; if (ARY_CAPA(a) < len2) { 
ary_expand_capa(mrb, a, len2); } array_copy(ARY_PTR(a)+len, argv, argc); ARY_SET_LEN(a, len2); while (argc--) { mrb_field_write_barrier_value(mrb, (struct RBasic*)a, *argv); argv++; } return self; } MRB_API mrb_value mrb_ary_pop(mrb_state *mrb, mrb_value ary) { struct RArray *a = mrb_ary_ptr(ary); mrb_int len = ARY_LEN(a); ary_modify_check(mrb, a); if (len == 0) return mrb_nil_value(); ARY_SET_LEN(a, len-1); return ARY_PTR(a)[len-1]; } #define ARY_SHIFT_SHARED_MIN 10 MRB_API mrb_value mrb_ary_shift(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); mrb_value val; ary_modify_check(mrb, a); if (len == 0) return mrb_nil_value(); if (ARY_SHARED_P(a)) { L_SHIFT: val = a->as.heap.ptr[0]; a->as.heap.ptr++; a->as.heap.len--; return val; } if (len > ARY_SHIFT_SHARED_MIN) { ary_make_shared(mrb, a); goto L_SHIFT; } else { mrb_value *ptr = ARY_PTR(a); mrb_int size = len; val = *ptr; while (--size) { *ptr = *(ptr+1); ++ptr; } ARY_SET_LEN(a, len-1); } return val; } static mrb_value mrb_ary_shift_m(mrb_state *mrb, mrb_value self) { mrb_int n; if (mrb_get_args(mrb, "|i", &n) == 0) { return mrb_ary_shift(mrb, self); } struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); mrb_value val; ary_modify_check(mrb, a); if (len == 0 || n == 0) return mrb_ary_new(mrb); if (n < 0) mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array shift"); if (n > len) n = len; val = mrb_ary_new_from_values(mrb, n, ARY_PTR(a)); if (ARY_SHARED_P(a)) { L_SHIFT: a->as.heap.ptr+=n; a->as.heap.len-=n; return val; } if (len > ARY_SHIFT_SHARED_MIN) { ary_make_shared(mrb, a); goto L_SHIFT; } else if (len == n) { ARY_SET_LEN(a, 0); } else { mrb_value *ptr = ARY_PTR(a); mrb_int size = len-n; while (size--) { *ptr = *(ptr+n); ++ptr; } ARY_SET_LEN(a, len-n); } return val; } /* self = [1,2,3] item = 0 self.unshift item p self #=> [0, 1, 2, 3] */ MRB_API mrb_value mrb_ary_unshift(mrb_state *mrb, mrb_value self, mrb_value item) { struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); if (ARY_SHARED_P(a) && a->as.heap.aux.shared->refcnt == 1 /* shared only referenced from this array */ && a->as.heap.ptr - a->as.heap.aux.shared->ptr >= 1) /* there's room for unshifted item */ { a->as.heap.ptr--; a->as.heap.ptr[0] = item; } else { mrb_value *ptr; ary_modify(mrb, a); if (ARY_CAPA(a) < len + 1) ary_expand_capa(mrb, a, len + 1); ptr = ARY_PTR(a); value_move(ptr + 1, ptr, len); ptr[0] = item; } ARY_SET_LEN(a, len+1); mrb_field_write_barrier_value(mrb, (struct RBasic*)a, item); return self; } static mrb_value mrb_ary_unshift_m(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); const mrb_value *vals; mrb_value *ptr; mrb_int alen, len; mrb_get_args(mrb, "*!", &vals, &alen); if (alen == 0) { ary_modify_check(mrb, a); return self; } len = ARY_LEN(a); if (alen > ARY_MAX_SIZE - len) { mrb_raise(mrb, E_ARGUMENT_ERROR, "array size too big"); } if (ARY_SHARED_P(a) && a->as.heap.aux.shared->refcnt == 1 /* shared only referenced from this array */ && a->as.heap.ptr - a->as.heap.aux.shared->ptr >= alen) /* there's room for unshifted item */ { ary_modify_check(mrb, a); a->as.heap.ptr -= alen; ptr = a->as.heap.ptr; } else { mrb_bool same = vals == ARY_PTR(a); ary_modify(mrb, a); if (ARY_CAPA(a) < len + alen) ary_expand_capa(mrb, a, len + alen); ptr = ARY_PTR(a); value_move(ptr + alen, ptr, len); if (same) vals = ptr; } array_copy(ptr, vals, alen); ARY_SET_LEN(a, len+alen); while (alen--) { mrb_field_write_barrier_value(mrb, (struct RBasic*)a, vals[alen]); } return self; } MRB_API void 
mrb_ary_set(mrb_state *mrb, mrb_value ary, mrb_int n, mrb_value val) { struct RArray *a = mrb_ary_ptr(ary); mrb_int len = ARY_LEN(a); ary_modify(mrb, a); /* range check */ if (n < 0) { n += len; if (n < 0) { mrb_raisef(mrb, E_INDEX_ERROR, "index %i out of array", n - len); } } if (len <= n) { if (ARY_CAPA(a) <= n) ary_expand_capa(mrb, a, n + 1); ary_fill_with_nil(ARY_PTR(a) + len, n + 1 - len); ARY_SET_LEN(a, n+1); } ARY_PTR(a)[n] = val; mrb_field_write_barrier_value(mrb, (struct RBasic*)a, val); } static struct RArray* ary_dup(mrb_state *mrb, struct RArray *a) { return ary_new_from_values(mrb, ARY_LEN(a), ARY_PTR(a)); } MRB_API mrb_value mrb_ary_splice(mrb_state *mrb, mrb_value ary, mrb_int head, mrb_int len, mrb_value rpl) { struct RArray *a = mrb_ary_ptr(ary); mrb_int alen = ARY_LEN(a); const mrb_value *argv; mrb_int argc; mrb_int tail; ary_modify(mrb, a); /* len check */ if (len < 0) mrb_raisef(mrb, E_INDEX_ERROR, "negative length (%i)", len); /* range check */ if (head < 0) { head += alen; if (head < 0) goto out_of_range; } if (head > ARY_MAX_SIZE - len) { out_of_range: mrb_raisef(mrb, E_INDEX_ERROR, "index %i is out of array", head); } tail = head + len; if (alen < len || alen < tail) { len = alen - head; tail = head + len; } /* size check */ if (mrb_array_p(rpl)) { argc = RARRAY_LEN(rpl); argv = RARRAY_PTR(rpl); if (argv == ARY_PTR(a)) { struct RArray *r; if (argc > 32767) { mrb_raise(mrb, E_ARGUMENT_ERROR, "too big recursive splice"); } r = ary_dup(mrb, a); argv = ARY_PTR(r); } } else if (mrb_undef_p(rpl)) { argc = 0; argv = NULL; } else { argc = 1; argv = &rpl; } if (head >= alen) { if (head > ARY_MAX_SIZE - argc) goto out_of_range; len = head + argc; if (len > ARY_CAPA(a)) { ary_expand_capa(mrb, a, len); } ary_fill_with_nil(ARY_PTR(a) + alen, head - alen); if (argc > 0) { array_copy(ARY_PTR(a) + head, argv, argc); } ARY_SET_LEN(a, len); } else { mrb_int newlen; if (alen - len > ARY_MAX_SIZE - argc) { head = alen + argc - len; goto out_of_range; } newlen = alen + argc - len; if (newlen > ARY_CAPA(a)) { ary_expand_capa(mrb, a, newlen); } if (len != argc) { mrb_value *ptr = ARY_PTR(a); value_move(ptr + head + argc, ptr + tail, alen - tail); ARY_SET_LEN(a, newlen); } if (argc > 0) { value_move(ARY_PTR(a) + head, argv, argc); } } mrb_write_barrier(mrb, (struct RBasic*)a); return ary; } void mrb_ary_decref(mrb_state *mrb, mrb_shared_array *shared) { shared->refcnt--; if (shared->refcnt == 0) { mrb_free(mrb, shared->ptr); mrb_free(mrb, shared); } } static mrb_value ary_subseq(mrb_state *mrb, struct RArray *a, mrb_int beg, mrb_int len) { struct RArray *b; if (!ARY_SHARED_P(a) && len <= ARY_SHIFT_SHARED_MIN) { return mrb_ary_new_from_values(mrb, len, ARY_PTR(a)+beg); } ary_make_shared(mrb, a); b = MRB_OBJ_ALLOC(mrb, MRB_TT_ARRAY, mrb->array_class); b->as.heap.ptr = a->as.heap.ptr + beg; b->as.heap.len = len; b->as.heap.aux.shared = a->as.heap.aux.shared; b->as.heap.aux.shared->refcnt++; ARY_SET_SHARED_FLAG(b); return mrb_obj_value(b); } mrb_value mrb_ary_subseq(mrb_state *mrb, mrb_value ary, mrb_int beg, mrb_int len) { struct RArray *a = mrb_ary_ptr(ary); return ary_subseq(mrb, a, beg, len); } static mrb_int aget_index(mrb_state *mrb, mrb_value index) { if (mrb_integer_p(index)) { return mrb_integer(index); } #ifndef MRB_NO_FLOAT else if (mrb_float_p(index)) { return (mrb_int)mrb_float(index); } #endif else { mrb_int i, argc; const mrb_value *argv; mrb_get_args(mrb, "i*!", &i, &argv, &argc); return i; } } /* * call-seq: * ary[index] -> obj or nil * ary[start, length] -> new_ary or nil * 
ary[range] -> new_ary or nil * ary.slice(index) -> obj or nil * ary.slice(start, length) -> new_ary or nil * ary.slice(range) -> new_ary or nil * * Element Reference --- Returns the element at +index+, or returns a * subarray starting at the +start+ index and continuing for +length+ * elements, or returns a subarray specified by +range+ of indices. * * Negative indices count backward from the end of the array (-1 is the last * element). For +start+ and +range+ cases the starting index is just before * an element. Additionally, an empty array is returned when the starting * index for an element range is at the end of the array. * * Returns +nil+ if the index (or starting index) are out of range. * * a = [ "a", "b", "c", "d", "e" ] * a[1] => "b" * a[1,2] => ["b", "c"] * a[1..-2] => ["b", "c", "d"] * */ static mrb_value mrb_ary_aget(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int i; mrb_int len, alen; mrb_value index; if (mrb_get_argc(mrb) == 1) { index = mrb_get_arg1(mrb); switch (mrb_type(index)) { /* a[n..m] */ case MRB_TT_RANGE: if (mrb_range_beg_len(mrb, index, &i, &len, ARY_LEN(a), TRUE) == MRB_RANGE_OK) { return ary_subseq(mrb, a, i, len); } else { return mrb_nil_value(); } case MRB_TT_INTEGER: return mrb_ary_ref(mrb, self, mrb_integer(index)); default: return mrb_ary_ref(mrb, self, aget_index(mrb, index)); } } mrb_get_args(mrb, "oi", &index, &len); i = aget_index(mrb, index); alen = ARY_LEN(a); if (i < 0) i += alen; if (i < 0 || alen < i) return mrb_nil_value(); if (len < 0) return mrb_nil_value(); if (alen == i) return mrb_ary_new(mrb); if (len > alen - i) len = alen - i; return ary_subseq(mrb, a, i, len); } /* * call-seq: * ary[index] = obj -> obj * ary[start, length] = obj or other_ary or nil -> obj or other_ary or nil * ary[range] = obj or other_ary or nil -> obj or other_ary or nil * * Element Assignment --- Sets the element at +index+, or replaces a subarray * from the +start+ index for +length+ elements, or replaces a subarray * specified by the +range+ of indices. * * If indices are greater than the current capacity of the array, the array * grows automatically. Elements are inserted into the array at +start+ if * +length+ is zero. * * Negative indices will count backward from the end of the array. For * +start+ and +range+ cases the starting index is just before an element. * * An IndexError is raised if a negative index points past the beginning of * the array. * * See also Array#push, and Array#unshift. * * a = Array.new * a[4] = "4"; #=> [nil, nil, nil, nil, "4"] * a[0, 3] = [ 'a', 'b', 'c' ] #=> ["a", "b", "c", nil, "4"] * a[1..2] = [ 1, 2 ] #=> ["a", 1, 2, nil, "4"] * a[0, 2] = "?" 
#=> ["?", 2, nil, "4"] * a[0..2] = "A" #=> ["A", "4"] * a[-1] = "Z" #=> ["A", "Z"] * a[1..-1] = nil #=> ["A", nil] * a[1..-1] = [] #=> ["A"] * a[0, 0] = [ 1, 2 ] #=> [1, 2, "A"] * a[3, 0] = "B" #=> [1, 2, "A", "B"] */ static mrb_value mrb_ary_aset(mrb_state *mrb, mrb_value self) { mrb_value v1, v2, v3; mrb_int i, len; ary_modify(mrb, mrb_ary_ptr(self)); if (mrb_get_argc(mrb) == 2) { const mrb_value *vs = mrb_get_argv(mrb); v1 = vs[0]; v2 = vs[1]; /* a[n..m] = v */ switch (mrb_range_beg_len(mrb, v1, &i, &len, RARRAY_LEN(self), FALSE)) { case MRB_RANGE_TYPE_MISMATCH: mrb_ary_set(mrb, self, aget_index(mrb, v1), v2); break; case MRB_RANGE_OK: mrb_ary_splice(mrb, self, i, len, v2); break; case MRB_RANGE_OUT: mrb_raisef(mrb, E_RANGE_ERROR, "%v out of range", v1); break; } return v2; } mrb_get_args(mrb, "ooo", &v1, &v2, &v3); /* a[n,m] = v */ mrb_ary_splice(mrb, self, aget_index(mrb, v1), aget_index(mrb, v2), v3); return v3; } static mrb_value mrb_ary_delete_at(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int index; mrb_value val; mrb_value *ptr; mrb_int len, alen; mrb_get_args(mrb, "i", &index); alen = ARY_LEN(a); if (index < 0) index += alen; if (index < 0 || alen <= index) return mrb_nil_value(); ary_modify(mrb, a); ptr = ARY_PTR(a); val = ptr[index]; ptr += index; len = alen - index; while (--len) { *ptr = *(ptr+1); ++ptr; } ARY_SET_LEN(a, alen-1); ary_shrink_capa(mrb, a); return val; } static mrb_value mrb_ary_first(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int size, alen; if (mrb_get_argc(mrb) == 0) { return (ARY_LEN(a) > 0)? ARY_PTR(a)[0]: mrb_nil_value(); } mrb_get_args(mrb, "|i", &size); if (size < 0) { mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array size"); } alen = ARY_LEN(a); if (size > alen) size = alen; if (ARY_SHARED_P(a)) { return ary_subseq(mrb, a, 0, size); } return mrb_ary_new_from_values(mrb, size, ARY_PTR(a)); } static mrb_value mrb_ary_last(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int n, size, alen; n = mrb_get_args(mrb, "|i", &size); alen = ARY_LEN(a); if (n == 0) { return (alen > 0) ? 
ARY_PTR(a)[alen - 1]: mrb_nil_value(); } if (size < 0) { mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array size"); } if (size > alen) size = alen; if (ARY_SHARED_P(a) || size > ARY_DEFAULT_LEN) { return ary_subseq(mrb, a, alen - size, size); } return mrb_ary_new_from_values(mrb, size, ARY_PTR(a) + alen - size); } static mrb_value mrb_ary_index_m(mrb_state *mrb, mrb_value self) { mrb_value obj = mrb_get_arg1(mrb); mrb_int i; for (i = 0; i < RARRAY_LEN(self); i++) { if (mrb_equal(mrb, RARRAY_PTR(self)[i], obj)) { return mrb_int_value(mrb, i); } } return mrb_nil_value(); } static mrb_value mrb_ary_rindex_m(mrb_state *mrb, mrb_value self) { mrb_value obj = mrb_get_arg1(mrb); mrb_int i, len; for (i = RARRAY_LEN(self) - 1; i >= 0; i--) { if (mrb_equal(mrb, RARRAY_PTR(self)[i], obj)) { return mrb_int_value(mrb, i); } if (i > (len = RARRAY_LEN(self))) { i = len; } } return mrb_nil_value(); } MRB_API mrb_value mrb_ary_splat(mrb_state *mrb, mrb_value v) { mrb_value ary; struct RArray *a; if (mrb_array_p(v)) { a = ary_dup(mrb, mrb_ary_ptr(v)); return mrb_obj_value(a); } if (!mrb_respond_to(mrb, v, MRB_SYM(to_a))) { return mrb_ary_new_from_values(mrb, 1, &v); } ary = mrb_funcall_id(mrb, v, MRB_SYM(to_a), 0); if (mrb_nil_p(ary)) { return mrb_ary_new_from_values(mrb, 1, &v); } mrb_ensure_array_type(mrb, ary); a = mrb_ary_ptr(ary); a = ary_dup(mrb, a); return mrb_obj_value(a); } static mrb_value mrb_ary_size(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); return mrb_int_value(mrb, ARY_LEN(a)); } MRB_API mrb_value mrb_ary_clear(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); ary_modify(mrb, a); if (ARY_SHARED_P(a)) { mrb_ary_decref(mrb, a->as.heap.aux.shared); ARY_UNSET_SHARED_FLAG(a); } else if (!ARY_EMBED_P(a)){ mrb_free(mrb, a->as.heap.ptr); } if (MRB_ARY_EMBED_LEN_MAX > 0) { ARY_SET_EMBED_LEN(a, 0); } else { a->as.heap.ptr = NULL; a->as.heap.aux.capa = 0; ARY_SET_LEN(a, 0); } return self; } static mrb_value mrb_ary_clear_m(mrb_state *mrb, mrb_value self) { return mrb_ary_clear(mrb, self); } static mrb_value mrb_ary_empty_p(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); return mrb_bool_value(ARY_LEN(a) == 0); } MRB_API mrb_value mrb_ary_entry(mrb_value ary, mrb_int n) { struct RArray *a = mrb_ary_ptr(ary); mrb_int len = ARY_LEN(a); /* range check */ if (n < 0) n += len; if (n < 0 || len <= n) return mrb_nil_value(); return ARY_PTR(a)[n]; } static mrb_value join_ary(mrb_state *mrb, mrb_value ary, mrb_value sep, mrb_value list) { mrb_int i; mrb_value result, val, tmp; /* check recursive */ for (i=0; i<RARRAY_LEN(list); i++) { if (mrb_obj_equal(mrb, ary, RARRAY_PTR(list)[i])) { mrb_raise(mrb, E_ARGUMENT_ERROR, "recursive array join"); } } mrb_ary_push(mrb, list, ary); result = mrb_str_new_capa(mrb, 64); for (i=0; i<RARRAY_LEN(ary); i++) { if (i > 0 && !mrb_nil_p(sep)) { mrb_str_cat_str(mrb, result, sep); } val = RARRAY_PTR(ary)[i]; switch (mrb_type(val)) { case MRB_TT_ARRAY: ary_join: val = join_ary(mrb, val, sep, list); /* fall through */ case MRB_TT_STRING: str_join: mrb_str_cat_str(mrb, result, val); break; default: if (!mrb_immediate_p(val)) { tmp = mrb_check_string_type(mrb, val); if (!mrb_nil_p(tmp)) { val = tmp; goto str_join; } tmp = mrb_check_array_type(mrb, val); if (!mrb_nil_p(tmp)) { val = tmp; goto ary_join; } } val = mrb_obj_as_string(mrb, val); goto str_join; } } mrb_ary_pop(mrb, list); return result; } MRB_API mrb_value mrb_ary_join(mrb_state *mrb, mrb_value ary, mrb_value sep) { if (!mrb_nil_p(sep)) { sep = 
mrb_obj_as_string(mrb, sep); } return join_ary(mrb, ary, sep, mrb_ary_new(mrb)); } /* * call-seq: * ary.join(sep="") -> str * * Returns a string created by converting each element of the array to * a string, separated by <i>sep</i>. * * [ "a", "b", "c" ].join #=> "abc" * [ "a", "b", "c" ].join("-") #=> "a-b-c" */ static mrb_value mrb_ary_join_m(mrb_state *mrb, mrb_value ary) { mrb_value sep = mrb_nil_value(); mrb_get_args(mrb, "|S!", &sep); return mrb_ary_join(mrb, ary, sep); } static mrb_value mrb_ary_eq(mrb_state *mrb, mrb_value ary1) { mrb_value ary2 = mrb_get_arg1(mrb); mrb->c->ci->mid = 0; if (mrb_obj_equal(mrb, ary1, ary2)) return mrb_true_value(); if (!mrb_array_p(ary2)) { return mrb_false_value(); } if (RARRAY_LEN(ary1) != RARRAY_LEN(ary2)) return mrb_false_value(); return ary2; } static mrb_value mrb_ary_cmp(mrb_state *mrb, mrb_value ary1) { mrb_value ary2 = mrb_get_arg1(mrb); mrb->c->ci->mid = 0; if (mrb_obj_equal(mrb, ary1, ary2)) return mrb_fixnum_value(0); if (!mrb_array_p(ary2)) { return mrb_nil_value(); } return ary2; } /* internal method to convert multi-value to single value */ static mrb_value mrb_ary_svalue(mrb_state *mrb, mrb_value ary) { switch (RARRAY_LEN(ary)) { case 0: return mrb_nil_value(); case 1: return RARRAY_PTR(ary)[0]; default: return ary; } } void mrb_init_array(mrb_state *mrb) { struct RClass *a; mrb->array_class = a = mrb_define_class(mrb, "Array", mrb->object_class); /* 15.2.12 */ MRB_SET_INSTANCE_TT(a, MRB_TT_ARRAY); mrb_define_class_method(mrb, a, "[]", mrb_ary_s_create, MRB_ARGS_ANY()); /* 15.2.12.4.1 */ mrb_define_method(mrb, a, "+", mrb_ary_plus, MRB_ARGS_REQ(1)); /* 15.2.12.5.1 */ mrb_define_method(mrb, a, "*", mrb_ary_times, MRB_ARGS_REQ(1)); /* 15.2.12.5.2 */ mrb_define_method(mrb, a, "<<", mrb_ary_push_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.3 */ mrb_define_method(mrb, a, "[]", mrb_ary_aget, MRB_ARGS_ARG(1,1)); /* 15.2.12.5.4 */ mrb_define_method(mrb, a, "[]=", mrb_ary_aset, MRB_ARGS_ARG(2,1)); /* 15.2.12.5.5 */ mrb_define_method(mrb, a, "clear", mrb_ary_clear_m, MRB_ARGS_NONE()); /* 15.2.12.5.6 */ mrb_define_method(mrb, a, "concat", mrb_ary_concat_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.8 */ mrb_define_method(mrb, a, "delete_at", mrb_ary_delete_at, MRB_ARGS_REQ(1)); /* 15.2.12.5.9 */ mrb_define_method(mrb, a, "empty?", mrb_ary_empty_p, MRB_ARGS_NONE()); /* 15.2.12.5.12 */ mrb_define_method(mrb, a, "first", mrb_ary_first, MRB_ARGS_OPT(1)); /* 15.2.12.5.13 */ mrb_define_method(mrb, a, "index", mrb_ary_index_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.14 */ mrb_define_method(mrb, a, "initialize_copy", mrb_ary_replace_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.16 */ mrb_define_method(mrb, a, "join", mrb_ary_join_m, MRB_ARGS_OPT(1)); /* 15.2.12.5.17 */ mrb_define_method(mrb, a, "last", mrb_ary_last, MRB_ARGS_OPT(1)); /* 15.2.12.5.18 */ mrb_define_method(mrb, a, "length", mrb_ary_size, MRB_ARGS_NONE()); /* 15.2.12.5.19 */ mrb_define_method(mrb, a, "pop", mrb_ary_pop, MRB_ARGS_NONE()); /* 15.2.12.5.21 */ mrb_define_method(mrb, a, "push", mrb_ary_push_m, MRB_ARGS_ANY()); /* 15.2.12.5.22 */ mrb_define_method(mrb, a, "replace", mrb_ary_replace_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.23 */ mrb_define_method(mrb, a, "reverse", mrb_ary_reverse, MRB_ARGS_NONE()); /* 15.2.12.5.24 */ mrb_define_method(mrb, a, "reverse!", mrb_ary_reverse_bang, MRB_ARGS_NONE()); /* 15.2.12.5.25 */ mrb_define_method(mrb, a, "rindex", mrb_ary_rindex_m, MRB_ARGS_REQ(1)); /* 15.2.12.5.26 */ mrb_define_method(mrb, a, "shift", mrb_ary_shift_m, MRB_ARGS_OPT(1)); /* 15.2.12.5.27 */ mrb_define_method(mrb, a, "size", 
mrb_ary_size, MRB_ARGS_NONE()); /* 15.2.12.5.28 */ mrb_define_method(mrb, a, "slice", mrb_ary_aget, MRB_ARGS_ARG(1,1)); /* 15.2.12.5.29 */ mrb_define_method(mrb, a, "unshift", mrb_ary_unshift_m, MRB_ARGS_ANY()); /* 15.2.12.5.30 */ mrb_define_method(mrb, a, "__ary_eq", mrb_ary_eq, MRB_ARGS_REQ(1)); mrb_define_method(mrb, a, "__ary_cmp", mrb_ary_cmp, MRB_ARGS_REQ(1)); mrb_define_method(mrb, a, "__ary_index", mrb_ary_index_m, MRB_ARGS_REQ(1)); /* kept for mruby-array-ext */ mrb_define_method(mrb, a, "__svalue", mrb_ary_svalue, MRB_ARGS_NONE()); }
mrb_ary_shift_m(mrb_state *mrb, mrb_value self) { struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); mrb_int n; mrb_value val; if (mrb_get_args(mrb, "|i", &n) == 0) { return mrb_ary_shift(mrb, self); }; ary_modify_check(mrb, a); if (len == 0 || n == 0) return mrb_ary_new(mrb); if (n < 0) mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array shift"); if (n > len) n = len; val = mrb_ary_new_from_values(mrb, n, ARY_PTR(a)); if (ARY_SHARED_P(a)) { L_SHIFT: a->as.heap.ptr+=n; a->as.heap.len-=n; return val; } if (len > ARY_SHIFT_SHARED_MIN) { ary_make_shared(mrb, a); goto L_SHIFT; } else if (len == n) { ARY_SET_LEN(a, 0); } else { mrb_value *ptr = ARY_PTR(a); mrb_int size = len-n; while (size--) { *ptr = *(ptr+n); ++ptr; } ARY_SET_LEN(a, len-n); } return val; }
mrb_ary_shift_m(mrb_state *mrb, mrb_value self) { mrb_int n; if (mrb_get_args(mrb, "|i", &n) == 0) { return mrb_ary_shift(mrb, self); } struct RArray *a = mrb_ary_ptr(self); mrb_int len = ARY_LEN(a); mrb_value val; ary_modify_check(mrb, a); if (len == 0 || n == 0) return mrb_ary_new(mrb); if (n < 0) mrb_raise(mrb, E_ARGUMENT_ERROR, "negative array shift"); if (n > len) n = len; val = mrb_ary_new_from_values(mrb, n, ARY_PTR(a)); if (ARY_SHARED_P(a)) { L_SHIFT: a->as.heap.ptr+=n; a->as.heap.len-=n; return val; } if (len > ARY_SHIFT_SHARED_MIN) { ary_make_shared(mrb, a); goto L_SHIFT; } else if (len == n) { ARY_SET_LEN(a, 0); } else { mrb_value *ptr = ARY_PTR(a); mrb_int size = len-n; while (size--) { *ptr = *(ptr+n); ++ptr; } ARY_SET_LEN(a, len-n); } return val; }
{'added': [(588, ' }'), (589, ''), (590, ' struct RArray *a = mrb_ary_ptr(self);'), (591, ' mrb_int len = ARY_LEN(a);'), (592, ' mrb_value val;'), (593, '')], 'deleted': [(584, ' struct RArray *a = mrb_ary_ptr(self);'), (585, ' mrb_int len = ARY_LEN(a);'), (587, ' mrb_value val;'), (591, ' };')]}
num_lines_added: 6
num_lines_deleted: 4
num_lines_in_file: 1,074
num_tokens_in_file: 7,307
num_lines_in_method: 38
num_tokens_in_method: 241
method_complexity: 10
repo: https://github.com/mruby/mruby
cve_id: CVE-2021-4188
cwe_id: CWE-476
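A note on reading the mruby record above: its diff field shows the whole fix — mrb_get_args() is moved ahead of the mrb_ary_ptr()/ARY_LEN() reads in mrb_ary_shift_m, and a stray `};` is dropped. The record itself says nothing about the failure mode beyond the CWE-476 tag, so the following is only a minimal, self-contained sketch of the general hazard class the reorder addresses (plain C with hypothetical names, not mruby code): caching a pointer and length derived from an object *before* a side-effecting call, instead of after it, and dereferencing without a NULL guard. Here refresh() stands in for the side-effecting call.

#include <stdio.h>
#include <stdlib.h>

typedef struct { int *data; int len; } buf_t;

/* Stand-in for a call that may replace or release the backing store. */
static void refresh(buf_t *b) {
    free(b->data);
    b->data = NULL;   /* any pointer cached before this call is now stale */
    b->len = 0;
}

/* Fixed ordering: side-effecting call first, then read current state. */
static int head_or(buf_t *b, int fallback) {
    refresh(b);
    if (b->data == NULL || b->len == 0)
        return fallback;             /* NULL guard instead of a crash */
    return b->data[0];
}

int main(void) {
    buf_t b;
    b.data = malloc(sizeof(int));
    if (b.data == NULL) return 1;
    b.data[0] = 42;
    b.len = 1;
    printf("%d\n", head_or(&b, -1)); /* prints -1: refresh() emptied b */
    return 0;
}

Had head_or() cached b->data and b->len before calling refresh(), it would have dereferenced freed memory — the same read-before-side-effect ordering the deleted lines in the diff exhibit.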
id: 976
file_name: opj_mj2_extract.c
programming_language: C
method_name: main
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2003-2007, Francois-Olivier Devaux * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "openjpeg.h" #include "cio.h" #include "j2k.h" #include "jp2.h" #include "mj2.h" /* -------------------------------------------------------------------------- */ /** sample error callback expecting a FILE* client object */ void error_callback(const char *msg, void *client_data) { FILE *stream = (FILE*)client_data; fprintf(stream, "[ERROR] %s", msg); } /** sample warning callback expecting a FILE* client object */ void warning_callback(const char *msg, void *client_data) { FILE *stream = (FILE*)client_data; fprintf(stream, "[WARNING] %s", msg); } /** sample debug callback expecting a FILE* client object */ void info_callback(const char *msg, void *client_data) { FILE *stream = (FILE*)client_data; fprintf(stream, "[INFO] %s", msg); } /* -------------------------------------------------------------------------- */ int main(int argc, char *argv[]) { opj_dinfo_t* dinfo; opj_event_mgr_t event_mgr; /* event manager */ int tnum; unsigned int snum; opj_mj2_t *movie; mj2_tk_t *track; mj2_sample_t *sample; unsigned char* frame_codestream; FILE *file, *outfile; char outfilename[50]; mj2_dparameters_t parameters; if (argc != 3) { printf("Usage: %s mj2filename output_location\n", argv[0]); printf("Example: %s foreman.mj2 output/foreman\n", argv[0]); return 1; } file = fopen(argv[1], "rb"); if (!file) { fprintf(stderr, "failed to open %s for reading\n", argv[1]); return 1; } /* configure the event callbacks (not required) setting of each callback is optional */ memset(&event_mgr, 0, sizeof(opj_event_mgr_t)); event_mgr.error_handler = error_callback; event_mgr.warning_handler = warning_callback; event_mgr.info_handler = info_callback; /* get a MJ2 decompressor handle */ dinfo = mj2_create_decompress(); /* catch events using our 
callbacks and give a local context */ opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr); /* setup the decoder decoding parameters using user parameters */ memset(&parameters, 0, sizeof(mj2_dparameters_t)); movie = (opj_mj2_t*) dinfo->mj2_handle; mj2_setup_decoder(movie, &parameters); if (mj2_read_struct(file, movie)) { /* Creating the movie structure*/ return 1; } /* Decode first video track */ tnum = 0; while (movie->tk[tnum].track_type != 0) { tnum ++; } track = &movie->tk[tnum]; fprintf(stdout, "Extracting %d frames from file...\n", track->num_samples); for (snum = 0; snum < track->num_samples; snum++) { sample = &track->sample[snum]; frame_codestream = (unsigned char*) malloc(sample->sample_size - 8); /* Skipping JP2C marker*/ fseek(file, sample->offset + 8, SEEK_SET); fread(frame_codestream, sample->sample_size - 8, 1, file); /* Assuming that jp and ftyp markers size do*/ sprintf(outfilename, "%s_%05d.j2k", argv[2], snum); outfile = fopen(outfilename, "wb"); if (!outfile) { fprintf(stderr, "failed to open %s for writing\n", outfilename); return 1; } fwrite(frame_codestream, sample->sample_size - 8, 1, outfile); fclose(outfile); free(frame_codestream); } fclose(file); fprintf(stdout, "%d frames correctly extracted\n", snum); /* free remaining structures */ if (dinfo) { mj2_destroy_decompress((opj_mj2_t*)dinfo->mj2_handle); } return 0; }
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2003-2007, Francois-Olivier Devaux * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "openjpeg.h" #include "cio.h" #include "j2k.h" #include "jp2.h" #include "mj2.h" /* -------------------------------------------------------------------------- */ /** sample error callback expecting a FILE* client object */ void error_callback(const char *msg, void *client_data) { FILE *stream = (FILE*)client_data; fprintf(stream, "[ERROR] %s", msg); } /** sample warning callback expecting a FILE* client object */ void warning_callback(const char *msg, void *client_data) { FILE *stream = (FILE*)client_data; fprintf(stream, "[WARNING] %s", msg); } /** sample debug callback expecting a FILE* client object */ void info_callback(const char *msg, void *client_data) { FILE *stream = (FILE*)client_data; fprintf(stream, "[INFO] %s", msg); } /* -------------------------------------------------------------------------- */ int main(int argc, char *argv[]) { opj_dinfo_t* dinfo; opj_event_mgr_t event_mgr; /* event manager */ int tnum; unsigned int snum; opj_mj2_t *movie; mj2_tk_t *track; mj2_sample_t *sample; unsigned char* frame_codestream; FILE *file, *outfile; char outfilename[50]; mj2_dparameters_t parameters; if (argc != 3) { printf("Usage: %s mj2filename output_location\n", argv[0]); printf("Example: %s foreman.mj2 output/foreman\n", argv[0]); return 1; } file = fopen(argv[1], "rb"); if (!file) { fprintf(stderr, "failed to open %s for reading\n", argv[1]); return 1; } /* configure the event callbacks (not required) setting of each callback is optional */ memset(&event_mgr, 0, sizeof(opj_event_mgr_t)); event_mgr.error_handler = error_callback; event_mgr.warning_handler = warning_callback; event_mgr.info_handler = info_callback; /* get a MJ2 decompressor handle */ dinfo = mj2_create_decompress(); /* catch events using our 
callbacks and give a local context */ opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr); /* setup the decoder decoding parameters using user parameters */ memset(&parameters, 0, sizeof(mj2_dparameters_t)); movie = (opj_mj2_t*) dinfo->mj2_handle; mj2_setup_decoder(movie, &parameters); if (mj2_read_struct(file, movie)) { /* Creating the movie structure*/ return 1; } /* Decode first video track */ tnum = 0; while (movie->tk[tnum].track_type != 0) { tnum ++; } track = &movie->tk[tnum]; fprintf(stdout, "Extracting %d frames from file...\n", track->num_samples); for (snum = 0; snum < track->num_samples; snum++) { sample = &track->sample[snum]; frame_codestream = (unsigned char*) malloc(sample->sample_size - 8); /* Skipping JP2C marker*/ fseek(file, sample->offset + 8, SEEK_SET); fread(frame_codestream, sample->sample_size - 8, 1, file); /* Assuming that jp and ftyp markers size do*/ { int num = snprintf(outfilename, sizeof(outfilename), "%s_%05d.j2k", argv[2], snum); if (num >= sizeof(outfilename)) { fprintf(stderr, "maximum length of output prefix exceeded\n"); free(frame_codestream); return 1; } } outfile = fopen(outfilename, "wb"); if (!outfile) { fprintf(stderr, "failed to open %s for writing\n", outfilename); free(frame_codestream); return 1; } fwrite(frame_codestream, sample->sample_size - 8, 1, outfile); fclose(outfile); free(frame_codestream); } fclose(file); fprintf(stdout, "%d frames correctly extracted\n", snum); /* free remaining structures */ if (dinfo) { mj2_destroy_decompress((opj_mj2_t*)dinfo->mj2_handle); } return 0; }
int main(int argc, char *argv[]) { opj_dinfo_t* dinfo; opj_event_mgr_t event_mgr; /* event manager */ int tnum; unsigned int snum; opj_mj2_t *movie; mj2_tk_t *track; mj2_sample_t *sample; unsigned char* frame_codestream; FILE *file, *outfile; char outfilename[50]; mj2_dparameters_t parameters; if (argc != 3) { printf("Usage: %s mj2filename output_location\n", argv[0]); printf("Example: %s foreman.mj2 output/foreman\n", argv[0]); return 1; } file = fopen(argv[1], "rb"); if (!file) { fprintf(stderr, "failed to open %s for reading\n", argv[1]); return 1; } /* configure the event callbacks (not required) setting of each callback is optional */ memset(&event_mgr, 0, sizeof(opj_event_mgr_t)); event_mgr.error_handler = error_callback; event_mgr.warning_handler = warning_callback; event_mgr.info_handler = info_callback; /* get a MJ2 decompressor handle */ dinfo = mj2_create_decompress(); /* catch events using our callbacks and give a local context */ opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr); /* setup the decoder decoding parameters using user parameters */ memset(&parameters, 0, sizeof(mj2_dparameters_t)); movie = (opj_mj2_t*) dinfo->mj2_handle; mj2_setup_decoder(movie, &parameters); if (mj2_read_struct(file, movie)) { /* Creating the movie structure*/ return 1; } /* Decode first video track */ tnum = 0; while (movie->tk[tnum].track_type != 0) { tnum ++; } track = &movie->tk[tnum]; fprintf(stdout, "Extracting %d frames from file...\n", track->num_samples); for (snum = 0; snum < track->num_samples; snum++) { sample = &track->sample[snum]; frame_codestream = (unsigned char*) malloc(sample->sample_size - 8); /* Skipping JP2C marker*/ fseek(file, sample->offset + 8, SEEK_SET); fread(frame_codestream, sample->sample_size - 8, 1, file); /* Assuming that jp and ftyp markers size do*/ sprintf(outfilename, "%s_%05d.j2k", argv[2], snum); outfile = fopen(outfilename, "wb"); if (!outfile) { fprintf(stderr, "failed to open %s for writing\n", outfilename); return 1; } fwrite(frame_codestream, sample->sample_size - 8, 1, outfile); fclose(outfile); free(frame_codestream); } fclose(file); fprintf(stdout, "%d frames correctly extracted\n", snum); /* free remaining structures */ if (dinfo) { mj2_destroy_decompress((opj_mj2_t*)dinfo->mj2_handle); } return 0; }
int main(int argc, char *argv[]) { opj_dinfo_t* dinfo; opj_event_mgr_t event_mgr; /* event manager */ int tnum; unsigned int snum; opj_mj2_t *movie; mj2_tk_t *track; mj2_sample_t *sample; unsigned char* frame_codestream; FILE *file, *outfile; char outfilename[50]; mj2_dparameters_t parameters; if (argc != 3) { printf("Usage: %s mj2filename output_location\n", argv[0]); printf("Example: %s foreman.mj2 output/foreman\n", argv[0]); return 1; } file = fopen(argv[1], "rb"); if (!file) { fprintf(stderr, "failed to open %s for reading\n", argv[1]); return 1; } /* configure the event callbacks (not required) setting of each callback is optional */ memset(&event_mgr, 0, sizeof(opj_event_mgr_t)); event_mgr.error_handler = error_callback; event_mgr.warning_handler = warning_callback; event_mgr.info_handler = info_callback; /* get a MJ2 decompressor handle */ dinfo = mj2_create_decompress(); /* catch events using our callbacks and give a local context */ opj_set_event_mgr((opj_common_ptr)dinfo, &event_mgr, stderr); /* setup the decoder decoding parameters using user parameters */ memset(&parameters, 0, sizeof(mj2_dparameters_t)); movie = (opj_mj2_t*) dinfo->mj2_handle; mj2_setup_decoder(movie, &parameters); if (mj2_read_struct(file, movie)) { /* Creating the movie structure*/ return 1; } /* Decode first video track */ tnum = 0; while (movie->tk[tnum].track_type != 0) { tnum ++; } track = &movie->tk[tnum]; fprintf(stdout, "Extracting %d frames from file...\n", track->num_samples); for (snum = 0; snum < track->num_samples; snum++) { sample = &track->sample[snum]; frame_codestream = (unsigned char*) malloc(sample->sample_size - 8); /* Skipping JP2C marker*/ fseek(file, sample->offset + 8, SEEK_SET); fread(frame_codestream, sample->sample_size - 8, 1, file); /* Assuming that jp and ftyp markers size do*/ { int num = snprintf(outfilename, sizeof(outfilename), "%s_%05d.j2k", argv[2], snum); if (num >= sizeof(outfilename)) { fprintf(stderr, "maximum length of output prefix exceeded\n"); free(frame_codestream); return 1; } } outfile = fopen(outfilename, "wb"); if (!outfile) { fprintf(stderr, "failed to open %s for writing\n", outfilename); free(frame_codestream); return 1; } fwrite(frame_codestream, sample->sample_size - 8, 1, outfile); fclose(outfile); free(frame_codestream); } fclose(file); fprintf(stdout, "%d frames correctly extracted\n", snum); /* free remaining structures */ if (dinfo) { mj2_destroy_decompress((opj_mj2_t*)dinfo->mj2_handle); } return 0; }
{'added': [(143, ' {'), (144, ' int num = snprintf(outfilename, sizeof(outfilename),'), (145, ' "%s_%05d.j2k", argv[2],'), (146, ' snum);'), (147, ' if (num >= sizeof(outfilename)) {'), (148, ' fprintf(stderr, "maximum length of output prefix exceeded\\n");'), (149, ' free(frame_codestream);'), (150, ' return 1;'), (151, ' }'), (152, ' }'), (153, ''), (157, ' free(frame_codestream);')], 'deleted': [(143, ' sprintf(outfilename, "%s_%05d.j2k", argv[2], snum);')]}
num_lines_added: 12
num_lines_deleted: 1
num_lines_in_file: 98
num_tokens_in_file: 592
num_lines_in_method: 65
num_tokens_in_method: 435
method_complexity: 8
repo: https://github.com/uclouvain/openjpeg
cve_id: CVE-2018-7648
cwe_id: CWE-119
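The single deleted line in the openjpeg record above is an unbounded sprintf() into the 50-byte outfilename buffer; the replacement uses snprintf(), rejects the name when the formatted result would not fit (the CWE-119 concern), and also frees frame_codestream on the new early-return paths to avoid leaking it. A minimal standalone sketch of the same truncation check — generic C, not the openjpeg sources; make_name() is a hypothetical helper:

#include <stdio.h>

/* snprintf() returns the length the formatted string *would* have had,
 * so a return value >= the buffer size means the output was truncated
 * and a fixed-size buffer would have overflowed under sprintf(). */
static int make_name(char *buf, size_t bufsize, const char *prefix, int idx) {
    int n = snprintf(buf, bufsize, "%s_%05d.j2k", prefix, idx);
    if (n < 0 || (size_t)n >= bufsize)
        return -1;   /* encoding error, or prefix too long: reject */
    return 0;
}

int main(void) {
    char outfilename[50];
    if (make_name(outfilename, sizeof(outfilename), "output/foreman", 3) != 0) {
        fprintf(stderr, "maximum length of output prefix exceeded\n");
        return 1;
    }
    puts(outfilename);   /* output/foreman_00003.j2k */
    return 0;
}

One design note: the patch in the record compares snprintf()'s int return directly against sizeof(outfilename), which works but mixes signed and unsigned operands; the explicit cast above expresses the same check without the implicit conversion.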
id: 2,433
file_name: activations.cc
programming_language: C++
method_name: tflite::ops::builtin::activations::SigmoidEval
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stddef.h> #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" #include "tensorflow/lite/kernels/internal/reference/logistic.h" #include "tensorflow/lite/kernels/internal/reference/prelu.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/reference/tanh.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #if __aarch64__ && __clang__ #include <arm_neon.h> #endif namespace tflite { namespace ops { namespace builtin { namespace activations { // TODO(b/142762739): We should figure out a multi-threading plan for most of // the activation ops below. 
enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized, }; struct OpData { int32_t input_multiplier = 0; int input_left_shift = 0; int32_t input_range_radius = 0; int diff_min = 0; uint8_t table[256] = {0}; }; struct SoftmaxOpData { struct SoftmaxParams params = {}; float table[256]; #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT uint8_t uint8_table1[256]; uint8_t uint8_table2[256]; #endif static constexpr int kInt16LUTArraySize = 513; int16_t exp_lut[kInt16LUTArraySize]; // int16 LUT for exp(x), where x uniform // distributed between [-10.0 , 0.0] int16_t one_over_one_plus_x_lut[kInt16LUTArraySize]; // int16 LUT for 1 / // (1 + x), where x // uniform distributed // between [0.0 , 1.0] }; struct LogSoftmaxOpData : public OpData { int32_t reverse_scaling_divisor = 0; int32_t reverse_scaling_right_shift = 0; struct SoftmaxParams params = {}; float f_table[256]; }; struct LeakyReluOpData : public OpData { int32_t output_multiplier_alpha = 0; int32_t output_shift_alpha = 0; int32_t output_multiplier_identity = 0; int32_t output_shift_identity = 0; }; struct PreluOpData : public OpData { int32_t output_multiplier_1 = 0; int32_t output_shift_1 = 0; int32_t output_multiplier_2 = 0; int32_t output_shift_2 = 0; bool requires_broadcast; }; struct HardSwishData { HardSwishParams params; }; struct ReluOpData : public OpData { int32_t output_multiplier = 0; int output_shift = 0; }; namespace { TfLiteStatus CheckOutputQuantParams(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* output) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); } return kTfLiteOk; } template <typename T> void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output, const std::function<float(float)>& transform) { static_assert(sizeof(T) == 1, "Lookup table valid only for 8bit"); const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits<T>::max(); int32_t minval = std::numeric_limits<T>::min(); for (int32_t val = minval; val <= maxval; ++val) { const float dequantized = input->params.scale * (val - input->params.zero_point); const float transformed = transform(dequantized); const float rescaled = std::round(transformed * inverse_scale); const int32_t quantized = static_cast<int32_t>(rescaled + output->params.zero_point); data->table[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>( static_cast<T>(std::max(std::min(maxval, quantized), minval))); } } // TODO(b/143696793): move this to optimized_ops. void EvalUsingLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); uint8_t* output_data = GetTensorData<uint8_t>(output); const uint8_t* input_data = GetTensorData<uint8_t>(input); int i = 0; #if __aarch64__ && __clang__ // This code uses ARM64-only instructions. // TODO(b/143709993): Port to ARMv7 // Load the tables into registers. (4*4 128-bit registers) uint8x16x4_t table[4]; table[0] = vld1q_u8_x4(data->table + 16 * 4 * 0); table[1] = vld1q_u8_x4(data->table + 16 * 4 * 1); table[2] = vld1q_u8_x4(data->table + 16 * 4 * 2); table[3] = vld1q_u8_x4(data->table + 16 * 4 * 3); // Vectorized loop; process uint8x16_t (16 elements) at a time. 
constexpr int vectorized_16_loop_step = 16; const int vectorized_16_loop_end = size / vectorized_16_loop_step * vectorized_16_loop_step; for (; i < vectorized_16_loop_end; i += vectorized_16_loop_step) { uint8x16_t input = vld1q_u8(input_data + i); uint8x16_t output = optimized_ops::aarch64_lookup_vector(table, input); vst1q_u8(output_data + i, output); } // Postamble and non-ARM64 code: simple for loop. #endif for (; i < size; ++i) { output_data[i] = data->table[input_data[i]]; } } template <typename T> void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input, TfLiteTensor* output, const ReluOpData* data) { ReluParams params; params.quantized_activation_min = std::max(static_cast<int32_t>(std::numeric_limits<T>::min()), output->params.zero_point + static_cast<int32>(roundf(act_min / output->params.scale))); params.quantized_activation_max = act_max == std::numeric_limits<float>::infinity() ? static_cast<int32_t>(std::numeric_limits<T>::max()) : std::min( static_cast<int32_t>(std::numeric_limits<T>::max()), output->params.zero_point + static_cast<int32>(roundf(act_max / output->params.scale))); params.input_offset = input->params.zero_point; params.output_offset = output->params.zero_point; params.output_multiplier = data->output_multiplier; params.output_shift = data->output_shift; optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } } // namespace void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). return new OpData; } void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new SoftmaxOpData; } void SoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<SoftmaxOpData*>(buffer); } void* LogSoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new LogSoftmaxOpData; } void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { return new PreluOpData; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } void LogSoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LogSoftmaxOpData*>(buffer); } void PreluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<PreluOpData*>(buffer); } void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { return new HardSwishData; } TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new ReluOpData; } void ReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<ReluOpData*>(buffer); } TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); 
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new LeakyReluOpData; } void LeakyReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LeakyReluOpData*>(buffer); } void HardSwishFree(TfLiteContext* context, void* buffer) { delete static_cast<HardSwishData*>(buffer); } TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_STATUS(GenericPrepare(context, node)); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); HardSwishParams* params = &data->params; const TfLiteTensor* input = GetInput(context, node, 0); params->input_zero_point = input->params.zero_point; params->output_zero_point = output->params.zero_point; const float input_scale = input->params.scale; const float hires_input_scale = (1.0f / 128.0f) * input_scale; const float reluish_scale = 3.0f / 32768.0f; const float output_scale = output->params.scale; const float output_multiplier = hires_input_scale / output_scale; int32_t output_multiplier_fixedpoint_int32; QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32, &params->output_multiplier_exponent); DownScaleInt32ToInt16Multiplier( output_multiplier_fixedpoint_int32, &params->output_multiplier_fixedpoint_int16); TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0); const float reluish_multiplier = hires_input_scale / reluish_scale; int32_t reluish_multiplier_fixedpoint_int32; QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_exponent); DownScaleInt32ToInt16Multiplier( reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_fixedpoint_int16); } return kTfLiteOk; } TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); double alpha_multiplier = input->params.scale * params->alpha / output->params.scale; QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, &data->output_shift_alpha); double identity_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, &data->output_shift_identity); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); 
TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { PopulateLookupTable<uint8_t>( data, input, output, [](float value) { return std::tanh(value); }); } else if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return std::tanh(value); }); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // These operators are implemented in fixed-point arithmetic, // which intrinsically wants symmetric ranges (zero_point==0) // and power-of-two scales (power-of-two is abbreviated below as POT). // While more general support would be possible by means of rescaling, // that would add some overhead and some loss of accuracy and wouldn't // be used at the moment as current quantized LSTM applications are // happy with symmetric, power-of-two-scales quantization. So we just // implement that narrow case only for now. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0 || data->input_left_shift == 1); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. / 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. data->params.uint8_table1 = data->uint8_table1; data->params.uint8_table2 = data->uint8_table2; optimized_ops::PopulateSoftmaxUInt8LookupTable( &data->params, input->params.scale, params->beta); break; #endif case kTfLiteInt16: default: data->params.table = data->table; optimized_ops::PopulateSoftmaxLookupTable( &data->params, input->params.scale, params->beta); } data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); data->params.exp_lut = data->exp_lut; // exp LUT only used on nagative values // we consider exp(-10.0) is insignificant to accumulation gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0, data->params.exp_lut, data->kInt16LUTArraySize); data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut; gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0, data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; double input_scale_beta_rescale = input->params.scale * params->beta / (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] // correspond to [-10.0, 0.0] QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier, &data->params.input_left_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256); static const double kBeta = 1.0; if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255); data->params.table = data->f_table; 
optimized_ops::PopulateSoftmaxLookupTable(&data->params, input->params.scale, kBeta); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127); static const int kScaledDiffIntegerBits = 5; tflite::PreprocessLogSoftmaxScalingExp( kBeta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift, &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift); data->reverse_scaling_right_shift *= -1; data->diff_min = -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, data->input_left_shift); } } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type); output->type = input->type; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { // prelu(x) = x if x >= 0 else x * alpha. // So if we translate that for quantized computation: // // input_float = (input_q - input_zp) * input_scale // output_float = (output_q - output_zp) * output_scale // alpha_float = (alpha_q - alpha_zp) * alpha_scale // // When input_q - input_zp >= 0: // ouput_q = (input_q - input_zp) * input_scale / output_scale + output_q // else: // output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale // * alpha_scale / output_scale + output_q // // So for input_q - input_zp >= 0: // output real multiplier 1 is input_scale / output_scale; // for input_q - input_zp < 0: // output real multiplier 2 is input_scale * alpha_scale/ output_scale. double real_multiplier_1 = input->params.scale / output->params.scale; double real_multiplier_2 = input->params.scale * alpha->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1, &data->output_shift_1); QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2, &data->output_shift_2); } data->requires_broadcast = !HaveSameShapes(input, alpha); // PRelu (parameteric Relu) shares the same alpha value on "shared axis". // This means it's always required to "broadcast" alpha values in PRelu. TfLiteIntArray* output_size = nullptr; TF_LITE_ENSURE_OK( context, CalculateShapeForBroadcast(context, input, alpha, &output_size)); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // After broadcasting, the output shape should always be the same as the // input shape. TF_LITE_ENSURE(context, HaveSameShapes(input, output)); return kTfLiteOk; } TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. 
case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(-1, 1, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int8 supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f); return kTfLiteOk; } break; case kTfLiteUInt8: QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; 
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

template <KernelType kernel_type>
TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      } else {
        optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input),
                            GetTensorShape(output),
                            GetTensorData<float>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      TanhParams params;
      params.input_left_shift = data->input_left_shift;
      if (kernel_type == kReference || (data->input_multiplier > 0)) {
        reference_integer_ops::Tanh(
            data->input_multiplier, data->input_left_shift,
            GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      } else {
        optimized_ops::Tanh(
            params, GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      if (kernel_type == kFixedPointOptimized) {
        TanhParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Tanh16bitPrecision(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        EvalUsingLookupTable(data, input, output);
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      if (kernel_type == kFixedPointOptimized) {
        TanhParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Tanh16bitPrecision(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        EvalUsingLookupTable(data, input, output);
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int16 and int8 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

// Sigmoid is also known as "Logistic".
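// For the 8-bit cases in SigmoidEval below, SigmoidPrepare has already baked
// logistic(x) = 1 / (1 + exp(-x)) into the 256-entry table in OpData, so
// evaluation is a single table lookup per element (EvalUsingLookupTable);
// the kFixedPointOptimized variant instead rescales inputs into 16-bit fixed
// point and calls Logistic16bitPrecision.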
template <KernelType kernel_type> TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, TfLiteSoftmaxParams* params) { SoftmaxParams op_params; op_params.beta = params->beta; optimized_ops::Softmax(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } template <typename In, typename Out> TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<In>(input), GetTensorShape(output), GetTensorData<Out>(output)); return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return 
kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) { reference_ops::SoftmaxInt16( data->params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Only 1D, 2D, 3D and 4D tensors supported for int16 " "input with int16 output, got %dD.", NumDimensions(input)); return kTfLiteError; } } TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), 
GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); TfLiteTensor* output = GetOutput(context, node, 0); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), 
            GetTensorData<uint8_t>(input), GetTensorShape(alpha),
            GetTensorData<uint8_t>(alpha), GetTensorShape(output),
            GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      PreluParams op_params;
      op_params.input_offset = -input->params.zero_point;
      op_params.alpha_offset = -alpha->params.zero_point;
      op_params.output_offset = output->params.zero_point;
      op_params.output_multiplier_1 = data->output_multiplier_1;
      op_params.output_shift_1 = data->output_shift_1;
      op_params.output_multiplier_2 = data->output_multiplier_2;
      op_params.output_shift_2 = data->output_shift_2;
      if (data->requires_broadcast) {
        reference_ops::BroadcastPrelu4DSlow(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        reference_ops::Prelu(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

template <typename T>
void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output,
                       const LeakyReluOpData* data) {
  LeakyReluParams op_params;
  op_params.input_offset = input->params.zero_point;
  op_params.output_offset = output->params.zero_point;
  op_params.output_multiplier_alpha = data->output_multiplier_alpha;
  op_params.output_shift_alpha = data->output_shift_alpha;
  op_params.output_multiplier_identity = data->output_multiplier_identity;
  op_params.output_shift_identity = data->output_shift_identity;
  reference_ops::QuantizeLeakyRelu(
      op_params, GetTensorShape(input), GetTensorData<T>(input),
      GetTensorShape(output), GetTensorData<T>(output));
}

TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const auto* params =
      reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data);
  const LeakyReluOpData* data =
      reinterpret_cast<LeakyReluOpData*>(node->user_data);
  LeakyReluParams op_params;
  switch (input->type) {
    case kTfLiteFloat32: {
      op_params.alpha = params->alpha;
      optimized_ops::LeakyRelu(
          op_params, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      QuantizeLeakyRelu<uint8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      QuantizeLeakyRelu<int8_t>(input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt16: {
      QuantizeLeakyRelu<int16_t>(input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, int8, int16 and uint8 are supported currently, "
          "got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  // Use LUT to handle quantized elu path.
  if (input->type == kTfLiteInt8) {
    PopulateLookupTable<int8_t>(data, input, output, [](float value) {
      return value < 0.0 ?
std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); } TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } } // namespace activations TfLiteRegistration* Register_ELU() { static TfLiteRegistration r = {activations::Init, activations::Free, activations::EluPrepare, activations::EluEval}; return &r; } TfLiteRegistration* Register_RELU() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::ReluEval}; return &r; } TfLiteRegistration* Register_RELU_N1_TO_1() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu1Eval}; return &r; } TfLiteRegistration* Register_RELU6() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu6Eval}; return &r; } TfLiteRegistration* Register_TANH_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kReference>, activations::TanhEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_TANH_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kGenericOptimized>, activations::TanhEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kFixedPointOptimized>, activations::TanhEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_TANH() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. return Register_TANH_GENERIC_OPT(); } TfLiteRegistration* Register_LOGISTIC_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kReference>, activations::SigmoidEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kGenericOptimized>, activations::SigmoidEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kFixedPointOptimized>, activations::SigmoidEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
return Register_LOGISTIC_GENERIC_OPT(); } TfLiteRegistration* Register_SOFTMAX() { static TfLiteRegistration r = { activations::SoftmaxInit, activations::SoftmaxFree, activations::SoftmaxPrepare, activations::SoftmaxEval}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX_REF() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_PRELU_REF() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_PRELU() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LEAKY_RELU() { static TfLiteRegistration r = { activations::LeakyReluInit, activations::LeakyReluFree, activations::LeakyReluPrepare, activations::LeakyReluEval}; return &r; } TfLiteRegistration* Register_HARD_SWISH() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_HARD_SWISH_REF() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kReference>}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
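// ============================================================================
// What follows appears to be the same kernel source again, in hardened form:
// the unchecked GetInput/GetOutput calls above are replaced with
// GetInputSafe/GetOutputSafe, which return a TfLiteStatus and hand the tensor
// back through an out-parameter instead of returning a possibly-null pointer.
// A minimal sketch of the pattern (helpers per
// tensorflow/lite/kernels/kernel_util.h):
//
//   const TfLiteTensor* input;
//   TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
//   // 'input' is non-null past this point; on failure the enclosing
//   // function has already returned kTfLiteError.
// ============================================================================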
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stddef.h> #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" #include "tensorflow/lite/kernels/internal/reference/logistic.h" #include "tensorflow/lite/kernels/internal/reference/prelu.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/reference/tanh.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #if __aarch64__ && __clang__ #include <arm_neon.h> #endif namespace tflite { namespace ops { namespace builtin { namespace activations { // TODO(b/142762739): We should figure out a multi-threading plan for most of // the activation ops below. 
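// The kernels below come in up to three flavors, selected at registration
// time through the KernelType template parameter: kReference (portable
// reference_ops), kGenericOptimized (optimized_ops, typically LUT-based for
// the 8-bit types), and kFixedPointOptimized (16-bit fixed-point arithmetic).
// See the Register_* functions at the end of the file.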
enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized, }; struct OpData { int32_t input_multiplier = 0; int input_left_shift = 0; int32_t input_range_radius = 0; int diff_min = 0; uint8_t table[256] = {0}; }; struct SoftmaxOpData { struct SoftmaxParams params = {}; float table[256]; #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT uint8_t uint8_table1[256]; uint8_t uint8_table2[256]; #endif static constexpr int kInt16LUTArraySize = 513; int16_t exp_lut[kInt16LUTArraySize]; // int16 LUT for exp(x), where x uniform // distributed between [-10.0 , 0.0] int16_t one_over_one_plus_x_lut[kInt16LUTArraySize]; // int16 LUT for 1 / // (1 + x), where x // uniform distributed // between [0.0 , 1.0] }; struct LogSoftmaxOpData : public OpData { int32_t reverse_scaling_divisor = 0; int32_t reverse_scaling_right_shift = 0; struct SoftmaxParams params = {}; float f_table[256]; }; struct LeakyReluOpData : public OpData { int32_t output_multiplier_alpha = 0; int32_t output_shift_alpha = 0; int32_t output_multiplier_identity = 0; int32_t output_shift_identity = 0; }; struct PreluOpData : public OpData { int32_t output_multiplier_1 = 0; int32_t output_shift_1 = 0; int32_t output_multiplier_2 = 0; int32_t output_shift_2 = 0; bool requires_broadcast; }; struct HardSwishData { HardSwishParams params; }; struct ReluOpData : public OpData { int32_t output_multiplier = 0; int output_shift = 0; }; namespace { TfLiteStatus CheckOutputQuantParams(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* output) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); } return kTfLiteOk; } template <typename T> void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output, const std::function<float(float)>& transform) { static_assert(sizeof(T) == 1, "Lookup table valid only for 8bit"); const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits<T>::max(); int32_t minval = std::numeric_limits<T>::min(); for (int32_t val = minval; val <= maxval; ++val) { const float dequantized = input->params.scale * (val - input->params.zero_point); const float transformed = transform(dequantized); const float rescaled = std::round(transformed * inverse_scale); const int32_t quantized = static_cast<int32_t>(rescaled + output->params.zero_point); data->table[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>( static_cast<T>(std::max(std::min(maxval, quantized), minval))); } } // TODO(b/143696793): move this to optimized_ops. void EvalUsingLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); uint8_t* output_data = GetTensorData<uint8_t>(output); const uint8_t* input_data = GetTensorData<uint8_t>(input); int i = 0; #if __aarch64__ && __clang__ // This code uses ARM64-only instructions. // TODO(b/143709993): Port to ARMv7 // Load the tables into registers. (4*4 128-bit registers) uint8x16x4_t table[4]; table[0] = vld1q_u8_x4(data->table + 16 * 4 * 0); table[1] = vld1q_u8_x4(data->table + 16 * 4 * 1); table[2] = vld1q_u8_x4(data->table + 16 * 4 * 2); table[3] = vld1q_u8_x4(data->table + 16 * 4 * 3); // Vectorized loop; process uint8x16_t (16 elements) at a time. 
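  // Each uint8x16x4_t above holds one 64-byte quarter of the 256-entry
  // table, so the four registers together cover every possible input byte;
  // aarch64_lookup_vector can then translate 16 inputs per iteration using
  // the ARMv8 TBL-family table-lookup instructions (which is why this block
  // is gated on __aarch64__ && __clang__).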
constexpr int vectorized_16_loop_step = 16; const int vectorized_16_loop_end = size / vectorized_16_loop_step * vectorized_16_loop_step; for (; i < vectorized_16_loop_end; i += vectorized_16_loop_step) { uint8x16_t input = vld1q_u8(input_data + i); uint8x16_t output = optimized_ops::aarch64_lookup_vector(table, input); vst1q_u8(output_data + i, output); } // Postamble and non-ARM64 code: simple for loop. #endif for (; i < size; ++i) { output_data[i] = data->table[input_data[i]]; } } template <typename T> void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input, TfLiteTensor* output, const ReluOpData* data) { ReluParams params; params.quantized_activation_min = std::max(static_cast<int32_t>(std::numeric_limits<T>::min()), output->params.zero_point + static_cast<int32>(roundf(act_min / output->params.scale))); params.quantized_activation_max = act_max == std::numeric_limits<float>::infinity() ? static_cast<int32_t>(std::numeric_limits<T>::max()) : std::min( static_cast<int32_t>(std::numeric_limits<T>::max()), output->params.zero_point + static_cast<int32>(roundf(act_max / output->params.scale))); params.input_offset = input->params.zero_point; params.output_offset = output->params.zero_point; params.output_multiplier = data->output_multiplier; params.output_shift = data->output_shift; optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } } // namespace void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). return new OpData; } void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new SoftmaxOpData; } void SoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<SoftmaxOpData*>(buffer); } void* LogSoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new LogSoftmaxOpData; } void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { return new PreluOpData; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } void LogSoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LogSoftmaxOpData*>(buffer); } void PreluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<PreluOpData*>(buffer); } void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { return new HardSwishData; } TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new ReluOpData; } void ReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<ReluOpData*>(buffer); } TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; 
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new LeakyReluOpData; } void LeakyReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LeakyReluOpData*>(buffer); } void HardSwishFree(TfLiteContext* context, void* buffer) { delete static_cast<HardSwishData*>(buffer); } TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_STATUS(GenericPrepare(context, node)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); HardSwishParams* params = &data->params; const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); params->input_zero_point = input->params.zero_point; params->output_zero_point = output->params.zero_point; const float input_scale = input->params.scale; const float hires_input_scale = (1.0f / 128.0f) * input_scale; const float reluish_scale = 3.0f / 32768.0f; const float output_scale = output->params.scale; const float output_multiplier = hires_input_scale / output_scale; int32_t output_multiplier_fixedpoint_int32; QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32, &params->output_multiplier_exponent); DownScaleInt32ToInt16Multiplier( output_multiplier_fixedpoint_int32, &params->output_multiplier_fixedpoint_int16); TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0); const float reluish_multiplier = hires_input_scale / reluish_scale; int32_t reluish_multiplier_fixedpoint_int32; QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_exponent); DownScaleInt32ToInt16Multiplier( reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_fixedpoint_int16); } return kTfLiteOk; } TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); double alpha_multiplier = input->params.scale * params->alpha / output->params.scale; QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, &data->output_shift_alpha); double identity_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, &data->output_shift_identity); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } 
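// LeakyReluPrepare above mirrors the PRelu math with a scalar alpha: at eval
// time the quantized kernel applies output_multiplier_identity
// (input_scale / output_scale) where x >= 0 and output_multiplier_alpha
// (input_scale * alpha / output_scale) where x < 0, so no per-element float
// arithmetic is needed.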
template <KernelType kernel_type>
TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (kernel_type == kFixedPointOptimized) {
    if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
      static constexpr int kInputIntegerBits = 4;

      const double input_real_multiplier =
          input->params.scale *
          static_cast<double>(1 << (15 - kInputIntegerBits));

      const double q =
          std::frexp(input_real_multiplier, &data->input_left_shift);
      auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15)));
      data->input_multiplier = static_cast<int16_t>(q_fixed);

      int16_t input_range_radius =
          CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
      data->input_range_radius = input_range_radius;
    }
  }

  if (kernel_type == kGenericOptimized || kernel_type == kReference) {
    if (input->type == kTfLiteUInt8) {
      PopulateLookupTable<uint8_t>(
          data, input, output, [](float value) { return std::tanh(value); });
    } else if (input->type == kTfLiteInt8) {
      PopulateLookupTable<int8_t>(
          data, input, output, [](float value) { return std::tanh(value); });
    }
  }

  if (input->type == kTfLiteInt16) {
    static constexpr int kInputIntegerBits = 3;
    static constexpr int kOutputFractionalBits = 15;

    // These operators are implemented in fixed-point arithmetic,
    // which intrinsically wants symmetric ranges (zero_point==0)
    // and power-of-two scales (power-of-two is abbreviated below as POT).
    // While more general support would be possible by means of rescaling,
    // that would add some overhead and some loss of accuracy and wouldn't
    // be used at the moment as current quantized LSTM applications are
    // happy with symmetric, power-of-two-scales quantization. So we just
    // implement that narrow case only for now.
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

    int input_scale_log2_rounded;
    bool param_scale_pot =
        CheckedLog2(input->params.scale, &input_scale_log2_rounded);

    data->input_left_shift =
        (15 - kInputIntegerBits) + input_scale_log2_rounded;
    param_scale_pot &=
        (data->input_left_shift == 0 || data->input_left_shift == 1);

    if (!param_scale_pot) {
      // In case of a general scale parameter, we need to do a rescaling.
      // Magic constant 4096: we need to scale down from the 16-bit range
      // (-2^15, 2^15) to the (-2^3, 2^3) interval, 3 being kInputIntegerBits,
      // so we multiply by 2^(15 - kInputIntegerBits) = 2^12 = 4096.
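      // Illustrative numbers (assumed input scale, not from a real model):
      // an input scale of 0.0005 yields
      // input_multiplier = static_cast<int32_t>(0.0005 * 4096) = 2.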
      data->input_multiplier =
          static_cast<int32_t>(input->params.scale * 4096);
    }

    int output_scale_log2_rounded;
    TF_LITE_ENSURE(
        context,
        CheckedLog2(output->params.scale, &output_scale_log2_rounded));
    TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
                      -kOutputFractionalBits);
  }

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

template <KernelType kernel_type>
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (kernel_type == kFixedPointOptimized) {
    if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
      if (input->type == kTfLiteUInt8) {
        TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                          std::numeric_limits<uint8_t>::min());
      }
      if (input->type == kTfLiteInt8) {
        TF_LITE_ENSURE_EQ(context, output->params.zero_point,
                          std::numeric_limits<int8_t>::min());
      }
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);

      static constexpr int kInputIntegerBits = 4;

      const double input_real_multiplier =
          input->params.scale *
          static_cast<double>(1 << (15 - kInputIntegerBits));

      const double q =
          std::frexp(input_real_multiplier, &data->input_left_shift);
      auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15)));
      data->input_multiplier = static_cast<int16_t>(q_fixed);

      int16_t input_range_radius =
          CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15);
      data->input_range_radius = input_range_radius;
    }
  }

  if (kernel_type == kGenericOptimized || kernel_type == kReference) {
    if (input->type == kTfLiteUInt8) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
      PopulateLookupTable<uint8_t>(data, input, output, [](float value) {
        return 1.0f / (1.0f + std::exp(-value));
      });
    } else if (input->type == kTfLiteInt8) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 256);
      PopulateLookupTable<int8_t>(data, input, output, [](float value) {
        return 1.0f / (1.0f + std::exp(-value));
      });
    } else if (input->type == kTfLiteInt16) {
      TF_LITE_ENSURE(context, output->params.scale == 1. / 32768);
      TF_LITE_ENSURE(context, output->params.zero_point == 0);
    }
  }

  if (input->type == kTfLiteInt16) {
    static constexpr int kInputIntegerBits = 3;
    static constexpr int kOutputFractionalBits = 15;

    // See comments in TanhPrepare about requiring zero_point==0
    // and a power-of-two ("POT") scale.
    TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0);
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

    int input_scale_log2_rounded;
    bool param_scale_pot =
        CheckedLog2(input->params.scale, &input_scale_log2_rounded);

    data->input_left_shift =
        (15 - kInputIntegerBits) + input_scale_log2_rounded;
    param_scale_pot &= (data->input_left_shift == 0);

    if (!param_scale_pot) {
      // In case of a general scale parameter, we need to do a rescaling.
      // Magic constant 4096: we need to scale down from the 16-bit range
      // (-2^15, 2^15) to the (-2^3, 2^3) interval, 3 being kInputIntegerBits,
      // so we multiply by 2^(15 - kInputIntegerBits) = 2^12 = 4096.
      data->input_multiplier =
          static_cast<int32_t>(input->params.scale * 4096);
    }

    int output_scale_log2_rounded;
    TF_LITE_ENSURE(
        context,
        CheckedLog2(output->params.scale, &output_scale_log2_rounded));
    TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded,
                      -kOutputFractionalBits);
  }

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data);
  SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  if (output->type == kTfLiteInt16) {
    TF_LITE_ENSURE(context, input->type == kTfLiteInt8 ||
                                input->type == kTfLiteUInt8 ||
                                input->type == kTfLiteInt16);
  } else {
    TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  }

  TF_LITE_ENSURE(context, NumDimensions(input) >= 1);

  if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
    switch (output->type) {
      case kTfLiteUInt8:
      case kTfLiteInt8:
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
        // Only apply when both input & output are uint8/int8 & built with
        // clang on aarch64.
        // TODO(b/143709993): Port to ARMv7 and other platforms.
        data->params.uint8_table1 = data->uint8_table1;
        data->params.uint8_table2 = data->uint8_table2;
        optimized_ops::PopulateSoftmaxUInt8LookupTable(
            &data->params, input->params.scale, params->beta);
        break;
#endif
      case kTfLiteInt16:
      default:
        data->params.table = data->table;
        optimized_ops::PopulateSoftmaxLookupTable(
            &data->params, input->params.scale, params->beta);
    }

    data->params.zero_point = output->params.zero_point;
    data->params.scale = output->params.scale;
  }

  if (input->type == kTfLiteInt16) {
    TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);

    data->params.exp_lut = data->exp_lut;
    // The exp LUT is only used on negative values; we consider exp(-10.0)
    // insignificant for accumulation.
    gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0,
            data->params.exp_lut, data->kInt16LUTArraySize);
    data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut;
    gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0,
            data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize);
    data->params.zero_point = output->params.zero_point;
    data->params.scale = output->params.scale;

    double input_scale_beta_rescale =
        input->params.scale * params->beta /
        (10.0 / 65535.0);  // scale the input_diff such that [-65535, 0]
                           // correspond to [-10.0, 0.0]
    QuantizeMultiplier(input_scale_beta_rescale,
                       &data->params.input_multiplier,
                       &data->params.input_left_shift);
  }

  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
  LogSoftmaxOpData* data =
      reinterpret_cast<LogSoftmaxOpData*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) {
    TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256);
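    // For 8-bit LogSoftmax the output quantization is fixed rather than read
    // from the model: scale = 16.0 / 256 = 0.0625 with zero_point 255 (uint8)
    // or 127 (int8), so the representable outputs run from
    // (0 - 255) * 0.0625 = -15.9375 up to 0.0, matching log-softmax's
    // (-inf, 0] codomain with the tail clipped.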
    static const double kBeta = 1.0;
    if (input->type == kTfLiteUInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255);
      data->params.table = data->f_table;
      optimized_ops::PopulateSoftmaxLookupTable(&data->params,
                                                input->params.scale, kBeta);
      data->params.zero_point = output->params.zero_point;
      data->params.scale = output->params.scale;
    }
    if (input->type == kTfLiteInt8) {
      TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127);
      static const int kScaledDiffIntegerBits = 5;
      tflite::PreprocessLogSoftmaxScalingExp(
          kBeta, input->params.scale, kScaledDiffIntegerBits,
          &data->input_multiplier, &data->input_left_shift,
          &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift);
      data->reverse_scaling_right_shift *= -1;
      data->diff_min =
          -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits,
                                              data->input_left_shift);
    }
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}

TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  const TfLiteTensor* alpha;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));
  PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);

  TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type);
  output->type = input->type;

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      output->type == kTfLiteInt16) {
    // prelu(x) = x if x >= 0 else x * alpha.
    // So if we translate that for quantized computation:
    //
    // input_float = (input_q - input_zp) * input_scale
    // output_float = (output_q - output_zp) * output_scale
    // alpha_float = (alpha_q - alpha_zp) * alpha_scale
    //
    // When input_q - input_zp >= 0:
    // output_q = (input_q - input_zp) * input_scale / output_scale + output_zp
    // else:
    // output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale
    //            * alpha_scale / output_scale + output_zp
    //
    // So for input_q - input_zp >= 0:
    // output real multiplier 1 is input_scale / output_scale;
    // for input_q - input_zp < 0:
    // output real multiplier 2 is input_scale * alpha_scale / output_scale.
    double real_multiplier_1 = input->params.scale / output->params.scale;
    double real_multiplier_2 =
        input->params.scale * alpha->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1,
                       &data->output_shift_1);
    QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2,
                       &data->output_shift_2);
  }

  data->requires_broadcast = !HaveSameShapes(input, alpha);
  // PRelu (parametric ReLU) shares the same alpha value on "shared axis".
  // This means it's always required to "broadcast" alpha values in PRelu.
  TfLiteIntArray* output_size = nullptr;
  TF_LITE_ENSURE_OK(
      context, CalculateShapeForBroadcast(context, input, alpha, &output_size));
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_size));
  // After broadcasting, the output shape should always be the same as the
  // input shape.
TF_LITE_ENSURE(context, HaveSameShapes(input, output)); return kTfLiteOk; } TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(-1, 1, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int8 supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), 
GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f); return kTfLiteOk; } break; case kTfLiteUInt8: QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteInt16: { TanhParams params; params.input_left_shift = data->input_left_shift; if (kernel_type == kReference || (data->input_multiplier > 0)) { reference_integer_ops::Tanh( data->input_multiplier, data->input_left_shift, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { optimized_ops::Tanh( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only 
float32, uint8, int16 and int8 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}

// Sigmoid is also known as "Logistic".
template <KernelType kernel_type>
TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kReference) {
        reference_ops::Logistic(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        optimized_ops::Logistic(
            GetTensorShape(input), GetTensorData<float>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      break;
    }
    case kTfLiteInt16: {
      LogisticParams params;
      if (kernel_type == kReference || (data->input_multiplier > 0)) {
        const int size =
            MatchingFlatSize(GetTensorShape(input), GetTensorShape(output));
        reference_integer_ops::Logistic(data->input_multiplier, size,
                                        GetTensorData<int16_t>(input),
                                        GetTensorData<int16_t>(output));
      } else {
        optimized_ops::Logistic(
            params, GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<int16_t>(output));
      }
      break;
    }
    case kTfLiteUInt8: {
      if (kernel_type == kFixedPointOptimized) {
        LogisticParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Logistic16bitPrecision(
            params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        EvalUsingLookupTable(data, input, output);
      }
      break;
    }
    case kTfLiteInt8: {
      if (kernel_type == kFixedPointOptimized) {
        LogisticParams params;
        params.input_zero_point = input->params.zero_point;
        params.input_range_radius = data->input_range_radius;
        params.input_multiplier = data->input_multiplier;
        params.input_left_shift = data->input_left_shift;
        optimized_ops::Logistic16bitPrecision(
            params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        EvalUsingLookupTable(data, input, output);
      }
      break;
    }
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int16 and int8 are supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}

TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input,
                          TfLiteTensor* output, TfLiteSoftmaxParams* params) {
  SoftmaxParams op_params;
  op_params.beta = params->beta;
  optimized_ops::Softmax(op_params, GetTensorShape(input),
                         GetTensorData<float>(input), GetTensorShape(output),
                         GetTensorData<float>(output),
                         CpuBackendContext::GetFromContext(context));
  return kTfLiteOk;
}

template <typename In, typename Out>
TfLiteStatus SoftmaxQuantized(TfLiteContext* context,
                              const TfLiteTensor* input, TfLiteTensor* output,
                              SoftmaxOpData* data) {
  optimized_ops::Softmax(data->params, GetTensorShape(input),
                         GetTensorData<In>(input), GetTensorShape(output),
                         GetTensorData<Out>(output));
  return kTfLiteOk;
}

template <>
TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              TfLiteTensor* output,
                                              SoftmaxOpData* data) {
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
  optimized_ops::SoftmaxInt8LUT(
      data->params, GetTensorShape(input),
GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) { reference_ops::SoftmaxInt16( data->params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Only 1D, 2D, 3D and 4D tensors supported for int16 " "input with int16 output, got %dD.", NumDimensions(input)); return kTfLiteError; } } TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), 
GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); const TfLiteTensor* alpha; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; 
op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and uint8 and int8 are supported currently, got %d.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output, const LeakyReluOpData* data) { LeakyReluParams op_params; op_params.input_offset = input->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_alpha = data->output_multiplier_alpha; op_params.output_shift_alpha = data->output_shift_alpha; op_params.output_multiplier_identity = data->output_multiplier_identity; op_params.output_shift_identity = data->output_shift_identity; reference_ops::QuantizeLeakyRelu( op_params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); const LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); LeakyReluParams op_params; switch (input->type) { case kTfLiteFloat32: { op_params.alpha = params->alpha; optimized_ops::LeakyRelu( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizeLeakyRelu<uint8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizeLeakyRelu<int8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt16: { QuantizeLeakyRelu<int16_t>(input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, int8, int16 and uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return 
kTfLiteError; } } TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // Use LUT to handle quantized elu path. if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return value < 0.0 ? std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); } TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } } // namespace activations TfLiteRegistration* Register_ELU() { static TfLiteRegistration r = {activations::Init, activations::Free, activations::EluPrepare, activations::EluEval}; return &r; } TfLiteRegistration* Register_RELU() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::ReluEval}; return &r; } TfLiteRegistration* Register_RELU_N1_TO_1() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu1Eval}; return &r; } TfLiteRegistration* Register_RELU6() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu6Eval}; return &r; } TfLiteRegistration* Register_TANH_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kReference>, activations::TanhEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_TANH_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kGenericOptimized>, activations::TanhEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kFixedPointOptimized>, activations::TanhEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_TANH() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
return Register_TANH_GENERIC_OPT(); } TfLiteRegistration* Register_LOGISTIC_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kReference>, activations::SigmoidEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kGenericOptimized>, activations::SigmoidEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kFixedPointOptimized>, activations::SigmoidEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. return Register_LOGISTIC_GENERIC_OPT(); } TfLiteRegistration* Register_SOFTMAX() { static TfLiteRegistration r = { activations::SoftmaxInit, activations::SoftmaxFree, activations::SoftmaxPrepare, activations::SoftmaxEval}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX_REF() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_PRELU_REF() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_PRELU() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LEAKY_RELU() { static TfLiteRegistration r = { activations::LeakyReluInit, activations::LeakyReluFree, activations::LeakyReluPrepare, activations::LeakyReluEval}; return &r; } TfLiteRegistration* Register_HARD_SWISH() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_HARD_SWISH_REF() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kReference>}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
{'added': [(255, ' const TfLiteTensor* input;'), (256, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (257, ' TfLiteTensor* output;'), (258, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (277, ' const TfLiteTensor* input;'), (278, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (279, ' TfLiteTensor* output;'), (280, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (307, ' TfLiteTensor* output;'), (308, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (313, ' const TfLiteTensor* input;'), (314, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (346, ' const TfLiteTensor* input;'), (347, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (348, ' TfLiteTensor* output;'), (349, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (377, ' const TfLiteTensor* input;'), (378, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (379, ' TfLiteTensor* output;'), (380, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (464, ' const TfLiteTensor* input;'), (465, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (466, ' TfLiteTensor* output;'), (467, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (561, ' const TfLiteTensor* input;'), (562, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (563, ' TfLiteTensor* output;'), (564, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (631, ' const TfLiteTensor* input;'), (632, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (633, ' TfLiteTensor* output;'), (634, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (669, ' const TfLiteTensor* input;'), (670, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (671, ' TfLiteTensor* output;'), (672, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (673, ' const TfLiteTensor* alpha;'), (674, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'), (726, ' const TfLiteTensor* input;'), (727, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (728, ' TfLiteTensor* output;'), (729, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (756, ' const TfLiteTensor* input;'), (757, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (758, ' TfLiteTensor* output;'), (759, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (789, ' const TfLiteTensor* input;'), (790, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (791, ' TfLiteTensor* output;'), (792, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (842, ' const TfLiteTensor* input;'), (843, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (844, ' TfLiteTensor* output;'), (845, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (875, ' const TfLiteTensor* input;'), (876, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (877, ' TfLiteTensor* output;'), (878, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (951, ' const TfLiteTensor* input;'), (952, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (953, ' TfLiteTensor* output;'), (954, ' TF_LITE_ENSURE_OK(context, 
GetOutputSafe(context, node, 0, &output));'), (1101, ' const TfLiteTensor* input;'), (1102, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1103, ' TfLiteTensor* output;'), (1104, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1158, ' const TfLiteTensor* input;'), (1159, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1160, ' TfLiteTensor* output;'), (1161, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1221, ' const TfLiteTensor* input;'), (1222, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1223, ' const TfLiteTensor* alpha;'), (1224, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'), (1225, ' TfLiteTensor* output;'), (1226, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1335, ' const TfLiteTensor* input;'), (1336, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1337, ' TfLiteTensor* output;'), (1338, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1375, ' const TfLiteTensor* input;'), (1376, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1377, ' TfLiteTensor* output;'), (1378, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'), (1391, ' const TfLiteTensor* input;'), (1392, ' TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'), (1393, ' TfLiteTensor* output;'), (1394, ' TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));')], 'deleted': [(255, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (256, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (275, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (276, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (303, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (308, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (340, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (341, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (369, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (370, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (454, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (455, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (549, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (550, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (617, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (618, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (653, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (654, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (655, ' const TfLiteTensor* alpha = GetInput(context, node, 1);'), (707, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (708, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (735, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (736, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (766, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (767, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (817, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (818, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (848, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (849, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (922, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (923, ' 
TfLiteTensor* output = GetOutput(context, node, 0);'), (1070, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1071, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1125, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1126, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1186, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1187, ' const TfLiteTensor* alpha = GetInput(context, node, 1);'), (1188, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1297, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1298, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1335, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1336, ' TfLiteTensor* output = GetOutput(context, node, 0);'), (1349, ' const TfLiteTensor* input = GetInput(context, node, 0);'), (1350, ' TfLiteTensor* output = GetOutput(context, node, 0);')]}
88
44
1,316
9,729
71
465
10
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
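The diff field above captures the CVE-2020-15211 hardening pattern in one move: every bare GetInput/GetOutput call is replaced by GetInputSafe/GetOutputSafe wrapped in TF_LITE_ENSURE_OK, so an invalid tensor index makes the kernel fail cleanly instead of reading past the tensor array (CWE-125). Below is a minimal sketch of the same checked-accessor idea in plain C; the names (node_t, status_t, get_input, get_input_safe) are hypothetical stand-ins for illustration, not the TensorFlow Lite API.

#include <stddef.h>

typedef enum { STATUS_OK, STATUS_ERROR } status_t;

typedef struct {
  int *tensors;  /* backing array of tensor handles */
  size_t count;  /* number of valid entries */
} node_t;

/* Pre-fix shape: dereferences whatever index it is handed, so a corrupt
 * model file can trigger an out-of-bounds read (CWE-125). */
static int *get_input(node_t *n, size_t i) {
  return &n->tensors[i];
}

/* Post-fix shape: validates the index and reports failure through a status
 * code, mirroring the GetInputSafe + TF_LITE_ENSURE_OK pattern above. */
static status_t get_input_safe(node_t *n, size_t i, int **out) {
  if (n == NULL || out == NULL || i >= n->count) {
    return STATUS_ERROR;
  }
  *out = &n->tensors[i];
  return STATUS_OK;
}

The caller-side rewrite is mechanical, which is why the diff touches every Prepare/Eval function identically: declare the pointer, fetch it through the safe accessor, and propagate the error status instead of assuming the fetch succeeded.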
733
new_rbtree.c
C
r_crbtree_insert
/* BSD 2-Clause License Copyright (c) 2018, lynnl Cleaned up and refactored for r2 in 2021 - 2022: condret All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <r_util.h> static void _set_link(RRBNode *parent, RRBNode *child, const int dir) { if (parent) { parent->link[dir] = child; } if (child) { child->parent = parent; } } R_API RRBTree *r_crbtree_new(RRBFree freefn) { RRBTree *tree = R_NEW0 (RRBTree); if (tree) { tree->free = freefn; } return tree; } R_API void r_crbtree_clear(RRBTree *tree) { r_return_if_fail (tree); RRBNode *iter = tree->root, *save = NULL; // Rotate away the left links into a linked list so that // we can perform iterative destruction of the rbtree while (iter) { if (!iter->link[0]) { save = iter->link[1]; if (tree->free) { tree->free (iter->data); } free (iter); tree->size--; } else { save = iter->link[0]; _set_link (iter, save->link[1], 0); _set_link (save, iter, 1); } iter = save; } tree->root = NULL; } R_API void r_crbtree_free(RRBTree *tree) { if (!tree) { return; } r_crbtree_clear (tree); free (tree); } R_API RRBNode *r_crbtree_find_node(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && cmp, NULL); RRBNode *iter = tree->root; while (iter) { const int dir = cmp (data, iter->data, user); if (!dir) { return iter; } iter = iter->link[dir > 0]; } return NULL; } R_API void *r_crbtree_find(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && cmp, NULL); RRBNode *node = r_crbtree_find_node (tree, data, cmp, user); return node ? 
node->data : NULL; } static RRBNode *_node_new(void *data, RRBNode *parent) { RRBNode *node = R_NEW0 (RRBNode); r_return_val_if_fail (node, NULL); node->red = 1; node->data = data; node->parent = parent; return node; } #define IS_RED(n) ((n) && (n)->red == 1) static RRBNode *_rot_once(RRBNode *root, int dir) { r_return_val_if_fail (root, NULL); // save is new parent of root and root is parent of save's previous child RRBNode *save = root->link[!dir]; _set_link (root, save->link[dir], !dir); _set_link (save, root, dir); root->red = 1; save->red = 0; return save; } static RRBNode *_rot_twice(RRBNode *root, int dir) { r_return_val_if_fail (root, NULL); _set_link (root, _rot_once (root->link[!dir], !dir), !dir); return _rot_once (root, dir); } R_API bool r_crbtree_insert(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && data && cmp, false); bool inserted = false; if (tree->root == NULL) { tree->root = _node_new (data, NULL); if (tree->root == NULL) { return false; } inserted = true; goto out_exit; } RRBNode head; /* Fake tree root */ memset (&head, 0, sizeof (RRBNode)); RRBNode *g = NULL, *parent = &head; /* Grandparent & parent */ RRBNode *p = NULL, *q = tree->root; /* Iterator & parent */ int dir = 0, last = 0; /* Directions */ _set_link (parent, q, 1); for (;;) { if (!q) { /* Insert a node at first null link(also set its parent link) */ q = _node_new (data, p); if (!q) { return false; } p->link[dir] = q; inserted = true; } else if (IS_RED (q->link[0]) && IS_RED (q->link[1])) { /* Simple red violation: color flip */ q->red = 1; q->link[0]->red = 0; q->link[1]->red = 0; } if (IS_RED (q) && IS_RED (p)) { #if 0 // coverity error, parent is never null /* Hard red violation: rotate */ if (!parent) { return false; } #endif int dir2 = parent->link[1] == g; if (q == p->link[last]) { _set_link (parent, _rot_once (g, !last), dir2); } else { _set_link (parent, _rot_twice (g, !last), dir2); } } if (inserted) { break; } last = dir; dir = cmp (data, q->data, user) >= 0; if (g) { parent = g; } g = p; p = q; q = q->link[dir]; } /* Update root(it may different due to root rotation) */ tree->root = head.link[1]; out_exit: /* Invariant: root is black */ tree->root->red = 0; tree->root->parent = NULL; if (inserted) { tree->size++; } return inserted; } static void _exchange_nodes(RRBNode *node_a, RRBNode *node_b) { if (!node_a || !node_b) { return; } RRBNode node_a_tmp, node_b_tmp; memcpy (&node_a_tmp, node_a, sizeof (RRBNode)); memcpy (&node_b_tmp, node_b, sizeof (RRBNode)); node_a->link[0] = node_b_tmp.link[0]; node_a->link[1] = node_b_tmp.link[1]; node_a->red = node_b_tmp.red; node_b->link[0] = node_a_tmp.link[0]; node_b->link[1] = node_a_tmp.link[1]; node_b->red = node_a_tmp.red; if (node_a->parent == node_b->parent) { if (node_a->parent) { if (node_a->parent->link[0] == node_a) { node_a->parent->link[0] = node_b; node_a->parent->link[1] = node_a; } else { node_a->parent->link[1] = node_b; node_a->parent->link[0] = node_a; } } if (node_a->link[0]) { node_a->link[0]->parent = node_a; } if (node_a->link[1]) { node_a->link[1]->parent = node_a; } if (node_b->link[0]) { node_b->link[0]->parent = node_b; } if (node_b->link[1]) { node_b->link[1]->parent = node_b; } return; } RRBNode *parent_a = node_a->parent; RRBNode *parent_b = node_b->parent; if (parent_a) { if (parent_a->link[0] == node_a) { parent_a->link[0] = node_b; } else { parent_a->link[1] = node_b; } } node_b->parent = parent_a; if (parent_b) { if (parent_b->link[0] == node_b) { parent_b->link[0] = node_a; } else { 
parent_b->link[1] = node_a; } } node_a->parent = parent_b; if (node_a->link[0]) { node_a->link[0]->parent = node_a; } if (node_a->link[1]) { node_a->link[1]->parent = node_a; } if (node_b->link[0]) { node_b->link[0]->parent = node_b; } if (node_b->link[1]) { node_b->link[1]->parent = node_b; } } // remove data from the tree, without freeing it R_API void *r_crbtree_take(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && data && tree->size && tree->root && cmp, NULL); RRBNode head; /* Fake tree root */ memset (&head, 0, sizeof (RRBNode)); RRBNode *q = &head, *p = NULL, *g = NULL; RRBNode *found = NULL; int dir = 1, last; _set_link (q, tree->root, 1); /* Find in-order predecessor */ while (q->link[dir]) { last = dir; g = p; p = q; q = q->link[dir]; dir = cmp (data, q->data, user); if (dir == 0 && !found) { found = q; } dir = (bool)(dir > 0); if (IS_RED (q) || IS_RED (q->link[dir])) { continue; } if (IS_RED (q->link[!dir])) { _set_link (p, _rot_once (q, dir), last); p = p->link[last]; } else { RRBNode *sibling = p->link[!last]; if (sibling) { if (!IS_RED (sibling->link[!last]) && !IS_RED (sibling->link[last])) { /* Color flip */ p->red = 0; sibling->red = 1; q->red = 1; } else if (g) { int dir2 = (bool)(g->link[1] == p); if (IS_RED (sibling->link[last])) { _set_link (g, _rot_twice (p, last), dir2); } else { _set_link (g, _rot_once (p, last), dir2); } /* Ensure correct coloring */ q->red = g->link[dir2]->red = 1; g->link[dir2]->link[0]->red = 0; g->link[dir2]->link[1]->red = 0; } } } } void *ret = NULL; /* Replace and remove if found */ if (found) { _set_link (p, q->link[q->link[0] == NULL], p->link[1] == q); if (q != found) { q->link[0] = NULL; q->link[1] = NULL; q->parent = NULL; _exchange_nodes (found, q); } ret = found->data; free (found); tree->size--; } /* Update root node */ tree->root = head.link[1]; if (tree->root) { tree->root->red = 0; tree->root->parent = NULL; } else { r_return_val_if_fail (tree->size == 0, NULL); } return ret; } R_API bool r_crbtree_delete(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && data && tree->size && tree->root && cmp, false); data = r_crbtree_take (tree, data, cmp, user); if (tree->free) { tree->free (data); } return !!data; } R_API RRBNode *r_crbtree_first_node(RRBTree *tree) { r_return_val_if_fail (tree, NULL); if (!tree->root) { // empty tree return NULL; } RRBNode *node = tree->root; while (node->link[0]) { node = node->link[0]; } return node; } R_API RRBNode *r_crbtree_last_node(RRBTree *tree) { r_return_val_if_fail (tree, NULL); if (!tree->root) { // empty tree return NULL; } RRBNode *node = tree->root; while (node->link[1]) { node = node->link[1]; } return node; } R_API RRBNode *r_rbnode_next(RRBNode *node) { r_return_val_if_fail (node, NULL); if (node->link[1]) { node = node->link[1]; while (node->link[0]) { node = node->link[0]; } return node; } RRBNode *parent = node->parent; while (parent && parent->link[1] == node) { node = parent; parent = node->parent; } return parent; } R_API RRBNode *r_rbnode_prev(RRBNode *node) { r_return_val_if_fail (node, NULL); if (node->link[0]) { node = node->link[0]; while (node->link[1]) { node = node->link[1]; } return node; } RRBNode *parent = node->parent; while (parent && parent->link[0] == node) { node = parent; parent = node->parent; } return parent; }
/* BSD 2-Clause License Copyright (c) 2018, lynnl Cleaned up and refactored for r2 in 2021 - 2022: condret All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <r_util.h> static void _set_link(RRBNode *parent, RRBNode *child, const int dir) { if (parent) { parent->link[dir] = child; } if (child) { child->parent = parent; } } R_API RRBTree *r_crbtree_new(RRBFree freefn) { RRBTree *tree = R_NEW0 (RRBTree); if (tree) { tree->free = freefn; } return tree; } R_API void r_crbtree_clear(RRBTree *tree) { r_return_if_fail (tree); RRBNode *iter = tree->root, *save = NULL; // Rotate away the left links into a linked list so that // we can perform iterative destruction of the rbtree while (iter) { if (!iter->link[0]) { save = iter->link[1]; if (tree->free) { tree->free (iter->data); } free (iter); tree->size--; } else { save = iter->link[0]; _set_link (iter, save->link[1], 0); _set_link (save, iter, 1); } iter = save; } tree->root = NULL; } R_API void r_crbtree_free(RRBTree *tree) { if (!tree) { return; } r_crbtree_clear (tree); free (tree); } R_API RRBNode *r_crbtree_find_node(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && cmp, NULL); RRBNode *iter = tree->root; while (iter) { const int dir = cmp (data, iter->data, user); if (!dir) { return iter; } iter = iter->link[dir > 0]; } return NULL; } R_API void *r_crbtree_find(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && cmp, NULL); RRBNode *node = r_crbtree_find_node (tree, data, cmp, user); return node ? 
node->data : NULL; } static RRBNode *_node_new(void *data, RRBNode *parent) { RRBNode *node = R_NEW0 (RRBNode); r_return_val_if_fail (node, NULL); node->red = 1; node->data = data; node->parent = parent; return node; } #define IS_RED(n) ((n) && (n)->red == 1) static RRBNode *_rot_once(RRBNode *root, int dir) { r_return_val_if_fail (root, NULL); // save is new parent of root and root is parent of save's previous child RRBNode *save = root->link[!dir]; _set_link (root, save->link[dir], !dir); _set_link (save, root, dir); root->red = 1; save->red = 0; return save; } static RRBNode *_rot_twice(RRBNode *root, int dir) { r_return_val_if_fail (root, NULL); _set_link (root, _rot_once (root->link[!dir], !dir), !dir); return _rot_once (root, dir); } R_API bool r_crbtree_insert(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && data && cmp, false); bool inserted = false; if (!tree->root) { tree->root = _node_new (data, NULL); if (!tree->root) { return false; } inserted = true; goto out_exit; } RRBNode head; /* Fake tree root */ memset (&head, 0, sizeof (RRBNode)); RRBNode *g = NULL, *parent = &head; /* Grandparent & parent */ RRBNode *p = NULL, *q = tree->root; /* Iterator & parent */ int dir = 0, last = 0; /* Directions */ _set_link (parent, q, 1); for (;;) { if (!q) { /* Insert a node at first null link(also set its parent link) */ q = _node_new (data, p); if (!q) { return false; } p->link[dir] = q; inserted = true; } else if (IS_RED (q->link[0]) && IS_RED (q->link[1])) { /* Simple red violation: color flip */ q->red = 1; q->link[0]->red = 0; q->link[1]->red = 0; } if (IS_RED (q) && IS_RED (p)) { #if 0 // coverity error, parent is never null /* Hard red violation: rotate */ if (!parent) { return false; } #endif int dir2 = parent->link[1] == g; if (q == p->link[last]) { _set_link (parent, _rot_once (g, !last), dir2); } else { _set_link (parent, _rot_twice (g, !last), dir2); } } if (inserted) { break; } last = dir; dir = cmp (data, q->data, user) >= 0; if (g) { parent = g; } g = p; p = q; q = q->link[dir]; } /* Update root(it may different due to root rotation) */ tree->root = head.link[1]; out_exit: /* Invariant: root is black */ tree->root->red = 0; tree->root->parent = NULL; if (inserted) { tree->size++; } return inserted; } static void _exchange_nodes(RRBNode *node_a, RRBNode *node_b) { if (!node_a || !node_b) { return; } RRBNode node_a_tmp, node_b_tmp; memcpy (&node_a_tmp, node_a, sizeof (RRBNode)); memcpy (&node_b_tmp, node_b, sizeof (RRBNode)); node_a->link[0] = node_b_tmp.link[0]; node_a->link[1] = node_b_tmp.link[1]; node_a->red = node_b_tmp.red; node_b->link[0] = node_a_tmp.link[0]; node_b->link[1] = node_a_tmp.link[1]; node_b->red = node_a_tmp.red; if (node_a->parent == node_b->parent) { if (node_a->parent) { if (node_a->parent->link[0] == node_a) { node_a->parent->link[0] = node_b; node_a->parent->link[1] = node_a; } else { node_a->parent->link[1] = node_b; node_a->parent->link[0] = node_a; } } if (node_a->link[0]) { node_a->link[0]->parent = node_a; } if (node_a->link[1]) { node_a->link[1]->parent = node_a; } if (node_b->link[0]) { node_b->link[0]->parent = node_b; } if (node_b->link[1]) { node_b->link[1]->parent = node_b; } return; } RRBNode *parent_a = node_a->parent; RRBNode *parent_b = node_b->parent; if (parent_a) { if (parent_a->link[0] == node_a) { parent_a->link[0] = node_b; } else { parent_a->link[1] = node_b; } } node_b->parent = parent_a; if (parent_b) { if (parent_b->link[0] == node_b) { parent_b->link[0] = node_a; } else { 
parent_b->link[1] = node_a; } } node_a->parent = parent_b; if (node_a->link[0]) { node_a->link[0]->parent = node_a; } if (node_a->link[1]) { node_a->link[1]->parent = node_a; } if (node_b->link[0]) { node_b->link[0]->parent = node_b; } if (node_b->link[1]) { node_b->link[1]->parent = node_b; } } // remove data from the tree, without freeing it R_API void *r_crbtree_take(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && data && tree->size && tree->root && cmp, NULL); RRBNode head; /* Fake tree root */ memset (&head, 0, sizeof (RRBNode)); RRBNode *q = &head, *p = NULL, *g = NULL; RRBNode *found = NULL; int dir = 1, last; _set_link (q, tree->root, 1); /* Find in-order predecessor */ while (q->link[dir]) { last = dir; g = p; p = q; q = q->link[dir]; dir = cmp (data, q->data, user); if (dir == 0 && !found) { found = q; } dir = (bool)(dir > 0); if (IS_RED (q) || IS_RED (q->link[dir])) { continue; } if (IS_RED (q->link[!dir])) { _set_link (p, _rot_once (q, dir), last); p = p->link[last]; } else { RRBNode *sibling = p->link[!last]; if (sibling) { if (!IS_RED (sibling->link[!last]) && !IS_RED (sibling->link[last])) { /* Color flip */ p->red = 0; sibling->red = 1; q->red = 1; } else if (g) { int dir2 = (bool)(g->link[1] == p); if (IS_RED (sibling->link[last])) { _set_link (g, _rot_twice (p, last), dir2); } else { _set_link (g, _rot_once (p, last), dir2); } /* Ensure correct coloring */ q->red = g->link[dir2]->red = 1; g->link[dir2]->link[0]->red = 0; g->link[dir2]->link[1]->red = 0; } } } } void *ret = NULL; /* Replace and remove if found */ if (found) { _set_link (p, q->link[q->link[0] == NULL], p->link[1] == q); if (q != found) { q->link[0] = NULL; q->link[1] = NULL; q->parent = NULL; _exchange_nodes (found, q); } ret = found->data; free (found); tree->size--; } /* Update root node */ tree->root = head.link[1]; if (tree->root) { tree->root->red = 0; tree->root->parent = NULL; } else { r_return_val_if_fail (tree->size == 0, NULL); } return ret; } R_API bool r_crbtree_delete(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && data && tree->size && tree->root && cmp, false); data = r_crbtree_take (tree, data, cmp, user); if (tree->free) { tree->free (data); } return !!data; } R_API RRBNode *r_crbtree_first_node(RRBTree *tree) { r_return_val_if_fail (tree, NULL); if (!tree->root) { // empty tree return NULL; } RRBNode *node = tree->root; while (node->link[0]) { node = node->link[0]; } return node; } R_API RRBNode *r_crbtree_last_node(RRBTree *tree) { r_return_val_if_fail (tree, NULL); if (!tree->root) { // empty tree return NULL; } RRBNode *node = tree->root; while (node->link[1]) { node = node->link[1]; } return node; } R_API RRBNode *r_rbnode_next(RRBNode *node) { r_return_val_if_fail (node, NULL); if (node->link[1]) { node = node->link[1]; while (node->link[0]) { node = node->link[0]; } return node; } RRBNode *parent = node->parent; while (parent && parent->link[1] == node) { node = parent; parent = node->parent; } return parent; } R_API RRBNode *r_rbnode_prev(RRBNode *node) { r_return_val_if_fail (node, NULL); if (node->link[0]) { node = node->link[0]; while (node->link[1]) { node = node->link[1]; } return node; } RRBNode *parent = node->parent; while (parent && parent->link[0] == node) { node = parent; parent = node->parent; } return parent; }
R_API bool r_crbtree_insert(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && data && cmp, false); bool inserted = false; if (tree->root == NULL) { tree->root = _node_new (data, NULL); if (tree->root == NULL) { return false; } inserted = true; goto out_exit; } RRBNode head; /* Fake tree root */ memset (&head, 0, sizeof (RRBNode)); RRBNode *g = NULL, *parent = &head; /* Grandparent & parent */ RRBNode *p = NULL, *q = tree->root; /* Iterator & parent */ int dir = 0, last = 0; /* Directions */ _set_link (parent, q, 1); for (;;) { if (!q) { /* Insert a node at first null link(also set its parent link) */ q = _node_new (data, p); if (!q) { return false; } p->link[dir] = q; inserted = true; } else if (IS_RED (q->link[0]) && IS_RED (q->link[1])) { /* Simple red violation: color flip */ q->red = 1; q->link[0]->red = 0; q->link[1]->red = 0; } if (IS_RED (q) && IS_RED (p)) { #if 0 // coverity error, parent is never null /* Hard red violation: rotate */ if (!parent) { return false; } #endif int dir2 = parent->link[1] == g; if (q == p->link[last]) { _set_link (parent, _rot_once (g, !last), dir2); } else { _set_link (parent, _rot_twice (g, !last), dir2); } } if (inserted) { break; } last = dir; dir = cmp (data, q->data, user) >= 0; if (g) { parent = g; } g = p; p = q; q = q->link[dir]; } /* Update root(it may different due to root rotation) */ tree->root = head.link[1]; out_exit: /* Invariant: root is black */ tree->root->red = 0; tree->root->parent = NULL; if (inserted) { tree->size++; } return inserted; }
R_API bool r_crbtree_insert(RRBTree *tree, void *data, RRBComparator cmp, void *user) { r_return_val_if_fail (tree && data && cmp, false); bool inserted = false; if (!tree->root) { tree->root = _node_new (data, NULL); if (!tree->root) { return false; } inserted = true; goto out_exit; } RRBNode head; /* Fake tree root */ memset (&head, 0, sizeof (RRBNode)); RRBNode *g = NULL, *parent = &head; /* Grandparent & parent */ RRBNode *p = NULL, *q = tree->root; /* Iterator & parent */ int dir = 0, last = 0; /* Directions */ _set_link (parent, q, 1); for (;;) { if (!q) { /* Insert a node at first null link(also set its parent link) */ q = _node_new (data, p); if (!q) { return false; } p->link[dir] = q; inserted = true; } else if (IS_RED (q->link[0]) && IS_RED (q->link[1])) { /* Simple red violation: color flip */ q->red = 1; q->link[0]->red = 0; q->link[1]->red = 0; } if (IS_RED (q) && IS_RED (p)) { #if 0 // coverity error, parent is never null /* Hard red violation: rotate */ if (!parent) { return false; } #endif int dir2 = parent->link[1] == g; if (q == p->link[last]) { _set_link (parent, _rot_once (g, !last), dir2); } else { _set_link (parent, _rot_twice (g, !last), dir2); } } if (inserted) { break; } last = dir; dir = cmp (data, q->data, user) >= 0; if (g) { parent = g; } g = p; p = q; q = q->link[dir]; } /* Update root(it may different due to root rotation) */ tree->root = head.link[1]; out_exit: /* Invariant: root is black */ tree->root->red = 0; tree->root->parent = NULL; if (inserted) { tree->size++; } return inserted; }
{'added': [(141, '\tif (!tree->root) {'), (143, '\t\tif (!tree->root) {')], 'deleted': [(141, '\tif (tree->root == NULL) {'), (143, '\t\tif (tree->root == NULL) {')]}
2
2
341
2,407
62
413
18
https://github.com/radareorg/radare2
CVE-2022-1444
CWE-416
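The recorded diff for this entry is tiny (two tree->root == NULL checks rewritten as !tree->root, a behavior-preserving style change; the CWE-416 classification comes from the associated CVE rather than from this hunk alone), but the surrounding code documents the whole red-black tree API. Below is a minimal usage sketch against the signatures visible above; the strcmp-style comparator convention (negative/zero/positive, plus an opaque user pointer) is read off r_crbtree_find_node, and packing small integers into the void * payload is a hypothetical convenience for the example.

#include <r_util.h>
#include <stdint.h>

/* strcmp-style comparator: the sign of the result picks the branch
 * (link[dir > 0]) and zero means "found", as the lookup code expects. */
static int int_cmp(void *incoming, void *in_tree, void *user) {
	(void)user;
	return (int)((intptr_t)incoming - (intptr_t)in_tree);
}

int main(void) {
	/* NULL free callback: the tree does not own the integer payloads */
	RRBTree *tree = r_crbtree_new (NULL);
	r_crbtree_insert (tree, (void *)(intptr_t)3, int_cmp, NULL);
	r_crbtree_insert (tree, (void *)(intptr_t)1, int_cmp, NULL);
	r_crbtree_insert (tree, (void *)(intptr_t)2, int_cmp, NULL); /* data must be non-NULL */
	void *hit = r_crbtree_find (tree, (void *)(intptr_t)2, int_cmp, NULL);
	r_crbtree_free (tree);
	return hit == (void *)(intptr_t)2 ? 0 : 1;
}

Note that r_crbtree_insert rejects NULL data up front via r_return_val_if_fail, so sentinel payloads have to be encoded as non-zero values.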
1,763
proc.c
C
proc_lambda
/* ** proc.c - Proc class ** ** See Copyright Notice in mruby.h */ #include <mruby.h> #include <mruby/class.h> #include <mruby/proc.h> #include <mruby/opcode.h> #include <mruby/data.h> #include <mruby/presym.h> #include <mruby/array.h> #include <mruby/hash.h> static const mrb_code call_iseq[] = { OP_CALL, }; static const mrb_irep call_irep = { 0, /* nlocals */ 2, /* nregs */ 0, /* clen */ MRB_ISEQ_NO_FREE | MRB_IREP_NO_FREE, /* flags */ call_iseq, /* iseq */ NULL, /* pool */ NULL, /* syms */ NULL, /* reps */ NULL, /* lv */ NULL, /* debug_info */ 1, /* ilen */ 0, /* plen */ 0, /* slen */ 1, /* rlen */ 0, /* refcnt */ }; static const struct RProc call_proc = { NULL, NULL, MRB_TT_PROC, MRB_GC_RED, MRB_FL_OBJ_IS_FROZEN | MRB_PROC_SCOPE | MRB_PROC_STRICT, { &call_irep }, NULL, { NULL } }; struct RProc* mrb_proc_new(mrb_state *mrb, const mrb_irep *irep) { struct RProc *p; mrb_callinfo *ci = mrb->c->ci; p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb->proc_class); if (ci) { struct RClass *tc = NULL; if (ci->proc) { if (ci->proc->color != MRB_GC_RED) { tc = MRB_PROC_TARGET_CLASS(ci->proc); } else { tc = mrb_vm_ci_target_class(ci); if (tc && tc->tt == MRB_TT_ICLASS) { tc = tc->c; } } } if (tc == NULL) { tc = mrb_vm_ci_target_class(ci); } p->upper = ci->proc; p->e.target_class = tc; } p->body.irep = irep; if (irep) { mrb_irep_incref(mrb, (mrb_irep*)irep); } return p; } struct REnv* mrb_env_new(mrb_state *mrb, struct mrb_context *c, mrb_callinfo *ci, int nstacks, mrb_value *stack, struct RClass *tc) { struct REnv *e; mrb_int bidx = 1; int n = ci->n; int nk = ci->nk; e = MRB_OBJ_ALLOC(mrb, MRB_TT_ENV, NULL); e->c = tc; MRB_ENV_SET_LEN(e, nstacks); bidx += (n == 15) ? 1 : n; bidx += (nk == 15) ? 1 : (2*nk); MRB_ENV_SET_BIDX(e, bidx); e->mid = ci->mid; e->stack = stack; e->cxt = c; return e; } static void closure_setup(mrb_state *mrb, struct RProc *p) { mrb_callinfo *ci = mrb->c->ci; const struct RProc *up = p->upper; struct REnv *e = NULL; if (ci && (e = mrb_vm_ci_env(ci)) != NULL) { /* do nothing, because e is assigned already */ } else if (up) { struct RClass *tc = ci->u.target_class; e = mrb_env_new(mrb, mrb->c, ci, up->body.irep->nlocals, ci->stack, tc); ci->u.env = e; if (MRB_PROC_ENV_P(up) && MRB_PROC_ENV(up)->cxt == NULL) { e->mid = MRB_PROC_ENV(up)->mid; } } if (e) { p->e.env = e; p->flags |= MRB_PROC_ENVSET; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)e); } } struct RProc* mrb_closure_new(mrb_state *mrb, const mrb_irep *irep) { struct RProc *p = mrb_proc_new(mrb, irep); closure_setup(mrb, p); return p; } MRB_API struct RProc* mrb_proc_new_cfunc(mrb_state *mrb, mrb_func_t func) { struct RProc *p; p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb->proc_class); p->body.func = func; p->flags |= MRB_PROC_CFUNC_FL; p->upper = 0; p->e.target_class = 0; return p; } MRB_API struct RProc* mrb_proc_new_cfunc_with_env(mrb_state *mrb, mrb_func_t func, mrb_int argc, const mrb_value *argv) { struct RProc *p = mrb_proc_new_cfunc(mrb, func); struct REnv *e; int i; p->e.env = e = mrb_env_new(mrb, mrb->c, mrb->c->ci, 0, NULL, NULL); p->flags |= MRB_PROC_ENVSET; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)e); MRB_ENV_CLOSE(e); e->stack = (mrb_value*)mrb_malloc(mrb, sizeof(mrb_value) * argc); MRB_ENV_SET_LEN(e, argc); if (argv) { for (i = 0; i < argc; ++i) { e->stack[i] = argv[i]; } } else { for (i = 0; i < argc; ++i) { SET_NIL_VALUE(e->stack[i]); } } return p; } MRB_API struct RProc* mrb_closure_new_cfunc(mrb_state *mrb, mrb_func_t func, int nlocals) { return mrb_proc_new_cfunc_with_env(mrb, 
func, nlocals, NULL); } MRB_API mrb_value mrb_proc_cfunc_env_get(mrb_state *mrb, mrb_int idx) { const struct RProc *p = mrb->c->ci->proc; struct REnv *e; if (!p || !MRB_PROC_CFUNC_P(p)) { mrb_raise(mrb, E_TYPE_ERROR, "Can't get cfunc env from non-cfunc proc"); } e = MRB_PROC_ENV(p); if (!e) { mrb_raise(mrb, E_TYPE_ERROR, "Can't get cfunc env from cfunc Proc without REnv"); } if (idx < 0 || MRB_ENV_LEN(e) <= idx) { mrb_raisef(mrb, E_INDEX_ERROR, "Env index out of range: %i (expected: 0 <= index < %i)", idx, MRB_ENV_LEN(e)); } return e->stack[idx]; } void mrb_proc_copy(struct RProc *a, struct RProc *b) { if (a->body.irep) { /* already initialized proc */ return; } a->flags = b->flags; a->body = b->body; if (!MRB_PROC_CFUNC_P(a) && a->body.irep) { mrb_irep_incref(NULL, (mrb_irep*)a->body.irep); } a->upper = b->upper; a->e.env = b->e.env; /* a->e.target_class = a->e.target_class; */ } static mrb_value mrb_proc_s_new(mrb_state *mrb, mrb_value proc_class) { mrb_value blk; mrb_value proc; struct RProc *p; /* Calling Proc.new without a block is not implemented yet */ mrb_get_args(mrb, "&!", &blk); p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb_class_ptr(proc_class)); mrb_proc_copy(p, mrb_proc_ptr(blk)); proc = mrb_obj_value(p); mrb_funcall_with_block(mrb, proc, MRB_SYM(initialize), 0, NULL, proc); if (!MRB_PROC_STRICT_P(p) && mrb->c->ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb->c->ci[-1].u.env) { p->flags |= MRB_PROC_ORPHAN; } return proc; } static mrb_value mrb_proc_init_copy(mrb_state *mrb, mrb_value self) { mrb_value proc = mrb_get_arg1(mrb); if (!mrb_proc_p(proc)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc"); } mrb_proc_copy(mrb_proc_ptr(self), mrb_proc_ptr(proc)); return self; } /* 15.2.17.4.2 */ static mrb_value proc_arity(mrb_state *mrb, mrb_value self) { return mrb_int_value(mrb, mrb_proc_arity(mrb_proc_ptr(self))); } /* 15.3.1.2.6 */ /* 15.3.1.3.27 */ /* * call-seq: * lambda { |...| block } -> a_proc * * Equivalent to <code>Proc.new</code>, except the resulting Proc objects * check the number of parameters passed when called. */ static mrb_value proc_lambda(mrb_state *mrb, mrb_value self) { mrb_value blk; struct RProc *p; mrb_get_args(mrb, "&", &blk); if (mrb_nil_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "tried to create Proc object without a block"); } if (!mrb_proc_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc"); } p = mrb_proc_ptr(blk); if (!MRB_PROC_STRICT_P(p)) { struct RProc *p2 = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, p->c); mrb_proc_copy(p2, p); p2->flags |= MRB_PROC_STRICT; return mrb_obj_value(p2); } return blk; } mrb_int mrb_proc_arity(const struct RProc *p) { const mrb_irep *irep; const mrb_code *pc; mrb_aspec aspec; int ma, op, ra, pa, arity; if (MRB_PROC_CFUNC_P(p)) { /* TODO cfunc aspec not implemented yet */ return -1; } irep = p->body.irep; if (!irep) { return 0; } pc = irep->iseq; /* arity is depend on OP_ENTER */ if (*pc != OP_ENTER) { return 0; } aspec = PEEK_W(pc+1); ma = MRB_ASPEC_REQ(aspec); op = MRB_ASPEC_OPT(aspec); ra = MRB_ASPEC_REST(aspec); pa = MRB_ASPEC_POST(aspec); arity = ra || (MRB_PROC_STRICT_P(p) && op) ? 
-(ma + pa + 1) : ma + pa; return arity; } mrb_value mrb_proc_local_variables(mrb_state *mrb, const struct RProc *proc) { const mrb_irep *irep; mrb_value vars; size_t i; if (proc == NULL || MRB_PROC_CFUNC_P(proc)) { return mrb_ary_new(mrb); } vars = mrb_hash_new(mrb); while (proc) { if (MRB_PROC_CFUNC_P(proc)) break; irep = proc->body.irep; if (irep->lv) { for (i = 0; i + 1 < irep->nlocals; ++i) { if (irep->lv[i]) { mrb_sym sym = irep->lv[i]; const char *name = mrb_sym_name(mrb, sym); switch (name[0]) { case '*': case '&': break; default: mrb_hash_set(mrb, vars, mrb_symbol_value(sym), mrb_true_value()); break; } } } } if (MRB_PROC_SCOPE_P(proc)) break; proc = proc->upper; } return mrb_hash_keys(mrb, vars); } const struct RProc * mrb_proc_get_caller(mrb_state *mrb, struct REnv **envp) { struct mrb_context *c = mrb->c; mrb_callinfo *ci = (c->ci > c->cibase) ? c->ci - 1 : c->cibase; const struct RProc *proc = ci->proc; if (!proc || MRB_PROC_CFUNC_P(proc)) { if (envp) *envp = NULL; } else { struct RClass *tc = MRB_PROC_TARGET_CLASS(proc); struct REnv *e = mrb_vm_ci_env(ci); if (e == NULL) { int nstacks = proc->body.irep->nlocals; e = mrb_env_new(mrb, c, ci, nstacks, ci->stack, tc); ci->u.env = e; } else if (tc) { e->c = tc; mrb_field_write_barrier(mrb, (struct RBasic*)e, (struct RBasic*)tc); } if (envp) *envp = e; } return proc; } #define IREP_LVAR_MERGE_DEFAULT 50 #define IREP_LVAR_MERGE_MINIMUM 8 #define IREP_LVAR_MERGE_MAXIMUM 240 #ifdef MRB_IREP_LVAR_MERGE_LIMIT # define IREP_LVAR_MERGE_LIMIT \ ((MRB_IREP_LVAR_MERGE_LIMIT) < IREP_LVAR_MERGE_MINIMUM ? IREP_LVAR_MERGE_MINIMUM : \ (MRB_IREP_LVAR_MERGE_LIMIT) > IREP_LVAR_MERGE_MAXIMUM ? IREP_LVAR_MERGE_MAXIMUM : \ (MRB_IREP_LVAR_MERGE_LIMIT)) #else # define IREP_LVAR_MERGE_LIMIT IREP_LVAR_MERGE_DEFAULT #endif void mrb_proc_merge_lvar(mrb_state *mrb, mrb_irep *irep, struct REnv *env, int num, const mrb_sym *lv, const mrb_value *stack) { mrb_assert(!(irep->flags & MRB_IREP_NO_FREE)); if ((irep->nlocals + num) > IREP_LVAR_MERGE_LIMIT) { mrb_raise(mrb, E_RUNTIME_ERROR, "too many local variables for binding (mruby limitation)"); } if (!lv) { mrb_raise(mrb, E_RUNTIME_ERROR, "unavailable local variable names"); } irep->lv = (mrb_sym*)mrb_realloc(mrb, (mrb_sym*)irep->lv, sizeof(mrb_sym) * (irep->nlocals + num)); env->stack = (mrb_value*)mrb_realloc(mrb, env->stack, sizeof(mrb_value) * (irep->nlocals + 1 /* self */ + num)); mrb_sym *destlv = (mrb_sym*)irep->lv + irep->nlocals - 1 /* self */; mrb_value *destst = env->stack + irep->nlocals; memmove(destlv, lv, sizeof(mrb_sym) * num); if (stack) { memmove(destst, stack, sizeof(mrb_value) * num); for (int i = 0; i < num; i++) { if (!mrb_immediate_p(stack[i])) { mrb_field_write_barrier(mrb, (struct RBasic*)env, (struct RBasic*)mrb_obj_ptr(stack[i])); } } } else { for (int i = num; i > 0; i--, destst++) { *destst = mrb_nil_value(); } } irep->nlocals += num; irep->nregs = irep->nlocals; MRB_ENV_SET_LEN(env, irep->nlocals); } void mrb_init_proc(mrb_state *mrb) { mrb_method_t m; mrb_define_class_method(mrb, mrb->proc_class, "new", mrb_proc_s_new, MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); mrb_define_method(mrb, mrb->proc_class, "initialize_copy", mrb_proc_init_copy, MRB_ARGS_REQ(1)); mrb_define_method(mrb, mrb->proc_class, "arity", proc_arity, MRB_ARGS_NONE()); MRB_METHOD_FROM_PROC(m, &call_proc); mrb_define_method_raw(mrb, mrb->proc_class, MRB_SYM(call), m); mrb_define_method_raw(mrb, mrb->proc_class, MRB_OPSYM(aref), m); mrb_define_class_method(mrb, mrb->kernel_module, "lambda", proc_lambda, 
MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); /* 15.3.1.2.6 */ mrb_define_method(mrb, mrb->kernel_module, "lambda", proc_lambda, MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); /* 15.3.1.3.27 */ }
/* ** proc.c - Proc class ** ** See Copyright Notice in mruby.h */ #include <mruby.h> #include <mruby/class.h> #include <mruby/proc.h> #include <mruby/opcode.h> #include <mruby/data.h> #include <mruby/presym.h> #include <mruby/array.h> #include <mruby/hash.h> static const mrb_code call_iseq[] = { OP_CALL, }; static const mrb_irep call_irep = { 0, /* nlocals */ 2, /* nregs */ 0, /* clen */ MRB_ISEQ_NO_FREE | MRB_IREP_NO_FREE, /* flags */ call_iseq, /* iseq */ NULL, /* pool */ NULL, /* syms */ NULL, /* reps */ NULL, /* lv */ NULL, /* debug_info */ 1, /* ilen */ 0, /* plen */ 0, /* slen */ 1, /* rlen */ 0, /* refcnt */ }; static const struct RProc call_proc = { NULL, NULL, MRB_TT_PROC, MRB_GC_RED, MRB_FL_OBJ_IS_FROZEN | MRB_PROC_SCOPE | MRB_PROC_STRICT, { &call_irep }, NULL, { NULL } }; struct RProc* mrb_proc_new(mrb_state *mrb, const mrb_irep *irep) { struct RProc *p; mrb_callinfo *ci = mrb->c->ci; p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb->proc_class); if (ci) { struct RClass *tc = NULL; if (ci->proc) { if (ci->proc->color != MRB_GC_RED) { tc = MRB_PROC_TARGET_CLASS(ci->proc); } else { tc = mrb_vm_ci_target_class(ci); if (tc && tc->tt == MRB_TT_ICLASS) { tc = tc->c; } } } if (tc == NULL) { tc = mrb_vm_ci_target_class(ci); } p->upper = ci->proc; p->e.target_class = tc; } p->body.irep = irep; if (irep) { mrb_irep_incref(mrb, (mrb_irep*)irep); } return p; } struct REnv* mrb_env_new(mrb_state *mrb, struct mrb_context *c, mrb_callinfo *ci, int nstacks, mrb_value *stack, struct RClass *tc) { struct REnv *e; mrb_int bidx = 1; int n = ci->n; int nk = ci->nk; e = MRB_OBJ_ALLOC(mrb, MRB_TT_ENV, NULL); e->c = tc; MRB_ENV_SET_LEN(e, nstacks); bidx += (n == 15) ? 1 : n; bidx += (nk == 15) ? 1 : (2*nk); MRB_ENV_SET_BIDX(e, bidx); e->mid = ci->mid; e->stack = stack; e->cxt = c; return e; } static void closure_setup(mrb_state *mrb, struct RProc *p) { mrb_callinfo *ci = mrb->c->ci; const struct RProc *up = p->upper; struct REnv *e = NULL; if (ci && (e = mrb_vm_ci_env(ci)) != NULL) { /* do nothing, because e is assigned already */ } else if (up) { struct RClass *tc = ci->u.target_class; e = mrb_env_new(mrb, mrb->c, ci, up->body.irep->nlocals, ci->stack, tc); ci->u.env = e; if (MRB_PROC_ENV_P(up) && MRB_PROC_ENV(up)->cxt == NULL) { e->mid = MRB_PROC_ENV(up)->mid; } } if (e) { p->e.env = e; p->flags |= MRB_PROC_ENVSET; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)e); } } struct RProc* mrb_closure_new(mrb_state *mrb, const mrb_irep *irep) { struct RProc *p = mrb_proc_new(mrb, irep); closure_setup(mrb, p); return p; } MRB_API struct RProc* mrb_proc_new_cfunc(mrb_state *mrb, mrb_func_t func) { struct RProc *p; p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb->proc_class); p->body.func = func; p->flags |= MRB_PROC_CFUNC_FL; p->upper = 0; p->e.target_class = 0; return p; } MRB_API struct RProc* mrb_proc_new_cfunc_with_env(mrb_state *mrb, mrb_func_t func, mrb_int argc, const mrb_value *argv) { struct RProc *p = mrb_proc_new_cfunc(mrb, func); struct REnv *e; int i; p->e.env = e = mrb_env_new(mrb, mrb->c, mrb->c->ci, 0, NULL, NULL); p->flags |= MRB_PROC_ENVSET; mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)e); MRB_ENV_CLOSE(e); e->stack = (mrb_value*)mrb_malloc(mrb, sizeof(mrb_value) * argc); MRB_ENV_SET_LEN(e, argc); if (argv) { for (i = 0; i < argc; ++i) { e->stack[i] = argv[i]; } } else { for (i = 0; i < argc; ++i) { SET_NIL_VALUE(e->stack[i]); } } return p; } MRB_API struct RProc* mrb_closure_new_cfunc(mrb_state *mrb, mrb_func_t func, int nlocals) { return mrb_proc_new_cfunc_with_env(mrb, 
func, nlocals, NULL); } MRB_API mrb_value mrb_proc_cfunc_env_get(mrb_state *mrb, mrb_int idx) { const struct RProc *p = mrb->c->ci->proc; struct REnv *e; if (!p || !MRB_PROC_CFUNC_P(p)) { mrb_raise(mrb, E_TYPE_ERROR, "Can't get cfunc env from non-cfunc proc"); } e = MRB_PROC_ENV(p); if (!e) { mrb_raise(mrb, E_TYPE_ERROR, "Can't get cfunc env from cfunc Proc without REnv"); } if (idx < 0 || MRB_ENV_LEN(e) <= idx) { mrb_raisef(mrb, E_INDEX_ERROR, "Env index out of range: %i (expected: 0 <= index < %i)", idx, MRB_ENV_LEN(e)); } return e->stack[idx]; } void mrb_proc_copy(mrb_state *mrb, struct RProc *a, struct RProc *b) { if (a->body.irep) { /* already initialized proc */ return; } a->flags = b->flags; a->body = b->body; a->upper = b->upper; if (!MRB_PROC_CFUNC_P(a) && a->body.irep) { mrb_irep_incref(mrb, (mrb_irep*)a->body.irep); } a->e.env = b->e.env; /* a->e.target_class = a->e.target_class; */ } static mrb_value mrb_proc_s_new(mrb_state *mrb, mrb_value proc_class) { mrb_value blk; mrb_value proc; struct RProc *p; /* Calling Proc.new without a block is not implemented yet */ mrb_get_args(mrb, "&!", &blk); p = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, mrb_class_ptr(proc_class)); mrb_proc_copy(mrb, p, mrb_proc_ptr(blk)); proc = mrb_obj_value(p); mrb_funcall_with_block(mrb, proc, MRB_SYM(initialize), 0, NULL, proc); if (!MRB_PROC_STRICT_P(p) && mrb->c->ci > mrb->c->cibase && MRB_PROC_ENV(p) == mrb->c->ci[-1].u.env) { p->flags |= MRB_PROC_ORPHAN; } return proc; } static mrb_value mrb_proc_init_copy(mrb_state *mrb, mrb_value self) { mrb_value proc = mrb_get_arg1(mrb); if (!mrb_proc_p(proc)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc"); } mrb_proc_copy(mrb, mrb_proc_ptr(self), mrb_proc_ptr(proc)); return self; } /* 15.2.17.4.2 */ static mrb_value proc_arity(mrb_state *mrb, mrb_value self) { return mrb_int_value(mrb, mrb_proc_arity(mrb_proc_ptr(self))); } /* 15.3.1.2.6 */ /* 15.3.1.3.27 */ /* * call-seq: * lambda { |...| block } -> a_proc * * Equivalent to <code>Proc.new</code>, except the resulting Proc objects * check the number of parameters passed when called. */ static mrb_value proc_lambda(mrb_state *mrb, mrb_value self) { mrb_value blk; struct RProc *p; mrb_get_args(mrb, "&", &blk); if (mrb_nil_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "tried to create Proc object without a block"); } if (!mrb_proc_p(blk)) { mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc"); } p = mrb_proc_ptr(blk); if (!MRB_PROC_STRICT_P(p)) { struct RProc *p2 = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, p->c); mrb_proc_copy(mrb, p2, p); p2->flags |= MRB_PROC_STRICT; return mrb_obj_value(p2); } return blk; } mrb_int mrb_proc_arity(const struct RProc *p) { const mrb_irep *irep; const mrb_code *pc; mrb_aspec aspec; int ma, op, ra, pa, arity; if (MRB_PROC_CFUNC_P(p)) { /* TODO cfunc aspec not implemented yet */ return -1; } irep = p->body.irep; if (!irep) { return 0; } pc = irep->iseq; /* arity is depend on OP_ENTER */ if (*pc != OP_ENTER) { return 0; } aspec = PEEK_W(pc+1); ma = MRB_ASPEC_REQ(aspec); op = MRB_ASPEC_OPT(aspec); ra = MRB_ASPEC_REST(aspec); pa = MRB_ASPEC_POST(aspec); arity = ra || (MRB_PROC_STRICT_P(p) && op) ? 
-(ma + pa + 1) : ma + pa; return arity; } mrb_value mrb_proc_local_variables(mrb_state *mrb, const struct RProc *proc) { const mrb_irep *irep; mrb_value vars; size_t i; if (proc == NULL || MRB_PROC_CFUNC_P(proc)) { return mrb_ary_new(mrb); } vars = mrb_hash_new(mrb); while (proc) { if (MRB_PROC_CFUNC_P(proc)) break; irep = proc->body.irep; if (irep->lv) { for (i = 0; i + 1 < irep->nlocals; ++i) { if (irep->lv[i]) { mrb_sym sym = irep->lv[i]; const char *name = mrb_sym_name(mrb, sym); switch (name[0]) { case '*': case '&': break; default: mrb_hash_set(mrb, vars, mrb_symbol_value(sym), mrb_true_value()); break; } } } } if (MRB_PROC_SCOPE_P(proc)) break; proc = proc->upper; } return mrb_hash_keys(mrb, vars); } const struct RProc * mrb_proc_get_caller(mrb_state *mrb, struct REnv **envp) { struct mrb_context *c = mrb->c; mrb_callinfo *ci = (c->ci > c->cibase) ? c->ci - 1 : c->cibase; const struct RProc *proc = ci->proc; if (!proc || MRB_PROC_CFUNC_P(proc)) { if (envp) *envp = NULL; } else { struct RClass *tc = MRB_PROC_TARGET_CLASS(proc); struct REnv *e = mrb_vm_ci_env(ci); if (e == NULL) { int nstacks = proc->body.irep->nlocals; e = mrb_env_new(mrb, c, ci, nstacks, ci->stack, tc); ci->u.env = e; } else if (tc) { e->c = tc; mrb_field_write_barrier(mrb, (struct RBasic*)e, (struct RBasic*)tc); } if (envp) *envp = e; } return proc; } #define IREP_LVAR_MERGE_DEFAULT 50 #define IREP_LVAR_MERGE_MINIMUM 8 #define IREP_LVAR_MERGE_MAXIMUM 240 #ifdef MRB_IREP_LVAR_MERGE_LIMIT # define IREP_LVAR_MERGE_LIMIT \ ((MRB_IREP_LVAR_MERGE_LIMIT) < IREP_LVAR_MERGE_MINIMUM ? IREP_LVAR_MERGE_MINIMUM : \ (MRB_IREP_LVAR_MERGE_LIMIT) > IREP_LVAR_MERGE_MAXIMUM ? IREP_LVAR_MERGE_MAXIMUM : \ (MRB_IREP_LVAR_MERGE_LIMIT)) #else # define IREP_LVAR_MERGE_LIMIT IREP_LVAR_MERGE_DEFAULT #endif void mrb_proc_merge_lvar(mrb_state *mrb, mrb_irep *irep, struct REnv *env, int num, const mrb_sym *lv, const mrb_value *stack) { mrb_assert(!(irep->flags & MRB_IREP_NO_FREE)); if ((irep->nlocals + num) > IREP_LVAR_MERGE_LIMIT) { mrb_raise(mrb, E_RUNTIME_ERROR, "too many local variables for binding (mruby limitation)"); } if (!lv) { mrb_raise(mrb, E_RUNTIME_ERROR, "unavailable local variable names"); } irep->lv = (mrb_sym*)mrb_realloc(mrb, (mrb_sym*)irep->lv, sizeof(mrb_sym) * (irep->nlocals + num)); env->stack = (mrb_value*)mrb_realloc(mrb, env->stack, sizeof(mrb_value) * (irep->nlocals + 1 /* self */ + num)); mrb_sym *destlv = (mrb_sym*)irep->lv + irep->nlocals - 1 /* self */; mrb_value *destst = env->stack + irep->nlocals; memmove(destlv, lv, sizeof(mrb_sym) * num); if (stack) { memmove(destst, stack, sizeof(mrb_value) * num); for (int i = 0; i < num; i++) { if (!mrb_immediate_p(stack[i])) { mrb_field_write_barrier(mrb, (struct RBasic*)env, (struct RBasic*)mrb_obj_ptr(stack[i])); } } } else { for (int i = num; i > 0; i--, destst++) { *destst = mrb_nil_value(); } } irep->nlocals += num; irep->nregs = irep->nlocals; MRB_ENV_SET_LEN(env, irep->nlocals); } void mrb_init_proc(mrb_state *mrb) { mrb_method_t m; mrb_define_class_method(mrb, mrb->proc_class, "new", mrb_proc_s_new, MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); mrb_define_method(mrb, mrb->proc_class, "initialize_copy", mrb_proc_init_copy, MRB_ARGS_REQ(1)); mrb_define_method(mrb, mrb->proc_class, "arity", proc_arity, MRB_ARGS_NONE()); MRB_METHOD_FROM_PROC(m, &call_proc); mrb_define_method_raw(mrb, mrb->proc_class, MRB_SYM(call), m); mrb_define_method_raw(mrb, mrb->proc_class, MRB_OPSYM(aref), m); mrb_define_class_method(mrb, mrb->kernel_module, "lambda", proc_lambda, 
MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); /* 15.3.1.2.6 */ mrb_define_method(mrb, mrb->kernel_module, "lambda", proc_lambda, MRB_ARGS_NONE()|MRB_ARGS_BLOCK()); /* 15.3.1.3.27 */ }
proc_lambda(mrb_state *mrb, mrb_value self)
{
  mrb_value blk;
  struct RProc *p;

  mrb_get_args(mrb, "&", &blk);
  if (mrb_nil_p(blk)) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "tried to create Proc object without a block");
  }
  if (!mrb_proc_p(blk)) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc");
  }
  p = mrb_proc_ptr(blk);
  if (!MRB_PROC_STRICT_P(p)) {
    struct RProc *p2 = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, p->c);
    mrb_proc_copy(p2, p);
    p2->flags |= MRB_PROC_STRICT;
    return mrb_obj_value(p2);
  }
  return blk;
}
proc_lambda(mrb_state *mrb, mrb_value self)
{
  mrb_value blk;
  struct RProc *p;

  mrb_get_args(mrb, "&", &blk);
  if (mrb_nil_p(blk)) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "tried to create Proc object without a block");
  }
  if (!mrb_proc_p(blk)) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "not a proc");
  }
  p = mrb_proc_ptr(blk);
  if (!MRB_PROC_STRICT_P(p)) {
    struct RProc *p2 = MRB_OBJ_ALLOC(mrb, MRB_TT_PROC, p->c);
    mrb_proc_copy(mrb, p2, p);
    p2->flags |= MRB_PROC_STRICT;
    return mrb_obj_value(p2);
  }
  return blk;
}
diff:
  added:
    204  mrb_proc_copy(mrb_state *mrb, struct RProc *a, struct RProc *b)
    212  a->upper = b->upper;
    214  mrb_irep_incref(mrb, (mrb_irep*)a->body.irep);
    230  mrb_proc_copy(mrb, p, mrb_proc_ptr(blk));
    248  mrb_proc_copy(mrb, mrb_proc_ptr(self), mrb_proc_ptr(proc));
    284  mrb_proc_copy(mrb, p2, p);
  deleted:
    204  mrb_proc_copy(struct RProc *a, struct RProc *b)
    213  mrb_irep_incref(NULL, (mrb_irep*)a->body.irep);
    215  a->upper = b->upper;
    230  mrb_proc_copy(p, mrb_proc_ptr(blk));
    248  mrb_proc_copy(mrb_proc_ptr(self), mrb_proc_ptr(proc));
    284  mrb_proc_copy(p2, p);
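The six-line diff above is the whole CVE-2021-4110 fix: mrb_proc_copy gains an mrb_state* parameter, every call site threads mrb through, mrb_irep_incref receives that live state instead of a literal NULL, and the a->upper assignment is hoisted ahead of the incref. Consistent with the row's CWE-476 tag, the hazard is a helper that may need the interpreter state (for example, to raise an error) being handed a null pointer. Below is a minimal standalone C sketch of the same defensive shape; every name in it (state, irep, irep_incref, proc_copy) is an invented stand-in, not mruby's actual API.

/* Sketch: a refcount helper that can raise must be given a valid
 * state, so the copy routine accepts one and forwards it rather than
 * improvising a NULL.  All names here are hypothetical stand-ins. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef struct state { const char *name; } state;  /* stand-in for mrb_state */
typedef struct irep  { uint16_t refcnt; } irep;    /* stand-in for mrb_irep  */

static void state_raise(state *st, const char *msg)
{
  /* Dereferences st: reaching this with st == NULL is the CWE-476 crash. */
  fprintf(stderr, "%s: %s\n", st->name, msg);
  exit(1);
}

static void irep_incref(state *st, irep *ir)
{
  if (ir->refcnt == UINT16_MAX)
    state_raise(st, "refcount overflow");  /* error path needs a real state */
  ir->refcnt++;
}

typedef struct proc { irep *body; } proc;

/* Fixed shape, mirroring the diff: thread the state through the copy. */
static void proc_copy(state *st, proc *dst, const proc *src)
{
  dst->body = src->body;          /* plain field copies come first */
  if (dst->body)
    irep_incref(st, dst->body);   /* was: irep_incref(NULL, ...) */
}

int main(void)
{
  state st = { "vm" };
  irep  ir = { 1 };
  proc  a = { &ir }, b = { NULL };
  proc_copy(&st, &b, &a);
  printf("refcnt=%u\n", (unsigned)ir.refcnt);  /* prints refcnt=2 */
  return 0;
}

The design point is simply that any callee which can report an error needs a valid state to report it with, so the state has to flow through every intermediate helper.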
num_lines_added: 6
num_lines_deleted: 6
num_lines_in_file: 364
num_tokens_in_file: 2,483
num_lines_in_method: 20
num_tokens_in_method: 121
method_complexity: 4
repo: https://github.com/mruby/mruby
cve_id: CVE-2021-4110
cwe_id: CWE-476
id: 2,633
file_name: sub.cc
programming_language: C++
method_name: tflite::ops::builtin::sub::Prepare
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/sub.h" #include <stddef.h> #include <stdint.h> #include <algorithm> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/optimized/cpu_check.h" #include "tensorflow/lite/kernels/internal/optimized/neon_check.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/add.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h" #include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace sub { // This file has three implementation of Sub. enum KernelType { kReference, kGenericOptimized, // Neon-free kNeonOptimized, }; constexpr int kInputTensor1 = 0; constexpr int kInputTensor2 = 1; constexpr int kOutputTensor = 0; struct OpData { bool requires_broadcast; // These fields are used in both the general 8-bit -> 8bit quantized path, // and the special 16-bit -> 16bit quantized path int input1_shift; int input2_shift; int32 output_activation_min; int32 output_activation_max; // These fields are used only in the general 8-bit -> 8bit quantized path int32 input1_multiplier; int32 input2_multiplier; int32 output_multiplier; int output_shift; int left_shift; int32 input1_offset; int32 input2_offset; int32 output_offset; // This parameter is used to indicate whether // parameter scale is power of two. // It is used in 16-bit -> 16-bit quantization. 
bool pot_scale_int16; }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* data = new OpData; data->requires_broadcast = false; return data; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } TfLiteStatus PrepareGeneralSubOp(TfLiteContext* context, const TfLiteTensor* input_1, const TfLiteTensor* input_2, TfLiteTensor* output, TfLiteSubParams* params, OpData* op_params, int op_sign) { TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16); const auto& input1_quantization_params = input_1->params; const auto& input2_quantization_params = input_2->params; const auto& output_quantization_params = output->params; int32_t integer_type_min = 0; int32_t integer_type_max = 0; if (output->type == kTfLiteUInt8) { integer_type_min = std::numeric_limits<uint8_t>::min(); integer_type_max = std::numeric_limits<uint8_t>::max(); } else if (output->type == kTfLiteInt16) { integer_type_min = std::numeric_limits<int16_t>::min(); integer_type_max = std::numeric_limits<int16_t>::max(); } else { // output->type == kTfLiteInt8 integer_type_min = std::numeric_limits<int8_t>::min(); integer_type_max = std::numeric_limits<int8_t>::max(); } TF_LITE_ENSURE(context, input1_quantization_params.zero_point >= integer_type_min); TF_LITE_ENSURE(context, input1_quantization_params.zero_point <= integer_type_max); TF_LITE_ENSURE(context, input2_quantization_params.zero_point >= integer_type_min); TF_LITE_ENSURE(context, input2_quantization_params.zero_point <= integer_type_max); TF_LITE_ENSURE(context, output_quantization_params.zero_point >= integer_type_min); TF_LITE_ENSURE(context, output_quantization_params.zero_point <= integer_type_max); op_params->input1_offset = -input1_quantization_params.zero_point; op_params->input2_offset = -input2_quantization_params.zero_point; op_params->output_offset = output_quantization_params.zero_point; // The shift is set to 15 in case of 16-bit and 20 in case of 8-bit, // accordingly. In case of 16-bit we have 65535 << 15 which is less than 1 << // 31, therefore the addition will still fit in a 32 bit accumulator. op_params->left_shift = output->type == kTfLiteInt16 ? 
15 : 20; const double twice_max_input_scale = 2 * std::max(input1_quantization_params.scale, input2_quantization_params.scale); const double real_input1_multiplier = input1_quantization_params.scale / twice_max_input_scale; const double real_input2_multiplier = input2_quantization_params.scale / twice_max_input_scale; const double real_output_multiplier = twice_max_input_scale / ((1 << op_params->left_shift) * output_quantization_params.scale); tflite::QuantizeMultiplierSmallerThanOneExp(real_input1_multiplier, &op_params->input1_multiplier, &op_params->input1_shift); tflite::QuantizeMultiplierSmallerThanOneExp(real_input2_multiplier, &op_params->input2_multiplier, &op_params->input2_shift); op_params->input2_multiplier *= op_sign; tflite::QuantizeMultiplierSmallerThanOneExp(real_output_multiplier, &op_params->output_multiplier, &op_params->output_shift); TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &op_params->output_activation_min, &op_params->output_activation_max)); return kTfLiteOk; } TfLiteStatus PrepareInt16SubOpPOT(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, TfLiteSubParams* params, OpData* data) { // 16bit -> 16bit special quantized path, supporting only a rather // narrow case of quantization parameters: zero_points must all be 0 // ("symmetric quantization") and scales must be power-of-two (which // we abbreviate as "POT" below). The intended use case for this path // is in LSTM cells, where, due to the constraints of implementing // some of the math in these LSTM cells in fixed-point arithmetic, // we need to have such symmetric, power-of-two quantization // (Fixed-point formats are inherently symmetric, power-of-two). TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input1_scale_log2_rounded; bool input1_scale_is_pot = CheckedLog2(input1->params.scale, &input1_scale_log2_rounded); TF_LITE_ENSURE(context, input1_scale_is_pot); int input2_scale_log2_rounded; bool input2_scale_is_pot = CheckedLog2(input2->params.scale, &input2_scale_log2_rounded); TF_LITE_ENSURE(context, input2_scale_is_pot); int output_scale_log2_rounded; bool output_scale_is_pot = CheckedLog2(output->params.scale, &output_scale_log2_rounded); TF_LITE_ENSURE(context, output_scale_is_pot); data->input1_shift = input1_scale_log2_rounded - output_scale_log2_rounded; data->input2_shift = input2_scale_log2_rounded - output_scale_log2_rounded; // Shifting of one input is supported. The graph quantization should ensure // that the other input matches the output. 
TF_LITE_ENSURE(context, data->input1_shift == 0 || data->input2_shift == 0); TF_LITE_ENSURE(context, data->input1_shift <= 0); TF_LITE_ENSURE(context, data->input2_shift <= 0); TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); return kTfLiteOk; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = input2->type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } // 8bit -> 8bit general quantized path, with general rescalings // as well as, 16bit -> 16bit with general rescalings bool pot_scale_int16 = true; bool input1_scale_is_pot = false; bool input2_scale_is_pot = false; bool output_scale_is_pot = false; int input1_scale_log2_rounded{0}; int input2_scale_log2_rounded{0}; int output_scale_log2_rounded{0}; if (input1->type == kTfLiteInt16 && input2->type == kTfLiteInt16 && output->type == kTfLiteInt16) { // In case of 16-bit, there are two implementation: // the scale parameter is a general number // the scale parameter is POT and // zero_point is zero for inputs/output. 
pot_scale_int16 = (input1->params.zero_point == 0) && (input2->params.zero_point == 0) && (output->params.zero_point == 0); input1_scale_is_pot = CheckedLog2(input1->params.scale, &input1_scale_log2_rounded); input2_scale_is_pot = CheckedLog2(input2->params.scale, &input2_scale_log2_rounded); output_scale_is_pot = CheckedLog2(output->params.scale, &output_scale_log2_rounded); pot_scale_int16 &= input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot; } data->pot_scale_int16 = pot_scale_int16; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || !pot_scale_int16) { TF_LITE_ENSURE_OK(context, PrepareGeneralSubOp(context, input1, input2, output, params, data, -1)); } else if (output->type == kTfLiteInt16) { // LSTM-special case with scale parameter of POT TF_LITE_ENSURE_OK(context, PrepareInt16SubOpPOT(context, input1, input2, output, params, data)); } return context->ResizeTensor(context, output, output_size); } template <KernelType kernel_type, typename data_type> void EvalSubImpl(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, const OpData* data, const TfLiteTensor* input1, const TfLiteTensor* input2, bool requires_broadcast, TfLiteTensor* output) { data_type output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); tflite::ArithmeticParams op_params; SetActivationParams(output_activation_min, output_activation_max, &op_params); switch (kernel_type) { case kReference: if (requires_broadcast) { reference_ops::BroadcastSubSlow( op_params, GetTensorShape(input1), GetTensorData<data_type>(input1), GetTensorShape(input2), GetTensorData<data_type>(input2), GetTensorShape(output), GetTensorData<data_type>(output)); } else { reference_ops::SubWithActivation( op_params, GetTensorShape(input1), GetTensorData<data_type>(input1), GetTensorShape(input2), GetTensorData<data_type>(input2), GetTensorShape(output), GetTensorData<data_type>(output)); } break; case kGenericOptimized: case kNeonOptimized: if (requires_broadcast) { optimized_ops::BroadcastSubSlow( op_params, GetTensorShape(input1), GetTensorData<data_type>(input1), GetTensorShape(input2), GetTensorData<data_type>(input2), GetTensorShape(output), GetTensorData<data_type>(output)); } else { optimized_ops::SubWithActivation( op_params, GetTensorShape(input1), GetTensorData<data_type>(input1), GetTensorShape(input2), GetTensorData<data_type>(input2), GetTensorShape(output), GetTensorData<data_type>(output)); } break; } } template <KernelType kernel_type> void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, const OpData* data, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output) { const bool requires_broadcast = data->requires_broadcast; switch (output->type) { case kTfLiteInt32: EvalSubImpl<kernel_type, int32_t>(context, node, params, data, input1, input2, requires_broadcast, output); break; case kTfLiteFloat32: EvalSubImpl<kernel_type, float>(context, node, params, data, input1, input2, requires_broadcast, output); break; case kTfLiteInt64: EvalSubImpl<kernel_type, int64_t>(context, node, params, data, input1, input2, requires_broadcast, output); break; default: TF_LITE_KERNEL_LOG(context, "output type %s is not supported.", TfLiteTypeGetName(output->type)); } } template <KernelType kernel_type> void EvalQuantized(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, const OpData* data, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* 
output) { tflite::ArithmeticParams op_params; op_params.left_shift = data->left_shift; op_params.input1_offset = data->input1_offset; op_params.input1_multiplier = data->input1_multiplier; op_params.input1_shift = data->input1_shift; op_params.input2_offset = data->input2_offset; op_params.input2_multiplier = data->input2_multiplier; op_params.input2_shift = data->input2_shift; op_params.output_offset = data->output_offset; op_params.output_multiplier = data->output_multiplier; op_params.output_shift = data->output_shift; SetActivationParams(data->output_activation_min, data->output_activation_max, &op_params); const bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input1), GetTensorShape(input2), &op_params); #define TF_LITE_SUB(type, opname, data_type) \ type::opname(op_params, GetTensorShape(input1), \ GetTensorData<data_type>(input1), GetTensorShape(input2), \ GetTensorData<data_type>(input2), GetTensorShape(output), \ GetTensorData<data_type>(output)) // NOTE: We are using the add kernels. This is possible as the second values // multiplier is negated before being passed down. if (output->type == kTfLiteInt8) { if (need_broadcast) { TF_LITE_SUB(reference_integer_ops, BroadcastAdd4DSlow, int8_t); } else { TF_LITE_SUB(reference_integer_ops, Add, int8_t); } } else if (!data->pot_scale_int16) { if (need_broadcast) { TF_LITE_SUB(reference_ops, BroadcastAdd4DSlow, int16_t); } else { reference_ops::Add(op_params, GetTensorShape(input1), GetTensorData<int16_t>(input1), GetTensorShape(input2), GetTensorData<int16_t>(input2), GetTensorShape(output), GetTensorData<int16_t>(output), false); } } else if (output->type == kTfLiteUInt8) { if (kernel_type == kReference) { if (need_broadcast) { TF_LITE_SUB(reference_ops, BroadcastAdd4DSlow, uint8_t); } else { TF_LITE_SUB(reference_ops, Add, uint8_t); } } else { if (need_broadcast) { optimized_ops::BroadcastAddDispatch( op_params, GetTensorShape(input1), GetTensorData<uint8_t>(input1), GetTensorShape(input2), GetTensorData<uint8_t>(input2), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { TF_LITE_SUB(optimized_ops, Add, uint8_t); } } } else { if (kernel_type == kReference) { if (need_broadcast) { TF_LITE_SUB(reference_ops, BroadcastSubSlow, int16_t); } else { TF_LITE_SUB(reference_ops, Sub16, int16_t); } } else { if (need_broadcast) { TF_LITE_SUB(optimized_ops, BroadcastSubSlow, int16_t); } else { TF_LITE_SUB(optimized_ops, Sub16, int16_t); } } } #undef TF_LITE_SUB } template <KernelType kernel_type> TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32 || output->type == kTfLiteInt64) { EvalSub<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { EvalQuantized<kernel_type>(context, node, params, data, input1, input2, output); } else { context->ReportError( context, "output type %d is not supported, requires float|uint8|int32 types.", output->type); return kTfLiteError; } return kTfLiteOk; } } // namespace sub TfLiteRegistration* Register_SUB_REF() { static TfLiteRegistration r = {sub::Init, 
sub::Free, sub::Prepare, sub::Eval<sub::kReference>}; return &r; } TfLiteRegistration* Register_SUB_GENERIC_OPT() { static TfLiteRegistration r = {sub::Init, sub::Free, sub::Prepare, sub::Eval<sub::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_SUB_NEON_OPT() { static TfLiteRegistration r = {sub::Init, sub::Free, sub::Prepare, sub::Eval<sub::kNeonOptimized>}; return &r; } TfLiteRegistration* Register_SUB() { #ifdef USE_NEON return Register_SUB_NEON_OPT(); #else return Register_SUB_GENERIC_OPT(); #endif } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/sub.h" #include <stddef.h> #include <stdint.h> #include <algorithm> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/optimized/cpu_check.h" #include "tensorflow/lite/kernels/internal/optimized/neon_check.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/add.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h" #include "tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace builtin { namespace sub { // This file has three implementation of Sub. enum KernelType { kReference, kGenericOptimized, // Neon-free kNeonOptimized, }; constexpr int kInputTensor1 = 0; constexpr int kInputTensor2 = 1; constexpr int kOutputTensor = 0; struct OpData { bool requires_broadcast; // These fields are used in both the general 8-bit -> 8bit quantized path, // and the special 16-bit -> 16bit quantized path int input1_shift; int input2_shift; int32 output_activation_min; int32 output_activation_max; // These fields are used only in the general 8-bit -> 8bit quantized path int32 input1_multiplier; int32 input2_multiplier; int32 output_multiplier; int output_shift; int left_shift; int32 input1_offset; int32 input2_offset; int32 output_offset; // This parameter is used to indicate whether // parameter scale is power of two. // It is used in 16-bit -> 16-bit quantization. 
bool pot_scale_int16; }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { auto* data = new OpData; data->requires_broadcast = false; return data; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } TfLiteStatus PrepareGeneralSubOp(TfLiteContext* context, const TfLiteTensor* input_1, const TfLiteTensor* input_2, TfLiteTensor* output, TfLiteSubParams* params, OpData* op_params, int op_sign) { TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16); const auto& input1_quantization_params = input_1->params; const auto& input2_quantization_params = input_2->params; const auto& output_quantization_params = output->params; int32_t integer_type_min = 0; int32_t integer_type_max = 0; if (output->type == kTfLiteUInt8) { integer_type_min = std::numeric_limits<uint8_t>::min(); integer_type_max = std::numeric_limits<uint8_t>::max(); } else if (output->type == kTfLiteInt16) { integer_type_min = std::numeric_limits<int16_t>::min(); integer_type_max = std::numeric_limits<int16_t>::max(); } else { // output->type == kTfLiteInt8 integer_type_min = std::numeric_limits<int8_t>::min(); integer_type_max = std::numeric_limits<int8_t>::max(); } TF_LITE_ENSURE(context, input1_quantization_params.zero_point >= integer_type_min); TF_LITE_ENSURE(context, input1_quantization_params.zero_point <= integer_type_max); TF_LITE_ENSURE(context, input2_quantization_params.zero_point >= integer_type_min); TF_LITE_ENSURE(context, input2_quantization_params.zero_point <= integer_type_max); TF_LITE_ENSURE(context, output_quantization_params.zero_point >= integer_type_min); TF_LITE_ENSURE(context, output_quantization_params.zero_point <= integer_type_max); op_params->input1_offset = -input1_quantization_params.zero_point; op_params->input2_offset = -input2_quantization_params.zero_point; op_params->output_offset = output_quantization_params.zero_point; // The shift is set to 15 in case of 16-bit and 20 in case of 8-bit, // accordingly. In case of 16-bit we have 65535 << 15 which is less than 1 << // 31, therefore the addition will still fit in a 32 bit accumulator. op_params->left_shift = output->type == kTfLiteInt16 ? 
15 : 20; const double twice_max_input_scale = 2 * std::max(input1_quantization_params.scale, input2_quantization_params.scale); const double real_input1_multiplier = input1_quantization_params.scale / twice_max_input_scale; const double real_input2_multiplier = input2_quantization_params.scale / twice_max_input_scale; const double real_output_multiplier = twice_max_input_scale / ((1 << op_params->left_shift) * output_quantization_params.scale); tflite::QuantizeMultiplierSmallerThanOneExp(real_input1_multiplier, &op_params->input1_multiplier, &op_params->input1_shift); tflite::QuantizeMultiplierSmallerThanOneExp(real_input2_multiplier, &op_params->input2_multiplier, &op_params->input2_shift); op_params->input2_multiplier *= op_sign; tflite::QuantizeMultiplierSmallerThanOneExp(real_output_multiplier, &op_params->output_multiplier, &op_params->output_shift); TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &op_params->output_activation_min, &op_params->output_activation_max)); return kTfLiteOk; } TfLiteStatus PrepareInt16SubOpPOT(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, TfLiteSubParams* params, OpData* data) { // 16bit -> 16bit special quantized path, supporting only a rather // narrow case of quantization parameters: zero_points must all be 0 // ("symmetric quantization") and scales must be power-of-two (which // we abbreviate as "POT" below). The intended use case for this path // is in LSTM cells, where, due to the constraints of implementing // some of the math in these LSTM cells in fixed-point arithmetic, // we need to have such symmetric, power-of-two quantization // (Fixed-point formats are inherently symmetric, power-of-two). TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input1_scale_log2_rounded; bool input1_scale_is_pot = CheckedLog2(input1->params.scale, &input1_scale_log2_rounded); TF_LITE_ENSURE(context, input1_scale_is_pot); int input2_scale_log2_rounded; bool input2_scale_is_pot = CheckedLog2(input2->params.scale, &input2_scale_log2_rounded); TF_LITE_ENSURE(context, input2_scale_is_pot); int output_scale_log2_rounded; bool output_scale_is_pot = CheckedLog2(output->params.scale, &output_scale_log2_rounded); TF_LITE_ENSURE(context, output_scale_is_pot); data->input1_shift = input1_scale_log2_rounded - output_scale_log2_rounded; data->input2_shift = input2_scale_log2_rounded - output_scale_log2_rounded; // Shifting of one input is supported. The graph quantization should ensure // that the other input matches the output. 
TF_LITE_ENSURE(context, data->input1_shift == 0 || data->input2_shift == 0); TF_LITE_ENSURE(context, data->input1_shift <= 0); TF_LITE_ENSURE(context, data->input2_shift <= 0); TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); return kTfLiteOk; } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = input2->type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } // 8bit -> 8bit general quantized path, with general rescalings // as well as, 16bit -> 16bit with general rescalings bool pot_scale_int16 = true; bool input1_scale_is_pot = false; bool input2_scale_is_pot = false; bool output_scale_is_pot = false; int input1_scale_log2_rounded{0}; int input2_scale_log2_rounded{0}; int output_scale_log2_rounded{0}; if (input1->type == kTfLiteInt16 && input2->type == kTfLiteInt16 && output->type == kTfLiteInt16) { // In case of 16-bit, there are two implementation: // the scale parameter is a general number // the scale parameter is POT and // zero_point is zero for inputs/output. 
pot_scale_int16 = (input1->params.zero_point == 0) && (input2->params.zero_point == 0) && (output->params.zero_point == 0); input1_scale_is_pot = CheckedLog2(input1->params.scale, &input1_scale_log2_rounded); input2_scale_is_pot = CheckedLog2(input2->params.scale, &input2_scale_log2_rounded); output_scale_is_pot = CheckedLog2(output->params.scale, &output_scale_log2_rounded); pot_scale_int16 &= input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot; } data->pot_scale_int16 = pot_scale_int16; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || !pot_scale_int16) { TF_LITE_ENSURE_OK(context, PrepareGeneralSubOp(context, input1, input2, output, params, data, -1)); } else if (output->type == kTfLiteInt16) { // LSTM-special case with scale parameter of POT TF_LITE_ENSURE_OK(context, PrepareInt16SubOpPOT(context, input1, input2, output, params, data)); } return context->ResizeTensor(context, output, output_size); } template <KernelType kernel_type, typename data_type> void EvalSubImpl(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, const OpData* data, const TfLiteTensor* input1, const TfLiteTensor* input2, bool requires_broadcast, TfLiteTensor* output) { data_type output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); tflite::ArithmeticParams op_params; SetActivationParams(output_activation_min, output_activation_max, &op_params); switch (kernel_type) { case kReference: if (requires_broadcast) { reference_ops::BroadcastSubSlow( op_params, GetTensorShape(input1), GetTensorData<data_type>(input1), GetTensorShape(input2), GetTensorData<data_type>(input2), GetTensorShape(output), GetTensorData<data_type>(output)); } else { reference_ops::SubWithActivation( op_params, GetTensorShape(input1), GetTensorData<data_type>(input1), GetTensorShape(input2), GetTensorData<data_type>(input2), GetTensorShape(output), GetTensorData<data_type>(output)); } break; case kGenericOptimized: case kNeonOptimized: if (requires_broadcast) { optimized_ops::BroadcastSubSlow( op_params, GetTensorShape(input1), GetTensorData<data_type>(input1), GetTensorShape(input2), GetTensorData<data_type>(input2), GetTensorShape(output), GetTensorData<data_type>(output)); } else { optimized_ops::SubWithActivation( op_params, GetTensorShape(input1), GetTensorData<data_type>(input1), GetTensorShape(input2), GetTensorData<data_type>(input2), GetTensorShape(output), GetTensorData<data_type>(output)); } break; } } template <KernelType kernel_type> void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, const OpData* data, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output) { const bool requires_broadcast = data->requires_broadcast; switch (output->type) { case kTfLiteInt32: EvalSubImpl<kernel_type, int32_t>(context, node, params, data, input1, input2, requires_broadcast, output); break; case kTfLiteFloat32: EvalSubImpl<kernel_type, float>(context, node, params, data, input1, input2, requires_broadcast, output); break; case kTfLiteInt64: EvalSubImpl<kernel_type, int64_t>(context, node, params, data, input1, input2, requires_broadcast, output); break; default: TF_LITE_KERNEL_LOG(context, "output type %s is not supported.", TfLiteTypeGetName(output->type)); } } template <KernelType kernel_type> void EvalQuantized(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, const OpData* data, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* 
output) { tflite::ArithmeticParams op_params; op_params.left_shift = data->left_shift; op_params.input1_offset = data->input1_offset; op_params.input1_multiplier = data->input1_multiplier; op_params.input1_shift = data->input1_shift; op_params.input2_offset = data->input2_offset; op_params.input2_multiplier = data->input2_multiplier; op_params.input2_shift = data->input2_shift; op_params.output_offset = data->output_offset; op_params.output_multiplier = data->output_multiplier; op_params.output_shift = data->output_shift; SetActivationParams(data->output_activation_min, data->output_activation_max, &op_params); const bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input1), GetTensorShape(input2), &op_params); #define TF_LITE_SUB(type, opname, data_type) \ type::opname(op_params, GetTensorShape(input1), \ GetTensorData<data_type>(input1), GetTensorShape(input2), \ GetTensorData<data_type>(input2), GetTensorShape(output), \ GetTensorData<data_type>(output)) // NOTE: We are using the add kernels. This is possible as the second values // multiplier is negated before being passed down. if (output->type == kTfLiteInt8) { if (need_broadcast) { TF_LITE_SUB(reference_integer_ops, BroadcastAdd4DSlow, int8_t); } else { TF_LITE_SUB(reference_integer_ops, Add, int8_t); } } else if (!data->pot_scale_int16) { if (need_broadcast) { TF_LITE_SUB(reference_ops, BroadcastAdd4DSlow, int16_t); } else { reference_ops::Add(op_params, GetTensorShape(input1), GetTensorData<int16_t>(input1), GetTensorShape(input2), GetTensorData<int16_t>(input2), GetTensorShape(output), GetTensorData<int16_t>(output), false); } } else if (output->type == kTfLiteUInt8) { if (kernel_type == kReference) { if (need_broadcast) { TF_LITE_SUB(reference_ops, BroadcastAdd4DSlow, uint8_t); } else { TF_LITE_SUB(reference_ops, Add, uint8_t); } } else { if (need_broadcast) { optimized_ops::BroadcastAddDispatch( op_params, GetTensorShape(input1), GetTensorData<uint8_t>(input1), GetTensorShape(input2), GetTensorData<uint8_t>(input2), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { TF_LITE_SUB(optimized_ops, Add, uint8_t); } } } else { if (kernel_type == kReference) { if (need_broadcast) { TF_LITE_SUB(reference_ops, BroadcastSubSlow, int16_t); } else { TF_LITE_SUB(reference_ops, Sub16, int16_t); } } else { if (need_broadcast) { TF_LITE_SUB(optimized_ops, BroadcastSubSlow, int16_t); } else { TF_LITE_SUB(optimized_ops, Sub16, int16_t); } } } #undef TF_LITE_SUB } template <KernelType kernel_type> TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32 || output->type == kTfLiteInt64) { EvalSub<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { EvalQuantized<kernel_type>(context, node, params, data, input1, input2, output); } else { context->ReportError( context, "output type %d is not supported, requires float|uint8|int32 types.", output->type); return kTfLiteError; } 
return kTfLiteOk; } } // namespace sub TfLiteRegistration* Register_SUB_REF() { static TfLiteRegistration r = {sub::Init, sub::Free, sub::Prepare, sub::Eval<sub::kReference>}; return &r; } TfLiteRegistration* Register_SUB_GENERIC_OPT() { static TfLiteRegistration r = {sub::Init, sub::Free, sub::Prepare, sub::Eval<sub::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_SUB_NEON_OPT() { static TfLiteRegistration r = {sub::Init, sub::Free, sub::Prepare, sub::Eval<sub::kNeonOptimized>}; return &r; } TfLiteRegistration* Register_SUB() { #ifdef USE_NEON return Register_SUB_NEON_OPT(); #else return Register_SUB_GENERIC_OPT(); #endif } } // namespace builtin } // namespace ops } // namespace tflite
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
  output->type = input2->type;

  data->requires_broadcast = !HaveSameShapes(input1, input2);

  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }

  // 8bit -> 8bit general quantized path, with general rescalings
  // as well as, 16bit -> 16bit with general rescalings
  bool pot_scale_int16 = true;

  bool input1_scale_is_pot = false;
  bool input2_scale_is_pot = false;
  bool output_scale_is_pot = false;

  int input1_scale_log2_rounded{0};
  int input2_scale_log2_rounded{0};
  int output_scale_log2_rounded{0};

  if (input1->type == kTfLiteInt16 && input2->type == kTfLiteInt16 &&
      output->type == kTfLiteInt16) {
    // In case of 16-bit, there are two implementation:
    // the scale parameter is a general number
    // the scale parameter is POT and
    // zero_point is zero for inputs/output.
    pot_scale_int16 = (input1->params.zero_point == 0) &&
                      (input2->params.zero_point == 0) &&
                      (output->params.zero_point == 0);

    input1_scale_is_pot =
        CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);
    input2_scale_is_pot =
        CheckedLog2(input2->params.scale, &input2_scale_log2_rounded);
    output_scale_is_pot =
        CheckedLog2(output->params.scale, &output_scale_log2_rounded);

    pot_scale_int16 &=
        input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot;
  }

  data->pot_scale_int16 = pot_scale_int16;

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      !pot_scale_int16) {
    TF_LITE_ENSURE_OK(context, PrepareGeneralSubOp(context, input1, input2,
                                                   output, params, data, -1));
  } else if (output->type == kTfLiteInt16) {
    // LSTM-special case with scale parameter of POT
    TF_LITE_ENSURE_OK(context, PrepareInt16SubOpPOT(context, input1, input2,
                                                    output, params, data));
  }

  return context->ResizeTensor(context, output, output_size);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  auto* params = reinterpret_cast<TfLiteSubParams*>(node->builtin_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor1, &input1));
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputTensor2, &input2));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));

  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
  output->type = input2->type;

  data->requires_broadcast = !HaveSameShapes(input1, input2);

  TfLiteIntArray* output_size = nullptr;
  if (data->requires_broadcast) {
    TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
                                   context, input1, input2, &output_size));
  } else {
    output_size = TfLiteIntArrayCopy(input1->dims);
  }

  // 8bit -> 8bit general quantized path, with general rescalings
  // as well as, 16bit -> 16bit with general rescalings
  bool pot_scale_int16 = true;

  bool input1_scale_is_pot = false;
  bool input2_scale_is_pot = false;
  bool output_scale_is_pot = false;

  int input1_scale_log2_rounded{0};
  int input2_scale_log2_rounded{0};
  int output_scale_log2_rounded{0};

  if (input1->type == kTfLiteInt16 && input2->type == kTfLiteInt16 &&
      output->type == kTfLiteInt16) {
    // In case of 16-bit, there are two implementation:
    // the scale parameter is a general number
    // the scale parameter is POT and
    // zero_point is zero for inputs/output.
    pot_scale_int16 = (input1->params.zero_point == 0) &&
                      (input2->params.zero_point == 0) &&
                      (output->params.zero_point == 0);

    input1_scale_is_pot =
        CheckedLog2(input1->params.scale, &input1_scale_log2_rounded);
    input2_scale_is_pot =
        CheckedLog2(input2->params.scale, &input2_scale_log2_rounded);
    output_scale_is_pot =
        CheckedLog2(output->params.scale, &output_scale_log2_rounded);

    pot_scale_int16 &=
        input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot;
  }

  data->pot_scale_int16 = pot_scale_int16;

  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      !pot_scale_int16) {
    TF_LITE_ENSURE_OK(context, PrepareGeneralSubOp(context, input1, input2,
                                                   output, params, data, -1));
  } else if (output->type == kTfLiteInt16) {
    // LSTM-special case with scale parameter of POT
    TF_LITE_ENSURE_OK(context, PrepareInt16SubOpPOT(context, input1, input2,
                                                    output, params, data));
  }

  return context->ResizeTensor(context, output, output_size);
}
diff:
  added:
    220  const TfLiteTensor* input1;
    221  TF_LITE_ENSURE_OK(context,
    222  GetInputSafe(context, node, kInputTensor1, &input1));
    223  const TfLiteTensor* input2;
    224  TF_LITE_ENSURE_OK(context,
    225  GetInputSafe(context, node, kInputTensor2, &input2));
    226  TfLiteTensor* output;
    227  TF_LITE_ENSURE_OK(context,
    228  GetOutputSafe(context, node, kOutputTensor, &output));
    444  const TfLiteTensor* input1;
    445  TF_LITE_ENSURE_OK(context,
    446  GetInputSafe(context, node, kInputTensor1, &input1));
    447  const TfLiteTensor* input2;
    448  TF_LITE_ENSURE_OK(context,
    449  GetInputSafe(context, node, kInputTensor2, &input2));
    450  TfLiteTensor* output;
    451  TF_LITE_ENSURE_OK(context,
    452  GetOutputSafe(context, node, kOutputTensor, &output));
  deleted:
    220  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
    221  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
    222  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
    438  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
    439  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
    440  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
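This diff is the sub-kernel slice of the CVE-2020-15211 fix: in both Prepare and Eval, the raw GetInput/GetOutput accessors are replaced with GetInputSafe/GetOutputSafe wrapped in TF_LITE_ENSURE_OK, so a malformed model whose tensor index is out of range surfaces as a kTfLiteError status instead of an out-of-bounds read (the row's CWE-125 tag). The standalone C sketch below reproduces that lookup-then-propagate shape with simplified stand-in types; none of the names (tensor, node, get_input_safe, ENSURE_OK) are the real TfLite definitions.

/* Sketch of the accessor hardening pattern, under invented types. */
#include <stdio.h>
#include <stddef.h>

typedef enum { STATUS_OK = 0, STATUS_ERROR = 1 } status;
typedef struct { int type; } tensor;
typedef struct { tensor **inputs; int num_inputs; } node;

/* Unsafe shape: an out-of-range index silently yields NULL, and the
 * original callers dereferenced the result unconditionally. */
static tensor *get_input(const node *n, int i)
{
  if (i < 0 || i >= n->num_inputs)
    return NULL;
  return n->inputs[i];
}

/* Safe shape: the bounds/null check becomes a status the caller must
 * propagate before ever touching the tensor. */
static status get_input_safe(const node *n, int i, tensor **out)
{
  tensor *t = get_input(n, i);
  if (t == NULL)
    return STATUS_ERROR;
  *out = t;
  return STATUS_OK;
}

#define ENSURE_OK(expr) \
  do { if ((expr) != STATUS_OK) return STATUS_ERROR; } while (0)

static status prepare(const node *n)
{
  tensor *input1;
  ENSURE_OK(get_input_safe(n, 0, &input1));  /* no dereference past a bad index */
  printf("input type = %d\n", input1->type);
  return STATUS_OK;
}

int main(void)
{
  tensor t = { 3 };
  tensor *ins[] = { &t };
  node good = { ins, 1 };
  node bad  = { NULL, 0 };
  printf("good: %d\n", prepare(&good));  /* prints the type, then 0 */
  printf("bad:  %d\n", prepare(&bad));   /* prints 1; no crash or OOB read */
  return 0;
}

The out-parameter style makes the status hard to ignore: the pointer is only written once the lookup has been validated, so every later dereference sits behind a check.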
num_lines_added: 18
num_lines_deleted: 6
num_lines_in_file: 389
num_tokens_in_file: 2,572
num_lines_in_method: 50
num_tokens_in_method: 416
method_complexity: 13
repo: https://github.com/tensorflow/tensorflow
cve_id: CVE-2020-15211
cwe_id: CWE-125
id: 3,176
file_name: auth_x.c
programming_language: C
method_name: ceph_x_proc_ticket_reply
#include <linux/ceph/ceph_debug.h> #include <linux/err.h> #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> #include "crypto.h" #include "auth_x.h" #include "auth_x_protocol.h" #define TEMP_TICKET_BUF_LEN 256 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); static int ceph_x_is_authenticated(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_is_authenticated want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return (ac->want_keys & xi->have_keys) == ac->want_keys; } static int ceph_x_should_authenticate(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_should_authenticate want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return need != 0; } static int ceph_x_encrypt_buflen(int ilen) { return sizeof(struct ceph_x_encrypt_header) + ilen + 16 + sizeof(u32); } static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *ibuf, int ilen, void *obuf, size_t olen) { struct ceph_x_encrypt_header head = { .struct_v = 1, .magic = cpu_to_le64(CEPHX_ENC_MAGIC) }; size_t len = olen - sizeof(u32); int ret; ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len, &head, sizeof(head), ibuf, ilen); if (ret) return ret; ceph_encode_32(&obuf, len); return len + sizeof(u32); } static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end, void *obuf, size_t olen) { struct ceph_x_encrypt_header head; size_t head_len = sizeof(head); int len, ret; len = ceph_decode_32(p); if (*p + len > end) return -EINVAL; dout("ceph_x_decrypt len %d\n", len); ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen, *p, len); if (ret) return ret; if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) return -EPERM; *p += len; return olen; } /* * get existing (or insert new) ticket handler */ static struct ceph_x_ticket_handler * get_ticket_handler(struct ceph_auth_client *ac, int service) { struct ceph_x_ticket_handler *th; struct ceph_x_info *xi = ac->private; struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node; while (*p) { parent = *p; th = rb_entry(parent, struct ceph_x_ticket_handler, node); if (service < th->service) p = &(*p)->rb_left; else if (service > th->service) p = &(*p)->rb_right; else return th; } /* add it */ th = kzalloc(sizeof(*th), GFP_NOFS); if (!th) return ERR_PTR(-ENOMEM); th->service = service; rb_link_node(&th->node, parent, p); rb_insert_color(&th->node, &xi->ticket_handlers); return th; } static void remove_ticket_handler(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th) { struct ceph_x_info *xi = ac->private; dout("remove_ticket_handler %p %d\n", th, th->service); rb_erase(&th->node, &xi->ticket_handlers); ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); kfree(th); } static int process_one_ticket(struct ceph_auth_client *ac, struct ceph_crypto_key *secret, void **p, void *end, void *dbuf, void *ticket_buf) { struct ceph_x_info *xi = ac->private; int type; u8 tkt_struct_v, blob_struct_v; struct ceph_x_ticket_handler *th; void *dp, *dend; int dlen; char is_enc; struct timespec validity; struct ceph_crypto_key old_key; void *tp, *tpend; struct ceph_timespec new_validity; struct ceph_crypto_key new_session_key; struct ceph_buffer *new_ticket_blob; unsigned long new_expires, new_renew_after; u64 
new_secret_id; int ret; ceph_decode_need(p, end, sizeof(u32) + 1, bad); type = ceph_decode_32(p); dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); tkt_struct_v = ceph_decode_8(p); if (tkt_struct_v != 1) goto bad; th = get_ticket_handler(ac, type); if (IS_ERR(th)) { ret = PTR_ERR(th); goto out; } /* blob for me */ dlen = ceph_x_decrypt(secret, p, end, dbuf, TEMP_TICKET_BUF_LEN); if (dlen <= 0) { ret = dlen; goto out; } dout(" decrypted %d bytes\n", dlen); dp = dbuf; dend = dp + dlen; tkt_struct_v = ceph_decode_8(&dp); if (tkt_struct_v != 1) goto bad; memcpy(&old_key, &th->session_key, sizeof(old_key)); ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); if (ret) goto out; ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); ceph_decode_timespec(&validity, &new_validity); new_expires = get_seconds() + validity.tv_sec; new_renew_after = new_expires - (validity.tv_sec / 4); dout(" expires=%lu renew_after=%lu\n", new_expires, new_renew_after); /* ticket blob for service */ ceph_decode_8_safe(p, end, is_enc, bad); tp = ticket_buf; if (is_enc) { /* encrypted */ dout(" encrypted ticket\n"); dlen = ceph_x_decrypt(&old_key, p, end, ticket_buf, TEMP_TICKET_BUF_LEN); if (dlen < 0) { ret = dlen; goto out; } dlen = ceph_decode_32(&tp); } else { /* unencrypted */ ceph_decode_32_safe(p, end, dlen, bad); ceph_decode_need(p, end, dlen, bad); ceph_decode_copy(p, ticket_buf, dlen); } tpend = tp + dlen; dout(" ticket blob is %d bytes\n", dlen); ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); blob_struct_v = ceph_decode_8(&tp); new_secret_id = ceph_decode_64(&tp); ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); if (ret) goto out; /* all is well, update our ticket */ ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); th->session_key = new_session_key; th->ticket_blob = new_ticket_blob; th->validity = new_validity; th->secret_id = new_secret_id; th->expires = new_expires; th->renew_after = new_renew_after; dout(" got ticket service %d (%s) secret_id %lld len %d\n", type, ceph_entity_type_name(type), th->secret_id, (int)th->ticket_blob->vec.iov_len); xi->have_keys |= th->service; out: return ret; bad: ret = -EINVAL; goto out; } static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, struct ceph_crypto_key *secret, void *buf, void *end) { void *p = buf; char *dbuf; char *ticket_buf; u8 reply_struct_v; u32 num; int ret; dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); if (!dbuf) return -ENOMEM; ret = -ENOMEM; ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); if (!ticket_buf) goto out_dbuf; ceph_decode_8_safe(&p, end, reply_struct_v, bad); if (reply_struct_v != 1) return -EINVAL; ceph_decode_32_safe(&p, end, num, bad); dout("%d tickets\n", num); while (num--) { ret = process_one_ticket(ac, secret, &p, end, dbuf, ticket_buf); if (ret) goto out; } ret = 0; out: kfree(ticket_buf); out_dbuf: kfree(dbuf); return ret; bad: ret = -EINVAL; goto out; } static int ceph_x_build_authorizer(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th, struct ceph_x_authorizer *au) { int maxlen; struct ceph_x_authorize_a *msg_a; struct ceph_x_authorize_b msg_b; void *p, *end; int ret; int ticket_blob_len = (th->ticket_blob ? 
th->ticket_blob->vec.iov_len : 0); dout("build_authorizer for %s %p\n", ceph_entity_type_name(th->service), au); maxlen = sizeof(*msg_a) + sizeof(msg_b) + ceph_x_encrypt_buflen(ticket_blob_len); dout(" need len %d\n", maxlen); if (au->buf && au->buf->alloc_len < maxlen) { ceph_buffer_put(au->buf); au->buf = NULL; } if (!au->buf) { au->buf = ceph_buffer_new(maxlen, GFP_NOFS); if (!au->buf) return -ENOMEM; } au->service = th->service; au->secret_id = th->secret_id; msg_a = au->buf->vec.iov_base; msg_a->struct_v = 1; msg_a->global_id = cpu_to_le64(ac->global_id); msg_a->service_id = cpu_to_le32(th->service); msg_a->ticket_blob.struct_v = 1; msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id); msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len); if (ticket_blob_len) { memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base, th->ticket_blob->vec.iov_len); } dout(" th %p secret_id %lld %lld\n", th, th->secret_id, le64_to_cpu(msg_a->ticket_blob.secret_id)); p = msg_a + 1; p += ticket_blob_len; end = au->buf->vec.iov_base + au->buf->vec.iov_len; get_random_bytes(&au->nonce, sizeof(au->nonce)); msg_b.struct_v = 1; msg_b.nonce = cpu_to_le64(au->nonce); ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b), p, end - p); if (ret < 0) goto out_buf; p += ret; au->buf->vec.iov_len = p - au->buf->vec.iov_base; dout(" built authorizer nonce %llx len %d\n", au->nonce, (int)au->buf->vec.iov_len); BUG_ON(au->buf->vec.iov_len > maxlen); return 0; out_buf: ceph_buffer_put(au->buf); au->buf = NULL; return ret; } static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th, void **p, void *end) { ceph_decode_need(p, end, 1 + sizeof(u64), bad); ceph_encode_8(p, 1); ceph_encode_64(p, th->secret_id); if (th->ticket_blob) { const char *buf = th->ticket_blob->vec.iov_base; u32 len = th->ticket_blob->vec.iov_len; ceph_encode_32_safe(p, end, len, bad); ceph_encode_copy_safe(p, end, buf, len, bad); } else { ceph_encode_32_safe(p, end, 0, bad); } return 0; bad: return -ERANGE; } static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed) { int want = ac->want_keys; struct ceph_x_info *xi = ac->private; int service; *pneed = ac->want_keys & ~(xi->have_keys); for (service = 1; service <= want; service <<= 1) { struct ceph_x_ticket_handler *th; if (!(ac->want_keys & service)) continue; if (*pneed & service) continue; th = get_ticket_handler(ac, service); if (IS_ERR(th)) { *pneed |= service; continue; } if (get_seconds() >= th->renew_after) *pneed |= service; if (get_seconds() >= th->expires) xi->have_keys &= ~service; } } static int ceph_x_build_request(struct ceph_auth_client *ac, void *buf, void *end) { struct ceph_x_info *xi = ac->private; int need; struct ceph_x_request_header *head = buf; int ret; struct ceph_x_ticket_handler *th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ceph_x_validate_tickets(ac, &need); dout("build_request want %x have %x need %x\n", ac->want_keys, xi->have_keys, need); if (need & CEPH_ENTITY_TYPE_AUTH) { struct ceph_x_authenticate *auth = (void *)(head + 1); void *p = auth + 1; struct ceph_x_challenge_blob tmp; char tmp_enc[40]; u64 *u; if (p > end) return -ERANGE; dout(" get_auth_session_key\n"); head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY); /* encrypt and hash */ get_random_bytes(&auth->client_challenge, sizeof(u64)); tmp.client_challenge = auth->client_challenge; tmp.server_challenge = cpu_to_le64(xi->server_challenge); ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp), tmp_enc, sizeof(tmp_enc)); if 
(ret < 0) return ret; auth->struct_v = 1; auth->key = 0; for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++) auth->key ^= *(__le64 *)u; dout(" server_challenge %llx client_challenge %llx key %llx\n", xi->server_challenge, le64_to_cpu(auth->client_challenge), le64_to_cpu(auth->key)); /* now encode the old ticket if exists */ ret = ceph_x_encode_ticket(th, &p, end); if (ret < 0) return ret; return p - buf; } if (need) { void *p = head + 1; struct ceph_x_service_ticket_request *req; if (p > end) return -ERANGE; head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY); ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer); if (ret) return ret; ceph_encode_copy(&p, xi->auth_authorizer.buf->vec.iov_base, xi->auth_authorizer.buf->vec.iov_len); req = p; req->keys = cpu_to_le32(need); p += sizeof(*req); return p - buf; } return 0; } static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, void *buf, void *end) { struct ceph_x_info *xi = ac->private; struct ceph_x_reply_header *head = buf; struct ceph_x_ticket_handler *th; int len = end - buf; int op; int ret; if (result) return result; /* XXX hmm? */ if (xi->starting) { /* it's a hello */ struct ceph_x_server_challenge *sc = buf; if (len != sizeof(*sc)) return -EINVAL; xi->server_challenge = le64_to_cpu(sc->server_challenge); dout("handle_reply got server challenge %llx\n", xi->server_challenge); xi->starting = false; xi->have_keys &= ~CEPH_ENTITY_TYPE_AUTH; return -EAGAIN; } op = le16_to_cpu(head->op); result = le32_to_cpu(head->result); dout("handle_reply op %d result %d\n", op, result); switch (op) { case CEPHX_GET_AUTH_SESSION_KEY: /* verify auth key */ ret = ceph_x_proc_ticket_reply(ac, &xi->secret, buf + sizeof(*head), end); break; case CEPHX_GET_PRINCIPAL_SESSION_KEY: th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_proc_ticket_reply(ac, &th->session_key, buf + sizeof(*head), end); break; default: return -EINVAL; } if (ret) return ret; if (ac->want_keys == xi->have_keys) return 0; return -EAGAIN; } static int ceph_x_create_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; int ret; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) return PTR_ERR(th); au = kzalloc(sizeof(*au), GFP_NOFS); if (!au) return -ENOMEM; ret = ceph_x_build_authorizer(ac, th, au); if (ret) { kfree(au); return ret; } auth->authorizer = (struct ceph_authorizer *) au; auth->authorizer_buf = au->buf->vec.iov_base; auth->authorizer_buf_len = au->buf->vec.iov_len; auth->authorizer_reply_buf = au->reply_buf; auth->authorizer_reply_buf_len = sizeof (au->reply_buf); return 0; } static int ceph_x_update_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) return PTR_ERR(th); au = (struct ceph_x_authorizer *)auth->authorizer; if (au->secret_id < th->secret_id) { dout("ceph_x_update_authorizer service %u secret %llu < %llu\n", au->service, au->secret_id, th->secret_id); return ceph_x_build_authorizer(ac, th, au); } return 0; } static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len) { struct ceph_x_authorizer *au = (void *)a; struct ceph_x_ticket_handler *th; int ret = 0; struct ceph_x_authorize_reply reply; void *p = au->reply_buf; void *end = p + sizeof(au->reply_buf); th = 
get_ticket_handler(ac, au->service); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply)); if (ret < 0) return ret; if (ret != sizeof(reply)) return -EPERM; if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one)) ret = -EPERM; else ret = 0; dout("verify_authorizer_reply nonce %llx got %llx ret %d\n", au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); return ret; } static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a) { struct ceph_x_authorizer *au = (void *)a; ceph_buffer_put(au->buf); kfree(au); } static void ceph_x_reset(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; dout("reset\n"); xi->starting = true; xi->server_challenge = 0; } static void ceph_x_destroy(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; struct rb_node *p; dout("ceph_x_destroy %p\n", ac); ceph_crypto_key_destroy(&xi->secret); while ((p = rb_first(&xi->ticket_handlers)) != NULL) { struct ceph_x_ticket_handler *th = rb_entry(p, struct ceph_x_ticket_handler, node); remove_ticket_handler(ac, th); } if (xi->auth_authorizer.buf) ceph_buffer_put(xi->auth_authorizer.buf); kfree(ac->private); ac->private = NULL; } static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) { struct ceph_x_ticket_handler *th; th = get_ticket_handler(ac, peer_type); if (!IS_ERR(th)) memset(&th->validity, 0, sizeof(th->validity)); } static const struct ceph_auth_client_ops ceph_x_ops = { .name = "x", .is_authenticated = ceph_x_is_authenticated, .should_authenticate = ceph_x_should_authenticate, .build_request = ceph_x_build_request, .handle_reply = ceph_x_handle_reply, .create_authorizer = ceph_x_create_authorizer, .update_authorizer = ceph_x_update_authorizer, .verify_authorizer_reply = ceph_x_verify_authorizer_reply, .destroy_authorizer = ceph_x_destroy_authorizer, .invalidate_authorizer = ceph_x_invalidate_authorizer, .reset = ceph_x_reset, .destroy = ceph_x_destroy, }; int ceph_x_init(struct ceph_auth_client *ac) { struct ceph_x_info *xi; int ret; dout("ceph_x_init %p\n", ac); ret = -ENOMEM; xi = kzalloc(sizeof(*xi), GFP_NOFS); if (!xi) goto out; ret = -EINVAL; if (!ac->key) { pr_err("no secret set (for auth_x protocol)\n"); goto out_nomem; } ret = ceph_crypto_key_clone(&xi->secret, ac->key); if (ret < 0) { pr_err("cannot clone key: %d\n", ret); goto out_nomem; } xi->starting = true; xi->ticket_handlers = RB_ROOT; ac->protocol = CEPH_AUTH_CEPHX; ac->private = xi; ac->ops = &ceph_x_ops; return 0; out_nomem: kfree(xi); out: return ret; }
#include <linux/ceph/ceph_debug.h> #include <linux/err.h> #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/ceph/decode.h> #include <linux/ceph/auth.h> #include "crypto.h" #include "auth_x.h" #include "auth_x_protocol.h" static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); static int ceph_x_is_authenticated(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_is_authenticated want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return (ac->want_keys & xi->have_keys) == ac->want_keys; } static int ceph_x_should_authenticate(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; int need; ceph_x_validate_tickets(ac, &need); dout("ceph_x_should_authenticate want=%d need=%d have=%d\n", ac->want_keys, need, xi->have_keys); return need != 0; } static int ceph_x_encrypt_buflen(int ilen) { return sizeof(struct ceph_x_encrypt_header) + ilen + 16 + sizeof(u32); } static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *ibuf, int ilen, void *obuf, size_t olen) { struct ceph_x_encrypt_header head = { .struct_v = 1, .magic = cpu_to_le64(CEPHX_ENC_MAGIC) }; size_t len = olen - sizeof(u32); int ret; ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len, &head, sizeof(head), ibuf, ilen); if (ret) return ret; ceph_encode_32(&obuf, len); return len + sizeof(u32); } static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end, void **obuf, size_t olen) { struct ceph_x_encrypt_header head; size_t head_len = sizeof(head); int len, ret; len = ceph_decode_32(p); if (*p + len > end) return -EINVAL; dout("ceph_x_decrypt len %d\n", len); if (*obuf == NULL) { *obuf = kmalloc(len, GFP_NOFS); if (!*obuf) return -ENOMEM; olen = len; } ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len); if (ret) return ret; if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) return -EPERM; *p += len; return olen; } /* * get existing (or insert new) ticket handler */ static struct ceph_x_ticket_handler * get_ticket_handler(struct ceph_auth_client *ac, int service) { struct ceph_x_ticket_handler *th; struct ceph_x_info *xi = ac->private; struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node; while (*p) { parent = *p; th = rb_entry(parent, struct ceph_x_ticket_handler, node); if (service < th->service) p = &(*p)->rb_left; else if (service > th->service) p = &(*p)->rb_right; else return th; } /* add it */ th = kzalloc(sizeof(*th), GFP_NOFS); if (!th) return ERR_PTR(-ENOMEM); th->service = service; rb_link_node(&th->node, parent, p); rb_insert_color(&th->node, &xi->ticket_handlers); return th; } static void remove_ticket_handler(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th) { struct ceph_x_info *xi = ac->private; dout("remove_ticket_handler %p %d\n", th, th->service); rb_erase(&th->node, &xi->ticket_handlers); ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); kfree(th); } static int process_one_ticket(struct ceph_auth_client *ac, struct ceph_crypto_key *secret, void **p, void *end) { struct ceph_x_info *xi = ac->private; int type; u8 tkt_struct_v, blob_struct_v; struct ceph_x_ticket_handler *th; void *dbuf = NULL; void *dp, *dend; int dlen; char is_enc; struct timespec validity; struct ceph_crypto_key old_key; void *ticket_buf = NULL; void *tp, *tpend; struct ceph_timespec new_validity; struct ceph_crypto_key new_session_key; struct 
ceph_buffer *new_ticket_blob; unsigned long new_expires, new_renew_after; u64 new_secret_id; int ret; ceph_decode_need(p, end, sizeof(u32) + 1, bad); type = ceph_decode_32(p); dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); tkt_struct_v = ceph_decode_8(p); if (tkt_struct_v != 1) goto bad; th = get_ticket_handler(ac, type); if (IS_ERR(th)) { ret = PTR_ERR(th); goto out; } /* blob for me */ dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0); if (dlen <= 0) { ret = dlen; goto out; } dout(" decrypted %d bytes\n", dlen); dp = dbuf; dend = dp + dlen; tkt_struct_v = ceph_decode_8(&dp); if (tkt_struct_v != 1) goto bad; memcpy(&old_key, &th->session_key, sizeof(old_key)); ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); if (ret) goto out; ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); ceph_decode_timespec(&validity, &new_validity); new_expires = get_seconds() + validity.tv_sec; new_renew_after = new_expires - (validity.tv_sec / 4); dout(" expires=%lu renew_after=%lu\n", new_expires, new_renew_after); /* ticket blob for service */ ceph_decode_8_safe(p, end, is_enc, bad); if (is_enc) { /* encrypted */ dout(" encrypted ticket\n"); dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0); if (dlen < 0) { ret = dlen; goto out; } tp = ticket_buf; dlen = ceph_decode_32(&tp); } else { /* unencrypted */ ceph_decode_32_safe(p, end, dlen, bad); ticket_buf = kmalloc(dlen, GFP_NOFS); if (!ticket_buf) { ret = -ENOMEM; goto out; } tp = ticket_buf; ceph_decode_need(p, end, dlen, bad); ceph_decode_copy(p, ticket_buf, dlen); } tpend = tp + dlen; dout(" ticket blob is %d bytes\n", dlen); ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); blob_struct_v = ceph_decode_8(&tp); new_secret_id = ceph_decode_64(&tp); ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); if (ret) goto out; /* all is well, update our ticket */ ceph_crypto_key_destroy(&th->session_key); if (th->ticket_blob) ceph_buffer_put(th->ticket_blob); th->session_key = new_session_key; th->ticket_blob = new_ticket_blob; th->validity = new_validity; th->secret_id = new_secret_id; th->expires = new_expires; th->renew_after = new_renew_after; dout(" got ticket service %d (%s) secret_id %lld len %d\n", type, ceph_entity_type_name(type), th->secret_id, (int)th->ticket_blob->vec.iov_len); xi->have_keys |= th->service; out: kfree(ticket_buf); kfree(dbuf); return ret; bad: ret = -EINVAL; goto out; } static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, struct ceph_crypto_key *secret, void *buf, void *end) { void *p = buf; u8 reply_struct_v; u32 num; int ret; ceph_decode_8_safe(&p, end, reply_struct_v, bad); if (reply_struct_v != 1) return -EINVAL; ceph_decode_32_safe(&p, end, num, bad); dout("%d tickets\n", num); while (num--) { ret = process_one_ticket(ac, secret, &p, end); if (ret) return ret; } return 0; bad: return -EINVAL; } static int ceph_x_build_authorizer(struct ceph_auth_client *ac, struct ceph_x_ticket_handler *th, struct ceph_x_authorizer *au) { int maxlen; struct ceph_x_authorize_a *msg_a; struct ceph_x_authorize_b msg_b; void *p, *end; int ret; int ticket_blob_len = (th->ticket_blob ? 
th->ticket_blob->vec.iov_len : 0); dout("build_authorizer for %s %p\n", ceph_entity_type_name(th->service), au); maxlen = sizeof(*msg_a) + sizeof(msg_b) + ceph_x_encrypt_buflen(ticket_blob_len); dout(" need len %d\n", maxlen); if (au->buf && au->buf->alloc_len < maxlen) { ceph_buffer_put(au->buf); au->buf = NULL; } if (!au->buf) { au->buf = ceph_buffer_new(maxlen, GFP_NOFS); if (!au->buf) return -ENOMEM; } au->service = th->service; au->secret_id = th->secret_id; msg_a = au->buf->vec.iov_base; msg_a->struct_v = 1; msg_a->global_id = cpu_to_le64(ac->global_id); msg_a->service_id = cpu_to_le32(th->service); msg_a->ticket_blob.struct_v = 1; msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id); msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len); if (ticket_blob_len) { memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base, th->ticket_blob->vec.iov_len); } dout(" th %p secret_id %lld %lld\n", th, th->secret_id, le64_to_cpu(msg_a->ticket_blob.secret_id)); p = msg_a + 1; p += ticket_blob_len; end = au->buf->vec.iov_base + au->buf->vec.iov_len; get_random_bytes(&au->nonce, sizeof(au->nonce)); msg_b.struct_v = 1; msg_b.nonce = cpu_to_le64(au->nonce); ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b), p, end - p); if (ret < 0) goto out_buf; p += ret; au->buf->vec.iov_len = p - au->buf->vec.iov_base; dout(" built authorizer nonce %llx len %d\n", au->nonce, (int)au->buf->vec.iov_len); BUG_ON(au->buf->vec.iov_len > maxlen); return 0; out_buf: ceph_buffer_put(au->buf); au->buf = NULL; return ret; } static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th, void **p, void *end) { ceph_decode_need(p, end, 1 + sizeof(u64), bad); ceph_encode_8(p, 1); ceph_encode_64(p, th->secret_id); if (th->ticket_blob) { const char *buf = th->ticket_blob->vec.iov_base; u32 len = th->ticket_blob->vec.iov_len; ceph_encode_32_safe(p, end, len, bad); ceph_encode_copy_safe(p, end, buf, len, bad); } else { ceph_encode_32_safe(p, end, 0, bad); } return 0; bad: return -ERANGE; } static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed) { int want = ac->want_keys; struct ceph_x_info *xi = ac->private; int service; *pneed = ac->want_keys & ~(xi->have_keys); for (service = 1; service <= want; service <<= 1) { struct ceph_x_ticket_handler *th; if (!(ac->want_keys & service)) continue; if (*pneed & service) continue; th = get_ticket_handler(ac, service); if (IS_ERR(th)) { *pneed |= service; continue; } if (get_seconds() >= th->renew_after) *pneed |= service; if (get_seconds() >= th->expires) xi->have_keys &= ~service; } } static int ceph_x_build_request(struct ceph_auth_client *ac, void *buf, void *end) { struct ceph_x_info *xi = ac->private; int need; struct ceph_x_request_header *head = buf; int ret; struct ceph_x_ticket_handler *th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ceph_x_validate_tickets(ac, &need); dout("build_request want %x have %x need %x\n", ac->want_keys, xi->have_keys, need); if (need & CEPH_ENTITY_TYPE_AUTH) { struct ceph_x_authenticate *auth = (void *)(head + 1); void *p = auth + 1; struct ceph_x_challenge_blob tmp; char tmp_enc[40]; u64 *u; if (p > end) return -ERANGE; dout(" get_auth_session_key\n"); head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY); /* encrypt and hash */ get_random_bytes(&auth->client_challenge, sizeof(u64)); tmp.client_challenge = auth->client_challenge; tmp.server_challenge = cpu_to_le64(xi->server_challenge); ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp), tmp_enc, sizeof(tmp_enc)); if 
(ret < 0) return ret; auth->struct_v = 1; auth->key = 0; for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++) auth->key ^= *(__le64 *)u; dout(" server_challenge %llx client_challenge %llx key %llx\n", xi->server_challenge, le64_to_cpu(auth->client_challenge), le64_to_cpu(auth->key)); /* now encode the old ticket if exists */ ret = ceph_x_encode_ticket(th, &p, end); if (ret < 0) return ret; return p - buf; } if (need) { void *p = head + 1; struct ceph_x_service_ticket_request *req; if (p > end) return -ERANGE; head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY); ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer); if (ret) return ret; ceph_encode_copy(&p, xi->auth_authorizer.buf->vec.iov_base, xi->auth_authorizer.buf->vec.iov_len); req = p; req->keys = cpu_to_le32(need); p += sizeof(*req); return p - buf; } return 0; } static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result, void *buf, void *end) { struct ceph_x_info *xi = ac->private; struct ceph_x_reply_header *head = buf; struct ceph_x_ticket_handler *th; int len = end - buf; int op; int ret; if (result) return result; /* XXX hmm? */ if (xi->starting) { /* it's a hello */ struct ceph_x_server_challenge *sc = buf; if (len != sizeof(*sc)) return -EINVAL; xi->server_challenge = le64_to_cpu(sc->server_challenge); dout("handle_reply got server challenge %llx\n", xi->server_challenge); xi->starting = false; xi->have_keys &= ~CEPH_ENTITY_TYPE_AUTH; return -EAGAIN; } op = le16_to_cpu(head->op); result = le32_to_cpu(head->result); dout("handle_reply op %d result %d\n", op, result); switch (op) { case CEPHX_GET_AUTH_SESSION_KEY: /* verify auth key */ ret = ceph_x_proc_ticket_reply(ac, &xi->secret, buf + sizeof(*head), end); break; case CEPHX_GET_PRINCIPAL_SESSION_KEY: th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_proc_ticket_reply(ac, &th->session_key, buf + sizeof(*head), end); break; default: return -EINVAL; } if (ret) return ret; if (ac->want_keys == xi->have_keys) return 0; return -EAGAIN; } static int ceph_x_create_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; int ret; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) return PTR_ERR(th); au = kzalloc(sizeof(*au), GFP_NOFS); if (!au) return -ENOMEM; ret = ceph_x_build_authorizer(ac, th, au); if (ret) { kfree(au); return ret; } auth->authorizer = (struct ceph_authorizer *) au; auth->authorizer_buf = au->buf->vec.iov_base; auth->authorizer_buf_len = au->buf->vec.iov_len; auth->authorizer_reply_buf = au->reply_buf; auth->authorizer_reply_buf_len = sizeof (au->reply_buf); return 0; } static int ceph_x_update_authorizer( struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) return PTR_ERR(th); au = (struct ceph_x_authorizer *)auth->authorizer; if (au->secret_id < th->secret_id) { dout("ceph_x_update_authorizer service %u secret %llu < %llu\n", au->service, au->secret_id, th->secret_id); return ceph_x_build_authorizer(ac, th, au); } return 0; } static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len) { struct ceph_x_authorizer *au = (void *)a; struct ceph_x_ticket_handler *th; int ret = 0; struct ceph_x_authorize_reply reply; void *preply = &reply; void *p = au->reply_buf; void *end = p + 
sizeof(au->reply_buf); th = get_ticket_handler(ac, au->service); if (IS_ERR(th)) return PTR_ERR(th); ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply)); if (ret < 0) return ret; if (ret != sizeof(reply)) return -EPERM; if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one)) ret = -EPERM; else ret = 0; dout("verify_authorizer_reply nonce %llx got %llx ret %d\n", au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); return ret; } static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a) { struct ceph_x_authorizer *au = (void *)a; ceph_buffer_put(au->buf); kfree(au); } static void ceph_x_reset(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; dout("reset\n"); xi->starting = true; xi->server_challenge = 0; } static void ceph_x_destroy(struct ceph_auth_client *ac) { struct ceph_x_info *xi = ac->private; struct rb_node *p; dout("ceph_x_destroy %p\n", ac); ceph_crypto_key_destroy(&xi->secret); while ((p = rb_first(&xi->ticket_handlers)) != NULL) { struct ceph_x_ticket_handler *th = rb_entry(p, struct ceph_x_ticket_handler, node); remove_ticket_handler(ac, th); } if (xi->auth_authorizer.buf) ceph_buffer_put(xi->auth_authorizer.buf); kfree(ac->private); ac->private = NULL; } static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) { struct ceph_x_ticket_handler *th; th = get_ticket_handler(ac, peer_type); if (!IS_ERR(th)) memset(&th->validity, 0, sizeof(th->validity)); } static const struct ceph_auth_client_ops ceph_x_ops = { .name = "x", .is_authenticated = ceph_x_is_authenticated, .should_authenticate = ceph_x_should_authenticate, .build_request = ceph_x_build_request, .handle_reply = ceph_x_handle_reply, .create_authorizer = ceph_x_create_authorizer, .update_authorizer = ceph_x_update_authorizer, .verify_authorizer_reply = ceph_x_verify_authorizer_reply, .destroy_authorizer = ceph_x_destroy_authorizer, .invalidate_authorizer = ceph_x_invalidate_authorizer, .reset = ceph_x_reset, .destroy = ceph_x_destroy, }; int ceph_x_init(struct ceph_auth_client *ac) { struct ceph_x_info *xi; int ret; dout("ceph_x_init %p\n", ac); ret = -ENOMEM; xi = kzalloc(sizeof(*xi), GFP_NOFS); if (!xi) goto out; ret = -EINVAL; if (!ac->key) { pr_err("no secret set (for auth_x protocol)\n"); goto out_nomem; } ret = ceph_crypto_key_clone(&xi->secret, ac->key); if (ret < 0) { pr_err("cannot clone key: %d\n", ret); goto out_nomem; } xi->starting = true; xi->ticket_handlers = RB_ROOT; ac->protocol = CEPH_AUTH_CEPHX; ac->private = xi; ac->ops = &ceph_x_ops; return 0; out_nomem: kfree(xi); out: return ret; }
static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
				    struct ceph_crypto_key *secret,
				    void *buf, void *end)
{
	void *p = buf;
	char *dbuf;
	char *ticket_buf;
	u8 reply_struct_v;
	u32 num;
	int ret;

	dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
	if (!dbuf)
		return -ENOMEM;

	ret = -ENOMEM;
	ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
	if (!ticket_buf)
		goto out_dbuf;

	ceph_decode_8_safe(&p, end, reply_struct_v, bad);
	if (reply_struct_v != 1)
		return -EINVAL;

	ceph_decode_32_safe(&p, end, num, bad);
	dout("%d tickets\n", num);

	while (num--) {
		ret = process_one_ticket(ac, secret, &p, end,
					 dbuf, ticket_buf);
		if (ret)
			goto out;
	}

	ret = 0;
out:
	kfree(ticket_buf);
out_dbuf:
	kfree(dbuf);
	return ret;

bad:
	ret = -EINVAL;
	goto out;
}
static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
				    struct ceph_crypto_key *secret,
				    void *buf, void *end)
{
	void *p = buf;
	u8 reply_struct_v;
	u32 num;
	int ret;

	ceph_decode_8_safe(&p, end, reply_struct_v, bad);
	if (reply_struct_v != 1)
		return -EINVAL;

	ceph_decode_32_safe(&p, end, num, bad);
	dout("%d tickets\n", num);

	while (num--) {
		ret = process_one_ticket(ac, secret, &p, end);
		if (ret)
			return ret;
	}

	return 0;

bad:
	return -EINVAL;
}
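The patched method above no longer carries any scratch buffers of its own; per the code_after field, ceph_x_decrypt was reworked to take void **obuf and to allocate the output buffer itself when *obuf is NULL. Below is a minimal user-space sketch of that lazy-allocation idiom, with a hypothetical name (fill_output) and plain malloc standing in for kmalloc; it illustrates the pattern and is not the kernel code itself.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Sketch of the lazy-allocation idiom the patch gives ceph_x_decrypt:
 * when the caller passes *obuf == NULL, the callee allocates a buffer
 * of exactly the size it needs and reports that size back via *olen;
 * a caller-supplied buffer that is too small is rejected outright.
 */
static int fill_output(void **obuf, size_t *olen, const void *src, size_t need)
{
	if (*obuf == NULL) {
		*obuf = malloc(need);
		if (*obuf == NULL)
			return -1;
		*olen = need;		/* tell the caller what we allocated */
	} else if (*olen < need) {
		return -1;		/* too small: fail instead of overflowing */
	}
	memcpy(*obuf, src, need);
	return 0;
}

int main(void)
{
	const char payload[] = "ticket";
	void *buf = NULL;
	size_t len = 0;

	if (fill_output(&buf, &len, payload, sizeof(payload)) == 0)
		printf("callee allocated %zu bytes: %s\n", len, (const char *)buf);
	free(buf);
	return 0;
}

In this sketch, failing when a caller-supplied buffer is too small, rather than writing past it, is the design choice that closes the bounds violation.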
{'added': [(65, '\t\t\t void **p, void *end, void **obuf, size_t olen)'), (76, '\tif (*obuf == NULL) {'), (77, '\t\t*obuf = kmalloc(len, GFP_NOFS);'), (78, '\t\tif (!*obuf)'), (79, '\t\t\treturn -ENOMEM;'), (80, '\t\tolen = len;'), (81, '\t}'), (82, ''), (83, '\tret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len);'), (138, '\t\t\t void **p, void *end)'), (144, '\tvoid *dbuf = NULL;'), (150, '\tvoid *ticket_buf = NULL;'), (175, '\tdlen = ceph_x_decrypt(secret, p, end, &dbuf, 0);'), (205, '\t\tdlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0);'), (210, '\t\ttp = ticket_buf;'), (215, '\t\tticket_buf = kmalloc(dlen, GFP_NOFS);'), (216, '\t\tif (!ticket_buf) {'), (217, '\t\t\tret = -ENOMEM;'), (218, '\t\t\tgoto out;'), (219, '\t\t}'), (220, '\t\ttp = ticket_buf;'), (249, '\tkfree(ticket_buf);'), (250, '\tkfree(dbuf);'), (275, '\t\tret = process_one_ticket(ac, secret, &p, end);'), (277, '\t\t\treturn ret;'), (280, '\treturn 0;'), (283, '\treturn -EINVAL;'), (599, '\tvoid *preply = &reply;'), (606, '\tret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply));')], 'deleted': [(16, '#define TEMP_TICKET_BUF_LEN\t256'), (17, ''), (67, '\t\t\t void **p, void *end, void *obuf, size_t olen)'), (78, '\tret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,'), (79, '\t\t\t *p, len);'), (134, '\t\t\t void **p, void *end,'), (135, '\t\t\t void *dbuf, void *ticket_buf)'), (170, '\tdlen = ceph_x_decrypt(secret, p, end, dbuf,'), (171, '\t\t\t TEMP_TICKET_BUF_LEN);'), (198, '\ttp = ticket_buf;'), (202, '\t\tdlen = ceph_x_decrypt(&old_key, p, end, ticket_buf,'), (203, '\t\t\t\t TEMP_TICKET_BUF_LEN);'), (252, '\tchar *dbuf;'), (253, '\tchar *ticket_buf;'), (258, '\tdbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);'), (259, '\tif (!dbuf)'), (260, '\t\treturn -ENOMEM;'), (261, ''), (262, '\tret = -ENOMEM;'), (263, '\tticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);'), (264, '\tif (!ticket_buf)'), (265, '\t\tgoto out_dbuf;'), (266, ''), (275, '\t\tret = process_one_ticket(ac, secret, &p, end,'), (276, '\t\t\t\t\t dbuf, ticket_buf);'), (278, '\t\t\tgoto out;'), (281, '\tret = 0;'), (282, 'out:'), (283, '\tkfree(ticket_buf);'), (284, 'out_dbuf:'), (285, '\tkfree(dbuf);'), (286, '\treturn ret;'), (289, '\tret = -EINVAL;'), (290, '\tgoto out;'), (612, '\tret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));')]}
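The diff boils down to one pattern: stop decrypting into fixed 256-byte TEMP_TICKET_BUF_LEN buffers and instead size each buffer from the length field decoded off the wire, after validating that length against the end of the input (CVE-2014-6416, CWE-119). Here is a self-contained sketch of that validate-then-allocate pattern in plain C, with a hypothetical name (decode_blob) and no kernel dependencies.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative only: read a 32-bit little-endian length prefix and
 * return a freshly allocated copy of the payload that follows it.
 * Mirrors the patch's "size the buffer from the validated length"
 * pattern; returns NULL on truncated input or allocation failure.
 */
static unsigned char *decode_blob(const unsigned char **p,
				  const unsigned char *end,
				  uint32_t *out_len)
{
	const unsigned char *q = *p;
	uint32_t len;
	unsigned char *buf;

	if ((size_t)(end - q) < 4)		/* room for the length prefix? */
		return NULL;
	len = (uint32_t)q[0] | ((uint32_t)q[1] << 8) |
	      ((uint32_t)q[2] << 16) | ((uint32_t)q[3] << 24);
	q += 4;

	if ((size_t)(end - q) < len)		/* payload really present? */
		return NULL;			/* reject, never overflow */

	buf = malloc(len ? len : 1);		/* exact fit, no fixed cap */
	if (buf == NULL)
		return NULL;
	memcpy(buf, q, len);

	*p = q + len;
	*out_len = len;
	return buf;
}

int main(void)
{
	/* 4-byte length prefix (5) followed by five payload bytes */
	const unsigned char msg[] = { 5, 0, 0, 0, 'h', 'e', 'l', 'l', 'o' };
	const unsigned char *p = msg;
	uint32_t n = 0;
	unsigned char *blob = decode_blob(&p, msg + sizeof(msg), &n);

	if (blob != NULL) {
		printf("decoded %u bytes: %.*s\n", (unsigned)n, (int)n,
		       (const char *)blob);
		free(blob);
	}
	return 0;
}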
29
35
594
3,817
38
189
6
https://github.com/torvalds/linux
CVE-2014-6416
CWE-119
1,606
xsDataView.c
C++
fx_DataView
/* * Copyright (c) 2016-2017 Moddable Tech, Inc. * * This file is part of the Moddable SDK Runtime. * * The Moddable SDK Runtime is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The Moddable SDK Runtime is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with the Moddable SDK Runtime. If not, see <http://www.gnu.org/licenses/>. * * This file incorporates work covered by the following copyright and * permission notice: * * Copyright (C) 2010-2016 Marvell International Ltd. * Copyright (C) 2002-2010 Kinoma, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "xsAll.h" static txSlot* fxArgToInstance(txMachine* the, txInteger i); static txBoolean fxCheckLength(txMachine* the, txSlot* slot, txInteger* index); static txSlot* fxCheckArrayBufferDetached(txMachine* the, txSlot* slot, txBoolean mutable); static txSlot* fxCheckArrayBufferInstance(txMachine* the, txSlot* slot); static txSlot* fxNewArrayBufferInstance(txMachine* the); static txSlot* fxCheckDataViewInstance(txMachine* the, txSlot* slot); static txInteger fxCheckDataViewSize(txMachine* the, txSlot* view, txSlot* buffer, txBoolean mutable); static txSlot* fxNewDataViewInstance(txMachine* the); static void fxCallTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index, txSlot* item); static txSlot* fxCheckTypedArrayInstance(txMachine* the, txSlot* slot); static int fxCompareTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index); static txSlot* fxConstructTypedArray(txMachine* the); static txSlot* fxNewTypedArrayInstance(txMachine* the, txTypeDispatch* dispatch, txTypeAtomics* atomics); static void fxReduceTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index); static txBoolean fxTypedArrayDefineOwnProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* slot, txFlag mask); static txBoolean fxTypedArrayDeleteProperty(txMachine* the, txSlot* instance, txID id, txIndex index); static txBoolean fxTypedArrayGetOwnProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* slot); static txSlot* fxTypedArrayGetProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txFlag flag); static txBoolean fxTypedArrayGetPropertyValue(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* value, txSlot* receiver); static txBoolean fxTypedArrayHasProperty(txMachine* the, txSlot* instance, txID id, txIndex index); static void fxTypedArrayOwnKeys(txMachine* the, txSlot* instance, txFlag flag, 
txSlot* keys); static txSlot* fxTypedArraySetProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txFlag flag); static txBoolean fxTypedArraySetPropertyValue(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* value, txSlot* receiver); static void fx_TypedArray_from_object(txMachine* the, txSlot* instance, txSlot* function, txSlot* _this); const txBehavior ICACHE_FLASH_ATTR gxTypedArrayBehavior = { fxTypedArrayGetProperty, fxTypedArraySetProperty, fxOrdinaryCall, fxOrdinaryConstruct, fxTypedArrayDefineOwnProperty, fxTypedArrayDeleteProperty, fxTypedArrayGetOwnProperty, fxTypedArrayGetPropertyValue, fxOrdinaryGetPrototype, fxTypedArrayHasProperty, fxOrdinaryIsExtensible, fxTypedArrayOwnKeys, fxOrdinaryPreventExtensions, fxTypedArraySetPropertyValue, fxOrdinarySetPrototype, }; void *fxArrayBuffer(txMachine* the, txSlot* slot, void* data, txInteger byteLength, txInteger maxByteLength) { txSlot* instance; txSlot* arrayBuffer; txSlot* bufferInfo; if (byteLength < 0) mxRangeError("invalid byteLength %ld", byteLength); mxPush(mxArrayBufferPrototype); instance = fxNewArrayBufferInstance(the); arrayBuffer = instance->next; arrayBuffer->value.arrayBuffer.address = fxNewChunk(the, byteLength); bufferInfo = arrayBuffer->next; bufferInfo->value.bufferInfo.length = byteLength; bufferInfo->value.bufferInfo.maxLength = maxByteLength; if (data != NULL) c_memcpy(arrayBuffer->value.arrayBuffer.address, data, byteLength); else c_memset(arrayBuffer->value.arrayBuffer.address, 0, byteLength); mxPullSlot(slot); return arrayBuffer->value.arrayBuffer.address; } void fxGetArrayBufferData(txMachine* the, txSlot* slot, txInteger byteOffset, void* data, txInteger byteLength) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; txInteger length = bufferInfo->value.bufferInfo.length; if ((byteOffset < 0) || (length < byteOffset)) mxRangeError("out of range byteOffset %ld", byteOffset); if ((byteLength < 0) || (length < (byteOffset + byteLength))) mxRangeError("out of range byteLength %ld", byteLength); c_memcpy(data, arrayBuffer->value.arrayBuffer.address + byteOffset, byteLength); } txInteger fxGetArrayBufferLength(txMachine* the, txSlot* slot) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; return bufferInfo->value.bufferInfo.length; } txInteger fxGetArrayBufferMaxLength(txMachine* the, txSlot* slot) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; return bufferInfo->value.bufferInfo.maxLength; } void fxSetArrayBufferData(txMachine* the, txSlot* slot, txInteger byteOffset, void* data, txInteger byteLength) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; txInteger length = bufferInfo->value.bufferInfo.length; if ((byteOffset < 0) || (length < byteOffset)) mxRangeError("out of range byteOffset %ld", byteOffset); if ((byteLength < 0) || (length < (byteOffset + byteLength))) mxRangeError("out of range byteLength %ld", byteLength); c_memcpy(arrayBuffer->value.arrayBuffer.address + byteOffset, data, byteLength); } void fxSetArrayBufferLength(txMachine* the, txSlot* slot, txInteger target) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; txInteger length = 
bufferInfo->value.bufferInfo.length; txByte* address = arrayBuffer->value.arrayBuffer.address; if (bufferInfo->value.bufferInfo.maxLength < 0) fxReport(the, "# Use xsArrayBufferResizable instead of xsArrayBuffer\n"); if (length != target) { if (address) address = (txByte*)fxRenewChunk(the, address, target); if (address) { if (length < target) c_memset(address + length, 0, target - length); } else { address = (txByte*)fxNewChunk(the, target); if (length < target) { c_memcpy(address, arrayBuffer->value.arrayBuffer.address, length); c_memset(address + length, 0, target - length); } else c_memcpy(address, arrayBuffer->value.arrayBuffer.address, target); } arrayBuffer->value.arrayBuffer.address = address; bufferInfo->value.bufferInfo.length = target; } } void* fxToArrayBuffer(txMachine* the, txSlot* slot) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; return arrayBuffer->value.arrayBuffer.address; } void fxBuildDataView(txMachine* the) { txSlot* slot; txInteger index; const txTypeDispatch *dispatch; const txTypeAtomics *atomics; txSlot* property; txSlot* constructor; mxPush(mxObjectPrototype); slot = fxLastProperty(the, fxNewObjectInstance(the)); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_get_byteLength), C_NULL, mxID(_byteLength), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_get_maxByteLength), C_NULL, mxID(_maxByteLength), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_get_resizable), C_NULL, mxID(_resizable), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_concat), 1, mxID(_concat), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_resize), 1, mxID(_resize), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_slice), 2, mxID(_slice), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_transfer), 0, mxID(_transfer), XS_DONT_ENUM_FLAG); slot = fxNextStringXProperty(the, slot, "ArrayBuffer", mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG | XS_DONT_SET_FLAG); mxArrayBufferPrototype = *the->stack; slot = fxBuildHostConstructor(the, mxCallback(fx_ArrayBuffer), 1, mxID(_ArrayBuffer)); mxArrayBufferConstructor = *the->stack; slot = fxLastProperty(the, slot); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_fromBigInt), 1, mxID(_fromBigInt), XS_DONT_ENUM_FLAG); #ifndef mxCESU8 slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_fromString), 1, mxID(_fromString), XS_DONT_ENUM_FLAG); #endif slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_isView), 1, mxID(_isView), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_species_get), C_NULL, mxID(_Symbol_species), XS_DONT_ENUM_FLAG); mxPop(); mxPush(mxObjectPrototype); slot = fxLastProperty(the, fxNewObjectInstance(the)); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getBigInt64), 1, mxID(_getBigInt64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setBigInt64), 2, mxID(_setBigInt64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getBigUint64), 1, mxID(_getBigUint64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, 
mxCallback(fx_DataView_prototype_setBigUint64), 2, mxID(_setBigUint64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getFloat32), 1, mxID(_getFloat32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setFloat32), 2, mxID(_setFloat32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getFloat64), 1, mxID(_getFloat64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setFloat64), 2, mxID(_setFloat64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getInt8), 1, mxID(_getInt8), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setInt8), 2, mxID(_setInt8), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getInt16), 1, mxID(_getInt16), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setInt16), 2, mxID(_setInt16), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getInt32), 1, mxID(_getInt32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setInt32), 2, mxID(_setInt32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getUint8), 1, mxID(_getUint8), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setUint8), 2, mxID(_setUint8), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getUint16), 1, mxID(_getUint16), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setUint16), 2, mxID(_setUint16), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getUint32), 1, mxID(_getUint32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setUint32), 2, mxID(_setUint32), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_DataView_prototype_buffer_get), C_NULL, mxID(_buffer), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_DataView_prototype_byteLength_get), C_NULL, mxID(_byteLength), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_DataView_prototype_byteOffset_get), C_NULL, mxID(_byteOffset), XS_DONT_ENUM_FLAG); slot = fxNextStringXProperty(the, slot, "DataView", mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG | XS_DONT_SET_FLAG); mxDataViewPrototype = *the->stack; slot = fxBuildHostConstructor(the, mxCallback(fx_DataView), 1, mxID(_DataView)); mxDataViewConstructor = *the->stack; mxPop(); fxNewHostFunction(the, mxCallback(fxTypedArrayGetter), 0, XS_NO_ID); fxNewHostFunction(the, mxCallback(fxTypedArraySetter), 1, XS_NO_ID); mxPushUndefined(); the->stack->flag = XS_DONT_DELETE_FLAG; the->stack->kind = XS_ACCESSOR_KIND; the->stack->value.accessor.getter = (the->stack + 2)->value.reference; the->stack->value.accessor.setter = (the->stack + 1)->value.reference; mxPull(mxTypedArrayAccessor); mxPop(); mxPop(); mxPush(mxObjectPrototype); slot = fxLastProperty(the, fxNewObjectInstance(the)); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_at), 1, mxID(_at), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, 
mxCallback(fx_TypedArray_prototype_buffer_get), C_NULL, mxID(_buffer), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_TypedArray_prototype_byteLength_get), C_NULL, mxID(_byteLength), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_TypedArray_prototype_byteOffset_get), C_NULL, mxID(_byteOffset), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_TypedArray_prototype_length_get), C_NULL, mxID(_length), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_TypedArray_prototype_toStringTag_get), C_NULL, mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_copyWithin), 2, mxID(_copyWithin), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_entries), 0, mxID(_entries), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_every), 1, mxID(_every), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_fill), 1, mxID(_fill), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_filter), 1, mxID(_filter), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_find), 1, mxID(_find), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_findIndex), 1, mxID(_findIndex), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_findLast), 1, mxID(_findLast), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_findLastIndex), 1, mxID(_findLastIndex), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_forEach), 1, mxID(_forEach), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_includes), 1, mxID(_includes), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_indexOf), 1, mxID(_indexOf), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_join), 1, mxID(_join), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_keys), 0, mxID(_keys), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_lastIndexOf), 1, mxID(_lastIndexOf), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_map), 1, mxID(_map), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_reduce), 1, mxID(_reduce), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_reduceRight), 1, mxID(_reduceRight), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_reverse), 0, mxID(_reverse), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_set), 1, mxID(_set), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_slice), 2, mxID(_slice), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_some), 1, mxID(_some), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, 
mxCallback(fx_TypedArray_prototype_sort), 1, mxID(_sort), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_subarray), 2, mxID(_subarray), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_toLocaleString), 0, mxID(_toLocaleString), XS_DONT_ENUM_FLAG); property = mxBehaviorGetProperty(the, mxArrayPrototype.value.reference, mxID(_toString), 0, XS_OWN); slot = fxNextSlotProperty(the, slot, property, mxID(_toString), XS_DONT_ENUM_FLAG); property = slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_values), 0, mxID(_values), XS_DONT_ENUM_FLAG); slot = fxNextSlotProperty(the, slot, property, mxID(_Symbol_iterator), XS_DONT_ENUM_FLAG); mxTypedArrayPrototype = *the->stack; constructor = fxBuildHostConstructor(the, mxCallback(fx_TypedArray), 0, mxID(_TypedArray)); mxTypedArrayConstructor = *the->stack; slot = fxLastProperty(the, constructor); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_from), 1, mxID(_from), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_of), 0, mxID(_of), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_species_get), C_NULL, mxID(_Symbol_species), XS_DONT_ENUM_FLAG); for (index = 0, dispatch = &gxTypeDispatches[0], atomics = &gxTypeAtomics[0]; index < mxTypeArrayCount; index++, dispatch++, atomics++) { mxPush(mxTypedArrayPrototype); slot = fxLastProperty(the, fxNewObjectInstance(the)); slot = fxNextIntegerProperty(the, slot, dispatch->size, mxID(_BYTES_PER_ELEMENT), XS_GET_ONLY); slot = fxBuildHostConstructor(the, mxCallback(fx_TypedArray), 3, mxID(dispatch->constructorID)); the->stackPrototypes[-1 - (txInteger)dispatch->constructorID] = *the->stack; //@@ slot->value.instance.prototype = constructor; property = mxFunctionInstanceHome(slot); slot = property->next; property = fxNextTypeDispatchProperty(the, property, (txTypeDispatch*)dispatch, (txTypeAtomics*)atomics, XS_NO_ID, XS_INTERNAL_FLAG); property->next = slot; slot = fxLastProperty(the, slot); slot = fxNextIntegerProperty(the, slot, dispatch->size, mxID(_BYTES_PER_ELEMENT), XS_GET_ONLY); mxPop(); } mxPop(); } txInteger fxArgToByteLength(txMachine* the, txInteger argi, txInteger length) { txSlot *arg = mxArgv(argi); if ((mxArgc > argi) && (arg->kind != XS_UNDEFINED_KIND)) { txNumber value; if (XS_INTEGER_KIND == arg->kind) { txInteger value = arg->value.integer; if (value < 0) mxRangeError("out of range byteLength"); return value; } value = c_trunc(fxToNumber(the, arg)); if (c_isnan(value)) return 0; if ((value < 0) || (0x7FFFFFFF < value)) mxRangeError("out of range byteLength"); return (txInteger)value; } return length; } txSlot* fxArgToInstance(txMachine* the, txInteger i) { if (mxArgc > i) return fxToInstance(the, mxArgv(i)); mxTypeError("Cannot coerce undefined to object"); return C_NULL; } txBoolean fxCheckLength(txMachine* the, txSlot* slot, txInteger* index) { txNumber number = fxToNumber(the, slot); txNumber check = c_trunc(number); if ((number == check) && (0 <= number) && (number <= 0x7FFFFFFF)) { *index = (txInteger)number; return 1 ; } return 0; } txSlot* fxCheckArrayBufferDetached(txMachine* the, txSlot* slot, txBoolean mutable) { slot = slot->value.reference->next; if (slot->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); if (mutable && (slot->flag & XS_DONT_SET_FLAG)) mxTypeError("ArrayBuffer instance is read-only"); return slot; } txSlot* 
fxCheckArrayBufferInstance(txMachine* the, txSlot* slot) { if (slot->kind == XS_REFERENCE_KIND) { txSlot* instance = slot->value.reference; if (((slot = instance->next)) && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_ARRAY_BUFFER_KIND)) return instance; } mxTypeError("this is no ArrayBuffer instance"); return C_NULL; } void fxConstructArrayBufferResult(txMachine* the, txSlot* constructor, txInteger length) { txSlot* instance; if (constructor) mxPushSlot(constructor); else { mxPushSlot(mxThis); mxGetID(mxID(_constructor)); } fxToSpeciesConstructor(the, &mxArrayBufferConstructor); mxNew(); mxPushInteger(length); mxRunCount(1); if (the->stack->kind != XS_REFERENCE_KIND) mxTypeError("no instance"); instance = the->stack->value.reference; if (!(instance->next) || (instance->next->kind != XS_ARRAY_BUFFER_KIND)) mxTypeError("no ArrayBuffer instance"); if (!constructor && (mxThis->value.reference == instance)) mxTypeError("same ArrayBuffer instance"); if (instance->next->next->value.bufferInfo.length < length) mxTypeError("smaller ArrayBuffer instance"); mxPullSlot(mxResult); } txSlot* fxNewArrayBufferInstance(txMachine* the) { txSlot* instance; txSlot* property; instance = fxNewObjectInstance(the); property = instance->next = fxNewSlot(the); property->flag = XS_INTERNAL_FLAG; property->kind = XS_ARRAY_BUFFER_KIND; property->value.arrayBuffer.address = C_NULL; property->value.arrayBuffer.detachKey = C_NULL; property = property->next = fxNewSlot(the); property->flag = XS_INTERNAL_FLAG; property->kind = XS_BUFFER_INFO_KIND; property->value.bufferInfo.length = 0; property->value.bufferInfo.maxLength = -1; return instance; } void fx_ArrayBuffer(txMachine* the) { txSlot* instance; txInteger byteLength; txInteger maxByteLength = -1; txSlot* property; if (mxIsUndefined(mxTarget)) mxTypeError("call: ArrayBuffer"); mxPushSlot(mxTarget); fxGetPrototypeFromConstructor(the, &mxArrayBufferPrototype); instance = fxNewArrayBufferInstance(the); mxPullSlot(mxResult); byteLength = fxArgToByteLength(the, 0, 0); if ((mxArgc > 1) && mxIsReference(mxArgv(1))) { mxPushSlot(mxArgv(1)); mxGetID(mxID(_maxByteLength)); mxPullSlot(mxArgv(1)); maxByteLength = fxArgToByteLength(the, 1, -1); } if (maxByteLength >= 0) { if (byteLength > maxByteLength) mxRangeError("byteLength > maxByteLength"); } property = instance->next; property->value.arrayBuffer.address = fxNewChunk(the, byteLength); c_memset(property->value.arrayBuffer.address, 0, byteLength); property = property->next; property->value.bufferInfo.length = byteLength; property->value.bufferInfo.maxLength = maxByteLength; } void fx_ArrayBuffer_fromBigInt(txMachine* the) { txU4 minBytes = 0; txBoolean sign = 0; int endian = EndianBig; if (mxArgc < 1) mxTypeError("no argument"); if (mxArgc > 1) { txInteger m = fxToInteger(the, mxArgv(1)); if (m < 0) mxRangeError("minBytes < 0"); minBytes = (txU4)m; } if ((mxArgc > 2) && fxToBoolean(the, mxArgv(2))) sign = 1; if ((mxArgc > 3) && fxToBoolean(the, mxArgv(3))) endian = EndianLittle; if (gxTypeBigInt.toArrayBuffer) { gxTypeBigInt.toArrayBuffer(the, mxArgv(0), minBytes, sign, endian); } else { mxUnknownError("not built-in"); } } #ifndef mxCESU8 void fx_ArrayBuffer_fromString(txMachine* the) { txSize length; if (mxArgc < 1) mxTypeError("no argument"); length = mxStringLength(fxToString(the, mxArgv(0))); fxConstructArrayBufferResult(the, mxThis, length); c_memcpy(mxResult->value.reference->next->value.arrayBuffer.address, mxArgv(0)->value.string, length); } #endif void fx_ArrayBuffer_isView(txMachine* the) { txSlot* slot; 
mxResult->kind = XS_BOOLEAN_KIND; mxResult->value.boolean = 0; if (mxArgc > 0) { slot = mxArgv(0); if (slot->kind == XS_REFERENCE_KIND) { slot = slot->value.reference; if (slot->next) { slot = slot->next; if ((slot->kind == XS_DATA_VIEW_KIND) || (slot->kind == XS_TYPED_ARRAY_KIND)) { mxResult->value.boolean = 1; } } } } } void fx_ArrayBuffer_prototype_get_byteLength(txMachine* the) { txSlot* instance = fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; mxResult->kind = XS_INTEGER_KIND; if (arrayBuffer->value.arrayBuffer.address == C_NULL) mxResult->value.integer = 0; else mxResult->value.integer = bufferInfo->value.bufferInfo.length; } void fx_ArrayBuffer_prototype_get_maxByteLength(txMachine* the) { txSlot* instance = fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; mxResult->kind = XS_INTEGER_KIND; if (arrayBuffer->value.arrayBuffer.address == C_NULL) mxResult->value.integer = 0; else if (bufferInfo->value.bufferInfo.maxLength >= 0) mxResult->value.integer = bufferInfo->value.bufferInfo.maxLength; else mxResult->value.integer = bufferInfo->value.bufferInfo.length; } void fx_ArrayBuffer_prototype_get_resizable(txMachine* the) { txSlot* instance = fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; mxResult->kind = XS_BOOLEAN_KIND; mxResult->value.boolean = (bufferInfo->value.bufferInfo.maxLength >= 0) ? 1 : 0; } void fx_ArrayBuffer_prototype_concat(txMachine* the) { txSlot* instance = fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; txInteger length = bufferInfo->value.bufferInfo.length; txInteger c = mxArgc, i = 0; txByte* address; txSlot* slot; while (i < c) { arrayBuffer = C_NULL; bufferInfo = C_NULL; slot = mxArgv(i); if (slot->kind == XS_REFERENCE_KIND) { slot = slot->value.reference->next; if (slot && (slot->kind == XS_ARRAY_BUFFER_KIND)) { arrayBuffer = slot; bufferInfo = slot->next; } } if (arrayBuffer) length = fxAddChunkSizes(the, length, bufferInfo->value.bufferInfo.length); else mxTypeError("arguments[%ld] is no ArrayBuffer instance", i); i++; } fxConstructArrayBufferResult(the, C_NULL, length); arrayBuffer = instance->next; bufferInfo = arrayBuffer->next; address = mxResult->value.reference->next->value.arrayBuffer.address; length = bufferInfo->value.bufferInfo.length; c_memcpy(address, arrayBuffer->value.arrayBuffer.address, length); address += length; i = 0; while (i < c) { arrayBuffer = mxArgv(i)->value.reference->next; bufferInfo = arrayBuffer->next; length = bufferInfo->value.bufferInfo.length; c_memcpy(address, arrayBuffer->value.arrayBuffer.address, length); address += length; i++; } } void fx_ArrayBuffer_prototype_resize(txMachine* the) { /* txSlot* instance = */ fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = fxCheckArrayBufferDetached(the, mxThis, XS_MUTABLE); txSlot* bufferInfo = arrayBuffer->next; txInteger maxByteLength, oldByteLength, newByteLength; txByte* chunk; maxByteLength = bufferInfo->value.bufferInfo.maxLength; if (maxByteLength < 0) mxTypeError("not resizable"); oldByteLength = bufferInfo->value.bufferInfo.length; newByteLength = fxArgToByteLength(the, 0, 0); if (newByteLength > maxByteLength) mxRangeError("newLength > maxByteLength"); chunk = (txByte*)fxRenewChunk(the, arrayBuffer->value.arrayBuffer.address, newByteLength); if (!chunk) { chunk = 
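	/* fxRenewChunk could not resize in place: allocate a fresh chunk and copy
	   the surviving bytes (the smaller of the old and new lengths) */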
(txByte*)fxNewChunk(the, newByteLength); c_memcpy(chunk, arrayBuffer->value.arrayBuffer.address, (newByteLength < oldByteLength) ? newByteLength : oldByteLength); } if (newByteLength > oldByteLength) c_memset(chunk + oldByteLength, 0, newByteLength - oldByteLength); arrayBuffer->value.arrayBuffer.address = chunk; bufferInfo->value.bufferInfo.length = newByteLength; } void fx_ArrayBuffer_prototype_slice(txMachine* the) { /* txSlot* instance = */ fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = fxCheckArrayBufferDetached(the, mxThis, XS_IMMUTABLE); txSlot* bufferInfo = arrayBuffer->next; txInteger length = bufferInfo->value.bufferInfo.length; txInteger start = (txInteger)fxArgToIndex(the, 0, 0, length); txInteger stop = (txInteger)fxArgToIndex(the, 1, length, length); txSlot* resultBuffer; if (stop < start) stop = start; fxConstructArrayBufferResult(the, C_NULL, stop - start); resultBuffer = fxCheckArrayBufferDetached(the, mxResult, XS_MUTABLE); c_memcpy(resultBuffer->value.arrayBuffer.address, arrayBuffer->value.arrayBuffer.address + start, stop - start); } void fx_ArrayBuffer_prototype_transfer(txMachine* the) { /* txSlot* instance = */ fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = fxCheckArrayBufferDetached(the, mxThis, XS_MUTABLE); txSlot* bufferInfo = arrayBuffer->next; txInteger oldByteLength = bufferInfo->value.bufferInfo.length; txInteger newByteLength = fxArgToByteLength(the, 0, oldByteLength); txSlot* resultBuffer; fxConstructArrayBufferResult(the, C_NULL, newByteLength); resultBuffer = fxCheckArrayBufferDetached(the, mxResult, XS_MUTABLE); c_memcpy(resultBuffer->value.arrayBuffer.address, arrayBuffer->value.arrayBuffer.address, (newByteLength < oldByteLength) ? newByteLength : oldByteLength); if (newByteLength > oldByteLength) c_memset(resultBuffer->value.arrayBuffer.address + oldByteLength, 0, newByteLength - oldByteLength); arrayBuffer->value.arrayBuffer.address = C_NULL; bufferInfo->value.bufferInfo.length = 0; } txSlot* fxCheckDataViewInstance(txMachine* the, txSlot* slot) { if (slot->kind == XS_REFERENCE_KIND) { txSlot* instance = slot->value.reference; if (((slot = instance->next)) && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_DATA_VIEW_KIND)) return instance; } mxTypeError("this is no DataView instance"); return C_NULL; } txInteger fxCheckDataViewSize(txMachine* the, txSlot* view, txSlot* buffer, txBoolean mutable) { txInteger size = view->value.dataView.size; txSlot* arrayBuffer = buffer->value.reference->next; txSlot* bufferInfo = arrayBuffer->next; if (arrayBuffer->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); if (mutable && (arrayBuffer->flag & XS_DONT_SET_FLAG)) mxTypeError("read-only buffer"); if (bufferInfo->value.bufferInfo.maxLength >= 0) { txInteger offset = view->value.dataView.offset; txInteger byteLength = bufferInfo->value.bufferInfo.length; if (offset > byteLength) mxTypeError("out of bounds view"); else if (size < 0) size = byteLength - offset; else if (offset + size > byteLength) mxTypeError("out of bounds view"); } return size; } txSlot* fxGetBufferInfo(txMachine* the, txSlot* buffer) { txSlot* arrayBuffer = buffer->value.reference->next; txSlot* bufferInfo = arrayBuffer->next; if (arrayBuffer->kind == XS_ARRAY_BUFFER_KIND) { if (arrayBuffer->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); return bufferInfo; } if (arrayBuffer->kind == XS_HOST_KIND) { txInteger byteLength; if (bufferInfo && (bufferInfo->kind == XS_BUFFER_INFO_KIND)) return bufferInfo; mxPushSlot(buffer); 
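	/* host buffer without cached info: read its byteLength property once and
	   cache it in a new XS_BUFFER_INFO_KIND slot chained after the buffer slot */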
mxGetID(mxID(_byteLength)); if (!fxCheckLength(the, the->stack, &byteLength)) mxTypeError("invalid byteLength"); fxReport(the, "# Use xsSetHostBuffer instead of xsSetHostData\n"); mxPop(); bufferInfo = fxNewSlot(the); bufferInfo->next = arrayBuffer->next; bufferInfo->flag = XS_INTERNAL_FLAG; bufferInfo->kind = XS_BUFFER_INFO_KIND; bufferInfo->value.bufferInfo.length = byteLength; bufferInfo->value.bufferInfo.maxLength = -1; arrayBuffer->next = bufferInfo; return bufferInfo; } mxTypeError("invalid buffer"); return C_NULL; } txInteger fxGetDataViewSize(txMachine* the, txSlot* view, txSlot* buffer) { txInteger size = view->value.dataView.size; txSlot* arrayBuffer = buffer->value.reference->next; txSlot* bufferInfo = arrayBuffer->next; if (arrayBuffer->value.arrayBuffer.address == C_NULL) return 0; if (bufferInfo->value.bufferInfo.maxLength >= 0) { txInteger offset = view->value.dataView.offset; txInteger byteLength = bufferInfo->value.bufferInfo.length; if (offset > byteLength) size = 0; else if (size < 0) size = byteLength - offset; else if (offset + size > byteLength) size = 0; } return size; } txSlot* fxNewDataViewInstance(txMachine* the) { txSlot* instance; txSlot* property; instance = fxNewObjectInstance(the); property = instance->next = fxNewSlot(the); property->flag = XS_INTERNAL_FLAG; property->kind = XS_DATA_VIEW_KIND; property->value.dataView.offset = 0; property->value.dataView.size = 0; property = fxNextNullProperty(the, property, XS_NO_ID, XS_INTERNAL_FLAG); return instance; } void fx_DataView(txMachine* the) { txSlot* slot; txBoolean flag = 0; txInteger offset, size; txSlot* info; txSlot* instance; txSlot* view; txSlot* buffer; if (mxIsUndefined(mxTarget)) mxTypeError("call: DataView"); if ((mxArgc > 0) && (mxArgv(0)->kind == XS_REFERENCE_KIND)) { slot = mxArgv(0)->value.reference->next; if (slot && ((slot->kind == XS_ARRAY_BUFFER_KIND) || (slot->kind == XS_HOST_KIND))) { flag = 1; } } if (!flag) mxTypeError("buffer is no ArrayBuffer instance"); offset = fxArgToByteLength(the, 1, 0); info = fxGetBufferInfo(the, mxArgv(0)); if (info->value.bufferInfo.length < offset) mxRangeError("out of range byteOffset %ld", offset); size = fxArgToByteLength(the, 2, -1); if (size >= 0) { if (info->value.bufferInfo.length < (offset + size)) mxRangeError("out of range byteLength %ld", size); } else { if (info->value.bufferInfo.maxLength < 0) size = info->value.bufferInfo.length - offset; } mxPushSlot(mxTarget); fxGetPrototypeFromConstructor(the, &mxDataViewPrototype); instance = fxNewDataViewInstance(the); mxPullSlot(mxResult); view = instance->next; buffer = view->next; buffer->kind = XS_REFERENCE_KIND; buffer->value.reference = mxArgv(0)->value.reference; info = fxGetBufferInfo(the, buffer); if (info->value.bufferInfo.maxLength >= 0) { if (info->value.bufferInfo.length < offset) mxRangeError("out of range byteOffset %ld", offset); else if (size >= 0) { if (info->value.bufferInfo.length < (offset + size)) mxRangeError("out of range byteLength %ld", size); } } view->value.dataView.offset = offset; view->value.dataView.size = size; } void fx_DataView_prototype_buffer_get(txMachine* the) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* buffer = view->next; mxResult->kind = buffer->kind; mxResult->value = buffer->value; } void fx_DataView_prototype_byteLength_get(txMachine* the) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* buffer = view->next; txInteger size = fxCheckDataViewSize(the, view, buffer, 
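		/* XS_IMMUTABLE: reading byteLength requires an attached buffer, not a writable one */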
XS_IMMUTABLE); mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = size; } void fx_DataView_prototype_byteOffset_get(txMachine* the) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* buffer = view->next; fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = view->value.dataView.offset; } void fx_DataView_prototype_get(txMachine* the, txNumber delta, txTypeCallback getter) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* buffer = view->next; txInteger offset = fxArgToByteLength(the, 0, 0); txInteger size; int endian = EndianBig; if ((mxArgc > 1) && fxToBoolean(the, mxArgv(1))) endian = EndianLittle; size = fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); if ((size < delta) || ((size - delta) < offset)) mxRangeError("out of range byteOffset"); offset += view->value.dataView.offset; (*getter)(the, buffer->value.reference->next, offset, mxResult, endian); } void fx_DataView_prototype_getBigInt64(txMachine* the) { fx_DataView_prototype_get(the, 8, fxBigInt64Getter); } void fx_DataView_prototype_getBigUint64(txMachine* the) { fx_DataView_prototype_get(the, 8, fxBigUint64Getter); } void fx_DataView_prototype_getFloat32(txMachine* the) { fx_DataView_prototype_get(the, 4, fxFloat32Getter); } void fx_DataView_prototype_getFloat64(txMachine* the) { fx_DataView_prototype_get(the, 8, fxFloat64Getter); } void fx_DataView_prototype_getInt8(txMachine* the) { fx_DataView_prototype_get(the, 1, fxInt8Getter); } void fx_DataView_prototype_getInt16(txMachine* the) { fx_DataView_prototype_get(the, 2, fxInt16Getter); } void fx_DataView_prototype_getInt32(txMachine* the) { fx_DataView_prototype_get(the, 4, fxInt32Getter); } void fx_DataView_prototype_getUint8(txMachine* the) { fx_DataView_prototype_get(the, 1, fxUint8Getter); } void fx_DataView_prototype_getUint16(txMachine* the) { fx_DataView_prototype_get(the, 2, fxUint16Getter); } void fx_DataView_prototype_getUint32(txMachine* the) { fx_DataView_prototype_get(the, 4, fxUint32Getter); } void fx_DataView_prototype_set(txMachine* the, txNumber delta, txTypeCoerce coercer, txTypeCallback setter) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* buffer = view->next; txInteger offset = fxArgToByteLength(the, 0, 0); txInteger size; int endian = EndianBig; txSlot* value; if (mxArgc > 1) mxPushSlot(mxArgv(1)); else mxPushUndefined(); value = the->stack; (*coercer)(the, value); if ((mxArgc > 2) && fxToBoolean(the, mxArgv(2))) endian = EndianLittle; size = fxCheckDataViewSize(the, view, buffer, XS_MUTABLE); if ((size < delta) || ((size - delta) < offset)) mxRangeError("out of range byteOffset"); offset += view->value.dataView.offset; (*setter)(the, buffer->value.reference->next, offset, value, endian); mxPop(); } void fx_DataView_prototype_setBigInt64(txMachine* the) { fx_DataView_prototype_set(the, 8, fxBigIntCoerce, fxBigInt64Setter); } void fx_DataView_prototype_setBigUint64(txMachine* the) { fx_DataView_prototype_set(the, 8, fxBigIntCoerce, fxBigUint64Setter); } void fx_DataView_prototype_setFloat32(txMachine* the) { fx_DataView_prototype_set(the, 4, fxNumberCoerce, fxFloat32Setter); } void fx_DataView_prototype_setFloat64(txMachine* the) { fx_DataView_prototype_set(the, 8, fxNumberCoerce, fxFloat64Setter); } void fx_DataView_prototype_setInt8(txMachine* the) { fx_DataView_prototype_set(the, 1, fxIntCoerce, fxInt8Setter); } void 
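/* the DataView setters below differ only in element width and coercion;
   fx_DataView_prototype_set performs the shared bounds and detach checks */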
fx_DataView_prototype_setInt16(txMachine* the)
{
	fx_DataView_prototype_set(the, 2, fxIntCoerce, fxInt16Setter);
}

void fx_DataView_prototype_setInt32(txMachine* the)
{
	fx_DataView_prototype_set(the, 4, fxIntCoerce, fxInt32Setter);
}

void fx_DataView_prototype_setUint8(txMachine* the)
{
	fx_DataView_prototype_set(the, 1, fxUintCoerce, fxUint8Setter);
}

void fx_DataView_prototype_setUint16(txMachine* the)
{
	fx_DataView_prototype_set(the, 2, fxUintCoerce, fxUint16Setter);
}

void fx_DataView_prototype_setUint32(txMachine* the)
{
	fx_DataView_prototype_set(the, 4, fxUintCoerce, fxUint32Setter);
}

/* Shared declarations for %TypedArray% prototype methods; the mutable variant
   also insists on a writable, attached buffer. */
#define mxTypedArrayDeclarations \
	txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); \
	txSlot* dispatch = instance->next; \
	txSlot* view = dispatch->next; \
	txSlot* buffer = view->next; \
	txInteger length = fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE) >> dispatch->value.typedArray.dispatch->shift

#define mxMutableTypedArrayDeclarations \
	txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); \
	txSlot* dispatch = instance->next; \
	txSlot* view = dispatch->next; \
	txSlot* buffer = view->next; \
	txInteger length = fxCheckDataViewSize(the, view, buffer, XS_MUTABLE) >> dispatch->value.typedArray.dispatch->shift

#define mxResultTypedArrayDeclarations \
	txSlot* resultInstance = fxCheckTypedArrayInstance(the, mxResult); \
	txSlot* resultDispatch = resultInstance->next; \
	txSlot* resultView = resultDispatch->next; \
	txSlot* resultBuffer = resultView->next; \
	txInteger resultLength = fxCheckDataViewSize(the, resultView, resultBuffer, XS_MUTABLE) >> resultDispatch->value.typedArray.dispatch->shift

void fxTypedArrayGetter(txMachine* the)
{
	txSlot* instance = fxToInstance(the, mxThis);
	txSlot* dispatch = C_NULL;
	while (instance) {
		if (instance->flag & XS_EXOTIC_FLAG) {
			dispatch = instance->next;
			if (dispatch->ID == XS_TYPED_ARRAY_BEHAVIOR)
				break;
		}
		instance = fxGetPrototype(the, instance);
	}
	if (instance) {
		txID id = the->scratch.value.at.id;
		txIndex index = the->scratch.value.at.index;
		txSlot* view = dispatch->next;
		txSlot* buffer = view->next;
		txU2 shift = dispatch->value.typedArray.dispatch->shift;
		txIndex length = fxGetDataViewSize(the, view, buffer) >> shift;
		if ((!id) && (index < length)) {
			(*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (index << shift), mxResult, EndianNative);
		}
	}
}

void fxTypedArraySetter(txMachine* the)
{
	txSlot* instance = fxToInstance(the, mxThis);
	txSlot* dispatch = C_NULL;
	while (instance) {
		if (instance->flag & XS_EXOTIC_FLAG) {
			dispatch = instance->next;
			if (dispatch->ID == XS_TYPED_ARRAY_BEHAVIOR)
				break;
		}
		instance = fxGetPrototype(the, instance);
	}
	if (instance) {
		txSlot* slot = mxArgv(0);
		txID id = the->scratch.value.at.id;
		txIndex index = the->scratch.value.at.index;
		txSlot* view = dispatch->next;
		txSlot* buffer = view->next;
		txU2 shift = dispatch->value.typedArray.dispatch->shift;
		txSlot* arrayBuffer = buffer->value.reference->next;
		txIndex length;
		dispatch->value.typedArray.dispatch->coerce(the, slot);
		if (arrayBuffer->flag & XS_DONT_SET_FLAG)
			mxTypeError("read-only buffer");
		length = fxGetDataViewSize(the, view, buffer) >> shift;
		if ((!id) && (index < length)) {
			(*dispatch->value.typedArray.dispatch->setter)(the, arrayBuffer, view->value.dataView.offset + (index << shift), slot, EndianNative);
		}
	}
}

txBoolean fxTypedArrayDefineOwnProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* slot, txFlag mask)
{
	if ((!id) || fxIsCanonicalIndex(the, id)) {
		txSlot* dispatch = instance->next;
		txSlot*
view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txSlot* arrayBuffer = buffer->value.reference->next; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; if (id || (index >= length)) return 0; if ((mask & XS_DONT_DELETE_FLAG) && (slot->flag & XS_DONT_DELETE_FLAG)) return 0; if ((mask & XS_DONT_ENUM_FLAG) && (slot->flag & XS_DONT_ENUM_FLAG)) return 0; if (mask & XS_ACCESSOR_FLAG) return 0; if ((mask & XS_DONT_SET_FLAG) && (slot->flag & XS_DONT_SET_FLAG)) return 0; if (slot->kind != XS_UNINITIALIZED_KIND) { dispatch->value.typedArray.dispatch->coerce(the, slot); if (arrayBuffer->flag & XS_DONT_SET_FLAG) mxTypeError("read-only buffer"); length = fxGetDataViewSize(the, view, buffer) >> shift; if (index < length) (*dispatch->value.typedArray.dispatch->setter)(the, arrayBuffer, view->value.dataView.offset + (index << shift), slot, EndianNative); } return 1; } return fxOrdinaryDefineOwnProperty(the, instance, id, index, slot, mask); } txBoolean fxTypedArrayDeleteProperty(txMachine* the, txSlot* instance, txID id, txIndex index) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; return ((!id) && (index < length)) ? 0 : 1; } return fxOrdinaryDeleteProperty(the, instance, id, index); } txBoolean fxTypedArrayGetOwnProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* slot) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; if ((!id) && (index < length)) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (index << shift), slot, EndianNative); return 1; } slot->kind = XS_UNDEFINED_KIND; slot->flag = XS_NO_FLAG; return 0; } return fxOrdinaryGetOwnProperty(the, instance, id, index, slot); } txSlot* fxTypedArrayGetProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txFlag flag) { if ((!id) || fxIsCanonicalIndex(the, id)) { the->scratch.value.at.id = id; the->scratch.value.at.index = index; return &mxTypedArrayAccessor; } return fxOrdinaryGetProperty(the, instance, id, index, flag); } txBoolean fxTypedArrayGetPropertyValue(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* receiver, txSlot* value) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; if ((!id) && (index < length)) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (index << shift), value, EndianNative); return 1; } value->kind = XS_UNDEFINED_KIND; return 0; } return fxOrdinaryGetPropertyValue(the, instance, id, index, receiver, value); } txBoolean fxTypedArrayHasProperty(txMachine* the, txSlot* instance, txID id, txIndex index) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) 
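		/* the view's byte size shifted down to an element count */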
>> shift; return ((!id) && (index < length)) ? 1 : 0; } return fxOrdinaryHasProperty(the, instance, id, index); } void fxTypedArrayOwnKeys(txMachine* the, txSlot* instance, txFlag flag, txSlot* keys) { if (flag & XS_EACH_NAME_FLAG) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; if (length) { txIndex index; for (index = 0; index < length; index++) keys = fxQueueKey(the, 0, index, keys); } } fxOrdinaryOwnKeys(the, instance, flag, keys); } txSlot* fxTypedArraySetProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txFlag flag) { if ((!id) || fxIsCanonicalIndex(the, id)) { the->scratch.value.at.id = id; the->scratch.value.at.index = index; return &mxTypedArrayAccessor; } return fxOrdinarySetProperty(the, instance, id, index, flag); } txBoolean fxTypedArraySetPropertyValue(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* value, txSlot* receiver) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txSlot* arrayBuffer = buffer->value.reference->next; txIndex length; dispatch->value.typedArray.dispatch->coerce(the, value); if (arrayBuffer->flag & XS_DONT_SET_FLAG) mxTypeError("read-only buffer"); length = fxGetDataViewSize(the, view, buffer) >> shift; if ((!id) && (index < length)) { (*dispatch->value.typedArray.dispatch->setter)(the, buffer->value.reference->next, view->value.dataView.offset + (index << shift), value, EndianNative); } return 1; } return fxOrdinarySetPropertyValue(the, instance, id, index, value, receiver); } void fxCallTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index, txSlot* item) { /* THIS */ if (mxArgc > 1) mxPushSlot(mxArgv(1)); else mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushSlot(mxThis); mxGetIndex(index); if (item) { item->kind = the->stack->kind; item->value = the->stack->value; } mxPushInteger(index); mxPushSlot(mxThis); mxRunCount(3); } txSlot* fxCheckTypedArrayInstance(txMachine* the, txSlot* slot) { if (slot->kind == XS_REFERENCE_KIND) { txSlot* instance = slot->value.reference; if (((slot = instance->next)) && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_TYPED_ARRAY_KIND)) return instance; } mxTypeError("this is no TypedArray instance"); return C_NULL; } int fxCompareTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index) { txSlot* slot = the->stack; int result; /* THIS */ mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushUndefined(); (*dispatch->value.typedArray.dispatch->getter)(the, data, view->value.dataView.offset + (index << dispatch->value.typedArray.dispatch->shift), the->stack, EndianNative); mxPushSlot(slot); mxRunCount(2); if (the->stack->kind == XS_INTEGER_KIND) result = the->stack->value.integer; else { txNumber number = fxToNumber(the, the->stack); result = (number < 0) ? -1 : (number > 0) ? 
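		/* fold the comparator's numeric result into -1 / 0 / +1 */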
1 : 0; } mxPop(); if (data->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); return result; } txSlot* fxConstructTypedArray(txMachine* the) { txSlot* prototype; txSlot* dispatch; txSlot* instance; if (mxIsUndefined(mxTarget)) mxTypeError("call: TypedArray"); dispatch = mxFunctionInstanceHome(mxFunction->value.reference); dispatch = dispatch->next; prototype = mxBehaviorGetProperty(the, mxFunction->value.reference, mxID(_prototype), 0, XS_ANY); if (!dispatch || (dispatch->kind != XS_TYPED_ARRAY_KIND)) mxTypeError("new: TypedArray"); mxPushSlot(mxTarget); fxGetPrototypeFromConstructor(the, prototype); instance = fxNewTypedArrayInstance(the, dispatch->value.typedArray.dispatch, dispatch->value.typedArray.atomics); mxPullSlot(mxResult); return instance; } void fxCreateTypedArraySpecies(txMachine* the) { txSlot* instance = fxToInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* constructor = &the->stackPrototypes[-1 - (txInteger)dispatch->value.typedArray.dispatch->constructorID]; mxPushSlot(mxThis); mxGetID(mxID(_constructor)); fxToSpeciesConstructor(the, constructor); mxNew(); } txSlot* fxGetTypedArrayValue(txMachine* the, txSlot* instance, txInteger index) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* data = mxIsReference(buffer) ? fxCheckArrayBufferDetached(the, buffer, XS_IMMUTABLE) : C_NULL; txU2 shift = dispatch->value.typedArray.dispatch->shift; index <<= shift; if ((0 <= index) && ((index + (1 << shift)) <= view->value.dataView.size)) { (*dispatch->value.typedArray.dispatch->getter)(the, data, view->value.dataView.offset + index, &(the->scratch), EndianNative); return &the->scratch; } return C_NULL; } void fxReduceTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index) { /* THIS */ mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushSlot(mxResult); mxPushSlot(mxThis); mxGetIndex(index); mxPushInteger(index); mxPushSlot(mxThis); mxRunCount(4); mxPullSlot(mxResult); } txSlot* fxNewTypedArrayInstance(txMachine* the, txTypeDispatch* dispatch, txTypeAtomics* atomics) { txSlot* instance; txSlot* property; instance = fxNewObjectInstance(the); instance->flag |= XS_EXOTIC_FLAG; property = fxNextTypeDispatchProperty(the, instance, dispatch, atomics, XS_TYPED_ARRAY_BEHAVIOR, XS_INTERNAL_FLAG); property = property->next = fxNewSlot(the); property->flag = XS_INTERNAL_FLAG; property->kind = XS_DATA_VIEW_KIND; property->value.dataView.offset = 0; property->value.dataView.size = 0; property = fxNextNullProperty(the, property, XS_NO_ID, XS_INTERNAL_FLAG); return instance; } void fx_TypedArray(txMachine* the) { txSlot* instance = fxConstructTypedArray(the); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* data = C_NULL; txU2 shift = dispatch->value.typedArray.dispatch->shift; txSlot* slot; if ((mxArgc > 0) && (mxArgv(0)->kind == XS_REFERENCE_KIND)) { slot = mxArgv(0)->value.reference->next; if (slot && ((slot->kind == XS_ARRAY_BUFFER_KIND) || (slot->kind == XS_HOST_KIND))) { txInteger offset = fxArgToByteLength(the, 1, 0); txInteger size; txSlot* info; if (offset & ((1 << shift) - 1)) mxRangeError("invalid byteOffset %ld", offset); size = fxArgToByteLength(the, 2, -1); info = fxGetBufferInfo(the, mxArgv(0)); if (size >= 0) { size <<= shift; if (info->value.bufferInfo.length < (offset + size)) mxRangeError("out of range byteLength %ld", size); } else { if 
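			/* no explicit length argument: the remaining buffer bytes must divide
			   evenly into elements; resizable buffers track the length automatically */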
(info->value.bufferInfo.length & ((1 << shift) - 1)) mxRangeError("invalid byteLength %ld", info->value.bufferInfo.length); size = info->value.bufferInfo.length - offset; if (size < 0) mxRangeError("out of range byteLength %ld", size); if (info->value.bufferInfo.maxLength >= 0) size = -1; } view->value.dataView.offset = offset; view->value.dataView.size = size; buffer->kind = XS_REFERENCE_KIND; buffer->value.reference = mxArgv(0)->value.reference; } else if (slot && (slot->kind == XS_TYPED_ARRAY_KIND)) { txSlot* sourceDispatch = slot; txSlot* sourceView = sourceDispatch->next; txSlot* sourceBuffer = sourceView->next; txU2 sourceShift = sourceDispatch->value.typedArray.dispatch->shift; txInteger sourceLength = fxCheckDataViewSize(the, sourceView, sourceBuffer, XS_IMMUTABLE) >> sourceShift; txSlot* sourceData = sourceBuffer->value.reference->next; txInteger sourceDelta = sourceDispatch->value.typedArray.dispatch->size; txInteger sourceOffset = sourceView->value.dataView.offset; txInteger offset = 0; txInteger size = sourceLength << shift; /* THIS */ mxPushUninitialized(); /* FUNCTION */ mxPush(mxArrayBufferConstructor); /* TARGET */ if (sourceData->kind == XS_ARRAY_BUFFER_KIND) { mxPushSlot(sourceBuffer); mxGetID(mxID(_constructor)); fxToSpeciesConstructor(the, &mxArrayBufferConstructor); } else mxPush(mxArrayBufferConstructor); /* RESULT */ mxPushUndefined(); mxPushUninitialized(); mxPushUninitialized(); /* ARGUMENTS */ sourceLength = fxGetDataViewSize(the, sourceView, sourceBuffer) >> sourceShift; size = sourceLength << shift; mxPushInteger(size); mxRunCount(1); mxPullSlot(buffer); sourceLength = fxCheckDataViewSize(the, sourceView, sourceBuffer, XS_IMMUTABLE) >> sourceShift; size = sourceLength << shift; data = fxCheckArrayBufferDetached(the, buffer, XS_MUTABLE); view->value.dataView.offset = offset; view->value.dataView.size = size; if (dispatch == sourceDispatch) c_memcpy(data->value.arrayBuffer.address + offset, sourceData->value.arrayBuffer.address + sourceOffset, size); else { txBoolean contentType = (dispatch->value.typedArray.dispatch->constructorID == _BigInt64Array) || (dispatch->value.typedArray.dispatch->constructorID == _BigUint64Array); txBoolean sourceContentType = (sourceDispatch->value.typedArray.dispatch->constructorID == _BigInt64Array) || (sourceDispatch->value.typedArray.dispatch->constructorID == _BigUint64Array); if (contentType != sourceContentType) mxTypeError("incompatible content type"); mxPushUndefined(); while (offset < size) { (*sourceDispatch->value.typedArray.dispatch->getter)(the, sourceData, sourceOffset, the->stack, EndianNative); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*dispatch->value.typedArray.dispatch->setter)(the, data, offset, the->stack, EndianNative); sourceOffset += sourceDelta; offset += 1 << shift; } mxPop(); } } else { fx_TypedArray_from_object(the, instance, C_NULL, C_NULL); } } else { txInteger length = fxArgToByteLength(the, 0, 0); if (length & (((1 << shift) - 1) << (32 - shift))) mxRangeError("out of range byteLength"); length <<= shift; mxPush(mxArrayBufferConstructor); mxNew(); mxPushInteger(length); mxRunCount(1); mxPullSlot(buffer); view->value.dataView.offset = 0; view->value.dataView.size = length; } } void fx_TypedArray_from(txMachine* the) { txSlot* function = C_NULL; txSlot* _this = C_NULL; if (!mxIsReference(mxThis) || !(mxIsConstructor(mxThis->value.reference))) mxTypeError("this is no constructor"); if (mxArgc > 1) { txSlot* slot = mxArgv(1); if (!mxIsUndefined(slot)) { function = slot; if 
(!fxIsCallable(the, function)) mxTypeError("map is no function"); if (mxArgc > 2) _this = mxArgv(2); } } fx_TypedArray_from_object(the, C_NULL, function, _this); } void fx_TypedArray_from_object(txMachine* the, txSlot* instance, txSlot* function, txSlot* _this) { txSlot* stack = the->stack; txSlot* iterator; txSlot* next; txSlot* value; txSlot* list = C_NULL; txSlot* slot; txSlot* dispatch; txSlot* view; txSlot* buffer; txSlot* data; txU2 shift; txNumber length; mxTemporary(iterator); mxTemporary(next); if (fxGetIterator(the, mxArgv(0), iterator, next, 1)) { list = fxNewInstance(the); slot = list; length = 0; mxTemporary(value); while (fxIteratorNext(the, iterator, next, value)) { slot = fxNextSlotProperty(the, slot, value, XS_NO_ID, XS_NO_FLAG); length++; } } else { mxPushSlot(mxArgv(0)); mxGetID(mxID(_length)); length = fxToLength(the, the->stack); mxPop(); } if (instance) { dispatch = instance->next; view = dispatch->next; buffer = view->next; shift = dispatch->value.typedArray.dispatch->shift; mxPush(mxArrayBufferConstructor); mxNew(); mxPushNumber(length * dispatch->value.typedArray.dispatch->size); mxRunCount(1); mxPullSlot(buffer); data = fxCheckArrayBufferDetached(the, buffer, XS_MUTABLE); view->value.dataView.offset = 0; view->value.dataView.size = data->next->value.bufferInfo.length; } else { mxPushSlot(mxThis); mxNew(); mxPushNumber(length); mxRunCount(1); mxPullSlot(mxResult); instance = fxToInstance(the, mxResult); if (((slot = instance->next)) && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_TYPED_ARRAY_KIND)) { dispatch = instance->next; view = dispatch->next; buffer = view->next; data = fxCheckArrayBufferDetached(the, buffer, XS_MUTABLE); shift = dispatch->value.typedArray.dispatch->shift; if (view->value.dataView.size < (length * dispatch->value.typedArray.dispatch->size)) mxTypeError("too small TypedArray"); } else mxTypeError("no TypedArray"); } if (list) { txInteger index = 0; slot = list->next; while (slot) { /* ARG0 */ if (function) { /* THIS */ if (_this) mxPushSlot(_this); else mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushSlot(slot); mxPushInteger(index); mxRunCount(2); } else mxPushSlot(slot); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*dispatch->value.typedArray.dispatch->setter)(the, data, (index << shift), the->stack, EndianNative); mxPop(); index++; slot = slot->next; } } else { txInteger index = 0; txInteger count = (txInteger)length; while (index < count) { if (function) { /* THIS */ if (_this) mxPushSlot(_this); else mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushSlot(mxArgv(0)); mxGetIndex(index); mxPushInteger(index); mxRunCount(2); } else { mxPushSlot(mxArgv(0)); mxGetIndex(index); } (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*dispatch->value.typedArray.dispatch->setter)(the, data, (index << shift), the->stack, EndianNative); mxPop(); index++; } } the->stack = stack; } void fx_TypedArray_of(txMachine* the) { txInteger count = mxArgc; txInteger index = 0; mxPushSlot(mxThis); mxNew(); mxPushInteger(count); mxRunCount(1); mxPullSlot(mxResult); { mxResultTypedArrayDeclarations; txU2 shift = resultDispatch->value.typedArray.dispatch->shift; if (resultLength < count) mxTypeError("insufficient TypedArray"); while (index < count) { (*resultDispatch->value.typedArray.dispatch->coerce)(the, mxArgv(index)); if (resultBuffer->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); 
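			/* the coerce call above can run script (e.g. valueOf) that detaches the
			   buffer, which is why the address was re-checked before this raw write */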
(*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultView->value.dataView.offset + (index << shift), mxArgv(index), EndianNative); index++; } } } void fx_TypedArray_prototype_at(txMachine* the) { mxTypedArrayDeclarations; txInteger index = (mxArgc > 0) ? fxToInteger(the, mxArgv(0)) : 0; if (index < 0) index = length + index; if ((0 <= index) && (index < length)) { mxPushSlot(mxThis); mxGetIndex(index); mxPullSlot(mxResult); } } void fx_TypedArray_prototype_buffer_get(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; mxResult->kind = buffer->kind; mxResult->value = buffer->value; } void fx_TypedArray_prototype_byteLength_get(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = fxGetDataViewSize(the, view, buffer); } void fx_TypedArray_prototype_byteOffset_get(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txInteger offset = view->value.dataView.offset; txInteger size = view->value.dataView.size; txSlot* arrayBuffer = buffer->value.reference->next; txSlot* bufferInfo = arrayBuffer->next; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = 0; if (arrayBuffer->value.arrayBuffer.address == C_NULL) return; if (bufferInfo->value.bufferInfo.maxLength >= 0) { txInteger byteLength = bufferInfo->value.bufferInfo.length; if (offset > byteLength) return; size = (size < 0) ? byteLength : offset + size; if (size > byteLength) return; size -= offset; } mxResult->value.integer = offset; } void fx_TypedArray_prototype_copyWithin(txMachine* the) { mxMutableTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txInteger target = (txInteger)fxArgToIndex(the, 0, 0, length); txInteger start = (txInteger)fxArgToIndex(the, 1, 0, length); txInteger end = (txInteger)fxArgToIndex(the, 2, length, length); txInteger count = end - start; fxCheckArrayBufferDetached(the, buffer, XS_MUTABLE); if (count > length - target) count = length - target; if (count > 0) { txByte* address = buffer->value.reference->next->value.arrayBuffer.address + view->value.dataView.offset; c_memmove(address + (target * delta), address + (start * delta), count * delta); mxMeterSome((txU4)count * 2); } mxResult->kind = mxThis->kind; mxResult->value = mxThis->value; } void fx_TypedArray_prototype_entries(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* property; fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxPush(mxArrayIteratorPrototype); property = fxLastProperty(the, fxNewIteratorInstance(the, mxThis, mxID(_Array))); property = fxNextIntegerProperty(the, property, 2, XS_NO_ID, XS_INTERNAL_FLAG); mxPullSlot(mxResult); } void fx_TypedArray_prototype_every(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; mxResult->kind = XS_BOOLEAN_KIND; mxResult->value.boolean = 1; while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); mxResult->value.boolean = fxToBoolean(the, the->stack++); if 
(!mxResult->value.boolean) break; index++; } } void fx_TypedArray_prototype_fill(txMachine* the) { mxMutableTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txInteger start = (txInteger)fxArgToIndex(the, 1, 0, length); txInteger end = (txInteger)fxArgToIndex(the, 2, length, length); start *= delta; end *= delta; start += view->value.dataView.offset; end += view->value.dataView.offset; if (mxArgc > 0) mxPushSlot(mxArgv(0)); else mxPushUndefined(); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); fxCheckDataViewSize(the, view, buffer, XS_MUTABLE); while (start < end) { (*dispatch->value.typedArray.dispatch->setter)(the, buffer->value.reference->next, start, the->stack, EndianNative); start += delta; } mxPop(); mxResult->kind = mxThis->kind; mxResult->value = mxThis->value; } void fx_TypedArray_prototype_filter(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txSlot* list = fxNewInstance(the); txSlot* slot = list; txInteger count = 0; txInteger index = 0; mxPushUndefined(); while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, the->stack); if (fxToBoolean(the, the->stack++)) { count++; slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); } index++; } mxPop(); fxCreateTypedArraySpecies(the); mxPushNumber(count); mxRunCount(1); mxPullSlot(mxResult); { mxResultTypedArrayDeclarations; txInteger resultOffset = 0; txInteger resultSize = resultDispatch->value.typedArray.dispatch->size; if (resultLength < count) mxTypeError("insufficient buffer"); slot = list->next; while (slot) { (*resultDispatch->value.typedArray.dispatch->coerce)(the, slot); (*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultOffset, slot, EndianNative); resultOffset += resultSize; slot = slot->next; } } mxPop(); } void fx_TypedArray_prototype_find(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; mxPushUndefined(); while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, the->stack); if (fxToBoolean(the, the->stack++)) { mxResult->kind = the->stack->kind; mxResult->value = the->stack->value; break; } index++; } mxPop(); } void fx_TypedArray_prototype_findIndex(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = -1; while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); if (fxToBoolean(the, the->stack++)) { mxResult->value.integer = index; break; } index++; } } void fx_TypedArray_prototype_findLast(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = length; mxPushUndefined(); while (index > 0) { index--; fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, the->stack); if (fxToBoolean(the, the->stack++)) { mxResult->kind = the->stack->kind; mxResult->value = the->stack->value; break; } } mxPop(); } void fx_TypedArray_prototype_findLastIndex(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = length; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = -1; while (index > 0) { index--; fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, 
index, C_NULL); if (fxToBoolean(the, the->stack++)) { mxResult->value.integer = index; break; } } } void fx_TypedArray_prototype_forEach(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); mxPop(); index++; } } void fx_TypedArray_prototype_includes(txMachine* the) { mxTypedArrayDeclarations; fxBoolean(the, mxResult, 0); if (length) { txInteger index = (txInteger)fxArgToIndex(the, 1, 0, length); txSlot* argument; if (mxArgc > 0) mxPushSlot(mxArgv(0)); else mxPushUndefined(); argument = the->stack; while (index < length) { mxPushSlot(mxThis); mxGetIndex(index); if (fxIsSameValue(the, the->stack++, argument, 1)) { mxResult->value.boolean = 1; break; } index++; } mxPop(); } } void fx_TypedArray_prototype_indexOf(txMachine* the) { mxTypedArrayDeclarations; fxInteger(the, mxResult, -1); if (length) { txInteger index = (txInteger)fxArgToIndex(the, 1, 0, length); txSlot* argument; if (mxArgc > 0) mxPushSlot(mxArgv(0)); else mxPushUndefined(); argument = the->stack; while (index < length) { mxPushSlot(mxThis); if (fxHasIndex(the, index)) { mxPushSlot(mxThis); mxGetIndex(index); if (fxIsSameSlot(the, the->stack++, argument)) { mxResult->value.integer = index; break; } } index++; } mxPop(); } } void fx_TypedArray_prototype_join(txMachine* the) { mxTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txInteger offset = view->value.dataView.offset; txInteger limit = offset + (length << dispatch->value.typedArray.dispatch->shift); txString string; txSlot* list = fxNewInstance(the); txSlot* slot = list; txBoolean comma = 0; txInteger size = 0; if ((mxArgc > 0) && (mxArgv(0)->kind != XS_UNDEFINED_KIND)) { mxPushSlot(mxArgv(0)); string = fxToString(the, the->stack); the->stack->kind += XS_KEY_KIND - XS_STRING_KIND; the->stack->value.key.sum = mxStringLength(the->stack->value.string); } else { mxPushStringX(","); the->stack->kind += XS_KEY_KIND - XS_STRING_KIND; the->stack->value.key.sum = 1; } length = offset + fxGetDataViewSize(the, view, buffer); while (offset < limit) { if (comma) { slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); size = fxAddChunkSizes(the, size, slot->value.key.sum); } else comma = 1; if (offset < length) { mxPushUndefined(); (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, offset, the->stack, EndianNative); slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); string = fxToString(the, slot); slot->kind += XS_KEY_KIND - XS_STRING_KIND; slot->value.key.sum = mxStringLength(string); size = fxAddChunkSizes(the, size, slot->value.key.sum); mxPop(); } offset += delta; } mxPop(); string = mxResult->value.string = fxNewChunk(the, fxAddChunkSizes(the, size, 1)); slot = list->next; while (slot) { c_memcpy(string, slot->value.key.string, slot->value.key.sum); string += slot->value.key.sum; slot = slot->next; } *string = 0; mxResult->kind = XS_STRING_KIND; mxPop(); } void fx_TypedArray_prototype_keys(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* property; fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxPush(mxArrayIteratorPrototype); property = fxLastProperty(the, fxNewIteratorInstance(the, mxThis, mxID(_Array))); property = fxNextIntegerProperty(the, property, 1, XS_NO_ID, 
XS_INTERNAL_FLAG); mxPullSlot(mxResult); } void fx_TypedArray_prototype_lastIndexOf(txMachine* the) { mxTypedArrayDeclarations; fxInteger(the, mxResult, -1); if (length) { txIndex index = (txIndex)fxArgToLastIndex(the, 1, length, length); txSlot* argument; if (mxArgc > 0) mxPushSlot(mxArgv(0)); else mxPushUndefined(); argument = the->stack; while (index > 0) { index--; mxPushSlot(mxThis); if (fxHasIndex(the, index)) { mxPushSlot(mxThis); mxGetIndex(index); if (fxIsSameSlot(the, the->stack++, argument)) { mxResult->value.integer = index; break; } } } mxPop(); } } void fx_TypedArray_prototype_length_get(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = fxGetDataViewSize(the, view, buffer) >> shift; } void fx_TypedArray_prototype_map(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); fxCreateTypedArraySpecies(the); mxPushNumber(length); mxRunCount(1); mxPullSlot(mxResult); { mxResultTypedArrayDeclarations; txU2 shift = resultDispatch->value.typedArray.dispatch->shift; txInteger index = 0; if (resultLength < length) mxTypeError("insufficient buffer"); while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); if (resultBuffer->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); (*resultDispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultView->value.dataView.offset + (index << shift), the->stack, EndianNative); mxPop(); index++; } } } void fx_TypedArray_prototype_reduce(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; if (mxArgc > 1) *mxResult = *mxArgv(1); else if (index < length) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset, mxResult, EndianNative); index++; } else mxTypeError("no initial value"); while (index < length) { fxReduceTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index); index++; } } void fx_TypedArray_prototype_reduceRight(txMachine* the) { mxTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txSlot* function = fxArgToCallback(the, 0); txInteger index = length - 1; if (mxArgc > 1) *mxResult = *mxArgv(1); else if (index >= 0) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (index * delta), mxResult, EndianNative); index--; } else mxTypeError("no initial value"); while (index >= 0) { fxReduceTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index); index--; } } void fx_TypedArray_prototype_reverse(txMachine* the) { mxMutableTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; if (length) { txByte tmp; txByte* first = buffer->value.reference->next->value.arrayBuffer.address + view->value.dataView.offset; txByte* last = first + (length << dispatch->value.typedArray.dispatch->shift) - delta; txInteger offset; while (first < last) { for (offset = 0; offset < delta; offset++) { tmp = last[offset]; last[offset] = first[offset]; first[offset] = tmp; } first += delta; last -= delta; } mxMeterSome(length * 4); } 
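	/* reverse returns the receiver */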
mxResult->kind = mxThis->kind; mxResult->value = mxThis->value; } void fx_TypedArray_prototype_set(txMachine* the) { mxMutableTypedArrayDeclarations; txSlot* data = buffer->value.reference->next; txInteger delta = dispatch->value.typedArray.dispatch->size; txSlot* source = fxArgToInstance(the, 0); txInteger target = fxArgToByteLength(the, 1, 0); txInteger offset = view->value.dataView.offset + (target * delta); if (source->next && (source->next->kind == XS_TYPED_ARRAY_KIND)) { txSlot* sourceDispatch = source->next; txSlot* sourceView = sourceDispatch->next; txSlot* sourceBuffer = sourceView->next; txU2 shift = sourceDispatch->value.typedArray.dispatch->shift; txInteger sourceLength = fxCheckDataViewSize(the, sourceView, sourceBuffer, XS_IMMUTABLE) >> shift; txInteger sourceOffset = sourceView->value.dataView.offset; txSlot* sourceData = sourceBuffer->value.reference->next; txInteger limit = offset + (sourceLength * delta); if ((target < 0) || (length - sourceLength < target)) mxRangeError("invalid offset"); if (data == sourceData) { txSlot* resultBuffer; mxPush(mxArrayBufferConstructor); mxNew(); mxPushInteger(sourceLength << shift); mxRunCount(1); resultBuffer = the->stack->value.reference->next; c_memcpy(resultBuffer->value.arrayBuffer.address, sourceData->value.arrayBuffer.address + sourceOffset, sourceLength << shift); sourceData = resultBuffer; sourceOffset = 0; } else mxPushUndefined(); if (dispatch == sourceDispatch) { c_memcpy(data->value.arrayBuffer.address + offset, sourceData->value.arrayBuffer.address + sourceOffset, limit - offset); mxMeterSome(((txU4)(limit - offset)) * 2); } else { txInteger sourceDelta = 1 << shift; mxPushUndefined(); while (offset < limit) { (*sourceDispatch->value.typedArray.dispatch->getter)(the, sourceData, sourceOffset, the->stack, EndianNative); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); if (data->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); (*dispatch->value.typedArray.dispatch->setter)(the, data, offset, the->stack, EndianNative); sourceOffset += sourceDelta; offset += delta; } mxPop(); } mxPop(); } else { txInteger count, index; mxPushSlot(mxArgv(0)); mxGetID(mxID(_length)); count = fxToInteger(the, the->stack); mxPop(); if ((target < 0) || (length - count < target)) mxRangeError("invalid offset"); index = 0; while (index < count) { mxPushSlot(mxArgv(0)); mxGetIndex(index); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); if (data->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); (*dispatch->value.typedArray.dispatch->setter)(the, data, offset, the->stack, EndianNative); mxPop(); offset += delta; index++; } } } void fx_TypedArray_prototype_slice(txMachine* the) { mxTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txInteger start = (txInteger)fxArgToIndex(the, 0, 0, length); txInteger end = (txInteger)fxArgToIndex(the, 1, length, length); txInteger count = (end > start) ? 
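	/* element count for the species result: empty when end <= start */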
end - start : 0; txInteger index = 0; fxCreateTypedArraySpecies(the); mxPushNumber(count); mxRunCount(1); mxPullSlot(mxResult); { mxResultTypedArrayDeclarations; if (resultLength < count) mxTypeError("insufficient buffer"); if (count) { length = fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxPushUndefined(); while ((start < length) && (start < end)) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (start * delta), the->stack, EndianNative); (*resultDispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultView->value.dataView.offset + (index << resultDispatch->value.typedArray.dispatch->shift), the->stack, EndianNative); start++; index++; } while (start < end) { the->stack->kind = XS_UNDEFINED_KIND; (*resultDispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultView->value.dataView.offset + (index << resultDispatch->value.typedArray.dispatch->shift), the->stack, EndianNative); start++; index++; } mxPop(); } } } void fx_TypedArray_prototype_some(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; mxResult->kind = XS_BOOLEAN_KIND; mxResult->value.boolean = 0; while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); mxResult->value.boolean = fxToBoolean(the, the->stack++); if (mxResult->value.boolean) break; index++; } } void fx_TypedArray_prototype_sort(txMachine* the) { mxMutableTypedArrayDeclarations; txSlot* data = buffer->value.reference->next; txInteger delta = dispatch->value.typedArray.dispatch->size; txSlot* function = C_NULL; if (mxArgc > 0) { txSlot* slot = mxArgv(0); if (slot->kind != XS_UNDEFINED_KIND) { if (fxIsCallable(the, slot)) function = slot; else mxTypeError("compare is no function"); } } if (function) { /* like GCC qsort */ #define COMPARE(INDEX) \ fxCompareTypedArrayItem(the, function, dispatch, view, data, INDEX) #define MOVE(FROM,TO) \ from = data->value.arrayBuffer.address + view->value.dataView.offset + ((FROM) * delta); \ to = data->value.arrayBuffer.address + view->value.dataView.offset + ((TO) * delta); \ for (k = 0; k < delta; k++) *to++ = *from++ #define PUSH(INDEX) \ mxPushUndefined(); \ (*dispatch->value.typedArray.dispatch->getter)(the, data, view->value.dataView.offset + ((INDEX) * delta), the->stack, EndianNative) #define PULL(INDEX) \ (*dispatch->value.typedArray.dispatch->setter)(the, data, view->value.dataView.offset + ((INDEX) * delta), the->stack++, EndianNative) if (length > 0) { txInteger i, j, k; txByte* from; txByte* to; if (length > mxSortThreshold) { txInteger lo = 0, hi = length - 1; txSortPartition stack[mxSortPartitionCount]; txSortPartition *top = stack + 1; while (stack < top) { txIndex mid = lo + ((hi - lo) >> 1); PUSH(mid); if (COMPARE(lo) > 0) { MOVE(lo, mid); PULL(lo); PUSH(mid); } if (COMPARE(hi) < 0) { MOVE(hi, mid); PULL(hi); PUSH(mid); if (COMPARE(lo) > 0) { MOVE(lo, mid); PULL(lo); PUSH(mid); } } i = lo + 1; j = hi - 1; do { while ((COMPARE(i) < 0) && (i <= j)) i++; while ((COMPARE(j) > 0) && (i <= j)) j--; if (i < j) { PUSH(i); MOVE(j, i); PULL(j); i++; j--; } else if (i == j) { i++; j--; break; } } while (i <= j); if ((j - lo) <= mxSortThreshold) { if ((hi - i) <= mxSortThreshold) { top--; lo = top->lo; hi = top->hi; } else 
{ lo = i; } } else if ((hi - i) <= mxSortThreshold) { hi = j; } else if ((j - lo) > (hi - i)) { top->lo = lo; top->hi = j; top++; lo = i; } else { top->lo = i; top->hi = hi; top++; hi = j; } mxPop(); } } for (i = 1; i < length; i++) { PUSH(i); for (j = i; (j > 0) && (COMPARE(j - 1) > 0); j--) { MOVE(j - 1, j); } PULL(j); } } } else c_qsort(data->value.arrayBuffer.address, length, delta, dispatch->value.typedArray.dispatch->compare); mxResult->kind = mxThis->kind; mxResult->value = mxThis->value; } void fx_TypedArray_prototype_subarray(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txInteger length = fxGetDataViewSize(the, view, buffer) >> shift; txInteger start = (txInteger)fxArgToIndex(the, 0, 0, length); txInteger stop = (txInteger)fxArgToIndex(the, 1, length, length); if (stop < start) stop = start; fxCreateTypedArraySpecies(the); mxPushSlot(buffer); mxPushInteger(view->value.dataView.offset + (start << shift)); mxPushInteger(stop - start); mxRunCount(3); mxPullSlot(mxResult); fxCheckTypedArrayInstance(the, mxResult); } void fx_TypedArray_prototype_toLocaleString(txMachine* the) { mxTypedArrayDeclarations; txInteger index = 0; txString string; txSlot* list = fxNewInstance(the); txSlot* slot = list; txBoolean comma = 0; txInteger size = 0; mxPushStringX(","); the->stack->kind += XS_KEY_KIND - XS_STRING_KIND; the->stack->value.key.sum = 1; while (index < length) { if (comma) { slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); size += slot->value.key.sum; } else comma = 1; mxPushSlot(mxThis); mxGetIndex(index); if ((the->stack->kind != XS_UNDEFINED_KIND) && (the->stack->kind != XS_NULL_KIND)) { mxDub(); mxGetID(mxID(_toLocaleString)); mxCall(); mxRunCount(0); slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); string = fxToString(the, slot); slot->kind += XS_KEY_KIND - XS_STRING_KIND; slot->value.key.sum = mxStringLength(string); size = fxAddChunkSizes(the, size, slot->value.key.sum); } mxPop(); index++; } string = mxResult->value.string = fxNewChunk(the, fxAddChunkSizes(the, size, 1)); slot = list->next; while (slot) { c_memcpy(string, slot->value.key.string, slot->value.key.sum); string += slot->value.key.sum; slot = slot->next; } *string = 0; mxResult->kind = XS_STRING_KIND; mxPop(); } void fx_TypedArray_prototype_toStringTag_get(txMachine* the) { if (mxThis->kind == XS_REFERENCE_KIND) { txSlot* instance = mxThis->value.reference; txSlot* slot = instance->next; if (slot && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_TYPED_ARRAY_KIND)) { txTypeDispatch *dispatch = instance->next->value.typedArray.dispatch; txSlot* key = fxGetKey(the, mxID(dispatch->constructorID)); if (key->kind == XS_KEY_X_KIND) mxResult->kind = XS_STRING_X_KIND; else mxResult->kind = XS_STRING_KIND; mxResult->value.string = key->value.key.string; } } } void fx_TypedArray_prototype_values(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* property; fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxPush(mxArrayIteratorPrototype); property = fxLastProperty(the, fxNewIteratorInstance(the, mxThis, mxID(_Array))); property = fxNextIntegerProperty(the, property, 0, XS_NO_ID, XS_INTERNAL_FLAG); mxPullSlot(mxResult); } #if mxBigEndian #define mxEndianDouble_BtoN(a) 
(a)
#define mxEndianFloat_BtoN(a) (a)
#define mxEndianS64_BtoN(a) (a)
#define mxEndianU64_BtoN(a) (a)
#define mxEndianS32_BtoN(a) (a)
#define mxEndianU32_BtoN(a) (a)
#define mxEndianS16_BtoN(a) (a)
#define mxEndianU16_BtoN(a) (a)
#define mxEndianDouble_NtoB(a) (a)
#define mxEndianFloat_NtoB(a) (a)
#define mxEndianS64_NtoB(a) (a)
#define mxEndianU64_NtoB(a) (a)
#define mxEndianS32_NtoB(a) (a)
#define mxEndianU32_NtoB(a) (a)
#define mxEndianS16_NtoB(a) (a)
#define mxEndianU16_NtoB(a) (a)
#else
#define mxEndianDouble_LtoN(a) (a)
#define mxEndianFloat_LtoN(a) (a)
#define mxEndianS64_LtoN(a) (a)
#define mxEndianU64_LtoN(a) (a)
#define mxEndianS32_LtoN(a) (a)
#define mxEndianU32_LtoN(a) (a)
#define mxEndianS16_LtoN(a) (a)
#define mxEndianU16_LtoN(a) (a)
#define mxEndianDouble_NtoL(a) (a)
#define mxEndianFloat_NtoL(a) (a)
#define mxEndianS64_NtoL(a) (a)
#define mxEndianU64_NtoL(a) (a)
#define mxEndianS32_NtoL(a) (a)
#define mxEndianU32_NtoL(a) (a)
#define mxEndianS16_NtoL(a) (a)
#define mxEndianU16_NtoL(a) (a)
#endif
#if mxLittleEndian
#define mxEndianDouble_BtoN(a) (mxEndianDouble_Swap(a))
#define mxEndianFloat_BtoN(a) (mxEndianFloat_Swap(a))
#define mxEndianS64_BtoN(a) ((txS8) mxEndian64_Swap(a))
#define mxEndianU64_BtoN(a) ((txU8) mxEndian64_Swap(a))
#define mxEndianS32_BtoN(a) ((txS4) mxEndian32_Swap(a))
#define mxEndianU32_BtoN(a) ((txU4) mxEndian32_Swap(a))
#define mxEndianS16_BtoN(a) ((txS2) mxEndian16_Swap(a))
#define mxEndianU16_BtoN(a) ((txU2) mxEndian16_Swap(a))
#define mxEndianDouble_NtoB(a) (mxEndianDouble_Swap(a))
#define mxEndianFloat_NtoB(a) (mxEndianFloat_Swap(a))
#define mxEndianS64_NtoB(a) ((txS8) mxEndian64_Swap(a))
#define mxEndianU64_NtoB(a) ((txU8) mxEndian64_Swap(a))
#define mxEndianS32_NtoB(a) ((txS4) mxEndian32_Swap(a))
#define mxEndianU32_NtoB(a) ((txU4) mxEndian32_Swap(a))
#define mxEndianS16_NtoB(a) ((txS2) mxEndian16_Swap(a))
#define mxEndianU16_NtoB(a) ((txU2) mxEndian16_Swap(a))
#else
#define mxEndianDouble_LtoN(a) (mxEndianDouble_Swap(a))
#define mxEndianFloat_LtoN(a) (mxEndianFloat_Swap(a))
#define mxEndianS64_LtoN(a) ((txS8) mxEndian64_Swap(a))
#define mxEndianU64_LtoN(a) ((txU8) mxEndian64_Swap(a))
#define mxEndianS32_LtoN(a) ((txS4) mxEndian32_Swap(a))
#define mxEndianU32_LtoN(a) ((txU4) mxEndian32_Swap(a))
#define mxEndianS16_LtoN(a) ((txS2) mxEndian16_Swap(a))
#define mxEndianU16_LtoN(a) ((txU2) mxEndian16_Swap(a))
#define mxEndianDouble_NtoL(a) (mxEndianDouble_Swap(a))
#define mxEndianFloat_NtoL(a) (mxEndianFloat_Swap(a))
#define mxEndianS64_NtoL(a) ((txS8) mxEndian64_Swap(a))
#define mxEndianU64_NtoL(a) ((txU8) mxEndian64_Swap(a))
#define mxEndianS32_NtoL(a) ((txS4) mxEndian32_Swap(a))
#define mxEndianU32_NtoL(a) ((txU4) mxEndian32_Swap(a))
#define mxEndianS16_NtoL(a) ((txS2) mxEndian16_Swap(a))
#define mxEndianU16_NtoL(a) ((txU2) mxEndian16_Swap(a))
#endif
#if defined(__GNUC__) || defined(__llvm__)
#define mxEndian16_Swap(a) __builtin_bswap16(a)
#else
static txU2 mxEndian16_Swap(txU2 a)
{
	txU2 b;
	txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b;
	int i;
	for (i = 0; i < 2; i++)
		p2[i] = p1[1 - i];
	return b;
}
#endif
#if defined(__GNUC__) || defined(__llvm__)
#define mxEndian32_Swap(a) __builtin_bswap32(a)
#else
static txU4 mxEndian32_Swap(txU4 a)
{
	txU4 b;
	txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b;
	int i;
	for (i = 0; i < 4; i++)
		p2[i] = p1[3 - i];
	return b;
}
#endif
#if defined(__GNUC__) || defined(__llvm__)
#define mxEndian64_Swap(a) __builtin_bswap64(a)
#else
static txU8 mxEndian64_Swap(txU8 a)
{
	txU8 b;	/* was txU4: an 8-byte swap must not write through a 4-byte temporary */
	txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b;
	int i;
	for (i = 0; i < 8; i++)
		p2[i] = p1[7 - i];
	return b;
}
#endif
static float mxEndianFloat_Swap(float a)
{
#if defined(__GNUC__) || defined(__llvm__)
	uint32_t result = __builtin_bswap32(*(uint32_t *)&a);
	return *(float *)&result;
#else
	float b;
	txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b;
	int i;
	for (i = 0; i < 4; i++)
		p2[i] = p1[3 - i];
	return b;
#endif
}
static double mxEndianDouble_Swap(double a)
{
#if defined(__GNUC__) || defined(__llvm__)
	uint64_t result = __builtin_bswap64(*(uint64_t *)&a);
	return *(double *)&result;
#else
	double b;
	txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b;
	int i;
	for (i = 0; i < 8; i++)
		p2[i] = p1[7 - i];
	return b;
#endif
}
#define toNative(size, endian) mxEndian##size##_##endian##toN
#define fromNative(size, endian) mxEndian##size##_Nto##endian
#define IMPORT(size) (endian == EndianBig ? toNative(size, B)(value) : endian == EndianLittle ? toNative(size, L)(value) : (value))
#define EXPORT(size) (endian == EndianBig ? fromNative(size, B)(value) : endian == EndianLittle ? fromNative(size, L)(value) : (value))	/* was toNative(size, L); _LtoN and _NtoL are the same swap, but export should use fromNative */
int fxBigInt64Compare(const void* p, const void* q)
{
	txS8 a = *((txS8*)p);
	txS8 b = *((txS8*)q);
	return (a < b) ? -1 : (a > b) ? 1 : 0;
}
void fxBigInt64Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian)
{
	txS8 value;
#ifdef mxMisalignedSettersCrash
	c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value));	/* was c_read32, which truncated the 64-bit value */
#else
	value = *((txS8*)(data->value.arrayBuffer.address + offset));
#endif
	value = IMPORT(S64);
	fxFromBigInt64(the, slot, value);
	mxMeterOne();
}
void fxBigInt64Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian)
{
	txS8 value = (txS8)fxToBigInt64(the, slot);
#ifdef mxMisalignedSettersCrash
	value = EXPORT(S64);
	c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txS8));
#else
	*((txS8*)(data->value.arrayBuffer.address + offset)) = EXPORT(S64);
#endif
	mxMeterOne();
}
int fxBigUint64Compare(const void* p, const void* q)
{
	txU8 a = *((txU8*)p);
	txU8 b = *((txU8*)q);
	return (a < b) ? -1 : (a > b) ? 1 : 0;
}
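/* Illustrative sketch (added commentary, not part of the engine): the getters
   below all share one shape. The hypothetical helper fxDemoReadU32 shows how
   IMPORT resolves: the macro expands to an expression referencing the local
   names "value" and "endian", and swaps bytes only when the requested
   endianness differs from the host's; c_memcpy keeps the read safe on targets
   where misaligned loads crash. */
static txU4 fxDemoReadU32(txSlot* data, txInteger offset, int endian)
{
	txU4 value;
	c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value));	/* alignment-safe read */
	return IMPORT(U32);	/* identity on matching endianness, mxEndian32_Swap otherwise */
}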
void fxBigUint64Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian)
{
	txU8 value;
#ifdef mxMisalignedSettersCrash
	c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value));	/* was c_read32, which truncated the 64-bit value */
#else
	value = *((txU8*)(data->value.arrayBuffer.address + offset));
#endif
	value = IMPORT(U64);
	fxFromBigUint64(the, slot, value);
	mxMeterOne();
}
void fxBigUint64Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian)
{
	txU8 value = (txU8)fxToBigUint64(the, slot);
#ifdef mxMisalignedSettersCrash
	value = EXPORT(U64);
	c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txU8));
#else
	*((txU8*)(data->value.arrayBuffer.address + offset)) = EXPORT(U64);
#endif
	mxMeterOne();
}
int fxFloat32Compare(const void* p, const void* q)
{
	float a = *((float*)p);
	float b = *((float*)q);
	if (c_isnan(a)) {
		if (c_isnan(b))
			return 0;
		return 1;
	}
	if (c_isnan(b))
		return -1;
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	if (a == 0) {
		if (c_signbit(a)) {
			if (c_signbit(b))
				return 0;
			return -1;
		}
		if (c_signbit(b))
			return 1;
	}
	return 0;
}
void fxFloat32Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian)
{
	float value;
	slot->kind = XS_NUMBER_KIND;
#ifdef mxMisalignedSettersCrash
	c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value));
#else
	value = *((float*)(data->value.arrayBuffer.address + offset));
#endif
	slot->value.number = IMPORT(Float);
	mxMeterOne();
}
void fxFloat32Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian)
{
	float value = (float)slot->value.number;
#ifdef mxMisalignedSettersCrash
	value = EXPORT(Float);
	c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(float));
#else
	*((float*)(data->value.arrayBuffer.address + offset)) = EXPORT(Float);
#endif
	mxMeterOne();
}
int fxFloat64Compare(const void* p, const void* q)
{
	double a = *((double*)p);
	double b = *((double*)q);
	if (c_isnan(a)) {
		if (c_isnan(b))
			return 0;
		return 1;
	}
	if (c_isnan(b))
		return -1;
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	if (a == 0) {
		if (c_signbit(a)) {
			if (c_signbit(b))
				return 0;
			return -1;
		}
		if (c_signbit(b))
			return 1;
	}
	return 0;
}
void fxFloat64Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian)
{
	double value;
	slot->kind = XS_NUMBER_KIND;
#ifdef mxMisalignedSettersCrash
	c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value));
#else
	value = *((double*)(data->value.arrayBuffer.address + offset));
#endif
	slot->value.number = IMPORT(Double);
	mxMeterOne();
}
void fxFloat64Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian)
{
	double value = slot->value.number;
#ifdef mxMisalignedSettersCrash
	value = EXPORT(Double);
	c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(double));
#else
	*((double*)(data->value.arrayBuffer.address + offset)) = EXPORT(Double);
#endif
	mxMeterOne();
}
void fxIntCoerce(txMachine* the, txSlot* slot)
{
	fxToInteger(the, slot);
}
void fxUintCoerce(txMachine* the, txSlot* slot)
{
	fxToUnsigned(the, slot);
}
int fxInt8Compare(const void* p, const void* q)
{
	txS1 a = *((txS1*)p);
	txS1 b = *((txS1*)q);
	return (a < b) ? -1 : (a > b) ?
1 : 0; } void fxInt8Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { slot->kind = XS_INTEGER_KIND; slot->value.integer = *((txS1*)(data->value.arrayBuffer.address + offset)); mxMeterOne(); } void fxInt8Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { *((txS1*)(data->value.arrayBuffer.address + offset)) = (txS1)slot->value.integer; mxMeterOne(); } int fxInt16Compare(const void* p, const void* q) { txS2 a = *((txS2*)p); txS2 b = *((txS2*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxInt16Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS2 value; slot->kind = XS_INTEGER_KIND; #ifdef mxMisalignedSettersCrash c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value)); #else value = *((txS2*)(data->value.arrayBuffer.address + offset)); #endif slot->value.integer = IMPORT(S16); mxMeterOne(); } void fxInt16Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS2 value = (txS2)slot->value.integer; #ifdef mxMisalignedSettersCrash value = EXPORT(S16); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txS2)); #else *((txS2*)(data->value.arrayBuffer.address + offset)) = EXPORT(S16); #endif mxMeterOne(); } int fxInt32Compare(const void* p, const void* q) { txS4 a = *((txS4*)p); txS4 b = *((txS4*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxInt32Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS4 value; slot->kind = XS_INTEGER_KIND; #ifdef mxMisalignedSettersCrash value = c_read32(data->value.arrayBuffer.address + offset); #else value = *((txS4*)(data->value.arrayBuffer.address + offset)); #endif slot->value.integer = IMPORT(S32); mxMeterOne(); } void fxInt32Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS4 value = (txS4)slot->value.integer; #ifdef mxMisalignedSettersCrash value = EXPORT(S32); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txS4)); #else *((txS4*)(data->value.arrayBuffer.address + offset)) = EXPORT(S32); #endif mxMeterOne(); } int fxUint8Compare(const void* p, const void* q) { txU1 a = c_read8((txU1*)p); txU1 b = c_read8((txU1*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxUint8Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { slot->kind = XS_INTEGER_KIND; slot->value.integer = c_read8((txU1*)(data->value.arrayBuffer.address + offset)); mxMeterOne(); } void fxUint8Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txUnsigned tmp = (slot->kind == XS_INTEGER_KIND) ? (txUnsigned)slot->value.integer : (txUnsigned)slot->value.number; *((txU1*)(data->value.arrayBuffer.address + offset)) = (txU1)tmp; mxMeterOne(); } int fxUint16Compare(const void* p, const void* q) { txU2 a = *((txU2*)p); txU2 b = *((txU2*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxUint16Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txU2 value; slot->kind = XS_INTEGER_KIND; #ifdef mxMisalignedSettersCrash c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value)); #else value = *((txU2*)(data->value.arrayBuffer.address + offset)); #endif slot->value.integer = IMPORT(U16); mxMeterOne(); } void fxUint16Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txUnsigned tmp = (slot->kind == XS_INTEGER_KIND) ? 
(txUnsigned)slot->value.integer : (txUnsigned)slot->value.number; txU2 value = (txU2)tmp; #ifdef mxMisalignedSettersCrash value = EXPORT(U16); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txU2)); #else *((txU2*)(data->value.arrayBuffer.address + offset)) = EXPORT(U16); #endif mxMeterOne(); } int fxUint32Compare(const void* p, const void* q) { txU4 a = *((txU4*)p); txU4 b = *((txU4*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxUint32Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { #ifdef mxMisalignedSettersCrash txUnsigned value = c_read32(data->value.arrayBuffer.address + offset); #else txUnsigned value = *((txU4*)(data->value.arrayBuffer.address + offset)); #endif value = IMPORT(U32); if (((txInteger)value) >= 0) { slot->kind = XS_INTEGER_KIND; slot->value.integer = value; } else { slot->kind = XS_NUMBER_KIND; slot->value.number = value; } mxMeterOne(); } void fxUint32Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txU4 value = (slot->kind == XS_INTEGER_KIND) ? (txU4)slot->value.integer : (txU4)slot->value.number; #ifdef mxMisalignedSettersCrash value = EXPORT(U32); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txU4)); #else *((txU4*)(data->value.arrayBuffer.address + offset)) = EXPORT(U32); #endif mxMeterOne(); } void fxUint8ClampedSetter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txNumber value = fxToNumber(the, slot); if (value <= 0) value = 0; else if (value >= 255) value = 255; else if (c_isnan(value)) value = 0; else value = c_nearbyint(value); *((txU1*)(data->value.arrayBuffer.address + offset)) = (txU1)value; mxMeterOne(); }
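/* Illustrative sketch (added, not part of the engine): the clamping rules that
   fxUint8ClampedSetter applies above, isolated as a hypothetical pure function.
   NaN and negative values map to 0, values at or above 255 saturate, and
   everything else rounds half-to-even via c_nearbyint (2.5 -> 2, 3.5 -> 4),
   which is what Uint8ClampedArray requires. */
static txU1 fxDemoClampUint8(txNumber number)
{
	if (!(number > 0))	/* false for NaN, zero and negatives */
		return 0;
	if (number >= 255)
		return 255;
	return (txU1)c_nearbyint(number);
}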
/* * Copyright (c) 2016-2017 Moddable Tech, Inc. * * This file is part of the Moddable SDK Runtime. * * The Moddable SDK Runtime is free software: you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * The Moddable SDK Runtime is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with the Moddable SDK Runtime. If not, see <http://www.gnu.org/licenses/>. * * This file incorporates work covered by the following copyright and * permission notice: * * Copyright (C) 2010-2016 Marvell International Ltd. * Copyright (C) 2002-2010 Kinoma, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "xsAll.h" static txSlot* fxArgToInstance(txMachine* the, txInteger i); static txBoolean fxCheckLength(txMachine* the, txSlot* slot, txInteger* index); static txSlot* fxCheckArrayBufferDetached(txMachine* the, txSlot* slot, txBoolean mutable); static txSlot* fxCheckArrayBufferInstance(txMachine* the, txSlot* slot); static txSlot* fxNewArrayBufferInstance(txMachine* the); static txSlot* fxCheckDataViewInstance(txMachine* the, txSlot* slot); static txInteger fxCheckDataViewSize(txMachine* the, txSlot* view, txSlot* buffer, txBoolean mutable); static txSlot* fxNewDataViewInstance(txMachine* the); static void fxCallTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index, txSlot* item); static txSlot* fxCheckTypedArrayInstance(txMachine* the, txSlot* slot); static int fxCompareTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index); static txSlot* fxConstructTypedArray(txMachine* the); static txSlot* fxNewTypedArrayInstance(txMachine* the, txTypeDispatch* dispatch, txTypeAtomics* atomics); static void fxReduceTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index); static txBoolean fxTypedArrayDefineOwnProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* slot, txFlag mask); static txBoolean fxTypedArrayDeleteProperty(txMachine* the, txSlot* instance, txID id, txIndex index); static txBoolean fxTypedArrayGetOwnProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* slot); static txSlot* fxTypedArrayGetProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txFlag flag); static txBoolean fxTypedArrayGetPropertyValue(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* value, txSlot* receiver); static txBoolean fxTypedArrayHasProperty(txMachine* the, txSlot* instance, txID id, txIndex index); static void fxTypedArrayOwnKeys(txMachine* the, txSlot* instance, txFlag flag, 
txSlot* keys); static txSlot* fxTypedArraySetProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txFlag flag); static txBoolean fxTypedArraySetPropertyValue(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* value, txSlot* receiver); static void fx_TypedArray_from_object(txMachine* the, txSlot* instance, txSlot* function, txSlot* _this); const txBehavior ICACHE_FLASH_ATTR gxTypedArrayBehavior = { fxTypedArrayGetProperty, fxTypedArraySetProperty, fxOrdinaryCall, fxOrdinaryConstruct, fxTypedArrayDefineOwnProperty, fxTypedArrayDeleteProperty, fxTypedArrayGetOwnProperty, fxTypedArrayGetPropertyValue, fxOrdinaryGetPrototype, fxTypedArrayHasProperty, fxOrdinaryIsExtensible, fxTypedArrayOwnKeys, fxOrdinaryPreventExtensions, fxTypedArraySetPropertyValue, fxOrdinarySetPrototype, }; void *fxArrayBuffer(txMachine* the, txSlot* slot, void* data, txInteger byteLength, txInteger maxByteLength) { txSlot* instance; txSlot* arrayBuffer; txSlot* bufferInfo; if (byteLength < 0) mxRangeError("invalid byteLength %ld", byteLength); mxPush(mxArrayBufferPrototype); instance = fxNewArrayBufferInstance(the); arrayBuffer = instance->next; arrayBuffer->value.arrayBuffer.address = fxNewChunk(the, byteLength); bufferInfo = arrayBuffer->next; bufferInfo->value.bufferInfo.length = byteLength; bufferInfo->value.bufferInfo.maxLength = maxByteLength; if (data != NULL) c_memcpy(arrayBuffer->value.arrayBuffer.address, data, byteLength); else c_memset(arrayBuffer->value.arrayBuffer.address, 0, byteLength); mxPullSlot(slot); return arrayBuffer->value.arrayBuffer.address; } void fxGetArrayBufferData(txMachine* the, txSlot* slot, txInteger byteOffset, void* data, txInteger byteLength) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; txInteger length = bufferInfo->value.bufferInfo.length; if ((byteOffset < 0) || (length < byteOffset)) mxRangeError("out of range byteOffset %ld", byteOffset); if ((byteLength < 0) || (length < (byteOffset + byteLength))) mxRangeError("out of range byteLength %ld", byteLength); c_memcpy(data, arrayBuffer->value.arrayBuffer.address + byteOffset, byteLength); } txInteger fxGetArrayBufferLength(txMachine* the, txSlot* slot) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; return bufferInfo->value.bufferInfo.length; } txInteger fxGetArrayBufferMaxLength(txMachine* the, txSlot* slot) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; return bufferInfo->value.bufferInfo.maxLength; } void fxSetArrayBufferData(txMachine* the, txSlot* slot, txInteger byteOffset, void* data, txInteger byteLength) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; txInteger length = bufferInfo->value.bufferInfo.length; if ((byteOffset < 0) || (length < byteOffset)) mxRangeError("out of range byteOffset %ld", byteOffset); if ((byteLength < 0) || (length < (byteOffset + byteLength))) mxRangeError("out of range byteLength %ld", byteLength); c_memcpy(arrayBuffer->value.arrayBuffer.address + byteOffset, data, byteLength); } void fxSetArrayBufferLength(txMachine* the, txSlot* slot, txInteger target) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; txInteger length = 
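/* Added note: the resize logic below first asks fxRenewChunk to grow or shrink
   the chunk in place; only when that fails does it allocate a new chunk and
   copy the surviving bytes. In both paths, bytes gained by growing are
   zero-filled so no stale heap contents leak into the buffer. */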
bufferInfo->value.bufferInfo.length; txByte* address = arrayBuffer->value.arrayBuffer.address; if (bufferInfo->value.bufferInfo.maxLength < 0) fxReport(the, "# Use xsArrayBufferResizable instead of xsArrayBuffer\n"); if (length != target) { if (address) address = (txByte*)fxRenewChunk(the, address, target); if (address) { if (length < target) c_memset(address + length, 0, target - length); } else { address = (txByte*)fxNewChunk(the, target); if (length < target) { c_memcpy(address, arrayBuffer->value.arrayBuffer.address, length); c_memset(address + length, 0, target - length); } else c_memcpy(address, arrayBuffer->value.arrayBuffer.address, target); } arrayBuffer->value.arrayBuffer.address = address; bufferInfo->value.bufferInfo.length = target; } } void* fxToArrayBuffer(txMachine* the, txSlot* slot) { txSlot* instance = fxCheckArrayBufferInstance(the, slot); txSlot* arrayBuffer = instance->next; return arrayBuffer->value.arrayBuffer.address; } void fxBuildDataView(txMachine* the) { txSlot* slot; txInteger index; const txTypeDispatch *dispatch; const txTypeAtomics *atomics; txSlot* property; txSlot* constructor; mxPush(mxObjectPrototype); slot = fxLastProperty(the, fxNewObjectInstance(the)); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_get_byteLength), C_NULL, mxID(_byteLength), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_get_maxByteLength), C_NULL, mxID(_maxByteLength), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_get_resizable), C_NULL, mxID(_resizable), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_concat), 1, mxID(_concat), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_resize), 1, mxID(_resize), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_slice), 2, mxID(_slice), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_prototype_transfer), 0, mxID(_transfer), XS_DONT_ENUM_FLAG); slot = fxNextStringXProperty(the, slot, "ArrayBuffer", mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG | XS_DONT_SET_FLAG); mxArrayBufferPrototype = *the->stack; slot = fxBuildHostConstructor(the, mxCallback(fx_ArrayBuffer), 1, mxID(_ArrayBuffer)); mxArrayBufferConstructor = *the->stack; slot = fxLastProperty(the, slot); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_fromBigInt), 1, mxID(_fromBigInt), XS_DONT_ENUM_FLAG); #ifndef mxCESU8 slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_fromString), 1, mxID(_fromString), XS_DONT_ENUM_FLAG); #endif slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_ArrayBuffer_isView), 1, mxID(_isView), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_species_get), C_NULL, mxID(_Symbol_species), XS_DONT_ENUM_FLAG); mxPop(); mxPush(mxObjectPrototype); slot = fxLastProperty(the, fxNewObjectInstance(the)); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getBigInt64), 1, mxID(_getBigInt64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setBigInt64), 2, mxID(_setBigInt64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getBigUint64), 1, mxID(_getBigUint64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, 
mxCallback(fx_DataView_prototype_setBigUint64), 2, mxID(_setBigUint64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getFloat32), 1, mxID(_getFloat32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setFloat32), 2, mxID(_setFloat32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getFloat64), 1, mxID(_getFloat64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setFloat64), 2, mxID(_setFloat64), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getInt8), 1, mxID(_getInt8), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setInt8), 2, mxID(_setInt8), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getInt16), 1, mxID(_getInt16), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setInt16), 2, mxID(_setInt16), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getInt32), 1, mxID(_getInt32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setInt32), 2, mxID(_setInt32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getUint8), 1, mxID(_getUint8), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setUint8), 2, mxID(_setUint8), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getUint16), 1, mxID(_getUint16), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setUint16), 2, mxID(_setUint16), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_getUint32), 1, mxID(_getUint32), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_DataView_prototype_setUint32), 2, mxID(_setUint32), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_DataView_prototype_buffer_get), C_NULL, mxID(_buffer), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_DataView_prototype_byteLength_get), C_NULL, mxID(_byteLength), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_DataView_prototype_byteOffset_get), C_NULL, mxID(_byteOffset), XS_DONT_ENUM_FLAG); slot = fxNextStringXProperty(the, slot, "DataView", mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG | XS_DONT_SET_FLAG); mxDataViewPrototype = *the->stack; slot = fxBuildHostConstructor(the, mxCallback(fx_DataView), 1, mxID(_DataView)); mxDataViewConstructor = *the->stack; mxPop(); fxNewHostFunction(the, mxCallback(fxTypedArrayGetter), 0, XS_NO_ID); fxNewHostFunction(the, mxCallback(fxTypedArraySetter), 1, XS_NO_ID); mxPushUndefined(); the->stack->flag = XS_DONT_DELETE_FLAG; the->stack->kind = XS_ACCESSOR_KIND; the->stack->value.accessor.getter = (the->stack + 2)->value.reference; the->stack->value.accessor.setter = (the->stack + 1)->value.reference; mxPull(mxTypedArrayAccessor); mxPop(); mxPop(); mxPush(mxObjectPrototype); slot = fxLastProperty(the, fxNewObjectInstance(the)); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_at), 1, mxID(_at), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, 
mxCallback(fx_TypedArray_prototype_buffer_get), C_NULL, mxID(_buffer), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_TypedArray_prototype_byteLength_get), C_NULL, mxID(_byteLength), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_TypedArray_prototype_byteOffset_get), C_NULL, mxID(_byteOffset), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_TypedArray_prototype_length_get), C_NULL, mxID(_length), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_TypedArray_prototype_toStringTag_get), C_NULL, mxID(_Symbol_toStringTag), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_copyWithin), 2, mxID(_copyWithin), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_entries), 0, mxID(_entries), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_every), 1, mxID(_every), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_fill), 1, mxID(_fill), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_filter), 1, mxID(_filter), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_find), 1, mxID(_find), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_findIndex), 1, mxID(_findIndex), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_findLast), 1, mxID(_findLast), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_findLastIndex), 1, mxID(_findLastIndex), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_forEach), 1, mxID(_forEach), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_includes), 1, mxID(_includes), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_indexOf), 1, mxID(_indexOf), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_join), 1, mxID(_join), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_keys), 0, mxID(_keys), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_lastIndexOf), 1, mxID(_lastIndexOf), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_map), 1, mxID(_map), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_reduce), 1, mxID(_reduce), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_reduceRight), 1, mxID(_reduceRight), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_reverse), 0, mxID(_reverse), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_set), 1, mxID(_set), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_slice), 2, mxID(_slice), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_some), 1, mxID(_some), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, 
mxCallback(fx_TypedArray_prototype_sort), 1, mxID(_sort), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_subarray), 2, mxID(_subarray), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_toLocaleString), 0, mxID(_toLocaleString), XS_DONT_ENUM_FLAG); property = mxBehaviorGetProperty(the, mxArrayPrototype.value.reference, mxID(_toString), 0, XS_OWN); slot = fxNextSlotProperty(the, slot, property, mxID(_toString), XS_DONT_ENUM_FLAG); property = slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_prototype_values), 0, mxID(_values), XS_DONT_ENUM_FLAG); slot = fxNextSlotProperty(the, slot, property, mxID(_Symbol_iterator), XS_DONT_ENUM_FLAG); mxTypedArrayPrototype = *the->stack; constructor = fxBuildHostConstructor(the, mxCallback(fx_TypedArray), 0, mxID(_TypedArray)); mxTypedArrayConstructor = *the->stack; slot = fxLastProperty(the, constructor); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_from), 1, mxID(_from), XS_DONT_ENUM_FLAG); slot = fxNextHostFunctionProperty(the, slot, mxCallback(fx_TypedArray_of), 0, mxID(_of), XS_DONT_ENUM_FLAG); slot = fxNextHostAccessorProperty(the, slot, mxCallback(fx_species_get), C_NULL, mxID(_Symbol_species), XS_DONT_ENUM_FLAG); for (index = 0, dispatch = &gxTypeDispatches[0], atomics = &gxTypeAtomics[0]; index < mxTypeArrayCount; index++, dispatch++, atomics++) { mxPush(mxTypedArrayPrototype); slot = fxLastProperty(the, fxNewObjectInstance(the)); slot = fxNextIntegerProperty(the, slot, dispatch->size, mxID(_BYTES_PER_ELEMENT), XS_GET_ONLY); slot = fxBuildHostConstructor(the, mxCallback(fx_TypedArray), 3, mxID(dispatch->constructorID)); the->stackPrototypes[-1 - (txInteger)dispatch->constructorID] = *the->stack; //@@ slot->value.instance.prototype = constructor; property = mxFunctionInstanceHome(slot); slot = property->next; property = fxNextTypeDispatchProperty(the, property, (txTypeDispatch*)dispatch, (txTypeAtomics*)atomics, XS_NO_ID, XS_INTERNAL_FLAG); property->next = slot; slot = fxLastProperty(the, slot); slot = fxNextIntegerProperty(the, slot, dispatch->size, mxID(_BYTES_PER_ELEMENT), XS_GET_ONLY); mxPop(); } mxPop(); } txInteger fxArgToByteLength(txMachine* the, txInteger argi, txInteger length) { txSlot *arg = mxArgv(argi); if ((mxArgc > argi) && (arg->kind != XS_UNDEFINED_KIND)) { txNumber value; if (XS_INTEGER_KIND == arg->kind) { txInteger value = arg->value.integer; if (value < 0) mxRangeError("out of range byteLength"); return value; } value = c_trunc(fxToNumber(the, arg)); if (c_isnan(value)) return 0; if ((value < 0) || (0x7FFFFFFF < value)) mxRangeError("out of range byteLength"); return (txInteger)value; } return length; } txSlot* fxArgToInstance(txMachine* the, txInteger i) { if (mxArgc > i) return fxToInstance(the, mxArgv(i)); mxTypeError("Cannot coerce undefined to object"); return C_NULL; } txBoolean fxCheckLength(txMachine* the, txSlot* slot, txInteger* index) { txNumber number = fxToNumber(the, slot); txNumber check = c_trunc(number); if ((number == check) && (0 <= number) && (number <= 0x7FFFFFFF)) { *index = (txInteger)number; return 1 ; } return 0; } txSlot* fxCheckArrayBufferDetached(txMachine* the, txSlot* slot, txBoolean mutable) { slot = slot->value.reference->next; if (slot->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); if (mutable && (slot->flag & XS_DONT_SET_FLAG)) mxTypeError("ArrayBuffer instance is read-only"); return slot; } txSlot* 
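/* Added note: two conventions recur below. A detached ArrayBuffer is one whose
   arrayBuffer.address is C_NULL, and XS_DONT_SET_FLAG on the buffer slot marks
   it read-only; fxCheckArrayBufferDetached above throws for the former always
   and for the latter whenever a mutable buffer is required. */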
fxCheckArrayBufferInstance(txMachine* the, txSlot* slot) { if (slot->kind == XS_REFERENCE_KIND) { txSlot* instance = slot->value.reference; if (((slot = instance->next)) && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_ARRAY_BUFFER_KIND)) return instance; } mxTypeError("this is no ArrayBuffer instance"); return C_NULL; } void fxConstructArrayBufferResult(txMachine* the, txSlot* constructor, txInteger length) { txSlot* instance; if (constructor) mxPushSlot(constructor); else { mxPushSlot(mxThis); mxGetID(mxID(_constructor)); } fxToSpeciesConstructor(the, &mxArrayBufferConstructor); mxNew(); mxPushInteger(length); mxRunCount(1); if (the->stack->kind != XS_REFERENCE_KIND) mxTypeError("no instance"); instance = the->stack->value.reference; if (!(instance->next) || (instance->next->kind != XS_ARRAY_BUFFER_KIND)) mxTypeError("no ArrayBuffer instance"); if (!constructor && (mxThis->value.reference == instance)) mxTypeError("same ArrayBuffer instance"); if (instance->next->next->value.bufferInfo.length < length) mxTypeError("smaller ArrayBuffer instance"); mxPullSlot(mxResult); } txSlot* fxNewArrayBufferInstance(txMachine* the) { txSlot* instance; txSlot* property; instance = fxNewObjectInstance(the); property = instance->next = fxNewSlot(the); property->flag = XS_INTERNAL_FLAG; property->kind = XS_ARRAY_BUFFER_KIND; property->value.arrayBuffer.address = C_NULL; property->value.arrayBuffer.detachKey = C_NULL; property = property->next = fxNewSlot(the); property->flag = XS_INTERNAL_FLAG; property->kind = XS_BUFFER_INFO_KIND; property->value.bufferInfo.length = 0; property->value.bufferInfo.maxLength = -1; return instance; } void fx_ArrayBuffer(txMachine* the) { txSlot* instance; txInteger byteLength; txInteger maxByteLength = -1; txSlot* property; if (mxIsUndefined(mxTarget)) mxTypeError("call: ArrayBuffer"); mxPushSlot(mxTarget); fxGetPrototypeFromConstructor(the, &mxArrayBufferPrototype); instance = fxNewArrayBufferInstance(the); mxPullSlot(mxResult); byteLength = fxArgToByteLength(the, 0, 0); if ((mxArgc > 1) && mxIsReference(mxArgv(1))) { mxPushSlot(mxArgv(1)); mxGetID(mxID(_maxByteLength)); mxPullSlot(mxArgv(1)); maxByteLength = fxArgToByteLength(the, 1, -1); } if (maxByteLength >= 0) { if (byteLength > maxByteLength) mxRangeError("byteLength > maxByteLength"); } property = instance->next; property->value.arrayBuffer.address = fxNewChunk(the, byteLength); c_memset(property->value.arrayBuffer.address, 0, byteLength); property = property->next; property->value.bufferInfo.length = byteLength; property->value.bufferInfo.maxLength = maxByteLength; } void fx_ArrayBuffer_fromBigInt(txMachine* the) { txU4 minBytes = 0; txBoolean sign = 0; int endian = EndianBig; if (mxArgc < 1) mxTypeError("no argument"); if (mxArgc > 1) { txInteger m = fxToInteger(the, mxArgv(1)); if (m < 0) mxRangeError("minBytes < 0"); minBytes = (txU4)m; } if ((mxArgc > 2) && fxToBoolean(the, mxArgv(2))) sign = 1; if ((mxArgc > 3) && fxToBoolean(the, mxArgv(3))) endian = EndianLittle; if (gxTypeBigInt.toArrayBuffer) { gxTypeBigInt.toArrayBuffer(the, mxArgv(0), minBytes, sign, endian); } else { mxUnknownError("not built-in"); } } #ifndef mxCESU8 void fx_ArrayBuffer_fromString(txMachine* the) { txSize length; if (mxArgc < 1) mxTypeError("no argument"); length = mxStringLength(fxToString(the, mxArgv(0))); fxConstructArrayBufferResult(the, mxThis, length); c_memcpy(mxResult->value.reference->next->value.arrayBuffer.address, mxArgv(0)->value.string, length); } #endif void fx_ArrayBuffer_isView(txMachine* the) { txSlot* slot; 
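/* Added note: per the ArrayBuffer.isView specification, only references whose
   internal slot is a DataView or a TypedArray answer true; plain ArrayBuffers
   and all non-reference values fall through to false. */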
mxResult->kind = XS_BOOLEAN_KIND; mxResult->value.boolean = 0; if (mxArgc > 0) { slot = mxArgv(0); if (slot->kind == XS_REFERENCE_KIND) { slot = slot->value.reference; if (slot->next) { slot = slot->next; if ((slot->kind == XS_DATA_VIEW_KIND) || (slot->kind == XS_TYPED_ARRAY_KIND)) { mxResult->value.boolean = 1; } } } } } void fx_ArrayBuffer_prototype_get_byteLength(txMachine* the) { txSlot* instance = fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; mxResult->kind = XS_INTEGER_KIND; if (arrayBuffer->value.arrayBuffer.address == C_NULL) mxResult->value.integer = 0; else mxResult->value.integer = bufferInfo->value.bufferInfo.length; } void fx_ArrayBuffer_prototype_get_maxByteLength(txMachine* the) { txSlot* instance = fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; mxResult->kind = XS_INTEGER_KIND; if (arrayBuffer->value.arrayBuffer.address == C_NULL) mxResult->value.integer = 0; else if (bufferInfo->value.bufferInfo.maxLength >= 0) mxResult->value.integer = bufferInfo->value.bufferInfo.maxLength; else mxResult->value.integer = bufferInfo->value.bufferInfo.length; } void fx_ArrayBuffer_prototype_get_resizable(txMachine* the) { txSlot* instance = fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; mxResult->kind = XS_BOOLEAN_KIND; mxResult->value.boolean = (bufferInfo->value.bufferInfo.maxLength >= 0) ? 1 : 0; } void fx_ArrayBuffer_prototype_concat(txMachine* the) { txSlot* instance = fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = instance->next; txSlot* bufferInfo = arrayBuffer->next; txInteger length = bufferInfo->value.bufferInfo.length; txInteger c = mxArgc, i = 0; txByte* address; txSlot* slot; while (i < c) { arrayBuffer = C_NULL; bufferInfo = C_NULL; slot = mxArgv(i); if (slot->kind == XS_REFERENCE_KIND) { slot = slot->value.reference->next; if (slot && (slot->kind == XS_ARRAY_BUFFER_KIND)) { arrayBuffer = slot; bufferInfo = slot->next; } } if (arrayBuffer) length = fxAddChunkSizes(the, length, bufferInfo->value.bufferInfo.length); else mxTypeError("arguments[%ld] is no ArrayBuffer instance", i); i++; } fxConstructArrayBufferResult(the, C_NULL, length); arrayBuffer = instance->next; bufferInfo = arrayBuffer->next; address = mxResult->value.reference->next->value.arrayBuffer.address; length = bufferInfo->value.bufferInfo.length; c_memcpy(address, arrayBuffer->value.arrayBuffer.address, length); address += length; i = 0; while (i < c) { arrayBuffer = mxArgv(i)->value.reference->next; bufferInfo = arrayBuffer->next; length = bufferInfo->value.bufferInfo.length; c_memcpy(address, arrayBuffer->value.arrayBuffer.address, length); address += length; i++; } } void fx_ArrayBuffer_prototype_resize(txMachine* the) { /* txSlot* instance = */ fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = fxCheckArrayBufferDetached(the, mxThis, XS_MUTABLE); txSlot* bufferInfo = arrayBuffer->next; txInteger maxByteLength, oldByteLength, newByteLength; txByte* chunk; maxByteLength = bufferInfo->value.bufferInfo.maxLength; if (maxByteLength < 0) mxTypeError("not resizable"); oldByteLength = bufferInfo->value.bufferInfo.length; newByteLength = fxArgToByteLength(the, 0, 0); if (newByteLength > maxByteLength) mxRangeError("newLength > maxByteLength"); chunk = (txByte*)fxRenewChunk(the, arrayBuffer->value.arrayBuffer.address, newByteLength); if (!chunk) { chunk = 
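/* Added note: fxRenewChunk above returned C_NULL, so the chunk could not be
   resized in place; allocate a fresh one and preserve
   min(newByteLength, oldByteLength) bytes before the zero-fill below. */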
(txByte*)fxNewChunk(the, newByteLength); c_memcpy(chunk, arrayBuffer->value.arrayBuffer.address, (newByteLength < oldByteLength) ? newByteLength : oldByteLength); } if (newByteLength > oldByteLength) c_memset(chunk + oldByteLength, 0, newByteLength - oldByteLength); arrayBuffer->value.arrayBuffer.address = chunk; bufferInfo->value.bufferInfo.length = newByteLength; } void fx_ArrayBuffer_prototype_slice(txMachine* the) { /* txSlot* instance = */ fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = fxCheckArrayBufferDetached(the, mxThis, XS_IMMUTABLE); txSlot* bufferInfo = arrayBuffer->next; txInteger length = bufferInfo->value.bufferInfo.length; txInteger start = (txInteger)fxArgToIndex(the, 0, 0, length); txInteger stop = (txInteger)fxArgToIndex(the, 1, length, length); txSlot* resultBuffer; if (stop < start) stop = start; fxConstructArrayBufferResult(the, C_NULL, stop - start); resultBuffer = fxCheckArrayBufferDetached(the, mxResult, XS_MUTABLE); c_memcpy(resultBuffer->value.arrayBuffer.address, arrayBuffer->value.arrayBuffer.address + start, stop - start); } void fx_ArrayBuffer_prototype_transfer(txMachine* the) { /* txSlot* instance = */ fxCheckArrayBufferInstance(the, mxThis); txSlot* arrayBuffer = fxCheckArrayBufferDetached(the, mxThis, XS_MUTABLE); txSlot* bufferInfo = arrayBuffer->next; txInteger oldByteLength = bufferInfo->value.bufferInfo.length; txInteger newByteLength = fxArgToByteLength(the, 0, oldByteLength); txSlot* resultBuffer; fxConstructArrayBufferResult(the, C_NULL, newByteLength); resultBuffer = fxCheckArrayBufferDetached(the, mxResult, XS_MUTABLE); c_memcpy(resultBuffer->value.arrayBuffer.address, arrayBuffer->value.arrayBuffer.address, (newByteLength < oldByteLength) ? newByteLength : oldByteLength); if (newByteLength > oldByteLength) c_memset(resultBuffer->value.arrayBuffer.address + oldByteLength, 0, newByteLength - oldByteLength); arrayBuffer->value.arrayBuffer.address = C_NULL; bufferInfo->value.bufferInfo.length = 0; } txSlot* fxCheckDataViewInstance(txMachine* the, txSlot* slot) { if (slot->kind == XS_REFERENCE_KIND) { txSlot* instance = slot->value.reference; if (((slot = instance->next)) && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_DATA_VIEW_KIND)) return instance; } mxTypeError("this is no DataView instance"); return C_NULL; } txInteger fxCheckDataViewSize(txMachine* the, txSlot* view, txSlot* buffer, txBoolean mutable) { txInteger size = view->value.dataView.size; txSlot* arrayBuffer = buffer->value.reference->next; txSlot* bufferInfo = arrayBuffer->next; if (arrayBuffer->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); if (mutable && (arrayBuffer->flag & XS_DONT_SET_FLAG)) mxTypeError("read-only buffer"); if (bufferInfo->value.bufferInfo.maxLength >= 0) { txInteger offset = view->value.dataView.offset; txInteger byteLength = bufferInfo->value.bufferInfo.length; if (offset > byteLength) mxTypeError("out of bounds view"); else if (size < 0) size = byteLength - offset; else if (offset + size > byteLength) mxTypeError("out of bounds view"); } return size; } txSlot* fxGetBufferInfo(txMachine* the, txSlot* buffer) { txSlot* arrayBuffer = buffer->value.reference->next; txSlot* bufferInfo = arrayBuffer->next; if (arrayBuffer->kind == XS_ARRAY_BUFFER_KIND) { if (arrayBuffer->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); return bufferInfo; } if (arrayBuffer->kind == XS_HOST_KIND) { txInteger byteLength; if (bufferInfo && (bufferInfo->kind == XS_BUFFER_INFO_KIND)) return bufferInfo; mxPushSlot(buffer); 
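/* Added note: host-buffer fallback. A host object arriving here has no cached
   bufferInfo, so its byteLength property is read once through the normal get
   path and memoized in a new XS_BUFFER_INFO_KIND slot; later bounds checks
   then avoid any script-visible property access. */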
mxGetID(mxID(_byteLength)); if (!fxCheckLength(the, the->stack, &byteLength)) mxTypeError("invalid byteLength"); fxReport(the, "# Use xsSetHostBuffer instead of xsSetHostData\n"); mxPop(); bufferInfo = fxNewSlot(the); bufferInfo->next = arrayBuffer->next; bufferInfo->flag = XS_INTERNAL_FLAG; bufferInfo->kind = XS_BUFFER_INFO_KIND; bufferInfo->value.bufferInfo.length = byteLength; bufferInfo->value.bufferInfo.maxLength = -1; arrayBuffer->next = bufferInfo; return bufferInfo; } mxTypeError("invalid buffer"); return C_NULL; } txInteger fxGetDataViewSize(txMachine* the, txSlot* view, txSlot* buffer) { txInteger size = view->value.dataView.size; txSlot* arrayBuffer = buffer->value.reference->next; txSlot* bufferInfo = arrayBuffer->next; if (arrayBuffer->value.arrayBuffer.address == C_NULL) return 0; if (bufferInfo->value.bufferInfo.maxLength >= 0) { txInteger offset = view->value.dataView.offset; txInteger byteLength = bufferInfo->value.bufferInfo.length; if (offset > byteLength) size = 0; else if (size < 0) size = byteLength - offset; else if (offset + size > byteLength) size = 0; } return size; } txSlot* fxNewDataViewInstance(txMachine* the) { txSlot* instance; txSlot* property; instance = fxNewObjectInstance(the); property = instance->next = fxNewSlot(the); property->flag = XS_INTERNAL_FLAG; property->kind = XS_DATA_VIEW_KIND; property->value.dataView.offset = 0; property->value.dataView.size = 0; property = fxNextNullProperty(the, property, XS_NO_ID, XS_INTERNAL_FLAG); return instance; } void fx_DataView(txMachine* the) { txSlot* slot; txBoolean flag = 0; txInteger offset, size; txSlot* info; txSlot* instance; txSlot* view; txSlot* buffer; if (mxIsUndefined(mxTarget)) mxTypeError("call: DataView"); if ((mxArgc > 0) && (mxArgv(0)->kind == XS_REFERENCE_KIND)) { slot = mxArgv(0)->value.reference->next; if (slot && ((slot->kind == XS_ARRAY_BUFFER_KIND) || (slot->kind == XS_HOST_KIND))) { flag = 1; } } if (!flag) mxTypeError("buffer is no ArrayBuffer instance"); offset = fxArgToByteLength(the, 1, 0); info = fxGetBufferInfo(the, mxArgv(0)); if (info->value.bufferInfo.length < offset) mxRangeError("out of range byteOffset %ld", offset); size = fxArgToByteLength(the, 2, -1); if (size >= 0) { txInteger end = offset + size; if ((info->value.bufferInfo.length < end) || (end < offset)) mxRangeError("out of range byteLength %ld", size); } else { if (info->value.bufferInfo.maxLength < 0) size = info->value.bufferInfo.length - offset; } mxPushSlot(mxTarget); fxGetPrototypeFromConstructor(the, &mxDataViewPrototype); instance = fxNewDataViewInstance(the); mxPullSlot(mxResult); view = instance->next; buffer = view->next; buffer->kind = XS_REFERENCE_KIND; buffer->value.reference = mxArgv(0)->value.reference; info = fxGetBufferInfo(the, buffer); if (info->value.bufferInfo.maxLength >= 0) { if (info->value.bufferInfo.length < offset) mxRangeError("out of range byteOffset %ld", offset); else if (size >= 0) { txInteger end = offset + size; if ((info->value.bufferInfo.length < end) || (end < offset)) mxRangeError("out of range byteLength %ld", size); } } view->value.dataView.offset = offset; view->value.dataView.size = size; } void fx_DataView_prototype_buffer_get(txMachine* the) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* buffer = view->next; mxResult->kind = buffer->kind; mxResult->value = buffer->value; } void fx_DataView_prototype_byteLength_get(txMachine* the) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* 
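/* Added note: fxCheckDataViewSize reports the live size of the view. For
   length-tracking views over resizable buffers (dataView.size < 0) the size is
   recomputed as byteLength - offset, and a view pushed out of bounds by a
   shrinking resize throws instead of returning a stale size. */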
buffer = view->next; txInteger size = fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = size; } void fx_DataView_prototype_byteOffset_get(txMachine* the) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* buffer = view->next; fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = view->value.dataView.offset; } void fx_DataView_prototype_get(txMachine* the, txNumber delta, txTypeCallback getter) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* buffer = view->next; txInteger offset = fxArgToByteLength(the, 0, 0); txInteger size; int endian = EndianBig; if ((mxArgc > 1) && fxToBoolean(the, mxArgv(1))) endian = EndianLittle; size = fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); if ((size < delta) || ((size - delta) < offset)) mxRangeError("out of range byteOffset"); offset += view->value.dataView.offset; (*getter)(the, buffer->value.reference->next, offset, mxResult, endian); } void fx_DataView_prototype_getBigInt64(txMachine* the) { fx_DataView_prototype_get(the, 8, fxBigInt64Getter); } void fx_DataView_prototype_getBigUint64(txMachine* the) { fx_DataView_prototype_get(the, 8, fxBigUint64Getter); } void fx_DataView_prototype_getFloat32(txMachine* the) { fx_DataView_prototype_get(the, 4, fxFloat32Getter); } void fx_DataView_prototype_getFloat64(txMachine* the) { fx_DataView_prototype_get(the, 8, fxFloat64Getter); } void fx_DataView_prototype_getInt8(txMachine* the) { fx_DataView_prototype_get(the, 1, fxInt8Getter); } void fx_DataView_prototype_getInt16(txMachine* the) { fx_DataView_prototype_get(the, 2, fxInt16Getter); } void fx_DataView_prototype_getInt32(txMachine* the) { fx_DataView_prototype_get(the, 4, fxInt32Getter); } void fx_DataView_prototype_getUint8(txMachine* the) { fx_DataView_prototype_get(the, 1, fxUint8Getter); } void fx_DataView_prototype_getUint16(txMachine* the) { fx_DataView_prototype_get(the, 2, fxUint16Getter); } void fx_DataView_prototype_getUint32(txMachine* the) { fx_DataView_prototype_get(the, 4, fxUint32Getter); } void fx_DataView_prototype_set(txMachine* the, txNumber delta, txTypeCoerce coercer, txTypeCallback setter) { txSlot* instance = fxCheckDataViewInstance(the, mxThis); txSlot* view = instance->next; txSlot* buffer = view->next; txInteger offset = fxArgToByteLength(the, 0, 0); txInteger size; int endian = EndianBig; txSlot* value; if (mxArgc > 1) mxPushSlot(mxArgv(1)); else mxPushUndefined(); value = the->stack; (*coercer)(the, value); if ((mxArgc > 2) && fxToBoolean(the, mxArgv(2))) endian = EndianLittle; size = fxCheckDataViewSize(the, view, buffer, XS_MUTABLE); if ((size < delta) || ((size - delta) < offset)) mxRangeError("out of range byteOffset"); offset += view->value.dataView.offset; (*setter)(the, buffer->value.reference->next, offset, value, endian); mxPop(); } void fx_DataView_prototype_setBigInt64(txMachine* the) { fx_DataView_prototype_set(the, 8, fxBigIntCoerce, fxBigInt64Setter); } void fx_DataView_prototype_setBigUint64(txMachine* the) { fx_DataView_prototype_set(the, 8, fxBigIntCoerce, fxBigUint64Setter); } void fx_DataView_prototype_setFloat32(txMachine* the) { fx_DataView_prototype_set(the, 4, fxNumberCoerce, fxFloat32Setter); } void fx_DataView_prototype_setFloat64(txMachine* the) { fx_DataView_prototype_set(the, 8, fxNumberCoerce, fxFloat64Setter); } void fx_DataView_prototype_setInt8(txMachine* the) { 
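/* Added note: every typed accessor delegates to fx_DataView_prototype_set or
   _get with a (width, coercer, setter/getter) triple. The shared bounds check
   is phrased as (size < delta) || ((size - delta) < offset) so the comparison
   cannot overflow, which an offset + delta form could. */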
fx_DataView_prototype_set(the, 1, fxIntCoerce, fxInt8Setter); } void fx_DataView_prototype_setInt16(txMachine* the) { fx_DataView_prototype_set(the, 2, fxIntCoerce, fxInt16Setter); } void fx_DataView_prototype_setInt32(txMachine* the) { fx_DataView_prototype_set(the, 4, fxIntCoerce, fxInt32Setter); } void fx_DataView_prototype_setUint8(txMachine* the) { fx_DataView_prototype_set(the, 1, fxUintCoerce, fxUint8Setter); } void fx_DataView_prototype_setUint16(txMachine* the) { fx_DataView_prototype_set(the, 2, fxUintCoerce, fxUint16Setter); } void fx_DataView_prototype_setUint32(txMachine* the) { fx_DataView_prototype_set(the, 4, fxUintCoerce, fxUint32Setter); } #define mxTypedArrayDeclarations \ txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); \ txSlot* dispatch = instance->next; \ txSlot* view = dispatch->next; \ txSlot* buffer = view->next; \ txInteger length = fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE) >> dispatch->value.typedArray.dispatch->shift #define mxMutableTypedArrayDeclarations \ txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); \ txSlot* dispatch = instance->next; \ txSlot* view = dispatch->next; \ txSlot* buffer = view->next; \ txInteger length = fxCheckDataViewSize(the, view, buffer, XS_MUTABLE) >> dispatch->value.typedArray.dispatch->shift #define mxResultTypedArrayDeclarations \ txSlot* resultInstance = fxCheckTypedArrayInstance(the, mxResult); \ txSlot* resultDispatch = resultInstance->next; \ txSlot* resultView = resultDispatch->next; \ txSlot* resultBuffer = resultView->next; \ txInteger resultLength = fxCheckDataViewSize(the, resultView, resultBuffer, XS_MUTABLE) >> resultDispatch->value.typedArray.dispatch->shift void fxTypedArrayGetter(txMachine* the) { txSlot* instance = fxToInstance(the, mxThis); txSlot* dispatch; while (instance) { if (instance->flag & XS_EXOTIC_FLAG) { dispatch = instance->next; if (dispatch->ID == XS_TYPED_ARRAY_BEHAVIOR) break; } instance = fxGetPrototype(the, instance); } if (instance) { txID id = the->scratch.value.at.id; txIndex index = the->scratch.value.at.index; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; if ((!id) && (index < length)) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (index << shift), mxResult, EndianNative); } } } void fxTypedArraySetter(txMachine* the) { txSlot* instance = fxToInstance(the, mxThis); txSlot* dispatch; while (instance) { if (instance->flag & XS_EXOTIC_FLAG) { dispatch = instance->next; if (dispatch->ID == XS_TYPED_ARRAY_BEHAVIOR) break; } instance = fxGetPrototype(the, instance); } if (instance) { txSlot* slot = mxArgv(0); txID id = the->scratch.value.at.id; txIndex index = the->scratch.value.at.index; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txSlot* arrayBuffer = buffer->value.reference->next; txIndex length; dispatch->value.typedArray.dispatch->coerce(the, slot); if (arrayBuffer->flag & XS_DONT_SET_FLAG) mxTypeError("read-only buffer"); length = fxGetDataViewSize(the, view, buffer) >> shift; if ((!id) && (index < length)) { (*dispatch->value.typedArray.dispatch->setter)(the, arrayBuffer, view->value.dataView.offset + (index << shift), slot, EndianNative); } } } txBoolean fxTypedArrayDefineOwnProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* slot, txFlag mask) { if ((!id) || 
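/* Added note: id == 0 means the key is already a plain array index;
   fxIsCanonicalIndex also routes canonical numeric strings such as "1.5" or
   "-0" into this branch, where they are treated as out-of-bounds element keys
   per the integer-indexed exotic object rules. */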
fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txSlot* arrayBuffer = buffer->value.reference->next; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; if (id || (index >= length)) return 0; if ((mask & XS_DONT_DELETE_FLAG) && (slot->flag & XS_DONT_DELETE_FLAG)) return 0; if ((mask & XS_DONT_ENUM_FLAG) && (slot->flag & XS_DONT_ENUM_FLAG)) return 0; if (mask & XS_ACCESSOR_FLAG) return 0; if ((mask & XS_DONT_SET_FLAG) && (slot->flag & XS_DONT_SET_FLAG)) return 0; if (slot->kind != XS_UNINITIALIZED_KIND) { dispatch->value.typedArray.dispatch->coerce(the, slot); if (arrayBuffer->flag & XS_DONT_SET_FLAG) mxTypeError("read-only buffer"); length = fxGetDataViewSize(the, view, buffer) >> shift; if (index < length) (*dispatch->value.typedArray.dispatch->setter)(the, arrayBuffer, view->value.dataView.offset + (index << shift), slot, EndianNative); } return 1; } return fxOrdinaryDefineOwnProperty(the, instance, id, index, slot, mask); } txBoolean fxTypedArrayDeleteProperty(txMachine* the, txSlot* instance, txID id, txIndex index) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; return ((!id) && (index < length)) ? 0 : 1; } return fxOrdinaryDeleteProperty(the, instance, id, index); } txBoolean fxTypedArrayGetOwnProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* slot) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; if ((!id) && (index < length)) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (index << shift), slot, EndianNative); return 1; } slot->kind = XS_UNDEFINED_KIND; slot->flag = XS_NO_FLAG; return 0; } return fxOrdinaryGetOwnProperty(the, instance, id, index, slot); } txSlot* fxTypedArrayGetProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txFlag flag) { if ((!id) || fxIsCanonicalIndex(the, id)) { the->scratch.value.at.id = id; the->scratch.value.at.index = index; return &mxTypedArrayAccessor; } return fxOrdinaryGetProperty(the, instance, id, index, flag); } txBoolean fxTypedArrayGetPropertyValue(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* receiver, txSlot* value) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; if ((!id) && (index < length)) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (index << shift), value, EndianNative); return 1; } value->kind = XS_UNDEFINED_KIND; return 0; } return fxOrdinaryGetPropertyValue(the, instance, id, index, receiver, value); } txBoolean fxTypedArrayHasProperty(txMachine* the, txSlot* instance, txID id, txIndex index) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = 
dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; return ((!id) && (index < length)) ? 1 : 0; } return fxOrdinaryHasProperty(the, instance, id, index); } void fxTypedArrayOwnKeys(txMachine* the, txSlot* instance, txFlag flag, txSlot* keys) { if (flag & XS_EACH_NAME_FLAG) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txIndex length = fxGetDataViewSize(the, view, buffer) >> shift; if (length) { txIndex index; for (index = 0; index < length; index++) keys = fxQueueKey(the, 0, index, keys); } } fxOrdinaryOwnKeys(the, instance, flag, keys); } txSlot* fxTypedArraySetProperty(txMachine* the, txSlot* instance, txID id, txIndex index, txFlag flag) { if ((!id) || fxIsCanonicalIndex(the, id)) { the->scratch.value.at.id = id; the->scratch.value.at.index = index; return &mxTypedArrayAccessor; } return fxOrdinarySetProperty(the, instance, id, index, flag); } txBoolean fxTypedArraySetPropertyValue(txMachine* the, txSlot* instance, txID id, txIndex index, txSlot* value, txSlot* receiver) { if ((!id) || fxIsCanonicalIndex(the, id)) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txSlot* arrayBuffer = buffer->value.reference->next; txIndex length; dispatch->value.typedArray.dispatch->coerce(the, value); if (arrayBuffer->flag & XS_DONT_SET_FLAG) mxTypeError("read-only buffer"); length = fxGetDataViewSize(the, view, buffer) >> shift; if ((!id) && (index < length)) { (*dispatch->value.typedArray.dispatch->setter)(the, buffer->value.reference->next, view->value.dataView.offset + (index << shift), value, EndianNative); } return 1; } return fxOrdinarySetPropertyValue(the, instance, id, index, value, receiver); } void fxCallTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index, txSlot* item) { /* THIS */ if (mxArgc > 1) mxPushSlot(mxArgv(1)); else mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushSlot(mxThis); mxGetIndex(index); if (item) { item->kind = the->stack->kind; item->value = the->stack->value; } mxPushInteger(index); mxPushSlot(mxThis); mxRunCount(3); } txSlot* fxCheckTypedArrayInstance(txMachine* the, txSlot* slot) { if (slot->kind == XS_REFERENCE_KIND) { txSlot* instance = slot->value.reference; if (((slot = instance->next)) && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_TYPED_ARRAY_KIND)) return instance; } mxTypeError("this is no TypedArray instance"); return C_NULL; } int fxCompareTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index) { txSlot* slot = the->stack; int result; /* THIS */ mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushUndefined(); (*dispatch->value.typedArray.dispatch->getter)(the, data, view->value.dataView.offset + (index << dispatch->value.typedArray.dispatch->shift), the->stack, EndianNative); mxPushSlot(slot); mxRunCount(2); if (the->stack->kind == XS_INTEGER_KIND) result = the->stack->value.integer; else { txNumber number = fxToNumber(the, the->stack); result = (number < 0) ? -1 : (number > 0) ? 
1 : 0; } mxPop(); if (data->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); return result; } txSlot* fxConstructTypedArray(txMachine* the) { txSlot* prototype; txSlot* dispatch; txSlot* instance; if (mxIsUndefined(mxTarget)) mxTypeError("call: TypedArray"); dispatch = mxFunctionInstanceHome(mxFunction->value.reference); dispatch = dispatch->next; prototype = mxBehaviorGetProperty(the, mxFunction->value.reference, mxID(_prototype), 0, XS_ANY); if (!dispatch || (dispatch->kind != XS_TYPED_ARRAY_KIND)) mxTypeError("new: TypedArray"); mxPushSlot(mxTarget); fxGetPrototypeFromConstructor(the, prototype); instance = fxNewTypedArrayInstance(the, dispatch->value.typedArray.dispatch, dispatch->value.typedArray.atomics); mxPullSlot(mxResult); return instance; } void fxCreateTypedArraySpecies(txMachine* the) { txSlot* instance = fxToInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* constructor = &the->stackPrototypes[-1 - (txInteger)dispatch->value.typedArray.dispatch->constructorID]; mxPushSlot(mxThis); mxGetID(mxID(_constructor)); fxToSpeciesConstructor(the, constructor); mxNew(); } txSlot* fxGetTypedArrayValue(txMachine* the, txSlot* instance, txInteger index) { txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* data = mxIsReference(buffer) ? fxCheckArrayBufferDetached(the, buffer, XS_IMMUTABLE) : C_NULL; txU2 shift = dispatch->value.typedArray.dispatch->shift; index <<= shift; if ((0 <= index) && ((index + (1 << shift)) <= view->value.dataView.size)) { (*dispatch->value.typedArray.dispatch->getter)(the, data, view->value.dataView.offset + index, &(the->scratch), EndianNative); return &the->scratch; } return C_NULL; } void fxReduceTypedArrayItem(txMachine* the, txSlot* function, txSlot* dispatch, txSlot* view, txSlot* data, txInteger index) { /* THIS */ mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushSlot(mxResult); mxPushSlot(mxThis); mxGetIndex(index); mxPushInteger(index); mxPushSlot(mxThis); mxRunCount(4); mxPullSlot(mxResult); } txSlot* fxNewTypedArrayInstance(txMachine* the, txTypeDispatch* dispatch, txTypeAtomics* atomics) { txSlot* instance; txSlot* property; instance = fxNewObjectInstance(the); instance->flag |= XS_EXOTIC_FLAG; property = fxNextTypeDispatchProperty(the, instance, dispatch, atomics, XS_TYPED_ARRAY_BEHAVIOR, XS_INTERNAL_FLAG); property = property->next = fxNewSlot(the); property->flag = XS_INTERNAL_FLAG; property->kind = XS_DATA_VIEW_KIND; property->value.dataView.offset = 0; property->value.dataView.size = 0; property = fxNextNullProperty(the, property, XS_NO_ID, XS_INTERNAL_FLAG); return instance; } void fx_TypedArray(txMachine* the) { txSlot* instance = fxConstructTypedArray(the); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* data = C_NULL; txU2 shift = dispatch->value.typedArray.dispatch->shift; txSlot* slot; if ((mxArgc > 0) && (mxArgv(0)->kind == XS_REFERENCE_KIND)) { slot = mxArgv(0)->value.reference->next; if (slot && ((slot->kind == XS_ARRAY_BUFFER_KIND) || (slot->kind == XS_HOST_KIND))) { txInteger offset = fxArgToByteLength(the, 1, 0); txInteger size; txSlot* info; if (offset & ((1 << shift) - 1)) mxRangeError("invalid byteOffset %ld", offset); size = fxArgToByteLength(the, 2, -1); info = fxGetBufferInfo(the, mxArgv(0)); if (size >= 0) { txInteger delta = size << shift; txInteger end = offset + delta; if ((info->value.bufferInfo.length < end) || (end < offset)) mxRangeError("out of 
range length %ld", size); size = delta; } else { if (info->value.bufferInfo.length & ((1 << shift) - 1)) mxRangeError("invalid byteLength %ld", info->value.bufferInfo.length); size = info->value.bufferInfo.length - offset; if (size < 0) mxRangeError("out of range byteLength %ld", size); if (info->value.bufferInfo.maxLength >= 0) size = -1; } view->value.dataView.offset = offset; view->value.dataView.size = size; buffer->kind = XS_REFERENCE_KIND; buffer->value.reference = mxArgv(0)->value.reference; } else if (slot && (slot->kind == XS_TYPED_ARRAY_KIND)) { txSlot* sourceDispatch = slot; txSlot* sourceView = sourceDispatch->next; txSlot* sourceBuffer = sourceView->next; txU2 sourceShift = sourceDispatch->value.typedArray.dispatch->shift; txInteger sourceLength = fxCheckDataViewSize(the, sourceView, sourceBuffer, XS_IMMUTABLE) >> sourceShift; txSlot* sourceData = sourceBuffer->value.reference->next; txInteger sourceDelta = sourceDispatch->value.typedArray.dispatch->size; txInteger sourceOffset = sourceView->value.dataView.offset; txInteger offset = 0; txInteger size = sourceLength << shift; /* THIS */ mxPushUninitialized(); /* FUNCTION */ mxPush(mxArrayBufferConstructor); /* TARGET */ if (sourceData->kind == XS_ARRAY_BUFFER_KIND) { mxPushSlot(sourceBuffer); mxGetID(mxID(_constructor)); fxToSpeciesConstructor(the, &mxArrayBufferConstructor); } else mxPush(mxArrayBufferConstructor); /* RESULT */ mxPushUndefined(); mxPushUninitialized(); mxPushUninitialized(); /* ARGUMENTS */ sourceLength = fxGetDataViewSize(the, sourceView, sourceBuffer) >> sourceShift; size = sourceLength << shift; mxPushInteger(size); mxRunCount(1); mxPullSlot(buffer); sourceLength = fxCheckDataViewSize(the, sourceView, sourceBuffer, XS_IMMUTABLE) >> sourceShift; size = sourceLength << shift; data = fxCheckArrayBufferDetached(the, buffer, XS_MUTABLE); view->value.dataView.offset = offset; view->value.dataView.size = size; if (dispatch == sourceDispatch) c_memcpy(data->value.arrayBuffer.address + offset, sourceData->value.arrayBuffer.address + sourceOffset, size); else { txBoolean contentType = (dispatch->value.typedArray.dispatch->constructorID == _BigInt64Array) || (dispatch->value.typedArray.dispatch->constructorID == _BigUint64Array); txBoolean sourceContentType = (sourceDispatch->value.typedArray.dispatch->constructorID == _BigInt64Array) || (sourceDispatch->value.typedArray.dispatch->constructorID == _BigUint64Array); if (contentType != sourceContentType) mxTypeError("incompatible content type"); mxPushUndefined(); while (offset < size) { (*sourceDispatch->value.typedArray.dispatch->getter)(the, sourceData, sourceOffset, the->stack, EndianNative); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*dispatch->value.typedArray.dispatch->setter)(the, data, offset, the->stack, EndianNative); sourceOffset += sourceDelta; offset += 1 << shift; } mxPop(); } } else { fx_TypedArray_from_object(the, instance, C_NULL, C_NULL); } } else { txInteger length = fxArgToByteLength(the, 0, 0); if (length & (((1 << shift) - 1) << (32 - shift))) mxRangeError("out of range byteLength"); length <<= shift; mxPush(mxArrayBufferConstructor); mxNew(); mxPushInteger(length); mxRunCount(1); mxPullSlot(buffer); view->value.dataView.offset = 0; view->value.dataView.size = length; } } void fx_TypedArray_from(txMachine* the) { txSlot* function = C_NULL; txSlot* _this = C_NULL; if (!mxIsReference(mxThis) || !(mxIsConstructor(mxThis->value.reference))) mxTypeError("this is no constructor"); if (mxArgc > 1) { txSlot* slot = mxArgv(1); if 
(!mxIsUndefined(slot)) { function = slot; if (!fxIsCallable(the, function)) mxTypeError("map is no function"); if (mxArgc > 2) _this = mxArgv(2); } } fx_TypedArray_from_object(the, C_NULL, function, _this); } void fx_TypedArray_from_object(txMachine* the, txSlot* instance, txSlot* function, txSlot* _this) { txSlot* stack = the->stack; txSlot* iterator; txSlot* next; txSlot* value; txSlot* list = C_NULL; txSlot* slot; txSlot* dispatch; txSlot* view; txSlot* buffer; txSlot* data; txU2 shift; txNumber length; mxTemporary(iterator); mxTemporary(next); if (fxGetIterator(the, mxArgv(0), iterator, next, 1)) { list = fxNewInstance(the); slot = list; length = 0; mxTemporary(value); while (fxIteratorNext(the, iterator, next, value)) { slot = fxNextSlotProperty(the, slot, value, XS_NO_ID, XS_NO_FLAG); length++; } } else { mxPushSlot(mxArgv(0)); mxGetID(mxID(_length)); length = fxToLength(the, the->stack); mxPop(); } if (instance) { dispatch = instance->next; view = dispatch->next; buffer = view->next; shift = dispatch->value.typedArray.dispatch->shift; mxPush(mxArrayBufferConstructor); mxNew(); mxPushNumber(length * dispatch->value.typedArray.dispatch->size); mxRunCount(1); mxPullSlot(buffer); data = fxCheckArrayBufferDetached(the, buffer, XS_MUTABLE); view->value.dataView.offset = 0; view->value.dataView.size = data->next->value.bufferInfo.length; } else { mxPushSlot(mxThis); mxNew(); mxPushNumber(length); mxRunCount(1); mxPullSlot(mxResult); instance = fxToInstance(the, mxResult); if (((slot = instance->next)) && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_TYPED_ARRAY_KIND)) { dispatch = instance->next; view = dispatch->next; buffer = view->next; data = fxCheckArrayBufferDetached(the, buffer, XS_MUTABLE); shift = dispatch->value.typedArray.dispatch->shift; if (view->value.dataView.size < (length * dispatch->value.typedArray.dispatch->size)) mxTypeError("too small TypedArray"); } else mxTypeError("no TypedArray"); } if (list) { txInteger index = 0; slot = list->next; while (slot) { /* ARG0 */ if (function) { /* THIS */ if (_this) mxPushSlot(_this); else mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushSlot(slot); mxPushInteger(index); mxRunCount(2); } else mxPushSlot(slot); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*dispatch->value.typedArray.dispatch->setter)(the, data, (index << shift), the->stack, EndianNative); mxPop(); index++; slot = slot->next; } } else { txInteger index = 0; txInteger count = (txInteger)length; while (index < count) { if (function) { /* THIS */ if (_this) mxPushSlot(_this); else mxPushUndefined(); /* FUNCTION */ mxPushSlot(function); mxCall(); /* ARGUMENTS */ mxPushSlot(mxArgv(0)); mxGetIndex(index); mxPushInteger(index); mxRunCount(2); } else { mxPushSlot(mxArgv(0)); mxGetIndex(index); } (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*dispatch->value.typedArray.dispatch->setter)(the, data, (index << shift), the->stack, EndianNative); mxPop(); index++; } } the->stack = stack; } void fx_TypedArray_of(txMachine* the) { txInteger count = mxArgc; txInteger index = 0; mxPushSlot(mxThis); mxNew(); mxPushInteger(count); mxRunCount(1); mxPullSlot(mxResult); { mxResultTypedArrayDeclarations; txU2 shift = resultDispatch->value.typedArray.dispatch->shift; if (resultLength < count) mxTypeError("insufficient TypedArray"); while (index < count) { (*resultDispatch->value.typedArray.dispatch->coerce)(the, mxArgv(index)); if (resultBuffer->value.arrayBuffer.address == C_NULL) mxTypeError("detached 
buffer"); (*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultView->value.dataView.offset + (index << shift), mxArgv(index), EndianNative); index++; } } } void fx_TypedArray_prototype_at(txMachine* the) { mxTypedArrayDeclarations; txInteger index = (mxArgc > 0) ? fxToInteger(the, mxArgv(0)) : 0; if (index < 0) index = length + index; if ((0 <= index) && (index < length)) { mxPushSlot(mxThis); mxGetIndex(index); mxPullSlot(mxResult); } } void fx_TypedArray_prototype_buffer_get(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; mxResult->kind = buffer->kind; mxResult->value = buffer->value; } void fx_TypedArray_prototype_byteLength_get(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = fxGetDataViewSize(the, view, buffer); } void fx_TypedArray_prototype_byteOffset_get(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txInteger offset = view->value.dataView.offset; txInteger size = view->value.dataView.size; txSlot* arrayBuffer = buffer->value.reference->next; txSlot* bufferInfo = arrayBuffer->next; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = 0; if (arrayBuffer->value.arrayBuffer.address == C_NULL) return; if (bufferInfo->value.bufferInfo.maxLength >= 0) { txInteger byteLength = bufferInfo->value.bufferInfo.length; if (offset > byteLength) return; size = (size < 0) ? byteLength : offset + size; if (size > byteLength) return; size -= offset; } mxResult->value.integer = offset; } void fx_TypedArray_prototype_copyWithin(txMachine* the) { mxMutableTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txInteger target = (txInteger)fxArgToIndex(the, 0, 0, length); txInteger start = (txInteger)fxArgToIndex(the, 1, 0, length); txInteger end = (txInteger)fxArgToIndex(the, 2, length, length); txInteger count = end - start; fxCheckArrayBufferDetached(the, buffer, XS_MUTABLE); if (count > length - target) count = length - target; if (count > 0) { txByte* address = buffer->value.reference->next->value.arrayBuffer.address + view->value.dataView.offset; c_memmove(address + (target * delta), address + (start * delta), count * delta); mxMeterSome((txU4)count * 2); } mxResult->kind = mxThis->kind; mxResult->value = mxThis->value; } void fx_TypedArray_prototype_entries(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* property; fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxPush(mxArrayIteratorPrototype); property = fxLastProperty(the, fxNewIteratorInstance(the, mxThis, mxID(_Array))); property = fxNextIntegerProperty(the, property, 2, XS_NO_ID, XS_INTERNAL_FLAG); mxPullSlot(mxResult); } void fx_TypedArray_prototype_every(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; mxResult->kind = XS_BOOLEAN_KIND; mxResult->value.boolean = 1; while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); mxResult->value.boolean = fxToBoolean(the, 
the->stack++); if (!mxResult->value.boolean) break; index++; } } void fx_TypedArray_prototype_fill(txMachine* the) { mxMutableTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txInteger start = (txInteger)fxArgToIndex(the, 1, 0, length); txInteger end = (txInteger)fxArgToIndex(the, 2, length, length); start *= delta; end *= delta; start += view->value.dataView.offset; end += view->value.dataView.offset; if (mxArgc > 0) mxPushSlot(mxArgv(0)); else mxPushUndefined(); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); fxCheckDataViewSize(the, view, buffer, XS_MUTABLE); while (start < end) { (*dispatch->value.typedArray.dispatch->setter)(the, buffer->value.reference->next, start, the->stack, EndianNative); start += delta; } mxPop(); mxResult->kind = mxThis->kind; mxResult->value = mxThis->value; } void fx_TypedArray_prototype_filter(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txSlot* list = fxNewInstance(the); txSlot* slot = list; txInteger count = 0; txInteger index = 0; mxPushUndefined(); while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, the->stack); if (fxToBoolean(the, the->stack++)) { count++; slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); } index++; } mxPop(); fxCreateTypedArraySpecies(the); mxPushNumber(count); mxRunCount(1); mxPullSlot(mxResult); { mxResultTypedArrayDeclarations; txInteger resultOffset = 0; txInteger resultSize = resultDispatch->value.typedArray.dispatch->size; if (resultLength < count) mxTypeError("insufficient buffer"); slot = list->next; while (slot) { (*resultDispatch->value.typedArray.dispatch->coerce)(the, slot); (*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultOffset, slot, EndianNative); resultOffset += resultSize; slot = slot->next; } } mxPop(); } void fx_TypedArray_prototype_find(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; mxPushUndefined(); while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, the->stack); if (fxToBoolean(the, the->stack++)) { mxResult->kind = the->stack->kind; mxResult->value = the->stack->value; break; } index++; } mxPop(); } void fx_TypedArray_prototype_findIndex(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = -1; while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); if (fxToBoolean(the, the->stack++)) { mxResult->value.integer = index; break; } index++; } } void fx_TypedArray_prototype_findLast(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = length; mxPushUndefined(); while (index > 0) { index--; fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, the->stack); if (fxToBoolean(the, the->stack++)) { mxResult->kind = the->stack->kind; mxResult->value = the->stack->value; break; } } mxPop(); } void fx_TypedArray_prototype_findLastIndex(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = length; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = -1; while (index > 0) { index--; fxCallTypedArrayItem(the, function, dispatch, view, 
buffer->value.reference->next, index, C_NULL); if (fxToBoolean(the, the->stack++)) { mxResult->value.integer = index; break; } } } void fx_TypedArray_prototype_forEach(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); mxPop(); index++; } } void fx_TypedArray_prototype_includes(txMachine* the) { mxTypedArrayDeclarations; fxBoolean(the, mxResult, 0); if (length) { txInteger index = (txInteger)fxArgToIndex(the, 1, 0, length); txSlot* argument; if (mxArgc > 0) mxPushSlot(mxArgv(0)); else mxPushUndefined(); argument = the->stack; while (index < length) { mxPushSlot(mxThis); mxGetIndex(index); if (fxIsSameValue(the, the->stack++, argument, 1)) { mxResult->value.boolean = 1; break; } index++; } mxPop(); } } void fx_TypedArray_prototype_indexOf(txMachine* the) { mxTypedArrayDeclarations; fxInteger(the, mxResult, -1); if (length) { txInteger index = (txInteger)fxArgToIndex(the, 1, 0, length); txSlot* argument; if (mxArgc > 0) mxPushSlot(mxArgv(0)); else mxPushUndefined(); argument = the->stack; while (index < length) { mxPushSlot(mxThis); if (fxHasIndex(the, index)) { mxPushSlot(mxThis); mxGetIndex(index); if (fxIsSameSlot(the, the->stack++, argument)) { mxResult->value.integer = index; break; } } index++; } mxPop(); } } void fx_TypedArray_prototype_join(txMachine* the) { mxTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txInteger offset = view->value.dataView.offset; txInteger limit = offset + (length << dispatch->value.typedArray.dispatch->shift); txString string; txSlot* list = fxNewInstance(the); txSlot* slot = list; txBoolean comma = 0; txInteger size = 0; if ((mxArgc > 0) && (mxArgv(0)->kind != XS_UNDEFINED_KIND)) { mxPushSlot(mxArgv(0)); string = fxToString(the, the->stack); the->stack->kind += XS_KEY_KIND - XS_STRING_KIND; the->stack->value.key.sum = mxStringLength(the->stack->value.string); } else { mxPushStringX(","); the->stack->kind += XS_KEY_KIND - XS_STRING_KIND; the->stack->value.key.sum = 1; } length = offset + fxGetDataViewSize(the, view, buffer); while (offset < limit) { if (comma) { slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); size = fxAddChunkSizes(the, size, slot->value.key.sum); } else comma = 1; if (offset < length) { mxPushUndefined(); (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, offset, the->stack, EndianNative); slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); string = fxToString(the, slot); slot->kind += XS_KEY_KIND - XS_STRING_KIND; slot->value.key.sum = mxStringLength(string); size = fxAddChunkSizes(the, size, slot->value.key.sum); mxPop(); } offset += delta; } mxPop(); string = mxResult->value.string = fxNewChunk(the, fxAddChunkSizes(the, size, 1)); slot = list->next; while (slot) { c_memcpy(string, slot->value.key.string, slot->value.key.sum); string += slot->value.key.sum; slot = slot->next; } *string = 0; mxResult->kind = XS_STRING_KIND; mxPop(); } void fx_TypedArray_prototype_keys(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* property; fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxPush(mxArrayIteratorPrototype); property = fxLastProperty(the, fxNewIteratorInstance(the, mxThis, mxID(_Array))); property = 
fxNextIntegerProperty(the, property, 1, XS_NO_ID, XS_INTERNAL_FLAG); mxPullSlot(mxResult); } void fx_TypedArray_prototype_lastIndexOf(txMachine* the) { mxTypedArrayDeclarations; fxInteger(the, mxResult, -1); if (length) { txIndex index = (txIndex)fxArgToLastIndex(the, 1, length, length); txSlot* argument; if (mxArgc > 0) mxPushSlot(mxArgv(0)); else mxPushUndefined(); argument = the->stack; while (index > 0) { index--; mxPushSlot(mxThis); if (fxHasIndex(the, index)) { mxPushSlot(mxThis); mxGetIndex(index); if (fxIsSameSlot(the, the->stack++, argument)) { mxResult->value.integer = index; break; } } } mxPop(); } } void fx_TypedArray_prototype_length_get(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; mxResult->kind = XS_INTEGER_KIND; mxResult->value.integer = fxGetDataViewSize(the, view, buffer) >> shift; } void fx_TypedArray_prototype_map(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); fxCreateTypedArraySpecies(the); mxPushNumber(length); mxRunCount(1); mxPullSlot(mxResult); { mxResultTypedArrayDeclarations; txU2 shift = resultDispatch->value.typedArray.dispatch->shift; txInteger index = 0; if (resultLength < length) mxTypeError("insufficient buffer"); while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); if (resultBuffer->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); (*resultDispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultView->value.dataView.offset + (index << shift), the->stack, EndianNative); mxPop(); index++; } } } void fx_TypedArray_prototype_reduce(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; if (mxArgc > 1) *mxResult = *mxArgv(1); else if (index < length) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset, mxResult, EndianNative); index++; } else mxTypeError("no initial value"); while (index < length) { fxReduceTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index); index++; } } void fx_TypedArray_prototype_reduceRight(txMachine* the) { mxTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txSlot* function = fxArgToCallback(the, 0); txInteger index = length - 1; if (mxArgc > 1) *mxResult = *mxArgv(1); else if (index >= 0) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (index * delta), mxResult, EndianNative); index--; } else mxTypeError("no initial value"); while (index >= 0) { fxReduceTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index); index--; } } void fx_TypedArray_prototype_reverse(txMachine* the) { mxMutableTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; if (length) { txByte tmp; txByte* first = buffer->value.reference->next->value.arrayBuffer.address + view->value.dataView.offset; txByte* last = first + (length << dispatch->value.typedArray.dispatch->shift) - delta; txInteger offset; while (first < last) { for (offset = 0; offset < delta; offset++) { tmp = last[offset]; last[offset] = first[offset]; first[offset] = tmp; } first += 
delta; last -= delta; } mxMeterSome(length * 4); } mxResult->kind = mxThis->kind; mxResult->value = mxThis->value; } void fx_TypedArray_prototype_set(txMachine* the) { mxMutableTypedArrayDeclarations; txSlot* data = buffer->value.reference->next; txInteger delta = dispatch->value.typedArray.dispatch->size; txSlot* source = fxArgToInstance(the, 0); txInteger target = fxArgToByteLength(the, 1, 0); txInteger offset = view->value.dataView.offset + (target * delta); if (source->next && (source->next->kind == XS_TYPED_ARRAY_KIND)) { txSlot* sourceDispatch = source->next; txSlot* sourceView = sourceDispatch->next; txSlot* sourceBuffer = sourceView->next; txU2 shift = sourceDispatch->value.typedArray.dispatch->shift; txInteger sourceLength = fxCheckDataViewSize(the, sourceView, sourceBuffer, XS_IMMUTABLE) >> shift; txInteger sourceOffset = sourceView->value.dataView.offset; txSlot* sourceData = sourceBuffer->value.reference->next; txInteger limit = offset + (sourceLength * delta); if ((target < 0) || (length - sourceLength < target)) mxRangeError("invalid offset"); if (data == sourceData) { txSlot* resultBuffer; mxPush(mxArrayBufferConstructor); mxNew(); mxPushInteger(sourceLength << shift); mxRunCount(1); resultBuffer = the->stack->value.reference->next; c_memcpy(resultBuffer->value.arrayBuffer.address, sourceData->value.arrayBuffer.address + sourceOffset, sourceLength << shift); sourceData = resultBuffer; sourceOffset = 0; } else mxPushUndefined(); if (dispatch == sourceDispatch) { c_memcpy(data->value.arrayBuffer.address + offset, sourceData->value.arrayBuffer.address + sourceOffset, limit - offset); mxMeterSome(((txU4)(limit - offset)) * 2); } else { txInteger sourceDelta = 1 << shift; mxPushUndefined(); while (offset < limit) { (*sourceDispatch->value.typedArray.dispatch->getter)(the, sourceData, sourceOffset, the->stack, EndianNative); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); if (data->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); (*dispatch->value.typedArray.dispatch->setter)(the, data, offset, the->stack, EndianNative); sourceOffset += sourceDelta; offset += delta; } mxPop(); } mxPop(); } else { txInteger count, index; mxPushSlot(mxArgv(0)); mxGetID(mxID(_length)); count = fxToInteger(the, the->stack); mxPop(); if ((target < 0) || (length - count < target)) mxRangeError("invalid offset"); index = 0; while (index < count) { mxPushSlot(mxArgv(0)); mxGetIndex(index); (*dispatch->value.typedArray.dispatch->coerce)(the, the->stack); if (data->value.arrayBuffer.address == C_NULL) mxTypeError("detached buffer"); (*dispatch->value.typedArray.dispatch->setter)(the, data, offset, the->stack, EndianNative); mxPop(); offset += delta; index++; } } } void fx_TypedArray_prototype_slice(txMachine* the) { mxTypedArrayDeclarations; txInteger delta = dispatch->value.typedArray.dispatch->size; txInteger start = (txInteger)fxArgToIndex(the, 0, 0, length); txInteger end = (txInteger)fxArgToIndex(the, 1, length, length); txInteger count = (end > start) ? 
end - start : 0; txInteger index = 0; fxCreateTypedArraySpecies(the); mxPushNumber(count); mxRunCount(1); mxPullSlot(mxResult); { mxResultTypedArrayDeclarations; if (resultLength < count) mxTypeError("insufficient buffer"); if (count) { length = fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxPushUndefined(); while ((start < length) && (start < end)) { (*dispatch->value.typedArray.dispatch->getter)(the, buffer->value.reference->next, view->value.dataView.offset + (start * delta), the->stack, EndianNative); (*resultDispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultView->value.dataView.offset + (index << resultDispatch->value.typedArray.dispatch->shift), the->stack, EndianNative); start++; index++; } while (start < end) { the->stack->kind = XS_UNDEFINED_KIND; (*resultDispatch->value.typedArray.dispatch->coerce)(the, the->stack); (*resultDispatch->value.typedArray.dispatch->setter)(the, resultBuffer->value.reference->next, resultView->value.dataView.offset + (index << resultDispatch->value.typedArray.dispatch->shift), the->stack, EndianNative); start++; index++; } mxPop(); } } } void fx_TypedArray_prototype_some(txMachine* the) { mxTypedArrayDeclarations; txSlot* function = fxArgToCallback(the, 0); txInteger index = 0; mxResult->kind = XS_BOOLEAN_KIND; mxResult->value.boolean = 0; while (index < length) { fxCallTypedArrayItem(the, function, dispatch, view, buffer->value.reference->next, index, C_NULL); mxResult->value.boolean = fxToBoolean(the, the->stack++); if (mxResult->value.boolean) break; index++; } } void fx_TypedArray_prototype_sort(txMachine* the) { mxMutableTypedArrayDeclarations; txSlot* data = buffer->value.reference->next; txInteger delta = dispatch->value.typedArray.dispatch->size; txSlot* function = C_NULL; if (mxArgc > 0) { txSlot* slot = mxArgv(0); if (slot->kind != XS_UNDEFINED_KIND) { if (fxIsCallable(the, slot)) function = slot; else mxTypeError("compare is no function"); } } if (function) { /* like GCC qsort */ #define COMPARE(INDEX) \ fxCompareTypedArrayItem(the, function, dispatch, view, data, INDEX) #define MOVE(FROM,TO) \ from = data->value.arrayBuffer.address + view->value.dataView.offset + ((FROM) * delta); \ to = data->value.arrayBuffer.address + view->value.dataView.offset + ((TO) * delta); \ for (k = 0; k < delta; k++) *to++ = *from++ #define PUSH(INDEX) \ mxPushUndefined(); \ (*dispatch->value.typedArray.dispatch->getter)(the, data, view->value.dataView.offset + ((INDEX) * delta), the->stack, EndianNative) #define PULL(INDEX) \ (*dispatch->value.typedArray.dispatch->setter)(the, data, view->value.dataView.offset + ((INDEX) * delta), the->stack++, EndianNative) if (length > 0) { txInteger i, j, k; txByte* from; txByte* to; if (length > mxSortThreshold) { txInteger lo = 0, hi = length - 1; txSortPartition stack[mxSortPartitionCount]; txSortPartition *top = stack + 1; while (stack < top) { txIndex mid = lo + ((hi - lo) >> 1); PUSH(mid); if (COMPARE(lo) > 0) { MOVE(lo, mid); PULL(lo); PUSH(mid); } if (COMPARE(hi) < 0) { MOVE(hi, mid); PULL(hi); PUSH(mid); if (COMPARE(lo) > 0) { MOVE(lo, mid); PULL(lo); PUSH(mid); } } i = lo + 1; j = hi - 1; do { while ((COMPARE(i) < 0) && (i <= j)) i++; while ((COMPARE(j) > 0) && (i <= j)) j--; if (i < j) { PUSH(i); MOVE(j, i); PULL(j); i++; j--; } else if (i == j) { i++; j--; break; } } while (i <= j); if ((j - lo) <= mxSortThreshold) { if ((hi - i) <= mxSortThreshold) { top--; lo = top->lo; hi = top->hi; } else 
{ lo = i; } } else if ((hi - i) <= mxSortThreshold) { hi = j; } else if ((j - lo) > (hi - i)) { top->lo = lo; top->hi = j; top++; lo = i; } else { top->lo = i; top->hi = hi; top++; hi = j; } mxPop(); } } for (i = 1; i < length; i++) { PUSH(i); for (j = i; (j > 0) && (COMPARE(j - 1) > 0); j--) { MOVE(j - 1, j); } PULL(j); } } } else c_qsort(data->value.arrayBuffer.address, length, delta, dispatch->value.typedArray.dispatch->compare); mxResult->kind = mxThis->kind; mxResult->value = mxThis->value; } void fx_TypedArray_prototype_subarray(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txU2 shift = dispatch->value.typedArray.dispatch->shift; txInteger length = fxGetDataViewSize(the, view, buffer) >> shift; txInteger start = (txInteger)fxArgToIndex(the, 0, 0, length); txInteger stop = (txInteger)fxArgToIndex(the, 1, length, length); if (stop < start) stop = start; fxCreateTypedArraySpecies(the); mxPushSlot(buffer); mxPushInteger(view->value.dataView.offset + (start << shift)); mxPushInteger(stop - start); mxRunCount(3); mxPullSlot(mxResult); fxCheckTypedArrayInstance(the, mxResult); } void fx_TypedArray_prototype_toLocaleString(txMachine* the) { mxTypedArrayDeclarations; txInteger index = 0; txString string; txSlot* list = fxNewInstance(the); txSlot* slot = list; txBoolean comma = 0; txInteger size = 0; mxPushStringX(","); the->stack->kind += XS_KEY_KIND - XS_STRING_KIND; the->stack->value.key.sum = 1; while (index < length) { if (comma) { slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); size += slot->value.key.sum; } else comma = 1; mxPushSlot(mxThis); mxGetIndex(index); if ((the->stack->kind != XS_UNDEFINED_KIND) && (the->stack->kind != XS_NULL_KIND)) { mxDub(); mxGetID(mxID(_toLocaleString)); mxCall(); mxRunCount(0); slot = fxNextSlotProperty(the, slot, the->stack, XS_NO_ID, XS_NO_FLAG); string = fxToString(the, slot); slot->kind += XS_KEY_KIND - XS_STRING_KIND; slot->value.key.sum = mxStringLength(string); size = fxAddChunkSizes(the, size, slot->value.key.sum); } mxPop(); index++; } string = mxResult->value.string = fxNewChunk(the, fxAddChunkSizes(the, size, 1)); slot = list->next; while (slot) { c_memcpy(string, slot->value.key.string, slot->value.key.sum); string += slot->value.key.sum; slot = slot->next; } *string = 0; mxResult->kind = XS_STRING_KIND; mxPop(); } void fx_TypedArray_prototype_toStringTag_get(txMachine* the) { if (mxThis->kind == XS_REFERENCE_KIND) { txSlot* instance = mxThis->value.reference; txSlot* slot = instance->next; if (slot && (slot->flag & XS_INTERNAL_FLAG) && (slot->kind == XS_TYPED_ARRAY_KIND)) { txTypeDispatch *dispatch = instance->next->value.typedArray.dispatch; txSlot* key = fxGetKey(the, mxID(dispatch->constructorID)); if (key->kind == XS_KEY_X_KIND) mxResult->kind = XS_STRING_X_KIND; else mxResult->kind = XS_STRING_KIND; mxResult->value.string = key->value.key.string; } } } void fx_TypedArray_prototype_values(txMachine* the) { txSlot* instance = fxCheckTypedArrayInstance(the, mxThis); txSlot* dispatch = instance->next; txSlot* view = dispatch->next; txSlot* buffer = view->next; txSlot* property; fxCheckDataViewSize(the, view, buffer, XS_IMMUTABLE); mxPush(mxArrayIteratorPrototype); property = fxLastProperty(the, fxNewIteratorInstance(the, mxThis, mxID(_Array))); property = fxNextIntegerProperty(the, property, 0, XS_NO_ID, XS_INTERNAL_FLAG); mxPullSlot(mxResult); } #if mxBigEndian #define mxEndianDouble_BtoN(a) 
(a) #define mxEndianFloat_BtoN(a) (a) #define mxEndianS64_BtoN(a) (a) #define mxEndianU64_BtoN(a) (a) #define mxEndianS32_BtoN(a) (a) #define mxEndianU32_BtoN(a) (a) #define mxEndianS16_BtoN(a) (a) #define mxEndianU16_BtoN(a) (a) #define mxEndianDouble_NtoB(a) (a) #define mxEndianFloat_NtoB(a) (a) #define mxEndianS64_NtoB(a) (a) #define mxEndianU64_NtoB(a) (a) #define mxEndianS32_NtoB(a) (a) #define mxEndianU32_NtoB(a) (a) #define mxEndianS16_NtoB(a) (a) #define mxEndianU16_NtoB(a) (a) #else #define mxEndianDouble_LtoN(a) (a) #define mxEndianFloat_LtoN(a) (a) #define mxEndianS64_LtoN(a) (a) #define mxEndianU64_LtoN(a) (a) #define mxEndianS32_LtoN(a) (a) #define mxEndianU32_LtoN(a) (a) #define mxEndianS16_LtoN(a) (a) #define mxEndianU16_LtoN(a) (a) #define mxEndianDouble_NtoL(a) (a) #define mxEndianFloat_NtoL(a) (a) #define mxEndianS64_NtoL(a) (a) #define mxEndianU64_NtoL(a) (a) #define mxEndianS32_NtoL(a) (a) #define mxEndianU32_NtoL(a) (a) #define mxEndianS16_NtoL(a) (a) #define mxEndianU16_NtoL(a) (a) #endif #if mxLittleEndian #define mxEndianDouble_BtoN(a) (mxEndianDouble_Swap(a)) #define mxEndianFloat_BtoN(a) (mxEndianFloat_Swap(a)) #define mxEndianS64_BtoN(a) ((txS8) mxEndian64_Swap(a)) #define mxEndianU64_BtoN(a) ((txU8) mxEndian64_Swap(a)) #define mxEndianS32_BtoN(a) ((txS4) mxEndian32_Swap(a)) #define mxEndianU32_BtoN(a) ((txU4) mxEndian32_Swap(a)) #define mxEndianS16_BtoN(a) ((txS2) mxEndian16_Swap(a)) #define mxEndianU16_BtoN(a) ((txU2) mxEndian16_Swap(a)) #define mxEndianDouble_NtoB(a) (mxEndianDouble_Swap(a)) #define mxEndianFloat_NtoB(a) (mxEndianFloat_Swap(a)) #define mxEndianS64_NtoB(a) ((txS8) mxEndian64_Swap(a)) #define mxEndianU64_NtoB(a) ((txU8) mxEndian64_Swap(a)) #define mxEndianS32_NtoB(a) ((txS4) mxEndian32_Swap(a)) #define mxEndianU32_NtoB(a) ((txU4) mxEndian32_Swap(a)) #define mxEndianS16_NtoB(a) ((txS2) mxEndian16_Swap(a)) #define mxEndianU16_NtoB(a) ((txU2) mxEndian16_Swap(a)) #else #define mxEndianDouble_LtoN(a) (mxEndianDouble_Swap(a)) #define mxEndianFloat_LtoN(a) (mxEndianFloat_Swap(a)) #define mxEndianS64_LtoN(a) ((txS8) mxEndian64_Swap(a)) #define mxEndianU64_LtoN(a) ((txU8) mxEndian64_Swap(a)) #define mxEndianS32_LtoN(a) ((txS4) mxEndian32_Swap(a)) #define mxEndianU32_LtoN(a) ((txU4) mxEndian32_Swap(a)) #define mxEndianS16_LtoN(a) ((txS2) mxEndian16_Swap(a)) #define mxEndianU16_LtoN(a) ((txU2) mxEndian16_Swap(a)) #define mxEndianDouble_NtoL(a) (mxEndianDouble_Swap(a)) #define mxEndianFloat_NtoL(a) (mxEndianFloat_Swap(a)) #define mxEndianS64_NtoL(a) ((txS8) mxEndian64_Swap(a)) #define mxEndianU64_NtoL(a) ((txU8) mxEndian64_Swap(a)) #define mxEndianS32_NtoL(a) ((txS4) mxEndian32_Swap(a)) #define mxEndianU32_NtoL(a) ((txU4) mxEndian32_Swap(a)) #define mxEndianS16_NtoL(a) ((txS2) mxEndian16_Swap(a)) #define mxEndianU16_NtoL(a) ((txU2) mxEndian16_Swap(a)) #endif #if defined(__GNUC__) || defined(__llvm__) #define mxEndian16_Swap(a) __builtin_bswap16(a) #else static txU2 mxEndian16_Swap(txU2 a) { txU2 b; txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b; int i; for (i = 0; i < 2; i++) p2[i] = p1[1 - i]; return b; } #endif #if defined(__GNUC__) || defined(__llvm__) #define mxEndian32_Swap(a) __builtin_bswap32(a) #else static txU4 mxEndian32_Swap(txU4 a) { txU4 b; txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b; int i; for (i = 0; i < 4; i++) p2[i] = p1[3 - i]; return b; } #endif #if defined(__GNUC__) || defined(__llvm__) #define mxEndian64_Swap(a) __builtin_bswap64(a) #else static txU8 mxEndian64_Swap(txU8 a) { txU4 b; txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b; int i; for (i = 
0; i < 8; i++) p2[i] = p1[7 - i]; return b; } #endif static float mxEndianFloat_Swap(float a) { #if defined(__GNUC__) || defined(__llvm__) uint32_t result = __builtin_bswap32(*(uint32_t *)&a); return *(float *)&result; #else float b; txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b; int i; for (i = 0; i < 4; i++) p2[i] = p1[3 - i]; return b; #endif } static double mxEndianDouble_Swap(double a) { #if defined(__GNUC__) || defined(__llvm__) uint64_t result = __builtin_bswap64(*(uint64_t *)&a); return *(double *)&result; #else double b; txU1 *p1 = (txU1 *) &a, *p2 = (txU1 *) &b; int i; for (i = 0; i < 8; i++) p2[i] = p1[7 - i]; return b; #endif } #define toNative(size, endian) mxEndian##size##_##endian##toN #define fromNative(size, endian) mxEndian##size##_Nto##endian #define IMPORT(size) (endian == EndianBig ? toNative(size, B)(value) : endian == EndianLittle ? toNative(size, L)(value) : (value)) #define EXPORT(size) (endian == EndianBig ? fromNative(size, B)(value) : endian == EndianLittle ? toNative(size, L)(value) : (value)) int fxBigInt64Compare(const void* p, const void* q) { txS8 a = *((txS8*)p); txS8 b = *((txS8*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxBigInt64Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS8 value; #ifdef mxMisalignedSettersCrash value = c_read32(data->value.arrayBuffer.address + offset); #else value = *((txS8*)(data->value.arrayBuffer.address + offset)); #endif value = IMPORT(S64); fxFromBigInt64(the, slot, value); mxMeterOne(); } void fxBigInt64Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS8 value = (txS8)fxToBigInt64(the, slot); #ifdef mxMisalignedSettersCrash value = EXPORT(S64); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txS8)); #else *((txS8*)(data->value.arrayBuffer.address + offset)) = EXPORT(S64); #endif mxMeterOne(); } int fxBigUint64Compare(const void* p, const void* q) { txU8 a = *((txU8*)p); txU8 b = *((txU8*)q); return (a < b) ? -1 : (a > b) ? 
1 : 0; } void fxBigUint64Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txU8 value; #ifdef mxMisalignedSettersCrash value = c_read32(data->value.arrayBuffer.address + offset); #else value = *((txU8*)(data->value.arrayBuffer.address + offset)); #endif value = IMPORT(U64); fxFromBigUint64(the, slot, value); mxMeterOne(); } void fxBigUint64Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txU8 value = (txU8)fxToBigUint64(the, slot); #ifdef mxMisalignedSettersCrash value = EXPORT(U64); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txU8)); #else *((txU8*)(data->value.arrayBuffer.address + offset)) = EXPORT(U64); #endif mxMeterOne(); } int fxFloat32Compare(const void* p, const void* q) { float a = *((float*)p); float b = *((float*)q); if (c_isnan(a)) { if (c_isnan(b)) return 0; return 1; } if (c_isnan(b)) return -1; if (a < b) return -1; if (a > b) return 1; if (a == 0) { if (c_signbit(a)) { if (c_signbit(b)) return 0; return -1; } if (c_signbit(b)) return 1; } return 0; } void fxFloat32Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { float value; slot->kind = XS_NUMBER_KIND; #ifdef mxMisalignedSettersCrash c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value)); #else value = *((float*)(data->value.arrayBuffer.address + offset)); #endif slot->value.number = IMPORT(Float); mxMeterOne(); } void fxFloat32Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { float value = (float)slot->value.number; #ifdef mxMisalignedSettersCrash value = EXPORT(Float); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(float)); #else *((float*)(data->value.arrayBuffer.address + offset)) = EXPORT(Float); #endif mxMeterOne(); } int fxFloat64Compare(const void* p, const void* q) { double a = *((double*)p); double b = *((double*)q); if (c_isnan(a)) { if (c_isnan(b)) return 0; return 1; } if (c_isnan(b)) return -1; if (a < b) return -1; if (a > b) return 1; if (a == 0) { if (c_signbit(a)) { if (c_signbit(b)) return 0; return -1; } if (c_signbit(b)) return 1; } return 0; } void fxFloat64Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { double value; slot->kind = XS_NUMBER_KIND; #ifdef mxMisalignedSettersCrash c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value)); #else value = *((double*)(data->value.arrayBuffer.address + offset)); #endif slot->value.number = IMPORT(Double); mxMeterOne(); } void fxFloat64Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { double value = slot->value.number; #ifdef mxMisalignedSettersCrash value = EXPORT(Double); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(double)); #else *((double*)(data->value.arrayBuffer.address + offset)) = EXPORT(Double); #endif mxMeterOne(); } void fxIntCoerce(txMachine* the, txSlot* slot) { fxToInteger(the, slot); } void fxUintCoerce(txMachine* the, txSlot* slot) { fxToUnsigned(the, slot); } int fxInt8Compare(const void* p, const void* q) { txS1 a = *((txS1*)p); txS1 b = *((txS1*)q); return (a < b) ? -1 : (a > b) ? 
1 : 0; } void fxInt8Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { slot->kind = XS_INTEGER_KIND; slot->value.integer = *((txS1*)(data->value.arrayBuffer.address + offset)); mxMeterOne(); } void fxInt8Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { *((txS1*)(data->value.arrayBuffer.address + offset)) = (txS1)slot->value.integer; mxMeterOne(); } int fxInt16Compare(const void* p, const void* q) { txS2 a = *((txS2*)p); txS2 b = *((txS2*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxInt16Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS2 value; slot->kind = XS_INTEGER_KIND; #ifdef mxMisalignedSettersCrash c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value)); #else value = *((txS2*)(data->value.arrayBuffer.address + offset)); #endif slot->value.integer = IMPORT(S16); mxMeterOne(); } void fxInt16Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS2 value = (txS2)slot->value.integer; #ifdef mxMisalignedSettersCrash value = EXPORT(S16); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txS2)); #else *((txS2*)(data->value.arrayBuffer.address + offset)) = EXPORT(S16); #endif mxMeterOne(); } int fxInt32Compare(const void* p, const void* q) { txS4 a = *((txS4*)p); txS4 b = *((txS4*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxInt32Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS4 value; slot->kind = XS_INTEGER_KIND; #ifdef mxMisalignedSettersCrash value = c_read32(data->value.arrayBuffer.address + offset); #else value = *((txS4*)(data->value.arrayBuffer.address + offset)); #endif slot->value.integer = IMPORT(S32); mxMeterOne(); } void fxInt32Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txS4 value = (txS4)slot->value.integer; #ifdef mxMisalignedSettersCrash value = EXPORT(S32); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txS4)); #else *((txS4*)(data->value.arrayBuffer.address + offset)) = EXPORT(S32); #endif mxMeterOne(); } int fxUint8Compare(const void* p, const void* q) { txU1 a = c_read8((txU1*)p); txU1 b = c_read8((txU1*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxUint8Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { slot->kind = XS_INTEGER_KIND; slot->value.integer = c_read8((txU1*)(data->value.arrayBuffer.address + offset)); mxMeterOne(); } void fxUint8Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txUnsigned tmp = (slot->kind == XS_INTEGER_KIND) ? (txUnsigned)slot->value.integer : (txUnsigned)slot->value.number; *((txU1*)(data->value.arrayBuffer.address + offset)) = (txU1)tmp; mxMeterOne(); } int fxUint16Compare(const void* p, const void* q) { txU2 a = *((txU2*)p); txU2 b = *((txU2*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxUint16Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txU2 value; slot->kind = XS_INTEGER_KIND; #ifdef mxMisalignedSettersCrash c_memcpy(&value, data->value.arrayBuffer.address + offset, sizeof(value)); #else value = *((txU2*)(data->value.arrayBuffer.address + offset)); #endif slot->value.integer = IMPORT(U16); mxMeterOne(); } void fxUint16Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txUnsigned tmp = (slot->kind == XS_INTEGER_KIND) ? 
(txUnsigned)slot->value.integer : (txUnsigned)slot->value.number; txU2 value = (txU2)tmp; #ifdef mxMisalignedSettersCrash value = EXPORT(U16); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txU2)); #else *((txU2*)(data->value.arrayBuffer.address + offset)) = EXPORT(U16); #endif mxMeterOne(); } int fxUint32Compare(const void* p, const void* q) { txU4 a = *((txU4*)p); txU4 b = *((txU4*)q); return (a < b) ? -1 : (a > b) ? 1 : 0; } void fxUint32Getter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { #ifdef mxMisalignedSettersCrash txUnsigned value = c_read32(data->value.arrayBuffer.address + offset); #else txUnsigned value = *((txU4*)(data->value.arrayBuffer.address + offset)); #endif value = IMPORT(U32); if (((txInteger)value) >= 0) { slot->kind = XS_INTEGER_KIND; slot->value.integer = value; } else { slot->kind = XS_NUMBER_KIND; slot->value.number = value; } mxMeterOne(); } void fxUint32Setter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txU4 value = (slot->kind == XS_INTEGER_KIND) ? (txU4)slot->value.integer : (txU4)slot->value.number; #ifdef mxMisalignedSettersCrash value = EXPORT(U32); c_memcpy(data->value.arrayBuffer.address + offset, &value, sizeof(txU4)); #else *((txU4*)(data->value.arrayBuffer.address + offset)) = EXPORT(U32); #endif mxMeterOne(); } void fxUint8ClampedSetter(txMachine* the, txSlot* data, txInteger offset, txSlot* slot, int endian) { txNumber value = fxToNumber(the, slot); if (value <= 0) value = 0; else if (value >= 255) value = 255; else if (c_isnan(value)) value = 0; else value = c_nearbyint(value); *((txU1*)(data->value.arrayBuffer.address + offset)) = (txU1)value; mxMeterOne(); }
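The fxUint8ClampedSetter above is the one setter that does not route through fxIntCoerce or fxUintCoerce: it applies the Uint8ClampedArray conversion directly, mapping NaN to 0, clamping to [0, 255], and rounding everything else with c_nearbyint (round-half-to-even under the default rounding mode). A minimal standalone sketch of the same rule, assuming plain C99 libm in place of the engine's c_* wrappers:

#include <math.h>
#include <stdint.h>

/* Clamp-and-round rule applied by Uint8ClampedArray stores (sketch;
   checking NaN first is equivalent to the engine's ordering, since
   NaN fails both range comparisons there anyway). */
static uint8_t clamp_to_uint8(double value)
{
	if (isnan(value))
		return 0;                          /* NaN -> 0 */
	if (value <= 0)
		return 0;                          /* clamp low */
	if (value >= 255)
		return 255;                        /* clamp high */
	return (uint8_t)nearbyint(value);      /* ties to even by default */
}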
void fx_DataView(txMachine* the)
{
	txSlot* slot;
	txBoolean flag = 0;
	txInteger offset, size;
	txSlot* info;
	txSlot* instance;
	txSlot* view;
	txSlot* buffer;
	if (mxIsUndefined(mxTarget))
		mxTypeError("call: DataView");
	if ((mxArgc > 0) && (mxArgv(0)->kind == XS_REFERENCE_KIND)) {
		slot = mxArgv(0)->value.reference->next;
		if (slot && ((slot->kind == XS_ARRAY_BUFFER_KIND) || (slot->kind == XS_HOST_KIND))) {
			flag = 1;
		}
	}
	if (!flag)
		mxTypeError("buffer is no ArrayBuffer instance");
	offset = fxArgToByteLength(the, 1, 0);
	info = fxGetBufferInfo(the, mxArgv(0));
	if (info->value.bufferInfo.length < offset)
		mxRangeError("out of range byteOffset %ld", offset);
	size = fxArgToByteLength(the, 2, -1);
	if (size >= 0) {
		if (info->value.bufferInfo.length < (offset + size))
			mxRangeError("out of range byteLength %ld", size);
	}
	else {
		if (info->value.bufferInfo.maxLength < 0)
			size = info->value.bufferInfo.length - offset;
	}
	mxPushSlot(mxTarget);
	fxGetPrototypeFromConstructor(the, &mxDataViewPrototype);
	instance = fxNewDataViewInstance(the);
	mxPullSlot(mxResult);
	view = instance->next;
	buffer = view->next;
	buffer->kind = XS_REFERENCE_KIND;
	buffer->value.reference = mxArgv(0)->value.reference;
	info = fxGetBufferInfo(the, buffer);
	if (info->value.bufferInfo.maxLength >= 0) {
		if (info->value.bufferInfo.length < offset)
			mxRangeError("out of range byteOffset %ld", offset);
		else if (size >= 0) {
			if (info->value.bufferInfo.length < (offset + size))
				mxRangeError("out of range byteLength %ld", size);
		}
	}
	view->value.dataView.offset = offset;
	view->value.dataView.size = size;
}
void fx_DataView(txMachine* the)
{
	txSlot* slot;
	txBoolean flag = 0;
	txInteger offset, size;
	txSlot* info;
	txSlot* instance;
	txSlot* view;
	txSlot* buffer;
	if (mxIsUndefined(mxTarget))
		mxTypeError("call: DataView");
	if ((mxArgc > 0) && (mxArgv(0)->kind == XS_REFERENCE_KIND)) {
		slot = mxArgv(0)->value.reference->next;
		if (slot && ((slot->kind == XS_ARRAY_BUFFER_KIND) || (slot->kind == XS_HOST_KIND))) {
			flag = 1;
		}
	}
	if (!flag)
		mxTypeError("buffer is no ArrayBuffer instance");
	offset = fxArgToByteLength(the, 1, 0);
	info = fxGetBufferInfo(the, mxArgv(0));
	if (info->value.bufferInfo.length < offset)
		mxRangeError("out of range byteOffset %ld", offset);
	size = fxArgToByteLength(the, 2, -1);
	if (size >= 0) {
		txInteger end = offset + size;
		if ((info->value.bufferInfo.length < end) || (end < offset))
			mxRangeError("out of range byteLength %ld", size);
	}
	else {
		if (info->value.bufferInfo.maxLength < 0)
			size = info->value.bufferInfo.length - offset;
	}
	mxPushSlot(mxTarget);
	fxGetPrototypeFromConstructor(the, &mxDataViewPrototype);
	instance = fxNewDataViewInstance(the);
	mxPullSlot(mxResult);
	view = instance->next;
	buffer = view->next;
	buffer->kind = XS_REFERENCE_KIND;
	buffer->value.reference = mxArgv(0)->value.reference;
	info = fxGetBufferInfo(the, buffer);
	if (info->value.bufferInfo.maxLength >= 0) {
		if (info->value.bufferInfo.length < offset)
			mxRangeError("out of range byteOffset %ld", offset);
		else if (size >= 0) {
			txInteger end = offset + size;
			if ((info->value.bufferInfo.length < end) || (end < offset))
				mxRangeError("out of range byteLength %ld", size);
		}
	}
	view->value.dataView.offset = offset;
	view->value.dataView.size = size;
}
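The second fx_DataView above is the patched version: instead of comparing the buffer length against offset + size directly, it materializes end = offset + size and additionally rejects end < offset, the signature of the addition having wrapped around. When the sum wraps, the old test length < (offset + size) compares against a small value and passes, leaving a view whose bounds extend past the buffer, which is the out-of-bounds read this record fixes. A minimal sketch of the pattern, written with unsigned arithmetic so the wraparound the check relies on is well defined (txInteger itself is signed, where overflow is formally undefined in C):

#include <stdbool.h>
#include <stdint.h>

/* Overflow-safe "does [offset, offset + size) fit in a length-byte
   buffer?" check, mirroring the patched fx_DataView test. */
static bool view_fits(uint32_t length, uint32_t offset, uint32_t size)
{
	uint32_t end = offset + size;  /* may wrap around zero */
	if (end < offset)
		return false;              /* wrapped: offset + size overflowed */
	return end <= length;          /* ordinary bounds check */
}

For example, view_fits(16, 8, 0xFFFFFFF8) computes end == 0 and is rejected by the wrap test, whereas the naive length < offset + size comparison would have accepted it.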
{'added': [(776, '\t\ttxInteger end = offset + size;'), (777, '\t\tif ((info->value.bufferInfo.length < end) || (end < offset))'), (797, '\t\t\ttxInteger end = offset + size;'), (798, '\t\t\tif ((info->value.bufferInfo.length < end) || (end < offset))'), (1365, '\t\t\t\ttxInteger delta = size << shift;'), (1366, '\t\t\t\ttxInteger end = offset + delta;'), (1367, '\t\t\t\tif ((info->value.bufferInfo.length < end) || (end < offset))'), (1368, '\t\t\t\t\tmxRangeError("out of range length %ld", size);'), (1369, '\t\t\t\tsize = delta;')], 'deleted': [(776, '\t\tif (info->value.bufferInfo.length < (offset + size))'), (796, '\t\t\tif (info->value.bufferInfo.length < (offset + size))'), (1363, '\t\t\t\tsize <<= shift;'), (1364, '\t\t\t\tif (info->value.bufferInfo.length < (offset + size))'), (1365, '\t\t\t\t\tmxRangeError("out of range byteLength %ld", size);')]}
9
5
2,594
21,236
52
386
16
https://github.com/Moddable-OpenSource/moddable
CVE-2022-29368
CWE-125
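The fx_TypedArray hunk in the diff above applies the same idea one step earlier: the element count is first widened into a byte count (delta = size << shift) and only then added to the offset, with the end < offset wrap test guarding the sum. A sketch that makes both overflow hazards explicit, under the assumption (true for these typed arrays) that shift is at most 3:

#include <stdbool.h>
#include <stdint.h>

/* Convert an element count to a byte span and bounds-check it,
   with explicit tests for both places the arithmetic can wrap. */
static bool span_fits(uint32_t buf_len, uint32_t offset,
                      uint32_t count, unsigned shift)  /* shift <= 3 */
{
	uint32_t bytes = count << shift;
	if ((bytes >> shift) != count)  /* high bits lost in the shift */
		return false;
	uint32_t end = offset + bytes;
	if (end < offset)               /* addition wrapped */
		return false;
	return end <= buf_len;
}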
2,811
alloc.c
C
xmalloc
/* * alloc.c -- Useful allocation function/defintions * * Copyright (C)1999-2006 Mark Simpson <damned@world.std.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, you can either send email to this * program's maintainer or write to: The Free Software Foundation, * Inc.; 59 Temple Place, Suite 330; Boston, MA 02111-1307, USA. * */ #ifdef HAVE_CONFIG_H # include "config.h" #endif /* HAVE_CONFIG_H */ #include "common.h" #include "alloc.h" static size_t alloc_limit = 0; void set_alloc_limit (size_t size) { alloc_limit = size; } size_t get_alloc_limit() { return alloc_limit; } static void alloc_limit_failure (char *fn_name, size_t size) { fprintf (stderr, "%s: Maximum allocation size exceeded " "(maxsize = %lu; size = %lu).\n", fn_name, (unsigned long)alloc_limit, (unsigned long)size); } void alloc_limit_assert (char *fn_name, size_t size) { if (alloc_limit && size > alloc_limit) { alloc_limit_failure (fn_name, size); exit (-1); } } /* attempts to malloc memory, if fails print error and call abort */ void* xmalloc (size_t size) { void *ptr = malloc (size); if (!ptr && (size != 0)) /* some libc don't like size == 0 */ { perror ("xmalloc: Memory allocation failure"); abort(); } return ptr; } /* Allocates memory but only up to a limit */ void* checked_xmalloc (size_t size) { alloc_limit_assert ("checked_xmalloc", size); return xmalloc (size); } /* xmallocs memory and clears it out */ void* xcalloc (size_t num, size_t size) { void *ptr = malloc(num * size); if (ptr) { memset (ptr, '\0', (num * size)); } return ptr; } /* xcallocs memory but only up to a limit */ void* checked_xcalloc (size_t num, size_t size) { alloc_limit_assert ("checked_xcalloc", (num *size)); return xcalloc (num, size); }
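The unpatched xcalloc above computes num * size with no overflow check before handing the product to malloc. On a platform with a 32-bit size_t the product can wrap to a small value, so malloc succeeds but returns far less memory than the caller computed. A self-contained illustration with hypothetical values (uint32_t stands in for a 32-bit size_t):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t num = 0x10000, size = 0x10000;  /* 65536 * 65536 */
	uint32_t total = num * size;             /* wraps to 0 */
	printf("requested %u x %u elements, computed %u bytes\n",
	       num, size, total);
	return 0;
}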
/* * alloc.c -- Useful allocation function/defintions * * Copyright (C)1999-2006 Mark Simpson <damned@world.std.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, you can either send email to this * program's maintainer or write to: The Free Software Foundation, * Inc.; 59 Temple Place, Suite 330; Boston, MA 02111-1307, USA. * */ #ifdef HAVE_CONFIG_H # include "config.h" #endif /* HAVE_CONFIG_H */ #include "common.h" #include "alloc.h" static size_t alloc_limit = 0; void set_alloc_limit (size_t size) { alloc_limit = size; } size_t get_alloc_limit() { return alloc_limit; } size_t check_mul_overflow(size_t a, size_t b, size_t* res) { size_t tmp = a * b; if (a != 0 && tmp / a != b) return 1; *res = tmp; return 0; } static void alloc_limit_failure (char *fn_name, size_t size) { fprintf (stderr, "%s: Maximum allocation size exceeded " "(maxsize = %lu; size = %lu).\n", fn_name, (unsigned long)alloc_limit, (unsigned long)size); } void alloc_limit_assert (char *fn_name, size_t size) { if (alloc_limit && size > alloc_limit) { alloc_limit_failure (fn_name, size); exit (-1); } } /* attempts to malloc memory, if fails print error and call abort */ void* xmalloc (size_t num, size_t size) { size_t res; if (check_mul_overflow(num, size, &res)) abort(); void *ptr = malloc (res); if (!ptr && (size != 0)) /* some libc don't like size == 0 */ { perror ("xmalloc: Memory allocation failure"); abort(); } return ptr; } /* Allocates memory but only up to a limit */ void* checked_xmalloc (size_t num, size_t size) { size_t res; if (check_mul_overflow(num, size, &res)) abort(); alloc_limit_assert ("checked_xmalloc", res); return xmalloc (num, size); } /* xmallocs memory and clears it out */ void* xcalloc (size_t num, size_t size) { size_t res; if (check_mul_overflow(num, size, &res)) abort(); void *ptr; ptr = malloc(res); if (ptr) { memset (ptr, '\0', (res)); } return ptr; } /* xcallocs memory but only up to a limit */ void* checked_xcalloc (size_t num, size_t size) { size_t res; if (check_mul_overflow(num, size, &res)) abort(); alloc_limit_assert ("checked_xcalloc", (res)); return xcalloc (num, size); }
xmalloc (size_t size) { void *ptr = malloc (size); if (!ptr && (size != 0)) /* some libc don't like size == 0 */ { perror ("xmalloc: Memory allocation failure"); abort(); } return ptr; }
xmalloc (size_t num, size_t size) { size_t res; if (check_mul_overflow(num, size, &res)) abort(); void *ptr = malloc (res); if (!ptr && (size != 0)) /* some libc don't like size == 0 */ { perror ("xmalloc: Memory allocation failure"); abort(); } return ptr; }
{'added': [(43, 'size_t'), (44, 'check_mul_overflow(size_t a, size_t b, size_t* res)'), (45, '{'), (46, ' size_t tmp = a * b;'), (47, ' if (a != 0 && tmp / a != b) return 1;'), (48, ' *res = tmp;'), (49, ' return 0;'), (50, '}'), (51, ''), (55, ' fprintf (stderr,'), (59, ' (unsigned long)alloc_limit,'), (68, ' alloc_limit_failure (fn_name, size);'), (69, ' exit (-1);'), (75, 'xmalloc (size_t num, size_t size)'), (77, ' size_t res;'), (78, ' if (check_mul_overflow(num, size, &res))'), (79, ' abort();'), (80, ''), (81, ' void *ptr = malloc (res);'), (82, ' if (!ptr'), (93, 'checked_xmalloc (size_t num, size_t size)'), (95, ' size_t res;'), (96, ' if (check_mul_overflow(num, size, &res))'), (97, ' abort();'), (98, ''), (99, ' alloc_limit_assert ("checked_xmalloc", res);'), (100, ' return xmalloc (num, size);'), (107, ' size_t res;'), (108, ' if (check_mul_overflow(num, size, &res))'), (109, ' abort();'), (110, ''), (111, ' void *ptr;'), (112, ' ptr = malloc(res);'), (115, " memset (ptr, '\\0', (res));"), (124, ' size_t res;'), (125, ' if (check_mul_overflow(num, size, &res))'), (126, ' abort();'), (127, ''), (128, ' alloc_limit_assert ("checked_xcalloc", (res));')], 'deleted': [(46, ' fprintf (stderr,'), (50, ' (unsigned long)alloc_limit,'), (59, '\talloc_limit_failure (fn_name, size);'), (60, '\texit (-1);'), (66, 'xmalloc (size_t size)'), (68, ' void *ptr = malloc (size);'), (69, ' if (!ptr'), (80, 'checked_xmalloc (size_t size)'), (82, ' alloc_limit_assert ("checked_xmalloc", size);'), (83, ' return xmalloc (size);'), (90, ' void *ptr = malloc(num * size);'), (93, " memset (ptr, '\\0', (num * size));"), (102, ' alloc_limit_assert ("checked_xcalloc", (num *size));'), (105, ''), (106, ''), (107, '')]}
39
16
88
369
11
41
3
https://github.com/verdammelt/tnef
CVE-2017-6308
CWE-190
1577
host_calls.cc
C++
enc_untrusted_read
/* * * Copyright 2019 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "asylo/platform/host_call/trusted/host_calls.h" #include <errno.h> #include <ifaddrs.h> #include <net/if.h> #include <netdb.h> #include <signal.h> #include <sys/statfs.h> #include <algorithm> #include "asylo/platform/host_call/exit_handler_constants.h" #include "asylo/platform/host_call/serializer_functions.h" #include "asylo/platform/primitives/trusted_primitives.h" #include "asylo/platform/system_call/type_conversions/types_functions.h" using ::asylo::host_call::NonSystemCallDispatcher; using ::asylo::primitives::Extent; using ::asylo::primitives::MessageReader; using ::asylo::primitives::MessageWriter; using ::asylo::primitives::TrustedPrimitives; void CheckStatusAndParamCount(const asylo::primitives::PrimitiveStatus &status, const MessageReader &output, const char *name, int expected_params, bool match_exact_params) { if (!status.ok()) { std::string message = absl::StrCat("Host call '", name, "' failed."); TrustedPrimitives::BestEffortAbort(message.c_str()); } if (!match_exact_params) { if (output.size() < expected_params) { std::string message = absl::StrCat( "Host call '", name, "': Expected at least ", expected_params, " parameters on the MessageReader, found ", output.size()); TrustedPrimitives::BestEffortAbort(message.c_str()); } } else { if (output.size() != expected_params) { std::string message = absl::StrCat( "Host call '", name, "': Expected ", expected_params, " parameters on the MessageReader, found ", output.size()); TrustedPrimitives::BestEffortAbort(message.c_str()); } } } namespace { // A global passwd struct. The address of it is used as the return value of // getpwuid. struct passwd global_passwd; size_t CalculateTotalMessageSize(const struct msghdr *msg) { size_t total_message_size = 0; for (int i = 0; i < msg->msg_iovlen; ++i) { total_message_size += msg->msg_iov[i].iov_len; } return total_message_size; } #define PASSWD_HOLDER_FIELD_LENGTH 1024 // Struct for storing the buffers needed by struct passwd members. 
struct passwd_holder { char pw_name[PASSWD_HOLDER_FIELD_LENGTH]; char pw_passwd[PASSWD_HOLDER_FIELD_LENGTH]; uid_t pw_uid; gid_t pw_gid; char pw_gecos[PASSWD_HOLDER_FIELD_LENGTH]; char pw_dir[PASSWD_HOLDER_FIELD_LENGTH]; char pw_shell[PASSWD_HOLDER_FIELD_LENGTH]; }; bool DeserializePasswd(MessageReader *reader, struct passwd_holder *passwd_buffers) { if (!reader || !passwd_buffers) { return false; } if (reader->size() < 7) { return false; } auto pw_name_buf = reader->next(); auto pw_passwd_buf = reader->next(); auto pw_uid = reader->next<uid_t>(); auto pw_gid = reader->next<gid_t>(); auto pw_gecos_buf = reader->next(); auto pw_dir_buf = reader->next(); auto pw_shell_buf = reader->next(); strncpy(passwd_buffers->pw_name, pw_name_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_name), pw_name_buf.size())); strncpy(passwd_buffers->pw_passwd, pw_passwd_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_passwd), pw_passwd_buf.size())); passwd_buffers->pw_uid = pw_uid; passwd_buffers->pw_gid = pw_gid; strncpy(passwd_buffers->pw_gecos, pw_gecos_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_gecos), pw_gecos_buf.size())); strncpy(passwd_buffers->pw_dir, pw_dir_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_dir), pw_dir_buf.size())); strncpy(passwd_buffers->pw_shell, pw_shell_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_shell), pw_shell_buf.size())); return true; } bool PasswdHolderToPasswd(struct passwd_holder *passwd_in, struct passwd *passwd_out) { if (!passwd_in || !passwd_out) { return false; } passwd_out->pw_name = passwd_in->pw_name; passwd_out->pw_passwd = passwd_in->pw_passwd; passwd_out->pw_uid = passwd_in->pw_uid; passwd_out->pw_gid = passwd_in->pw_gid; passwd_out->pw_gecos = passwd_in->pw_gecos; passwd_out->pw_dir = passwd_in->pw_dir; passwd_out->pw_shell = passwd_in->pw_shell; return true; } } // namespace extern "C" { int enc_untrusted_access(const char *path_name, int mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_access, path_name, mode); } pid_t enc_untrusted_getpid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getpid); } pid_t enc_untrusted_getppid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getppid); } pid_t enc_untrusted_setsid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_setsid); } uid_t enc_untrusted_getuid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getuid); } gid_t enc_untrusted_getgid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getgid); } uid_t enc_untrusted_geteuid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_geteuid); } gid_t enc_untrusted_getegid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getegid); } int enc_untrusted_kill(pid_t pid, int sig) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig < 0) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_kill, pid, klinux_sig); } int enc_untrusted_link(const char *oldpath, const char *newpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_link, oldpath, newpath); } off_t enc_untrusted_lseek(int fd, off_t offset, int whence) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lseek, fd, offset, whence); } int enc_untrusted_mkdir(const char *pathname, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_mkdir, pathname, mode); } int enc_untrusted_open(const char 
*pathname, int flags, ...) { int mode = 0; if (flags & O_CREAT) { va_list ap; va_start(ap, flags); mode = va_arg(ap, mode_t); va_end(ap); } return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_open, pathname, TokLinuxFileStatusFlag(flags), TokLinuxFileModeFlag(mode)); } int enc_untrusted_unlink(const char *pathname) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_unlink, pathname); } int enc_untrusted_rename(const char *oldpath, const char *newpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_rename, oldpath, newpath); } ssize_t enc_untrusted_read(int fd, void *buf, size_t count) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_read, fd, buf, count)); } ssize_t enc_untrusted_write(int fd, const void *buf, size_t count) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_write, fd, buf, count)); } int enc_untrusted_symlink(const char *target, const char *linkpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_symlink, target, linkpath); } ssize_t enc_untrusted_readlink(const char *pathname, char *buf, size_t bufsiz) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_readlink, pathname, buf, bufsiz)); } int enc_untrusted_truncate(const char *path, off_t length) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_truncate, path, length); } int enc_untrusted_ftruncate(int fd, off_t length) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_ftruncate, fd, length); } int enc_untrusted_rmdir(const char *path) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_rmdir, path); } int enc_untrusted_pipe2(int pipefd[2], int flags) { if (flags & ~(O_CLOEXEC | O_DIRECT | O_NONBLOCK)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_pipe2, pipefd, TokLinuxFileStatusFlag(flags)); } int enc_untrusted_socket(int domain, int type, int protocol) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_socket, TokLinuxAfFamily(domain), TokLinuxSocketType(type), protocol); } int enc_untrusted_listen(int sockfd, int backlog) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_listen, sockfd, backlog); } int enc_untrusted_shutdown(int sockfd, int how) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_shutdown, sockfd, how); } ssize_t enc_untrusted_send(int sockfd, const void *buf, size_t len, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_sendto, sockfd, buf, len, flags, /*dest_addr=*/nullptr, /*addrlen=*/0); } int enc_untrusted_fcntl(int fd, int cmd, ... /* arg */) { // We do not currently support file locks in Asylo, so arg is not expected to // be a pointer to struct flock. 
int64_t arg = 0; va_list ap; va_start(ap, cmd); arg = va_arg(ap, int64_t); va_end(ap); int klinux_cmd = TokLinuxFcntlCommand(cmd); if (klinux_cmd == -1) { errno = EINVAL; return -1; } int intarg = arg; switch (cmd) { case F_SETFL: { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, TokLinuxFileStatusFlag(intarg)); } case F_SETFD: { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fcntl, fd, klinux_cmd, TokLinuxFDFlag(intarg)); } case F_GETFL: { int retval = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); if (retval != -1) { retval = FromkLinuxFileStatusFlag(retval); } return retval; } case F_GETFD: { int retval = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); if (retval != -1) { retval = FromkLinuxFDFlag(retval); } return retval; } case F_GETPIPE_SZ: case F_SETPIPE_SZ: { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); } // We do not handle the case for F_DUPFD. It is expected to be handled at // a higher abstraction, as we need not exit the enclave for duplicating // the file descriptor. default: { errno = EINVAL; return -1; } } } int enc_untrusted_chown(const char *pathname, uid_t owner, gid_t group) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_chown, pathname, owner, group); } int enc_untrusted_fchown(int fd, uid_t owner, gid_t group) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fchown, fd, owner, group); } int enc_untrusted_setsockopt(int sockfd, int level, int optname, const void *optval, socklen_t optlen) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_setsockopt, sockfd, level, TokLinuxOptionName(level, optname), optval, optlen); } int enc_untrusted_flock(int fd, int operation) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_flock, fd, TokLinuxFLockOperation(operation)); } int enc_untrusted_wait(int *wstatus) { int klinux_wstatus; pid_t ret = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, /*wpid=*/-1, &klinux_wstatus, /*options=*/0, /*rusage=*/nullptr); *wstatus = FromkLinuxToNewlibWstatus(klinux_wstatus); return ret; } int enc_untrusted_inotify_init1(int flags) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_init1, TokLinuxInotifyFlag(flags)); } int enc_untrusted_inotify_add_watch(int fd, const char *pathname, uint32_t mask) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_add_watch, fd, pathname, TokLinuxInotifyEventMask(mask)); } int enc_untrusted_inotify_rm_watch(int fd, int wd) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_rm_watch, fd, wd); } mode_t enc_untrusted_umask(mode_t mask) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_umask, mask); } int enc_untrusted_chmod(const char *path_name, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_chmod, path_name, mode); } int enc_untrusted_fchmod(int fd, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fchmod, fd, mode); } int enc_untrusted_sched_yield() { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_sched_yield); } int enc_untrusted_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) { klinux_cpu_set_t klinux_mask{}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_sched_getaffinity, 
pid, static_cast<uint64_t>(cpusetsize), &klinux_mask); // On success, the raw getaffinity syscall returns the size of the cpumask_t // data type, To mimic the glibc behavior, we return 0 on success and -1 on // failure. See https://linux.die.net/man/2/sched_getaffinity, under "notes". if (result < 0) { return -1; } if (!FromkLinuxCpuSet(&klinux_mask, mask)) { errno = EFAULT; return -1; } return 0; } int enc_untrusted_pread64(int fd, void *buf, size_t count, off_t offset) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_pread64, fd, buf, count, offset); } int enc_untrusted_pwrite64(int fd, const void *buf, size_t count, off_t offset) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_pwrite64, fd, buf, count, offset); } int enc_untrusted_isatty(int fd) { MessageWriter input; input.Push(fd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIsAttyHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_isatty", 2); int result = output.next<int>(); // isatty() returns 1 if fd is an open file descriptor referring to a // terminal; otherwise 0 is returned, and errno is set to indicate the error. if (result == 0) { int klinux_errno = output.next<int>(); errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_usleep(useconds_t usec) { MessageWriter input; input.Push(usec); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kUSleepHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_usleep", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); // usleep() returns 0 on success. On error, -1 is returned, with errno set to // indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_fstat(int fd, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fstat, fd, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_fstatfs(int fd, struct statfs *statbuf) { struct klinux_statfs statfs_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fstatfs, fd, &statfs_kernel); if (FromkLinuxStatFs(&statfs_kernel, statbuf)) { statbuf->f_flags = FromkLinuxStatFsFlags(statfs_kernel.klinux_f_flags); } return result; } int enc_untrusted_lstat(const char *pathname, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_lstat, pathname, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_stat(const char *pathname, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_stat, pathname, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_statfs(const char *pathname, struct statfs *statbuf) { struct klinux_statfs statfs_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_statfs, pathname, &statfs_kernel); if (FromkLinuxStatFs(&statfs_kernel, statbuf)) { statbuf->f_flags = FromkLinuxStatFsFlags(statfs_kernel.klinux_f_flags); } return result; } ssize_t enc_untrusted_getxattr(const char *path, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getxattr, path, name, value, size); } ssize_t enc_untrusted_lgetxattr(const char *path, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lgetxattr, path, name, value, size); } ssize_t enc_untrusted_fgetxattr(int fd, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fgetxattr, fd, name, value, size); } int enc_untrusted_setxattr(const char *path, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_setxattr, path, name, value, size, flags); } int enc_untrusted_lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lsetxattr, path, name, value, size, flags); } int enc_untrusted_fsetxattr(int fd, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fsetxattr, fd, name, value, size, flags); } ssize_t enc_untrusted_listxattr(const char *path, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_listxattr, path, list, size); } ssize_t enc_untrusted_llistxattr(const char *path, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_llistxattr, path, list, size); } ssize_t enc_untrusted_flistxattr(int fd, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_flistxattr, fd, 
list, size); } int64_t enc_untrusted_sysconf(int name) { int kLinux_name = TokLinuxSysconfConstant(name); if (kLinux_name == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push(kLinux_name); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kSysconfHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sysconf", 2); int64_t result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_close(int fd) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_close, fd); } void *enc_untrusted_realloc(void *ptr, size_t size) { MessageWriter input; input.Push(reinterpret_cast<uint64_t>(ptr)); input.Push(static_cast<uint64_t>(size)); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kReallocHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_realloc", 2); void *result = output.next<void *>(); int klinux_errno = output.next<int>(); // realloc only sets the errno (ENOMEM) when output pointer is null and a // non-zero |size| is provided. if (!result && size != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } if (!::asylo::primitives::TrustedPrimitives::IsOutsideEnclave(result, size)) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_realloc: realloc result should be in untrusted " "memory"); } return result; } uint32_t enc_untrusted_sleep(uint32_t seconds) { MessageWriter input; input.Push<uint32_t>(seconds); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher(asylo::host_call::kSleepHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sleep", 2); // Returns sleep's return value directly since it doesn't set errno. return output.next<uint32_t>(); } int enc_untrusted_nanosleep(const struct timespec *req, struct timespec *rem) { struct kLinux_timespec klinux_req; if (!TokLinuxtimespec(req, &klinux_req)) { errno = EINVAL; return -1; } struct kLinux_timespec klinux_rem; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_nanosleep, &klinux_req, &klinux_rem); FromkLinuxtimespec(&klinux_rem, rem); return result; } int enc_untrusted_clock_gettime(clockid_t clk_id, struct timespec *tp) { clockid_t klinux_clk_id = TokLinuxClockId(clk_id); if (klinux_clk_id == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int64_t>(klinux_clk_id); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kClockGettimeHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_clock_gettime", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); struct kLinux_timespec klinux_tp = output.next<struct kLinux_timespec>(); // clock_gettime returns -1 on error and sets the errno. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } FromkLinuxtimespec(&klinux_tp, tp); return result; } int enc_untrusted_clock_getcpuclockid(pid_t pid, clockid_t *clock_id) { MessageWriter input; input.Push<uint32_t>(pid); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kGetCpuClockIdHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getcpuclockid", 2); // clock_getcpuclockid returns an errno value directly, without setting errno. // The value must still be translated in order to be interpreted. int klinux_errno_result = output.next<int32_t>(); if (klinux_errno_result != 0) { return FromkLinuxErrorNumber(klinux_errno_result); } clockid_t klinux_clk_id = output.next<uint64_t>(); *clock_id = FromkLinuxClockId(klinux_clk_id); return 0; } int enc_untrusted_bind(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { socklen_t klinux_sock_len = std::max(std::max(sizeof(klinux_sockaddr_un), sizeof(klinux_sockaddr_in)), sizeof(klinux_sockaddr_in6)); auto klinux_sock = absl::make_unique<char[]>(klinux_sock_len); if (!TokLinuxSockAddr(addr, addrlen, reinterpret_cast<klinux_sockaddr *>(klinux_sock.get()), &klinux_sock_len, TrustedPrimitives::BestEffortAbort)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_bind, sockfd, klinux_sock.get(), klinux_sock_len); } int enc_untrusted_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { socklen_t klinux_sock_len = std::max(std::max(sizeof(klinux_sockaddr_un), sizeof(klinux_sockaddr_in)), sizeof(klinux_sockaddr_in6)); auto klinux_sock = absl::make_unique<char[]>(klinux_sock_len); if (!TokLinuxSockAddr(addr, addrlen, reinterpret_cast<klinux_sockaddr *>(klinux_sock.get()), &klinux_sock_len, TrustedPrimitives::BestEffortAbort)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_connect, sockfd, klinux_sock.get(), klinux_sock_len); } ssize_t enc_untrusted_sendmsg(int sockfd, const struct msghdr *msg, int flags) { size_t total_message_size = CalculateTotalMessageSize(msg); std::unique_ptr<char[]> msg_iov_buffer(new char[total_message_size]); size_t copied_bytes = 0; for (int i = 0; i < msg->msg_iovlen; ++i) { memcpy(msg_iov_buffer.get() + copied_bytes, msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); copied_bytes += msg->msg_iov[i].iov_len; } MessageWriter input; input.Push(sockfd); input.PushByReference(Extent{msg->msg_name, msg->msg_namelen}); input.PushByReference(Extent{msg_iov_buffer.get(), total_message_size}); input.PushByReference(Extent{msg->msg_control, msg->msg_controllen}); input.Push(msg->msg_flags); input.Push(flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSendMsgHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sendmsg", 2); ssize_t result = output.next<ssize_t>(); int klinux_errno = output.next<int>(); // sendmsg() returns the number of characters sent. On error, -1 is returned, // with errno set to indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } ssize_t enc_untrusted_recvmsg(int sockfd, struct msghdr *msg, int flags) { size_t total_buffer_size = CalculateTotalMessageSize(msg); MessageWriter input; input.Push(sockfd); input.Push<uint64_t>(msg->msg_namelen); input.Push<uint64_t>(total_buffer_size); input.Push<uint64_t>(msg->msg_controllen); input.Push(msg->msg_flags); input.Push(flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kRecvMsgHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_recvmsg", 2, /*match_exact_params=*/false); ssize_t result = output.next<ssize_t>(); int klinux_errno = output.next<int>(); // recvmsg() returns the number of characters received. On error, -1 is // returned, with errno set to indicate the cause of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (result > total_buffer_size) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_recvmsg: result exceeds requested"); } auto msg_name_extent = output.next(); // The returned |msg_namelen| should not exceed the buffer size. if (msg_name_extent.size() <= msg->msg_namelen) { msg->msg_namelen = msg_name_extent.size(); } memcpy(msg->msg_name, msg_name_extent.As<char>(), msg->msg_namelen); // A single buffer is passed from the untrusted side, copy it into the // scattered buffers inside the enclave. auto msg_iov_extent = output.next(); size_t total_bytes = msg_iov_extent.size(); size_t bytes_copied = 0; for (int i = 0; i < msg->msg_iovlen && bytes_copied < total_bytes; ++i) { size_t bytes_to_copy = std::min(msg->msg_iov[i].iov_len, total_bytes - bytes_copied); memcpy(msg->msg_iov[i].iov_base, msg_iov_extent.As<char>() + bytes_copied, bytes_to_copy); bytes_copied += bytes_to_copy; } auto msg_control_extent = output.next(); // The returned |msg_controllen| should not exceed the buffer size. if (msg_control_extent.size() <= msg->msg_controllen) { msg->msg_controllen = msg_control_extent.size(); } memcpy(msg->msg_control, msg_control_extent.As<char>(), msg->msg_controllen); return result; } int enc_untrusted_getsockname(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { if (!addr || !addrlen) { errno = EFAULT; return -1; } // Guard against -1 being passed as addrlen even though it's unsigned. if (*addrlen == 0 || *addrlen > INT32_MAX) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetSocknameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getsockname", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // getsockname() returns 0 on success. On error, -1 is returned, with errno // set to indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); if (!FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort)) { errno = EFAULT; return -1; } return result; } int enc_untrusted_accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kAcceptHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_accept", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // accept() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort); return result; } int enc_untrusted_getpeername(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { if (!addr || !addrlen) { errno = EFAULT; return -1; } // Guard against -1 being passed as addrlen even though it's unsigned. if (*addrlen == 0 || *addrlen > INT32_MAX) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetPeernameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getpeername", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // getpeername() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort); return result; } ssize_t enc_untrusted_recvfrom(int sockfd, void *buf, size_t len, int flags, struct sockaddr *src_addr, socklen_t *addrlen) { int klinux_flags = TokLinuxRecvSendFlag(flags); if (klinux_flags == 0 && flags != 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); input.Push<uint64_t>(len); input.Push<int>(klinux_flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kRecvFromHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_recvfrom", 4); int result = output.next<int>(); int klinux_errno = output.next<int>(); // recvfrom() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (result > len) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_recvfrom: result exceeds requested"); } auto buffer_received = output.next(); memcpy(buf, buffer_received.data(), std::min(len, buffer_received.size())); // If |src_addr| is not NULL, and the underlying protocol provides the source // address, this source address is filled in. When |src_addr| is NULL, nothing // is filled in; in this case, |addrlen| is not used, and should also be NULL. 
if (src_addr != nullptr && addrlen != nullptr) { auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), src_addr, addrlen, TrustedPrimitives::BestEffortAbort); } return result; } int enc_untrusted_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout) { struct klinux_fd_set klinux_readfds, klinux_writefds, klinux_exceptfds; struct kLinux_timeval klinux_timeout; TokLinuxFdSet(readfds, &klinux_readfds); TokLinuxFdSet(writefds, &klinux_writefds); TokLinuxFdSet(exceptfds, &klinux_exceptfds); TokLinuxtimeval(timeout, &klinux_timeout); int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_select, nfds, &klinux_readfds, &klinux_writefds, &klinux_exceptfds, &klinux_timeout); FromkLinuxFdSet(&klinux_readfds, readfds); FromkLinuxFdSet(&klinux_writefds, writefds); FromkLinuxFdSet(&klinux_exceptfds, exceptfds); return result; } int enc_untrusted_gettimeofday(struct timeval *tv, struct timezone *tz) { struct kLinux_timeval ktv; TokLinuxtimeval(tv, &ktv); // We do not convert timezone to a klinux value since this struct is expected // to be identical across enclave boundary. Besides, the use of the timezone // structure is obsolete; the tz argument should normally be specified as // NULL. int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_gettimeofday, &ktv, tz); FromkLinuxtimeval(&ktv, tv); return result; } int enc_untrusted_fsync(int fd) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fsync, fd); } int enc_untrusted_raise(int sig) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig < 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(klinux_sig); MessageReader output; const auto status = NonSystemCallDispatcher(::asylo::host_call::kRaiseHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_raise", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_getsockopt(int sockfd, int level, int optname, void *optval, socklen_t *optlen) { if (!optval || !optlen || *optlen == 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); input.Push<int>(level); input.Push<int>(TokLinuxOptionName(level, optname)); input.PushByReference(Extent{reinterpret_cast<char *>(optval), *optlen}); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetSockOptHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getsockopt", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); Extent opt_received = output.next(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } // The returned |optlen| should not exceed the buffer size. 
if (opt_received.size() <= *optlen) { *optlen = opt_received.size(); } memcpy(optval, opt_received.data(), *optlen); return result; } int enc_untrusted_getitimer(int which, struct itimerval *curr_value) { struct klinux_itimerval klinux_curr_value {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_getitimer, TokLinuxItimerType(which), &klinux_curr_value); if (!curr_value || !FromkLinuxItimerval(&klinux_curr_value, curr_value)) { errno = EFAULT; return -1; } return result; } int enc_untrusted_setitimer(int which, const struct itimerval *new_value, struct itimerval *old_value) { struct klinux_itimerval klinux_new_value {}; struct klinux_itimerval klinux_old_value {}; if (!TokLinuxItimerval(new_value, &klinux_new_value)) { errno = EFAULT; return -1; } int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_setitimer, TokLinuxItimerType(which), &klinux_new_value, &klinux_old_value); if (old_value != nullptr && !FromkLinuxItimerval(&klinux_old_value, old_value)) { errno = EFAULT; return -1; } return result; } clock_t enc_untrusted_times(struct tms *buf) { struct kLinux_tms klinux_buf {}; int64_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_times, &klinux_buf); if (!FromkLinuxtms(&klinux_buf, buf)) { errno = EFAULT; return -1; } return static_cast<clock_t>(result); } int enc_untrusted_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res) { MessageWriter input; input.PushByReference(Extent{node, (node != nullptr) ? strlen(node) + 1 : 0}); input.PushByReference( Extent{service, (service != nullptr) ? strlen(service) + 1 : 0}); if (hints != nullptr) { input.Push<int>(TokLinuxAddressInfoFlag(hints->ai_flags)); input.Push<int>(TokLinuxAfFamily(hints->ai_family)); input.Push<int>(TokLinuxSocketType(hints->ai_socktype)); input.Push<int>(hints->ai_protocol); } MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetAddrInfoHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getaddrinfo", 3, /*match_exact_params=*/false); int klinux_ret = output.next<int>(); int klinux_errno = output.next<int>(); int ret = FromkLinuxAddressInfoError(klinux_ret); if (ret != 0) { if (ret == EAI_SYSTEM) { errno = FromkLinuxErrorNumber(klinux_errno); } return ret; } if (!asylo::host_call::DeserializeAddrinfo( &output, res, TrustedPrimitives::BestEffortAbort)) { TrustedPrimitives::DebugPuts( "enc_untrusted_getaddrinfo: Invalid addrinfo in response."); return -1; } return 0; } void enc_freeaddrinfo(struct addrinfo *res) { struct addrinfo *prev_info = nullptr; for (struct addrinfo *info = res; info != nullptr; info = info->ai_next) { if (prev_info) free(prev_info); if (info->ai_addr) free(info->ai_addr); if (info->ai_canonname) free(info->ai_canonname); prev_info = info; } if (prev_info) free(prev_info); } int enc_untrusted_poll(struct pollfd *fds, nfds_t nfds, int timeout) { auto klinux_fds = absl::make_unique<struct klinux_pollfd[]>(nfds); for (int i = 0; i < nfds; ++i) { if (!TokLinuxPollfd(&fds[i], &klinux_fds[i])) { errno = EFAULT; return -1; } } int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_poll, klinux_fds.get(), static_cast<uint64_t>(nfds), timeout); if (result < 0) { return result; } for (int i = 0; i < nfds; ++i) { if (!FromkLinuxPollfd(&klinux_fds[i], &fds[i])) { errno = EFAULT; return -1; } } return result; } int enc_untrusted_epoll_create(int size) { return EnsureInitializedAndDispatchSyscall( 
asylo::system_call::kSYS_epoll_create, size); } int enc_untrusted_utimes(const char *filename, const struct timeval times[2]) { struct kLinux_timeval klinux_times[2]; if (!TokLinuxtimeval(&times[0], &klinux_times[0]) || !TokLinuxtimeval(&times[1], &klinux_times[1])) { errno = EBADE; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_utimes, filename, klinux_times); } int enc_untrusted_utime(const char *filename, const struct utimbuf *times) { struct kLinux_utimbuf klinux_times {}; // We do not check the return value of the conversion function since utimbuf // is allowed to be null. TokLinuxutimbuf(times, &klinux_times); return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_utime, filename, &klinux_times); } int enc_untrusted_inet_pton(int af, const char *src, void *dst) { if (!src || !dst) { return 0; } MessageWriter input; input.Push<int>(TokLinuxAfFamily(af)); input.PushByReference(Extent{ src, std::min(strlen(src) + 1, static_cast<size_t>(INET6_ADDRSTRLEN))}); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInetPtonHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inet_pton", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } auto klinux_addr_buffer = output.next(); size_t max_size = 0; if (af == AF_INET) { if (klinux_addr_buffer.size() != sizeof(klinux_in_addr)) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_inet_pton: unexpected output size"); } max_size = sizeof(struct in_addr); } else if (af == AF_INET6) { if (klinux_addr_buffer.size() != sizeof(klinux_in6_addr)) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_inet_pton: unexpected output size"); } max_size = sizeof(struct in6_addr); } memcpy(dst, klinux_addr_buffer.data(), std::min(klinux_addr_buffer.size(), max_size)); return result; } const char *enc_untrusted_inet_ntop(int af, const void *src, char *dst, socklen_t size) { if (!src || !dst) { errno = EFAULT; return nullptr; } size_t src_size = 0; if (af == AF_INET) { src_size = sizeof(struct in_addr); } else if (af == AF_INET6) { src_size = sizeof(struct in6_addr); } else { errno = EAFNOSUPPORT; return nullptr; } MessageWriter input; input.Push<int>(TokLinuxAfFamily(af)); input.PushByReference(Extent{reinterpret_cast<const char *>(src), src_size}); input.Push(size); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInetNtopHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inet_ntop", 2); auto result = output.next(); int klinux_errno = output.next<int>(); if (result.empty()) { errno = FromkLinuxErrorNumber(klinux_errno); return nullptr; } memcpy( dst, result.data(), std::min({static_cast<size_t>(size), static_cast<size_t>(result.size()), static_cast<size_t>(INET6_ADDRSTRLEN)})); return dst; } int enc_untrusted_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) { klinux_sigset_t klinux_set; if (!TokLinuxSigset(set, &klinux_set)) { errno = EINVAL; return -1; } int klinux_how = TokLinuxSigMaskAction(how); if (klinux_how == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(klinux_how); input.Push<klinux_sigset_t>(klinux_set); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSigprocmaskHandler, &input, &output); CheckStatusAndParamCount(status, output, 
"enc_untrusted_sigprocmask", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // sigprocmask() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } klinux_sigset_t klinux_oldset = output.next<klinux_sigset_t>(); if (oldset != nullptr) { if (!FromkLinuxSigset(&klinux_oldset, oldset)) { errno = EINVAL; return -1; } } return result; } unsigned int enc_untrusted_if_nametoindex(const char *ifname) { MessageWriter input; input.PushString(ifname); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIfNameToIndexHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_nametoindex", 2); auto result = output.next<unsigned int>(); int klinux_errno = output.next<int>(); if (result == 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } char *enc_untrusted_if_indextoname(unsigned int ifindex, char *ifname) { if (!ifname) { return nullptr; } MessageWriter input; input.Push(ifindex); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIfIndexToNameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_indextoname", 2); Extent ifname_buffer = output.next(); memcpy(ifname, ifname_buffer.As<char>(), std::min(ifname_buffer.size(), static_cast<size_t>(IF_NAMESIZE))); int klinux_errno = output.next<int>(); if (ifname_buffer.empty()) { errno = FromkLinuxErrorNumber(klinux_errno); } return ifname; } int enc_untrusted_epoll_ctl(int epfd, int op, int fd, struct epoll_event *event) { struct klinux_epoll_event klinux_event_tmp {}; if (event != nullptr && !TokLinuxEpollEvent(event, &klinux_event_tmp)) { errno = EINVAL; return -1; } int klinux_op = TokLinuxEpollCtlOp(op); if (klinux_op == 0) { errno = EINVAL; return -1; } struct klinux_epoll_event *klinux_event = (event != nullptr) ? &klinux_event_tmp : nullptr; return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_epoll_ctl, epfd, klinux_op, fd, klinux_event); } int enc_untrusted_epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout) { if (maxevents <= 0) { errno = EINVAL; return -1; } auto klinux_events = absl::make_unique<struct klinux_epoll_event[]>(maxevents); int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_epoll_wait, epfd, klinux_events.get(), maxevents, timeout); // Only process epoll events if syscall was successful. if (result == -1) { // errno is already set by the system_call library at this point for a // return value of -1. 
return result; } if (result > maxevents) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_epoll_wait: result found to be greater than maxevents " "supplied."); } for (int i = 0; i < result; i++) { if (!FromkLinuxEpollEvent(&klinux_events.get()[i], &events[i])) { errno = EBADE; return -1; } } return result; } int enc_untrusted_getifaddrs(struct ifaddrs **ifap) { MessageWriter input; MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetIfAddrsHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getifaddrs", 3, /*match_exact_params=*/false); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (!asylo::host_call::DeserializeIfAddrs( &output, ifap, TrustedPrimitives::BestEffortAbort)) { TrustedPrimitives::DebugPuts( "enc_untrusted_getifaddrs: Invalid ifaddrs in response."); return -1; } return 0; } void enc_freeifaddrs(struct ifaddrs *ifa) { asylo::host_call::FreeDeserializedIfAddrs(ifa); } int enc_untrusted_getrusage(int who, struct rusage *usage) { struct klinux_rusage klinux_usage {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_getrusage, TokLinuxRusageTarget(who), &klinux_usage); if (result != -1) { if (!FromkLinuxRusage(&klinux_usage, usage)) { errno = EINVAL; return -1; } } return result; } pid_t enc_untrusted_wait3(int *status, int options, struct rusage *rusage) { int klinux_status; struct klinux_rusage klinux_usage; pid_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, /*pid=*/-1, &klinux_status, TokLinuxWaitOption(options), &klinux_usage); if (status) { *status = FromkLinuxToNewlibWstatus(klinux_status); } if (rusage) { if (!FromkLinuxRusage(&klinux_usage, rusage)) { errno = EINVAL; return -1; } } return result; } pid_t enc_untrusted_waitpid(pid_t pid, int *status, int options) { int klinux_status; pid_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, pid, &klinux_status, TokLinuxWaitOption(options), /*rusage=*/nullptr); if (status) { *status = FromkLinuxToNewlibWstatus(klinux_status); } return result; } int enc_untrusted_uname(struct utsname *buf) { struct klinux_utsname klinux_buf {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_uname, &klinux_buf); if (result != 0) { return result; } if (!FromkLinuxUtsName(&klinux_buf, buf)) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_uname: Returned an ill-formed utsname."); } return 0; } struct passwd *enc_untrusted_getpwuid(uid_t uid) { MessageWriter input; MessageReader output; input.Push<uid_t>(uid); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetPwUidHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getpwuid", 1, /*match_exact_params=*/false); int klinux_errno = output.next<int>(); if (output.size() == 1) { errno = FromkLinuxErrorNumber(klinux_errno); return nullptr; } // Store the struct passwd members in a static passwd_holder, and direct the // pointers in global_passwd to those members. 
static struct passwd_holder passwd_buffers; if (!DeserializePasswd(&output, &passwd_buffers) || !PasswdHolderToPasswd(&passwd_buffers, &global_passwd)) { errno = EFAULT; return nullptr; } return &global_passwd; } void enc_untrusted_hex_dump(const void *buf, size_t nbytes) { MessageWriter input; MessageReader output; input.PushByReference(Extent{reinterpret_cast<const char *>(buf), nbytes}); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kHexDumpHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_hex_dump", 2); } void enc_untrusted_syslog(int priority, const char *message, int len) { EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_syslog, TokLinuxSyslogPriority(priority), message, len); } void enc_untrusted_openlog(const char *ident, int option, int facility) { MessageWriter input; MessageReader output; input.PushString(ident); input.Push<int>(TokLinuxSyslogOption(option)); input.Push<int>(TokLinuxSyslogFacility(facility)); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kOpenLogHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_openlog", 1); } int enc_untrusted_inotify_read(int fd, size_t count, char **serialized_events, size_t *serialized_events_len) { MessageWriter input; MessageReader output; input.Push<int>(fd); input.Push<uint64_t>(count); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInotifyReadHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inotify_read", 2, /*match_exact_params=*/false); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } Extent serialized_buf = output.next(); *serialized_events_len = serialized_buf.size(); // The caller to this host call owns memory pointed by |*serialized_events|. *serialized_events = reinterpret_cast<char *>(malloc(*serialized_events_len)); if (!serialized_events) { errno = ENOMEM; return -1; } memcpy(*serialized_events, serialized_buf.As<char>(), *serialized_events_len); return result; } int enc_untrusted_ioctl1(int fd, uint64_t request) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_ioctl, fd, request); } } // extern "C"
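In the code_before above, enc_untrusted_read hands back whatever length the untrusted host reports, so a hostile host can claim more bytes than the enclave-side buffer holds, and callers that trust the count then read out of bounds (CWE-125). The code_after that follows aborts when `ret > count`, mirroring the `result > total_buffer_size` and `result > len` clamps already present in enc_untrusted_recvmsg and enc_untrusted_recvfrom. Below is a reduced C sketch of that trust boundary; `untrusted_host_read` is a hypothetical stand-in for the real syscall dispatch, not an Asylo API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the host dispatch: a malicious host can
 * report having read more bytes than the buffer it was handed. */
static long untrusted_host_read(int fd, void *buf, size_t count)
{
    (void)fd;
    memset(buf, 'A', count);
    return (long)count + 4096;         /* lies about the length */
}

/* Trusted wrapper: a result larger than the request is impossible for
 * a correct host, so treat it as an attack and abort, as the patched
 * enc_untrusted_read does. */
static long checked_read(int fd, void *buf, size_t count)
{
    long ret = untrusted_host_read(fd, buf, count);
    if (ret != -1 && (size_t)ret > count) {
        fputs("read result exceeds requested; aborting\n", stderr);
        abort();
    }
    return ret;
}

int main(void)
{
    char buf[16];
    checked_read(0, buf, sizeof buf);  /* the lying host trips the abort */
    return 0;
}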
/* * * Copyright 2019 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "asylo/platform/host_call/trusted/host_calls.h" #include <errno.h> #include <ifaddrs.h> #include <net/if.h> #include <netdb.h> #include <signal.h> #include <sys/statfs.h> #include <algorithm> #include "asylo/platform/host_call/exit_handler_constants.h" #include "asylo/platform/host_call/serializer_functions.h" #include "asylo/platform/primitives/trusted_primitives.h" #include "asylo/platform/system_call/type_conversions/types_functions.h" using ::asylo::host_call::NonSystemCallDispatcher; using ::asylo::primitives::Extent; using ::asylo::primitives::MessageReader; using ::asylo::primitives::MessageWriter; using ::asylo::primitives::TrustedPrimitives; void CheckStatusAndParamCount(const asylo::primitives::PrimitiveStatus &status, const MessageReader &output, const char *name, int expected_params, bool match_exact_params) { if (!status.ok()) { std::string message = absl::StrCat("Host call '", name, "' failed."); TrustedPrimitives::BestEffortAbort(message.c_str()); } if (!match_exact_params) { if (output.size() < expected_params) { std::string message = absl::StrCat( "Host call '", name, "': Expected at least ", expected_params, " parameters on the MessageReader, found ", output.size()); TrustedPrimitives::BestEffortAbort(message.c_str()); } } else { if (output.size() != expected_params) { std::string message = absl::StrCat( "Host call '", name, "': Expected ", expected_params, " parameters on the MessageReader, found ", output.size()); TrustedPrimitives::BestEffortAbort(message.c_str()); } } } namespace { // A global passwd struct. The address of it is used as the return value of // getpwuid. struct passwd global_passwd; size_t CalculateTotalMessageSize(const struct msghdr *msg) { size_t total_message_size = 0; for (int i = 0; i < msg->msg_iovlen; ++i) { total_message_size += msg->msg_iov[i].iov_len; } return total_message_size; } #define PASSWD_HOLDER_FIELD_LENGTH 1024 // Struct for storing the buffers needed by struct passwd members. 
struct passwd_holder { char pw_name[PASSWD_HOLDER_FIELD_LENGTH]; char pw_passwd[PASSWD_HOLDER_FIELD_LENGTH]; uid_t pw_uid; gid_t pw_gid; char pw_gecos[PASSWD_HOLDER_FIELD_LENGTH]; char pw_dir[PASSWD_HOLDER_FIELD_LENGTH]; char pw_shell[PASSWD_HOLDER_FIELD_LENGTH]; }; bool DeserializePasswd(MessageReader *reader, struct passwd_holder *passwd_buffers) { if (!reader || !passwd_buffers) { return false; } if (reader->size() < 7) { return false; } auto pw_name_buf = reader->next(); auto pw_passwd_buf = reader->next(); auto pw_uid = reader->next<uid_t>(); auto pw_gid = reader->next<gid_t>(); auto pw_gecos_buf = reader->next(); auto pw_dir_buf = reader->next(); auto pw_shell_buf = reader->next(); strncpy(passwd_buffers->pw_name, pw_name_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_name), pw_name_buf.size())); strncpy(passwd_buffers->pw_passwd, pw_passwd_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_passwd), pw_passwd_buf.size())); passwd_buffers->pw_uid = pw_uid; passwd_buffers->pw_gid = pw_gid; strncpy(passwd_buffers->pw_gecos, pw_gecos_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_gecos), pw_gecos_buf.size())); strncpy(passwd_buffers->pw_dir, pw_dir_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_dir), pw_dir_buf.size())); strncpy(passwd_buffers->pw_shell, pw_shell_buf.As<char>(), std::min(sizeof(passwd_buffers->pw_shell), pw_shell_buf.size())); return true; } bool PasswdHolderToPasswd(struct passwd_holder *passwd_in, struct passwd *passwd_out) { if (!passwd_in || !passwd_out) { return false; } passwd_out->pw_name = passwd_in->pw_name; passwd_out->pw_passwd = passwd_in->pw_passwd; passwd_out->pw_uid = passwd_in->pw_uid; passwd_out->pw_gid = passwd_in->pw_gid; passwd_out->pw_gecos = passwd_in->pw_gecos; passwd_out->pw_dir = passwd_in->pw_dir; passwd_out->pw_shell = passwd_in->pw_shell; return true; } } // namespace extern "C" { int enc_untrusted_access(const char *path_name, int mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_access, path_name, mode); } pid_t enc_untrusted_getpid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getpid); } pid_t enc_untrusted_getppid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getppid); } pid_t enc_untrusted_setsid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_setsid); } uid_t enc_untrusted_getuid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getuid); } gid_t enc_untrusted_getgid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getgid); } uid_t enc_untrusted_geteuid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_geteuid); } gid_t enc_untrusted_getegid() { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getegid); } int enc_untrusted_kill(pid_t pid, int sig) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig < 0) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_kill, pid, klinux_sig); } int enc_untrusted_link(const char *oldpath, const char *newpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_link, oldpath, newpath); } off_t enc_untrusted_lseek(int fd, off_t offset, int whence) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lseek, fd, offset, whence); } int enc_untrusted_mkdir(const char *pathname, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_mkdir, pathname, mode); } int enc_untrusted_open(const char 
*pathname, int flags, ...) { int mode = 0; if (flags & O_CREAT) { va_list ap; va_start(ap, flags); mode = va_arg(ap, mode_t); va_end(ap); } return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_open, pathname, TokLinuxFileStatusFlag(flags), TokLinuxFileModeFlag(mode)); } int enc_untrusted_unlink(const char *pathname) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_unlink, pathname); } int enc_untrusted_rename(const char *oldpath, const char *newpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_rename, oldpath, newpath); } ssize_t enc_untrusted_read(int fd, void *buf, size_t count) { ssize_t ret = static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_read, fd, buf, count)); if (ret != -1 && ret > count) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_read: read result exceeds requested"); } return ret; } ssize_t enc_untrusted_write(int fd, const void *buf, size_t count) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_write, fd, buf, count)); } int enc_untrusted_symlink(const char *target, const char *linkpath) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_symlink, target, linkpath); } ssize_t enc_untrusted_readlink(const char *pathname, char *buf, size_t bufsiz) { return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_readlink, pathname, buf, bufsiz)); } int enc_untrusted_truncate(const char *path, off_t length) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_truncate, path, length); } int enc_untrusted_ftruncate(int fd, off_t length) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_ftruncate, fd, length); } int enc_untrusted_rmdir(const char *path) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_rmdir, path); } int enc_untrusted_pipe2(int pipefd[2], int flags) { if (flags & ~(O_CLOEXEC | O_DIRECT | O_NONBLOCK)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_pipe2, pipefd, TokLinuxFileStatusFlag(flags)); } int enc_untrusted_socket(int domain, int type, int protocol) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_socket, TokLinuxAfFamily(domain), TokLinuxSocketType(type), protocol); } int enc_untrusted_listen(int sockfd, int backlog) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_listen, sockfd, backlog); } int enc_untrusted_shutdown(int sockfd, int how) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_shutdown, sockfd, how); } ssize_t enc_untrusted_send(int sockfd, const void *buf, size_t len, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_sendto, sockfd, buf, len, flags, /*dest_addr=*/nullptr, /*addrlen=*/0); } int enc_untrusted_fcntl(int fd, int cmd, ... /* arg */) { // We do not currently support file locks in Asylo, so arg is not expected to // be a pointer to struct flock. 
int64_t arg = 0; va_list ap; va_start(ap, cmd); arg = va_arg(ap, int64_t); va_end(ap); int klinux_cmd = TokLinuxFcntlCommand(cmd); if (klinux_cmd == -1) { errno = EINVAL; return -1; } int intarg = arg; switch (cmd) { case F_SETFL: { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, TokLinuxFileStatusFlag(intarg)); } case F_SETFD: { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fcntl, fd, klinux_cmd, TokLinuxFDFlag(intarg)); } case F_GETFL: { int retval = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); if (retval != -1) { retval = FromkLinuxFileStatusFlag(retval); } return retval; } case F_GETFD: { int retval = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); if (retval != -1) { retval = FromkLinuxFDFlag(retval); } return retval; } case F_GETPIPE_SZ: case F_SETPIPE_SZ: { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fcntl, fd, klinux_cmd, arg); } // We do not handle the case for F_DUPFD. It is expected to be handled at // a higher abstraction, as we need not exit the enclave for duplicating // the file descriptor. default: { errno = EINVAL; return -1; } } } int enc_untrusted_chown(const char *pathname, uid_t owner, gid_t group) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_chown, pathname, owner, group); } int enc_untrusted_fchown(int fd, uid_t owner, gid_t group) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fchown, fd, owner, group); } int enc_untrusted_setsockopt(int sockfd, int level, int optname, const void *optval, socklen_t optlen) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_setsockopt, sockfd, level, TokLinuxOptionName(level, optname), optval, optlen); } int enc_untrusted_flock(int fd, int operation) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_flock, fd, TokLinuxFLockOperation(operation)); } int enc_untrusted_wait(int *wstatus) { int klinux_wstatus; pid_t ret = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, /*wpid=*/-1, &klinux_wstatus, /*options=*/0, /*rusage=*/nullptr); *wstatus = FromkLinuxToNewlibWstatus(klinux_wstatus); return ret; } int enc_untrusted_inotify_init1(int flags) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_init1, TokLinuxInotifyFlag(flags)); } int enc_untrusted_inotify_add_watch(int fd, const char *pathname, uint32_t mask) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_add_watch, fd, pathname, TokLinuxInotifyEventMask(mask)); } int enc_untrusted_inotify_rm_watch(int fd, int wd) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_inotify_rm_watch, fd, wd); } mode_t enc_untrusted_umask(mode_t mask) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_umask, mask); } int enc_untrusted_chmod(const char *path_name, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_chmod, path_name, mode); } int enc_untrusted_fchmod(int fd, mode_t mode) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fchmod, fd, mode); } int enc_untrusted_sched_yield() { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_sched_yield); } int enc_untrusted_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask) { klinux_cpu_set_t klinux_mask{}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_sched_getaffinity, 
pid, static_cast<uint64_t>(cpusetsize), &klinux_mask); // On success, the raw getaffinity syscall returns the size of the cpumask_t // data type, To mimic the glibc behavior, we return 0 on success and -1 on // failure. See https://linux.die.net/man/2/sched_getaffinity, under "notes". if (result < 0) { return -1; } if (!FromkLinuxCpuSet(&klinux_mask, mask)) { errno = EFAULT; return -1; } return 0; } int enc_untrusted_pread64(int fd, void *buf, size_t count, off_t offset) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_pread64, fd, buf, count, offset); } int enc_untrusted_pwrite64(int fd, const void *buf, size_t count, off_t offset) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_pwrite64, fd, buf, count, offset); } int enc_untrusted_isatty(int fd) { MessageWriter input; input.Push(fd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIsAttyHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_isatty", 2); int result = output.next<int>(); // isatty() returns 1 if fd is an open file descriptor referring to a // terminal; otherwise 0 is returned, and errno is set to indicate the error. if (result == 0) { int klinux_errno = output.next<int>(); errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_usleep(useconds_t usec) { MessageWriter input; input.Push(usec); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kUSleepHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_usleep", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); // usleep() returns 0 on success. On error, -1 is returned, with errno set to // indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_fstat(int fd, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fstat, fd, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_fstatfs(int fd, struct statfs *statbuf) { struct klinux_statfs statfs_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_fstatfs, fd, &statfs_kernel); if (FromkLinuxStatFs(&statfs_kernel, statbuf)) { statbuf->f_flags = FromkLinuxStatFsFlags(statfs_kernel.klinux_f_flags); } return result; } int enc_untrusted_lstat(const char *pathname, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_lstat, pathname, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_stat(const char *pathname, struct stat *statbuf) { struct klinux_stat stat_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_stat, pathname, &stat_kernel); if (FromkLinuxStat(&stat_kernel, statbuf)) { statbuf->st_mode = FromkLinuxFileModeFlag(stat_kernel.klinux_st_mode); } return result; } int enc_untrusted_statfs(const char *pathname, struct statfs *statbuf) { struct klinux_statfs statfs_kernel; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_statfs, pathname, &statfs_kernel); if (FromkLinuxStatFs(&statfs_kernel, statbuf)) { statbuf->f_flags = FromkLinuxStatFsFlags(statfs_kernel.klinux_f_flags); } return result; } ssize_t enc_untrusted_getxattr(const char *path, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_getxattr, path, name, value, size); } ssize_t enc_untrusted_lgetxattr(const char *path, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lgetxattr, path, name, value, size); } ssize_t enc_untrusted_fgetxattr(int fd, const char *name, void *value, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fgetxattr, fd, name, value, size); } int enc_untrusted_setxattr(const char *path, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_setxattr, path, name, value, size, flags); } int enc_untrusted_lsetxattr(const char *path, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_lsetxattr, path, name, value, size, flags); } int enc_untrusted_fsetxattr(int fd, const char *name, const void *value, size_t size, int flags) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fsetxattr, fd, name, value, size, flags); } ssize_t enc_untrusted_listxattr(const char *path, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_listxattr, path, list, size); } ssize_t enc_untrusted_llistxattr(const char *path, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_llistxattr, path, list, size); } ssize_t enc_untrusted_flistxattr(int fd, char *list, size_t size) { return EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_flistxattr, fd, 
list, size); } int64_t enc_untrusted_sysconf(int name) { int kLinux_name = TokLinuxSysconfConstant(name); if (kLinux_name == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push(kLinux_name); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kSysconfHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sysconf", 2); int64_t result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_close(int fd) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_close, fd); } void *enc_untrusted_realloc(void *ptr, size_t size) { MessageWriter input; input.Push(reinterpret_cast<uint64_t>(ptr)); input.Push(static_cast<uint64_t>(size)); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kReallocHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_realloc", 2); void *result = output.next<void *>(); int klinux_errno = output.next<int>(); // realloc only sets the errno (ENOMEM) when output pointer is null and a // non-zero |size| is provided. if (!result && size != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } if (!::asylo::primitives::TrustedPrimitives::IsOutsideEnclave(result, size)) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_realloc: realloc result should be in untrusted " "memory"); } return result; } uint32_t enc_untrusted_sleep(uint32_t seconds) { MessageWriter input; input.Push<uint32_t>(seconds); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher(asylo::host_call::kSleepHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sleep", 2); // Returns sleep's return value directly since it doesn't set errno. return output.next<uint32_t>(); } int enc_untrusted_nanosleep(const struct timespec *req, struct timespec *rem) { struct kLinux_timespec klinux_req; if (!TokLinuxtimespec(req, &klinux_req)) { errno = EINVAL; return -1; } struct kLinux_timespec klinux_rem; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_nanosleep, &klinux_req, &klinux_rem); FromkLinuxtimespec(&klinux_rem, rem); return result; } int enc_untrusted_clock_gettime(clockid_t clk_id, struct timespec *tp) { clockid_t klinux_clk_id = TokLinuxClockId(clk_id); if (klinux_clk_id == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int64_t>(klinux_clk_id); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kClockGettimeHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_clock_gettime", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); struct kLinux_timespec klinux_tp = output.next<struct kLinux_timespec>(); // clock_gettime returns -1 on error and sets the errno. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } FromkLinuxtimespec(&klinux_tp, tp); return result; } int enc_untrusted_clock_getcpuclockid(pid_t pid, clockid_t *clock_id) { MessageWriter input; input.Push<uint32_t>(pid); MessageReader output; asylo::primitives::PrimitiveStatus status = asylo::host_call::NonSystemCallDispatcher( asylo::host_call::kGetCpuClockIdHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getcpuclockid", 2); // clock_getcpuclockid returns an errno value directly, without setting errno. // The value must still be translated in order to be interpreted. int klinux_errno_result = output.next<int32_t>(); if (klinux_errno_result != 0) { return FromkLinuxErrorNumber(klinux_errno_result); } clockid_t klinux_clk_id = output.next<uint64_t>(); *clock_id = FromkLinuxClockId(klinux_clk_id); return 0; } int enc_untrusted_bind(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { socklen_t klinux_sock_len = std::max(std::max(sizeof(klinux_sockaddr_un), sizeof(klinux_sockaddr_in)), sizeof(klinux_sockaddr_in6)); auto klinux_sock = absl::make_unique<char[]>(klinux_sock_len); if (!TokLinuxSockAddr(addr, addrlen, reinterpret_cast<klinux_sockaddr *>(klinux_sock.get()), &klinux_sock_len, TrustedPrimitives::BestEffortAbort)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_bind, sockfd, klinux_sock.get(), klinux_sock_len); } int enc_untrusted_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) { socklen_t klinux_sock_len = std::max(std::max(sizeof(klinux_sockaddr_un), sizeof(klinux_sockaddr_in)), sizeof(klinux_sockaddr_in6)); auto klinux_sock = absl::make_unique<char[]>(klinux_sock_len); if (!TokLinuxSockAddr(addr, addrlen, reinterpret_cast<klinux_sockaddr *>(klinux_sock.get()), &klinux_sock_len, TrustedPrimitives::BestEffortAbort)) { errno = EINVAL; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_connect, sockfd, klinux_sock.get(), klinux_sock_len); } ssize_t enc_untrusted_sendmsg(int sockfd, const struct msghdr *msg, int flags) { size_t total_message_size = CalculateTotalMessageSize(msg); std::unique_ptr<char[]> msg_iov_buffer(new char[total_message_size]); size_t copied_bytes = 0; for (int i = 0; i < msg->msg_iovlen; ++i) { memcpy(msg_iov_buffer.get() + copied_bytes, msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len); copied_bytes += msg->msg_iov[i].iov_len; } MessageWriter input; input.Push(sockfd); input.PushByReference(Extent{msg->msg_name, msg->msg_namelen}); input.PushByReference(Extent{msg_iov_buffer.get(), total_message_size}); input.PushByReference(Extent{msg->msg_control, msg->msg_controllen}); input.Push(msg->msg_flags); input.Push(flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSendMsgHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_sendmsg", 2); ssize_t result = output.next<ssize_t>(); int klinux_errno = output.next<int>(); // sendmsg() returns the number of characters sent. On error, -1 is returned, // with errno set to indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } ssize_t enc_untrusted_recvmsg(int sockfd, struct msghdr *msg, int flags) { size_t total_buffer_size = CalculateTotalMessageSize(msg); MessageWriter input; input.Push(sockfd); input.Push<uint64_t>(msg->msg_namelen); input.Push<uint64_t>(total_buffer_size); input.Push<uint64_t>(msg->msg_controllen); input.Push(msg->msg_flags); input.Push(flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kRecvMsgHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_recvmsg", 2, /*match_exact_params=*/false); ssize_t result = output.next<ssize_t>(); int klinux_errno = output.next<int>(); // recvmsg() returns the number of characters received. On error, -1 is // returned, with errno set to indicate the cause of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (result > total_buffer_size) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_recvmsg: result exceeds requested"); } auto msg_name_extent = output.next(); // The returned |msg_namelen| should not exceed the buffer size. if (msg_name_extent.size() <= msg->msg_namelen) { msg->msg_namelen = msg_name_extent.size(); } memcpy(msg->msg_name, msg_name_extent.As<char>(), msg->msg_namelen); // A single buffer is passed from the untrusted side, copy it into the // scattered buffers inside the enclave. auto msg_iov_extent = output.next(); size_t total_bytes = msg_iov_extent.size(); size_t bytes_copied = 0; for (int i = 0; i < msg->msg_iovlen && bytes_copied < total_bytes; ++i) { size_t bytes_to_copy = std::min(msg->msg_iov[i].iov_len, total_bytes - bytes_copied); memcpy(msg->msg_iov[i].iov_base, msg_iov_extent.As<char>() + bytes_copied, bytes_to_copy); bytes_copied += bytes_to_copy; } auto msg_control_extent = output.next(); // The returned |msg_controllen| should not exceed the buffer size. if (msg_control_extent.size() <= msg->msg_controllen) { msg->msg_controllen = msg_control_extent.size(); } memcpy(msg->msg_control, msg_control_extent.As<char>(), msg->msg_controllen); return result; } int enc_untrusted_getsockname(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { if (!addr || !addrlen) { errno = EFAULT; return -1; } // Guard against -1 being passed as addrlen even though it's unsigned. if (*addrlen == 0 || *addrlen > INT32_MAX) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetSocknameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getsockname", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // getsockname() returns 0 on success. On error, -1 is returned, with errno // set to indicate the cause of the error. 
if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); if (!FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort)) { errno = EFAULT; return -1; } return result; } int enc_untrusted_accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kAcceptHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_accept", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // accept() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort); return result; } int enc_untrusted_getpeername(int sockfd, struct sockaddr *addr, socklen_t *addrlen) { if (!addr || !addrlen) { errno = EFAULT; return -1; } // Guard against -1 being passed as addrlen even though it's unsigned. if (*addrlen == 0 || *addrlen > INT32_MAX) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetPeernameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getpeername", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // getpeername() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), addr, addrlen, TrustedPrimitives::BestEffortAbort); return result; } ssize_t enc_untrusted_recvfrom(int sockfd, void *buf, size_t len, int flags, struct sockaddr *src_addr, socklen_t *addrlen) { int klinux_flags = TokLinuxRecvSendFlag(flags); if (klinux_flags == 0 && flags != 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); input.Push<uint64_t>(len); input.Push<int>(klinux_flags); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kRecvFromHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_recvfrom", 4); int result = output.next<int>(); int klinux_errno = output.next<int>(); // recvfrom() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (result > len) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_recvfrom: result exceeds requested"); } auto buffer_received = output.next(); memcpy(buf, buffer_received.data(), std::min(len, buffer_received.size())); // If |src_addr| is not NULL, and the underlying protocol provides the source // address, this source address is filled in. When |src_addr| is NULL, nothing // is filled in; in this case, |addrlen| is not used, and should also be NULL. 
if (src_addr != nullptr && addrlen != nullptr) { auto klinux_sockaddr_buf = output.next(); const struct klinux_sockaddr *klinux_addr = klinux_sockaddr_buf.As<struct klinux_sockaddr>(); FromkLinuxSockAddr(klinux_addr, klinux_sockaddr_buf.size(), src_addr, addrlen, TrustedPrimitives::BestEffortAbort); } return result; } int enc_untrusted_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, struct timeval *timeout) { struct klinux_fd_set klinux_readfds, klinux_writefds, klinux_exceptfds; struct kLinux_timeval klinux_timeout; TokLinuxFdSet(readfds, &klinux_readfds); TokLinuxFdSet(writefds, &klinux_writefds); TokLinuxFdSet(exceptfds, &klinux_exceptfds); TokLinuxtimeval(timeout, &klinux_timeout); int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_select, nfds, &klinux_readfds, &klinux_writefds, &klinux_exceptfds, &klinux_timeout); FromkLinuxFdSet(&klinux_readfds, readfds); FromkLinuxFdSet(&klinux_writefds, writefds); FromkLinuxFdSet(&klinux_exceptfds, exceptfds); return result; } int enc_untrusted_gettimeofday(struct timeval *tv, struct timezone *tz) { struct kLinux_timeval ktv; TokLinuxtimeval(tv, &ktv); // We do not convert timezone to a klinux value since this struct is expected // to be identical across enclave boundary. Besides, the use of the timezone // structure is obsolete; the tz argument should normally be specified as // NULL. int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_gettimeofday, &ktv, tz); FromkLinuxtimeval(&ktv, tv); return result; } int enc_untrusted_fsync(int fd) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_fsync, fd); } int enc_untrusted_raise(int sig) { int klinux_sig = TokLinuxSignalNumber(sig); if (klinux_sig < 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(klinux_sig); MessageReader output; const auto status = NonSystemCallDispatcher(::asylo::host_call::kRaiseHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_raise", 2); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } int enc_untrusted_getsockopt(int sockfd, int level, int optname, void *optval, socklen_t *optlen) { if (!optval || !optlen || *optlen == 0) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(sockfd); input.Push<int>(level); input.Push<int>(TokLinuxOptionName(level, optname)); input.PushByReference(Extent{reinterpret_cast<char *>(optval), *optlen}); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetSockOptHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getsockopt", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); Extent opt_received = output.next(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } // The returned |optlen| should not exceed the buffer size. 
if (opt_received.size() <= *optlen) { *optlen = opt_received.size(); } memcpy(optval, opt_received.data(), *optlen); return result; } int enc_untrusted_getitimer(int which, struct itimerval *curr_value) { struct klinux_itimerval klinux_curr_value {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_getitimer, TokLinuxItimerType(which), &klinux_curr_value); if (!curr_value || !FromkLinuxItimerval(&klinux_curr_value, curr_value)) { errno = EFAULT; return -1; } return result; } int enc_untrusted_setitimer(int which, const struct itimerval *new_value, struct itimerval *old_value) { struct klinux_itimerval klinux_new_value {}; struct klinux_itimerval klinux_old_value {}; if (!TokLinuxItimerval(new_value, &klinux_new_value)) { errno = EFAULT; return -1; } int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_setitimer, TokLinuxItimerType(which), &klinux_new_value, &klinux_old_value); if (old_value != nullptr && !FromkLinuxItimerval(&klinux_old_value, old_value)) { errno = EFAULT; return -1; } return result; } clock_t enc_untrusted_times(struct tms *buf) { struct kLinux_tms klinux_buf {}; int64_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_times, &klinux_buf); if (!FromkLinuxtms(&klinux_buf, buf)) { errno = EFAULT; return -1; } return static_cast<clock_t>(result); } int enc_untrusted_getaddrinfo(const char *node, const char *service, const struct addrinfo *hints, struct addrinfo **res) { MessageWriter input; input.PushByReference(Extent{node, (node != nullptr) ? strlen(node) + 1 : 0}); input.PushByReference( Extent{service, (service != nullptr) ? strlen(service) + 1 : 0}); if (hints != nullptr) { input.Push<int>(TokLinuxAddressInfoFlag(hints->ai_flags)); input.Push<int>(TokLinuxAfFamily(hints->ai_family)); input.Push<int>(TokLinuxSocketType(hints->ai_socktype)); input.Push<int>(hints->ai_protocol); } MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetAddrInfoHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getaddrinfo", 3, /*match_exact_params=*/false); int klinux_ret = output.next<int>(); int klinux_errno = output.next<int>(); int ret = FromkLinuxAddressInfoError(klinux_ret); if (ret != 0) { if (ret == EAI_SYSTEM) { errno = FromkLinuxErrorNumber(klinux_errno); } return ret; } if (!asylo::host_call::DeserializeAddrinfo( &output, res, TrustedPrimitives::BestEffortAbort)) { TrustedPrimitives::DebugPuts( "enc_untrusted_getaddrinfo: Invalid addrinfo in response."); return -1; } return 0; } void enc_freeaddrinfo(struct addrinfo *res) { struct addrinfo *prev_info = nullptr; for (struct addrinfo *info = res; info != nullptr; info = info->ai_next) { if (prev_info) free(prev_info); if (info->ai_addr) free(info->ai_addr); if (info->ai_canonname) free(info->ai_canonname); prev_info = info; } if (prev_info) free(prev_info); } int enc_untrusted_poll(struct pollfd *fds, nfds_t nfds, int timeout) { auto klinux_fds = absl::make_unique<struct klinux_pollfd[]>(nfds); for (int i = 0; i < nfds; ++i) { if (!TokLinuxPollfd(&fds[i], &klinux_fds[i])) { errno = EFAULT; return -1; } } int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_poll, klinux_fds.get(), static_cast<uint64_t>(nfds), timeout); if (result < 0) { return result; } for (int i = 0; i < nfds; ++i) { if (!FromkLinuxPollfd(&klinux_fds[i], &fds[i])) { errno = EFAULT; return -1; } } return result; } int enc_untrusted_epoll_create(int size) { return EnsureInitializedAndDispatchSyscall( 
asylo::system_call::kSYS_epoll_create, size); } int enc_untrusted_utimes(const char *filename, const struct timeval times[2]) { struct kLinux_timeval klinux_times[2]; if (!TokLinuxtimeval(&times[0], &klinux_times[0]) || !TokLinuxtimeval(&times[1], &klinux_times[1])) { errno = EBADE; return -1; } return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_utimes, filename, klinux_times); } int enc_untrusted_utime(const char *filename, const struct utimbuf *times) { struct kLinux_utimbuf klinux_times {}; // We do not check the return value of the conversion function since utimbuf // is allowed to be null. TokLinuxutimbuf(times, &klinux_times); return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_utime, filename, &klinux_times); } int enc_untrusted_inet_pton(int af, const char *src, void *dst) { if (!src || !dst) { return 0; } MessageWriter input; input.Push<int>(TokLinuxAfFamily(af)); input.PushByReference(Extent{ src, std::min(strlen(src) + 1, static_cast<size_t>(INET6_ADDRSTRLEN))}); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInetPtonHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inet_pton", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } auto klinux_addr_buffer = output.next(); size_t max_size = 0; if (af == AF_INET) { if (klinux_addr_buffer.size() != sizeof(klinux_in_addr)) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_inet_pton: unexpected output size"); } max_size = sizeof(struct in_addr); } else if (af == AF_INET6) { if (klinux_addr_buffer.size() != sizeof(klinux_in6_addr)) { ::asylo::primitives::TrustedPrimitives::BestEffortAbort( "enc_untrusted_inet_pton: unexpected output size"); } max_size = sizeof(struct in6_addr); } memcpy(dst, klinux_addr_buffer.data(), std::min(klinux_addr_buffer.size(), max_size)); return result; } const char *enc_untrusted_inet_ntop(int af, const void *src, char *dst, socklen_t size) { if (!src || !dst) { errno = EFAULT; return nullptr; } size_t src_size = 0; if (af == AF_INET) { src_size = sizeof(struct in_addr); } else if (af == AF_INET6) { src_size = sizeof(struct in6_addr); } else { errno = EAFNOSUPPORT; return nullptr; } MessageWriter input; input.Push<int>(TokLinuxAfFamily(af)); input.PushByReference(Extent{reinterpret_cast<const char *>(src), src_size}); input.Push(size); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInetNtopHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inet_ntop", 2); auto result = output.next(); int klinux_errno = output.next<int>(); if (result.empty()) { errno = FromkLinuxErrorNumber(klinux_errno); return nullptr; } memcpy( dst, result.data(), std::min({static_cast<size_t>(size), static_cast<size_t>(result.size()), static_cast<size_t>(INET6_ADDRSTRLEN)})); return dst; } int enc_untrusted_sigprocmask(int how, const sigset_t *set, sigset_t *oldset) { klinux_sigset_t klinux_set; if (!TokLinuxSigset(set, &klinux_set)) { errno = EINVAL; return -1; } int klinux_how = TokLinuxSigMaskAction(how); if (klinux_how == -1) { errno = EINVAL; return -1; } MessageWriter input; input.Push<int>(klinux_how); input.Push<klinux_sigset_t>(klinux_set); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kSigprocmaskHandler, &input, &output); CheckStatusAndParamCount(status, output, 
"enc_untrusted_sigprocmask", 3); int result = output.next<int>(); int klinux_errno = output.next<int>(); // sigprocmask() returns -1 on failure, with errno set to indicate the cause // of the error. if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } klinux_sigset_t klinux_oldset = output.next<klinux_sigset_t>(); if (oldset != nullptr) { if (!FromkLinuxSigset(&klinux_oldset, oldset)) { errno = EINVAL; return -1; } } return result; } unsigned int enc_untrusted_if_nametoindex(const char *ifname) { MessageWriter input; input.PushString(ifname); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIfNameToIndexHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_nametoindex", 2); auto result = output.next<unsigned int>(); int klinux_errno = output.next<int>(); if (result == 0) { errno = FromkLinuxErrorNumber(klinux_errno); } return result; } char *enc_untrusted_if_indextoname(unsigned int ifindex, char *ifname) { if (!ifname) { return nullptr; } MessageWriter input; input.Push(ifindex); MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kIfIndexToNameHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_indextoname", 2); Extent ifname_buffer = output.next(); memcpy(ifname, ifname_buffer.As<char>(), std::min(ifname_buffer.size(), static_cast<size_t>(IF_NAMESIZE))); int klinux_errno = output.next<int>(); if (ifname_buffer.empty()) { errno = FromkLinuxErrorNumber(klinux_errno); } return ifname; } int enc_untrusted_epoll_ctl(int epfd, int op, int fd, struct epoll_event *event) { struct klinux_epoll_event klinux_event_tmp {}; if (event != nullptr && !TokLinuxEpollEvent(event, &klinux_event_tmp)) { errno = EINVAL; return -1; } int klinux_op = TokLinuxEpollCtlOp(op); if (klinux_op == 0) { errno = EINVAL; return -1; } struct klinux_epoll_event *klinux_event = (event != nullptr) ? &klinux_event_tmp : nullptr; return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_epoll_ctl, epfd, klinux_op, fd, klinux_event); } int enc_untrusted_epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout) { if (maxevents <= 0) { errno = EINVAL; return -1; } auto klinux_events = absl::make_unique<struct klinux_epoll_event[]>(maxevents); int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_epoll_wait, epfd, klinux_events.get(), maxevents, timeout); // Only process epoll events if syscall was successful. if (result == -1) { // errno is already set by the system_call library at this point for a // return value of -1. 
return result; } if (result > maxevents) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_epoll_wait: result found to be greater than maxevents " "supplied."); } for (int i = 0; i < result; i++) { if (!FromkLinuxEpollEvent(&klinux_events.get()[i], &events[i])) { errno = EBADE; return -1; } } return result; } int enc_untrusted_getifaddrs(struct ifaddrs **ifap) { MessageWriter input; MessageReader output; const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetIfAddrsHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getifaddrs", 3, /*match_exact_params=*/false); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result != 0) { errno = FromkLinuxErrorNumber(klinux_errno); return result; } if (!asylo::host_call::DeserializeIfAddrs( &output, ifap, TrustedPrimitives::BestEffortAbort)) { TrustedPrimitives::DebugPuts( "enc_untrusted_getifaddrs: Invalid ifaddrs in response."); return -1; } return 0; } void enc_freeifaddrs(struct ifaddrs *ifa) { asylo::host_call::FreeDeserializedIfAddrs(ifa); } int enc_untrusted_getrusage(int who, struct rusage *usage) { struct klinux_rusage klinux_usage {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_getrusage, TokLinuxRusageTarget(who), &klinux_usage); if (result != -1) { if (!FromkLinuxRusage(&klinux_usage, usage)) { errno = EINVAL; return -1; } } return result; } pid_t enc_untrusted_wait3(int *status, int options, struct rusage *rusage) { int klinux_status; struct klinux_rusage klinux_usage; pid_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, /*pid=*/-1, &klinux_status, TokLinuxWaitOption(options), &klinux_usage); if (status) { *status = FromkLinuxToNewlibWstatus(klinux_status); } if (rusage) { if (!FromkLinuxRusage(&klinux_usage, rusage)) { errno = EINVAL; return -1; } } return result; } pid_t enc_untrusted_waitpid(pid_t pid, int *status, int options) { int klinux_status; pid_t result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_wait4, pid, &klinux_status, TokLinuxWaitOption(options), /*rusage=*/nullptr); if (status) { *status = FromkLinuxToNewlibWstatus(klinux_status); } return result; } int enc_untrusted_uname(struct utsname *buf) { struct klinux_utsname klinux_buf {}; int result = EnsureInitializedAndDispatchSyscall( asylo::system_call::kSYS_uname, &klinux_buf); if (result != 0) { return result; } if (!FromkLinuxUtsName(&klinux_buf, buf)) { TrustedPrimitives::BestEffortAbort( "enc_untrusted_uname: Returned an ill-formed utsname."); } return 0; } struct passwd *enc_untrusted_getpwuid(uid_t uid) { MessageWriter input; MessageReader output; input.Push<uid_t>(uid); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kGetPwUidHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_getpwuid", 1, /*match_exact_params=*/false); int klinux_errno = output.next<int>(); if (output.size() == 1) { errno = FromkLinuxErrorNumber(klinux_errno); return nullptr; } // Store the struct passwd members in a static passwd_holder, and direct the // pointers in global_passwd to those members. 
static struct passwd_holder passwd_buffers; if (!DeserializePasswd(&output, &passwd_buffers) || !PasswdHolderToPasswd(&passwd_buffers, &global_passwd)) { errno = EFAULT; return nullptr; } return &global_passwd; } void enc_untrusted_hex_dump(const void *buf, size_t nbytes) { MessageWriter input; MessageReader output; input.PushByReference(Extent{reinterpret_cast<const char *>(buf), nbytes}); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kHexDumpHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_hex_dump", 2); } void enc_untrusted_syslog(int priority, const char *message, int len) { EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_syslog, TokLinuxSyslogPriority(priority), message, len); } void enc_untrusted_openlog(const char *ident, int option, int facility) { MessageWriter input; MessageReader output; input.PushString(ident); input.Push<int>(TokLinuxSyslogOption(option)); input.Push<int>(TokLinuxSyslogFacility(facility)); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kOpenLogHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_openlog", 1); } int enc_untrusted_inotify_read(int fd, size_t count, char **serialized_events, size_t *serialized_events_len) { MessageWriter input; MessageReader output; input.Push<int>(fd); input.Push<uint64_t>(count); const auto status = NonSystemCallDispatcher( ::asylo::host_call::kInotifyReadHandler, &input, &output); CheckStatusAndParamCount(status, output, "enc_untrusted_inotify_read", 2, /*match_exact_params=*/false); int result = output.next<int>(); int klinux_errno = output.next<int>(); if (result == -1) { errno = FromkLinuxErrorNumber(klinux_errno); return -1; } Extent serialized_buf = output.next(); *serialized_events_len = serialized_buf.size(); // The caller to this host call owns memory pointed by |*serialized_events|. *serialized_events = reinterpret_cast<char *>(malloc(*serialized_events_len)); if (!serialized_events) { errno = ENOMEM; return -1; } memcpy(*serialized_events, serialized_buf.As<char>(), *serialized_events_len); return result; } int enc_untrusted_ioctl1(int fd, uint64_t request) { return EnsureInitializedAndDispatchSyscall(asylo::system_call::kSYS_ioctl, fd, request); } } // extern "C"
func_before:

ssize_t enc_untrusted_read(int fd, void *buf, size_t count) {
  return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall(
      asylo::system_call::kSYS_read, fd, buf, count));
}
func_after:

ssize_t enc_untrusted_read(int fd, void *buf, size_t count) {
  ssize_t ret = static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall(
      asylo::system_call::kSYS_read, fd, buf, count));
  if (ret != -1 && ret > count) {
    ::asylo::primitives::TrustedPrimitives::BestEffortAbort(
        "enc_untrusted_read: read result exceeds requested");
  }
  return ret;
}
diff:

{'added': [(229, ' ssize_t ret = static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall('), (231, ' if (ret != -1 && ret > count) {'), (232, ' ::asylo::primitives::TrustedPrimitives::BestEffortAbort('), (233, ' "enc_untrusted_read: read result exceeds requested");'), (234, ' }'), (235, ' return ret;')], 'deleted': [(229, ' return static_cast<ssize_t>(EnsureInitializedAndDispatchSyscall(')]}
num_lines_added: 6
num_lines_deleted: 1
num_lines_in_file: 1,340
num_tokens_in_file: 9,264
num_lines_in_method: 4
num_tokens_in_method: 36
method_complexity: 1
repo: https://github.com/google/asylo
cve_id: CVE-2020-8942
cwe_id: CWE-125
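The change recorded above is a CWE-125 guard: the byte count reported by the untrusted host is validated against the requested count before it is returned into the enclave, since callers that treat an over-reported count as a trusted length would index past the buffer. Below is a minimal, stand-alone C sketch of the same pattern, not part of the dataset; host_read() is a hypothetical stand-in for the real host-call dispatch.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the untrusted host call (the real code
 * dispatches kSYS_read across the enclave boundary). Here it simulates
 * a malicious host that over-reports the number of bytes read. */
static long host_read(int fd, void *buf, unsigned long count) {
    (void)fd; (void)buf;
    return (long)count + 1;
}

/* The pattern the fix adds: a count coming back across a privilege
 * boundary is attacker-controlled, so reject anything larger than what
 * was requested instead of letting callers use it as a buffer length. */
static long checked_read(int fd, void *buf, unsigned long count) {
    long ret = host_read(fd, buf, count);
    if (ret != -1 && (unsigned long)ret > count) {
        fputs("checked_read: result exceeds requested\n", stderr);
        abort();
    }
    return ret;
}

int main(void) {
    char buf[16];
    checked_read(0, buf, sizeof(buf));   /* aborts on the forged count */
    return 0;
}

Aborting on an over-report is deliberately blunt: once the host lies about a length, continuing in the enclave is riskier than stopping.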
id: 881
file_name: njs_async.c
programming_language: C
method_name: njs_async_function_frame_invoke

code_before:
/* * Copyright (C) Alexander Borisov * Copyright (C) Nginx, Inc. */ #include <njs_main.h> static void njs_async_context_free(njs_vm_t *vm, njs_async_ctx_t *ctx); njs_int_t njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval) { njs_int_t ret; njs_value_t ctor; njs_native_frame_t *frame; njs_promise_capability_t *capability; frame = vm->top_frame; frame->retval = retval; njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]); capability = njs_promise_new_capability(vm, &ctor); if (njs_slow_path(capability == NULL)) { return NJS_ERROR; } frame->function->context = capability; ret = njs_function_lambda_call(vm); if (ret == NJS_OK) { ret = njs_function_call(vm, njs_function(&capability->resolve), &njs_value_undefined, retval, 1, &vm->retval); } else if (ret == NJS_AGAIN) { ret = NJS_OK; } else if (ret == NJS_ERROR) { if (njs_is_memory_error(vm, &vm->retval)) { return NJS_ERROR; } ret = njs_function_call(vm, njs_function(&capability->reject), &njs_value_undefined, &vm->retval, 1, &vm->retval); } *retval = capability->promise; return ret; } njs_int_t njs_await_fulfilled(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { njs_int_t ret; njs_value_t **cur_local, **cur_closures, **cur_temp, *value; njs_frame_t *frame, *async_frame; njs_function_t *function; njs_async_ctx_t *ctx; njs_native_frame_t *top, *async; ctx = vm->top_frame->function->context; value = njs_arg(args, nargs, 1); if (njs_is_error(value)) { goto failed; } async_frame = ctx->await; async = &async_frame->native; async->previous = vm->top_frame; function = async->function; cur_local = vm->levels[NJS_LEVEL_LOCAL]; cur_closures = vm->levels[NJS_LEVEL_CLOSURE]; cur_temp = vm->levels[NJS_LEVEL_TEMP]; top = vm->top_frame; frame = vm->active_frame; vm->levels[NJS_LEVEL_LOCAL] = async->local; vm->levels[NJS_LEVEL_CLOSURE] = njs_function_closures(async->function); vm->levels[NJS_LEVEL_TEMP] = async->temp; vm->top_frame = async; vm->active_frame = async_frame; *njs_scope_value(vm, ctx->index) = *value; vm->retval = *value; vm->top_frame->retval = &vm->retval; function->context = ctx->capability; function->await = ctx; ret = njs_vmcode_interpreter(vm, ctx->pc); function->context = NULL; function->await = NULL; vm->levels[NJS_LEVEL_LOCAL] = cur_local; vm->levels[NJS_LEVEL_CLOSURE] = cur_closures; vm->levels[NJS_LEVEL_TEMP] = cur_temp; vm->top_frame = top; vm->active_frame = frame; if (ret == NJS_OK) { ret = njs_function_call(vm, njs_function(&ctx->capability->resolve), &njs_value_undefined, &vm->retval, 1, &vm->retval); njs_async_context_free(vm, ctx); } else if (ret == NJS_AGAIN) { ret = NJS_OK; } else if (ret == NJS_ERROR) { if (njs_is_memory_error(vm, &vm->retval)) { return NJS_ERROR; } value = &vm->retval; goto failed; } return ret; failed: (void) njs_function_call(vm, njs_function(&ctx->capability->reject), &njs_value_undefined, value, 1, &vm->retval); njs_async_context_free(vm, ctx); return NJS_ERROR; } njs_int_t njs_await_rejected(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { njs_value_t *value; njs_async_ctx_t *ctx; ctx = vm->top_frame->function->context; value = njs_arg(args, nargs, 1); if (ctx->await->native.pc == ctx->pc) { (void) njs_function_call(vm, njs_function(&ctx->capability->reject), &njs_value_undefined, value, 1, &vm->retval); njs_async_context_free(vm, ctx); return NJS_ERROR; } ctx->pc = ctx->await->native.pc; return njs_await_fulfilled(vm, args, nargs, unused); } static void njs_async_context_free(njs_vm_t *vm, njs_async_ctx_t *ctx) { 
njs_mp_free(vm->mem_pool, ctx->capability); njs_mp_free(vm->mem_pool, ctx); } static const njs_object_prop_t njs_async_constructor_properties[] = { { .type = NJS_PROPERTY, .name = njs_string("length"), .value = njs_value(NJS_NUMBER, 1, 1.0), .configurable = 1, }, { .type = NJS_PROPERTY_HANDLER, .name = njs_string("prototype"), .value = njs_prop_handler(njs_object_prototype_create), }, }; const njs_object_init_t njs_async_constructor_init = { njs_async_constructor_properties, njs_nitems(njs_async_constructor_properties), }; static const njs_object_prop_t njs_async_prototype_properties[] = { { .type = NJS_PROPERTY, .name = njs_wellknown_symbol(NJS_SYMBOL_TO_STRING_TAG), .value = njs_string("AsyncFunction"), .configurable = 1, }, { .type = NJS_PROPERTY_HANDLER, .name = njs_string("constructor"), .value = njs_prop_handler(njs_object_prototype_create_constructor), .configurable = 1, }, }; const njs_object_init_t njs_async_prototype_init = { njs_async_prototype_properties, njs_nitems(njs_async_prototype_properties), }; const njs_object_type_init_t njs_async_function_type_init = { .constructor = njs_native_ctor(njs_function_constructor, 1, 1), .constructor_props = &njs_async_constructor_init, .prototype_props = &njs_async_prototype_init, .prototype_value = { .object = { .type = NJS_OBJECT } }, }; const njs_object_prop_t njs_async_function_instance_properties[] = { { .type = NJS_PROPERTY_HANDLER, .name = njs_string("length"), .value = njs_prop_handler(njs_function_instance_length), .configurable = 1, }, }; const njs_object_init_t njs_async_function_instance_init = { njs_async_function_instance_properties, njs_nitems(njs_async_function_instance_properties), };
code_after:
/* * Copyright (C) Alexander Borisov * Copyright (C) Nginx, Inc. */ #include <njs_main.h> static void njs_async_context_free(njs_vm_t *vm, njs_async_ctx_t *ctx); njs_int_t njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval) { njs_int_t ret; njs_value_t ctor; njs_native_frame_t *frame; njs_promise_capability_t *capability; frame = vm->top_frame; frame->retval = retval; njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]); capability = njs_promise_new_capability(vm, &ctor); if (njs_slow_path(capability == NULL)) { return NJS_ERROR; } ret = njs_function_lambda_call(vm, capability, NULL); if (ret == NJS_OK) { ret = njs_function_call(vm, njs_function(&capability->resolve), &njs_value_undefined, retval, 1, &vm->retval); } else if (ret == NJS_AGAIN) { ret = NJS_OK; } else if (ret == NJS_ERROR) { if (njs_is_memory_error(vm, &vm->retval)) { return NJS_ERROR; } ret = njs_function_call(vm, njs_function(&capability->reject), &njs_value_undefined, &vm->retval, 1, &vm->retval); } *retval = capability->promise; return ret; } njs_int_t njs_await_fulfilled(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { njs_int_t ret; njs_value_t **cur_local, **cur_closures, **cur_temp, *value; njs_frame_t *frame, *async_frame; njs_async_ctx_t *ctx; njs_native_frame_t *top, *async; ctx = vm->top_frame->function->context; value = njs_arg(args, nargs, 1); if (njs_is_error(value)) { goto failed; } async_frame = ctx->await; async = &async_frame->native; async->previous = vm->top_frame; cur_local = vm->levels[NJS_LEVEL_LOCAL]; cur_closures = vm->levels[NJS_LEVEL_CLOSURE]; cur_temp = vm->levels[NJS_LEVEL_TEMP]; top = vm->top_frame; frame = vm->active_frame; vm->levels[NJS_LEVEL_LOCAL] = async->local; vm->levels[NJS_LEVEL_CLOSURE] = njs_function_closures(async->function); vm->levels[NJS_LEVEL_TEMP] = async->temp; vm->top_frame = async; vm->active_frame = async_frame; *njs_scope_value(vm, ctx->index) = *value; vm->retval = *value; vm->top_frame->retval = &vm->retval; ret = njs_vmcode_interpreter(vm, ctx->pc, ctx->capability, ctx); vm->levels[NJS_LEVEL_LOCAL] = cur_local; vm->levels[NJS_LEVEL_CLOSURE] = cur_closures; vm->levels[NJS_LEVEL_TEMP] = cur_temp; vm->top_frame = top; vm->active_frame = frame; if (ret == NJS_OK) { ret = njs_function_call(vm, njs_function(&ctx->capability->resolve), &njs_value_undefined, &vm->retval, 1, &vm->retval); njs_async_context_free(vm, ctx); } else if (ret == NJS_AGAIN) { ret = NJS_OK; } else if (ret == NJS_ERROR) { if (njs_is_memory_error(vm, &vm->retval)) { return NJS_ERROR; } value = &vm->retval; goto failed; } return ret; failed: (void) njs_function_call(vm, njs_function(&ctx->capability->reject), &njs_value_undefined, value, 1, &vm->retval); njs_async_context_free(vm, ctx); return NJS_ERROR; } njs_int_t njs_await_rejected(njs_vm_t *vm, njs_value_t *args, njs_uint_t nargs, njs_index_t unused) { njs_value_t *value; njs_async_ctx_t *ctx; ctx = vm->top_frame->function->context; value = njs_arg(args, nargs, 1); if (ctx->await->native.pc == ctx->pc) { (void) njs_function_call(vm, njs_function(&ctx->capability->reject), &njs_value_undefined, value, 1, &vm->retval); njs_async_context_free(vm, ctx); return NJS_ERROR; } ctx->pc = ctx->await->native.pc; return njs_await_fulfilled(vm, args, nargs, unused); } static void njs_async_context_free(njs_vm_t *vm, njs_async_ctx_t *ctx) { njs_mp_free(vm->mem_pool, ctx->capability); njs_mp_free(vm->mem_pool, ctx); } static const njs_object_prop_t njs_async_constructor_properties[] = { { .type = NJS_PROPERTY, .name = 
njs_string("length"), .value = njs_value(NJS_NUMBER, 1, 1.0), .configurable = 1, }, { .type = NJS_PROPERTY_HANDLER, .name = njs_string("prototype"), .value = njs_prop_handler(njs_object_prototype_create), }, }; const njs_object_init_t njs_async_constructor_init = { njs_async_constructor_properties, njs_nitems(njs_async_constructor_properties), }; static const njs_object_prop_t njs_async_prototype_properties[] = { { .type = NJS_PROPERTY, .name = njs_wellknown_symbol(NJS_SYMBOL_TO_STRING_TAG), .value = njs_string("AsyncFunction"), .configurable = 1, }, { .type = NJS_PROPERTY_HANDLER, .name = njs_string("constructor"), .value = njs_prop_handler(njs_object_prototype_create_constructor), .configurable = 1, }, }; const njs_object_init_t njs_async_prototype_init = { njs_async_prototype_properties, njs_nitems(njs_async_prototype_properties), }; const njs_object_type_init_t njs_async_function_type_init = { .constructor = njs_native_ctor(njs_function_constructor, 1, 1), .constructor_props = &njs_async_constructor_init, .prototype_props = &njs_async_prototype_init, .prototype_value = { .object = { .type = NJS_OBJECT } }, }; const njs_object_prop_t njs_async_function_instance_properties[] = { { .type = NJS_PROPERTY_HANDLER, .name = njs_string("length"), .value = njs_prop_handler(njs_function_instance_length), .configurable = 1, }, }; const njs_object_init_t njs_async_function_instance_init = { njs_async_function_instance_properties, njs_nitems(njs_async_function_instance_properties), };
func_before:

njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval)
{
    njs_int_t                 ret;
    njs_value_t               ctor;
    njs_native_frame_t        *frame;
    njs_promise_capability_t  *capability;

    frame = vm->top_frame;
    frame->retval = retval;

    njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]);

    capability = njs_promise_new_capability(vm, &ctor);
    if (njs_slow_path(capability == NULL)) {
        return NJS_ERROR;
    }

    frame->function->context = capability;

    ret = njs_function_lambda_call(vm);

    if (ret == NJS_OK) {
        ret = njs_function_call(vm, njs_function(&capability->resolve),
                                &njs_value_undefined, retval, 1, &vm->retval);

    } else if (ret == NJS_AGAIN) {
        ret = NJS_OK;

    } else if (ret == NJS_ERROR) {
        if (njs_is_memory_error(vm, &vm->retval)) {
            return NJS_ERROR;
        }

        ret = njs_function_call(vm, njs_function(&capability->reject),
                                &njs_value_undefined, &vm->retval, 1,
                                &vm->retval);
    }

    *retval = capability->promise;

    return ret;
}
func_after:

njs_async_function_frame_invoke(njs_vm_t *vm, njs_value_t *retval)
{
    njs_int_t                 ret;
    njs_value_t               ctor;
    njs_native_frame_t        *frame;
    njs_promise_capability_t  *capability;

    frame = vm->top_frame;
    frame->retval = retval;

    njs_set_function(&ctor, &vm->constructors[NJS_OBJ_TYPE_PROMISE]);

    capability = njs_promise_new_capability(vm, &ctor);
    if (njs_slow_path(capability == NULL)) {
        return NJS_ERROR;
    }

    ret = njs_function_lambda_call(vm, capability, NULL);

    if (ret == NJS_OK) {
        ret = njs_function_call(vm, njs_function(&capability->resolve),
                                &njs_value_undefined, retval, 1, &vm->retval);

    } else if (ret == NJS_AGAIN) {
        ret = NJS_OK;

    } else if (ret == NJS_ERROR) {
        if (njs_is_memory_error(vm, &vm->retval)) {
            return NJS_ERROR;
        }

        ret = njs_function_call(vm, njs_function(&capability->reject),
                                &njs_value_undefined, &vm->retval, 1,
                                &vm->retval);
    }

    *retval = capability->promise;

    return ret;
}
diff:

{'added': [(32, ' ret = njs_function_lambda_call(vm, capability, NULL);'), (96, ' ret = njs_vmcode_interpreter(vm, ctx->pc, ctx->capability, ctx);')], 'deleted': [(32, ' frame->function->context = capability;'), (33, ''), (34, ' ret = njs_function_lambda_call(vm);'), (66, ' njs_function_t *function;'), (81, ' function = async->function;'), (82, ''), (101, ' function->context = ctx->capability;'), (102, ' function->await = ctx;'), (103, ''), (104, ' ret = njs_vmcode_interpreter(vm, ctx->pc);'), (105, ''), (106, ' function->context = NULL;'), (107, ' function->await = NULL;')]}
num_lines_added: 2
num_lines_deleted: 13
num_lines_in_file: 169
num_tokens_in_file: 1,064
num_lines_in_method: 31
num_tokens_in_method: 205
method_complexity: 6
repo: https://github.com/nginx/njs
cve_id: CVE-2022-25139
cwe_id: CWE-416
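The change recorded above closes a use-after-free (CWE-416) window: instead of parking the per-call promise capability and await context on the shared function object (function->context, function->await) for the duration of the call, the fix passes them to njs_function_lambda_call() and njs_vmcode_interpreter() as explicit arguments. Below is a minimal C sketch of that refactor shape, not part of the dataset; every name in it is hypothetical.

#include <stdio.h>

typedef struct { const char *tag; } capability_t;

/* Buggy shape, for contrast: per-call state stashed on a shared,
 * long-lived object, so a reentrant call or an early free can leave a
 * dangling pointer behind:
 *
 *     shared->context = cap;
 *     interpret(shared);        // may recurse and clobber/free context
 *     shared->context = NULL;
 *
 * Fixed shape: the state travels with the call itself. */
static int interpret(const unsigned char *pc, capability_t *cap) {
    (void)pc;
    printf("resolving via %s\n", cap->tag);
    return 0;
}

static int lambda_call(const unsigned char *pc, capability_t *cap) {
    /* context is a parameter, never stored on shared state */
    return interpret(pc, cap);
}

int main(void) {
    capability_t cap = { "promise capability" };
    return lambda_call((const unsigned char *)"", &cap);
}

Tying the state's lifetime to the call frame rather than to a shared, reusable object means interleaved invocations cannot observe or free each other's context.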
id: 187
file_name: ne.c
programming_language: C
method_name: r_bin_ne_get_symbols

code_before:
/* radare - LGPL - Copyright 2019-2022 - GustavoLCR */ #include "ne.h" #define NE_BUG 0 static char *__get_target_os(r_bin_ne_obj_t *bin) { switch (bin->ne_header->targOS) { case 1: return "OS/2"; case 2: return "Windows"; case 3: return "European MS-DOS 4.x"; case 4: return "Windows 386"; case 5: return "BOSS (Borland Operating System Services)"; default: return "Unknown"; } } static int __translate_perms(int flags) { int perms = 0; if (flags & IS_RX) { if (flags & IS_DATA) { perms = R_PERM_R; } else { perms = R_PERM_X; } } if (!perms) { perms = R_PERM_RWX; } return perms; } static char *__read_nonnull_str_at(RBuffer *buf, ut64 offset) { ut8 sz = r_buf_read8_at (buf, offset); if (!sz) { return NULL; } char *str = malloc ((ut64)sz + 1); if (!str) { return NULL; } r_buf_read_at (buf, offset + 1, (ut8 *)str, sz); str[sz] = '\0'; return str; } static char *__func_name_from_ord(const char *module, ut16 ordinal) { if (!module) { return NULL; } char *lower_module = strdup (module); r_str_case (lower_module, false); char *path = r_str_newf (R_JOIN_4_PATHS ("%s", R2_SDB_FORMAT, "dll", "%s.sdb"), r_sys_prefix (NULL), lower_module); free (lower_module); char *ord = r_str_newf ("%d", ordinal); char *name; if (r_file_exists (path)) { Sdb *sdb = sdb_new (NULL, path, 0); name = sdb_get (sdb, ord, NULL); if (!name) { name = ord; } else { free (ord); } sdb_close (sdb); free (sdb); } else { name = ord; } free (path); return name; } RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? 
"MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; } static int __find_symbol_by_paddr(const void *paddr, const void *sym) { return (int)!(*(ut64 *)paddr == ((RBinSymbol *)sym)->paddr); } RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (true) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; } static char *__resource_type_str(int type) { char *typeName; switch (type) { case 1: typeName = "CURSOR"; break; case 2: typeName = "BITMAP"; break; case 3: typeName = "ICON"; break; case 4: typeName = "MENU"; break; case 5: typeName = "DIALOG"; break; case 6: typeName = "STRING"; break; case 7: typeName = "FONTDIR"; break; case 8: typeName = "FONT"; break; case 9: typeName = "ACCELERATOR"; break; case 10: typeName = "RCDATA"; break; case 11: typeName = "MESSAGETABLE"; break; case 12: typeName = "GROUP_CURSOR"; break; case 14: typeName = "GROUP_ICON"; break; case 15: typeName = "NAMETABLE"; break; case 16: typeName = "VERSION"; break; case 17: typeName = "DLGINCLUDE"; break; case 19: typeName = "PLUGPLAY"; break; case 20: typeName = "VXD"; break; case 21: typeName = "ANICURSOR"; break; case 22: typeName = "ANIICON"; break; case 23: typeName = "HTML"; break; case 24: typeName = "MANIFEST"; break; default: return r_str_newf ("UNKNOWN (%d)", type); } return strdup (typeName); } static void __free_resource_entry(void *entry) { r_ne_resource_entry *en = (r_ne_resource_entry *)entry; free (en->name); free (en); } static void __free_resource(void *resource) { r_ne_resource *res = (r_ne_resource *)resource; free (res->name); r_list_free (res->entry); free (res); } static bool __ne_get_resources(r_bin_ne_obj_t *bin) { if (!bin->resources) { bin->resources = r_list_newf (__free_resource); } ut16 resoff = bin->ne_header->ResTableOffset + bin->header_offset; ut16 alignment = r_buf_read_le16_at (bin->buf, resoff); ut32 off = resoff + 2; while (true) { NE_image_typeinfo_entry ti = {0}; r_ne_resource *res = R_NEW0 (r_ne_resource); if (!res) { break; } res->entry = r_list_newf (__free_resource_entry); if (!res->entry) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ti, sizeof (ti)); if (!ti.rtTypeID) { break; } else if (ti.rtTypeID & 0x8000) { 
res->name = __resource_type_str (ti.rtTypeID & ~0x8000); } else { // Offset to resident name table res->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ti.rtTypeID); } off += sizeof (NE_image_typeinfo_entry); int i; for (i = 0; i < ti.rtResourceCount; i++) { NE_image_nameinfo_entry ni; r_ne_resource_entry *ren = R_NEW0 (r_ne_resource_entry); if (!ren) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ni, sizeof (NE_image_nameinfo_entry)); ren->offset = ni.rnOffset << alignment; ren->size = ni.rnLength; if (ni.rnID & 0x8000) { ren->name = r_str_newf ("%d", ni.rnID & ~0x8000); } else { // Offset to resident name table ren->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ni.rnID); } r_list_append (res->entry, ren); off += sizeof (NE_image_nameinfo_entry); } r_list_append (bin->resources, res); } return true; } RList *r_bin_ne_get_imports(r_bin_ne_obj_t *bin) { RList *imports = r_list_newf ((RListFree)r_bin_import_free); if (!imports) { return NULL; } ut16 off = bin->ne_header->ImportNameTable + bin->header_offset + 1; int i; for (i = 0; i < bin->ne_header->ModRefs; i++) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { break; } ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { r_bin_import_free (imp); break; } off++; char *name = malloc ((ut64)sz + 1); if (!name) { break; } r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; imp->name = name; imp->ordinal = i + 1; r_list_append (imports, imp); off += sz; } bin->imports = imports; return imports; } RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? 
s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; ut16 segoff = *(ut16 *)(bin->entry_table + off); if (segnum > 0) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + *(ut16 *)(bin->entry_table + off); } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; } RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int index = -1; r_list_foreach (segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name; #if NE_BUG if (rel.index > 0 && rel.index < bin->ne_header->ModRefs) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } else { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? } #else if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? 
} else { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } #endif if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; imp->name = r_str_newf ("%s.%s", name, __func_name_from_ord(name, rel.func_ord)); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if (sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; } void __init(RBuffer *buf, r_bin_ne_obj_t *bin) { bin->header_offset = r_buf_read_le16_at (buf, 0x3c); bin->ne_header = R_NEW0 (NE_image_header); if (!bin->ne_header) { return; } bin->buf = buf; // XXX this is endian unsafe if (r_buf_read_at (buf, bin->header_offset, (ut8 *)bin->ne_header, sizeof (NE_image_header)) < 1) { R_FREE (bin->ne_header); return; } if (bin->ne_header->FileAlnSzShftCnt > 15) { bin->ne_header->FileAlnSzShftCnt = 15; } ut64 from = bin->ne_header->ModRefTable + bin->header_offset; ut64 left = r_buf_size (bin->buf) - from; if (from + bin->ne_header->ModRefs * sizeof (ut16) >= left) { bin->ne_header->ModRefs = left / sizeof (ut16); } bin->alignment = 1 << bin->ne_header->FileAlnSzShftCnt; if (!bin->alignment) { bin->alignment = 1 << 9; } bin->os = __get_target_os (bin); ut16 offset = bin->ne_header->SegTableOffset + bin->header_offset; size_t size = bin->ne_header->SegCount * sizeof (NE_image_segment_entry); if (offset >= r_buf_size (bin->buf)) { return; } size_t remaining = r_buf_size (bin->buf) - offset; size = R_MIN (remaining, size); bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); // * sizeof (NE_image_segment_entry); bin->segment_entries = calloc (1, size); if (size >= remaining) { bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); } if (!bin->segment_entries) { return; } r_buf_read_at (buf, offset, (ut8 *)bin->segment_entries, size); bin->entry_table = calloc (4, bin->ne_header->EntryTableLength); if (!bin->entry_table) { R_FREE (bin->segment_entries); return; } r_buf_read_at (buf, (ut64)bin->header_offset + bin->ne_header->EntryTableOffset, bin->entry_table, bin->ne_header->EntryTableLength); bin->imports = r_bin_ne_get_imports (bin); __ne_get_resources (bin); } void r_bin_ne_free(r_bin_ne_obj_t *bin) { // r_list_free (bin->imports); // double free r_list_free (bin->resources); free (bin->entry_table); free (bin->ne_header); free (bin->resident_name_table); free (bin->segment_entries); free (bin); } r_bin_ne_obj_t 
*r_bin_ne_new_buf(RBuffer *buf, bool verbose) { r_bin_ne_obj_t *bin = R_NEW0 (r_bin_ne_obj_t); if (!bin) { return NULL; } __init(buf, bin); return bin; }
/* radare - LGPL - Copyright 2019-2022 - GustavoLCR */ #include "ne.h" #define NE_BUG 0 static char *__get_target_os(r_bin_ne_obj_t *bin) { switch (bin->ne_header->targOS) { case 1: return "OS/2"; case 2: return "Windows"; case 3: return "European MS-DOS 4.x"; case 4: return "Windows 386"; case 5: return "BOSS (Borland Operating System Services)"; default: return "Unknown"; } } static int __translate_perms(int flags) { int perms = 0; if (flags & IS_RX) { if (flags & IS_DATA) { perms = R_PERM_R; } else { perms = R_PERM_X; } } if (!perms) { perms = R_PERM_RWX; } return perms; } static char *__read_nonnull_str_at(RBuffer *buf, ut64 offset) { ut8 sz = r_buf_read8_at (buf, offset); if (!sz) { return NULL; } char *str = malloc ((ut64)sz + 1); if (!str) { return NULL; } r_buf_read_at (buf, offset + 1, (ut8 *)str, sz); str[sz] = '\0'; return str; } static char *__func_name_from_ord(const char *module, ut16 ordinal) { if (!module) { return NULL; } char *lower_module = strdup (module); r_str_case (lower_module, false); char *path = r_str_newf (R_JOIN_4_PATHS ("%s", R2_SDB_FORMAT, "dll", "%s.sdb"), r_sys_prefix (NULL), lower_module); free (lower_module); char *ord = r_str_newf ("%d", ordinal); char *name; if (r_file_exists (path)) { Sdb *sdb = sdb_new (NULL, path, 0); name = sdb_get (sdb, ord, NULL); if (!name) { name = ord; } else { free (ord); } sdb_close (sdb); free (sdb); } else { name = ord; } free (path); return name; } RList *r_bin_ne_get_segments(r_bin_ne_obj_t *bin) { int i; if (!bin) { return NULL; } RList *segments = r_list_newf (free); for (i = 0; i < bin->ne_header->SegCount; i++) { RBinSection *bs = R_NEW0 (RBinSection); if (!bs) { return segments; } NE_image_segment_entry *se = &bin->segment_entries[i]; bs->size = se->length; bs->vsize = se->minAllocSz ? se->minAllocSz : 64000; bs->bits = R_SYS_BITS_16; bs->is_data = se->flags & IS_DATA; bs->perm = __translate_perms (se->flags); bs->paddr = (ut64)se->offset * bin->alignment; bs->name = r_str_newf ("%s.%" PFMT64d, se->flags & IS_MOVEABLE ? 
"MOVEABLE" : "FIXED", bs->paddr); bs->is_segment = true; r_list_append (segments, bs); } bin->segments = segments; return segments; } static int __find_symbol_by_paddr(const void *paddr, const void *sym) { return (int)!(*(ut64 *)paddr == ((RBinSymbol *)sym)->paddr); } RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (entries) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; } static char *__resource_type_str(int type) { char *typeName; switch (type) { case 1: typeName = "CURSOR"; break; case 2: typeName = "BITMAP"; break; case 3: typeName = "ICON"; break; case 4: typeName = "MENU"; break; case 5: typeName = "DIALOG"; break; case 6: typeName = "STRING"; break; case 7: typeName = "FONTDIR"; break; case 8: typeName = "FONT"; break; case 9: typeName = "ACCELERATOR"; break; case 10: typeName = "RCDATA"; break; case 11: typeName = "MESSAGETABLE"; break; case 12: typeName = "GROUP_CURSOR"; break; case 14: typeName = "GROUP_ICON"; break; case 15: typeName = "NAMETABLE"; break; case 16: typeName = "VERSION"; break; case 17: typeName = "DLGINCLUDE"; break; case 19: typeName = "PLUGPLAY"; break; case 20: typeName = "VXD"; break; case 21: typeName = "ANICURSOR"; break; case 22: typeName = "ANIICON"; break; case 23: typeName = "HTML"; break; case 24: typeName = "MANIFEST"; break; default: return r_str_newf ("UNKNOWN (%d)", type); } return strdup (typeName); } static void __free_resource_entry(void *entry) { r_ne_resource_entry *en = (r_ne_resource_entry *)entry; free (en->name); free (en); } static void __free_resource(void *resource) { r_ne_resource *res = (r_ne_resource *)resource; free (res->name); r_list_free (res->entry); free (res); } static bool __ne_get_resources(r_bin_ne_obj_t *bin) { if (!bin->resources) { bin->resources = r_list_newf (__free_resource); } ut16 resoff = bin->ne_header->ResTableOffset + bin->header_offset; ut16 alignment = r_buf_read_le16_at (bin->buf, resoff); ut32 off = resoff + 2; while (true) { NE_image_typeinfo_entry ti = {0}; r_ne_resource *res = R_NEW0 (r_ne_resource); if (!res) { break; } res->entry = r_list_newf (__free_resource_entry); if (!res->entry) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ti, sizeof (ti)); if (!ti.rtTypeID) { break; } else if (ti.rtTypeID & 0x8000) { 
res->name = __resource_type_str (ti.rtTypeID & ~0x8000); } else { // Offset to resident name table res->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ti.rtTypeID); } off += sizeof (NE_image_typeinfo_entry); int i; for (i = 0; i < ti.rtResourceCount; i++) { NE_image_nameinfo_entry ni; r_ne_resource_entry *ren = R_NEW0 (r_ne_resource_entry); if (!ren) { break; } r_buf_read_at (bin->buf, off, (ut8 *)&ni, sizeof (NE_image_nameinfo_entry)); ren->offset = ni.rnOffset << alignment; ren->size = ni.rnLength; if (ni.rnID & 0x8000) { ren->name = r_str_newf ("%d", ni.rnID & ~0x8000); } else { // Offset to resident name table ren->name = __read_nonnull_str_at (bin->buf, (ut64)resoff + ni.rnID); } r_list_append (res->entry, ren); off += sizeof (NE_image_nameinfo_entry); } r_list_append (bin->resources, res); } return true; } RList *r_bin_ne_get_imports(r_bin_ne_obj_t *bin) { RList *imports = r_list_newf ((RListFree)r_bin_import_free); if (!imports) { return NULL; } ut16 off = bin->ne_header->ImportNameTable + bin->header_offset + 1; int i; for (i = 0; i < bin->ne_header->ModRefs; i++) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { break; } ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { r_bin_import_free (imp); break; } off++; char *name = malloc ((ut64)sz + 1); if (!name) { break; } r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; imp->name = name; imp->ordinal = i + 1; r_list_append (imports, imp); off += sz; } bin->imports = imports; return imports; } RList *r_bin_ne_get_entrypoints(r_bin_ne_obj_t *bin) { if (!bin->entry_table) { return NULL; } RList *entries = r_list_newf (free); if (!entries) { return NULL; } RList *segments = r_bin_ne_get_segments (bin); if (!segments) { r_list_free (entries); return NULL; } if (bin->ne_header->csEntryPoint) { RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } entry->bits = 16; ut32 entry_cs = bin->ne_header->csEntryPoint; RBinSection *s = r_list_get_n (segments, entry_cs - 1); entry->paddr = bin->ne_header->ipEntryPoint + (s? 
s->paddr: 0); r_list_append (entries, entry); } int off = 0; size_t tableat = bin->header_offset + bin->ne_header->EntryTableOffset; while (off < bin->ne_header->EntryTableLength) { if (tableat + off >= r_buf_size (bin->buf)) { break; } ut8 bundle_length = *(ut8 *)(bin->entry_table + off); if (!bundle_length) { break; } off++; ut8 bundle_type = *(ut8 *)(bin->entry_table + off); off++; int i; for (i = 0; i < bundle_length; i++) { if (tableat + off + 4 >= r_buf_size (bin->buf)) { break; } RBinAddr *entry = R_NEW0 (RBinAddr); if (!entry) { r_list_free (entries); return NULL; } off++; if (!bundle_type) { // Skip off--; free (entry); break; } else if (bundle_type == 0xff) { // moveable off += 2; ut8 segnum = *(bin->entry_table + off); off++; ut16 segoff = *(ut16 *)(bin->entry_table + off); if (segnum > 0) { entry->paddr = (ut64)bin->segment_entries[segnum - 1].offset * bin->alignment + segoff; } } else { // Fixed if (bundle_type < bin->ne_header->SegCount) { entry->paddr = (ut64)bin->segment_entries[bundle_type - 1].offset * bin->alignment + *(ut16 *)(bin->entry_table + off); } } off += 2; r_list_append (entries, entry); } } r_list_free (segments); bin->entries = entries; return entries; } RList *r_bin_ne_get_relocs(r_bin_ne_obj_t *bin) { RList *segments = bin->segments; if (!segments) { return NULL; } RList *entries = bin->entries; if (!entries) { return NULL; } RList *symbols = bin->symbols; if (!symbols) { return NULL; } ut16 *modref = calloc (bin->ne_header->ModRefs, sizeof (ut16)); if (!modref) { return NULL; } r_buf_read_at (bin->buf, (ut64)bin->ne_header->ModRefTable + bin->header_offset, (ut8 *)modref, bin->ne_header->ModRefs * sizeof (ut16)); RList *relocs = r_list_newf (free); if (!relocs) { free (modref); return NULL; } RListIter *it; RBinSection *seg; int index = -1; r_list_foreach (segments, it, seg) { index++; if (!(bin->segment_entries[index].flags & RELOCINFO)) { continue; } ut32 off = seg->paddr + seg->size; ut32 start = off; ut16 length = r_buf_read_le16_at (bin->buf, off); if (!length) { continue; } off += 2; // size_t buf_size = r_buf_size (bin->buf); while (off < start + length * sizeof (NE_image_reloc_item)) { // && off + sizeof (NE_image_reloc_item) < buf_size) NE_image_reloc_item rel = {0}; if (r_buf_read_at (bin->buf, off, (ut8 *)&rel, sizeof (rel)) < 1) { return NULL; } RBinReloc *reloc = R_NEW0 (RBinReloc); if (!reloc) { return NULL; } reloc->paddr = seg->paddr + rel.offset; switch (rel.type) { case LOBYTE: reloc->type = R_BIN_RELOC_8; break; case SEL_16: case OFF_16: reloc->type = R_BIN_RELOC_16; break; case POI_32: case OFF_32: reloc->type = R_BIN_RELOC_32; break; case POI_48: reloc->type = R_BIN_RELOC_64; break; } ut32 offset; if (rel.flags & (IMPORTED_ORD | IMPORTED_NAME)) { RBinImport *imp = R_NEW0 (RBinImport); if (!imp) { free (reloc); break; } char *name; #if NE_BUG if (rel.index > 0 && rel.index < bin->ne_header->ModRefs) { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } else { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? } #else if (rel.index > bin->ne_header->ModRefs) { name = r_str_newf ("UnknownModule%d_%x", rel.index, off); // ???? 
} else { offset = modref[rel.index - 1] + bin->header_offset + bin->ne_header->ImportNameTable; name = __read_nonnull_str_at (bin->buf, offset); } #endif if (rel.flags & IMPORTED_ORD) { imp->ordinal = rel.func_ord; imp->name = r_str_newf ("%s.%s", name, __func_name_from_ord(name, rel.func_ord)); } else { offset = bin->header_offset + bin->ne_header->ImportNameTable + rel.name_off; char *func = __read_nonnull_str_at (bin->buf, offset); imp->name = r_str_newf ("%s.%s", name, func); free (func); } free (name); reloc->import = imp; } else if (rel.flags & OSFIXUP) { // TODO } else { if (strstr (seg->name, "FIXED")) { RBinSection *s = r_list_get_n (segments, rel.segnum - 1); if (s) { offset = s->paddr + rel.segoff; } else { offset = -1; } } else { RBinAddr *entry = r_list_get_n (entries, rel.entry_ordinal - 1); if (entry) { offset = entry->paddr; } else { offset = -1; } } reloc->addend = offset; RBinSymbol *sym = NULL; RListIter *sit; r_list_foreach (symbols, sit, sym) { if (sym->paddr == reloc->addend) { reloc->symbol = sym; break; } } } if (rel.flags & ADDITIVE) { reloc->additive = 1; r_list_append (relocs, reloc); } else { do { #if NE_BUG if (reloc->paddr + 4 < r_buf_size (bin->buf)) { break; } #endif r_list_append (relocs, reloc); offset = r_buf_read_le16_at (bin->buf, reloc->paddr); RBinReloc *tmp = reloc; reloc = R_NEW0 (RBinReloc); if (!reloc) { break; } *reloc = *tmp; reloc->paddr = seg->paddr + offset; } while (offset != 0xFFFF); free (reloc); } off += sizeof (NE_image_reloc_item); } } free (modref); return relocs; } void __init(RBuffer *buf, r_bin_ne_obj_t *bin) { bin->header_offset = r_buf_read_le16_at (buf, 0x3c); bin->ne_header = R_NEW0 (NE_image_header); if (!bin->ne_header) { return; } bin->buf = buf; // XXX this is endian unsafe if (r_buf_read_at (buf, bin->header_offset, (ut8 *)bin->ne_header, sizeof (NE_image_header)) < 1) { R_FREE (bin->ne_header); return; } if (bin->ne_header->FileAlnSzShftCnt > 15) { bin->ne_header->FileAlnSzShftCnt = 15; } ut64 from = bin->ne_header->ModRefTable + bin->header_offset; ut64 left = r_buf_size (bin->buf) - from; if (from + bin->ne_header->ModRefs * sizeof (ut16) >= left) { bin->ne_header->ModRefs = left / sizeof (ut16); } bin->alignment = 1 << bin->ne_header->FileAlnSzShftCnt; if (!bin->alignment) { bin->alignment = 1 << 9; } bin->os = __get_target_os (bin); ut16 offset = bin->ne_header->SegTableOffset + bin->header_offset; size_t size = bin->ne_header->SegCount * sizeof (NE_image_segment_entry); if (offset >= r_buf_size (bin->buf)) { return; } size_t remaining = r_buf_size (bin->buf) - offset; size = R_MIN (remaining, size); bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); // * sizeof (NE_image_segment_entry); bin->segment_entries = calloc (1, size); if (size >= remaining) { bin->ne_header->SegCount = size / sizeof (NE_image_segment_entry); } if (!bin->segment_entries) { return; } r_buf_read_at (buf, offset, (ut8 *)bin->segment_entries, size); bin->entry_table = calloc (4, bin->ne_header->EntryTableLength); if (!bin->entry_table) { R_FREE (bin->segment_entries); return; } r_buf_read_at (buf, (ut64)bin->header_offset + bin->ne_header->EntryTableOffset, bin->entry_table, bin->ne_header->EntryTableLength); bin->imports = r_bin_ne_get_imports (bin); __ne_get_resources (bin); } void r_bin_ne_free(r_bin_ne_obj_t *bin) { // r_list_free (bin->imports); // double free r_list_free (bin->resources); free (bin->entry_table); free (bin->ne_header); free (bin->resident_name_table); free (bin->segment_entries); free (bin); } r_bin_ne_obj_t 
*r_bin_ne_new_buf(RBuffer *buf, bool verbose) { r_bin_ne_obj_t *bin = R_NEW0 (r_bin_ne_obj_t); if (!bin) { return NULL; } __init(buf, bin); return bin; }
RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (true) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; }
RList *r_bin_ne_get_symbols(r_bin_ne_obj_t *bin) { RBinSymbol *sym; ut16 off = bin->ne_header->ResidNamTable + bin->header_offset; RList *symbols = r_list_newf (free); if (!symbols) { return NULL; } RList *entries = r_bin_ne_get_entrypoints (bin); bool resident = true, first = true; while (entries) { ut8 sz = r_buf_read8_at (bin->buf, off); if (!sz) { first = true; if (resident) { resident = false; off = bin->ne_header->OffStartNonResTab; sz = r_buf_read8_at (bin->buf, off); if (!sz) { break; } } else { break; } } char *name = malloc ((ut64)sz + 1); if (!name) { break; } off++; r_buf_read_at (bin->buf, off, (ut8 *)name, sz); name[sz] = '\0'; off += sz; sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = name; if (!first) { sym->bind = R_BIN_BIND_GLOBAL_STR; } ut16 entry_off = r_buf_read_le16_at (bin->buf, off); off += 2; RBinAddr *entry = r_list_get_n (entries, entry_off); if (entry) { sym->paddr = entry->paddr; } else { sym->paddr = -1; } sym->ordinal = entry_off; r_list_append (symbols, sym); first = false; } RListIter *it; RBinAddr *en; int i = 1; r_list_foreach (entries, it, en) { if (!r_list_find (symbols, &en->paddr, __find_symbol_by_paddr)) { sym = R_NEW0 (RBinSymbol); if (!sym) { break; } sym->name = r_str_newf ("entry%d", i - 1); sym->paddr = en->paddr; sym->bind = R_BIN_BIND_GLOBAL_STR; sym->ordinal = i; r_list_append (symbols, sym); } i++; } bin->symbols = symbols; return symbols; }
{'added': [(118, '\twhile (entries) {'), (355, '\tif (!bin->entry_table) {'), (356, '\t\treturn NULL;'), (357, '\t}')], 'deleted': [(118, '\twhile (true) {')]}
4
1
623
3,774
72
399
12
https://github.com/radareorg/radare2
CVE-2022-1283
CWE-476
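Per this record's diff field, the radare2 fix guards the symbol loop on the entries list (while (entries) instead of while (true)) and makes r_bin_ne_get_entrypoints return NULL early when bin->entry_table is absent. A minimal sketch of that NULL-guard pattern, using simplified stand-in types rather than the radare2 API:

/* Minimal sketch of the NULL-guard pattern; stand-in types, not radare2. */
#include <stddef.h>
#include <stdio.h>

typedef struct node { unsigned paddr; struct node *next; } entry_list_t;

/* Like r_bin_ne_get_entrypoints() after the fix: may return NULL when
 * the input lacks an entry table, and callers must tolerate that. */
static entry_list_t *get_entrypoints(void) { return NULL; }

int main(void)
{
    entry_list_t *entries = get_entrypoints();
    /* Pre-fix shape was `while (true) { ... use entries ... }`, which
     * runs the body even when entries == NULL. Making the pointer itself
     * the loop condition skips the body entirely in that case. */
    for (entry_list_t *it = entries; it != NULL; it = it->next)
        printf("entry paddr=%u\n", it->paddr);
    puts("NULL entry list handled without dereference");
    return 0;
}

The same rule appears twice in the patch: the producer advertises NULL as a legal result, and the consumer's loop condition encodes that contract instead of assuming success.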
186
jpegc.cxx
C++
ReadJPEG
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* * This file is part of the LibreOffice project. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * This file incorporates work covered by the following license notice: * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright * ownership. The ASF licenses this file to you under the Apache * License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.apache.org/licenses/LICENSE-2.0 . */ #include <sal/config.h> #include <stdio.h> #include <stdlib.h> #include <setjmp.h> #include <jpeglib.h> #include <jerror.h> #include <com/sun/star/task/XStatusIndicator.hpp> #include <osl/diagnose.h> extern "C" { #include "transupp.h" } #include "jpeg.h" #include <JpegReader.hxx> #include <JpegWriter.hxx> #include <memory> #include <vcl/bitmapaccess.hxx> #ifdef _MSC_VER #pragma warning(push, 1) /* disable to __declspec(align()) aligned warning */ #pragma warning (disable: 4324) #endif struct ErrorManagerStruct { jpeg_error_mgr pub; jmp_buf setjmp_buffer; }; #ifdef _MSC_VER #pragma warning(pop) #endif extern "C" void errorExit (j_common_ptr cinfo) { ErrorManagerStruct * error = reinterpret_cast<ErrorManagerStruct *>(cinfo->err); (*cinfo->err->output_message) (cinfo); longjmp(error->setjmp_buffer, 1); } extern "C" void outputMessage (j_common_ptr cinfo) { char buffer[JMSG_LENGTH_MAX]; (*cinfo->err->format_message) (cinfo, buffer); SAL_WARN("vcl.filter", "failure reading JPEG: " << buffer); } void ReadJPEG( JPEGReader* pJPEGReader, void* pInputStream, long* pLines, Size const & previewSize ) { jpeg_decompress_struct cinfo; ErrorManagerStruct jerr; if ( setjmp( jerr.setjmp_buffer ) ) { jpeg_destroy_decompress( &cinfo ); return; } cinfo.err = jpeg_std_error( &jerr.pub ); jerr.pub.error_exit = errorExit; jerr.pub.output_message = outputMessage; jpeg_create_decompress( &cinfo ); jpeg_svstream_src( &cinfo, pInputStream ); SourceManagerStruct *source = reinterpret_cast<SourceManagerStruct*>(cinfo.src); jpeg_read_header( &cinfo, TRUE ); cinfo.scale_num = 1; cinfo.scale_denom = 1; cinfo.output_gamma = 1.0; cinfo.raw_data_out = FALSE; cinfo.quantize_colors = FALSE; /* change scale for preview import */ long nPreviewWidth = previewSize.Width(); long nPreviewHeight = previewSize.Height(); if( nPreviewWidth || nPreviewHeight ) { if( nPreviewWidth == 0 ) { nPreviewWidth = ( cinfo.image_width * nPreviewHeight ) / cinfo.image_height; if( nPreviewWidth <= 0 ) { nPreviewWidth = 1; } } else if( nPreviewHeight == 0 ) { nPreviewHeight = ( cinfo.image_height * nPreviewWidth ) / cinfo.image_width; if( nPreviewHeight <= 0 ) { nPreviewHeight = 1; } } for( cinfo.scale_denom = 1; cinfo.scale_denom < 8; cinfo.scale_denom *= 2 ) { if( cinfo.image_width < nPreviewWidth * cinfo.scale_denom ) break; if( cinfo.image_height < nPreviewHeight * cinfo.scale_denom ) break; } if( cinfo.scale_denom > 1 ) { cinfo.dct_method = JDCT_FASTEST; cinfo.do_fancy_upsampling = FALSE; cinfo.do_block_smoothing = FALSE; } } jpeg_start_decompress( &cinfo ); long nWidth = cinfo.output_width; long nHeight = cinfo.output_height; bool bGray = (cinfo.output_components == 1); JPEGCreateBitmapParam 
aCreateBitmapParam; aCreateBitmapParam.nWidth = nWidth; aCreateBitmapParam.nHeight = nHeight; aCreateBitmapParam.density_unit = cinfo.density_unit; aCreateBitmapParam.X_density = cinfo.X_density; aCreateBitmapParam.Y_density = cinfo.Y_density; aCreateBitmapParam.bGray = bGray; bool bBitmapCreated = pJPEGReader->CreateBitmap(aCreateBitmapParam); if (bBitmapCreated) { Bitmap::ScopedWriteAccess pAccess(pJPEGReader->GetBitmap()); if (pAccess) { int nPixelSize = 3; J_COLOR_SPACE best_out_color_space = JCS_RGB; ScanlineFormat eScanlineFormat = ScanlineFormat::N24BitTcRgb; ScanlineFormat eFinalFormat = pAccess->GetScanlineFormat(); if (eFinalFormat == ScanlineFormat::N32BitTcBgra) { best_out_color_space = JCS_EXT_BGRA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcRgba) { best_out_color_space = JCS_EXT_RGBA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcArgb) { best_out_color_space = JCS_EXT_ARGB; eScanlineFormat = eFinalFormat; nPixelSize = 4; } if ( cinfo.jpeg_color_space == JCS_YCbCr ) cinfo.out_color_space = best_out_color_space; else if ( cinfo.jpeg_color_space == JCS_YCCK ) cinfo.out_color_space = JCS_CMYK; if (cinfo.out_color_space != JCS_CMYK && cinfo.out_color_space != JCS_GRAYSCALE && cinfo.out_color_space != best_out_color_space) { SAL_WARN("vcl.filter", "jpg with unknown out color space, forcing to :" << best_out_color_space); cinfo.out_color_space = best_out_color_space; } JSAMPLE* aRangeLimit = cinfo.sample_range_limit; std::vector<sal_uInt8> pScanLineBuffer(nWidth * (bGray ? 1 : nPixelSize)); std::vector<sal_uInt8> pCYMKBuffer; if (cinfo.out_color_space == JCS_CMYK) { pCYMKBuffer.resize(nWidth * 4); } std::unique_ptr<BitmapColor[]> pCols; if (bGray) { pCols.reset(new BitmapColor[256]); for (sal_uInt16 n = 0; n < 256; n++) { const sal_uInt8 cGray = n; pCols[n] = pAccess->GetBestMatchingColor(BitmapColor(cGray, cGray, cGray)); } } for (*pLines = 0; *pLines < nHeight && !source->no_data_available; (*pLines)++) { size_t yIndex = *pLines; sal_uInt8* p = (cinfo.out_color_space == JCS_CMYK) ? pCYMKBuffer.data() : pScanLineBuffer.data(); jpeg_read_scanlines(&cinfo, reinterpret_cast<JSAMPARRAY>(&p), 1); if (bGray) { for (long x = 0; x < nWidth; ++x) { sal_uInt8 nColorGray = pScanLineBuffer[x]; pAccess->SetPixel(yIndex, x, pCols[nColorGray]); } } else if (cinfo.out_color_space == JCS_CMYK) { // convert CMYK to RGB for (long cmyk = 0, x = 0; cmyk < nWidth * 4; cmyk += 4, ++x) { int color_C = 255 - pCYMKBuffer[cmyk + 0]; int color_M = 255 - pCYMKBuffer[cmyk + 1]; int color_Y = 255 - pCYMKBuffer[cmyk + 2]; int color_K = 255 - pCYMKBuffer[cmyk + 3]; sal_uInt8 cRed = aRangeLimit[255L - (color_C + color_K)]; sal_uInt8 cGreen = aRangeLimit[255L - (color_M + color_K)]; sal_uInt8 cBlue = aRangeLimit[255L - (color_Y + color_K)]; pAccess->SetPixel(yIndex, x, BitmapColor(cRed, cGreen, cBlue)); } } else { pAccess->CopyScanline(yIndex, pScanLineBuffer.data(), eScanlineFormat, pScanLineBuffer.size()); } /* PENDING ??? 
*/ if (cinfo.err->msg_code == 113) break; } } } if (bBitmapCreated) { jpeg_finish_decompress( &cinfo ); } else { jpeg_abort_decompress( &cinfo ); } jpeg_destroy_decompress( &cinfo ); } bool WriteJPEG( JPEGWriter* pJPEGWriter, void* pOutputStream, long nWidth, long nHeight, basegfx::B2DSize const & aPPI, bool bGreys, long nQualityPercent, long aChromaSubsampling, css::uno::Reference<css::task::XStatusIndicator> const & status ) { jpeg_compress_struct cinfo; ErrorManagerStruct jerr; void* pScanline; long nY; if ( setjmp( jerr.setjmp_buffer ) ) { jpeg_destroy_compress( &cinfo ); return false; } cinfo.err = jpeg_std_error( &jerr.pub ); jerr.pub.error_exit = errorExit; jerr.pub.output_message = outputMessage; jpeg_create_compress( &cinfo ); jpeg_svstream_dest( &cinfo, pOutputStream ); cinfo.image_width = (JDIMENSION) nWidth; cinfo.image_height = (JDIMENSION) nHeight; if ( bGreys ) { cinfo.input_components = 1; cinfo.in_color_space = JCS_GRAYSCALE; } else { cinfo.input_components = 3; cinfo.in_color_space = JCS_RGB; } jpeg_set_defaults( &cinfo ); jpeg_set_quality( &cinfo, (int) nQualityPercent, FALSE ); cinfo.density_unit = 1; cinfo.X_density = aPPI.getX(); cinfo.Y_density = aPPI.getY(); if ( ( nWidth > 128 ) || ( nHeight > 128 ) ) jpeg_simple_progression( &cinfo ); if (aChromaSubsampling == 1) // YUV 4:4:4 { cinfo.comp_info[0].h_samp_factor = 1; cinfo.comp_info[0].v_samp_factor = 1; } else if (aChromaSubsampling == 2) // YUV 4:2:2 { cinfo.comp_info[0].h_samp_factor = 2; cinfo.comp_info[0].v_samp_factor = 1; } else if (aChromaSubsampling == 3) // YUV 4:2:0 { cinfo.comp_info[0].h_samp_factor = 2; cinfo.comp_info[0].v_samp_factor = 2; } jpeg_start_compress( &cinfo, TRUE ); for( nY = 0; nY < nHeight; nY++ ) { pScanline = pJPEGWriter->GetScanline( nY ); if( pScanline ) { jpeg_write_scanlines( &cinfo, reinterpret_cast<JSAMPARRAY>(&pScanline), 1 ); } if( status.is() ) { status->setValue( nY * 100L / nHeight ); } } jpeg_finish_compress(&cinfo); jpeg_destroy_compress( &cinfo ); return true; } long Transform(void* pInputStream, void* pOutputStream, long nAngle) { jpeg_transform_info aTransformOption; JCOPY_OPTION aCopyOption = JCOPYOPT_ALL; jpeg_decompress_struct aSourceInfo; jpeg_compress_struct aDestinationInfo; ErrorManagerStruct aSourceError; ErrorManagerStruct aDestinationError; jvirt_barray_ptr* aSourceCoefArrays = nullptr; jvirt_barray_ptr* aDestinationCoefArrays = nullptr; aTransformOption.force_grayscale = FALSE; aTransformOption.trim = FALSE; aTransformOption.perfect = FALSE; aTransformOption.crop = FALSE; // Angle to transform option // 90 Clockwise = 270 Counterclockwise switch (nAngle) { case 2700: aTransformOption.transform = JXFORM_ROT_90; break; case 1800: aTransformOption.transform = JXFORM_ROT_180; break; case 900: aTransformOption.transform = JXFORM_ROT_270; break; default: aTransformOption.transform = JXFORM_NONE; } // Decompression aSourceInfo.err = jpeg_std_error(&aSourceError.pub); aSourceInfo.err->error_exit = errorExit; aSourceInfo.err->output_message = outputMessage; // Compression aDestinationInfo.err = jpeg_std_error(&aDestinationError.pub); aDestinationInfo.err->error_exit = errorExit; aDestinationInfo.err->output_message = outputMessage; aDestinationInfo.optimize_coding = TRUE; if (setjmp(aSourceError.setjmp_buffer) || setjmp(aDestinationError.setjmp_buffer)) { jpeg_destroy_decompress(&aSourceInfo); jpeg_destroy_compress(&aDestinationInfo); return 0; } jpeg_create_decompress(&aSourceInfo); jpeg_create_compress(&aDestinationInfo); jpeg_svstream_src (&aSourceInfo, 
pInputStream); jcopy_markers_setup(&aSourceInfo, aCopyOption); jpeg_read_header(&aSourceInfo, TRUE); jtransform_request_workspace(&aSourceInfo, &aTransformOption); aSourceCoefArrays = jpeg_read_coefficients(&aSourceInfo); jpeg_copy_critical_parameters(&aSourceInfo, &aDestinationInfo); aDestinationCoefArrays = jtransform_adjust_parameters(&aSourceInfo, &aDestinationInfo, aSourceCoefArrays, &aTransformOption); jpeg_svstream_dest (&aDestinationInfo, pOutputStream); // Compute optimal Huffman coding tables instead of precomuted tables aDestinationInfo.optimize_coding = TRUE; jpeg_write_coefficients(&aDestinationInfo, aDestinationCoefArrays); jcopy_markers_execute(&aSourceInfo, &aDestinationInfo, aCopyOption); jtransform_execute_transformation(&aSourceInfo, &aDestinationInfo, aSourceCoefArrays, &aTransformOption); jpeg_finish_compress(&aDestinationInfo); jpeg_destroy_compress(&aDestinationInfo); jpeg_finish_decompress(&aSourceInfo); jpeg_destroy_decompress(&aSourceInfo); return 1; } /* vim:set shiftwidth=4 softtabstop=4 expandtab: */
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ /* * This file is part of the LibreOffice project. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * This file incorporates work covered by the following license notice: * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed * with this work for additional information regarding copyright * ownership. The ASF licenses this file to you under the Apache * License, Version 2.0 (the "License"); you may not use this file * except in compliance with the License. You may obtain a copy of * the License at http://www.apache.org/licenses/LICENSE-2.0 . */ #include <sal/config.h> #include <stdio.h> #include <stdlib.h> #include <setjmp.h> #include <jpeglib.h> #include <jerror.h> #include <com/sun/star/task/XStatusIndicator.hpp> #include <osl/diagnose.h> extern "C" { #include "transupp.h" } #include "jpeg.h" #include <JpegReader.hxx> #include <JpegWriter.hxx> #include <memory> #include <vcl/bitmapaccess.hxx> #ifdef _MSC_VER #pragma warning(push, 1) /* disable to __declspec(align()) aligned warning */ #pragma warning (disable: 4324) #endif struct ErrorManagerStruct { jpeg_error_mgr pub; jmp_buf setjmp_buffer; }; #ifdef _MSC_VER #pragma warning(pop) #endif extern "C" void errorExit (j_common_ptr cinfo) { ErrorManagerStruct * error = reinterpret_cast<ErrorManagerStruct *>(cinfo->err); (*cinfo->err->output_message) (cinfo); longjmp(error->setjmp_buffer, 1); } extern "C" void outputMessage (j_common_ptr cinfo) { char buffer[JMSG_LENGTH_MAX]; (*cinfo->err->format_message) (cinfo, buffer); SAL_WARN("vcl.filter", "failure reading JPEG: " << buffer); } void ReadJPEG( JPEGReader* pJPEGReader, void* pInputStream, long* pLines, Size const & previewSize ) { jpeg_decompress_struct cinfo; ErrorManagerStruct jerr; if ( setjmp( jerr.setjmp_buffer ) ) { jpeg_destroy_decompress( &cinfo ); return; } cinfo.err = jpeg_std_error( &jerr.pub ); jerr.pub.error_exit = errorExit; jerr.pub.output_message = outputMessage; jpeg_create_decompress( &cinfo ); jpeg_svstream_src( &cinfo, pInputStream ); SourceManagerStruct *source = reinterpret_cast<SourceManagerStruct*>(cinfo.src); jpeg_read_header( &cinfo, TRUE ); cinfo.scale_num = 1; cinfo.scale_denom = 1; cinfo.output_gamma = 1.0; cinfo.raw_data_out = FALSE; cinfo.quantize_colors = FALSE; /* change scale for preview import */ long nPreviewWidth = previewSize.Width(); long nPreviewHeight = previewSize.Height(); if( nPreviewWidth || nPreviewHeight ) { if( nPreviewWidth == 0 ) { nPreviewWidth = ( cinfo.image_width * nPreviewHeight ) / cinfo.image_height; if( nPreviewWidth <= 0 ) { nPreviewWidth = 1; } } else if( nPreviewHeight == 0 ) { nPreviewHeight = ( cinfo.image_height * nPreviewWidth ) / cinfo.image_width; if( nPreviewHeight <= 0 ) { nPreviewHeight = 1; } } for( cinfo.scale_denom = 1; cinfo.scale_denom < 8; cinfo.scale_denom *= 2 ) { if( cinfo.image_width < nPreviewWidth * cinfo.scale_denom ) break; if( cinfo.image_height < nPreviewHeight * cinfo.scale_denom ) break; } if( cinfo.scale_denom > 1 ) { cinfo.dct_method = JDCT_FASTEST; cinfo.do_fancy_upsampling = FALSE; cinfo.do_block_smoothing = FALSE; } } jpeg_calc_output_dimensions(&cinfo); long nWidth = cinfo.output_width; long nHeight = cinfo.output_height; bool bGray = (cinfo.output_components == 1); JPEGCreateBitmapParam 
aCreateBitmapParam; aCreateBitmapParam.nWidth = nWidth; aCreateBitmapParam.nHeight = nHeight; aCreateBitmapParam.density_unit = cinfo.density_unit; aCreateBitmapParam.X_density = cinfo.X_density; aCreateBitmapParam.Y_density = cinfo.Y_density; aCreateBitmapParam.bGray = bGray; bool bBitmapCreated = pJPEGReader->CreateBitmap(aCreateBitmapParam); if (bBitmapCreated) { Bitmap::ScopedWriteAccess pAccess(pJPEGReader->GetBitmap()); if (pAccess) { int nPixelSize = 3; J_COLOR_SPACE best_out_color_space = JCS_RGB; ScanlineFormat eScanlineFormat = ScanlineFormat::N24BitTcRgb; ScanlineFormat eFinalFormat = pAccess->GetScanlineFormat(); if (eFinalFormat == ScanlineFormat::N32BitTcBgra) { best_out_color_space = JCS_EXT_BGRA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcRgba) { best_out_color_space = JCS_EXT_RGBA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcArgb) { best_out_color_space = JCS_EXT_ARGB; eScanlineFormat = eFinalFormat; nPixelSize = 4; } if ( cinfo.jpeg_color_space == JCS_YCbCr ) cinfo.out_color_space = best_out_color_space; else if ( cinfo.jpeg_color_space == JCS_YCCK ) cinfo.out_color_space = JCS_CMYK; if (cinfo.out_color_space != JCS_CMYK && cinfo.out_color_space != JCS_GRAYSCALE && cinfo.out_color_space != best_out_color_space) { SAL_WARN("vcl.filter", "jpg with unknown out color space, forcing to :" << best_out_color_space << " gray "); cinfo.out_color_space = best_out_color_space; } jpeg_start_decompress(&cinfo); JSAMPLE* aRangeLimit = cinfo.sample_range_limit; std::vector<sal_uInt8> pScanLineBuffer(nWidth * (bGray ? 1 : nPixelSize)); std::vector<sal_uInt8> pCYMKBuffer; if (cinfo.out_color_space == JCS_CMYK) { pCYMKBuffer.resize(nWidth * 4); } std::unique_ptr<BitmapColor[]> pCols; if (bGray) { pCols.reset(new BitmapColor[256]); for (sal_uInt16 n = 0; n < 256; n++) { const sal_uInt8 cGray = n; pCols[n] = pAccess->GetBestMatchingColor(BitmapColor(cGray, cGray, cGray)); } } for (*pLines = 0; *pLines < nHeight && !source->no_data_available; (*pLines)++) { size_t yIndex = *pLines; sal_uInt8* p = (cinfo.out_color_space == JCS_CMYK) ? pCYMKBuffer.data() : pScanLineBuffer.data(); jpeg_read_scanlines(&cinfo, reinterpret_cast<JSAMPARRAY>(&p), 1); if (bGray) { for (long x = 0; x < nWidth; ++x) { sal_uInt8 nColorGray = pScanLineBuffer[x]; pAccess->SetPixel(yIndex, x, pCols[nColorGray]); } } else if (cinfo.out_color_space == JCS_CMYK) { // convert CMYK to RGB for (long cmyk = 0, x = 0; cmyk < nWidth * 4; cmyk += 4, ++x) { int color_C = 255 - pCYMKBuffer[cmyk + 0]; int color_M = 255 - pCYMKBuffer[cmyk + 1]; int color_Y = 255 - pCYMKBuffer[cmyk + 2]; int color_K = 255 - pCYMKBuffer[cmyk + 3]; sal_uInt8 cRed = aRangeLimit[255L - (color_C + color_K)]; sal_uInt8 cGreen = aRangeLimit[255L - (color_M + color_K)]; sal_uInt8 cBlue = aRangeLimit[255L - (color_Y + color_K)]; pAccess->SetPixel(yIndex, x, BitmapColor(cRed, cGreen, cBlue)); } } else { pAccess->CopyScanline(yIndex, pScanLineBuffer.data(), eScanlineFormat, pScanLineBuffer.size()); } /* PENDING ??? 
*/ if (cinfo.err->msg_code == 113) break; } } } if (bBitmapCreated) { jpeg_finish_decompress( &cinfo ); } else { jpeg_abort_decompress( &cinfo ); } jpeg_destroy_decompress( &cinfo ); } bool WriteJPEG( JPEGWriter* pJPEGWriter, void* pOutputStream, long nWidth, long nHeight, basegfx::B2DSize const & aPPI, bool bGreys, long nQualityPercent, long aChromaSubsampling, css::uno::Reference<css::task::XStatusIndicator> const & status ) { jpeg_compress_struct cinfo; ErrorManagerStruct jerr; void* pScanline; long nY; if ( setjmp( jerr.setjmp_buffer ) ) { jpeg_destroy_compress( &cinfo ); return false; } cinfo.err = jpeg_std_error( &jerr.pub ); jerr.pub.error_exit = errorExit; jerr.pub.output_message = outputMessage; jpeg_create_compress( &cinfo ); jpeg_svstream_dest( &cinfo, pOutputStream ); cinfo.image_width = (JDIMENSION) nWidth; cinfo.image_height = (JDIMENSION) nHeight; if ( bGreys ) { cinfo.input_components = 1; cinfo.in_color_space = JCS_GRAYSCALE; } else { cinfo.input_components = 3; cinfo.in_color_space = JCS_RGB; } jpeg_set_defaults( &cinfo ); jpeg_set_quality( &cinfo, (int) nQualityPercent, FALSE ); cinfo.density_unit = 1; cinfo.X_density = aPPI.getX(); cinfo.Y_density = aPPI.getY(); if ( ( nWidth > 128 ) || ( nHeight > 128 ) ) jpeg_simple_progression( &cinfo ); if (aChromaSubsampling == 1) // YUV 4:4:4 { cinfo.comp_info[0].h_samp_factor = 1; cinfo.comp_info[0].v_samp_factor = 1; } else if (aChromaSubsampling == 2) // YUV 4:2:2 { cinfo.comp_info[0].h_samp_factor = 2; cinfo.comp_info[0].v_samp_factor = 1; } else if (aChromaSubsampling == 3) // YUV 4:2:0 { cinfo.comp_info[0].h_samp_factor = 2; cinfo.comp_info[0].v_samp_factor = 2; } jpeg_start_compress( &cinfo, TRUE ); for( nY = 0; nY < nHeight; nY++ ) { pScanline = pJPEGWriter->GetScanline( nY ); if( pScanline ) { jpeg_write_scanlines( &cinfo, reinterpret_cast<JSAMPARRAY>(&pScanline), 1 ); } if( status.is() ) { status->setValue( nY * 100L / nHeight ); } } jpeg_finish_compress(&cinfo); jpeg_destroy_compress( &cinfo ); return true; } long Transform(void* pInputStream, void* pOutputStream, long nAngle) { jpeg_transform_info aTransformOption; JCOPY_OPTION aCopyOption = JCOPYOPT_ALL; jpeg_decompress_struct aSourceInfo; jpeg_compress_struct aDestinationInfo; ErrorManagerStruct aSourceError; ErrorManagerStruct aDestinationError; jvirt_barray_ptr* aSourceCoefArrays = nullptr; jvirt_barray_ptr* aDestinationCoefArrays = nullptr; aTransformOption.force_grayscale = FALSE; aTransformOption.trim = FALSE; aTransformOption.perfect = FALSE; aTransformOption.crop = FALSE; // Angle to transform option // 90 Clockwise = 270 Counterclockwise switch (nAngle) { case 2700: aTransformOption.transform = JXFORM_ROT_90; break; case 1800: aTransformOption.transform = JXFORM_ROT_180; break; case 900: aTransformOption.transform = JXFORM_ROT_270; break; default: aTransformOption.transform = JXFORM_NONE; } // Decompression aSourceInfo.err = jpeg_std_error(&aSourceError.pub); aSourceInfo.err->error_exit = errorExit; aSourceInfo.err->output_message = outputMessage; // Compression aDestinationInfo.err = jpeg_std_error(&aDestinationError.pub); aDestinationInfo.err->error_exit = errorExit; aDestinationInfo.err->output_message = outputMessage; aDestinationInfo.optimize_coding = TRUE; if (setjmp(aSourceError.setjmp_buffer) || setjmp(aDestinationError.setjmp_buffer)) { jpeg_destroy_decompress(&aSourceInfo); jpeg_destroy_compress(&aDestinationInfo); return 0; } jpeg_create_decompress(&aSourceInfo); jpeg_create_compress(&aDestinationInfo); jpeg_svstream_src (&aSourceInfo, 
pInputStream); jcopy_markers_setup(&aSourceInfo, aCopyOption); jpeg_read_header(&aSourceInfo, TRUE); jtransform_request_workspace(&aSourceInfo, &aTransformOption); aSourceCoefArrays = jpeg_read_coefficients(&aSourceInfo); jpeg_copy_critical_parameters(&aSourceInfo, &aDestinationInfo); aDestinationCoefArrays = jtransform_adjust_parameters(&aSourceInfo, &aDestinationInfo, aSourceCoefArrays, &aTransformOption); jpeg_svstream_dest (&aDestinationInfo, pOutputStream); // Compute optimal Huffman coding tables instead of precomuted tables aDestinationInfo.optimize_coding = TRUE; jpeg_write_coefficients(&aDestinationInfo, aDestinationCoefArrays); jcopy_markers_execute(&aSourceInfo, &aDestinationInfo, aCopyOption); jtransform_execute_transformation(&aSourceInfo, &aDestinationInfo, aSourceCoefArrays, &aTransformOption); jpeg_finish_compress(&aDestinationInfo); jpeg_destroy_compress(&aDestinationInfo); jpeg_finish_decompress(&aSourceInfo); jpeg_destroy_decompress(&aSourceInfo); return 1; } /* vim:set shiftwidth=4 softtabstop=4 expandtab: */
void ReadJPEG( JPEGReader* pJPEGReader, void* pInputStream, long* pLines, Size const & previewSize ) { jpeg_decompress_struct cinfo; ErrorManagerStruct jerr; if ( setjmp( jerr.setjmp_buffer ) ) { jpeg_destroy_decompress( &cinfo ); return; } cinfo.err = jpeg_std_error( &jerr.pub ); jerr.pub.error_exit = errorExit; jerr.pub.output_message = outputMessage; jpeg_create_decompress( &cinfo ); jpeg_svstream_src( &cinfo, pInputStream ); SourceManagerStruct *source = reinterpret_cast<SourceManagerStruct*>(cinfo.src); jpeg_read_header( &cinfo, TRUE ); cinfo.scale_num = 1; cinfo.scale_denom = 1; cinfo.output_gamma = 1.0; cinfo.raw_data_out = FALSE; cinfo.quantize_colors = FALSE; /* change scale for preview import */ long nPreviewWidth = previewSize.Width(); long nPreviewHeight = previewSize.Height(); if( nPreviewWidth || nPreviewHeight ) { if( nPreviewWidth == 0 ) { nPreviewWidth = ( cinfo.image_width * nPreviewHeight ) / cinfo.image_height; if( nPreviewWidth <= 0 ) { nPreviewWidth = 1; } } else if( nPreviewHeight == 0 ) { nPreviewHeight = ( cinfo.image_height * nPreviewWidth ) / cinfo.image_width; if( nPreviewHeight <= 0 ) { nPreviewHeight = 1; } } for( cinfo.scale_denom = 1; cinfo.scale_denom < 8; cinfo.scale_denom *= 2 ) { if( cinfo.image_width < nPreviewWidth * cinfo.scale_denom ) break; if( cinfo.image_height < nPreviewHeight * cinfo.scale_denom ) break; } if( cinfo.scale_denom > 1 ) { cinfo.dct_method = JDCT_FASTEST; cinfo.do_fancy_upsampling = FALSE; cinfo.do_block_smoothing = FALSE; } } jpeg_start_decompress( &cinfo ); long nWidth = cinfo.output_width; long nHeight = cinfo.output_height; bool bGray = (cinfo.output_components == 1); JPEGCreateBitmapParam aCreateBitmapParam; aCreateBitmapParam.nWidth = nWidth; aCreateBitmapParam.nHeight = nHeight; aCreateBitmapParam.density_unit = cinfo.density_unit; aCreateBitmapParam.X_density = cinfo.X_density; aCreateBitmapParam.Y_density = cinfo.Y_density; aCreateBitmapParam.bGray = bGray; bool bBitmapCreated = pJPEGReader->CreateBitmap(aCreateBitmapParam); if (bBitmapCreated) { Bitmap::ScopedWriteAccess pAccess(pJPEGReader->GetBitmap()); if (pAccess) { int nPixelSize = 3; J_COLOR_SPACE best_out_color_space = JCS_RGB; ScanlineFormat eScanlineFormat = ScanlineFormat::N24BitTcRgb; ScanlineFormat eFinalFormat = pAccess->GetScanlineFormat(); if (eFinalFormat == ScanlineFormat::N32BitTcBgra) { best_out_color_space = JCS_EXT_BGRA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcRgba) { best_out_color_space = JCS_EXT_RGBA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcArgb) { best_out_color_space = JCS_EXT_ARGB; eScanlineFormat = eFinalFormat; nPixelSize = 4; } if ( cinfo.jpeg_color_space == JCS_YCbCr ) cinfo.out_color_space = best_out_color_space; else if ( cinfo.jpeg_color_space == JCS_YCCK ) cinfo.out_color_space = JCS_CMYK; if (cinfo.out_color_space != JCS_CMYK && cinfo.out_color_space != JCS_GRAYSCALE && cinfo.out_color_space != best_out_color_space) { SAL_WARN("vcl.filter", "jpg with unknown out color space, forcing to :" << best_out_color_space); cinfo.out_color_space = best_out_color_space; } JSAMPLE* aRangeLimit = cinfo.sample_range_limit; std::vector<sal_uInt8> pScanLineBuffer(nWidth * (bGray ? 
1 : nPixelSize)); std::vector<sal_uInt8> pCYMKBuffer; if (cinfo.out_color_space == JCS_CMYK) { pCYMKBuffer.resize(nWidth * 4); } std::unique_ptr<BitmapColor[]> pCols; if (bGray) { pCols.reset(new BitmapColor[256]); for (sal_uInt16 n = 0; n < 256; n++) { const sal_uInt8 cGray = n; pCols[n] = pAccess->GetBestMatchingColor(BitmapColor(cGray, cGray, cGray)); } } for (*pLines = 0; *pLines < nHeight && !source->no_data_available; (*pLines)++) { size_t yIndex = *pLines; sal_uInt8* p = (cinfo.out_color_space == JCS_CMYK) ? pCYMKBuffer.data() : pScanLineBuffer.data(); jpeg_read_scanlines(&cinfo, reinterpret_cast<JSAMPARRAY>(&p), 1); if (bGray) { for (long x = 0; x < nWidth; ++x) { sal_uInt8 nColorGray = pScanLineBuffer[x]; pAccess->SetPixel(yIndex, x, pCols[nColorGray]); } } else if (cinfo.out_color_space == JCS_CMYK) { // convert CMYK to RGB for (long cmyk = 0, x = 0; cmyk < nWidth * 4; cmyk += 4, ++x) { int color_C = 255 - pCYMKBuffer[cmyk + 0]; int color_M = 255 - pCYMKBuffer[cmyk + 1]; int color_Y = 255 - pCYMKBuffer[cmyk + 2]; int color_K = 255 - pCYMKBuffer[cmyk + 3]; sal_uInt8 cRed = aRangeLimit[255L - (color_C + color_K)]; sal_uInt8 cGreen = aRangeLimit[255L - (color_M + color_K)]; sal_uInt8 cBlue = aRangeLimit[255L - (color_Y + color_K)]; pAccess->SetPixel(yIndex, x, BitmapColor(cRed, cGreen, cBlue)); } } else { pAccess->CopyScanline(yIndex, pScanLineBuffer.data(), eScanlineFormat, pScanLineBuffer.size()); } /* PENDING ??? */ if (cinfo.err->msg_code == 113) break; } } } if (bBitmapCreated) { jpeg_finish_decompress( &cinfo ); } else { jpeg_abort_decompress( &cinfo ); } jpeg_destroy_decompress( &cinfo ); }
void ReadJPEG( JPEGReader* pJPEGReader, void* pInputStream, long* pLines, Size const & previewSize ) { jpeg_decompress_struct cinfo; ErrorManagerStruct jerr; if ( setjmp( jerr.setjmp_buffer ) ) { jpeg_destroy_decompress( &cinfo ); return; } cinfo.err = jpeg_std_error( &jerr.pub ); jerr.pub.error_exit = errorExit; jerr.pub.output_message = outputMessage; jpeg_create_decompress( &cinfo ); jpeg_svstream_src( &cinfo, pInputStream ); SourceManagerStruct *source = reinterpret_cast<SourceManagerStruct*>(cinfo.src); jpeg_read_header( &cinfo, TRUE ); cinfo.scale_num = 1; cinfo.scale_denom = 1; cinfo.output_gamma = 1.0; cinfo.raw_data_out = FALSE; cinfo.quantize_colors = FALSE; /* change scale for preview import */ long nPreviewWidth = previewSize.Width(); long nPreviewHeight = previewSize.Height(); if( nPreviewWidth || nPreviewHeight ) { if( nPreviewWidth == 0 ) { nPreviewWidth = ( cinfo.image_width * nPreviewHeight ) / cinfo.image_height; if( nPreviewWidth <= 0 ) { nPreviewWidth = 1; } } else if( nPreviewHeight == 0 ) { nPreviewHeight = ( cinfo.image_height * nPreviewWidth ) / cinfo.image_width; if( nPreviewHeight <= 0 ) { nPreviewHeight = 1; } } for( cinfo.scale_denom = 1; cinfo.scale_denom < 8; cinfo.scale_denom *= 2 ) { if( cinfo.image_width < nPreviewWidth * cinfo.scale_denom ) break; if( cinfo.image_height < nPreviewHeight * cinfo.scale_denom ) break; } if( cinfo.scale_denom > 1 ) { cinfo.dct_method = JDCT_FASTEST; cinfo.do_fancy_upsampling = FALSE; cinfo.do_block_smoothing = FALSE; } } jpeg_calc_output_dimensions(&cinfo); long nWidth = cinfo.output_width; long nHeight = cinfo.output_height; bool bGray = (cinfo.output_components == 1); JPEGCreateBitmapParam aCreateBitmapParam; aCreateBitmapParam.nWidth = nWidth; aCreateBitmapParam.nHeight = nHeight; aCreateBitmapParam.density_unit = cinfo.density_unit; aCreateBitmapParam.X_density = cinfo.X_density; aCreateBitmapParam.Y_density = cinfo.Y_density; aCreateBitmapParam.bGray = bGray; bool bBitmapCreated = pJPEGReader->CreateBitmap(aCreateBitmapParam); if (bBitmapCreated) { Bitmap::ScopedWriteAccess pAccess(pJPEGReader->GetBitmap()); if (pAccess) { int nPixelSize = 3; J_COLOR_SPACE best_out_color_space = JCS_RGB; ScanlineFormat eScanlineFormat = ScanlineFormat::N24BitTcRgb; ScanlineFormat eFinalFormat = pAccess->GetScanlineFormat(); if (eFinalFormat == ScanlineFormat::N32BitTcBgra) { best_out_color_space = JCS_EXT_BGRA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcRgba) { best_out_color_space = JCS_EXT_RGBA; eScanlineFormat = eFinalFormat; nPixelSize = 4; } else if (eFinalFormat == ScanlineFormat::N32BitTcArgb) { best_out_color_space = JCS_EXT_ARGB; eScanlineFormat = eFinalFormat; nPixelSize = 4; } if ( cinfo.jpeg_color_space == JCS_YCbCr ) cinfo.out_color_space = best_out_color_space; else if ( cinfo.jpeg_color_space == JCS_YCCK ) cinfo.out_color_space = JCS_CMYK; if (cinfo.out_color_space != JCS_CMYK && cinfo.out_color_space != JCS_GRAYSCALE && cinfo.out_color_space != best_out_color_space) { SAL_WARN("vcl.filter", "jpg with unknown out color space, forcing to :" << best_out_color_space << " gray "); cinfo.out_color_space = best_out_color_space; } jpeg_start_decompress(&cinfo); JSAMPLE* aRangeLimit = cinfo.sample_range_limit; std::vector<sal_uInt8> pScanLineBuffer(nWidth * (bGray ? 
1 : nPixelSize)); std::vector<sal_uInt8> pCYMKBuffer; if (cinfo.out_color_space == JCS_CMYK) { pCYMKBuffer.resize(nWidth * 4); } std::unique_ptr<BitmapColor[]> pCols; if (bGray) { pCols.reset(new BitmapColor[256]); for (sal_uInt16 n = 0; n < 256; n++) { const sal_uInt8 cGray = n; pCols[n] = pAccess->GetBestMatchingColor(BitmapColor(cGray, cGray, cGray)); } } for (*pLines = 0; *pLines < nHeight && !source->no_data_available; (*pLines)++) { size_t yIndex = *pLines; sal_uInt8* p = (cinfo.out_color_space == JCS_CMYK) ? pCYMKBuffer.data() : pScanLineBuffer.data(); jpeg_read_scanlines(&cinfo, reinterpret_cast<JSAMPARRAY>(&p), 1); if (bGray) { for (long x = 0; x < nWidth; ++x) { sal_uInt8 nColorGray = pScanLineBuffer[x]; pAccess->SetPixel(yIndex, x, pCols[nColorGray]); } } else if (cinfo.out_color_space == JCS_CMYK) { // convert CMYK to RGB for (long cmyk = 0, x = 0; cmyk < nWidth * 4; cmyk += 4, ++x) { int color_C = 255 - pCYMKBuffer[cmyk + 0]; int color_M = 255 - pCYMKBuffer[cmyk + 1]; int color_Y = 255 - pCYMKBuffer[cmyk + 2]; int color_K = 255 - pCYMKBuffer[cmyk + 3]; sal_uInt8 cRed = aRangeLimit[255L - (color_C + color_K)]; sal_uInt8 cGreen = aRangeLimit[255L - (color_M + color_K)]; sal_uInt8 cBlue = aRangeLimit[255L - (color_Y + color_K)]; pAccess->SetPixel(yIndex, x, BitmapColor(cRed, cGreen, cBlue)); } } else { pAccess->CopyScanline(yIndex, pScanLineBuffer.data(), eScanlineFormat, pScanLineBuffer.size()); } /* PENDING ??? */ if (cinfo.err->msg_code == 113) break; } } } if (bBitmapCreated) { jpeg_finish_decompress( &cinfo ); } else { jpeg_abort_decompress( &cinfo ); } jpeg_destroy_decompress( &cinfo ); }
{'added': [(135, ' jpeg_calc_output_dimensions(&cinfo);'),
           (192, ' SAL_WARN("vcl.filter", "jpg with unknown out color space, forcing to :" << best_out_color_space << " gray ");'),
           (196, ' jpeg_start_decompress(&cinfo);'),
           (197, '')],
 'deleted': [(135, ' jpeg_start_decompress( &cinfo );'),
             (164, ''),
             (193, ' SAL_WARN("vcl.filter", "jpg with unknown out color space, forcing to :" << best_out_color_space);')]}
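The diff captures the fix: the premature jpeg_start_decompress() (old line 135) is replaced by jpeg_calc_output_dimensions(), which computes output_width/output_height for bitmap allocation without committing any output parameters, and jpeg_start_decompress() reappears (new line 196) only once out_color_space is final. A minimal sketch of that ordering against the plain libjpeg API follows; decode_with_safe_ordering and its stripped-down error handling are illustrative, not code from the LibreOffice filter.

#include <stdlib.h>
#include <jpeglib.h>

static void decode_with_safe_ordering(j_decompress_ptr cinfo)
{
    /* Assumes jpeg_create_decompress() has run and cinfo->src is
     * installed; setjmp-based error handling omitted for brevity. */
    jpeg_read_header(cinfo, TRUE);

    /* 1. Scaled output size for bitmap allocation, without starting
     *    the decompressor (honours scale_num / scale_denom). */
    jpeg_calc_output_dimensions(cinfo);

    /* 2. Pick the output color space while it can still take effect:
     *    jpeg_start_decompress() fixes the color converter and
     *    output_components, so this must happen first. */
    if (cinfo->jpeg_color_space == JCS_YCbCr)
        cinfo->out_color_space = JCS_RGB;     /* or a JCS_EXT_* format */
    else if (cinfo->jpeg_color_space == JCS_YCCK)
        cinfo->out_color_space = JCS_CMYK;

    /* 3. Start, then size each row from the final settings. */
    jpeg_start_decompress(cinfo);

    JSAMPLE *row = malloc((size_t)cinfo->output_width
                          * (size_t)cinfo->output_components);
    if (row == NULL)
    {
        jpeg_abort_decompress(cinfo);
        return;
    }

    while (cinfo->output_scanline < cinfo->output_height)
    {
        JSAMPROW rows[1] = { row };
        jpeg_read_scanlines(cinfo, rows, 1);
        /* ... consume output_components bytes per pixel here ... */
    }

    free(row);
    jpeg_finish_decompress(cinfo);
}

Sizing the row buffer from output_components only after jpeg_start_decompress() keeps the buffer and the decoder's actual per-pixel width in agreement, which is the invariant the pre-patch code violated.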
4
3
334
1,964
169
1,034
35
https://github.com/LibreOffice/core
CVE-2017-8358
CWE-119
1,298
spl_array.c
C
spl_array_has_dimension_ex
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/php_var.h" #include "ext/standard/php_smart_str.h" #include "zend_interfaces.h" #include "zend_exceptions.h" #include "php_spl.h" #include "spl_functions.h" #include "spl_engine.h" #include "spl_iterators.h" #include "spl_array.h" #include "spl_exceptions.h" zend_object_handlers spl_handler_ArrayObject; PHPAPI zend_class_entry *spl_ce_ArrayObject; zend_object_handlers spl_handler_ArrayIterator; PHPAPI zend_class_entry *spl_ce_ArrayIterator; PHPAPI zend_class_entry *spl_ce_RecursiveArrayIterator; #define SPL_ARRAY_STD_PROP_LIST 0x00000001 #define SPL_ARRAY_ARRAY_AS_PROPS 0x00000002 #define SPL_ARRAY_CHILD_ARRAYS_ONLY 0x00000004 #define SPL_ARRAY_OVERLOADED_REWIND 0x00010000 #define SPL_ARRAY_OVERLOADED_VALID 0x00020000 #define SPL_ARRAY_OVERLOADED_KEY 0x00040000 #define SPL_ARRAY_OVERLOADED_CURRENT 0x00080000 #define SPL_ARRAY_OVERLOADED_NEXT 0x00100000 #define SPL_ARRAY_IS_REF 0x01000000 #define SPL_ARRAY_IS_SELF 0x02000000 #define SPL_ARRAY_USE_OTHER 0x04000000 #define SPL_ARRAY_INT_MASK 0xFFFF0000 #define SPL_ARRAY_CLONE_MASK 0x0300FFFF #define SPL_ARRAY_METHOD_NO_ARG 0 #define SPL_ARRAY_METHOD_USE_ARG 1 #define SPL_ARRAY_METHOD_MAY_USER_ARG 2 typedef struct _spl_array_object { zend_object std; zval *array; zval *retval; HashPosition pos; ulong pos_h; int ar_flags; int is_self; zend_function *fptr_offset_get; zend_function *fptr_offset_set; zend_function *fptr_offset_has; zend_function *fptr_offset_del; zend_function *fptr_count; zend_class_entry* ce_get_iterator; HashTable *debug_info; unsigned char nApplyCount; } spl_array_object; static inline HashTable *spl_array_get_hash_table(spl_array_object* intern, int check_std_props TSRMLS_DC) { /* {{{ */ if ((intern->ar_flags & SPL_ARRAY_IS_SELF) != 0) { if (!intern->std.properties) { rebuild_object_properties(&intern->std); } return intern->std.properties; } else if ((intern->ar_flags & SPL_ARRAY_USE_OTHER) && (check_std_props == 0 || (intern->ar_flags & SPL_ARRAY_STD_PROP_LIST) == 0) && Z_TYPE_P(intern->array) == IS_OBJECT) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(intern->array TSRMLS_CC); return spl_array_get_hash_table(other, check_std_props TSRMLS_CC); } else if ((intern->ar_flags & ((check_std_props ? 
SPL_ARRAY_STD_PROP_LIST : 0) | SPL_ARRAY_IS_SELF)) != 0) { if (!intern->std.properties) { rebuild_object_properties(&intern->std); } return intern->std.properties; } else { return HASH_OF(intern->array); } } /* }}} */ static void spl_array_rewind(spl_array_object *intern TSRMLS_DC); static void spl_array_update_pos(spl_array_object* intern) /* {{{ */ { Bucket *pos = intern->pos; if (pos != NULL) { intern->pos_h = pos->h; } } /* }}} */ static void spl_array_set_pos(spl_array_object* intern, HashPosition pos) /* {{{ */ { intern->pos = pos; spl_array_update_pos(intern); } /* }}} */ SPL_API int spl_hash_verify_pos_ex(spl_array_object * intern, HashTable * ht TSRMLS_DC) /* {{{ */ { Bucket *p; /* IS_CONSISTENT(ht);*/ /* HASH_PROTECT_RECURSION(ht);*/ p = ht->arBuckets[intern->pos_h & ht->nTableMask]; while (p != NULL) { if (p == intern->pos) { return SUCCESS; } p = p->pNext; } /* HASH_UNPROTECT_RECURSION(ht); */ spl_array_rewind(intern TSRMLS_CC); return FAILURE; } /* }}} */ SPL_API int spl_hash_verify_pos(spl_array_object * intern TSRMLS_DC) /* {{{ */ { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); return spl_hash_verify_pos_ex(intern, ht TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_free_storage */ static void spl_array_object_free_storage(void *object TSRMLS_DC) { spl_array_object *intern = (spl_array_object *)object; zend_object_std_dtor(&intern->std TSRMLS_CC); zval_ptr_dtor(&intern->array); zval_ptr_dtor(&intern->retval); if (intern->debug_info != NULL) { zend_hash_destroy(intern->debug_info); efree(intern->debug_info); } efree(object); } /* }}} */ zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC); /* {{{ spl_array_object_new_ex */ static zend_object_value spl_array_object_new_ex(zend_class_entry *class_type, spl_array_object **obj, zval *orig, int clone_orig TSRMLS_DC) { zend_object_value retval = {0}; spl_array_object *intern; zval *tmp; zend_class_entry * parent = class_type; int inherited = 0; intern = emalloc(sizeof(spl_array_object)); memset(intern, 0, sizeof(spl_array_object)); *obj = intern; ALLOC_INIT_ZVAL(intern->retval); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); intern->ar_flags = 0; intern->debug_info = NULL; intern->ce_get_iterator = spl_ce_ArrayIterator; if (orig) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(orig TSRMLS_CC); intern->ar_flags &= ~ SPL_ARRAY_CLONE_MASK; intern->ar_flags |= (other->ar_flags & SPL_ARRAY_CLONE_MASK); intern->ce_get_iterator = other->ce_get_iterator; if (clone_orig) { intern->array = other->array; if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayObject) { MAKE_STD_ZVAL(intern->array); array_init(intern->array); zend_hash_copy(HASH_OF(intern->array), HASH_OF(other->array), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); } if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayIterator) { Z_ADDREF_P(other->array); } } else { intern->array = orig; Z_ADDREF_P(intern->array); intern->ar_flags |= SPL_ARRAY_IS_REF | SPL_ARRAY_USE_OTHER; } } else { MAKE_STD_ZVAL(intern->array); array_init(intern->array); intern->ar_flags &= ~SPL_ARRAY_IS_REF; } retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_array_object_free_storage, NULL TSRMLS_CC); while (parent) { if (parent == spl_ce_ArrayIterator || parent == spl_ce_RecursiveArrayIterator) { retval.handlers = &spl_handler_ArrayIterator; class_type->get_iterator = 
spl_array_get_iterator; break; } else if (parent == spl_ce_ArrayObject) { retval.handlers = &spl_handler_ArrayObject; break; } parent = parent->parent; inherited = 1; } if (!parent) { /* this must never happen */ php_error_docref(NULL TSRMLS_CC, E_COMPILE_ERROR, "Internal compiler error, Class is not child of ArrayObject or ArrayIterator"); } if (inherited) { zend_hash_find(&class_type->function_table, "offsetget", sizeof("offsetget"), (void **) &intern->fptr_offset_get); if (intern->fptr_offset_get->common.scope == parent) { intern->fptr_offset_get = NULL; } zend_hash_find(&class_type->function_table, "offsetset", sizeof("offsetset"), (void **) &intern->fptr_offset_set); if (intern->fptr_offset_set->common.scope == parent) { intern->fptr_offset_set = NULL; } zend_hash_find(&class_type->function_table, "offsetexists", sizeof("offsetexists"), (void **) &intern->fptr_offset_has); if (intern->fptr_offset_has->common.scope == parent) { intern->fptr_offset_has = NULL; } zend_hash_find(&class_type->function_table, "offsetunset", sizeof("offsetunset"), (void **) &intern->fptr_offset_del); if (intern->fptr_offset_del->common.scope == parent) { intern->fptr_offset_del = NULL; } zend_hash_find(&class_type->function_table, "count", sizeof("count"), (void **) &intern->fptr_count); if (intern->fptr_count->common.scope == parent) { intern->fptr_count = NULL; } } /* Cache iterator functions if ArrayIterator or derived. Check current's */ /* cache since only current is always required */ if (retval.handlers == &spl_handler_ArrayIterator) { if (!class_type->iterator_funcs.zf_current) { zend_hash_find(&class_type->function_table, "rewind", sizeof("rewind"), (void **) &class_type->iterator_funcs.zf_rewind); zend_hash_find(&class_type->function_table, "valid", sizeof("valid"), (void **) &class_type->iterator_funcs.zf_valid); zend_hash_find(&class_type->function_table, "key", sizeof("key"), (void **) &class_type->iterator_funcs.zf_key); zend_hash_find(&class_type->function_table, "current", sizeof("current"), (void **) &class_type->iterator_funcs.zf_current); zend_hash_find(&class_type->function_table, "next", sizeof("next"), (void **) &class_type->iterator_funcs.zf_next); } if (inherited) { if (class_type->iterator_funcs.zf_rewind->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_REWIND; if (class_type->iterator_funcs.zf_valid->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_VALID; if (class_type->iterator_funcs.zf_key->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_KEY; if (class_type->iterator_funcs.zf_current->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_CURRENT; if (class_type->iterator_funcs.zf_next->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_NEXT; } } spl_array_rewind(intern TSRMLS_CC); return retval; } /* }}} */ /* {{{ spl_array_object_new */ static zend_object_value spl_array_object_new(zend_class_entry *class_type TSRMLS_DC) { spl_array_object *tmp; return spl_array_object_new_ex(class_type, &tmp, NULL, 0 TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_clone */ static zend_object_value spl_array_object_clone(zval *zobject TSRMLS_DC) { zend_object_value new_obj_val; zend_object *old_object; zend_object *new_object; zend_object_handle handle = Z_OBJ_HANDLE_P(zobject); spl_array_object *intern; old_object = zend_objects_get_address(zobject TSRMLS_CC); new_obj_val = spl_array_object_new_ex(old_object->ce, &intern, zobject, 1 TSRMLS_CC); new_object = &intern->std; zend_objects_clone_members(new_object, 
new_obj_val, old_object, handle TSRMLS_CC); return new_obj_val; } /* }}} */ static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zval **retval; char *key; uint len; long index; HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!offset) { return &EG(uninitialized_zval_ptr); } if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return &EG(error_zval_ptr);; } switch (Z_TYPE_P(offset)) { case IS_STRING: key = Z_STRVAL_P(offset); len = Z_STRLEN_P(offset) + 1; string_offest: if (zend_symtable_find(ht, key, len, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, "Undefined index: %s", key); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE,"Undefined index: %s", key); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_symtable_update(ht, key, len, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; case IS_NULL: key = ""; len = 1; goto string_offest; case IS_RESOURCE: zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset)); case IS_DOUBLE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; default: zend_error(E_WARNING, "Illegal offset type"); return (type == BP_VAR_W || type == BP_VAR_RW) ? 
&EG(error_zval_ptr) : &EG(uninitialized_zval_ptr); } } /* }}} */ static zval *spl_array_read_dimension_ex(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { zval **ret; if (check_inherited) { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (intern->fptr_offset_get) { zval *rv; if (!offset) { ALLOC_INIT_ZVAL(offset); } else { SEPARATE_ARG_IF_REF(offset); } zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_get, "offsetGet", &rv, offset); zval_ptr_dtor(&offset); if (rv) { zval_ptr_dtor(&intern->retval); MAKE_STD_ZVAL(intern->retval); ZVAL_ZVAL(intern->retval, rv, 1, 1); return intern->retval; } return EG(uninitialized_zval_ptr); } } ret = spl_array_get_dimension_ptr_ptr(check_inherited, object, offset, type TSRMLS_CC); /* When in a write context, * ZE has to be fooled into thinking this is in a reference set * by separating (if necessary) and returning as an is_ref=1 zval (even if refcount == 1) */ if ((type == BP_VAR_W || type == BP_VAR_RW || type == BP_VAR_UNSET) && !Z_ISREF_PP(ret) && ret != &EG(uninitialized_zval_ptr)) { if (Z_REFCOUNT_PP(ret) > 1) { zval *newval; /* Separate */ MAKE_STD_ZVAL(newval); *newval = **ret; zval_copy_ctor(newval); Z_SET_REFCOUNT_P(newval, 1); /* Replace */ Z_DELREF_PP(ret); *ret = newval; } Z_SET_ISREF_PP(ret); } return *ret; } /* }}} */ static zval *spl_array_read_dimension(zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { return spl_array_read_dimension_ex(1, object, offset, type TSRMLS_CC); } /* }}} */ static void spl_array_write_dimension_ex(int check_inherited, zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; HashTable *ht; if (check_inherited && intern->fptr_offset_set) { if (!offset) { ALLOC_INIT_ZVAL(offset); } else { SEPARATE_ARG_IF_REF(offset); } zend_call_method_with_2_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_set, "offsetSet", NULL, offset, value); zval_ptr_dtor(&offset); return; } if (!offset) { ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL); return; } switch(Z_TYPE_P(offset)) { case IS_STRING: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_symtable_update(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void**)&value, sizeof(void*), NULL); return; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } Z_ADDREF_P(value); zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), NULL); return; case IS_NULL: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL); return; default: zend_error(E_WARNING, "Illegal offset type"); return; } } /* }}} */ static void 
spl_array_write_dimension(zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */ { spl_array_write_dimension_ex(1, object, offset, value TSRMLS_CC); } /* }}} */ static void spl_array_unset_dimension_ex(int check_inherited, zval *object, zval *offset TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; HashTable *ht; if (check_inherited && intern->fptr_offset_del) { SEPARATE_ARG_IF_REF(offset); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_del, "offsetUnset", NULL, offset); zval_ptr_dtor(&offset); return; } switch(Z_TYPE_P(offset)) { case IS_STRING: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (ht == &EG(symbol_table)) { if (zend_delete_global_variable(Z_STRVAL_P(offset), Z_STRLEN_P(offset) TSRMLS_CC)) { zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset)); } } else { if (zend_symtable_del(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1) == FAILURE) { zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset)); } else { spl_array_object *obj = intern; while (1) { if ((obj->ar_flags & SPL_ARRAY_IS_SELF) != 0) { break; } else if (Z_TYPE_P(obj->array) == IS_OBJECT) { if ((obj->ar_flags & SPL_ARRAY_USE_OTHER) == 0) { obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC); break; } else { obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC); } } else { obj = NULL; break; } } if (obj) { zend_property_info *property_info = zend_get_property_info(obj->std.ce, offset, 1 TSRMLS_CC); if (property_info && (property_info->flags & ZEND_ACC_STATIC) == 0 && property_info->offset >= 0) { obj->std.properties_table[property_info->offset] = NULL; } } } } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (zend_hash_index_del(ht, index) == FAILURE) { zend_error(E_NOTICE,"Undefined offset: %ld", Z_LVAL_P(offset)); } break; default: zend_error(E_WARNING, "Illegal offset type"); return; } spl_hash_verify_pos(intern TSRMLS_CC); /* call rewind on FAILURE */ } /* }}} */ static void spl_array_unset_dimension(zval *object, zval *offset TSRMLS_DC) /* {{{ */ { spl_array_unset_dimension_ex(1, object, offset TSRMLS_CC); } /* }}} */ static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; zval *rv, *value = NULL, **tmp; if (check_inherited && intern->fptr_offset_has) { zval *offset_tmp = offset; SEPARATE_ARG_IF_REF(offset_tmp); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset_tmp); zval_ptr_dtor(&offset_tmp); if (rv && zend_is_true(rv)) { zval_ptr_dtor(&rv); if (check_empty != 1) { return 1; } else if (intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } } else { if (rv) { zval_ptr_dtor(&rv); } return 0; } } if (!value) { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); switch(Z_TYPE_P(offset)) { case IS_STRING: if (zend_symtable_find(ht, 
Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; default: zend_error(E_WARNING, "Illegal offset type"); return 0; } if (check_empty && check_inherited && intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } else { value = *tmp; } } return check_empty ? zend_is_true(value) : Z_TYPE_P(value) != IS_NULL; } /* }}} */ static int spl_array_has_dimension(zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { return spl_array_has_dimension_ex(1, object, offset, check_empty TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_verify_pos_ex */ static inline int spl_array_object_verify_pos_ex(spl_array_object *object, HashTable *ht, const char *msg_prefix TSRMLS_DC) { if (!ht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and is no longer an array", msg_prefix); return FAILURE; } if (object->pos && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and internal position is no longer valid", msg_prefix); return FAILURE; } return SUCCESS; } /* }}} */ /* {{{ spl_array_object_verify_pos */ static inline int spl_array_object_verify_pos(spl_array_object *object, HashTable *ht TSRMLS_DC) { return spl_array_object_verify_pos_ex(object, ht, "" TSRMLS_CC); } /* }}} */ /* {{{ proto bool ArrayObject::offsetExists(mixed $index) proto bool ArrayIterator::offsetExists(mixed $index) Returns whether the requested $index exists. */ SPL_METHOD(Array, offsetExists) { zval *index; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } RETURN_BOOL(spl_array_has_dimension_ex(0, getThis(), index, 2 TSRMLS_CC)); } /* }}} */ /* {{{ proto mixed ArrayObject::offsetGet(mixed $index) proto mixed ArrayIterator::offsetGet(mixed $index) Returns the value at the specified $index. */ SPL_METHOD(Array, offsetGet) { zval *index, *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } value = spl_array_read_dimension_ex(0, getThis(), index, BP_VAR_R TSRMLS_CC); RETURN_ZVAL(value, 1, 0); } /* }}} */ /* {{{ proto void ArrayObject::offsetSet(mixed $index, mixed $newval) proto void ArrayIterator::offsetSet(mixed $index, mixed $newval) Sets the value at the specified $index to $newval. 
*/ SPL_METHOD(Array, offsetSet) { zval *index, *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zz", &index, &value) == FAILURE) { return; } spl_array_write_dimension_ex(0, getThis(), index, value TSRMLS_CC); } /* }}} */ void spl_array_iterator_append(zval *object, zval *append_value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } if (Z_TYPE_P(intern->array) == IS_OBJECT) { php_error_docref(NULL TSRMLS_CC, E_RECOVERABLE_ERROR, "Cannot append properties to objects, use %s::offsetSet() instead", Z_OBJCE_P(object)->name); return; } spl_array_write_dimension(object, NULL, append_value TSRMLS_CC); if (!intern->pos) { spl_array_set_pos(intern, aht->pListTail); } } /* }}} */ /* {{{ proto void ArrayObject::append(mixed $newval) proto void ArrayIterator::append(mixed $newval) Appends the value (cannot be called for objects). */ SPL_METHOD(Array, append) { zval *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &value) == FAILURE) { return; } spl_array_iterator_append(getThis(), value TSRMLS_CC); } /* }}} */ /* {{{ proto void ArrayObject::offsetUnset(mixed $index) proto void ArrayIterator::offsetUnset(mixed $index) Unsets the value at the specified $index. */ SPL_METHOD(Array, offsetUnset) { zval *index; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } spl_array_unset_dimension_ex(0, getThis(), index TSRMLS_CC); } /* }}} */ /* {{{ proto array ArrayObject::getArrayCopy() proto array ArrayIterator::getArrayCopy() Return a copy of the contained array */ SPL_METHOD(Array, getArrayCopy) { zval *object = getThis(), *tmp; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); array_init(return_value); zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); } /* }}} */ static HashTable *spl_array_get_properties(zval *object TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *result; if (intern->nApplyCount > 1) { php_error_docref(NULL TSRMLS_CC, E_ERROR, "Nesting level too deep - recursive dependency?"); } intern->nApplyCount++; result = spl_array_get_hash_table(intern, 1 TSRMLS_CC); intern->nApplyCount--; return result; } /* }}} */ static HashTable* spl_array_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(obj TSRMLS_CC); zval *tmp, *storage; int name_len; char *zname; zend_class_entry *base; *is_temp = 0; if (!intern->std.properties) { rebuild_object_properties(&intern->std); } if (HASH_OF(intern->array) == intern->std.properties) { return intern->std.properties; } else { if (intern->debug_info == NULL) { ALLOC_HASHTABLE(intern->debug_info); ZEND_INIT_SYMTABLE_EX(intern->debug_info, zend_hash_num_elements(intern->std.properties) + 1, 0); } if (intern->debug_info->nApplyCount == 0) { zend_hash_clean(intern->debug_info); zend_hash_copy(intern->debug_info, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); storage = intern->array; zval_add_ref(&storage); base = (Z_OBJ_HT_P(obj) == &spl_handler_ArrayIterator) ? 
spl_ce_ArrayIterator : spl_ce_ArrayObject; zname = spl_gen_private_prop_name(base, "storage", sizeof("storage")-1, &name_len TSRMLS_CC); zend_symtable_update(intern->debug_info, zname, name_len+1, &storage, sizeof(zval *), NULL); efree(zname); } return intern->debug_info; } } /* }}} */ static HashTable *spl_array_get_gc(zval *object, zval ***gc_data, int *gc_data_count TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); *gc_data = &intern->array; *gc_data_count = 1; return zend_std_get_properties(object TSRMLS_CC); } /* }}} */ static zval *spl_array_read_property(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_read_dimension(object, member, type TSRMLS_CC); } return std_object_handlers.read_property(object, member, type, key TSRMLS_CC); } /* }}} */ static void spl_array_write_property(zval *object, zval *member, zval *value, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { spl_array_write_dimension(object, member, value TSRMLS_CC); return; } std_object_handlers.write_property(object, member, value, key TSRMLS_CC); } /* }}} */ static zval **spl_array_get_property_ptr_ptr(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_get_dimension_ptr_ptr(1, object, member, type TSRMLS_CC); } return std_object_handlers.get_property_ptr_ptr(object, member, type, key TSRMLS_CC); } /* }}} */ static int spl_array_has_property(zval *object, zval *member, int has_set_exists, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_has_dimension(object, member, has_set_exists TSRMLS_CC); } return std_object_handlers.has_property(object, member, has_set_exists, key TSRMLS_CC); } /* }}} */ static void spl_array_unset_property(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { spl_array_unset_dimension(object, member TSRMLS_CC); spl_array_rewind(intern TSRMLS_CC); /* because deletion might invalidate position */ return; } std_object_handlers.unset_property(object, member, key TSRMLS_CC); } /* }}} */ static int spl_array_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */ { HashTable *ht1, *ht2; spl_array_object *intern1, *intern2; int result = 0; zval temp_zv; intern1 = (spl_array_object*)zend_object_store_get_object(o1 TSRMLS_CC); intern2 = (spl_array_object*)zend_object_store_get_object(o2 TSRMLS_CC); ht1 = 
spl_array_get_hash_table(intern1, 0 TSRMLS_CC); ht2 = spl_array_get_hash_table(intern2, 0 TSRMLS_CC); zend_compare_symbol_tables(&temp_zv, ht1, ht2 TSRMLS_CC); result = (int)Z_LVAL(temp_zv); /* if we just compared std.properties, don't do it again */ if (result == 0 && !(ht1 == intern1->std.properties && ht2 == intern2->std.properties)) { result = std_object_handlers.compare_objects(o1, o2 TSRMLS_CC); } return result; } /* }}} */ static int spl_array_skip_protected(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { char *string_key; uint string_length; ulong num_key; if (Z_TYPE_P(intern->array) == IS_OBJECT) { do { if (zend_hash_get_current_key_ex(aht, &string_key, &string_length, &num_key, 0, &intern->pos) == HASH_KEY_IS_STRING) { /* zend_hash_get_current_key_ex() should never set * string_length to 0 when returning HASH_KEY_IS_STRING, but we * may as well be defensive and consider that successful. * Beyond that, we're looking for protected keys (which will * have a null byte at string_key[0]), but want to avoid * skipping completely empty keys (which will also have the * null byte, but a string_length of 1). */ if (!string_length || string_key[0] || string_length == 1) { return SUCCESS; } } else { return SUCCESS; } if (zend_hash_has_more_elements_ex(aht, &intern->pos) != SUCCESS) { return FAILURE; } zend_hash_move_forward_ex(aht, &intern->pos); spl_array_update_pos(intern); } while (1); } return FAILURE; } /* }}} */ static int spl_array_next_no_verify(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { zend_hash_move_forward_ex(aht, &intern->pos); spl_array_update_pos(intern); if (Z_TYPE_P(intern->array) == IS_OBJECT) { return spl_array_skip_protected(intern, aht TSRMLS_CC); } else { return zend_hash_has_more_elements_ex(aht, &intern->pos); } } /* }}} */ static int spl_array_next_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { if ((intern->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(intern, aht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and internal position is no longer valid"); return FAILURE; } return spl_array_next_no_verify(intern, aht TSRMLS_CC); } /* }}} */ static int spl_array_next(spl_array_object *intern TSRMLS_DC) /* {{{ */ { HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); return spl_array_next_ex(intern, aht TSRMLS_CC); } /* }}} */ /* define an overloaded iterator structure */ typedef struct { zend_user_iterator intern; spl_array_object *object; } spl_array_it; static void spl_array_it_dtor(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; zend_user_it_invalidate_current(iter TSRMLS_CC); zval_ptr_dtor((zval**)&iterator->intern.it.data); efree(iterator); } /* }}} */ static int spl_array_it_valid(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_VALID) { return zend_user_it_valid(iter TSRMLS_CC); } else { if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::valid(): " TSRMLS_CC) == FAILURE) { return FAILURE; } return zend_hash_has_more_elements_ex(aht, &object->pos); } } /* }}} */ static void spl_array_it_get_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = 
spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT) { zend_user_it_get_current_data(iter, data TSRMLS_CC); } else { if (zend_hash_get_current_data_ex(aht, (void**)data, &object->pos) == FAILURE) { *data = NULL; } } } /* }}} */ static void spl_array_it_get_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_KEY) { zend_user_it_get_current_key(iter, key TSRMLS_CC); } else { if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::current(): " TSRMLS_CC) == FAILURE) { ZVAL_NULL(key); } else { zend_hash_get_current_key_zval_ex(aht, key, &object->pos); } } } /* }}} */ static void spl_array_it_move_forward(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_NEXT) { zend_user_it_move_forward(iter TSRMLS_CC); } else { zend_user_it_invalidate_current(iter TSRMLS_CC); if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::current(): Array was modified outside object and is no longer an array"); return; } if ((object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, aht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::next(): Array was modified outside object and internal position is no longer valid"); } else { spl_array_next_no_verify(object, aht TSRMLS_CC); } } } /* }}} */ static void spl_array_rewind_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { zend_hash_internal_pointer_reset_ex(aht, &intern->pos); spl_array_update_pos(intern); spl_array_skip_protected(intern, aht TSRMLS_CC); } /* }}} */ static void spl_array_rewind(spl_array_object *intern TSRMLS_DC) /* {{{ */ { HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::rewind(): Array was modified outside object and is no longer an array"); return; } spl_array_rewind_ex(intern, aht TSRMLS_CC); } /* }}} */ static void spl_array_it_rewind(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; if (object->ar_flags & SPL_ARRAY_OVERLOADED_REWIND) { zend_user_it_rewind(iter TSRMLS_CC); } else { zend_user_it_invalidate_current(iter TSRMLS_CC); spl_array_rewind(object TSRMLS_CC); } } /* }}} */ /* {{{ spl_array_set_array */ static void spl_array_set_array(zval *object, spl_array_object *intern, zval **array, long ar_flags, int just_array TSRMLS_DC) { if (Z_TYPE_PP(array) == IS_ARRAY) { SEPARATE_ZVAL_IF_NOT_REF(array); } if (Z_TYPE_PP(array) == IS_OBJECT && (Z_OBJ_HT_PP(array) == &spl_handler_ArrayObject || Z_OBJ_HT_PP(array) == &spl_handler_ArrayIterator)) { zval_ptr_dtor(&intern->array); if (just_array) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(*array TSRMLS_CC); ar_flags = other->ar_flags & ~SPL_ARRAY_INT_MASK; } ar_flags |= SPL_ARRAY_USE_OTHER; intern->array = *array; } else { if (Z_TYPE_PP(array) != IS_OBJECT && Z_TYPE_PP(array) != IS_ARRAY) { zend_throw_exception(spl_ce_InvalidArgumentException, "Passed variable is not an array or object, using empty array instead", 0 TSRMLS_CC); return; } 
zval_ptr_dtor(&intern->array); intern->array = *array; } if (object == *array) { intern->ar_flags |= SPL_ARRAY_IS_SELF; intern->ar_flags &= ~SPL_ARRAY_USE_OTHER; } else { intern->ar_flags &= ~SPL_ARRAY_IS_SELF; } intern->ar_flags |= ar_flags; Z_ADDREF_P(intern->array); if (Z_TYPE_PP(array) == IS_OBJECT) { zend_object_get_properties_t handler = Z_OBJ_HANDLER_PP(array, get_properties); if ((handler != std_object_handlers.get_properties && handler != spl_array_get_properties) || !spl_array_get_hash_table(intern, 0 TSRMLS_CC)) { zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0 TSRMLS_CC, "Overloaded object of type %s is not compatible with %s", Z_OBJCE_PP(array)->name, intern->std.ce->name); } } spl_array_rewind(intern TSRMLS_CC); } /* }}} */ /* iterator handler table */ zend_object_iterator_funcs spl_array_it_funcs = { spl_array_it_dtor, spl_array_it_valid, spl_array_it_get_current_data, spl_array_it_get_current_key, spl_array_it_move_forward, spl_array_it_rewind }; zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) /* {{{ */ { spl_array_it *iterator; spl_array_object *array_object = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (by_ref && (array_object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT)) { zend_error(E_ERROR, "An iterator cannot be used with foreach by reference"); } iterator = emalloc(sizeof(spl_array_it)); Z_ADDREF_P(object); iterator->intern.it.data = (void*)object; iterator->intern.it.funcs = &spl_array_it_funcs; iterator->intern.ce = ce; iterator->intern.value = NULL; iterator->object = array_object; return (zend_object_iterator*)iterator; } /* }}} */ /* {{{ proto void ArrayObject::__construct(array|object ar = array() [, int flags = 0 [, string iterator_class = "ArrayIterator"]]) proto void ArrayIterator::__construct(array|object ar = array() [, int flags = 0]) Constructs a new array iterator from a path. */ SPL_METHOD(Array, __construct) { zval *object = getThis(); spl_array_object *intern; zval **array; long ar_flags = 0; zend_class_entry *ce_get_iterator = spl_ce_Iterator; zend_error_handling error_handling; if (ZEND_NUM_ARGS() == 0) { return; /* nothing to do */ } zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling TSRMLS_CC); intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z|lC", &array, &ar_flags, &ce_get_iterator) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (ZEND_NUM_ARGS() > 2) { intern->ce_get_iterator = ce_get_iterator; } ar_flags &= ~SPL_ARRAY_INT_MASK; spl_array_set_array(object, intern, array, ar_flags, ZEND_NUM_ARGS() == 1 TSRMLS_CC); zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void ArrayObject::setIteratorClass(string iterator_class) Set the class used in getIterator. */ SPL_METHOD(Array, setIteratorClass) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zend_class_entry * ce_get_iterator = spl_ce_Iterator; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "C", &ce_get_iterator) == FAILURE) { return; } intern->ce_get_iterator = ce_get_iterator; } /* }}} */ /* {{{ proto string ArrayObject::getIteratorClass() Get the class used in getIterator. 
*/ SPL_METHOD(Array, getIteratorClass) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_STRING(intern->ce_get_iterator->name, 1); } /* }}} */ /* {{{ proto int ArrayObject::getFlags() Get flags */ SPL_METHOD(Array, getFlags) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->ar_flags & ~SPL_ARRAY_INT_MASK); } /* }}} */ /* {{{ proto void ArrayObject::setFlags(int flags) Set flags */ SPL_METHOD(Array, setFlags) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long ar_flags = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &ar_flags) == FAILURE) { return; } intern->ar_flags = (intern->ar_flags & SPL_ARRAY_INT_MASK) | (ar_flags & ~SPL_ARRAY_INT_MASK); } /* }}} */ /* {{{ proto Array|Object ArrayObject::exchangeArray(Array|Object ar = array()) Replace the referenced array or object with a new one and return the old one (right now copy - to be changed) */ SPL_METHOD(Array, exchangeArray) { zval *object = getThis(), *tmp, **array; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); array_init(return_value); zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z", &array) == FAILURE) { return; } spl_array_set_array(object, intern, array, 0L, 1 TSRMLS_CC); } /* }}} */ /* {{{ proto ArrayIterator ArrayObject::getIterator() Create a new iterator from a ArrayObject instance */ SPL_METHOD(Array, getIterator) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); spl_array_object *iterator; HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } return_value->type = IS_OBJECT; return_value->value.obj = spl_array_object_new_ex(intern->ce_get_iterator, &iterator, object, 0 TSRMLS_CC); Z_SET_REFCOUNT_P(return_value, 1); Z_SET_ISREF_P(return_value); } /* }}} */ /* {{{ proto void ArrayIterator::rewind() Rewind array back to the start */ SPL_METHOD(Array, rewind) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_array_rewind(intern TSRMLS_CC); } /* }}} */ /* {{{ proto void ArrayIterator::seek(int $position) Seek to position. 
*/ SPL_METHOD(Array, seek) { long opos, position; zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); int result; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &position) == FAILURE) { return; } if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } opos = position; if (position >= 0) { /* negative values are not supported */ spl_array_rewind(intern TSRMLS_CC); result = SUCCESS; while (position-- > 0 && (result = spl_array_next(intern TSRMLS_CC)) == SUCCESS); if (result == SUCCESS && zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS) { return; /* ok */ } } zend_throw_exception_ex(spl_ce_OutOfBoundsException, 0 TSRMLS_CC, "Seek position %ld is out of range", opos); } /* }}} */ int static spl_array_object_count_elements_helper(spl_array_object *intern, long *count TSRMLS_DC) /* {{{ */ { HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); HashPosition pos; if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); *count = 0; return FAILURE; } if (Z_TYPE_P(intern->array) == IS_OBJECT) { /* We need to store the 'pos' since we'll modify it in the functions * we're going to call and which do not support 'pos' as parameter. */ pos = intern->pos; *count = 0; spl_array_rewind(intern TSRMLS_CC); while(intern->pos && spl_array_next(intern TSRMLS_CC) == SUCCESS) { (*count)++; } spl_array_set_pos(intern, pos); return SUCCESS; } else { *count = zend_hash_num_elements(aht); return SUCCESS; } } /* }}} */ int spl_array_object_count_elements(zval *object, long *count TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (intern->fptr_count) { zval *rv; zend_call_method_with_0_params(&object, intern->std.ce, &intern->fptr_count, "count", &rv); if (rv) { zval_ptr_dtor(&intern->retval); MAKE_STD_ZVAL(intern->retval); ZVAL_ZVAL(intern->retval, rv, 1, 1); convert_to_long(intern->retval); *count = (long) Z_LVAL_P(intern->retval); return SUCCESS; } *count = 0; return FAILURE; } return spl_array_object_count_elements_helper(intern, count TSRMLS_CC); } /* }}} */ /* {{{ proto int ArrayObject::count() proto int ArrayIterator::count() Return the number of elements in the Iterator. 
*/ SPL_METHOD(Array, count) { long count; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_array_object_count_elements_helper(intern, &count TSRMLS_CC); RETURN_LONG(count); } /* }}} */ static void spl_array_method(INTERNAL_FUNCTION_PARAMETERS, char *fname, int fname_len, int use_arg) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); zval *tmp, *arg = NULL; zval *retval_ptr = NULL; MAKE_STD_ZVAL(tmp); Z_TYPE_P(tmp) = IS_ARRAY; Z_ARRVAL_P(tmp) = aht; if (!use_arg) { aht->nApplyCount++; zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 1, tmp, NULL TSRMLS_CC); aht->nApplyCount--; } else if (use_arg == SPL_ARRAY_METHOD_MAY_USER_ARG) { if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "|z", &arg) == FAILURE) { Z_TYPE_P(tmp) = IS_NULL; zval_ptr_dtor(&tmp); zend_throw_exception(spl_ce_BadMethodCallException, "Function expects one argument at most", 0 TSRMLS_CC); return; } aht->nApplyCount++; zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, arg? 2 : 1, tmp, arg TSRMLS_CC); aht->nApplyCount--; } else { if (ZEND_NUM_ARGS() != 1 || zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "z", &arg) == FAILURE) { Z_TYPE_P(tmp) = IS_NULL; zval_ptr_dtor(&tmp); zend_throw_exception(spl_ce_BadMethodCallException, "Function expects exactly one argument", 0 TSRMLS_CC); return; } aht->nApplyCount++; zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 2, tmp, arg TSRMLS_CC); aht->nApplyCount--; } Z_TYPE_P(tmp) = IS_NULL; /* we want to destroy the zval, not the hashtable */ zval_ptr_dtor(&tmp); if (retval_ptr) { COPY_PZVAL_TO_ZVAL(*return_value, retval_ptr); } } /* }}} */ #define SPL_ARRAY_METHOD(cname, fname, use_arg) \ SPL_METHOD(cname, fname) \ { \ spl_array_method(INTERNAL_FUNCTION_PARAM_PASSTHRU, #fname, sizeof(#fname)-1, use_arg); \ } /* {{{ proto int ArrayObject::asort([int $sort_flags = SORT_REGULAR ]) proto int ArrayIterator::asort([int $sort_flags = SORT_REGULAR ]) Sort the entries by values. */ SPL_ARRAY_METHOD(Array, asort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */ /* {{{ proto int ArrayObject::ksort([int $sort_flags = SORT_REGULAR ]) proto int ArrayIterator::ksort([int $sort_flags = SORT_REGULAR ]) Sort the entries by key. */ SPL_ARRAY_METHOD(Array, ksort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */ /* {{{ proto int ArrayObject::uasort(callback cmp_function) proto int ArrayIterator::uasort(callback cmp_function) Sort the entries by values user defined function. */ SPL_ARRAY_METHOD(Array, uasort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */ /* {{{ proto int ArrayObject::uksort(callback cmp_function) proto int ArrayIterator::uksort(callback cmp_function) Sort the entries by key using user defined function. */ SPL_ARRAY_METHOD(Array, uksort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */ /* {{{ proto int ArrayObject::natsort() proto int ArrayIterator::natsort() Sort the entries by values using "natural order" algorithm. */ SPL_ARRAY_METHOD(Array, natsort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */ /* {{{ proto int ArrayObject::natcasesort() proto int ArrayIterator::natcasesort() Sort the entries by key using case insensitive "natural order" algorithm. 
*/ SPL_ARRAY_METHOD(Array, natcasesort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */ /* {{{ proto mixed|NULL ArrayIterator::current() Return current array entry */ SPL_METHOD(Array, current) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zval **entry; HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) { return; } RETVAL_ZVAL(*entry, 1, 0); } /* }}} */ /* {{{ proto mixed|NULL ArrayIterator::key() Return current array key */ SPL_METHOD(Array, key) { if (zend_parse_parameters_none() == FAILURE) { return; } spl_array_iterator_key(getThis(), return_value TSRMLS_CC); } /* }}} */ void spl_array_iterator_key(zval *object, zval *return_value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } zend_hash_get_current_key_zval_ex(aht, return_value, &intern->pos); } /* }}} */ /* {{{ proto void ArrayIterator::next() Move to next entry */ SPL_METHOD(Array, next) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } spl_array_next_no_verify(intern, aht TSRMLS_CC); } /* }}} */ /* {{{ proto bool ArrayIterator::valid() Check whether array contains more entries */ SPL_METHOD(Array, valid) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { RETURN_FALSE; } else { RETURN_BOOL(zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS); } } /* }}} */ /* {{{ proto bool RecursiveArrayIterator::hasChildren() Check whether current element has children (e.g. 
is an array) */ SPL_METHOD(Array, hasChildren) { zval *object = getThis(), **entry; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { RETURN_FALSE; } if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) { RETURN_FALSE; } RETURN_BOOL(Z_TYPE_PP(entry) == IS_ARRAY || (Z_TYPE_PP(entry) == IS_OBJECT && (intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) == 0)); } /* }}} */ /* {{{ proto object RecursiveArrayIterator::getChildren() Create a sub iterator for the current element (same class as $this) */ SPL_METHOD(Array, getChildren) { zval *object = getThis(), **entry, *flags; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) { return; } if (Z_TYPE_PP(entry) == IS_OBJECT) { if ((intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) != 0) { return; } if (instanceof_function(Z_OBJCE_PP(entry), Z_OBJCE_P(getThis()) TSRMLS_CC)) { RETURN_ZVAL(*entry, 1, 0); } } MAKE_STD_ZVAL(flags); ZVAL_LONG(flags, SPL_ARRAY_USE_OTHER | intern->ar_flags); spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, *entry, flags TSRMLS_CC); zval_ptr_dtor(&flags); } /* }}} */ /* {{{ proto string ArrayObject::serialize() Serialize the object */ SPL_METHOD(Array, serialize) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); zval members, *pmembers; php_serialize_data_t var_hash; smart_str buf = {0}; zval *flags; if (zend_parse_parameters_none() == FAILURE) { return; } if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } PHP_VAR_SERIALIZE_INIT(var_hash); MAKE_STD_ZVAL(flags); ZVAL_LONG(flags, (intern->ar_flags & SPL_ARRAY_CLONE_MASK)); /* storage */ smart_str_appendl(&buf, "x:", 2); php_var_serialize(&buf, &flags, &var_hash TSRMLS_CC); zval_ptr_dtor(&flags); if (!(intern->ar_flags & SPL_ARRAY_IS_SELF)) { php_var_serialize(&buf, &intern->array, &var_hash TSRMLS_CC); smart_str_appendc(&buf, ';'); } /* members */ smart_str_appendl(&buf, "m:", 2); INIT_PZVAL(&members); if (!intern->std.properties) { rebuild_object_properties(&intern->std); } Z_ARRVAL(members) = intern->std.properties; Z_TYPE(members) = IS_ARRAY; pmembers = &members; php_var_serialize(&buf, &pmembers, &var_hash TSRMLS_CC); /* finishes the string */ /* done */ PHP_VAR_SERIALIZE_DESTROY(var_hash); if (buf.c) { RETURN_STRINGL(buf.c, buf.len, 0); } RETURN_NULL(); } /* }}} */ /* {{{ proto void ArrayObject::unserialize(string serialized) * unserialize the object */ SPL_METHOD(Array, unserialize) { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *buf; int buf_len; const unsigned char *p, *s; php_unserialize_data_t var_hash; zval *pmembers, *pflags = NULL; HashTable *aht; long flags; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &buf, &buf_len) == FAILURE) { return; } if (buf_len == 0) { return; } aht = 
spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (aht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } /* storage */ s = p = (const unsigned char*)buf; PHP_VAR_UNSERIALIZE_INIT(var_hash); if (*p != 'x' || *++p != ':') { goto outexcept; } ++p; ALLOC_INIT_ZVAL(pflags); if (!php_var_unserialize(&pflags, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pflags) != IS_LONG) { goto outexcept; } var_push_dtor(&var_hash, &pflags); --p; /* for ';' */ flags = Z_LVAL_P(pflags); /* flags needs to be verified and we also need to verify whether the next * thing we get is ';'. After that we require an 'm' or something else * where 'm' stands for members and anything else should be an array. If * neither 'a' nor 'm' follows we have an error. A sample payload is * sketched in a comment at the end of this listing. */ if (*p != ';') { goto outexcept; } ++p; if (*p != 'm') { if (*p != 'a' && *p != 'O' && *p != 'C' && *p != 'r') { goto outexcept; } intern->ar_flags &= ~SPL_ARRAY_CLONE_MASK; intern->ar_flags |= flags & SPL_ARRAY_CLONE_MASK; zval_ptr_dtor(&intern->array); ALLOC_INIT_ZVAL(intern->array); if (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC)) { goto outexcept; } var_push_dtor(&var_hash, &intern->array); } if (*p != ';') { goto outexcept; } ++p; /* members */ if (*p != 'm' || *++p != ':') { goto outexcept; } ++p; ALLOC_INIT_ZVAL(pmembers); if (!php_var_unserialize(&pmembers, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pmembers) != IS_ARRAY) { zval_ptr_dtor(&pmembers); goto outexcept; } var_push_dtor(&var_hash, &pmembers); /* copy members */ if (!intern->std.properties) { rebuild_object_properties(&intern->std); } zend_hash_copy(intern->std.properties, Z_ARRVAL_P(pmembers), (copy_ctor_func_t) zval_add_ref, (void *) NULL, sizeof(zval *)); zval_ptr_dtor(&pmembers); /* done reading $serialized */ PHP_VAR_UNSERIALIZE_DESTROY(var_hash); if (pflags) { zval_ptr_dtor(&pflags); } return; outexcept: PHP_VAR_UNSERIALIZE_DESTROY(var_hash); if (pflags) { zval_ptr_dtor(&pflags); } zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Error at offset %ld of %d bytes", (long)((char*)p - buf), buf_len); return; } /* }}} */ /* {{{ arginfo and function table */ ZEND_BEGIN_ARG_INFO_EX(arginfo_array___construct, 0, 0, 0) ZEND_ARG_INFO(0, array) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetGet, 0, 0, 1) ZEND_ARG_INFO(0, index) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetSet, 0, 0, 2) ZEND_ARG_INFO(0, index) ZEND_ARG_INFO(0, newval) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_append, 0) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_seek, 0) ZEND_ARG_INFO(0, position) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_exchangeArray, 0) ZEND_ARG_INFO(0, array) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_setFlags, 0) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_setIteratorClass, 0) ZEND_ARG_INFO(0, iteratorClass) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_uXsort, 0) ZEND_ARG_INFO(0, cmp_function) ZEND_END_ARG_INFO(); ZEND_BEGIN_ARG_INFO(arginfo_array_unserialize, 0) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO(); ZEND_BEGIN_ARG_INFO(arginfo_array_void, 0) ZEND_END_ARG_INFO() static const zend_function_entry spl_funcs_ArrayObject[] = { SPL_ME(Array, __construct, arginfo_array___construct, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetExists, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetGet, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array,
offsetSet, arginfo_array_offsetSet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetUnset, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, append, arginfo_array_append, ZEND_ACC_PUBLIC) SPL_ME(Array, getArrayCopy, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, count, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, getFlags, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, setFlags, arginfo_array_setFlags, ZEND_ACC_PUBLIC) SPL_ME(Array, asort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, ksort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, uasort, arginfo_array_uXsort, ZEND_ACC_PUBLIC) SPL_ME(Array, uksort, arginfo_array_uXsort, ZEND_ACC_PUBLIC) SPL_ME(Array, natsort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, natcasesort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, unserialize, arginfo_array_unserialize, ZEND_ACC_PUBLIC) SPL_ME(Array, serialize, arginfo_array_void, ZEND_ACC_PUBLIC) /* ArrayObject specific */ SPL_ME(Array, getIterator, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, exchangeArray, arginfo_array_exchangeArray, ZEND_ACC_PUBLIC) SPL_ME(Array, setIteratorClass, arginfo_array_setIteratorClass, ZEND_ACC_PUBLIC) SPL_ME(Array, getIteratorClass, arginfo_array_void, ZEND_ACC_PUBLIC) PHP_FE_END }; static const zend_function_entry spl_funcs_ArrayIterator[] = { SPL_ME(Array, __construct, arginfo_array___construct, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetExists, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetGet, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetSet, arginfo_array_offsetSet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetUnset, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, append, arginfo_array_append, ZEND_ACC_PUBLIC) SPL_ME(Array, getArrayCopy, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, count, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, getFlags, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, setFlags, arginfo_array_setFlags, ZEND_ACC_PUBLIC) SPL_ME(Array, asort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, ksort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, uasort, arginfo_array_uXsort, ZEND_ACC_PUBLIC) SPL_ME(Array, uksort, arginfo_array_uXsort, ZEND_ACC_PUBLIC) SPL_ME(Array, natsort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, natcasesort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, unserialize, arginfo_array_unserialize, ZEND_ACC_PUBLIC) SPL_ME(Array, serialize, arginfo_array_void, ZEND_ACC_PUBLIC) /* ArrayIterator specific */ SPL_ME(Array, rewind, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, current, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, key, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, next, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, valid, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, seek, arginfo_array_seek, ZEND_ACC_PUBLIC) PHP_FE_END }; static const zend_function_entry spl_funcs_RecursiveArrayIterator[] = { SPL_ME(Array, hasChildren, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, getChildren, arginfo_array_void, ZEND_ACC_PUBLIC) PHP_FE_END }; /* }}} */ /* {{{ PHP_MINIT_FUNCTION(spl_array) */ PHP_MINIT_FUNCTION(spl_array) { REGISTER_SPL_STD_CLASS_EX(ArrayObject, spl_array_object_new, spl_funcs_ArrayObject); REGISTER_SPL_IMPLEMENTS(ArrayObject, Aggregate); REGISTER_SPL_IMPLEMENTS(ArrayObject, ArrayAccess); REGISTER_SPL_IMPLEMENTS(ArrayObject, Serializable); REGISTER_SPL_IMPLEMENTS(ArrayObject, Countable); memcpy(&spl_handler_ArrayObject, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); 
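/* The handler table starts as a verbatim copy of the standard object
 * handlers (memcpy above); the assignments that follow override only the
 * hooks that must route dimension, property, count and debug accesses
 * through the SPL array storage. */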
spl_handler_ArrayObject.clone_obj = spl_array_object_clone; spl_handler_ArrayObject.read_dimension = spl_array_read_dimension; spl_handler_ArrayObject.write_dimension = spl_array_write_dimension; spl_handler_ArrayObject.unset_dimension = spl_array_unset_dimension; spl_handler_ArrayObject.has_dimension = spl_array_has_dimension; spl_handler_ArrayObject.count_elements = spl_array_object_count_elements; spl_handler_ArrayObject.get_properties = spl_array_get_properties; spl_handler_ArrayObject.get_debug_info = spl_array_get_debug_info; spl_handler_ArrayObject.get_gc = spl_array_get_gc; spl_handler_ArrayObject.read_property = spl_array_read_property; spl_handler_ArrayObject.write_property = spl_array_write_property; spl_handler_ArrayObject.get_property_ptr_ptr = spl_array_get_property_ptr_ptr; spl_handler_ArrayObject.has_property = spl_array_has_property; spl_handler_ArrayObject.unset_property = spl_array_unset_property; spl_handler_ArrayObject.compare_objects = spl_array_compare_objects; REGISTER_SPL_STD_CLASS_EX(ArrayIterator, spl_array_object_new, spl_funcs_ArrayIterator); REGISTER_SPL_IMPLEMENTS(ArrayIterator, Iterator); REGISTER_SPL_IMPLEMENTS(ArrayIterator, ArrayAccess); REGISTER_SPL_IMPLEMENTS(ArrayIterator, SeekableIterator); REGISTER_SPL_IMPLEMENTS(ArrayIterator, Serializable); REGISTER_SPL_IMPLEMENTS(ArrayIterator, Countable); memcpy(&spl_handler_ArrayIterator, &spl_handler_ArrayObject, sizeof(zend_object_handlers)); spl_ce_ArrayIterator->get_iterator = spl_array_get_iterator; REGISTER_SPL_SUB_CLASS_EX(RecursiveArrayIterator, ArrayIterator, spl_array_object_new, spl_funcs_RecursiveArrayIterator); REGISTER_SPL_IMPLEMENTS(RecursiveArrayIterator, RecursiveIterator); spl_ce_RecursiveArrayIterator->get_iterator = spl_array_get_iterator; REGISTER_SPL_CLASS_CONST_LONG(ArrayObject, "STD_PROP_LIST", SPL_ARRAY_STD_PROP_LIST); REGISTER_SPL_CLASS_CONST_LONG(ArrayObject, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS); REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "STD_PROP_LIST", SPL_ARRAY_STD_PROP_LIST); REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS); REGISTER_SPL_CLASS_CONST_LONG(RecursiveArrayIterator, "CHILD_ARRAYS_ONLY", SPL_ARRAY_CHILD_ARRAYS_ONLY); return SUCCESS; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: fdm=marker * vim: noet sw=4 ts=4 */
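/*
 * A sketch of the payload that ArrayObject::serialize() above produces and
 * ArrayObject::unserialize() consumes; the concrete values are illustrative
 * only (default flags, a two-element storage array, no member properties):
 *
 *   x:i:0;a:2:{i:0;i:1;i:1;i:2;};m:a:0:{}
 *
 * - "x:"          marker written by serialize() and required by unserialize()
 * - "i:0;"        ar_flags & SPL_ARRAY_CLONE_MASK, serialized as a long
 * - "a:2:{...};"  the storage array (omitted together with the trailing ';'
 *                 when SPL_ARRAY_IS_SELF is set)
 * - "m:"          member marker; unserialize() accepts only 'a', 'O', 'C'
 *                 or 'r' in the storage slot before it
 * - "a:0:{}"      the serialized member property table
 */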
/* +----------------------------------------------------------------------+ | PHP Version 5 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Authors: Marcus Boerger <helly@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #ifdef HAVE_CONFIG_H # include "config.h" #endif #include "php.h" #include "php_ini.h" #include "ext/standard/info.h" #include "ext/standard/php_var.h" #include "ext/standard/php_smart_str.h" #include "zend_interfaces.h" #include "zend_exceptions.h" #include "php_spl.h" #include "spl_functions.h" #include "spl_engine.h" #include "spl_iterators.h" #include "spl_array.h" #include "spl_exceptions.h" zend_object_handlers spl_handler_ArrayObject; PHPAPI zend_class_entry *spl_ce_ArrayObject; zend_object_handlers spl_handler_ArrayIterator; PHPAPI zend_class_entry *spl_ce_ArrayIterator; PHPAPI zend_class_entry *spl_ce_RecursiveArrayIterator; #define SPL_ARRAY_STD_PROP_LIST 0x00000001 #define SPL_ARRAY_ARRAY_AS_PROPS 0x00000002 #define SPL_ARRAY_CHILD_ARRAYS_ONLY 0x00000004 #define SPL_ARRAY_OVERLOADED_REWIND 0x00010000 #define SPL_ARRAY_OVERLOADED_VALID 0x00020000 #define SPL_ARRAY_OVERLOADED_KEY 0x00040000 #define SPL_ARRAY_OVERLOADED_CURRENT 0x00080000 #define SPL_ARRAY_OVERLOADED_NEXT 0x00100000 #define SPL_ARRAY_IS_REF 0x01000000 #define SPL_ARRAY_IS_SELF 0x02000000 #define SPL_ARRAY_USE_OTHER 0x04000000 #define SPL_ARRAY_INT_MASK 0xFFFF0000 #define SPL_ARRAY_CLONE_MASK 0x0300FFFF #define SPL_ARRAY_METHOD_NO_ARG 0 #define SPL_ARRAY_METHOD_USE_ARG 1 #define SPL_ARRAY_METHOD_MAY_USER_ARG 2 typedef struct _spl_array_object { zend_object std; zval *array; zval *retval; HashPosition pos; ulong pos_h; int ar_flags; int is_self; zend_function *fptr_offset_get; zend_function *fptr_offset_set; zend_function *fptr_offset_has; zend_function *fptr_offset_del; zend_function *fptr_count; zend_class_entry* ce_get_iterator; HashTable *debug_info; unsigned char nApplyCount; } spl_array_object; static inline HashTable *spl_array_get_hash_table(spl_array_object* intern, int check_std_props TSRMLS_DC) { /* {{{ */ if ((intern->ar_flags & SPL_ARRAY_IS_SELF) != 0) { if (!intern->std.properties) { rebuild_object_properties(&intern->std); } return intern->std.properties; } else if ((intern->ar_flags & SPL_ARRAY_USE_OTHER) && (check_std_props == 0 || (intern->ar_flags & SPL_ARRAY_STD_PROP_LIST) == 0) && Z_TYPE_P(intern->array) == IS_OBJECT) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(intern->array TSRMLS_CC); return spl_array_get_hash_table(other, check_std_props TSRMLS_CC); } else if ((intern->ar_flags & ((check_std_props ? 
SPL_ARRAY_STD_PROP_LIST : 0) | SPL_ARRAY_IS_SELF)) != 0) { if (!intern->std.properties) { rebuild_object_properties(&intern->std); } return intern->std.properties; } else { return HASH_OF(intern->array); } } /* }}} */ static void spl_array_rewind(spl_array_object *intern TSRMLS_DC); static void spl_array_update_pos(spl_array_object* intern) /* {{{ */ { Bucket *pos = intern->pos; if (pos != NULL) { intern->pos_h = pos->h; } } /* }}} */ static void spl_array_set_pos(spl_array_object* intern, HashPosition pos) /* {{{ */ { intern->pos = pos; spl_array_update_pos(intern); } /* }}} */ SPL_API int spl_hash_verify_pos_ex(spl_array_object * intern, HashTable * ht TSRMLS_DC) /* {{{ */ { Bucket *p; /* IS_CONSISTENT(ht);*/ /* HASH_PROTECT_RECURSION(ht);*/ p = ht->arBuckets[intern->pos_h & ht->nTableMask]; while (p != NULL) { if (p == intern->pos) { return SUCCESS; } p = p->pNext; } /* HASH_UNPROTECT_RECURSION(ht); */ spl_array_rewind(intern TSRMLS_CC); return FAILURE; } /* }}} */ SPL_API int spl_hash_verify_pos(spl_array_object * intern TSRMLS_DC) /* {{{ */ { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); return spl_hash_verify_pos_ex(intern, ht TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_free_storage */ static void spl_array_object_free_storage(void *object TSRMLS_DC) { spl_array_object *intern = (spl_array_object *)object; zend_object_std_dtor(&intern->std TSRMLS_CC); zval_ptr_dtor(&intern->array); zval_ptr_dtor(&intern->retval); if (intern->debug_info != NULL) { zend_hash_destroy(intern->debug_info); efree(intern->debug_info); } efree(object); } /* }}} */ zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC); /* {{{ spl_array_object_new_ex */ static zend_object_value spl_array_object_new_ex(zend_class_entry *class_type, spl_array_object **obj, zval *orig, int clone_orig TSRMLS_DC) { zend_object_value retval = {0}; spl_array_object *intern; zval *tmp; zend_class_entry * parent = class_type; int inherited = 0; intern = emalloc(sizeof(spl_array_object)); memset(intern, 0, sizeof(spl_array_object)); *obj = intern; ALLOC_INIT_ZVAL(intern->retval); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); intern->ar_flags = 0; intern->debug_info = NULL; intern->ce_get_iterator = spl_ce_ArrayIterator; if (orig) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(orig TSRMLS_CC); intern->ar_flags &= ~ SPL_ARRAY_CLONE_MASK; intern->ar_flags |= (other->ar_flags & SPL_ARRAY_CLONE_MASK); intern->ce_get_iterator = other->ce_get_iterator; if (clone_orig) { intern->array = other->array; if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayObject) { MAKE_STD_ZVAL(intern->array); array_init(intern->array); zend_hash_copy(HASH_OF(intern->array), HASH_OF(other->array), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); } if (Z_OBJ_HT_P(orig) == &spl_handler_ArrayIterator) { Z_ADDREF_P(other->array); } } else { intern->array = orig; Z_ADDREF_P(intern->array); intern->ar_flags |= SPL_ARRAY_IS_REF | SPL_ARRAY_USE_OTHER; } } else { MAKE_STD_ZVAL(intern->array); array_init(intern->array); intern->ar_flags &= ~SPL_ARRAY_IS_REF; } retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t)zend_objects_destroy_object, (zend_objects_free_object_storage_t) spl_array_object_free_storage, NULL TSRMLS_CC); while (parent) { if (parent == spl_ce_ArrayIterator || parent == spl_ce_RecursiveArrayIterator) { retval.handlers = &spl_handler_ArrayIterator; class_type->get_iterator = 
spl_array_get_iterator; break; } else if (parent == spl_ce_ArrayObject) { retval.handlers = &spl_handler_ArrayObject; break; } parent = parent->parent; inherited = 1; } if (!parent) { /* this must never happen */ php_error_docref(NULL TSRMLS_CC, E_COMPILE_ERROR, "Internal compiler error, Class is not child of ArrayObject or ArrayIterator"); } if (inherited) { zend_hash_find(&class_type->function_table, "offsetget", sizeof("offsetget"), (void **) &intern->fptr_offset_get); if (intern->fptr_offset_get->common.scope == parent) { intern->fptr_offset_get = NULL; } zend_hash_find(&class_type->function_table, "offsetset", sizeof("offsetset"), (void **) &intern->fptr_offset_set); if (intern->fptr_offset_set->common.scope == parent) { intern->fptr_offset_set = NULL; } zend_hash_find(&class_type->function_table, "offsetexists", sizeof("offsetexists"), (void **) &intern->fptr_offset_has); if (intern->fptr_offset_has->common.scope == parent) { intern->fptr_offset_has = NULL; } zend_hash_find(&class_type->function_table, "offsetunset", sizeof("offsetunset"), (void **) &intern->fptr_offset_del); if (intern->fptr_offset_del->common.scope == parent) { intern->fptr_offset_del = NULL; } zend_hash_find(&class_type->function_table, "count", sizeof("count"), (void **) &intern->fptr_count); if (intern->fptr_count->common.scope == parent) { intern->fptr_count = NULL; } } /* Cache iterator functions if ArrayIterator or derived. Check current's */ /* cache since only current is always required */ if (retval.handlers == &spl_handler_ArrayIterator) { if (!class_type->iterator_funcs.zf_current) { zend_hash_find(&class_type->function_table, "rewind", sizeof("rewind"), (void **) &class_type->iterator_funcs.zf_rewind); zend_hash_find(&class_type->function_table, "valid", sizeof("valid"), (void **) &class_type->iterator_funcs.zf_valid); zend_hash_find(&class_type->function_table, "key", sizeof("key"), (void **) &class_type->iterator_funcs.zf_key); zend_hash_find(&class_type->function_table, "current", sizeof("current"), (void **) &class_type->iterator_funcs.zf_current); zend_hash_find(&class_type->function_table, "next", sizeof("next"), (void **) &class_type->iterator_funcs.zf_next); } if (inherited) { if (class_type->iterator_funcs.zf_rewind->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_REWIND; if (class_type->iterator_funcs.zf_valid->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_VALID; if (class_type->iterator_funcs.zf_key->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_KEY; if (class_type->iterator_funcs.zf_current->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_CURRENT; if (class_type->iterator_funcs.zf_next->common.scope != parent) intern->ar_flags |= SPL_ARRAY_OVERLOADED_NEXT; } } spl_array_rewind(intern TSRMLS_CC); return retval; } /* }}} */ /* {{{ spl_array_object_new */ static zend_object_value spl_array_object_new(zend_class_entry *class_type TSRMLS_DC) { spl_array_object *tmp; return spl_array_object_new_ex(class_type, &tmp, NULL, 0 TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_clone */ static zend_object_value spl_array_object_clone(zval *zobject TSRMLS_DC) { zend_object_value new_obj_val; zend_object *old_object; zend_object *new_object; zend_object_handle handle = Z_OBJ_HANDLE_P(zobject); spl_array_object *intern; old_object = zend_objects_get_address(zobject TSRMLS_CC); new_obj_val = spl_array_object_new_ex(old_object->ce, &intern, zobject, 1 TSRMLS_CC); new_object = &intern->std; zend_objects_clone_members(new_object, 
new_obj_val, old_object, handle TSRMLS_CC); return new_obj_val; } /* }}} */ static zval **spl_array_get_dimension_ptr_ptr(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zval **retval; char *key; uint len; long index; HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!offset || !ht) { return &EG(uninitialized_zval_ptr); } if ((type == BP_VAR_W || type == BP_VAR_RW) && (ht->nApplyCount > 0)) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return &EG(error_zval_ptr); } switch (Z_TYPE_P(offset)) { case IS_STRING: key = Z_STRVAL_P(offset); len = Z_STRLEN_P(offset) + 1; string_offset: if (zend_symtable_find(ht, key, len, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, "Undefined index: %s", key); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE, "Undefined index: %s", key); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_symtable_update(ht, key, len, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; case IS_NULL: key = ""; len = 1; goto string_offset; case IS_RESOURCE: zend_error(E_STRICT, "Resource ID#%ld used as offset, casting to integer (%ld)", Z_LVAL_P(offset), Z_LVAL_P(offset)); case IS_DOUBLE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **) &retval) == FAILURE) { switch (type) { case BP_VAR_R: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_UNSET: case BP_VAR_IS: retval = &EG(uninitialized_zval_ptr); break; case BP_VAR_RW: zend_error(E_NOTICE, "Undefined offset: %ld", index); case BP_VAR_W: { zval *value; ALLOC_INIT_ZVAL(value); zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), (void **)&retval); } } } return retval; default: zend_error(E_WARNING, "Illegal offset type"); return (type == BP_VAR_W || type == BP_VAR_RW) ?
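/* write contexts receive the error zval so the engine discards the
 * attempted write; everything else falls back to the uninitialized zval */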
&EG(error_zval_ptr) : &EG(uninitialized_zval_ptr); } } /* }}} */ static zval *spl_array_read_dimension_ex(int check_inherited, zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { zval **ret; if (check_inherited) { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (intern->fptr_offset_get) { zval *rv; if (!offset) { ALLOC_INIT_ZVAL(offset); } else { SEPARATE_ARG_IF_REF(offset); } zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_get, "offsetGet", &rv, offset); zval_ptr_dtor(&offset); if (rv) { zval_ptr_dtor(&intern->retval); MAKE_STD_ZVAL(intern->retval); ZVAL_ZVAL(intern->retval, rv, 1, 1); return intern->retval; } return EG(uninitialized_zval_ptr); } } ret = spl_array_get_dimension_ptr_ptr(check_inherited, object, offset, type TSRMLS_CC); /* When in a write context, * ZE has to be fooled into thinking this is in a reference set * by separating (if necessary) and returning as an is_ref=1 zval (even if refcount == 1) */ if ((type == BP_VAR_W || type == BP_VAR_RW || type == BP_VAR_UNSET) && !Z_ISREF_PP(ret) && ret != &EG(uninitialized_zval_ptr)) { if (Z_REFCOUNT_PP(ret) > 1) { zval *newval; /* Separate */ MAKE_STD_ZVAL(newval); *newval = **ret; zval_copy_ctor(newval); Z_SET_REFCOUNT_P(newval, 1); /* Replace */ Z_DELREF_PP(ret); *ret = newval; } Z_SET_ISREF_PP(ret); } return *ret; } /* }}} */ static zval *spl_array_read_dimension(zval *object, zval *offset, int type TSRMLS_DC) /* {{{ */ { return spl_array_read_dimension_ex(1, object, offset, type TSRMLS_CC); } /* }}} */ static void spl_array_write_dimension_ex(int check_inherited, zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; HashTable *ht; if (check_inherited && intern->fptr_offset_set) { if (!offset) { ALLOC_INIT_ZVAL(offset); } else { SEPARATE_ARG_IF_REF(offset); } zend_call_method_with_2_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_set, "offsetSet", NULL, offset, value); zval_ptr_dtor(&offset); return; } if (!offset) { ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL); return; } switch(Z_TYPE_P(offset)) { case IS_STRING: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_symtable_update(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void**)&value, sizeof(void*), NULL); return; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } Z_ADDREF_P(value); zend_hash_index_update(ht, index, (void**)&value, sizeof(void*), NULL); return; case IS_NULL: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } Z_ADDREF_P(value); zend_hash_next_index_insert(ht, (void**)&value, sizeof(void*), NULL); return; default: zend_error(E_WARNING, "Illegal offset type"); return; } } /* }}} */ static void 
spl_array_write_dimension(zval *object, zval *offset, zval *value TSRMLS_DC) /* {{{ */ { spl_array_write_dimension_ex(1, object, offset, value TSRMLS_CC); } /* }}} */ static void spl_array_unset_dimension_ex(int check_inherited, zval *object, zval *offset TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; HashTable *ht; if (check_inherited && intern->fptr_offset_del) { SEPARATE_ARG_IF_REF(offset); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_del, "offsetUnset", NULL, offset); zval_ptr_dtor(&offset); return; } switch(Z_TYPE_P(offset)) { case IS_STRING: ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (ht == &EG(symbol_table)) { if (zend_delete_global_variable(Z_STRVAL_P(offset), Z_STRLEN_P(offset) TSRMLS_CC)) { zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset)); } } else { if (zend_symtable_del(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1) == FAILURE) { zend_error(E_NOTICE,"Undefined index: %s", Z_STRVAL_P(offset)); } else { spl_array_object *obj = intern; while (1) { if ((obj->ar_flags & SPL_ARRAY_IS_SELF) != 0) { break; } else if (Z_TYPE_P(obj->array) == IS_OBJECT) { if ((obj->ar_flags & SPL_ARRAY_USE_OTHER) == 0) { obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC); break; } else { obj = (spl_array_object*)zend_object_store_get_object(obj->array TSRMLS_CC); } } else { obj = NULL; break; } } if (obj) { zend_property_info *property_info = zend_get_property_info(obj->std.ce, offset, 1 TSRMLS_CC); if (property_info && (property_info->flags & ZEND_ACC_STATIC) == 0 && property_info->offset >= 0) { obj->std.properties_table[property_info->offset] = NULL; } } } } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (ht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } if (zend_hash_index_del(ht, index) == FAILURE) { zend_error(E_NOTICE,"Undefined offset: %ld", Z_LVAL_P(offset)); } break; default: zend_error(E_WARNING, "Illegal offset type"); return; } spl_hash_verify_pos(intern TSRMLS_CC); /* call rewind on FAILURE */ } /* }}} */ static void spl_array_unset_dimension(zval *object, zval *offset TSRMLS_DC) /* {{{ */ { spl_array_unset_dimension_ex(1, object, offset TSRMLS_CC); } /* }}} */ static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long index; zval *rv, *value = NULL, **tmp; if (check_inherited && intern->fptr_offset_has) { zval *offset_tmp = offset; SEPARATE_ARG_IF_REF(offset_tmp); zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset_tmp); zval_ptr_dtor(&offset_tmp); if (rv && zend_is_true(rv)) { zval_ptr_dtor(&rv); if (check_empty != 1) { return 1; } else if (intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } } else { if (rv) { zval_ptr_dtor(&rv); } return 0; } } if (!value) { HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); switch(Z_TYPE_P(offset)) { case IS_STRING: if (zend_symtable_find(ht, 
Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; case IS_DOUBLE: case IS_RESOURCE: case IS_BOOL: case IS_LONG: if (offset->type == IS_DOUBLE) { index = (long)Z_DVAL_P(offset); } else { index = Z_LVAL_P(offset); } if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) { if (check_empty == 2) { return 1; } } else { return 0; } break; default: zend_error(E_WARNING, "Illegal offset type"); return 0; } if (check_empty && check_inherited && intern->fptr_offset_get) { value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC); } else { value = *tmp; } } return check_empty ? zend_is_true(value) : Z_TYPE_P(value) != IS_NULL; } /* }}} */ static int spl_array_has_dimension(zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */ { return spl_array_has_dimension_ex(1, object, offset, check_empty TSRMLS_CC); } /* }}} */ /* {{{ spl_array_object_verify_pos_ex */ static inline int spl_array_object_verify_pos_ex(spl_array_object *object, HashTable *ht, const char *msg_prefix TSRMLS_DC) { if (!ht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and is no longer an array", msg_prefix); return FAILURE; } if (object->pos && (object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, ht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "%sArray was modified outside object and internal position is no longer valid", msg_prefix); return FAILURE; } return SUCCESS; } /* }}} */ /* {{{ spl_array_object_verify_pos */ static inline int spl_array_object_verify_pos(spl_array_object *object, HashTable *ht TSRMLS_DC) { return spl_array_object_verify_pos_ex(object, ht, "" TSRMLS_CC); } /* }}} */ /* {{{ proto bool ArrayObject::offsetExists(mixed $index) proto bool ArrayIterator::offsetExists(mixed $index) Returns whether the requested $index exists. */ SPL_METHOD(Array, offsetExists) { zval *index; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } RETURN_BOOL(spl_array_has_dimension_ex(0, getThis(), index, 2 TSRMLS_CC)); } /* }}} */ /* {{{ proto mixed ArrayObject::offsetGet(mixed $index) proto mixed ArrayIterator::offsetGet(mixed $index) Returns the value at the specified $index. */ SPL_METHOD(Array, offsetGet) { zval *index, *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } value = spl_array_read_dimension_ex(0, getThis(), index, BP_VAR_R TSRMLS_CC); RETURN_ZVAL(value, 1, 0); } /* }}} */ /* {{{ proto void ArrayObject::offsetSet(mixed $index, mixed $newval) proto void ArrayIterator::offsetSet(mixed $index, mixed $newval) Sets the value at the specified $index to $newval. 
*/ SPL_METHOD(Array, offsetSet) { zval *index, *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zz", &index, &value) == FAILURE) { return; } spl_array_write_dimension_ex(0, getThis(), index, value TSRMLS_CC); } /* }}} */ void spl_array_iterator_append(zval *object, zval *append_value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } if (Z_TYPE_P(intern->array) == IS_OBJECT) { php_error_docref(NULL TSRMLS_CC, E_RECOVERABLE_ERROR, "Cannot append properties to objects, use %s::offsetSet() instead", Z_OBJCE_P(object)->name); return; } spl_array_write_dimension(object, NULL, append_value TSRMLS_CC); if (!intern->pos) { spl_array_set_pos(intern, aht->pListTail); } } /* }}} */ /* {{{ proto void ArrayObject::append(mixed $newval) proto void ArrayIterator::append(mixed $newval) Appends the value (cannot be called for objects). */ SPL_METHOD(Array, append) { zval *value; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &value) == FAILURE) { return; } spl_array_iterator_append(getThis(), value TSRMLS_CC); } /* }}} */ /* {{{ proto void ArrayObject::offsetUnset(mixed $index) proto void ArrayIterator::offsetUnset(mixed $index) Unsets the value at the specified $index. */ SPL_METHOD(Array, offsetUnset) { zval *index; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z", &index) == FAILURE) { return; } spl_array_unset_dimension_ex(0, getThis(), index TSRMLS_CC); } /* }}} */ /* {{{ proto array ArrayObject::getArrayCopy() proto array ArrayIterator::getArrayCopy() Return a copy of the contained array */ SPL_METHOD(Array, getArrayCopy) { zval *object = getThis(), *tmp; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); array_init(return_value); zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); } /* }}} */ static HashTable *spl_array_get_properties(zval *object TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *result; if (intern->nApplyCount > 1) { php_error_docref(NULL TSRMLS_CC, E_ERROR, "Nesting level too deep - recursive dependency?"); } intern->nApplyCount++; result = spl_array_get_hash_table(intern, 1 TSRMLS_CC); intern->nApplyCount--; return result; } /* }}} */ static HashTable* spl_array_get_debug_info(zval *obj, int *is_temp TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(obj TSRMLS_CC); zval *tmp, *storage; int name_len; char *zname; zend_class_entry *base; *is_temp = 0; if (!intern->std.properties) { rebuild_object_properties(&intern->std); } if (HASH_OF(intern->array) == intern->std.properties) { return intern->std.properties; } else { if (intern->debug_info == NULL) { ALLOC_HASHTABLE(intern->debug_info); ZEND_INIT_SYMTABLE_EX(intern->debug_info, zend_hash_num_elements(intern->std.properties) + 1, 0); } if (intern->debug_info->nApplyCount == 0) { zend_hash_clean(intern->debug_info); zend_hash_copy(intern->debug_info, intern->std.properties, (copy_ctor_func_t) zval_add_ref, (void *) &tmp, sizeof(zval *)); storage = intern->array; zval_add_ref(&storage); base = (Z_OBJ_HT_P(obj) == &spl_handler_ArrayIterator) ? 
spl_ce_ArrayIterator : spl_ce_ArrayObject; zname = spl_gen_private_prop_name(base, "storage", sizeof("storage")-1, &name_len TSRMLS_CC); zend_symtable_update(intern->debug_info, zname, name_len+1, &storage, sizeof(zval *), NULL); efree(zname); } return intern->debug_info; } } /* }}} */ static HashTable *spl_array_get_gc(zval *object, zval ***gc_data, int *gc_data_count TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); *gc_data = &intern->array; *gc_data_count = 1; return zend_std_get_properties(object TSRMLS_CC); } /* }}} */ static zval *spl_array_read_property(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_read_dimension(object, member, type TSRMLS_CC); } return std_object_handlers.read_property(object, member, type, key TSRMLS_CC); } /* }}} */ static void spl_array_write_property(zval *object, zval *member, zval *value, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { spl_array_write_dimension(object, member, value TSRMLS_CC); return; } std_object_handlers.write_property(object, member, value, key TSRMLS_CC); } /* }}} */ static zval **spl_array_get_property_ptr_ptr(zval *object, zval *member, int type, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_get_dimension_ptr_ptr(1, object, member, type TSRMLS_CC); } return std_object_handlers.get_property_ptr_ptr(object, member, type, key TSRMLS_CC); } /* }}} */ static int spl_array_has_property(zval *object, zval *member, int has_set_exists, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { return spl_array_has_dimension(object, member, has_set_exists TSRMLS_CC); } return std_object_handlers.has_property(object, member, has_set_exists, key TSRMLS_CC); } /* }}} */ static void spl_array_unset_property(zval *object, zval *member, const zend_literal *key TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if ((intern->ar_flags & SPL_ARRAY_ARRAY_AS_PROPS) != 0 && !std_object_handlers.has_property(object, member, 2, key TSRMLS_CC)) { spl_array_unset_dimension(object, member TSRMLS_CC); spl_array_rewind(intern TSRMLS_CC); /* because deletion might invalidate position */ return; } std_object_handlers.unset_property(object, member, key TSRMLS_CC); } /* }}} */ static int spl_array_compare_objects(zval *o1, zval *o2 TSRMLS_DC) /* {{{ */ { HashTable *ht1, *ht2; spl_array_object *intern1, *intern2; int result = 0; zval temp_zv; intern1 = (spl_array_object*)zend_object_store_get_object(o1 TSRMLS_CC); intern2 = (spl_array_object*)zend_object_store_get_object(o2 TSRMLS_CC); ht1 = 
spl_array_get_hash_table(intern1, 0 TSRMLS_CC); ht2 = spl_array_get_hash_table(intern2, 0 TSRMLS_CC); zend_compare_symbol_tables(&temp_zv, ht1, ht2 TSRMLS_CC); result = (int)Z_LVAL(temp_zv); /* if we just compared std.properties, don't do it again */ if (result == 0 && !(ht1 == intern1->std.properties && ht2 == intern2->std.properties)) { result = std_object_handlers.compare_objects(o1, o2 TSRMLS_CC); } return result; } /* }}} */ static int spl_array_skip_protected(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { char *string_key; uint string_length; ulong num_key; if (Z_TYPE_P(intern->array) == IS_OBJECT) { do { if (zend_hash_get_current_key_ex(aht, &string_key, &string_length, &num_key, 0, &intern->pos) == HASH_KEY_IS_STRING) { /* zend_hash_get_current_key_ex() should never set * string_length to 0 when returning HASH_KEY_IS_STRING, but we * may as well be defensive and consider that successful. * Beyond that, we're looking for protected keys (which will * have a null byte at string_key[0]), but want to avoid * skipping completely empty keys (which will also have the * null byte, but a string_length of 1). */ if (!string_length || string_key[0] || string_length == 1) { return SUCCESS; } } else { return SUCCESS; } if (zend_hash_has_more_elements_ex(aht, &intern->pos) != SUCCESS) { return FAILURE; } zend_hash_move_forward_ex(aht, &intern->pos); spl_array_update_pos(intern); } while (1); } return FAILURE; } /* }}} */ static int spl_array_next_no_verify(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { zend_hash_move_forward_ex(aht, &intern->pos); spl_array_update_pos(intern); if (Z_TYPE_P(intern->array) == IS_OBJECT) { return spl_array_skip_protected(intern, aht TSRMLS_CC); } else { return zend_hash_has_more_elements_ex(aht, &intern->pos); } } /* }}} */ static int spl_array_next_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { if ((intern->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(intern, aht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and internal position is no longer valid"); return FAILURE; } return spl_array_next_no_verify(intern, aht TSRMLS_CC); } /* }}} */ static int spl_array_next(spl_array_object *intern TSRMLS_DC) /* {{{ */ { HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); return spl_array_next_ex(intern, aht TSRMLS_CC); } /* }}} */ /* define an overloaded iterator structure */ typedef struct { zend_user_iterator intern; spl_array_object *object; } spl_array_it; static void spl_array_it_dtor(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; zend_user_it_invalidate_current(iter TSRMLS_CC); zval_ptr_dtor((zval**)&iterator->intern.it.data); efree(iterator); } /* }}} */ static int spl_array_it_valid(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_VALID) { return zend_user_it_valid(iter TSRMLS_CC); } else { if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::valid(): " TSRMLS_CC) == FAILURE) { return FAILURE; } return zend_hash_has_more_elements_ex(aht, &object->pos); } } /* }}} */ static void spl_array_it_get_current_data(zend_object_iterator *iter, zval ***data TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = 
spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT) { zend_user_it_get_current_data(iter, data TSRMLS_CC); } else { if (zend_hash_get_current_data_ex(aht, (void**)data, &object->pos) == FAILURE) { *data = NULL; } } } /* }}} */ static void spl_array_it_get_current_key(zend_object_iterator *iter, zval *key TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_KEY) { zend_user_it_get_current_key(iter, key TSRMLS_CC); } else { if (spl_array_object_verify_pos_ex(object, aht, "ArrayIterator::current(): " TSRMLS_CC) == FAILURE) { ZVAL_NULL(key); } else { zend_hash_get_current_key_zval_ex(aht, key, &object->pos); } } } /* }}} */ static void spl_array_it_move_forward(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; HashTable *aht = spl_array_get_hash_table(object, 0 TSRMLS_CC); if (object->ar_flags & SPL_ARRAY_OVERLOADED_NEXT) { zend_user_it_move_forward(iter TSRMLS_CC); } else { zend_user_it_invalidate_current(iter TSRMLS_CC); if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::current(): Array was modified outside object and is no longer an array"); return; } if ((object->ar_flags & SPL_ARRAY_IS_REF) && spl_hash_verify_pos_ex(object, aht TSRMLS_CC) == FAILURE) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::next(): Array was modified outside object and internal position is no longer valid"); } else { spl_array_next_no_verify(object, aht TSRMLS_CC); } } } /* }}} */ static void spl_array_rewind_ex(spl_array_object *intern, HashTable *aht TSRMLS_DC) /* {{{ */ { zend_hash_internal_pointer_reset_ex(aht, &intern->pos); spl_array_update_pos(intern); spl_array_skip_protected(intern, aht TSRMLS_CC); } /* }}} */ static void spl_array_rewind(spl_array_object *intern TSRMLS_DC) /* {{{ */ { HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "ArrayIterator::rewind(): Array was modified outside object and is no longer an array"); return; } spl_array_rewind_ex(intern, aht TSRMLS_CC); } /* }}} */ static void spl_array_it_rewind(zend_object_iterator *iter TSRMLS_DC) /* {{{ */ { spl_array_it *iterator = (spl_array_it *)iter; spl_array_object *object = iterator->object; if (object->ar_flags & SPL_ARRAY_OVERLOADED_REWIND) { zend_user_it_rewind(iter TSRMLS_CC); } else { zend_user_it_invalidate_current(iter TSRMLS_CC); spl_array_rewind(object TSRMLS_CC); } } /* }}} */ /* {{{ spl_array_set_array */ static void spl_array_set_array(zval *object, spl_array_object *intern, zval **array, long ar_flags, int just_array TSRMLS_DC) { if (Z_TYPE_PP(array) == IS_ARRAY) { SEPARATE_ZVAL_IF_NOT_REF(array); } if (Z_TYPE_PP(array) == IS_OBJECT && (Z_OBJ_HT_PP(array) == &spl_handler_ArrayObject || Z_OBJ_HT_PP(array) == &spl_handler_ArrayIterator)) { zval_ptr_dtor(&intern->array); if (just_array) { spl_array_object *other = (spl_array_object*)zend_object_store_get_object(*array TSRMLS_CC); ar_flags = other->ar_flags & ~SPL_ARRAY_INT_MASK; } ar_flags |= SPL_ARRAY_USE_OTHER; intern->array = *array; } else { if (Z_TYPE_PP(array) != IS_OBJECT && Z_TYPE_PP(array) != IS_ARRAY) { zend_throw_exception(spl_ce_InvalidArgumentException, "Passed variable is not an array or object, using empty array instead", 0 TSRMLS_CC); return; } 
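/* At this point the argument is either a plain array (separated above when
 * not a reference) or a non-SPL object that passed the type check; adopt it
 * directly as the backing storage. */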
zval_ptr_dtor(&intern->array); intern->array = *array; } if (object == *array) { intern->ar_flags |= SPL_ARRAY_IS_SELF; intern->ar_flags &= ~SPL_ARRAY_USE_OTHER; } else { intern->ar_flags &= ~SPL_ARRAY_IS_SELF; } intern->ar_flags |= ar_flags; Z_ADDREF_P(intern->array); if (Z_TYPE_PP(array) == IS_OBJECT) { zend_object_get_properties_t handler = Z_OBJ_HANDLER_PP(array, get_properties); if ((handler != std_object_handlers.get_properties && handler != spl_array_get_properties) || !spl_array_get_hash_table(intern, 0 TSRMLS_CC)) { zend_throw_exception_ex(spl_ce_InvalidArgumentException, 0 TSRMLS_CC, "Overloaded object of type %s is not compatible with %s", Z_OBJCE_PP(array)->name, intern->std.ce->name); } } spl_array_rewind(intern TSRMLS_CC); } /* }}} */ /* iterator handler table */ zend_object_iterator_funcs spl_array_it_funcs = { spl_array_it_dtor, spl_array_it_valid, spl_array_it_get_current_data, spl_array_it_get_current_key, spl_array_it_move_forward, spl_array_it_rewind }; zend_object_iterator *spl_array_get_iterator(zend_class_entry *ce, zval *object, int by_ref TSRMLS_DC) /* {{{ */ { spl_array_it *iterator; spl_array_object *array_object = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (by_ref && (array_object->ar_flags & SPL_ARRAY_OVERLOADED_CURRENT)) { zend_error(E_ERROR, "An iterator cannot be used with foreach by reference"); } iterator = emalloc(sizeof(spl_array_it)); Z_ADDREF_P(object); iterator->intern.it.data = (void*)object; iterator->intern.it.funcs = &spl_array_it_funcs; iterator->intern.ce = ce; iterator->intern.value = NULL; iterator->object = array_object; return (zend_object_iterator*)iterator; } /* }}} */ /* {{{ proto void ArrayObject::__construct(array|object ar = array() [, int flags = 0 [, string iterator_class = "ArrayIterator"]]) proto void ArrayIterator::__construct(array|object ar = array() [, int flags = 0]) Constructs a new array object or iterator from the given array or object. */ SPL_METHOD(Array, __construct) { zval *object = getThis(); spl_array_object *intern; zval **array; long ar_flags = 0; zend_class_entry *ce_get_iterator = spl_ce_Iterator; zend_error_handling error_handling; if (ZEND_NUM_ARGS() == 0) { return; /* nothing to do */ } zend_replace_error_handling(EH_THROW, spl_ce_InvalidArgumentException, &error_handling TSRMLS_CC); intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z|lC", &array, &ar_flags, &ce_get_iterator) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } if (ZEND_NUM_ARGS() > 2) { intern->ce_get_iterator = ce_get_iterator; } ar_flags &= ~SPL_ARRAY_INT_MASK; spl_array_set_array(object, intern, array, ar_flags, ZEND_NUM_ARGS() == 1 TSRMLS_CC); zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ proto void ArrayObject::setIteratorClass(string iterator_class) Set the class used in getIterator. */ SPL_METHOD(Array, setIteratorClass) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zend_class_entry *ce_get_iterator = spl_ce_Iterator; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "C", &ce_get_iterator) == FAILURE) { return; } intern->ce_get_iterator = ce_get_iterator; } /* }}} */ /* {{{ proto string ArrayObject::getIteratorClass() Get the class used in getIterator.
*/ SPL_METHOD(Array, getIteratorClass) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_STRING(intern->ce_get_iterator->name, 1); } /* }}} */ /* {{{ proto int ArrayObject::getFlags() Get flags */ SPL_METHOD(Array, getFlags) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->ar_flags & ~SPL_ARRAY_INT_MASK); } /* }}} */ /* {{{ proto void ArrayObject::setFlags(int flags) Set flags */ SPL_METHOD(Array, setFlags) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); long ar_flags = 0; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &ar_flags) == FAILURE) { return; } intern->ar_flags = (intern->ar_flags & SPL_ARRAY_INT_MASK) | (ar_flags & ~SPL_ARRAY_INT_MASK); } /* }}} */ /* {{{ proto Array|Object ArrayObject::exchangeArray(Array|Object ar = array()) Replace the referenced array or object with a new one and return the old one (right now a copy - to be changed) */ SPL_METHOD(Array, exchangeArray) { zval *object = getThis(), *tmp, **array; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); array_init(return_value); zend_hash_copy(HASH_OF(return_value), spl_array_get_hash_table(intern, 0 TSRMLS_CC), (copy_ctor_func_t) zval_add_ref, &tmp, sizeof(zval*)); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "Z", &array) == FAILURE) { return; } spl_array_set_array(object, intern, array, 0L, 1 TSRMLS_CC); } /* }}} */ /* {{{ proto ArrayIterator ArrayObject::getIterator() Create a new iterator from an ArrayObject instance */ SPL_METHOD(Array, getIterator) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); spl_array_object *iterator; HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } return_value->type = IS_OBJECT; return_value->value.obj = spl_array_object_new_ex(intern->ce_get_iterator, &iterator, object, 0 TSRMLS_CC); Z_SET_REFCOUNT_P(return_value, 1); Z_SET_ISREF_P(return_value); } /* }}} */ /* {{{ proto void ArrayIterator::rewind() Rewind array back to the start */ SPL_METHOD(Array, rewind) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_array_rewind(intern TSRMLS_CC); } /* }}} */ /* {{{ proto void ArrayIterator::seek(int $position) Seek to position.
*/ SPL_METHOD(Array, seek) { long opos, position; zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); int result; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "l", &position) == FAILURE) { return; } if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } opos = position; if (position >= 0) { /* negative values are not supported */ spl_array_rewind(intern TSRMLS_CC); result = SUCCESS; while (position-- > 0 && (result = spl_array_next(intern TSRMLS_CC)) == SUCCESS); if (result == SUCCESS && zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS) { return; /* ok */ } } zend_throw_exception_ex(spl_ce_OutOfBoundsException, 0 TSRMLS_CC, "Seek position %ld is out of range", opos); } /* }}} */ static int spl_array_object_count_elements_helper(spl_array_object *intern, long *count TSRMLS_DC) /* {{{ */ { HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); HashPosition pos; if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); *count = 0; return FAILURE; } if (Z_TYPE_P(intern->array) == IS_OBJECT) { /* We need to save 'pos' since the functions we are about to call * modify it and do not take 'pos' as a parameter. */ pos = intern->pos; *count = 0; spl_array_rewind(intern TSRMLS_CC); while (intern->pos && spl_array_next(intern TSRMLS_CC) == SUCCESS) { (*count)++; } spl_array_set_pos(intern, pos); return SUCCESS; } else { *count = zend_hash_num_elements(aht); return SUCCESS; } } /* }}} */ int spl_array_object_count_elements(zval *object, long *count TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); if (intern->fptr_count) { zval *rv; zend_call_method_with_0_params(&object, intern->std.ce, &intern->fptr_count, "count", &rv); if (rv) { zval_ptr_dtor(&intern->retval); MAKE_STD_ZVAL(intern->retval); ZVAL_ZVAL(intern->retval, rv, 1, 1); convert_to_long(intern->retval); *count = (long) Z_LVAL_P(intern->retval); return SUCCESS; } *count = 0; return FAILURE; } return spl_array_object_count_elements_helper(intern, count TSRMLS_CC); } /* }}} */ /* {{{ proto int ArrayObject::count() proto int ArrayIterator::count() Return the number of elements in the Iterator.
*/ SPL_METHOD(Array, count) { long count; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } spl_array_object_count_elements_helper(intern, &count TSRMLS_CC); RETURN_LONG(count); } /* }}} */ static void spl_array_method(INTERNAL_FUNCTION_PARAMETERS, char *fname, int fname_len, int use_arg) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); zval *tmp, *arg = NULL; zval *retval_ptr = NULL; MAKE_STD_ZVAL(tmp); Z_TYPE_P(tmp) = IS_ARRAY; Z_ARRVAL_P(tmp) = aht; if (!use_arg) { aht->nApplyCount++; zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 1, tmp, NULL TSRMLS_CC); aht->nApplyCount--; } else if (use_arg == SPL_ARRAY_METHOD_MAY_USER_ARG) { if (zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "|z", &arg) == FAILURE) { Z_TYPE_P(tmp) = IS_NULL; zval_ptr_dtor(&tmp); zend_throw_exception(spl_ce_BadMethodCallException, "Function expects one argument at most", 0 TSRMLS_CC); return; } aht->nApplyCount++; zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, arg ? 2 : 1, tmp, arg TSRMLS_CC); aht->nApplyCount--; } else { if (ZEND_NUM_ARGS() != 1 || zend_parse_parameters_ex(ZEND_PARSE_PARAMS_QUIET, ZEND_NUM_ARGS() TSRMLS_CC, "z", &arg) == FAILURE) { Z_TYPE_P(tmp) = IS_NULL; zval_ptr_dtor(&tmp); zend_throw_exception(spl_ce_BadMethodCallException, "Function expects exactly one argument", 0 TSRMLS_CC); return; } aht->nApplyCount++; zend_call_method(NULL, NULL, NULL, fname, fname_len, &retval_ptr, 2, tmp, arg TSRMLS_CC); aht->nApplyCount--; } Z_TYPE_P(tmp) = IS_NULL; /* we want to destroy the zval, not the hashtable */ zval_ptr_dtor(&tmp); if (retval_ptr) { COPY_PZVAL_TO_ZVAL(*return_value, retval_ptr); } } /* }}} */ #define SPL_ARRAY_METHOD(cname, fname, use_arg) \ SPL_METHOD(cname, fname) \ { \ spl_array_method(INTERNAL_FUNCTION_PARAM_PASSTHRU, #fname, sizeof(#fname)-1, use_arg); \ } /* {{{ proto int ArrayObject::asort([int $sort_flags = SORT_REGULAR ]) proto int ArrayIterator::asort([int $sort_flags = SORT_REGULAR ]) Sort the entries by values. */ SPL_ARRAY_METHOD(Array, asort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */ /* {{{ proto int ArrayObject::ksort([int $sort_flags = SORT_REGULAR ]) proto int ArrayIterator::ksort([int $sort_flags = SORT_REGULAR ]) Sort the entries by key. */ SPL_ARRAY_METHOD(Array, ksort, SPL_ARRAY_METHOD_MAY_USER_ARG) /* }}} */ /* {{{ proto int ArrayObject::uasort(callback cmp_function) proto int ArrayIterator::uasort(callback cmp_function) Sort the entries by values using a user-defined comparison function. */ SPL_ARRAY_METHOD(Array, uasort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */ /* {{{ proto int ArrayObject::uksort(callback cmp_function) proto int ArrayIterator::uksort(callback cmp_function) Sort the entries by keys using a user-defined comparison function. */ SPL_ARRAY_METHOD(Array, uksort, SPL_ARRAY_METHOD_USE_ARG) /* }}} */ /* {{{ proto int ArrayObject::natsort() proto int ArrayIterator::natsort() Sort the entries by values using "natural order" algorithm. */ SPL_ARRAY_METHOD(Array, natsort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */ /* {{{ proto int ArrayObject::natcasesort() proto int ArrayIterator::natcasesort() Sort the entries by values using case insensitive "natural order" algorithm.
*/ SPL_ARRAY_METHOD(Array, natcasesort, SPL_ARRAY_METHOD_NO_ARG) /* }}} */ /* {{{ proto mixed|NULL ArrayIterator::current() Return current array entry */ SPL_METHOD(Array, current) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); zval **entry; HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) { return; } RETVAL_ZVAL(*entry, 1, 0); } /* }}} */ /* {{{ proto mixed|NULL ArrayIterator::key() Return current array key */ SPL_METHOD(Array, key) { if (zend_parse_parameters_none() == FAILURE) { return; } spl_array_iterator_key(getThis(), return_value TSRMLS_CC); } /* }}} */ void spl_array_iterator_key(zval *object, zval *return_value TSRMLS_DC) /* {{{ */ { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } zend_hash_get_current_key_zval_ex(aht, return_value, &intern->pos); } /* }}} */ /* {{{ proto void ArrayIterator::next() Move to next entry */ SPL_METHOD(Array, next) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } spl_array_next_no_verify(intern, aht TSRMLS_CC); } /* }}} */ /* {{{ proto bool ArrayIterator::valid() Check whether array contains more entries */ SPL_METHOD(Array, valid) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { RETURN_FALSE; } else { RETURN_BOOL(zend_hash_has_more_elements_ex(aht, &intern->pos) == SUCCESS); } } /* }}} */ /* {{{ proto bool RecursiveArrayIterator::hasChildren() Check whether current element has children (e.g. 
is an array) */ SPL_METHOD(Array, hasChildren) { zval *object = getThis(), **entry; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { RETURN_FALSE; } if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) { RETURN_FALSE; } RETURN_BOOL(Z_TYPE_PP(entry) == IS_ARRAY || (Z_TYPE_PP(entry) == IS_OBJECT && (intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) == 0)); } /* }}} */ /* {{{ proto object RecursiveArrayIterator::getChildren() Create a sub iterator for the current element (same class as $this) */ SPL_METHOD(Array, getChildren) { zval *object = getThis(), **entry, *flags; spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (zend_parse_parameters_none() == FAILURE) { return; } if (spl_array_object_verify_pos(intern, aht TSRMLS_CC) == FAILURE) { return; } if (zend_hash_get_current_data_ex(aht, (void **) &entry, &intern->pos) == FAILURE) { return; } if (Z_TYPE_PP(entry) == IS_OBJECT) { if ((intern->ar_flags & SPL_ARRAY_CHILD_ARRAYS_ONLY) != 0) { return; } if (instanceof_function(Z_OBJCE_PP(entry), Z_OBJCE_P(getThis()) TSRMLS_CC)) { RETURN_ZVAL(*entry, 1, 0); } } MAKE_STD_ZVAL(flags); ZVAL_LONG(flags, SPL_ARRAY_USE_OTHER | intern->ar_flags); spl_instantiate_arg_ex2(Z_OBJCE_P(getThis()), &return_value, 0, *entry, flags TSRMLS_CC); zval_ptr_dtor(&flags); } /* }}} */ /* {{{ proto string ArrayObject::serialize() Serialize the object */ SPL_METHOD(Array, serialize) { zval *object = getThis(); spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC); HashTable *aht = spl_array_get_hash_table(intern, 0 TSRMLS_CC); zval members, *pmembers; php_serialize_data_t var_hash; smart_str buf = {0}; zval *flags; if (zend_parse_parameters_none() == FAILURE) { return; } if (!aht) { php_error_docref(NULL TSRMLS_CC, E_NOTICE, "Array was modified outside object and is no longer an array"); return; } PHP_VAR_SERIALIZE_INIT(var_hash); MAKE_STD_ZVAL(flags); ZVAL_LONG(flags, (intern->ar_flags & SPL_ARRAY_CLONE_MASK)); /* storage */ smart_str_appendl(&buf, "x:", 2); php_var_serialize(&buf, &flags, &var_hash TSRMLS_CC); zval_ptr_dtor(&flags); if (!(intern->ar_flags & SPL_ARRAY_IS_SELF)) { php_var_serialize(&buf, &intern->array, &var_hash TSRMLS_CC); smart_str_appendc(&buf, ';'); } /* members */ smart_str_appendl(&buf, "m:", 2); INIT_PZVAL(&members); if (!intern->std.properties) { rebuild_object_properties(&intern->std); } Z_ARRVAL(members) = intern->std.properties; Z_TYPE(members) = IS_ARRAY; pmembers = &members; php_var_serialize(&buf, &pmembers, &var_hash TSRMLS_CC); /* finishes the string */ /* done */ PHP_VAR_SERIALIZE_DESTROY(var_hash); if (buf.c) { RETURN_STRINGL(buf.c, buf.len, 0); } RETURN_NULL(); } /* }}} */ /* {{{ proto void ArrayObject::unserialize(string serialized) * unserialize the object */ SPL_METHOD(Array, unserialize) { spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(getThis() TSRMLS_CC); char *buf; int buf_len; const unsigned char *p, *s; php_unserialize_data_t var_hash; zval *pmembers, *pflags = NULL; HashTable *aht; long flags; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &buf, &buf_len) == FAILURE) { return; } if (buf_len == 0) { return; } aht = 
spl_array_get_hash_table(intern, 0 TSRMLS_CC); if (aht->nApplyCount > 0) { zend_error(E_WARNING, "Modification of ArrayObject during sorting is prohibited"); return; } /* storage */ s = p = (const unsigned char*)buf; PHP_VAR_UNSERIALIZE_INIT(var_hash); if (*p!= 'x' || *++p != ':') { goto outexcept; } ++p; ALLOC_INIT_ZVAL(pflags); if (!php_var_unserialize(&pflags, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pflags) != IS_LONG) { goto outexcept; } var_push_dtor(&var_hash, &pflags); --p; /* for ';' */ flags = Z_LVAL_P(pflags); /* flags needs to be verified and we also need to verify whether the next * thing we get is ';'. After that we require an 'm' or somethign else * where 'm' stands for members and anything else should be an array. If * neither 'a' or 'm' follows we have an error. */ if (*p != ';') { goto outexcept; } ++p; if (*p!='m') { if (*p!='a' && *p!='O' && *p!='C' && *p!='r') { goto outexcept; } intern->ar_flags &= ~SPL_ARRAY_CLONE_MASK; intern->ar_flags |= flags & SPL_ARRAY_CLONE_MASK; zval_ptr_dtor(&intern->array); ALLOC_INIT_ZVAL(intern->array); if (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC) || (Z_TYPE_P(intern->array) != IS_ARRAY && Z_TYPE_P(intern->array) != IS_OBJECT)) { zval_ptr_dtor(&intern->array); goto outexcept; } var_push_dtor(&var_hash, &intern->array); } if (*p != ';') { goto outexcept; } ++p; /* members */ if (*p!= 'm' || *++p != ':') { goto outexcept; } ++p; ALLOC_INIT_ZVAL(pmembers); if (!php_var_unserialize(&pmembers, &p, s + buf_len, &var_hash TSRMLS_CC) || Z_TYPE_P(pmembers) != IS_ARRAY) { zval_ptr_dtor(&pmembers); goto outexcept; } var_push_dtor(&var_hash, &pmembers); /* copy members */ if (!intern->std.properties) { rebuild_object_properties(&intern->std); } zend_hash_copy(intern->std.properties, Z_ARRVAL_P(pmembers), (copy_ctor_func_t) zval_add_ref, (void *) NULL, sizeof(zval *)); zval_ptr_dtor(&pmembers); /* done reading $serialized */ PHP_VAR_UNSERIALIZE_DESTROY(var_hash); if (pflags) { zval_ptr_dtor(&pflags); } return; outexcept: PHP_VAR_UNSERIALIZE_DESTROY(var_hash); if (pflags) { zval_ptr_dtor(&pflags); } zend_throw_exception_ex(spl_ce_UnexpectedValueException, 0 TSRMLS_CC, "Error at offset %ld of %d bytes", (long)((char*)p - buf), buf_len); return; } /* }}} */ /* {{{ arginfo and function table */ ZEND_BEGIN_ARG_INFO_EX(arginfo_array___construct, 0, 0, 0) ZEND_ARG_INFO(0, array) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetGet, 0, 0, 1) ZEND_ARG_INFO(0, index) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_array_offsetSet, 0, 0, 2) ZEND_ARG_INFO(0, index) ZEND_ARG_INFO(0, newval) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_append, 0) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_seek, 0) ZEND_ARG_INFO(0, position) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_exchangeArray, 0) ZEND_ARG_INFO(0, array) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_setFlags, 0) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_setIteratorClass, 0) ZEND_ARG_INFO(0, iteratorClass) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_array_uXsort, 0) ZEND_ARG_INFO(0, cmp_function) ZEND_END_ARG_INFO(); ZEND_BEGIN_ARG_INFO(arginfo_array_unserialize, 0) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO(); ZEND_BEGIN_ARG_INFO(arginfo_array_void, 0) ZEND_END_ARG_INFO() static const zend_function_entry spl_funcs_ArrayObject[] = { SPL_ME(Array, __construct, arginfo_array___construct, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetExists, 
arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetGet, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetSet, arginfo_array_offsetSet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetUnset, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, append, arginfo_array_append, ZEND_ACC_PUBLIC) SPL_ME(Array, getArrayCopy, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, count, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, getFlags, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, setFlags, arginfo_array_setFlags, ZEND_ACC_PUBLIC) SPL_ME(Array, asort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, ksort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, uasort, arginfo_array_uXsort, ZEND_ACC_PUBLIC) SPL_ME(Array, uksort, arginfo_array_uXsort, ZEND_ACC_PUBLIC) SPL_ME(Array, natsort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, natcasesort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, unserialize, arginfo_array_unserialize, ZEND_ACC_PUBLIC) SPL_ME(Array, serialize, arginfo_array_void, ZEND_ACC_PUBLIC) /* ArrayObject specific */ SPL_ME(Array, getIterator, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, exchangeArray, arginfo_array_exchangeArray, ZEND_ACC_PUBLIC) SPL_ME(Array, setIteratorClass, arginfo_array_setIteratorClass, ZEND_ACC_PUBLIC) SPL_ME(Array, getIteratorClass, arginfo_array_void, ZEND_ACC_PUBLIC) PHP_FE_END }; static const zend_function_entry spl_funcs_ArrayIterator[] = { SPL_ME(Array, __construct, arginfo_array___construct, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetExists, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetGet, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetSet, arginfo_array_offsetSet, ZEND_ACC_PUBLIC) SPL_ME(Array, offsetUnset, arginfo_array_offsetGet, ZEND_ACC_PUBLIC) SPL_ME(Array, append, arginfo_array_append, ZEND_ACC_PUBLIC) SPL_ME(Array, getArrayCopy, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, count, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, getFlags, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, setFlags, arginfo_array_setFlags, ZEND_ACC_PUBLIC) SPL_ME(Array, asort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, ksort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, uasort, arginfo_array_uXsort, ZEND_ACC_PUBLIC) SPL_ME(Array, uksort, arginfo_array_uXsort, ZEND_ACC_PUBLIC) SPL_ME(Array, natsort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, natcasesort, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, unserialize, arginfo_array_unserialize, ZEND_ACC_PUBLIC) SPL_ME(Array, serialize, arginfo_array_void, ZEND_ACC_PUBLIC) /* ArrayIterator specific */ SPL_ME(Array, rewind, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, current, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, key, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, next, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, valid, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, seek, arginfo_array_seek, ZEND_ACC_PUBLIC) PHP_FE_END }; static const zend_function_entry spl_funcs_RecursiveArrayIterator[] = { SPL_ME(Array, hasChildren, arginfo_array_void, ZEND_ACC_PUBLIC) SPL_ME(Array, getChildren, arginfo_array_void, ZEND_ACC_PUBLIC) PHP_FE_END }; /* }}} */ /* {{{ PHP_MINIT_FUNCTION(spl_array) */ PHP_MINIT_FUNCTION(spl_array) { REGISTER_SPL_STD_CLASS_EX(ArrayObject, spl_array_object_new, spl_funcs_ArrayObject); REGISTER_SPL_IMPLEMENTS(ArrayObject, Aggregate); REGISTER_SPL_IMPLEMENTS(ArrayObject, ArrayAccess); REGISTER_SPL_IMPLEMENTS(ArrayObject, Serializable); REGISTER_SPL_IMPLEMENTS(ArrayObject, 
Countable); memcpy(&spl_handler_ArrayObject, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); spl_handler_ArrayObject.clone_obj = spl_array_object_clone; spl_handler_ArrayObject.read_dimension = spl_array_read_dimension; spl_handler_ArrayObject.write_dimension = spl_array_write_dimension; spl_handler_ArrayObject.unset_dimension = spl_array_unset_dimension; spl_handler_ArrayObject.has_dimension = spl_array_has_dimension; spl_handler_ArrayObject.count_elements = spl_array_object_count_elements; spl_handler_ArrayObject.get_properties = spl_array_get_properties; spl_handler_ArrayObject.get_debug_info = spl_array_get_debug_info; spl_handler_ArrayObject.get_gc = spl_array_get_gc; spl_handler_ArrayObject.read_property = spl_array_read_property; spl_handler_ArrayObject.write_property = spl_array_write_property; spl_handler_ArrayObject.get_property_ptr_ptr = spl_array_get_property_ptr_ptr; spl_handler_ArrayObject.has_property = spl_array_has_property; spl_handler_ArrayObject.unset_property = spl_array_unset_property; spl_handler_ArrayObject.compare_objects = spl_array_compare_objects; REGISTER_SPL_STD_CLASS_EX(ArrayIterator, spl_array_object_new, spl_funcs_ArrayIterator); REGISTER_SPL_IMPLEMENTS(ArrayIterator, Iterator); REGISTER_SPL_IMPLEMENTS(ArrayIterator, ArrayAccess); REGISTER_SPL_IMPLEMENTS(ArrayIterator, SeekableIterator); REGISTER_SPL_IMPLEMENTS(ArrayIterator, Serializable); REGISTER_SPL_IMPLEMENTS(ArrayIterator, Countable); memcpy(&spl_handler_ArrayIterator, &spl_handler_ArrayObject, sizeof(zend_object_handlers)); spl_ce_ArrayIterator->get_iterator = spl_array_get_iterator; REGISTER_SPL_SUB_CLASS_EX(RecursiveArrayIterator, ArrayIterator, spl_array_object_new, spl_funcs_RecursiveArrayIterator); REGISTER_SPL_IMPLEMENTS(RecursiveArrayIterator, RecursiveIterator); spl_ce_RecursiveArrayIterator->get_iterator = spl_array_get_iterator; REGISTER_SPL_CLASS_CONST_LONG(ArrayObject, "STD_PROP_LIST", SPL_ARRAY_STD_PROP_LIST); REGISTER_SPL_CLASS_CONST_LONG(ArrayObject, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS); REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "STD_PROP_LIST", SPL_ARRAY_STD_PROP_LIST); REGISTER_SPL_CLASS_CONST_LONG(ArrayIterator, "ARRAY_AS_PROPS", SPL_ARRAY_ARRAY_AS_PROPS); REGISTER_SPL_CLASS_CONST_LONG(RecursiveArrayIterator, "CHILD_ARRAYS_ONLY", SPL_ARRAY_CHILD_ARRAYS_ONLY); return SUCCESS; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: fdm=marker * vim: noet sw=4 ts=4 */
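The PHP_MINIT block above follows a copy-then-override idiom: it memcpy()s the engine's default object handler table and then repoints only the slots ArrayObject customizes (read_dimension, count_elements, and so on). A minimal self-contained sketch of that idiom, with a toy handler struct and invented function names standing in for zend_object_handlers:

#include <stdio.h>
#include <string.h>

/* Toy handler table; zend_object_handlers is the real, much larger one. */
typedef struct { void (*read)(void); void (*write)(void); } handlers;

static void std_read(void)  { puts("std read");  }
static void std_write(void) { puts("std write"); }
static void my_read(void)   { puts("ArrayObject-style read"); }

static const handlers std_handlers = { std_read, std_write };

int main(void)
{
    handlers h;
    memcpy(&h, &std_handlers, sizeof h); /* inherit every default slot */
    h.read = my_read;                    /* override just the one we change */
    h.read();   /* customized behavior */
    h.write();  /* untouched default */
    return 0;
}

The same shape appears again above: spl_handler_ArrayIterator is memcpy()d from spl_handler_ArrayObject, inheriting all of its overrides in turn.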
func_before and func_after (byte-identical for this row; shown once, reflowed from a single line for readability):

static int spl_array_has_dimension_ex(int check_inherited, zval *object, zval *offset, int check_empty TSRMLS_DC) /* {{{ */
{
	spl_array_object *intern = (spl_array_object*)zend_object_store_get_object(object TSRMLS_CC);
	long index;
	zval *rv, *value = NULL, **tmp;

	if (check_inherited && intern->fptr_offset_has) {
		zval *offset_tmp = offset;
		SEPARATE_ARG_IF_REF(offset_tmp);
		zend_call_method_with_1_params(&object, Z_OBJCE_P(object), &intern->fptr_offset_has, "offsetExists", &rv, offset_tmp);
		zval_ptr_dtor(&offset_tmp);
		if (rv && zend_is_true(rv)) {
			zval_ptr_dtor(&rv);
			if (check_empty != 1) {
				return 1;
			} else if (intern->fptr_offset_get) {
				value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC);
			}
		} else {
			if (rv) {
				zval_ptr_dtor(&rv);
			}
			return 0;
		}
	}

	if (!value) {
		HashTable *ht = spl_array_get_hash_table(intern, 0 TSRMLS_CC);

		switch(Z_TYPE_P(offset)) {
			case IS_STRING:
				if (zend_symtable_find(ht, Z_STRVAL_P(offset), Z_STRLEN_P(offset)+1, (void **) &tmp) != FAILURE) {
					if (check_empty == 2) {
						return 1;
					}
				} else {
					return 0;
				}
				break;

			case IS_DOUBLE:
			case IS_RESOURCE:
			case IS_BOOL:
			case IS_LONG:
				if (offset->type == IS_DOUBLE) {
					index = (long)Z_DVAL_P(offset);
				} else {
					index = Z_LVAL_P(offset);
				}
				if (zend_hash_index_find(ht, index, (void **)&tmp) != FAILURE) {
					if (check_empty == 2) {
						return 1;
					}
				} else {
					return 0;
				}
				break;

			default:
				zend_error(E_WARNING, "Illegal offset type");
				return 0;
		}

		if (check_empty && check_inherited && intern->fptr_offset_get) {
			value = spl_array_read_dimension_ex(1, object, offset, BP_VAR_R TSRMLS_CC);
		} else {
			value = *tmp;
		}
	}

	return check_empty ? zend_is_true(value) : Z_TYPE_P(value) != IS_NULL;
} /* }}} */
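One detail in the numeric branch of the function above: an IS_DOUBLE offset is coerced with a plain (long) cast before zend_hash_index_find(), which is why fractional array keys are silently truncated in PHP ($a[2.7] and $a[2] hit the same bucket). A self-contained illustration of just that coercion (variable names invented):

#include <stdio.h>

int main(void)
{
    double offset = 2.7;
    long index = (long)offset;   /* same cast as the IS_DOUBLE case above */
    printf("offset %.1f -> hash index %ld\n", offset, index); /* prints 2 */
    return 0;
}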
{'added': [(311, '\tif (!offset || !ht) {'), (629, '\t\t\tcase IS_STRING:'), (641, '\t\t\tcase IS_BOOL:'), (1813, '\t\tif (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC)'), (1814, '\t\t\t\t|| (Z_TYPE_P(intern->array) != IS_ARRAY && Z_TYPE_P(intern->array) != IS_OBJECT)) {'), (1815, '\t\t\tzval_ptr_dtor(&intern->array);')], 'deleted': [(311, '\tif (!offset) {'), (629, '\t\t\tcase IS_STRING:'), (641, '\t\t\tcase IS_BOOL:'), (1813, '\t\tif (!php_var_unserialize(&intern->array, &p, s + buf_len, &var_hash TSRMLS_CC)) {')]}
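The diff above is the heart of the CVE-2016-7417 fix: after php_var_unserialize() produces the inner storage at line 1813, the patched handler rejects any result that is neither IS_ARRAY nor IS_OBJECT, and frees it, before it can be used as backing storage. This is the classic CWE-20 validate-after-parse pattern. Reduced to a self-contained toy, where the tagged-union value type and toy_unserialize() are invented stand-ins for zval and php_var_unserialize():

#include <stdio.h>
#include <stdlib.h>

/* Toy tagged value standing in for a zval. */
typedef enum { T_NULL, T_LONG, T_ARRAY, T_OBJECT } vtype;
typedef struct { vtype type; long lval; } value;

/* Pretend unserializer: the first byte of input picks the type,
 * so the attacker controls what comes back. Returns 0 on parse error. */
static int toy_unserialize(const char *p, value *out)
{
    switch (*p) {
    case 'a': out->type = T_ARRAY;  return 1;
    case 'O': out->type = T_OBJECT; return 1;
    case 'i': out->type = T_LONG; out->lval = strtol(p + 1, NULL, 10); return 1;
    default:  return 0;
    }
}

int main(void)
{
    const char *payload = "i42";  /* attacker-chosen type tag */
    value v;

    /* The fixed pattern: parsing succeeding is not enough; the result
     * must also be one of the types the caller can legally use. */
    if (!toy_unserialize(payload, &v)
            || (v.type != T_ARRAY && v.type != T_OBJECT)) {
        fprintf(stderr, "rejected: parse failed or wrong type\n");
        return 1;
    }
    puts("accepted as backing storage");
    return 0;
}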
num_lines_added: 6
num_lines_deleted: 4
num_lines_in_file: 1,540
num_tokens_in_file: 10,658
num_lines_in_method: 65
num_tokens_in_method: 389
method_complexity: 23
repo: https://github.com/php/php-src
cve_id: CVE-2016-7417
cwe_id: CWE-20
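A note on the metrics just above: method_complexity is most plausibly McCabe cyclomatic complexity, though that is an assumption, since the dataset does not define the field. Under that reading it counts independent paths through the method, roughly one plus the number of decision points (if, case, loop conditions, && and ||), and 23 is believable for the 65-line function shown earlier. A toy illustration with invented code:

#include <stdio.h>

/* Three decision points, so cyclomatic complexity 3 + 1 = 4. */
static const char *classify(int n)
{
    if (n < 0)       /* decision 1 */
        return "negative";
    if (n == 0)      /* decision 2 */
        return "zero";
    if (n % 2 == 0)  /* decision 3 */
        return "even";
    return "odd";
}

int main(void)
{
    printf("%s %s\n", classify(-3), classify(8)); /* negative even */
    return 0;
}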
id: 312
file_name: icmp.c
programming_language: C
method_name: icmp_send
code_before:
/* * NET3: Implementation of the ICMP protocol layer. * * Alan Cox, <alan@lxorguk.ukuu.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Some of the function names and the icmp unreach table for this * module were derived from [icmp.c 1.0.11 06/02/93] by * Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting. * Other than that this module is a complete rewrite. * * Fixes: * Clemens Fruhwirth : introduce global icmp rate limiting * with icmp type masking ability instead * of broken per type icmp timeouts. * Mike Shaver : RFC1122 checks. * Alan Cox : Multicast ping reply as self. * Alan Cox : Fix atomicity lockup in ip_build_xmit * call. * Alan Cox : Added 216,128 byte paths to the MTU * code. * Martin Mares : RFC1812 checks. * Martin Mares : Can be configured to follow redirects * if acting as a router _without_ a * routing protocol (RFC 1812). * Martin Mares : Echo requests may be configured to * be ignored (RFC 1812). * Martin Mares : Limitation of ICMP error message * transmit rate (RFC 1812). * Martin Mares : TOS and Precedence set correctly * (RFC 1812). * Martin Mares : Now copying as much data from the * original packet as we can without * exceeding 576 bytes (RFC 1812). * Willy Konynenberg : Transparent proxying support. * Keith Owens : RFC1191 correction for 4.2BSD based * path MTU bug. * Thomas Quinot : ICMP Dest Unreach codes up to 15 are * valid (RFC 1812). * Andi Kleen : Check all packet lengths properly * and moved all kfree_skb() up to * icmp_rcv. * Andi Kleen : Move the rate limit bookkeeping * into the dest entry and use a token * bucket filter (thanks to ANK). Make * the rates sysctl configurable. * Yu Tianli : Fixed two ugly bugs in icmp_send * - IP option length was accounted wrongly * - ICMP header length was not accounted * at all. * Tristan Greaves : Added sysctl option to ignore bogus * broadcast responses from broken routers. * * To Fix: * * - Should use skb_pull() instead of all the manual checking. * This would also greatly simply some upper layer error handlers. --AK * */ #include <linux/module.h> #include <linux/types.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/string.h> #include <linux/netfilter_ipv4.h> #include <linux/slab.h> #include <net/snmp.h> #include <net/ip.h> #include <net/route.h> #include <net/protocol.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/udp.h> #include <net/raw.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/init.h> #include <asm/system.h> #include <asm/uaccess.h> #include <net/checksum.h> #include <net/xfrm.h> #include <net/inet_common.h> /* * Build xmit assembly blocks */ struct icmp_bxm { struct sk_buff *skb; int offset; int data_len; struct { struct icmphdr icmph; __be32 times[3]; } data; int head_len; struct ip_options replyopts; unsigned char optbuf[40]; }; /* An array of errno for error messages from dest unreach. */ /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. 
*/ const struct icmp_err icmp_err_convert[] = { { .errno = ENETUNREACH, /* ICMP_NET_UNREACH */ .fatal = 0, }, { .errno = EHOSTUNREACH, /* ICMP_HOST_UNREACH */ .fatal = 0, }, { .errno = ENOPROTOOPT /* ICMP_PROT_UNREACH */, .fatal = 1, }, { .errno = ECONNREFUSED, /* ICMP_PORT_UNREACH */ .fatal = 1, }, { .errno = EMSGSIZE, /* ICMP_FRAG_NEEDED */ .fatal = 0, }, { .errno = EOPNOTSUPP, /* ICMP_SR_FAILED */ .fatal = 0, }, { .errno = ENETUNREACH, /* ICMP_NET_UNKNOWN */ .fatal = 1, }, { .errno = EHOSTDOWN, /* ICMP_HOST_UNKNOWN */ .fatal = 1, }, { .errno = ENONET, /* ICMP_HOST_ISOLATED */ .fatal = 1, }, { .errno = ENETUNREACH, /* ICMP_NET_ANO */ .fatal = 1, }, { .errno = EHOSTUNREACH, /* ICMP_HOST_ANO */ .fatal = 1, }, { .errno = ENETUNREACH, /* ICMP_NET_UNR_TOS */ .fatal = 0, }, { .errno = EHOSTUNREACH, /* ICMP_HOST_UNR_TOS */ .fatal = 0, }, { .errno = EHOSTUNREACH, /* ICMP_PKT_FILTERED */ .fatal = 1, }, { .errno = EHOSTUNREACH, /* ICMP_PREC_VIOLATION */ .fatal = 1, }, { .errno = EHOSTUNREACH, /* ICMP_PREC_CUTOFF */ .fatal = 1, }, }; EXPORT_SYMBOL(icmp_err_convert); /* * ICMP control array. This specifies what to do with each ICMP. */ struct icmp_control { void (*handler)(struct sk_buff *skb); short error; /* This ICMP is classed as an error message */ }; static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; /* * The ICMP socket(s). This is the most convenient way to flow control * our ICMP output as well as maintain a clean interface throughout * all layers. All Socketless IP sends will soon be gone. * * On SMP we have one ICMP socket per-cpu. */ static struct sock *icmp_sk(struct net *net) { return net->ipv4.icmp_sk[smp_processor_id()]; } static inline struct sock *icmp_xmit_lock(struct net *net) { struct sock *sk; local_bh_disable(); sk = icmp_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path signals a * dst_link_failure() for an outgoing ICMP packet. */ local_bh_enable(); return NULL; } return sk; } static inline void icmp_xmit_unlock(struct sock *sk) { spin_unlock_bh(&sk->sk_lock.slock); } /* * Send an ICMP frame. */ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, int type, int code) { struct dst_entry *dst = &rt->dst; bool rc = true; if (type > NR_ICMP_TYPES) goto out; /* Don't limit PMTU discovery. */ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) goto out; /* No rate limit on loopback */ if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) goto out; /* Limit if icmp type is enabled in ratemask. */ if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) { if (!rt->peer) rt_bind_peer(rt, 1); rc = inet_peer_xrlim_allow(rt->peer, net->ipv4.sysctl_icmp_ratelimit); } out: return rc; } /* * Maintain the counters used in the SNMP statistics for outgoing ICMP */ void icmp_out_count(struct net *net, unsigned char type) { ICMPMSGOUT_INC_STATS(net, type); ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS); } /* * Checksum each fragment, and on the first include the headers and final * checksum. 
*/ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct icmp_bxm *icmp_param = (struct icmp_bxm *)from; __wsum csum; csum = skb_copy_and_csum_bits(icmp_param->skb, icmp_param->offset + offset, to, len, 0); skb->csum = csum_block_add(skb->csum, csum, odd); if (icmp_pointers[icmp_param->data.icmph.type].error) nf_ct_attach(skb, icmp_param->skb); return 0; } static void icmp_push_reply(struct icmp_bxm *icmp_param, struct ipcm_cookie *ipc, struct rtable **rt) { struct sock *sk; struct sk_buff *skb; sk = icmp_sk(dev_net((*rt)->dst.dev)); if (ip_append_data(sk, icmp_glue_bits, icmp_param, icmp_param->data_len+icmp_param->head_len, icmp_param->head_len, ipc, rt, MSG_DONTWAIT) < 0) { ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS); ip_flush_pending_frames(sk); } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { struct icmphdr *icmph = icmp_hdr(skb); __wsum csum = 0; struct sk_buff *skb1; skb_queue_walk(&sk->sk_write_queue, skb1) { csum = csum_add(csum, skb1->csum); } csum = csum_partial_copy_nocheck((void *)&icmp_param->data, (char *)icmph, icmp_param->head_len, csum); icmph->checksum = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; ip_push_pending_frames(sk); } } /* * Driving logic for building and sending ICMP messages. */ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) { struct ipcm_cookie ipc; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); struct sock *sk; struct inet_sock *inet; __be32 daddr; if (ip_options_echo(&icmp_param->replyopts, skb)) return; sk = icmp_xmit_lock(net); if (sk == NULL) return; inet = inet_sk(sk); icmp_param->data.icmph.checksum = 0; inet->tos = ip_hdr(skb)->tos; daddr = ipc.addr = rt->rt_src; ipc.opt = NULL; ipc.tx_flags = 0; if (icmp_param->replyopts.optlen) { ipc.opt = &icmp_param->replyopts; if (ipc.opt->srr) daddr = icmp_param->replyopts.faddr; } { struct flowi4 fl4 = { .daddr = daddr, .saddr = rt->rt_spec_dst, .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), .flowi4_proto = IPPROTO_ICMP, }; security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) goto out_unlock; } if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type, icmp_param->data.icmph.code)) icmp_push_reply(icmp_param, &ipc, &rt); ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); } static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in, const struct iphdr *iph, __be32 saddr, u8 tos, int type, int code, struct icmp_bxm *param) { struct flowi4 fl4 = { .daddr = (param->replyopts.srr ? param->replyopts.faddr : iph->saddr), .saddr = saddr, .flowi4_tos = RT_TOS(tos), .flowi4_proto = IPPROTO_ICMP, .fl4_icmp_type = type, .fl4_icmp_code = code, }; struct rtable *rt, *rt2; int err; security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4)); rt = __ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return rt; /* No need to clone since we're just using its address. 
*/ rt2 = rt; if (!fl4.saddr) fl4.saddr = rt->rt_src; rt = (struct rtable *) xfrm_lookup(net, &rt->dst, flowi4_to_flowi(&fl4), NULL, 0); if (!IS_ERR(rt)) { if (rt != rt2) return rt; } else if (PTR_ERR(rt) == -EPERM) { rt = NULL; } else return rt; err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET); if (err) goto relookup_failed; if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) { rt2 = __ip_route_output_key(net, &fl4); if (IS_ERR(rt2)) err = PTR_ERR(rt2); } else { struct flowi4 fl4_2 = {}; unsigned long orefdst; fl4_2.daddr = fl4.saddr; rt2 = ip_route_output_key(net, &fl4_2); if (IS_ERR(rt2)) { err = PTR_ERR(rt2); goto relookup_failed; } /* Ugh! */ orefdst = skb_in->_skb_refdst; /* save old refdst */ err = ip_route_input(skb_in, fl4.daddr, fl4.saddr, RT_TOS(tos), rt2->dst.dev); dst_release(&rt2->dst); rt2 = skb_rtable(skb_in); skb_in->_skb_refdst = orefdst; /* restore old refdst */ } if (err) goto relookup_failed; rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4), NULL, XFRM_LOOKUP_ICMP); if (!IS_ERR(rt2)) { dst_release(&rt->dst); rt = rt2; } else if (PTR_ERR(rt2) == -EPERM) { if (rt) dst_release(&rt->dst); return rt2; } else { err = PTR_ERR(rt2); goto relookup_failed; } return rt; relookup_failed: if (rt) return rt; return ERR_PTR(err); } /* * Send an ICMP message in response to a situation * * RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header. * MAY send more (we do). * MUST NOT change this header information. * MUST NOT reply to a multicast/broadcast IP address. * MUST NOT reply to a multicast/broadcast MAC address. * MUST reply to only the first fragment. */ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct iphdr *iph; int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); struct ipcm_cookie ipc; __be32 saddr; u8 tos; struct net *net; struct sock *sk; if (!rt) goto out; net = dev_net(rt->dst.dev); /* * Find the original header. It is expected to be valid, of course. * Check this, icmp_send is called from the most obscure devices * sometimes. */ iph = ip_hdr(skb_in); if ((u8 *)iph < skb_in->head || (skb_in->network_header + sizeof(*iph)) > skb_in->tail) goto out; /* * No replies to physical multicast/broadcast */ if (skb_in->pkt_type != PACKET_HOST) goto out; /* * Now check at the protocol level */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto out; /* * Only reply to fragment 0. We byte re-order the constant * mask for efficiency. */ if (iph->frag_off & htons(IP_OFFSET)) goto out; /* * If we send an ICMP error to an ICMP error a mess would result.. */ if (icmp_pointers[type].error) { /* * We are an error, check if we are replying to an * ICMP error */ if (iph->protocol == IPPROTO_ICMP) { u8 _inner_type, *itp; itp = skb_header_pointer(skb_in, skb_network_header(skb_in) + (iph->ihl << 2) + offsetof(struct icmphdr, type) - skb_in->data, sizeof(_inner_type), &_inner_type); if (itp == NULL) goto out; /* * Assume any unknown ICMP type is an error. This * isn't specified by the RFC, but think about it.. */ if (*itp > NR_ICMP_TYPES || icmp_pointers[*itp].error) goto out; } } sk = icmp_xmit_lock(net); if (sk == NULL) return; /* * Construct source address and options. 
*/ saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) { struct net_device *dev = NULL; rcu_read_lock(); if (rt_is_input_route(rt) && net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) dev = dev_get_by_index_rcu(net, rt->rt_iif); if (dev) saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); else saddr = 0; rcu_read_unlock(); } tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : iph->tos; if (ip_options_echo(&icmp_param.replyopts, skb_in)) goto out_unlock; /* * Prepare data for ICMP header. */ icmp_param.data.icmph.type = type; icmp_param.data.icmph.code = code; icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts; ipc.tx_flags = 0; rt = icmp_route_lookup(net, skb_in, iph, saddr, tos, type, code, &icmp_param); if (IS_ERR(rt)) goto out_unlock; if (!icmpv4_xrlim_allow(net, rt, type, code)) goto ende; /* RFC says return as much as we can without exceeding 576 bytes. */ room = dst_mtu(&rt->dst); if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; room -= sizeof(struct icmphdr); icmp_param.data_len = skb_in->len - icmp_param.offset; if (icmp_param.data_len > room) icmp_param.data_len = room; icmp_param.head_len = sizeof(struct icmphdr); icmp_push_reply(&icmp_param, &ipc, &rt); ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); out:; } EXPORT_SYMBOL(icmp_send); /* * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH. */ static void icmp_unreach(struct sk_buff *skb) { const struct iphdr *iph; struct icmphdr *icmph; int hash, protocol; const struct net_protocol *ipprot; u32 info = 0; struct net *net; net = dev_net(skb_dst(skb)->dev); /* * Incomplete header ? * Only checks for the IP header, there should be an * additional check for longer headers in upper levels. */ if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out_err; icmph = icmp_hdr(skb); iph = (const struct iphdr *)skb->data; if (iph->ihl < 5) /* Mangled header, drop. */ goto out_err; if (icmph->type == ICMP_DEST_UNREACH) { switch (icmph->code & 15) { case ICMP_NET_UNREACH: case ICMP_HOST_UNREACH: case ICMP_PROT_UNREACH: case ICMP_PORT_UNREACH: break; case ICMP_FRAG_NEEDED: if (ipv4_config.no_pmtu_disc) { LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n", &iph->daddr); } else { info = ip_rt_frag_needed(net, iph, ntohs(icmph->un.frag.mtu), skb->dev); if (!info) goto out; } break; case ICMP_SR_FAILED: LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n", &iph->daddr); break; default: break; } if (icmph->code > NR_ICMP_UNREACH) goto out; } else if (icmph->type == ICMP_PARAMETERPROB) info = ntohl(icmph->un.gateway) >> 24; /* * Throw it at our lower layers * * RFC 1122: 3.2.2 MUST extract the protocol ID from the passed * header. * RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the * transport layer. * RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to * transport layer. */ /* * Check the other end isn't violating RFC 1122. Some routers send * bogus responses to broadcast frames. If you see this message * first check your netmask matches at both ends, if it does then * get the other vendor to fix their kit. 
*/ if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { if (net_ratelimit()) printk(KERN_WARNING "%pI4 sent an invalid ICMP " "type %u, code %u " "error to a broadcast: %pI4 on %s\n", &ip_hdr(skb)->saddr, icmph->type, icmph->code, &iph->daddr, skb->dev->name); goto out; } /* Checkin full IP header plus 8 bytes of protocol to * avoid additional coding at protocol handlers. */ if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) goto out; iph = (const struct iphdr *)skb->data; protocol = iph->protocol; /* * Deliver ICMP message to raw sockets. Pretty useless feature? */ raw_icmp_error(skb, protocol, info); hash = protocol & (MAX_INET_PROTOS - 1); rcu_read_lock(); ipprot = rcu_dereference(inet_protos[hash]); if (ipprot && ipprot->err_handler) ipprot->err_handler(skb, info); rcu_read_unlock(); out: return; out_err: ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); goto out; } /* * Handle ICMP_REDIRECT. */ static void icmp_redirect(struct sk_buff *skb) { const struct iphdr *iph; if (skb->len < sizeof(struct iphdr)) goto out_err; /* * Get the copied header of the packet that caused the redirect */ if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out; iph = (const struct iphdr *)skb->data; switch (icmp_hdr(skb)->code & 7) { case ICMP_REDIR_NET: case ICMP_REDIR_NETTOS: /* * As per RFC recommendations now handle it as a host redirect. */ case ICMP_REDIR_HOST: case ICMP_REDIR_HOSTTOS: ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr, icmp_hdr(skb)->un.gateway, iph->saddr, skb->dev); break; } out: return; out_err: ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS); goto out; } /* * Handle ICMP_ECHO ("ping") requests. * * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo * requests. * RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be * included in the reply. * RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring * echo requests, MUST have default=NOT. * See also WRT handling of options once they are done and working. */ static void icmp_echo(struct sk_buff *skb) { struct net *net; net = dev_net(skb_dst(skb)->dev); if (!net->ipv4.sysctl_icmp_echo_ignore_all) { struct icmp_bxm icmp_param; icmp_param.data.icmph = *icmp_hdr(skb); icmp_param.data.icmph.type = ICMP_ECHOREPLY; icmp_param.skb = skb; icmp_param.offset = 0; icmp_param.data_len = skb->len; icmp_param.head_len = sizeof(struct icmphdr); icmp_reply(&icmp_param, skb); } } /* * Handle ICMP Timestamp requests. * RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests. * SHOULD be in the kernel for minimum random latency. * MUST be accurate to a few minutes. * MUST be updated at least at 15Hz. */ static void icmp_timestamp(struct sk_buff *skb) { struct timespec tv; struct icmp_bxm icmp_param; /* * Too short. */ if (skb->len < 4) goto out_err; /* * Fill in the current time as ms since midnight UT: */ getnstimeofday(&tv); icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); icmp_param.data.times[2] = icmp_param.data.times[1]; if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4)) BUG(); icmp_param.data.icmph = *icmp_hdr(skb); icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY; icmp_param.data.icmph.code = 0; icmp_param.skb = skb; icmp_param.offset = 0; icmp_param.data_len = 0; icmp_param.head_len = sizeof(struct icmphdr) + 12; icmp_reply(&icmp_param, skb); out: return; out_err: ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); goto out; } /* * Handle ICMP_ADDRESS_MASK requests. 
(RFC950) * * RFC1122 (3.2.2.9). A host MUST only send replies to * ADDRESS_MASK requests if it's been configured as an address mask * agent. Receiving a request doesn't constitute implicit permission to * act as one. Of course, implementing this correctly requires (SHOULD) * a way to turn the functionality on and off. Another one for sysctl(), * I guess. -- MS * * RFC1812 (4.3.3.9). A router MUST implement it. * A router SHOULD have switch turning it on/off. * This switch MUST be ON by default. * * Gratuitous replies, zero-source replies are not implemented, * that complies with RFC. DO NOT implement them!!! All the idea * of broadcast addrmask replies as specified in RFC950 is broken. * The problem is that it is not uncommon to have several prefixes * on one physical interface. Moreover, addrmask agent can even be * not aware of existing another prefixes. * If source is zero, addrmask agent cannot choose correct prefix. * Gratuitous mask announcements suffer from the same problem. * RFC1812 explains it, but still allows to use ADDRMASK, * that is pretty silly. --ANK * * All these rules are so bizarre, that I removed kernel addrmask * support at all. It is wrong, it is obsolete, nobody uses it in * any case. --ANK * * Furthermore you can do it with a usermode address agent program * anyway... */ static void icmp_address(struct sk_buff *skb) { #if 0 if (net_ratelimit()) printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n"); #endif } /* * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain * loudly if an inconsistency is found. * called with rcu_read_lock() */ static void icmp_address_reply(struct sk_buff *skb) { struct rtable *rt = skb_rtable(skb); struct net_device *dev = skb->dev; struct in_device *in_dev; struct in_ifaddr *ifa; if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC)) return; in_dev = __in_dev_get_rcu(dev); if (!in_dev) return; if (in_dev->ifa_list && IN_DEV_LOG_MARTIANS(in_dev) && IN_DEV_FORWARD(in_dev)) { __be32 _mask, *mp; mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); BUG_ON(mp == NULL); for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { if (*mp == ifa->ifa_mask && inet_ifa_match(rt->rt_src, ifa)) break; } if (!ifa && net_ratelimit()) { printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n", mp, dev->name, &rt->rt_src); } } } static void icmp_discard(struct sk_buff *skb) { } /* * Deal with incoming ICMP packets. */ int icmp_rcv(struct sk_buff *skb) { struct icmphdr *icmph; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); int nh; if (!(sp && sp->xvec[sp->len - 1]->props.flags & XFRM_STATE_ICMP)) goto drop; if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr))) goto drop; nh = skb_network_offset(skb); skb_set_network_header(skb, sizeof(*icmph)); if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb)) goto drop; skb_set_network_header(skb, nh); } ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS); switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (!csum_fold(skb->csum)) break; /* fall through */ case CHECKSUM_NONE: skb->csum = 0; if (__skb_checksum_complete(skb)) goto error; } if (!pskb_pull(skb, sizeof(*icmph))) goto error; icmph = icmp_hdr(skb); ICMPMSGIN_INC_STATS_BH(net, icmph->type); /* * 18 is the highest 'known' ICMP type. Anything else is a mystery * * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently * discarded. 
*/ if (icmph->type > NR_ICMP_TYPES) goto error; /* * Parse the ICMP message */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { /* * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be * silently ignored (we let user decide with a sysctl). * RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently * discarded if to broadcast/multicast. */ if ((icmph->type == ICMP_ECHO || icmph->type == ICMP_TIMESTAMP) && net->ipv4.sysctl_icmp_echo_ignore_broadcasts) { goto error; } if (icmph->type != ICMP_ECHO && icmph->type != ICMP_TIMESTAMP && icmph->type != ICMP_ADDRESS && icmph->type != ICMP_ADDRESSREPLY) { goto error; } } icmp_pointers[icmph->type].handler(skb); drop: kfree_skb(skb); return 0; error: ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); goto drop; } /* * This table is the definition of how we handle ICMP. */ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = { [ICMP_ECHOREPLY] = { .handler = icmp_discard, }, [1] = { .handler = icmp_discard, .error = 1, }, [2] = { .handler = icmp_discard, .error = 1, }, [ICMP_DEST_UNREACH] = { .handler = icmp_unreach, .error = 1, }, [ICMP_SOURCE_QUENCH] = { .handler = icmp_unreach, .error = 1, }, [ICMP_REDIRECT] = { .handler = icmp_redirect, .error = 1, }, [6] = { .handler = icmp_discard, .error = 1, }, [7] = { .handler = icmp_discard, .error = 1, }, [ICMP_ECHO] = { .handler = icmp_echo, }, [9] = { .handler = icmp_discard, .error = 1, }, [10] = { .handler = icmp_discard, .error = 1, }, [ICMP_TIME_EXCEEDED] = { .handler = icmp_unreach, .error = 1, }, [ICMP_PARAMETERPROB] = { .handler = icmp_unreach, .error = 1, }, [ICMP_TIMESTAMP] = { .handler = icmp_timestamp, }, [ICMP_TIMESTAMPREPLY] = { .handler = icmp_discard, }, [ICMP_INFO_REQUEST] = { .handler = icmp_discard, }, [ICMP_INFO_REPLY] = { .handler = icmp_discard, }, [ICMP_ADDRESS] = { .handler = icmp_address, }, [ICMP_ADDRESSREPLY] = { .handler = icmp_address_reply, }, }; static void __net_exit icmp_sk_exit(struct net *net) { int i; for_each_possible_cpu(i) inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]); kfree(net->ipv4.icmp_sk); net->ipv4.icmp_sk = NULL; } static int __net_init icmp_sk_init(struct net *net) { int i, err; net->ipv4.icmp_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); if (net->ipv4.icmp_sk == NULL) return -ENOMEM; for_each_possible_cpu(i) { struct sock *sk; err = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, IPPROTO_ICMP, net); if (err < 0) goto fail; net->ipv4.icmp_sk[i] = sk; /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. */ sk->sk_sndbuf = (2 * ((64 * 1024) + sizeof(struct sk_buff))); /* * Speedup sock_wfree() */ sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT; } /* Control parameters for ECHO replies. */ net->ipv4.sysctl_icmp_echo_ignore_all = 0; net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1; /* Control parameter - ignore bogus broadcast responses? */ net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1; /* * Configurable global rate limit. * * ratelimit defines tokens/packet consumed for dst->rate_token * bucket ratemask defines which icmp types are ratelimited by * setting it's bit position. 
* * default: * dest unreachable (3), source quench (4), * time exceeded (11), parameter problem (12) */ net->ipv4.sysctl_icmp_ratelimit = 1 * HZ; net->ipv4.sysctl_icmp_ratemask = 0x1818; net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0; return 0; fail: for_each_possible_cpu(i) inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]); kfree(net->ipv4.icmp_sk); return err; } static struct pernet_operations __net_initdata icmp_sk_ops = { .init = icmp_sk_init, .exit = icmp_sk_exit, }; int __init icmp_init(void) { return register_pernet_subsys(&icmp_sk_ops); }
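That closes the dataset's code_before field: icmp.c as it stood before the fix. The copy of the file that follows is code_after. The functional delta is small but systematic: struct icmp_bxm drops its bare struct ip_options replyopts plus unsigned char optbuf[40] in favor of a single struct ip_options_data replyopts, and every access gains an extra .opt hop (replyopts.optlen becomes replyopts.opt.opt.optlen; ipc.opt now points at replyopts.opt; ipc.opt->srr becomes ipc.opt->opt.srr). Those access chains imply the nesting sketched below. The exact definitions live in the kernel's include/net/inet_sock.h, so treat the member layout as an educated reconstruction, and the stand-in types as toys so the sketch compiles on its own. (One aside on the init code above: the default ratemask 0x1818 is exactly bits 3, 4, 11 and 12, the four ICMP types named in its comment.)

#include <stdio.h>

/* Toy stand-ins so this compiles outside the kernel tree. */
struct rcu_head   { void *next; void (*func)(struct rcu_head *); };
struct ip_options { unsigned char optlen; unsigned char srr; /* ... */ };

/* Nesting implied by code_after's access chains (reconstruction): */
struct ip_options_rcu  { struct rcu_head rcu; struct ip_options opt; };
struct ip_options_data { struct ip_options_rcu opt; char data[40]; };

int main(void)
{
    struct ip_options_data replyopts = {{{0}}};
    struct ip_options_rcu *p;

    replyopts.opt.opt.optlen = 8;  /* was replyopts.optlen in code_before */
    p = &replyopts.opt;            /* code_after: ipc.opt = &icmp_param.replyopts.opt */
    printf("optlen=%d srr=%d\n", p->opt.optlen, p->opt.srr);
    return 0;
}

Bundling the 40 bytes of option storage inside ip_options_data appears to keep the reply path allocation-free while letting everything that consumes options take an RCU-protected ip_options_rcu pointer.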
/* * NET3: Implementation of the ICMP protocol layer. * * Alan Cox, <alan@lxorguk.ukuu.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Some of the function names and the icmp unreach table for this * module were derived from [icmp.c 1.0.11 06/02/93] by * Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting. * Other than that this module is a complete rewrite. * * Fixes: * Clemens Fruhwirth : introduce global icmp rate limiting * with icmp type masking ability instead * of broken per type icmp timeouts. * Mike Shaver : RFC1122 checks. * Alan Cox : Multicast ping reply as self. * Alan Cox : Fix atomicity lockup in ip_build_xmit * call. * Alan Cox : Added 216,128 byte paths to the MTU * code. * Martin Mares : RFC1812 checks. * Martin Mares : Can be configured to follow redirects * if acting as a router _without_ a * routing protocol (RFC 1812). * Martin Mares : Echo requests may be configured to * be ignored (RFC 1812). * Martin Mares : Limitation of ICMP error message * transmit rate (RFC 1812). * Martin Mares : TOS and Precedence set correctly * (RFC 1812). * Martin Mares : Now copying as much data from the * original packet as we can without * exceeding 576 bytes (RFC 1812). * Willy Konynenberg : Transparent proxying support. * Keith Owens : RFC1191 correction for 4.2BSD based * path MTU bug. * Thomas Quinot : ICMP Dest Unreach codes up to 15 are * valid (RFC 1812). * Andi Kleen : Check all packet lengths properly * and moved all kfree_skb() up to * icmp_rcv. * Andi Kleen : Move the rate limit bookkeeping * into the dest entry and use a token * bucket filter (thanks to ANK). Make * the rates sysctl configurable. * Yu Tianli : Fixed two ugly bugs in icmp_send * - IP option length was accounted wrongly * - ICMP header length was not accounted * at all. * Tristan Greaves : Added sysctl option to ignore bogus * broadcast responses from broken routers. * * To Fix: * * - Should use skb_pull() instead of all the manual checking. * This would also greatly simply some upper layer error handlers. --AK * */ #include <linux/module.h> #include <linux/types.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/fcntl.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/inet.h> #include <linux/inetdevice.h> #include <linux/netdevice.h> #include <linux/string.h> #include <linux/netfilter_ipv4.h> #include <linux/slab.h> #include <net/snmp.h> #include <net/ip.h> #include <net/route.h> #include <net/protocol.h> #include <net/icmp.h> #include <net/tcp.h> #include <net/udp.h> #include <net/raw.h> #include <linux/skbuff.h> #include <net/sock.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/init.h> #include <asm/system.h> #include <asm/uaccess.h> #include <net/checksum.h> #include <net/xfrm.h> #include <net/inet_common.h> /* * Build xmit assembly blocks */ struct icmp_bxm { struct sk_buff *skb; int offset; int data_len; struct { struct icmphdr icmph; __be32 times[3]; } data; int head_len; struct ip_options_data replyopts; }; /* An array of errno for error messages from dest unreach. */ /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. 
*/ const struct icmp_err icmp_err_convert[] = { { .errno = ENETUNREACH, /* ICMP_NET_UNREACH */ .fatal = 0, }, { .errno = EHOSTUNREACH, /* ICMP_HOST_UNREACH */ .fatal = 0, }, { .errno = ENOPROTOOPT /* ICMP_PROT_UNREACH */, .fatal = 1, }, { .errno = ECONNREFUSED, /* ICMP_PORT_UNREACH */ .fatal = 1, }, { .errno = EMSGSIZE, /* ICMP_FRAG_NEEDED */ .fatal = 0, }, { .errno = EOPNOTSUPP, /* ICMP_SR_FAILED */ .fatal = 0, }, { .errno = ENETUNREACH, /* ICMP_NET_UNKNOWN */ .fatal = 1, }, { .errno = EHOSTDOWN, /* ICMP_HOST_UNKNOWN */ .fatal = 1, }, { .errno = ENONET, /* ICMP_HOST_ISOLATED */ .fatal = 1, }, { .errno = ENETUNREACH, /* ICMP_NET_ANO */ .fatal = 1, }, { .errno = EHOSTUNREACH, /* ICMP_HOST_ANO */ .fatal = 1, }, { .errno = ENETUNREACH, /* ICMP_NET_UNR_TOS */ .fatal = 0, }, { .errno = EHOSTUNREACH, /* ICMP_HOST_UNR_TOS */ .fatal = 0, }, { .errno = EHOSTUNREACH, /* ICMP_PKT_FILTERED */ .fatal = 1, }, { .errno = EHOSTUNREACH, /* ICMP_PREC_VIOLATION */ .fatal = 1, }, { .errno = EHOSTUNREACH, /* ICMP_PREC_CUTOFF */ .fatal = 1, }, }; EXPORT_SYMBOL(icmp_err_convert); /* * ICMP control array. This specifies what to do with each ICMP. */ struct icmp_control { void (*handler)(struct sk_buff *skb); short error; /* This ICMP is classed as an error message */ }; static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1]; /* * The ICMP socket(s). This is the most convenient way to flow control * our ICMP output as well as maintain a clean interface throughout * all layers. All Socketless IP sends will soon be gone. * * On SMP we have one ICMP socket per-cpu. */ static struct sock *icmp_sk(struct net *net) { return net->ipv4.icmp_sk[smp_processor_id()]; } static inline struct sock *icmp_xmit_lock(struct net *net) { struct sock *sk; local_bh_disable(); sk = icmp_sk(net); if (unlikely(!spin_trylock(&sk->sk_lock.slock))) { /* This can happen if the output path signals a * dst_link_failure() for an outgoing ICMP packet. */ local_bh_enable(); return NULL; } return sk; } static inline void icmp_xmit_unlock(struct sock *sk) { spin_unlock_bh(&sk->sk_lock.slock); } /* * Send an ICMP frame. */ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, int type, int code) { struct dst_entry *dst = &rt->dst; bool rc = true; if (type > NR_ICMP_TYPES) goto out; /* Don't limit PMTU discovery. */ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) goto out; /* No rate limit on loopback */ if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) goto out; /* Limit if icmp type is enabled in ratemask. */ if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) { if (!rt->peer) rt_bind_peer(rt, 1); rc = inet_peer_xrlim_allow(rt->peer, net->ipv4.sysctl_icmp_ratelimit); } out: return rc; } /* * Maintain the counters used in the SNMP statistics for outgoing ICMP */ void icmp_out_count(struct net *net, unsigned char type) { ICMPMSGOUT_INC_STATS(net, type); ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS); } /* * Checksum each fragment, and on the first include the headers and final * checksum. 
*/ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) { struct icmp_bxm *icmp_param = (struct icmp_bxm *)from; __wsum csum; csum = skb_copy_and_csum_bits(icmp_param->skb, icmp_param->offset + offset, to, len, 0); skb->csum = csum_block_add(skb->csum, csum, odd); if (icmp_pointers[icmp_param->data.icmph.type].error) nf_ct_attach(skb, icmp_param->skb); return 0; } static void icmp_push_reply(struct icmp_bxm *icmp_param, struct ipcm_cookie *ipc, struct rtable **rt) { struct sock *sk; struct sk_buff *skb; sk = icmp_sk(dev_net((*rt)->dst.dev)); if (ip_append_data(sk, icmp_glue_bits, icmp_param, icmp_param->data_len+icmp_param->head_len, icmp_param->head_len, ipc, rt, MSG_DONTWAIT) < 0) { ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS); ip_flush_pending_frames(sk); } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { struct icmphdr *icmph = icmp_hdr(skb); __wsum csum = 0; struct sk_buff *skb1; skb_queue_walk(&sk->sk_write_queue, skb1) { csum = csum_add(csum, skb1->csum); } csum = csum_partial_copy_nocheck((void *)&icmp_param->data, (char *)icmph, icmp_param->head_len, csum); icmph->checksum = csum_fold(csum); skb->ip_summed = CHECKSUM_NONE; ip_push_pending_frames(sk); } } /* * Driving logic for building and sending ICMP messages. */ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) { struct ipcm_cookie ipc; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); struct sock *sk; struct inet_sock *inet; __be32 daddr; if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb)) return; sk = icmp_xmit_lock(net); if (sk == NULL) return; inet = inet_sk(sk); icmp_param->data.icmph.checksum = 0; inet->tos = ip_hdr(skb)->tos; daddr = ipc.addr = rt->rt_src; ipc.opt = NULL; ipc.tx_flags = 0; if (icmp_param->replyopts.opt.opt.optlen) { ipc.opt = &icmp_param->replyopts.opt; if (ipc.opt->opt.srr) daddr = icmp_param->replyopts.opt.opt.faddr; } { struct flowi4 fl4 = { .daddr = daddr, .saddr = rt->rt_spec_dst, .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), .flowi4_proto = IPPROTO_ICMP, }; security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) goto out_unlock; } if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type, icmp_param->data.icmph.code)) icmp_push_reply(icmp_param, &ipc, &rt); ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); } static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in, const struct iphdr *iph, __be32 saddr, u8 tos, int type, int code, struct icmp_bxm *param) { struct flowi4 fl4 = { .daddr = (param->replyopts.opt.opt.srr ? param->replyopts.opt.opt.faddr : iph->saddr), .saddr = saddr, .flowi4_tos = RT_TOS(tos), .flowi4_proto = IPPROTO_ICMP, .fl4_icmp_type = type, .fl4_icmp_code = code, }; struct rtable *rt, *rt2; int err; security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4)); rt = __ip_route_output_key(net, &fl4); if (IS_ERR(rt)) return rt; /* No need to clone since we're just using its address. 
*/ rt2 = rt; if (!fl4.saddr) fl4.saddr = rt->rt_src; rt = (struct rtable *) xfrm_lookup(net, &rt->dst, flowi4_to_flowi(&fl4), NULL, 0); if (!IS_ERR(rt)) { if (rt != rt2) return rt; } else if (PTR_ERR(rt) == -EPERM) { rt = NULL; } else return rt; err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET); if (err) goto relookup_failed; if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) { rt2 = __ip_route_output_key(net, &fl4); if (IS_ERR(rt2)) err = PTR_ERR(rt2); } else { struct flowi4 fl4_2 = {}; unsigned long orefdst; fl4_2.daddr = fl4.saddr; rt2 = ip_route_output_key(net, &fl4_2); if (IS_ERR(rt2)) { err = PTR_ERR(rt2); goto relookup_failed; } /* Ugh! */ orefdst = skb_in->_skb_refdst; /* save old refdst */ err = ip_route_input(skb_in, fl4.daddr, fl4.saddr, RT_TOS(tos), rt2->dst.dev); dst_release(&rt2->dst); rt2 = skb_rtable(skb_in); skb_in->_skb_refdst = orefdst; /* restore old refdst */ } if (err) goto relookup_failed; rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4), NULL, XFRM_LOOKUP_ICMP); if (!IS_ERR(rt2)) { dst_release(&rt->dst); rt = rt2; } else if (PTR_ERR(rt2) == -EPERM) { if (rt) dst_release(&rt->dst); return rt2; } else { err = PTR_ERR(rt2); goto relookup_failed; } return rt; relookup_failed: if (rt) return rt; return ERR_PTR(err); } /* * Send an ICMP message in response to a situation * * RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header. * MAY send more (we do). * MUST NOT change this header information. * MUST NOT reply to a multicast/broadcast IP address. * MUST NOT reply to a multicast/broadcast MAC address. * MUST reply to only the first fragment. */ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct iphdr *iph; int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); struct ipcm_cookie ipc; __be32 saddr; u8 tos; struct net *net; struct sock *sk; if (!rt) goto out; net = dev_net(rt->dst.dev); /* * Find the original header. It is expected to be valid, of course. * Check this, icmp_send is called from the most obscure devices * sometimes. */ iph = ip_hdr(skb_in); if ((u8 *)iph < skb_in->head || (skb_in->network_header + sizeof(*iph)) > skb_in->tail) goto out; /* * No replies to physical multicast/broadcast */ if (skb_in->pkt_type != PACKET_HOST) goto out; /* * Now check at the protocol level */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto out; /* * Only reply to fragment 0. We byte re-order the constant * mask for efficiency. */ if (iph->frag_off & htons(IP_OFFSET)) goto out; /* * If we send an ICMP error to an ICMP error a mess would result.. */ if (icmp_pointers[type].error) { /* * We are an error, check if we are replying to an * ICMP error */ if (iph->protocol == IPPROTO_ICMP) { u8 _inner_type, *itp; itp = skb_header_pointer(skb_in, skb_network_header(skb_in) + (iph->ihl << 2) + offsetof(struct icmphdr, type) - skb_in->data, sizeof(_inner_type), &_inner_type); if (itp == NULL) goto out; /* * Assume any unknown ICMP type is an error. This * isn't specified by the RFC, but think about it.. */ if (*itp > NR_ICMP_TYPES || icmp_pointers[*itp].error) goto out; } } sk = icmp_xmit_lock(net); if (sk == NULL) return; /* * Construct source address and options. 
*/ saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) { struct net_device *dev = NULL; rcu_read_lock(); if (rt_is_input_route(rt) && net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) dev = dev_get_by_index_rcu(net, rt->rt_iif); if (dev) saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); else saddr = 0; rcu_read_unlock(); } tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : iph->tos; if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in)) goto out_unlock; /* * Prepare data for ICMP header. */ icmp_param.data.icmph.type = type; icmp_param.data.icmph.code = code; icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; ipc.tx_flags = 0; rt = icmp_route_lookup(net, skb_in, iph, saddr, tos, type, code, &icmp_param); if (IS_ERR(rt)) goto out_unlock; if (!icmpv4_xrlim_allow(net, rt, type, code)) goto ende; /* RFC says return as much as we can without exceeding 576 bytes. */ room = dst_mtu(&rt->dst); if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen; room -= sizeof(struct icmphdr); icmp_param.data_len = skb_in->len - icmp_param.offset; if (icmp_param.data_len > room) icmp_param.data_len = room; icmp_param.head_len = sizeof(struct icmphdr); icmp_push_reply(&icmp_param, &ipc, &rt); ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); out:; } EXPORT_SYMBOL(icmp_send); /* * Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH. */ static void icmp_unreach(struct sk_buff *skb) { const struct iphdr *iph; struct icmphdr *icmph; int hash, protocol; const struct net_protocol *ipprot; u32 info = 0; struct net *net; net = dev_net(skb_dst(skb)->dev); /* * Incomplete header ? * Only checks for the IP header, there should be an * additional check for longer headers in upper levels. */ if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out_err; icmph = icmp_hdr(skb); iph = (const struct iphdr *)skb->data; if (iph->ihl < 5) /* Mangled header, drop. */ goto out_err; if (icmph->type == ICMP_DEST_UNREACH) { switch (icmph->code & 15) { case ICMP_NET_UNREACH: case ICMP_HOST_UNREACH: case ICMP_PROT_UNREACH: case ICMP_PORT_UNREACH: break; case ICMP_FRAG_NEEDED: if (ipv4_config.no_pmtu_disc) { LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n", &iph->daddr); } else { info = ip_rt_frag_needed(net, iph, ntohs(icmph->un.frag.mtu), skb->dev); if (!info) goto out; } break; case ICMP_SR_FAILED: LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n", &iph->daddr); break; default: break; } if (icmph->code > NR_ICMP_UNREACH) goto out; } else if (icmph->type == ICMP_PARAMETERPROB) info = ntohl(icmph->un.gateway) >> 24; /* * Throw it at our lower layers * * RFC 1122: 3.2.2 MUST extract the protocol ID from the passed * header. * RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the * transport layer. * RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to * transport layer. */ /* * Check the other end isn't violating RFC 1122. Some routers send * bogus responses to broadcast frames. If you see this message * first check your netmask matches at both ends, if it does then * get the other vendor to fix their kit. 
*/ if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { if (net_ratelimit()) printk(KERN_WARNING "%pI4 sent an invalid ICMP " "type %u, code %u " "error to a broadcast: %pI4 on %s\n", &ip_hdr(skb)->saddr, icmph->type, icmph->code, &iph->daddr, skb->dev->name); goto out; } /* Checkin full IP header plus 8 bytes of protocol to * avoid additional coding at protocol handlers. */ if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) goto out; iph = (const struct iphdr *)skb->data; protocol = iph->protocol; /* * Deliver ICMP message to raw sockets. Pretty useless feature? */ raw_icmp_error(skb, protocol, info); hash = protocol & (MAX_INET_PROTOS - 1); rcu_read_lock(); ipprot = rcu_dereference(inet_protos[hash]); if (ipprot && ipprot->err_handler) ipprot->err_handler(skb, info); rcu_read_unlock(); out: return; out_err: ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); goto out; } /* * Handle ICMP_REDIRECT. */ static void icmp_redirect(struct sk_buff *skb) { const struct iphdr *iph; if (skb->len < sizeof(struct iphdr)) goto out_err; /* * Get the copied header of the packet that caused the redirect */ if (!pskb_may_pull(skb, sizeof(struct iphdr))) goto out; iph = (const struct iphdr *)skb->data; switch (icmp_hdr(skb)->code & 7) { case ICMP_REDIR_NET: case ICMP_REDIR_NETTOS: /* * As per RFC recommendations now handle it as a host redirect. */ case ICMP_REDIR_HOST: case ICMP_REDIR_HOSTTOS: ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr, icmp_hdr(skb)->un.gateway, iph->saddr, skb->dev); break; } out: return; out_err: ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS); goto out; } /* * Handle ICMP_ECHO ("ping") requests. * * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo * requests. * RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be * included in the reply. * RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring * echo requests, MUST have default=NOT. * See also WRT handling of options once they are done and working. */ static void icmp_echo(struct sk_buff *skb) { struct net *net; net = dev_net(skb_dst(skb)->dev); if (!net->ipv4.sysctl_icmp_echo_ignore_all) { struct icmp_bxm icmp_param; icmp_param.data.icmph = *icmp_hdr(skb); icmp_param.data.icmph.type = ICMP_ECHOREPLY; icmp_param.skb = skb; icmp_param.offset = 0; icmp_param.data_len = skb->len; icmp_param.head_len = sizeof(struct icmphdr); icmp_reply(&icmp_param, skb); } } /* * Handle ICMP Timestamp requests. * RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests. * SHOULD be in the kernel for minimum random latency. * MUST be accurate to a few minutes. * MUST be updated at least at 15Hz. */ static void icmp_timestamp(struct sk_buff *skb) { struct timespec tv; struct icmp_bxm icmp_param; /* * Too short. */ if (skb->len < 4) goto out_err; /* * Fill in the current time as ms since midnight UT: */ getnstimeofday(&tv); icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC); icmp_param.data.times[2] = icmp_param.data.times[1]; if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4)) BUG(); icmp_param.data.icmph = *icmp_hdr(skb); icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY; icmp_param.data.icmph.code = 0; icmp_param.skb = skb; icmp_param.offset = 0; icmp_param.data_len = 0; icmp_param.head_len = sizeof(struct icmphdr) + 12; icmp_reply(&icmp_param, skb); out: return; out_err: ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); goto out; } /* * Handle ICMP_ADDRESS_MASK requests. 
(RFC950) * * RFC1122 (3.2.2.9). A host MUST only send replies to * ADDRESS_MASK requests if it's been configured as an address mask * agent. Receiving a request doesn't constitute implicit permission to * act as one. Of course, implementing this correctly requires (SHOULD) * a way to turn the functionality on and off. Another one for sysctl(), * I guess. -- MS * * RFC1812 (4.3.3.9). A router MUST implement it. * A router SHOULD have switch turning it on/off. * This switch MUST be ON by default. * * Gratuitous replies, zero-source replies are not implemented, * that complies with RFC. DO NOT implement them!!! All the idea * of broadcast addrmask replies as specified in RFC950 is broken. * The problem is that it is not uncommon to have several prefixes * on one physical interface. Moreover, addrmask agent can even be * not aware of existing another prefixes. * If source is zero, addrmask agent cannot choose correct prefix. * Gratuitous mask announcements suffer from the same problem. * RFC1812 explains it, but still allows to use ADDRMASK, * that is pretty silly. --ANK * * All these rules are so bizarre, that I removed kernel addrmask * support at all. It is wrong, it is obsolete, nobody uses it in * any case. --ANK * * Furthermore you can do it with a usermode address agent program * anyway... */ static void icmp_address(struct sk_buff *skb) { #if 0 if (net_ratelimit()) printk(KERN_DEBUG "a guy asks for address mask. Who is it?\n"); #endif } /* * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain * loudly if an inconsistency is found. * called with rcu_read_lock() */ static void icmp_address_reply(struct sk_buff *skb) { struct rtable *rt = skb_rtable(skb); struct net_device *dev = skb->dev; struct in_device *in_dev; struct in_ifaddr *ifa; if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC)) return; in_dev = __in_dev_get_rcu(dev); if (!in_dev) return; if (in_dev->ifa_list && IN_DEV_LOG_MARTIANS(in_dev) && IN_DEV_FORWARD(in_dev)) { __be32 _mask, *mp; mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask); BUG_ON(mp == NULL); for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { if (*mp == ifa->ifa_mask && inet_ifa_match(rt->rt_src, ifa)) break; } if (!ifa && net_ratelimit()) { printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n", mp, dev->name, &rt->rt_src); } } } static void icmp_discard(struct sk_buff *skb) { } /* * Deal with incoming ICMP packets. */ int icmp_rcv(struct sk_buff *skb) { struct icmphdr *icmph; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); int nh; if (!(sp && sp->xvec[sp->len - 1]->props.flags & XFRM_STATE_ICMP)) goto drop; if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr))) goto drop; nh = skb_network_offset(skb); skb_set_network_header(skb, sizeof(*icmph)); if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, skb)) goto drop; skb_set_network_header(skb, nh); } ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS); switch (skb->ip_summed) { case CHECKSUM_COMPLETE: if (!csum_fold(skb->csum)) break; /* fall through */ case CHECKSUM_NONE: skb->csum = 0; if (__skb_checksum_complete(skb)) goto error; } if (!pskb_pull(skb, sizeof(*icmph))) goto error; icmph = icmp_hdr(skb); ICMPMSGIN_INC_STATS_BH(net, icmph->type); /* * 18 is the highest 'known' ICMP type. Anything else is a mystery * * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently * discarded. 
*/ if (icmph->type > NR_ICMP_TYPES) goto error; /* * Parse the ICMP message */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { /* * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be * silently ignored (we let user decide with a sysctl). * RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently * discarded if to broadcast/multicast. */ if ((icmph->type == ICMP_ECHO || icmph->type == ICMP_TIMESTAMP) && net->ipv4.sysctl_icmp_echo_ignore_broadcasts) { goto error; } if (icmph->type != ICMP_ECHO && icmph->type != ICMP_TIMESTAMP && icmph->type != ICMP_ADDRESS && icmph->type != ICMP_ADDRESSREPLY) { goto error; } } icmp_pointers[icmph->type].handler(skb); drop: kfree_skb(skb); return 0; error: ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); goto drop; } /* * This table is the definition of how we handle ICMP. */ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = { [ICMP_ECHOREPLY] = { .handler = icmp_discard, }, [1] = { .handler = icmp_discard, .error = 1, }, [2] = { .handler = icmp_discard, .error = 1, }, [ICMP_DEST_UNREACH] = { .handler = icmp_unreach, .error = 1, }, [ICMP_SOURCE_QUENCH] = { .handler = icmp_unreach, .error = 1, }, [ICMP_REDIRECT] = { .handler = icmp_redirect, .error = 1, }, [6] = { .handler = icmp_discard, .error = 1, }, [7] = { .handler = icmp_discard, .error = 1, }, [ICMP_ECHO] = { .handler = icmp_echo, }, [9] = { .handler = icmp_discard, .error = 1, }, [10] = { .handler = icmp_discard, .error = 1, }, [ICMP_TIME_EXCEEDED] = { .handler = icmp_unreach, .error = 1, }, [ICMP_PARAMETERPROB] = { .handler = icmp_unreach, .error = 1, }, [ICMP_TIMESTAMP] = { .handler = icmp_timestamp, }, [ICMP_TIMESTAMPREPLY] = { .handler = icmp_discard, }, [ICMP_INFO_REQUEST] = { .handler = icmp_discard, }, [ICMP_INFO_REPLY] = { .handler = icmp_discard, }, [ICMP_ADDRESS] = { .handler = icmp_address, }, [ICMP_ADDRESSREPLY] = { .handler = icmp_address_reply, }, }; static void __net_exit icmp_sk_exit(struct net *net) { int i; for_each_possible_cpu(i) inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]); kfree(net->ipv4.icmp_sk); net->ipv4.icmp_sk = NULL; } static int __net_init icmp_sk_init(struct net *net) { int i, err; net->ipv4.icmp_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL); if (net->ipv4.icmp_sk == NULL) return -ENOMEM; for_each_possible_cpu(i) { struct sock *sk; err = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, IPPROTO_ICMP, net); if (err < 0) goto fail; net->ipv4.icmp_sk[i] = sk; /* Enough space for 2 64K ICMP packets, including * sk_buff struct overhead. */ sk->sk_sndbuf = (2 * ((64 * 1024) + sizeof(struct sk_buff))); /* * Speedup sock_wfree() */ sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT; } /* Control parameters for ECHO replies. */ net->ipv4.sysctl_icmp_echo_ignore_all = 0; net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1; /* Control parameter - ignore bogus broadcast responses? */ net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1; /* * Configurable global rate limit. * * ratelimit defines tokens/packet consumed for dst->rate_token * bucket ratemask defines which icmp types are ratelimited by * setting it's bit position. 
* * default: * dest unreachable (3), source quench (4), * time exceeded (11), parameter problem (12) */ net->ipv4.sysctl_icmp_ratelimit = 1 * HZ; net->ipv4.sysctl_icmp_ratemask = 0x1818; net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0; return 0; fail: for_each_possible_cpu(i) inet_ctl_sock_destroy(net->ipv4.icmp_sk[i]); kfree(net->ipv4.icmp_sk); return err; } static struct pernet_operations __net_initdata icmp_sk_ops = { .init = icmp_sk_init, .exit = icmp_sk_exit, }; int __init icmp_init(void) { return register_pernet_subsys(&icmp_sk_ops); }
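The icmp_sk_init() comment above explains the ratemask convention: each rate-limited ICMP type contributes the bit at its type number. A quick worked check that the listed defaults really produce 0x1818 (type numbers per RFC 792; this standalone snippet is illustrative, not kernel code):

#include <stdio.h>

int main(void)
{
	/* dest unreachable (3), source quench (4),
	 * time exceeded (11), parameter problem (12) */
	unsigned mask = (1u << 3) | (1u << 4) | (1u << 11) | (1u << 12);
	printf("0x%x\n", mask);	/* prints 0x1818, the sysctl_icmp_ratemask default */
	return 0;
}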
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct iphdr *iph; int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); struct ipcm_cookie ipc; __be32 saddr; u8 tos; struct net *net; struct sock *sk; if (!rt) goto out; net = dev_net(rt->dst.dev); /* * Find the original header. It is expected to be valid, of course. * Check this, icmp_send is called from the most obscure devices * sometimes. */ iph = ip_hdr(skb_in); if ((u8 *)iph < skb_in->head || (skb_in->network_header + sizeof(*iph)) > skb_in->tail) goto out; /* * No replies to physical multicast/broadcast */ if (skb_in->pkt_type != PACKET_HOST) goto out; /* * Now check at the protocol level */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto out; /* * Only reply to fragment 0. We byte re-order the constant * mask for efficiency. */ if (iph->frag_off & htons(IP_OFFSET)) goto out; /* * If we send an ICMP error to an ICMP error a mess would result.. */ if (icmp_pointers[type].error) { /* * We are an error, check if we are replying to an * ICMP error */ if (iph->protocol == IPPROTO_ICMP) { u8 _inner_type, *itp; itp = skb_header_pointer(skb_in, skb_network_header(skb_in) + (iph->ihl << 2) + offsetof(struct icmphdr, type) - skb_in->data, sizeof(_inner_type), &_inner_type); if (itp == NULL) goto out; /* * Assume any unknown ICMP type is an error. This * isn't specified by the RFC, but think about it.. */ if (*itp > NR_ICMP_TYPES || icmp_pointers[*itp].error) goto out; } } sk = icmp_xmit_lock(net); if (sk == NULL) return; /* * Construct source address and options. */ saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) { struct net_device *dev = NULL; rcu_read_lock(); if (rt_is_input_route(rt) && net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) dev = dev_get_by_index_rcu(net, rt->rt_iif); if (dev) saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); else saddr = 0; rcu_read_unlock(); } tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : iph->tos; if (ip_options_echo(&icmp_param.replyopts, skb_in)) goto out_unlock; /* * Prepare data for ICMP header. */ icmp_param.data.icmph.type = type; icmp_param.data.icmph.code = code; icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts; ipc.tx_flags = 0; rt = icmp_route_lookup(net, skb_in, iph, saddr, tos, type, code, &icmp_param); if (IS_ERR(rt)) goto out_unlock; if (!icmpv4_xrlim_allow(net, rt, type, code)) goto ende; /* RFC says return as much as we can without exceeding 576 bytes. */ room = dst_mtu(&rt->dst); if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; room -= sizeof(struct icmphdr); icmp_param.data_len = skb_in->len - icmp_param.offset; if (icmp_param.data_len > room) icmp_param.data_len = room; icmp_param.head_len = sizeof(struct icmphdr); icmp_push_reply(&icmp_param, &ipc, &rt); ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); out:; }
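func_before above also shows the RFC 1122 sizing rule in miniature: the reply is capped at 576 bytes, then IP and ICMP header overhead is subtracted before data_len is clamped. A worked example with assumed inputs (1500-byte MTU, no IP options echoed):

#include <stdio.h>

int main(void)
{
	int mtu = 1500, optlen = 0;		/* assumed inputs */
	int room = mtu > 576 ? 576 : mtu;	/* RFC 1122: stay within 576 bytes */
	room -= 20 + optlen;			/* sizeof(struct iphdr) + option length */
	room -= 8;				/* sizeof(struct icmphdr) */
	printf("%d\n", room);			/* 548 bytes of quoted payload */
	return 0;
}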
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) { struct iphdr *iph; int room; struct icmp_bxm icmp_param; struct rtable *rt = skb_rtable(skb_in); struct ipcm_cookie ipc; __be32 saddr; u8 tos; struct net *net; struct sock *sk; if (!rt) goto out; net = dev_net(rt->dst.dev); /* * Find the original header. It is expected to be valid, of course. * Check this, icmp_send is called from the most obscure devices * sometimes. */ iph = ip_hdr(skb_in); if ((u8 *)iph < skb_in->head || (skb_in->network_header + sizeof(*iph)) > skb_in->tail) goto out; /* * No replies to physical multicast/broadcast */ if (skb_in->pkt_type != PACKET_HOST) goto out; /* * Now check at the protocol level */ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto out; /* * Only reply to fragment 0. We byte re-order the constant * mask for efficiency. */ if (iph->frag_off & htons(IP_OFFSET)) goto out; /* * If we send an ICMP error to an ICMP error a mess would result.. */ if (icmp_pointers[type].error) { /* * We are an error, check if we are replying to an * ICMP error */ if (iph->protocol == IPPROTO_ICMP) { u8 _inner_type, *itp; itp = skb_header_pointer(skb_in, skb_network_header(skb_in) + (iph->ihl << 2) + offsetof(struct icmphdr, type) - skb_in->data, sizeof(_inner_type), &_inner_type); if (itp == NULL) goto out; /* * Assume any unknown ICMP type is an error. This * isn't specified by the RFC, but think about it.. */ if (*itp > NR_ICMP_TYPES || icmp_pointers[*itp].error) goto out; } } sk = icmp_xmit_lock(net); if (sk == NULL) return; /* * Construct source address and options. */ saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) { struct net_device *dev = NULL; rcu_read_lock(); if (rt_is_input_route(rt) && net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) dev = dev_get_by_index_rcu(net, rt->rt_iif); if (dev) saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); else saddr = 0; rcu_read_unlock(); } tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) | IPTOS_PREC_INTERNETCONTROL) : iph->tos; if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in)) goto out_unlock; /* * Prepare data for ICMP header. */ icmp_param.data.icmph.type = type; icmp_param.data.icmph.code = code; icmp_param.data.icmph.un.gateway = info; icmp_param.data.icmph.checksum = 0; icmp_param.skb = skb_in; icmp_param.offset = skb_network_offset(skb_in); inet_sk(sk)->tos = tos; ipc.addr = iph->saddr; ipc.opt = &icmp_param.replyopts.opt; ipc.tx_flags = 0; rt = icmp_route_lookup(net, skb_in, iph, saddr, tos, type, code, &icmp_param); if (IS_ERR(rt)) goto out_unlock; if (!icmpv4_xrlim_allow(net, rt, type, code)) goto ende; /* RFC says return as much as we can without exceeding 576 bytes. */ room = dst_mtu(&rt->dst); if (room > 576) room = 576; room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen; room -= sizeof(struct icmphdr); icmp_param.data_len = skb_in->len - icmp_param.offset; if (icmp_param.data_len > room) icmp_param.data_len = room; icmp_param.head_len = sizeof(struct icmphdr); icmp_push_reply(&icmp_param, &ipc, &rt); ende: ip_rt_put(rt); out_unlock: icmp_xmit_unlock(sk); out:; }
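Placed side by side, the three expressions that changed (condensed from the two versions above) show the fix is purely a change of where the echoed options live:

/* func_before: flat struct ip_options plus a separate optbuf[40] */
ip_options_echo(&icmp_param.replyopts, skb_in);
ipc.opt = &icmp_param.replyopts;
room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;

/* func_after: ip_options_data wrapper, options reached via .opt.opt */
ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in);
ipc.opt = &icmp_param.replyopts.opt;
room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;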
{'added': [(111, '\tstruct ip_options_data replyopts;'), (335, '\tif (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))'), (349, '\tif (icmp_param->replyopts.opt.opt.optlen) {'), (350, '\t\tipc.opt = &icmp_param->replyopts.opt;'), (351, '\t\tif (ipc.opt->opt.srr)'), (352, '\t\t\tdaddr = icmp_param->replyopts.opt.opt.faddr;'), (381, '\t\t.daddr = (param->replyopts.opt.opt.srr ?'), (382, '\t\t\t param->replyopts.opt.opt.faddr : iph->saddr),'), (583, '\tif (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))'), (599, '\tipc.opt = &icmp_param.replyopts.opt;'), (615, '\troom -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;')], 'deleted': [(111, '\tstruct ip_options replyopts;'), (112, '\tunsigned char optbuf[40];'), (336, '\tif (ip_options_echo(&icmp_param->replyopts, skb))'), (350, '\tif (icmp_param->replyopts.optlen) {'), (351, '\t\tipc.opt = &icmp_param->replyopts;'), (352, '\t\tif (ipc.opt->srr)'), (353, '\t\t\tdaddr = icmp_param->replyopts.faddr;'), (382, '\t\t.daddr = (param->replyopts.srr ?'), (383, '\t\t\t param->replyopts.faddr : iph->saddr),'), (584, '\tif (ip_options_echo(&icmp_param.replyopts, skb_in))'), (600, '\tipc.opt = &icmp_param.replyopts;'), (616, '\troom -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;')]}
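The double .opt.opt in the diff reads naturally once the two layouts are compared. A sketch of the relevant definitions, reconstructed from the kernel headers of that era (include/net/inet_sock.h), so treat it as approximate rather than authoritative:

/* old icmp_bxm fields: bare options plus a separate 40-byte buffer */
struct ip_options	replyopts;
unsigned char		optbuf[40];

/* new layout: ip_options_data bundles an RCU-capable wrapper with the
 * 40-byte option area, hence replyopts.opt.opt for the options proper */
struct ip_options_rcu {
	struct rcu_head		rcu;
	struct ip_options	opt;
};

struct ip_options_data {
	struct ip_options_rcu	opt;
	char			data[40];
};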
num_lines_added: 11
num_lines_deleted: 12
num_lines_in_file: 764
num_tokens_in_file: 4,430
num_lines_in_method: 95
num_tokens_in_method: 630
method_complexity: 23
repo: https://github.com/torvalds/linux
cve_id: CVE-2012-3552
cwe_id: CWE-362
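CWE-362 marks this record as a race condition: before the fix, a socket's IP options could be freed and replaced by setsockopt() on one CPU while another CPU was still dereferencing them. Wrapping the options in ip_options_rcu lets readers follow the standard RCU pattern; a minimal sketch of the reader side (illustrative, not a verbatim kernel excerpt):

struct ip_options_rcu *inet_opt;

rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);	/* pointer sampled under RCU */
if (inet_opt && inet_opt->opt.optlen) {
	/* consult or copy inet_opt->opt only inside the read section;
	 * a concurrent writer must defer the free via call_rcu() */
}
rcu_read_unlock();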
id: 978
file_name: revision.c
programming_language: C
method_name: show_object_with_name
#include "cache.h" #include "tag.h" #include "blob.h" #include "tree.h" #include "commit.h" #include "diff.h" #include "refs.h" #include "revision.h" #include "graph.h" #include "grep.h" #include "reflog-walk.h" #include "patch-ids.h" #include "decorate.h" #include "log-tree.h" #include "string-list.h" #include "line-log.h" #include "mailmap.h" #include "commit-slab.h" #include "dir.h" #include "cache-tree.h" #include "bisect.h" volatile show_early_output_fn_t show_early_output; static const char *term_bad; static const char *term_good; char *path_name(struct strbuf *path, const char *name) { struct strbuf ret = STRBUF_INIT; if (path) strbuf_addbuf(&ret, path); strbuf_addstr(&ret, name); return strbuf_detach(&ret, NULL); } void show_object_with_name(FILE *out, struct object *obj, struct strbuf *path, const char *component) { char *name = path_name(path, component); char *p; fprintf(out, "%s ", oid_to_hex(&obj->oid)); for (p = name; *p && *p != '\n'; p++) fputc(*p, out); fputc('\n', out); free(name); } static void mark_blob_uninteresting(struct blob *blob) { if (!blob) return; if (blob->object.flags & UNINTERESTING) return; blob->object.flags |= UNINTERESTING; } static void mark_tree_contents_uninteresting(struct tree *tree) { struct tree_desc desc; struct name_entry entry; struct object *obj = &tree->object; if (!has_object_file(&obj->oid)) return; if (parse_tree(tree) < 0) die("bad tree %s", oid_to_hex(&obj->oid)); init_tree_desc(&desc, tree->buffer, tree->size); while (tree_entry(&desc, &entry)) { switch (object_type(entry.mode)) { case OBJ_TREE: mark_tree_uninteresting(lookup_tree(entry.sha1)); break; case OBJ_BLOB: mark_blob_uninteresting(lookup_blob(entry.sha1)); break; default: /* Subproject commit - not in this repository */ break; } } /* * We don't care about the tree any more * after it has been marked uninteresting. */ free_tree_buffer(tree); } void mark_tree_uninteresting(struct tree *tree) { struct object *obj; if (!tree) return; obj = &tree->object; if (obj->flags & UNINTERESTING) return; obj->flags |= UNINTERESTING; mark_tree_contents_uninteresting(tree); } void mark_parents_uninteresting(struct commit *commit) { struct commit_list *parents = NULL, *l; for (l = commit->parents; l; l = l->next) commit_list_insert(l->item, &parents); while (parents) { struct commit *commit = pop_commit(&parents); while (commit) { /* * A missing commit is ok iff its parent is marked * uninteresting. * * We just mark such a thing parsed, so that when * it is popped next time around, we won't be trying * to parse it and get an error. */ if (!has_object_file(&commit->object.oid)) commit->object.parsed = 1; if (commit->object.flags & UNINTERESTING) break; commit->object.flags |= UNINTERESTING; /* * Normally we haven't parsed the parent * yet, so we won't have a parent of a parent * here. However, it may turn out that we've * reached this commit some other way (where it * wasn't uninteresting), in which case we need * to mark its parents recursively too.. 
*/ if (!commit->parents) break; for (l = commit->parents->next; l; l = l->next) commit_list_insert(l->item, &parents); commit = commit->parents->item; } } } static void add_pending_object_with_path(struct rev_info *revs, struct object *obj, const char *name, unsigned mode, const char *path) { if (!obj) return; if (revs->no_walk && (obj->flags & UNINTERESTING)) revs->no_walk = 0; if (revs->reflog_info && obj->type == OBJ_COMMIT) { struct strbuf buf = STRBUF_INIT; int len = interpret_branch_name(name, 0, &buf); int st; if (0 < len && name[len] && buf.len) strbuf_addstr(&buf, name + len); st = add_reflog_for_walk(revs->reflog_info, (struct commit *)obj, buf.buf[0] ? buf.buf: name); strbuf_release(&buf); if (st) return; } add_object_array_with_path(obj, name, &revs->pending, mode, path); } static void add_pending_object_with_mode(struct rev_info *revs, struct object *obj, const char *name, unsigned mode) { add_pending_object_with_path(revs, obj, name, mode, NULL); } void add_pending_object(struct rev_info *revs, struct object *obj, const char *name) { add_pending_object_with_mode(revs, obj, name, S_IFINVALID); } void add_head_to_pending(struct rev_info *revs) { unsigned char sha1[20]; struct object *obj; if (get_sha1("HEAD", sha1)) return; obj = parse_object(sha1); if (!obj) return; add_pending_object(revs, obj, "HEAD"); } static struct object *get_reference(struct rev_info *revs, const char *name, const unsigned char *sha1, unsigned int flags) { struct object *object; object = parse_object(sha1); if (!object) { if (revs->ignore_missing) return object; die("bad object %s", name); } object->flags |= flags; return object; } void add_pending_sha1(struct rev_info *revs, const char *name, const unsigned char *sha1, unsigned int flags) { struct object *object = get_reference(revs, name, sha1, flags); add_pending_object(revs, object, name); } static struct commit *handle_commit(struct rev_info *revs, struct object_array_entry *entry) { struct object *object = entry->item; const char *name = entry->name; const char *path = entry->path; unsigned int mode = entry->mode; unsigned long flags = object->flags; /* * Tag object? Look what it points to.. */ while (object->type == OBJ_TAG) { struct tag *tag = (struct tag *) object; if (revs->tag_objects && !(flags & UNINTERESTING)) add_pending_object(revs, object, tag->tag); if (!tag->tagged) die("bad tag"); object = parse_object(tag->tagged->oid.hash); if (!object) { if (flags & UNINTERESTING) return NULL; die("bad object %s", oid_to_hex(&tag->tagged->oid)); } object->flags |= flags; /* * We'll handle the tagged object by looping or dropping * through to the non-tag handlers below. Do not * propagate path data from the tag's pending entry. */ path = NULL; mode = 0; } /* * Commit object? Just return it, we'll do all the complex * reachability crud. */ if (object->type == OBJ_COMMIT) { struct commit *commit = (struct commit *)object; if (parse_commit(commit) < 0) die("unable to parse commit %s", name); if (flags & UNINTERESTING) { mark_parents_uninteresting(commit); revs->limited = 1; } if (revs->show_source && !commit->util) commit->util = xstrdup(name); return commit; } /* * Tree object? Either mark it uninteresting, or add it * to the list of objects to look at later.. */ if (object->type == OBJ_TREE) { struct tree *tree = (struct tree *)object; if (!revs->tree_objects) return NULL; if (flags & UNINTERESTING) { mark_tree_contents_uninteresting(tree); return NULL; } add_pending_object_with_path(revs, object, name, mode, path); return NULL; } /* * Blob object? 
You know the drill by now.. */ if (object->type == OBJ_BLOB) { if (!revs->blob_objects) return NULL; if (flags & UNINTERESTING) return NULL; add_pending_object_with_path(revs, object, name, mode, path); return NULL; } die("%s is unknown object", name); } static int everybody_uninteresting(struct commit_list *orig, struct commit **interesting_cache) { struct commit_list *list = orig; if (*interesting_cache) { struct commit *commit = *interesting_cache; if (!(commit->object.flags & UNINTERESTING)) return 0; } while (list) { struct commit *commit = list->item; list = list->next; if (commit->object.flags & UNINTERESTING) continue; *interesting_cache = commit; return 0; } return 1; } /* * A definition of "relevant" commit that we can use to simplify limited graphs * by eliminating side branches. * * A "relevant" commit is one that is !UNINTERESTING (ie we are including it * in our list), or that is a specified BOTTOM commit. Then after computing * a limited list, during processing we can generally ignore boundary merges * coming from outside the graph, (ie from irrelevant parents), and treat * those merges as if they were single-parent. TREESAME is defined to consider * only relevant parents, if any. If we are TREESAME to our on-graph parents, * we don't care if we were !TREESAME to non-graph parents. * * Treating bottom commits as relevant ensures that a limited graph's * connection to the actual bottom commit is not viewed as a side branch, but * treated as part of the graph. For example: * * ....Z...A---X---o---o---B * . / * W---Y * * When computing "A..B", the A-X connection is at least as important as * Y-X, despite A being flagged UNINTERESTING. * * And when computing --ancestry-path "A..B", the A-X connection is more * important than Y-X, despite both A and Y being flagged UNINTERESTING. */ static inline int relevant_commit(struct commit *commit) { return (commit->object.flags & (UNINTERESTING | BOTTOM)) != UNINTERESTING; } /* * Return a single relevant commit from a parent list. If we are a TREESAME * commit, and this selects one of our parents, then we can safely simplify to * that parent. */ static struct commit *one_relevant_parent(const struct rev_info *revs, struct commit_list *orig) { struct commit_list *list = orig; struct commit *relevant = NULL; if (!orig) return NULL; /* * For 1-parent commits, or if first-parent-only, then return that * first parent (even if not "relevant" by the above definition). * TREESAME will have been set purely on that parent. */ if (revs->first_parent_only || !orig->next) return orig->item; /* * For multi-parent commits, identify a sole relevant parent, if any. * If we have only one relevant parent, then TREESAME will be set purely * with regard to that parent, and we can simplify accordingly. * * If we have more than one relevant parent, or no relevant parents * (and multiple irrelevant ones), then we can't select a parent here * and return NULL. */ while (list) { struct commit *commit = list->item; list = list->next; if (relevant_commit(commit)) { if (relevant) return NULL; relevant = commit; } } return relevant; } /* * The goal is to get REV_TREE_NEW as the result only if the * diff consists of all '+' (and no other changes), REV_TREE_OLD * if the whole diff is removal of old data, and otherwise * REV_TREE_DIFFERENT (of course if the trees are the same we * want REV_TREE_SAME). * That means that once we get to REV_TREE_DIFFERENT, we do not * have to look any further. 
*/ static int tree_difference = REV_TREE_SAME; static void file_add_remove(struct diff_options *options, int addremove, unsigned mode, const unsigned char *sha1, int sha1_valid, const char *fullpath, unsigned dirty_submodule) { int diff = addremove == '+' ? REV_TREE_NEW : REV_TREE_OLD; tree_difference |= diff; if (tree_difference == REV_TREE_DIFFERENT) DIFF_OPT_SET(options, HAS_CHANGES); } static void file_change(struct diff_options *options, unsigned old_mode, unsigned new_mode, const unsigned char *old_sha1, const unsigned char *new_sha1, int old_sha1_valid, int new_sha1_valid, const char *fullpath, unsigned old_dirty_submodule, unsigned new_dirty_submodule) { tree_difference = REV_TREE_DIFFERENT; DIFF_OPT_SET(options, HAS_CHANGES); } static int rev_compare_tree(struct rev_info *revs, struct commit *parent, struct commit *commit) { struct tree *t1 = parent->tree; struct tree *t2 = commit->tree; if (!t1) return REV_TREE_NEW; if (!t2) return REV_TREE_OLD; if (revs->simplify_by_decoration) { /* * If we are simplifying by decoration, then the commit * is worth showing if it has a tag pointing at it. */ if (get_name_decoration(&commit->object)) return REV_TREE_DIFFERENT; /* * A commit that is not pointed by a tag is uninteresting * if we are not limited by path. This means that you will * see the usual "commits that touch the paths" plus any * tagged commit by specifying both --simplify-by-decoration * and pathspec. */ if (!revs->prune_data.nr) return REV_TREE_SAME; } tree_difference = REV_TREE_SAME; DIFF_OPT_CLR(&revs->pruning, HAS_CHANGES); if (diff_tree_sha1(t1->object.oid.hash, t2->object.oid.hash, "", &revs->pruning) < 0) return REV_TREE_DIFFERENT; return tree_difference; } static int rev_same_tree_as_empty(struct rev_info *revs, struct commit *commit) { int retval; struct tree *t1 = commit->tree; if (!t1) return 0; tree_difference = REV_TREE_SAME; DIFF_OPT_CLR(&revs->pruning, HAS_CHANGES); retval = diff_tree_sha1(NULL, t1->object.oid.hash, "", &revs->pruning); return retval >= 0 && (tree_difference == REV_TREE_SAME); } struct treesame_state { unsigned int nparents; unsigned char treesame[FLEX_ARRAY]; }; static struct treesame_state *initialise_treesame(struct rev_info *revs, struct commit *commit) { unsigned n = commit_list_count(commit->parents); struct treesame_state *st = xcalloc(1, sizeof(*st) + n); st->nparents = n; add_decoration(&revs->treesame, &commit->object, st); return st; } /* * Must be called immediately after removing the nth_parent from a commit's * parent list, if we are maintaining the per-parent treesame[] decoration. * This does not recalculate the master TREESAME flag - update_treesame() * should be called to update it after a sequence of treesame[] modifications * that may have affected it. */ static int compact_treesame(struct rev_info *revs, struct commit *commit, unsigned nth_parent) { struct treesame_state *st; int old_same; if (!commit->parents) { /* * Have just removed the only parent from a non-merge. * Different handling, as we lack decoration. 
*/ if (nth_parent != 0) die("compact_treesame %u", nth_parent); old_same = !!(commit->object.flags & TREESAME); if (rev_same_tree_as_empty(revs, commit)) commit->object.flags |= TREESAME; else commit->object.flags &= ~TREESAME; return old_same; } st = lookup_decoration(&revs->treesame, &commit->object); if (!st || nth_parent >= st->nparents) die("compact_treesame %u", nth_parent); old_same = st->treesame[nth_parent]; memmove(st->treesame + nth_parent, st->treesame + nth_parent + 1, st->nparents - nth_parent - 1); /* * If we've just become a non-merge commit, update TREESAME * immediately, and remove the no-longer-needed decoration. * If still a merge, defer update until update_treesame(). */ if (--st->nparents == 1) { if (commit->parents->next) die("compact_treesame parents mismatch"); if (st->treesame[0] && revs->dense) commit->object.flags |= TREESAME; else commit->object.flags &= ~TREESAME; free(add_decoration(&revs->treesame, &commit->object, NULL)); } return old_same; } static unsigned update_treesame(struct rev_info *revs, struct commit *commit) { if (commit->parents && commit->parents->next) { unsigned n; struct treesame_state *st; struct commit_list *p; unsigned relevant_parents; unsigned relevant_change, irrelevant_change; st = lookup_decoration(&revs->treesame, &commit->object); if (!st) die("update_treesame %s", oid_to_hex(&commit->object.oid)); relevant_parents = 0; relevant_change = irrelevant_change = 0; for (p = commit->parents, n = 0; p; n++, p = p->next) { if (relevant_commit(p->item)) { relevant_change |= !st->treesame[n]; relevant_parents++; } else irrelevant_change |= !st->treesame[n]; } if (relevant_parents ? relevant_change : irrelevant_change) commit->object.flags &= ~TREESAME; else commit->object.flags |= TREESAME; } return commit->object.flags & TREESAME; } static inline int limiting_can_increase_treesame(const struct rev_info *revs) { /* * TREESAME is irrelevant unless prune && dense; * if simplify_history is set, we can't have a mixture of TREESAME and * !TREESAME INTERESTING parents (and we don't have treesame[] * decoration anyway); * if first_parent_only is set, then the TREESAME flag is locked * against the first parent (and again we lack treesame[] decoration). */ return revs->prune && revs->dense && !revs->simplify_history && !revs->first_parent_only; } static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit) { struct commit_list **pp, *parent; struct treesame_state *ts = NULL; int relevant_change = 0, irrelevant_change = 0; int relevant_parents, nth_parent; /* * If we don't do pruning, everything is interesting */ if (!revs->prune) return; if (!commit->tree) return; if (!commit->parents) { if (rev_same_tree_as_empty(revs, commit)) commit->object.flags |= TREESAME; return; } /* * Normal non-merge commit? If we don't want to make the * history dense, we consider it always to be a change.. */ if (!revs->dense && !commit->parents->next) return; for (pp = &commit->parents, nth_parent = 0, relevant_parents = 0; (parent = *pp) != NULL; pp = &parent->next, nth_parent++) { struct commit *p = parent->item; if (relevant_commit(p)) relevant_parents++; if (nth_parent == 1) { /* * This our second loop iteration - so we now know * we're dealing with a merge. * * Do not compare with later parents when we care only about * the first parent chain, in order to avoid derailing the * traversal to follow a side branch that brought everything * in the path we are limited to by the pathspec. 
*/ if (revs->first_parent_only) break; /* * If this will remain a potentially-simplifiable * merge, remember per-parent treesame if needed. * Initialise the array with the comparison from our * first iteration. */ if (revs->treesame.name && !revs->simplify_history && !(commit->object.flags & UNINTERESTING)) { ts = initialise_treesame(revs, commit); if (!(irrelevant_change || relevant_change)) ts->treesame[0] = 1; } } if (parse_commit(p) < 0) die("cannot simplify commit %s (because of %s)", oid_to_hex(&commit->object.oid), oid_to_hex(&p->object.oid)); switch (rev_compare_tree(revs, p, commit)) { case REV_TREE_SAME: if (!revs->simplify_history || !relevant_commit(p)) { /* Even if a merge with an uninteresting * side branch brought the entire change * we are interested in, we do not want * to lose the other branches of this * merge, so we just keep going. */ if (ts) ts->treesame[nth_parent] = 1; continue; } parent->next = NULL; commit->parents = parent; commit->object.flags |= TREESAME; return; case REV_TREE_NEW: if (revs->remove_empty_trees && rev_same_tree_as_empty(revs, p)) { /* We are adding all the specified * paths from this parent, so the * history beyond this parent is not * interesting. Remove its parents * (they are grandparents for us). * IOW, we pretend this parent is a * "root" commit. */ if (parse_commit(p) < 0) die("cannot simplify commit %s (invalid %s)", oid_to_hex(&commit->object.oid), oid_to_hex(&p->object.oid)); p->parents = NULL; } /* fallthrough */ case REV_TREE_OLD: case REV_TREE_DIFFERENT: if (relevant_commit(p)) relevant_change = 1; else irrelevant_change = 1; continue; } die("bad tree compare for commit %s", oid_to_hex(&commit->object.oid)); } /* * TREESAME is straightforward for single-parent commits. For merge * commits, it is most useful to define it so that "irrelevant" * parents cannot make us !TREESAME - if we have any relevant * parents, then we only consider TREESAMEness with respect to them, * allowing irrelevant merges from uninteresting branches to be * simplified away. Only if we have only irrelevant parents do we * base TREESAME on them. Note that this logic is replicated in * update_treesame, which should be kept in sync. */ if (relevant_parents ? !relevant_change : !irrelevant_change) commit->object.flags |= TREESAME; } static void commit_list_insert_by_date_cached(struct commit *p, struct commit_list **head, struct commit_list *cached_base, struct commit_list **cache) { struct commit_list *new_entry; if (cached_base && p->date < cached_base->item->date) new_entry = commit_list_insert_by_date(p, &cached_base->next); else new_entry = commit_list_insert_by_date(p, head); if (cache && (!*cache || p->date < (*cache)->item->date)) *cache = new_entry; } static int add_parents_to_list(struct rev_info *revs, struct commit *commit, struct commit_list **list, struct commit_list **cache_ptr) { struct commit_list *parent = commit->parents; unsigned left_flag; struct commit_list *cached_base = cache_ptr ? *cache_ptr : NULL; if (commit->object.flags & ADDED) return 0; commit->object.flags |= ADDED; if (revs->include_check && !revs->include_check(commit, revs->include_check_data)) return 0; /* * If the commit is uninteresting, don't try to * prune parents - we want the maximal uninteresting * set. * * Normally we haven't parsed the parent * yet, so we won't have a parent of a parent * here. However, it may turn out that we've * reached this commit some other way (where it * wasn't uninteresting), in which case we need * to mark its parents recursively too.. 
*/ if (commit->object.flags & UNINTERESTING) { while (parent) { struct commit *p = parent->item; parent = parent->next; if (p) p->object.flags |= UNINTERESTING; if (parse_commit_gently(p, 1) < 0) continue; if (p->parents) mark_parents_uninteresting(p); if (p->object.flags & SEEN) continue; p->object.flags |= SEEN; commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr); } return 0; } /* * Ok, the commit wasn't uninteresting. Try to * simplify the commit history and find the parent * that has no differences in the path set if one exists. */ try_to_simplify_commit(revs, commit); if (revs->no_walk) return 0; left_flag = (commit->object.flags & SYMMETRIC_LEFT); for (parent = commit->parents; parent; parent = parent->next) { struct commit *p = parent->item; if (parse_commit_gently(p, revs->ignore_missing_links) < 0) return -1; if (revs->show_source && !p->util) p->util = commit->util; p->object.flags |= left_flag; if (!(p->object.flags & SEEN)) { p->object.flags |= SEEN; commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr); } if (revs->first_parent_only) break; } return 0; } static void cherry_pick_list(struct commit_list *list, struct rev_info *revs) { struct commit_list *p; int left_count = 0, right_count = 0; int left_first; struct patch_ids ids; unsigned cherry_flag; /* First count the commits on the left and on the right */ for (p = list; p; p = p->next) { struct commit *commit = p->item; unsigned flags = commit->object.flags; if (flags & BOUNDARY) ; else if (flags & SYMMETRIC_LEFT) left_count++; else right_count++; } if (!left_count || !right_count) return; left_first = left_count < right_count; init_patch_ids(&ids); ids.diffopts.pathspec = revs->diffopt.pathspec; /* Compute patch-ids for one side */ for (p = list; p; p = p->next) { struct commit *commit = p->item; unsigned flags = commit->object.flags; if (flags & BOUNDARY) continue; /* * If we have fewer left, left_first is set and we omit * commits on the right branch in this loop. If we have * fewer right, we skip the left ones. */ if (left_first != !!(flags & SYMMETRIC_LEFT)) continue; commit->util = add_commit_patch_id(commit, &ids); } /* either cherry_mark or cherry_pick are true */ cherry_flag = revs->cherry_mark ? PATCHSAME : SHOWN; /* Check the other side */ for (p = list; p; p = p->next) { struct commit *commit = p->item; struct patch_id *id; unsigned flags = commit->object.flags; if (flags & BOUNDARY) continue; /* * If we have fewer left, left_first is set and we omit * commits on the left branch in this loop. */ if (left_first == !!(flags & SYMMETRIC_LEFT)) continue; /* * Have we seen the same patch id? */ id = has_commit_patch_id(commit, &ids); if (!id) continue; id->seen = 1; commit->object.flags |= cherry_flag; } /* Now check the original side for seen ones */ for (p = list; p; p = p->next) { struct commit *commit = p->item; struct patch_id *ent; ent = commit->util; if (!ent) continue; if (ent->seen) commit->object.flags |= cherry_flag; commit->util = NULL; } free_patch_ids(&ids); } /* How many extra uninteresting commits we want to see.. */ #define SLOP 5 static int still_interesting(struct commit_list *src, unsigned long date, int slop, struct commit **interesting_cache) { /* * No source list at all? We're definitely done.. */ if (!src) return 0; /* * Does the destination list contain entries with a date * before the source list? Definitely _not_ done. */ if (date <= src->item->date) return SLOP; /* * Does the source list still have interesting commits in * it? Definitely not done.. 
*/ if (!everybody_uninteresting(src, interesting_cache)) return SLOP; /* Ok, we're closing in.. */ return slop-1; } /* * "rev-list --ancestry-path A..B" computes commits that are ancestors * of B but not ancestors of A but further limits the result to those * that are descendants of A. This takes the list of bottom commits and * the result of "A..B" without --ancestry-path, and limits the latter * further to the ones that can reach one of the commits in "bottom". */ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *list) { struct commit_list *p; struct commit_list *rlist = NULL; int made_progress; /* * Reverse the list so that it will be likely that we would * process parents before children. */ for (p = list; p; p = p->next) commit_list_insert(p->item, &rlist); for (p = bottom; p; p = p->next) p->item->object.flags |= TMP_MARK; /* * Mark the ones that can reach bottom commits in "list", * in a bottom-up fashion. */ do { made_progress = 0; for (p = rlist; p; p = p->next) { struct commit *c = p->item; struct commit_list *parents; if (c->object.flags & (TMP_MARK | UNINTERESTING)) continue; for (parents = c->parents; parents; parents = parents->next) { if (!(parents->item->object.flags & TMP_MARK)) continue; c->object.flags |= TMP_MARK; made_progress = 1; break; } } } while (made_progress); /* * NEEDSWORK: decide if we want to remove parents that are * not marked with TMP_MARK from commit->parents for commits * in the resulting list. We may not want to do that, though. */ /* * The ones that are not marked with TMP_MARK are uninteresting */ for (p = list; p; p = p->next) { struct commit *c = p->item; if (c->object.flags & TMP_MARK) continue; c->object.flags |= UNINTERESTING; } /* We are done with the TMP_MARK */ for (p = list; p; p = p->next) p->item->object.flags &= ~TMP_MARK; for (p = bottom; p; p = p->next) p->item->object.flags &= ~TMP_MARK; free_commit_list(rlist); } /* * Before walking the history, keep the set of "negative" refs the * caller has asked to exclude. * * This is used to compute "rev-list --ancestry-path A..B", as we need * to filter the result of "A..B" further to the ones that can actually * reach A. 
*/ static struct commit_list *collect_bottom_commits(struct commit_list *list) { struct commit_list *elem, *bottom = NULL; for (elem = list; elem; elem = elem->next) if (elem->item->object.flags & BOTTOM) commit_list_insert(elem->item, &bottom); return bottom; } /* Assumes either left_only or right_only is set */ static void limit_left_right(struct commit_list *list, struct rev_info *revs) { struct commit_list *p; for (p = list; p; p = p->next) { struct commit *commit = p->item; if (revs->right_only) { if (commit->object.flags & SYMMETRIC_LEFT) commit->object.flags |= SHOWN; } else /* revs->left_only is set */ if (!(commit->object.flags & SYMMETRIC_LEFT)) commit->object.flags |= SHOWN; } } static int limit_list(struct rev_info *revs) { int slop = SLOP; unsigned long date = ~0ul; struct commit_list *list = revs->commits; struct commit_list *newlist = NULL; struct commit_list **p = &newlist; struct commit_list *bottom = NULL; struct commit *interesting_cache = NULL; if (revs->ancestry_path) { bottom = collect_bottom_commits(list); if (!bottom) die("--ancestry-path given but there are no bottom commits"); } while (list) { struct commit *commit = pop_commit(&list); struct object *obj = &commit->object; show_early_output_fn_t show; if (commit == interesting_cache) interesting_cache = NULL; if (revs->max_age != -1 && (commit->date < revs->max_age)) obj->flags |= UNINTERESTING; if (add_parents_to_list(revs, commit, &list, NULL) < 0) return -1; if (obj->flags & UNINTERESTING) { mark_parents_uninteresting(commit); if (revs->show_all) p = &commit_list_insert(commit, p)->next; slop = still_interesting(list, date, slop, &interesting_cache); if (slop) continue; /* If showing all, add the whole pending list to the end */ if (revs->show_all) *p = list; break; } if (revs->min_age != -1 && (commit->date > revs->min_age)) continue; date = commit->date; p = &commit_list_insert(commit, p)->next; show = show_early_output; if (!show) continue; show(revs, newlist); show_early_output = NULL; } if (revs->cherry_pick || revs->cherry_mark) cherry_pick_list(newlist, revs); if (revs->left_only || revs->right_only) limit_left_right(newlist, revs); if (bottom) { limit_to_ancestry(bottom, newlist); free_commit_list(bottom); } /* * Check if any commits have become TREESAME by some of their parents * becoming UNINTERESTING. */ if (limiting_can_increase_treesame(revs)) for (list = newlist; list; list = list->next) { struct commit *c = list->item; if (c->object.flags & (UNINTERESTING | TREESAME)) continue; update_treesame(revs, c); } revs->commits = newlist; return 0; } /* * Add an entry to refs->cmdline with the specified information. * *name is copied. 
*/ static void add_rev_cmdline(struct rev_info *revs, struct object *item, const char *name, int whence, unsigned flags) { struct rev_cmdline_info *info = &revs->cmdline; int nr = info->nr; ALLOC_GROW(info->rev, nr + 1, info->alloc); info->rev[nr].item = item; info->rev[nr].name = xstrdup(name); info->rev[nr].whence = whence; info->rev[nr].flags = flags; info->nr++; } static void add_rev_cmdline_list(struct rev_info *revs, struct commit_list *commit_list, int whence, unsigned flags) { while (commit_list) { struct object *object = &commit_list->item->object; add_rev_cmdline(revs, object, oid_to_hex(&object->oid), whence, flags); commit_list = commit_list->next; } } struct all_refs_cb { int all_flags; int warned_bad_reflog; struct rev_info *all_revs; const char *name_for_errormsg; }; int ref_excluded(struct string_list *ref_excludes, const char *path) { struct string_list_item *item; if (!ref_excludes) return 0; for_each_string_list_item(item, ref_excludes) { if (!wildmatch(item->string, path, 0, NULL)) return 1; } return 0; } static int handle_one_ref(const char *path, const struct object_id *oid, int flag, void *cb_data) { struct all_refs_cb *cb = cb_data; struct object *object; if (ref_excluded(cb->all_revs->ref_excludes, path)) return 0; object = get_reference(cb->all_revs, path, oid->hash, cb->all_flags); add_rev_cmdline(cb->all_revs, object, path, REV_CMD_REF, cb->all_flags); add_pending_sha1(cb->all_revs, path, oid->hash, cb->all_flags); return 0; } static void init_all_refs_cb(struct all_refs_cb *cb, struct rev_info *revs, unsigned flags) { cb->all_revs = revs; cb->all_flags = flags; } void clear_ref_exclusion(struct string_list **ref_excludes_p) { if (*ref_excludes_p) { string_list_clear(*ref_excludes_p, 0); free(*ref_excludes_p); } *ref_excludes_p = NULL; } void add_ref_exclusion(struct string_list **ref_excludes_p, const char *exclude) { if (!*ref_excludes_p) { *ref_excludes_p = xcalloc(1, sizeof(**ref_excludes_p)); (*ref_excludes_p)->strdup_strings = 1; } string_list_append(*ref_excludes_p, exclude); } static void handle_refs(const char *submodule, struct rev_info *revs, unsigned flags, int (*for_each)(const char *, each_ref_fn, void *)) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, flags); for_each(submodule, handle_one_ref, &cb); } static void handle_one_reflog_commit(unsigned char *sha1, void *cb_data) { struct all_refs_cb *cb = cb_data; if (!is_null_sha1(sha1)) { struct object *o = parse_object(sha1); if (o) { o->flags |= cb->all_flags; /* ??? CMDLINEFLAGS ??? 
*/ add_pending_object(cb->all_revs, o, ""); } else if (!cb->warned_bad_reflog) { warning("reflog of '%s' references pruned commits", cb->name_for_errormsg); cb->warned_bad_reflog = 1; } } } static int handle_one_reflog_ent(unsigned char *osha1, unsigned char *nsha1, const char *email, unsigned long timestamp, int tz, const char *message, void *cb_data) { handle_one_reflog_commit(osha1, cb_data); handle_one_reflog_commit(nsha1, cb_data); return 0; } static int handle_one_reflog(const char *path, const struct object_id *oid, int flag, void *cb_data) { struct all_refs_cb *cb = cb_data; cb->warned_bad_reflog = 0; cb->name_for_errormsg = path; for_each_reflog_ent(path, handle_one_reflog_ent, cb_data); return 0; } void add_reflogs_to_pending(struct rev_info *revs, unsigned flags) { struct all_refs_cb cb; cb.all_revs = revs; cb.all_flags = flags; for_each_reflog(handle_one_reflog, &cb); } static void add_cache_tree(struct cache_tree *it, struct rev_info *revs, struct strbuf *path) { size_t baselen = path->len; int i; if (it->entry_count >= 0) { struct tree *tree = lookup_tree(it->sha1); add_pending_object_with_path(revs, &tree->object, "", 040000, path->buf); } for (i = 0; i < it->subtree_nr; i++) { struct cache_tree_sub *sub = it->down[i]; strbuf_addf(path, "%s%s", baselen ? "/" : "", sub->name); add_cache_tree(sub->cache_tree, revs, path); strbuf_setlen(path, baselen); } } void add_index_objects_to_pending(struct rev_info *revs, unsigned flags) { int i; read_cache(); for (i = 0; i < active_nr; i++) { struct cache_entry *ce = active_cache[i]; struct blob *blob; if (S_ISGITLINK(ce->ce_mode)) continue; blob = lookup_blob(ce->sha1); if (!blob) die("unable to add index blob to traversal"); add_pending_object_with_path(revs, &blob->object, "", ce->ce_mode, ce->name); } if (active_cache_tree) { struct strbuf path = STRBUF_INIT; add_cache_tree(active_cache_tree, revs, &path); strbuf_release(&path); } } static int add_parents_only(struct rev_info *revs, const char *arg_, int flags) { unsigned char sha1[20]; struct object *it; struct commit *commit; struct commit_list *parents; const char *arg = arg_; if (*arg == '^') { flags ^= UNINTERESTING | BOTTOM; arg++; } if (get_sha1_committish(arg, sha1)) return 0; while (1) { it = get_reference(revs, arg, sha1, 0); if (!it && revs->ignore_missing) return 0; if (it->type != OBJ_TAG) break; if (!((struct tag*)it)->tagged) return 0; hashcpy(sha1, ((struct tag*)it)->tagged->oid.hash); } if (it->type != OBJ_COMMIT) return 0; commit = (struct commit *)it; for (parents = commit->parents; parents; parents = parents->next) { it = &parents->item->object; it->flags |= flags; add_rev_cmdline(revs, it, arg_, REV_CMD_PARENTS_ONLY, flags); add_pending_object(revs, it, arg); } return 1; } void init_revisions(struct rev_info *revs, const char *prefix) { memset(revs, 0, sizeof(*revs)); revs->abbrev = DEFAULT_ABBREV; revs->ignore_merges = 1; revs->simplify_history = 1; DIFF_OPT_SET(&revs->pruning, RECURSIVE); DIFF_OPT_SET(&revs->pruning, QUICK); revs->pruning.add_remove = file_add_remove; revs->pruning.change = file_change; revs->sort_order = REV_SORT_IN_GRAPH_ORDER; revs->dense = 1; revs->prefix = prefix; revs->max_age = -1; revs->min_age = -1; revs->skip_count = -1; revs->max_count = -1; revs->max_parents = -1; revs->commit_format = CMIT_FMT_DEFAULT; init_grep_defaults(); grep_init(&revs->grep_filter, prefix); revs->grep_filter.status_only = 1; revs->grep_filter.regflags = REG_NEWLINE; diff_setup(&revs->diffopt); if (prefix && !revs->diffopt.prefix) { revs->diffopt.prefix = 
prefix; revs->diffopt.prefix_length = strlen(prefix); } revs->notes_opt.use_default_notes = -1; } static void add_pending_commit_list(struct rev_info *revs, struct commit_list *commit_list, unsigned int flags) { while (commit_list) { struct object *object = &commit_list->item->object; object->flags |= flags; add_pending_object(revs, object, oid_to_hex(&object->oid)); commit_list = commit_list->next; } } static void prepare_show_merge(struct rev_info *revs) { struct commit_list *bases; struct commit *head, *other; unsigned char sha1[20]; const char **prune = NULL; int i, prune_num = 1; /* counting terminating NULL */ if (get_sha1("HEAD", sha1)) die("--merge without HEAD?"); head = lookup_commit_or_die(sha1, "HEAD"); if (get_sha1("MERGE_HEAD", sha1)) die("--merge without MERGE_HEAD?"); other = lookup_commit_or_die(sha1, "MERGE_HEAD"); add_pending_object(revs, &head->object, "HEAD"); add_pending_object(revs, &other->object, "MERGE_HEAD"); bases = get_merge_bases(head, other); add_rev_cmdline_list(revs, bases, REV_CMD_MERGE_BASE, UNINTERESTING | BOTTOM); add_pending_commit_list(revs, bases, UNINTERESTING | BOTTOM); free_commit_list(bases); head->object.flags |= SYMMETRIC_LEFT; if (!active_nr) read_cache(); for (i = 0; i < active_nr; i++) { const struct cache_entry *ce = active_cache[i]; if (!ce_stage(ce)) continue; if (ce_path_match(ce, &revs->prune_data, NULL)) { prune_num++; REALLOC_ARRAY(prune, prune_num); prune[prune_num-2] = ce->name; prune[prune_num-1] = NULL; } while ((i+1 < active_nr) && ce_same_name(ce, active_cache[i+1])) i++; } free_pathspec(&revs->prune_data); parse_pathspec(&revs->prune_data, PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL, PATHSPEC_PREFER_FULL | PATHSPEC_LITERAL_PATH, "", prune); revs->limited = 1; } int handle_revision_arg(const char *arg_, struct rev_info *revs, int flags, unsigned revarg_opt) { struct object_context oc; char *dotdot; struct object *object; unsigned char sha1[20]; int local_flags; const char *arg = arg_; int cant_be_filename = revarg_opt & REVARG_CANNOT_BE_FILENAME; unsigned get_sha1_flags = 0; flags = flags & UNINTERESTING ? flags | BOTTOM : flags & ~BOTTOM; dotdot = strstr(arg, ".."); if (dotdot) { unsigned char from_sha1[20]; const char *next = dotdot + 2; const char *this = arg; int symmetric = *next == '.'; unsigned int flags_exclude = flags ^ (UNINTERESTING | BOTTOM); static const char head_by_default[] = "HEAD"; unsigned int a_flags; *dotdot = 0; next += symmetric; if (!*next) next = head_by_default; if (dotdot == arg) this = head_by_default; if (this == head_by_default && next == head_by_default && !symmetric) { /* * Just ".."? That is not a range but the * pathspec for the parent directory. */ if (!cant_be_filename) { *dotdot = '.'; return -1; } } if (!get_sha1_committish(this, from_sha1) && !get_sha1_committish(next, sha1)) { struct object *a_obj, *b_obj; if (!cant_be_filename) { *dotdot = '.'; verify_non_filename(revs->prefix, arg); } a_obj = parse_object(from_sha1); b_obj = parse_object(sha1); if (!a_obj || !b_obj) { missing: if (revs->ignore_missing) return 0; die(symmetric ? "Invalid symmetric difference expression %s" : "Invalid revision range %s", arg); } if (!symmetric) { /* just A..B */ a_flags = flags_exclude; } else { /* A...B -- find merge bases between the two */ struct commit *a, *b; struct commit_list *exclude; a = (a_obj->type == OBJ_COMMIT ? (struct commit *)a_obj : lookup_commit_reference(a_obj->oid.hash)); b = (b_obj->type == OBJ_COMMIT ? 
(struct commit *)b_obj : lookup_commit_reference(b_obj->oid.hash)); if (!a || !b) goto missing; exclude = get_merge_bases(a, b); add_rev_cmdline_list(revs, exclude, REV_CMD_MERGE_BASE, flags_exclude); add_pending_commit_list(revs, exclude, flags_exclude); free_commit_list(exclude); a_flags = flags | SYMMETRIC_LEFT; } a_obj->flags |= a_flags; b_obj->flags |= flags; add_rev_cmdline(revs, a_obj, this, REV_CMD_LEFT, a_flags); add_rev_cmdline(revs, b_obj, next, REV_CMD_RIGHT, flags); add_pending_object(revs, a_obj, this); add_pending_object(revs, b_obj, next); return 0; } *dotdot = '.'; } dotdot = strstr(arg, "^@"); if (dotdot && !dotdot[2]) { *dotdot = 0; if (add_parents_only(revs, arg, flags)) return 0; *dotdot = '^'; } dotdot = strstr(arg, "^!"); if (dotdot && !dotdot[2]) { *dotdot = 0; if (!add_parents_only(revs, arg, flags ^ (UNINTERESTING | BOTTOM))) *dotdot = '^'; } local_flags = 0; if (*arg == '^') { local_flags = UNINTERESTING | BOTTOM; arg++; } if (revarg_opt & REVARG_COMMITTISH) get_sha1_flags = GET_SHA1_COMMITTISH; if (get_sha1_with_context(arg, get_sha1_flags, sha1, &oc)) return revs->ignore_missing ? 0 : -1; if (!cant_be_filename) verify_non_filename(revs->prefix, arg); object = get_reference(revs, arg, sha1, flags ^ local_flags); add_rev_cmdline(revs, object, arg_, REV_CMD_REV, flags ^ local_flags); add_pending_object_with_mode(revs, object, arg, oc.mode); return 0; } struct cmdline_pathspec { int alloc; int nr; const char **path; }; static void append_prune_data(struct cmdline_pathspec *prune, const char **av) { while (*av) { ALLOC_GROW(prune->path, prune->nr + 1, prune->alloc); prune->path[prune->nr++] = *(av++); } } static void read_pathspec_from_stdin(struct rev_info *revs, struct strbuf *sb, struct cmdline_pathspec *prune) { while (strbuf_getline(sb, stdin) != EOF) { ALLOC_GROW(prune->path, prune->nr + 1, prune->alloc); prune->path[prune->nr++] = xstrdup(sb->buf); } } static void read_revisions_from_stdin(struct rev_info *revs, struct cmdline_pathspec *prune) { struct strbuf sb; int seen_dashdash = 0; int save_warning; save_warning = warn_on_object_refname_ambiguity; warn_on_object_refname_ambiguity = 0; strbuf_init(&sb, 1000); while (strbuf_getline(&sb, stdin) != EOF) { int len = sb.len; if (!len) break; if (sb.buf[0] == '-') { if (len == 2 && sb.buf[1] == '-') { seen_dashdash = 1; break; } die("options not supported in --stdin mode"); } if (handle_revision_arg(sb.buf, revs, 0, REVARG_CANNOT_BE_FILENAME)) die("bad revision '%s'", sb.buf); } if (seen_dashdash) read_pathspec_from_stdin(revs, &sb, prune); strbuf_release(&sb); warn_on_object_refname_ambiguity = save_warning; } static void add_grep(struct rev_info *revs, const char *ptn, enum grep_pat_token what) { append_grep_pattern(&revs->grep_filter, ptn, "command line", 0, what); } static void add_header_grep(struct rev_info *revs, enum grep_header_field field, const char *pattern) { append_header_grep_pattern(&revs->grep_filter, field, pattern); } static void add_message_grep(struct rev_info *revs, const char *pattern) { add_grep(revs, pattern, GREP_PATTERN_BODY); } static int handle_revision_opt(struct rev_info *revs, int argc, const char **argv, int *unkc, const char **unkv) { const char *arg = argv[0]; const char *optarg; int argcount; /* pseudo revision arguments */ if (!strcmp(arg, "--all") || !strcmp(arg, "--branches") || !strcmp(arg, "--tags") || !strcmp(arg, "--remotes") || !strcmp(arg, "--reflog") || !strcmp(arg, "--not") || !strcmp(arg, "--no-walk") || !strcmp(arg, "--do-walk") || !strcmp(arg, "--bisect") || 
starts_with(arg, "--glob=") || !strcmp(arg, "--indexed-objects") || starts_with(arg, "--exclude=") || starts_with(arg, "--branches=") || starts_with(arg, "--tags=") || starts_with(arg, "--remotes=") || starts_with(arg, "--no-walk=")) { unkv[(*unkc)++] = arg; return 1; } if ((argcount = parse_long_opt("max-count", argv, &optarg))) { revs->max_count = atoi(optarg); revs->no_walk = 0; return argcount; } else if ((argcount = parse_long_opt("skip", argv, &optarg))) { revs->skip_count = atoi(optarg); return argcount; } else if ((*arg == '-') && isdigit(arg[1])) { /* accept -<digit>, like traditional "head" */ if (strtol_i(arg + 1, 10, &revs->max_count) < 0 || revs->max_count < 0) die("'%s': not a non-negative integer", arg + 1); revs->no_walk = 0; } else if (!strcmp(arg, "-n")) { if (argc <= 1) return error("-n requires an argument"); revs->max_count = atoi(argv[1]); revs->no_walk = 0; return 2; } else if (starts_with(arg, "-n")) { revs->max_count = atoi(arg + 2); revs->no_walk = 0; } else if ((argcount = parse_long_opt("max-age", argv, &optarg))) { revs->max_age = atoi(optarg); return argcount; } else if ((argcount = parse_long_opt("since", argv, &optarg))) { revs->max_age = approxidate(optarg); return argcount; } else if ((argcount = parse_long_opt("after", argv, &optarg))) { revs->max_age = approxidate(optarg); return argcount; } else if ((argcount = parse_long_opt("min-age", argv, &optarg))) { revs->min_age = atoi(optarg); return argcount; } else if ((argcount = parse_long_opt("before", argv, &optarg))) { revs->min_age = approxidate(optarg); return argcount; } else if ((argcount = parse_long_opt("until", argv, &optarg))) { revs->min_age = approxidate(optarg); return argcount; } else if (!strcmp(arg, "--first-parent")) { revs->first_parent_only = 1; } else if (!strcmp(arg, "--ancestry-path")) { revs->ancestry_path = 1; revs->simplify_history = 0; revs->limited = 1; } else if (!strcmp(arg, "-g") || !strcmp(arg, "--walk-reflogs")) { init_reflog_walk(&revs->reflog_info); } else if (!strcmp(arg, "--default")) { if (argc <= 1) return error("bad --default argument"); revs->def = argv[1]; return 2; } else if (!strcmp(arg, "--merge")) { revs->show_merge = 1; } else if (!strcmp(arg, "--topo-order")) { revs->sort_order = REV_SORT_IN_GRAPH_ORDER; revs->topo_order = 1; } else if (!strcmp(arg, "--simplify-merges")) { revs->simplify_merges = 1; revs->topo_order = 1; revs->rewrite_parents = 1; revs->simplify_history = 0; revs->limited = 1; } else if (!strcmp(arg, "--simplify-by-decoration")) { revs->simplify_merges = 1; revs->topo_order = 1; revs->rewrite_parents = 1; revs->simplify_history = 0; revs->simplify_by_decoration = 1; revs->limited = 1; revs->prune = 1; load_ref_decorations(DECORATE_SHORT_REFS); } else if (!strcmp(arg, "--date-order")) { revs->sort_order = REV_SORT_BY_COMMIT_DATE; revs->topo_order = 1; } else if (!strcmp(arg, "--author-date-order")) { revs->sort_order = REV_SORT_BY_AUTHOR_DATE; revs->topo_order = 1; } else if (starts_with(arg, "--early-output")) { int count = 100; switch (arg[14]) { case '=': count = atoi(arg+15); /* Fallthrough */ case 0: revs->topo_order = 1; revs->early_output = count; } } else if (!strcmp(arg, "--parents")) { revs->rewrite_parents = 1; revs->print_parents = 1; } else if (!strcmp(arg, "--dense")) { revs->dense = 1; } else if (!strcmp(arg, "--sparse")) { revs->dense = 0; } else if (!strcmp(arg, "--show-all")) { revs->show_all = 1; } else if (!strcmp(arg, "--remove-empty")) { revs->remove_empty_trees = 1; } else if (!strcmp(arg, "--merges")) { revs->min_parents 
= 2; } else if (!strcmp(arg, "--no-merges")) { revs->max_parents = 1; } else if (starts_with(arg, "--min-parents=")) { revs->min_parents = atoi(arg+14); } else if (starts_with(arg, "--no-min-parents")) { revs->min_parents = 0; } else if (starts_with(arg, "--max-parents=")) { revs->max_parents = atoi(arg+14); } else if (starts_with(arg, "--no-max-parents")) { revs->max_parents = -1; } else if (!strcmp(arg, "--boundary")) { revs->boundary = 1; } else if (!strcmp(arg, "--left-right")) { revs->left_right = 1; } else if (!strcmp(arg, "--left-only")) { if (revs->right_only) die("--left-only is incompatible with --right-only" " or --cherry"); revs->left_only = 1; } else if (!strcmp(arg, "--right-only")) { if (revs->left_only) die("--right-only is incompatible with --left-only"); revs->right_only = 1; } else if (!strcmp(arg, "--cherry")) { if (revs->left_only) die("--cherry is incompatible with --left-only"); revs->cherry_mark = 1; revs->right_only = 1; revs->max_parents = 1; revs->limited = 1; } else if (!strcmp(arg, "--count")) { revs->count = 1; } else if (!strcmp(arg, "--cherry-mark")) { if (revs->cherry_pick) die("--cherry-mark is incompatible with --cherry-pick"); revs->cherry_mark = 1; revs->limited = 1; /* needs limit_list() */ } else if (!strcmp(arg, "--cherry-pick")) { if (revs->cherry_mark) die("--cherry-pick is incompatible with --cherry-mark"); revs->cherry_pick = 1; revs->limited = 1; } else if (!strcmp(arg, "--objects")) { revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; } else if (!strcmp(arg, "--objects-edge")) { revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; revs->edge_hint = 1; } else if (!strcmp(arg, "--objects-edge-aggressive")) { revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; revs->edge_hint = 1; revs->edge_hint_aggressive = 1; } else if (!strcmp(arg, "--verify-objects")) { revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; revs->verify_objects = 1; } else if (!strcmp(arg, "--unpacked")) { revs->unpacked = 1; } else if (starts_with(arg, "--unpacked=")) { die("--unpacked=<packfile> no longer supported."); } else if (!strcmp(arg, "-r")) { revs->diff = 1; DIFF_OPT_SET(&revs->diffopt, RECURSIVE); } else if (!strcmp(arg, "-t")) { revs->diff = 1; DIFF_OPT_SET(&revs->diffopt, RECURSIVE); DIFF_OPT_SET(&revs->diffopt, TREE_IN_RECURSIVE); } else if (!strcmp(arg, "-m")) { revs->ignore_merges = 0; } else if (!strcmp(arg, "-c")) { revs->diff = 1; revs->dense_combined_merges = 0; revs->combine_merges = 1; } else if (!strcmp(arg, "--cc")) { revs->diff = 1; revs->dense_combined_merges = 1; revs->combine_merges = 1; } else if (!strcmp(arg, "-v")) { revs->verbose_header = 1; } else if (!strcmp(arg, "--pretty")) { revs->verbose_header = 1; revs->pretty_given = 1; get_commit_format(NULL, revs); } else if (starts_with(arg, "--pretty=") || starts_with(arg, "--format=")) { /* * Detached form ("--pretty X" as opposed to "--pretty=X") * not allowed, since the argument is optional. 
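		 * The "=" form is what get_commit_format() below expects:
		 * arg+9 skips past "--pretty=" (and, equally, "--format=" --
		 * both prefixes are nine characters long).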
*/ revs->verbose_header = 1; revs->pretty_given = 1; get_commit_format(arg+9, revs); } else if (!strcmp(arg, "--show-notes") || !strcmp(arg, "--notes")) { revs->show_notes = 1; revs->show_notes_given = 1; revs->notes_opt.use_default_notes = 1; } else if (!strcmp(arg, "--show-signature")) { revs->show_signature = 1; } else if (!strcmp(arg, "--show-linear-break") || starts_with(arg, "--show-linear-break=")) { if (starts_with(arg, "--show-linear-break=")) revs->break_bar = xstrdup(arg + 20); else revs->break_bar = " .........."; revs->track_linear = 1; revs->track_first_time = 1; } else if (starts_with(arg, "--show-notes=") || starts_with(arg, "--notes=")) { struct strbuf buf = STRBUF_INIT; revs->show_notes = 1; revs->show_notes_given = 1; if (starts_with(arg, "--show-notes")) { if (revs->notes_opt.use_default_notes < 0) revs->notes_opt.use_default_notes = 1; strbuf_addstr(&buf, arg+13); } else strbuf_addstr(&buf, arg+8); expand_notes_ref(&buf); string_list_append(&revs->notes_opt.extra_notes_refs, strbuf_detach(&buf, NULL)); } else if (!strcmp(arg, "--no-notes")) { revs->show_notes = 0; revs->show_notes_given = 1; revs->notes_opt.use_default_notes = -1; /* we have been strdup'ing ourselves, so trick * string_list into free()ing strings */ revs->notes_opt.extra_notes_refs.strdup_strings = 1; string_list_clear(&revs->notes_opt.extra_notes_refs, 0); revs->notes_opt.extra_notes_refs.strdup_strings = 0; } else if (!strcmp(arg, "--standard-notes")) { revs->show_notes_given = 1; revs->notes_opt.use_default_notes = 1; } else if (!strcmp(arg, "--no-standard-notes")) { revs->notes_opt.use_default_notes = 0; } else if (!strcmp(arg, "--oneline")) { revs->verbose_header = 1; get_commit_format("oneline", revs); revs->pretty_given = 1; revs->abbrev_commit = 1; } else if (!strcmp(arg, "--graph")) { revs->topo_order = 1; revs->rewrite_parents = 1; revs->graph = graph_init(revs); } else if (!strcmp(arg, "--root")) { revs->show_root_diff = 1; } else if (!strcmp(arg, "--no-commit-id")) { revs->no_commit_id = 1; } else if (!strcmp(arg, "--always")) { revs->always_show_header = 1; } else if (!strcmp(arg, "--no-abbrev")) { revs->abbrev = 0; } else if (!strcmp(arg, "--abbrev")) { revs->abbrev = DEFAULT_ABBREV; } else if (starts_with(arg, "--abbrev=")) { revs->abbrev = strtoul(arg + 9, NULL, 10); if (revs->abbrev < MINIMUM_ABBREV) revs->abbrev = MINIMUM_ABBREV; else if (revs->abbrev > 40) revs->abbrev = 40; } else if (!strcmp(arg, "--abbrev-commit")) { revs->abbrev_commit = 1; revs->abbrev_commit_given = 1; } else if (!strcmp(arg, "--no-abbrev-commit")) { revs->abbrev_commit = 0; } else if (!strcmp(arg, "--full-diff")) { revs->diff = 1; revs->full_diff = 1; } else if (!strcmp(arg, "--full-history")) { revs->simplify_history = 0; } else if (!strcmp(arg, "--relative-date")) { revs->date_mode.type = DATE_RELATIVE; revs->date_mode_explicit = 1; } else if ((argcount = parse_long_opt("date", argv, &optarg))) { parse_date_format(optarg, &revs->date_mode); revs->date_mode_explicit = 1; return argcount; } else if (!strcmp(arg, "--log-size")) { revs->show_log_size = 1; } /* * Grepping the commit log */ else if ((argcount = parse_long_opt("author", argv, &optarg))) { add_header_grep(revs, GREP_HEADER_AUTHOR, optarg); return argcount; } else if ((argcount = parse_long_opt("committer", argv, &optarg))) { add_header_grep(revs, GREP_HEADER_COMMITTER, optarg); return argcount; } else if ((argcount = parse_long_opt("grep-reflog", argv, &optarg))) { add_header_grep(revs, GREP_HEADER_REFLOG, optarg); return argcount; } else if 
((argcount = parse_long_opt("grep", argv, &optarg))) { add_message_grep(revs, optarg); return argcount; } else if (!strcmp(arg, "--grep-debug")) { revs->grep_filter.debug = 1; } else if (!strcmp(arg, "--basic-regexp")) { grep_set_pattern_type_option(GREP_PATTERN_TYPE_BRE, &revs->grep_filter); } else if (!strcmp(arg, "--extended-regexp") || !strcmp(arg, "-E")) { grep_set_pattern_type_option(GREP_PATTERN_TYPE_ERE, &revs->grep_filter); } else if (!strcmp(arg, "--regexp-ignore-case") || !strcmp(arg, "-i")) { revs->grep_filter.regflags |= REG_ICASE; DIFF_OPT_SET(&revs->diffopt, PICKAXE_IGNORE_CASE); } else if (!strcmp(arg, "--fixed-strings") || !strcmp(arg, "-F")) { grep_set_pattern_type_option(GREP_PATTERN_TYPE_FIXED, &revs->grep_filter); } else if (!strcmp(arg, "--perl-regexp")) { grep_set_pattern_type_option(GREP_PATTERN_TYPE_PCRE, &revs->grep_filter); } else if (!strcmp(arg, "--all-match")) { revs->grep_filter.all_match = 1; } else if (!strcmp(arg, "--invert-grep")) { revs->invert_grep = 1; } else if ((argcount = parse_long_opt("encoding", argv, &optarg))) { if (strcmp(optarg, "none")) git_log_output_encoding = xstrdup(optarg); else git_log_output_encoding = ""; return argcount; } else if (!strcmp(arg, "--reverse")) { revs->reverse ^= 1; } else if (!strcmp(arg, "--children")) { revs->children.name = "children"; revs->limited = 1; } else if (!strcmp(arg, "--ignore-missing")) { revs->ignore_missing = 1; } else { int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix); if (!opts) unkv[(*unkc)++] = arg; return opts; } if (revs->graph && revs->track_linear) die("--show-linear-break and --graph are incompatible"); return 1; } void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx, const struct option *options, const char * const usagestr[]) { int n = handle_revision_opt(revs, ctx->argc, ctx->argv, &ctx->cpidx, ctx->out); if (n <= 0) { error("unknown option `%s'", ctx->argv[0]); usage_with_options(usagestr, options); } ctx->argv += n; ctx->argc -= n; } static int for_each_bisect_ref(const char *submodule, each_ref_fn fn, void *cb_data, const char *term) { struct strbuf bisect_refs = STRBUF_INIT; int status; strbuf_addf(&bisect_refs, "refs/bisect/%s", term); status = for_each_ref_in_submodule(submodule, bisect_refs.buf, fn, cb_data); strbuf_release(&bisect_refs); return status; } static int for_each_bad_bisect_ref(const char *submodule, each_ref_fn fn, void *cb_data) { return for_each_bisect_ref(submodule, fn, cb_data, term_bad); } static int for_each_good_bisect_ref(const char *submodule, each_ref_fn fn, void *cb_data) { return for_each_bisect_ref(submodule, fn, cb_data, term_good); } static int handle_revision_pseudo_opt(const char *submodule, struct rev_info *revs, int argc, const char **argv, int *flags) { const char *arg = argv[0]; const char *optarg; int argcount; /* * NOTE! * * Commands like "git shortlog" will not accept the options below * unless parse_revision_opt queues them (as opposed to erroring * out). * * When implementing your new pseudo-option, remember to * register it in the list at the top of handle_revision_opt. 
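	 * "--all" is an example: handle_revision_opt() lists it, so
	 * parse_revision_opt() queues it instead of erroring out, and
	 * it finally takes effect in the dispatch chain below.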
*/ if (!strcmp(arg, "--all")) { handle_refs(submodule, revs, *flags, for_each_ref_submodule); handle_refs(submodule, revs, *flags, head_ref_submodule); clear_ref_exclusion(&revs->ref_excludes); } else if (!strcmp(arg, "--branches")) { handle_refs(submodule, revs, *flags, for_each_branch_ref_submodule); clear_ref_exclusion(&revs->ref_excludes); } else if (!strcmp(arg, "--bisect")) { read_bisect_terms(&term_bad, &term_good); handle_refs(submodule, revs, *flags, for_each_bad_bisect_ref); handle_refs(submodule, revs, *flags ^ (UNINTERESTING | BOTTOM), for_each_good_bisect_ref); revs->bisect = 1; } else if (!strcmp(arg, "--tags")) { handle_refs(submodule, revs, *flags, for_each_tag_ref_submodule); clear_ref_exclusion(&revs->ref_excludes); } else if (!strcmp(arg, "--remotes")) { handle_refs(submodule, revs, *flags, for_each_remote_ref_submodule); clear_ref_exclusion(&revs->ref_excludes); } else if ((argcount = parse_long_opt("glob", argv, &optarg))) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, *flags); for_each_glob_ref(handle_one_ref, optarg, &cb); clear_ref_exclusion(&revs->ref_excludes); return argcount; } else if ((argcount = parse_long_opt("exclude", argv, &optarg))) { add_ref_exclusion(&revs->ref_excludes, optarg); return argcount; } else if (starts_with(arg, "--branches=")) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, *flags); for_each_glob_ref_in(handle_one_ref, arg + 11, "refs/heads/", &cb); clear_ref_exclusion(&revs->ref_excludes); } else if (starts_with(arg, "--tags=")) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, *flags); for_each_glob_ref_in(handle_one_ref, arg + 7, "refs/tags/", &cb); clear_ref_exclusion(&revs->ref_excludes); } else if (starts_with(arg, "--remotes=")) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, *flags); for_each_glob_ref_in(handle_one_ref, arg + 10, "refs/remotes/", &cb); clear_ref_exclusion(&revs->ref_excludes); } else if (!strcmp(arg, "--reflog")) { add_reflogs_to_pending(revs, *flags); } else if (!strcmp(arg, "--indexed-objects")) { add_index_objects_to_pending(revs, *flags); } else if (!strcmp(arg, "--not")) { *flags ^= UNINTERESTING | BOTTOM; } else if (!strcmp(arg, "--no-walk")) { revs->no_walk = REVISION_WALK_NO_WALK_SORTED; } else if (starts_with(arg, "--no-walk=")) { /* * Detached form ("--no-walk X" as opposed to "--no-walk=X") * not allowed, since the argument is optional. */ if (!strcmp(arg + 10, "sorted")) revs->no_walk = REVISION_WALK_NO_WALK_SORTED; else if (!strcmp(arg + 10, "unsorted")) revs->no_walk = REVISION_WALK_NO_WALK_UNSORTED; else return error("invalid argument to --no-walk"); } else if (!strcmp(arg, "--do-walk")) { revs->no_walk = 0; } else { return 0; } return 1; } static void NORETURN diagnose_missing_default(const char *def) { unsigned char sha1[20]; int flags; const char *refname; refname = resolve_ref_unsafe(def, 0, sha1, &flags); if (!refname || !(flags & REF_ISSYMREF) || (flags & REF_ISBROKEN)) die(_("your current branch appears to be broken")); skip_prefix(refname, "refs/heads/", &refname); die(_("your current branch '%s' does not have any commits yet"), refname); } /* * Parse revision information, filling in the "rev_info" structure, * and removing the used arguments from the argument list. 
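 * For example, "--max-count=2 HEAD -- Makefile" is consumed entirely:
 * the option by handle_revision_opt(), the revision by
 * handle_revision_arg(), and the pathspec into prune_data.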
* * Returns the number of arguments left that weren't recognized * (which are also moved to the head of the argument list) */ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct setup_revision_opt *opt) { int i, flags, left, seen_dashdash, read_from_stdin, got_rev_arg = 0, revarg_opt; struct cmdline_pathspec prune_data; const char *submodule = NULL; memset(&prune_data, 0, sizeof(prune_data)); if (opt) submodule = opt->submodule; /* First, search for "--" */ if (opt && opt->assume_dashdash) { seen_dashdash = 1; } else { seen_dashdash = 0; for (i = 1; i < argc; i++) { const char *arg = argv[i]; if (strcmp(arg, "--")) continue; argv[i] = NULL; argc = i; if (argv[i + 1]) append_prune_data(&prune_data, argv + i + 1); seen_dashdash = 1; break; } } /* Second, deal with arguments and options */ flags = 0; revarg_opt = opt ? opt->revarg_opt : 0; if (seen_dashdash) revarg_opt |= REVARG_CANNOT_BE_FILENAME; read_from_stdin = 0; for (left = i = 1; i < argc; i++) { const char *arg = argv[i]; if (*arg == '-') { int opts; opts = handle_revision_pseudo_opt(submodule, revs, argc - i, argv + i, &flags); if (opts > 0) { i += opts - 1; continue; } if (!strcmp(arg, "--stdin")) { if (revs->disable_stdin) { argv[left++] = arg; continue; } if (read_from_stdin++) die("--stdin given twice?"); read_revisions_from_stdin(revs, &prune_data); continue; } opts = handle_revision_opt(revs, argc - i, argv + i, &left, argv); if (opts > 0) { i += opts - 1; continue; } if (opts < 0) exit(128); continue; } if (handle_revision_arg(arg, revs, flags, revarg_opt)) { int j; if (seen_dashdash || *arg == '^') die("bad revision '%s'", arg); /* If we didn't have a "--": * (1) all filenames must exist; * (2) all rev-args must not be interpretable * as a valid filename. * but the latter we have checked in the main loop. */ for (j = i; j < argc; j++) verify_filename(revs->prefix, argv[j], j == i); append_prune_data(&prune_data, argv + i); break; } else got_rev_arg = 1; } if (prune_data.nr) { /* * If we need to introduce the magic "a lone ':' means no * pathspec whatsoever", here is the place to do so. * * if (prune_data.nr == 1 && !strcmp(prune_data[0], ":")) { * prune_data.nr = 0; * prune_data.alloc = 0; * free(prune_data.path); * prune_data.path = NULL; * } else { * terminate prune_data.alloc with NULL and * call init_pathspec() to set revs->prune_data here. * } */ ALLOC_GROW(prune_data.path, prune_data.nr + 1, prune_data.alloc); prune_data.path[prune_data.nr++] = NULL; parse_pathspec(&revs->prune_data, 0, 0, revs->prefix, prune_data.path); } if (revs->def == NULL) revs->def = opt ? opt->def : NULL; if (opt && opt->tweak) opt->tweak(revs, opt); if (revs->show_merge) prepare_show_merge(revs); if (revs->def && !revs->pending.nr && !got_rev_arg) { unsigned char sha1[20]; struct object *object; struct object_context oc; if (get_sha1_with_context(revs->def, 0, sha1, &oc)) diagnose_missing_default(revs->def); object = get_reference(revs, revs->def, sha1, 0); add_pending_object_with_mode(revs, object, revs->def, oc.mode); } /* Did the user ask for any diff output? Run the diff! */ if (revs->diffopt.output_format & ~DIFF_FORMAT_NO_OUTPUT) revs->diff = 1; /* Pickaxe, diff-filter and rename following need diffs */ if (revs->diffopt.pickaxe || revs->diffopt.filter || DIFF_OPT_TST(&revs->diffopt, FOLLOW_RENAMES)) revs->diff = 1; if (revs->topo_order) revs->limited = 1; if (revs->prune_data.nr) { copy_pathspec(&revs->pruning.pathspec, &revs->prune_data); /* Can't prune commits with rename following: the paths change.. 
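	 * (with --follow a path's history can live under older names
	 * too, so leave revs->prune unset and let the diff machinery
	 * chase the renames instead)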
*/ if (!DIFF_OPT_TST(&revs->diffopt, FOLLOW_RENAMES)) revs->prune = 1; if (!revs->full_diff) copy_pathspec(&revs->diffopt.pathspec, &revs->prune_data); } if (revs->combine_merges) revs->ignore_merges = 0; revs->diffopt.abbrev = revs->abbrev; if (revs->line_level_traverse) { revs->limited = 1; revs->topo_order = 1; } diff_setup_done(&revs->diffopt); grep_commit_pattern_type(GREP_PATTERN_TYPE_UNSPECIFIED, &revs->grep_filter); compile_grep_patterns(&revs->grep_filter); if (revs->reverse && revs->reflog_info) die("cannot combine --reverse with --walk-reflogs"); if (revs->rewrite_parents && revs->children.name) die("cannot combine --parents and --children"); /* * Limitations on the graph functionality */ if (revs->reverse && revs->graph) die("cannot combine --reverse with --graph"); if (revs->reflog_info && revs->graph) die("cannot combine --walk-reflogs with --graph"); if (revs->no_walk && revs->graph) die("cannot combine --no-walk with --graph"); if (!revs->reflog_info && revs->grep_filter.use_reflog_filter) die("cannot use --grep-reflog without --walk-reflogs"); if (revs->first_parent_only && revs->bisect) die(_("--first-parent is incompatible with --bisect")); return left; } static void add_child(struct rev_info *revs, struct commit *parent, struct commit *child) { struct commit_list *l = xcalloc(1, sizeof(*l)); l->item = child; l->next = add_decoration(&revs->children, &parent->object, l); } static int remove_duplicate_parents(struct rev_info *revs, struct commit *commit) { struct treesame_state *ts = lookup_decoration(&revs->treesame, &commit->object); struct commit_list **pp, *p; int surviving_parents; /* Examine existing parents while marking ones we have seen... */ pp = &commit->parents; surviving_parents = 0; while ((p = *pp) != NULL) { struct commit *parent = p->item; if (parent->object.flags & TMP_MARK) { *pp = p->next; if (ts) compact_treesame(revs, commit, surviving_parents); continue; } parent->object.flags |= TMP_MARK; surviving_parents++; pp = &p->next; } /* clear the temporary mark */ for (p = commit->parents; p; p = p->next) { p->item->object.flags &= ~TMP_MARK; } /* no update_treesame() - removing duplicates can't affect TREESAME */ return surviving_parents; } struct merge_simplify_state { struct commit *simplified; }; static struct merge_simplify_state *locate_simplify_state(struct rev_info *revs, struct commit *commit) { struct merge_simplify_state *st; st = lookup_decoration(&revs->merge_simplification, &commit->object); if (!st) { st = xcalloc(1, sizeof(*st)); add_decoration(&revs->merge_simplification, &commit->object, st); } return st; } static int mark_redundant_parents(struct rev_info *revs, struct commit *commit) { struct commit_list *h = reduce_heads(commit->parents); int i = 0, marked = 0; struct commit_list *po, *pn; /* Want these for sanity-checking only */ int orig_cnt = commit_list_count(commit->parents); int cnt = commit_list_count(h); /* * Not ready to remove items yet, just mark them for now, based * on the output of reduce_heads(). reduce_heads outputs the reduced * set in its original order, so this isn't too hard. 
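	 * Walk the original parent list and the reduce_heads() output
	 * in lockstep below; any parent absent from the reduced set
	 * gets TMP_MARK.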
*/ po = commit->parents; pn = h; while (po) { if (pn && po->item == pn->item) { pn = pn->next; i++; } else { po->item->object.flags |= TMP_MARK; marked++; } po=po->next; } if (i != cnt || cnt+marked != orig_cnt) die("mark_redundant_parents %d %d %d %d", orig_cnt, cnt, i, marked); free_commit_list(h); return marked; } static int mark_treesame_root_parents(struct rev_info *revs, struct commit *commit) { struct commit_list *p; int marked = 0; for (p = commit->parents; p; p = p->next) { struct commit *parent = p->item; if (!parent->parents && (parent->object.flags & TREESAME)) { parent->object.flags |= TMP_MARK; marked++; } } return marked; } /* * Awkward naming - this means one parent we are TREESAME to. * cf mark_treesame_root_parents: root parents that are TREESAME (to an * empty tree). Better name suggestions? */ static int leave_one_treesame_to_parent(struct rev_info *revs, struct commit *commit) { struct treesame_state *ts = lookup_decoration(&revs->treesame, &commit->object); struct commit *unmarked = NULL, *marked = NULL; struct commit_list *p; unsigned n; for (p = commit->parents, n = 0; p; p = p->next, n++) { if (ts->treesame[n]) { if (p->item->object.flags & TMP_MARK) { if (!marked) marked = p->item; } else { if (!unmarked) { unmarked = p->item; break; } } } } /* * If we are TREESAME to a marked-for-deletion parent, but not to any * unmarked parents, unmark the first TREESAME parent. This is the * parent that the default simplify_history==1 scan would have followed, * and it doesn't make sense to omit that path when asking for a * simplified full history. Retaining it improves the chances of * understanding odd missed merges that took an old version of a file. * * Example: * * I--------*X A modified the file, but mainline merge X used * \ / "-s ours", so took the version from I. X is * `-*A--' TREESAME to I and !TREESAME to A. * * Default log from X would produce "I". Without this check, * --full-history --simplify-merges would produce "I-A-X", showing * the merge commit X and that it changed A, but not making clear that * it had just taken the I version. With this check, the topology above * is retained. * * Note that it is possible that the simplification chooses a different * TREESAME parent from the default, in which case this test doesn't * activate, and we _do_ drop the default parent. Example: * * I------X A modified the file, but it was reverted in B, * \ / meaning mainline merge X is TREESAME to both * *A-*B parents. * * Default log would produce "I" by following the first parent; * --full-history --simplify-merges will produce "I-A-B". But this is a * reasonable result - it presents a logical full history leading from * I to X, and X is not an important merge. 
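	 * In short: rescue (unmark) one marked TREESAME parent, but
	 * only when no unmarked TREESAME parent survives.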
*/ if (!unmarked && marked) { marked->object.flags &= ~TMP_MARK; return 1; } return 0; } static int remove_marked_parents(struct rev_info *revs, struct commit *commit) { struct commit_list **pp, *p; int nth_parent, removed = 0; pp = &commit->parents; nth_parent = 0; while ((p = *pp) != NULL) { struct commit *parent = p->item; if (parent->object.flags & TMP_MARK) { parent->object.flags &= ~TMP_MARK; *pp = p->next; free(p); removed++; compact_treesame(revs, commit, nth_parent); continue; } pp = &p->next; nth_parent++; } /* Removing parents can only increase TREESAMEness */ if (removed && !(commit->object.flags & TREESAME)) update_treesame(revs, commit); return nth_parent; } static struct commit_list **simplify_one(struct rev_info *revs, struct commit *commit, struct commit_list **tail) { struct commit_list *p; struct commit *parent; struct merge_simplify_state *st, *pst; int cnt; st = locate_simplify_state(revs, commit); /* * Have we handled this one? */ if (st->simplified) return tail; /* * An UNINTERESTING commit simplifies to itself, so does a * root commit. We do not rewrite parents of such commit * anyway. */ if ((commit->object.flags & UNINTERESTING) || !commit->parents) { st->simplified = commit; return tail; } /* * Do we know what commit all of our parents that matter * should be rewritten to? Otherwise we are not ready to * rewrite this one yet. */ for (cnt = 0, p = commit->parents; p; p = p->next) { pst = locate_simplify_state(revs, p->item); if (!pst->simplified) { tail = &commit_list_insert(p->item, tail)->next; cnt++; } if (revs->first_parent_only) break; } if (cnt) { tail = &commit_list_insert(commit, tail)->next; return tail; } /* * Rewrite our list of parents. Note that this cannot * affect our TREESAME flags in any way - a commit is * always TREESAME to its simplification. */ for (p = commit->parents; p; p = p->next) { pst = locate_simplify_state(revs, p->item); p->item = pst->simplified; if (revs->first_parent_only) break; } if (revs->first_parent_only) cnt = 1; else cnt = remove_duplicate_parents(revs, commit); /* * It is possible that we are a merge and one side branch * does not have any commit that touches the given paths; * in such a case, the immediate parent from that branch * will be rewritten to be the merge base. * * o----X X: the commit we are looking at; * / / o: a commit that touches the paths; * ---o----' * * Further, a merge of an independent branch that doesn't * touch the path will reduce to a treesame root parent: * * ----o----X X: the commit we are looking at; * / o: a commit that touches the paths; * r r: a root commit not touching the paths * * Detect and simplify both cases. */ if (1 < cnt) { int marked = mark_redundant_parents(revs, commit); marked += mark_treesame_root_parents(revs, commit); if (marked) marked -= leave_one_treesame_to_parent(revs, commit); if (marked) cnt = remove_marked_parents(revs, commit); } /* * A commit simplifies to itself if it is a root, if it is * UNINTERESTING, if it touches the given paths, or if it is a * merge and its parents don't simplify to one relevant commit * (the first two cases are already handled at the beginning of * this function). * * Otherwise, it simplifies to what its sole relevant parent * simplifies to. 
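	 * That is how long untouched stretches of history collapse:
	 * each TREESAME commit inherits its parent's simplification,
	 * down to the nearest commit that actually matters.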
*/ if (!cnt || (commit->object.flags & UNINTERESTING) || !(commit->object.flags & TREESAME) || (parent = one_relevant_parent(revs, commit->parents)) == NULL) st->simplified = commit; else { pst = locate_simplify_state(revs, parent); st->simplified = pst->simplified; } return tail; } static void simplify_merges(struct rev_info *revs) { struct commit_list *list, *next; struct commit_list *yet_to_do, **tail; struct commit *commit; if (!revs->prune) return; /* feed the list reversed */ yet_to_do = NULL; for (list = revs->commits; list; list = next) { commit = list->item; next = list->next; /* * Do not free(list) here yet; the original list * is used later in this function. */ commit_list_insert(commit, &yet_to_do); } while (yet_to_do) { list = yet_to_do; yet_to_do = NULL; tail = &yet_to_do; while (list) { commit = pop_commit(&list); tail = simplify_one(revs, commit, tail); } } /* clean up the result, removing the simplified ones */ list = revs->commits; revs->commits = NULL; tail = &revs->commits; while (list) { struct merge_simplify_state *st; commit = pop_commit(&list); st = locate_simplify_state(revs, commit); if (st->simplified == commit) tail = &commit_list_insert(commit, tail)->next; } } static void set_children(struct rev_info *revs) { struct commit_list *l; for (l = revs->commits; l; l = l->next) { struct commit *commit = l->item; struct commit_list *p; for (p = commit->parents; p; p = p->next) add_child(revs, p->item, commit); } } void reset_revision_walk(void) { clear_object_flags(SEEN | ADDED | SHOWN); } int prepare_revision_walk(struct rev_info *revs) { int i; struct object_array old_pending; struct commit_list **next = &revs->commits; memcpy(&old_pending, &revs->pending, sizeof(old_pending)); revs->pending.nr = 0; revs->pending.alloc = 0; revs->pending.objects = NULL; for (i = 0; i < old_pending.nr; i++) { struct object_array_entry *e = old_pending.objects + i; struct commit *commit = handle_commit(revs, e); if (commit) { if (!(commit->object.flags & SEEN)) { commit->object.flags |= SEEN; next = commit_list_append(commit, next); } } } if (!revs->leak_pending) object_array_clear(&old_pending); /* Signal whether we need per-parent treesame decoration */ if (revs->simplify_merges || (revs->limited && limiting_can_increase_treesame(revs))) revs->treesame.name = "treesame"; if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED) commit_list_sort_by_date(&revs->commits); if (revs->no_walk) return 0; if (revs->limited) if (limit_list(revs) < 0) return -1; if (revs->topo_order) sort_in_topological_order(&revs->commits, revs->sort_order); if (revs->line_level_traverse) line_log_filter(revs); if (revs->simplify_merges) simplify_merges(revs); if (revs->children.name) set_children(revs); return 0; } static enum rewrite_result rewrite_one(struct rev_info *revs, struct commit **pp) { struct commit_list *cache = NULL; for (;;) { struct commit *p = *pp; if (!revs->limited) if (add_parents_to_list(revs, p, &revs->commits, &cache) < 0) return rewrite_one_error; if (p->object.flags & UNINTERESTING) return rewrite_one_ok; if (!(p->object.flags & TREESAME)) return rewrite_one_ok; if (!p->parents) return rewrite_one_noparents; if ((p = one_relevant_parent(revs, p->parents)) == NULL) return rewrite_one_ok; *pp = p; } } int rewrite_parents(struct rev_info *revs, struct commit *commit, rewrite_parent_fn_t rewrite_parent) { struct commit_list **pp = &commit->parents; while (*pp) { struct commit_list *parent = *pp; switch (rewrite_parent(revs, &parent->item)) { case rewrite_one_ok: break; case 
rewrite_one_noparents: *pp = parent->next; continue; case rewrite_one_error: return -1; } pp = &parent->next; } remove_duplicate_parents(revs, commit); return 0; } static int commit_rewrite_person(struct strbuf *buf, const char *what, struct string_list *mailmap) { char *person, *endp; size_t len, namelen, maillen; const char *name; const char *mail; struct ident_split ident; person = strstr(buf->buf, what); if (!person) return 0; person += strlen(what); endp = strchr(person, '\n'); if (!endp) return 0; len = endp - person; if (split_ident_line(&ident, person, len)) return 0; mail = ident.mail_begin; maillen = ident.mail_end - ident.mail_begin; name = ident.name_begin; namelen = ident.name_end - ident.name_begin; if (map_user(mailmap, &mail, &maillen, &name, &namelen)) { struct strbuf namemail = STRBUF_INIT; strbuf_addf(&namemail, "%.*s <%.*s>", (int)namelen, name, (int)maillen, mail); strbuf_splice(buf, ident.name_begin - buf->buf, ident.mail_end - ident.name_begin + 1, namemail.buf, namemail.len); strbuf_release(&namemail); return 1; } return 0; } static int commit_match(struct commit *commit, struct rev_info *opt) { int retval; const char *encoding; const char *message; struct strbuf buf = STRBUF_INIT; if (!opt->grep_filter.pattern_list && !opt->grep_filter.header_list) return 1; /* Prepend "fake" headers as needed */ if (opt->grep_filter.use_reflog_filter) { strbuf_addstr(&buf, "reflog "); get_reflog_message(&buf, opt->reflog_info); strbuf_addch(&buf, '\n'); } /* * We grep in the user's output encoding, under the assumption that it * is the encoding they are most likely to write their grep pattern * for. In addition, it means we will match the "notes" encoding below, * so we will not end up with a buffer that has two different encodings * in it. */ encoding = get_log_output_encoding(); message = logmsg_reencode(commit, NULL, encoding); /* Copy the commit to temporary if we are using "fake" headers */ if (buf.len) strbuf_addstr(&buf, message); if (opt->grep_filter.header_list && opt->mailmap) { if (!buf.len) strbuf_addstr(&buf, message); commit_rewrite_person(&buf, "\nauthor ", opt->mailmap); commit_rewrite_person(&buf, "\ncommitter ", opt->mailmap); } /* Append "fake" message parts as needed */ if (opt->show_notes) { if (!buf.len) strbuf_addstr(&buf, message); format_display_notes(commit->object.oid.hash, &buf, encoding, 1); } /* * Find either in the original commit message, or in the temporary. * Note that we cast away the constness of "message" here. It is * const because it may come from the cached commit buffer. That's OK, * because we know that it is modifiable heap memory, and that while * grep_buffer may modify it for speed, it will restore any * changes before returning. */ if (buf.len) retval = grep_buffer(&opt->grep_filter, buf.buf, buf.len); else retval = grep_buffer(&opt->grep_filter, (char *)message, strlen(message)); strbuf_release(&buf); unuse_commit_buffer(commit, message); return opt->invert_grep ? 
!retval : retval; } static inline int want_ancestry(const struct rev_info *revs) { return (revs->rewrite_parents || revs->children.name); } enum commit_action get_commit_action(struct rev_info *revs, struct commit *commit) { if (commit->object.flags & SHOWN) return commit_ignore; if (revs->unpacked && has_sha1_pack(commit->object.oid.hash)) return commit_ignore; if (revs->show_all) return commit_show; if (commit->object.flags & UNINTERESTING) return commit_ignore; if (revs->min_age != -1 && (commit->date > revs->min_age)) return commit_ignore; if (revs->min_parents || (revs->max_parents >= 0)) { int n = commit_list_count(commit->parents); if ((n < revs->min_parents) || ((revs->max_parents >= 0) && (n > revs->max_parents))) return commit_ignore; } if (!commit_match(commit, revs)) return commit_ignore; if (revs->prune && revs->dense) { /* Commit without changes? */ if (commit->object.flags & TREESAME) { int n; struct commit_list *p; /* drop merges unless we want parenthood */ if (!want_ancestry(revs)) return commit_ignore; /* * If we want ancestry, then need to keep any merges * between relevant commits to tie together topology. * For consistency with TREESAME and simplification * use "relevant" here rather than just INTERESTING, * to treat bottom commit(s) as part of the topology. */ for (n = 0, p = commit->parents; p; p = p->next) if (relevant_commit(p->item)) if (++n >= 2) return commit_show; return commit_ignore; } } return commit_show; } define_commit_slab(saved_parents, struct commit_list *); #define EMPTY_PARENT_LIST ((struct commit_list *)-1) /* * You may only call save_parents() once per commit (this is checked * for non-root commits). */ static void save_parents(struct rev_info *revs, struct commit *commit) { struct commit_list **pp; if (!revs->saved_parents_slab) { revs->saved_parents_slab = xmalloc(sizeof(struct saved_parents)); init_saved_parents(revs->saved_parents_slab); } pp = saved_parents_at(revs->saved_parents_slab, commit); /* * When walking with reflogs, we may visit the same commit * several times: once for each appearance in the reflog. * * In this case, save_parents() will be called multiple times. * We want to keep only the first set of parents. We need to * store a sentinel value for an empty (i.e., NULL) parent * list to distinguish it from a not-yet-saved list, however. */ if (*pp) return; if (commit->parents) *pp = copy_commit_list(commit->parents); else *pp = EMPTY_PARENT_LIST; } static void free_saved_parents(struct rev_info *revs) { if (revs->saved_parents_slab) clear_saved_parents(revs->saved_parents_slab); } struct commit_list *get_saved_parents(struct rev_info *revs, const struct commit *commit) { struct commit_list *parents; if (!revs->saved_parents_slab) return commit->parents; parents = *saved_parents_at(revs->saved_parents_slab, commit); if (parents == EMPTY_PARENT_LIST) return NULL; return parents; } enum commit_action simplify_commit(struct rev_info *revs, struct commit *commit) { enum commit_action action = get_commit_action(revs, commit); if (action == commit_show && !revs->show_all && revs->prune && revs->dense && want_ancestry(revs)) { /* * --full-diff on simplified parents is no good: it * will show spurious changes from the commits that * were elided. So we save the parents on the side * when --full-diff is in effect. 
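		 * get_saved_parents() consults that side table later, so
		 * the diff is still run against the true parents.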
*/ if (revs->full_diff) save_parents(revs, commit); if (rewrite_parents(revs, commit, rewrite_one) < 0) return commit_error; } return action; } static void track_linear(struct rev_info *revs, struct commit *commit) { if (revs->track_first_time) { revs->linear = 1; revs->track_first_time = 0; } else { struct commit_list *p; for (p = revs->previous_parents; p; p = p->next) if (p->item == NULL || /* first commit */ !oidcmp(&p->item->object.oid, &commit->object.oid)) break; revs->linear = p != NULL; } if (revs->reverse) { if (revs->linear) commit->object.flags |= TRACK_LINEAR; } free_commit_list(revs->previous_parents); revs->previous_parents = copy_commit_list(commit->parents); } static struct commit *get_revision_1(struct rev_info *revs) { if (!revs->commits) return NULL; do { struct commit *commit = pop_commit(&revs->commits); if (revs->reflog_info) { save_parents(revs, commit); fake_reflog_parent(revs->reflog_info, commit); commit->object.flags &= ~(ADDED | SEEN | SHOWN); } /* * If we haven't done the list limiting, we need to look at * the parents here. We also need to do the date-based limiting * that we'd otherwise have done in limit_list(). */ if (!revs->limited) { if (revs->max_age != -1 && (commit->date < revs->max_age)) continue; if (add_parents_to_list(revs, commit, &revs->commits, NULL) < 0) { if (!revs->ignore_missing_links) die("Failed to traverse parents of commit %s", oid_to_hex(&commit->object.oid)); } } switch (simplify_commit(revs, commit)) { case commit_ignore: continue; case commit_error: die("Failed to simplify parents of commit %s", oid_to_hex(&commit->object.oid)); default: if (revs->track_linear) track_linear(revs, commit); return commit; } } while (revs->commits); return NULL; } /* * Return true for entries that have not yet been shown. (This is an * object_array_each_func_t.) */ static int entry_unshown(struct object_array_entry *entry, void *cb_data_unused) { return !(entry->item->flags & SHOWN); } /* * If array is on the verge of a realloc, garbage-collect any entries * that have already been shown to try to free up some space. */ static void gc_boundary(struct object_array *array) { if (array->nr == array->alloc) object_array_filter(array, entry_unshown, NULL); } static void create_boundary_commit_list(struct rev_info *revs) { unsigned i; struct commit *c; struct object_array *array = &revs->boundary_commits; struct object_array_entry *objects = array->objects; /* * If revs->commits is non-NULL at this point, an error occurred in * get_revision_1(). Ignore the error and continue printing the * boundary commits anyway. (This is what the code has always * done.) */ if (revs->commits) { free_commit_list(revs->commits); revs->commits = NULL; } /* * Put all of the actual boundary commits from revs->boundary_commits * into revs->commits */ for (i = 0; i < array->nr; i++) { c = (struct commit *)(objects[i].item); if (!c) continue; if (!(c->object.flags & CHILD_SHOWN)) continue; if (c->object.flags & (SHOWN | BOUNDARY)) continue; c->object.flags |= BOUNDARY; commit_list_insert(c, &revs->commits); } /* * If revs->topo_order is set, sort the boundary commits * in topological order */ sort_in_topological_order(&revs->commits, revs->sort_order); } static struct commit *get_revision_internal(struct rev_info *revs) { struct commit *c = NULL; struct commit_list *l; if (revs->boundary == 2) { /* * All of the normal commits have already been returned, * and we are now returning boundary commits. 
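		 * (revs->boundary == 2 is the "emit boundary commits"
		 * state; it is entered further down, once
		 * get_revision_1() runs dry.)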
* create_boundary_commit_list() has populated * revs->commits with the remaining commits to return. */ c = pop_commit(&revs->commits); if (c) c->object.flags |= SHOWN; return c; } /* * If our max_count counter has reached zero, then we are done. We * don't simply return NULL because we still might need to show * boundary commits. But we want to avoid calling get_revision_1, which * might do a considerable amount of work finding the next commit only * for us to throw it away. * * If it is non-zero, then either we don't have a max_count at all * (-1), or it is still counting, in which case we decrement. */ if (revs->max_count) { c = get_revision_1(revs); if (c) { while (revs->skip_count > 0) { revs->skip_count--; c = get_revision_1(revs); if (!c) break; } } if (revs->max_count > 0) revs->max_count--; } if (c) c->object.flags |= SHOWN; if (!revs->boundary) return c; if (!c) { /* * get_revision_1() runs out the commits, and * we are done computing the boundaries. * switch to boundary commits output mode. */ revs->boundary = 2; /* * Update revs->commits to contain the list of * boundary commits. */ create_boundary_commit_list(revs); return get_revision_internal(revs); } /* * boundary commits are the commits that are parents of the * ones we got from get_revision_1() but they themselves are * not returned from get_revision_1(). Before returning * 'c', we need to mark its parents that they could be boundaries. */ for (l = c->parents; l; l = l->next) { struct object *p; p = &(l->item->object); if (p->flags & (CHILD_SHOWN | SHOWN)) continue; p->flags |= CHILD_SHOWN; gc_boundary(&revs->boundary_commits); add_object_array(p, NULL, &revs->boundary_commits); } return c; } struct commit *get_revision(struct rev_info *revs) { struct commit *c; struct commit_list *reversed; if (revs->reverse) { reversed = NULL; while ((c = get_revision_internal(revs))) commit_list_insert(c, &reversed); revs->commits = reversed; revs->reverse = 0; revs->reverse_output_stage = 1; } if (revs->reverse_output_stage) { c = pop_commit(&revs->commits); if (revs->track_linear) revs->linear = !!(c && c->object.flags & TRACK_LINEAR); return c; } c = get_revision_internal(revs); if (c && revs->graph) graph_update(revs->graph, c); if (!c) { free_saved_parents(revs); if (revs->previous_parents) { free_commit_list(revs->previous_parents); revs->previous_parents = NULL; } } return c; } char *get_revision_mark(const struct rev_info *revs, const struct commit *commit) { if (commit->object.flags & BOUNDARY) return "-"; else if (commit->object.flags & UNINTERESTING) return "^"; else if (commit->object.flags & PATCHSAME) return "="; else if (!revs || revs->left_right) { if (commit->object.flags & SYMMETRIC_LEFT) return "<"; else return ">"; } else if (revs->graph) return "*"; else if (revs->cherry_mark) return "+"; return ""; } void put_revision_mark(const struct rev_info *revs, const struct commit *commit) { char *mark = get_revision_mark(revs, commit); if (!strlen(mark)) return; fputs(mark, stdout); putchar(' '); }
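/*
 * Illustrative sketch only, not part of revision.c: the usual shape of
 * a caller of the API above (compare the git builtins). The function
 * name is made up and error handling is minimal; "prefix" would come
 * from setup_git_directory().
 */
int walk_example(int argc, const char **argv, const char *prefix)
{
	struct rev_info revs;
	struct commit *commit;

	init_revisions(&revs, prefix);		/* install the defaults */
	argc = setup_revisions(argc, argv, &revs, NULL); /* parse args */
	if (prepare_revision_walk(&revs))	/* limit, sort, simplify */
		die("revision walk setup failed");
	while ((commit = get_revision(&revs)) != NULL)
		printf("%s%s\n", get_revision_mark(&revs, commit),
		       oid_to_hex(&commit->object.oid));
	return 0;
}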
#include "cache.h" #include "tag.h" #include "blob.h" #include "tree.h" #include "commit.h" #include "diff.h" #include "refs.h" #include "revision.h" #include "graph.h" #include "grep.h" #include "reflog-walk.h" #include "patch-ids.h" #include "decorate.h" #include "log-tree.h" #include "string-list.h" #include "line-log.h" #include "mailmap.h" #include "commit-slab.h" #include "dir.h" #include "cache-tree.h" #include "bisect.h" volatile show_early_output_fn_t show_early_output; static const char *term_bad; static const char *term_good; void show_object_with_name(FILE *out, struct object *obj, const char *name) { const char *p; fprintf(out, "%s ", oid_to_hex(&obj->oid)); for (p = name; *p && *p != '\n'; p++) fputc(*p, out); fputc('\n', out); } static void mark_blob_uninteresting(struct blob *blob) { if (!blob) return; if (blob->object.flags & UNINTERESTING) return; blob->object.flags |= UNINTERESTING; } static void mark_tree_contents_uninteresting(struct tree *tree) { struct tree_desc desc; struct name_entry entry; struct object *obj = &tree->object; if (!has_object_file(&obj->oid)) return; if (parse_tree(tree) < 0) die("bad tree %s", oid_to_hex(&obj->oid)); init_tree_desc(&desc, tree->buffer, tree->size); while (tree_entry(&desc, &entry)) { switch (object_type(entry.mode)) { case OBJ_TREE: mark_tree_uninteresting(lookup_tree(entry.sha1)); break; case OBJ_BLOB: mark_blob_uninteresting(lookup_blob(entry.sha1)); break; default: /* Subproject commit - not in this repository */ break; } } /* * We don't care about the tree any more * after it has been marked uninteresting. */ free_tree_buffer(tree); } void mark_tree_uninteresting(struct tree *tree) { struct object *obj; if (!tree) return; obj = &tree->object; if (obj->flags & UNINTERESTING) return; obj->flags |= UNINTERESTING; mark_tree_contents_uninteresting(tree); } void mark_parents_uninteresting(struct commit *commit) { struct commit_list *parents = NULL, *l; for (l = commit->parents; l; l = l->next) commit_list_insert(l->item, &parents); while (parents) { struct commit *commit = pop_commit(&parents); while (commit) { /* * A missing commit is ok iff its parent is marked * uninteresting. * * We just mark such a thing parsed, so that when * it is popped next time around, we won't be trying * to parse it and get an error. */ if (!has_object_file(&commit->object.oid)) commit->object.parsed = 1; if (commit->object.flags & UNINTERESTING) break; commit->object.flags |= UNINTERESTING; /* * Normally we haven't parsed the parent * yet, so we won't have a parent of a parent * here. However, it may turn out that we've * reached this commit some other way (where it * wasn't uninteresting), in which case we need * to mark its parents recursively too.. */ if (!commit->parents) break; for (l = commit->parents->next; l; l = l->next) commit_list_insert(l->item, &parents); commit = commit->parents->item; } } } static void add_pending_object_with_path(struct rev_info *revs, struct object *obj, const char *name, unsigned mode, const char *path) { if (!obj) return; if (revs->no_walk && (obj->flags & UNINTERESTING)) revs->no_walk = 0; if (revs->reflog_info && obj->type == OBJ_COMMIT) { struct strbuf buf = STRBUF_INIT; int len = interpret_branch_name(name, 0, &buf); int st; if (0 < len && name[len] && buf.len) strbuf_addstr(&buf, name + len); st = add_reflog_for_walk(revs->reflog_info, (struct commit *)obj, buf.buf[0] ? 
buf.buf: name); strbuf_release(&buf); if (st) return; } add_object_array_with_path(obj, name, &revs->pending, mode, path); } static void add_pending_object_with_mode(struct rev_info *revs, struct object *obj, const char *name, unsigned mode) { add_pending_object_with_path(revs, obj, name, mode, NULL); } void add_pending_object(struct rev_info *revs, struct object *obj, const char *name) { add_pending_object_with_mode(revs, obj, name, S_IFINVALID); } void add_head_to_pending(struct rev_info *revs) { unsigned char sha1[20]; struct object *obj; if (get_sha1("HEAD", sha1)) return; obj = parse_object(sha1); if (!obj) return; add_pending_object(revs, obj, "HEAD"); } static struct object *get_reference(struct rev_info *revs, const char *name, const unsigned char *sha1, unsigned int flags) { struct object *object; object = parse_object(sha1); if (!object) { if (revs->ignore_missing) return object; die("bad object %s", name); } object->flags |= flags; return object; } void add_pending_sha1(struct rev_info *revs, const char *name, const unsigned char *sha1, unsigned int flags) { struct object *object = get_reference(revs, name, sha1, flags); add_pending_object(revs, object, name); } static struct commit *handle_commit(struct rev_info *revs, struct object_array_entry *entry) { struct object *object = entry->item; const char *name = entry->name; const char *path = entry->path; unsigned int mode = entry->mode; unsigned long flags = object->flags; /* * Tag object? Look what it points to.. */ while (object->type == OBJ_TAG) { struct tag *tag = (struct tag *) object; if (revs->tag_objects && !(flags & UNINTERESTING)) add_pending_object(revs, object, tag->tag); if (!tag->tagged) die("bad tag"); object = parse_object(tag->tagged->oid.hash); if (!object) { if (flags & UNINTERESTING) return NULL; die("bad object %s", oid_to_hex(&tag->tagged->oid)); } object->flags |= flags; /* * We'll handle the tagged object by looping or dropping * through to the non-tag handlers below. Do not * propagate path data from the tag's pending entry. */ path = NULL; mode = 0; } /* * Commit object? Just return it, we'll do all the complex * reachability crud. */ if (object->type == OBJ_COMMIT) { struct commit *commit = (struct commit *)object; if (parse_commit(commit) < 0) die("unable to parse commit %s", name); if (flags & UNINTERESTING) { mark_parents_uninteresting(commit); revs->limited = 1; } if (revs->show_source && !commit->util) commit->util = xstrdup(name); return commit; } /* * Tree object? Either mark it uninteresting, or add it * to the list of objects to look at later.. */ if (object->type == OBJ_TREE) { struct tree *tree = (struct tree *)object; if (!revs->tree_objects) return NULL; if (flags & UNINTERESTING) { mark_tree_contents_uninteresting(tree); return NULL; } add_pending_object_with_path(revs, object, name, mode, path); return NULL; } /* * Blob object? You know the drill by now.. 
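	 * (Same shape as the tree case: skip unless --objects enabled
	 * blob walking, drop UNINTERESTING ones, otherwise queue it
	 * with its path.)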
*/ if (object->type == OBJ_BLOB) { if (!revs->blob_objects) return NULL; if (flags & UNINTERESTING) return NULL; add_pending_object_with_path(revs, object, name, mode, path); return NULL; } die("%s is unknown object", name); } static int everybody_uninteresting(struct commit_list *orig, struct commit **interesting_cache) { struct commit_list *list = orig; if (*interesting_cache) { struct commit *commit = *interesting_cache; if (!(commit->object.flags & UNINTERESTING)) return 0; } while (list) { struct commit *commit = list->item; list = list->next; if (commit->object.flags & UNINTERESTING) continue; *interesting_cache = commit; return 0; } return 1; } /* * A definition of "relevant" commit that we can use to simplify limited graphs * by eliminating side branches. * * A "relevant" commit is one that is !UNINTERESTING (ie we are including it * in our list), or that is a specified BOTTOM commit. Then after computing * a limited list, during processing we can generally ignore boundary merges * coming from outside the graph, (ie from irrelevant parents), and treat * those merges as if they were single-parent. TREESAME is defined to consider * only relevant parents, if any. If we are TREESAME to our on-graph parents, * we don't care if we were !TREESAME to non-graph parents. * * Treating bottom commits as relevant ensures that a limited graph's * connection to the actual bottom commit is not viewed as a side branch, but * treated as part of the graph. For example: * * ....Z...A---X---o---o---B * . / * W---Y * * When computing "A..B", the A-X connection is at least as important as * Y-X, despite A being flagged UNINTERESTING. * * And when computing --ancestry-path "A..B", the A-X connection is more * important than Y-X, despite both A and Y being flagged UNINTERESTING. */ static inline int relevant_commit(struct commit *commit) { return (commit->object.flags & (UNINTERESTING | BOTTOM)) != UNINTERESTING; } /* * Return a single relevant commit from a parent list. If we are a TREESAME * commit, and this selects one of our parents, then we can safely simplify to * that parent. */ static struct commit *one_relevant_parent(const struct rev_info *revs, struct commit_list *orig) { struct commit_list *list = orig; struct commit *relevant = NULL; if (!orig) return NULL; /* * For 1-parent commits, or if first-parent-only, then return that * first parent (even if not "relevant" by the above definition). * TREESAME will have been set purely on that parent. */ if (revs->first_parent_only || !orig->next) return orig->item; /* * For multi-parent commits, identify a sole relevant parent, if any. * If we have only one relevant parent, then TREESAME will be set purely * with regard to that parent, and we can simplify accordingly. * * If we have more than one relevant parent, or no relevant parents * (and multiple irrelevant ones), then we can't select a parent here * and return NULL. */ while (list) { struct commit *commit = list->item; list = list->next; if (relevant_commit(commit)) { if (relevant) return NULL; relevant = commit; } } return relevant; } /* * The goal is to get REV_TREE_NEW as the result only if the * diff consists of all '+' (and no other changes), REV_TREE_OLD * if the whole diff is removal of old data, and otherwise * REV_TREE_DIFFERENT (of course if the trees are the same we * want REV_TREE_SAME). * That means that once we get to REV_TREE_DIFFERENT, we do not * have to look any further. 
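 * file_add_remove() below counts on the encoding: it ORs REV_TREE_NEW
 * and REV_TREE_OLD into tree_difference, and a mixture saturates at
 * REV_TREE_DIFFERENT (see the values in revision.h).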
*/ static int tree_difference = REV_TREE_SAME; static void file_add_remove(struct diff_options *options, int addremove, unsigned mode, const unsigned char *sha1, int sha1_valid, const char *fullpath, unsigned dirty_submodule) { int diff = addremove == '+' ? REV_TREE_NEW : REV_TREE_OLD; tree_difference |= diff; if (tree_difference == REV_TREE_DIFFERENT) DIFF_OPT_SET(options, HAS_CHANGES); } static void file_change(struct diff_options *options, unsigned old_mode, unsigned new_mode, const unsigned char *old_sha1, const unsigned char *new_sha1, int old_sha1_valid, int new_sha1_valid, const char *fullpath, unsigned old_dirty_submodule, unsigned new_dirty_submodule) { tree_difference = REV_TREE_DIFFERENT; DIFF_OPT_SET(options, HAS_CHANGES); } static int rev_compare_tree(struct rev_info *revs, struct commit *parent, struct commit *commit) { struct tree *t1 = parent->tree; struct tree *t2 = commit->tree; if (!t1) return REV_TREE_NEW; if (!t2) return REV_TREE_OLD; if (revs->simplify_by_decoration) { /* * If we are simplifying by decoration, then the commit * is worth showing if it has a tag pointing at it. */ if (get_name_decoration(&commit->object)) return REV_TREE_DIFFERENT; /* * A commit that is not pointed by a tag is uninteresting * if we are not limited by path. This means that you will * see the usual "commits that touch the paths" plus any * tagged commit by specifying both --simplify-by-decoration * and pathspec. */ if (!revs->prune_data.nr) return REV_TREE_SAME; } tree_difference = REV_TREE_SAME; DIFF_OPT_CLR(&revs->pruning, HAS_CHANGES); if (diff_tree_sha1(t1->object.oid.hash, t2->object.oid.hash, "", &revs->pruning) < 0) return REV_TREE_DIFFERENT; return tree_difference; } static int rev_same_tree_as_empty(struct rev_info *revs, struct commit *commit) { int retval; struct tree *t1 = commit->tree; if (!t1) return 0; tree_difference = REV_TREE_SAME; DIFF_OPT_CLR(&revs->pruning, HAS_CHANGES); retval = diff_tree_sha1(NULL, t1->object.oid.hash, "", &revs->pruning); return retval >= 0 && (tree_difference == REV_TREE_SAME); } struct treesame_state { unsigned int nparents; unsigned char treesame[FLEX_ARRAY]; }; static struct treesame_state *initialise_treesame(struct rev_info *revs, struct commit *commit) { unsigned n = commit_list_count(commit->parents); struct treesame_state *st = xcalloc(1, sizeof(*st) + n); st->nparents = n; add_decoration(&revs->treesame, &commit->object, st); return st; } /* * Must be called immediately after removing the nth_parent from a commit's * parent list, if we are maintaining the per-parent treesame[] decoration. * This does not recalculate the master TREESAME flag - update_treesame() * should be called to update it after a sequence of treesame[] modifications * that may have affected it. */ static int compact_treesame(struct rev_info *revs, struct commit *commit, unsigned nth_parent) { struct treesame_state *st; int old_same; if (!commit->parents) { /* * Have just removed the only parent from a non-merge. * Different handling, as we lack decoration. 
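		 * (per-parent treesame[] state is kept only for merges, so
		 * once the last parent is gone we recompute TREESAME directly
		 * against the empty tree instead)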
*/ if (nth_parent != 0) die("compact_treesame %u", nth_parent); old_same = !!(commit->object.flags & TREESAME); if (rev_same_tree_as_empty(revs, commit)) commit->object.flags |= TREESAME; else commit->object.flags &= ~TREESAME; return old_same; } st = lookup_decoration(&revs->treesame, &commit->object); if (!st || nth_parent >= st->nparents) die("compact_treesame %u", nth_parent); old_same = st->treesame[nth_parent]; memmove(st->treesame + nth_parent, st->treesame + nth_parent + 1, st->nparents - nth_parent - 1); /* * If we've just become a non-merge commit, update TREESAME * immediately, and remove the no-longer-needed decoration. * If still a merge, defer update until update_treesame(). */ if (--st->nparents == 1) { if (commit->parents->next) die("compact_treesame parents mismatch"); if (st->treesame[0] && revs->dense) commit->object.flags |= TREESAME; else commit->object.flags &= ~TREESAME; free(add_decoration(&revs->treesame, &commit->object, NULL)); } return old_same; } static unsigned update_treesame(struct rev_info *revs, struct commit *commit) { if (commit->parents && commit->parents->next) { unsigned n; struct treesame_state *st; struct commit_list *p; unsigned relevant_parents; unsigned relevant_change, irrelevant_change; st = lookup_decoration(&revs->treesame, &commit->object); if (!st) die("update_treesame %s", oid_to_hex(&commit->object.oid)); relevant_parents = 0; relevant_change = irrelevant_change = 0; for (p = commit->parents, n = 0; p; n++, p = p->next) { if (relevant_commit(p->item)) { relevant_change |= !st->treesame[n]; relevant_parents++; } else irrelevant_change |= !st->treesame[n]; } if (relevant_parents ? relevant_change : irrelevant_change) commit->object.flags &= ~TREESAME; else commit->object.flags |= TREESAME; } return commit->object.flags & TREESAME; } static inline int limiting_can_increase_treesame(const struct rev_info *revs) { /* * TREESAME is irrelevant unless prune && dense; * if simplify_history is set, we can't have a mixture of TREESAME and * !TREESAME INTERESTING parents (and we don't have treesame[] * decoration anyway); * if first_parent_only is set, then the TREESAME flag is locked * against the first parent (and again we lack treesame[] decoration). */ return revs->prune && revs->dense && !revs->simplify_history && !revs->first_parent_only; } static void try_to_simplify_commit(struct rev_info *revs, struct commit *commit) { struct commit_list **pp, *parent; struct treesame_state *ts = NULL; int relevant_change = 0, irrelevant_change = 0; int relevant_parents, nth_parent; /* * If we don't do pruning, everything is interesting */ if (!revs->prune) return; if (!commit->tree) return; if (!commit->parents) { if (rev_same_tree_as_empty(revs, commit)) commit->object.flags |= TREESAME; return; } /* * Normal non-merge commit? If we don't want to make the * history dense, we consider it always to be a change.. */ if (!revs->dense && !commit->parents->next) return; for (pp = &commit->parents, nth_parent = 0, relevant_parents = 0; (parent = *pp) != NULL; pp = &parent->next, nth_parent++) { struct commit *p = parent->item; if (relevant_commit(p)) relevant_parents++; if (nth_parent == 1) { /* * This our second loop iteration - so we now know * we're dealing with a merge. * * Do not compare with later parents when we care only about * the first parent chain, in order to avoid derailing the * traversal to follow a side branch that brought everything * in the path we are limited to by the pathspec. 
*/ if (revs->first_parent_only) break; /* * If this will remain a potentially-simplifiable * merge, remember per-parent treesame if needed. * Initialise the array with the comparison from our * first iteration. */ if (revs->treesame.name && !revs->simplify_history && !(commit->object.flags & UNINTERESTING)) { ts = initialise_treesame(revs, commit); if (!(irrelevant_change || relevant_change)) ts->treesame[0] = 1; } } if (parse_commit(p) < 0) die("cannot simplify commit %s (because of %s)", oid_to_hex(&commit->object.oid), oid_to_hex(&p->object.oid)); switch (rev_compare_tree(revs, p, commit)) { case REV_TREE_SAME: if (!revs->simplify_history || !relevant_commit(p)) { /* Even if a merge with an uninteresting * side branch brought the entire change * we are interested in, we do not want * to lose the other branches of this * merge, so we just keep going. */ if (ts) ts->treesame[nth_parent] = 1; continue; } parent->next = NULL; commit->parents = parent; commit->object.flags |= TREESAME; return; case REV_TREE_NEW: if (revs->remove_empty_trees && rev_same_tree_as_empty(revs, p)) { /* We are adding all the specified * paths from this parent, so the * history beyond this parent is not * interesting. Remove its parents * (they are grandparents for us). * IOW, we pretend this parent is a * "root" commit. */ if (parse_commit(p) < 0) die("cannot simplify commit %s (invalid %s)", oid_to_hex(&commit->object.oid), oid_to_hex(&p->object.oid)); p->parents = NULL; } /* fallthrough */ case REV_TREE_OLD: case REV_TREE_DIFFERENT: if (relevant_commit(p)) relevant_change = 1; else irrelevant_change = 1; continue; } die("bad tree compare for commit %s", oid_to_hex(&commit->object.oid)); } /* * TREESAME is straightforward for single-parent commits. For merge * commits, it is most useful to define it so that "irrelevant" * parents cannot make us !TREESAME - if we have any relevant * parents, then we only consider TREESAMEness with respect to them, * allowing irrelevant merges from uninteresting branches to be * simplified away. Only if we have only irrelevant parents do we * base TREESAME on them. Note that this logic is replicated in * update_treesame, which should be kept in sync. */ if (relevant_parents ? !relevant_change : !irrelevant_change) commit->object.flags |= TREESAME; } static void commit_list_insert_by_date_cached(struct commit *p, struct commit_list **head, struct commit_list *cached_base, struct commit_list **cache) { struct commit_list *new_entry; if (cached_base && p->date < cached_base->item->date) new_entry = commit_list_insert_by_date(p, &cached_base->next); else new_entry = commit_list_insert_by_date(p, head); if (cache && (!*cache || p->date < (*cache)->item->date)) *cache = new_entry; } static int add_parents_to_list(struct rev_info *revs, struct commit *commit, struct commit_list **list, struct commit_list **cache_ptr) { struct commit_list *parent = commit->parents; unsigned left_flag; struct commit_list *cached_base = cache_ptr ? *cache_ptr : NULL; if (commit->object.flags & ADDED) return 0; commit->object.flags |= ADDED; if (revs->include_check && !revs->include_check(commit, revs->include_check_data)) return 0; /* * If the commit is uninteresting, don't try to * prune parents - we want the maximal uninteresting * set. * * Normally we haven't parsed the parent * yet, so we won't have a parent of a parent * here. However, it may turn out that we've * reached this commit some other way (where it * wasn't uninteresting), in which case we need * to mark its parents recursively too.. 
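	 * (mark_parents_uninteresting() does the recursive marking; the
	 * loop below also queues each parent, so the date-ordered walk in
	 * limit_list() sees them and can tell when it is safe to stop)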
*/ if (commit->object.flags & UNINTERESTING) { while (parent) { struct commit *p = parent->item; parent = parent->next; if (p) p->object.flags |= UNINTERESTING; if (parse_commit_gently(p, 1) < 0) continue; if (p->parents) mark_parents_uninteresting(p); if (p->object.flags & SEEN) continue; p->object.flags |= SEEN; commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr); } return 0; } /* * Ok, the commit wasn't uninteresting. Try to * simplify the commit history and find the parent * that has no differences in the path set if one exists. */ try_to_simplify_commit(revs, commit); if (revs->no_walk) return 0; left_flag = (commit->object.flags & SYMMETRIC_LEFT); for (parent = commit->parents; parent; parent = parent->next) { struct commit *p = parent->item; if (parse_commit_gently(p, revs->ignore_missing_links) < 0) return -1; if (revs->show_source && !p->util) p->util = commit->util; p->object.flags |= left_flag; if (!(p->object.flags & SEEN)) { p->object.flags |= SEEN; commit_list_insert_by_date_cached(p, list, cached_base, cache_ptr); } if (revs->first_parent_only) break; } return 0; } static void cherry_pick_list(struct commit_list *list, struct rev_info *revs) { struct commit_list *p; int left_count = 0, right_count = 0; int left_first; struct patch_ids ids; unsigned cherry_flag; /* First count the commits on the left and on the right */ for (p = list; p; p = p->next) { struct commit *commit = p->item; unsigned flags = commit->object.flags; if (flags & BOUNDARY) ; else if (flags & SYMMETRIC_LEFT) left_count++; else right_count++; } if (!left_count || !right_count) return; left_first = left_count < right_count; init_patch_ids(&ids); ids.diffopts.pathspec = revs->diffopt.pathspec; /* Compute patch-ids for one side */ for (p = list; p; p = p->next) { struct commit *commit = p->item; unsigned flags = commit->object.flags; if (flags & BOUNDARY) continue; /* * If we have fewer left, left_first is set and we omit * commits on the right branch in this loop. If we have * fewer right, we skip the left ones. */ if (left_first != !!(flags & SYMMETRIC_LEFT)) continue; commit->util = add_commit_patch_id(commit, &ids); } /* either cherry_mark or cherry_pick are true */ cherry_flag = revs->cherry_mark ? PATCHSAME : SHOWN; /* Check the other side */ for (p = list; p; p = p->next) { struct commit *commit = p->item; struct patch_id *id; unsigned flags = commit->object.flags; if (flags & BOUNDARY) continue; /* * If we have fewer left, left_first is set and we omit * commits on the left branch in this loop. */ if (left_first == !!(flags & SYMMETRIC_LEFT)) continue; /* * Have we seen the same patch id? */ id = has_commit_patch_id(commit, &ids); if (!id) continue; id->seen = 1; commit->object.flags |= cherry_flag; } /* Now check the original side for seen ones */ for (p = list; p; p = p->next) { struct commit *commit = p->item; struct patch_id *ent; ent = commit->util; if (!ent) continue; if (ent->seen) commit->object.flags |= cherry_flag; commit->util = NULL; } free_patch_ids(&ids); } /* How many extra uninteresting commits we want to see.. */ #define SLOP 5 static int still_interesting(struct commit_list *src, unsigned long date, int slop, struct commit **interesting_cache) { /* * No source list at all? We're definitely done.. */ if (!src) return 0; /* * Does the destination list contain entries with a date * before the source list? Definitely _not_ done. */ if (date <= src->item->date) return SLOP; /* * Does the source list still have interesting commits in * it? Definitely not done.. 
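	 * (even then we do not stop immediately: each call counts "slop"
	 * down from SLOP, so a few extra commits are examined, since
	 * commit timestamps are not guaranteed to be monotonic)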
*/ if (!everybody_uninteresting(src, interesting_cache)) return SLOP; /* Ok, we're closing in.. */ return slop-1; } /* * "rev-list --ancestry-path A..B" computes commits that are ancestors * of B but not ancestors of A but further limits the result to those * that are descendants of A. This takes the list of bottom commits and * the result of "A..B" without --ancestry-path, and limits the latter * further to the ones that can reach one of the commits in "bottom". */ static void limit_to_ancestry(struct commit_list *bottom, struct commit_list *list) { struct commit_list *p; struct commit_list *rlist = NULL; int made_progress; /* * Reverse the list so that it will be likely that we would * process parents before children. */ for (p = list; p; p = p->next) commit_list_insert(p->item, &rlist); for (p = bottom; p; p = p->next) p->item->object.flags |= TMP_MARK; /* * Mark the ones that can reach bottom commits in "list", * in a bottom-up fashion. */ do { made_progress = 0; for (p = rlist; p; p = p->next) { struct commit *c = p->item; struct commit_list *parents; if (c->object.flags & (TMP_MARK | UNINTERESTING)) continue; for (parents = c->parents; parents; parents = parents->next) { if (!(parents->item->object.flags & TMP_MARK)) continue; c->object.flags |= TMP_MARK; made_progress = 1; break; } } } while (made_progress); /* * NEEDSWORK: decide if we want to remove parents that are * not marked with TMP_MARK from commit->parents for commits * in the resulting list. We may not want to do that, though. */ /* * The ones that are not marked with TMP_MARK are uninteresting */ for (p = list; p; p = p->next) { struct commit *c = p->item; if (c->object.flags & TMP_MARK) continue; c->object.flags |= UNINTERESTING; } /* We are done with the TMP_MARK */ for (p = list; p; p = p->next) p->item->object.flags &= ~TMP_MARK; for (p = bottom; p; p = p->next) p->item->object.flags &= ~TMP_MARK; free_commit_list(rlist); } /* * Before walking the history, keep the set of "negative" refs the * caller has asked to exclude. * * This is used to compute "rev-list --ancestry-path A..B", as we need * to filter the result of "A..B" further to the ones that can actually * reach A. 
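 * (The BOTTOM flag is put on the excluded side of a range while the
 * command line is parsed, so collecting the bottom commits is a simple
 * scan for that flag.)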
*/ static struct commit_list *collect_bottom_commits(struct commit_list *list) { struct commit_list *elem, *bottom = NULL; for (elem = list; elem; elem = elem->next) if (elem->item->object.flags & BOTTOM) commit_list_insert(elem->item, &bottom); return bottom; } /* Assumes either left_only or right_only is set */ static void limit_left_right(struct commit_list *list, struct rev_info *revs) { struct commit_list *p; for (p = list; p; p = p->next) { struct commit *commit = p->item; if (revs->right_only) { if (commit->object.flags & SYMMETRIC_LEFT) commit->object.flags |= SHOWN; } else /* revs->left_only is set */ if (!(commit->object.flags & SYMMETRIC_LEFT)) commit->object.flags |= SHOWN; } } static int limit_list(struct rev_info *revs) { int slop = SLOP; unsigned long date = ~0ul; struct commit_list *list = revs->commits; struct commit_list *newlist = NULL; struct commit_list **p = &newlist; struct commit_list *bottom = NULL; struct commit *interesting_cache = NULL; if (revs->ancestry_path) { bottom = collect_bottom_commits(list); if (!bottom) die("--ancestry-path given but there are no bottom commits"); } while (list) { struct commit *commit = pop_commit(&list); struct object *obj = &commit->object; show_early_output_fn_t show; if (commit == interesting_cache) interesting_cache = NULL; if (revs->max_age != -1 && (commit->date < revs->max_age)) obj->flags |= UNINTERESTING; if (add_parents_to_list(revs, commit, &list, NULL) < 0) return -1; if (obj->flags & UNINTERESTING) { mark_parents_uninteresting(commit); if (revs->show_all) p = &commit_list_insert(commit, p)->next; slop = still_interesting(list, date, slop, &interesting_cache); if (slop) continue; /* If showing all, add the whole pending list to the end */ if (revs->show_all) *p = list; break; } if (revs->min_age != -1 && (commit->date > revs->min_age)) continue; date = commit->date; p = &commit_list_insert(commit, p)->next; show = show_early_output; if (!show) continue; show(revs, newlist); show_early_output = NULL; } if (revs->cherry_pick || revs->cherry_mark) cherry_pick_list(newlist, revs); if (revs->left_only || revs->right_only) limit_left_right(newlist, revs); if (bottom) { limit_to_ancestry(bottom, newlist); free_commit_list(bottom); } /* * Check if any commits have become TREESAME by some of their parents * becoming UNINTERESTING. */ if (limiting_can_increase_treesame(revs)) for (list = newlist; list; list = list->next) { struct commit *c = list->item; if (c->object.flags & (UNINTERESTING | TREESAME)) continue; update_treesame(revs, c); } revs->commits = newlist; return 0; } /* * Add an entry to refs->cmdline with the specified information. * *name is copied. 
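 * ("whence" records how the rev entered the command line, e.g.
 * REV_CMD_REF, REV_CMD_LEFT or REV_CMD_MERGE_BASE, so later consumers
 * can tell a plain rev from one side of a range.)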
*/ static void add_rev_cmdline(struct rev_info *revs, struct object *item, const char *name, int whence, unsigned flags) { struct rev_cmdline_info *info = &revs->cmdline; int nr = info->nr; ALLOC_GROW(info->rev, nr + 1, info->alloc); info->rev[nr].item = item; info->rev[nr].name = xstrdup(name); info->rev[nr].whence = whence; info->rev[nr].flags = flags; info->nr++; } static void add_rev_cmdline_list(struct rev_info *revs, struct commit_list *commit_list, int whence, unsigned flags) { while (commit_list) { struct object *object = &commit_list->item->object; add_rev_cmdline(revs, object, oid_to_hex(&object->oid), whence, flags); commit_list = commit_list->next; } } struct all_refs_cb { int all_flags; int warned_bad_reflog; struct rev_info *all_revs; const char *name_for_errormsg; }; int ref_excluded(struct string_list *ref_excludes, const char *path) { struct string_list_item *item; if (!ref_excludes) return 0; for_each_string_list_item(item, ref_excludes) { if (!wildmatch(item->string, path, 0, NULL)) return 1; } return 0; } static int handle_one_ref(const char *path, const struct object_id *oid, int flag, void *cb_data) { struct all_refs_cb *cb = cb_data; struct object *object; if (ref_excluded(cb->all_revs->ref_excludes, path)) return 0; object = get_reference(cb->all_revs, path, oid->hash, cb->all_flags); add_rev_cmdline(cb->all_revs, object, path, REV_CMD_REF, cb->all_flags); add_pending_sha1(cb->all_revs, path, oid->hash, cb->all_flags); return 0; } static void init_all_refs_cb(struct all_refs_cb *cb, struct rev_info *revs, unsigned flags) { cb->all_revs = revs; cb->all_flags = flags; } void clear_ref_exclusion(struct string_list **ref_excludes_p) { if (*ref_excludes_p) { string_list_clear(*ref_excludes_p, 0); free(*ref_excludes_p); } *ref_excludes_p = NULL; } void add_ref_exclusion(struct string_list **ref_excludes_p, const char *exclude) { if (!*ref_excludes_p) { *ref_excludes_p = xcalloc(1, sizeof(**ref_excludes_p)); (*ref_excludes_p)->strdup_strings = 1; } string_list_append(*ref_excludes_p, exclude); } static void handle_refs(const char *submodule, struct rev_info *revs, unsigned flags, int (*for_each)(const char *, each_ref_fn, void *)) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, flags); for_each(submodule, handle_one_ref, &cb); } static void handle_one_reflog_commit(unsigned char *sha1, void *cb_data) { struct all_refs_cb *cb = cb_data; if (!is_null_sha1(sha1)) { struct object *o = parse_object(sha1); if (o) { o->flags |= cb->all_flags; /* ??? CMDLINEFLAGS ??? 
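			 * (presumably: should reflog-walked objects be
			 * recorded with add_rev_cmdline() too, and if so,
			 * with which flags? They currently bypass it.)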
*/ add_pending_object(cb->all_revs, o, ""); } else if (!cb->warned_bad_reflog) { warning("reflog of '%s' references pruned commits", cb->name_for_errormsg); cb->warned_bad_reflog = 1; } } } static int handle_one_reflog_ent(unsigned char *osha1, unsigned char *nsha1, const char *email, unsigned long timestamp, int tz, const char *message, void *cb_data) { handle_one_reflog_commit(osha1, cb_data); handle_one_reflog_commit(nsha1, cb_data); return 0; } static int handle_one_reflog(const char *path, const struct object_id *oid, int flag, void *cb_data) { struct all_refs_cb *cb = cb_data; cb->warned_bad_reflog = 0; cb->name_for_errormsg = path; for_each_reflog_ent(path, handle_one_reflog_ent, cb_data); return 0; } void add_reflogs_to_pending(struct rev_info *revs, unsigned flags) { struct all_refs_cb cb; cb.all_revs = revs; cb.all_flags = flags; for_each_reflog(handle_one_reflog, &cb); } static void add_cache_tree(struct cache_tree *it, struct rev_info *revs, struct strbuf *path) { size_t baselen = path->len; int i; if (it->entry_count >= 0) { struct tree *tree = lookup_tree(it->sha1); add_pending_object_with_path(revs, &tree->object, "", 040000, path->buf); } for (i = 0; i < it->subtree_nr; i++) { struct cache_tree_sub *sub = it->down[i]; strbuf_addf(path, "%s%s", baselen ? "/" : "", sub->name); add_cache_tree(sub->cache_tree, revs, path); strbuf_setlen(path, baselen); } } void add_index_objects_to_pending(struct rev_info *revs, unsigned flags) { int i; read_cache(); for (i = 0; i < active_nr; i++) { struct cache_entry *ce = active_cache[i]; struct blob *blob; if (S_ISGITLINK(ce->ce_mode)) continue; blob = lookup_blob(ce->sha1); if (!blob) die("unable to add index blob to traversal"); add_pending_object_with_path(revs, &blob->object, "", ce->ce_mode, ce->name); } if (active_cache_tree) { struct strbuf path = STRBUF_INIT; add_cache_tree(active_cache_tree, revs, &path); strbuf_release(&path); } } static int add_parents_only(struct rev_info *revs, const char *arg_, int flags) { unsigned char sha1[20]; struct object *it; struct commit *commit; struct commit_list *parents; const char *arg = arg_; if (*arg == '^') { flags ^= UNINTERESTING | BOTTOM; arg++; } if (get_sha1_committish(arg, sha1)) return 0; while (1) { it = get_reference(revs, arg, sha1, 0); if (!it && revs->ignore_missing) return 0; if (it->type != OBJ_TAG) break; if (!((struct tag*)it)->tagged) return 0; hashcpy(sha1, ((struct tag*)it)->tagged->oid.hash); } if (it->type != OBJ_COMMIT) return 0; commit = (struct commit *)it; for (parents = commit->parents; parents; parents = parents->next) { it = &parents->item->object; it->flags |= flags; add_rev_cmdline(revs, it, arg_, REV_CMD_PARENTS_ONLY, flags); add_pending_object(revs, it, arg); } return 1; } void init_revisions(struct rev_info *revs, const char *prefix) { memset(revs, 0, sizeof(*revs)); revs->abbrev = DEFAULT_ABBREV; revs->ignore_merges = 1; revs->simplify_history = 1; DIFF_OPT_SET(&revs->pruning, RECURSIVE); DIFF_OPT_SET(&revs->pruning, QUICK); revs->pruning.add_remove = file_add_remove; revs->pruning.change = file_change; revs->sort_order = REV_SORT_IN_GRAPH_ORDER; revs->dense = 1; revs->prefix = prefix; revs->max_age = -1; revs->min_age = -1; revs->skip_count = -1; revs->max_count = -1; revs->max_parents = -1; revs->commit_format = CMIT_FMT_DEFAULT; init_grep_defaults(); grep_init(&revs->grep_filter, prefix); revs->grep_filter.status_only = 1; revs->grep_filter.regflags = REG_NEWLINE; diff_setup(&revs->diffopt); if (prefix && !revs->diffopt.prefix) { revs->diffopt.prefix = 
prefix; revs->diffopt.prefix_length = strlen(prefix); } revs->notes_opt.use_default_notes = -1; } static void add_pending_commit_list(struct rev_info *revs, struct commit_list *commit_list, unsigned int flags) { while (commit_list) { struct object *object = &commit_list->item->object; object->flags |= flags; add_pending_object(revs, object, oid_to_hex(&object->oid)); commit_list = commit_list->next; } } static void prepare_show_merge(struct rev_info *revs) { struct commit_list *bases; struct commit *head, *other; unsigned char sha1[20]; const char **prune = NULL; int i, prune_num = 1; /* counting terminating NULL */ if (get_sha1("HEAD", sha1)) die("--merge without HEAD?"); head = lookup_commit_or_die(sha1, "HEAD"); if (get_sha1("MERGE_HEAD", sha1)) die("--merge without MERGE_HEAD?"); other = lookup_commit_or_die(sha1, "MERGE_HEAD"); add_pending_object(revs, &head->object, "HEAD"); add_pending_object(revs, &other->object, "MERGE_HEAD"); bases = get_merge_bases(head, other); add_rev_cmdline_list(revs, bases, REV_CMD_MERGE_BASE, UNINTERESTING | BOTTOM); add_pending_commit_list(revs, bases, UNINTERESTING | BOTTOM); free_commit_list(bases); head->object.flags |= SYMMETRIC_LEFT; if (!active_nr) read_cache(); for (i = 0; i < active_nr; i++) { const struct cache_entry *ce = active_cache[i]; if (!ce_stage(ce)) continue; if (ce_path_match(ce, &revs->prune_data, NULL)) { prune_num++; REALLOC_ARRAY(prune, prune_num); prune[prune_num-2] = ce->name; prune[prune_num-1] = NULL; } while ((i+1 < active_nr) && ce_same_name(ce, active_cache[i+1])) i++; } free_pathspec(&revs->prune_data); parse_pathspec(&revs->prune_data, PATHSPEC_ALL_MAGIC & ~PATHSPEC_LITERAL, PATHSPEC_PREFER_FULL | PATHSPEC_LITERAL_PATH, "", prune); revs->limited = 1; } int handle_revision_arg(const char *arg_, struct rev_info *revs, int flags, unsigned revarg_opt) { struct object_context oc; char *dotdot; struct object *object; unsigned char sha1[20]; int local_flags; const char *arg = arg_; int cant_be_filename = revarg_opt & REVARG_CANNOT_BE_FILENAME; unsigned get_sha1_flags = 0; flags = flags & UNINTERESTING ? flags | BOTTOM : flags & ~BOTTOM; dotdot = strstr(arg, ".."); if (dotdot) { unsigned char from_sha1[20]; const char *next = dotdot + 2; const char *this = arg; int symmetric = *next == '.'; unsigned int flags_exclude = flags ^ (UNINTERESTING | BOTTOM); static const char head_by_default[] = "HEAD"; unsigned int a_flags; *dotdot = 0; next += symmetric; if (!*next) next = head_by_default; if (dotdot == arg) this = head_by_default; if (this == head_by_default && next == head_by_default && !symmetric) { /* * Just ".."? That is not a range but the * pathspec for the parent directory. */ if (!cant_be_filename) { *dotdot = '.'; return -1; } } if (!get_sha1_committish(this, from_sha1) && !get_sha1_committish(next, sha1)) { struct object *a_obj, *b_obj; if (!cant_be_filename) { *dotdot = '.'; verify_non_filename(revs->prefix, arg); } a_obj = parse_object(from_sha1); b_obj = parse_object(sha1); if (!a_obj || !b_obj) { missing: if (revs->ignore_missing) return 0; die(symmetric ? "Invalid symmetric difference expression %s" : "Invalid revision range %s", arg); } if (!symmetric) { /* just A..B */ a_flags = flags_exclude; } else { /* A...B -- find merge bases between the two */ struct commit *a, *b; struct commit_list *exclude; a = (a_obj->type == OBJ_COMMIT ? (struct commit *)a_obj : lookup_commit_reference(a_obj->oid.hash)); b = (b_obj->type == OBJ_COMMIT ? 
(struct commit *)b_obj : lookup_commit_reference(b_obj->oid.hash)); if (!a || !b) goto missing; exclude = get_merge_bases(a, b); add_rev_cmdline_list(revs, exclude, REV_CMD_MERGE_BASE, flags_exclude); add_pending_commit_list(revs, exclude, flags_exclude); free_commit_list(exclude); a_flags = flags | SYMMETRIC_LEFT; } a_obj->flags |= a_flags; b_obj->flags |= flags; add_rev_cmdline(revs, a_obj, this, REV_CMD_LEFT, a_flags); add_rev_cmdline(revs, b_obj, next, REV_CMD_RIGHT, flags); add_pending_object(revs, a_obj, this); add_pending_object(revs, b_obj, next); return 0; } *dotdot = '.'; } dotdot = strstr(arg, "^@"); if (dotdot && !dotdot[2]) { *dotdot = 0; if (add_parents_only(revs, arg, flags)) return 0; *dotdot = '^'; } dotdot = strstr(arg, "^!"); if (dotdot && !dotdot[2]) { *dotdot = 0; if (!add_parents_only(revs, arg, flags ^ (UNINTERESTING | BOTTOM))) *dotdot = '^'; } local_flags = 0; if (*arg == '^') { local_flags = UNINTERESTING | BOTTOM; arg++; } if (revarg_opt & REVARG_COMMITTISH) get_sha1_flags = GET_SHA1_COMMITTISH; if (get_sha1_with_context(arg, get_sha1_flags, sha1, &oc)) return revs->ignore_missing ? 0 : -1; if (!cant_be_filename) verify_non_filename(revs->prefix, arg); object = get_reference(revs, arg, sha1, flags ^ local_flags); add_rev_cmdline(revs, object, arg_, REV_CMD_REV, flags ^ local_flags); add_pending_object_with_mode(revs, object, arg, oc.mode); return 0; } struct cmdline_pathspec { int alloc; int nr; const char **path; }; static void append_prune_data(struct cmdline_pathspec *prune, const char **av) { while (*av) { ALLOC_GROW(prune->path, prune->nr + 1, prune->alloc); prune->path[prune->nr++] = *(av++); } } static void read_pathspec_from_stdin(struct rev_info *revs, struct strbuf *sb, struct cmdline_pathspec *prune) { while (strbuf_getline(sb, stdin) != EOF) { ALLOC_GROW(prune->path, prune->nr + 1, prune->alloc); prune->path[prune->nr++] = xstrdup(sb->buf); } } static void read_revisions_from_stdin(struct rev_info *revs, struct cmdline_pathspec *prune) { struct strbuf sb; int seen_dashdash = 0; int save_warning; save_warning = warn_on_object_refname_ambiguity; warn_on_object_refname_ambiguity = 0; strbuf_init(&sb, 1000); while (strbuf_getline(&sb, stdin) != EOF) { int len = sb.len; if (!len) break; if (sb.buf[0] == '-') { if (len == 2 && sb.buf[1] == '-') { seen_dashdash = 1; break; } die("options not supported in --stdin mode"); } if (handle_revision_arg(sb.buf, revs, 0, REVARG_CANNOT_BE_FILENAME)) die("bad revision '%s'", sb.buf); } if (seen_dashdash) read_pathspec_from_stdin(revs, &sb, prune); strbuf_release(&sb); warn_on_object_refname_ambiguity = save_warning; } static void add_grep(struct rev_info *revs, const char *ptn, enum grep_pat_token what) { append_grep_pattern(&revs->grep_filter, ptn, "command line", 0, what); } static void add_header_grep(struct rev_info *revs, enum grep_header_field field, const char *pattern) { append_header_grep_pattern(&revs->grep_filter, field, pattern); } static void add_message_grep(struct rev_info *revs, const char *pattern) { add_grep(revs, pattern, GREP_PATTERN_BODY); } static int handle_revision_opt(struct rev_info *revs, int argc, const char **argv, int *unkc, const char **unkv) { const char *arg = argv[0]; const char *optarg; int argcount; /* pseudo revision arguments */ if (!strcmp(arg, "--all") || !strcmp(arg, "--branches") || !strcmp(arg, "--tags") || !strcmp(arg, "--remotes") || !strcmp(arg, "--reflog") || !strcmp(arg, "--not") || !strcmp(arg, "--no-walk") || !strcmp(arg, "--do-walk") || !strcmp(arg, "--bisect") || 
starts_with(arg, "--glob=") || !strcmp(arg, "--indexed-objects") || starts_with(arg, "--exclude=") || starts_with(arg, "--branches=") || starts_with(arg, "--tags=") || starts_with(arg, "--remotes=") || starts_with(arg, "--no-walk=")) { unkv[(*unkc)++] = arg; return 1; } if ((argcount = parse_long_opt("max-count", argv, &optarg))) { revs->max_count = atoi(optarg); revs->no_walk = 0; return argcount; } else if ((argcount = parse_long_opt("skip", argv, &optarg))) { revs->skip_count = atoi(optarg); return argcount; } else if ((*arg == '-') && isdigit(arg[1])) { /* accept -<digit>, like traditional "head" */ if (strtol_i(arg + 1, 10, &revs->max_count) < 0 || revs->max_count < 0) die("'%s': not a non-negative integer", arg + 1); revs->no_walk = 0; } else if (!strcmp(arg, "-n")) { if (argc <= 1) return error("-n requires an argument"); revs->max_count = atoi(argv[1]); revs->no_walk = 0; return 2; } else if (starts_with(arg, "-n")) { revs->max_count = atoi(arg + 2); revs->no_walk = 0; } else if ((argcount = parse_long_opt("max-age", argv, &optarg))) { revs->max_age = atoi(optarg); return argcount; } else if ((argcount = parse_long_opt("since", argv, &optarg))) { revs->max_age = approxidate(optarg); return argcount; } else if ((argcount = parse_long_opt("after", argv, &optarg))) { revs->max_age = approxidate(optarg); return argcount; } else if ((argcount = parse_long_opt("min-age", argv, &optarg))) { revs->min_age = atoi(optarg); return argcount; } else if ((argcount = parse_long_opt("before", argv, &optarg))) { revs->min_age = approxidate(optarg); return argcount; } else if ((argcount = parse_long_opt("until", argv, &optarg))) { revs->min_age = approxidate(optarg); return argcount; } else if (!strcmp(arg, "--first-parent")) { revs->first_parent_only = 1; } else if (!strcmp(arg, "--ancestry-path")) { revs->ancestry_path = 1; revs->simplify_history = 0; revs->limited = 1; } else if (!strcmp(arg, "-g") || !strcmp(arg, "--walk-reflogs")) { init_reflog_walk(&revs->reflog_info); } else if (!strcmp(arg, "--default")) { if (argc <= 1) return error("bad --default argument"); revs->def = argv[1]; return 2; } else if (!strcmp(arg, "--merge")) { revs->show_merge = 1; } else if (!strcmp(arg, "--topo-order")) { revs->sort_order = REV_SORT_IN_GRAPH_ORDER; revs->topo_order = 1; } else if (!strcmp(arg, "--simplify-merges")) { revs->simplify_merges = 1; revs->topo_order = 1; revs->rewrite_parents = 1; revs->simplify_history = 0; revs->limited = 1; } else if (!strcmp(arg, "--simplify-by-decoration")) { revs->simplify_merges = 1; revs->topo_order = 1; revs->rewrite_parents = 1; revs->simplify_history = 0; revs->simplify_by_decoration = 1; revs->limited = 1; revs->prune = 1; load_ref_decorations(DECORATE_SHORT_REFS); } else if (!strcmp(arg, "--date-order")) { revs->sort_order = REV_SORT_BY_COMMIT_DATE; revs->topo_order = 1; } else if (!strcmp(arg, "--author-date-order")) { revs->sort_order = REV_SORT_BY_AUTHOR_DATE; revs->topo_order = 1; } else if (starts_with(arg, "--early-output")) { int count = 100; switch (arg[14]) { case '=': count = atoi(arg+15); /* Fallthrough */ case 0: revs->topo_order = 1; revs->early_output = count; } } else if (!strcmp(arg, "--parents")) { revs->rewrite_parents = 1; revs->print_parents = 1; } else if (!strcmp(arg, "--dense")) { revs->dense = 1; } else if (!strcmp(arg, "--sparse")) { revs->dense = 0; } else if (!strcmp(arg, "--show-all")) { revs->show_all = 1; } else if (!strcmp(arg, "--remove-empty")) { revs->remove_empty_trees = 1; } else if (!strcmp(arg, "--merges")) { revs->min_parents 
= 2; } else if (!strcmp(arg, "--no-merges")) { revs->max_parents = 1; } else if (starts_with(arg, "--min-parents=")) { revs->min_parents = atoi(arg+14); } else if (starts_with(arg, "--no-min-parents")) { revs->min_parents = 0; } else if (starts_with(arg, "--max-parents=")) { revs->max_parents = atoi(arg+14); } else if (starts_with(arg, "--no-max-parents")) { revs->max_parents = -1; } else if (!strcmp(arg, "--boundary")) { revs->boundary = 1; } else if (!strcmp(arg, "--left-right")) { revs->left_right = 1; } else if (!strcmp(arg, "--left-only")) { if (revs->right_only) die("--left-only is incompatible with --right-only" " or --cherry"); revs->left_only = 1; } else if (!strcmp(arg, "--right-only")) { if (revs->left_only) die("--right-only is incompatible with --left-only"); revs->right_only = 1; } else if (!strcmp(arg, "--cherry")) { if (revs->left_only) die("--cherry is incompatible with --left-only"); revs->cherry_mark = 1; revs->right_only = 1; revs->max_parents = 1; revs->limited = 1; } else if (!strcmp(arg, "--count")) { revs->count = 1; } else if (!strcmp(arg, "--cherry-mark")) { if (revs->cherry_pick) die("--cherry-mark is incompatible with --cherry-pick"); revs->cherry_mark = 1; revs->limited = 1; /* needs limit_list() */ } else if (!strcmp(arg, "--cherry-pick")) { if (revs->cherry_mark) die("--cherry-pick is incompatible with --cherry-mark"); revs->cherry_pick = 1; revs->limited = 1; } else if (!strcmp(arg, "--objects")) { revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; } else if (!strcmp(arg, "--objects-edge")) { revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; revs->edge_hint = 1; } else if (!strcmp(arg, "--objects-edge-aggressive")) { revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; revs->edge_hint = 1; revs->edge_hint_aggressive = 1; } else if (!strcmp(arg, "--verify-objects")) { revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; revs->verify_objects = 1; } else if (!strcmp(arg, "--unpacked")) { revs->unpacked = 1; } else if (starts_with(arg, "--unpacked=")) { die("--unpacked=<packfile> no longer supported."); } else if (!strcmp(arg, "-r")) { revs->diff = 1; DIFF_OPT_SET(&revs->diffopt, RECURSIVE); } else if (!strcmp(arg, "-t")) { revs->diff = 1; DIFF_OPT_SET(&revs->diffopt, RECURSIVE); DIFF_OPT_SET(&revs->diffopt, TREE_IN_RECURSIVE); } else if (!strcmp(arg, "-m")) { revs->ignore_merges = 0; } else if (!strcmp(arg, "-c")) { revs->diff = 1; revs->dense_combined_merges = 0; revs->combine_merges = 1; } else if (!strcmp(arg, "--cc")) { revs->diff = 1; revs->dense_combined_merges = 1; revs->combine_merges = 1; } else if (!strcmp(arg, "-v")) { revs->verbose_header = 1; } else if (!strcmp(arg, "--pretty")) { revs->verbose_header = 1; revs->pretty_given = 1; get_commit_format(NULL, revs); } else if (starts_with(arg, "--pretty=") || starts_with(arg, "--format=")) { /* * Detached form ("--pretty X" as opposed to "--pretty=X") * not allowed, since the argument is optional. 
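		 * Note that "--pretty=" and "--format=" are both nine bytes
		 * long, which is why the single "arg+9" below serves either
		 * spelling.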
*/ revs->verbose_header = 1; revs->pretty_given = 1; get_commit_format(arg+9, revs); } else if (!strcmp(arg, "--show-notes") || !strcmp(arg, "--notes")) { revs->show_notes = 1; revs->show_notes_given = 1; revs->notes_opt.use_default_notes = 1; } else if (!strcmp(arg, "--show-signature")) { revs->show_signature = 1; } else if (!strcmp(arg, "--show-linear-break") || starts_with(arg, "--show-linear-break=")) { if (starts_with(arg, "--show-linear-break=")) revs->break_bar = xstrdup(arg + 20); else revs->break_bar = " .........."; revs->track_linear = 1; revs->track_first_time = 1; } else if (starts_with(arg, "--show-notes=") || starts_with(arg, "--notes=")) { struct strbuf buf = STRBUF_INIT; revs->show_notes = 1; revs->show_notes_given = 1; if (starts_with(arg, "--show-notes")) { if (revs->notes_opt.use_default_notes < 0) revs->notes_opt.use_default_notes = 1; strbuf_addstr(&buf, arg+13); } else strbuf_addstr(&buf, arg+8); expand_notes_ref(&buf); string_list_append(&revs->notes_opt.extra_notes_refs, strbuf_detach(&buf, NULL)); } else if (!strcmp(arg, "--no-notes")) { revs->show_notes = 0; revs->show_notes_given = 1; revs->notes_opt.use_default_notes = -1; /* we have been strdup'ing ourselves, so trick * string_list into free()ing strings */ revs->notes_opt.extra_notes_refs.strdup_strings = 1; string_list_clear(&revs->notes_opt.extra_notes_refs, 0); revs->notes_opt.extra_notes_refs.strdup_strings = 0; } else if (!strcmp(arg, "--standard-notes")) { revs->show_notes_given = 1; revs->notes_opt.use_default_notes = 1; } else if (!strcmp(arg, "--no-standard-notes")) { revs->notes_opt.use_default_notes = 0; } else if (!strcmp(arg, "--oneline")) { revs->verbose_header = 1; get_commit_format("oneline", revs); revs->pretty_given = 1; revs->abbrev_commit = 1; } else if (!strcmp(arg, "--graph")) { revs->topo_order = 1; revs->rewrite_parents = 1; revs->graph = graph_init(revs); } else if (!strcmp(arg, "--root")) { revs->show_root_diff = 1; } else if (!strcmp(arg, "--no-commit-id")) { revs->no_commit_id = 1; } else if (!strcmp(arg, "--always")) { revs->always_show_header = 1; } else if (!strcmp(arg, "--no-abbrev")) { revs->abbrev = 0; } else if (!strcmp(arg, "--abbrev")) { revs->abbrev = DEFAULT_ABBREV; } else if (starts_with(arg, "--abbrev=")) { revs->abbrev = strtoul(arg + 9, NULL, 10); if (revs->abbrev < MINIMUM_ABBREV) revs->abbrev = MINIMUM_ABBREV; else if (revs->abbrev > 40) revs->abbrev = 40; } else if (!strcmp(arg, "--abbrev-commit")) { revs->abbrev_commit = 1; revs->abbrev_commit_given = 1; } else if (!strcmp(arg, "--no-abbrev-commit")) { revs->abbrev_commit = 0; } else if (!strcmp(arg, "--full-diff")) { revs->diff = 1; revs->full_diff = 1; } else if (!strcmp(arg, "--full-history")) { revs->simplify_history = 0; } else if (!strcmp(arg, "--relative-date")) { revs->date_mode.type = DATE_RELATIVE; revs->date_mode_explicit = 1; } else if ((argcount = parse_long_opt("date", argv, &optarg))) { parse_date_format(optarg, &revs->date_mode); revs->date_mode_explicit = 1; return argcount; } else if (!strcmp(arg, "--log-size")) { revs->show_log_size = 1; } /* * Grepping the commit log */ else if ((argcount = parse_long_opt("author", argv, &optarg))) { add_header_grep(revs, GREP_HEADER_AUTHOR, optarg); return argcount; } else if ((argcount = parse_long_opt("committer", argv, &optarg))) { add_header_grep(revs, GREP_HEADER_COMMITTER, optarg); return argcount; } else if ((argcount = parse_long_opt("grep-reflog", argv, &optarg))) { add_header_grep(revs, GREP_HEADER_REFLOG, optarg); return argcount; } else if 
((argcount = parse_long_opt("grep", argv, &optarg))) { add_message_grep(revs, optarg); return argcount; } else if (!strcmp(arg, "--grep-debug")) { revs->grep_filter.debug = 1; } else if (!strcmp(arg, "--basic-regexp")) { grep_set_pattern_type_option(GREP_PATTERN_TYPE_BRE, &revs->grep_filter); } else if (!strcmp(arg, "--extended-regexp") || !strcmp(arg, "-E")) { grep_set_pattern_type_option(GREP_PATTERN_TYPE_ERE, &revs->grep_filter); } else if (!strcmp(arg, "--regexp-ignore-case") || !strcmp(arg, "-i")) { revs->grep_filter.regflags |= REG_ICASE; DIFF_OPT_SET(&revs->diffopt, PICKAXE_IGNORE_CASE); } else if (!strcmp(arg, "--fixed-strings") || !strcmp(arg, "-F")) { grep_set_pattern_type_option(GREP_PATTERN_TYPE_FIXED, &revs->grep_filter); } else if (!strcmp(arg, "--perl-regexp")) { grep_set_pattern_type_option(GREP_PATTERN_TYPE_PCRE, &revs->grep_filter); } else if (!strcmp(arg, "--all-match")) { revs->grep_filter.all_match = 1; } else if (!strcmp(arg, "--invert-grep")) { revs->invert_grep = 1; } else if ((argcount = parse_long_opt("encoding", argv, &optarg))) { if (strcmp(optarg, "none")) git_log_output_encoding = xstrdup(optarg); else git_log_output_encoding = ""; return argcount; } else if (!strcmp(arg, "--reverse")) { revs->reverse ^= 1; } else if (!strcmp(arg, "--children")) { revs->children.name = "children"; revs->limited = 1; } else if (!strcmp(arg, "--ignore-missing")) { revs->ignore_missing = 1; } else { int opts = diff_opt_parse(&revs->diffopt, argv, argc, revs->prefix); if (!opts) unkv[(*unkc)++] = arg; return opts; } if (revs->graph && revs->track_linear) die("--show-linear-break and --graph are incompatible"); return 1; } void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx, const struct option *options, const char * const usagestr[]) { int n = handle_revision_opt(revs, ctx->argc, ctx->argv, &ctx->cpidx, ctx->out); if (n <= 0) { error("unknown option `%s'", ctx->argv[0]); usage_with_options(usagestr, options); } ctx->argv += n; ctx->argc -= n; } static int for_each_bisect_ref(const char *submodule, each_ref_fn fn, void *cb_data, const char *term) { struct strbuf bisect_refs = STRBUF_INIT; int status; strbuf_addf(&bisect_refs, "refs/bisect/%s", term); status = for_each_ref_in_submodule(submodule, bisect_refs.buf, fn, cb_data); strbuf_release(&bisect_refs); return status; } static int for_each_bad_bisect_ref(const char *submodule, each_ref_fn fn, void *cb_data) { return for_each_bisect_ref(submodule, fn, cb_data, term_bad); } static int for_each_good_bisect_ref(const char *submodule, each_ref_fn fn, void *cb_data) { return for_each_bisect_ref(submodule, fn, cb_data, term_good); } static int handle_revision_pseudo_opt(const char *submodule, struct rev_info *revs, int argc, const char **argv, int *flags) { const char *arg = argv[0]; const char *optarg; int argcount; /* * NOTE! * * Commands like "git shortlog" will not accept the options below * unless parse_revision_opt queues them (as opposed to erroring * out). * * When implementing your new pseudo-option, remember to * register it in the list at the top of handle_revision_opt. 
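	 * (a pseudo-option missing from that list is reported by
	 * parse_revision_opt() as an unknown option instead of being
	 * queued for the caller)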
*/ if (!strcmp(arg, "--all")) { handle_refs(submodule, revs, *flags, for_each_ref_submodule); handle_refs(submodule, revs, *flags, head_ref_submodule); clear_ref_exclusion(&revs->ref_excludes); } else if (!strcmp(arg, "--branches")) { handle_refs(submodule, revs, *flags, for_each_branch_ref_submodule); clear_ref_exclusion(&revs->ref_excludes); } else if (!strcmp(arg, "--bisect")) { read_bisect_terms(&term_bad, &term_good); handle_refs(submodule, revs, *flags, for_each_bad_bisect_ref); handle_refs(submodule, revs, *flags ^ (UNINTERESTING | BOTTOM), for_each_good_bisect_ref); revs->bisect = 1; } else if (!strcmp(arg, "--tags")) { handle_refs(submodule, revs, *flags, for_each_tag_ref_submodule); clear_ref_exclusion(&revs->ref_excludes); } else if (!strcmp(arg, "--remotes")) { handle_refs(submodule, revs, *flags, for_each_remote_ref_submodule); clear_ref_exclusion(&revs->ref_excludes); } else if ((argcount = parse_long_opt("glob", argv, &optarg))) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, *flags); for_each_glob_ref(handle_one_ref, optarg, &cb); clear_ref_exclusion(&revs->ref_excludes); return argcount; } else if ((argcount = parse_long_opt("exclude", argv, &optarg))) { add_ref_exclusion(&revs->ref_excludes, optarg); return argcount; } else if (starts_with(arg, "--branches=")) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, *flags); for_each_glob_ref_in(handle_one_ref, arg + 11, "refs/heads/", &cb); clear_ref_exclusion(&revs->ref_excludes); } else if (starts_with(arg, "--tags=")) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, *flags); for_each_glob_ref_in(handle_one_ref, arg + 7, "refs/tags/", &cb); clear_ref_exclusion(&revs->ref_excludes); } else if (starts_with(arg, "--remotes=")) { struct all_refs_cb cb; init_all_refs_cb(&cb, revs, *flags); for_each_glob_ref_in(handle_one_ref, arg + 10, "refs/remotes/", &cb); clear_ref_exclusion(&revs->ref_excludes); } else if (!strcmp(arg, "--reflog")) { add_reflogs_to_pending(revs, *flags); } else if (!strcmp(arg, "--indexed-objects")) { add_index_objects_to_pending(revs, *flags); } else if (!strcmp(arg, "--not")) { *flags ^= UNINTERESTING | BOTTOM; } else if (!strcmp(arg, "--no-walk")) { revs->no_walk = REVISION_WALK_NO_WALK_SORTED; } else if (starts_with(arg, "--no-walk=")) { /* * Detached form ("--no-walk X" as opposed to "--no-walk=X") * not allowed, since the argument is optional. */ if (!strcmp(arg + 10, "sorted")) revs->no_walk = REVISION_WALK_NO_WALK_SORTED; else if (!strcmp(arg + 10, "unsorted")) revs->no_walk = REVISION_WALK_NO_WALK_UNSORTED; else return error("invalid argument to --no-walk"); } else if (!strcmp(arg, "--do-walk")) { revs->no_walk = 0; } else { return 0; } return 1; } static void NORETURN diagnose_missing_default(const char *def) { unsigned char sha1[20]; int flags; const char *refname; refname = resolve_ref_unsafe(def, 0, sha1, &flags); if (!refname || !(flags & REF_ISSYMREF) || (flags & REF_ISBROKEN)) die(_("your current branch appears to be broken")); skip_prefix(refname, "refs/heads/", &refname); die(_("your current branch '%s' does not have any commits yet"), refname); } /* * Parse revision information, filling in the "rev_info" structure, * and removing the used arguments from the argument list. 
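 *
 * A typical caller (a sketch, not code from this file; get_revision()
 * is declared in revision.h) looks like:
 *
 *	struct rev_info revs;
 *
 *	init_revisions(&revs, prefix);
 *	argc = setup_revisions(argc, argv, &revs, NULL);
 *	if (prepare_revision_walk(&revs))
 *		die("revision walk setup failed");
 *	while ((commit = get_revision(&revs)) != NULL)
 *		... use commit ...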
* * Returns the number of arguments left that weren't recognized * (which are also moved to the head of the argument list) */ int setup_revisions(int argc, const char **argv, struct rev_info *revs, struct setup_revision_opt *opt) { int i, flags, left, seen_dashdash, read_from_stdin, got_rev_arg = 0, revarg_opt; struct cmdline_pathspec prune_data; const char *submodule = NULL; memset(&prune_data, 0, sizeof(prune_data)); if (opt) submodule = opt->submodule; /* First, search for "--" */ if (opt && opt->assume_dashdash) { seen_dashdash = 1; } else { seen_dashdash = 0; for (i = 1; i < argc; i++) { const char *arg = argv[i]; if (strcmp(arg, "--")) continue; argv[i] = NULL; argc = i; if (argv[i + 1]) append_prune_data(&prune_data, argv + i + 1); seen_dashdash = 1; break; } } /* Second, deal with arguments and options */ flags = 0; revarg_opt = opt ? opt->revarg_opt : 0; if (seen_dashdash) revarg_opt |= REVARG_CANNOT_BE_FILENAME; read_from_stdin = 0; for (left = i = 1; i < argc; i++) { const char *arg = argv[i]; if (*arg == '-') { int opts; opts = handle_revision_pseudo_opt(submodule, revs, argc - i, argv + i, &flags); if (opts > 0) { i += opts - 1; continue; } if (!strcmp(arg, "--stdin")) { if (revs->disable_stdin) { argv[left++] = arg; continue; } if (read_from_stdin++) die("--stdin given twice?"); read_revisions_from_stdin(revs, &prune_data); continue; } opts = handle_revision_opt(revs, argc - i, argv + i, &left, argv); if (opts > 0) { i += opts - 1; continue; } if (opts < 0) exit(128); continue; } if (handle_revision_arg(arg, revs, flags, revarg_opt)) { int j; if (seen_dashdash || *arg == '^') die("bad revision '%s'", arg); /* If we didn't have a "--": * (1) all filenames must exist; * (2) all rev-args must not be interpretable * as a valid filename. * but the latter we have checked in the main loop. */ for (j = i; j < argc; j++) verify_filename(revs->prefix, argv[j], j == i); append_prune_data(&prune_data, argv + i); break; } else got_rev_arg = 1; } if (prune_data.nr) { /* * If we need to introduce the magic "a lone ':' means no * pathspec whatsoever", here is the place to do so. * * if (prune_data.nr == 1 && !strcmp(prune_data[0], ":")) { * prune_data.nr = 0; * prune_data.alloc = 0; * free(prune_data.path); * prune_data.path = NULL; * } else { * terminate prune_data.alloc with NULL and * call init_pathspec() to set revs->prune_data here. * } */ ALLOC_GROW(prune_data.path, prune_data.nr + 1, prune_data.alloc); prune_data.path[prune_data.nr++] = NULL; parse_pathspec(&revs->prune_data, 0, 0, revs->prefix, prune_data.path); } if (revs->def == NULL) revs->def = opt ? opt->def : NULL; if (opt && opt->tweak) opt->tweak(revs, opt); if (revs->show_merge) prepare_show_merge(revs); if (revs->def && !revs->pending.nr && !got_rev_arg) { unsigned char sha1[20]; struct object *object; struct object_context oc; if (get_sha1_with_context(revs->def, 0, sha1, &oc)) diagnose_missing_default(revs->def); object = get_reference(revs, revs->def, sha1, 0); add_pending_object_with_mode(revs, object, revs->def, oc.mode); } /* Did the user ask for any diff output? Run the diff! */ if (revs->diffopt.output_format & ~DIFF_FORMAT_NO_OUTPUT) revs->diff = 1; /* Pickaxe, diff-filter and rename following need diffs */ if (revs->diffopt.pickaxe || revs->diffopt.filter || DIFF_OPT_TST(&revs->diffopt, FOLLOW_RENAMES)) revs->diff = 1; if (revs->topo_order) revs->limited = 1; if (revs->prune_data.nr) { copy_pathspec(&revs->pruning.pathspec, &revs->prune_data); /* Can't prune commits with rename following: the paths change.. 
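		 * (with rename following, the paths named by the pathspec
		 * differ over time, so commit-level pruning here would drop
		 * exactly the commits the rename-detecting diff must examine)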
*/ if (!DIFF_OPT_TST(&revs->diffopt, FOLLOW_RENAMES)) revs->prune = 1; if (!revs->full_diff) copy_pathspec(&revs->diffopt.pathspec, &revs->prune_data); } if (revs->combine_merges) revs->ignore_merges = 0; revs->diffopt.abbrev = revs->abbrev; if (revs->line_level_traverse) { revs->limited = 1; revs->topo_order = 1; } diff_setup_done(&revs->diffopt); grep_commit_pattern_type(GREP_PATTERN_TYPE_UNSPECIFIED, &revs->grep_filter); compile_grep_patterns(&revs->grep_filter); if (revs->reverse && revs->reflog_info) die("cannot combine --reverse with --walk-reflogs"); if (revs->rewrite_parents && revs->children.name) die("cannot combine --parents and --children"); /* * Limitations on the graph functionality */ if (revs->reverse && revs->graph) die("cannot combine --reverse with --graph"); if (revs->reflog_info && revs->graph) die("cannot combine --walk-reflogs with --graph"); if (revs->no_walk && revs->graph) die("cannot combine --no-walk with --graph"); if (!revs->reflog_info && revs->grep_filter.use_reflog_filter) die("cannot use --grep-reflog without --walk-reflogs"); if (revs->first_parent_only && revs->bisect) die(_("--first-parent is incompatible with --bisect")); return left; } static void add_child(struct rev_info *revs, struct commit *parent, struct commit *child) { struct commit_list *l = xcalloc(1, sizeof(*l)); l->item = child; l->next = add_decoration(&revs->children, &parent->object, l); } static int remove_duplicate_parents(struct rev_info *revs, struct commit *commit) { struct treesame_state *ts = lookup_decoration(&revs->treesame, &commit->object); struct commit_list **pp, *p; int surviving_parents; /* Examine existing parents while marking ones we have seen... */ pp = &commit->parents; surviving_parents = 0; while ((p = *pp) != NULL) { struct commit *parent = p->item; if (parent->object.flags & TMP_MARK) { *pp = p->next; if (ts) compact_treesame(revs, commit, surviving_parents); continue; } parent->object.flags |= TMP_MARK; surviving_parents++; pp = &p->next; } /* clear the temporary mark */ for (p = commit->parents; p; p = p->next) { p->item->object.flags &= ~TMP_MARK; } /* no update_treesame() - removing duplicates can't affect TREESAME */ return surviving_parents; } struct merge_simplify_state { struct commit *simplified; }; static struct merge_simplify_state *locate_simplify_state(struct rev_info *revs, struct commit *commit) { struct merge_simplify_state *st; st = lookup_decoration(&revs->merge_simplification, &commit->object); if (!st) { st = xcalloc(1, sizeof(*st)); add_decoration(&revs->merge_simplification, &commit->object, st); } return st; } static int mark_redundant_parents(struct rev_info *revs, struct commit *commit) { struct commit_list *h = reduce_heads(commit->parents); int i = 0, marked = 0; struct commit_list *po, *pn; /* Want these for sanity-checking only */ int orig_cnt = commit_list_count(commit->parents); int cnt = commit_list_count(h); /* * Not ready to remove items yet, just mark them for now, based * on the output of reduce_heads(). reduce_heads outputs the reduced * set in its original order, so this isn't too hard. 
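	/*
	 * E.g. with parents (A B C) and reduce_heads() returning (A C),
	 * the walk below steps past A, marks B with TMP_MARK, then steps
	 * past C; marked parents are removed later.
	 */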
*/ po = commit->parents; pn = h; while (po) { if (pn && po->item == pn->item) { pn = pn->next; i++; } else { po->item->object.flags |= TMP_MARK; marked++; } po=po->next; } if (i != cnt || cnt+marked != orig_cnt) die("mark_redundant_parents %d %d %d %d", orig_cnt, cnt, i, marked); free_commit_list(h); return marked; } static int mark_treesame_root_parents(struct rev_info *revs, struct commit *commit) { struct commit_list *p; int marked = 0; for (p = commit->parents; p; p = p->next) { struct commit *parent = p->item; if (!parent->parents && (parent->object.flags & TREESAME)) { parent->object.flags |= TMP_MARK; marked++; } } return marked; } /* * Awkward naming - this means one parent we are TREESAME to. * cf mark_treesame_root_parents: root parents that are TREESAME (to an * empty tree). Better name suggestions? */ static int leave_one_treesame_to_parent(struct rev_info *revs, struct commit *commit) { struct treesame_state *ts = lookup_decoration(&revs->treesame, &commit->object); struct commit *unmarked = NULL, *marked = NULL; struct commit_list *p; unsigned n; for (p = commit->parents, n = 0; p; p = p->next, n++) { if (ts->treesame[n]) { if (p->item->object.flags & TMP_MARK) { if (!marked) marked = p->item; } else { if (!unmarked) { unmarked = p->item; break; } } } } /* * If we are TREESAME to a marked-for-deletion parent, but not to any * unmarked parents, unmark the first TREESAME parent. This is the * parent that the default simplify_history==1 scan would have followed, * and it doesn't make sense to omit that path when asking for a * simplified full history. Retaining it improves the chances of * understanding odd missed merges that took an old version of a file. * * Example: * * I--------*X A modified the file, but mainline merge X used * \ / "-s ours", so took the version from I. X is * `-*A--' TREESAME to I and !TREESAME to A. * * Default log from X would produce "I". Without this check, * --full-history --simplify-merges would produce "I-A-X", showing * the merge commit X and that it changed A, but not making clear that * it had just taken the I version. With this check, the topology above * is retained. * * Note that it is possible that the simplification chooses a different * TREESAME parent from the default, in which case this test doesn't * activate, and we _do_ drop the default parent. Example: * * I------X A modified the file, but it was reverted in B, * \ / meaning mainline merge X is TREESAME to both * *A-*B parents. * * Default log would produce "I" by following the first parent; * --full-history --simplify-merges will produce "I-A-B". But this is a * reasonable result - it presents a logical full history leading from * I to X, and X is not an important merge. 
*/ if (!unmarked && marked) { marked->object.flags &= ~TMP_MARK; return 1; } return 0; } static int remove_marked_parents(struct rev_info *revs, struct commit *commit) { struct commit_list **pp, *p; int nth_parent, removed = 0; pp = &commit->parents; nth_parent = 0; while ((p = *pp) != NULL) { struct commit *parent = p->item; if (parent->object.flags & TMP_MARK) { parent->object.flags &= ~TMP_MARK; *pp = p->next; free(p); removed++; compact_treesame(revs, commit, nth_parent); continue; } pp = &p->next; nth_parent++; } /* Removing parents can only increase TREESAMEness */ if (removed && !(commit->object.flags & TREESAME)) update_treesame(revs, commit); return nth_parent; } static struct commit_list **simplify_one(struct rev_info *revs, struct commit *commit, struct commit_list **tail) { struct commit_list *p; struct commit *parent; struct merge_simplify_state *st, *pst; int cnt; st = locate_simplify_state(revs, commit); /* * Have we handled this one? */ if (st->simplified) return tail; /* * An UNINTERESTING commit simplifies to itself, so does a * root commit. We do not rewrite parents of such commit * anyway. */ if ((commit->object.flags & UNINTERESTING) || !commit->parents) { st->simplified = commit; return tail; } /* * Do we know what commit all of our parents that matter * should be rewritten to? Otherwise we are not ready to * rewrite this one yet. */ for (cnt = 0, p = commit->parents; p; p = p->next) { pst = locate_simplify_state(revs, p->item); if (!pst->simplified) { tail = &commit_list_insert(p->item, tail)->next; cnt++; } if (revs->first_parent_only) break; } if (cnt) { tail = &commit_list_insert(commit, tail)->next; return tail; } /* * Rewrite our list of parents. Note that this cannot * affect our TREESAME flags in any way - a commit is * always TREESAME to its simplification. */ for (p = commit->parents; p; p = p->next) { pst = locate_simplify_state(revs, p->item); p->item = pst->simplified; if (revs->first_parent_only) break; } if (revs->first_parent_only) cnt = 1; else cnt = remove_duplicate_parents(revs, commit); /* * It is possible that we are a merge and one side branch * does not have any commit that touches the given paths; * in such a case, the immediate parent from that branch * will be rewritten to be the merge base. * * o----X X: the commit we are looking at; * / / o: a commit that touches the paths; * ---o----' * * Further, a merge of an independent branch that doesn't * touch the path will reduce to a treesame root parent: * * ----o----X X: the commit we are looking at; * / o: a commit that touches the paths; * r r: a root commit not touching the paths * * Detect and simplify both cases. */ if (1 < cnt) { int marked = mark_redundant_parents(revs, commit); marked += mark_treesame_root_parents(revs, commit); if (marked) marked -= leave_one_treesame_to_parent(revs, commit); if (marked) cnt = remove_marked_parents(revs, commit); } /* * A commit simplifies to itself if it is a root, if it is * UNINTERESTING, if it touches the given paths, or if it is a * merge and its parents don't simplify to one relevant commit * (the first two cases are already handled at the beginning of * this function). * * Otherwise, it simplifies to what its sole relevant parent * simplifies to. 
*/ if (!cnt || (commit->object.flags & UNINTERESTING) || !(commit->object.flags & TREESAME) || (parent = one_relevant_parent(revs, commit->parents)) == NULL) st->simplified = commit; else { pst = locate_simplify_state(revs, parent); st->simplified = pst->simplified; } return tail; } static void simplify_merges(struct rev_info *revs) { struct commit_list *list, *next; struct commit_list *yet_to_do, **tail; struct commit *commit; if (!revs->prune) return; /* feed the list reversed */ yet_to_do = NULL; for (list = revs->commits; list; list = next) { commit = list->item; next = list->next; /* * Do not free(list) here yet; the original list * is used later in this function. */ commit_list_insert(commit, &yet_to_do); } while (yet_to_do) { list = yet_to_do; yet_to_do = NULL; tail = &yet_to_do; while (list) { commit = pop_commit(&list); tail = simplify_one(revs, commit, tail); } } /* clean up the result, removing the simplified ones */ list = revs->commits; revs->commits = NULL; tail = &revs->commits; while (list) { struct merge_simplify_state *st; commit = pop_commit(&list); st = locate_simplify_state(revs, commit); if (st->simplified == commit) tail = &commit_list_insert(commit, tail)->next; } } static void set_children(struct rev_info *revs) { struct commit_list *l; for (l = revs->commits; l; l = l->next) { struct commit *commit = l->item; struct commit_list *p; for (p = commit->parents; p; p = p->next) add_child(revs, p->item, commit); } } void reset_revision_walk(void) { clear_object_flags(SEEN | ADDED | SHOWN); } int prepare_revision_walk(struct rev_info *revs) { int i; struct object_array old_pending; struct commit_list **next = &revs->commits; memcpy(&old_pending, &revs->pending, sizeof(old_pending)); revs->pending.nr = 0; revs->pending.alloc = 0; revs->pending.objects = NULL; for (i = 0; i < old_pending.nr; i++) { struct object_array_entry *e = old_pending.objects + i; struct commit *commit = handle_commit(revs, e); if (commit) { if (!(commit->object.flags & SEEN)) { commit->object.flags |= SEEN; next = commit_list_append(commit, next); } } } if (!revs->leak_pending) object_array_clear(&old_pending); /* Signal whether we need per-parent treesame decoration */ if (revs->simplify_merges || (revs->limited && limiting_can_increase_treesame(revs))) revs->treesame.name = "treesame"; if (revs->no_walk != REVISION_WALK_NO_WALK_UNSORTED) commit_list_sort_by_date(&revs->commits); if (revs->no_walk) return 0; if (revs->limited) if (limit_list(revs) < 0) return -1; if (revs->topo_order) sort_in_topological_order(&revs->commits, revs->sort_order); if (revs->line_level_traverse) line_log_filter(revs); if (revs->simplify_merges) simplify_merges(revs); if (revs->children.name) set_children(revs); return 0; } static enum rewrite_result rewrite_one(struct rev_info *revs, struct commit **pp) { struct commit_list *cache = NULL; for (;;) { struct commit *p = *pp; if (!revs->limited) if (add_parents_to_list(revs, p, &revs->commits, &cache) < 0) return rewrite_one_error; if (p->object.flags & UNINTERESTING) return rewrite_one_ok; if (!(p->object.flags & TREESAME)) return rewrite_one_ok; if (!p->parents) return rewrite_one_noparents; if ((p = one_relevant_parent(revs, p->parents)) == NULL) return rewrite_one_ok; *pp = p; } } int rewrite_parents(struct rev_info *revs, struct commit *commit, rewrite_parent_fn_t rewrite_parent) { struct commit_list **pp = &commit->parents; while (*pp) { struct commit_list *parent = *pp; switch (rewrite_parent(revs, &parent->item)) { case rewrite_one_ok: break; case 
rewrite_one_noparents: *pp = parent->next; continue; case rewrite_one_error: return -1; } pp = &parent->next; } remove_duplicate_parents(revs, commit); return 0; } static int commit_rewrite_person(struct strbuf *buf, const char *what, struct string_list *mailmap) { char *person, *endp; size_t len, namelen, maillen; const char *name; const char *mail; struct ident_split ident; person = strstr(buf->buf, what); if (!person) return 0; person += strlen(what); endp = strchr(person, '\n'); if (!endp) return 0; len = endp - person; if (split_ident_line(&ident, person, len)) return 0; mail = ident.mail_begin; maillen = ident.mail_end - ident.mail_begin; name = ident.name_begin; namelen = ident.name_end - ident.name_begin; if (map_user(mailmap, &mail, &maillen, &name, &namelen)) { struct strbuf namemail = STRBUF_INIT; strbuf_addf(&namemail, "%.*s <%.*s>", (int)namelen, name, (int)maillen, mail); strbuf_splice(buf, ident.name_begin - buf->buf, ident.mail_end - ident.name_begin + 1, namemail.buf, namemail.len); strbuf_release(&namemail); return 1; } return 0; } static int commit_match(struct commit *commit, struct rev_info *opt) { int retval; const char *encoding; const char *message; struct strbuf buf = STRBUF_INIT; if (!opt->grep_filter.pattern_list && !opt->grep_filter.header_list) return 1; /* Prepend "fake" headers as needed */ if (opt->grep_filter.use_reflog_filter) { strbuf_addstr(&buf, "reflog "); get_reflog_message(&buf, opt->reflog_info); strbuf_addch(&buf, '\n'); } /* * We grep in the user's output encoding, under the assumption that it * is the encoding they are most likely to write their grep pattern * for. In addition, it means we will match the "notes" encoding below, * so we will not end up with a buffer that has two different encodings * in it. */ encoding = get_log_output_encoding(); message = logmsg_reencode(commit, NULL, encoding); /* Copy the commit to temporary if we are using "fake" headers */ if (buf.len) strbuf_addstr(&buf, message); if (opt->grep_filter.header_list && opt->mailmap) { if (!buf.len) strbuf_addstr(&buf, message); commit_rewrite_person(&buf, "\nauthor ", opt->mailmap); commit_rewrite_person(&buf, "\ncommitter ", opt->mailmap); } /* Append "fake" message parts as needed */ if (opt->show_notes) { if (!buf.len) strbuf_addstr(&buf, message); format_display_notes(commit->object.oid.hash, &buf, encoding, 1); } /* * Find either in the original commit message, or in the temporary. * Note that we cast away the constness of "message" here. It is * const because it may come from the cached commit buffer. That's OK, * because we know that it is modifiable heap memory, and that while * grep_buffer may modify it for speed, it will restore any * changes before returning. */ if (buf.len) retval = grep_buffer(&opt->grep_filter, buf.buf, buf.len); else retval = grep_buffer(&opt->grep_filter, (char *)message, strlen(message)); strbuf_release(&buf); unuse_commit_buffer(commit, message); return opt->invert_grep ? 
!retval : retval; } static inline int want_ancestry(const struct rev_info *revs) { return (revs->rewrite_parents || revs->children.name); } enum commit_action get_commit_action(struct rev_info *revs, struct commit *commit) { if (commit->object.flags & SHOWN) return commit_ignore; if (revs->unpacked && has_sha1_pack(commit->object.oid.hash)) return commit_ignore; if (revs->show_all) return commit_show; if (commit->object.flags & UNINTERESTING) return commit_ignore; if (revs->min_age != -1 && (commit->date > revs->min_age)) return commit_ignore; if (revs->min_parents || (revs->max_parents >= 0)) { int n = commit_list_count(commit->parents); if ((n < revs->min_parents) || ((revs->max_parents >= 0) && (n > revs->max_parents))) return commit_ignore; } if (!commit_match(commit, revs)) return commit_ignore; if (revs->prune && revs->dense) { /* Commit without changes? */ if (commit->object.flags & TREESAME) { int n; struct commit_list *p; /* drop merges unless we want parenthood */ if (!want_ancestry(revs)) return commit_ignore; /* * If we want ancestry, then need to keep any merges * between relevant commits to tie together topology. * For consistency with TREESAME and simplification * use "relevant" here rather than just INTERESTING, * to treat bottom commit(s) as part of the topology. */ for (n = 0, p = commit->parents; p; p = p->next) if (relevant_commit(p->item)) if (++n >= 2) return commit_show; return commit_ignore; } } return commit_show; } define_commit_slab(saved_parents, struct commit_list *); #define EMPTY_PARENT_LIST ((struct commit_list *)-1) /* * You may only call save_parents() once per commit (this is checked * for non-root commits). */ static void save_parents(struct rev_info *revs, struct commit *commit) { struct commit_list **pp; if (!revs->saved_parents_slab) { revs->saved_parents_slab = xmalloc(sizeof(struct saved_parents)); init_saved_parents(revs->saved_parents_slab); } pp = saved_parents_at(revs->saved_parents_slab, commit); /* * When walking with reflogs, we may visit the same commit * several times: once for each appearance in the reflog. * * In this case, save_parents() will be called multiple times. * We want to keep only the first set of parents. We need to * store a sentinel value for an empty (i.e., NULL) parent * list to distinguish it from a not-yet-saved list, however. */ if (*pp) return; if (commit->parents) *pp = copy_commit_list(commit->parents); else *pp = EMPTY_PARENT_LIST; } static void free_saved_parents(struct rev_info *revs) { if (revs->saved_parents_slab) clear_saved_parents(revs->saved_parents_slab); } struct commit_list *get_saved_parents(struct rev_info *revs, const struct commit *commit) { struct commit_list *parents; if (!revs->saved_parents_slab) return commit->parents; parents = *saved_parents_at(revs->saved_parents_slab, commit); if (parents == EMPTY_PARENT_LIST) return NULL; return parents; } enum commit_action simplify_commit(struct rev_info *revs, struct commit *commit) { enum commit_action action = get_commit_action(revs, commit); if (action == commit_show && !revs->show_all && revs->prune && revs->dense && want_ancestry(revs)) { /* * --full-diff on simplified parents is no good: it * will show spurious changes from the commits that * were elided. So we save the parents on the side * when --full-diff is in effect. 
*/ if (revs->full_diff) save_parents(revs, commit); if (rewrite_parents(revs, commit, rewrite_one) < 0) return commit_error; } return action; } static void track_linear(struct rev_info *revs, struct commit *commit) { if (revs->track_first_time) { revs->linear = 1; revs->track_first_time = 0; } else { struct commit_list *p; for (p = revs->previous_parents; p; p = p->next) if (p->item == NULL || /* first commit */ !oidcmp(&p->item->object.oid, &commit->object.oid)) break; revs->linear = p != NULL; } if (revs->reverse) { if (revs->linear) commit->object.flags |= TRACK_LINEAR; } free_commit_list(revs->previous_parents); revs->previous_parents = copy_commit_list(commit->parents); } static struct commit *get_revision_1(struct rev_info *revs) { if (!revs->commits) return NULL; do { struct commit *commit = pop_commit(&revs->commits); if (revs->reflog_info) { save_parents(revs, commit); fake_reflog_parent(revs->reflog_info, commit); commit->object.flags &= ~(ADDED | SEEN | SHOWN); } /* * If we haven't done the list limiting, we need to look at * the parents here. We also need to do the date-based limiting * that we'd otherwise have done in limit_list(). */ if (!revs->limited) { if (revs->max_age != -1 && (commit->date < revs->max_age)) continue; if (add_parents_to_list(revs, commit, &revs->commits, NULL) < 0) { if (!revs->ignore_missing_links) die("Failed to traverse parents of commit %s", oid_to_hex(&commit->object.oid)); } } switch (simplify_commit(revs, commit)) { case commit_ignore: continue; case commit_error: die("Failed to simplify parents of commit %s", oid_to_hex(&commit->object.oid)); default: if (revs->track_linear) track_linear(revs, commit); return commit; } } while (revs->commits); return NULL; } /* * Return true for entries that have not yet been shown. (This is an * object_array_each_func_t.) */ static int entry_unshown(struct object_array_entry *entry, void *cb_data_unused) { return !(entry->item->flags & SHOWN); } /* * If array is on the verge of a realloc, garbage-collect any entries * that have already been shown to try to free up some space. */ static void gc_boundary(struct object_array *array) { if (array->nr == array->alloc) object_array_filter(array, entry_unshown, NULL); } static void create_boundary_commit_list(struct rev_info *revs) { unsigned i; struct commit *c; struct object_array *array = &revs->boundary_commits; struct object_array_entry *objects = array->objects; /* * If revs->commits is non-NULL at this point, an error occurred in * get_revision_1(). Ignore the error and continue printing the * boundary commits anyway. (This is what the code has always * done.) */ if (revs->commits) { free_commit_list(revs->commits); revs->commits = NULL; } /* * Put all of the actual boundary commits from revs->boundary_commits * into revs->commits */ for (i = 0; i < array->nr; i++) { c = (struct commit *)(objects[i].item); if (!c) continue; if (!(c->object.flags & CHILD_SHOWN)) continue; if (c->object.flags & (SHOWN | BOUNDARY)) continue; c->object.flags |= BOUNDARY; commit_list_insert(c, &revs->commits); } /* * If revs->topo_order is set, sort the boundary commits * in topological order */ sort_in_topological_order(&revs->commits, revs->sort_order); } static struct commit *get_revision_internal(struct rev_info *revs) { struct commit *c = NULL; struct commit_list *l; if (revs->boundary == 2) { /* * All of the normal commits have already been returned, * and we are now returning boundary commits. 
* create_boundary_commit_list() has populated * revs->commits with the remaining commits to return. */ c = pop_commit(&revs->commits); if (c) c->object.flags |= SHOWN; return c; } /* * If our max_count counter has reached zero, then we are done. We * don't simply return NULL because we still might need to show * boundary commits. But we want to avoid calling get_revision_1, which * might do a considerable amount of work finding the next commit only * for us to throw it away. * * If it is non-zero, then either we don't have a max_count at all * (-1), or it is still counting, in which case we decrement. */ if (revs->max_count) { c = get_revision_1(revs); if (c) { while (revs->skip_count > 0) { revs->skip_count--; c = get_revision_1(revs); if (!c) break; } } if (revs->max_count > 0) revs->max_count--; } if (c) c->object.flags |= SHOWN; if (!revs->boundary) return c; if (!c) { /* * get_revision_1() runs out the commits, and * we are done computing the boundaries. * switch to boundary commits output mode. */ revs->boundary = 2; /* * Update revs->commits to contain the list of * boundary commits. */ create_boundary_commit_list(revs); return get_revision_internal(revs); } /* * boundary commits are the commits that are parents of the * ones we got from get_revision_1() but they themselves are * not returned from get_revision_1(). Before returning * 'c', we need to mark its parents that they could be boundaries. */ for (l = c->parents; l; l = l->next) { struct object *p; p = &(l->item->object); if (p->flags & (CHILD_SHOWN | SHOWN)) continue; p->flags |= CHILD_SHOWN; gc_boundary(&revs->boundary_commits); add_object_array(p, NULL, &revs->boundary_commits); } return c; } struct commit *get_revision(struct rev_info *revs) { struct commit *c; struct commit_list *reversed; if (revs->reverse) { reversed = NULL; while ((c = get_revision_internal(revs))) commit_list_insert(c, &reversed); revs->commits = reversed; revs->reverse = 0; revs->reverse_output_stage = 1; } if (revs->reverse_output_stage) { c = pop_commit(&revs->commits); if (revs->track_linear) revs->linear = !!(c && c->object.flags & TRACK_LINEAR); return c; } c = get_revision_internal(revs); if (c && revs->graph) graph_update(revs->graph, c); if (!c) { free_saved_parents(revs); if (revs->previous_parents) { free_commit_list(revs->previous_parents); revs->previous_parents = NULL; } } return c; } char *get_revision_mark(const struct rev_info *revs, const struct commit *commit) { if (commit->object.flags & BOUNDARY) return "-"; else if (commit->object.flags & UNINTERESTING) return "^"; else if (commit->object.flags & PATCHSAME) return "="; else if (!revs || revs->left_right) { if (commit->object.flags & SYMMETRIC_LEFT) return "<"; else return ">"; } else if (revs->graph) return "*"; else if (revs->cherry_mark) return "+"; return ""; } void put_revision_mark(const struct rev_info *revs, const struct commit *commit) { char *mark = get_revision_mark(revs, commit); if (!strlen(mark)) return; fputs(mark, stdout); putchar(' '); }
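/*
 * Hedged sketch (not part of this file): a minimal consumer of the
 * walk API above. "list_commits" is a hypothetical helper; the
 * pattern is to prepare the walk once, then pull commits until
 * get_revision() returns NULL, letting put_revision_mark() emit the
 * "-", "^", "<", ">" style prefixes where applicable.
 */
static void list_commits(struct rev_info *revs)
{
	struct commit *commit;

	if (prepare_revision_walk(revs) < 0)
		die("revision walk setup failed");
	while ((commit = get_revision(revs)) != NULL) {
		put_revision_mark(revs, commit);
		puts(oid_to_hex(&commit->object.oid));
	}
}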
void show_object_with_name(FILE *out, struct object *obj, struct strbuf *path, const char *component) { char *name = path_name(path, component); char *p; fprintf(out, "%s ", oid_to_hex(&obj->oid)); for (p = name; *p && *p != '\n'; p++) fputc(*p, out); fputc('\n', out); free(name); }
void show_object_with_name(FILE *out, struct object *obj, const char *name) { const char *p; fprintf(out, "%s ", oid_to_hex(&obj->oid)); for (p = name; *p && *p != '\n'; p++) fputc(*p, out); fputc('\n', out); }
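/*
 * Illustrative only: with the revised signature above, a traversal
 * callback hands over one fully formed name string instead of a
 * path/component pair. The (obj, name, data) callback shape is an
 * assumption for this sketch, not necessarily this project's exact
 * interface.
 */
static void show_object_cb(struct object *obj, const char *name, void *data)
{
	FILE *out = data;	/* e.g. stdout, supplied by the caller */

	show_object_with_name(out, obj, name);
}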
{'added': [(28, 'void show_object_with_name(FILE *out, struct object *obj, const char *name)'), (30, '\tconst char *p;')], 'deleted': [(28, 'char *path_name(struct strbuf *path, const char *name)'), (30, '\tstruct strbuf ret = STRBUF_INIT;'), (31, '\tif (path)'), (32, '\t\tstrbuf_addbuf(&ret, path);'), (33, '\tstrbuf_addstr(&ret, name);'), (34, '\treturn strbuf_detach(&ret, NULL);'), (35, '}'), (36, ''), (37, 'void show_object_with_name(FILE *out, struct object *obj,'), (38, '\t\t\t struct strbuf *path, const char *component)'), (39, '{'), (40, '\tchar *name = path_name(path, component);'), (41, '\tchar *p;'), (47, ''), (48, '\tfree(name);')]}
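For readability, the 'deleted' tuples in the diff above reassemble into the pre-fix helper that built each object name by strbuf concatenation (reconstructed verbatim from those entries; strbuf is git's growable string type):

char *path_name(struct strbuf *path, const char *name)
{
	struct strbuf ret = STRBUF_INIT;
	if (path)
		strbuf_addbuf(&ret, path);
	strbuf_addstr(&ret, name);
	return strbuf_detach(&ret, NULL);
}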
num_lines_added: 2
num_lines_deleted: 15
num_lines_in_file: 2,418
num_tokens_in_file: 16,753
num_lines_in_method: 11
num_tokens_in_method: 90
method_complexity: 3
repo: https://github.com/git/git
cve_id: CVE-2016-2315
cwe_id: CWE-119
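CWE-119 denotes improper restriction of operations within the bounds of a memory buffer. As a generic illustration of that failure class (hypothetical code, not from this repository), concatenating components of unchecked length into a fixed buffer can write past its end, whereas a sized write cannot:

#include <stdio.h>

/* Generic CWE-119 illustration -- hypothetical, not git code. */
void join_unbounded(const char *path, const char *name)
{
	char buf[64];
	sprintf(buf, "%s%s", path, name);	/* overflows once inputs exceed 63 bytes */
	puts(buf);
}

void join_bounded(const char *path, const char *name)
{
	char buf[64];
	snprintf(buf, sizeof(buf), "%s%s", path, name);	/* truncates instead of overflowing */
	puts(buf);
}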
id: 2,986
file_name: print-isoclns.c
programming_language: C
method_name: isis_print_extd_ip_reach
/* * Copyright (c) 1992, 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Original code by Matt Thomas, Digital Equipment Corporation * * Extensively modified by Hannes Gredler (hannes@gredler.at) for more * complete IS-IS & CLNP support. */ /* \summary: ISO CLNS, ESIS, and ISIS printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "ether.h" #include "nlpid.h" #include "extract.h" #include "gmpls.h" #include "oui.h" #include "signature.h" static const char tstr[] = " [|isis]"; /* * IS-IS is defined in ISO 10589. Look there for protocol definitions. */ #define SYSTEM_ID_LEN ETHER_ADDR_LEN #define NODE_ID_LEN SYSTEM_ID_LEN+1 #define LSP_ID_LEN SYSTEM_ID_LEN+2 #define ISIS_VERSION 1 #define ESIS_VERSION 1 #define CLNP_VERSION 1 #define ISIS_PDU_TYPE_MASK 0x1F #define ESIS_PDU_TYPE_MASK 0x1F #define CLNP_PDU_TYPE_MASK 0x1F #define CLNP_FLAG_MASK 0xE0 #define ISIS_LAN_PRIORITY_MASK 0x7F #define ISIS_PDU_L1_LAN_IIH 15 #define ISIS_PDU_L2_LAN_IIH 16 #define ISIS_PDU_PTP_IIH 17 #define ISIS_PDU_L1_LSP 18 #define ISIS_PDU_L2_LSP 20 #define ISIS_PDU_L1_CSNP 24 #define ISIS_PDU_L2_CSNP 25 #define ISIS_PDU_L1_PSNP 26 #define ISIS_PDU_L2_PSNP 27 static const struct tok isis_pdu_values[] = { { ISIS_PDU_L1_LAN_IIH, "L1 Lan IIH"}, { ISIS_PDU_L2_LAN_IIH, "L2 Lan IIH"}, { ISIS_PDU_PTP_IIH, "p2p IIH"}, { ISIS_PDU_L1_LSP, "L1 LSP"}, { ISIS_PDU_L2_LSP, "L2 LSP"}, { ISIS_PDU_L1_CSNP, "L1 CSNP"}, { ISIS_PDU_L2_CSNP, "L2 CSNP"}, { ISIS_PDU_L1_PSNP, "L1 PSNP"}, { ISIS_PDU_L2_PSNP, "L2 PSNP"}, { 0, NULL} }; /* * A TLV is a tuple of a type, length and a value and is normally used for * encoding information in all sorts of places. This is an enumeration of * the well known types. 
* * list taken from rfc3359 plus some memory from veterans ;-) */ #define ISIS_TLV_AREA_ADDR 1 /* iso10589 */ #define ISIS_TLV_IS_REACH 2 /* iso10589 */ #define ISIS_TLV_ESNEIGH 3 /* iso10589 */ #define ISIS_TLV_PART_DIS 4 /* iso10589 */ #define ISIS_TLV_PREFIX_NEIGH 5 /* iso10589 */ #define ISIS_TLV_ISNEIGH 6 /* iso10589 */ #define ISIS_TLV_ISNEIGH_VARLEN 7 /* iso10589 */ #define ISIS_TLV_PADDING 8 /* iso10589 */ #define ISIS_TLV_LSP 9 /* iso10589 */ #define ISIS_TLV_AUTH 10 /* iso10589, rfc3567 */ #define ISIS_TLV_CHECKSUM 12 /* rfc3358 */ #define ISIS_TLV_CHECKSUM_MINLEN 2 #define ISIS_TLV_POI 13 /* rfc6232 */ #define ISIS_TLV_LSP_BUFFERSIZE 14 /* iso10589 rev2 */ #define ISIS_TLV_LSP_BUFFERSIZE_MINLEN 2 #define ISIS_TLV_EXT_IS_REACH 22 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_IS_ALIAS_ID 24 /* draft-ietf-isis-ext-lsp-frags-02 */ #define ISIS_TLV_DECNET_PHASE4 42 #define ISIS_TLV_LUCENT_PRIVATE 66 #define ISIS_TLV_INT_IP_REACH 128 /* rfc1195, rfc2966 */ #define ISIS_TLV_PROTOCOLS 129 /* rfc1195 */ #define ISIS_TLV_EXT_IP_REACH 130 /* rfc1195, rfc2966 */ #define ISIS_TLV_IDRP_INFO 131 /* rfc1195 */ #define ISIS_TLV_IDRP_INFO_MINLEN 1 #define ISIS_TLV_IPADDR 132 /* rfc1195 */ #define ISIS_TLV_IPAUTH 133 /* rfc1195 */ #define ISIS_TLV_TE_ROUTER_ID 134 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_EXTD_IP_REACH 135 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_HOSTNAME 137 /* rfc2763 */ #define ISIS_TLV_SHARED_RISK_GROUP 138 /* draft-ietf-isis-gmpls-extensions */ #define ISIS_TLV_MT_PORT_CAP 143 /* rfc6165 */ #define ISIS_TLV_MT_CAPABILITY 144 /* rfc6329 */ #define ISIS_TLV_NORTEL_PRIVATE1 176 #define ISIS_TLV_NORTEL_PRIVATE2 177 #define ISIS_TLV_RESTART_SIGNALING 211 /* rfc3847 */ #define ISIS_TLV_RESTART_SIGNALING_FLAGLEN 1 #define ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN 2 #define ISIS_TLV_MT_IS_REACH 222 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED 229 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED_MINLEN 2 #define ISIS_TLV_IP6ADDR 232 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP_REACH 235 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_IP6_REACH 236 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP6_REACH 237 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_PTP_ADJ 240 /* rfc3373 */ #define ISIS_TLV_IIH_SEQNR 241 /* draft-shen-isis-iih-sequence-00 */ #define ISIS_TLV_IIH_SEQNR_MINLEN 4 #define ISIS_TLV_VENDOR_PRIVATE 250 /* draft-ietf-isis-experimental-tlv-01 */ #define ISIS_TLV_VENDOR_PRIVATE_MINLEN 3 static const struct tok isis_tlv_values[] = { { ISIS_TLV_AREA_ADDR, "Area address(es)"}, { ISIS_TLV_IS_REACH, "IS Reachability"}, { ISIS_TLV_ESNEIGH, "ES Neighbor(s)"}, { ISIS_TLV_PART_DIS, "Partition DIS"}, { ISIS_TLV_PREFIX_NEIGH, "Prefix Neighbors"}, { ISIS_TLV_ISNEIGH, "IS Neighbor(s)"}, { ISIS_TLV_ISNEIGH_VARLEN, "IS Neighbor(s) (variable length)"}, { ISIS_TLV_PADDING, "Padding"}, { ISIS_TLV_LSP, "LSP entries"}, { ISIS_TLV_AUTH, "Authentication"}, { ISIS_TLV_CHECKSUM, "Checksum"}, { ISIS_TLV_POI, "Purge Originator Identifier"}, { ISIS_TLV_LSP_BUFFERSIZE, "LSP Buffersize"}, { ISIS_TLV_EXT_IS_REACH, "Extended IS Reachability"}, { ISIS_TLV_IS_ALIAS_ID, "IS Alias ID"}, { ISIS_TLV_DECNET_PHASE4, "DECnet Phase IV"}, { ISIS_TLV_LUCENT_PRIVATE, "Lucent Proprietary"}, { ISIS_TLV_INT_IP_REACH, "IPv4 Internal Reachability"}, { ISIS_TLV_PROTOCOLS, "Protocols supported"}, { ISIS_TLV_EXT_IP_REACH, "IPv4 External Reachability"}, { ISIS_TLV_IDRP_INFO, "Inter-Domain Information Type"}, { 
ISIS_TLV_IPADDR, "IPv4 Interface address(es)"}, { ISIS_TLV_IPAUTH, "IPv4 authentication (deprecated)"}, { ISIS_TLV_TE_ROUTER_ID, "Traffic Engineering Router ID"}, { ISIS_TLV_EXTD_IP_REACH, "Extended IPv4 Reachability"}, { ISIS_TLV_SHARED_RISK_GROUP, "Shared Risk Link Group"}, { ISIS_TLV_MT_PORT_CAP, "Multi-Topology-Aware Port Capability"}, { ISIS_TLV_MT_CAPABILITY, "Multi-Topology Capability"}, { ISIS_TLV_NORTEL_PRIVATE1, "Nortel Proprietary"}, { ISIS_TLV_NORTEL_PRIVATE2, "Nortel Proprietary"}, { ISIS_TLV_HOSTNAME, "Hostname"}, { ISIS_TLV_RESTART_SIGNALING, "Restart Signaling"}, { ISIS_TLV_MT_IS_REACH, "Multi Topology IS Reachability"}, { ISIS_TLV_MT_SUPPORTED, "Multi Topology"}, { ISIS_TLV_IP6ADDR, "IPv6 Interface address(es)"}, { ISIS_TLV_MT_IP_REACH, "Multi-Topology IPv4 Reachability"}, { ISIS_TLV_IP6_REACH, "IPv6 reachability"}, { ISIS_TLV_MT_IP6_REACH, "Multi-Topology IP6 Reachability"}, { ISIS_TLV_PTP_ADJ, "Point-to-point Adjacency State"}, { ISIS_TLV_IIH_SEQNR, "Hello PDU Sequence Number"}, { ISIS_TLV_VENDOR_PRIVATE, "Vendor Private"}, { 0, NULL } }; #define ESIS_OPTION_PROTOCOLS 129 #define ESIS_OPTION_QOS_MAINTENANCE 195 /* iso9542 */ #define ESIS_OPTION_SECURITY 197 /* iso9542 */ #define ESIS_OPTION_ES_CONF_TIME 198 /* iso9542 */ #define ESIS_OPTION_PRIORITY 205 /* iso9542 */ #define ESIS_OPTION_ADDRESS_MASK 225 /* iso9542 */ #define ESIS_OPTION_SNPA_MASK 226 /* iso9542 */ static const struct tok esis_option_values[] = { { ESIS_OPTION_PROTOCOLS, "Protocols supported"}, { ESIS_OPTION_QOS_MAINTENANCE, "QoS Maintenance" }, { ESIS_OPTION_SECURITY, "Security" }, { ESIS_OPTION_ES_CONF_TIME, "ES Configuration Time" }, { ESIS_OPTION_PRIORITY, "Priority" }, { ESIS_OPTION_ADDRESS_MASK, "Addressk Mask" }, { ESIS_OPTION_SNPA_MASK, "SNPA Mask" }, { 0, NULL } }; #define CLNP_OPTION_DISCARD_REASON 193 #define CLNP_OPTION_QOS_MAINTENANCE 195 /* iso8473 */ #define CLNP_OPTION_SECURITY 197 /* iso8473 */ #define CLNP_OPTION_SOURCE_ROUTING 200 /* iso8473 */ #define CLNP_OPTION_ROUTE_RECORDING 203 /* iso8473 */ #define CLNP_OPTION_PADDING 204 /* iso8473 */ #define CLNP_OPTION_PRIORITY 205 /* iso8473 */ static const struct tok clnp_option_values[] = { { CLNP_OPTION_DISCARD_REASON, "Discard Reason"}, { CLNP_OPTION_PRIORITY, "Priority"}, { CLNP_OPTION_QOS_MAINTENANCE, "QoS Maintenance"}, { CLNP_OPTION_SECURITY, "Security"}, { CLNP_OPTION_SOURCE_ROUTING, "Source Routing"}, { CLNP_OPTION_ROUTE_RECORDING, "Route Recording"}, { CLNP_OPTION_PADDING, "Padding"}, { 0, NULL } }; static const struct tok clnp_option_rfd_class_values[] = { { 0x0, "General"}, { 0x8, "Address"}, { 0x9, "Source Routeing"}, { 0xa, "Lifetime"}, { 0xb, "PDU Discarded"}, { 0xc, "Reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_general_values[] = { { 0x0, "Reason not specified"}, { 0x1, "Protocol procedure error"}, { 0x2, "Incorrect checksum"}, { 0x3, "PDU discarded due to congestion"}, { 0x4, "Header syntax error (cannot be parsed)"}, { 0x5, "Segmentation needed but not permitted"}, { 0x6, "Incomplete PDU received"}, { 0x7, "Duplicate option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_address_values[] = { { 0x0, "Destination address unreachable"}, { 0x1, "Destination address unknown"}, { 0, NULL } }; static const struct tok clnp_option_rfd_source_routeing_values[] = { { 0x0, "Unspecified source routeing error"}, { 0x1, "Syntax error in source routeing field"}, { 0x2, "Unknown address in source routeing field"}, { 0x3, "Path not acceptable"}, { 0, NULL } }; static const struct tok 
clnp_option_rfd_lifetime_values[] = { { 0x0, "Lifetime expired while data unit in transit"}, { 0x1, "Lifetime expired during reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_pdu_discard_values[] = { { 0x0, "Unsupported option not specified"}, { 0x1, "Unsupported protocol version"}, { 0x2, "Unsupported security option"}, { 0x3, "Unsupported source routeing option"}, { 0x4, "Unsupported recording of route option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_reassembly_values[] = { { 0x0, "Reassembly interference"}, { 0, NULL } }; /* array of 16 error-classes */ static const struct tok *clnp_option_rfd_error_class[] = { clnp_option_rfd_general_values, NULL, NULL, NULL, NULL, NULL, NULL, NULL, clnp_option_rfd_address_values, clnp_option_rfd_source_routeing_values, clnp_option_rfd_lifetime_values, clnp_option_rfd_pdu_discard_values, clnp_option_rfd_reassembly_values, NULL, NULL, NULL }; #define CLNP_OPTION_OPTION_QOS_MASK 0x3f #define CLNP_OPTION_SCOPE_MASK 0xc0 #define CLNP_OPTION_SCOPE_SA_SPEC 0x40 #define CLNP_OPTION_SCOPE_DA_SPEC 0x80 #define CLNP_OPTION_SCOPE_GLOBAL 0xc0 static const struct tok clnp_option_scope_values[] = { { CLNP_OPTION_SCOPE_SA_SPEC, "Source Address Specific"}, { CLNP_OPTION_SCOPE_DA_SPEC, "Destination Address Specific"}, { CLNP_OPTION_SCOPE_GLOBAL, "Globally unique"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_values[] = { { 0x0, "partial"}, { 0x1, "complete"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_string_values[] = { { CLNP_OPTION_SOURCE_ROUTING, "source routing"}, { CLNP_OPTION_ROUTE_RECORDING, "recording of route in progress"}, { 0, NULL } }; static const struct tok clnp_option_qos_global_values[] = { { 0x20, "reserved"}, { 0x10, "sequencing vs. delay"}, { 0x08, "congested"}, { 0x04, "delay vs. cost"}, { 0x02, "error vs. delay"}, { 0x01, "error vs. 
cost"}, { 0, NULL } }; #define ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP 3 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID 4 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID 5 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR 6 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR 8 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW 9 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW 10 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW 11 /* rfc4124 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD 12 /* draft-ietf-tewg-diff-te-proto-06 */ #define ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC 18 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE 19 /* draft-ietf-isis-link-attr-01 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE 20 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR 21 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS 22 /* rfc4124 */ #define ISIS_SUBTLV_SPB_METRIC 29 /* rfc6329 */ static const struct tok isis_ext_is_reach_subtlv_values[] = { { ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP, "Administrative groups" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID, "Link Local/Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID, "Link Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR, "IPv4 interface address" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR, "IPv4 neighbor address" }, { ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW, "Maximum link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW, "Reservable link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW, "Unreserved bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC, "Traffic Engineering Metric" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE, "Link Attribute" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE, "Link Protection Type" }, { ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR, "Interface Switching Capability" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD, "Bandwidth Constraints (old)" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS, "Bandwidth Constraints" }, { ISIS_SUBTLV_SPB_METRIC, "SPB Metric" }, { 250, "Reserved for cisco specific extensions" }, { 251, "Reserved for cisco specific extensions" }, { 252, "Reserved for cisco specific extensions" }, { 253, "Reserved for cisco specific extensions" }, { 254, "Reserved for cisco specific extensions" }, { 255, "Reserved for future expansion" }, { 0, NULL } }; #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32 1 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64 2 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR 117 /* draft-ietf-isis-wg-multi-topology-05 */ static const struct tok isis_ext_ip_reach_subtlv_values[] = { { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32, "32-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64, "64-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR, "Management Prefix Color" }, { 0, NULL } }; static const struct tok isis_subtlv_link_attribute_values[] = { { 0x01, "Local Protection Available" }, { 0x02, "Link excluded from local protection path" }, { 0x04, "Local maintenance required"}, { 0, NULL } }; #define ISIS_SUBTLV_AUTH_SIMPLE 1 #define ISIS_SUBTLV_AUTH_GENERIC 3 /* rfc 5310 */ #define ISIS_SUBTLV_AUTH_MD5 54 #define ISIS_SUBTLV_AUTH_MD5_LEN 16 #define 
ISIS_SUBTLV_AUTH_PRIVATE 255 static const struct tok isis_subtlv_auth_values[] = { { ISIS_SUBTLV_AUTH_SIMPLE, "simple text password"}, { ISIS_SUBTLV_AUTH_GENERIC, "Generic Crypto key-id"}, { ISIS_SUBTLV_AUTH_MD5, "HMAC-MD5 password"}, { ISIS_SUBTLV_AUTH_PRIVATE, "Routing Domain private password"}, { 0, NULL } }; #define ISIS_SUBTLV_IDRP_RES 0 #define ISIS_SUBTLV_IDRP_LOCAL 1 #define ISIS_SUBTLV_IDRP_ASN 2 static const struct tok isis_subtlv_idrp_values[] = { { ISIS_SUBTLV_IDRP_RES, "Reserved"}, { ISIS_SUBTLV_IDRP_LOCAL, "Routing-Domain Specific"}, { ISIS_SUBTLV_IDRP_ASN, "AS Number Tag"}, { 0, NULL} }; #define ISIS_SUBTLV_SPB_MCID 4 #define ISIS_SUBTLV_SPB_DIGEST 5 #define ISIS_SUBTLV_SPB_BVID 6 #define ISIS_SUBTLV_SPB_INSTANCE 1 #define ISIS_SUBTLV_SPBM_SI 3 #define ISIS_SPB_MCID_LEN 51 #define ISIS_SUBTLV_SPB_MCID_MIN_LEN 102 #define ISIS_SUBTLV_SPB_DIGEST_MIN_LEN 33 #define ISIS_SUBTLV_SPB_BVID_MIN_LEN 6 #define ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN 19 #define ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN 8 static const struct tok isis_mt_port_cap_subtlv_values[] = { { ISIS_SUBTLV_SPB_MCID, "SPB MCID" }, { ISIS_SUBTLV_SPB_DIGEST, "SPB Digest" }, { ISIS_SUBTLV_SPB_BVID, "SPB BVID" }, { 0, NULL } }; static const struct tok isis_mt_capability_subtlv_values[] = { { ISIS_SUBTLV_SPB_INSTANCE, "SPB Instance" }, { ISIS_SUBTLV_SPBM_SI, "SPBM Service Identifier and Unicast Address" }, { 0, NULL } }; struct isis_spb_mcid { uint8_t format_id; uint8_t name[32]; uint8_t revision_lvl[2]; uint8_t digest[16]; }; struct isis_subtlv_spb_mcid { struct isis_spb_mcid mcid; struct isis_spb_mcid aux_mcid; }; struct isis_subtlv_spb_instance { uint8_t cist_root_id[8]; uint8_t cist_external_root_path_cost[4]; uint8_t bridge_priority[2]; uint8_t spsourceid[4]; uint8_t no_of_trees; }; #define CLNP_SEGMENT_PART 0x80 #define CLNP_MORE_SEGMENTS 0x40 #define CLNP_REQUEST_ER 0x20 static const struct tok clnp_flag_values[] = { { CLNP_SEGMENT_PART, "Segmentation permitted"}, { CLNP_MORE_SEGMENTS, "more Segments"}, { CLNP_REQUEST_ER, "request Error Report"}, { 0, NULL} }; #define ISIS_MASK_LSP_OL_BIT(x) ((x)&0x4) #define ISIS_MASK_LSP_ISTYPE_BITS(x) ((x)&0x3) #define ISIS_MASK_LSP_PARTITION_BIT(x) ((x)&0x80) #define ISIS_MASK_LSP_ATT_BITS(x) ((x)&0x78) #define ISIS_MASK_LSP_ATT_ERROR_BIT(x) ((x)&0x40) #define ISIS_MASK_LSP_ATT_EXPENSE_BIT(x) ((x)&0x20) #define ISIS_MASK_LSP_ATT_DELAY_BIT(x) ((x)&0x10) #define ISIS_MASK_LSP_ATT_DEFAULT_BIT(x) ((x)&0x8) #define ISIS_MASK_MTID(x) ((x)&0x0fff) #define ISIS_MASK_MTFLAGS(x) ((x)&0xf000) static const struct tok isis_mt_flag_values[] = { { 0x4000, "ATT bit set"}, { 0x8000, "Overload bit set"}, { 0, NULL} }; #define ISIS_MASK_TLV_EXTD_IP_UPDOWN(x) ((x)&0x80) #define ISIS_MASK_TLV_EXTD_IP_SUBTLV(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_IE(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_SUBTLV(x) ((x)&0x20) #define ISIS_LSP_TLV_METRIC_SUPPORTED(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_IE(x) ((x)&0x40) #define ISIS_LSP_TLV_METRIC_UPDOWN(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_VALUE(x) ((x)&0x3f) #define ISIS_MASK_TLV_SHARED_RISK_GROUP(x) ((x)&0x1) static const struct tok isis_mt_values[] = { { 0, "IPv4 unicast"}, { 1, "In-Band Management"}, { 2, "IPv6 unicast"}, { 3, "Multicast"}, { 4095, "Development, Experimental or Proprietary"}, { 0, NULL } }; static const struct tok isis_iih_circuit_type_values[] = { { 1, "Level 1 only"}, { 2, "Level 2 only"}, { 3, "Level 1, Level 2"}, { 0, NULL} }; #define ISIS_LSP_TYPE_UNUSED0 0 #define ISIS_LSP_TYPE_LEVEL_1 1 #define ISIS_LSP_TYPE_UNUSED2 2 #define 
ISIS_LSP_TYPE_LEVEL_2 3 static const struct tok isis_lsp_istype_values[] = { { ISIS_LSP_TYPE_UNUSED0, "Unused 0x0 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_1, "L1 IS"}, { ISIS_LSP_TYPE_UNUSED2, "Unused 0x2 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_2, "L2 IS"}, { 0, NULL } }; /* * Katz's point to point adjacency TLV uses codes to tell us the state of * the remote adjacency. Enumerate them. */ #define ISIS_PTP_ADJ_UP 0 #define ISIS_PTP_ADJ_INIT 1 #define ISIS_PTP_ADJ_DOWN 2 static const struct tok isis_ptp_adjancey_values[] = { { ISIS_PTP_ADJ_UP, "Up" }, { ISIS_PTP_ADJ_INIT, "Initializing" }, { ISIS_PTP_ADJ_DOWN, "Down" }, { 0, NULL} }; struct isis_tlv_ptp_adj { uint8_t adjacency_state; uint8_t extd_local_circuit_id[4]; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; uint8_t neighbor_extd_local_circuit_id[4]; }; static void osi_print_cksum(netdissect_options *, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length); static int clnp_print(netdissect_options *, const uint8_t *, u_int); static void esis_print(netdissect_options *, const uint8_t *, u_int); static int isis_print(netdissect_options *, const uint8_t *, u_int); struct isis_metric_block { uint8_t metric_default; uint8_t metric_delay; uint8_t metric_expense; uint8_t metric_error; }; struct isis_tlv_is_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_nodeid[NODE_ID_LEN]; }; struct isis_tlv_es_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; }; struct isis_tlv_ip_reach { struct isis_metric_block isis_metric_block; uint8_t prefix[4]; uint8_t mask[4]; }; static const struct tok isis_is_reach_virtual_values[] = { { 0, "IsNotVirtual"}, { 1, "IsVirtual"}, { 0, NULL } }; static const struct tok isis_restart_flag_values[] = { { 0x1, "Restart Request"}, { 0x2, "Restart Acknowledgement"}, { 0x4, "Suppress adjacency advertisement"}, { 0, NULL } }; struct isis_common_header { uint8_t nlpid; uint8_t fixed_len; uint8_t version; /* Protocol version */ uint8_t id_length; uint8_t pdu_type; /* 3 MSbits are reserved */ uint8_t pdu_version; /* Packet format version */ uint8_t reserved; uint8_t max_area; }; struct isis_iih_lan_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t priority; uint8_t lan_id[NODE_ID_LEN]; }; struct isis_iih_ptp_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t circuit_id; }; struct isis_lsp_header { uint8_t pdu_len[2]; uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; uint8_t typeblock; }; struct isis_csnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; uint8_t start_lsp_id[LSP_ID_LEN]; uint8_t end_lsp_id[LSP_ID_LEN]; }; struct isis_psnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; }; struct isis_tlv_lsp { uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; }; #define ISIS_COMMON_HEADER_SIZE (sizeof(struct isis_common_header)) #define ISIS_IIH_LAN_HEADER_SIZE (sizeof(struct isis_iih_lan_header)) #define ISIS_IIH_PTP_HEADER_SIZE (sizeof(struct isis_iih_ptp_header)) #define ISIS_LSP_HEADER_SIZE (sizeof(struct isis_lsp_header)) #define ISIS_CSNP_HEADER_SIZE (sizeof(struct isis_csnp_header)) #define ISIS_PSNP_HEADER_SIZE (sizeof(struct isis_psnp_header)) void isoclns_print(netdissect_options *ndo, const uint8_t *p, u_int length) { if (!ND_TTEST(*p)) { /* enough bytes on the wire ? 
*/ ND_PRINT((ndo, "|OSI")); return; } if (ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID %s (0x%02x): ", tok2str(nlpid_values, "Unknown", *p), *p)); switch (*p) { case NLPID_CLNP: if (!clnp_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_ESIS: esis_print(ndo, p, length); return; case NLPID_ISIS: if (!isis_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_NULLNS: ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); break; case NLPID_Q933: q933_print(ndo, p + 1, length - 1); break; case NLPID_IP: ip_print(ndo, p + 1, length - 1); break; case NLPID_IP6: ip6_print(ndo, p + 1, length - 1); break; case NLPID_PPP: ppp_print(ndo, p + 1, length - 1); break; default: if (!ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID 0x%02x unknown", *p)); ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); if (length > 1) print_unknown_data(ndo, p, "\n\t", length); break; } } #define CLNP_PDU_ER 1 #define CLNP_PDU_DT 28 #define CLNP_PDU_MD 29 #define CLNP_PDU_ERQ 30 #define CLNP_PDU_ERP 31 static const struct tok clnp_pdu_values[] = { { CLNP_PDU_ER, "Error Report"}, { CLNP_PDU_MD, "MD"}, { CLNP_PDU_DT, "Data"}, { CLNP_PDU_ERQ, "Echo Request"}, { CLNP_PDU_ERP, "Echo Response"}, { 0, NULL } }; struct clnp_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t lifetime; /* units of 500ms */ uint8_t type; uint8_t segment_length[2]; uint8_t cksum[2]; }; struct clnp_segment_header_t { uint8_t data_unit_id[2]; uint8_t segment_offset[2]; uint8_t total_length[2]; }; /* * clnp_print * Decode CLNP packets. Return 0 on error. */ static int clnp_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr,*source_address,*dest_address; u_int li,tlen,nsap_offset,source_address_length,dest_address_length, clnp_pdu_type, clnp_flags; const struct clnp_header_t *clnp_header; const struct clnp_segment_header_t *clnp_segment_header; uint8_t rfd_error_major,rfd_error_minor; clnp_header = (const struct clnp_header_t *) pptr; ND_TCHECK(*clnp_header); li = clnp_header->length_indicator; optr = pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "CLNP")); /* * Sanity checking of the header. 
*/ if (clnp_header->version != CLNP_VERSION) { ND_PRINT((ndo, "version %d packet not supported", clnp_header->version)); return (0); } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return (0); } if (li < sizeof(struct clnp_header_t)) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return (0); } /* FIXME further header sanity checking */ clnp_pdu_type = clnp_header->type & CLNP_PDU_TYPE_MASK; clnp_flags = clnp_header->type & CLNP_FLAG_MASK; pptr += sizeof(struct clnp_header_t); li -= sizeof(struct clnp_header_t); if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); dest_address_length = *pptr; pptr += 1; li -= 1; if (li < dest_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, dest_address_length); dest_address = pptr; pptr += dest_address_length; li -= dest_address_length; if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); source_address_length = *pptr; pptr += 1; li -= 1; if (li < source_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, source_address_length); source_address = pptr; pptr += source_address_length; li -= source_address_length; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s > %s, %s, length %u", ndo->ndo_eflag ? "" : ", ", isonsap_string(ndo, source_address, source_address_length), isonsap_string(ndo, dest_address, dest_address_length), tok2str(clnp_pdu_values,"unknown (%u)",clnp_pdu_type), length)); return (1); } ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ? 
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s PDU, hlen: %u, v: %u, lifetime: %u.%us, Segment PDU length: %u, checksum: 0x%04x", tok2str(clnp_pdu_values, "unknown (%u)",clnp_pdu_type), clnp_header->length_indicator, clnp_header->version, clnp_header->lifetime/2, (clnp_header->lifetime%2)*5, EXTRACT_16BITS(clnp_header->segment_length), EXTRACT_16BITS(clnp_header->cksum))); osi_print_cksum(ndo, optr, EXTRACT_16BITS(clnp_header->cksum), 7, clnp_header->length_indicator); ND_PRINT((ndo, "\n\tFlags [%s]", bittok2str(clnp_flag_values, "none", clnp_flags))); ND_PRINT((ndo, "\n\tsource address (length %u): %s\n\tdest address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length), dest_address_length, isonsap_string(ndo, dest_address, dest_address_length))); if (clnp_flags & CLNP_SEGMENT_PART) { if (li < sizeof(const struct clnp_segment_header_t)) { ND_PRINT((ndo, "li < size of fixed part of CLNP header, addresses, and segment part")); return (0); } clnp_segment_header = (const struct clnp_segment_header_t *) pptr; ND_TCHECK(*clnp_segment_header); ND_PRINT((ndo, "\n\tData Unit ID: 0x%04x, Segment Offset: %u, Total PDU Length: %u", EXTRACT_16BITS(clnp_segment_header->data_unit_id), EXTRACT_16BITS(clnp_segment_header->segment_offset), EXTRACT_16BITS(clnp_segment_header->total_length))); pptr+=sizeof(const struct clnp_segment_header_t); li-=sizeof(const struct clnp_segment_header_t); } /* now walk the options */ while (li >= 2) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return (0); } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return (0); } ND_TCHECK2(*pptr, opli); li -= opli; tptr = pptr; tlen = opli; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(clnp_option_values,"Unknown",op), op, opli)); /* * We've already checked that the entire option is present * in the captured packet with the ND_TCHECK2() call. * Therefore, we don't need to do ND_TCHECK()/ND_TCHECK2() * checks. * We do, however, need to check tlen, to make sure we * don't run past the end of the option. 
*/ switch (op) { case CLNP_OPTION_ROUTE_RECORDING: /* those two options share the format */ case CLNP_OPTION_SOURCE_ROUTING: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "%s %s", tok2str(clnp_option_sr_rr_values,"Unknown",*tptr), tok2str(clnp_option_sr_rr_string_values, "Unknown Option %u", op))); nsap_offset=*(tptr+1); if (nsap_offset == 0) { ND_PRINT((ndo, " Bad NSAP offset (0)")); break; } nsap_offset-=1; /* offset to nsap list */ if (nsap_offset > tlen) { ND_PRINT((ndo, " Bad NSAP offset (past end of option)")); break; } tptr+=nsap_offset; tlen-=nsap_offset; while (tlen > 0) { source_address_length=*tptr; if (tlen < source_address_length+1) { ND_PRINT((ndo, "\n\t NSAP address goes past end of option")); break; } if (source_address_length > 0) { source_address=(tptr+1); ND_TCHECK2(*source_address, source_address_length); ND_PRINT((ndo, "\n\t NSAP address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length))); } tlen-=source_address_length+1; } break; case CLNP_OPTION_PRIORITY: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "0x%1x", *tptr&0x0f)); break; case CLNP_OPTION_QOS_MAINTENANCE: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s", tok2str(clnp_option_scope_values, "Reserved", *tptr&CLNP_OPTION_SCOPE_MASK))); if ((*tptr&CLNP_OPTION_SCOPE_MASK) == CLNP_OPTION_SCOPE_GLOBAL) ND_PRINT((ndo, "\n\t QoS Flags [%s]", bittok2str(clnp_option_qos_global_values, "none", *tptr&CLNP_OPTION_OPTION_QOS_MASK))); break; case CLNP_OPTION_SECURITY: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s, Security-Level %u", tok2str(clnp_option_scope_values,"Reserved",*tptr&CLNP_OPTION_SCOPE_MASK), *(tptr+1))); break; case CLNP_OPTION_DISCARD_REASON: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } rfd_error_major = (*tptr&0xf0) >> 4; rfd_error_minor = *tptr&0x0f; ND_PRINT((ndo, "\n\t Class: %s Error (0x%01x), %s (0x%01x)", tok2str(clnp_option_rfd_class_values,"Unknown",rfd_error_major), rfd_error_major, tok2str(clnp_option_rfd_error_class[rfd_error_major],"Unknown",rfd_error_minor), rfd_error_minor)); break; case CLNP_OPTION_PADDING: ND_PRINT((ndo, "padding data")); break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } switch (clnp_pdu_type) { case CLNP_PDU_ER: /* fall through */ case CLNP_PDU_ERP: ND_TCHECK(*pptr); if (*(pptr) == NLPID_CLNP) { ND_PRINT((ndo, "\n\t-----original packet-----\n\t")); /* FIXME recursion protection */ clnp_print(ndo, pptr, length - clnp_header->length_indicator); break; } case CLNP_PDU_DT: case CLNP_PDU_MD: case CLNP_PDU_ERQ: default: /* dump the PDU specific data */ if (length-(pptr-optr) > 0) { ND_PRINT((ndo, "\n\t undecoded non-header data, length %u", length-clnp_header->length_indicator)); print_unknown_data(ndo, pptr, "\n\t ", length - (pptr - optr)); } } return (1); trunc: ND_PRINT((ndo, "[|clnp]")); return (1); } #define ESIS_PDU_REDIRECT 6 #define ESIS_PDU_ESH 2 #define ESIS_PDU_ISH 4 static const struct tok esis_pdu_values[] = { { ESIS_PDU_REDIRECT, "redirect"}, { ESIS_PDU_ESH, "ESH"}, { ESIS_PDU_ISH, "ISH"}, { 0, NULL } }; struct esis_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t reserved; uint8_t type; 
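	/* low 5 bits hold the PDU type (ESIS_PDU_TYPE_MASK); decoded via esis_pdu_values */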
uint8_t holdtime[2]; uint8_t cksum[2]; }; static void esis_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr; u_int li,esis_pdu_type,source_address_length, source_address_number; const struct esis_header_t *esis_header; if (!ndo->ndo_eflag) ND_PRINT((ndo, "ES-IS")); if (length <= 2) { ND_PRINT((ndo, ndo->ndo_qflag ? "bad pkt!" : "no header at all!")); return; } esis_header = (const struct esis_header_t *) pptr; ND_TCHECK(*esis_header); li = esis_header->length_indicator; optr = pptr; /* * Sanity checking of the header. */ if (esis_header->nlpid != NLPID_ESIS) { ND_PRINT((ndo, " nlpid 0x%02x packet not supported", esis_header->nlpid)); return; } if (esis_header->version != ESIS_VERSION) { ND_PRINT((ndo, " version %d packet not supported", esis_header->version)); return; } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return; } if (li < sizeof(struct esis_header_t) + 2) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return; } esis_pdu_type = esis_header->type & ESIS_PDU_TYPE_MASK; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s, length %u", ndo->ndo_eflag ? "" : ", ", tok2str(esis_pdu_values,"unknown type (%u)",esis_pdu_type), length)); return; } else ND_PRINT((ndo, "%slength %u\n\t%s (%u)", ndo->ndo_eflag ? "" : ", ", length, tok2str(esis_pdu_values,"unknown type: %u", esis_pdu_type), esis_pdu_type)); ND_PRINT((ndo, ", v: %u%s", esis_header->version, esis_header->version == ESIS_VERSION ? "" : "unsupported" )); ND_PRINT((ndo, ", checksum: 0x%04x", EXTRACT_16BITS(esis_header->cksum))); osi_print_cksum(ndo, pptr, EXTRACT_16BITS(esis_header->cksum), 7, li); ND_PRINT((ndo, ", holding time: %us, length indicator: %u", EXTRACT_16BITS(esis_header->holdtime), li)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, optr, "\n\t", sizeof(struct esis_header_t)); pptr += sizeof(struct esis_header_t); li -= sizeof(struct esis_header_t); switch (esis_pdu_type) { case ESIS_PDU_REDIRECT: { const uint8_t *dst, *snpa, *neta; u_int dstl, snpal, netal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } dstl = *pptr; pptr++; li--; ND_TCHECK2(*pptr, dstl); if (li < dstl) { ND_PRINT((ndo, ", bad redirect/li")); return; } dst = pptr; pptr += dstl; li -= dstl; ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, dst, dstl))); ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpal = *pptr; pptr++; li--; ND_TCHECK2(*pptr, snpal); if (li < snpal) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpa = pptr; pptr += snpal; li -= snpal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } netal = *pptr; pptr++; ND_TCHECK2(*pptr, netal); if (li < netal) { ND_PRINT((ndo, ", bad redirect/li")); return; } neta = pptr; pptr += netal; li -= netal; if (netal == 0) ND_PRINT((ndo, "\n\t %s", etheraddr_string(ndo, snpa))); else ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, neta, netal))); break; } case ESIS_PDU_ESH: ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_number = *pptr; pptr++; li--; ND_PRINT((ndo, "\n\t Number of Source Addresses: %u", source_address_number)); while (source_address_number > 0) { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad esh/li")); return; } 
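			/* each source-address entry is a 1-byte length followed by that many NSAP bytes */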
ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; source_address_number--; } break; case ESIS_PDU_ISH: { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad ish/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad ish/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; break; } default: if (ndo->ndo_vflag <= 1) { if (pptr < ndo->ndo_snapend) print_unknown_data(ndo, pptr, "\n\t ", ndo->ndo_snapend - pptr); } return; } /* now walk the options */ while (li != 0) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return; } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return; } li -= opli; tptr = pptr; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(esis_option_values,"Unknown",op), op, opli)); switch (op) { case ESIS_OPTION_ES_CONF_TIME: if (opli == 2) { ND_TCHECK2(*pptr, 2); ND_PRINT((ndo, "%us", EXTRACT_16BITS(tptr))); } else ND_PRINT((ndo, "(bad length)")); break; case ESIS_OPTION_PROTOCOLS: while (opli>0) { ND_TCHECK(*pptr); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (opli>1) /* further NPLIDs ? - put comma */ ND_PRINT((ndo, ", ")); tptr++; opli--; } break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ case ESIS_OPTION_QOS_MAINTENANCE: case ESIS_OPTION_SECURITY: case ESIS_OPTION_PRIORITY: case ESIS_OPTION_ADDRESS_MASK: case ESIS_OPTION_SNPA_MASK: default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } trunc: return; } static void isis_print_mcid(netdissect_options *ndo, const struct isis_spb_mcid *mcid) { int i; ND_TCHECK(*mcid); ND_PRINT((ndo, "ID: %d, Name: ", mcid->format_id)); if (fn_printzp(ndo, mcid->name, 32, ndo->ndo_snapend)) goto trunc; ND_PRINT((ndo, "\n\t Lvl: %d", EXTRACT_16BITS(mcid->revision_lvl))); ND_PRINT((ndo, ", Digest: ")); for(i=0;i<16;i++) ND_PRINT((ndo, "%.2x ", mcid->digest[i])); trunc: ND_PRINT((ndo, "%s", tstr)); } static int isis_print_mt_port_cap_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len; const struct isis_subtlv_spb_mcid *subtlv_spb_mcid; int i; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_port_cap_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); /*len -= TLV_TYPE_LEN_OFFSET;*/ len = len -2; switch (stlv_type) { case ISIS_SUBTLV_SPB_MCID: { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_MCID_MIN_LEN); subtlv_spb_mcid = (const struct isis_subtlv_spb_mcid *)tptr; ND_PRINT((ndo, "\n\t MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ ND_PRINT((ndo, "\n\t AUX-MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->aux_mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ tptr = tptr + sizeof(struct isis_subtlv_spb_mcid); len = len - sizeof(struct isis_subtlv_spb_mcid); break; } case ISIS_SUBTLV_SPB_DIGEST: { ND_TCHECK2(*(tptr), 
ISIS_SUBTLV_SPB_DIGEST_MIN_LEN); ND_PRINT((ndo, "\n\t RES: %d V: %d A: %d D: %d", (*(tptr) >> 5), (((*tptr)>> 4) & 0x01), ((*(tptr) >> 2) & 0x03), ((*tptr) & 0x03))); tptr++; ND_PRINT((ndo, "\n\t Digest: ")); for(i=1;i<=8; i++) { ND_PRINT((ndo, "%08x ", EXTRACT_32BITS(tptr))); if (i%4 == 0 && i != 8) ND_PRINT((ndo, "\n\t ")); tptr = tptr + 4; } len = len - ISIS_SUBTLV_SPB_DIGEST_MIN_LEN; break; } case ISIS_SUBTLV_SPB_BVID: { ND_TCHECK2(*(tptr), stlv_len); while (len >= ISIS_SUBTLV_SPB_BVID_MIN_LEN) { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_BVID_MIN_LEN); ND_PRINT((ndo, "\n\t ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " BVID: %d, U:%01x M:%01x ", (EXTRACT_16BITS (tptr) >> 4) , (EXTRACT_16BITS (tptr) >> 3) & 0x01, (EXTRACT_16BITS (tptr) >> 2) & 0x01)); tptr = tptr + 2; len = len - ISIS_SUBTLV_SPB_BVID_MIN_LEN; } break; } default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static int isis_print_mt_capability_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len, tmp; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_capability_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); len = len - 2; switch (stlv_type) { case ISIS_SUBTLV_SPB_INSTANCE: ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN); ND_PRINT((ndo, "\n\t CIST Root-ID: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Path Cost: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Prio: %d", EXTRACT_16BITS(tptr))); tptr = tptr + 2; ND_PRINT((ndo, "\n\t RES: %d", EXTRACT_16BITS(tptr) >> 5)); ND_PRINT((ndo, ", V: %d", (EXTRACT_16BITS(tptr) >> 4) & 0x0001)); ND_PRINT((ndo, ", SPSource-ID: %d", (EXTRACT_32BITS(tptr) & 0x000fffff))); tptr = tptr+4; ND_PRINT((ndo, ", No of Trees: %x", *(tptr))); tmp = *(tptr++); len = len - ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN; while (tmp) { ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN); ND_PRINT((ndo, "\n\t U:%d, M:%d, A:%d, RES:%d", *(tptr) >> 7, (*(tptr) >> 6) & 0x01, (*(tptr) >> 5) & 0x01, (*(tptr) & 0x1f))); tptr++; ND_PRINT((ndo, ", ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr + 4; ND_PRINT((ndo, ", BVID: %d, SPVID: %d", (EXTRACT_24BITS(tptr) >> 12) & 0x000fff, EXTRACT_24BITS(tptr) & 0x000fff)); tptr = tptr + 3; len = len - ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN; tmp--; } break; case ISIS_SUBTLV_SPBM_SI: ND_TCHECK2(*tptr, 8); ND_PRINT((ndo, "\n\t BMAC: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, "%04x", EXTRACT_16BITS(tptr))); tptr = tptr+2; ND_PRINT((ndo, ", RES: %d, VID: %d", EXTRACT_16BITS(tptr) >> 12, (EXTRACT_16BITS(tptr)) & 0x0fff)); tptr = tptr+2; len = len - 8; stlv_len = stlv_len - 8; while (stlv_len >= 4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t T: %d, R: %d, RES: %d, ISID: %d", (EXTRACT_32BITS(tptr) >> 31), (EXTRACT_32BITS(tptr) >> 30) & 0x01, (EXTRACT_32BITS(tptr) >> 24) & 0x03f, (EXTRACT_32BITS(tptr)) & 0x0ffffff)); tptr = tptr + 4; len = len - 4; stlv_len = stlv_len - 4; } break; default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } /* shared routine for printing system, node and lsp-ids */ static char * isis_print_id(const uint8_t *cp, int id_len) { int i; static char id[sizeof("xxxx.xxxx.xxxx.yy-zz")]; char *pos = id; for (i = 1; i <= SYSTEM_ID_LEN; i++) { snprintf(pos, 
                 sizeof(id) - (pos - id), "%02x", *cp++);
        pos += strlen(pos);
        if (i == 2 || i == 4)
            *pos++ = '.';
    }

    if (id_len >= NODE_ID_LEN) {
        snprintf(pos, sizeof(id) - (pos - id), ".%02x", *cp++);
        pos += strlen(pos);
    }

    if (id_len == LSP_ID_LEN)
        snprintf(pos, sizeof(id) - (pos - id), "-%02x", *cp);

    return (id);
}

/* print the 4-byte metric block which is commonly found in the old-style TLVs */
static int
isis_print_metric_block(netdissect_options *ndo,
                        const struct isis_metric_block *isis_metric_block)
{
    ND_PRINT((ndo, ", Default Metric: %d, %s",
              ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_default),
              ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_default) ? "External" : "Internal"));
    if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_delay))
        ND_PRINT((ndo, "\n\t\t Delay Metric: %d, %s",
                  ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_delay),
                  ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_delay) ? "External" : "Internal"));
    if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_expense))
        ND_PRINT((ndo, "\n\t\t Expense Metric: %d, %s",
                  ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_expense),
                  ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_expense) ? "External" : "Internal"));
    if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_error))
        ND_PRINT((ndo, "\n\t\t Error Metric: %d, %s",
                  ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_error),
                  ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_error) ? "External" : "Internal"));

    return(1); /* everything is ok */
}

static int
isis_print_tlv_ip_reach(netdissect_options *ndo,
                        const uint8_t *cp, const char *ident, int length)
{
    int prefix_len;
    const struct isis_tlv_ip_reach *tlv_ip_reach;

    tlv_ip_reach = (const struct isis_tlv_ip_reach *)cp;

    while (length > 0) {
        if ((size_t)length < sizeof(*tlv_ip_reach)) {
            ND_PRINT((ndo, "short IPv4 Reachability (%d vs %lu)",
                      length,
                      (unsigned long)sizeof(*tlv_ip_reach)));
            return (0);
        }

        if (!ND_TTEST(*tlv_ip_reach))
            return (0);

        prefix_len = mask2plen(EXTRACT_32BITS(tlv_ip_reach->mask));

        if (prefix_len == -1)
            ND_PRINT((ndo, "%sIPv4 prefix: %s mask %s",
                      ident,
                      ipaddr_string(ndo, (tlv_ip_reach->prefix)),
                      ipaddr_string(ndo, (tlv_ip_reach->mask))));
        else
            ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u",
                      ident,
                      ipaddr_string(ndo, (tlv_ip_reach->prefix)),
                      prefix_len));

        ND_PRINT((ndo, ", Distribution: %s, Metric: %u, %s",
                  ISIS_LSP_TLV_METRIC_UPDOWN(tlv_ip_reach->isis_metric_block.metric_default) ? "down" : "up",
                  ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_default),
                  ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_default) ? "External" : "Internal"));

        if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_delay))
            ND_PRINT((ndo, "%s Delay Metric: %u, %s",
                      ident,
                      ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_delay),
                      ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_delay) ? "External" : "Internal"));

        if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_expense))
            ND_PRINT((ndo, "%s Expense Metric: %u, %s",
                      ident,
                      ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_expense),
                      ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_expense) ? "External" : "Internal"));

        if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_error))
            ND_PRINT((ndo, "%s Error Metric: %u, %s",
                      ident,
                      ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_error),
                      ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_error) ?
"External" : "Internal")); length -= sizeof(struct isis_tlv_ip_reach); tlv_ip_reach++; } return (1); } /* * this is the common IP-REACH subTLV decoder it is called * from various EXTD-IP REACH TLVs (135,235,236,237) */ static int isis_print_ip_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, int subt, int subl, const char *ident) { /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_ip_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr,subl); switch(subt) { case ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR: /* fall through */ case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32: while (subl >= 4) { ND_PRINT((ndo, ", 0x%08x (=%u)", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr))); tptr+=4; subl-=4; } break; case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64: while (subl >= 8) { ND_PRINT((ndo, ", 0x%08x%08x", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr+4))); tptr+=8; subl-=8; } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: ND_PRINT((ndo, "%s", ident)); ND_PRINT((ndo, "%s", tstr)); return(0); } /* * this is the common IS-REACH subTLV decoder it is called * from isis_print_ext_is_reach() */ static int isis_print_is_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, u_int subt, u_int subl, const char *ident) { u_int te_class,priority_level,gmpls_switch_cap; union { /* int to float conversion buffer for several subTLVs */ float f; uint32_t i; } bw; /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_is_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr, subl); switch(subt) { case ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP: case ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID: case ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID: if (subl >= 4) { ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr))); if (subl == 8) /* rfc4205 */ ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr+4))); } break; case ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR: case ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR: if (subl >= sizeof(struct in_addr)) ND_PRINT((ndo, ", %s", ipaddr_string(ndo, tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW : case ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW: if (subl >= 4) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, ", %.3f Mbps", bw.f * 8 / 1000000)); } break; case ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW : if (subl >= 32) { for (te_class = 0; te_class < 8; te_class++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s TE-Class %u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } } break; case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS: /* fall through */ case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD: ND_PRINT((ndo, "%sBandwidth Constraints Model ID: %s (%u)", ident, tok2str(diffserv_te_bc_values, "unknown", *tptr), *tptr)); tptr++; /* decode BCs until the subTLV ends */ for (te_class = 0; te_class < (subl-1)/4; te_class++) { ND_TCHECK2(*tptr, 4); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Bandwidth constraint CT%u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } break; case ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC: if (subl >= 3) ND_PRINT((ndo, ", %u", EXTRACT_24BITS(tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE: if (subl == 2) { ND_PRINT((ndo, ", [ %s ] (0x%04x)", bittok2str(isis_subtlv_link_attribute_values, "Unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE: if (subl >= 2) { 
ND_PRINT((ndo, ", %s, Priority %u", bittok2str(gmpls_link_prot_values, "none", *tptr), *(tptr+1))); } break; case ISIS_SUBTLV_SPB_METRIC: if (subl >= 6) { ND_PRINT((ndo, ", LM: %u", EXTRACT_24BITS(tptr))); tptr=tptr+3; ND_PRINT((ndo, ", P: %u", *(tptr))); tptr++; ND_PRINT((ndo, ", P-ID: %u", EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR: if (subl >= 36) { gmpls_switch_cap = *tptr; ND_PRINT((ndo, "%s Interface Switching Capability:%s", ident, tok2str(gmpls_switch_cap_values, "Unknown", gmpls_switch_cap))); ND_PRINT((ndo, ", LSP Encoding: %s", tok2str(gmpls_encoding_values, "Unknown", *(tptr + 1)))); tptr+=4; ND_PRINT((ndo, "%s Max LSP Bandwidth:", ident)); for (priority_level = 0; priority_level < 8; priority_level++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s priority level %d: %.3f Mbps", ident, priority_level, bw.f * 8 / 1000000)); tptr+=4; } subl-=36; switch (gmpls_switch_cap) { case GMPLS_PSC1: case GMPLS_PSC2: case GMPLS_PSC3: case GMPLS_PSC4: ND_TCHECK2(*tptr, 6); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Interface MTU: %u", ident, EXTRACT_16BITS(tptr + 4))); break; case GMPLS_TSC: ND_TCHECK2(*tptr, 8); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Indication %s", ident, tok2str(gmpls_switch_cap_tsc_indication_values, "Unknown (%u)", *(tptr + 4)))); break; default: /* there is some optional stuff left to decode but this is as of yet not specified so just lets hexdump what is left */ if(subl>0){ if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); } } } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: return(0); } /* * this is the common IS-REACH decoder it is called * from various EXTD-IS REACH style TLVs (22,24,222) */ static int isis_print_ext_is_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, int tlv_type) { char ident_buffer[20]; int subtlv_type,subtlv_len,subtlv_sum_len; int proc_bytes = 0; /* how many bytes did we process ? */ if (!ND_TTEST2(*tptr, NODE_ID_LEN)) return(0); ND_PRINT((ndo, "%sIS Neighbor: %s", ident, isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); if (tlv_type != ISIS_TLV_IS_ALIAS_ID) { /* the Alias TLV Metric field is implicit 0 */ if (!ND_TTEST2(*tptr, 3)) /* and is therefore skipped */ return(0); ND_PRINT((ndo, ", Metric: %d", EXTRACT_24BITS(tptr))); tptr+=3; } if (!ND_TTEST2(*tptr, 1)) return(0); subtlv_sum_len=*(tptr++); /* read out subTLV length */ proc_bytes=NODE_ID_LEN+3+1; ND_PRINT((ndo, ", %ssub-TLVs present",subtlv_sum_len ? 
"" : "no ")); if (subtlv_sum_len) { ND_PRINT((ndo, " (%u)", subtlv_sum_len)); while (subtlv_sum_len>0) { if (!ND_TTEST2(*tptr,2)) return(0); subtlv_type=*(tptr++); subtlv_len=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_is_reach_subtlv(ndo, tptr, subtlv_type, subtlv_len, ident_buffer)) return(0); tptr+=subtlv_len; subtlv_sum_len-=(subtlv_len+2); proc_bytes+=(subtlv_len+2); } } return(proc_bytes); } /* * this is the common Multi Topology ID decoder * it is called from various MT-TLVs (222,229,235,237) */ static int isis_print_mtid(netdissect_options *ndo, const uint8_t *tptr, const char *ident) { if (!ND_TTEST2(*tptr, 2)) return(0); ND_PRINT((ndo, "%s%s", ident, tok2str(isis_mt_values, "Reserved for IETF Consensus", ISIS_MASK_MTID(EXTRACT_16BITS(tptr))))); ND_PRINT((ndo, " Topology (0x%03x), Flags: [%s]", ISIS_MASK_MTID(EXTRACT_16BITS(tptr)), bittok2str(isis_mt_flag_values, "none",ISIS_MASK_MTFLAGS(EXTRACT_16BITS(tptr))))); return(2); } /* * this is the common extended IP reach decoder * it is called from TLVs (135,235,236,237) * we process the TLV and optional subTLVs and return * the amount of processed bytes */ static int isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 1)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? 
", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); } /* * Clear checksum and lifetime prior to signature verification. */ static void isis_clear_checksum_lifetime(void *header) { struct isis_lsp_header *header_lsp = (struct isis_lsp_header *) header; header_lsp->checksum[0] = 0; header_lsp->checksum[1] = 0; header_lsp->remaining_lifetime[0] = 0; header_lsp->remaining_lifetime[1] = 0; } /* * isis_print * Decode IS-IS packets. Return 0 on error. */ static int isis_print(netdissect_options *ndo, const uint8_t *p, u_int length) { const struct isis_common_header *isis_header; const struct isis_iih_lan_header *header_iih_lan; const struct isis_iih_ptp_header *header_iih_ptp; const struct isis_lsp_header *header_lsp; const struct isis_csnp_header *header_csnp; const struct isis_psnp_header *header_psnp; const struct isis_tlv_lsp *tlv_lsp; const struct isis_tlv_ptp_adj *tlv_ptp_adj; const struct isis_tlv_is_reach *tlv_is_reach; const struct isis_tlv_es_reach *tlv_es_reach; uint8_t pdu_type, max_area, id_length, tlv_type, tlv_len, tmp, alen, lan_alen, prefix_len; uint8_t ext_is_len, ext_ip_len, mt_len; const uint8_t *optr, *pptr, *tptr; u_short packet_len,pdu_len, key_id; u_int i,vendor_id; int sigcheck; packet_len=length; optr = p; /* initialize the _o_riginal pointer to the packet start - need it for parsing the checksum TLV and authentication TLV verification */ isis_header = (const struct isis_common_header *)p; ND_TCHECK(*isis_header); if (length < ISIS_COMMON_HEADER_SIZE) goto trunc; pptr = p+(ISIS_COMMON_HEADER_SIZE); header_iih_lan = (const struct isis_iih_lan_header *)pptr; header_iih_ptp = (const struct isis_iih_ptp_header *)pptr; header_lsp = (const struct isis_lsp_header *)pptr; header_csnp = (const struct isis_csnp_header *)pptr; header_psnp = (const struct isis_psnp_header *)pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "IS-IS")); /* * Sanity checking of the header. 
     */

    if (isis_header->version != ISIS_VERSION) {
	ND_PRINT((ndo, "version %d packet not supported", isis_header->version));
	return (0);
    }

    if ((isis_header->id_length != SYSTEM_ID_LEN) && (isis_header->id_length != 0)) {
	ND_PRINT((ndo, "system ID length of %d is not supported", isis_header->id_length));
	return (0);
    }

    if (isis_header->pdu_version != ISIS_VERSION) {
	ND_PRINT((ndo, "version %d packet not supported", isis_header->pdu_version));
	return (0);
    }

    if (length < isis_header->fixed_len) {
	ND_PRINT((ndo, "fixed header length %u > packet length %u", isis_header->fixed_len, length));
	return (0);
    }

    if (isis_header->fixed_len < ISIS_COMMON_HEADER_SIZE) {
	ND_PRINT((ndo, "fixed header length %u < minimum header size %u", isis_header->fixed_len, (u_int)ISIS_COMMON_HEADER_SIZE));
	return (0);
    }

    max_area = isis_header->max_area;
    switch(max_area) {
    case 0:
	max_area = 3;	/* 0 means "3 area addresses supported", per ISO 10589 */
	break;
    case 255:
	ND_PRINT((ndo, "bad packet -- 255 areas"));
	return (0);
    default:
	break;
    }

    id_length = isis_header->id_length;
    switch(id_length) {
    case 0:
        id_length = 6;	/* 0 likewise means the default 6-octet system ID */
        break;
    case 1:             /* 1-8 are valid sys-ID lengths */
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
        break;
    case 255:
        id_length = 0;  /* entirely useless */
        break;
    default:
        break;
    }

    /* toss any non 6-byte sys-ID len PDUs */
    if (id_length != 6) {
	ND_PRINT((ndo, "bad packet -- illegal sys-ID length (%u)", id_length));
	return (0);
    }

    pdu_type = isis_header->pdu_type;

    /* in non-verbose mode print the basic PDU type plus PDU-specific brief information */
    if (ndo->ndo_vflag == 0) {
        ND_PRINT((ndo, "%s%s",
                  ndo->ndo_eflag ? "" : ", ",
                  tok2str(isis_pdu_values, "unknown PDU-Type %u", pdu_type)));
    } else {
        /* ok, they seem to want to know everything - let's fully decode it */
	ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ?
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s, hlen: %u, v: %u, pdu-v: %u, sys-id-len: %u (%u), max-area: %u (%u)", tok2str(isis_pdu_values, "unknown, type %u", pdu_type), isis_header->fixed_len, isis_header->version, isis_header->pdu_version, id_length, isis_header->id_length, max_area, isis_header->max_area)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, optr, "\n\t", 8)) /* provide the _o_riginal pointer */ return (0); /* for optionally debugging the common header */ } } switch (pdu_type) { case ISIS_PDU_L1_LAN_IIH: case ISIS_PDU_L2_LAN_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_lan); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_lan->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", lan-id %s, prio %u", isis_print_id(header_iih_lan->lan_id,NODE_ID_LEN), header_iih_lan->priority)); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_lan->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_lan->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_lan->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_lan->circuit_type))); ND_PRINT((ndo, "\n\t lan-id: %s, Priority: %u, PDU length: %u", isis_print_id(header_iih_lan->lan_id, NODE_ID_LEN), (header_iih_lan->priority) & ISIS_LAN_PRIORITY_MASK, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_LAN_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); break; case ISIS_PDU_PTP_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_ptp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_ptp->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_ptp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_ptp->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_ptp->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_ptp->circuit_type))); ND_PRINT((ndo, "\n\t circuit-id: 0x%02x, PDU length: %u", header_iih_ptp->circuit_id, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_PTP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); break; case ISIS_PDU_L1_LSP: case ISIS_PDU_L2_LSP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header 
length %u should be %lu", isis_header->fixed_len, (unsigned long)ISIS_LSP_HEADER_SIZE)); return (0); } ND_TCHECK(*header_lsp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", lsp-id %s, seq 0x%08x, lifetime %5us", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_lsp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t lsp-id: %s, seq: 0x%08x, lifetime: %5us\n\t chksum: 0x%04x", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime), EXTRACT_16BITS(header_lsp->checksum))); osi_print_cksum(ndo, (const uint8_t *)header_lsp->lsp_id, EXTRACT_16BITS(header_lsp->checksum), 12, length-12); ND_PRINT((ndo, ", PDU length: %u, Flags: [ %s", pdu_len, ISIS_MASK_LSP_OL_BIT(header_lsp->typeblock) ? "Overload bit set, " : "")); if (ISIS_MASK_LSP_ATT_BITS(header_lsp->typeblock)) { ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DEFAULT_BIT(header_lsp->typeblock) ? "default " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DELAY_BIT(header_lsp->typeblock) ? "delay " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_EXPENSE_BIT(header_lsp->typeblock) ? "expense " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_ERROR_BIT(header_lsp->typeblock) ? "error " : "")); ND_PRINT((ndo, "ATT bit set, ")); } ND_PRINT((ndo, "%s", ISIS_MASK_LSP_PARTITION_BIT(header_lsp->typeblock) ? "P bit set, " : "")); ND_PRINT((ndo, "%s ]", tok2str(isis_lsp_istype_values, "Unknown(0x%x)", ISIS_MASK_LSP_ISTYPE_BITS(header_lsp->typeblock)))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_LSP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); break; case ISIS_PDU_L1_CSNP: case ISIS_PDU_L2_CSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_csnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_csnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_csnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_csnp->source_id, NODE_ID_LEN), pdu_len)); ND_PRINT((ndo, "\n\t start lsp-id: %s", isis_print_id(header_csnp->start_lsp_id, LSP_ID_LEN))); ND_PRINT((ndo, "\n\t end lsp-id: %s", isis_print_id(header_csnp->end_lsp_id, LSP_ID_LEN))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_CSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); break; case ISIS_PDU_L1_PSNP: case ISIS_PDU_L2_PSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE)) { ND_PRINT((ndo, "- bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned 
long)(ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_psnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_psnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_psnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_psnp->source_id, NODE_ID_LEN), pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_PSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); break; default: if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", length %u", length)); return (1); } (void)print_unknown_data(ndo, pptr, "\n\t ", length); return (0); } /* * Now print the TLV's. */ while (packet_len > 0) { ND_TCHECK2(*pptr, 2); if (packet_len < 2) goto trunc; tlv_type = *pptr++; tlv_len = *pptr++; tmp =tlv_len; /* copy temporary len & pointer to packet data */ tptr = pptr; packet_len -= 2; /* first lets see if we know the TLVs name*/ ND_PRINT((ndo, "\n\t %s TLV #%u, length: %u", tok2str(isis_tlv_values, "unknown", tlv_type), tlv_type, tlv_len)); if (tlv_len == 0) /* something is invalid */ continue; if (packet_len < tlv_len) goto trunc; /* now check if we have a decoder otherwise do a hexdump at the end*/ switch (tlv_type) { case ISIS_TLV_AREA_ADDR: ND_TCHECK2(*tptr, 1); alen = *tptr++; while (tmp && alen < tmp) { ND_TCHECK2(*tptr, alen); ND_PRINT((ndo, "\n\t Area address (length: %u): %s", alen, isonsap_string(ndo, tptr, alen))); tptr += alen; tmp -= alen + 1; if (tmp==0) /* if this is the last area address do not attemt a boundary check */ break; ND_TCHECK2(*tptr, 1); alen = *tptr++; } break; case ISIS_TLV_ISNEIGH: while (tmp >= ETHER_ADDR_LEN) { ND_TCHECK2(*tptr, ETHER_ADDR_LEN); ND_PRINT((ndo, "\n\t SNPA: %s", isis_print_id(tptr, ETHER_ADDR_LEN))); tmp -= ETHER_ADDR_LEN; tptr += ETHER_ADDR_LEN; } break; case ISIS_TLV_ISNEIGH_VARLEN: if (!ND_TTEST2(*tptr, 1) || tmp < 3) /* min. TLV length */ goto trunctlv; lan_alen = *tptr++; /* LAN address length */ if (lan_alen == 0) { ND_PRINT((ndo, "\n\t LAN address length 0 bytes (invalid)")); break; } tmp --; ND_PRINT((ndo, "\n\t LAN address length %u bytes ", lan_alen)); while (tmp >= lan_alen) { ND_TCHECK2(*tptr, lan_alen); ND_PRINT((ndo, "\n\t\tIS Neighbor: %s", isis_print_id(tptr, lan_alen))); tmp -= lan_alen; tptr +=lan_alen; } break; case ISIS_TLV_PADDING: break; case ISIS_TLV_MT_IS_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; while (tmp >= 2+NODE_ID_LEN+3+1) { ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_ALIAS_ID: while (tmp >= NODE_ID_LEN+1) { /* is it worth attempting a decode ? */ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_EXT_IS_REACH: while (tmp >= NODE_ID_LEN+3+1) { /* is it worth attempting a decode ? 
*/ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_REACH: ND_TCHECK2(*tptr,1); /* check if there is one byte left to read out the virtual flag */ ND_PRINT((ndo, "\n\t %s", tok2str(isis_is_reach_virtual_values, "bogus virtual flag 0x%02x", *tptr++))); tlv_is_reach = (const struct isis_tlv_is_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_is_reach)) { ND_TCHECK(*tlv_is_reach); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tlv_is_reach->neighbor_nodeid, NODE_ID_LEN))); isis_print_metric_block(ndo, &tlv_is_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_is_reach); tlv_is_reach++; } break; case ISIS_TLV_ESNEIGH: tlv_es_reach = (const struct isis_tlv_es_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_es_reach)) { ND_TCHECK(*tlv_es_reach); ND_PRINT((ndo, "\n\t ES Neighbor: %s", isis_print_id(tlv_es_reach->neighbor_sysid, SYSTEM_ID_LEN))); isis_print_metric_block(ndo, &tlv_es_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_es_reach); tlv_es_reach++; } break; /* those two TLVs share the same format */ case ISIS_TLV_INT_IP_REACH: case ISIS_TLV_EXT_IP_REACH: if (!isis_print_tlv_ip_reach(ndo, pptr, "\n\t ", tlv_len)) return (1); break; case ISIS_TLV_EXTD_IP_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP6_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? 
*/ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6ADDR: while (tmp>=sizeof(struct in6_addr)) { ND_TCHECK2(*tptr, sizeof(struct in6_addr)); ND_PRINT((ndo, "\n\t IPv6 interface address: %s", ip6addr_string(ndo, tptr))); tptr += sizeof(struct in6_addr); tmp -= sizeof(struct in6_addr); } break; case ISIS_TLV_AUTH: ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t %s: ", tok2str(isis_subtlv_auth_values, "unknown Authentication type 0x%02x", *tptr))); switch (*tptr) { case ISIS_SUBTLV_AUTH_SIMPLE: if (fn_printzp(ndo, tptr + 1, tlv_len - 1, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_SUBTLV_AUTH_MD5: for(i=1;i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } if (tlv_len != ISIS_SUBTLV_AUTH_MD5_LEN+1) ND_PRINT((ndo, ", (invalid subTLV) ")); sigcheck = signature_verify(ndo, optr, length, tptr + 1, isis_clear_checksum_lifetime, header_lsp); ND_PRINT((ndo, " (%s)", tok2str(signature_check_values, "Unknown", sigcheck))); break; case ISIS_SUBTLV_AUTH_GENERIC: ND_TCHECK2(*(tptr + 1), 2); key_id = EXTRACT_16BITS((tptr+1)); ND_PRINT((ndo, "%u, password: ", key_id)); for(i=1 + sizeof(uint16_t);i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } break; case ISIS_SUBTLV_AUTH_PRIVATE: default: if (!print_unknown_data(ndo, tptr + 1, "\n\t\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_PTP_ADJ: tlv_ptp_adj = (const struct isis_tlv_ptp_adj *)tptr; if(tmp>=1) { ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t Adjacency State: %s (%u)", tok2str(isis_ptp_adjancey_values, "unknown", *tptr), *tptr)); tmp--; } if(tmp>sizeof(tlv_ptp_adj->extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->extd_local_circuit_id); ND_PRINT((ndo, "\n\t Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->extd_local_circuit_id))); tmp-=sizeof(tlv_ptp_adj->extd_local_circuit_id); } if(tmp>=SYSTEM_ID_LEN) { ND_TCHECK2(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t Neighbor System-ID: %s", isis_print_id(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN))); tmp-=SYSTEM_ID_LEN; } if(tmp>=sizeof(tlv_ptp_adj->neighbor_extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->neighbor_extd_local_circuit_id); ND_PRINT((ndo, "\n\t Neighbor Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->neighbor_extd_local_circuit_id))); } break; case ISIS_TLV_PROTOCOLS: ND_PRINT((ndo, "\n\t NLPID(s): ")); while (tmp>0) { ND_TCHECK2(*(tptr), 1); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (tmp>1) /* further NPLIDs ? 
- put comma */ ND_PRINT((ndo, ", ")); tptr++; tmp--; } break; case ISIS_TLV_MT_PORT_CAP: { ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t RES: %d, MTID(s): %d", (EXTRACT_16BITS (tptr) >> 12), (EXTRACT_16BITS (tptr) & 0x0fff))); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_port_cap_subtlv(ndo, tptr, tmp); break; } case ISIS_TLV_MT_CAPABILITY: ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t O: %d, RES: %d, MTID(s): %d", (EXTRACT_16BITS(tptr) >> 15) & 0x01, (EXTRACT_16BITS(tptr) >> 12) & 0x07, EXTRACT_16BITS(tptr) & 0x0fff)); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_capability_subtlv(ndo, tptr, tmp); break; case ISIS_TLV_TE_ROUTER_ID: ND_TCHECK2(*pptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t Traffic Engineering Router ID: %s", ipaddr_string(ndo, pptr))); break; case ISIS_TLV_IPADDR: while (tmp>=sizeof(struct in_addr)) { ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr += sizeof(struct in_addr); tmp -= sizeof(struct in_addr); } break; case ISIS_TLV_HOSTNAME: ND_PRINT((ndo, "\n\t Hostname: ")); if (fn_printzp(ndo, tptr, tmp, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_TLV_SHARED_RISK_GROUP: if (tmp < NODE_ID_LEN) break; ND_TCHECK2(*tptr, NODE_ID_LEN); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); tmp-=(NODE_ID_LEN); if (tmp < 1) break; ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, ", Flags: [%s]", ISIS_MASK_TLV_SHARED_RISK_GROUP(*tptr++) ? "numbered" : "unnumbered")); tmp--; if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 neighbor address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); while (tmp>=4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Link-ID: 0x%08x", EXTRACT_32BITS(tptr))); tptr+=4; tmp-=4; } break; case ISIS_TLV_LSP: tlv_lsp = (const struct isis_tlv_lsp *)tptr; while(tmp>=sizeof(struct isis_tlv_lsp)) { ND_TCHECK((tlv_lsp->lsp_id)[LSP_ID_LEN-1]); ND_PRINT((ndo, "\n\t lsp-id: %s", isis_print_id(tlv_lsp->lsp_id, LSP_ID_LEN))); ND_TCHECK2(tlv_lsp->sequence_number, 4); ND_PRINT((ndo, ", seq: 0x%08x", EXTRACT_32BITS(tlv_lsp->sequence_number))); ND_TCHECK2(tlv_lsp->remaining_lifetime, 2); ND_PRINT((ndo, ", lifetime: %5ds", EXTRACT_16BITS(tlv_lsp->remaining_lifetime))); ND_TCHECK2(tlv_lsp->checksum, 2); ND_PRINT((ndo, ", chksum: 0x%04x", EXTRACT_16BITS(tlv_lsp->checksum))); tmp-=sizeof(struct isis_tlv_lsp); tlv_lsp++; } break; case ISIS_TLV_CHECKSUM: if (tmp < ISIS_TLV_CHECKSUM_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_CHECKSUM_MINLEN); ND_PRINT((ndo, "\n\t checksum: 0x%04x ", EXTRACT_16BITS(tptr))); /* do not attempt to verify the checksum if it is zero * most likely a HMAC-MD5 TLV is also present and * to avoid conflicts the checksum TLV is zeroed. 
* see rfc3358 for details */ osi_print_cksum(ndo, optr, EXTRACT_16BITS(tptr), tptr-optr, length); break; case ISIS_TLV_POI: if (tlv_len >= SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Purge Originator System-ID: %s", isis_print_id(tptr + 1, SYSTEM_ID_LEN))); } if (tlv_len == 2 * SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, 2 * SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Received from System-ID: %s", isis_print_id(tptr + SYSTEM_ID_LEN + 1, SYSTEM_ID_LEN))); } break; case ISIS_TLV_MT_SUPPORTED: if (tmp < ISIS_TLV_MT_SUPPORTED_MINLEN) break; while (tmp>1) { /* length can only be a multiple of 2, otherwise there is something broken -> so decode down until length is 1 */ if (tmp!=1) { mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; } else { ND_PRINT((ndo, "\n\t invalid MT-ID")); break; } } break; case ISIS_TLV_RESTART_SIGNALING: /* first attempt to decode the flags */ if (tmp < ISIS_TLV_RESTART_SIGNALING_FLAGLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_FLAGLEN); ND_PRINT((ndo, "\n\t Flags [%s]", bittok2str(isis_restart_flag_values, "none", *tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; tmp-=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; /* is there anything other than the flags field? */ if (tmp == 0) break; if (tmp < ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN); ND_PRINT((ndo, ", Remaining holding time %us", EXTRACT_16BITS(tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; tmp-=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; /* is there an additional sysid field present ?*/ if (tmp == SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, ", for %s", isis_print_id(tptr,SYSTEM_ID_LEN))); } break; case ISIS_TLV_IDRP_INFO: if (tmp < ISIS_TLV_IDRP_INFO_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_IDRP_INFO_MINLEN); ND_PRINT((ndo, "\n\t Inter-Domain Information Type: %s", tok2str(isis_subtlv_idrp_values, "Unknown (0x%02x)", *tptr))); switch (*tptr++) { case ISIS_SUBTLV_IDRP_ASN: ND_TCHECK2(*tptr, 2); /* fetch AS number */ ND_PRINT((ndo, "AS Number: %u", EXTRACT_16BITS(tptr))); break; case ISIS_SUBTLV_IDRP_LOCAL: case ISIS_SUBTLV_IDRP_RES: default: if (!print_unknown_data(ndo, tptr, "\n\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_LSP_BUFFERSIZE: if (tmp < ISIS_TLV_LSP_BUFFERSIZE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_LSP_BUFFERSIZE_MINLEN); ND_PRINT((ndo, "\n\t LSP Buffersize: %u", EXTRACT_16BITS(tptr))); break; case ISIS_TLV_PART_DIS: while (tmp >= SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t %s", isis_print_id(tptr, SYSTEM_ID_LEN))); tptr+=SYSTEM_ID_LEN; tmp-=SYSTEM_ID_LEN; } break; case ISIS_TLV_PREFIX_NEIGH: if (tmp < sizeof(struct isis_metric_block)) break; ND_TCHECK2(*tptr, sizeof(struct isis_metric_block)); ND_PRINT((ndo, "\n\t Metric Block")); isis_print_metric_block(ndo, (const struct isis_metric_block *)tptr); tptr+=sizeof(struct isis_metric_block); tmp-=sizeof(struct isis_metric_block); while(tmp>0) { ND_TCHECK2(*tptr, 1); prefix_len=*tptr++; /* read out prefix length in semioctets*/ if (prefix_len < 2) { ND_PRINT((ndo, "\n\t\tAddress: prefix length %u < 2", prefix_len)); break; } tmp--; if (tmp < prefix_len/2) break; ND_TCHECK2(*tptr, prefix_len / 2); ND_PRINT((ndo, "\n\t\tAddress: %s/%u", isonsap_string(ndo, tptr, prefix_len / 2), prefix_len * 4)); tptr+=prefix_len/2; tmp-=prefix_len/2; } break; case ISIS_TLV_IIH_SEQNR: if (tmp < ISIS_TLV_IIH_SEQNR_MINLEN) 
	        break;
	    ND_TCHECK2(*tptr, ISIS_TLV_IIH_SEQNR_MINLEN); /* check if four bytes are on the wire */
	    ND_PRINT((ndo, "\n\t Sequence number: %u", EXTRACT_32BITS(tptr)));
	    break;

        case ISIS_TLV_VENDOR_PRIVATE:
	    if (tmp < ISIS_TLV_VENDOR_PRIVATE_MINLEN)
	        break;
	    ND_TCHECK2(*tptr, ISIS_TLV_VENDOR_PRIVATE_MINLEN); /* check if there are enough bytes for a full OUI */
	    vendor_id = EXTRACT_24BITS(tptr);
	    ND_PRINT((ndo, "\n\t Vendor: %s (%u)",
	              tok2str(oui_values, "Unknown", vendor_id),
	              vendor_id));
	    tptr += 3;
	    tmp -= 3;
	    if (tmp > 0) /* hexdump the rest */
	        if (!print_unknown_data(ndo, tptr, "\n\t\t", tmp))
	            return(0);
	    break;

	    /*
	     * FIXME those are the defined TLVs that lack a decoder
	     * you are welcome to contribute code ;-)
	     */
        case ISIS_TLV_DECNET_PHASE4:
        case ISIS_TLV_LUCENT_PRIVATE:
        case ISIS_TLV_IPAUTH:
        case ISIS_TLV_NORTEL_PRIVATE1:
        case ISIS_TLV_NORTEL_PRIVATE2:
        default:
	    if (ndo->ndo_vflag <= 1) {
	        if (!print_unknown_data(ndo, pptr, "\n\t\t", tlv_len))
	            return(0);
	    }
	    break;
	}

        /* do we want to see an additional hexdump ? */
	if (ndo->ndo_vflag > 1) {
	    if (!print_unknown_data(ndo, pptr, "\n\t ", tlv_len))
	        return(0);
	}

	pptr += tlv_len;
	packet_len -= tlv_len;
    }

    if (packet_len != 0) {
	ND_PRINT((ndo, "\n\t %u straggler bytes", packet_len));
    }
    return (1);

trunc:
    ND_PRINT((ndo, "%s", tstr));
    return (1);

trunctlv:
    ND_PRINT((ndo, "\n\t\t"));
    ND_PRINT((ndo, "%s", tstr));
    return(1);
}

static void
osi_print_cksum(netdissect_options *ndo, const uint8_t *pptr,
	        uint16_t checksum, int checksum_offset, u_int length)
{
        uint16_t calculated_checksum;

        /* do not attempt to verify the checksum if it is zero,
         * if the offset is nonsense,
         * or the base pointer is not sane
         */
        if (!checksum
            || checksum_offset < 0
            || !ND_TTEST2(*(pptr + checksum_offset), 2)
            || (u_int)checksum_offset > length
            || !ND_TTEST2(*pptr, length)) {
                ND_PRINT((ndo, " (unverified)"));
        } else {
#if 0
                printf("\nosi_print_cksum: %p %u %u\n", pptr, checksum_offset, length);
#endif
                calculated_checksum = create_osi_cksum(pptr, checksum_offset, length);
                if (checksum == calculated_checksum) {
                        ND_PRINT((ndo, " (correct)"));
                } else {
                        ND_PRINT((ndo, " (incorrect should be 0x%04x)", calculated_checksum));
                }
        }
}

/*
 * Local Variables:
 * c-style: whitesmith
 * c-basic-offset: 8
 * End:
 */
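/*
 * For reference, a minimal sketch of the ISO 8473 (Fletcher) checksum
 * that create_osi_cksum() is expected to compute for osi_print_cksum()
 * above.  The function name and the exact modulo-255 formulation below
 * are illustrative assumptions (following RFC 1008), not the
 * implementation that ships elsewhere in this tree.
 */
#if 0	/* illustrative sketch only */
static uint16_t
osi_cksum_sketch(const uint8_t *pptr, u_int checksum_offset, u_int length)
{
	uint32_t c0 = 0, c1 = 0;	/* Fletcher running sums */
	int32_t x, y, factor;
	u_int i;

	/* Sum all octets, treating the two checksum octets as zero. */
	for (i = 0; i < length; i++) {
		if (i != checksum_offset && i != checksum_offset + 1)
			c0 += pptr[i];
		c1 += c0;
		c0 %= 255;
		c1 %= 255;
	}

	/*
	 * Solve for the two check octets X and Y so that both running
	 * sums vanish mod 255 (RFC 1008, restated for a 0-based offset;
	 * the caller must guarantee checksum_offset + 1 < length).
	 */
	factor = (int32_t)(length - checksum_offset - 1);
	x = (factor * (int32_t)c0 - (int32_t)c1) % 255;
	if (x <= 0)
		x += 255;	/* also maps 0 to 255: a zero field means "not computed" */
	y = ((int32_t)c1 - (factor + 1) * (int32_t)c0) % 255;
	if (y <= 0)
		y += 255;
	return (uint16_t)((x << 8) | y);
}
#endif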
/* * Copyright (c) 1992, 1993, 1994, 1995, 1996 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that: (1) source code distributions * retain the above copyright notice and this paragraph in its entirety, (2) * distributions including binary code include the above copyright notice and * this paragraph in its entirety in the documentation or other materials * provided with the distribution, and (3) all advertising materials mentioning * features or use of this software display the following acknowledgement: * ``This product includes software developed by the University of California, * Lawrence Berkeley Laboratory and its contributors.'' Neither the name of * the University nor the names of its contributors may be used to endorse * or promote products derived from this software without specific prior * written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * Original code by Matt Thomas, Digital Equipment Corporation * * Extensively modified by Hannes Gredler (hannes@gredler.at) for more * complete IS-IS & CLNP support. */ /* \summary: ISO CLNS, ESIS, and ISIS printer */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <netdissect-stdinc.h> #include <string.h> #include "netdissect.h" #include "addrtoname.h" #include "ether.h" #include "nlpid.h" #include "extract.h" #include "gmpls.h" #include "oui.h" #include "signature.h" static const char tstr[] = " [|isis]"; /* * IS-IS is defined in ISO 10589. Look there for protocol definitions. */ #define SYSTEM_ID_LEN ETHER_ADDR_LEN #define NODE_ID_LEN SYSTEM_ID_LEN+1 #define LSP_ID_LEN SYSTEM_ID_LEN+2 #define ISIS_VERSION 1 #define ESIS_VERSION 1 #define CLNP_VERSION 1 #define ISIS_PDU_TYPE_MASK 0x1F #define ESIS_PDU_TYPE_MASK 0x1F #define CLNP_PDU_TYPE_MASK 0x1F #define CLNP_FLAG_MASK 0xE0 #define ISIS_LAN_PRIORITY_MASK 0x7F #define ISIS_PDU_L1_LAN_IIH 15 #define ISIS_PDU_L2_LAN_IIH 16 #define ISIS_PDU_PTP_IIH 17 #define ISIS_PDU_L1_LSP 18 #define ISIS_PDU_L2_LSP 20 #define ISIS_PDU_L1_CSNP 24 #define ISIS_PDU_L2_CSNP 25 #define ISIS_PDU_L1_PSNP 26 #define ISIS_PDU_L2_PSNP 27 static const struct tok isis_pdu_values[] = { { ISIS_PDU_L1_LAN_IIH, "L1 Lan IIH"}, { ISIS_PDU_L2_LAN_IIH, "L2 Lan IIH"}, { ISIS_PDU_PTP_IIH, "p2p IIH"}, { ISIS_PDU_L1_LSP, "L1 LSP"}, { ISIS_PDU_L2_LSP, "L2 LSP"}, { ISIS_PDU_L1_CSNP, "L1 CSNP"}, { ISIS_PDU_L2_CSNP, "L2 CSNP"}, { ISIS_PDU_L1_PSNP, "L1 PSNP"}, { ISIS_PDU_L2_PSNP, "L2 PSNP"}, { 0, NULL} }; /* * A TLV is a tuple of a type, length and a value and is normally used for * encoding information in all sorts of places. This is an enumeration of * the well known types. 
* * list taken from rfc3359 plus some memory from veterans ;-) */ #define ISIS_TLV_AREA_ADDR 1 /* iso10589 */ #define ISIS_TLV_IS_REACH 2 /* iso10589 */ #define ISIS_TLV_ESNEIGH 3 /* iso10589 */ #define ISIS_TLV_PART_DIS 4 /* iso10589 */ #define ISIS_TLV_PREFIX_NEIGH 5 /* iso10589 */ #define ISIS_TLV_ISNEIGH 6 /* iso10589 */ #define ISIS_TLV_ISNEIGH_VARLEN 7 /* iso10589 */ #define ISIS_TLV_PADDING 8 /* iso10589 */ #define ISIS_TLV_LSP 9 /* iso10589 */ #define ISIS_TLV_AUTH 10 /* iso10589, rfc3567 */ #define ISIS_TLV_CHECKSUM 12 /* rfc3358 */ #define ISIS_TLV_CHECKSUM_MINLEN 2 #define ISIS_TLV_POI 13 /* rfc6232 */ #define ISIS_TLV_LSP_BUFFERSIZE 14 /* iso10589 rev2 */ #define ISIS_TLV_LSP_BUFFERSIZE_MINLEN 2 #define ISIS_TLV_EXT_IS_REACH 22 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_IS_ALIAS_ID 24 /* draft-ietf-isis-ext-lsp-frags-02 */ #define ISIS_TLV_DECNET_PHASE4 42 #define ISIS_TLV_LUCENT_PRIVATE 66 #define ISIS_TLV_INT_IP_REACH 128 /* rfc1195, rfc2966 */ #define ISIS_TLV_PROTOCOLS 129 /* rfc1195 */ #define ISIS_TLV_EXT_IP_REACH 130 /* rfc1195, rfc2966 */ #define ISIS_TLV_IDRP_INFO 131 /* rfc1195 */ #define ISIS_TLV_IDRP_INFO_MINLEN 1 #define ISIS_TLV_IPADDR 132 /* rfc1195 */ #define ISIS_TLV_IPAUTH 133 /* rfc1195 */ #define ISIS_TLV_TE_ROUTER_ID 134 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_EXTD_IP_REACH 135 /* draft-ietf-isis-traffic-05 */ #define ISIS_TLV_HOSTNAME 137 /* rfc2763 */ #define ISIS_TLV_SHARED_RISK_GROUP 138 /* draft-ietf-isis-gmpls-extensions */ #define ISIS_TLV_MT_PORT_CAP 143 /* rfc6165 */ #define ISIS_TLV_MT_CAPABILITY 144 /* rfc6329 */ #define ISIS_TLV_NORTEL_PRIVATE1 176 #define ISIS_TLV_NORTEL_PRIVATE2 177 #define ISIS_TLV_RESTART_SIGNALING 211 /* rfc3847 */ #define ISIS_TLV_RESTART_SIGNALING_FLAGLEN 1 #define ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN 2 #define ISIS_TLV_MT_IS_REACH 222 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED 229 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_MT_SUPPORTED_MINLEN 2 #define ISIS_TLV_IP6ADDR 232 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP_REACH 235 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_IP6_REACH 236 /* draft-ietf-isis-ipv6-02 */ #define ISIS_TLV_MT_IP6_REACH 237 /* draft-ietf-isis-wg-multi-topology-05 */ #define ISIS_TLV_PTP_ADJ 240 /* rfc3373 */ #define ISIS_TLV_IIH_SEQNR 241 /* draft-shen-isis-iih-sequence-00 */ #define ISIS_TLV_IIH_SEQNR_MINLEN 4 #define ISIS_TLV_VENDOR_PRIVATE 250 /* draft-ietf-isis-experimental-tlv-01 */ #define ISIS_TLV_VENDOR_PRIVATE_MINLEN 3 static const struct tok isis_tlv_values[] = { { ISIS_TLV_AREA_ADDR, "Area address(es)"}, { ISIS_TLV_IS_REACH, "IS Reachability"}, { ISIS_TLV_ESNEIGH, "ES Neighbor(s)"}, { ISIS_TLV_PART_DIS, "Partition DIS"}, { ISIS_TLV_PREFIX_NEIGH, "Prefix Neighbors"}, { ISIS_TLV_ISNEIGH, "IS Neighbor(s)"}, { ISIS_TLV_ISNEIGH_VARLEN, "IS Neighbor(s) (variable length)"}, { ISIS_TLV_PADDING, "Padding"}, { ISIS_TLV_LSP, "LSP entries"}, { ISIS_TLV_AUTH, "Authentication"}, { ISIS_TLV_CHECKSUM, "Checksum"}, { ISIS_TLV_POI, "Purge Originator Identifier"}, { ISIS_TLV_LSP_BUFFERSIZE, "LSP Buffersize"}, { ISIS_TLV_EXT_IS_REACH, "Extended IS Reachability"}, { ISIS_TLV_IS_ALIAS_ID, "IS Alias ID"}, { ISIS_TLV_DECNET_PHASE4, "DECnet Phase IV"}, { ISIS_TLV_LUCENT_PRIVATE, "Lucent Proprietary"}, { ISIS_TLV_INT_IP_REACH, "IPv4 Internal Reachability"}, { ISIS_TLV_PROTOCOLS, "Protocols supported"}, { ISIS_TLV_EXT_IP_REACH, "IPv4 External Reachability"}, { ISIS_TLV_IDRP_INFO, "Inter-Domain Information Type"}, { 
 ISIS_TLV_IPADDR,	       "IPv4 Interface address(es)"},
    { ISIS_TLV_IPAUTH,	       "IPv4 authentication (deprecated)"},
    { ISIS_TLV_TE_ROUTER_ID,   "Traffic Engineering Router ID"},
    { ISIS_TLV_EXTD_IP_REACH,  "Extended IPv4 Reachability"},
    { ISIS_TLV_SHARED_RISK_GROUP, "Shared Risk Link Group"},
    { ISIS_TLV_MT_PORT_CAP,    "Multi-Topology-Aware Port Capability"},
    { ISIS_TLV_MT_CAPABILITY,  "Multi-Topology Capability"},
    { ISIS_TLV_NORTEL_PRIVATE1, "Nortel Proprietary"},
    { ISIS_TLV_NORTEL_PRIVATE2, "Nortel Proprietary"},
    { ISIS_TLV_HOSTNAME,       "Hostname"},
    { ISIS_TLV_RESTART_SIGNALING, "Restart Signaling"},
    { ISIS_TLV_MT_IS_REACH,    "Multi Topology IS Reachability"},
    { ISIS_TLV_MT_SUPPORTED,   "Multi Topology"},
    { ISIS_TLV_IP6ADDR,	       "IPv6 Interface address(es)"},
    { ISIS_TLV_MT_IP_REACH,    "Multi-Topology IPv4 Reachability"},
    { ISIS_TLV_IP6_REACH,      "IPv6 reachability"},
    { ISIS_TLV_MT_IP6_REACH,   "Multi-Topology IP6 Reachability"},
    { ISIS_TLV_PTP_ADJ,	       "Point-to-point Adjacency State"},
    { ISIS_TLV_IIH_SEQNR,      "Hello PDU Sequence Number"},
    { ISIS_TLV_VENDOR_PRIVATE, "Vendor Private"},
    { 0, NULL }
};

#define ESIS_OPTION_PROTOCOLS        129
#define ESIS_OPTION_QOS_MAINTENANCE  195 /* iso9542 */
#define ESIS_OPTION_SECURITY         197 /* iso9542 */
#define ESIS_OPTION_ES_CONF_TIME     198 /* iso9542 */
#define ESIS_OPTION_PRIORITY         205 /* iso9542 */
#define ESIS_OPTION_ADDRESS_MASK     225 /* iso9542 */
#define ESIS_OPTION_SNPA_MASK        226 /* iso9542 */

static const struct tok esis_option_values[] = {
    { ESIS_OPTION_PROTOCOLS,       "Protocols supported"},
    { ESIS_OPTION_QOS_MAINTENANCE, "QoS Maintenance" },
    { ESIS_OPTION_SECURITY,        "Security" },
    { ESIS_OPTION_ES_CONF_TIME,    "ES Configuration Time" },
    { ESIS_OPTION_PRIORITY,        "Priority" },
    { ESIS_OPTION_ADDRESS_MASK,    "Address Mask" },
    { ESIS_OPTION_SNPA_MASK,       "SNPA Mask" },
    { 0, NULL }
};

#define CLNP_OPTION_DISCARD_REASON   193
#define CLNP_OPTION_QOS_MAINTENANCE  195 /* iso8473 */
#define CLNP_OPTION_SECURITY         197 /* iso8473 */
#define CLNP_OPTION_SOURCE_ROUTING   200 /* iso8473 */
#define CLNP_OPTION_ROUTE_RECORDING  203 /* iso8473 */
#define CLNP_OPTION_PADDING          204 /* iso8473 */
#define CLNP_OPTION_PRIORITY         205 /* iso8473 */

static const struct tok clnp_option_values[] = {
    { CLNP_OPTION_DISCARD_REASON,  "Discard Reason"},
    { CLNP_OPTION_PRIORITY,        "Priority"},
    { CLNP_OPTION_QOS_MAINTENANCE, "QoS Maintenance"},
    { CLNP_OPTION_SECURITY,        "Security"},
    { CLNP_OPTION_SOURCE_ROUTING,  "Source Routing"},
    { CLNP_OPTION_ROUTE_RECORDING, "Route Recording"},
    { CLNP_OPTION_PADDING,         "Padding"},
    { 0, NULL }
};

static const struct tok clnp_option_rfd_class_values[] = {
    { 0x0, "General"},
    { 0x8, "Address"},
    { 0x9, "Source Routeing"},
    { 0xa, "Lifetime"},
    { 0xb, "PDU Discarded"},
    { 0xc, "Reassembly"},
    { 0, NULL }
};

static const struct tok clnp_option_rfd_general_values[] = {
    { 0x0, "Reason not specified"},
    { 0x1, "Protocol procedure error"},
    { 0x2, "Incorrect checksum"},
    { 0x3, "PDU discarded due to congestion"},
    { 0x4, "Header syntax error (cannot be parsed)"},
    { 0x5, "Segmentation needed but not permitted"},
    { 0x6, "Incomplete PDU received"},
    { 0x7, "Duplicate option"},
    { 0, NULL }
};

static const struct tok clnp_option_rfd_address_values[] = {
    { 0x0, "Destination address unreachable"},
    { 0x1, "Destination address unknown"},
    { 0, NULL }
};

static const struct tok clnp_option_rfd_source_routeing_values[] = {
    { 0x0, "Unspecified source routeing error"},
    { 0x1, "Syntax error in source routeing field"},
    { 0x2, "Unknown address in source routeing field"},
    { 0x3, "Path not acceptable"},
    { 0, NULL }
};

static const struct tok
clnp_option_rfd_lifetime_values[] = { { 0x0, "Lifetime expired while data unit in transit"}, { 0x1, "Lifetime expired during reassembly"}, { 0, NULL } }; static const struct tok clnp_option_rfd_pdu_discard_values[] = { { 0x0, "Unsupported option not specified"}, { 0x1, "Unsupported protocol version"}, { 0x2, "Unsupported security option"}, { 0x3, "Unsupported source routeing option"}, { 0x4, "Unsupported recording of route option"}, { 0, NULL } }; static const struct tok clnp_option_rfd_reassembly_values[] = { { 0x0, "Reassembly interference"}, { 0, NULL } }; /* array of 16 error-classes */ static const struct tok *clnp_option_rfd_error_class[] = { clnp_option_rfd_general_values, NULL, NULL, NULL, NULL, NULL, NULL, NULL, clnp_option_rfd_address_values, clnp_option_rfd_source_routeing_values, clnp_option_rfd_lifetime_values, clnp_option_rfd_pdu_discard_values, clnp_option_rfd_reassembly_values, NULL, NULL, NULL }; #define CLNP_OPTION_OPTION_QOS_MASK 0x3f #define CLNP_OPTION_SCOPE_MASK 0xc0 #define CLNP_OPTION_SCOPE_SA_SPEC 0x40 #define CLNP_OPTION_SCOPE_DA_SPEC 0x80 #define CLNP_OPTION_SCOPE_GLOBAL 0xc0 static const struct tok clnp_option_scope_values[] = { { CLNP_OPTION_SCOPE_SA_SPEC, "Source Address Specific"}, { CLNP_OPTION_SCOPE_DA_SPEC, "Destination Address Specific"}, { CLNP_OPTION_SCOPE_GLOBAL, "Globally unique"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_values[] = { { 0x0, "partial"}, { 0x1, "complete"}, { 0, NULL } }; static const struct tok clnp_option_sr_rr_string_values[] = { { CLNP_OPTION_SOURCE_ROUTING, "source routing"}, { CLNP_OPTION_ROUTE_RECORDING, "recording of route in progress"}, { 0, NULL } }; static const struct tok clnp_option_qos_global_values[] = { { 0x20, "reserved"}, { 0x10, "sequencing vs. delay"}, { 0x08, "congested"}, { 0x04, "delay vs. cost"}, { 0x02, "error vs. delay"}, { 0x01, "error vs. 
cost"}, { 0, NULL } }; #define ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP 3 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID 4 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID 5 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR 6 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR 8 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW 9 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW 10 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW 11 /* rfc4124 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD 12 /* draft-ietf-tewg-diff-te-proto-06 */ #define ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC 18 /* draft-ietf-isis-traffic-05 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE 19 /* draft-ietf-isis-link-attr-01 */ #define ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE 20 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR 21 /* rfc4205 */ #define ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS 22 /* rfc4124 */ #define ISIS_SUBTLV_SPB_METRIC 29 /* rfc6329 */ static const struct tok isis_ext_is_reach_subtlv_values[] = { { ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP, "Administrative groups" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID, "Link Local/Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID, "Link Remote Identifier" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR, "IPv4 interface address" }, { ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR, "IPv4 neighbor address" }, { ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW, "Maximum link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW, "Reservable link bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW, "Unreserved bandwidth" }, { ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC, "Traffic Engineering Metric" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE, "Link Attribute" }, { ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE, "Link Protection Type" }, { ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR, "Interface Switching Capability" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD, "Bandwidth Constraints (old)" }, { ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS, "Bandwidth Constraints" }, { ISIS_SUBTLV_SPB_METRIC, "SPB Metric" }, { 250, "Reserved for cisco specific extensions" }, { 251, "Reserved for cisco specific extensions" }, { 252, "Reserved for cisco specific extensions" }, { 253, "Reserved for cisco specific extensions" }, { 254, "Reserved for cisco specific extensions" }, { 255, "Reserved for future expansion" }, { 0, NULL } }; #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32 1 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64 2 /* draft-ietf-isis-admin-tags-01 */ #define ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR 117 /* draft-ietf-isis-wg-multi-topology-05 */ static const struct tok isis_ext_ip_reach_subtlv_values[] = { { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32, "32-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64, "64-Bit Administrative tag" }, { ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR, "Management Prefix Color" }, { 0, NULL } }; static const struct tok isis_subtlv_link_attribute_values[] = { { 0x01, "Local Protection Available" }, { 0x02, "Link excluded from local protection path" }, { 0x04, "Local maintenance required"}, { 0, NULL } }; #define ISIS_SUBTLV_AUTH_SIMPLE 1 #define ISIS_SUBTLV_AUTH_GENERIC 3 /* rfc 5310 */ #define ISIS_SUBTLV_AUTH_MD5 54 #define ISIS_SUBTLV_AUTH_MD5_LEN 16 #define 
ISIS_SUBTLV_AUTH_PRIVATE 255 static const struct tok isis_subtlv_auth_values[] = { { ISIS_SUBTLV_AUTH_SIMPLE, "simple text password"}, { ISIS_SUBTLV_AUTH_GENERIC, "Generic Crypto key-id"}, { ISIS_SUBTLV_AUTH_MD5, "HMAC-MD5 password"}, { ISIS_SUBTLV_AUTH_PRIVATE, "Routing Domain private password"}, { 0, NULL } }; #define ISIS_SUBTLV_IDRP_RES 0 #define ISIS_SUBTLV_IDRP_LOCAL 1 #define ISIS_SUBTLV_IDRP_ASN 2 static const struct tok isis_subtlv_idrp_values[] = { { ISIS_SUBTLV_IDRP_RES, "Reserved"}, { ISIS_SUBTLV_IDRP_LOCAL, "Routing-Domain Specific"}, { ISIS_SUBTLV_IDRP_ASN, "AS Number Tag"}, { 0, NULL} }; #define ISIS_SUBTLV_SPB_MCID 4 #define ISIS_SUBTLV_SPB_DIGEST 5 #define ISIS_SUBTLV_SPB_BVID 6 #define ISIS_SUBTLV_SPB_INSTANCE 1 #define ISIS_SUBTLV_SPBM_SI 3 #define ISIS_SPB_MCID_LEN 51 #define ISIS_SUBTLV_SPB_MCID_MIN_LEN 102 #define ISIS_SUBTLV_SPB_DIGEST_MIN_LEN 33 #define ISIS_SUBTLV_SPB_BVID_MIN_LEN 6 #define ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN 19 #define ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN 8 static const struct tok isis_mt_port_cap_subtlv_values[] = { { ISIS_SUBTLV_SPB_MCID, "SPB MCID" }, { ISIS_SUBTLV_SPB_DIGEST, "SPB Digest" }, { ISIS_SUBTLV_SPB_BVID, "SPB BVID" }, { 0, NULL } }; static const struct tok isis_mt_capability_subtlv_values[] = { { ISIS_SUBTLV_SPB_INSTANCE, "SPB Instance" }, { ISIS_SUBTLV_SPBM_SI, "SPBM Service Identifier and Unicast Address" }, { 0, NULL } }; struct isis_spb_mcid { uint8_t format_id; uint8_t name[32]; uint8_t revision_lvl[2]; uint8_t digest[16]; }; struct isis_subtlv_spb_mcid { struct isis_spb_mcid mcid; struct isis_spb_mcid aux_mcid; }; struct isis_subtlv_spb_instance { uint8_t cist_root_id[8]; uint8_t cist_external_root_path_cost[4]; uint8_t bridge_priority[2]; uint8_t spsourceid[4]; uint8_t no_of_trees; }; #define CLNP_SEGMENT_PART 0x80 #define CLNP_MORE_SEGMENTS 0x40 #define CLNP_REQUEST_ER 0x20 static const struct tok clnp_flag_values[] = { { CLNP_SEGMENT_PART, "Segmentation permitted"}, { CLNP_MORE_SEGMENTS, "more Segments"}, { CLNP_REQUEST_ER, "request Error Report"}, { 0, NULL} }; #define ISIS_MASK_LSP_OL_BIT(x) ((x)&0x4) #define ISIS_MASK_LSP_ISTYPE_BITS(x) ((x)&0x3) #define ISIS_MASK_LSP_PARTITION_BIT(x) ((x)&0x80) #define ISIS_MASK_LSP_ATT_BITS(x) ((x)&0x78) #define ISIS_MASK_LSP_ATT_ERROR_BIT(x) ((x)&0x40) #define ISIS_MASK_LSP_ATT_EXPENSE_BIT(x) ((x)&0x20) #define ISIS_MASK_LSP_ATT_DELAY_BIT(x) ((x)&0x10) #define ISIS_MASK_LSP_ATT_DEFAULT_BIT(x) ((x)&0x8) #define ISIS_MASK_MTID(x) ((x)&0x0fff) #define ISIS_MASK_MTFLAGS(x) ((x)&0xf000) static const struct tok isis_mt_flag_values[] = { { 0x4000, "ATT bit set"}, { 0x8000, "Overload bit set"}, { 0, NULL} }; #define ISIS_MASK_TLV_EXTD_IP_UPDOWN(x) ((x)&0x80) #define ISIS_MASK_TLV_EXTD_IP_SUBTLV(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_IE(x) ((x)&0x40) #define ISIS_MASK_TLV_EXTD_IP6_SUBTLV(x) ((x)&0x20) #define ISIS_LSP_TLV_METRIC_SUPPORTED(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_IE(x) ((x)&0x40) #define ISIS_LSP_TLV_METRIC_UPDOWN(x) ((x)&0x80) #define ISIS_LSP_TLV_METRIC_VALUE(x) ((x)&0x3f) #define ISIS_MASK_TLV_SHARED_RISK_GROUP(x) ((x)&0x1) static const struct tok isis_mt_values[] = { { 0, "IPv4 unicast"}, { 1, "In-Band Management"}, { 2, "IPv6 unicast"}, { 3, "Multicast"}, { 4095, "Development, Experimental or Proprietary"}, { 0, NULL } }; static const struct tok isis_iih_circuit_type_values[] = { { 1, "Level 1 only"}, { 2, "Level 2 only"}, { 3, "Level 1, Level 2"}, { 0, NULL} }; #define ISIS_LSP_TYPE_UNUSED0 0 #define ISIS_LSP_TYPE_LEVEL_1 1 #define ISIS_LSP_TYPE_UNUSED2 2 #define 
ISIS_LSP_TYPE_LEVEL_2 3 static const struct tok isis_lsp_istype_values[] = { { ISIS_LSP_TYPE_UNUSED0, "Unused 0x0 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_1, "L1 IS"}, { ISIS_LSP_TYPE_UNUSED2, "Unused 0x2 (invalid)"}, { ISIS_LSP_TYPE_LEVEL_2, "L2 IS"}, { 0, NULL } }; /* * Katz's point to point adjacency TLV uses codes to tell us the state of * the remote adjacency. Enumerate them. */ #define ISIS_PTP_ADJ_UP 0 #define ISIS_PTP_ADJ_INIT 1 #define ISIS_PTP_ADJ_DOWN 2 static const struct tok isis_ptp_adjancey_values[] = { { ISIS_PTP_ADJ_UP, "Up" }, { ISIS_PTP_ADJ_INIT, "Initializing" }, { ISIS_PTP_ADJ_DOWN, "Down" }, { 0, NULL} }; struct isis_tlv_ptp_adj { uint8_t adjacency_state; uint8_t extd_local_circuit_id[4]; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; uint8_t neighbor_extd_local_circuit_id[4]; }; static void osi_print_cksum(netdissect_options *, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length); static int clnp_print(netdissect_options *, const uint8_t *, u_int); static void esis_print(netdissect_options *, const uint8_t *, u_int); static int isis_print(netdissect_options *, const uint8_t *, u_int); struct isis_metric_block { uint8_t metric_default; uint8_t metric_delay; uint8_t metric_expense; uint8_t metric_error; }; struct isis_tlv_is_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_nodeid[NODE_ID_LEN]; }; struct isis_tlv_es_reach { struct isis_metric_block isis_metric_block; uint8_t neighbor_sysid[SYSTEM_ID_LEN]; }; struct isis_tlv_ip_reach { struct isis_metric_block isis_metric_block; uint8_t prefix[4]; uint8_t mask[4]; }; static const struct tok isis_is_reach_virtual_values[] = { { 0, "IsNotVirtual"}, { 1, "IsVirtual"}, { 0, NULL } }; static const struct tok isis_restart_flag_values[] = { { 0x1, "Restart Request"}, { 0x2, "Restart Acknowledgement"}, { 0x4, "Suppress adjacency advertisement"}, { 0, NULL } }; struct isis_common_header { uint8_t nlpid; uint8_t fixed_len; uint8_t version; /* Protocol version */ uint8_t id_length; uint8_t pdu_type; /* 3 MSbits are reserved */ uint8_t pdu_version; /* Packet format version */ uint8_t reserved; uint8_t max_area; }; struct isis_iih_lan_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t priority; uint8_t lan_id[NODE_ID_LEN]; }; struct isis_iih_ptp_header { uint8_t circuit_type; uint8_t source_id[SYSTEM_ID_LEN]; uint8_t holding_time[2]; uint8_t pdu_len[2]; uint8_t circuit_id; }; struct isis_lsp_header { uint8_t pdu_len[2]; uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; uint8_t typeblock; }; struct isis_csnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; uint8_t start_lsp_id[LSP_ID_LEN]; uint8_t end_lsp_id[LSP_ID_LEN]; }; struct isis_psnp_header { uint8_t pdu_len[2]; uint8_t source_id[NODE_ID_LEN]; }; struct isis_tlv_lsp { uint8_t remaining_lifetime[2]; uint8_t lsp_id[LSP_ID_LEN]; uint8_t sequence_number[4]; uint8_t checksum[2]; }; #define ISIS_COMMON_HEADER_SIZE (sizeof(struct isis_common_header)) #define ISIS_IIH_LAN_HEADER_SIZE (sizeof(struct isis_iih_lan_header)) #define ISIS_IIH_PTP_HEADER_SIZE (sizeof(struct isis_iih_ptp_header)) #define ISIS_LSP_HEADER_SIZE (sizeof(struct isis_lsp_header)) #define ISIS_CSNP_HEADER_SIZE (sizeof(struct isis_csnp_header)) #define ISIS_PSNP_HEADER_SIZE (sizeof(struct isis_psnp_header)) void isoclns_print(netdissect_options *ndo, const uint8_t *p, u_int length) { if (!ND_TTEST(*p)) { /* enough bytes on the wire ? 
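                                     (if not, print a truncation indicator
                                     and give up; otherwise the first byte
                                     is the ISO NLPID, which selects the
                                     protocol-specific decoder below)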
*/ ND_PRINT((ndo, "|OSI")); return; } if (ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID %s (0x%02x): ", tok2str(nlpid_values, "Unknown", *p), *p)); switch (*p) { case NLPID_CLNP: if (!clnp_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_ESIS: esis_print(ndo, p, length); return; case NLPID_ISIS: if (!isis_print(ndo, p, length)) print_unknown_data(ndo, p, "\n\t", length); break; case NLPID_NULLNS: ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); break; case NLPID_Q933: q933_print(ndo, p + 1, length - 1); break; case NLPID_IP: ip_print(ndo, p + 1, length - 1); break; case NLPID_IP6: ip6_print(ndo, p + 1, length - 1); break; case NLPID_PPP: ppp_print(ndo, p + 1, length - 1); break; default: if (!ndo->ndo_eflag) ND_PRINT((ndo, "OSI NLPID 0x%02x unknown", *p)); ND_PRINT((ndo, "%slength: %u", ndo->ndo_eflag ? "" : ", ", length)); if (length > 1) print_unknown_data(ndo, p, "\n\t", length); break; } } #define CLNP_PDU_ER 1 #define CLNP_PDU_DT 28 #define CLNP_PDU_MD 29 #define CLNP_PDU_ERQ 30 #define CLNP_PDU_ERP 31 static const struct tok clnp_pdu_values[] = { { CLNP_PDU_ER, "Error Report"}, { CLNP_PDU_MD, "MD"}, { CLNP_PDU_DT, "Data"}, { CLNP_PDU_ERQ, "Echo Request"}, { CLNP_PDU_ERP, "Echo Response"}, { 0, NULL } }; struct clnp_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t lifetime; /* units of 500ms */ uint8_t type; uint8_t segment_length[2]; uint8_t cksum[2]; }; struct clnp_segment_header_t { uint8_t data_unit_id[2]; uint8_t segment_offset[2]; uint8_t total_length[2]; }; /* * clnp_print * Decode CLNP packets. Return 0 on error. */ static int clnp_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr,*source_address,*dest_address; u_int li,tlen,nsap_offset,source_address_length,dest_address_length, clnp_pdu_type, clnp_flags; const struct clnp_header_t *clnp_header; const struct clnp_segment_header_t *clnp_segment_header; uint8_t rfd_error_major,rfd_error_minor; clnp_header = (const struct clnp_header_t *) pptr; ND_TCHECK(*clnp_header); li = clnp_header->length_indicator; optr = pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "CLNP")); /* * Sanity checking of the header. 
*/ if (clnp_header->version != CLNP_VERSION) { ND_PRINT((ndo, "version %d packet not supported", clnp_header->version)); return (0); } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return (0); } if (li < sizeof(struct clnp_header_t)) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return (0); } /* FIXME further header sanity checking */ clnp_pdu_type = clnp_header->type & CLNP_PDU_TYPE_MASK; clnp_flags = clnp_header->type & CLNP_FLAG_MASK; pptr += sizeof(struct clnp_header_t); li -= sizeof(struct clnp_header_t); if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); dest_address_length = *pptr; pptr += 1; li -= 1; if (li < dest_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, dest_address_length); dest_address = pptr; pptr += dest_address_length; li -= dest_address_length; if (li < 1) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK(*pptr); source_address_length = *pptr; pptr += 1; li -= 1; if (li < source_address_length) { ND_PRINT((ndo, "li < size of fixed part of CLNP header and addresses")); return (0); } ND_TCHECK2(*pptr, source_address_length); source_address = pptr; pptr += source_address_length; li -= source_address_length; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s > %s, %s, length %u", ndo->ndo_eflag ? "" : ", ", isonsap_string(ndo, source_address, source_address_length), isonsap_string(ndo, dest_address, dest_address_length), tok2str(clnp_pdu_values,"unknown (%u)",clnp_pdu_type), length)); return (1); } ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ? 
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s PDU, hlen: %u, v: %u, lifetime: %u.%us, Segment PDU length: %u, checksum: 0x%04x", tok2str(clnp_pdu_values, "unknown (%u)",clnp_pdu_type), clnp_header->length_indicator, clnp_header->version, clnp_header->lifetime/2, (clnp_header->lifetime%2)*5, EXTRACT_16BITS(clnp_header->segment_length), EXTRACT_16BITS(clnp_header->cksum))); osi_print_cksum(ndo, optr, EXTRACT_16BITS(clnp_header->cksum), 7, clnp_header->length_indicator); ND_PRINT((ndo, "\n\tFlags [%s]", bittok2str(clnp_flag_values, "none", clnp_flags))); ND_PRINT((ndo, "\n\tsource address (length %u): %s\n\tdest address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length), dest_address_length, isonsap_string(ndo, dest_address, dest_address_length))); if (clnp_flags & CLNP_SEGMENT_PART) { if (li < sizeof(const struct clnp_segment_header_t)) { ND_PRINT((ndo, "li < size of fixed part of CLNP header, addresses, and segment part")); return (0); } clnp_segment_header = (const struct clnp_segment_header_t *) pptr; ND_TCHECK(*clnp_segment_header); ND_PRINT((ndo, "\n\tData Unit ID: 0x%04x, Segment Offset: %u, Total PDU Length: %u", EXTRACT_16BITS(clnp_segment_header->data_unit_id), EXTRACT_16BITS(clnp_segment_header->segment_offset), EXTRACT_16BITS(clnp_segment_header->total_length))); pptr+=sizeof(const struct clnp_segment_header_t); li-=sizeof(const struct clnp_segment_header_t); } /* now walk the options */ while (li >= 2) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return (0); } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return (0); } ND_TCHECK2(*pptr, opli); li -= opli; tptr = pptr; tlen = opli; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(clnp_option_values,"Unknown",op), op, opli)); /* * We've already checked that the entire option is present * in the captured packet with the ND_TCHECK2() call. * Therefore, we don't need to do ND_TCHECK()/ND_TCHECK2() * checks. * We do, however, need to check tlen, to make sure we * don't run past the end of the option. 
*/ switch (op) { case CLNP_OPTION_ROUTE_RECORDING: /* those two options share the format */ case CLNP_OPTION_SOURCE_ROUTING: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "%s %s", tok2str(clnp_option_sr_rr_values,"Unknown",*tptr), tok2str(clnp_option_sr_rr_string_values, "Unknown Option %u", op))); nsap_offset=*(tptr+1); if (nsap_offset == 0) { ND_PRINT((ndo, " Bad NSAP offset (0)")); break; } nsap_offset-=1; /* offset to nsap list */ if (nsap_offset > tlen) { ND_PRINT((ndo, " Bad NSAP offset (past end of option)")); break; } tptr+=nsap_offset; tlen-=nsap_offset; while (tlen > 0) { source_address_length=*tptr; if (tlen < source_address_length+1) { ND_PRINT((ndo, "\n\t NSAP address goes past end of option")); break; } if (source_address_length > 0) { source_address=(tptr+1); ND_TCHECK2(*source_address, source_address_length); ND_PRINT((ndo, "\n\t NSAP address (length %u): %s", source_address_length, isonsap_string(ndo, source_address, source_address_length))); } tlen-=source_address_length+1; } break; case CLNP_OPTION_PRIORITY: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "0x%1x", *tptr&0x0f)); break; case CLNP_OPTION_QOS_MAINTENANCE: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s", tok2str(clnp_option_scope_values, "Reserved", *tptr&CLNP_OPTION_SCOPE_MASK))); if ((*tptr&CLNP_OPTION_SCOPE_MASK) == CLNP_OPTION_SCOPE_GLOBAL) ND_PRINT((ndo, "\n\t QoS Flags [%s]", bittok2str(clnp_option_qos_global_values, "none", *tptr&CLNP_OPTION_OPTION_QOS_MASK))); break; case CLNP_OPTION_SECURITY: if (tlen < 2) { ND_PRINT((ndo, ", bad opt len")); return (0); } ND_PRINT((ndo, "\n\t Format Code: %s, Security-Level %u", tok2str(clnp_option_scope_values,"Reserved",*tptr&CLNP_OPTION_SCOPE_MASK), *(tptr+1))); break; case CLNP_OPTION_DISCARD_REASON: if (tlen < 1) { ND_PRINT((ndo, ", bad opt len")); return (0); } rfd_error_major = (*tptr&0xf0) >> 4; rfd_error_minor = *tptr&0x0f; ND_PRINT((ndo, "\n\t Class: %s Error (0x%01x), %s (0x%01x)", tok2str(clnp_option_rfd_class_values,"Unknown",rfd_error_major), rfd_error_major, tok2str(clnp_option_rfd_error_class[rfd_error_major],"Unknown",rfd_error_minor), rfd_error_minor)); break; case CLNP_OPTION_PADDING: ND_PRINT((ndo, "padding data")); break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } switch (clnp_pdu_type) { case CLNP_PDU_ER: /* fall through */ case CLNP_PDU_ERP: ND_TCHECK(*pptr); if (*(pptr) == NLPID_CLNP) { ND_PRINT((ndo, "\n\t-----original packet-----\n\t")); /* FIXME recursion protection */ clnp_print(ndo, pptr, length - clnp_header->length_indicator); break; } case CLNP_PDU_DT: case CLNP_PDU_MD: case CLNP_PDU_ERQ: default: /* dump the PDU specific data */ if (length-(pptr-optr) > 0) { ND_PRINT((ndo, "\n\t undecoded non-header data, length %u", length-clnp_header->length_indicator)); print_unknown_data(ndo, pptr, "\n\t ", length - (pptr - optr)); } } return (1); trunc: ND_PRINT((ndo, "[|clnp]")); return (1); } #define ESIS_PDU_REDIRECT 6 #define ESIS_PDU_ESH 2 #define ESIS_PDU_ISH 4 static const struct tok esis_pdu_values[] = { { ESIS_PDU_REDIRECT, "redirect"}, { ESIS_PDU_ESH, "ESH"}, { ESIS_PDU_ISH, "ISH"}, { 0, NULL } }; struct esis_header_t { uint8_t nlpid; uint8_t length_indicator; uint8_t version; uint8_t reserved; uint8_t type; 
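    /* holding time (seconds) and checksum are 16-bit big-endian fields */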
uint8_t holdtime[2]; uint8_t cksum[2]; }; static void esis_print(netdissect_options *ndo, const uint8_t *pptr, u_int length) { const uint8_t *optr; u_int li,esis_pdu_type,source_address_length, source_address_number; const struct esis_header_t *esis_header; if (!ndo->ndo_eflag) ND_PRINT((ndo, "ES-IS")); if (length <= 2) { ND_PRINT((ndo, ndo->ndo_qflag ? "bad pkt!" : "no header at all!")); return; } esis_header = (const struct esis_header_t *) pptr; ND_TCHECK(*esis_header); li = esis_header->length_indicator; optr = pptr; /* * Sanity checking of the header. */ if (esis_header->nlpid != NLPID_ESIS) { ND_PRINT((ndo, " nlpid 0x%02x packet not supported", esis_header->nlpid)); return; } if (esis_header->version != ESIS_VERSION) { ND_PRINT((ndo, " version %d packet not supported", esis_header->version)); return; } if (li > length) { ND_PRINT((ndo, " length indicator(%u) > PDU size (%u)!", li, length)); return; } if (li < sizeof(struct esis_header_t) + 2) { ND_PRINT((ndo, " length indicator %u < min PDU size:", li)); while (pptr < ndo->ndo_snapend) ND_PRINT((ndo, "%02X", *pptr++)); return; } esis_pdu_type = esis_header->type & ESIS_PDU_TYPE_MASK; if (ndo->ndo_vflag < 1) { ND_PRINT((ndo, "%s%s, length %u", ndo->ndo_eflag ? "" : ", ", tok2str(esis_pdu_values,"unknown type (%u)",esis_pdu_type), length)); return; } else ND_PRINT((ndo, "%slength %u\n\t%s (%u)", ndo->ndo_eflag ? "" : ", ", length, tok2str(esis_pdu_values,"unknown type: %u", esis_pdu_type), esis_pdu_type)); ND_PRINT((ndo, ", v: %u%s", esis_header->version, esis_header->version == ESIS_VERSION ? "" : "unsupported" )); ND_PRINT((ndo, ", checksum: 0x%04x", EXTRACT_16BITS(esis_header->cksum))); osi_print_cksum(ndo, pptr, EXTRACT_16BITS(esis_header->cksum), 7, li); ND_PRINT((ndo, ", holding time: %us, length indicator: %u", EXTRACT_16BITS(esis_header->holdtime), li)); if (ndo->ndo_vflag > 1) print_unknown_data(ndo, optr, "\n\t", sizeof(struct esis_header_t)); pptr += sizeof(struct esis_header_t); li -= sizeof(struct esis_header_t); switch (esis_pdu_type) { case ESIS_PDU_REDIRECT: { const uint8_t *dst, *snpa, *neta; u_int dstl, snpal, netal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } dstl = *pptr; pptr++; li--; ND_TCHECK2(*pptr, dstl); if (li < dstl) { ND_PRINT((ndo, ", bad redirect/li")); return; } dst = pptr; pptr += dstl; li -= dstl; ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, dst, dstl))); ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpal = *pptr; pptr++; li--; ND_TCHECK2(*pptr, snpal); if (li < snpal) { ND_PRINT((ndo, ", bad redirect/li")); return; } snpa = pptr; pptr += snpal; li -= snpal; ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad redirect/li")); return; } netal = *pptr; pptr++; ND_TCHECK2(*pptr, netal); if (li < netal) { ND_PRINT((ndo, ", bad redirect/li")); return; } neta = pptr; pptr += netal; li -= netal; if (netal == 0) ND_PRINT((ndo, "\n\t %s", etheraddr_string(ndo, snpa))); else ND_PRINT((ndo, "\n\t %s", isonsap_string(ndo, neta, netal))); break; } case ESIS_PDU_ESH: ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_number = *pptr; pptr++; li--; ND_PRINT((ndo, "\n\t Number of Source Addresses: %u", source_address_number)); while (source_address_number > 0) { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad esh/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad esh/li")); return; } 
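            /* print one NET: the length byte was consumed above,
             * the NSAP bytes themselves follow */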
ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; source_address_number--; } break; case ESIS_PDU_ISH: { ND_TCHECK(*pptr); if (li < 1) { ND_PRINT((ndo, ", bad ish/li")); return; } source_address_length = *pptr; pptr++; li--; ND_TCHECK2(*pptr, source_address_length); if (li < source_address_length) { ND_PRINT((ndo, ", bad ish/li")); return; } ND_PRINT((ndo, "\n\t NET (length: %u): %s", source_address_length, isonsap_string(ndo, pptr, source_address_length))); pptr += source_address_length; li -= source_address_length; break; } default: if (ndo->ndo_vflag <= 1) { if (pptr < ndo->ndo_snapend) print_unknown_data(ndo, pptr, "\n\t ", ndo->ndo_snapend - pptr); } return; } /* now walk the options */ while (li != 0) { u_int op, opli; const uint8_t *tptr; if (li < 2) { ND_PRINT((ndo, ", bad opts/li")); return; } ND_TCHECK2(*pptr, 2); op = *pptr++; opli = *pptr++; li -= 2; if (opli > li) { ND_PRINT((ndo, ", opt (%d) too long", op)); return; } li -= opli; tptr = pptr; ND_PRINT((ndo, "\n\t %s Option #%u, length %u, value: ", tok2str(esis_option_values,"Unknown",op), op, opli)); switch (op) { case ESIS_OPTION_ES_CONF_TIME: if (opli == 2) { ND_TCHECK2(*pptr, 2); ND_PRINT((ndo, "%us", EXTRACT_16BITS(tptr))); } else ND_PRINT((ndo, "(bad length)")); break; case ESIS_OPTION_PROTOCOLS: while (opli>0) { ND_TCHECK(*pptr); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (opli>1) /* further NPLIDs ? - put comma */ ND_PRINT((ndo, ", ")); tptr++; opli--; } break; /* * FIXME those are the defined Options that lack a decoder * you are welcome to contribute code ;-) */ case ESIS_OPTION_QOS_MAINTENANCE: case ESIS_OPTION_SECURITY: case ESIS_OPTION_PRIORITY: case ESIS_OPTION_ADDRESS_MASK: case ESIS_OPTION_SNPA_MASK: default: print_unknown_data(ndo, tptr, "\n\t ", opli); break; } if (ndo->ndo_vflag > 1) print_unknown_data(ndo, pptr, "\n\t ", opli); pptr += opli; } trunc: return; } static void isis_print_mcid(netdissect_options *ndo, const struct isis_spb_mcid *mcid) { int i; ND_TCHECK(*mcid); ND_PRINT((ndo, "ID: %d, Name: ", mcid->format_id)); if (fn_printzp(ndo, mcid->name, 32, ndo->ndo_snapend)) goto trunc; ND_PRINT((ndo, "\n\t Lvl: %d", EXTRACT_16BITS(mcid->revision_lvl))); ND_PRINT((ndo, ", Digest: ")); for(i=0;i<16;i++) ND_PRINT((ndo, "%.2x ", mcid->digest[i])); trunc: ND_PRINT((ndo, "%s", tstr)); } static int isis_print_mt_port_cap_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len; const struct isis_subtlv_spb_mcid *subtlv_spb_mcid; int i; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_port_cap_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); /*len -= TLV_TYPE_LEN_OFFSET;*/ len = len -2; switch (stlv_type) { case ISIS_SUBTLV_SPB_MCID: { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_MCID_MIN_LEN); subtlv_spb_mcid = (const struct isis_subtlv_spb_mcid *)tptr; ND_PRINT((ndo, "\n\t MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ ND_PRINT((ndo, "\n\t AUX-MCID: ")); isis_print_mcid(ndo, &(subtlv_spb_mcid->aux_mcid)); /*tptr += SPB_MCID_MIN_LEN; len -= SPB_MCID_MIN_LEN; */ tptr = tptr + sizeof(struct isis_subtlv_spb_mcid); len = len - sizeof(struct isis_subtlv_spb_mcid); break; } case ISIS_SUBTLV_SPB_DIGEST: { ND_TCHECK2(*(tptr), 
ISIS_SUBTLV_SPB_DIGEST_MIN_LEN); ND_PRINT((ndo, "\n\t RES: %d V: %d A: %d D: %d", (*(tptr) >> 5), (((*tptr)>> 4) & 0x01), ((*(tptr) >> 2) & 0x03), ((*tptr) & 0x03))); tptr++; ND_PRINT((ndo, "\n\t Digest: ")); for(i=1;i<=8; i++) { ND_PRINT((ndo, "%08x ", EXTRACT_32BITS(tptr))); if (i%4 == 0 && i != 8) ND_PRINT((ndo, "\n\t ")); tptr = tptr + 4; } len = len - ISIS_SUBTLV_SPB_DIGEST_MIN_LEN; break; } case ISIS_SUBTLV_SPB_BVID: { ND_TCHECK2(*(tptr), stlv_len); while (len >= ISIS_SUBTLV_SPB_BVID_MIN_LEN) { ND_TCHECK2(*(tptr), ISIS_SUBTLV_SPB_BVID_MIN_LEN); ND_PRINT((ndo, "\n\t ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " BVID: %d, U:%01x M:%01x ", (EXTRACT_16BITS (tptr) >> 4) , (EXTRACT_16BITS (tptr) >> 3) & 0x01, (EXTRACT_16BITS (tptr) >> 2) & 0x01)); tptr = tptr + 2; len = len - ISIS_SUBTLV_SPB_BVID_MIN_LEN; } break; } default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static int isis_print_mt_capability_subtlv(netdissect_options *ndo, const uint8_t *tptr, int len) { int stlv_type, stlv_len, tmp; while (len > 2) { stlv_type = *(tptr++); stlv_len = *(tptr++); /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "\n\t %s subTLV #%u, length: %u", tok2str(isis_mt_capability_subtlv_values, "unknown", stlv_type), stlv_type, stlv_len)); len = len - 2; switch (stlv_type) { case ISIS_SUBTLV_SPB_INSTANCE: ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN); ND_PRINT((ndo, "\n\t CIST Root-ID: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, " %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Path Cost: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, ", Prio: %d", EXTRACT_16BITS(tptr))); tptr = tptr + 2; ND_PRINT((ndo, "\n\t RES: %d", EXTRACT_16BITS(tptr) >> 5)); ND_PRINT((ndo, ", V: %d", (EXTRACT_16BITS(tptr) >> 4) & 0x0001)); ND_PRINT((ndo, ", SPSource-ID: %d", (EXTRACT_32BITS(tptr) & 0x000fffff))); tptr = tptr+4; ND_PRINT((ndo, ", No of Trees: %x", *(tptr))); tmp = *(tptr++); len = len - ISIS_SUBTLV_SPB_INSTANCE_MIN_LEN; while (tmp) { ND_TCHECK2(*tptr, ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN); ND_PRINT((ndo, "\n\t U:%d, M:%d, A:%d, RES:%d", *(tptr) >> 7, (*(tptr) >> 6) & 0x01, (*(tptr) >> 5) & 0x01, (*(tptr) & 0x1f))); tptr++; ND_PRINT((ndo, ", ECT: %08x", EXTRACT_32BITS(tptr))); tptr = tptr + 4; ND_PRINT((ndo, ", BVID: %d, SPVID: %d", (EXTRACT_24BITS(tptr) >> 12) & 0x000fff, EXTRACT_24BITS(tptr) & 0x000fff)); tptr = tptr + 3; len = len - ISIS_SUBTLV_SPB_INSTANCE_VLAN_TUPLE_LEN; tmp--; } break; case ISIS_SUBTLV_SPBM_SI: ND_TCHECK2(*tptr, 8); ND_PRINT((ndo, "\n\t BMAC: %08x", EXTRACT_32BITS(tptr))); tptr = tptr+4; ND_PRINT((ndo, "%04x", EXTRACT_16BITS(tptr))); tptr = tptr+2; ND_PRINT((ndo, ", RES: %d, VID: %d", EXTRACT_16BITS(tptr) >> 12, (EXTRACT_16BITS(tptr)) & 0x0fff)); tptr = tptr+2; len = len - 8; stlv_len = stlv_len - 8; while (stlv_len >= 4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t T: %d, R: %d, RES: %d, ISID: %d", (EXTRACT_32BITS(tptr) >> 31), (EXTRACT_32BITS(tptr) >> 30) & 0x01, (EXTRACT_32BITS(tptr) >> 24) & 0x03f, (EXTRACT_32BITS(tptr)) & 0x0ffffff)); tptr = tptr + 4; len = len - 4; stlv_len = stlv_len - 4; } break; default: break; } } return 0; trunc: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } /* shared routine for printing system, node and lsp-ids */ static char * isis_print_id(const uint8_t *cp, int id_len) { int i; static char id[sizeof("xxxx.xxxx.xxxx.yy-zz")]; char *pos = id; for (i = 1; i <= SYSTEM_ID_LEN; i++) { snprintf(pos, 
sizeof(id) - (pos - id), "%02x", *cp++); pos += strlen(pos); if (i == 2 || i == 4) *pos++ = '.'; } if (id_len >= NODE_ID_LEN) { snprintf(pos, sizeof(id) - (pos - id), ".%02x", *cp++); pos += strlen(pos); } if (id_len == LSP_ID_LEN) snprintf(pos, sizeof(id) - (pos - id), "-%02x", *cp); return (id); } /* print the 4-byte metric block which is common found in the old-style TLVs */ static int isis_print_metric_block(netdissect_options *ndo, const struct isis_metric_block *isis_metric_block) { ND_PRINT((ndo, ", Default Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_default), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_delay)) ND_PRINT((ndo, "\n\t\t Delay Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_delay), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_expense)) ND_PRINT((ndo, "\n\t\t Expense Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_expense), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_expense) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(isis_metric_block->metric_error)) ND_PRINT((ndo, "\n\t\t Error Metric: %d, %s", ISIS_LSP_TLV_METRIC_VALUE(isis_metric_block->metric_error), ISIS_LSP_TLV_METRIC_IE(isis_metric_block->metric_error) ? "External" : "Internal")); return(1); /* everything is ok */ } static int isis_print_tlv_ip_reach(netdissect_options *ndo, const uint8_t *cp, const char *ident, int length) { int prefix_len; const struct isis_tlv_ip_reach *tlv_ip_reach; tlv_ip_reach = (const struct isis_tlv_ip_reach *)cp; while (length > 0) { if ((size_t)length < sizeof(*tlv_ip_reach)) { ND_PRINT((ndo, "short IPv4 Reachability (%d vs %lu)", length, (unsigned long)sizeof(*tlv_ip_reach))); return (0); } if (!ND_TTEST(*tlv_ip_reach)) return (0); prefix_len = mask2plen(EXTRACT_32BITS(tlv_ip_reach->mask)); if (prefix_len == -1) ND_PRINT((ndo, "%sIPv4 prefix: %s mask %s", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), ipaddr_string(ndo, (tlv_ip_reach->mask)))); else ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, (tlv_ip_reach->prefix)), prefix_len)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u, %s", ISIS_LSP_TLV_METRIC_UPDOWN(tlv_ip_reach->isis_metric_block.metric_default) ? "down" : "up", ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_default), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_default) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_delay)) ND_PRINT((ndo, "%s Delay Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_delay), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_delay) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_expense)) ND_PRINT((ndo, "%s Expense Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_expense), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_expense) ? "External" : "Internal")); if (!ISIS_LSP_TLV_METRIC_SUPPORTED(tlv_ip_reach->isis_metric_block.metric_error)) ND_PRINT((ndo, "%s Error Metric: %u, %s", ident, ISIS_LSP_TLV_METRIC_VALUE(tlv_ip_reach->isis_metric_block.metric_error), ISIS_LSP_TLV_METRIC_IE(tlv_ip_reach->isis_metric_block.metric_error) ? 
"External" : "Internal")); length -= sizeof(struct isis_tlv_ip_reach); tlv_ip_reach++; } return (1); } /* * this is the common IP-REACH subTLV decoder it is called * from various EXTD-IP REACH TLVs (135,235,236,237) */ static int isis_print_ip_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, int subt, int subl, const char *ident) { /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_ip_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr,subl); switch(subt) { case ISIS_SUBTLV_EXTD_IP_REACH_MGMT_PREFIX_COLOR: /* fall through */ case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG32: while (subl >= 4) { ND_PRINT((ndo, ", 0x%08x (=%u)", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr))); tptr+=4; subl-=4; } break; case ISIS_SUBTLV_EXTD_IP_REACH_ADMIN_TAG64: while (subl >= 8) { ND_PRINT((ndo, ", 0x%08x%08x", EXTRACT_32BITS(tptr), EXTRACT_32BITS(tptr+4))); tptr+=8; subl-=8; } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: ND_PRINT((ndo, "%s", ident)); ND_PRINT((ndo, "%s", tstr)); return(0); } /* * this is the common IS-REACH subTLV decoder it is called * from isis_print_ext_is_reach() */ static int isis_print_is_reach_subtlv(netdissect_options *ndo, const uint8_t *tptr, u_int subt, u_int subl, const char *ident) { u_int te_class,priority_level,gmpls_switch_cap; union { /* int to float conversion buffer for several subTLVs */ float f; uint32_t i; } bw; /* first lets see if we know the subTLVs name*/ ND_PRINT((ndo, "%s%s subTLV #%u, length: %u", ident, tok2str(isis_ext_is_reach_subtlv_values, "unknown", subt), subt, subl)); ND_TCHECK2(*tptr, subl); switch(subt) { case ISIS_SUBTLV_EXT_IS_REACH_ADMIN_GROUP: case ISIS_SUBTLV_EXT_IS_REACH_LINK_LOCAL_REMOTE_ID: case ISIS_SUBTLV_EXT_IS_REACH_LINK_REMOTE_ID: if (subl >= 4) { ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr))); if (subl == 8) /* rfc4205 */ ND_PRINT((ndo, ", 0x%08x", EXTRACT_32BITS(tptr+4))); } break; case ISIS_SUBTLV_EXT_IS_REACH_IPV4_INTF_ADDR: case ISIS_SUBTLV_EXT_IS_REACH_IPV4_NEIGHBOR_ADDR: if (subl >= sizeof(struct in_addr)) ND_PRINT((ndo, ", %s", ipaddr_string(ndo, tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_MAX_LINK_BW : case ISIS_SUBTLV_EXT_IS_REACH_RESERVABLE_BW: if (subl >= 4) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, ", %.3f Mbps", bw.f * 8 / 1000000)); } break; case ISIS_SUBTLV_EXT_IS_REACH_UNRESERVED_BW : if (subl >= 32) { for (te_class = 0; te_class < 8; te_class++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s TE-Class %u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } } break; case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS: /* fall through */ case ISIS_SUBTLV_EXT_IS_REACH_BW_CONSTRAINTS_OLD: ND_PRINT((ndo, "%sBandwidth Constraints Model ID: %s (%u)", ident, tok2str(diffserv_te_bc_values, "unknown", *tptr), *tptr)); tptr++; /* decode BCs until the subTLV ends */ for (te_class = 0; te_class < (subl-1)/4; te_class++) { ND_TCHECK2(*tptr, 4); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Bandwidth constraint CT%u: %.3f Mbps", ident, te_class, bw.f * 8 / 1000000)); tptr+=4; } break; case ISIS_SUBTLV_EXT_IS_REACH_TE_METRIC: if (subl >= 3) ND_PRINT((ndo, ", %u", EXTRACT_24BITS(tptr))); break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_ATTRIBUTE: if (subl == 2) { ND_PRINT((ndo, ", [ %s ] (0x%04x)", bittok2str(isis_subtlv_link_attribute_values, "Unknown", EXTRACT_16BITS(tptr)), EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_LINK_PROTECTION_TYPE: if (subl >= 2) { 
ND_PRINT((ndo, ", %s, Priority %u", bittok2str(gmpls_link_prot_values, "none", *tptr), *(tptr+1))); } break; case ISIS_SUBTLV_SPB_METRIC: if (subl >= 6) { ND_PRINT((ndo, ", LM: %u", EXTRACT_24BITS(tptr))); tptr=tptr+3; ND_PRINT((ndo, ", P: %u", *(tptr))); tptr++; ND_PRINT((ndo, ", P-ID: %u", EXTRACT_16BITS(tptr))); } break; case ISIS_SUBTLV_EXT_IS_REACH_INTF_SW_CAP_DESCR: if (subl >= 36) { gmpls_switch_cap = *tptr; ND_PRINT((ndo, "%s Interface Switching Capability:%s", ident, tok2str(gmpls_switch_cap_values, "Unknown", gmpls_switch_cap))); ND_PRINT((ndo, ", LSP Encoding: %s", tok2str(gmpls_encoding_values, "Unknown", *(tptr + 1)))); tptr+=4; ND_PRINT((ndo, "%s Max LSP Bandwidth:", ident)); for (priority_level = 0; priority_level < 8; priority_level++) { bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s priority level %d: %.3f Mbps", ident, priority_level, bw.f * 8 / 1000000)); tptr+=4; } subl-=36; switch (gmpls_switch_cap) { case GMPLS_PSC1: case GMPLS_PSC2: case GMPLS_PSC3: case GMPLS_PSC4: ND_TCHECK2(*tptr, 6); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Interface MTU: %u", ident, EXTRACT_16BITS(tptr + 4))); break; case GMPLS_TSC: ND_TCHECK2(*tptr, 8); bw.i = EXTRACT_32BITS(tptr); ND_PRINT((ndo, "%s Min LSP Bandwidth: %.3f Mbps", ident, bw.f * 8 / 1000000)); ND_PRINT((ndo, "%s Indication %s", ident, tok2str(gmpls_switch_cap_tsc_indication_values, "Unknown (%u)", *(tptr + 4)))); break; default: /* there is some optional stuff left to decode but this is as of yet not specified so just lets hexdump what is left */ if(subl>0){ if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); } } } break; default: if (!print_unknown_data(ndo, tptr, "\n\t\t ", subl)) return(0); break; } return(1); trunc: return(0); } /* * this is the common IS-REACH decoder it is called * from various EXTD-IS REACH style TLVs (22,24,222) */ static int isis_print_ext_is_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, int tlv_type) { char ident_buffer[20]; int subtlv_type,subtlv_len,subtlv_sum_len; int proc_bytes = 0; /* how many bytes did we process ? */ if (!ND_TTEST2(*tptr, NODE_ID_LEN)) return(0); ND_PRINT((ndo, "%sIS Neighbor: %s", ident, isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); if (tlv_type != ISIS_TLV_IS_ALIAS_ID) { /* the Alias TLV Metric field is implicit 0 */ if (!ND_TTEST2(*tptr, 3)) /* and is therefore skipped */ return(0); ND_PRINT((ndo, ", Metric: %d", EXTRACT_24BITS(tptr))); tptr+=3; } if (!ND_TTEST2(*tptr, 1)) return(0); subtlv_sum_len=*(tptr++); /* read out subTLV length */ proc_bytes=NODE_ID_LEN+3+1; ND_PRINT((ndo, ", %ssub-TLVs present",subtlv_sum_len ? 
"" : "no ")); if (subtlv_sum_len) { ND_PRINT((ndo, " (%u)", subtlv_sum_len)); while (subtlv_sum_len>0) { if (!ND_TTEST2(*tptr,2)) return(0); subtlv_type=*(tptr++); subtlv_len=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_is_reach_subtlv(ndo, tptr, subtlv_type, subtlv_len, ident_buffer)) return(0); tptr+=subtlv_len; subtlv_sum_len-=(subtlv_len+2); proc_bytes+=(subtlv_len+2); } } return(proc_bytes); } /* * this is the common Multi Topology ID decoder * it is called from various MT-TLVs (222,229,235,237) */ static int isis_print_mtid(netdissect_options *ndo, const uint8_t *tptr, const char *ident) { if (!ND_TTEST2(*tptr, 2)) return(0); ND_PRINT((ndo, "%s%s", ident, tok2str(isis_mt_values, "Reserved for IETF Consensus", ISIS_MASK_MTID(EXTRACT_16BITS(tptr))))); ND_PRINT((ndo, " Topology (0x%03x), Flags: [%s]", ISIS_MASK_MTID(EXTRACT_16BITS(tptr)), bittok2str(isis_mt_flag_values, "none",ISIS_MASK_MTFLAGS(EXTRACT_16BITS(tptr))))); return(2); } /* * this is the common extended IP reach decoder * it is called from TLVs (135,235,236,237) * we process the TLV and optional subTLVs and return * the amount of processed bytes */ static int isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? 
", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); } /* * Clear checksum and lifetime prior to signature verification. */ static void isis_clear_checksum_lifetime(void *header) { struct isis_lsp_header *header_lsp = (struct isis_lsp_header *) header; header_lsp->checksum[0] = 0; header_lsp->checksum[1] = 0; header_lsp->remaining_lifetime[0] = 0; header_lsp->remaining_lifetime[1] = 0; } /* * isis_print * Decode IS-IS packets. Return 0 on error. */ static int isis_print(netdissect_options *ndo, const uint8_t *p, u_int length) { const struct isis_common_header *isis_header; const struct isis_iih_lan_header *header_iih_lan; const struct isis_iih_ptp_header *header_iih_ptp; const struct isis_lsp_header *header_lsp; const struct isis_csnp_header *header_csnp; const struct isis_psnp_header *header_psnp; const struct isis_tlv_lsp *tlv_lsp; const struct isis_tlv_ptp_adj *tlv_ptp_adj; const struct isis_tlv_is_reach *tlv_is_reach; const struct isis_tlv_es_reach *tlv_es_reach; uint8_t pdu_type, max_area, id_length, tlv_type, tlv_len, tmp, alen, lan_alen, prefix_len; uint8_t ext_is_len, ext_ip_len, mt_len; const uint8_t *optr, *pptr, *tptr; u_short packet_len,pdu_len, key_id; u_int i,vendor_id; int sigcheck; packet_len=length; optr = p; /* initialize the _o_riginal pointer to the packet start - need it for parsing the checksum TLV and authentication TLV verification */ isis_header = (const struct isis_common_header *)p; ND_TCHECK(*isis_header); if (length < ISIS_COMMON_HEADER_SIZE) goto trunc; pptr = p+(ISIS_COMMON_HEADER_SIZE); header_iih_lan = (const struct isis_iih_lan_header *)pptr; header_iih_ptp = (const struct isis_iih_ptp_header *)pptr; header_lsp = (const struct isis_lsp_header *)pptr; header_csnp = (const struct isis_csnp_header *)pptr; header_psnp = (const struct isis_psnp_header *)pptr; if (!ndo->ndo_eflag) ND_PRINT((ndo, "IS-IS")); /* * Sanity checking of the header. 
*/
    if (isis_header->version != ISIS_VERSION) {
        ND_PRINT((ndo, "version %d packet not supported", isis_header->version));
        return (0);
    }

    if ((isis_header->id_length != SYSTEM_ID_LEN) && (isis_header->id_length != 0)) {
        ND_PRINT((ndo, "system ID length of %d is not supported", isis_header->id_length));
        return (0);
    }

    if (isis_header->pdu_version != ISIS_VERSION) {
        ND_PRINT((ndo, "version %d packet not supported", isis_header->pdu_version));
        return (0);
    }

    if (length < isis_header->fixed_len) {
        ND_PRINT((ndo, "fixed header length %u > packet length %u", isis_header->fixed_len, length));
        return (0);
    }

    if (isis_header->fixed_len < ISIS_COMMON_HEADER_SIZE) {
        ND_PRINT((ndo, "fixed header length %u < minimum header size %u", isis_header->fixed_len, (u_int)ISIS_COMMON_HEADER_SIZE));
        return (0);
    }

    max_area = isis_header->max_area;
    switch(max_area) {
    case 0:
        max_area = 3;   /* zero means the protocol default of 3 areas */
        break;
    case 255:
        ND_PRINT((ndo, "bad packet -- 255 areas"));
        return (0);
    default:
        break;
    }

    id_length = isis_header->id_length;
    switch(id_length) {
    case 0:
        id_length = 6;  /* zero means the protocol default of 6-byte system IDs */
        break;
    case 1: /* 1-8 are valid sys-ID lengths */
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
        break;
    case 255:
        id_length = 0;  /* entirely useless */
        break;
    default:
        break;
    }

    /* toss any non 6-byte sys-ID len PDUs */
    if (id_length != 6) {
        ND_PRINT((ndo, "bad packet -- illegal sys-ID length (%u)", id_length));
        return (0);
    }

    pdu_type = isis_header->pdu_type;

    /* in non-verbose mode print the basic PDU type plus PDU-specific brief information */
    if (ndo->ndo_vflag == 0) {
        ND_PRINT((ndo, "%s%s", ndo->ndo_eflag ? "" : ", ", tok2str(isis_pdu_values, "unknown PDU-Type %u", pdu_type)));
    } else {
        /* ok, they seem to want to know everything - let's fully decode it */
        ND_PRINT((ndo, "%slength %u", ndo->ndo_eflag ?
"" : ", ", length)); ND_PRINT((ndo, "\n\t%s, hlen: %u, v: %u, pdu-v: %u, sys-id-len: %u (%u), max-area: %u (%u)", tok2str(isis_pdu_values, "unknown, type %u", pdu_type), isis_header->fixed_len, isis_header->version, isis_header->pdu_version, id_length, isis_header->id_length, max_area, isis_header->max_area)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, optr, "\n\t", 8)) /* provide the _o_riginal pointer */ return (0); /* for optionally debugging the common header */ } } switch (pdu_type) { case ISIS_PDU_L1_LAN_IIH: case ISIS_PDU_L2_LAN_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_lan); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_lan->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", lan-id %s, prio %u", isis_print_id(header_iih_lan->lan_id,NODE_ID_LEN), header_iih_lan->priority)); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_lan->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_lan->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_lan->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_lan->circuit_type))); ND_PRINT((ndo, "\n\t lan-id: %s, Priority: %u, PDU length: %u", isis_print_id(header_iih_lan->lan_id, NODE_ID_LEN), (header_iih_lan->priority) & ISIS_LAN_PRIORITY_MASK, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_LAN_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_LAN_HEADER_SIZE); break; case ISIS_PDU_PTP_IIH: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_iih_ptp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_iih_ptp->source_id, SYSTEM_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_iih_ptp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, holding time: %us, Flags: [%s]", isis_print_id(header_iih_ptp->source_id,SYSTEM_ID_LEN), EXTRACT_16BITS(header_iih_ptp->holding_time), tok2str(isis_iih_circuit_type_values, "unknown circuit type 0x%02x", header_iih_ptp->circuit_type))); ND_PRINT((ndo, "\n\t circuit-id: 0x%02x, PDU length: %u", header_iih_ptp->circuit_id, pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_IIH_PTP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_IIH_PTP_HEADER_SIZE); break; case ISIS_PDU_L1_LSP: case ISIS_PDU_L2_LSP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header 
length %u should be %lu", isis_header->fixed_len, (unsigned long)ISIS_LSP_HEADER_SIZE)); return (0); } ND_TCHECK(*header_lsp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", lsp-id %s, seq 0x%08x, lifetime %5us", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_lsp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t lsp-id: %s, seq: 0x%08x, lifetime: %5us\n\t chksum: 0x%04x", isis_print_id(header_lsp->lsp_id, LSP_ID_LEN), EXTRACT_32BITS(header_lsp->sequence_number), EXTRACT_16BITS(header_lsp->remaining_lifetime), EXTRACT_16BITS(header_lsp->checksum))); osi_print_cksum(ndo, (const uint8_t *)header_lsp->lsp_id, EXTRACT_16BITS(header_lsp->checksum), 12, length-12); ND_PRINT((ndo, ", PDU length: %u, Flags: [ %s", pdu_len, ISIS_MASK_LSP_OL_BIT(header_lsp->typeblock) ? "Overload bit set, " : "")); if (ISIS_MASK_LSP_ATT_BITS(header_lsp->typeblock)) { ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DEFAULT_BIT(header_lsp->typeblock) ? "default " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_DELAY_BIT(header_lsp->typeblock) ? "delay " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_EXPENSE_BIT(header_lsp->typeblock) ? "expense " : "")); ND_PRINT((ndo, "%s", ISIS_MASK_LSP_ATT_ERROR_BIT(header_lsp->typeblock) ? "error " : "")); ND_PRINT((ndo, "ATT bit set, ")); } ND_PRINT((ndo, "%s", ISIS_MASK_LSP_PARTITION_BIT(header_lsp->typeblock) ? "P bit set, " : "")); ND_PRINT((ndo, "%s ]", tok2str(isis_lsp_istype_values, "Unknown(0x%x)", ISIS_MASK_LSP_ISTYPE_BITS(header_lsp->typeblock)))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_LSP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_LSP_HEADER_SIZE); break; case ISIS_PDU_L1_CSNP: case ISIS_PDU_L2_CSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE)) { ND_PRINT((ndo, ", bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned long)(ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_csnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_csnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_csnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_csnp->source_id, NODE_ID_LEN), pdu_len)); ND_PRINT((ndo, "\n\t start lsp-id: %s", isis_print_id(header_csnp->start_lsp_id, LSP_ID_LEN))); ND_PRINT((ndo, "\n\t end lsp-id: %s", isis_print_id(header_csnp->end_lsp_id, LSP_ID_LEN))); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_CSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_CSNP_HEADER_SIZE); break; case ISIS_PDU_L1_PSNP: case ISIS_PDU_L2_PSNP: if (isis_header->fixed_len != (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE)) { ND_PRINT((ndo, "- bogus fixed header length %u should be %lu", isis_header->fixed_len, (unsigned 
long)(ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE))); return (0); } ND_TCHECK(*header_psnp); if (length < ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE) goto trunc; if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", src-id %s", isis_print_id(header_psnp->source_id, NODE_ID_LEN))); ND_PRINT((ndo, ", length %u", length)); return (1); } pdu_len=EXTRACT_16BITS(header_psnp->pdu_len); if (packet_len>pdu_len) { packet_len=pdu_len; /* do TLV decoding as long as it makes sense */ length=pdu_len; } ND_PRINT((ndo, "\n\t source-id: %s, PDU length: %u", isis_print_id(header_psnp->source_id, NODE_ID_LEN), pdu_len)); if (ndo->ndo_vflag > 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", ISIS_PSNP_HEADER_SIZE)) return (0); } packet_len -= (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); pptr = p + (ISIS_COMMON_HEADER_SIZE+ISIS_PSNP_HEADER_SIZE); break; default: if (ndo->ndo_vflag == 0) { ND_PRINT((ndo, ", length %u", length)); return (1); } (void)print_unknown_data(ndo, pptr, "\n\t ", length); return (0); } /* * Now print the TLV's. */ while (packet_len > 0) { ND_TCHECK2(*pptr, 2); if (packet_len < 2) goto trunc; tlv_type = *pptr++; tlv_len = *pptr++; tmp =tlv_len; /* copy temporary len & pointer to packet data */ tptr = pptr; packet_len -= 2; /* first lets see if we know the TLVs name*/ ND_PRINT((ndo, "\n\t %s TLV #%u, length: %u", tok2str(isis_tlv_values, "unknown", tlv_type), tlv_type, tlv_len)); if (tlv_len == 0) /* something is invalid */ continue; if (packet_len < tlv_len) goto trunc; /* now check if we have a decoder otherwise do a hexdump at the end*/ switch (tlv_type) { case ISIS_TLV_AREA_ADDR: ND_TCHECK2(*tptr, 1); alen = *tptr++; while (tmp && alen < tmp) { ND_TCHECK2(*tptr, alen); ND_PRINT((ndo, "\n\t Area address (length: %u): %s", alen, isonsap_string(ndo, tptr, alen))); tptr += alen; tmp -= alen + 1; if (tmp==0) /* if this is the last area address do not attemt a boundary check */ break; ND_TCHECK2(*tptr, 1); alen = *tptr++; } break; case ISIS_TLV_ISNEIGH: while (tmp >= ETHER_ADDR_LEN) { ND_TCHECK2(*tptr, ETHER_ADDR_LEN); ND_PRINT((ndo, "\n\t SNPA: %s", isis_print_id(tptr, ETHER_ADDR_LEN))); tmp -= ETHER_ADDR_LEN; tptr += ETHER_ADDR_LEN; } break; case ISIS_TLV_ISNEIGH_VARLEN: if (!ND_TTEST2(*tptr, 1) || tmp < 3) /* min. TLV length */ goto trunctlv; lan_alen = *tptr++; /* LAN address length */ if (lan_alen == 0) { ND_PRINT((ndo, "\n\t LAN address length 0 bytes (invalid)")); break; } tmp --; ND_PRINT((ndo, "\n\t LAN address length %u bytes ", lan_alen)); while (tmp >= lan_alen) { ND_TCHECK2(*tptr, lan_alen); ND_PRINT((ndo, "\n\t\tIS Neighbor: %s", isis_print_id(tptr, lan_alen))); tmp -= lan_alen; tptr +=lan_alen; } break; case ISIS_TLV_PADDING: break; case ISIS_TLV_MT_IS_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; while (tmp >= 2+NODE_ID_LEN+3+1) { ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_ALIAS_ID: while (tmp >= NODE_ID_LEN+1) { /* is it worth attempting a decode ? */ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_EXT_IS_REACH: while (tmp >= NODE_ID_LEN+3+1) { /* is it worth attempting a decode ? 
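                                                       (an entry needs at least a node-ID,
                                                       a 3-byte metric and the 1-byte
                                                       sub-TLV total length)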
*/ ext_is_len = isis_print_ext_is_reach(ndo, tptr, "\n\t ", tlv_type); if (ext_is_len == 0) /* did something go wrong ? */ goto trunctlv; tmp-=ext_is_len; tptr+=ext_is_len; } break; case ISIS_TLV_IS_REACH: ND_TCHECK2(*tptr,1); /* check if there is one byte left to read out the virtual flag */ ND_PRINT((ndo, "\n\t %s", tok2str(isis_is_reach_virtual_values, "bogus virtual flag 0x%02x", *tptr++))); tlv_is_reach = (const struct isis_tlv_is_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_is_reach)) { ND_TCHECK(*tlv_is_reach); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tlv_is_reach->neighbor_nodeid, NODE_ID_LEN))); isis_print_metric_block(ndo, &tlv_is_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_is_reach); tlv_is_reach++; } break; case ISIS_TLV_ESNEIGH: tlv_es_reach = (const struct isis_tlv_es_reach *)tptr; while (tmp >= sizeof(struct isis_tlv_es_reach)) { ND_TCHECK(*tlv_es_reach); ND_PRINT((ndo, "\n\t ES Neighbor: %s", isis_print_id(tlv_es_reach->neighbor_sysid, SYSTEM_ID_LEN))); isis_print_metric_block(ndo, &tlv_es_reach->isis_metric_block); tmp -= sizeof(struct isis_tlv_es_reach); tlv_es_reach++; } break; /* those two TLVs share the same format */ case ISIS_TLV_INT_IP_REACH: case ISIS_TLV_EXT_IP_REACH: if (!isis_print_tlv_ip_reach(ndo, pptr, "\n\t ", tlv_len)) return (1); break; case ISIS_TLV_EXTD_IP_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6_REACH: while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_MT_IP6_REACH: mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) { /* did something go wrong ? */ goto trunctlv; } tptr+=mt_len; tmp-=mt_len; while (tmp>0) { ext_ip_len = isis_print_extd_ip_reach(ndo, tptr, "\n\t ", AF_INET6); if (ext_ip_len == 0) /* did something go wrong ? 
*/ goto trunctlv; tptr+=ext_ip_len; tmp-=ext_ip_len; } break; case ISIS_TLV_IP6ADDR: while (tmp>=sizeof(struct in6_addr)) { ND_TCHECK2(*tptr, sizeof(struct in6_addr)); ND_PRINT((ndo, "\n\t IPv6 interface address: %s", ip6addr_string(ndo, tptr))); tptr += sizeof(struct in6_addr); tmp -= sizeof(struct in6_addr); } break; case ISIS_TLV_AUTH: ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t %s: ", tok2str(isis_subtlv_auth_values, "unknown Authentication type 0x%02x", *tptr))); switch (*tptr) { case ISIS_SUBTLV_AUTH_SIMPLE: if (fn_printzp(ndo, tptr + 1, tlv_len - 1, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_SUBTLV_AUTH_MD5: for(i=1;i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } if (tlv_len != ISIS_SUBTLV_AUTH_MD5_LEN+1) ND_PRINT((ndo, ", (invalid subTLV) ")); sigcheck = signature_verify(ndo, optr, length, tptr + 1, isis_clear_checksum_lifetime, header_lsp); ND_PRINT((ndo, " (%s)", tok2str(signature_check_values, "Unknown", sigcheck))); break; case ISIS_SUBTLV_AUTH_GENERIC: ND_TCHECK2(*(tptr + 1), 2); key_id = EXTRACT_16BITS((tptr+1)); ND_PRINT((ndo, "%u, password: ", key_id)); for(i=1 + sizeof(uint16_t);i<tlv_len;i++) { ND_TCHECK2(*(tptr + i), 1); ND_PRINT((ndo, "%02x", *(tptr + i))); } break; case ISIS_SUBTLV_AUTH_PRIVATE: default: if (!print_unknown_data(ndo, tptr + 1, "\n\t\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_PTP_ADJ: tlv_ptp_adj = (const struct isis_tlv_ptp_adj *)tptr; if(tmp>=1) { ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, "\n\t Adjacency State: %s (%u)", tok2str(isis_ptp_adjancey_values, "unknown", *tptr), *tptr)); tmp--; } if(tmp>sizeof(tlv_ptp_adj->extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->extd_local_circuit_id); ND_PRINT((ndo, "\n\t Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->extd_local_circuit_id))); tmp-=sizeof(tlv_ptp_adj->extd_local_circuit_id); } if(tmp>=SYSTEM_ID_LEN) { ND_TCHECK2(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t Neighbor System-ID: %s", isis_print_id(tlv_ptp_adj->neighbor_sysid, SYSTEM_ID_LEN))); tmp-=SYSTEM_ID_LEN; } if(tmp>=sizeof(tlv_ptp_adj->neighbor_extd_local_circuit_id)) { ND_TCHECK(tlv_ptp_adj->neighbor_extd_local_circuit_id); ND_PRINT((ndo, "\n\t Neighbor Extended Local circuit-ID: 0x%08x", EXTRACT_32BITS(tlv_ptp_adj->neighbor_extd_local_circuit_id))); } break; case ISIS_TLV_PROTOCOLS: ND_PRINT((ndo, "\n\t NLPID(s): ")); while (tmp>0) { ND_TCHECK2(*(tptr), 1); ND_PRINT((ndo, "%s (0x%02x)", tok2str(nlpid_values, "unknown", *tptr), *tptr)); if (tmp>1) /* further NPLIDs ? 
- put comma */ ND_PRINT((ndo, ", ")); tptr++; tmp--; } break; case ISIS_TLV_MT_PORT_CAP: { ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t RES: %d, MTID(s): %d", (EXTRACT_16BITS (tptr) >> 12), (EXTRACT_16BITS (tptr) & 0x0fff))); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_port_cap_subtlv(ndo, tptr, tmp); break; } case ISIS_TLV_MT_CAPABILITY: ND_TCHECK2(*(tptr), 2); ND_PRINT((ndo, "\n\t O: %d, RES: %d, MTID(s): %d", (EXTRACT_16BITS(tptr) >> 15) & 0x01, (EXTRACT_16BITS(tptr) >> 12) & 0x07, EXTRACT_16BITS(tptr) & 0x0fff)); tmp = tmp-2; tptr = tptr+2; if (tmp) isis_print_mt_capability_subtlv(ndo, tptr, tmp); break; case ISIS_TLV_TE_ROUTER_ID: ND_TCHECK2(*pptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t Traffic Engineering Router ID: %s", ipaddr_string(ndo, pptr))); break; case ISIS_TLV_IPADDR: while (tmp>=sizeof(struct in_addr)) { ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr += sizeof(struct in_addr); tmp -= sizeof(struct in_addr); } break; case ISIS_TLV_HOSTNAME: ND_PRINT((ndo, "\n\t Hostname: ")); if (fn_printzp(ndo, tptr, tmp, ndo->ndo_snapend)) goto trunctlv; break; case ISIS_TLV_SHARED_RISK_GROUP: if (tmp < NODE_ID_LEN) break; ND_TCHECK2(*tptr, NODE_ID_LEN); ND_PRINT((ndo, "\n\t IS Neighbor: %s", isis_print_id(tptr, NODE_ID_LEN))); tptr+=(NODE_ID_LEN); tmp-=(NODE_ID_LEN); if (tmp < 1) break; ND_TCHECK2(*tptr, 1); ND_PRINT((ndo, ", Flags: [%s]", ISIS_MASK_TLV_SHARED_RISK_GROUP(*tptr++) ? "numbered" : "unnumbered")); tmp--; if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 interface address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); if (tmp < sizeof(struct in_addr)) break; ND_TCHECK2(*tptr, sizeof(struct in_addr)); ND_PRINT((ndo, "\n\t IPv4 neighbor address: %s", ipaddr_string(ndo, tptr))); tptr+=sizeof(struct in_addr); tmp-=sizeof(struct in_addr); while (tmp>=4) { ND_TCHECK2(*tptr, 4); ND_PRINT((ndo, "\n\t Link-ID: 0x%08x", EXTRACT_32BITS(tptr))); tptr+=4; tmp-=4; } break; case ISIS_TLV_LSP: tlv_lsp = (const struct isis_tlv_lsp *)tptr; while(tmp>=sizeof(struct isis_tlv_lsp)) { ND_TCHECK((tlv_lsp->lsp_id)[LSP_ID_LEN-1]); ND_PRINT((ndo, "\n\t lsp-id: %s", isis_print_id(tlv_lsp->lsp_id, LSP_ID_LEN))); ND_TCHECK2(tlv_lsp->sequence_number, 4); ND_PRINT((ndo, ", seq: 0x%08x", EXTRACT_32BITS(tlv_lsp->sequence_number))); ND_TCHECK2(tlv_lsp->remaining_lifetime, 2); ND_PRINT((ndo, ", lifetime: %5ds", EXTRACT_16BITS(tlv_lsp->remaining_lifetime))); ND_TCHECK2(tlv_lsp->checksum, 2); ND_PRINT((ndo, ", chksum: 0x%04x", EXTRACT_16BITS(tlv_lsp->checksum))); tmp-=sizeof(struct isis_tlv_lsp); tlv_lsp++; } break; case ISIS_TLV_CHECKSUM: if (tmp < ISIS_TLV_CHECKSUM_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_CHECKSUM_MINLEN); ND_PRINT((ndo, "\n\t checksum: 0x%04x ", EXTRACT_16BITS(tptr))); /* do not attempt to verify the checksum if it is zero * most likely a HMAC-MD5 TLV is also present and * to avoid conflicts the checksum TLV is zeroed. 
* see rfc3358 for details */ osi_print_cksum(ndo, optr, EXTRACT_16BITS(tptr), tptr-optr, length); break; case ISIS_TLV_POI: if (tlv_len >= SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Purge Originator System-ID: %s", isis_print_id(tptr + 1, SYSTEM_ID_LEN))); } if (tlv_len == 2 * SYSTEM_ID_LEN + 1) { ND_TCHECK2(*tptr, 2 * SYSTEM_ID_LEN + 1); ND_PRINT((ndo, "\n\t Received from System-ID: %s", isis_print_id(tptr + SYSTEM_ID_LEN + 1, SYSTEM_ID_LEN))); } break; case ISIS_TLV_MT_SUPPORTED: if (tmp < ISIS_TLV_MT_SUPPORTED_MINLEN) break; while (tmp>1) { /* length can only be a multiple of 2, otherwise there is something broken -> so decode down until length is 1 */ if (tmp!=1) { mt_len = isis_print_mtid(ndo, tptr, "\n\t "); if (mt_len == 0) /* did something go wrong ? */ goto trunctlv; tptr+=mt_len; tmp-=mt_len; } else { ND_PRINT((ndo, "\n\t invalid MT-ID")); break; } } break; case ISIS_TLV_RESTART_SIGNALING: /* first attempt to decode the flags */ if (tmp < ISIS_TLV_RESTART_SIGNALING_FLAGLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_FLAGLEN); ND_PRINT((ndo, "\n\t Flags [%s]", bittok2str(isis_restart_flag_values, "none", *tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; tmp-=ISIS_TLV_RESTART_SIGNALING_FLAGLEN; /* is there anything other than the flags field? */ if (tmp == 0) break; if (tmp < ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN) break; ND_TCHECK2(*tptr, ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN); ND_PRINT((ndo, ", Remaining holding time %us", EXTRACT_16BITS(tptr))); tptr+=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; tmp-=ISIS_TLV_RESTART_SIGNALING_HOLDTIMELEN; /* is there an additional sysid field present ?*/ if (tmp == SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, ", for %s", isis_print_id(tptr,SYSTEM_ID_LEN))); } break; case ISIS_TLV_IDRP_INFO: if (tmp < ISIS_TLV_IDRP_INFO_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_IDRP_INFO_MINLEN); ND_PRINT((ndo, "\n\t Inter-Domain Information Type: %s", tok2str(isis_subtlv_idrp_values, "Unknown (0x%02x)", *tptr))); switch (*tptr++) { case ISIS_SUBTLV_IDRP_ASN: ND_TCHECK2(*tptr, 2); /* fetch AS number */ ND_PRINT((ndo, "AS Number: %u", EXTRACT_16BITS(tptr))); break; case ISIS_SUBTLV_IDRP_LOCAL: case ISIS_SUBTLV_IDRP_RES: default: if (!print_unknown_data(ndo, tptr, "\n\t ", tlv_len - 1)) return(0); break; } break; case ISIS_TLV_LSP_BUFFERSIZE: if (tmp < ISIS_TLV_LSP_BUFFERSIZE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_LSP_BUFFERSIZE_MINLEN); ND_PRINT((ndo, "\n\t LSP Buffersize: %u", EXTRACT_16BITS(tptr))); break; case ISIS_TLV_PART_DIS: while (tmp >= SYSTEM_ID_LEN) { ND_TCHECK2(*tptr, SYSTEM_ID_LEN); ND_PRINT((ndo, "\n\t %s", isis_print_id(tptr, SYSTEM_ID_LEN))); tptr+=SYSTEM_ID_LEN; tmp-=SYSTEM_ID_LEN; } break; case ISIS_TLV_PREFIX_NEIGH: if (tmp < sizeof(struct isis_metric_block)) break; ND_TCHECK2(*tptr, sizeof(struct isis_metric_block)); ND_PRINT((ndo, "\n\t Metric Block")); isis_print_metric_block(ndo, (const struct isis_metric_block *)tptr); tptr+=sizeof(struct isis_metric_block); tmp-=sizeof(struct isis_metric_block); while(tmp>0) { ND_TCHECK2(*tptr, 1); prefix_len=*tptr++; /* read out prefix length in semioctets*/ if (prefix_len < 2) { ND_PRINT((ndo, "\n\t\tAddress: prefix length %u < 2", prefix_len)); break; } tmp--; if (tmp < prefix_len/2) break; ND_TCHECK2(*tptr, prefix_len / 2); ND_PRINT((ndo, "\n\t\tAddress: %s/%u", isonsap_string(ndo, tptr, prefix_len / 2), prefix_len * 4)); tptr+=prefix_len/2; tmp-=prefix_len/2; } break; case ISIS_TLV_IIH_SEQNR: if (tmp < ISIS_TLV_IIH_SEQNR_MINLEN) 
break; ND_TCHECK2(*tptr, ISIS_TLV_IIH_SEQNR_MINLEN); /* check if four bytes are on the wire */ ND_PRINT((ndo, "\n\t Sequence number: %u", EXTRACT_32BITS(tptr))); break; case ISIS_TLV_VENDOR_PRIVATE: if (tmp < ISIS_TLV_VENDOR_PRIVATE_MINLEN) break; ND_TCHECK2(*tptr, ISIS_TLV_VENDOR_PRIVATE_MINLEN); /* check if enough byte for a full oui */ vendor_id = EXTRACT_24BITS(tptr); ND_PRINT((ndo, "\n\t Vendor: %s (%u)", tok2str(oui_values, "Unknown", vendor_id), vendor_id)); tptr+=3; tmp-=3; if (tmp > 0) /* hexdump the rest */ if (!print_unknown_data(ndo, tptr, "\n\t\t", tmp)) return(0); break; /* * FIXME those are the defined TLVs that lack a decoder * you are welcome to contribute code ;-) */ case ISIS_TLV_DECNET_PHASE4: case ISIS_TLV_LUCENT_PRIVATE: case ISIS_TLV_IPAUTH: case ISIS_TLV_NORTEL_PRIVATE1: case ISIS_TLV_NORTEL_PRIVATE2: default: if (ndo->ndo_vflag <= 1) { if (!print_unknown_data(ndo, pptr, "\n\t\t", tlv_len)) return(0); } break; } /* do we want to see an additionally hexdump ? */ if (ndo->ndo_vflag> 1) { if (!print_unknown_data(ndo, pptr, "\n\t ", tlv_len)) return(0); } pptr += tlv_len; packet_len -= tlv_len; } if (packet_len != 0) { ND_PRINT((ndo, "\n\t %u straggler bytes", packet_len)); } return (1); trunc: ND_PRINT((ndo, "%s", tstr)); return (1); trunctlv: ND_PRINT((ndo, "\n\t\t")); ND_PRINT((ndo, "%s", tstr)); return(1); } static void osi_print_cksum(netdissect_options *ndo, const uint8_t *pptr, uint16_t checksum, int checksum_offset, u_int length) { uint16_t calculated_checksum; /* do not attempt to verify the checksum if it is zero, * if the offset is nonsense, * or the base pointer is not sane */ if (!checksum || checksum_offset < 0 || !ND_TTEST2(*(pptr + checksum_offset), 2) || (u_int)checksum_offset > length || !ND_TTEST2(*pptr, length)) { ND_PRINT((ndo, " (unverified)")); } else { #if 0 printf("\nosi_print_cksum: %p %u %u\n", pptr, checksum_offset, length); #endif calculated_checksum = create_osi_cksum(pptr, checksum_offset, length); if (checksum == calculated_checksum) { ND_PRINT((ndo, " (correct)")); } else { ND_PRINT((ndo, " (incorrect should be 0x%04x)", calculated_checksum)); } } } /* * Local Variables: * c-style: whitesmith * c-basic-offset: 8 * End: */
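Annotation: the TLV loop in the print-isis.c decoder above follows one recurring pattern: every read of the two-byte type/length header, and every read of the value bytes, is guarded both by a capture-availability check (ND_TCHECK2/ND_TTEST2) and by the remaining declared PDU length. A minimal, self-contained sketch of that double-guard pattern follows; the names are hypothetical stand-ins, not tcpdump's real macros.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the TLV-walk pattern: every read is guarded both by the
 * remaining declared PDU length (packet_len) and by the amount of
 * data actually captured (avail). Hypothetical helper, not tcpdump's API.
 */
static int
walk_tlvs(const uint8_t *p, size_t packet_len, size_t avail)
{
    while (packet_len >= 2) {
        if (avail < 2)
            return -1;              /* truncated capture */
        uint8_t type = p[0];
        uint8_t len  = p[1];
        p += 2; packet_len -= 2; avail -= 2;

        if (len > packet_len)
            return -1;              /* TLV overruns the PDU */
        if (len > avail)
            return -1;              /* TLV overruns the capture */

        printf("TLV type %u, length %u\n", type, len);
        p += len; packet_len -= len; avail -= len;
    }
    return 0;
}

The record below arises precisely where one branch of the real decoder dropped half of this guard.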
isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 1)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? ", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); }
isis_print_extd_ip_reach(netdissect_options *ndo, const uint8_t *tptr, const char *ident, uint16_t afi) { char ident_buffer[20]; uint8_t prefix[sizeof(struct in6_addr)]; /* shared copy buffer for IPv4 and IPv6 prefixes */ u_int metric, status_byte, bit_length, byte_length, sublen, processed, subtlvtype, subtlvlen; if (!ND_TTEST2(*tptr, 4)) return (0); metric = EXTRACT_32BITS(tptr); processed=4; tptr+=4; if (afi == AF_INET) { if (!ND_TTEST2(*tptr, 1)) /* fetch status byte */ return (0); status_byte=*(tptr++); bit_length = status_byte&0x3f; if (bit_length > 32) { ND_PRINT((ndo, "%sIPv4 prefix: bad bit length %u", ident, bit_length)); return (0); } processed++; } else if (afi == AF_INET6) { if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */ return (0); status_byte=*(tptr++); bit_length=*(tptr++); if (bit_length > 128) { ND_PRINT((ndo, "%sIPv6 prefix: bad bit length %u", ident, bit_length)); return (0); } processed+=2; } else return (0); /* somebody is fooling us */ byte_length = (bit_length + 7) / 8; /* prefix has variable length encoding */ if (!ND_TTEST2(*tptr, byte_length)) return (0); memset(prefix, 0, sizeof prefix); /* clear the copy buffer */ memcpy(prefix,tptr,byte_length); /* copy as much as is stored in the TLV */ tptr+=byte_length; processed+=byte_length; if (afi == AF_INET) ND_PRINT((ndo, "%sIPv4 prefix: %15s/%u", ident, ipaddr_string(ndo, prefix), bit_length)); else if (afi == AF_INET6) ND_PRINT((ndo, "%sIPv6 prefix: %s/%u", ident, ip6addr_string(ndo, prefix), bit_length)); ND_PRINT((ndo, ", Distribution: %s, Metric: %u", ISIS_MASK_TLV_EXTD_IP_UPDOWN(status_byte) ? "down" : "up", metric)); if (afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) ND_PRINT((ndo, ", sub-TLVs present")); else if (afi == AF_INET6) ND_PRINT((ndo, ", %s%s", ISIS_MASK_TLV_EXTD_IP6_IE(status_byte) ? "External" : "Internal", ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte) ? ", sub-TLVs present" : "")); if ((afi == AF_INET && ISIS_MASK_TLV_EXTD_IP_SUBTLV(status_byte)) || (afi == AF_INET6 && ISIS_MASK_TLV_EXTD_IP6_SUBTLV(status_byte)) ) { /* assume that one prefix can hold more than one subTLV - therefore the first byte must reflect the aggregate bytecount of the subTLVs for this prefix */ if (!ND_TTEST2(*tptr, 1)) return (0); sublen=*(tptr++); processed+=sublen+1; ND_PRINT((ndo, " (%u)", sublen)); /* print out subTLV length */ while (sublen>0) { if (!ND_TTEST2(*tptr,2)) return (0); subtlvtype=*(tptr++); subtlvlen=*(tptr++); /* prepend the indent string */ snprintf(ident_buffer, sizeof(ident_buffer), "%s ",ident); if (!isis_print_ip_reach_subtlv(ndo, tptr, subtlvtype, subtlvlen, ident_buffer)) return(0); tptr+=subtlvlen; sublen-=(subtlvlen+2); } } return (processed); }
{'added': [(2041, ' if (!ND_TTEST2(*tptr, 2)) /* fetch status & prefix_len byte */')], 'deleted': [(2041, ' if (!ND_TTEST2(*tptr, 1)) /* fetch status & prefix_len byte */')]}
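Annotation: the one-line change recorded in this diff is the entire fix for CVE-2017-12998. In the AF_INET6 branch of isis_print_extd_ip_reach(), two bytes are consumed (the status byte and the prefix-length byte), but only one byte of capture availability was verified, so a crafted packet could trigger a one-byte out-of-bounds read (CWE-125). A reduced sketch of the fixed pattern follows; have() is a hypothetical stand-in for tcpdump's ND_TTEST2 check, not the real macro.

#include <stdint.h>

/* Hypothetical: "n bytes of captured data are available at p". */
extern int have(const uint8_t *p, unsigned n);

int
parse_ipv6_prefix_header(const uint8_t *tptr,
                         uint8_t *status_byte, uint8_t *bit_length)
{
    /* The buggy version checked have(tptr, 1) here and then read TWO
     * bytes, so the second read could fall past the captured data.
     * The fix simply widens the check to cover everything that is
     * about to be consumed. */
    if (!have(tptr, 2))
        return 0;
    *status_byte = *tptr++;
    *bit_length  = *tptr++;
    return 1;
}

The general rule the patch restores: the availability check must match the number of bytes the following code actually reads, not the number read so far.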
1
1
2,403
14,614
85
568
25
https://github.com/the-tcpdump-group/tcpdump
CVE-2017-12998
CWE-125
2,910
jas_stream.c
C
mem_read
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * I/O Stream Library * * $Id$ */ /******************************************************************************\ * Includes. 
\******************************************************************************/ #include <assert.h> #if defined(HAVE_FCNTL_H) #include <fcntl.h> #endif #include <stdlib.h> #include <stdarg.h> #include <stdio.h> #include <ctype.h> #if defined(HAVE_UNISTD_H) #include <unistd.h> #endif #if defined(WIN32) || defined(HAVE_IO_H) #include <io.h> #endif #include "jasper/jas_debug.h" #include "jasper/jas_types.h" #include "jasper/jas_stream.h" #include "jasper/jas_malloc.h" #include "jasper/jas_math.h" /******************************************************************************\ * Local function prototypes. \******************************************************************************/ static int jas_strtoopenmode(const char *s); static void jas_stream_destroy(jas_stream_t *stream); static jas_stream_t *jas_stream_create(void); static void jas_stream_initbuf(jas_stream_t *stream, int bufmode, char *buf, int bufsize); static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt); static int mem_write(jas_stream_obj_t *obj, char *buf, int cnt); static long mem_seek(jas_stream_obj_t *obj, long offset, int origin); static int mem_close(jas_stream_obj_t *obj); static int sfile_read(jas_stream_obj_t *obj, char *buf, int cnt); static int sfile_write(jas_stream_obj_t *obj, char *buf, int cnt); static long sfile_seek(jas_stream_obj_t *obj, long offset, int origin); static int sfile_close(jas_stream_obj_t *obj); static int file_read(jas_stream_obj_t *obj, char *buf, int cnt); static int file_write(jas_stream_obj_t *obj, char *buf, int cnt); static long file_seek(jas_stream_obj_t *obj, long offset, int origin); static int file_close(jas_stream_obj_t *obj); /******************************************************************************\ * Local data. \******************************************************************************/ static jas_stream_ops_t jas_stream_fileops = { file_read, file_write, file_seek, file_close }; static jas_stream_ops_t jas_stream_sfileops = { sfile_read, sfile_write, sfile_seek, sfile_close }; static jas_stream_ops_t jas_stream_memops = { mem_read, mem_write, mem_seek, mem_close }; /******************************************************************************\ * Code for opening and closing streams. \******************************************************************************/ static jas_stream_t *jas_stream_create() { jas_stream_t *stream; if (!(stream = jas_malloc(sizeof(jas_stream_t)))) { return 0; } stream->openmode_ = 0; stream->bufmode_ = 0; stream->flags_ = 0; stream->bufbase_ = 0; stream->bufstart_ = 0; stream->bufsize_ = 0; stream->ptr_ = 0; stream->cnt_ = 0; stream->ops_ = 0; stream->obj_ = 0; stream->rwcnt_ = 0; stream->rwlimit_ = -1; return stream; } jas_stream_t *jas_stream_memopen(char *buf, int bufsize) { jas_stream_t *stream; jas_stream_memobj_t *obj; JAS_DBGLOG(100, ("jas_stream_memopen(%p, %d)\n", buf, bufsize)); if (!(stream = jas_stream_create())) { return 0; } /* A stream associated with a memory buffer is always opened for both reading and writing in binary mode. */ stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY; /* Since the stream data is already resident in memory, buffering is not necessary. */ /* But... It still may be faster to use buffering anyways. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); /* Select the operations for a memory stream. */ stream->ops_ = &jas_stream_memops; /* Allocate memory for the underlying memory stream object. 
*/ if (!(obj = jas_malloc(sizeof(jas_stream_memobj_t)))) { jas_stream_destroy(stream); return 0; } stream->obj_ = (void *) obj; /* Initialize a few important members of the memory stream object. */ obj->myalloc_ = 0; obj->buf_ = 0; /* If the buffer size specified is nonpositive, then the buffer is allocated internally and automatically grown as needed. */ if (bufsize <= 0) { obj->bufsize_ = 1024; obj->growable_ = 1; } else { obj->bufsize_ = bufsize; obj->growable_ = 0; } if (buf) { obj->buf_ = (unsigned char *) buf; } else { obj->buf_ = jas_malloc(obj->bufsize_); obj->myalloc_ = 1; } if (!obj->buf_) { jas_stream_close(stream); return 0; } JAS_DBGLOG(100, ("jas_stream_memopen buffer buf=%p myalloc=%d\n", obj->buf_, obj->myalloc_)); if (bufsize > 0 && buf) { /* If a buffer was supplied by the caller and its length is positive, make the associated buffer data appear in the stream initially. */ obj->len_ = bufsize; } else { /* The stream is initially empty. */ obj->len_ = 0; } obj->pos_ = 0; return stream; } jas_stream_t *jas_stream_fopen(const char *filename, const char *mode) { jas_stream_t *stream; jas_stream_fileobj_t *obj; int openflags; /* Allocate a stream object. */ if (!(stream = jas_stream_create())) { return 0; } /* Parse the mode string. */ stream->openmode_ = jas_strtoopenmode(mode); /* Determine the correct flags to use for opening the file. */ if ((stream->openmode_ & JAS_STREAM_READ) && (stream->openmode_ & JAS_STREAM_WRITE)) { openflags = O_RDWR; } else if (stream->openmode_ & JAS_STREAM_READ) { openflags = O_RDONLY; } else if (stream->openmode_ & JAS_STREAM_WRITE) { openflags = O_WRONLY; } else { openflags = 0; } if (stream->openmode_ & JAS_STREAM_APPEND) { openflags |= O_APPEND; } if (stream->openmode_ & JAS_STREAM_BINARY) { openflags |= O_BINARY; } if (stream->openmode_ & JAS_STREAM_CREATE) { openflags |= O_CREAT | O_TRUNC; } /* Allocate space for the underlying file stream object. */ if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) { jas_stream_destroy(stream); return 0; } obj->fd = -1; obj->flags = 0; obj->pathname[0] = '\0'; stream->obj_ = (void *) obj; /* Select the operations for a file stream object. */ stream->ops_ = &jas_stream_fileops; /* Open the underlying file. */ if ((obj->fd = open(filename, openflags, JAS_STREAM_PERMS)) < 0) { // Free the underlying file object, since it will not otherwise // be freed. jas_free(obj); jas_stream_destroy(stream); return 0; } /* By default, use full buffering for this type of stream. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); return stream; } jas_stream_t *jas_stream_freopen(const char *path, const char *mode, FILE *fp) { jas_stream_t *stream; int openflags; /* Eliminate compiler warning about unused variable. */ path = 0; /* Allocate a stream object. */ if (!(stream = jas_stream_create())) { return 0; } /* Parse the mode string. */ stream->openmode_ = jas_strtoopenmode(mode); /* Determine the correct flags to use for opening the file. 
*/ if ((stream->openmode_ & JAS_STREAM_READ) && (stream->openmode_ & JAS_STREAM_WRITE)) { openflags = O_RDWR; } else if (stream->openmode_ & JAS_STREAM_READ) { openflags = O_RDONLY; } else if (stream->openmode_ & JAS_STREAM_WRITE) { openflags = O_WRONLY; } else { openflags = 0; } if (stream->openmode_ & JAS_STREAM_APPEND) { openflags |= O_APPEND; } if (stream->openmode_ & JAS_STREAM_BINARY) { openflags |= O_BINARY; } if (stream->openmode_ & JAS_STREAM_CREATE) { openflags |= O_CREAT | O_TRUNC; } stream->obj_ = JAS_CAST(void *, fp); /* Select the operations for a file stream object. */ stream->ops_ = &jas_stream_sfileops; /* By default, use full buffering for this type of stream. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); return stream; } jas_stream_t *jas_stream_tmpfile() { jas_stream_t *stream; jas_stream_fileobj_t *obj; if (!(stream = jas_stream_create())) { return 0; } /* A temporary file stream is always opened for both reading and writing in binary mode. */ stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY; /* Allocate memory for the underlying temporary file object. */ if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) { jas_stream_destroy(stream); return 0; } obj->fd = -1; obj->flags = 0; obj->pathname[0] = '\0'; stream->obj_ = obj; /* Choose a file name. */ tmpnam(obj->pathname); /* Open the underlying file. */ if ((obj->fd = open(obj->pathname, O_CREAT | O_EXCL | O_RDWR | O_TRUNC | O_BINARY, JAS_STREAM_PERMS)) < 0) { jas_stream_destroy(stream); return 0; } /* Unlink the file so that it will disappear if the program terminates abnormally. */ /* Under UNIX, one can unlink an open file and continue to do I/O on it. Not all operating systems support this functionality, however. For example, under Microsoft Windows the unlink operation will fail, since the file is open. */ if (unlink(obj->pathname)) { /* We will try unlinking the file again after it is closed. */ obj->flags |= JAS_STREAM_FILEOBJ_DELONCLOSE; } /* Use full buffering. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); stream->ops_ = &jas_stream_fileops; return stream; } jas_stream_t *jas_stream_fdopen(int fd, const char *mode) { jas_stream_t *stream; jas_stream_fileobj_t *obj; /* Allocate a stream object. */ if (!(stream = jas_stream_create())) { return 0; } /* Parse the mode string. */ stream->openmode_ = jas_strtoopenmode(mode); #if defined(WIN32) /* Argh!!! Someone ought to banish text mode (i.e., O_TEXT) to the greatest depths of purgatory! */ /* Ensure that the file descriptor is in binary mode, if the caller has specified the binary mode flag. Arguably, the caller ought to take care of this, but text mode is a ugly wart anyways, so we save the caller some grief by handling this within the stream library. */ /* This ugliness is mainly for the benefit of those who run the JasPer software under Windows from shells that insist on opening files in text mode. For example, in the Cygwin environment, shells often open files in text mode when I/O redirection is used. Grr... */ if (stream->openmode_ & JAS_STREAM_BINARY) { setmode(fd, O_BINARY); } #endif /* Allocate space for the underlying file stream object. */ if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) { jas_stream_destroy(stream); return 0; } obj->fd = fd; obj->flags = 0; obj->pathname[0] = '\0'; stream->obj_ = (void *) obj; /* Do not close the underlying file descriptor when the stream is closed. */ obj->flags |= JAS_STREAM_FILEOBJ_NOCLOSE; /* By default, use full buffering for this type of stream. 
*/ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); /* Select the operations for a file stream object. */ stream->ops_ = &jas_stream_fileops; return stream; } static void jas_stream_destroy(jas_stream_t *stream) { /* If the memory for the buffer was allocated with malloc, free this memory. */ if ((stream->bufmode_ & JAS_STREAM_FREEBUF) && stream->bufbase_) { JAS_DBGLOG(100, ("jas_stream_destroy freeing buffer %p\n", stream->bufbase_)); jas_free(stream->bufbase_); stream->bufbase_ = 0; } jas_free(stream); } int jas_stream_close(jas_stream_t *stream) { /* Flush buffer if necessary. */ jas_stream_flush(stream); /* Close the underlying stream object. */ (*stream->ops_->close_)(stream->obj_); jas_stream_destroy(stream); return 0; } /******************************************************************************\ * Code for reading and writing streams. \******************************************************************************/ int jas_stream_getc_func(jas_stream_t *stream) { assert(stream->ptr_ - stream->bufbase_ <= stream->bufsize_ + JAS_STREAM_MAXPUTBACK); return jas_stream_getc_macro(stream); } int jas_stream_putc_func(jas_stream_t *stream, int c) { assert(stream->ptr_ - stream->bufstart_ <= stream->bufsize_); return jas_stream_putc_macro(stream, c); } int jas_stream_ungetc(jas_stream_t *stream, int c) { if (!stream->ptr_ || stream->ptr_ == stream->bufbase_) { return -1; } /* Reset the EOF indicator (since we now have at least one character to read). */ stream->flags_ &= ~JAS_STREAM_EOF; --stream->rwcnt_; --stream->ptr_; ++stream->cnt_; *stream->ptr_ = c; return 0; } int jas_stream_read(jas_stream_t *stream, void *buf, int cnt) { int n; int c; char *bufptr; bufptr = buf; n = 0; while (n < cnt) { if ((c = jas_stream_getc(stream)) == EOF) { return n; } *bufptr++ = c; ++n; } return n; } int jas_stream_write(jas_stream_t *stream, const void *buf, int cnt) { int n; const char *bufptr; bufptr = buf; n = 0; while (n < cnt) { if (jas_stream_putc(stream, *bufptr) == EOF) { return n; } ++bufptr; ++n; } return n; } /* Note: This function uses a fixed size buffer. Therefore, it cannot handle invocations that will produce more output than can be held by the buffer. */ int jas_stream_printf(jas_stream_t *stream, const char *fmt, ...) { va_list ap; char buf[4096]; int ret; va_start(ap, fmt); ret = vsnprintf(buf, sizeof buf, fmt, ap); jas_stream_puts(stream, buf); va_end(ap); return ret; } int jas_stream_puts(jas_stream_t *stream, const char *s) { while (*s != '\0') { if (jas_stream_putc_macro(stream, *s) == EOF) { return -1; } ++s; } return 0; } char *jas_stream_gets(jas_stream_t *stream, char *buf, int bufsize) { int c; char *bufptr; assert(bufsize > 0); bufptr = buf; while (bufsize > 1) { if ((c = jas_stream_getc(stream)) == EOF) { break; } *bufptr++ = c; --bufsize; if (c == '\n') { break; } } *bufptr = '\0'; return buf; } int jas_stream_gobble(jas_stream_t *stream, int n) { int m; m = n; for (m = n; m > 0; --m) { if (jas_stream_getc(stream) == EOF) { return n - m; } } return n; } int jas_stream_pad(jas_stream_t *stream, int n, int c) { int m; m = n; for (m = n; m > 0; --m) { if (jas_stream_putc(stream, c) == EOF) return n - m; } return n; } /******************************************************************************\ * Code for getting and setting the stream position. 
\******************************************************************************/ int jas_stream_isseekable(jas_stream_t *stream) { if (stream->ops_ == &jas_stream_memops) { return 1; } else if (stream->ops_ == &jas_stream_fileops) { if ((*stream->ops_->seek_)(stream->obj_, 0, SEEK_CUR) < 0) { return 0; } return 1; } else { return 0; } } int jas_stream_rewind(jas_stream_t *stream) { return jas_stream_seek(stream, 0, SEEK_SET); } long jas_stream_seek(jas_stream_t *stream, long offset, int origin) { long newpos; /* The buffer cannot be in use for both reading and writing. */ assert(!((stream->bufmode_ & JAS_STREAM_RDBUF) && (stream->bufmode_ & JAS_STREAM_WRBUF))); /* Reset the EOF indicator (since we may not be at the EOF anymore). */ stream->flags_ &= ~JAS_STREAM_EOF; if (stream->bufmode_ & JAS_STREAM_RDBUF) { if (origin == SEEK_CUR) { offset -= stream->cnt_; } } else if (stream->bufmode_ & JAS_STREAM_WRBUF) { if (jas_stream_flush(stream)) { return -1; } } stream->cnt_ = 0; stream->ptr_ = stream->bufstart_; stream->bufmode_ &= ~(JAS_STREAM_RDBUF | JAS_STREAM_WRBUF); if ((newpos = (*stream->ops_->seek_)(stream->obj_, offset, origin)) < 0) { return -1; } return newpos; } long jas_stream_tell(jas_stream_t *stream) { int adjust; int offset; if (stream->bufmode_ & JAS_STREAM_RDBUF) { adjust = -stream->cnt_; } else if (stream->bufmode_ & JAS_STREAM_WRBUF) { adjust = stream->ptr_ - stream->bufstart_; } else { adjust = 0; } if ((offset = (*stream->ops_->seek_)(stream->obj_, 0, SEEK_CUR)) < 0) { return -1; } return offset + adjust; } /******************************************************************************\ * Buffer initialization code. \******************************************************************************/ static void jas_stream_initbuf(jas_stream_t *stream, int bufmode, char *buf, int bufsize) { /* If this function is being called, the buffer should not have been initialized yet. */ assert(!stream->bufbase_); if (bufmode != JAS_STREAM_UNBUF) { /* The full- or line-buffered mode is being employed. */ if (!buf) { /* The caller has not specified a buffer to employ, so allocate one. */ if ((stream->bufbase_ = jas_malloc(JAS_STREAM_BUFSIZE + JAS_STREAM_MAXPUTBACK))) { stream->bufmode_ |= JAS_STREAM_FREEBUF; stream->bufsize_ = JAS_STREAM_BUFSIZE; } else { /* The buffer allocation has failed. Resort to unbuffered operation. */ stream->bufbase_ = stream->tinybuf_; stream->bufsize_ = 1; } } else { /* The caller has specified a buffer to employ. */ /* The buffer must be large enough to accommodate maximum putback. */ assert(bufsize > JAS_STREAM_MAXPUTBACK); stream->bufbase_ = JAS_CAST(uchar *, buf); stream->bufsize_ = bufsize - JAS_STREAM_MAXPUTBACK; } } else { /* The unbuffered mode is being employed. */ /* A buffer should not have been supplied by the caller. */ assert(!buf); /* Use a trivial one-character buffer. */ stream->bufbase_ = stream->tinybuf_; stream->bufsize_ = 1; } stream->bufstart_ = &stream->bufbase_[JAS_STREAM_MAXPUTBACK]; stream->ptr_ = stream->bufstart_; stream->cnt_ = 0; stream->bufmode_ |= bufmode & JAS_STREAM_BUFMODEMASK; } /******************************************************************************\ * Buffer filling and flushing code. 
\******************************************************************************/ int jas_stream_flush(jas_stream_t *stream) { if (stream->bufmode_ & JAS_STREAM_RDBUF) { return 0; } return jas_stream_flushbuf(stream, EOF); } int jas_stream_fillbuf(jas_stream_t *stream, int getflag) { int c; /* The stream must not be in an error or EOF state. */ if ((stream->flags_ & (JAS_STREAM_ERRMASK)) != 0) { return EOF; } /* The stream must be open for reading. */ if ((stream->openmode_ & JAS_STREAM_READ) == 0) { return EOF; } /* Make a half-hearted attempt to confirm that the buffer is not currently being used for writing. This check is not intended to be foolproof! */ assert((stream->bufmode_ & JAS_STREAM_WRBUF) == 0); assert(stream->ptr_ - stream->bufstart_ <= stream->bufsize_); /* Mark the buffer as being used for reading. */ stream->bufmode_ |= JAS_STREAM_RDBUF; /* Read new data into the buffer. */ stream->ptr_ = stream->bufstart_; if ((stream->cnt_ = (*stream->ops_->read_)(stream->obj_, (char *) stream->bufstart_, stream->bufsize_)) <= 0) { if (stream->cnt_ < 0) { stream->flags_ |= JAS_STREAM_ERR; } else { stream->flags_ |= JAS_STREAM_EOF; } stream->cnt_ = 0; return EOF; } assert(stream->cnt_ > 0); /* Get or peek at the first character in the buffer. */ c = (getflag) ? jas_stream_getc2(stream) : (*stream->ptr_); return c; } int jas_stream_flushbuf(jas_stream_t *stream, int c) { int len; int n; /* The stream should not be in an error or EOF state. */ if ((stream->flags_ & (JAS_STREAM_ERRMASK)) != 0) { return EOF; } /* The stream must be open for writing. */ if ((stream->openmode_ & (JAS_STREAM_WRITE | JAS_STREAM_APPEND)) == 0) { return EOF; } /* The buffer should not currently be in use for reading. */ assert(!(stream->bufmode_ & JAS_STREAM_RDBUF)); /* Note: Do not use the quantity stream->cnt to determine the number of characters in the buffer! Depending on how this function was called, the stream->cnt value may be "off-by-one". */ len = stream->ptr_ - stream->bufstart_; if (len > 0) { n = (*stream->ops_->write_)(stream->obj_, (char *) stream->bufstart_, len); if (n != len) { stream->flags_ |= JAS_STREAM_ERR; return EOF; } } stream->cnt_ = stream->bufsize_; stream->ptr_ = stream->bufstart_; stream->bufmode_ |= JAS_STREAM_WRBUF; if (c != EOF) { assert(stream->cnt_ > 0); return jas_stream_putc2(stream, c); } return 0; } /******************************************************************************\ * Miscellaneous code. \******************************************************************************/ static int jas_strtoopenmode(const char *s) { int openmode = 0; while (*s != '\0') { switch (*s) { case 'r': openmode |= JAS_STREAM_READ; break; case 'w': openmode |= JAS_STREAM_WRITE | JAS_STREAM_CREATE; break; case 'b': openmode |= JAS_STREAM_BINARY; break; case 'a': openmode |= JAS_STREAM_APPEND; break; case '+': openmode |= JAS_STREAM_READ | JAS_STREAM_WRITE; break; default: break; } ++s; } return openmode; } int jas_stream_copy(jas_stream_t *out, jas_stream_t *in, int n) { int all; int c; int m; all = (n < 0) ? 1 : 0; m = n; while (all || m > 0) { if ((c = jas_stream_getc_macro(in)) == EOF) { /* The next character of input could not be read. */ /* Return with an error if an I/O error occured (not including EOF) or if an explicit copy count was specified. */ return (!all || jas_stream_error(in)) ? 
(-1) : 0; } if (jas_stream_putc_macro(out, c) == EOF) { return -1; } --m; } return 0; } long jas_stream_setrwcount(jas_stream_t *stream, long rwcnt) { int old; old = stream->rwcnt_; stream->rwcnt_ = rwcnt; return old; } int jas_stream_display(jas_stream_t *stream, FILE *fp, int n) { unsigned char buf[16]; int i; int j; int m; int c; int display; int cnt; cnt = n - (n % 16); display = 1; for (i = 0; i < n; i += 16) { if (n > 16 && i > 0) { display = (i >= cnt) ? 1 : 0; } if (display) { fprintf(fp, "%08x:", i); } m = JAS_MIN(n - i, 16); for (j = 0; j < m; ++j) { if ((c = jas_stream_getc(stream)) == EOF) { abort(); return -1; } buf[j] = c; } if (display) { for (j = 0; j < m; ++j) { fprintf(fp, " %02x", buf[j]); } fputc(' ', fp); for (; j < 16; ++j) { fprintf(fp, " "); } for (j = 0; j < m; ++j) { if (isprint(buf[j])) { fputc(buf[j], fp); } else { fputc(' ', fp); } } fprintf(fp, "\n"); } } return 0; } long jas_stream_length(jas_stream_t *stream) { long oldpos; long pos; if ((oldpos = jas_stream_tell(stream)) < 0) { return -1; } if (jas_stream_seek(stream, 0, SEEK_END) < 0) { return -1; } if ((pos = jas_stream_tell(stream)) < 0) { return -1; } if (jas_stream_seek(stream, oldpos, SEEK_SET) < 0) { return -1; } return pos; } /******************************************************************************\ * Memory stream object. \******************************************************************************/ static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt) { int n; assert(cnt >= 0); assert(buf); JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt)); jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; n = m->len_ - m->pos_; cnt = JAS_MIN(n, cnt); memcpy(buf, &m->buf_[m->pos_], cnt); m->pos_ += cnt; return cnt; } static int mem_resize(jas_stream_memobj_t *m, int bufsize) { unsigned char *buf; //assert(m->buf_); assert(bufsize >= 0); JAS_DBGLOG(100, ("mem_resize(%p, %d)\n", m, bufsize)); if (!(buf = jas_realloc2(m->buf_, bufsize, sizeof(unsigned char))) && bufsize) { JAS_DBGLOG(100, ("mem_resize realloc failed\n")); return -1; } JAS_DBGLOG(100, ("mem_resize realloc succeeded\n")); m->buf_ = buf; m->bufsize_ = bufsize; return 0; } static int mem_write(jas_stream_obj_t *obj, char *buf, int cnt) { int n; int ret; jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; long newbufsize; long newpos; assert(buf); assert(cnt >= 0); JAS_DBGLOG(100, ("mem_write(%p, %p, %d)\n", obj, buf, cnt)); newpos = m->pos_ + cnt; if (newpos > m->bufsize_ && m->growable_) { newbufsize = m->bufsize_; while (newbufsize < newpos) { newbufsize <<= 1; assert(newbufsize >= 0); } JAS_DBGLOG(100, ("mem_write resizing from %d to %z\n", m->bufsize_, newbufsize)); JAS_DBGLOG(100, ("mem_write resizing from %d to %ul\n", m->bufsize_, JAS_CAST(unsigned long, newbufsize))); if (mem_resize(m, newbufsize)) { return -1; } } if (m->pos_ > m->len_) { /* The current position is beyond the end of the file, so pad the file to the current position with zeros. */ n = JAS_MIN(m->pos_, m->bufsize_) - m->len_; if (n > 0) { memset(&m->buf_[m->len_], 0, n); m->len_ += n; } if (m->pos_ != m->len_) { /* The buffer is not big enough. 
*/ return 0; } } n = m->bufsize_ - m->pos_; ret = JAS_MIN(n, cnt); if (ret > 0) { memcpy(&m->buf_[m->pos_], buf, ret); m->pos_ += ret; } if (m->pos_ > m->len_) { m->len_ = m->pos_; } assert(ret == cnt); return ret; } static long mem_seek(jas_stream_obj_t *obj, long offset, int origin) { jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; long newpos; JAS_DBGLOG(100, ("mem_seek(%p, %ld, %d)\n", obj, offset, origin)); switch (origin) { case SEEK_SET: newpos = offset; break; case SEEK_END: newpos = m->len_ - offset; break; case SEEK_CUR: newpos = m->pos_ + offset; break; default: abort(); break; } if (newpos < 0) { return -1; } m->pos_ = newpos; return m->pos_; } static int mem_close(jas_stream_obj_t *obj) { JAS_DBGLOG(100, ("mem_close(%p)\n", obj)); jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; JAS_DBGLOG(100, ("mem_close myalloc=%d\n", m->myalloc_)); if (m->myalloc_ && m->buf_) { JAS_DBGLOG(100, ("mem_close freeing buffer %p\n", m->buf_)); jas_free(m->buf_); m->buf_ = 0; } jas_free(obj); return 0; } /******************************************************************************\ * File stream object. \******************************************************************************/ static int file_read(jas_stream_obj_t *obj, char *buf, int cnt) { jas_stream_fileobj_t *fileobj; JAS_DBGLOG(100, ("file_read(%p, %p, %d)\n", obj, buf, cnt)); fileobj = JAS_CAST(jas_stream_fileobj_t *, obj); return read(fileobj->fd, buf, cnt); } static int file_write(jas_stream_obj_t *obj, char *buf, int cnt) { jas_stream_fileobj_t *fileobj; JAS_DBGLOG(100, ("file_write(%p, %p, %d)\n", obj, buf, cnt)); fileobj = JAS_CAST(jas_stream_fileobj_t *, obj); return write(fileobj->fd, buf, cnt); } static long file_seek(jas_stream_obj_t *obj, long offset, int origin) { jas_stream_fileobj_t *fileobj; JAS_DBGLOG(100, ("file_seek(%p, %ld, %d)\n", obj, offset, origin)); fileobj = JAS_CAST(jas_stream_fileobj_t *, obj); return lseek(fileobj->fd, offset, origin); } static int file_close(jas_stream_obj_t *obj) { jas_stream_fileobj_t *fileobj; JAS_DBGLOG(100, ("file_close(%p)\n", obj)); fileobj = JAS_CAST(jas_stream_fileobj_t *, obj); int ret; ret = close(fileobj->fd); if (fileobj->flags & JAS_STREAM_FILEOBJ_DELONCLOSE) { unlink(fileobj->pathname); } jas_free(fileobj); return ret; } /******************************************************************************\ * Stdio file stream object. \******************************************************************************/ static int sfile_read(jas_stream_obj_t *obj, char *buf, int cnt) { FILE *fp; size_t n; int result; JAS_DBGLOG(100, ("sfile_read(%p, %p, %d)\n", obj, buf, cnt)); fp = JAS_CAST(FILE *, obj); n = fread(buf, 1, cnt, fp); if (n != cnt) { result = (!ferror(fp) && feof(fp)) ? 0 : -1; } result = JAS_CAST(int, n); return result; } static int sfile_write(jas_stream_obj_t *obj, char *buf, int cnt) { FILE *fp; size_t n; JAS_DBGLOG(100, ("sfile_write(%p, %p, %d)\n", obj, buf, cnt)); fp = JAS_CAST(FILE *, obj); n = fwrite(buf, 1, cnt, fp); return (n != JAS_CAST(size_t, cnt)) ? (-1) : cnt; } static long sfile_seek(jas_stream_obj_t *obj, long offset, int origin) { FILE *fp; JAS_DBGLOG(100, ("sfile_seek(%p, %ld, %d)\n", obj, offset, origin)); fp = JAS_CAST(FILE *, obj); return fseek(fp, offset, origin); } static int sfile_close(jas_stream_obj_t *obj) { FILE *fp; JAS_DBGLOG(100, ("sfile_close(%p)\n", obj)); fp = JAS_CAST(FILE *, obj); return fclose(fp); }
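Annotation: in the unpatched JasPer code above, jas_stream_memopen() takes its buffer size as a plain int; any nonpositive size, including a negative value produced by overflow, is silently treated as a request for a growable buffer, even when the caller supplied a fixed buffer. The patched version that follows keeps jas_stream_memopen() only as a validating compatibility wrapper around a new jas_stream_memopen2() that takes a size_t. A minimal sketch of that wrapper pattern follows, with simplified hypothetical signatures rather than JasPer's full API.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for jas_stream_memopen2(): size_t-based API. */
void *open_mem2(char *buf, size_t bufsize);

void *
open_mem_compat(char *buf, int bufsize)
{
    char *new_buf;
    size_t new_bufsize;

    if (bufsize < 0)
        fprintf(stderr, "deprecated: negative buffer size\n");
    if (buf && bufsize <= 0)
        fprintf(stderr, "a user-provided buffer cannot be growable\n");
    if (bufsize <= 0) {
        /* Growable stream: drop any caller buffer, allocate internally. */
        new_bufsize = 0;
        new_buf = NULL;
    } else {
        new_bufsize = (size_t)bufsize;  /* non-negative, safe to widen */
        new_buf = buf;
    }
    return open_mem2(new_buf, new_bufsize);
}

Keeping the old entry point as a thin wrapper lets existing callers keep compiling while the signed-size hazard is confined to one audited conversion.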
/* * Copyright (c) 1999-2000 Image Power, Inc. and the University of * British Columbia. * Copyright (c) 2001-2003 Michael David Adams. * All rights reserved. */ /* __START_OF_JASPER_LICENSE__ * * JasPer License Version 2.0 * * Copyright (c) 2001-2006 Michael David Adams * Copyright (c) 1999-2000 Image Power, Inc. * Copyright (c) 1999-2000 The University of British Columbia * * All rights reserved. * * Permission is hereby granted, free of charge, to any person (the * "User") obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Software, and to permit * persons to whom the Software is furnished to do so, subject to the * following conditions: * * 1. The above copyright notices and this permission notice (which * includes the disclaimer below) shall be included in all copies or * substantial portions of the Software. * * 2. The name of a copyright holder shall not be used to endorse or * promote products derived from the Software without specific prior * written permission. * * THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS * LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER * THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS * "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL * INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE * PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE * THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY. * EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS * BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL * PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS * GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE * ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE * IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL * SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES, * AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL * SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH * THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH, * PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH * RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY * EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES. * * __END_OF_JASPER_LICENSE__ */ /* * I/O Stream Library * * $Id$ */ /******************************************************************************\ * Includes. 
\******************************************************************************/ #include <assert.h> #if defined(HAVE_FCNTL_H) #include <fcntl.h> #endif #include <stdlib.h> #include <stdarg.h> #include <stdio.h> #include <ctype.h> #if defined(HAVE_UNISTD_H) #include <unistd.h> #endif #if defined(WIN32) || defined(HAVE_IO_H) #include <io.h> #endif #include "jasper/jas_debug.h" #include "jasper/jas_types.h" #include "jasper/jas_stream.h" #include "jasper/jas_malloc.h" #include "jasper/jas_math.h" /******************************************************************************\ * Local function prototypes. \******************************************************************************/ static int jas_strtoopenmode(const char *s); static void jas_stream_destroy(jas_stream_t *stream); static jas_stream_t *jas_stream_create(void); static void jas_stream_initbuf(jas_stream_t *stream, int bufmode, char *buf, int bufsize); static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt); static int mem_write(jas_stream_obj_t *obj, char *buf, int cnt); static long mem_seek(jas_stream_obj_t *obj, long offset, int origin); static int mem_close(jas_stream_obj_t *obj); static int sfile_read(jas_stream_obj_t *obj, char *buf, int cnt); static int sfile_write(jas_stream_obj_t *obj, char *buf, int cnt); static long sfile_seek(jas_stream_obj_t *obj, long offset, int origin); static int sfile_close(jas_stream_obj_t *obj); static int file_read(jas_stream_obj_t *obj, char *buf, int cnt); static int file_write(jas_stream_obj_t *obj, char *buf, int cnt); static long file_seek(jas_stream_obj_t *obj, long offset, int origin); static int file_close(jas_stream_obj_t *obj); /******************************************************************************\ * Local data. \******************************************************************************/ static jas_stream_ops_t jas_stream_fileops = { file_read, file_write, file_seek, file_close }; static jas_stream_ops_t jas_stream_sfileops = { sfile_read, sfile_write, sfile_seek, sfile_close }; static jas_stream_ops_t jas_stream_memops = { mem_read, mem_write, mem_seek, mem_close }; /******************************************************************************\ * Code for opening and closing streams. \******************************************************************************/ static jas_stream_t *jas_stream_create() { jas_stream_t *stream; if (!(stream = jas_malloc(sizeof(jas_stream_t)))) { return 0; } stream->openmode_ = 0; stream->bufmode_ = 0; stream->flags_ = 0; stream->bufbase_ = 0; stream->bufstart_ = 0; stream->bufsize_ = 0; stream->ptr_ = 0; stream->cnt_ = 0; stream->ops_ = 0; stream->obj_ = 0; stream->rwcnt_ = 0; stream->rwlimit_ = -1; return stream; } #if 0 /* Obsolete code. */ jas_stream_t *jas_stream_memopen(char *buf, int bufsize) { jas_stream_t *stream; jas_stream_memobj_t *obj; JAS_DBGLOG(100, ("jas_stream_memopen(%p, %d)\n", buf, bufsize)); if (!(stream = jas_stream_create())) { return 0; } /* A stream associated with a memory buffer is always opened for both reading and writing in binary mode. */ stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY; /* Since the stream data is already resident in memory, buffering is not necessary. */ /* But... It still may be faster to use buffering anyways. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); /* Select the operations for a memory stream. */ stream->ops_ = &jas_stream_memops; /* Allocate memory for the underlying memory stream object. 
*/ if (!(obj = jas_malloc(sizeof(jas_stream_memobj_t)))) { jas_stream_destroy(stream); return 0; } stream->obj_ = (void *) obj; /* Initialize a few important members of the memory stream object. */ obj->myalloc_ = 0; obj->buf_ = 0; /* If the buffer size specified is nonpositive, then the buffer is allocated internally and automatically grown as needed. */ if (bufsize <= 0) { obj->bufsize_ = 1024; obj->growable_ = 1; } else { obj->bufsize_ = bufsize; obj->growable_ = 0; } if (buf) { obj->buf_ = (unsigned char *) buf; } else { obj->buf_ = jas_malloc(obj->bufsize_); obj->myalloc_ = 1; } if (!obj->buf_) { jas_stream_close(stream); return 0; } JAS_DBGLOG(100, ("jas_stream_memopen buffer buf=%p myalloc=%d\n", obj->buf_, obj->myalloc_)); if (bufsize > 0 && buf) { /* If a buffer was supplied by the caller and its length is positive, make the associated buffer data appear in the stream initially. */ obj->len_ = bufsize; } else { /* The stream is initially empty. */ obj->len_ = 0; } obj->pos_ = 0; return stream; } #else /* This function will eventually replace jas_stream_memopen. If buf is 0 and bufsize > 0: a buffer is dynamically allocated with size bufsize and this buffer is not growable. If buf is 0 and bufsize is 0: a buffer is dynamically allocated whose size will automatically grow to accommodate the amount of data written. If buf is not 0: bufsize (which, in this case, is not currently allowed to be zero) is the size of the (nongrowable) buffer pointed to by buf. */ jas_stream_t *jas_stream_memopen2(char *buf, size_t bufsize) { jas_stream_t *stream; jas_stream_memobj_t *obj; JAS_DBGLOG(100, ("jas_stream_memopen2(%p, %zu)\n", buf, bufsize)); assert((buf && bufsize > 0) || (!buf)); if (!(stream = jas_stream_create())) { return 0; } /* A stream associated with a memory buffer is always opened for both reading and writing in binary mode. */ stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY; /* Since the stream data is already resident in memory, buffering is not necessary. */ /* But... It still may be faster to use buffering anyways. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); /* Select the operations for a memory stream. */ stream->ops_ = &jas_stream_memops; /* Allocate memory for the underlying memory stream object. */ if (!(obj = jas_malloc(sizeof(jas_stream_memobj_t)))) { jas_stream_destroy(stream); return 0; } stream->obj_ = (void *) obj; /* Initialize a few important members of the memory stream object. */ obj->myalloc_ = 0; obj->buf_ = 0; /* If the buffer size specified is nonpositive, then the buffer is allocated internally and automatically grown as needed. */ if (!bufsize) { obj->bufsize_ = 1024; obj->growable_ = 1; } else { obj->bufsize_ = bufsize; obj->growable_ = 0; } if (buf) { obj->buf_ = JAS_CAST(unsigned char *, buf); } else { obj->buf_ = jas_malloc(obj->bufsize_); obj->myalloc_ = 1; } if (!obj->buf_) { jas_stream_close(stream); return 0; } JAS_DBGLOG(100, ("jas_stream_memopen2 buffer buf=%p myalloc=%d\n", obj->buf_, obj->myalloc_)); if (bufsize > 0 && buf) { /* If a buffer was supplied by the caller and its length is positive, make the associated buffer data appear in the stream initially. */ obj->len_ = bufsize; } else { /* The stream is initially empty. */ obj->len_ = 0; } obj->pos_ = 0; return stream; } /* NOTE: The version of the function jas_stream_memopen only exists for backwards compatibility. Eventually, it should be replaced by jas_stream_memopen2. 
In retrospect, it was a very poor choice to have specified the buffer size parameter (bufsize) to have type int. On some machines, int may only be a 16-bit integer. This precludes larger-sized buffer allocations, which are needed in practice. If bufsize <= 0, the buffer is growable; otherwise, the buffer has a fixed size of bufsize. If buf is 0, the buffer is dynamically allocated with jas_malloc. If buf is not 0 and bufsize <= 0 (which is not permitted in any circumstances), bad things will happen (especially if the buf was not allocated with jas_malloc). */ jas_stream_t *jas_stream_memopen(char *buf, int bufsize) { char *new_buf; size_t new_bufsize; JAS_DBGLOG(100, ("jas_stream_memopen(%p, %d)\n", buf, bufsize)); if (bufsize < 0) { jas_deprecated("negative buffer size for jas_stream_memopen"); } if (buf && bufsize <= 0) { // This was never a valid thing to do with the old API. jas_eprintf("Invalid use of jas_stream_memopen detected.\n"); jas_deprecated("A user-provided buffer for " "jas_stream_memopen cannot be growable.\n"); } if (bufsize <= 0) { new_bufsize = 0; new_buf = 0; } else { new_bufsize = bufsize; new_buf = buf; } return jas_stream_memopen2(new_buf, new_bufsize); } #endif jas_stream_t *jas_stream_fopen(const char *filename, const char *mode) { jas_stream_t *stream; jas_stream_fileobj_t *obj; int openflags; /* Allocate a stream object. */ if (!(stream = jas_stream_create())) { return 0; } /* Parse the mode string. */ stream->openmode_ = jas_strtoopenmode(mode); /* Determine the correct flags to use for opening the file. */ if ((stream->openmode_ & JAS_STREAM_READ) && (stream->openmode_ & JAS_STREAM_WRITE)) { openflags = O_RDWR; } else if (stream->openmode_ & JAS_STREAM_READ) { openflags = O_RDONLY; } else if (stream->openmode_ & JAS_STREAM_WRITE) { openflags = O_WRONLY; } else { openflags = 0; } if (stream->openmode_ & JAS_STREAM_APPEND) { openflags |= O_APPEND; } if (stream->openmode_ & JAS_STREAM_BINARY) { openflags |= O_BINARY; } if (stream->openmode_ & JAS_STREAM_CREATE) { openflags |= O_CREAT | O_TRUNC; } /* Allocate space for the underlying file stream object. */ if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) { jas_stream_destroy(stream); return 0; } obj->fd = -1; obj->flags = 0; obj->pathname[0] = '\0'; stream->obj_ = (void *) obj; /* Select the operations for a file stream object. */ stream->ops_ = &jas_stream_fileops; /* Open the underlying file. */ if ((obj->fd = open(filename, openflags, JAS_STREAM_PERMS)) < 0) { // Free the underlying file object, since it will not otherwise // be freed. jas_free(obj); jas_stream_destroy(stream); return 0; } /* By default, use full buffering for this type of stream. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); return stream; } jas_stream_t *jas_stream_freopen(const char *path, const char *mode, FILE *fp) { jas_stream_t *stream; int openflags; /* Eliminate compiler warning about unused variable. */ path = 0; /* Allocate a stream object. */ if (!(stream = jas_stream_create())) { return 0; } /* Parse the mode string. */ stream->openmode_ = jas_strtoopenmode(mode); /* Determine the correct flags to use for opening the file. 
*/ if ((stream->openmode_ & JAS_STREAM_READ) && (stream->openmode_ & JAS_STREAM_WRITE)) { openflags = O_RDWR; } else if (stream->openmode_ & JAS_STREAM_READ) { openflags = O_RDONLY; } else if (stream->openmode_ & JAS_STREAM_WRITE) { openflags = O_WRONLY; } else { openflags = 0; } if (stream->openmode_ & JAS_STREAM_APPEND) { openflags |= O_APPEND; } if (stream->openmode_ & JAS_STREAM_BINARY) { openflags |= O_BINARY; } if (stream->openmode_ & JAS_STREAM_CREATE) { openflags |= O_CREAT | O_TRUNC; } stream->obj_ = JAS_CAST(void *, fp); /* Select the operations for a file stream object. */ stream->ops_ = &jas_stream_sfileops; /* By default, use full buffering for this type of stream. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); return stream; } jas_stream_t *jas_stream_tmpfile() { jas_stream_t *stream; jas_stream_fileobj_t *obj; if (!(stream = jas_stream_create())) { return 0; } /* A temporary file stream is always opened for both reading and writing in binary mode. */ stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY; /* Allocate memory for the underlying temporary file object. */ if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) { jas_stream_destroy(stream); return 0; } obj->fd = -1; obj->flags = 0; obj->pathname[0] = '\0'; stream->obj_ = obj; /* Choose a file name. */ tmpnam(obj->pathname); /* Open the underlying file. */ if ((obj->fd = open(obj->pathname, O_CREAT | O_EXCL | O_RDWR | O_TRUNC | O_BINARY, JAS_STREAM_PERMS)) < 0) { jas_stream_destroy(stream); return 0; } /* Unlink the file so that it will disappear if the program terminates abnormally. */ /* Under UNIX, one can unlink an open file and continue to do I/O on it. Not all operating systems support this functionality, however. For example, under Microsoft Windows the unlink operation will fail, since the file is open. */ if (unlink(obj->pathname)) { /* We will try unlinking the file again after it is closed. */ obj->flags |= JAS_STREAM_FILEOBJ_DELONCLOSE; } /* Use full buffering. */ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); stream->ops_ = &jas_stream_fileops; return stream; } jas_stream_t *jas_stream_fdopen(int fd, const char *mode) { jas_stream_t *stream; jas_stream_fileobj_t *obj; /* Allocate a stream object. */ if (!(stream = jas_stream_create())) { return 0; } /* Parse the mode string. */ stream->openmode_ = jas_strtoopenmode(mode); #if defined(WIN32) /* Argh!!! Someone ought to banish text mode (i.e., O_TEXT) to the greatest depths of purgatory! */ /* Ensure that the file descriptor is in binary mode, if the caller has specified the binary mode flag. Arguably, the caller ought to take care of this, but text mode is a ugly wart anyways, so we save the caller some grief by handling this within the stream library. */ /* This ugliness is mainly for the benefit of those who run the JasPer software under Windows from shells that insist on opening files in text mode. For example, in the Cygwin environment, shells often open files in text mode when I/O redirection is used. Grr... */ if (stream->openmode_ & JAS_STREAM_BINARY) { setmode(fd, O_BINARY); } #endif /* Allocate space for the underlying file stream object. */ if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) { jas_stream_destroy(stream); return 0; } obj->fd = fd; obj->flags = 0; obj->pathname[0] = '\0'; stream->obj_ = (void *) obj; /* Do not close the underlying file descriptor when the stream is closed. */ obj->flags |= JAS_STREAM_FILEOBJ_NOCLOSE; /* By default, use full buffering for this type of stream. 
*/ jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0); /* Select the operations for a file stream object. */ stream->ops_ = &jas_stream_fileops; return stream; } static void jas_stream_destroy(jas_stream_t *stream) { /* If the memory for the buffer was allocated with malloc, free this memory. */ if ((stream->bufmode_ & JAS_STREAM_FREEBUF) && stream->bufbase_) { JAS_DBGLOG(100, ("jas_stream_destroy freeing buffer %p\n", stream->bufbase_)); jas_free(stream->bufbase_); stream->bufbase_ = 0; } jas_free(stream); } int jas_stream_close(jas_stream_t *stream) { /* Flush buffer if necessary. */ jas_stream_flush(stream); /* Close the underlying stream object. */ (*stream->ops_->close_)(stream->obj_); jas_stream_destroy(stream); return 0; } /******************************************************************************\ * Code for reading and writing streams. \******************************************************************************/ int jas_stream_getc_func(jas_stream_t *stream) { assert(stream->ptr_ - stream->bufbase_ <= stream->bufsize_ + JAS_STREAM_MAXPUTBACK); return jas_stream_getc_macro(stream); } int jas_stream_putc_func(jas_stream_t *stream, int c) { assert(stream->ptr_ - stream->bufstart_ <= stream->bufsize_); return jas_stream_putc_macro(stream, c); } int jas_stream_ungetc(jas_stream_t *stream, int c) { if (!stream->ptr_ || stream->ptr_ == stream->bufbase_) { return -1; } /* Reset the EOF indicator (since we now have at least one character to read). */ stream->flags_ &= ~JAS_STREAM_EOF; --stream->rwcnt_; --stream->ptr_; ++stream->cnt_; *stream->ptr_ = c; return 0; } int jas_stream_read(jas_stream_t *stream, void *buf, int cnt) { int n; int c; char *bufptr; if (cnt < 0) { jas_deprecated("negative count for jas_stream_read"); } bufptr = buf; n = 0; while (n < cnt) { if ((c = jas_stream_getc(stream)) == EOF) { return n; } *bufptr++ = c; ++n; } return n; } int jas_stream_write(jas_stream_t *stream, const void *buf, int cnt) { int n; const char *bufptr; if (cnt < 0) { jas_deprecated("negative count for jas_stream_write"); } bufptr = buf; n = 0; while (n < cnt) { if (jas_stream_putc(stream, *bufptr) == EOF) { return n; } ++bufptr; ++n; } return n; } /* Note: This function uses a fixed size buffer. Therefore, it cannot handle invocations that will produce more output than can be held by the buffer. */ int jas_stream_printf(jas_stream_t *stream, const char *fmt, ...) 
{
	va_list ap;
	char buf[4096];
	int ret;
	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof buf, fmt, ap);
	jas_stream_puts(stream, buf);
	va_end(ap);
	return ret;
}

int jas_stream_puts(jas_stream_t *stream, const char *s)
{
	while (*s != '\0') {
		if (jas_stream_putc_macro(stream, *s) == EOF) {
			return -1;
		}
		++s;
	}
	return 0;
}

char *jas_stream_gets(jas_stream_t *stream, char *buf, int bufsize)
{
	int c;
	char *bufptr;
	assert(bufsize > 0);
	bufptr = buf;
	while (bufsize > 1) {
		if ((c = jas_stream_getc(stream)) == EOF) {
			break;
		}
		*bufptr++ = c;
		--bufsize;
		if (c == '\n') {
			break;
		}
	}
	*bufptr = '\0';
	return buf;
}

int jas_stream_gobble(jas_stream_t *stream, int n)
{
	int m;
	if (n < 0) {
		jas_deprecated("negative count for jas_stream_gobble");
	}
	for (m = n; m > 0; --m) {
		if (jas_stream_getc(stream) == EOF) {
			return n - m;
		}
	}
	return n;
}

int jas_stream_pad(jas_stream_t *stream, int n, int c)
{
	int m;
	if (n < 0) {
		jas_deprecated("negative count for jas_stream_pad");
	}
	for (m = n; m > 0; --m) {
		if (jas_stream_putc(stream, c) == EOF)
			return n - m;
	}
	return n;
}

/******************************************************************************\
* Code for getting and setting the stream position.
\******************************************************************************/

int jas_stream_isseekable(jas_stream_t *stream)
{
	if (stream->ops_ == &jas_stream_memops) {
		return 1;
	} else if (stream->ops_ == &jas_stream_fileops) {
		if ((*stream->ops_->seek_)(stream->obj_, 0, SEEK_CUR) < 0) {
			return 0;
		}
		return 1;
	} else {
		return 0;
	}
}

int jas_stream_rewind(jas_stream_t *stream)
{
	return jas_stream_seek(stream, 0, SEEK_SET);
}

long jas_stream_seek(jas_stream_t *stream, long offset, int origin)
{
	long newpos;

	/* The buffer cannot be in use for both reading and writing. */
	assert(!((stream->bufmode_ & JAS_STREAM_RDBUF) && (stream->bufmode_ &
	  JAS_STREAM_WRBUF)));

	/* Reset the EOF indicator (since we may not be at the EOF anymore). */
	stream->flags_ &= ~JAS_STREAM_EOF;

	if (stream->bufmode_ & JAS_STREAM_RDBUF) {
		if (origin == SEEK_CUR) {
			offset -= stream->cnt_;
		}
	} else if (stream->bufmode_ & JAS_STREAM_WRBUF) {
		if (jas_stream_flush(stream)) {
			return -1;
		}
	}
	stream->cnt_ = 0;
	stream->ptr_ = stream->bufstart_;
	stream->bufmode_ &= ~(JAS_STREAM_RDBUF | JAS_STREAM_WRBUF);

	if ((newpos = (*stream->ops_->seek_)(stream->obj_, offset, origin)) < 0) {
		return -1;
	}
	return newpos;
}

long jas_stream_tell(jas_stream_t *stream)
{
	long adjust;
	long offset;

	/* The underlying seek operation returns a long, so longs are used
	here as well; an int intermediate would truncate positions past
	2 GiB. */
	if (stream->bufmode_ & JAS_STREAM_RDBUF) {
		adjust = -stream->cnt_;
	} else if (stream->bufmode_ & JAS_STREAM_WRBUF) {
		adjust = stream->ptr_ - stream->bufstart_;
	} else {
		adjust = 0;
	}

	if ((offset = (*stream->ops_->seek_)(stream->obj_, 0, SEEK_CUR)) < 0) {
		return -1;
	}

	return offset + adjust;
}

/******************************************************************************\
* Buffer initialization code.
\******************************************************************************/

static void jas_stream_initbuf(jas_stream_t *stream, int bufmode, char *buf,
  int bufsize)
{
	/* If this function is being called, the buffer should not have been
	initialized yet. */
	assert(!stream->bufbase_);

	if (bufmode != JAS_STREAM_UNBUF) {
		/* The full- or line-buffered mode is being employed. */
		if (!buf) {
			/* The caller has not specified a buffer to employ, so allocate
			one. */
			if ((stream->bufbase_ = jas_malloc(JAS_STREAM_BUFSIZE +
			  JAS_STREAM_MAXPUTBACK))) {
				stream->bufmode_ |= JAS_STREAM_FREEBUF;
				stream->bufsize_ = JAS_STREAM_BUFSIZE;
			} else {
				/* The buffer allocation has failed.
Resort to unbuffered operation. */ stream->bufbase_ = stream->tinybuf_; stream->bufsize_ = 1; } } else { /* The caller has specified a buffer to employ. */ /* The buffer must be large enough to accommodate maximum putback. */ assert(bufsize > JAS_STREAM_MAXPUTBACK); stream->bufbase_ = JAS_CAST(uchar *, buf); stream->bufsize_ = bufsize - JAS_STREAM_MAXPUTBACK; } } else { /* The unbuffered mode is being employed. */ /* A buffer should not have been supplied by the caller. */ assert(!buf); /* Use a trivial one-character buffer. */ stream->bufbase_ = stream->tinybuf_; stream->bufsize_ = 1; } stream->bufstart_ = &stream->bufbase_[JAS_STREAM_MAXPUTBACK]; stream->ptr_ = stream->bufstart_; stream->cnt_ = 0; stream->bufmode_ |= bufmode & JAS_STREAM_BUFMODEMASK; } /******************************************************************************\ * Buffer filling and flushing code. \******************************************************************************/ int jas_stream_flush(jas_stream_t *stream) { if (stream->bufmode_ & JAS_STREAM_RDBUF) { return 0; } return jas_stream_flushbuf(stream, EOF); } int jas_stream_fillbuf(jas_stream_t *stream, int getflag) { int c; /* The stream must not be in an error or EOF state. */ if ((stream->flags_ & (JAS_STREAM_ERRMASK)) != 0) { return EOF; } /* The stream must be open for reading. */ if ((stream->openmode_ & JAS_STREAM_READ) == 0) { return EOF; } /* Make a half-hearted attempt to confirm that the buffer is not currently being used for writing. This check is not intended to be foolproof! */ assert((stream->bufmode_ & JAS_STREAM_WRBUF) == 0); assert(stream->ptr_ - stream->bufstart_ <= stream->bufsize_); /* Mark the buffer as being used for reading. */ stream->bufmode_ |= JAS_STREAM_RDBUF; /* Read new data into the buffer. */ stream->ptr_ = stream->bufstart_; if ((stream->cnt_ = (*stream->ops_->read_)(stream->obj_, (char *) stream->bufstart_, stream->bufsize_)) <= 0) { if (stream->cnt_ < 0) { stream->flags_ |= JAS_STREAM_ERR; } else { stream->flags_ |= JAS_STREAM_EOF; } stream->cnt_ = 0; return EOF; } assert(stream->cnt_ > 0); /* Get or peek at the first character in the buffer. */ c = (getflag) ? jas_stream_getc2(stream) : (*stream->ptr_); return c; } int jas_stream_flushbuf(jas_stream_t *stream, int c) { int len; int n; /* The stream should not be in an error or EOF state. */ if ((stream->flags_ & (JAS_STREAM_ERRMASK)) != 0) { return EOF; } /* The stream must be open for writing. */ if ((stream->openmode_ & (JAS_STREAM_WRITE | JAS_STREAM_APPEND)) == 0) { return EOF; } /* The buffer should not currently be in use for reading. */ assert(!(stream->bufmode_ & JAS_STREAM_RDBUF)); /* Note: Do not use the quantity stream->cnt to determine the number of characters in the buffer! Depending on how this function was called, the stream->cnt value may be "off-by-one". */ len = stream->ptr_ - stream->bufstart_; if (len > 0) { n = (*stream->ops_->write_)(stream->obj_, (char *) stream->bufstart_, len); if (n != len) { stream->flags_ |= JAS_STREAM_ERR; return EOF; } } stream->cnt_ = stream->bufsize_; stream->ptr_ = stream->bufstart_; stream->bufmode_ |= JAS_STREAM_WRBUF; if (c != EOF) { assert(stream->cnt_ > 0); return jas_stream_putc2(stream, c); } return 0; } /******************************************************************************\ * Miscellaneous code. 
\******************************************************************************/

static int jas_strtoopenmode(const char *s)
{
	int openmode = 0;
	while (*s != '\0') {
		switch (*s) {
		case 'r':
			openmode |= JAS_STREAM_READ;
			break;
		case 'w':
			openmode |= JAS_STREAM_WRITE | JAS_STREAM_CREATE;
			break;
		case 'b':
			openmode |= JAS_STREAM_BINARY;
			break;
		case 'a':
			openmode |= JAS_STREAM_APPEND;
			break;
		case '+':
			openmode |= JAS_STREAM_READ | JAS_STREAM_WRITE;
			break;
		default:
			break;
		}
		++s;
	}
	return openmode;
}

int jas_stream_copy(jas_stream_t *out, jas_stream_t *in, int n)
{
	int all;
	int c;
	int m;
	all = (n < 0) ? 1 : 0;
	m = n;
	while (all || m > 0) {
		if ((c = jas_stream_getc_macro(in)) == EOF) {
			/* The next character of input could not be read. */
			/* Return with an error if an I/O error occurred
			(not including EOF) or if an explicit copy count
			was specified. */
			return (!all || jas_stream_error(in)) ? (-1) : 0;
		}
		if (jas_stream_putc_macro(out, c) == EOF) {
			return -1;
		}
		--m;
	}
	return 0;
}

long jas_stream_setrwcount(jas_stream_t *stream, long rwcnt)
{
	/* The saved count must be a long; an int would truncate it. */
	long old;
	old = stream->rwcnt_;
	stream->rwcnt_ = rwcnt;
	return old;
}

int jas_stream_display(jas_stream_t *stream, FILE *fp, int n)
{
	unsigned char buf[16];
	int i;
	int j;
	int m;
	int c;
	int display;
	int cnt;

	cnt = n - (n % 16);
	display = 1;

	for (i = 0; i < n; i += 16) {
		if (n > 16 && i > 0) {
			display = (i >= cnt) ? 1 : 0;
		}
		if (display) {
			fprintf(fp, "%08x:", i);
		}
		m = JAS_MIN(n - i, 16);
		for (j = 0; j < m; ++j) {
			if ((c = jas_stream_getc(stream)) == EOF) {
				/* Premature EOF is reported to the caller
				as an error rather than aborting. */
				return -1;
			}
			buf[j] = c;
		}
		if (display) {
			for (j = 0; j < m; ++j) {
				fprintf(fp, " %02x", buf[j]);
			}
			fputc(' ', fp);
			for (; j < 16; ++j) {
				fprintf(fp, "   ");
			}
			for (j = 0; j < m; ++j) {
				if (isprint(buf[j])) {
					fputc(buf[j], fp);
				} else {
					fputc(' ', fp);
				}
			}
			fprintf(fp, "\n");
		}
	}
	return 0;
}

long jas_stream_length(jas_stream_t *stream)
{
	long oldpos;
	long pos;
	if ((oldpos = jas_stream_tell(stream)) < 0) {
		return -1;
	}
	if (jas_stream_seek(stream, 0, SEEK_END) < 0) {
		return -1;
	}
	if ((pos = jas_stream_tell(stream)) < 0) {
		return -1;
	}
	if (jas_stream_seek(stream, oldpos, SEEK_SET) < 0) {
		return -1;
	}
	return pos;
}

/******************************************************************************\
* Memory stream object.
\******************************************************************************/

static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt)
{
	ssize_t n;
	assert(cnt >= 0);
	assert(buf);

	JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt));
	jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj;
	n = m->len_ - m->pos_;
	if (n <= 0) {
		/* The position is at or past the end of the data, so there is
		nothing to read; without this guard, a negative n would reach
		memcpy as a huge unsigned count. */
		return 0;
	}
	cnt = JAS_MIN(n, cnt);
	memcpy(buf, &m->buf_[m->pos_], cnt);
	m->pos_ += cnt;
	return cnt;
}

static int mem_resize(jas_stream_memobj_t *m, size_t bufsize)
{
	unsigned char *buf;

	//assert(m->buf_);
	//assert(bufsize >= 0);

	JAS_DBGLOG(100, ("mem_resize(%p, %zu)\n", m, bufsize));
	if (!bufsize) {
		jas_eprintf(
		  "mem_resize was not really designed to handle a buffer of size 0\n"
		  "This may not work.\n"
		);
	}

	if (!(buf = jas_realloc2(m->buf_, bufsize, sizeof(unsigned char))) &&
	  bufsize) {
		JAS_DBGLOG(100, ("mem_resize realloc failed\n"));
		return -1;
	}
	JAS_DBGLOG(100, ("mem_resize realloc succeeded\n"));
	m->buf_ = buf;
	m->bufsize_ = bufsize;

	return 0;
}

static int mem_write(jas_stream_obj_t *obj, char *buf, int cnt)
{
	size_t n;
	int ret;
	jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj;
	size_t newbufsize;
	size_t newpos;

	assert(buf);
	assert(cnt >= 0);

	JAS_DBGLOG(100, ("mem_write(%p, %p, %d)\n", obj, buf, cnt));
	newpos = m->pos_ + cnt;
	if (newpos > m->bufsize_ && m->growable_) {
		newbufsize = m->bufsize_;
		while (newbufsize < newpos) {
			//newbufsize <<= 1;
			if (!jas_safe_size_mul(newbufsize, 2, &newbufsize)) {
				JAS_DBGLOG(100, ("new buffer size would cause overflow\n"));
				return -1;
			}
		}
		/* bufsize_ is a size_t, so %zu (not %d) is the correct
		conversion specifier. */
		JAS_DBGLOG(100, ("mem_write resizing from %zu to %zu\n", m->bufsize_,
		  newbufsize));
		assert(newbufsize > 0);
		if (mem_resize(m, newbufsize)) {
			return -1;
		}
	}
	if (m->pos_ > m->len_) {
		/* The current position is beyond the end of the file, so
		pad the file to the current position with zeros. */
		n = JAS_MIN(m->pos_, m->bufsize_) - m->len_;
		if (n > 0) {
			memset(&m->buf_[m->len_], 0, n);
			m->len_ += n;
		}
		if (m->pos_ != m->len_) {
			/* The buffer is not big enough. */
			return 0;
		}
	}
	n = m->bufsize_ - m->pos_;
	ret = JAS_MIN(n, cnt);
	if (ret > 0) {
		memcpy(&m->buf_[m->pos_], buf, ret);
		m->pos_ += ret;
	}
	if (m->pos_ > m->len_) {
		m->len_ = m->pos_;
	}
	assert(ret == cnt);

	return ret;
}

static long mem_seek(jas_stream_obj_t *obj, long offset, int origin)
{
	jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj;
	size_t newpos;

	JAS_DBGLOG(100, ("mem_seek(%p, %ld, %d)\n", obj, offset, origin));
	switch (origin) {
	case SEEK_SET:
		if (offset < 0) {
			return -1;
		}
		newpos = offset;
		break;
	case SEEK_END:
		if (offset > 0 && JAS_CAST(size_t, offset) > m->len_) {
			return -1;
		}
		newpos = m->len_ - offset;
		break;
	case SEEK_CUR:
		if (offset < 0 && JAS_CAST(size_t, -offset) > m->pos_) {
			return -1;
		}
		newpos = m->pos_ + offset;
		break;
	default:
		abort();
		break;
	}
	/* newpos is unsigned, so a (newpos < 0) test can never fire;
	out-of-range seeks are rejected per case above instead. */
	m->pos_ = newpos;

	return m->pos_;
}

static int mem_close(jas_stream_obj_t *obj)
{
	JAS_DBGLOG(100, ("mem_close(%p)\n", obj));
	jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj;
	JAS_DBGLOG(100, ("mem_close myalloc=%d\n", m->myalloc_));
	if (m->myalloc_ && m->buf_) {
		JAS_DBGLOG(100, ("mem_close freeing buffer %p\n", m->buf_));
		jas_free(m->buf_);
		m->buf_ = 0;
	}
	jas_free(obj);
	return 0;
}

/******************************************************************************\
* File stream object.
\******************************************************************************/

static int file_read(jas_stream_obj_t *obj, char *buf, int cnt)
{
	jas_stream_fileobj_t *fileobj;
	JAS_DBGLOG(100, ("file_read(%p, %p, %d)\n", obj, buf, cnt));
	fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);
	return read(fileobj->fd, buf, cnt);
}

static int file_write(jas_stream_obj_t *obj, char *buf, int cnt)
{
	jas_stream_fileobj_t *fileobj;
	JAS_DBGLOG(100, ("file_write(%p, %p, %d)\n", obj, buf, cnt));
	fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);
	return write(fileobj->fd, buf, cnt);
}

static long file_seek(jas_stream_obj_t *obj, long offset, int origin)
{
	jas_stream_fileobj_t *fileobj;
	JAS_DBGLOG(100, ("file_seek(%p, %ld, %d)\n", obj, offset, origin));
	fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);
	return lseek(fileobj->fd, offset, origin);
}

static int file_close(jas_stream_obj_t *obj)
{
	jas_stream_fileobj_t *fileobj;
	JAS_DBGLOG(100, ("file_close(%p)\n", obj));
	fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);
	int ret;
	ret = close(fileobj->fd);
	if (fileobj->flags & JAS_STREAM_FILEOBJ_DELONCLOSE) {
		unlink(fileobj->pathname);
	}
	jas_free(fileobj);
	return ret;
}

/******************************************************************************\
* Stdio file stream object.
\******************************************************************************/

static int sfile_read(jas_stream_obj_t *obj, char *buf, int cnt)
{
	FILE *fp;
	size_t n;
	JAS_DBGLOG(100, ("sfile_read(%p, %p, %d)\n", obj, buf, cnt));
	fp = JAS_CAST(FILE *, obj);
	n = fread(buf, 1, cnt, fp);
	if (n != JAS_CAST(size_t, cnt)) {
		/* A short read is an error only when it was not caused by
		reaching end-of-file; this result must be returned here, not
		computed and then overwritten. */
		return (!ferror(fp) && feof(fp)) ? JAS_CAST(int, n) : -1;
	}
	return JAS_CAST(int, n);
}

static int sfile_write(jas_stream_obj_t *obj, char *buf, int cnt)
{
	FILE *fp;
	size_t n;
	JAS_DBGLOG(100, ("sfile_write(%p, %p, %d)\n", obj, buf, cnt));
	fp = JAS_CAST(FILE *, obj);
	n = fwrite(buf, 1, cnt, fp);
	return (n != JAS_CAST(size_t, cnt)) ? (-1) : cnt;
}

static long sfile_seek(jas_stream_obj_t *obj, long offset, int origin)
{
	FILE *fp;
	JAS_DBGLOG(100, ("sfile_seek(%p, %ld, %d)\n", obj, offset, origin));
	fp = JAS_CAST(FILE *, obj);
	return fseek(fp, offset, origin);
}

static int sfile_close(jas_stream_obj_t *obj)
{
	FILE *fp;
	JAS_DBGLOG(100, ("sfile_close(%p)\n", obj));
	fp = JAS_CAST(FILE *, obj);
	return fclose(fp);
}
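The security-relevant part of this record is the growable-buffer path in mem_write above: the pre-patch loop (visible in the diff below) doubled the size with newbufsize <<= 1, which silently wraps once the top bit is set, so a crafted write could end up with a wrapped-around, too-small buffer or a non-terminating loop; the patched code bails out via jas_safe_size_mul instead. The following is a minimal, self-contained sketch of that overflow-checked doubling; checked_size_mul is a hypothetical stand-in written here for illustration, not JasPer's actual jas_safe_size_mul.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for a checked multiply such as jas_safe_size_mul:
   returns 0 if x * y would exceed SIZE_MAX, otherwise stores the product. */
static int checked_size_mul(size_t x, size_t y, size_t *result)
{
	if (y != 0 && x > SIZE_MAX / y) {
		return 0; /* the product would wrap around */
	}
	*result = x * y;
	return 1;
}

/* Grow *bufsize by doubling until it covers "needed", failing cleanly
   instead of wrapping. The unchecked form (bufsize <<= 1) wraps to 0 once
   the top bit is set, after which the loop never terminates. */
static int grow_bufsize(size_t *bufsize, size_t needed)
{
	size_t n = (*bufsize != 0) ? *bufsize : 1;

	while (n < needed) {
		if (!checked_size_mul(n, 2, &n)) {
			return -1; /* doubling would overflow size_t */
		}
	}
	*bufsize = n;
	return 0;
}

The recorded before/after excerpts and the diff for this row follow.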
static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt) { int n; assert(cnt >= 0); assert(buf); JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt)); jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; n = m->len_ - m->pos_; cnt = JAS_MIN(n, cnt); memcpy(buf, &m->buf_[m->pos_], cnt); m->pos_ += cnt; return cnt; }
static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt) { ssize_t n; assert(cnt >= 0); assert(buf); JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt)); jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj; n = m->len_ - m->pos_; cnt = JAS_MIN(n, cnt); memcpy(buf, &m->buf_[m->pos_], cnt); m->pos_ += cnt; return cnt; }
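The two mem_read listings directly above are this row's recorded before/after pair; the only change is the type of n, from int to ssize_t. Because len_ and pos_ are size_t, the difference len_ - pos_ can exceed INT_MAX, and storing it in an int truncates before the JAS_MIN clamp ever runs. A small illustration with made-up sizes (assuming a 64-bit size_t; ptrdiff_t is used here as a portable substitute for POSIX ssize_t):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* Illustrative values only: a 6 GiB in-memory stream, read from offset 0. */
	size_t len = (size_t)6 * 1024 * 1024 * 1024;
	size_t pos = 0;

	int narrow = (int)(len - pos);           /* truncates; typically negative here */
	ptrdiff_t wide = (ptrdiff_t)(len - pos); /* keeps the value, as the patch does */

	printf("narrow=%d wide=%td\n", narrow, wide);
	return 0;
}

The full diff recorded for this row is next.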
{'added': [(172, '#if 0'), (173, ''), (174, '/* Obsolete code. */'), (175, ''), (245, '#else'), (246, ''), (247, '/*'), (248, 'This function will eventually replace jas_stream_memopen.'), (249, 'If buf is 0 and bufsize > 0:'), (250, '\ta buffer is dynamically allocated with size bufsize and this buffer is'), (251, '\tnot growable.'), (252, 'If buf is 0 and bufsize is 0:'), (253, '\ta buffer is dynamically allocated whose size will automatically grow to'), (254, '\taccommodate the amount of data written.'), (255, 'If buf is not 0:'), (256, '\tbufsize (which, in this case, is not currently allowed to be zero) is'), (257, '\tthe size of the (nongrowable) buffer pointed to by buf.'), (258, '*/'), (259, ''), (260, 'jas_stream_t *jas_stream_memopen2(char *buf, size_t bufsize)'), (261, '{'), (262, '\tjas_stream_t *stream;'), (263, '\tjas_stream_memobj_t *obj;'), (264, ''), (265, '\tJAS_DBGLOG(100, ("jas_stream_memopen2(%p, %zu)\\n", buf, bufsize));'), (266, ''), (267, '\tassert((buf && bufsize > 0) || (!buf));'), (268, ''), (269, '\tif (!(stream = jas_stream_create())) {'), (270, '\t\treturn 0;'), (271, '\t}'), (272, ''), (273, '\t/* A stream associated with a memory buffer is always opened'), (274, '\tfor both reading and writing in binary mode. */'), (275, '\tstream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY;'), (276, ''), (277, '\t/* Since the stream data is already resident in memory, buffering'), (278, '\tis not necessary. */'), (279, '\t/* But... It still may be faster to use buffering anyways. */'), (280, '\tjas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0);'), (281, ''), (282, '\t/* Select the operations for a memory stream. */'), (283, '\tstream->ops_ = &jas_stream_memops;'), (284, ''), (285, '\t/* Allocate memory for the underlying memory stream object. */'), (286, '\tif (!(obj = jas_malloc(sizeof(jas_stream_memobj_t)))) {'), (287, '\t\tjas_stream_destroy(stream);'), (288, '\t\treturn 0;'), (289, '\t}'), (290, '\tstream->obj_ = (void *) obj;'), (291, ''), (292, '\t/* Initialize a few important members of the memory stream object. */'), (293, '\tobj->myalloc_ = 0;'), (294, '\tobj->buf_ = 0;'), (295, ''), (296, '\t/* If the buffer size specified is nonpositive, then the buffer'), (297, '\tis allocated internally and automatically grown as needed. */'), (298, '\tif (!bufsize) {'), (299, '\t\tobj->bufsize_ = 1024;'), (300, '\t\tobj->growable_ = 1;'), (301, '\t} else {'), (302, '\t\tobj->bufsize_ = bufsize;'), (303, '\t\tobj->growable_ = 0;'), (304, '\t}'), (305, '\tif (buf) {'), (306, '\t\tobj->buf_ = JAS_CAST(unsigned char *, buf);'), (307, '\t} else {'), (308, '\t\tobj->buf_ = jas_malloc(obj->bufsize_);'), (309, '\t\tobj->myalloc_ = 1;'), (310, '\t}'), (311, '\tif (!obj->buf_) {'), (312, '\t\tjas_stream_close(stream);'), (313, '\t\treturn 0;'), (314, '\t}'), (315, '\tJAS_DBGLOG(100, ("jas_stream_memopen2 buffer buf=%p myalloc=%d\\n",'), (316, '\t obj->buf_, obj->myalloc_));'), (317, ''), (318, '\tif (bufsize > 0 && buf) {'), (319, '\t\t/* If a buffer was supplied by the caller and its length is positive,'), (320, '\t\t make the associated buffer data appear in the stream initially. */'), (321, '\t\tobj->len_ = bufsize;'), (322, '\t} else {'), (323, '\t\t/* The stream is initially empty. 
*/'), (324, '\t\tobj->len_ = 0;'), (325, '\t}'), (326, '\tobj->pos_ = 0;'), (327, ''), (328, '\treturn stream;'), (329, '}'), (330, ''), (331, '/*'), (332, 'NOTE:'), (333, 'The version of the function jas_stream_memopen only exists for backwards'), (334, 'compatibility.'), (335, 'Eventually, it should be replaced by jas_stream_memopen2.'), (336, 'In retrospect, it was a very poor choice to have specified the buffer'), (337, 'size parameter (bufsize) to have type int. On some machines, int may only'), (338, 'be a 16-bit integer. This precludes larger-sized buffer allocations, which'), (339, 'are needed in practice.'), (340, ''), (341, 'If bufsize <= 0, the buffer is growable; otherwise, the buffer has a fixed'), (342, 'size of bufsize.'), (343, 'If buf is 0, the buffer is dynamically allocated with jas_malloc.'), (344, 'If buf is not 0 and bufsize <= 0 (which is not permitted in any'), (345, 'circumstances), bad things will happen (especially if the buf was not'), (346, 'allocated with jas_malloc).'), (347, '*/'), (348, 'jas_stream_t *jas_stream_memopen(char *buf, int bufsize)'), (349, '{'), (350, '\tchar *new_buf;'), (351, '\tsize_t new_bufsize;'), (352, ''), (353, '\tJAS_DBGLOG(100, ("jas_stream_memopen(%p, %d)\\n", buf, bufsize));'), (354, '\tif (bufsize < 0) {'), (355, '\t\tjas_deprecated("negative buffer size for jas_stream_memopen");'), (356, '\t}'), (357, '\tif (buf && bufsize <= 0) {'), (358, '\t\t// This was never a valid thing to do with the old API.'), (359, '\t\tjas_eprintf("Invalid use of jas_stream_memopen detected.\\n");'), (360, '\t\tjas_deprecated("A user-provided buffer for "'), (361, '\t\t "jas_stream_memopen cannot be growable.\\n");'), (362, '\t}'), (363, '\tif (bufsize <= 0) {'), (364, '\t\tnew_bufsize = 0;'), (365, '\t\tnew_buf = 0;'), (366, '\t} else {'), (367, '\t\tnew_bufsize = bufsize;'), (368, '\t\tnew_buf = buf;'), (369, '\t}'), (370, '\treturn jas_stream_memopen2(new_buf, new_bufsize);'), (371, '}'), (372, ''), (373, '#endif'), (374, ''), (657, '\tif (cnt < 0) {'), (658, '\t\tjas_deprecated("negative count for jas_stream_read");'), (659, '\t}'), (660, ''), (680, '\tif (cnt < 0) {'), (681, '\t\tjas_deprecated("negative count for jas_stream_write");'), (682, '\t}'), (683, ''), (749, '\tif (n < 0) {'), (750, '\t\tjas_deprecated("negative count for jas_stream_gobble");'), (751, '\t}'), (764, '\tif (n < 0) {'), (765, '\t\tjas_deprecated("negative count for jas_stream_pad");'), (766, '\t}'), (1139, '\tssize_t n;'), (1152, 'static int mem_resize(jas_stream_memobj_t *m, size_t bufsize)'), (1157, '\t//assert(bufsize >= 0);'), (1158, ''), (1159, '\tJAS_DBGLOG(100, ("mem_resize(%p, %zu)\\n", m, bufsize));'), (1160, '\tif (!bufsize) {'), (1161, '\t\tjas_eprintf('), (1162, '\t\t "mem_resize was not really designed to handle a buffer of size 0\\n"'), (1163, '\t\t "This may not work.\\n"'), (1164, '\t\t);'), (1165, '\t}'), (1180, '\tsize_t n;'), (1183, '\tsize_t newbufsize;'), (1184, '\tsize_t newpos;'), (1194, '\t\t\t//newbufsize <<= 1;'), (1195, '\t\t\tif (!jas_safe_size_mul(newbufsize, 2, &newbufsize)) {'), (1196, '\t\t\t\tJAS_DBGLOG(100, ("new buffer size would cause overflow\\n"));'), (1197, '\t\t\t\treturn -1;'), (1198, '\t\t\t}'), (1200, '\t\tJAS_DBGLOG(100, ("mem_write resizing from %d to %zu\\n", m->bufsize_,'), (1202, '\t\tassert(newbufsize > 0);'), (1236, '\tsize_t newpos;')], 'deleted': [(991, '\tint n;'), (1004, 'static int mem_resize(jas_stream_memobj_t *m, int bufsize)'), (1009, '\tassert(bufsize >= 0);'), (1011, '\tJAS_DBGLOG(100, ("mem_resize(%p, %d)\\n", m, 
bufsize));'), (1025, '\tint n;'), (1028, '\tlong newbufsize;'), (1029, '\tlong newpos;'), (1039, '\t\t\tnewbufsize <<= 1;'), (1040, '\t\t\tassert(newbufsize >= 0);'), (1042, '\t\tJAS_DBGLOG(100, ("mem_write resizing from %d to %z\\n", m->bufsize_,'), (1044, '\t\tJAS_DBGLOG(100, ("mem_write resizing from %d to %ul\\n", m->bufsize_,'), (1045, '\t\t JAS_CAST(unsigned long, newbufsize)));'), (1079, '\tlong newpos;')]}
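Per the comments in the patched source above, jas_stream_memopen2 makes the three buffer-ownership cases explicit, while the retained jas_stream_memopen only normalizes its legacy int argument and delegates. A hedged usage sketch of the three documented cases (error handling elided; the buffer contents and sizes are placeholders):

/* Case 1: buf == 0, bufsize == 0 -> internally allocated, growable buffer. */
jas_stream_t *grow = jas_stream_memopen2(0, 0);

/* Case 2: buf == 0, bufsize > 0 -> internally allocated, fixed-size buffer. */
jas_stream_t *fixed = jas_stream_memopen2(0, 4096);

/* Case 3: buf != 0, bufsize > 0 -> caller-owned, fixed-size buffer; its
   current contents become the initial stream data (len_ = bufsize), and
   mem_close will not free it because myalloc_ stays 0. */
char data[256] = {0};
jas_stream_t *wrap = jas_stream_memopen2(data, sizeof data);

jas_stream_close(grow);
jas_stream_close(fixed);
jas_stream_close(wrap);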
num_lines_added: 170
num_lines_deleted: 13
num_lines_in_file: 911
num_tokens_in_file: 5,339
num_lines_in_method: 13
num_tokens_in_method: 100
method_complexity: 1
repo: https://github.com/mdadams/jasper
cve_id: CVE-2016-9262
cwe_id: CWE-190
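CWE-190 (integer overflow or wraparound) is the weakness class assigned to this record, and the NOTE block in the patched source pins the root cause on carrying buffer sizes in int. As a worked bound (illustrative only, not code from the repository): with a 16-bit int, INT_MAX is 32767, so any size above 16383 already wraps when doubled; with a 32-bit int the corresponding threshold is 1073741823.

#include <limits.h>

/* Illustrative headroom check before doubling an int-typed size. */
static int can_double(int bufsize)
{
	return bufsize >= 0 && bufsize <= INT_MAX / 2;
}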
id: 253
file_name: mach0.c
programming_language: C
method_name: parse_import_ptr
/* radare - LGPL - Copyright 2010-2018 - nibble, pancake */ #include <stdio.h> #include <r_types.h> #include <r_util.h> #include "mach0.h" #define bprintf if (bin->verbose) eprintf typedef struct _ulebr { ut8 *p; } ulebr; // OMG; THIS SHOULD BE KILLED; this var exposes the local native endian, which is completely unnecessary static bool little_; static ut64 read_uleb128(ulebr *r, ut8 *end) { ut64 result = 0; int bit = 0; ut64 slice = 0; ut8 *p = r->p; do { if (p == end) { eprintf ("malformed uleb128"); break; } slice = *p & 0x7f; if (bit > 63) { eprintf ("uleb128 too big for uint64, bit=%d, result=0x%"PFMT64x, bit, result); } else { result |= (slice << bit); bit += 7; } } while (*p++ & 0x80); r->p = p; return result; } static st64 read_sleb128(ulebr *r, ut8 *end) { st64 result = 0; int bit = 0; ut8 byte = 0; ut8 *p = r->p; do { if (p == end) { eprintf ("malformed sleb128"); break; } byte = *p++; result |= (((st64)(byte & 0x7f)) << bit); bit += 7; } while (byte & 0x80); // sign extend negative numbers if ((byte & 0x40)) { result |= (-1LL) << bit; } r->p = p; return result; } static ut64 entry_to_vaddr(struct MACH0_(obj_t)* bin) { switch (bin->main_cmd.cmd) { case LC_MAIN: return bin->entry + bin->baddr; case LC_UNIXTHREAD: case LC_THREAD: return bin->entry; default: return 0; } } static ut64 addr_to_offset(struct MACH0_(obj_t)* bin, ut64 addr) { ut64 segment_base, segment_size; int i; if (!bin->segs) { return 0; } for (i = 0; i < bin->nsegs; i++) { segment_base = (ut64)bin->segs[i].vmaddr; segment_size = (ut64)bin->segs[i].vmsize; if (addr >= segment_base && addr < segment_base + segment_size) { return bin->segs[i].fileoff + (addr - segment_base); } } return 0; } static int init_hdr(struct MACH0_(obj_t)* bin) { ut8 magicbytes[4] = {0}; ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0}; int len; if (r_buf_read_at (bin->b, 0, magicbytes, 4) < 1) { return false; } if (r_read_le32 (magicbytes) == 0xfeedface) { bin->big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedface) { bin->big_endian = true; } else if (r_read_le32(magicbytes) == FAT_MAGIC) { bin->big_endian = false; } else if (r_read_be32(magicbytes) == FAT_MAGIC) { bin->big_endian = true; } else if (r_read_le32(magicbytes) == 0xfeedfacf) { bin->big_endian = false; } else if (r_read_be32(magicbytes) == 0xfeedfacf) { bin->big_endian = true; } else { return false; // object files are magic == 0, but body is different :? } len = r_buf_read_at (bin->b, 0, machohdrbytes, sizeof (machohdrbytes)); if (len != sizeof (machohdrbytes)) { bprintf ("Error: read (hdr)\n"); return false; } bin->hdr.magic = r_read_ble (&machohdrbytes[0], bin->big_endian, 32); bin->hdr.cputype = r_read_ble (&machohdrbytes[4], bin->big_endian, 32); bin->hdr.cpusubtype = r_read_ble (&machohdrbytes[8], bin->big_endian, 32); bin->hdr.filetype = r_read_ble (&machohdrbytes[12], bin->big_endian, 32); bin->hdr.ncmds = r_read_ble (&machohdrbytes[16], bin->big_endian, 32); bin->hdr.sizeofcmds = r_read_ble (&machohdrbytes[20], bin->big_endian, 32); bin->hdr.flags = r_read_ble (&machohdrbytes[24], bin->big_endian, 32); #if R_BIN_MACH064 bin->hdr.reserved = r_read_ble (&machohdrbytes[28], bin->big_endian, 32); #endif sdb_set (bin->kv, "mach0_header.format", "xxxxddx " "magic cputype cpusubtype filetype ncmds sizeofcmds flags", 0); sdb_num_set (bin->kv, "mach0_header.offset", 0, 0); // wat about fatmach0? 
sdb_set (bin->kv, "mach_filetype.cparse", "enum mach_filetype{MH_OBJECT=1," "MH_EXECUTE=2, MH_FVMLIB=3, MH_CORE=4, MH_PRELOAD=5, MH_DYLIB=6," "MH_DYLINKER=7, MH_BUNDLE=8, MH_DYLIB_STUB=9, MH_DSYM=10," "MH_KEXT_BUNDLE=11}" ,0); sdb_set (bin->kv, "mach_flags.cparse", "enum mach_flags{MH_NOUNDEFS=1," "MH_INCRLINK=2,MH_DYLDLINK=4,MH_BINDATLOAD=8,MH_PREBOUND=0x10," "MH_SPLIT_SEGS=0x20,MH_LAZY_INIT=0x40,MH_TWOLEVEL=0x80," "MH_FORCE_FLAT=0x100,MH_NOMULTIDEFS=0x200,MH_NOFIXPREBINDING=0x400," "MH_PREBINDABLE=0x800, MH_ALLMODSBOUND=0x1000," "MH_SUBSECTIONS_VIA_SYMBOLS=0x2000," "MH_CANONICAL=0x4000,MH_WEAK_DEFINES=0x8000," "MH_BINDS_TO_WEAK=0x10000,MH_ALLOW_STACK_EXECUTION=0x20000," "MH_ROOT_SAFE=0x40000,MH_SETUID_SAFE=0x80000," "MH_NO_REEXPORTED_DYLIBS=0x100000,MH_PIE=0x200000," "MH_DEAD_STRIPPABLE_DYLIB=0x400000," "MH_HAS_TLV_DESCRIPTORS=0x800000," "MH_NO_HEAP_EXECUTION=0x1000000 }",0); return true; } static int parse_segments(struct MACH0_(obj_t)* bin, ut64 off) { int i, j, k, sect, len; ut32 size_sects; ut8 segcom[sizeof (struct MACH0_(segment_command))] = {0}; ut8 sec[sizeof (struct MACH0_(section))] = {0}; if (!UT32_MUL (&size_sects, bin->nsegs, sizeof (struct MACH0_(segment_command)))) { return false; } if (!size_sects || size_sects > bin->size) { return false; } if (off > bin->size || off + sizeof (struct MACH0_(segment_command)) > bin->size) { return false; } if (!(bin->segs = realloc (bin->segs, bin->nsegs * sizeof(struct MACH0_(segment_command))))) { perror ("realloc (seg)"); return false; } j = bin->nsegs - 1; len = r_buf_read_at (bin->b, off, segcom, sizeof (struct MACH0_(segment_command))); if (len != sizeof (struct MACH0_(segment_command))) { bprintf ("Error: read (seg)\n"); return false; } i = 0; bin->segs[j].cmd = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].cmdsize = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); memcpy (&bin->segs[j].segname, &segcom[i], 16); i += 16; #if R_BIN_MACH064 bin->segs[j].vmaddr = r_read_ble64 (&segcom[i], bin->big_endian); i += sizeof (ut64); bin->segs[j].vmsize = r_read_ble64 (&segcom[i], bin->big_endian); i += sizeof (ut64); bin->segs[j].fileoff = r_read_ble64 (&segcom[i], bin->big_endian); i += sizeof (ut64); bin->segs[j].filesize = r_read_ble64 (&segcom[i], bin->big_endian); i += sizeof (ut64); #else bin->segs[j].vmaddr = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].vmsize = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].fileoff = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].filesize = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); #endif bin->segs[j].maxprot = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].initprot = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].nsects = r_read_ble32 (&segcom[i], bin->big_endian); i += sizeof (ut32); bin->segs[j].flags = r_read_ble32 (&segcom[i], bin->big_endian); sdb_num_set (bin->kv, sdb_fmt ("mach0_segment_%d.offset", j), off, 0); sdb_num_set (bin->kv, "mach0_segments.count", 0, 0); sdb_set (bin->kv, "mach0_segment.format", "xd[16]zxxxxoodx " "cmd cmdsize segname vmaddr vmsize " "fileoff filesize maxprot initprot nsects flags", 0); if (bin->segs[j].nsects > 0) { sect = bin->nsects; bin->nsects += bin->segs[j].nsects; if (bin->nsects > 128) { int new_nsects = bin->nsects & 0xf; bprintf ("WARNING: mach0 header contains too many sections (%d). 
Wrapping to %d\n", bin->nsects, new_nsects); bin->nsects = new_nsects; } if ((int)bin->nsects < 1) { bprintf ("Warning: Invalid number of sections\n"); bin->nsects = sect; return false; } if (!UT32_MUL (&size_sects, bin->nsects-sect, sizeof (struct MACH0_(section)))){ bin->nsects = sect; return false; } if (!size_sects || size_sects > bin->size){ bin->nsects = sect; return false; } if (bin->segs[j].cmdsize != sizeof (struct MACH0_(segment_command)) \ + (sizeof (struct MACH0_(section))*bin->segs[j].nsects)){ bin->nsects = sect; return false; } if (off + sizeof (struct MACH0_(segment_command)) > bin->size ||\ off + sizeof (struct MACH0_(segment_command)) + size_sects > bin->size){ bin->nsects = sect; return false; } if (!(bin->sects = realloc (bin->sects, bin->nsects * sizeof (struct MACH0_(section))))) { perror ("realloc (sects)"); bin->nsects = sect; return false; } for (k = sect, j = 0; k < bin->nsects; k++, j++) { ut64 offset = off + sizeof (struct MACH0_(segment_command)) + j * sizeof (struct MACH0_(section)); len = r_buf_read_at (bin->b, offset, sec, sizeof (struct MACH0_(section))); if (len != sizeof (struct MACH0_(section))) { bprintf ("Error: read (sects)\n"); bin->nsects = sect; return false; } i = 0; memcpy (&bin->sects[k].sectname, &sec[i], 16); i += 16; memcpy (&bin->sects[k].segname, &sec[i], 16); bin->sects[k].segname[15] = 0; i += 16; #if R_BIN_MACH064 bin->sects[k].addr = r_read_ble64 (&sec[i], bin->big_endian); i += sizeof (ut64); bin->sects[k].size = r_read_ble64 (&sec[i], bin->big_endian); i += sizeof (ut64); #else bin->sects[k].addr = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].size = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); #endif bin->sects[k].offset = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].align = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].reloff = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].nreloc = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].flags = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].reserved1 = r_read_ble32 (&sec[i], bin->big_endian); i += sizeof (ut32); bin->sects[k].reserved2 = r_read_ble32 (&sec[i], bin->big_endian); #if R_BIN_MACH064 i += sizeof (ut32); bin->sects[k].reserved3 = r_read_ble32 (&sec[i], bin->big_endian); #endif } } return true; } static int parse_symtab(struct MACH0_(obj_t)* bin, ut64 off) { struct symtab_command st; ut32 size_sym; int i; ut8 symt[sizeof (struct symtab_command)] = {0}; ut8 nlst[sizeof (struct MACH0_(nlist))] = {0}; if (off > (ut64)bin->size || off + sizeof (struct symtab_command) > (ut64)bin->size) { return false; } int len = r_buf_read_at (bin->b, off, symt, sizeof (struct symtab_command)); if (len != sizeof (struct symtab_command)) { bprintf ("Error: read (symtab)\n"); return false; } st.cmd = r_read_ble32 (&symt[0], bin->big_endian); st.cmdsize = r_read_ble32 (&symt[4], bin->big_endian); st.symoff = r_read_ble32 (&symt[8], bin->big_endian); st.nsyms = r_read_ble32 (&symt[12], bin->big_endian); st.stroff = r_read_ble32 (&symt[16], bin->big_endian); st.strsize = r_read_ble32 (&symt[20], bin->big_endian); bin->symtab = NULL; bin->nsymtab = 0; if (st.strsize > 0 && st.strsize < bin->size && st.nsyms > 0) { bin->nsymtab = st.nsyms; if (st.stroff > bin->size || st.stroff + st.strsize > bin->size) { return false; } if (!UT32_MUL (&size_sym, bin->nsymtab, sizeof (struct MACH0_(nlist)))) { bprintf("fail2\n"); return 
false; } if (!size_sym) { bprintf("fail3\n"); return false; } if (st.symoff > bin->size || st.symoff + size_sym > bin->size) { bprintf("fail4\n"); return false; } if (!(bin->symstr = calloc (1, st.strsize + 2))) { perror ("calloc (symstr)"); return false; } bin->symstrlen = st.strsize; len = r_buf_read_at (bin->b, st.stroff, (ut8*)bin->symstr, st.strsize); if (len != st.strsize) { bprintf ("Error: read (symstr)\n"); R_FREE (bin->symstr); return false; } if (!(bin->symtab = calloc (bin->nsymtab, sizeof (struct MACH0_(nlist))))) { perror ("calloc (symtab)"); return false; } for (i = 0; i < bin->nsymtab; i++) { len = r_buf_read_at (bin->b, st.symoff + (i * sizeof (struct MACH0_(nlist))), nlst, sizeof (struct MACH0_(nlist))); if (len != sizeof (struct MACH0_(nlist))) { bprintf ("Error: read (nlist)\n"); R_FREE (bin->symtab); return false; } //XXX not very safe what if is n_un.n_name instead? bin->symtab[i].n_strx = r_read_ble32 (&nlst[0], bin->big_endian); bin->symtab[i].n_type = r_read_ble8 (&nlst[4]); bin->symtab[i].n_sect = r_read_ble8 (&nlst[5]); bin->symtab[i].n_desc = r_read_ble16 (&nlst[6], bin->big_endian); #if R_BIN_MACH064 bin->symtab[i].n_value = r_read_ble64 (&nlst[8], bin->big_endian); #else bin->symtab[i].n_value = r_read_ble32 (&nlst[8], bin->big_endian); #endif } } return true; } static int parse_dysymtab(struct MACH0_(obj_t)* bin, ut64 off) { int len, i; ut32 size_tab; ut8 dysym[sizeof (struct dysymtab_command)] = {0}; ut8 dytoc[sizeof (struct dylib_table_of_contents)] = {0}; ut8 dymod[sizeof (struct MACH0_(dylib_module))] = {0}; ut8 idsyms[sizeof (ut32)] = {0}; if (off > bin->size || off + sizeof (struct dysymtab_command) > bin->size) { return false; } len = r_buf_read_at (bin->b, off, dysym, sizeof (struct dysymtab_command)); if (len != sizeof (struct dysymtab_command)) { bprintf ("Error: read (dysymtab)\n"); return false; } bin->dysymtab.cmd = r_read_ble32 (&dysym[0], bin->big_endian); bin->dysymtab.cmdsize = r_read_ble32 (&dysym[4], bin->big_endian); bin->dysymtab.ilocalsym = r_read_ble32 (&dysym[8], bin->big_endian); bin->dysymtab.nlocalsym = r_read_ble32 (&dysym[12], bin->big_endian); bin->dysymtab.iextdefsym = r_read_ble32 (&dysym[16], bin->big_endian); bin->dysymtab.nextdefsym = r_read_ble32 (&dysym[20], bin->big_endian); bin->dysymtab.iundefsym = r_read_ble32 (&dysym[24], bin->big_endian); bin->dysymtab.nundefsym = r_read_ble32 (&dysym[28], bin->big_endian); bin->dysymtab.tocoff = r_read_ble32 (&dysym[32], bin->big_endian); bin->dysymtab.ntoc = r_read_ble32 (&dysym[36], bin->big_endian); bin->dysymtab.modtaboff = r_read_ble32 (&dysym[40], bin->big_endian); bin->dysymtab.nmodtab = r_read_ble32 (&dysym[44], bin->big_endian); bin->dysymtab.extrefsymoff = r_read_ble32 (&dysym[48], bin->big_endian); bin->dysymtab.nextrefsyms = r_read_ble32 (&dysym[52], bin->big_endian); bin->dysymtab.indirectsymoff = r_read_ble32 (&dysym[56], bin->big_endian); bin->dysymtab.nindirectsyms = r_read_ble32 (&dysym[60], bin->big_endian); bin->dysymtab.extreloff = r_read_ble32 (&dysym[64], bin->big_endian); bin->dysymtab.nextrel = r_read_ble32 (&dysym[68], bin->big_endian); bin->dysymtab.locreloff = r_read_ble32 (&dysym[72], bin->big_endian); bin->dysymtab.nlocrel = r_read_ble32 (&dysym[76], bin->big_endian); bin->ntoc = bin->dysymtab.ntoc; if (bin->ntoc > 0) { if (!(bin->toc = calloc (bin->ntoc, sizeof (struct dylib_table_of_contents)))) { perror ("calloc (toc)"); return false; } if (!UT32_MUL (&size_tab, bin->ntoc, sizeof (struct dylib_table_of_contents))){ R_FREE (bin->toc); return false; } 
if (!size_tab){ R_FREE (bin->toc); return false; } if (bin->dysymtab.tocoff > bin->size || bin->dysymtab.tocoff + size_tab > bin->size){ R_FREE (bin->toc); return false; } for (i = 0; i < bin->ntoc; i++) { len = r_buf_read_at (bin->b, bin->dysymtab.tocoff + i * sizeof (struct dylib_table_of_contents), dytoc, sizeof (struct dylib_table_of_contents)); if (len != sizeof (struct dylib_table_of_contents)) { bprintf ("Error: read (toc)\n"); R_FREE (bin->toc); return false; } bin->toc[i].symbol_index = r_read_ble32 (&dytoc[0], bin->big_endian); bin->toc[i].module_index = r_read_ble32 (&dytoc[4], bin->big_endian); } } bin->nmodtab = bin->dysymtab.nmodtab; if (bin->nmodtab > 0) { if (!(bin->modtab = calloc (bin->nmodtab, sizeof (struct MACH0_(dylib_module))))) { perror ("calloc (modtab)"); return false; } if (!UT32_MUL (&size_tab, bin->nmodtab, sizeof (struct MACH0_(dylib_module)))){ R_FREE (bin->modtab); return false; } if (!size_tab){ R_FREE (bin->modtab); return false; } if (bin->dysymtab.modtaboff > bin->size || \ bin->dysymtab.modtaboff + size_tab > bin->size){ R_FREE (bin->modtab); return false; } for (i = 0; i < bin->nmodtab; i++) { len = r_buf_read_at (bin->b, bin->dysymtab.modtaboff + i * sizeof (struct MACH0_(dylib_module)), dymod, sizeof (struct MACH0_(dylib_module))); if (len == -1) { bprintf ("Error: read (modtab)\n"); R_FREE (bin->modtab); return false; } bin->modtab[i].module_name = r_read_ble32 (&dymod[0], bin->big_endian); bin->modtab[i].iextdefsym = r_read_ble32 (&dymod[4], bin->big_endian); bin->modtab[i].nextdefsym = r_read_ble32 (&dymod[8], bin->big_endian); bin->modtab[i].irefsym = r_read_ble32 (&dymod[12], bin->big_endian); bin->modtab[i].nrefsym = r_read_ble32 (&dymod[16], bin->big_endian); bin->modtab[i].ilocalsym = r_read_ble32 (&dymod[20], bin->big_endian); bin->modtab[i].nlocalsym = r_read_ble32 (&dymod[24], bin->big_endian); bin->modtab[i].iextrel = r_read_ble32 (&dymod[28], bin->big_endian); bin->modtab[i].nextrel = r_read_ble32 (&dymod[32], bin->big_endian); bin->modtab[i].iinit_iterm = r_read_ble32 (&dymod[36], bin->big_endian); bin->modtab[i].ninit_nterm = r_read_ble32 (&dymod[40], bin->big_endian); #if R_BIN_MACH064 bin->modtab[i].objc_module_info_size = r_read_ble32 (&dymod[44], bin->big_endian); bin->modtab[i].objc_module_info_addr = r_read_ble64 (&dymod[48], bin->big_endian); #else bin->modtab[i].objc_module_info_addr = r_read_ble32 (&dymod[44], bin->big_endian); bin->modtab[i].objc_module_info_size = r_read_ble32 (&dymod[48], bin->big_endian); #endif } } bin->nindirectsyms = bin->dysymtab.nindirectsyms; if (bin->nindirectsyms > 0) { if (!(bin->indirectsyms = calloc (bin->nindirectsyms, sizeof (ut32)))) { perror ("calloc (indirectsyms)"); return false; } if (!UT32_MUL (&size_tab, bin->nindirectsyms, sizeof (ut32))){ R_FREE (bin->indirectsyms); return false; } if (!size_tab){ R_FREE (bin->indirectsyms); return false; } if (bin->dysymtab.indirectsymoff > bin->size || \ bin->dysymtab.indirectsymoff + size_tab > bin->size){ R_FREE (bin->indirectsyms); return false; } for (i = 0; i < bin->nindirectsyms; i++) { len = r_buf_read_at (bin->b, bin->dysymtab.indirectsymoff + i * sizeof (ut32), idsyms, 4); if (len == -1) { bprintf ("Error: read (indirect syms)\n"); R_FREE (bin->indirectsyms); return false; } bin->indirectsyms[i] = r_read_ble32 (&idsyms[0], bin->big_endian); } } /* TODO extrefsyms, extrel, locrel */ return true; } static bool parse_signature(struct MACH0_(obj_t) *bin, ut64 off) { int i,len; ut32 data; bin->signature = NULL; struct linkedit_data_command link 
= {0}; ut8 lit[sizeof (struct linkedit_data_command)] = {0}; struct blob_index_t idx = {0}; struct super_blob_t super = {{0}}; if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) { return false; } len = r_buf_read_at (bin->b, off, lit, sizeof (struct linkedit_data_command)); if (len != sizeof (struct linkedit_data_command)) { bprintf ("Failed to get data while parsing LC_CODE_SIGNATURE command\n"); return false; } link.cmd = r_read_ble32 (&lit[0], bin->big_endian); link.cmdsize = r_read_ble32 (&lit[4], bin->big_endian); link.dataoff = r_read_ble32 (&lit[8], bin->big_endian); link.datasize = r_read_ble32 (&lit[12], bin->big_endian); data = link.dataoff; if (data > bin->size || data + sizeof (struct super_blob_t) > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); return true; } super.blob.magic = r_read_ble32 (bin->b->buf + data, little_); super.blob.length = r_read_ble32 (bin->b->buf + data + 4, little_); super.count = r_read_ble32 (bin->b->buf + data + 8, little_); for (i = 0; i < super.count; ++i) { if ((ut8 *)(bin->b->buf + data + i) > (ut8 *)(bin->b->buf + bin->size)) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); break; } struct blob_index_t bi; if (r_buf_read_at (bin->b, data + 12 + (i * sizeof (struct blob_index_t)), (ut8*)&bi, sizeof (struct blob_index_t)) < sizeof (struct blob_index_t)) { break; } idx.type = r_read_ble32 (&bi.type, little_); idx.offset = r_read_ble32 (&bi.offset, little_); if (idx.type == CSSLOT_ENTITLEMENTS) { ut64 off = data + idx.offset; if (off > bin->size || off + sizeof (struct blob_t) > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); break; } struct blob_t entitlements = {0}; entitlements.magic = r_read_ble32 (bin->b->buf + off, little_); entitlements.length = r_read_ble32 (bin->b->buf + off + 4, little_); len = entitlements.length - sizeof (struct blob_t); if (len <= bin->size && len > 1) { bin->signature = calloc (1, len + 1); if (bin->signature) { ut8 *src = bin->b->buf + off + sizeof (struct blob_t); if (off + sizeof (struct blob_t) + len < bin->b->length) { memcpy (bin->signature, src, len); bin->signature[len] = '\0'; return true; } bin->signature = (ut8 *)strdup ("Malformed entitlement"); return true; } } else { bin->signature = (ut8 *)strdup ("Malformed entitlement"); } } } if (!bin->signature) { bin->signature = (ut8 *)strdup ("No entitlement found"); } return true; } static int parse_thread(struct MACH0_(obj_t)* bin, struct load_command *lc, ut64 off, bool is_first_thread) { ut64 ptr_thread, pc = UT64_MAX, pc_offset = UT64_MAX; ut32 flavor, count; ut8 *arw_ptr = NULL; int arw_sz, len = 0; ut8 thc[sizeof (struct thread_command)] = {0}; ut8 tmp[4]; if (off > bin->size || off + sizeof (struct thread_command) > bin->size) return false; len = r_buf_read_at (bin->b, off, thc, 8); if (len < 1) { goto wrong_read; } bin->thread.cmd = r_read_ble32 (&thc[0], bin->big_endian); bin->thread.cmdsize = r_read_ble32 (&thc[4], bin->big_endian); if (r_buf_read_at (bin->b, off + sizeof (struct thread_command), tmp, 4) < 4) { goto wrong_read; } flavor = r_read_ble32 (tmp, bin->big_endian); if (len == -1) goto wrong_read; if (off + sizeof (struct thread_command) + sizeof (flavor) > bin->size || \ off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (ut32) > bin->size) return false; // TODO: use count for checks if (r_buf_read_at (bin->b, off + sizeof (struct thread_command) + sizeof (flavor), tmp, 4) < 4) { goto wrong_read; } count = r_read_ble32 (tmp, bin->big_endian); 
ptr_thread = off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (count); if (ptr_thread > bin->size) return false; switch (bin->hdr.cputype) { case CPU_TYPE_I386: case CPU_TYPE_X86_64: switch (flavor) { case X86_THREAD_STATE32: if (ptr_thread + sizeof (struct x86_thread_state32) > bin->size) return false; if ((len = r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.x86_32, "16i", 1)) == -1) { bprintf ("Error: read (thread state x86_32)\n"); return false; } pc = bin->thread_state.x86_32.eip; pc_offset = ptr_thread + r_offsetof(struct x86_thread_state32, eip); arw_ptr = (ut8 *)&bin->thread_state.x86_32; arw_sz = sizeof (struct x86_thread_state32); break; case X86_THREAD_STATE64: if (ptr_thread + sizeof (struct x86_thread_state64) > bin->size) return false; if ((len = r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.x86_64, "32l", 1)) == -1) { bprintf ("Error: read (thread state x86_64)\n"); return false; } pc = bin->thread_state.x86_64.rip; pc_offset = ptr_thread + r_offsetof(struct x86_thread_state64, rip); arw_ptr = (ut8 *)&bin->thread_state.x86_64; arw_sz = sizeof (struct x86_thread_state64); break; //default: bprintf ("Unknown type\n"); } break; case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: if (flavor == X86_THREAD_STATE32) { if (ptr_thread + sizeof (struct ppc_thread_state32) > bin->size) return false; if ((len = r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.ppc_32, bin->big_endian?"40I":"40i", 1)) == -1) { bprintf ("Error: read (thread state ppc_32)\n"); return false; } pc = bin->thread_state.ppc_32.srr0; pc_offset = ptr_thread + r_offsetof(struct ppc_thread_state32, srr0); arw_ptr = (ut8 *)&bin->thread_state.ppc_32; arw_sz = sizeof (struct ppc_thread_state32); } else if (flavor == X86_THREAD_STATE64) { if (ptr_thread + sizeof (struct ppc_thread_state64) > bin->size) return false; if ((len = r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.ppc_64, bin->big_endian?"34LI3LI":"34li3li", 1)) == -1) { bprintf ("Error: read (thread state ppc_64)\n"); return false; } pc = bin->thread_state.ppc_64.srr0; pc_offset = ptr_thread + r_offsetof(struct ppc_thread_state64, srr0); arw_ptr = (ut8 *)&bin->thread_state.ppc_64; arw_sz = sizeof (struct ppc_thread_state64); } break; case CPU_TYPE_ARM: if (ptr_thread + sizeof (struct arm_thread_state32) > bin->size) return false; if ((len = r_buf_fread_at (bin->b, ptr_thread, (ut8*)&bin->thread_state.arm_32, bin->big_endian?"17I":"17i", 1)) == -1) { bprintf ("Error: read (thread state arm)\n"); return false; } pc = bin->thread_state.arm_32.r15; pc_offset = ptr_thread + r_offsetof (struct arm_thread_state32, r15); arw_ptr = (ut8 *)&bin->thread_state.arm_32; arw_sz = sizeof (struct arm_thread_state32); break; case CPU_TYPE_ARM64: if (ptr_thread + sizeof (struct arm_thread_state64) > bin->size) { return false; } if ((len = r_buf_fread_at(bin->b, ptr_thread, (ut8*)&bin->thread_state.arm_64, bin->big_endian?"34LI1I":"34Li1i", 1)) == -1) { bprintf ("Error: read (thread state arm)\n"); return false; } pc = r_read_be64 (&bin->thread_state.arm_64.pc); pc_offset = ptr_thread + r_offsetof (struct arm_thread_state64, pc); arw_ptr = (ut8*)&bin->thread_state.arm_64; arw_sz = sizeof (struct arm_thread_state64); break; default: bprintf ("Error: read (unknown thread state structure)\n"); return false; } // TODO: this shouldnt be an bprintf... 
if (arw_ptr && arw_sz > 0) { int i; ut8 *p = arw_ptr; bprintf ("arw "); for (i = 0; i < arw_sz; i++) { bprintf ("%02x", 0xff & p[i]); } bprintf ("\n"); } if (is_first_thread) { bin->main_cmd = *lc; if (pc != UT64_MAX) { bin->entry = pc; } if (pc_offset != UT64_MAX) { sdb_num_set (bin->kv, "mach0.entry.offset", pc_offset, 0); } } return true; wrong_read: bprintf ("Error: read (thread)\n"); return false; } static int parse_function_starts (struct MACH0_(obj_t)* bin, ut64 off) { struct linkedit_data_command fc; ut8 sfc[sizeof (struct linkedit_data_command)] = {0}; ut8 *buf; int len; if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) { bprintf ("Likely overflow while parsing" " LC_FUNCTION_STARTS command\n"); } bin->func_start = NULL; len = r_buf_read_at (bin->b, off, sfc, sizeof (struct linkedit_data_command)); if (len < 1) { bprintf ("Failed to get data while parsing" " LC_FUNCTION_STARTS command\n"); } fc.cmd = r_read_ble32 (&sfc[0], bin->big_endian); fc.cmdsize = r_read_ble32 (&sfc[4], bin->big_endian); fc.dataoff = r_read_ble32 (&sfc[8], bin->big_endian); fc.datasize = r_read_ble32 (&sfc[12], bin->big_endian); buf = calloc (1, fc.datasize + 1); if (!buf) { bprintf ("Failed to allocate buffer\n"); return false; } bin->func_size = fc.datasize; if (fc.dataoff > bin->size || fc.dataoff + fc.datasize > bin->size) { free (buf); bprintf ("Likely overflow while parsing " "LC_FUNCTION_STARTS command\n"); return false; } len = r_buf_read_at (bin->b, fc.dataoff, buf, fc.datasize); if (len != fc.datasize) { free (buf); bprintf ("Failed to get data while parsing" " LC_FUNCTION_STARTS\n"); return false; } buf[fc.datasize] = 0; // null-terminated buffer bin->func_start = buf; return true; } static int parse_dylib(struct MACH0_(obj_t)* bin, ut64 off) { struct dylib_command dl; int lib, len; ut8 sdl[sizeof (struct dylib_command)] = {0}; if (off > bin->size || off + sizeof (struct dylib_command) > bin->size) return false; lib = bin->nlibs - 1; if (!(bin->libs = realloc (bin->libs, bin->nlibs * R_BIN_MACH0_STRING_LENGTH))) { perror ("realloc (libs)"); return false; } len = r_buf_read_at (bin->b, off, sdl, sizeof (struct dylib_command)); if (len < 1) { bprintf ("Error: read (dylib)\n"); return false; } dl.cmd = r_read_ble32 (&sdl[0], bin->big_endian); dl.cmdsize = r_read_ble32 (&sdl[4], bin->big_endian); dl.dylib.name = r_read_ble32 (&sdl[8], bin->big_endian); dl.dylib.timestamp = r_read_ble32 (&sdl[12], bin->big_endian); dl.dylib.current_version = r_read_ble32 (&sdl[16], bin->big_endian); dl.dylib.compatibility_version = r_read_ble32 (&sdl[20], bin->big_endian); if (off + dl.dylib.name > bin->size ||\ off + dl.dylib.name + R_BIN_MACH0_STRING_LENGTH > bin->size) return false; len = r_buf_read_at (bin->b, off+dl.dylib.name, (ut8*)bin->libs[lib], R_BIN_MACH0_STRING_LENGTH); if (len < 1) { bprintf ("Error: read (dylib str)"); return false; } return true; } static const char *cmd_to_string(ut32 cmd) { switch (cmd) { case LC_DATA_IN_CODE: return "LC_DATA_IN_CODE"; case LC_CODE_SIGNATURE: return "LC_CODE_SIGNATURE"; case LC_RPATH: return "LC_RPATH"; case LC_SEGMENT: return "LC_SEGMENT"; case LC_SEGMENT_64: return "LC_SEGMENT_64"; case LC_SYMTAB: return "LC_SYMTAB"; case LC_SYMSEG: return "LC_SYMSEG"; case LC_ID_DYLIB: return "LC_ID_DYLIB"; case LC_DYSYMTAB: return "LC_DYSYMTAB"; case LC_FUNCTION_STARTS: return "LC_FUNCTION_STARTS"; case LC_DYLIB_CODE_SIGN_DRS: return "LC_DYLIB_CODE_SIGN_DRS"; case LC_VERSION_MIN_MACOSX: return "LC_VERSION_MIN_MACOSX"; case LC_VERSION_MIN_IPHONEOS: 
return "LC_VERSION_MIN_IPHONEOS"; case LC_VERSION_MIN_TVOS: return "LC_VERSION_MIN_TVOS"; case LC_VERSION_MIN_WATCHOS: return "LC_VERSION_MIN_WATCHOS"; case LC_DYLD_INFO: return "LC_DYLD_INFO"; case LC_SOURCE_VERSION: return "LC_SOURCE_VERSION"; case LC_MAIN: return "LC_MAIN"; case LC_UUID: return "LC_UUID"; case LC_ENCRYPTION_INFO_64: return "LC_ENCRYPTION_INFO_64"; case LC_ENCRYPTION_INFO: return "LC_ENCRYPTION_INFO"; case LC_LOAD_DYLINKER: return "LC_LOAD_DYLINKER"; case LC_LOAD_DYLIB: return "LC_LOAD_DYLIB"; case LC_THREAD: return "LC_THREAD"; case LC_UNIXTHREAD: return "LC_UNIXTHREAD"; case LC_IDENT: return "LC_IDENT"; case LC_DYLD_INFO_ONLY: return "LC_DYLD_INFO_ONLY"; } return ""; } static int init_items(struct MACH0_(obj_t)* bin) { struct load_command lc = {0, 0}; ut8 loadc[sizeof (struct load_command)] = {0}; bool is_first_thread = true; ut64 off = 0LL; int i, len; bin->uuidn = 0; bin->os = 0; bin->has_crypto = 0; if (bin->hdr.sizeofcmds > bin->size) { bprintf ("Warning: chopping hdr.sizeofcmds\n"); bin->hdr.sizeofcmds = bin->size - 128; //return false; } //bprintf ("Commands: %d\n", bin->hdr.ncmds); for (i = 0, off = sizeof (struct MACH0_(mach_header)); \ i < bin->hdr.ncmds; i++, off += lc.cmdsize) { if (off > bin->size || off + sizeof (struct load_command) > bin->size){ bprintf ("mach0: out of bounds command\n"); return false; } len = r_buf_read_at (bin->b, off, loadc, sizeof (struct load_command)); if (len < 1) { bprintf ("Error: read (lc) at 0x%08"PFMT64x"\n", off); return false; } lc.cmd = r_read_ble32 (&loadc[0], bin->big_endian); lc.cmdsize = r_read_ble32 (&loadc[4], bin->big_endian); if (lc.cmdsize < 1 || off + lc.cmdsize > bin->size) { bprintf ("Warning: mach0_header %d = cmdsize<1. (0x%llx vs 0x%llx)\n", i, (ut64)(off + lc.cmdsize), (ut64)(bin->size)); break; } // TODO: a different format for each cmd sdb_num_set (bin->kv, sdb_fmt ("mach0_cmd_%d.offset", i), off, 0); sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.format", i), "xd cmd size", 0); //bprintf ("%d\n", lc.cmd); switch (lc.cmd) { case LC_DATA_IN_CODE: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "data_in_code", 0); // TODO table of non-instructions in __text break; case LC_RPATH: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "rpath", 0); //bprintf ("--->\n"); break; case LC_SEGMENT_64: case LC_SEGMENT: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "segment", 0); bin->nsegs++; if (!parse_segments (bin, off)) { bprintf ("error parsing segment\n"); bin->nsegs--; return false; } break; case LC_SYMTAB: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "symtab", 0); if (!parse_symtab (bin, off)) { bprintf ("error parsing symtab\n"); return false; } break; case LC_DYSYMTAB: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dysymtab", 0); if (!parse_dysymtab(bin, off)) { bprintf ("error parsing dysymtab\n"); return false; } break; case LC_DYLIB_CODE_SIGN_DRS: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dylib_code_sign_drs", 0); //bprintf ("[mach0] code is signed\n"); break; case LC_VERSION_MIN_MACOSX: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version_min_macosx", 0); bin->os = 1; // set OS = osx //bprintf ("[mach0] Requires OSX >= x\n"); break; case LC_VERSION_MIN_IPHONEOS: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version_min_iphoneos", 0); bin->os = 2; // set OS = ios //bprintf ("[mach0] Requires iOS >= x\n"); break; case LC_VERSION_MIN_TVOS: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version_min_tvos", 0); bin->os = 4; break; case LC_VERSION_MIN_WATCHOS: sdb_set 
 (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version_min_watchos", 0);
			bin->os = 3;
			break;
		case LC_UUID:
			sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "uuid", 0);
			{
			struct uuid_command uc = {0};
			if (off + sizeof (struct uuid_command) > bin->size) {
				bprintf ("UUID out of bounds\n");
				return false;
			}
			if (r_buf_fread_at (bin->b, off, (ut8*)&uc, "24c", 1) != -1) {
				char key[128];
				char val[128];
				snprintf (key, sizeof (key)-1, "uuid.%d", bin->uuidn++);
				r_hex_bin2str ((ut8*)&uc.uuid, 16, val);
				sdb_set (bin->kv, key, val, 0);
				//for (i=0;i<16; i++) bprintf ("%02x%c", uc.uuid[i], (i==15)?'\n':'-');
			}
			}
			break;
		case LC_ENCRYPTION_INFO_64:
			/* TODO: the struct is probably different here */
		case LC_ENCRYPTION_INFO:
			sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "encryption_info", 0);
			{
			struct MACH0_(encryption_info_command) eic = {0};
			ut8 seic[sizeof (struct MACH0_(encryption_info_command))] = {0};
			if (off + sizeof (struct MACH0_(encryption_info_command)) > bin->size) {
				bprintf ("encryption info out of bounds\n");
				return false;
			}
			if (r_buf_read_at (bin->b, off, seic, sizeof (struct MACH0_(encryption_info_command))) != -1) {
				eic.cmd = r_read_ble32 (&seic[0], bin->big_endian);
				eic.cmdsize = r_read_ble32 (&seic[4], bin->big_endian);
				eic.cryptoff = r_read_ble32 (&seic[8], bin->big_endian);
				eic.cryptsize = r_read_ble32 (&seic[12], bin->big_endian);
				eic.cryptid = r_read_ble32 (&seic[16], bin->big_endian);
				bin->has_crypto = eic.cryptid;
				sdb_set (bin->kv, "crypto", "true", 0);
				sdb_num_set (bin->kv, "cryptid", eic.cryptid, 0);
				sdb_num_set (bin->kv, "cryptoff", eic.cryptoff, 0);
				sdb_num_set (bin->kv, "cryptsize", eic.cryptsize, 0);
				sdb_num_set (bin->kv, "cryptheader", off, 0);
			}
			}
			break;
		case LC_LOAD_DYLINKER:
			{
			sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dylinker", 0);
			free (bin->intrp);
			bin->intrp = NULL;
			//bprintf ("[mach0] load dynamic linker\n");
			struct dylinker_command dy = {0};
			ut8 sdy[sizeof (struct dylinker_command)] = {0};
			if (off + sizeof (struct dylinker_command) > bin->size){
				bprintf ("Warning: Cannot parse dylinker command\n");
				return false;
			}
			if (r_buf_read_at (bin->b, off, sdy, sizeof (struct dylinker_command)) == -1) {
				bprintf ("Warning: read (LC_LOAD_DYLINKER) at 0x%08"PFMT64x"\n", off);
			} else {
				dy.cmd = r_read_ble32 (&sdy[0], bin->big_endian);
				dy.cmdsize = r_read_ble32 (&sdy[4], bin->big_endian);
				dy.name = r_read_ble32 (&sdy[8], bin->big_endian);
				int len = dy.cmdsize;
				char *buf = malloc (len+1);
				if (buf) {
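					/*
					 * Layout this read assumes (a sketch; dy.name is
					 * normally 0xc, i.e. the path string immediately
					 * follows the 12-byte command header):
					 *   off + 0x0: ut32 cmd      (LC_LOAD_DYLINKER)
					 *   off + 0x4: ut32 cmdsize  (header plus path string)
					 *   off + 0x8: ut32 name     (offset of the path)
					 *   off + 0xc: "/usr/lib/dyld\0" ...
					 */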
r_buf_read_at (bin->b, off + 0xc, (ut8*)buf, len); buf[len] = 0; free (bin->intrp); bin->intrp = buf; } } } break; case LC_MAIN: { struct { ut64 eo; ut64 ss; } ep = {0}; ut8 sep[2 * sizeof (ut64)] = {0}; sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "main", 0); if (!is_first_thread) { bprintf("Error: LC_MAIN with other threads\n"); return false; } if (off + 8 > bin->size || off + sizeof (ep) > bin->size) { bprintf ("invalid command size for main\n"); return false; } r_buf_read_at (bin->b, off + 8, sep, 2 * sizeof (ut64)); ep.eo = r_read_ble64 (&sep[0], bin->big_endian); ep.ss = r_read_ble64 (&sep[8], bin->big_endian); bin->entry = ep.eo; bin->main_cmd = lc; sdb_num_set (bin->kv, "mach0.entry.offset", off + 8, 0); sdb_num_set (bin->kv, "stacksize", ep.ss, 0); is_first_thread = false; } break; case LC_UNIXTHREAD: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "unixthread", 0); if (!is_first_thread) { bprintf("Error: LC_UNIXTHREAD with other threads\n"); return false; } case LC_THREAD: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "thread", 0); if (!parse_thread (bin, &lc, off, is_first_thread)) { bprintf ("Cannot parse thread\n"); return false; } is_first_thread = false; break; case LC_LOAD_DYLIB: case LC_LOAD_WEAK_DYLIB: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "load_dylib", 0); bin->nlibs++; if (!parse_dylib (bin, off)){ bprintf ("Cannot parse dylib\n"); bin->nlibs--; return false; } break; case LC_DYLD_INFO: case LC_DYLD_INFO_ONLY: { ut8 dyldi[sizeof (struct dyld_info_command)] = {0}; sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dyld_info", 0); bin->dyld_info = calloc (1, sizeof (struct dyld_info_command)); if (bin->dyld_info) { if (off + sizeof (struct dyld_info_command) > bin->size){ bprintf ("Cannot parse dyldinfo\n"); R_FREE (bin->dyld_info); return false; } if (r_buf_read_at (bin->b, off, dyldi, sizeof (struct dyld_info_command)) == -1) { free (bin->dyld_info); bin->dyld_info = NULL; bprintf ("Error: read (LC_DYLD_INFO) at 0x%08"PFMT64x"\n", off); } else { bin->dyld_info->cmd = r_read_ble32 (&dyldi[0], bin->big_endian); bin->dyld_info->cmdsize = r_read_ble32 (&dyldi[4], bin->big_endian); bin->dyld_info->rebase_off = r_read_ble32 (&dyldi[8], bin->big_endian); bin->dyld_info->rebase_size = r_read_ble32 (&dyldi[12], bin->big_endian); bin->dyld_info->bind_off = r_read_ble32 (&dyldi[16], bin->big_endian); bin->dyld_info->bind_size = r_read_ble32 (&dyldi[20], bin->big_endian); bin->dyld_info->weak_bind_off = r_read_ble32 (&dyldi[24], bin->big_endian); bin->dyld_info->weak_bind_size = r_read_ble32 (&dyldi[28], bin->big_endian); bin->dyld_info->lazy_bind_off = r_read_ble32 (&dyldi[32], bin->big_endian); bin->dyld_info->lazy_bind_size = r_read_ble32 (&dyldi[36], bin->big_endian); bin->dyld_info->export_off = r_read_ble32 (&dyldi[40], bin->big_endian); bin->dyld_info->export_size = r_read_ble32 (&dyldi[44], bin->big_endian); } } } break; case LC_CODE_SIGNATURE: parse_signature (bin, off); sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "signature", 0); /* ut32 dataoff // ut32 datasize */ break; case LC_SOURCE_VERSION: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version", 0); /* uint64_t version; */ /* A.B.C.D.E packed as a24.b10.c10.d10.e10 */ //bprintf ("mach0: TODO: Show source version\n"); break; case LC_SEGMENT_SPLIT_INFO: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "split_info", 0); /* TODO */ break; case LC_FUNCTION_STARTS: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "function_starts", 0); if (!parse_function_starts (bin, off)) { 
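				/* non-fatal: LC_FUNCTION_STARTS only provides optional
				 * function-boundary hints, so load-command parsing continues */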
bprintf ("Cannot parse LC_FUNCTION_STARTS\n"); } break; case LC_REEXPORT_DYLIB: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dylib", 0); /* TODO */ break; default: //bprintf ("mach0: Unknown header command %x\n", lc.cmd); break; } } return true; } static int init(struct MACH0_(obj_t)* bin) { union { ut16 word; ut8 byte[2]; } endian = { 1 }; little_ = endian.byte[0]; if (!init_hdr (bin)) { bprintf ("Warning: File is not MACH0\n"); return false; } if (!init_items (bin)) { bprintf ("Warning: Cannot initialize items\n"); } bin->baddr = MACH0_(get_baddr)(bin); return true; } void* MACH0_(mach0_free)(struct MACH0_(obj_t)* bin) { if (!bin) { return NULL; } free (bin->segs); free (bin->sects); free (bin->symtab); free (bin->symstr); free (bin->indirectsyms); free (bin->imports_by_ord); free (bin->dyld_info); free (bin->toc); free (bin->modtab); free (bin->libs); free (bin->func_start); free (bin->signature); r_buf_free (bin->b); free (bin); return NULL; } struct MACH0_(obj_t)* MACH0_(mach0_new)(const char* file, bool verbose) { ut8 *buf; struct MACH0_(obj_t) *bin; if (!(bin = malloc (sizeof (struct MACH0_(obj_t))))) { return NULL; } memset (bin, 0, sizeof (struct MACH0_(obj_t))); bin->verbose = verbose; bin->file = file; if (!(buf = (ut8*)r_file_slurp (file, &bin->size))) { return MACH0_(mach0_free)(bin); } bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return MACH0_(mach0_free)(bin); } free (buf); bin->dyld_info = NULL; if (!init (bin)) { return MACH0_(mach0_free)(bin); } bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; return bin; } struct MACH0_(obj_t)* MACH0_(new_buf)(RBuffer *buf, bool verbose) { if (!buf) { return NULL; } RBuffer * buf_copy = r_buf_new_with_buf (buf); if (!buf_copy) { return NULL; } return MACH0_(new_buf_steal) (buf_copy, verbose); } struct MACH0_(obj_t)* MACH0_(new_buf_steal)(RBuffer *buf, bool verbose) { struct MACH0_(obj_t) *bin = R_NEW0 (struct MACH0_(obj_t)); if (!bin) { return NULL; } bin->kv = sdb_new (NULL, "bin.mach0", 0); bin->size = r_buf_size (buf); bin->verbose = verbose; bin->b = buf; if (!init (bin)) { return MACH0_(mach0_free)(bin); } return bin; } // prot: r = 1, w = 2, x = 4 // perm: r = 4, w = 2, x = 1 static int prot2perm (int x) { int r = 0; if (x&1) r |= 4; if (x&2) r |= 2; if (x&4) r |= 1; return r; } struct section_t* MACH0_(get_sections)(struct MACH0_(obj_t)* bin) { struct section_t *sections; char segname[32], sectname[32]; int i, j, to; if (!bin) { return NULL; } /* for core files */ if (bin->nsects < 1 && bin->nsegs > 0) { struct MACH0_(segment_command) *seg; if (!(sections = calloc ((bin->nsegs + 1), sizeof (struct section_t)))) { return NULL; } for (i = 0; i < bin->nsegs; i++) { seg = &bin->segs[i]; sections[i].addr = seg->vmaddr; sections[i].offset = seg->fileoff; sections[i].size = seg->vmsize; sections[i].vsize = seg->vmsize; sections[i].align = 4096; sections[i].flags = seg->flags; r_str_ncpy (sectname, seg->segname, sizeof (sectname)); r_str_filter (sectname, -1); // hack to support multiple sections with same name sections[i].srwx = prot2perm (seg->initprot); sections[i].last = 0; } sections[i].last = 1; return sections; } if (!bin->sects) { return NULL; } to = R_MIN (bin->nsects, 128); // limit number of sections here to avoid fuzzed bins if (to < 1) { return NULL; } if (!(sections = calloc (bin->nsects + 1, sizeof (struct section_t)))) { return NULL; } for (i = 0; i < to; i++) { sections[i].offset = (ut64)bin->sects[i].offset; sections[i].addr = (ut64)bin->sects[i].addr; 
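			/* zerofill (bss-like) sections occupy no bytes in the file,
			 * so the file size below is forced to 0 and only vsize
			 * reflects their in-memory extent */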
sections[i].size = (bin->sects[i].flags == S_ZEROFILL) ? 0 : (ut64)bin->sects[i].size; sections[i].vsize = (ut64)bin->sects[i].size; sections[i].align = bin->sects[i].align; sections[i].flags = bin->sects[i].flags; r_str_ncpy (sectname, bin->sects[i].sectname, sizeof (sectname)); r_str_filter (sectname, -1); // hack to support multiple sections with same name // snprintf (segname, sizeof (segname), "%d", i); // wtf snprintf (segname, sizeof (segname), "%d.%s", i, bin->sects[i].segname); for (j = 0; j < bin->nsegs; j++) { if (sections[i].addr >= bin->segs[j].vmaddr && sections[i].addr < (bin->segs[j].vmaddr + bin->segs[j].vmsize)) { sections[i].srwx = prot2perm (bin->segs[j].initprot); break; } } // XXX: if two sections have the same name are merged :O // XXX: append section index in flag name maybe? // XXX: do not load out of bound sections? // XXX: load segments instead of sections? what about PAGEZERO and ... snprintf (sections[i].name, sizeof (sections[i].name), "%s.%s", segname, sectname); sections[i].last = 0; } sections[i].last = 1; return sections; } static int parse_import_stub(struct MACH0_(obj_t)* bin, struct symbol_t *symbol, int idx) { int i, j, nsyms, stridx; const char *symstr; if (idx < 0) { return 0; } symbol->offset = 0LL; symbol->addr = 0LL; symbol->name[0] = '\0'; if (!bin || !bin->sects) { return false; } for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == S_SYMBOL_STUBS && bin->sects[i].reserved2 > 0) { nsyms = (int)(bin->sects[i].size / bin->sects[i].reserved2); if (nsyms > bin->size) { bprintf ("mach0: Invalid symbol table size\n"); } for (j = 0; j < nsyms; j++) { if (bin->sects) { if (bin->sects[i].reserved1 + j >= bin->nindirectsyms) { continue; } } if (bin->indirectsyms) { if (idx != bin->indirectsyms[bin->sects[i].reserved1 + j]) { continue; } } if (idx > bin->nsymtab) { continue; } symbol->type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL; symbol->offset = bin->sects[i].offset + j * bin->sects[i].reserved2; symbol->addr = bin->sects[i].addr + j * bin->sects[i].reserved2; symbol->size = 0; stridx = bin->symtab[idx].n_strx; if (stridx >= 0 && stridx < bin->symstrlen) { symstr = (char *)bin->symstr + stridx; } else { symstr = "???"; } // Remove the extra underscore that every import seems to have in Mach-O. 
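					/* e.g. the nlist name "_printf" is exposed as "imp.printf" */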
if (*symstr == '_') { symstr++; } snprintf (symbol->name, R_BIN_MACH0_STRING_LENGTH, "imp.%s", symstr); return true; } } } return false; } #if 0 static ut64 get_text_base(struct MACH0_(obj_t)* bin) { ut64 ret = 0LL; struct section_t *sections; if ((sections = MACH0_(get_sections) (bin))) { int i; for (i = 0; !sections[i].last; i++) { if (strstr(sections[i].name, "text")) { ret = sections[i].offset; break; } } free (sections); } return ret; } #endif static int inSymtab(SdbHash *hash, struct symbol_t *symbols, const char *name, ut64 addr) { bool found; const char *key = sdb_fmt ("%s.%"PFMT64x, name, addr); (void)sdb_ht_find (hash, key, &found); if (found) { return true; } sdb_ht_insert (hash, key, "1"); return false; } struct symbol_t* MACH0_(get_symbols)(struct MACH0_(obj_t)* bin) { const char *symstr; struct symbol_t *symbols; int from, to, i, j, s, stridx, symbols_size, symbols_count; SdbHash *hash; //ut64 text_base = get_text_base (bin); if (!bin || !bin->symtab || !bin->symstr) { return NULL; } /* parse symbol table */ /* parse dynamic symbol table */ symbols_count = (bin->dysymtab.nextdefsym + \ bin->dysymtab.nlocalsym + \ bin->dysymtab.nundefsym ); symbols_count += bin->nsymtab; //symbols_count = bin->nsymtab; symbols_size = (symbols_count + 1) * 2 * sizeof (struct symbol_t); if (symbols_size < 1) { return NULL; } if (!(symbols = calloc (1, symbols_size))) { return NULL; } hash = sdb_ht_new (); j = 0; // symbol_idx for (s = 0; s < 2; s++) { switch (s) { case 0: from = bin->dysymtab.iextdefsym; to = from + bin->dysymtab.nextdefsym; break; case 1: from = bin->dysymtab.ilocalsym; to = from + bin->dysymtab.nlocalsym; break; #if NOT_USED case 2: from = bin->dysymtab.iundefsym; to = from + bin->dysymtab.nundefsym; break; #endif } if (from == to) { continue; } #define OLD 1 #if OLD from = R_MIN (R_MAX (0, from), symbols_size / sizeof (struct symbol_t)); to = R_MIN (to , symbols_size / sizeof (struct symbol_t)); to = R_MIN (to, bin->nsymtab); #else from = R_MIN (R_MAX (0, from), symbols_size/sizeof (struct symbol_t)); to = symbols_count; //symbols_size/sizeof(struct symbol_t); #endif int maxsymbols = symbols_size / sizeof (struct symbol_t); if (to > 0x500000) { bprintf ("WARNING: corrupted mach0 header: symbol table is too big %d\n", to); free (symbols); sdb_ht_free (hash); return NULL; } if (symbols_count >= maxsymbols) { symbols_count = maxsymbols - 1; } for (i = from; i < to && j < symbols_count; i++, j++) { symbols[j].offset = addr_to_offset (bin, bin->symtab[i].n_value); symbols[j].addr = bin->symtab[i].n_value; symbols[j].size = 0; /* TODO: Is it anywhere? 
*/ if (bin->symtab[i].n_type & N_EXT) { symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_EXT; } else { symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL; } stridx = bin->symtab[i].n_strx; if (stridx >= 0 && stridx < bin->symstrlen) { symstr = (char*)bin->symstr + stridx; } else { symstr = "???"; } { int i = 0; int len = 0; len = bin->symstrlen - stridx; if (len > 0) { for (i = 0; i < len; i++) { if ((ut8)(symstr[i] & 0xff) == 0xff || !symstr[i]) { len = i; break; } } char *symstr_dup = NULL; if (len > 0) { symstr_dup = r_str_ndup (symstr, len); } if (!symstr_dup) { symbols[j].name[0] = 0; } else { r_str_ncpy (symbols[j].name, symstr_dup, R_BIN_MACH0_STRING_LENGTH); r_str_filter (symbols[j].name, -1); symbols[j].name[R_BIN_MACH0_STRING_LENGTH - 2] = 0; } free (symstr_dup); } else { symbols[j].name[0] = 0; } symbols[j].last = 0; } if (inSymtab (hash, symbols, symbols[j].name, symbols[j].addr)) { symbols[j].name[0] = 0; j--; } } } to = R_MIN (bin->nsymtab, bin->dysymtab.iundefsym + bin->dysymtab.nundefsym); for (i = bin->dysymtab.iundefsym; i < to; i++) { if (j > symbols_count) { bprintf ("mach0-get-symbols: error\n"); break; } if (parse_import_stub(bin, &symbols[j], i)) symbols[j++].last = 0; } #if 1 // symtab is wrongly parsed and produces dupped syms with incorrect vaddr */ for (i = 0; i < bin->nsymtab; i++) { struct MACH0_(nlist) *st = &bin->symtab[i]; #if 0 bprintf ("stridx %d -> section %d type %d value = %d\n", st->n_strx, st->n_sect, st->n_type, st->n_value); #endif stridx = st->n_strx; if (stridx >= 0 && stridx < bin->symstrlen) { symstr = (char*)bin->symstr + stridx; } else { symstr = "???"; } // 0 is for imports // 1 is for symbols // 2 is for func.eh (exception handlers?) int section = st->n_sect; if (section == 1 && j < symbols_count) { // text ??st->n_type == 1) /* is symbol */ symbols[j].addr = st->n_value; // + text_base; symbols[j].offset = addr_to_offset (bin, symbols[j].addr); symbols[j].size = 0; /* find next symbol and crop */ if (st->n_type & N_EXT) { symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_EXT; } else { symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL; } strncpy (symbols[j].name, symstr, R_BIN_MACH0_STRING_LENGTH); symbols[j].name[R_BIN_MACH0_STRING_LENGTH - 1] = 0; symbols[j].last = 0; if (inSymtab (hash, symbols, symbols[j].name, symbols[j].addr)) { symbols[j].name[0] = 0; } else { j++; } } } #endif sdb_ht_free (hash); symbols[j].last = 1; return symbols; } static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) { int i, j, sym, wordsize; ut32 stype; wordsize = MACH0_(get_bits)(bin) / 8; if (idx < 0 || idx >= bin->nsymtab) { return 0; } if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) { stype = S_LAZY_SYMBOL_POINTERS; } else { stype = S_NON_LAZY_SYMBOL_POINTERS; } reloc->offset = 0; reloc->addr = 0; reloc->addend = 0; #define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return false; } #undef CASE for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == stype) { for (j=0, sym=-1; bin->sects[i].reserved1+j < bin->nindirectsyms; j++) if (idx == bin->indirectsyms[bin->sects[i].reserved1 + j]) { sym = j; break; } reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize; reloc->addr = sym == -1 ? 
0 : bin->sects[i].addr + sym * wordsize; return true; } } return false; } struct import_t* MACH0_(get_imports)(struct MACH0_(obj_t)* bin) { struct import_t *imports; int i, j, idx, stridx; const char *symstr; if (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms) return NULL; if (bin->dysymtab.nundefsym < 1 || bin->dysymtab.nundefsym > 0xfffff) { return NULL; } if (!(imports = malloc ((bin->dysymtab.nundefsym + 1) * sizeof (struct import_t)))) { return NULL; } for (i = j = 0; i < bin->dysymtab.nundefsym; i++) { idx = bin->dysymtab.iundefsym + i; if (idx < 0 || idx >= bin->nsymtab) { bprintf ("WARNING: Imports index out of bounds. Ignoring relocs\n"); free (imports); return NULL; } stridx = bin->symtab[idx].n_strx; if (stridx >= 0 && stridx < bin->symstrlen) { symstr = (char *)bin->symstr + stridx; } else { symstr = ""; } if (!*symstr) { continue; } { int i = 0; int len = 0; char *symstr_dup = NULL; len = bin->symstrlen - stridx; imports[j].name[0] = 0; if (len > 0) { for (i = 0; i < len; i++) { if ((unsigned char)symstr[i] == 0xff || !symstr[i]) { len = i; break; } } symstr_dup = r_str_ndup (symstr, len); if (symstr_dup) { r_str_ncpy (imports[j].name, symstr_dup, R_BIN_MACH0_STRING_LENGTH); r_str_filter (imports[j].name, - 1); imports[j].name[R_BIN_MACH0_STRING_LENGTH - 2] = 0; free (symstr_dup); } } } imports[j].ord = i; imports[j++].last = 0; } imports[j].last = 1; if (!bin->imports_by_ord_size) { if (j > 0) { bin->imports_by_ord_size = j; bin->imports_by_ord = (RBinImport**)calloc (j, sizeof (RBinImport*)); } else { bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; } } return imports; } struct reloc_t* MACH0_(get_relocs)(struct MACH0_(obj_t)* bin) { struct reloc_t *relocs; int i = 0, len; ulebr ur = {NULL}; int wordsize = MACH0_(get_bits)(bin) / 8; if (bin->dyld_info) { ut8 *opcodes,*end, type = 0, rel_type = 0; int lib_ord, seg_idx = -1, sym_ord = -1; size_t j, count, skip, bind_size, lazy_size; st64 addend = 0; ut64 segmentAddress = 0LL; ut64 addr = 0LL; ut8 done = 0; #define CASE(T) case (T / 8): rel_type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return NULL; } #undef CASE bind_size = bin->dyld_info->bind_size; lazy_size = bin->dyld_info->lazy_bind_size; if (!bind_size || !lazy_size) { return NULL; } if ((bind_size + lazy_size)<1) { return NULL; } if (bin->dyld_info->bind_off > bin->size || bin->dyld_info->bind_off + bind_size > bin->size) { return NULL; } if (bin->dyld_info->lazy_bind_off > bin->size || \ bin->dyld_info->lazy_bind_off + lazy_size > bin->size) { return NULL; } if (bin->dyld_info->bind_off+bind_size+lazy_size > bin->size) { return NULL; } // NOTE(eddyb) it's a waste of memory, but we don't know the actual number of relocs. 
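		/*
		 * The opcode stream length (bind_size + lazy_size) is used as a
		 * worst-case reloc count: most opcodes emit at most one reloc per
		 * byte, and the DO_BIND macro below additionally refuses to write
		 * past this bound (the i >= bind_size + lazy_size check).
		 */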
if (!(relocs = calloc (1, (1 + bind_size + lazy_size) * sizeof (struct reloc_t)))) { return NULL; } opcodes = calloc (1, bind_size + lazy_size + 1); if (!opcodes) { free (relocs); return NULL; } len = r_buf_read_at (bin->b, bin->dyld_info->bind_off, opcodes, bind_size); i = r_buf_read_at (bin->b, bin->dyld_info->lazy_bind_off, opcodes + bind_size, lazy_size); if (len < 1 || i < 1) { bprintf ("Error: read (dyld_info bind) at 0x%08"PFMT64x"\n", (ut64)(size_t)bin->dyld_info->bind_off); free (opcodes); relocs[i].last = 1; return relocs; } i = 0; // that +2 is a minimum required for uleb128, this may be wrong, // the correct fix would be to make ULEB() must use rutil's // implementation that already checks for buffer boundaries for (ur.p = opcodes, end = opcodes + bind_size + lazy_size ; (ur.p+2 < end) && !done; ) { ut8 imm = *ur.p & BIND_IMMEDIATE_MASK, op = *ur.p & BIND_OPCODE_MASK; ++ur.p; switch (op) { #define ULEB() read_uleb128 (&ur,end) #define SLEB() read_sleb128 (&ur,end) case BIND_OPCODE_DONE: done = 1; break; case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM: lib_ord = imm; break; case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB: lib_ord = ULEB(); break; case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM: lib_ord = imm? (st8)(BIND_OPCODE_MASK | imm) : 0; break; case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: { char *sym_name = (char*)ur.p; //ut8 sym_flags = imm; while (*ur.p++ && ur.p<end) { /* empty loop */ } sym_ord = -1; if (bin->symtab && bin->dysymtab.nundefsym < 0xffff) for (j = 0; j < bin->dysymtab.nundefsym; j++) { int stridx = 0; int iundefsym = bin->dysymtab.iundefsym; if (iundefsym>=0 && iundefsym < bin->nsymtab) { int sidx = iundefsym +j; if (sidx<0 || sidx>= bin->nsymtab) continue; stridx = bin->symtab[sidx].n_strx; if (stridx < 0 || stridx >= bin->symstrlen) continue; } if (!strcmp ((char *)bin->symstr + stridx, sym_name)) { sym_ord = j; break; } } break; } case BIND_OPCODE_SET_TYPE_IMM: type = imm; break; case BIND_OPCODE_SET_ADDEND_SLEB: addend = SLEB(); break; case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB: seg_idx = imm; if (seg_idx < 0 || seg_idx >= bin->nsegs) { bprintf ("Error: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB" " has unexistent segment %d\n", seg_idx); addr = 0LL; return 0; // early exit to avoid future mayhem } else { addr = bin->segs[seg_idx].vmaddr + ULEB(); segmentAddress = bin->segs[seg_idx].vmaddr \ + bin->segs[seg_idx].vmsize; } break; case BIND_OPCODE_ADD_ADDR_ULEB: addr += ULEB(); break; #define DO_BIND() do {\ if (sym_ord < 0 || seg_idx < 0 ) break;\ if (i >= (bind_size + lazy_size)) break;\ relocs[i].addr = addr;\ relocs[i].offset = addr - bin->segs[seg_idx].vmaddr + bin->segs[seg_idx].fileoff;\ if (type == BIND_TYPE_TEXT_PCREL32)\ relocs[i].addend = addend - (bin->baddr + addr);\ else relocs[i].addend = addend;\ /* library ordinal ??? 
*/ \ relocs[i].ord = lib_ord;\ relocs[i].ord = sym_ord;\ relocs[i].type = rel_type;\ relocs[i++].last = 0;\ } while (0) case BIND_OPCODE_DO_BIND: if (addr >= segmentAddress) { bprintf ("Error: Malformed DO bind opcode\n"); goto beach; } DO_BIND(); addr += wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB: if (addr >= segmentAddress) { bprintf ("Error: Malformed ADDR ULEB bind opcode\n"); goto beach; } DO_BIND(); addr += ULEB() + wordsize; break; case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED: if (addr >= segmentAddress) { bprintf ("Error: Malformed IMM SCALED bind opcode\n"); goto beach; } DO_BIND(); addr += (ut64)imm * (ut64)wordsize + wordsize; break; case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB: count = ULEB(); skip = ULEB(); for (j = 0; j < count; j++) { if (addr >= segmentAddress) { bprintf ("Error: Malformed ULEB TIMES bind opcode\n"); goto beach; } DO_BIND(); addr += skip + wordsize; } break; #undef DO_BIND #undef ULEB #undef SLEB default: bprintf ("Error: unknown bind opcode 0x%02x in dyld_info\n", *ur.p); free (opcodes); relocs[i].last = 1; return relocs; } } free (opcodes); } else { int j; if (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms) { return NULL; } if (!(relocs = malloc ((bin->dysymtab.nundefsym + 1) * sizeof (struct reloc_t)))) { return NULL; } for (j = 0; j < bin->dysymtab.nundefsym; j++) { if (parse_import_ptr (bin, &relocs[i], bin->dysymtab.iundefsym + j)) { relocs[i].ord = j; relocs[i++].last = 0; } } } beach: relocs[i].last = 1; return relocs; } struct addr_t* MACH0_(get_entrypoint)(struct MACH0_(obj_t)* bin) { struct addr_t *entry; int i; if (!bin->entry && !bin->sects) { return NULL; } if (!(entry = calloc (1, sizeof (struct addr_t)))) { return NULL; } if (bin->entry) { entry->addr = entry_to_vaddr (bin); entry->offset = addr_to_offset (bin, entry->addr); entry->haddr = sdb_num_get (bin->kv, "mach0.entry.offset", 0); sdb_num_set (bin->kv, "mach0.entry.vaddr", entry->addr, 0); sdb_num_set (bin->kv, "mach0.entry.paddr", bin->entry, 0); } if (!bin->entry || entry->offset == 0) { // XXX: section name doesnt matters at all.. 
just check for exec flags for (i = 0; i < bin->nsects; i++) { if (!strncmp (bin->sects[i].sectname, "__text", 6)) { entry->offset = (ut64)bin->sects[i].offset; sdb_num_set (bin->kv, "mach0.entry", entry->offset, 0); entry->addr = (ut64)bin->sects[i].addr; if (!entry->addr) { // workaround for object files entry->addr = entry->offset; } break; } } bin->entry = entry->addr; } return entry; } void MACH0_(kv_loadlibs)(struct MACH0_(obj_t)* bin) { int i; for (i = 0; i < bin->nlibs; i++) { sdb_set (bin->kv, sdb_fmt ("libs.%d.name", i), bin->libs[i], 0); } } struct lib_t* MACH0_(get_libs)(struct MACH0_(obj_t)* bin) { struct lib_t *libs; int i; if (!bin->nlibs) { return NULL; } if (!(libs = calloc ((bin->nlibs + 1), sizeof (struct lib_t)))) { return NULL; } for (i = 0; i < bin->nlibs; i++) { sdb_set (bin->kv, sdb_fmt ("libs.%d.name", i), bin->libs[i], 0); strncpy (libs[i].name, bin->libs[i], R_BIN_MACH0_STRING_LENGTH); libs[i].name[R_BIN_MACH0_STRING_LENGTH-1] = '\0'; libs[i].last = 0; } libs[i].last = 1; return libs; } ut64 MACH0_(get_baddr)(struct MACH0_(obj_t)* bin) { int i; if (bin->hdr.filetype != MH_EXECUTE && bin->hdr.filetype != MH_DYLINKER) { return 0; } for (i = 0; i < bin->nsegs; ++i) { if (bin->segs[i].fileoff == 0 && bin->segs[i].filesize != 0) { return bin->segs[i].vmaddr; } } return 0; } char* MACH0_(get_class)(struct MACH0_(obj_t)* bin) { #if R_BIN_MACH064 return r_str_new ("MACH064"); #else return r_str_new ("MACH0"); #endif } //XXX we are mixing up bits from cpu and opcodes //since thumb use 16 bits opcode but run in 32 bits //cpus so here we should only return 32 or 64 int MACH0_(get_bits)(struct MACH0_(obj_t)* bin) { if (bin) { int bits = MACH0_(get_bits_from_hdr) (&bin->hdr); if (bin->hdr.cputype == CPU_TYPE_ARM && bin->entry & 1) { return 16; } return bits; } return 32; } int MACH0_(get_bits_from_hdr)(struct MACH0_(mach_header)* hdr) { if (hdr->magic == MH_MAGIC_64 || hdr->magic == MH_CIGAM_64) { return 64; } if ((hdr->cpusubtype & CPU_SUBTYPE_MASK) == (CPU_SUBTYPE_ARM_V7K << 24)) { return 16; } return 32; } bool MACH0_(is_big_endian)(struct MACH0_(obj_t)* bin) { if (bin) { const int cpu = bin->hdr.cputype; return cpu == CPU_TYPE_POWERPC || cpu == CPU_TYPE_POWERPC64; } return false; } const char* MACH0_(get_intrp)(struct MACH0_(obj_t)* bin) { return bin? bin->intrp: NULL; } const char* MACH0_(get_os)(struct MACH0_(obj_t)* bin) { if (bin) switch (bin->os) { case 1: return "macos"; case 2: return "ios"; case 3: return "watchos"; case 4: return "tvos"; } return "darwin"; } const char* MACH0_(get_cputype_from_hdr)(struct MACH0_(mach_header) *hdr) { const char *archstr = "unknown"; switch (hdr->cputype) { case CPU_TYPE_VAX: archstr = "vax"; break; case CPU_TYPE_MC680x0: archstr = "mc680x0"; break; case CPU_TYPE_I386: case CPU_TYPE_X86_64: archstr = "x86"; break; case CPU_TYPE_MC88000: archstr = "mc88000"; break; case CPU_TYPE_MC98000: archstr = "mc98000"; break; case CPU_TYPE_HPPA: archstr = "hppa"; break; case CPU_TYPE_ARM: case CPU_TYPE_ARM64: archstr = "arm"; break; case CPU_TYPE_SPARC: archstr = "sparc"; break; case CPU_TYPE_MIPS: archstr = "mips"; break; case CPU_TYPE_I860: archstr = "i860"; break; case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: archstr = "ppc"; } return archstr; } const char* MACH0_(get_cputype)(struct MACH0_(obj_t)* bin) { return bin? 
MACH0_(get_cputype_from_hdr) (&bin->hdr): "unknown"; } // TODO: use const char* char* MACH0_(get_cpusubtype_from_hdr)(struct MACH0_(mach_header) *hdr) { if (hdr) { switch (hdr->cputype) { case CPU_TYPE_VAX: switch (hdr->cpusubtype) { case CPU_SUBTYPE_VAX_ALL: return strdup ("all"); case CPU_SUBTYPE_VAX780: return strdup ("vax780"); case CPU_SUBTYPE_VAX785: return strdup ("vax785"); case CPU_SUBTYPE_VAX750: return strdup ("vax750"); case CPU_SUBTYPE_VAX730: return strdup ("vax730"); case CPU_SUBTYPE_UVAXI: return strdup ("uvaxI"); case CPU_SUBTYPE_UVAXII: return strdup ("uvaxII"); case CPU_SUBTYPE_VAX8200: return strdup ("vax8200"); case CPU_SUBTYPE_VAX8500: return strdup ("vax8500"); case CPU_SUBTYPE_VAX8600: return strdup ("vax8600"); case CPU_SUBTYPE_VAX8650: return strdup ("vax8650"); case CPU_SUBTYPE_VAX8800: return strdup ("vax8800"); case CPU_SUBTYPE_UVAXIII: return strdup ("uvaxIII"); default: return strdup ("Unknown vax subtype"); } case CPU_TYPE_MC680x0: switch (hdr->cpusubtype) { case CPU_SUBTYPE_MC68030: return strdup ("mc68030"); case CPU_SUBTYPE_MC68040: return strdup ("mc68040"); case CPU_SUBTYPE_MC68030_ONLY: return strdup ("mc68030 only"); default: return strdup ("Unknown mc680x0 subtype"); } case CPU_TYPE_I386: switch (hdr->cpusubtype) { case CPU_SUBTYPE_386: return strdup ("386"); case CPU_SUBTYPE_486: return strdup ("486"); case CPU_SUBTYPE_486SX: return strdup ("486sx"); case CPU_SUBTYPE_PENT: return strdup ("Pentium"); case CPU_SUBTYPE_PENTPRO: return strdup ("Pentium Pro"); case CPU_SUBTYPE_PENTII_M3: return strdup ("Pentium 3 M3"); case CPU_SUBTYPE_PENTII_M5: return strdup ("Pentium 3 M5"); case CPU_SUBTYPE_CELERON: return strdup ("Celeron"); case CPU_SUBTYPE_CELERON_MOBILE: return strdup ("Celeron Mobile"); case CPU_SUBTYPE_PENTIUM_3: return strdup ("Pentium 3"); case CPU_SUBTYPE_PENTIUM_3_M: return strdup ("Pentium 3 M"); case CPU_SUBTYPE_PENTIUM_3_XEON: return strdup ("Pentium 3 Xeon"); case CPU_SUBTYPE_PENTIUM_M: return strdup ("Pentium Mobile"); case CPU_SUBTYPE_PENTIUM_4: return strdup ("Pentium 4"); case CPU_SUBTYPE_PENTIUM_4_M: return strdup ("Pentium 4 M"); case CPU_SUBTYPE_ITANIUM: return strdup ("Itanium"); case CPU_SUBTYPE_ITANIUM_2: return strdup ("Itanium 2"); case CPU_SUBTYPE_XEON: return strdup ("Xeon"); case CPU_SUBTYPE_XEON_MP: return strdup ("Xeon MP"); default: return strdup ("Unknown i386 subtype"); } case CPU_TYPE_X86_64: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_X86_64_ALL: return strdup ("x86 64 all"); case CPU_SUBTYPE_X86_ARCH1: return strdup ("x86 arch 1"); default: return strdup ("Unknown x86 subtype"); } case CPU_TYPE_MC88000: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_MC88000_ALL: return strdup ("all"); case CPU_SUBTYPE_MC88100: return strdup ("mc88100"); case CPU_SUBTYPE_MC88110: return strdup ("mc88110"); default: return strdup ("Unknown mc88000 subtype"); } case CPU_TYPE_MC98000: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_MC98000_ALL: return strdup ("all"); case CPU_SUBTYPE_MC98601: return strdup ("mc98601"); default: return strdup ("Unknown mc98000 subtype"); } case CPU_TYPE_HPPA: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_HPPA_7100: return strdup ("hppa7100"); case CPU_SUBTYPE_HPPA_7100LC: return strdup ("hppa7100LC"); default: return strdup ("Unknown hppa subtype"); } case CPU_TYPE_ARM64: return strdup ("v8"); case CPU_TYPE_ARM: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_ARM_ALL: return strdup ("all"); case CPU_SUBTYPE_ARM_V4T: return strdup ("v4t"); case CPU_SUBTYPE_ARM_V5: return strdup 
("v5"); case CPU_SUBTYPE_ARM_V6: return strdup ("v6"); case CPU_SUBTYPE_ARM_XSCALE: return strdup ("xscale"); case CPU_SUBTYPE_ARM_V7: return strdup ("v7"); case CPU_SUBTYPE_ARM_V7F: return strdup ("v7f"); case CPU_SUBTYPE_ARM_V7S: return strdup ("v7s"); case CPU_SUBTYPE_ARM_V7K: return strdup ("v7k"); case CPU_SUBTYPE_ARM_V7M: return strdup ("v7m"); case CPU_SUBTYPE_ARM_V7EM: return strdup ("v7em"); default: return r_str_newf ("unknown ARM subtype %d", hdr->cpusubtype & 0xff); } case CPU_TYPE_SPARC: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_SPARC_ALL: return strdup ("all"); default: return strdup ("Unknown sparc subtype"); } case CPU_TYPE_MIPS: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_MIPS_ALL: return strdup ("all"); case CPU_SUBTYPE_MIPS_R2300: return strdup ("r2300"); case CPU_SUBTYPE_MIPS_R2600: return strdup ("r2600"); case CPU_SUBTYPE_MIPS_R2800: return strdup ("r2800"); case CPU_SUBTYPE_MIPS_R2000a: return strdup ("r2000a"); case CPU_SUBTYPE_MIPS_R2000: return strdup ("r2000"); case CPU_SUBTYPE_MIPS_R3000a: return strdup ("r3000a"); case CPU_SUBTYPE_MIPS_R3000: return strdup ("r3000"); default: return strdup ("Unknown mips subtype"); } case CPU_TYPE_I860: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_I860_ALL: return strdup ("all"); case CPU_SUBTYPE_I860_860: return strdup ("860"); default: return strdup ("Unknown i860 subtype"); } case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_POWERPC_ALL: return strdup ("all"); case CPU_SUBTYPE_POWERPC_601: return strdup ("601"); case CPU_SUBTYPE_POWERPC_602: return strdup ("602"); case CPU_SUBTYPE_POWERPC_603: return strdup ("603"); case CPU_SUBTYPE_POWERPC_603e: return strdup ("603e"); case CPU_SUBTYPE_POWERPC_603ev: return strdup ("603ev"); case CPU_SUBTYPE_POWERPC_604: return strdup ("604"); case CPU_SUBTYPE_POWERPC_604e: return strdup ("604e"); case CPU_SUBTYPE_POWERPC_620: return strdup ("620"); case CPU_SUBTYPE_POWERPC_750: return strdup ("750"); case CPU_SUBTYPE_POWERPC_7400: return strdup ("7400"); case CPU_SUBTYPE_POWERPC_7450: return strdup ("7450"); case CPU_SUBTYPE_POWERPC_970: return strdup ("970"); default: return strdup ("Unknown ppc subtype"); } } } return strdup ("Unknown cputype"); } char* MACH0_(get_cpusubtype)(struct MACH0_(obj_t)* bin) { if (bin) { return MACH0_(get_cpusubtype_from_hdr) (&bin->hdr); } return strdup ("Unknown"); } int MACH0_(is_pie)(struct MACH0_(obj_t)* bin) { return (bin && bin->hdr.filetype == MH_EXECUTE && bin->hdr.flags & MH_PIE); } int MACH0_(has_nx)(struct MACH0_(obj_t)* bin) { return (bin && bin->hdr.filetype == MH_EXECUTE && bin->hdr.flags & MH_NO_HEAP_EXECUTION); } char* MACH0_(get_filetype_from_hdr)(struct MACH0_(mach_header) *hdr) { const char *mhtype = "Unknown"; switch (hdr->filetype) { case MH_OBJECT: mhtype = "Relocatable object"; break; case MH_EXECUTE: mhtype = "Executable file"; break; case MH_FVMLIB: mhtype = "Fixed VM shared library"; break; case MH_CORE: mhtype = "Core file"; break; case MH_PRELOAD: mhtype = "Preloaded executable file"; break; case MH_DYLIB: mhtype = "Dynamically bound shared library"; break; case MH_DYLINKER: mhtype = "Dynamic link editor"; break; case MH_BUNDLE: mhtype = "Dynamically bound bundle file"; break; case MH_DYLIB_STUB: mhtype = "Shared library stub for static linking (no sections)"; break; case MH_DSYM: mhtype = "Companion file with only debug sections"; break; } return strdup (mhtype); } char* MACH0_(get_filetype)(struct MACH0_(obj_t)* bin) { if (bin) { return 
MACH0_(get_filetype_from_hdr) (&bin->hdr);
	}
	return strdup ("Unknown");
}

ut64 MACH0_(get_main)(struct MACH0_(obj_t)* bin) {
	ut64 addr = 0LL;
	struct symbol_t *symbols;
	int i;

	if (!(symbols = MACH0_(get_symbols) (bin))) {
		return 0;
	}
	for (i = 0; !symbols[i].last; i++) {
		const char *name = symbols[i].name;
		if (!strcmp (name, "__Dmain")) {
			addr = symbols[i].addr;
			break;
		}
		if (strstr (name, "4main") && !strstr (name, "STATIC")) {
			addr = symbols[i].addr;
			break;
		}
		if (!strcmp (symbols[i].name, "_main")) {
			addr = symbols[i].addr;
			// break;
		}
	}
	free (symbols);

	if (!addr && bin->main_cmd.cmd == LC_MAIN) {
		addr = bin->entry + bin->baddr;
	}
	if (!addr) {
		ut8 b[128];
		ut64 entry = addr_to_offset(bin, bin->entry);
		// XXX: X86 only and hacky!
		if (entry > bin->size || entry + sizeof (b) > bin->size) {
			return 0;
		}
		i = r_buf_read_at (bin->b, entry, b, sizeof (b));
		if (i < 1) {
			return 0;
		}
		for (i = 0; i < 64; i++) {
			if (b[i] == 0xe8 && !b[i+3] && !b[i+4]) {
				int delta = b[i+1] | (b[i+2] << 8) | (b[i+3] << 16) | (b[i+4] << 24);
				return bin->entry + i + 5 + delta;
			}
		}
	}
	return addr;
}

void MACH0_(mach_headerfields)(RBinFile *file) {
	RBuffer *buf = file->buf;
	int n = 0;
	struct MACH0_(mach_header) *mh = MACH0_(get_hdr_from_bytes)(buf);
	if (!mh) {
		return;
	}
	printf ("0x00000000 Magic 0x%x\n", mh->magic);
	printf ("0x00000004 CpuType 0x%x\n", mh->cputype);
	printf ("0x00000008 CpuSubType 0x%x\n", mh->cpusubtype);
	printf ("0x0000000c FileType 0x%x\n", mh->filetype);
	printf ("0x00000010 nCmds %d\n", mh->ncmds);
	printf ("0x00000014 sizeOfCmds %d\n", mh->sizeofcmds);
	printf ("0x00000018 Flags 0x%x\n", mh->flags);
	ut64 addr = 0x20 - 4;
	ut32 word = 0;
	ut8 wordbuf[sizeof (word)];
#define READWORD() \
	addr += 4; \
	if (!r_buf_read_at (buf, addr, (ut8*)wordbuf, 4)) { \
		eprintf ("Invalid address in buffer.\n"); \
		break; \
	} \
	word = r_read_le32 (wordbuf);
	for (n = 0; n < mh->ncmds; n++) {
		printf ("\n# Load Command %d\n", n);
		READWORD();
		int lcType = word;
		eprintf ("0x%08"PFMT64x" cmd 0x%x %s\n",
			addr, lcType, cmd_to_string (lcType));
		READWORD();
		int lcSize = word;
		word &= 0xFFFFFF;
		printf ("0x%08"PFMT64x" cmdsize %d\n", addr, word);
		if (lcSize < 1) {
			eprintf ("Invalid size for a load command\n");
			break;
		}
		switch (lcType) {
		case LC_ID_DYLIB: // install_name_tool
			printf ("0x%08"PFMT64x" id %s\n",
				addr + 20, r_buf_get_at (buf, addr + 20, NULL));
			break;
		case LC_UUID:
			printf ("0x%08"PFMT64x" uuid %s\n",
				addr + 20, r_buf_get_at (buf, addr + 32, NULL));
			break;
		case LC_LOAD_DYLIB:
			printf ("0x%08"PFMT64x" dylib %s\n",
				addr + 20, r_buf_get_at (buf, addr + 20, NULL));
			break;
		case LC_RPATH:
			printf ("0x%08"PFMT64x" rpath %s\n",
				addr + 8, r_buf_get_at (buf, addr + 8, NULL));
			break;
		case LC_CODE_SIGNATURE:
			{
			ut32 *words = (ut32*)r_buf_get_at (buf, addr + 4, NULL);
			printf ("0x%08"PFMT64x" dataoff 0x%08x\n", addr + 4, words[0]);
			printf ("0x%08"PFMT64x" datasize %d\n", addr + 8, words[1]);
			printf ("# wtf mach0.sign %d @ 0x%x\n", words[1], words[0]);
			}
			break;
		}
		addr += word - 8;
	}
	free (mh);
}

RList* MACH0_(mach_fields)(RBinFile *bf) {
	struct MACH0_(mach_header) *mh = MACH0_(get_hdr_from_bytes)(bf->buf);
	if (!mh) {
		return NULL;
	}
	RList *ret = r_list_new ();
	if (!ret) {
		free (mh);
		return NULL;
	}
	ret->free = free;
	ut64 addr = 0;
#define ROW(nam,siz,val,fmt) \
	r_list_append (ret, r_bin_field_new (addr, addr, siz, nam, sdb_fmt ("0x%08x", val), fmt)); \
	addr += 4;
	ROW("hdr.magic", 4, mh->magic, "x");
	ROW("hdr.cputype", 4, mh->cputype, NULL);
	ROW("hdr.cpusubtype", 4, mh->cpusubtype, NULL);
	ROW("hdr.filetype", 4, mh->filetype, NULL);
	ROW("hdr.ncmds", 4, mh->ncmds, NULL);
	ROW("hdr.sizeofcmds", 4, mh->sizeofcmds, NULL);
	free (mh);
	return ret;
}

struct MACH0_(mach_header) * MACH0_(get_hdr_from_bytes)(RBuffer *buf) {
	ut8 magicbytes[sizeof (ut32)] = {0};
	ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0};
	int len;
	struct MACH0_(mach_header) *macho_hdr = R_NEW0 (struct MACH0_(mach_header));
	bool big_endian = false;
	if (!macho_hdr) {
		return NULL;
	}
	if (r_buf_read_at (buf, 0, magicbytes, 4) < 1) {
		free (macho_hdr);
		return NULL;
	}
	if (r_read_le32 (magicbytes) == 0xfeedface) {
		big_endian = false;
	} else if (r_read_be32 (magicbytes) == 0xfeedface) {
		big_endian = true;
	} else if (r_read_le32 (magicbytes) == FAT_MAGIC) {
		big_endian = false;
	} else if (r_read_be32 (magicbytes) == FAT_MAGIC) {
		big_endian = true;
	} else if (r_read_le32 (magicbytes) == 0xfeedfacf) {
		big_endian = false;
	} else if (r_read_be32 (magicbytes) == 0xfeedfacf) {
		big_endian = true;
	} else {
		/* also extract non-mach0s */
#if 0
		free (macho_hdr);
		return NULL;
#endif
	}
	len = r_buf_read_at (buf, 0, machohdrbytes, sizeof (machohdrbytes));
	if (len != sizeof (struct MACH0_(mach_header))) {
		free (macho_hdr);
		return NULL;
	}
	macho_hdr->magic = r_read_ble (&machohdrbytes[0], big_endian, 32);
	macho_hdr->cputype = r_read_ble (&machohdrbytes[4], big_endian, 32);
	macho_hdr->cpusubtype = r_read_ble (&machohdrbytes[8], big_endian, 32);
	macho_hdr->filetype = r_read_ble (&machohdrbytes[12], big_endian, 32);
	macho_hdr->ncmds = r_read_ble (&machohdrbytes[16], big_endian, 32);
	macho_hdr->sizeofcmds = r_read_ble (&machohdrbytes[20], big_endian, 32);
	macho_hdr->flags = r_read_ble (&machohdrbytes[24], big_endian, 32);
#if R_BIN_MACH064
	macho_hdr->reserved = r_read_ble (&machohdrbytes[28], big_endian, 32);
#endif
	return macho_hdr;
}
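#if 0
/* Usage sketch, kept out of the build: how the public entrypoints above fit
 * together. The input path is just an example; everything else is the API
 * defined in this file. All returned arrays end with an element whose
 * .last field is 1. */
static void mach0_usage_example (void) {
	struct MACH0_(obj_t) *bin = MACH0_(mach0_new) ("/bin/ls", false);
	if (!bin) {
		return;
	}
	struct section_t *sections = MACH0_(get_sections) (bin);
	struct symbol_t *symbols = MACH0_(get_symbols) (bin);
	struct addr_t *entry = MACH0_(get_entrypoint) (bin);
	/* ... walk each array until .last == 1 ... */
	free (sections);
	free (symbols);
	free (entry);
	MACH0_(mach0_free) (bin);
}
#endif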
= {0}; ut8 lit[sizeof (struct linkedit_data_command)] = {0}; struct blob_index_t idx = {0}; struct super_blob_t super = {{0}}; if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) { return false; } len = r_buf_read_at (bin->b, off, lit, sizeof (struct linkedit_data_command)); if (len != sizeof (struct linkedit_data_command)) { bprintf ("Failed to get data while parsing LC_CODE_SIGNATURE command\n"); return false; } link.cmd = r_read_ble32 (&lit[0], bin->big_endian); link.cmdsize = r_read_ble32 (&lit[4], bin->big_endian); link.dataoff = r_read_ble32 (&lit[8], bin->big_endian); link.datasize = r_read_ble32 (&lit[12], bin->big_endian); data = link.dataoff; if (data > bin->size || data + sizeof (struct super_blob_t) > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); return true; } super.blob.magic = r_read_ble32 (bin->b->buf + data, little_); super.blob.length = r_read_ble32 (bin->b->buf + data + 4, little_); super.count = r_read_ble32 (bin->b->buf + data + 8, little_); for (i = 0; i < super.count; ++i) { if ((ut8 *)(bin->b->buf + data + i) > (ut8 *)(bin->b->buf + bin->size)) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); break; } struct blob_index_t bi; if (r_buf_read_at (bin->b, data + 12 + (i * sizeof (struct blob_index_t)), (ut8*)&bi, sizeof (struct blob_index_t)) < sizeof (struct blob_index_t)) { break; } idx.type = r_read_ble32 (&bi.type, little_); idx.offset = r_read_ble32 (&bi.offset, little_); if (idx.type == CSSLOT_ENTITLEMENTS) { ut64 off = data + idx.offset; if (off > bin->size || off + sizeof (struct blob_t) > bin->size) { bin->signature = (ut8 *)strdup ("Malformed entitlement"); break; } struct blob_t entitlements = {0}; entitlements.magic = r_read_ble32 (bin->b->buf + off, little_); entitlements.length = r_read_ble32 (bin->b->buf + off + 4, little_); len = entitlements.length - sizeof (struct blob_t); if (len <= bin->size && len > 1) { bin->signature = calloc (1, len + 1); if (bin->signature) { ut8 *src = bin->b->buf + off + sizeof (struct blob_t); if (off + sizeof (struct blob_t) + len < bin->b->length) { memcpy (bin->signature, src, len); bin->signature[len] = '\0'; return true; } bin->signature = (ut8 *)strdup ("Malformed entitlement"); return true; } } else { bin->signature = (ut8 *)strdup ("Malformed entitlement"); } } } if (!bin->signature) { bin->signature = (ut8 *)strdup ("No entitlement found"); } return true; } static int parse_thread(struct MACH0_(obj_t)* bin, struct load_command *lc, ut64 off, bool is_first_thread) { ut64 ptr_thread, pc = UT64_MAX, pc_offset = UT64_MAX; ut32 flavor, count; ut8 *arw_ptr = NULL; int arw_sz, len = 0; ut8 thc[sizeof (struct thread_command)] = {0}; ut8 tmp[4]; if (off > bin->size || off + sizeof (struct thread_command) > bin->size) return false; len = r_buf_read_at (bin->b, off, thc, 8); if (len < 1) { goto wrong_read; } bin->thread.cmd = r_read_ble32 (&thc[0], bin->big_endian); bin->thread.cmdsize = r_read_ble32 (&thc[4], bin->big_endian); if (r_buf_read_at (bin->b, off + sizeof (struct thread_command), tmp, 4) < 4) { goto wrong_read; } flavor = r_read_ble32 (tmp, bin->big_endian); if (len == -1) goto wrong_read; if (off + sizeof (struct thread_command) + sizeof (flavor) > bin->size || \ off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (ut32) > bin->size) return false; // TODO: use count for checks if (r_buf_read_at (bin->b, off + sizeof (struct thread_command) + sizeof (flavor), tmp, 4) < 4) { goto wrong_read; } count = r_read_ble32 (tmp, bin->big_endian); 
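/* Layout note: a thread command is the 8-byte (cmd, cmdsize) header followed
 * by one or more (flavor, count, state...) records, where "count" is the size
 * of the state block in 32-bit words. ptr_thread below therefore points at
 * the first state word, and the per-cputype switch decodes the registers. A
 * sketch of the record shape this code assumes (illustrative only, not a
 * struct used by this file):
 *
 *   struct thread_state_record {
 *       ut32 flavor;  // e.g. X86_THREAD_STATE64
 *       ut32 count;   // state size in ut32 words
 *       ut32 state[]; // flavor-specific register file
 *   };
 */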
	ptr_thread = off + sizeof (struct thread_command) + sizeof (flavor) + sizeof (count);
	if (ptr_thread > bin->size) {
		return false;
	}
	switch (bin->hdr.cputype) {
	case CPU_TYPE_I386:
	case CPU_TYPE_X86_64:
		switch (flavor) {
		case X86_THREAD_STATE32:
			if (ptr_thread + sizeof (struct x86_thread_state32) > bin->size) {
				return false;
			}
			if ((len = r_buf_fread_at (bin->b, ptr_thread,
					(ut8*)&bin->thread_state.x86_32, "16i", 1)) == -1) {
				bprintf ("Error: read (thread state x86_32)\n");
				return false;
			}
			pc = bin->thread_state.x86_32.eip;
			pc_offset = ptr_thread + r_offsetof (struct x86_thread_state32, eip);
			arw_ptr = (ut8 *)&bin->thread_state.x86_32;
			arw_sz = sizeof (struct x86_thread_state32);
			break;
		case X86_THREAD_STATE64:
			if (ptr_thread + sizeof (struct x86_thread_state64) > bin->size) {
				return false;
			}
			if ((len = r_buf_fread_at (bin->b, ptr_thread,
					(ut8*)&bin->thread_state.x86_64, "32l", 1)) == -1) {
				bprintf ("Error: read (thread state x86_64)\n");
				return false;
			}
			pc = bin->thread_state.x86_64.rip;
			pc_offset = ptr_thread + r_offsetof (struct x86_thread_state64, rip);
			arw_ptr = (ut8 *)&bin->thread_state.x86_64;
			arw_sz = sizeof (struct x86_thread_state64);
			break;
		//default: bprintf ("Unknown type\n");
		}
		break;
	case CPU_TYPE_POWERPC:
	case CPU_TYPE_POWERPC64:
		if (flavor == X86_THREAD_STATE32) {
			if (ptr_thread + sizeof (struct ppc_thread_state32) > bin->size) {
				return false;
			}
			if ((len = r_buf_fread_at (bin->b, ptr_thread,
					(ut8*)&bin->thread_state.ppc_32, bin->big_endian? "40I": "40i", 1)) == -1) {
				bprintf ("Error: read (thread state ppc_32)\n");
				return false;
			}
			pc = bin->thread_state.ppc_32.srr0;
			pc_offset = ptr_thread + r_offsetof (struct ppc_thread_state32, srr0);
			arw_ptr = (ut8 *)&bin->thread_state.ppc_32;
			arw_sz = sizeof (struct ppc_thread_state32);
		} else if (flavor == X86_THREAD_STATE64) {
			if (ptr_thread + sizeof (struct ppc_thread_state64) > bin->size) {
				return false;
			}
			if ((len = r_buf_fread_at (bin->b, ptr_thread,
					(ut8*)&bin->thread_state.ppc_64, bin->big_endian? "34LI3LI": "34li3li", 1)) == -1) {
				bprintf ("Error: read (thread state ppc_64)\n");
				return false;
			}
			pc = bin->thread_state.ppc_64.srr0;
			pc_offset = ptr_thread + r_offsetof (struct ppc_thread_state64, srr0);
			arw_ptr = (ut8 *)&bin->thread_state.ppc_64;
			arw_sz = sizeof (struct ppc_thread_state64);
		}
		break;
	case CPU_TYPE_ARM:
		if (ptr_thread + sizeof (struct arm_thread_state32) > bin->size) {
			return false;
		}
		if ((len = r_buf_fread_at (bin->b, ptr_thread,
				(ut8*)&bin->thread_state.arm_32, bin->big_endian? "17I": "17i", 1)) == -1) {
			bprintf ("Error: read (thread state arm)\n");
			return false;
		}
		pc = bin->thread_state.arm_32.r15;
		pc_offset = ptr_thread + r_offsetof (struct arm_thread_state32, r15);
		arw_ptr = (ut8 *)&bin->thread_state.arm_32;
		arw_sz = sizeof (struct arm_thread_state32);
		break;
	case CPU_TYPE_ARM64:
		if (ptr_thread + sizeof (struct arm_thread_state64) > bin->size) {
			return false;
		}
		if ((len = r_buf_fread_at (bin->b, ptr_thread,
				(ut8*)&bin->thread_state.arm_64, bin->big_endian? "34LI1I": "34Li1i", 1)) == -1) {
			bprintf ("Error: read (thread state arm)\n");
			return false;
		}
		pc = r_read_be64 (&bin->thread_state.arm_64.pc);
		pc_offset = ptr_thread + r_offsetof (struct arm_thread_state64, pc);
		arw_ptr = (ut8*)&bin->thread_state.arm_64;
		arw_sz = sizeof (struct arm_thread_state64);
		break;
	default:
		bprintf ("Error: read (unknown thread state structure)\n");
		return false;
	}
	// TODO: this shouldn't be a bprintf...
if (arw_ptr && arw_sz > 0) { int i; ut8 *p = arw_ptr; bprintf ("arw "); for (i = 0; i < arw_sz; i++) { bprintf ("%02x", 0xff & p[i]); } bprintf ("\n"); } if (is_first_thread) { bin->main_cmd = *lc; if (pc != UT64_MAX) { bin->entry = pc; } if (pc_offset != UT64_MAX) { sdb_num_set (bin->kv, "mach0.entry.offset", pc_offset, 0); } } return true; wrong_read: bprintf ("Error: read (thread)\n"); return false; } static int parse_function_starts (struct MACH0_(obj_t)* bin, ut64 off) { struct linkedit_data_command fc; ut8 sfc[sizeof (struct linkedit_data_command)] = {0}; ut8 *buf; int len; if (off > bin->size || off + sizeof (struct linkedit_data_command) > bin->size) { bprintf ("Likely overflow while parsing" " LC_FUNCTION_STARTS command\n"); } bin->func_start = NULL; len = r_buf_read_at (bin->b, off, sfc, sizeof (struct linkedit_data_command)); if (len < 1) { bprintf ("Failed to get data while parsing" " LC_FUNCTION_STARTS command\n"); } fc.cmd = r_read_ble32 (&sfc[0], bin->big_endian); fc.cmdsize = r_read_ble32 (&sfc[4], bin->big_endian); fc.dataoff = r_read_ble32 (&sfc[8], bin->big_endian); fc.datasize = r_read_ble32 (&sfc[12], bin->big_endian); buf = calloc (1, fc.datasize + 1); if (!buf) { bprintf ("Failed to allocate buffer\n"); return false; } bin->func_size = fc.datasize; if (fc.dataoff > bin->size || fc.dataoff + fc.datasize > bin->size) { free (buf); bprintf ("Likely overflow while parsing " "LC_FUNCTION_STARTS command\n"); return false; } len = r_buf_read_at (bin->b, fc.dataoff, buf, fc.datasize); if (len != fc.datasize) { free (buf); bprintf ("Failed to get data while parsing" " LC_FUNCTION_STARTS\n"); return false; } buf[fc.datasize] = 0; // null-terminated buffer bin->func_start = buf; return true; } static int parse_dylib(struct MACH0_(obj_t)* bin, ut64 off) { struct dylib_command dl; int lib, len; ut8 sdl[sizeof (struct dylib_command)] = {0}; if (off > bin->size || off + sizeof (struct dylib_command) > bin->size) return false; lib = bin->nlibs - 1; if (!(bin->libs = realloc (bin->libs, bin->nlibs * R_BIN_MACH0_STRING_LENGTH))) { perror ("realloc (libs)"); return false; } len = r_buf_read_at (bin->b, off, sdl, sizeof (struct dylib_command)); if (len < 1) { bprintf ("Error: read (dylib)\n"); return false; } dl.cmd = r_read_ble32 (&sdl[0], bin->big_endian); dl.cmdsize = r_read_ble32 (&sdl[4], bin->big_endian); dl.dylib.name = r_read_ble32 (&sdl[8], bin->big_endian); dl.dylib.timestamp = r_read_ble32 (&sdl[12], bin->big_endian); dl.dylib.current_version = r_read_ble32 (&sdl[16], bin->big_endian); dl.dylib.compatibility_version = r_read_ble32 (&sdl[20], bin->big_endian); if (off + dl.dylib.name > bin->size ||\ off + dl.dylib.name + R_BIN_MACH0_STRING_LENGTH > bin->size) return false; len = r_buf_read_at (bin->b, off+dl.dylib.name, (ut8*)bin->libs[lib], R_BIN_MACH0_STRING_LENGTH); if (len < 1) { bprintf ("Error: read (dylib str)"); return false; } return true; } static const char *cmd_to_string(ut32 cmd) { switch (cmd) { case LC_DATA_IN_CODE: return "LC_DATA_IN_CODE"; case LC_CODE_SIGNATURE: return "LC_CODE_SIGNATURE"; case LC_RPATH: return "LC_RPATH"; case LC_SEGMENT: return "LC_SEGMENT"; case LC_SEGMENT_64: return "LC_SEGMENT_64"; case LC_SYMTAB: return "LC_SYMTAB"; case LC_SYMSEG: return "LC_SYMSEG"; case LC_ID_DYLIB: return "LC_ID_DYLIB"; case LC_DYSYMTAB: return "LC_DYSYMTAB"; case LC_FUNCTION_STARTS: return "LC_FUNCTION_STARTS"; case LC_DYLIB_CODE_SIGN_DRS: return "LC_DYLIB_CODE_SIGN_DRS"; case LC_VERSION_MIN_MACOSX: return "LC_VERSION_MIN_MACOSX"; case LC_VERSION_MIN_IPHONEOS: 
return "LC_VERSION_MIN_IPHONEOS"; case LC_VERSION_MIN_TVOS: return "LC_VERSION_MIN_TVOS"; case LC_VERSION_MIN_WATCHOS: return "LC_VERSION_MIN_WATCHOS"; case LC_DYLD_INFO: return "LC_DYLD_INFO"; case LC_SOURCE_VERSION: return "LC_SOURCE_VERSION"; case LC_MAIN: return "LC_MAIN"; case LC_UUID: return "LC_UUID"; case LC_ENCRYPTION_INFO_64: return "LC_ENCRYPTION_INFO_64"; case LC_ENCRYPTION_INFO: return "LC_ENCRYPTION_INFO"; case LC_LOAD_DYLINKER: return "LC_LOAD_DYLINKER"; case LC_LOAD_DYLIB: return "LC_LOAD_DYLIB"; case LC_THREAD: return "LC_THREAD"; case LC_UNIXTHREAD: return "LC_UNIXTHREAD"; case LC_IDENT: return "LC_IDENT"; case LC_DYLD_INFO_ONLY: return "LC_DYLD_INFO_ONLY"; } return ""; } static int init_items(struct MACH0_(obj_t)* bin) { struct load_command lc = {0, 0}; ut8 loadc[sizeof (struct load_command)] = {0}; bool is_first_thread = true; ut64 off = 0LL; int i, len; bin->uuidn = 0; bin->os = 0; bin->has_crypto = 0; if (bin->hdr.sizeofcmds > bin->size) { bprintf ("Warning: chopping hdr.sizeofcmds\n"); bin->hdr.sizeofcmds = bin->size - 128; //return false; } //bprintf ("Commands: %d\n", bin->hdr.ncmds); for (i = 0, off = sizeof (struct MACH0_(mach_header)); \ i < bin->hdr.ncmds; i++, off += lc.cmdsize) { if (off > bin->size || off + sizeof (struct load_command) > bin->size){ bprintf ("mach0: out of bounds command\n"); return false; } len = r_buf_read_at (bin->b, off, loadc, sizeof (struct load_command)); if (len < 1) { bprintf ("Error: read (lc) at 0x%08"PFMT64x"\n", off); return false; } lc.cmd = r_read_ble32 (&loadc[0], bin->big_endian); lc.cmdsize = r_read_ble32 (&loadc[4], bin->big_endian); if (lc.cmdsize < 1 || off + lc.cmdsize > bin->size) { bprintf ("Warning: mach0_header %d = cmdsize<1. (0x%llx vs 0x%llx)\n", i, (ut64)(off + lc.cmdsize), (ut64)(bin->size)); break; } // TODO: a different format for each cmd sdb_num_set (bin->kv, sdb_fmt ("mach0_cmd_%d.offset", i), off, 0); sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.format", i), "xd cmd size", 0); //bprintf ("%d\n", lc.cmd); switch (lc.cmd) { case LC_DATA_IN_CODE: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "data_in_code", 0); // TODO table of non-instructions in __text break; case LC_RPATH: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "rpath", 0); //bprintf ("--->\n"); break; case LC_SEGMENT_64: case LC_SEGMENT: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "segment", 0); bin->nsegs++; if (!parse_segments (bin, off)) { bprintf ("error parsing segment\n"); bin->nsegs--; return false; } break; case LC_SYMTAB: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "symtab", 0); if (!parse_symtab (bin, off)) { bprintf ("error parsing symtab\n"); return false; } break; case LC_DYSYMTAB: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dysymtab", 0); if (!parse_dysymtab(bin, off)) { bprintf ("error parsing dysymtab\n"); return false; } break; case LC_DYLIB_CODE_SIGN_DRS: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dylib_code_sign_drs", 0); //bprintf ("[mach0] code is signed\n"); break; case LC_VERSION_MIN_MACOSX: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version_min_macosx", 0); bin->os = 1; // set OS = osx //bprintf ("[mach0] Requires OSX >= x\n"); break; case LC_VERSION_MIN_IPHONEOS: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version_min_iphoneos", 0); bin->os = 2; // set OS = ios //bprintf ("[mach0] Requires iOS >= x\n"); break; case LC_VERSION_MIN_TVOS: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version_min_tvos", 0); bin->os = 4; break; case LC_VERSION_MIN_WATCHOS: sdb_set 
			(bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version_min_watchos", 0);
			bin->os = 3;
			break;
		case LC_UUID:
			sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "uuid", 0);
			{
			struct uuid_command uc = {0};
			if (off + sizeof (struct uuid_command) > bin->size) {
				bprintf ("UUID out of bounds\n");
				return false;
			}
			if (r_buf_fread_at (bin->b, off, (ut8*)&uc, "24c", 1) != -1) {
				char key[128];
				char val[128];
				snprintf (key, sizeof (key) - 1, "uuid.%d", bin->uuidn++);
				r_hex_bin2str ((ut8*)&uc.uuid, 16, val);
				sdb_set (bin->kv, key, val, 0);
				//for (i=0;i<16; i++) bprintf ("%02x%c", uc.uuid[i], (i==15)?'\n':'-');
			}
			}
			break;
		case LC_ENCRYPTION_INFO_64:
			/* TODO: the struct is probably different here */
		case LC_ENCRYPTION_INFO:
			sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "encryption_info", 0);
			{
			struct MACH0_(encryption_info_command) eic = {0};
			ut8 seic[sizeof (struct MACH0_(encryption_info_command))] = {0};
			if (off + sizeof (struct MACH0_(encryption_info_command)) > bin->size) {
				bprintf ("encryption info out of bounds\n");
				return false;
			}
			if (r_buf_read_at (bin->b, off, seic, sizeof (struct MACH0_(encryption_info_command))) != -1) {
				eic.cmd = r_read_ble32 (&seic[0], bin->big_endian);
				eic.cmdsize = r_read_ble32 (&seic[4], bin->big_endian);
				eic.cryptoff = r_read_ble32 (&seic[8], bin->big_endian);
				eic.cryptsize = r_read_ble32 (&seic[12], bin->big_endian);
				eic.cryptid = r_read_ble32 (&seic[16], bin->big_endian);
				bin->has_crypto = eic.cryptid;
				sdb_set (bin->kv, "crypto", "true", 0);
				sdb_num_set (bin->kv, "cryptid", eic.cryptid, 0);
				sdb_num_set (bin->kv, "cryptoff", eic.cryptoff, 0);
				sdb_num_set (bin->kv, "cryptsize", eic.cryptsize, 0);
				sdb_num_set (bin->kv, "cryptheader", off, 0);
			}
			}
			break;
		case LC_LOAD_DYLINKER:
			{
			sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dylinker", 0);
			free (bin->intrp);
			bin->intrp = NULL;
			//bprintf ("[mach0] load dynamic linker\n");
			struct dylinker_command dy = {0};
			ut8 sdy[sizeof (struct dylinker_command)] = {0};
			if (off + sizeof (struct dylinker_command) > bin->size) {
				bprintf ("Warning: Cannot parse dylinker command\n");
				return false;
			}
			if (r_buf_read_at (bin->b, off, sdy, sizeof (struct dylinker_command)) == -1) {
				bprintf ("Warning: read (LC_LOAD_DYLINKER) at 0x%08"PFMT64x"\n", off);
			} else {
				dy.cmd = r_read_ble32 (&sdy[0], bin->big_endian);
				dy.cmdsize = r_read_ble32 (&sdy[4], bin->big_endian);
				dy.name = r_read_ble32 (&sdy[8], bin->big_endian);
				int len = dy.cmdsize;
				char *buf = malloc (len + 1);
				if (buf) {
					// 0xc is sizeof (struct dylinker_command): the path string normally follows the header
r_buf_read_at (bin->b, off + 0xc, (ut8*)buf, len); buf[len] = 0; free (bin->intrp); bin->intrp = buf; } } } break; case LC_MAIN: { struct { ut64 eo; ut64 ss; } ep = {0}; ut8 sep[2 * sizeof (ut64)] = {0}; sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "main", 0); if (!is_first_thread) { bprintf("Error: LC_MAIN with other threads\n"); return false; } if (off + 8 > bin->size || off + sizeof (ep) > bin->size) { bprintf ("invalid command size for main\n"); return false; } r_buf_read_at (bin->b, off + 8, sep, 2 * sizeof (ut64)); ep.eo = r_read_ble64 (&sep[0], bin->big_endian); ep.ss = r_read_ble64 (&sep[8], bin->big_endian); bin->entry = ep.eo; bin->main_cmd = lc; sdb_num_set (bin->kv, "mach0.entry.offset", off + 8, 0); sdb_num_set (bin->kv, "stacksize", ep.ss, 0); is_first_thread = false; } break; case LC_UNIXTHREAD: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "unixthread", 0); if (!is_first_thread) { bprintf("Error: LC_UNIXTHREAD with other threads\n"); return false; } case LC_THREAD: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "thread", 0); if (!parse_thread (bin, &lc, off, is_first_thread)) { bprintf ("Cannot parse thread\n"); return false; } is_first_thread = false; break; case LC_LOAD_DYLIB: case LC_LOAD_WEAK_DYLIB: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "load_dylib", 0); bin->nlibs++; if (!parse_dylib (bin, off)){ bprintf ("Cannot parse dylib\n"); bin->nlibs--; return false; } break; case LC_DYLD_INFO: case LC_DYLD_INFO_ONLY: { ut8 dyldi[sizeof (struct dyld_info_command)] = {0}; sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dyld_info", 0); bin->dyld_info = calloc (1, sizeof (struct dyld_info_command)); if (bin->dyld_info) { if (off + sizeof (struct dyld_info_command) > bin->size){ bprintf ("Cannot parse dyldinfo\n"); R_FREE (bin->dyld_info); return false; } if (r_buf_read_at (bin->b, off, dyldi, sizeof (struct dyld_info_command)) == -1) { free (bin->dyld_info); bin->dyld_info = NULL; bprintf ("Error: read (LC_DYLD_INFO) at 0x%08"PFMT64x"\n", off); } else { bin->dyld_info->cmd = r_read_ble32 (&dyldi[0], bin->big_endian); bin->dyld_info->cmdsize = r_read_ble32 (&dyldi[4], bin->big_endian); bin->dyld_info->rebase_off = r_read_ble32 (&dyldi[8], bin->big_endian); bin->dyld_info->rebase_size = r_read_ble32 (&dyldi[12], bin->big_endian); bin->dyld_info->bind_off = r_read_ble32 (&dyldi[16], bin->big_endian); bin->dyld_info->bind_size = r_read_ble32 (&dyldi[20], bin->big_endian); bin->dyld_info->weak_bind_off = r_read_ble32 (&dyldi[24], bin->big_endian); bin->dyld_info->weak_bind_size = r_read_ble32 (&dyldi[28], bin->big_endian); bin->dyld_info->lazy_bind_off = r_read_ble32 (&dyldi[32], bin->big_endian); bin->dyld_info->lazy_bind_size = r_read_ble32 (&dyldi[36], bin->big_endian); bin->dyld_info->export_off = r_read_ble32 (&dyldi[40], bin->big_endian); bin->dyld_info->export_size = r_read_ble32 (&dyldi[44], bin->big_endian); } } } break; case LC_CODE_SIGNATURE: parse_signature (bin, off); sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "signature", 0); /* ut32 dataoff // ut32 datasize */ break; case LC_SOURCE_VERSION: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "version", 0); /* uint64_t version; */ /* A.B.C.D.E packed as a24.b10.c10.d10.e10 */ //bprintf ("mach0: TODO: Show source version\n"); break; case LC_SEGMENT_SPLIT_INFO: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "split_info", 0); /* TODO */ break; case LC_FUNCTION_STARTS: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "function_starts", 0); if (!parse_function_starts (bin, off)) { 
bprintf ("Cannot parse LC_FUNCTION_STARTS\n"); } break; case LC_REEXPORT_DYLIB: sdb_set (bin->kv, sdb_fmt ("mach0_cmd_%d.cmd", i), "dylib", 0); /* TODO */ break; default: //bprintf ("mach0: Unknown header command %x\n", lc.cmd); break; } } return true; } static int init(struct MACH0_(obj_t)* bin) { union { ut16 word; ut8 byte[2]; } endian = { 1 }; little_ = endian.byte[0]; if (!init_hdr (bin)) { bprintf ("Warning: File is not MACH0\n"); return false; } if (!init_items (bin)) { bprintf ("Warning: Cannot initialize items\n"); } bin->baddr = MACH0_(get_baddr)(bin); return true; } void* MACH0_(mach0_free)(struct MACH0_(obj_t)* bin) { if (!bin) { return NULL; } free (bin->segs); free (bin->sects); free (bin->symtab); free (bin->symstr); free (bin->indirectsyms); free (bin->imports_by_ord); free (bin->dyld_info); free (bin->toc); free (bin->modtab); free (bin->libs); free (bin->func_start); free (bin->signature); r_buf_free (bin->b); free (bin); return NULL; } struct MACH0_(obj_t)* MACH0_(mach0_new)(const char* file, bool verbose) { ut8 *buf; struct MACH0_(obj_t) *bin; if (!(bin = malloc (sizeof (struct MACH0_(obj_t))))) { return NULL; } memset (bin, 0, sizeof (struct MACH0_(obj_t))); bin->verbose = verbose; bin->file = file; if (!(buf = (ut8*)r_file_slurp (file, &bin->size))) { return MACH0_(mach0_free)(bin); } bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return MACH0_(mach0_free)(bin); } free (buf); bin->dyld_info = NULL; if (!init (bin)) { return MACH0_(mach0_free)(bin); } bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; return bin; } struct MACH0_(obj_t)* MACH0_(new_buf)(RBuffer *buf, bool verbose) { if (!buf) { return NULL; } RBuffer * buf_copy = r_buf_new_with_buf (buf); if (!buf_copy) { return NULL; } return MACH0_(new_buf_steal) (buf_copy, verbose); } struct MACH0_(obj_t)* MACH0_(new_buf_steal)(RBuffer *buf, bool verbose) { struct MACH0_(obj_t) *bin = R_NEW0 (struct MACH0_(obj_t)); if (!bin) { return NULL; } bin->kv = sdb_new (NULL, "bin.mach0", 0); bin->size = r_buf_size (buf); bin->verbose = verbose; bin->b = buf; if (!init (bin)) { return MACH0_(mach0_free)(bin); } return bin; } // prot: r = 1, w = 2, x = 4 // perm: r = 4, w = 2, x = 1 static int prot2perm (int x) { int r = 0; if (x&1) r |= 4; if (x&2) r |= 2; if (x&4) r |= 1; return r; } struct section_t* MACH0_(get_sections)(struct MACH0_(obj_t)* bin) { struct section_t *sections; char segname[32], sectname[32]; int i, j, to; if (!bin) { return NULL; } /* for core files */ if (bin->nsects < 1 && bin->nsegs > 0) { struct MACH0_(segment_command) *seg; if (!(sections = calloc ((bin->nsegs + 1), sizeof (struct section_t)))) { return NULL; } for (i = 0; i < bin->nsegs; i++) { seg = &bin->segs[i]; sections[i].addr = seg->vmaddr; sections[i].offset = seg->fileoff; sections[i].size = seg->vmsize; sections[i].vsize = seg->vmsize; sections[i].align = 4096; sections[i].flags = seg->flags; r_str_ncpy (sectname, seg->segname, sizeof (sectname)); r_str_filter (sectname, -1); // hack to support multiple sections with same name sections[i].srwx = prot2perm (seg->initprot); sections[i].last = 0; } sections[i].last = 1; return sections; } if (!bin->sects) { return NULL; } to = R_MIN (bin->nsects, 128); // limit number of sections here to avoid fuzzed bins if (to < 1) { return NULL; } if (!(sections = calloc (bin->nsects + 1, sizeof (struct section_t)))) { return NULL; } for (i = 0; i < to; i++) { sections[i].offset = (ut64)bin->sects[i].offset; sections[i].addr = (ut64)bin->sects[i].addr; 
sections[i].size = (bin->sects[i].flags == S_ZEROFILL) ? 0 : (ut64)bin->sects[i].size; sections[i].vsize = (ut64)bin->sects[i].size; sections[i].align = bin->sects[i].align; sections[i].flags = bin->sects[i].flags; r_str_ncpy (sectname, bin->sects[i].sectname, sizeof (sectname)); r_str_filter (sectname, -1); // hack to support multiple sections with same name // snprintf (segname, sizeof (segname), "%d", i); // wtf snprintf (segname, sizeof (segname), "%d.%s", i, bin->sects[i].segname); for (j = 0; j < bin->nsegs; j++) { if (sections[i].addr >= bin->segs[j].vmaddr && sections[i].addr < (bin->segs[j].vmaddr + bin->segs[j].vmsize)) { sections[i].srwx = prot2perm (bin->segs[j].initprot); break; } } // XXX: if two sections have the same name are merged :O // XXX: append section index in flag name maybe? // XXX: do not load out of bound sections? // XXX: load segments instead of sections? what about PAGEZERO and ... snprintf (sections[i].name, sizeof (sections[i].name), "%s.%s", segname, sectname); sections[i].last = 0; } sections[i].last = 1; return sections; } static int parse_import_stub(struct MACH0_(obj_t)* bin, struct symbol_t *symbol, int idx) { int i, j, nsyms, stridx; const char *symstr; if (idx < 0) { return 0; } symbol->offset = 0LL; symbol->addr = 0LL; symbol->name[0] = '\0'; if (!bin || !bin->sects) { return false; } for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == S_SYMBOL_STUBS && bin->sects[i].reserved2 > 0) { nsyms = (int)(bin->sects[i].size / bin->sects[i].reserved2); if (nsyms > bin->size) { bprintf ("mach0: Invalid symbol table size\n"); } for (j = 0; j < nsyms; j++) { if (bin->sects) { if (bin->sects[i].reserved1 + j >= bin->nindirectsyms) { continue; } } if (bin->indirectsyms) { if (idx != bin->indirectsyms[bin->sects[i].reserved1 + j]) { continue; } } if (idx > bin->nsymtab) { continue; } symbol->type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL; symbol->offset = bin->sects[i].offset + j * bin->sects[i].reserved2; symbol->addr = bin->sects[i].addr + j * bin->sects[i].reserved2; symbol->size = 0; stridx = bin->symtab[idx].n_strx; if (stridx >= 0 && stridx < bin->symstrlen) { symstr = (char *)bin->symstr + stridx; } else { symstr = "???"; } // Remove the extra underscore that every import seems to have in Mach-O. 
if (*symstr == '_') { symstr++; } snprintf (symbol->name, R_BIN_MACH0_STRING_LENGTH, "imp.%s", symstr); return true; } } } return false; } #if 0 static ut64 get_text_base(struct MACH0_(obj_t)* bin) { ut64 ret = 0LL; struct section_t *sections; if ((sections = MACH0_(get_sections) (bin))) { int i; for (i = 0; !sections[i].last; i++) { if (strstr(sections[i].name, "text")) { ret = sections[i].offset; break; } } free (sections); } return ret; } #endif static int inSymtab(SdbHash *hash, struct symbol_t *symbols, const char *name, ut64 addr) { bool found; const char *key = sdb_fmt ("%s.%"PFMT64x, name, addr); (void)sdb_ht_find (hash, key, &found); if (found) { return true; } sdb_ht_insert (hash, key, "1"); return false; } struct symbol_t* MACH0_(get_symbols)(struct MACH0_(obj_t)* bin) { const char *symstr; struct symbol_t *symbols; int from, to, i, j, s, stridx, symbols_size, symbols_count; SdbHash *hash; //ut64 text_base = get_text_base (bin); if (!bin || !bin->symtab || !bin->symstr) { return NULL; } /* parse symbol table */ /* parse dynamic symbol table */ symbols_count = (bin->dysymtab.nextdefsym + \ bin->dysymtab.nlocalsym + \ bin->dysymtab.nundefsym ); symbols_count += bin->nsymtab; //symbols_count = bin->nsymtab; symbols_size = (symbols_count + 1) * 2 * sizeof (struct symbol_t); if (symbols_size < 1) { return NULL; } if (!(symbols = calloc (1, symbols_size))) { return NULL; } hash = sdb_ht_new (); j = 0; // symbol_idx for (s = 0; s < 2; s++) { switch (s) { case 0: from = bin->dysymtab.iextdefsym; to = from + bin->dysymtab.nextdefsym; break; case 1: from = bin->dysymtab.ilocalsym; to = from + bin->dysymtab.nlocalsym; break; #if NOT_USED case 2: from = bin->dysymtab.iundefsym; to = from + bin->dysymtab.nundefsym; break; #endif } if (from == to) { continue; } #define OLD 1 #if OLD from = R_MIN (R_MAX (0, from), symbols_size / sizeof (struct symbol_t)); to = R_MIN (to , symbols_size / sizeof (struct symbol_t)); to = R_MIN (to, bin->nsymtab); #else from = R_MIN (R_MAX (0, from), symbols_size/sizeof (struct symbol_t)); to = symbols_count; //symbols_size/sizeof(struct symbol_t); #endif int maxsymbols = symbols_size / sizeof (struct symbol_t); if (to > 0x500000) { bprintf ("WARNING: corrupted mach0 header: symbol table is too big %d\n", to); free (symbols); sdb_ht_free (hash); return NULL; } if (symbols_count >= maxsymbols) { symbols_count = maxsymbols - 1; } for (i = from; i < to && j < symbols_count; i++, j++) { symbols[j].offset = addr_to_offset (bin, bin->symtab[i].n_value); symbols[j].addr = bin->symtab[i].n_value; symbols[j].size = 0; /* TODO: Is it anywhere? 
*/ if (bin->symtab[i].n_type & N_EXT) { symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_EXT; } else { symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL; } stridx = bin->symtab[i].n_strx; if (stridx >= 0 && stridx < bin->symstrlen) { symstr = (char*)bin->symstr + stridx; } else { symstr = "???"; } { int i = 0; int len = 0; len = bin->symstrlen - stridx; if (len > 0) { for (i = 0; i < len; i++) { if ((ut8)(symstr[i] & 0xff) == 0xff || !symstr[i]) { len = i; break; } } char *symstr_dup = NULL; if (len > 0) { symstr_dup = r_str_ndup (symstr, len); } if (!symstr_dup) { symbols[j].name[0] = 0; } else { r_str_ncpy (symbols[j].name, symstr_dup, R_BIN_MACH0_STRING_LENGTH); r_str_filter (symbols[j].name, -1); symbols[j].name[R_BIN_MACH0_STRING_LENGTH - 2] = 0; } free (symstr_dup); } else { symbols[j].name[0] = 0; } symbols[j].last = 0; } if (inSymtab (hash, symbols, symbols[j].name, symbols[j].addr)) { symbols[j].name[0] = 0; j--; } } } to = R_MIN (bin->nsymtab, bin->dysymtab.iundefsym + bin->dysymtab.nundefsym); for (i = bin->dysymtab.iundefsym; i < to; i++) { if (j > symbols_count) { bprintf ("mach0-get-symbols: error\n"); break; } if (parse_import_stub(bin, &symbols[j], i)) { symbols[j++].last = 0; } } #if 1 // symtab is wrongly parsed and produces dupped syms with incorrect vaddr */ for (i = 0; i < bin->nsymtab; i++) { struct MACH0_(nlist) *st = &bin->symtab[i]; #if 0 bprintf ("stridx %d -> section %d type %d value = %d\n", st->n_strx, st->n_sect, st->n_type, st->n_value); #endif stridx = st->n_strx; if (stridx >= 0 && stridx < bin->symstrlen) { symstr = (char*)bin->symstr + stridx; } else { symstr = "???"; } // 0 is for imports // 1 is for symbols // 2 is for func.eh (exception handlers?) int section = st->n_sect; if (section == 1 && j < symbols_count) { // text ??st->n_type == 1) /* is symbol */ symbols[j].addr = st->n_value; // + text_base; symbols[j].offset = addr_to_offset (bin, symbols[j].addr); symbols[j].size = 0; /* find next symbol and crop */ if (st->n_type & N_EXT) { symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_EXT; } else { symbols[j].type = R_BIN_MACH0_SYMBOL_TYPE_LOCAL; } strncpy (symbols[j].name, symstr, R_BIN_MACH0_STRING_LENGTH); symbols[j].name[R_BIN_MACH0_STRING_LENGTH - 1] = 0; symbols[j].last = 0; if (inSymtab (hash, symbols, symbols[j].name, symbols[j].addr)) { symbols[j].name[0] = 0; } else { j++; } } } #endif sdb_ht_free (hash); symbols[j].last = 1; return symbols; } static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) { int i, j, sym, wordsize; ut32 stype; wordsize = MACH0_(get_bits)(bin) / 8; if (idx < 0 || idx >= bin->nsymtab) { return 0; } if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) { stype = S_LAZY_SYMBOL_POINTERS; } else { stype = S_NON_LAZY_SYMBOL_POINTERS; } reloc->offset = 0; reloc->addr = 0; reloc->addend = 0; #define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return false; } #undef CASE for (i = 0; i < bin->nsects; i++) { if ((bin->sects[i].flags & SECTION_TYPE) == stype) { for (j = 0, sym = -1; bin->sects[i].reserved1 + j < bin->nindirectsyms; j++) { int indidx = bin->sects[i].reserved1 + j; if (indidx < 0 || indidx >= bin->nindirectsyms) { break; } if (idx == bin->indirectsyms[indidx]) { sym = j; break; } } reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize; reloc->addr = sym == -1 ? 
0 : bin->sects[i].addr + sym * wordsize; return true; } } return false; } struct import_t* MACH0_(get_imports)(struct MACH0_(obj_t)* bin) { struct import_t *imports; int i, j, idx, stridx; const char *symstr; if (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms) { return NULL; } if (bin->dysymtab.nundefsym < 1 || bin->dysymtab.nundefsym > 0xfffff) { return NULL; } if (!(imports = malloc ((bin->dysymtab.nundefsym + 1) * sizeof (struct import_t)))) { return NULL; } for (i = j = 0; i < bin->dysymtab.nundefsym; i++) { idx = bin->dysymtab.iundefsym + i; if (idx < 0 || idx >= bin->nsymtab) { bprintf ("WARNING: Imports index out of bounds. Ignoring relocs\n"); free (imports); return NULL; } stridx = bin->symtab[idx].n_strx; if (stridx >= 0 && stridx < bin->symstrlen) { symstr = (char *)bin->symstr + stridx; } else { symstr = ""; } if (!*symstr) { continue; } { int i = 0; int len = 0; char *symstr_dup = NULL; len = bin->symstrlen - stridx; imports[j].name[0] = 0; if (len > 0) { for (i = 0; i < len; i++) { if ((unsigned char)symstr[i] == 0xff || !symstr[i]) { len = i; break; } } symstr_dup = r_str_ndup (symstr, len); if (symstr_dup) { r_str_ncpy (imports[j].name, symstr_dup, R_BIN_MACH0_STRING_LENGTH); r_str_filter (imports[j].name, - 1); imports[j].name[R_BIN_MACH0_STRING_LENGTH - 2] = 0; free (symstr_dup); } } } imports[j].ord = i; imports[j++].last = 0; } imports[j].last = 1; if (!bin->imports_by_ord_size) { if (j > 0) { bin->imports_by_ord_size = j; bin->imports_by_ord = (RBinImport**)calloc (j, sizeof (RBinImport*)); } else { bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; } } return imports; } struct reloc_t* MACH0_(get_relocs)(struct MACH0_(obj_t)* bin) { struct reloc_t *relocs; int i = 0, len; ulebr ur = {NULL}; int wordsize = MACH0_(get_bits)(bin) / 8; if (bin->dyld_info) { ut8 *opcodes,*end, type = 0, rel_type = 0; int lib_ord, seg_idx = -1, sym_ord = -1; size_t j, count, skip, bind_size, lazy_size; st64 addend = 0; ut64 segmentAddress = 0LL; ut64 addr = 0LL; ut8 done = 0; #define CASE(T) case (T / 8): rel_type = R_BIN_RELOC_ ## T; break switch (wordsize) { CASE(8); CASE(16); CASE(32); CASE(64); default: return NULL; } #undef CASE bind_size = bin->dyld_info->bind_size; lazy_size = bin->dyld_info->lazy_bind_size; if (!bind_size || !lazy_size) { return NULL; } if ((bind_size + lazy_size)<1) { return NULL; } if (bin->dyld_info->bind_off > bin->size || bin->dyld_info->bind_off + bind_size > bin->size) { return NULL; } if (bin->dyld_info->lazy_bind_off > bin->size || \ bin->dyld_info->lazy_bind_off + lazy_size > bin->size) { return NULL; } if (bin->dyld_info->bind_off+bind_size+lazy_size > bin->size) { return NULL; } // NOTE(eddyb) it's a waste of memory, but we don't know the actual number of relocs. 
		if (!(relocs = calloc (1, (1 + bind_size + lazy_size) * sizeof (struct reloc_t)))) {
			return NULL;
		}
		opcodes = calloc (1, bind_size + lazy_size + 1);
		if (!opcodes) {
			free (relocs);
			return NULL;
		}
		len = r_buf_read_at (bin->b, bin->dyld_info->bind_off, opcodes, bind_size);
		i = r_buf_read_at (bin->b, bin->dyld_info->lazy_bind_off, opcodes + bind_size, lazy_size);
		if (len < 1 || i < 1) {
			bprintf ("Error: read (dyld_info bind) at 0x%08"PFMT64x"\n",
				(ut64)(size_t)bin->dyld_info->bind_off);
			free (opcodes);
			relocs[i].last = 1;
			return relocs;
		}
		i = 0;
		// that +2 is the minimum required for uleb128; this may be wrong.
		// the correct fix would be to make ULEB() use rutil's
		// implementation, which already checks for buffer boundaries
		for (ur.p = opcodes, end = opcodes + bind_size + lazy_size; (ur.p + 2 < end) && !done;) {
			ut8 imm = *ur.p & BIND_IMMEDIATE_MASK, op = *ur.p & BIND_OPCODE_MASK;
			++ur.p;
			switch (op) {
#define ULEB() read_uleb128 (&ur,end)
#define SLEB() read_sleb128 (&ur,end)
			case BIND_OPCODE_DONE:
				done = 1;
				break;
			case BIND_OPCODE_SET_DYLIB_ORDINAL_IMM:
				lib_ord = imm;
				break;
			case BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB:
				lib_ord = ULEB();
				break;
			case BIND_OPCODE_SET_DYLIB_SPECIAL_IMM:
				lib_ord = imm? (st8)(BIND_OPCODE_MASK | imm) : 0;
				break;
			case BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM: {
				char *sym_name = (char*)ur.p;
				//ut8 sym_flags = imm;
				while (*ur.p++ && ur.p < end) {
					/* empty loop */
				}
				sym_ord = -1;
				if (bin->symtab && bin->dysymtab.nundefsym < 0xffff)
				for (j = 0; j < bin->dysymtab.nundefsym; j++) {
					int stridx = 0;
					int iundefsym = bin->dysymtab.iundefsym;
					if (iundefsym >= 0 && iundefsym < bin->nsymtab) {
						int sidx = iundefsym + j;
						if (sidx < 0 || sidx >= bin->nsymtab) {
							continue;
						}
						stridx = bin->symtab[sidx].n_strx;
						if (stridx < 0 || stridx >= bin->symstrlen) {
							continue;
						}
					}
					if (!strcmp ((char *)bin->symstr + stridx, sym_name)) {
						sym_ord = j;
						break;
					}
				}
				break;
			}
			case BIND_OPCODE_SET_TYPE_IMM:
				type = imm;
				break;
			case BIND_OPCODE_SET_ADDEND_SLEB:
				addend = SLEB();
				break;
			case BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB:
				seg_idx = imm;
				if (seg_idx < 0 || seg_idx >= bin->nsegs) {
					bprintf ("Error: BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB"
						" has nonexistent segment %d\n", seg_idx);
					addr = 0LL;
					return 0; // early exit to avoid future mayhem
				} else {
					addr = bin->segs[seg_idx].vmaddr + ULEB();
					segmentAddress = bin->segs[seg_idx].vmaddr \
						+ bin->segs[seg_idx].vmsize;
				}
				break;
			case BIND_OPCODE_ADD_ADDR_ULEB:
				addr += ULEB();
				break;
#define DO_BIND() do {\
	if (sym_ord < 0 || seg_idx < 0 ) break;\
	if (i >= (bind_size + lazy_size)) break;\
	relocs[i].addr = addr;\
	relocs[i].offset = addr - bin->segs[seg_idx].vmaddr + bin->segs[seg_idx].fileoff;\
	if (type == BIND_TYPE_TEXT_PCREL32)\
		relocs[i].addend = addend - (bin->baddr + addr);\
	else relocs[i].addend = addend;\
	/* the library ordinal is overwritten below; only sym_ord survives
	*/ \
	relocs[i].ord = lib_ord;\
	relocs[i].ord = sym_ord;\
	relocs[i].type = rel_type;\
	relocs[i++].last = 0;\
} while (0)
			case BIND_OPCODE_DO_BIND:
				if (addr >= segmentAddress) {
					bprintf ("Error: Malformed DO bind opcode\n");
					goto beach;
				}
				DO_BIND();
				addr += wordsize;
				break;
			case BIND_OPCODE_DO_BIND_ADD_ADDR_ULEB:
				if (addr >= segmentAddress) {
					bprintf ("Error: Malformed ADDR ULEB bind opcode\n");
					goto beach;
				}
				DO_BIND();
				addr += ULEB() + wordsize;
				break;
			case BIND_OPCODE_DO_BIND_ADD_ADDR_IMM_SCALED:
				if (addr >= segmentAddress) {
					bprintf ("Error: Malformed IMM SCALED bind opcode\n");
					goto beach;
				}
				DO_BIND();
				addr += (ut64)imm * (ut64)wordsize + wordsize;
				break;
			case BIND_OPCODE_DO_BIND_ULEB_TIMES_SKIPPING_ULEB:
				count = ULEB();
				skip = ULEB();
				for (j = 0; j < count; j++) {
					if (addr >= segmentAddress) {
						bprintf ("Error: Malformed ULEB TIMES bind opcode\n");
						goto beach;
					}
					DO_BIND();
					addr += skip + wordsize;
				}
				break;
#undef DO_BIND
#undef ULEB
#undef SLEB
			default:
				bprintf ("Error: unknown bind opcode 0x%02x in dyld_info\n", *ur.p);
				free (opcodes);
				relocs[i].last = 1;
				return relocs;
			}
		}
		free (opcodes);
	} else {
		int j;
		if (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms) {
			return NULL;
		}
		if (!(relocs = malloc ((bin->dysymtab.nundefsym + 1) * sizeof (struct reloc_t)))) {
			return NULL;
		}
		for (j = 0; j < bin->dysymtab.nundefsym; j++) {
			if (parse_import_ptr (bin, &relocs[i], bin->dysymtab.iundefsym + j)) {
				relocs[i].ord = j;
				relocs[i++].last = 0;
			}
		}
	}
beach:
	relocs[i].last = 1;
	return relocs;
}

struct addr_t* MACH0_(get_entrypoint)(struct MACH0_(obj_t)* bin) {
	struct addr_t *entry;
	int i;
	if (!bin->entry && !bin->sects) {
		return NULL;
	}
	if (!(entry = calloc (1, sizeof (struct addr_t)))) {
		return NULL;
	}
	if (bin->entry) {
		entry->addr = entry_to_vaddr (bin);
		entry->offset = addr_to_offset (bin, entry->addr);
		entry->haddr = sdb_num_get (bin->kv, "mach0.entry.offset", 0);
		sdb_num_set (bin->kv, "mach0.entry.vaddr", entry->addr, 0);
		sdb_num_set (bin->kv, "mach0.entry.paddr", bin->entry, 0);
	}
	if (!bin->entry || entry->offset == 0) { // XXX: the section name doesn't matter at all;
just check for exec flags for (i = 0; i < bin->nsects; i++) { if (!strncmp (bin->sects[i].sectname, "__text", 6)) { entry->offset = (ut64)bin->sects[i].offset; sdb_num_set (bin->kv, "mach0.entry", entry->offset, 0); entry->addr = (ut64)bin->sects[i].addr; if (!entry->addr) { // workaround for object files entry->addr = entry->offset; } break; } } bin->entry = entry->addr; } return entry; } void MACH0_(kv_loadlibs)(struct MACH0_(obj_t)* bin) { int i; for (i = 0; i < bin->nlibs; i++) { sdb_set (bin->kv, sdb_fmt ("libs.%d.name", i), bin->libs[i], 0); } } struct lib_t* MACH0_(get_libs)(struct MACH0_(obj_t)* bin) { struct lib_t *libs; int i; if (!bin->nlibs) { return NULL; } if (!(libs = calloc ((bin->nlibs + 1), sizeof (struct lib_t)))) { return NULL; } for (i = 0; i < bin->nlibs; i++) { sdb_set (bin->kv, sdb_fmt ("libs.%d.name", i), bin->libs[i], 0); strncpy (libs[i].name, bin->libs[i], R_BIN_MACH0_STRING_LENGTH); libs[i].name[R_BIN_MACH0_STRING_LENGTH-1] = '\0'; libs[i].last = 0; } libs[i].last = 1; return libs; } ut64 MACH0_(get_baddr)(struct MACH0_(obj_t)* bin) { int i; if (bin->hdr.filetype != MH_EXECUTE && bin->hdr.filetype != MH_DYLINKER) { return 0; } for (i = 0; i < bin->nsegs; ++i) { if (bin->segs[i].fileoff == 0 && bin->segs[i].filesize != 0) { return bin->segs[i].vmaddr; } } return 0; } char* MACH0_(get_class)(struct MACH0_(obj_t)* bin) { #if R_BIN_MACH064 return r_str_new ("MACH064"); #else return r_str_new ("MACH0"); #endif } //XXX we are mixing up bits from cpu and opcodes //since thumb use 16 bits opcode but run in 32 bits //cpus so here we should only return 32 or 64 int MACH0_(get_bits)(struct MACH0_(obj_t)* bin) { if (bin) { int bits = MACH0_(get_bits_from_hdr) (&bin->hdr); if (bin->hdr.cputype == CPU_TYPE_ARM && bin->entry & 1) { return 16; } return bits; } return 32; } int MACH0_(get_bits_from_hdr)(struct MACH0_(mach_header)* hdr) { if (hdr->magic == MH_MAGIC_64 || hdr->magic == MH_CIGAM_64) { return 64; } if ((hdr->cpusubtype & CPU_SUBTYPE_MASK) == (CPU_SUBTYPE_ARM_V7K << 24)) { return 16; } return 32; } bool MACH0_(is_big_endian)(struct MACH0_(obj_t)* bin) { if (bin) { const int cpu = bin->hdr.cputype; return cpu == CPU_TYPE_POWERPC || cpu == CPU_TYPE_POWERPC64; } return false; } const char* MACH0_(get_intrp)(struct MACH0_(obj_t)* bin) { return bin? bin->intrp: NULL; } const char* MACH0_(get_os)(struct MACH0_(obj_t)* bin) { if (bin) switch (bin->os) { case 1: return "macos"; case 2: return "ios"; case 3: return "watchos"; case 4: return "tvos"; } return "darwin"; } const char* MACH0_(get_cputype_from_hdr)(struct MACH0_(mach_header) *hdr) { const char *archstr = "unknown"; switch (hdr->cputype) { case CPU_TYPE_VAX: archstr = "vax"; break; case CPU_TYPE_MC680x0: archstr = "mc680x0"; break; case CPU_TYPE_I386: case CPU_TYPE_X86_64: archstr = "x86"; break; case CPU_TYPE_MC88000: archstr = "mc88000"; break; case CPU_TYPE_MC98000: archstr = "mc98000"; break; case CPU_TYPE_HPPA: archstr = "hppa"; break; case CPU_TYPE_ARM: case CPU_TYPE_ARM64: archstr = "arm"; break; case CPU_TYPE_SPARC: archstr = "sparc"; break; case CPU_TYPE_MIPS: archstr = "mips"; break; case CPU_TYPE_I860: archstr = "i860"; break; case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: archstr = "ppc"; } return archstr; } const char* MACH0_(get_cputype)(struct MACH0_(obj_t)* bin) { return bin? 
MACH0_(get_cputype_from_hdr) (&bin->hdr): "unknown"; } // TODO: use const char* char* MACH0_(get_cpusubtype_from_hdr)(struct MACH0_(mach_header) *hdr) { if (hdr) { switch (hdr->cputype) { case CPU_TYPE_VAX: switch (hdr->cpusubtype) { case CPU_SUBTYPE_VAX_ALL: return strdup ("all"); case CPU_SUBTYPE_VAX780: return strdup ("vax780"); case CPU_SUBTYPE_VAX785: return strdup ("vax785"); case CPU_SUBTYPE_VAX750: return strdup ("vax750"); case CPU_SUBTYPE_VAX730: return strdup ("vax730"); case CPU_SUBTYPE_UVAXI: return strdup ("uvaxI"); case CPU_SUBTYPE_UVAXII: return strdup ("uvaxII"); case CPU_SUBTYPE_VAX8200: return strdup ("vax8200"); case CPU_SUBTYPE_VAX8500: return strdup ("vax8500"); case CPU_SUBTYPE_VAX8600: return strdup ("vax8600"); case CPU_SUBTYPE_VAX8650: return strdup ("vax8650"); case CPU_SUBTYPE_VAX8800: return strdup ("vax8800"); case CPU_SUBTYPE_UVAXIII: return strdup ("uvaxIII"); default: return strdup ("Unknown vax subtype"); } case CPU_TYPE_MC680x0: switch (hdr->cpusubtype) { case CPU_SUBTYPE_MC68030: return strdup ("mc68030"); case CPU_SUBTYPE_MC68040: return strdup ("mc68040"); case CPU_SUBTYPE_MC68030_ONLY: return strdup ("mc68030 only"); default: return strdup ("Unknown mc680x0 subtype"); } case CPU_TYPE_I386: switch (hdr->cpusubtype) { case CPU_SUBTYPE_386: return strdup ("386"); case CPU_SUBTYPE_486: return strdup ("486"); case CPU_SUBTYPE_486SX: return strdup ("486sx"); case CPU_SUBTYPE_PENT: return strdup ("Pentium"); case CPU_SUBTYPE_PENTPRO: return strdup ("Pentium Pro"); case CPU_SUBTYPE_PENTII_M3: return strdup ("Pentium 3 M3"); case CPU_SUBTYPE_PENTII_M5: return strdup ("Pentium 3 M5"); case CPU_SUBTYPE_CELERON: return strdup ("Celeron"); case CPU_SUBTYPE_CELERON_MOBILE: return strdup ("Celeron Mobile"); case CPU_SUBTYPE_PENTIUM_3: return strdup ("Pentium 3"); case CPU_SUBTYPE_PENTIUM_3_M: return strdup ("Pentium 3 M"); case CPU_SUBTYPE_PENTIUM_3_XEON: return strdup ("Pentium 3 Xeon"); case CPU_SUBTYPE_PENTIUM_M: return strdup ("Pentium Mobile"); case CPU_SUBTYPE_PENTIUM_4: return strdup ("Pentium 4"); case CPU_SUBTYPE_PENTIUM_4_M: return strdup ("Pentium 4 M"); case CPU_SUBTYPE_ITANIUM: return strdup ("Itanium"); case CPU_SUBTYPE_ITANIUM_2: return strdup ("Itanium 2"); case CPU_SUBTYPE_XEON: return strdup ("Xeon"); case CPU_SUBTYPE_XEON_MP: return strdup ("Xeon MP"); default: return strdup ("Unknown i386 subtype"); } case CPU_TYPE_X86_64: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_X86_64_ALL: return strdup ("x86 64 all"); case CPU_SUBTYPE_X86_ARCH1: return strdup ("x86 arch 1"); default: return strdup ("Unknown x86 subtype"); } case CPU_TYPE_MC88000: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_MC88000_ALL: return strdup ("all"); case CPU_SUBTYPE_MC88100: return strdup ("mc88100"); case CPU_SUBTYPE_MC88110: return strdup ("mc88110"); default: return strdup ("Unknown mc88000 subtype"); } case CPU_TYPE_MC98000: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_MC98000_ALL: return strdup ("all"); case CPU_SUBTYPE_MC98601: return strdup ("mc98601"); default: return strdup ("Unknown mc98000 subtype"); } case CPU_TYPE_HPPA: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_HPPA_7100: return strdup ("hppa7100"); case CPU_SUBTYPE_HPPA_7100LC: return strdup ("hppa7100LC"); default: return strdup ("Unknown hppa subtype"); } case CPU_TYPE_ARM64: return strdup ("v8"); case CPU_TYPE_ARM: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_ARM_ALL: return strdup ("all"); case CPU_SUBTYPE_ARM_V4T: return strdup ("v4t"); case CPU_SUBTYPE_ARM_V5: return strdup 
("v5"); case CPU_SUBTYPE_ARM_V6: return strdup ("v6"); case CPU_SUBTYPE_ARM_XSCALE: return strdup ("xscale"); case CPU_SUBTYPE_ARM_V7: return strdup ("v7"); case CPU_SUBTYPE_ARM_V7F: return strdup ("v7f"); case CPU_SUBTYPE_ARM_V7S: return strdup ("v7s"); case CPU_SUBTYPE_ARM_V7K: return strdup ("v7k"); case CPU_SUBTYPE_ARM_V7M: return strdup ("v7m"); case CPU_SUBTYPE_ARM_V7EM: return strdup ("v7em"); default: return r_str_newf ("unknown ARM subtype %d", hdr->cpusubtype & 0xff); } case CPU_TYPE_SPARC: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_SPARC_ALL: return strdup ("all"); default: return strdup ("Unknown sparc subtype"); } case CPU_TYPE_MIPS: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_MIPS_ALL: return strdup ("all"); case CPU_SUBTYPE_MIPS_R2300: return strdup ("r2300"); case CPU_SUBTYPE_MIPS_R2600: return strdup ("r2600"); case CPU_SUBTYPE_MIPS_R2800: return strdup ("r2800"); case CPU_SUBTYPE_MIPS_R2000a: return strdup ("r2000a"); case CPU_SUBTYPE_MIPS_R2000: return strdup ("r2000"); case CPU_SUBTYPE_MIPS_R3000a: return strdup ("r3000a"); case CPU_SUBTYPE_MIPS_R3000: return strdup ("r3000"); default: return strdup ("Unknown mips subtype"); } case CPU_TYPE_I860: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_I860_ALL: return strdup ("all"); case CPU_SUBTYPE_I860_860: return strdup ("860"); default: return strdup ("Unknown i860 subtype"); } case CPU_TYPE_POWERPC: case CPU_TYPE_POWERPC64: switch (hdr->cpusubtype & 0xff) { case CPU_SUBTYPE_POWERPC_ALL: return strdup ("all"); case CPU_SUBTYPE_POWERPC_601: return strdup ("601"); case CPU_SUBTYPE_POWERPC_602: return strdup ("602"); case CPU_SUBTYPE_POWERPC_603: return strdup ("603"); case CPU_SUBTYPE_POWERPC_603e: return strdup ("603e"); case CPU_SUBTYPE_POWERPC_603ev: return strdup ("603ev"); case CPU_SUBTYPE_POWERPC_604: return strdup ("604"); case CPU_SUBTYPE_POWERPC_604e: return strdup ("604e"); case CPU_SUBTYPE_POWERPC_620: return strdup ("620"); case CPU_SUBTYPE_POWERPC_750: return strdup ("750"); case CPU_SUBTYPE_POWERPC_7400: return strdup ("7400"); case CPU_SUBTYPE_POWERPC_7450: return strdup ("7450"); case CPU_SUBTYPE_POWERPC_970: return strdup ("970"); default: return strdup ("Unknown ppc subtype"); } } } return strdup ("Unknown cputype"); } char* MACH0_(get_cpusubtype)(struct MACH0_(obj_t)* bin) { if (bin) { return MACH0_(get_cpusubtype_from_hdr) (&bin->hdr); } return strdup ("Unknown"); } int MACH0_(is_pie)(struct MACH0_(obj_t)* bin) { return (bin && bin->hdr.filetype == MH_EXECUTE && bin->hdr.flags & MH_PIE); } int MACH0_(has_nx)(struct MACH0_(obj_t)* bin) { return (bin && bin->hdr.filetype == MH_EXECUTE && bin->hdr.flags & MH_NO_HEAP_EXECUTION); } char* MACH0_(get_filetype_from_hdr)(struct MACH0_(mach_header) *hdr) { const char *mhtype = "Unknown"; switch (hdr->filetype) { case MH_OBJECT: mhtype = "Relocatable object"; break; case MH_EXECUTE: mhtype = "Executable file"; break; case MH_FVMLIB: mhtype = "Fixed VM shared library"; break; case MH_CORE: mhtype = "Core file"; break; case MH_PRELOAD: mhtype = "Preloaded executable file"; break; case MH_DYLIB: mhtype = "Dynamically bound shared library"; break; case MH_DYLINKER: mhtype = "Dynamic link editor"; break; case MH_BUNDLE: mhtype = "Dynamically bound bundle file"; break; case MH_DYLIB_STUB: mhtype = "Shared library stub for static linking (no sections)"; break; case MH_DSYM: mhtype = "Companion file with only debug sections"; break; } return strdup (mhtype); } char* MACH0_(get_filetype)(struct MACH0_(obj_t)* bin) { if (bin) { return 
MACH0_(get_filetype_from_hdr) (&bin->hdr); } return strdup ("Unknown"); } ut64 MACH0_(get_main)(struct MACH0_(obj_t)* bin) { ut64 addr = 0LL; struct symbol_t *symbols; int i; if (!(symbols = MACH0_(get_symbols) (bin))) { return 0; } for (i = 0; !symbols[i].last; i++) { const char *name = symbols[i].name; if (!strcmp (name, "__Dmain")) { addr = symbols[i].addr; break; } if (strstr (name, "4main") && !strstr (name, "STATIC")) { addr = symbols[i].addr; break; } if (!strcmp (symbols[i].name, "_main")) { addr = symbols[i].addr; // break; } } free (symbols); if (!addr && bin->main_cmd.cmd == LC_MAIN) { addr = bin->entry + bin->baddr; } if (!addr) { ut8 b[128]; ut64 entry = addr_to_offset(bin, bin->entry); // XXX: X86 only and hacky! if (entry > bin->size || entry + sizeof (b) > bin->size) { return 0; } i = r_buf_read_at (bin->b, entry, b, sizeof (b)); if (i < 1) { return 0; } for (i = 0; i < 64; i++) { if (b[i] == 0xe8 && !b[i+3] && !b[i+4]) { int delta = b[i+1] | (b[i+2] << 8) | (b[i+3] << 16) | (b[i+4] << 24); return bin->entry + i + 5 + delta; } } } return addr; } void MACH0_(mach_headerfields)(RBinFile *file) { RBuffer *buf = file->buf; int n = 0; struct MACH0_(mach_header) *mh = MACH0_(get_hdr_from_bytes)(buf); if (!mh) { return; } printf ("0x00000000 Magic 0x%x\n", mh->magic); printf ("0x00000004 CpuType 0x%x\n", mh->cputype); printf ("0x00000008 CpuSubType 0x%x\n", mh->cpusubtype); printf ("0x0000000c FileType 0x%x\n", mh->filetype); printf ("0x00000010 nCmds %d\n", mh->ncmds); printf ("0x00000014 sizeOfCmds %d\n", mh->sizeofcmds); printf ("0x00000018 Flags 0x%x\n", mh->flags); ut64 addr = 0x20 - 4; ut32 word = 0; ut8 wordbuf[sizeof (word)]; #define READWORD() \ addr += 4; \ if (!r_buf_read_at (buf, addr, (ut8*)wordbuf, 4)) { \ eprintf ("Invalid address in buffer."); \ break; \ } \ word = r_read_le32 (wordbuf); for (n = 0; n < mh->ncmds; n++) { printf ("\n# Load Command %d\n", n); READWORD(); int lcType = word; eprintf ("0x%08"PFMT64x" cmd 0x%x %s\n", addr, lcType, cmd_to_string (lcType)); READWORD(); int lcSize = word; word &= 0xFFFFFF; printf ("0x%08"PFMT64x" cmdsize %d\n", addr, word); if (lcSize < 1) { eprintf ("Invalid size for a load command\n"); break; } switch (lcType) { case LC_ID_DYLIB: // install_name_tool printf ("0x%08"PFMT64x" id %s\n", addr + 20, r_buf_get_at (buf, addr + 20, NULL)); break; case LC_UUID: printf ("0x%08"PFMT64x" uuid %s\n", addr + 20, r_buf_get_at (buf, addr + 32, NULL)); break; case LC_LOAD_DYLIB: printf ("0x%08"PFMT64x" uuid %s\n", addr + 20, r_buf_get_at (buf, addr + 20, NULL)); break; case LC_RPATH: printf ("0x%08"PFMT64x" uuid %s\n", addr + 8, r_buf_get_at (buf, addr + 8, NULL)); break; case LC_CODE_SIGNATURE: { ut32 *words = (ut32*)r_buf_get_at (buf, addr + 4, NULL); printf ("0x%08"PFMT64x" dataoff 0x%08x\n", addr + 4, words[0]); printf ("0x%08"PFMT64x" datasize %d\n", addr + 8, words[1]); printf ("# wtf mach0.sign %d @ 0x%x\n", words[1], words[0]); } break; } addr += word - 8; } free (mh); } RList* MACH0_(mach_fields)(RBinFile *bf) { struct MACH0_(mach_header) *mh = MACH0_(get_hdr_from_bytes)(bf->buf); if (!mh) { return NULL; } RList *ret = r_list_new (); if (!ret) { return NULL; } ret->free = free; ut64 addr = 0; #define ROW(nam,siz,val,fmt) \ r_list_append (ret, r_bin_field_new (addr, addr, siz, nam, sdb_fmt ("0x%08x", val), fmt)); \ addr += 4; ROW("hdr.magic", 4, mh->magic, "x"); ROW("hdr.cputype", 4, mh->cputype, NULL); ROW("hdr.cpusubtype", 4, mh->cpusubtype, NULL); ROW("hdr.filetype", 4, mh->filetype, NULL); ROW("hdr.ncmds", 4, mh->ncmds, NULL); 
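/* Each ROW() emits one 32-bit mach_header field as an RBinField and advances
 * the addr cursor by 4; sizeofcmds below is the last field exposed here (the
 * flags and reserved words are not listed). */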
ROW("hdr.sizeofcmds", 4, mh->sizeofcmds, NULL); free (mh); return ret; } struct MACH0_(mach_header) * MACH0_(get_hdr_from_bytes)(RBuffer *buf) { ut8 magicbytes[sizeof (ut32)] = {0}; ut8 machohdrbytes[sizeof (struct MACH0_(mach_header))] = {0}; int len; struct MACH0_(mach_header) *macho_hdr = R_NEW0 (struct MACH0_(mach_header)); bool big_endian = false; if (!macho_hdr) { return NULL; } if (r_buf_read_at (buf, 0, magicbytes, 4) < 1) { free (macho_hdr); return false; } if (r_read_le32 (magicbytes) == 0xfeedface) { big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedface) { big_endian = true; } else if (r_read_le32 (magicbytes) == FAT_MAGIC) { big_endian = false; } else if (r_read_be32 (magicbytes) == FAT_MAGIC) { big_endian = true; } else if (r_read_le32 (magicbytes) == 0xfeedfacf) { big_endian = false; } else if (r_read_be32 (magicbytes) == 0xfeedfacf) { big_endian = true; } else { /* also extract non-mach0s */ #if 0 free (macho_hdr); return NULL; #endif } len = r_buf_read_at (buf, 0, machohdrbytes, sizeof (machohdrbytes)); if (len != sizeof (struct MACH0_(mach_header))) { free (macho_hdr); return NULL; } macho_hdr->magic = r_read_ble (&machohdrbytes[0], big_endian, 32); macho_hdr->cputype = r_read_ble (&machohdrbytes[4], big_endian, 32); macho_hdr->cpusubtype = r_read_ble (&machohdrbytes[8], big_endian, 32); macho_hdr->filetype = r_read_ble (&machohdrbytes[12], big_endian, 32); macho_hdr->ncmds = r_read_ble (&machohdrbytes[16], big_endian, 32); macho_hdr->sizeofcmds = r_read_ble (&machohdrbytes[20], big_endian, 32); macho_hdr->flags = r_read_ble (&machohdrbytes[24], big_endian, 32); #if R_BIN_MACH064 macho_hdr->reserved = r_read_ble (&machohdrbytes[28], big_endian, 32); #endif return macho_hdr; }
static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) {
	int i, j, sym, wordsize;
	ut32 stype;
	wordsize = MACH0_(get_bits)(bin) / 8;
	if (idx < 0 || idx >= bin->nsymtab) {
		return 0;
	}
	if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) {
		stype = S_LAZY_SYMBOL_POINTERS;
	} else {
		stype = S_NON_LAZY_SYMBOL_POINTERS;
	}
	reloc->offset = 0;
	reloc->addr = 0;
	reloc->addend = 0;
#define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break
	switch (wordsize) {
	CASE(8);
	CASE(16);
	CASE(32);
	CASE(64);
	default: return false;
	}
#undef CASE
	for (i = 0; i < bin->nsects; i++) {
		if ((bin->sects[i].flags & SECTION_TYPE) == stype) {
			for (j=0, sym=-1; bin->sects[i].reserved1+j < bin->nindirectsyms; j++)
				if (idx == bin->indirectsyms[bin->sects[i].reserved1 + j]) {
					sym = j;
					break;
				}

			reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize;
			reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize;
			return true;
		}
	}
	return false;
}
static int parse_import_ptr(struct MACH0_(obj_t)* bin, struct reloc_t *reloc, int idx) {
	int i, j, sym, wordsize;
	ut32 stype;
	wordsize = MACH0_(get_bits)(bin) / 8;
	if (idx < 0 || idx >= bin->nsymtab) {
		return 0;
	}
	if ((bin->symtab[idx].n_desc & REFERENCE_TYPE) == REFERENCE_FLAG_UNDEFINED_LAZY) {
		stype = S_LAZY_SYMBOL_POINTERS;
	} else {
		stype = S_NON_LAZY_SYMBOL_POINTERS;
	}
	reloc->offset = 0;
	reloc->addr = 0;
	reloc->addend = 0;
#define CASE(T) case (T / 8): reloc->type = R_BIN_RELOC_ ## T; break
	switch (wordsize) {
	CASE(8);
	CASE(16);
	CASE(32);
	CASE(64);
	default: return false;
	}
#undef CASE
	for (i = 0; i < bin->nsects; i++) {
		if ((bin->sects[i].flags & SECTION_TYPE) == stype) {
			for (j = 0, sym = -1; bin->sects[i].reserved1 + j < bin->nindirectsyms; j++) {
				int indidx = bin->sects[i].reserved1 + j;
				if (indidx < 0 || indidx >= bin->nindirectsyms) {
					break;
				}
				if (idx == bin->indirectsyms[indidx]) {
					sym = j;
					break;
				}
			}
			reloc->offset = sym == -1 ? 0 : bin->sects[i].offset + sym * wordsize;
			reloc->addr = sym == -1 ? 0 : bin->sects[i].addr + sym * wordsize;
			return true;
		}
	}
	return false;
}
{'added': [(1589, '\t\tif (parse_import_stub(bin, &symbols[j], i)) {'), (1591, '\t\t}'), (1666, '\t\t\tfor (j = 0, sym = -1; bin->sects[i].reserved1 + j < bin->nindirectsyms; j++) {'), (1667, '\t\t\t\tint indidx = bin->sects[i].reserved1 + j;'), (1668, '\t\t\t\tif (indidx < 0 || indidx >= bin->nindirectsyms) {'), (1669, '\t\t\t\t\tbreak;'), (1670, '\t\t\t\t}'), (1671, '\t\t\t\tif (idx == bin->indirectsyms[indidx]) {'), (1675, '\t\t\t}'), (1689, '\tif (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms) {'), (1691, '\t}')], 'deleted': [(1589, '\t\tif (parse_import_stub(bin, &symbols[j], i))'), (1665, '\t\t\tfor (j=0, sym=-1; bin->sects[i].reserved1+j < bin->nindirectsyms; j++)'), (1666, '\t\t\t\tif (idx == bin->indirectsyms[bin->sects[i].reserved1 + j]) {'), (1670, ''), (1684, '\tif (!bin->symtab || !bin->symstr || !bin->sects || !bin->indirectsyms)')]}
11
5
2,270
18,097
36
287
10
https://github.com/radare/radare2
CVE-2018-11380
CWE-125
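Reading the record above: CVE-2018-11380 is an out-of-bounds read (CWE-125) in radare2's Mach-O import parsing. In func_before, the inner loop indexes bin->indirectsyms[bin->sects[i].reserved1 + j], where reserved1 is taken straight from the section header of the file being parsed; func_after stores the computed index in an int and rejects anything outside [0, nindirectsyms) before dereferencing, and the diff also adds a NULL check on bin->symtab, bin->symstr, bin->sects and bin->indirectsyms. A minimal stand-alone sketch of the same guard pattern, with hypothetical names rather than radare2's API (the widening to int64_t is this sketch's choice, not the upstream patch):

#include <stdint.h>
#include <stdio.h>

/* Range-check an index derived from untrusted file data before the
 * array access, mirroring the shape of the CVE-2018-11380 fix. */
int lookup_indirect(const uint32_t *indirectsyms, int nindirectsyms,
                    uint32_t reserved1 /* untrusted */, int j)
{
	int64_t indidx = (int64_t)reserved1 + j; /* widened here for the sketch */
	if (indidx < 0 || indidx >= nindirectsyms) {
		return -1; /* refuse the out-of-range access */
	}
	return (int)indirectsyms[indidx];
}

int main(void)
{
	uint32_t syms[4] = { 10, 11, 12, 13 };
	printf ("%d\n", lookup_indirect (syms, 4, 2, 1));           /* 13 */
	printf ("%d\n", lookup_indirect (syms, 4, 0xfffffff0u, 8)); /* -1, rejected */
	return 0;
}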
2,128
snmp-api.c
C
snmp_api_set_time_ticks
/*
 * Copyright (C) 2019 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*---------------------------------------------------------------------------*/
/**
 * \file
 *      An implementation of the Simple Network Management Protocol (RFC 3411-3418)
 * \author
 *      Yago Fontoura do Rosario <yago.rosario@hotmail.com.br
 */

#include "contiki.h"

#include "snmp-api.h"

#include "snmp-message.h"
#include "snmp-ber.h"
#include "snmp-oid.h"

static void
snmp_api_replace_oid(snmp_varbind_t *varbind, uint32_t *oid)
{
  uint8_t i;

  i = 0;
  while(oid[i] != ((uint32_t)-1)) {
    varbind->oid[i] = oid[i];
    i++;
  }
  varbind->oid[i] = ((uint32_t)-1);
}
/*---------------------------------------------------------------------------*/
void
snmp_api_set_string(snmp_varbind_t *varbind, uint32_t *oid, char *string)
{

  snmp_api_replace_oid(varbind, oid);

  varbind->value_type = BER_DATA_TYPE_OCTET_STRING;
  varbind->value.string.string = string;
  varbind->value.string.length = strlen(string);
}
/*---------------------------------------------------------------------------*/
void
snmp_api_set_time_ticks(snmp_varbind_t *varbind, uint32_t *oid, uint32_t integer)
{

  snmp_api_replace_oid(varbind, oid);

  varbind->value_type = SNMP_DATA_TYPE_TIME_TICKS;
  varbind->value.integer = integer;
}
/*---------------------------------------------------------------------------*/
void
snmp_api_set_oid(snmp_varbind_t *varbind, uint32_t *oid, uint32_t *ret_oid)
{

  snmp_api_replace_oid(varbind, oid);

  varbind->value_type = BER_DATA_TYPE_OID;
  varbind->value.oid = ret_oid;
}
/*---------------------------------------------------------------------------*/
void
snmp_api_add_resource(snmp_mib_resource_t *new_resource)
{
  return snmp_mib_add(new_resource);
}
/*
 * Copyright (C) 2019-2020 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*---------------------------------------------------------------------------*/
/**
 * \file
 *      SNMP Implementation of the public API
 * \author
 *      Yago Fontoura do Rosario <yago.rosario@hotmail.com.br
 */

#include "contiki.h"

#include "snmp-api.h"

#include "snmp-message.h"
#include "snmp-ber.h"

/*---------------------------------------------------------------------------*/
void
snmp_api_set_string(snmp_varbind_t *varbind, snmp_oid_t *oid, char *string)
{
  memcpy(&varbind->oid, oid, sizeof(snmp_oid_t));

  varbind->value_type = BER_DATA_TYPE_OCTET_STRING;
  varbind->value.string.string = string;
  varbind->value.string.length = strlen(string);
}
/*---------------------------------------------------------------------------*/
void
snmp_api_set_time_ticks(snmp_varbind_t *varbind, snmp_oid_t *oid, uint32_t integer)
{
  memcpy(&varbind->oid, oid, sizeof(snmp_oid_t));

  varbind->value_type = BER_DATA_TYPE_TIMETICKS;
  varbind->value.integer = integer;
}
/*---------------------------------------------------------------------------*/
void
snmp_api_set_oid(snmp_varbind_t *varbind, snmp_oid_t *oid, snmp_oid_t *ret_oid)
{
  memcpy(&varbind->oid, oid, sizeof(snmp_oid_t));

  varbind->value_type = BER_DATA_TYPE_OBJECT_IDENTIFIER;
  memcpy(&varbind->value.oid, ret_oid, sizeof(snmp_oid_t));
}
/*---------------------------------------------------------------------------*/
void
snmp_api_add_resource(snmp_mib_resource_t *new_resource)
{
  return snmp_mib_add(new_resource);
}
snmp_api_set_time_ticks(snmp_varbind_t *varbind, uint32_t *oid, uint32_t integer)
{

  snmp_api_replace_oid(varbind, oid);

  varbind->value_type = SNMP_DATA_TYPE_TIME_TICKS;
  varbind->value.integer = integer;
}
snmp_api_set_time_ticks(snmp_varbind_t *varbind, snmp_oid_t *oid, uint32_t integer)
{
  memcpy(&varbind->oid, oid, sizeof(snmp_oid_t));

  varbind->value_type = BER_DATA_TYPE_TIMETICKS;
  varbind->value.integer = integer;
}
{'added': [(2, ' * Copyright (C) 2019-2020 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br>'), (35, ' * SNMP Implementation of the public API'), (49, 'snmp_api_set_string(snmp_varbind_t *varbind, snmp_oid_t *oid, char *string)'), (51, ' memcpy(&varbind->oid, oid, sizeof(snmp_oid_t));'), (58, 'snmp_api_set_time_ticks(snmp_varbind_t *varbind, snmp_oid_t *oid, uint32_t integer)'), (60, ' memcpy(&varbind->oid, oid, sizeof(snmp_oid_t));'), (61, ' varbind->value_type = BER_DATA_TYPE_TIMETICKS;'), (66, 'snmp_api_set_oid(snmp_varbind_t *varbind, snmp_oid_t *oid, snmp_oid_t *ret_oid)'), (68, ' memcpy(&varbind->oid, oid, sizeof(snmp_oid_t));'), (69, ' varbind->value_type = BER_DATA_TYPE_OBJECT_IDENTIFIER;'), (70, ' memcpy(&varbind->value.oid, ret_oid, sizeof(snmp_oid_t));')], 'deleted': [(2, ' * Copyright (C) 2019 Yago Fontoura do Rosario <yago.rosario@hotmail.com.br>'), (35, ' * An implementation of the Simple Network Management Protocol (RFC 3411-3418)'), (46, '#include "snmp-oid.h"'), (48, 'static void'), (49, 'snmp_api_replace_oid(snmp_varbind_t *varbind, uint32_t *oid)'), (50, '{'), (51, ' uint8_t i;'), (52, ''), (53, ' i = 0;'), (54, ' while(oid[i] != ((uint32_t)-1)) {'), (55, ' varbind->oid[i] = oid[i];'), (56, ' i++;'), (57, ' }'), (58, ' varbind->oid[i] = ((uint32_t)-1);'), (59, '}'), (62, 'snmp_api_set_string(snmp_varbind_t *varbind, uint32_t *oid, char *string)'), (64, ''), (65, ' snmp_api_replace_oid(varbind, oid);'), (72, 'snmp_api_set_time_ticks(snmp_varbind_t *varbind, uint32_t *oid, uint32_t integer)'), (74, ''), (75, ' snmp_api_replace_oid(varbind, oid);'), (76, ' varbind->value_type = SNMP_DATA_TYPE_TIME_TICKS;'), (81, 'snmp_api_set_oid(snmp_varbind_t *varbind, uint32_t *oid, uint32_t *ret_oid)'), (83, ''), (84, ' snmp_api_replace_oid(varbind, oid);'), (85, ' varbind->value_type = BER_DATA_TYPE_OID;'), (86, ' varbind->value.oid = ret_oid;')]}
11
27
31
184
6
36
1
https://github.com/contiki-ng/contiki-ng
CVE-2020-12141
CWE-125
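Reading the record above: CVE-2020-12141 is an out-of-bounds read (CWE-125) in Contiki-NG's SNMP public API. The old snmp_api_replace_oid() copied OID components until it hit a ((uint32_t)-1) sentinel, so an OID missing the terminator (or longer than the destination array) walks past the buffer on both the read and the write side. The fix removes the sentinel walk entirely: OIDs become a fixed-size snmp_oid_t struct and every setter copies it with memcpy(&varbind->oid, oid, sizeof(snmp_oid_t)). A minimal sketch of the before/after pattern; the oid_t type and OID_MAX_LEN below are hypothetical stand-ins for this note, not Contiki-NG's definitions:

#include <stdint.h>
#include <string.h>

#define OID_MAX_LEN 16

typedef struct {
  uint32_t data[OID_MAX_LEN];
  uint32_t length;
} oid_t;

/* Unsafe pattern (pre-fix): both the read and the write run until a
 * sentinel that untrusted input is not guaranteed to contain. */
void
oid_copy_sentinel(uint32_t *dst, const uint32_t *src)
{
  uint32_t i = 0;
  while(src[i] != (uint32_t)-1) { /* unbounded read */
    dst[i] = src[i];              /* unbounded write */
    i++;
  }
  dst[i] = (uint32_t)-1;
}

/* Safe pattern (post-fix): the copy is bounded by the struct size,
 * independent of the data's contents, as in the patched setters. */
void
oid_copy_bounded(oid_t *dst, const oid_t *src)
{
  memcpy(dst, src, sizeof(oid_t));
}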
2,814
eval.c
C
eval_next_line
/* vi:set ts=8 sts=4 sw=4 noet: * * VIM - Vi IMproved by Bram Moolenaar * * Do ":help uganda" in Vim to read copying and usage conditions. * Do ":help credits" in Vim to see a list of people who contributed. * See README.txt for an overview of the Vim source code. */ /* * eval.c: Expression evaluation. */ #define USING_FLOAT_STUFF #include "vim.h" #if defined(FEAT_EVAL) || defined(PROTO) #ifdef VMS # include <float.h> #endif #define NAMESPACE_CHAR (char_u *)"abglstvw" /* * When recursively copying lists and dicts we need to remember which ones we * have done to avoid endless recursiveness. This unique ID is used for that. * The last bit is used for previous_funccal, ignored when comparing. */ static int current_copyID = 0; /* * Info used by a ":for" loop. */ typedef struct { int fi_semicolon; // TRUE if ending in '; var]' int fi_varcount; // nr of variables in the list int fi_break_count; // nr of line breaks encountered listwatch_T fi_lw; // keep an eye on the item used. list_T *fi_list; // list being used int fi_bi; // index of blob blob_T *fi_blob; // blob being used char_u *fi_string; // copy of string being used int fi_byte_idx; // byte index in fi_string } forinfo_T; static int eval2(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval3(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval4(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval5(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval6(char_u **arg, typval_T *rettv, evalarg_T *evalarg); static int eval7(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string); static int eval8(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string); static int eval9(char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string); static int eval9_leader(typval_T *rettv, int numeric_only, char_u *start_leader, char_u **end_leaderp); static int free_unref_items(int copyID); static char_u *make_expanded_name(char_u *in_start, char_u *expr_start, char_u *expr_end, char_u *in_end); /* * Return "n1" divided by "n2", taking care of dividing by zero. * If "failed" is not NULL set it to TRUE when dividing by zero fails. */ varnumber_T num_divide(varnumber_T n1, varnumber_T n2, int *failed) { varnumber_T result; if (n2 == 0) { if (in_vim9script()) { emsg(_(e_divide_by_zero)); if (failed != NULL) *failed = TRUE; } if (n1 == 0) result = VARNUM_MIN; // similar to NaN else if (n1 < 0) result = -VARNUM_MAX; else result = VARNUM_MAX; } else result = n1 / n2; return result; } /* * Return "n1" modulus "n2", taking care of dividing by zero. * If "failed" is not NULL set it to TRUE when dividing by zero fails. */ varnumber_T num_modulus(varnumber_T n1, varnumber_T n2, int *failed) { if (n2 == 0 && in_vim9script()) { emsg(_(e_divide_by_zero)); if (failed != NULL) *failed = TRUE; } return (n2 == 0) ? 0 : (n1 % n2); } /* * Initialize the global and v: variables. */ void eval_init(void) { evalvars_init(); func_init(); } #if defined(EXITFREE) || defined(PROTO) void eval_clear(void) { evalvars_clear(); free_scriptnames(); // must come after evalvars_clear(). free_locales(); // autoloaded script names free_autoload_scriptnames(); // unreferenced lists and dicts (void)garbage_collect(FALSE); // functions not garbage collected free_all_functions(); } #endif void fill_evalarg_from_eap(evalarg_T *evalarg, exarg_T *eap, int skip) { init_evalarg(evalarg); evalarg->eval_flags = skip ? 
0 : EVAL_EVALUATE; if (eap != NULL) { evalarg->eval_cstack = eap->cstack; if (sourcing_a_script(eap) || eap->getline == get_list_line) { evalarg->eval_getline = eap->getline; evalarg->eval_cookie = eap->cookie; } } } /* * Top level evaluation function, returning a boolean. * Sets "error" to TRUE if there was an error. * Return TRUE or FALSE. */ int eval_to_bool( char_u *arg, int *error, exarg_T *eap, int skip) // only parse, don't execute { typval_T tv; varnumber_T retval = FALSE; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, skip); if (skip) ++emsg_skip; if (eval0(arg, &tv, eap, &evalarg) == FAIL) *error = TRUE; else { *error = FALSE; if (!skip) { if (in_vim9script()) retval = tv_get_bool_chk(&tv, error); else retval = (tv_get_number_chk(&tv, error) != 0); clear_tv(&tv); } } if (skip) --emsg_skip; clear_evalarg(&evalarg, eap); return (int)retval; } /* * Call eval1() and give an error message if not done at a lower level. */ static int eval1_emsg(char_u **arg, typval_T *rettv, exarg_T *eap) { char_u *start = *arg; int ret; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); ret = eval1(arg, rettv, &evalarg); if (ret == FAIL) { // Report the invalid expression unless the expression evaluation has // been cancelled due to an aborting error, an interrupt, or an // exception, or we already gave a more specific error. // Also check called_emsg for when using assert_fails(). if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before) semsg(_(e_invalid_expression_str), start); } clear_evalarg(&evalarg, eap); return ret; } /* * Return whether a typval is a valid expression to pass to eval_expr_typval() * or eval_expr_to_bool(). An empty string returns FALSE; */ int eval_expr_valid_arg(typval_T *tv) { return tv->v_type != VAR_UNKNOWN && (tv->v_type != VAR_STRING || (tv->vval.v_string != NULL && *tv->vval.v_string != NUL)); } /* * Evaluate an expression, which can be a function, partial or string. * Pass arguments "argv[argc]". * Return the result in "rettv" and OK or FAIL. */ int eval_expr_typval(typval_T *expr, typval_T *argv, int argc, typval_T *rettv) { char_u *s; char_u buf[NUMBUFLEN]; funcexe_T funcexe; if (expr->v_type == VAR_FUNC) { s = expr->vval.v_string; if (s == NULL || *s == NUL) return FAIL; CLEAR_FIELD(funcexe); funcexe.fe_evaluate = TRUE; if (call_func(s, -1, rettv, argc, argv, &funcexe) == FAIL) return FAIL; } else if (expr->v_type == VAR_PARTIAL) { partial_T *partial = expr->vval.v_partial; if (partial == NULL) return FAIL; if (partial->pt_func != NULL && partial->pt_func->uf_def_status != UF_NOT_COMPILED) { if (call_def_function(partial->pt_func, argc, argv, partial, rettv) == FAIL) return FAIL; } else { s = partial_name(partial); if (s == NULL || *s == NUL) return FAIL; CLEAR_FIELD(funcexe); funcexe.fe_evaluate = TRUE; funcexe.fe_partial = partial; if (call_func(s, -1, rettv, argc, argv, &funcexe) == FAIL) return FAIL; } } else if (expr->v_type == VAR_INSTR) { return exe_typval_instr(expr, rettv); } else { s = tv_get_string_buf_chk_strict(expr, buf, in_vim9script()); if (s == NULL) return FAIL; s = skipwhite(s); if (eval1_emsg(&s, rettv, NULL) == FAIL) return FAIL; if (*skipwhite(s) != NUL) // check for trailing chars after expr { clear_tv(rettv); semsg(_(e_invalid_expression_str), s); return FAIL; } } return OK; } /* * Like eval_to_bool() but using a typval_T instead of a string. * Works for string, funcref and partial. 
*/ int eval_expr_to_bool(typval_T *expr, int *error) { typval_T rettv; int res; if (eval_expr_typval(expr, NULL, 0, &rettv) == FAIL) { *error = TRUE; return FALSE; } res = (tv_get_bool_chk(&rettv, error) != 0); clear_tv(&rettv); return res; } /* * Top level evaluation function, returning a string. If "skip" is TRUE, * only parsing to "nextcmd" is done, without reporting errors. Return * pointer to allocated memory, or NULL for failure or when "skip" is TRUE. */ char_u * eval_to_string_skip( char_u *arg, exarg_T *eap, int skip) // only parse, don't execute { typval_T tv; char_u *retval; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, skip); if (skip) ++emsg_skip; if (eval0(arg, &tv, eap, &evalarg) == FAIL || skip) retval = NULL; else { retval = vim_strsave(tv_get_string(&tv)); clear_tv(&tv); } if (skip) --emsg_skip; clear_evalarg(&evalarg, eap); return retval; } /* * Skip over an expression at "*pp". * Return FAIL for an error, OK otherwise. */ int skip_expr(char_u **pp, evalarg_T *evalarg) { typval_T rettv; *pp = skipwhite(*pp); return eval1(pp, &rettv, evalarg); } /* * Skip over an expression at "*arg". * If in Vim9 script and line breaks are encountered, the lines are * concatenated. "evalarg->eval_tofree" will be set accordingly. * "arg" is advanced to just after the expression. * "start" is set to the start of the expression, "end" to just after the end. * Also when the expression is copied to allocated memory. * Return FAIL for an error, OK otherwise. */ int skip_expr_concatenate( char_u **arg, char_u **start, char_u **end, evalarg_T *evalarg) { typval_T rettv; int res; int vim9script = in_vim9script(); garray_T *gap = evalarg == NULL ? NULL : &evalarg->eval_ga; garray_T *freegap = evalarg == NULL ? NULL : &evalarg->eval_freega; int save_flags = evalarg == NULL ? 0 : evalarg->eval_flags; int evaluate = evalarg == NULL ? FALSE : (evalarg->eval_flags & EVAL_EVALUATE); if (vim9script && evaluate && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL)) { ga_init2(gap, sizeof(char_u *), 10); // leave room for "start" if (ga_grow(gap, 1) == OK) ++gap->ga_len; ga_init2(freegap, sizeof(char_u *), 10); } *start = *arg; // Don't evaluate the expression. if (evalarg != NULL) evalarg->eval_flags &= ~EVAL_EVALUATE; *arg = skipwhite(*arg); res = eval1(arg, &rettv, evalarg); *end = *arg; if (evalarg != NULL) evalarg->eval_flags = save_flags; if (vim9script && evaluate && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL)) { if (evalarg->eval_ga.ga_len == 1) { // just the one line, no need to concatenate ga_clear(gap); gap->ga_itemsize = 0; } else { char_u *p; size_t endoff = STRLEN(*arg); // Line breaks encountered, concatenate all the lines. *((char_u **)gap->ga_data) = *start; p = ga_concat_strings(gap, " "); // free the lines only when using getsourceline() if (evalarg->eval_cookie != NULL) { // Do not free the first line, the caller can still use it. *((char_u **)gap->ga_data) = NULL; // Do not free the last line, "arg" points into it, free it // later. vim_free(evalarg->eval_tofree); evalarg->eval_tofree = ((char_u **)gap->ga_data)[gap->ga_len - 1]; ((char_u **)gap->ga_data)[gap->ga_len - 1] = NULL; ga_clear_strings(gap); } else { ga_clear(gap); // free lines that were explicitly marked for freeing ga_clear_strings(freegap); } gap->ga_itemsize = 0; if (p == NULL) return FAIL; *start = p; vim_free(evalarg->eval_tofree_lambda); evalarg->eval_tofree_lambda = p; // Compute "end" relative to the end. 
*end = *start + STRLEN(*start) - endoff; } } return res; } /* * Convert "tv" to a string. * When "convert" is TRUE convert a List into a sequence of lines and convert * a Float to a String. * Returns an allocated string (NULL when out of memory). */ char_u * typval2string(typval_T *tv, int convert) { garray_T ga; char_u *retval; #ifdef FEAT_FLOAT char_u numbuf[NUMBUFLEN]; #endif if (convert && tv->v_type == VAR_LIST) { ga_init2(&ga, sizeof(char), 80); if (tv->vval.v_list != NULL) { list_join(&ga, tv->vval.v_list, (char_u *)"\n", TRUE, FALSE, 0); if (tv->vval.v_list->lv_len > 0) ga_append(&ga, NL); } ga_append(&ga, NUL); retval = (char_u *)ga.ga_data; } #ifdef FEAT_FLOAT else if (convert && tv->v_type == VAR_FLOAT) { vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float); retval = vim_strsave(numbuf); } #endif else retval = vim_strsave(tv_get_string(tv)); return retval; } /* * Top level evaluation function, returning a string. Does not handle line * breaks. * When "convert" is TRUE convert a List into a sequence of lines and convert * a Float to a String. * Return pointer to allocated memory, or NULL for failure. */ char_u * eval_to_string_eap( char_u *arg, int convert, exarg_T *eap) { typval_T tv; char_u *retval; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); if (eval0(arg, &tv, NULL, &evalarg) == FAIL) retval = NULL; else { retval = typval2string(&tv, convert); clear_tv(&tv); } clear_evalarg(&evalarg, NULL); return retval; } char_u * eval_to_string( char_u *arg, int convert) { return eval_to_string_eap(arg, convert, NULL); } /* * Call eval_to_string() without using current local variables and using * textlock. When "use_sandbox" is TRUE use the sandbox. * Use legacy Vim script syntax. */ char_u * eval_to_string_safe( char_u *arg, int use_sandbox, int keep_script_version) { char_u *retval; funccal_entry_T funccal_entry; int save_sc_version = current_sctx.sc_version; int save_garbage = may_garbage_collect; if (!keep_script_version) current_sctx.sc_version = 1; save_funccal(&funccal_entry); if (use_sandbox) ++sandbox; ++textlock; may_garbage_collect = FALSE; retval = eval_to_string(arg, FALSE); if (use_sandbox) --sandbox; --textlock; may_garbage_collect = save_garbage; restore_funccal(); current_sctx.sc_version = save_sc_version; return retval; } /* * Top level evaluation function, returning a number. * Evaluates "expr" silently. * Returns -1 for an error. */ varnumber_T eval_to_number(char_u *expr) { typval_T rettv; varnumber_T retval; char_u *p = skipwhite(expr); ++emsg_off; if (eval1(&p, &rettv, &EVALARG_EVALUATE) == FAIL) retval = -1; else { retval = tv_get_number_chk(&rettv, NULL); clear_tv(&rettv); } --emsg_off; return retval; } /* * Top level evaluation function. * Returns an allocated typval_T with the result. * Returns NULL when there is an error. */ typval_T * eval_expr(char_u *arg, exarg_T *eap) { typval_T *tv; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); tv = ALLOC_ONE(typval_T); if (tv != NULL && eval0(arg, tv, eap, &evalarg) == FAIL) VIM_CLEAR(tv); clear_evalarg(&evalarg, eap); return tv; } /* * "*arg" points to what can be a function name in the form of "import.Name" or * "Funcref". Return the name of the function. Set "tofree" to something that * was allocated. * If "verbose" is FALSE no errors are given. * Return NULL for any failure. 
*/ static char_u * deref_function_name( char_u **arg, char_u **tofree, evalarg_T *evalarg, int verbose) { typval_T ref; char_u *name = *arg; ref.v_type = VAR_UNKNOWN; if (eval9(arg, &ref, evalarg, FALSE) == FAIL) { dictitem_T *v; // If <SID>VarName was used it would not be found, try another way. v = find_var_also_in_script(name, NULL, FALSE); if (v == NULL) return NULL; copy_tv(&v->di_tv, &ref); } if (*skipwhite(*arg) != NUL) { if (verbose) semsg(_(e_trailing_characters_str), *arg); name = NULL; } else if (ref.v_type == VAR_FUNC && ref.vval.v_string != NULL) { name = ref.vval.v_string; ref.vval.v_string = NULL; *tofree = name; } else if (ref.v_type == VAR_PARTIAL && ref.vval.v_partial != NULL) { if (ref.vval.v_partial->pt_argc > 0 || ref.vval.v_partial->pt_dict != NULL) { if (verbose) emsg(_(e_cannot_use_partial_here)); name = NULL; } else { name = vim_strsave(partial_name(ref.vval.v_partial)); *tofree = name; } } else { if (verbose) semsg(_(e_not_callable_type_str), name); name = NULL; } clear_tv(&ref); return name; } /* * Call some Vim script function and return the result in "*rettv". * Uses argv[0] to argv[argc - 1] for the function arguments. argv[argc] * should have type VAR_UNKNOWN. * Returns OK or FAIL. */ int call_vim_function( char_u *func, int argc, typval_T *argv, typval_T *rettv) { int ret; funcexe_T funcexe; char_u *arg; char_u *name; char_u *tofree = NULL; int ignore_errors; rettv->v_type = VAR_UNKNOWN; // clear_tv() uses this CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = TRUE; // The name might be "import.Func" or "Funcref". We don't know, we need to // ignore errors for an undefined name. But we do want errors when an // autoload script has errors. Guess that when there is a dot in the name // showing errors is the right choice. ignore_errors = vim_strchr(func, '.') == NULL; arg = func; if (ignore_errors) ++emsg_off; name = deref_function_name(&arg, &tofree, &EVALARG_EVALUATE, FALSE); if (ignore_errors) --emsg_off; if (name == NULL) name = func; ret = call_func(name, -1, rettv, argc, argv, &funcexe); if (ret == FAIL) clear_tv(rettv); vim_free(tofree); return ret; } /* * Call Vim script function "func" and return the result as a string. * Uses "argv[0]" to "argv[argc - 1]" for the function arguments. "argv[argc]" * should have type VAR_UNKNOWN. * Returns NULL when calling the function fails. */ void * call_func_retstr( char_u *func, int argc, typval_T *argv) { typval_T rettv; char_u *retval; if (call_vim_function(func, argc, argv, &rettv) == FAIL) return NULL; retval = vim_strsave(tv_get_string(&rettv)); clear_tv(&rettv); return retval; } /* * Call Vim script function "func" and return the result as a List. * Uses "argv" and "argc" as call_func_retstr(). * Returns NULL when there is something wrong. */ void * call_func_retlist( char_u *func, int argc, typval_T *argv) { typval_T rettv; if (call_vim_function(func, argc, argv, &rettv) == FAIL) return NULL; if (rettv.v_type != VAR_LIST) { clear_tv(&rettv); return NULL; } return rettv.vval.v_list; } #if defined(FEAT_FOLDING) || defined(PROTO) /* * Evaluate "arg", which is 'foldexpr'. * Note: caller must set "curwin" to match "arg". * Returns the foldlevel, and any character preceding it in "*cp". Doesn't * give error messages. 
*/ int eval_foldexpr(win_T *wp, int *cp) { char_u *arg; typval_T tv; varnumber_T retval; char_u *s; sctx_T saved_sctx = current_sctx; int use_sandbox = was_set_insecurely((char_u *)"foldexpr", OPT_LOCAL); arg = wp->w_p_fde; current_sctx = wp->w_p_script_ctx[WV_FDE]; ++emsg_off; if (use_sandbox) ++sandbox; ++textlock; *cp = NUL; if (eval0(arg, &tv, NULL, &EVALARG_EVALUATE) == FAIL) retval = 0; else { // If the result is a number, just return the number. if (tv.v_type == VAR_NUMBER) retval = tv.vval.v_number; else if (tv.v_type != VAR_STRING || tv.vval.v_string == NULL) retval = 0; else { // If the result is a string, check if there is a non-digit before // the number. s = tv.vval.v_string; if (!VIM_ISDIGIT(*s) && *s != '-') *cp = *s++; retval = atol((char *)s); } clear_tv(&tv); } --emsg_off; if (use_sandbox) --sandbox; --textlock; clear_evalarg(&EVALARG_EVALUATE, NULL); current_sctx = saved_sctx; return (int)retval; } #endif /* * Get an lval: variable, Dict item or List item that can be assigned a value * to: "name", "na{me}", "name[expr]", "name[expr:expr]", "name[expr][expr]", * "name.key", "name.key[expr]" etc. * Indexing only works if "name" is an existing List or Dictionary. * "name" points to the start of the name. * If "rettv" is not NULL it points to the value to be assigned. * "unlet" is TRUE for ":unlet": slightly different behavior when something is * wrong; must end in space or cmd separator. * * flags: * GLV_QUIET: do not give error messages * GLV_READ_ONLY: will not change the variable * GLV_NO_AUTOLOAD: do not use script autoloading * * Returns a pointer to just after the name, including indexes. * When an evaluation error occurs "lp->ll_name" is NULL; * Returns NULL for a parsing error. Still need to free items in "lp"! */ char_u * get_lval( char_u *name, typval_T *rettv, lval_T *lp, int unlet, int skip, int flags, // GLV_ values int fne_flags) // flags for find_name_end() { char_u *p; char_u *expr_start, *expr_end; int cc; dictitem_T *v; typval_T var1; typval_T var2; int empty1 = FALSE; char_u *key = NULL; int len; hashtab_T *ht = NULL; int quiet = flags & GLV_QUIET; int writing; int vim9script = in_vim9script(); // Clear everything in "lp". CLEAR_POINTER(lp); if (skip || (flags & GLV_COMPILING)) { // When skipping or compiling just find the end of the name. lp->ll_name = name; lp->ll_name_end = find_name_end(name, NULL, NULL, FNE_INCL_BR | fne_flags); return lp->ll_name_end; } // Cannot use "s:var" at the Vim9 script level. "s: type" is OK. if (vim9script && at_script_level() && name[0] == 's' && name[1] == ':' && !VIM_ISWHITE(name[2])) { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), name); return NULL; } // Find the end of the name. p = find_name_end(name, &expr_start, &expr_end, fne_flags); lp->ll_name_end = p; if (expr_start != NULL) { // Don't expand the name when we already know there is an error. if (unlet && !VIM_ISWHITE(*p) && !ends_excmd(*p) && *p != '[' && *p != '.') { semsg(_(e_trailing_characters_str), p); return NULL; } lp->ll_exp_name = make_expanded_name(name, expr_start, expr_end, p); if (lp->ll_exp_name == NULL) { // Report an invalid expression in braces, unless the // expression evaluation has been cancelled due to an // aborting error, an interrupt, or an exception. if (!aborting() && !quiet) { emsg_severe = TRUE; semsg(_(e_invalid_argument_str), name); return NULL; } } lp->ll_name = lp->ll_exp_name; } else { lp->ll_name = name; if (vim9script) { // "a: type" is declaring variable "a" with a type, not "a:". 
// However, "g:[key]" is indexing a dictionary. if (p == name + 2 && p[-1] == ':' && *p != '[') { --p; lp->ll_name_end = p; } if (*p == ':') { char_u *tp = skipwhite(p + 1); if (tp == p + 1 && !quiet) { semsg(_(e_white_space_required_after_str_str), ":", p); return NULL; } if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) { semsg(_(e_using_type_not_in_script_context_str), p); return NULL; } // parse the type after the name lp->ll_type = parse_type(&tp, &SCRIPT_ITEM(current_sctx.sc_sid)->sn_type_list, !quiet); if (lp->ll_type == NULL && !quiet) return NULL; lp->ll_name_end = tp; } } } if (lp->ll_name == NULL) return p; if (*p == '.') { imported_T *import = find_imported(lp->ll_name, p - lp->ll_name, TRUE); if (import != NULL) { ufunc_T *ufunc; type_T *type; lp->ll_sid = import->imp_sid; lp->ll_name = skipwhite(p + 1); p = find_name_end(lp->ll_name, NULL, NULL, fne_flags); lp->ll_name_end = p; // check the item is exported cc = *p; *p = NUL; if (find_exported(import->imp_sid, lp->ll_name, &ufunc, &type, NULL, NULL, TRUE) == -1) { *p = cc; return NULL; } *p = cc; } } // Without [idx] or .key we are done. if ((*p != '[' && *p != '.')) return p; if (vim9script && lval_root != NULL) { // using local variable lp->ll_tv = lval_root; v = NULL; } else { cc = *p; *p = NUL; // When we would write to the variable pass &ht and prevent autoload. writing = !(flags & GLV_READ_ONLY); v = find_var(lp->ll_name, writing ? &ht : NULL, (flags & GLV_NO_AUTOLOAD) || writing); if (v == NULL && !quiet) semsg(_(e_undefined_variable_str), lp->ll_name); *p = cc; if (v == NULL) return NULL; lp->ll_tv = &v->di_tv; } if (vim9script && (flags & GLV_NO_DECL) == 0) { if (!quiet) semsg(_(e_variable_already_declared), lp->ll_name); return NULL; } /* * Loop until no more [idx] or .key is following. */ var1.v_type = VAR_UNKNOWN; var2.v_type = VAR_UNKNOWN; while (*p == '[' || (*p == '.' && p[1] != '=' && p[1] != '.')) { if (*p == '.' && lp->ll_tv->v_type != VAR_DICT) { if (!quiet) semsg(_(e_dot_can_only_be_used_on_dictionary_str), name); return NULL; } if (lp->ll_tv->v_type != VAR_LIST && lp->ll_tv->v_type != VAR_DICT && lp->ll_tv->v_type != VAR_BLOB) { if (!quiet) emsg(_(e_can_only_index_list_dictionary_or_blob)); return NULL; } // a NULL list/blob works like an empty list/blob, allocate one now. if (lp->ll_tv->v_type == VAR_LIST && lp->ll_tv->vval.v_list == NULL) rettv_list_alloc(lp->ll_tv); else if (lp->ll_tv->v_type == VAR_BLOB && lp->ll_tv->vval.v_blob == NULL) rettv_blob_alloc(lp->ll_tv); if (lp->ll_range) { if (!quiet) emsg(_(e_slice_must_come_last)); return NULL; } if (vim9script && lp->ll_valtype == NULL && v != NULL && lp->ll_tv == &v->di_tv && ht != NULL && ht == get_script_local_ht()) { svar_T *sv = find_typval_in_script(lp->ll_tv, 0, TRUE); // Vim9 script local variable: get the type if (sv != NULL) lp->ll_valtype = sv->sv_type; } len = -1; if (*p == '.') { key = p + 1; for (len = 0; ASCII_ISALNUM(key[len]) || key[len] == '_'; ++len) ; if (len == 0) { if (!quiet) emsg(_(e_cannot_use_empty_key_for_dictionary)); return NULL; } p = key + len; } else { // Get the index [expr] or the first index [expr: ]. p = skipwhite(p + 1); if (*p == ':') empty1 = TRUE; else { empty1 = FALSE; if (eval1(&p, &var1, &EVALARG_EVALUATE) == FAIL) // recursive! return NULL; if (tv_get_string_chk(&var1) == NULL) { // not a number or string clear_tv(&var1); return NULL; } p = skipwhite(p); } // Optionally get the second index [ :expr]. 
if (*p == ':') { if (lp->ll_tv->v_type == VAR_DICT) { if (!quiet) emsg(_(e_cannot_slice_dictionary)); clear_tv(&var1); return NULL; } if (rettv != NULL && !(rettv->v_type == VAR_LIST && rettv->vval.v_list != NULL) && !(rettv->v_type == VAR_BLOB && rettv->vval.v_blob != NULL)) { if (!quiet) emsg(_(e_slice_requires_list_or_blob_value)); clear_tv(&var1); return NULL; } p = skipwhite(p + 1); if (*p == ']') lp->ll_empty2 = TRUE; else { lp->ll_empty2 = FALSE; // recursive! if (eval1(&p, &var2, &EVALARG_EVALUATE) == FAIL) { clear_tv(&var1); return NULL; } if (tv_get_string_chk(&var2) == NULL) { // not a number or string clear_tv(&var1); clear_tv(&var2); return NULL; } } lp->ll_range = TRUE; } else lp->ll_range = FALSE; if (*p != ']') { if (!quiet) emsg(_(e_missing_closing_square_brace)); clear_tv(&var1); clear_tv(&var2); return NULL; } // Skip to past ']'. ++p; } if (lp->ll_tv->v_type == VAR_DICT) { if (len == -1) { // "[key]": get key from "var1" key = tv_get_string_chk(&var1); // is number or string if (key == NULL) { clear_tv(&var1); return NULL; } } lp->ll_list = NULL; // a NULL dict is equivalent with an empty dict if (lp->ll_tv->vval.v_dict == NULL) { lp->ll_tv->vval.v_dict = dict_alloc(); if (lp->ll_tv->vval.v_dict == NULL) { clear_tv(&var1); return NULL; } ++lp->ll_tv->vval.v_dict->dv_refcount; } lp->ll_dict = lp->ll_tv->vval.v_dict; lp->ll_di = dict_find(lp->ll_dict, key, len); // When assigning to a scope dictionary check that a function and // variable name is valid (only variable name unless it is l: or // g: dictionary). Disallow overwriting a builtin function. if (rettv != NULL && lp->ll_dict->dv_scope != 0) { int prevval; int wrong; if (len != -1) { prevval = key[len]; key[len] = NUL; } else prevval = 0; // avoid compiler warning wrong = (lp->ll_dict->dv_scope == VAR_DEF_SCOPE && rettv->v_type == VAR_FUNC && var_wrong_func_name(key, lp->ll_di == NULL)) || !valid_varname(key, -1, TRUE); if (len != -1) key[len] = prevval; if (wrong) { clear_tv(&var1); return NULL; } } if (lp->ll_valtype != NULL) // use the type of the member lp->ll_valtype = lp->ll_valtype->tt_member; if (lp->ll_di == NULL) { // Can't add "v:" or "a:" variable. if (lp->ll_dict == get_vimvar_dict() || &lp->ll_dict->dv_hashtab == get_funccal_args_ht()) { semsg(_(e_illegal_variable_name_str), name); clear_tv(&var1); return NULL; } // Key does not exist in dict: may need to add it. if (*p == '[' || *p == '.' || unlet) { if (!quiet) semsg(_(e_key_not_present_in_dictionary), key); clear_tv(&var1); return NULL; } if (len == -1) lp->ll_newkey = vim_strsave(key); else lp->ll_newkey = vim_strnsave(key, len); clear_tv(&var1); if (lp->ll_newkey == NULL) p = NULL; break; } // existing variable, need to check if it can be changed else if ((flags & GLV_READ_ONLY) == 0 && (var_check_ro(lp->ll_di->di_flags, name, FALSE) || var_check_lock(lp->ll_di->di_flags, name, FALSE))) { clear_tv(&var1); return NULL; } clear_tv(&var1); lp->ll_tv = &lp->ll_di->di_tv; } else if (lp->ll_tv->v_type == VAR_BLOB) { long bloblen = blob_len(lp->ll_tv->vval.v_blob); /* * Get the number and item for the only or first index of the List. 
*/ if (empty1) lp->ll_n1 = 0; else // is number or string lp->ll_n1 = (long)tv_get_number(&var1); clear_tv(&var1); if (check_blob_index(bloblen, lp->ll_n1, quiet) == FAIL) { clear_tv(&var2); return NULL; } if (lp->ll_range && !lp->ll_empty2) { lp->ll_n2 = (long)tv_get_number(&var2); clear_tv(&var2); if (check_blob_range(bloblen, lp->ll_n1, lp->ll_n2, quiet) == FAIL) return NULL; } lp->ll_blob = lp->ll_tv->vval.v_blob; lp->ll_tv = NULL; break; } else { /* * Get the number and item for the only or first index of the List. */ if (empty1) lp->ll_n1 = 0; else // is number or string lp->ll_n1 = (long)tv_get_number(&var1); clear_tv(&var1); lp->ll_dict = NULL; lp->ll_list = lp->ll_tv->vval.v_list; lp->ll_li = check_range_index_one(lp->ll_list, &lp->ll_n1, (flags & GLV_ASSIGN_WITH_OP) == 0, quiet); if (lp->ll_li == NULL) { clear_tv(&var2); return NULL; } if (lp->ll_valtype != NULL) // use the type of the member lp->ll_valtype = lp->ll_valtype->tt_member; /* * May need to find the item or absolute index for the second * index of a range. * When no index given: "lp->ll_empty2" is TRUE. * Otherwise "lp->ll_n2" is set to the second index. */ if (lp->ll_range && !lp->ll_empty2) { lp->ll_n2 = (long)tv_get_number(&var2); // is number or string clear_tv(&var2); if (check_range_index_two(lp->ll_list, &lp->ll_n1, lp->ll_li, &lp->ll_n2, quiet) == FAIL) return NULL; } lp->ll_tv = &lp->ll_li->li_tv; } } clear_tv(&var1); lp->ll_name_end = p; return p; } /* * Clear lval "lp" that was filled by get_lval(). */ void clear_lval(lval_T *lp) { vim_free(lp->ll_exp_name); vim_free(lp->ll_newkey); } /* * Set a variable that was parsed by get_lval() to "rettv". * "endp" points to just after the parsed name. * "op" is NULL, "+" for "+=", "-" for "-=", "*" for "*=", "/" for "/=", * "%" for "%=", "." for ".=" or "=" for "=". */ void set_var_lval( lval_T *lp, char_u *endp, typval_T *rettv, int copy, int flags, // ASSIGN_CONST, ASSIGN_NO_DECL char_u *op, int var_idx) // index for "let [a, b] = list" { int cc; dictitem_T *di; if (lp->ll_tv == NULL) { cc = *endp; *endp = NUL; if (in_vim9script() && check_reserved_name(lp->ll_name) == FAIL) return; if (lp->ll_blob != NULL) { int error = FALSE, val; if (op != NULL && *op != '=') { semsg(_(e_wrong_variable_type_for_str_equal), op); return; } if (value_check_lock(lp->ll_blob->bv_lock, lp->ll_name, FALSE)) return; if (lp->ll_range && rettv->v_type == VAR_BLOB) { if (lp->ll_empty2) lp->ll_n2 = blob_len(lp->ll_blob) - 1; if (blob_set_range(lp->ll_blob, lp->ll_n1, lp->ll_n2, rettv) == FAIL) return; } else { val = (int)tv_get_number_chk(rettv, &error); if (!error) blob_set_append(lp->ll_blob, lp->ll_n1, val); } } else if (op != NULL && *op != '=') { typval_T tv; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_modify_existing_variable)); *endp = cc; return; } // handle +=, -=, *=, /=, %= and .= di = NULL; if (eval_variable(lp->ll_name, (int)STRLEN(lp->ll_name), lp->ll_sid, &tv, &di, EVAL_VAR_VERBOSE) == OK) { if ((di == NULL || (!var_check_ro(di->di_flags, lp->ll_name, FALSE) && !tv_check_lock(&di->di_tv, lp->ll_name, FALSE))) && tv_op(&tv, rettv, op) == OK) set_var_const(lp->ll_name, lp->ll_sid, NULL, &tv, FALSE, ASSIGN_NO_DECL, 0); clear_tv(&tv); } } else { if (lp->ll_type != NULL && check_typval_arg_type(lp->ll_type, rettv, NULL, 0) == FAIL) return; set_var_const(lp->ll_name, lp->ll_sid, lp->ll_type, rettv, copy, flags, var_idx); } *endp = cc; } else if (value_check_lock(lp->ll_newkey == NULL ? 
lp->ll_tv->v_lock : lp->ll_tv->vval.v_dict->dv_lock, lp->ll_name, FALSE)) ; else if (lp->ll_range) { if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_range)); return; } (void)list_assign_range(lp->ll_list, rettv->vval.v_list, lp->ll_n1, lp->ll_n2, lp->ll_empty2, op, lp->ll_name); } else { /* * Assign to a List or Dictionary item. */ if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_list_or_dict)); return; } if (lp->ll_valtype != NULL && check_typval_arg_type(lp->ll_valtype, rettv, NULL, 0) == FAIL) return; if (lp->ll_newkey != NULL) { if (op != NULL && *op != '=') { semsg(_(e_key_not_present_in_dictionary), lp->ll_newkey); return; } if (dict_wrong_func_name(lp->ll_tv->vval.v_dict, rettv, lp->ll_newkey)) return; // Need to add an item to the Dictionary. di = dictitem_alloc(lp->ll_newkey); if (di == NULL) return; if (dict_add(lp->ll_tv->vval.v_dict, di) == FAIL) { vim_free(di); return; } lp->ll_tv = &di->di_tv; } else if (op != NULL && *op != '=') { tv_op(lp->ll_tv, rettv, op); return; } else clear_tv(lp->ll_tv); /* * Assign the value to the variable or list item. */ if (copy) copy_tv(rettv, lp->ll_tv); else { *lp->ll_tv = *rettv; lp->ll_tv->v_lock = 0; init_tv(rettv); } } } /* * Handle "tv1 += tv2", "tv1 -= tv2", "tv1 *= tv2", "tv1 /= tv2", "tv1 %= tv2" * and "tv1 .= tv2" * Returns OK or FAIL. */ int tv_op(typval_T *tv1, typval_T *tv2, char_u *op) { varnumber_T n; char_u numbuf[NUMBUFLEN]; char_u *s; int failed = FALSE; // Can't do anything with a Funcref or Dict on the right. // v:true and friends only work with "..=". if (tv2->v_type != VAR_FUNC && tv2->v_type != VAR_DICT && ((tv2->v_type != VAR_BOOL && tv2->v_type != VAR_SPECIAL) || *op == '.')) { switch (tv1->v_type) { case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: case VAR_DICT: case VAR_FUNC: case VAR_PARTIAL: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: break; case VAR_BLOB: if (*op != '+' || tv2->v_type != VAR_BLOB) break; // BLOB += BLOB if (tv1->vval.v_blob != NULL && tv2->vval.v_blob != NULL) { blob_T *b1 = tv1->vval.v_blob; blob_T *b2 = tv2->vval.v_blob; int i, len = blob_len(b2); for (i = 0; i < len; i++) ga_append(&b1->bv_ga, blob_get(b2, i)); } return OK; case VAR_LIST: if (*op != '+' || tv2->v_type != VAR_LIST) break; // List += List if (tv2->vval.v_list != NULL) { if (tv1->vval.v_list == NULL) { tv1->vval.v_list = tv2->vval.v_list; ++tv1->vval.v_list->lv_refcount; } else list_extend(tv1->vval.v_list, tv2->vval.v_list, NULL); } return OK; case VAR_NUMBER: case VAR_STRING: if (tv2->v_type == VAR_LIST) break; if (vim_strchr((char_u *)"+-*/%", *op) != NULL) { // nr += nr , nr -= nr , nr *=nr , nr /= nr , nr %= nr n = tv_get_number(tv1); #ifdef FEAT_FLOAT if (tv2->v_type == VAR_FLOAT) { float_T f = n; if (*op == '%') break; switch (*op) { case '+': f += tv2->vval.v_float; break; case '-': f -= tv2->vval.v_float; break; case '*': f *= tv2->vval.v_float; break; case '/': f /= tv2->vval.v_float; break; } clear_tv(tv1); tv1->v_type = VAR_FLOAT; tv1->vval.v_float = f; } else #endif { switch (*op) { case '+': n += tv_get_number(tv2); break; case '-': n -= tv_get_number(tv2); break; case '*': n *= tv_get_number(tv2); break; case '/': n = num_divide(n, tv_get_number(tv2), &failed); break; case '%': n = num_modulus(n, tv_get_number(tv2), &failed); break; } clear_tv(tv1); tv1->v_type = VAR_NUMBER; tv1->vval.v_number = n; } } else { if (tv2->v_type == VAR_FLOAT) break; // str .= str s = 
tv_get_string(tv1); s = concat_str(s, tv_get_string_buf(tv2, numbuf)); clear_tv(tv1); tv1->v_type = VAR_STRING; tv1->vval.v_string = s; } return failed ? FAIL : OK; case VAR_FLOAT: #ifdef FEAT_FLOAT { float_T f; if (*op == '%' || *op == '.' || (tv2->v_type != VAR_FLOAT && tv2->v_type != VAR_NUMBER && tv2->v_type != VAR_STRING)) break; if (tv2->v_type == VAR_FLOAT) f = tv2->vval.v_float; else f = tv_get_number(tv2); switch (*op) { case '+': tv1->vval.v_float += f; break; case '-': tv1->vval.v_float -= f; break; case '*': tv1->vval.v_float *= f; break; case '/': tv1->vval.v_float /= f; break; } } #endif return OK; } } semsg(_(e_wrong_variable_type_for_str_equal), op); return FAIL; } /* * Evaluate the expression used in a ":for var in expr" command. * "arg" points to "var". * Set "*errp" to TRUE for an error, FALSE otherwise; * Return a pointer that holds the info. Null when there is an error. */ void * eval_for_line( char_u *arg, int *errp, exarg_T *eap, evalarg_T *evalarg) { forinfo_T *fi; char_u *var_list_end; char_u *expr; typval_T tv; list_T *l; int skip = !(evalarg->eval_flags & EVAL_EVALUATE); *errp = TRUE; // default: there is an error fi = ALLOC_CLEAR_ONE(forinfo_T); if (fi == NULL) return NULL; var_list_end = skip_var_list(arg, TRUE, &fi->fi_varcount, &fi->fi_semicolon, FALSE); if (var_list_end == NULL) return fi; expr = skipwhite_and_linebreak(var_list_end, evalarg); if (expr[0] != 'i' || expr[1] != 'n' || !(expr[2] == NUL || VIM_ISWHITE(expr[2]))) { if (in_vim9script() && *expr == ':' && expr != var_list_end) semsg(_(e_no_white_space_allowed_before_colon_str), expr); else emsg(_(e_missing_in_after_for)); return fi; } if (skip) ++emsg_skip; expr = skipwhite_and_linebreak(expr + 2, evalarg); if (eval0(expr, &tv, eap, evalarg) == OK) { *errp = FALSE; if (!skip) { if (tv.v_type == VAR_LIST) { l = tv.vval.v_list; if (l == NULL) { // a null list is like an empty list: do nothing clear_tv(&tv); } else { // Need a real list here. CHECK_LIST_MATERIALIZE(l); // No need to increment the refcount, it's already set for // the list being used in "tv". fi->fi_list = l; list_add_watch(l, &fi->fi_lw); fi->fi_lw.lw_item = l->lv_first; } } else if (tv.v_type == VAR_BLOB) { fi->fi_bi = 0; if (tv.vval.v_blob != NULL) { typval_T btv; // Make a copy, so that the iteration still works when the // blob is changed. blob_copy(tv.vval.v_blob, &btv); fi->fi_blob = btv.vval.v_blob; } clear_tv(&tv); } else if (tv.v_type == VAR_STRING) { fi->fi_byte_idx = 0; fi->fi_string = tv.vval.v_string; tv.vval.v_string = NULL; if (fi->fi_string == NULL) fi->fi_string = vim_strsave((char_u *)""); } else { emsg(_(e_string_list_or_blob_required)); clear_tv(&tv); } } } if (skip) --emsg_skip; fi->fi_break_count = evalarg->eval_break_count; return fi; } /* * Used when looping over a :for line, skip the "in expr" part. */ void skip_for_lines(void *fi_void, evalarg_T *evalarg) { forinfo_T *fi = (forinfo_T *)fi_void; int i; for (i = 0; i < fi->fi_break_count; ++i) eval_next_line(NULL, evalarg); } /* * Use the first item in a ":for" list. Advance to the next. * Assign the values to the variable (list). "arg" points to the first one. * Return TRUE when a valid item was found, FALSE when at end of list or * something wrong. */ int next_for_item(void *fi_void, char_u *arg) { forinfo_T *fi = (forinfo_T *)fi_void; int result; int flag = ASSIGN_FOR_LOOP | (in_vim9script() ? (ASSIGN_FINAL // first round: error if variable exists | (fi->fi_bi == 0 ? 
0 : ASSIGN_DECL) | ASSIGN_NO_MEMBER_TYPE) : 0); listitem_T *item; int skip_assign = in_vim9script() && arg[0] == '_' && !eval_isnamec(arg[1]); if (fi->fi_blob != NULL) { typval_T tv; if (fi->fi_bi >= blob_len(fi->fi_blob)) return FALSE; tv.v_type = VAR_NUMBER; tv.v_lock = VAR_FIXED; tv.vval.v_number = blob_get(fi->fi_blob, fi->fi_bi); ++fi->fi_bi; if (skip_assign) return TRUE; return ex_let_vars(arg, &tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK; } if (fi->fi_string != NULL) { typval_T tv; int len; len = mb_ptr2len(fi->fi_string + fi->fi_byte_idx); if (len == 0) return FALSE; tv.v_type = VAR_STRING; tv.v_lock = VAR_FIXED; tv.vval.v_string = vim_strnsave(fi->fi_string + fi->fi_byte_idx, len); fi->fi_byte_idx += len; ++fi->fi_bi; if (skip_assign) result = TRUE; else result = ex_let_vars(arg, &tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK; vim_free(tv.vval.v_string); return result; } item = fi->fi_lw.lw_item; if (item == NULL) result = FALSE; else { fi->fi_lw.lw_item = item->li_next; ++fi->fi_bi; if (skip_assign) result = TRUE; else result = (ex_let_vars(arg, &item->li_tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK); } return result; } /* * Free the structure used to store info used by ":for". */ void free_for_info(void *fi_void) { forinfo_T *fi = (forinfo_T *)fi_void; if (fi == NULL) return; if (fi->fi_list != NULL) { list_rem_watch(fi->fi_list, &fi->fi_lw); list_unref(fi->fi_list); } else if (fi->fi_blob != NULL) blob_unref(fi->fi_blob); else vim_free(fi->fi_string); vim_free(fi); } void set_context_for_expression( expand_T *xp, char_u *arg, cmdidx_T cmdidx) { int has_expr = cmdidx != CMD_let && cmdidx != CMD_var; int c; char_u *p; if (cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_const || cmdidx == CMD_final) { xp->xp_context = EXPAND_USER_VARS; if (vim_strpbrk(arg, (char_u *)"\"'+-*/%.=!?~|&$([<>,#") == NULL) { // ":let var1 var2 ...": find last space. for (p = arg + STRLEN(arg); p >= arg; ) { xp->xp_pattern = p; MB_PTR_BACK(arg, p); if (VIM_ISWHITE(*p)) break; } return; } } else xp->xp_context = cmdidx == CMD_call ? EXPAND_FUNCTIONS : EXPAND_EXPRESSION; while ((xp->xp_pattern = vim_strpbrk(arg, (char_u *)"\"'+-*/%.=!?~|&$([<>,#")) != NULL) { c = *xp->xp_pattern; if (c == '&') { c = xp->xp_pattern[1]; if (c == '&') { ++xp->xp_pattern; xp->xp_context = has_expr ? EXPAND_EXPRESSION : EXPAND_NOTHING; } else if (c != ' ') { xp->xp_context = EXPAND_SETTINGS; if ((c == 'l' || c == 'g') && xp->xp_pattern[2] == ':') xp->xp_pattern += 2; } } else if (c == '$') { // environment variable xp->xp_context = EXPAND_ENV_VARS; } else if (c == '=') { has_expr = TRUE; xp->xp_context = EXPAND_EXPRESSION; } else if (c == '#' && xp->xp_context == EXPAND_EXPRESSION) { // Autoload function/variable contains '#'. break; } else if ((c == '<' || c == '#') && xp->xp_context == EXPAND_FUNCTIONS && vim_strchr(xp->xp_pattern, '(') == NULL) { // Function name can start with "<SNR>" and contain '#'. break; } else if (has_expr) { if (c == '"') // string { while ((c = *++xp->xp_pattern) != NUL && c != '"') if (c == '\\' && xp->xp_pattern[1] != NUL) ++xp->xp_pattern; xp->xp_context = EXPAND_NOTHING; } else if (c == '\'') // literal string { // Trick: '' is like stopping and starting a literal string. 
while ((c = *++xp->xp_pattern) != NUL && c != '\'') /* skip */ ; xp->xp_context = EXPAND_NOTHING; } else if (c == '|') { if (xp->xp_pattern[1] == '|') { ++xp->xp_pattern; xp->xp_context = EXPAND_EXPRESSION; } else xp->xp_context = EXPAND_COMMANDS; } else xp->xp_context = EXPAND_EXPRESSION; } else // Doesn't look like something valid, expand as an expression // anyway. xp->xp_context = EXPAND_EXPRESSION; arg = xp->xp_pattern; if (*arg != NUL) while ((c = *++arg) != NUL && (c == ' ' || c == '\t')) /* skip */ ; } // ":exe one two" completes "two" if ((cmdidx == CMD_execute || cmdidx == CMD_echo || cmdidx == CMD_echon || cmdidx == CMD_echomsg) && xp->xp_context == EXPAND_EXPRESSION) { for (;;) { char_u *n = skiptowhite(arg); if (n == arg || IS_WHITE_OR_NUL(*skipwhite(n))) break; arg = skipwhite(n); } } xp->xp_pattern = arg; } /* * Return TRUE if "pat" matches "text". * Does not use 'cpo' and always uses 'magic'. */ int pattern_match(char_u *pat, char_u *text, int ic) { int matches = FALSE; char_u *save_cpo; regmatch_T regmatch; // avoid 'l' flag in 'cpoptions' save_cpo = p_cpo; p_cpo = empty_option; regmatch.regprog = vim_regcomp(pat, RE_MAGIC + RE_STRING); if (regmatch.regprog != NULL) { regmatch.rm_ic = ic; matches = vim_regexec_nl(&regmatch, text, (colnr_T)0); vim_regfree(regmatch.regprog); } p_cpo = save_cpo; return matches; } /* * Handle a name followed by "(". Both for just "name(arg)" and for * "expr->name(arg)". * Returns OK or FAIL. */ static int eval_func( char_u **arg, // points to "(", will be advanced evalarg_T *evalarg, char_u *name, int name_len, typval_T *rettv, int flags, typval_T *basetv) // "expr" for "expr->name(arg)" { int evaluate = flags & EVAL_EVALUATE; char_u *s = name; int len = name_len; partial_T *partial; int ret = OK; type_T *type = NULL; int found_var = FALSE; if (!evaluate) check_vars(s, len); // If "s" is the name of a variable of type VAR_FUNC // use its contents. s = deref_func_name(s, &len, &partial, in_vim9script() ? &type : NULL, !evaluate, FALSE, &found_var); // Need to make a copy, in case evaluating the arguments makes // the name invalid. s = vim_strsave(s); if (s == NULL || (evaluate && (*s == NUL || (flags & EVAL_CONSTANT)))) ret = FAIL; else { funcexe_T funcexe; // Invoke the function. CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = evaluate; funcexe.fe_partial = partial; funcexe.fe_basetv = basetv; funcexe.fe_check_type = type; funcexe.fe_found_var = found_var; ret = get_func_tv(s, len, rettv, arg, evalarg, &funcexe); } vim_free(s); // If evaluate is FALSE rettv->v_type was not set in // get_func_tv, but it's needed in handle_subscript() to parse // what follows. So set it here. if (rettv->v_type == VAR_UNKNOWN && !evaluate && **arg == '(') { rettv->vval.v_string = NULL; rettv->v_type = VAR_FUNC; } // Stop the expression evaluation when immediately // aborting on error, or when an interrupt occurred or // an exception was thrown but not caught. if (evaluate && aborting()) { if (ret == OK) clear_tv(rettv); ret = FAIL; } return ret; } /* * After a NL, skip over empty lines and comment-only lines. */ static char_u * newline_skip_comments(char_u *arg) { char_u *p = arg + 1; for (;;) { p = skipwhite(p); if (*p == NUL) break; if (vim9_comment_start(p)) { char_u *nl = vim_strchr(p, NL); if (nl == NULL) break; p = nl; } if (*p != NL) break; ++p; // skip another NL } return p; } /* * Get the next line source line without advancing. But do skip over comment * lines. 
* Only called for Vim9 script. */ static char_u * getline_peek_skip_comments(evalarg_T *evalarg) { for (;;) { char_u *next = getline_peek(evalarg->eval_getline, evalarg->eval_cookie); char_u *p; if (next == NULL) break; p = skipwhite(next); if (*p != NUL && !vim9_comment_start(p)) return next; if (eval_next_line(NULL, evalarg) == NULL) break; } return NULL; } /* * If inside Vim9 script, "arg" points to the end of a line (ignoring a # * comment) and there is a next line, return the next line (skipping blanks) * and set "getnext". * Otherwise return the next non-white at or after "arg" and set "getnext" to * FALSE. * "arg" must point somewhere inside a line, not at the start. */ char_u * eval_next_non_blank(char_u *arg, evalarg_T *evalarg, int *getnext) { char_u *p = skipwhite(arg); *getnext = FALSE; if (in_vim9script() && evalarg != NULL && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL || *p == NL) && (*p == NUL || *p == NL || (vim9_comment_start(p) && VIM_ISWHITE(p[-1])))) { char_u *next; if (*p == NL) next = newline_skip_comments(p); else if (evalarg->eval_cookie != NULL) next = getline_peek_skip_comments(evalarg); else next = peek_next_line_from_context(evalarg->eval_cctx); if (next != NULL) { *getnext = TRUE; return skipwhite(next); } } return p; } /* * To be called after eval_next_non_blank() sets "getnext" to TRUE. * Only called for Vim9 script. */ char_u * eval_next_line(char_u *arg, evalarg_T *evalarg) { garray_T *gap = &evalarg->eval_ga; char_u *line; if (arg != NULL) { if (*arg == NL) return newline_skip_comments(arg); // Truncate before a trailing comment, so that concatenating the lines // won't turn the rest into a comment. if (*skipwhite(arg) == '#') *arg = NUL; } if (evalarg->eval_cookie != NULL) line = evalarg->eval_getline(0, evalarg->eval_cookie, 0, GETLINE_CONCAT_ALL); else line = next_line_from_context(evalarg->eval_cctx, TRUE); if (line == NULL) return NULL; ++evalarg->eval_break_count; if (gap->ga_itemsize > 0 && ga_grow(gap, 1) == OK) { char_u *p = skipwhite(line); // Going to concatenate the lines after parsing. For an empty or // comment line use an empty string. if (*p == NUL || vim9_comment_start(p)) { vim_free(line); line = vim_strsave((char_u *)""); } ((char_u **)gap->ga_data)[gap->ga_len] = line; ++gap->ga_len; } else if (evalarg->eval_cookie != NULL) { vim_free(evalarg->eval_tofree); evalarg->eval_tofree = line; } // Advanced to the next line, "arg" no longer points into the previous // line. evalarg->eval_using_cmdline = FALSE; return skipwhite(line); } /* * Call eval_next_non_blank() and get the next line if needed. */ char_u * skipwhite_and_linebreak(char_u *arg, evalarg_T *evalarg) { int getnext; char_u *p = skipwhite_and_nl(arg); if (evalarg == NULL) return skipwhite(arg); eval_next_non_blank(p, evalarg, &getnext); if (getnext) return eval_next_line(arg, evalarg); return p; } /* * Initialize "evalarg" for use. */ void init_evalarg(evalarg_T *evalarg) { CLEAR_POINTER(evalarg); ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20); } /* * After using "evalarg" filled from "eap": free the memory. */ void clear_evalarg(evalarg_T *evalarg, exarg_T *eap) { if (evalarg != NULL) { if (evalarg->eval_tofree != NULL) { if (eap != NULL) { // We may need to keep the original command line, e.g. for // ":let" it has the variable names. But we may also need the // new one, "nextcmd" points into it. Keep both. 
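		// Sketch of the hand-over below (describing the code, not
		// changing it): the previous "cmdline_tofree" is freed, the
		// command line that "nextcmd" points into becomes the new
		// "cmdline_tofree", and the line owned by "evalarg" replaces
		// *eap->cmdlinep.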
vim_free(eap->cmdline_tofree); eap->cmdline_tofree = *eap->cmdlinep; *eap->cmdlinep = evalarg->eval_tofree; } else vim_free(evalarg->eval_tofree); evalarg->eval_tofree = NULL; } ga_clear_strings(&evalarg->eval_tofree_ga); VIM_CLEAR(evalarg->eval_tofree_lambda); } } /* * The "evaluate" argument: When FALSE, the argument is only parsed but not * executed. The function may return OK, but the rettv will be of type * VAR_UNKNOWN. The function still returns FAIL for a syntax error. */ /* * Handle zero level expression. * This calls eval1() and handles error message and nextcmd. * Put the result in "rettv" when returning OK and "evaluate" is TRUE. * Note: "rettv.v_lock" is not set. * "evalarg" can be NULL, EVALARG_EVALUATE or a pointer. * Return OK or FAIL. */ int eval0( char_u *arg, typval_T *rettv, exarg_T *eap, evalarg_T *evalarg) { return eval0_retarg(arg, rettv, eap, evalarg, NULL); } /* * Like eval0() but when "retarg" is not NULL store the pointer to after the * expression and don't check what comes after the expression. */ int eval0_retarg( char_u *arg, typval_T *rettv, exarg_T *eap, evalarg_T *evalarg, char_u **retarg) { int ret; char_u *p; char_u *expr_end; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; int flags = evalarg == NULL ? 0 : evalarg->eval_flags; int check_for_end = retarg == NULL; int end_error = FALSE; p = skipwhite(arg); ret = eval1(&p, rettv, evalarg); if (ret != FAIL) { expr_end = p; p = skipwhite(p); // In Vim9 script a command block is not split at NL characters for // commands using an expression argument. Skip over a '#' comment to // check for a following NL. Require white space before the '#'. if (in_vim9script() && p > expr_end && retarg == NULL) while (*p == '#') { char_u *nl = vim_strchr(p, NL); if (nl == NULL) break; p = skipwhite(nl + 1); if (eap != NULL && *p != NUL) eap->nextcmd = p; check_for_end = FALSE; } if (check_for_end) end_error = !ends_excmd2(arg, p); } if (ret == FAIL || end_error) { if (ret != FAIL) clear_tv(rettv); /* * Report the invalid expression unless the expression evaluation has * been cancelled due to an aborting error, an interrupt, or an * exception, or we already gave a more specific error. * Also check called_emsg for when using assert_fails(). */ if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before && (flags & EVAL_CONSTANT) == 0 && (!in_vim9script() || !vim9_bad_comment(p))) { if (end_error) semsg(_(e_trailing_characters_str), p); else semsg(_(e_invalid_expression_str), arg); } // Some of the expression may not have been consumed. Do not check for // a next command to avoid more errors, unless "|" is following, which // could only be a command separator. if (eap != NULL && p != NULL && skipwhite(p)[0] == '|' && skipwhite(p)[1] != '|') eap->nextcmd = check_nextcmd(p); return FAIL; } if (retarg != NULL) *retarg = p; else if (check_for_end && eap != NULL) set_nextcmd(eap, p); return ret; } /* * Handle top level expression: * expr2 ? expr1 : expr1 * expr2 ?? expr1 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Note: "rettv.v_lock" is not set. * * Return OK or FAIL. */ int eval1(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; CLEAR_POINTER(rettv); /* * Get the first variable. 
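     * Illustrative examples of what this level parses (assumed, not from
     * the original source):
     *	echo cond ? "yes" : "no"
     *	echo name ?? "fallback"		" use "name" unless it is falsy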
*/ if (eval2(arg, rettv, evalarg) == FAIL) return FAIL; p = eval_next_non_blank(*arg, evalarg, &getnext); if (*p == '?') { int op_falsy = p[1] == '?'; int result; typval_T var2; evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int orig_flags; int evaluate; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = evalarg_used->eval_flags & EVAL_EVALUATE; if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, op_falsy ? 2 : 1); clear_tv(rettv); return FAIL; } *arg = p; } result = FALSE; if (evaluate) { int error = FALSE; if (op_falsy) result = tv2bool(rettv); else if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) != 0) result = TRUE; if (error || !op_falsy || !result) clear_tv(rettv); if (error) return FAIL; } /* * Get the second variable. Recursive! */ if (op_falsy) ++*arg; if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg - (op_falsy ? 1 : 0), op_falsy ? 2 : 1); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg_used); evalarg_used->eval_flags = (op_falsy ? !result : result) ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval1(arg, &var2, evalarg_used) == FAIL) { evalarg_used->eval_flags = orig_flags; return FAIL; } if (!op_falsy || !result) *rettv = var2; if (!op_falsy) { /* * Check for the ":". */ p = eval_next_non_blank(*arg, evalarg_used, &getnext); if (*p != ':') { emsg(_(e_missing_colon_after_questionmark)); if (evaluate && result) clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 1); clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } *arg = p; } /* * Get the third variable. Recursive! */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg, 1); clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg_used); evalarg_used->eval_flags = !result ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval1(arg, &var2, evalarg_used) == FAIL) { if (evaluate && result) clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } if (evaluate && !result) *rettv = var2; } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle first level expression: * expr2 || expr2 || expr2 logical OR * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval2(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; /* * Get the first expression. */ if (eval3(arg, rettv, evalarg) == FAIL) return FAIL; /* * Handle the "||" operator. 
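     * For example (illustrative): in legacy script "echo 0 || 2" echoes 1,
     * because each operand is converted to a number and the result is a
     * Number 0/1; in Vim9 script the operands must be usable as booleans
     * and the result is a Bool.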
*/ p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '|' && p[1] == '|') { evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int evaluate; int orig_flags; long result = FALSE; typval_T var2; int error = FALSE; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = orig_flags & EVAL_EVALUATE; if (evaluate) { if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) != 0) result = TRUE; clear_tv(rettv); if (error) return FAIL; } /* * Repeat until there is no following "||". */ while (p[0] == '|' && p[1] == '|') { if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = p; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[2])) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 2, evalarg_used); evalarg_used->eval_flags = !result ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval3(arg, &var2, evalarg_used) == FAIL) return FAIL; /* * Compute the result. */ if (evaluate && !result) { if (vim9script) result = tv_get_bool_chk(&var2, &error); else if (tv_get_number_chk(&var2, &error) != 0) result = TRUE; clear_tv(&var2); if (error) return FAIL; } if (evaluate) { if (vim9script) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = result ? VVAL_TRUE : VVAL_FALSE; } else { rettv->v_type = VAR_NUMBER; rettv->vval.v_number = result; } } p = eval_next_non_blank(*arg, evalarg_used, &getnext); } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle second level expression: * expr3 && expr3 && expr3 logical AND * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval3(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; /* * Get the first expression. */ if (eval4(arg, rettv, evalarg) == FAIL) return FAIL; /* * Handle the "&&" operator. */ p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '&' && p[1] == '&') { evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int orig_flags; int evaluate; long result = TRUE; typval_T var2; int error = FALSE; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = orig_flags & EVAL_EVALUATE; if (evaluate) { if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) == 0) result = FALSE; clear_tv(rettv); if (error) return FAIL; } /* * Repeat until there is no following "&&". */ while (p[0] == '&' && p[1] == '&') { if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = p; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[2])) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 2, evalarg_used); evalarg_used->eval_flags = result ? orig_flags : orig_flags & ~EVAL_EVALUATE; CLEAR_FIELD(var2); if (eval4(arg, &var2, evalarg_used) == FAIL) return FAIL; /* * Compute the result. 
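	     * Illustrative: for "exists('g:v') && g:v == 1" the right-hand
	     * side only takes effect when the left-hand side was true; when
	     * "result" is already FALSE it was parsed above with
	     * EVAL_EVALUATE removed from eval_flags, so it has no side
	     * effects.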
*/ if (evaluate && result) { if (vim9script) result = tv_get_bool_chk(&var2, &error); else if (tv_get_number_chk(&var2, &error) == 0) result = FALSE; clear_tv(&var2); if (error) return FAIL; } if (evaluate) { if (vim9script) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = result ? VVAL_TRUE : VVAL_FALSE; } else { rettv->v_type = VAR_NUMBER; rettv->vval.v_number = result; } } p = eval_next_non_blank(*arg, evalarg_used, &getnext); } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle third level expression: * var1 == var2 * var1 =~ var2 * var1 != var2 * var1 !~ var2 * var1 > var2 * var1 >= var2 * var1 < var2 * var1 <= var2 * var1 is var2 * var1 isnot var2 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval4(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; exprtype_T type = EXPR_UNKNOWN; int len = 2; int type_is = FALSE; /* * Get the first expression. */ if (eval5(arg, rettv, evalarg) == FAIL) return FAIL; p = eval_next_non_blank(*arg, evalarg, &getnext); type = get_compare_type(p, &len, &type_is); /* * If there is a comparative operator, use it. */ if (type != EXPR_UNKNOWN) { typval_T var2; int ic; int vim9script = in_vim9script(); int evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); long comp_lnum = SOURCING_LNUM; if (getnext) { *arg = eval_next_line(*arg, evalarg); p = *arg; } else if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, len); clear_tv(rettv); return FAIL; } if (vim9script && type_is && (p[len] == '?' || p[len] == '#')) { semsg(_(e_invalid_expression_str), p); clear_tv(rettv); return FAIL; } // extra question mark appended: ignore case if (p[len] == '?') { ic = TRUE; ++len; } // extra '#' appended: match case else if (p[len] == '#') { ic = FALSE; ++len; } // nothing appended: use 'ignorecase' if not in Vim script else ic = vim9script ? FALSE : p_ic; /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL(p[len])) { error_white_both(p, len); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(p + len, evalarg); if (eval5(arg, &var2, evalarg) == FAIL) { clear_tv(rettv); return FAIL; } if (evaluate) { int ret; // use the line of the comparison for messages SOURCING_LNUM = comp_lnum; if (vim9script && check_compare_types(type, rettv, &var2) == FAIL) { ret = FAIL; clear_tv(rettv); } else ret = typval_compare(rettv, &var2, type, ic); clear_tv(&var2); return ret; } } return OK; } /* * Make a copy of blob "tv1" and append blob "tv2". */ void eval_addblob(typval_T *tv1, typval_T *tv2) { blob_T *b1 = tv1->vval.v_blob; blob_T *b2 = tv2->vval.v_blob; blob_T *b = blob_alloc(); int i; if (b != NULL) { for (i = 0; i < blob_len(b1); i++) ga_append(&b->bv_ga, blob_get(b1, i)); for (i = 0; i < blob_len(b2); i++) ga_append(&b->bv_ga, blob_get(b2, i)); clear_tv(tv1); rettv_blob_set(tv1, b); } } /* * Make a copy of list "tv1" and append list "tv2". */ int eval_addlist(typval_T *tv1, typval_T *tv2) { typval_T var3; // concatenate Lists if (list_concat(tv1->vval.v_list, tv2->vval.v_list, &var3) == FAIL) { clear_tv(tv1); clear_tv(tv2); return FAIL; } clear_tv(tv1); *tv1 = var3; return OK; } /* * Handle the bitwise left/right shift operator expression: * var1 << var2 * var1 >> var2 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. 
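 * Examples (illustrative): "1 << 4" gives 16, "16 >> 2" gives 4.  The right
 * operand must be a non-negative Number; shifting by more than
 * MAX_LSHIFT_BITS gives 0, and ">>" is an unsigned (logical) shift.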
* * Return OK or FAIL. */ static int eval5(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { /* * Get the first expression. */ if (eval6(arg, rettv, evalarg) == FAIL) return FAIL; /* * Repeat computing, until no '<<' or '>>' is following. */ for (;;) { char_u *p; int getnext; exprtype_T type; int evaluate; typval_T var2; int vim9script; p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '<' && p[1] == '<') type = EXPR_LSHIFT; else if (p[0] == '>' && p[1] == '>') type = EXPR_RSHIFT; else return OK; // Handle a bitwise left or right shift operator if (rettv->v_type != VAR_NUMBER) { // left operand should be a number emsg(_(e_bitshift_ops_must_be_number)); clear_tv(rettv); return FAIL; } evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); vim9script = in_vim9script(); if (getnext) { *arg = eval_next_line(*arg, evalarg); p = *arg; } else if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL(p[2])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(p + 2, evalarg); if (eval6(arg, &var2, evalarg) == FAIL) { clear_tv(rettv); return FAIL; } if (var2.v_type != VAR_NUMBER || var2.vval.v_number < 0) { // right operand should be a positive number if (var2.v_type != VAR_NUMBER) emsg(_(e_bitshift_ops_must_be_number)); else emsg(_(e_bitshift_ops_must_be_postive)); clear_tv(rettv); clear_tv(&var2); return FAIL; } if (evaluate) { if (var2.vval.v_number > MAX_LSHIFT_BITS) // shifting more bits than we have always results in zero rettv->vval.v_number = 0; else if (type == EXPR_LSHIFT) rettv->vval.v_number = (uvarnumber_T)rettv->vval.v_number << var2.vval.v_number; else rettv->vval.v_number = (uvarnumber_T)rettv->vval.v_number >> var2.vval.v_number; } clear_tv(&var2); } return OK; } /* * Handle fifth level expression: * + number addition, concatenation of list or blob * - number subtraction * . string concatenation (if script version is 1) * .. string concatenation * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval6(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { /* * Get the first expression. */ if (eval7(arg, rettv, evalarg, FALSE) == FAIL) return FAIL; /* * Repeat computing, until no '+', '-' or '.' is following. */ for (;;) { int evaluate; int getnext; char_u *p; int op; int oplen; int concat; typval_T var2; int vim9script = in_vim9script(); // "." is only string concatenation when scriptversion is 1 // "+=", "-=" and "..=" are assignments // "++" and "--" on the next line are a separate command. p = eval_next_non_blank(*arg, evalarg, &getnext); op = *p; concat = op == '.' && (*(p + 1) == '.' || in_old_script(2)); if ((op != '+' && op != '-' && !concat) || p[1] == '=' || (p[1] == '.' && p[2] == '=')) break; if (getnext && (op == '+' || op == '-') && p[0] == p[1]) break; evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); oplen = (concat && p[1] == '.') ? 2 : 1; if (getnext) *arg = eval_next_line(*arg, evalarg); else { if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, oplen); clear_tv(rettv); return FAIL; } *arg = p; } if ((op != '+' || (rettv->v_type != VAR_LIST && rettv->v_type != VAR_BLOB)) #ifdef FEAT_FLOAT && (op == '.' 
|| rettv->v_type != VAR_FLOAT) #endif && evaluate) { int error = FALSE; // For "list + ...", an illegal use of the first operand as // a number cannot be determined before evaluating the 2nd // operand: if this is also a list, all is ok. // For "something . ...", "something - ..." or "non-list + ...", // we know that the first operand needs to be a string or number // without evaluating the 2nd operand. So check before to avoid // side effects after an error. if (op != '.') tv_get_number_chk(rettv, &error); if ((op == '.' && tv_get_string_chk(rettv) == NULL) || error) { clear_tv(rettv); return FAIL; } } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[oplen])) { error_white_both(*arg, oplen); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + oplen, evalarg); if (eval7(arg, &var2, evalarg, !vim9script && op == '.') == FAIL) { clear_tv(rettv); return FAIL; } if (evaluate) { /* * Compute the result. */ if (op == '.') { char_u buf1[NUMBUFLEN], buf2[NUMBUFLEN]; char_u *s1 = tv_get_string_buf(rettv, buf1); char_u *s2 = NULL; if (vim9script && (var2.v_type == VAR_VOID || var2.v_type == VAR_CHANNEL || var2.v_type == VAR_JOB)) semsg(_(e_using_invalid_value_as_string_str), vartype_name(var2.v_type)); #ifdef FEAT_FLOAT else if (vim9script && var2.v_type == VAR_FLOAT) { vim_snprintf((char *)buf2, NUMBUFLEN, "%g", var2.vval.v_float); s2 = buf2; } #endif else s2 = tv_get_string_buf_chk(&var2, buf2); if (s2 == NULL) // type error ? { clear_tv(rettv); clear_tv(&var2); return FAIL; } p = concat_str(s1, s2); clear_tv(rettv); rettv->v_type = VAR_STRING; rettv->vval.v_string = p; } else if (op == '+' && rettv->v_type == VAR_BLOB && var2.v_type == VAR_BLOB) eval_addblob(rettv, &var2); else if (op == '+' && rettv->v_type == VAR_LIST && var2.v_type == VAR_LIST) { if (eval_addlist(rettv, &var2) == FAIL) return FAIL; } else { int error = FALSE; varnumber_T n1, n2; #ifdef FEAT_FLOAT float_T f1 = 0, f2 = 0; if (rettv->v_type == VAR_FLOAT) { f1 = rettv->vval.v_float; n1 = 0; } else #endif { n1 = tv_get_number_chk(rettv, &error); if (error) { // This can only happen for "list + non-list" or // "blob + non-blob". For "non-list + ..." or // "something - ...", we returned before evaluating the // 2nd operand. clear_tv(rettv); clear_tv(&var2); return FAIL; } #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) f1 = n1; #endif } #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) { f2 = var2.vval.v_float; n2 = 0; } else #endif { n2 = tv_get_number_chk(&var2, &error); if (error) { clear_tv(rettv); clear_tv(&var2); return FAIL; } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) f2 = n2; #endif } clear_tv(rettv); #ifdef FEAT_FLOAT // If there is a float on either side the result is a float. if (rettv->v_type == VAR_FLOAT || var2.v_type == VAR_FLOAT) { if (op == '+') f1 = f1 + f2; else f1 = f1 - f2; rettv->v_type = VAR_FLOAT; rettv->vval.v_float = f1; } else #endif { if (op == '+') n1 = n1 + n2; else n1 = n1 - n2; rettv->v_type = VAR_NUMBER; rettv->vval.v_number = n1; } } clear_tv(&var2); } } return OK; } /* * Handle sixth level expression: * * number multiplication * / number division * % number modulo * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval7( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { #ifdef FEAT_FLOAT int use_float = FALSE; #endif /* * Get the first expression. 
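     * Illustrative examples for this level (assumed, not from the original
     * source):
     *	echo 7 / 2		" 3, Number division truncates
     *	echo 7.0 / 2		" 3.5, Float when either side is a Float
     *	echo 7 % 3		" 1; "%" on a Float is an error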
*/ if (eval8(arg, rettv, evalarg, want_string) == FAIL) return FAIL; /* * Repeat computing, until no '*', '/' or '%' is following. */ for (;;) { int evaluate; int getnext; typval_T var2; char_u *p; int op; varnumber_T n1, n2; #ifdef FEAT_FLOAT float_T f1, f2; #endif int error; // "*=", "/=" and "%=" are assignments p = eval_next_non_blank(*arg, evalarg, &getnext); op = *p; if ((op != '*' && op != '/' && op != '%') || p[1] == '=') break; evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); if (getnext) *arg = eval_next_line(*arg, evalarg); else { if (evaluate && in_vim9script() && !VIM_ISWHITE(**arg)) { error_white_both(*arg, 1); clear_tv(rettv); return FAIL; } *arg = p; } #ifdef FEAT_FLOAT f1 = 0; f2 = 0; #endif error = FALSE; if (evaluate) { #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { f1 = rettv->vval.v_float; use_float = TRUE; n1 = 0; } else #endif n1 = tv_get_number_chk(rettv, &error); clear_tv(rettv); if (error) return FAIL; } else n1 = 0; /* * Get the second variable. */ if (evaluate && in_vim9script() && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg, 1); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg); if (eval8(arg, &var2, evalarg, FALSE) == FAIL) return FAIL; if (evaluate) { #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) { if (!use_float) { f1 = n1; use_float = TRUE; } f2 = var2.vval.v_float; n2 = 0; } else #endif { n2 = tv_get_number_chk(&var2, &error); clear_tv(&var2); if (error) return FAIL; #ifdef FEAT_FLOAT if (use_float) f2 = n2; #endif } /* * Compute the result. * When either side is a float the result is a float. */ #ifdef FEAT_FLOAT if (use_float) { if (op == '*') f1 = f1 * f2; else if (op == '/') { # ifdef VMS // VMS crashes on divide by zero, work around it if (f2 == 0.0) { if (f1 == 0) f1 = -1 * __F_FLT_MAX - 1L; // similar to NaN else if (f1 < 0) f1 = -1 * __F_FLT_MAX; else f1 = __F_FLT_MAX; } else f1 = f1 / f2; # else // We rely on the floating point library to handle divide // by zero to result in "inf" and not a crash. f1 = f1 / f2; # endif } else { emsg(_(e_cannot_use_percent_with_float)); return FAIL; } rettv->v_type = VAR_FLOAT; rettv->vval.v_float = f1; } else #endif { int failed = FALSE; if (op == '*') n1 = n1 * n2; else if (op == '/') n1 = num_divide(n1, n2, &failed); else n1 = num_modulus(n1, n2, &failed); if (failed) return FAIL; rettv->v_type = VAR_NUMBER; rettv->vval.v_number = n1; } } } return OK; } /* * Handle a type cast before a base level expression. * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * Return OK or FAIL. */ static int eval8( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { type_T *want_type = NULL; garray_T type_list; // list of pointers to allocated types int res; int evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); // Recognize <type> in Vim9 script only. 
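    // Illustrative (assumed) example of such a cast:
    //		var n = <number>g:count
    // When the cast type and the value type differ, check_type() below
    // reports it; bool is special because 0 and 1 are accepted where a bool
    // is expected.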
if (in_vim9script() && **arg == '<' && eval_isnamec1((*arg)[1]) && STRNCMP(*arg, "<SNR>", 5) != 0) { ++*arg; ga_init2(&type_list, sizeof(type_T *), 10); want_type = parse_type(arg, &type_list, TRUE); if (want_type == NULL && (evaluate || **arg != '>')) { clear_type_list(&type_list); return FAIL; } if (**arg != '>') { if (*skipwhite(*arg) == '>') semsg(_(e_no_white_space_allowed_before_str_str), ">", *arg); else emsg(_(e_missing_gt)); clear_type_list(&type_list); return FAIL; } ++*arg; *arg = skipwhite_and_linebreak(*arg, evalarg); } res = eval9(arg, rettv, evalarg, want_string); if (want_type != NULL && evaluate) { if (res == OK) { type_T *actual = typval2type(rettv, get_copyID(), &type_list, TVTT_DO_MEMBER); if (!equal_type(want_type, actual, 0)) { if (want_type == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { int n = tv2bool(rettv); // can use "0" and "1" for boolean in some places clear_tv(rettv); rettv->v_type = VAR_BOOL; rettv->vval.v_number = n ? VVAL_TRUE : VVAL_FALSE; } else { where_T where = WHERE_INIT; where.wt_variable = TRUE; res = check_type(want_type, actual, TRUE, where); } } } clear_type_list(&type_list); } return res; } int eval_leader(char_u **arg, int vim9) { char_u *s = *arg; char_u *p = *arg; while (*p == '!' || *p == '-' || *p == '+') { char_u *n = skipwhite(p + 1); // ++, --, -+ and +- are not accepted in Vim9 script if (vim9 && (*p == '-' || *p == '+') && (*n == '-' || *n == '+')) { semsg(_(e_invalid_expression_str), s); return FAIL; } p = n; } *arg = p; return OK; } /* * Check for a predefined value "true", "false" and "null.*". * Return OK when recognized. */ int handle_predefined(char_u *s, int len, typval_T *rettv) { switch (len) { case 4: if (STRNCMP(s, "true", 4) == 0) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = VVAL_TRUE; return OK; } if (STRNCMP(s, "null", 4) == 0) { rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; return OK; } break; case 5: if (STRNCMP(s, "false", 5) == 0) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = VVAL_FALSE; return OK; } break; case 8: if (STRNCMP(s, "null_job", 8) == 0) { #ifdef FEAT_JOB_CHANNEL rettv->v_type = VAR_JOB; rettv->vval.v_job = NULL; #else rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; #endif return OK; } break; case 9: if (STRNCMP(s, "null_", 5) != 0) break; if (STRNCMP(s + 5, "list", 4) == 0) { rettv->v_type = VAR_LIST; rettv->vval.v_list = NULL; return OK; } if (STRNCMP(s + 5, "dict", 4) == 0) { rettv->v_type = VAR_DICT; rettv->vval.v_dict = NULL; return OK; } if (STRNCMP(s + 5, "blob", 4) == 0) { rettv->v_type = VAR_BLOB; rettv->vval.v_blob = NULL; return OK; } break; case 11: if (STRNCMP(s, "null_string", 11) == 0) { rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; return OK; } break; case 12: if (STRNCMP(s, "null_channel", 12) == 0) { #ifdef FEAT_JOB_CHANNEL rettv->v_type = VAR_CHANNEL; rettv->vval.v_channel = NULL; #else rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; #endif return OK; } if (STRNCMP(s, "null_partial", 12) == 0) { rettv->v_type = VAR_PARTIAL; rettv->vval.v_partial = NULL; return OK; } break; case 13: if (STRNCMP(s, "null_function", 13) == 0) { rettv->v_type = VAR_FUNC; rettv->vval.v_string = NULL; return OK; } break; } return FAIL; } /* * Handle sixth level expression: * number number constant * 0zFFFFFFFF Blob constant * "string" string constant * 'string' literal string constant * &option-name option value * @r register contents * identifier variable value * function() function call * $VAR environment 
variable * (expression) nested expression * [expr, expr] List * {arg, arg -> expr} Lambda * {key: val, key: val} Dictionary * #{key: val, key: val} Dictionary with literal keys * * Also handle: * ! in front logical NOT * - in front unary minus * + in front unary plus (ignored) * trailing [] subscript in String or List * trailing .name entry in Dictionary * trailing ->name() method call * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval9( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int len; char_u *s; char_u *name_start = NULL; char_u *start_leader, *end_leader; int ret = OK; char_u *alias; static int recurse = 0; int vim9script = in_vim9script(); /* * Initialise variable so that clear_tv() can't mistake this for a * string and free a string that isn't there. */ rettv->v_type = VAR_UNKNOWN; /* * Skip '!', '-' and '+' characters. They are handled later. */ start_leader = *arg; if (eval_leader(arg, vim9script) == FAIL) return FAIL; end_leader = *arg; if (**arg == '.' && (!isdigit(*(*arg + 1)) #ifdef FEAT_FLOAT || in_old_script(2) #endif )) { semsg(_(e_invalid_expression_str), *arg); ++*arg; return FAIL; } // Limit recursion to 1000 levels. At least at 10000 we run out of stack // and crash. With MSVC the stack is smaller. if (recurse == #ifdef _MSC_VER 300 #else 1000 #endif ) { semsg(_(e_expression_too_recursive_str), *arg); return FAIL; } ++recurse; switch (**arg) { /* * Number constant. */ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '.': ret = eval_number(arg, rettv, evaluate, want_string); // Apply prefixed "-" and "+" now. Matters especially when // "->" follows. if (ret == OK && evaluate && end_leader > start_leader && rettv->v_type != VAR_BLOB) ret = eval9_leader(rettv, TRUE, start_leader, &end_leader); break; /* * String constant: "string". */ case '"': ret = eval_string(arg, rettv, evaluate, FALSE); break; /* * Literal string constant: 'str''ing'. */ case '\'': ret = eval_lit_string(arg, rettv, evaluate, FALSE); break; /* * List: [expr, expr] */ case '[': ret = eval_list(arg, rettv, evalarg, TRUE); break; /* * Dictionary: #{key: val, key: val} */ case '#': if (vim9script) { ret = vim9_bad_comment(*arg) ? FAIL : NOTDONE; } else if ((*arg)[1] == '{') { ++*arg; ret = eval_dict(arg, rettv, evalarg, TRUE); } else ret = NOTDONE; break; /* * Lambda: {arg, arg -> expr} * Dictionary: {'key': val, 'key': val} */ case '{': if (vim9script) ret = NOTDONE; else ret = get_lambda_tv(arg, rettv, vim9script, evalarg); if (ret == NOTDONE) ret = eval_dict(arg, rettv, evalarg, FALSE); break; /* * Option value: &name */ case '&': ret = eval_option(arg, rettv, evaluate); break; /* * Environment variable: $VAR. * Interpolated string: $"string" or $'string'. */ case '$': if ((*arg)[1] == '"' || (*arg)[1] == '\'') ret = eval_interp_string(arg, rettv, evaluate); else ret = eval_env_var(arg, rettv, evaluate); break; /* * Register contents: @r. */ case '@': ++*arg; if (evaluate) { if (vim9script && IS_WHITE_OR_NUL(**arg)) semsg(_(e_syntax_error_at_str), *arg); else if (vim9script && !valid_yank_reg(**arg, FALSE)) emsg_invreg(**arg); else { rettv->v_type = VAR_STRING; rettv->vval.v_string = get_reg_contents(**arg, GREG_EXPR_SRC); } } if (**arg != NUL) ++*arg; break; /* * nested expression: (expression). 
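     *	 for example "(1 + 2) * 3" (illustrative)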
* or lambda: (arg) => expr */ case '(': ret = NOTDONE; if (vim9script) { ret = get_lambda_tv(arg, rettv, TRUE, evalarg); if (ret == OK && evaluate) { ufunc_T *ufunc = rettv->vval.v_partial->pt_func; // Compile it here to get the return type. The return // type is optional, when it's missing use t_unknown. // This is recognized in compile_return(). if (ufunc->uf_ret_type->tt_type == VAR_VOID) ufunc->uf_ret_type = &t_unknown; if (compile_def_function(ufunc, FALSE, get_compile_type(ufunc), NULL) == FAIL) { clear_tv(rettv); ret = FAIL; } } } if (ret == NOTDONE) { *arg = skipwhite_and_linebreak(*arg + 1, evalarg); ret = eval1(arg, rettv, evalarg); // recursive! *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ')') ++*arg; else if (ret == OK) { emsg(_(e_missing_closing_paren)); clear_tv(rettv); ret = FAIL; } } break; default: ret = NOTDONE; break; } if (ret == NOTDONE) { /* * Must be a variable or function name. * Can also be a curly-braces kind of name: {expr}. */ s = *arg; len = get_name_len(arg, &alias, evaluate, TRUE); if (alias != NULL) s = alias; if (len <= 0) ret = FAIL; else { int flags = evalarg == NULL ? 0 : evalarg->eval_flags; if (evaluate && vim9script && len == 1 && *s == '_') { emsg(_(e_cannot_use_underscore_here)); ret = FAIL; } else if (evaluate && vim9script && len > 2 && s[0] == 's' && s[1] == ':') { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), s); ret = FAIL; } else if ((vim9script ? **arg : *skipwhite(*arg)) == '(') { // "name(..." recursive! *arg = skipwhite(*arg); ret = eval_func(arg, evalarg, s, len, rettv, flags, NULL); } else if (flags & EVAL_CONSTANT) ret = FAIL; else if (evaluate) { // get the value of "true", "false", etc. or a variable ret = FAIL; if (vim9script) ret = handle_predefined(s, len, rettv); if (ret == FAIL) { name_start = s; ret = eval_variable(s, len, 0, rettv, NULL, EVAL_VAR_VERBOSE + EVAL_VAR_IMPORT); } } else { // skip the name check_vars(s, len); ret = OK; } } vim_free(alias); } // Handle following '[', '(' and '.' for expr[expr], expr.name, // expr(expr), expr->name(expr) if (ret == OK) ret = handle_subscript(arg, name_start, rettv, evalarg, TRUE); /* * Apply logical NOT and unary '-', from right to left, ignore '+'. */ if (ret == OK && evaluate && end_leader > start_leader) ret = eval9_leader(rettv, FALSE, start_leader, &end_leader); --recurse; return ret; } /* * Apply the leading "!" and "-" before an eval9 expression to "rettv". * When "numeric_only" is TRUE only handle "+" and "-". * Adjusts "end_leaderp" until it is at "start_leader". */ static int eval9_leader( typval_T *rettv, int numeric_only, char_u *start_leader, char_u **end_leaderp) { char_u *end_leader = *end_leaderp; int ret = OK; int error = FALSE; varnumber_T val = 0; vartype_T type = rettv->v_type; int vim9script = in_vim9script(); #ifdef FEAT_FLOAT float_T f = 0.0; if (rettv->v_type == VAR_FLOAT) f = rettv->vval.v_float; else #endif { while (VIM_ISWHITE(end_leader[-1])) --end_leader; if (vim9script && end_leader[-1] == '!') val = tv2bool(rettv); else val = tv_get_number_chk(rettv, &error); } if (error) { clear_tv(rettv); ret = FAIL; } else { while (end_leader > start_leader) { --end_leader; if (*end_leader == '!') { if (numeric_only) { ++end_leader; break; } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { if (vim9script) { rettv->v_type = VAR_BOOL; val = f == 0.0 ? 
VVAL_TRUE : VVAL_FALSE; } else f = !f; } else #endif { val = !val; type = VAR_BOOL; } } else if (*end_leader == '-') { #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) f = -f; else #endif { val = -val; type = VAR_NUMBER; } } } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { clear_tv(rettv); rettv->vval.v_float = f; } else #endif { clear_tv(rettv); if (vim9script) rettv->v_type = type; else rettv->v_type = VAR_NUMBER; rettv->vval.v_number = val; } } *end_leaderp = end_leader; return ret; } /* * Call the function referred to in "rettv". */ static int call_func_rettv( char_u **arg, evalarg_T *evalarg, typval_T *rettv, int evaluate, dict_T *selfdict, typval_T *basetv) { partial_T *pt = NULL; funcexe_T funcexe; typval_T functv; char_u *s; int ret; // need to copy the funcref so that we can clear rettv if (evaluate) { functv = *rettv; rettv->v_type = VAR_UNKNOWN; // Invoke the function. Recursive! if (functv.v_type == VAR_PARTIAL) { pt = functv.vval.v_partial; s = partial_name(pt); } else { s = functv.vval.v_string; if (s == NULL || *s == NUL) { emsg(_(e_empty_function_name)); ret = FAIL; goto theend; } } } else s = (char_u *)""; CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = evaluate; funcexe.fe_partial = pt; funcexe.fe_selfdict = selfdict; funcexe.fe_basetv = basetv; ret = get_func_tv(s, -1, rettv, arg, evalarg, &funcexe); theend: // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&functv); return ret; } /* * Evaluate "->method()". * "*arg" points to "method". * Returns FAIL or OK. "*arg" is advanced to after the ')'. */ static int eval_lambda( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); typval_T base = *rettv; int ret; rettv->v_type = VAR_UNKNOWN; if (**arg == '{') { // ->{lambda}() ret = get_lambda_tv(arg, rettv, FALSE, evalarg); } else { // ->(lambda)() ++*arg; ret = eval1(arg, rettv, evalarg); *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg != ')') { emsg(_(e_missing_closing_paren)); return FAIL; } if (rettv->v_type != VAR_STRING && rettv->v_type != VAR_FUNC && rettv->v_type != VAR_PARTIAL) { emsg(_(e_string_or_function_required_for_arrow_parens_expr)); return FAIL; } ++*arg; } if (ret != OK) return FAIL; if (**arg != '(') { if (verbose) { if (*skipwhite(*arg) == '(') emsg(_(e_no_white_space_allowed_before_parenthesis)); else semsg(_(e_missing_parenthesis_str), "lambda"); } clear_tv(rettv); ret = FAIL; } else ret = call_func_rettv(arg, evalarg, rettv, evaluate, NULL, &base); // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&base); return ret; } /* * Evaluate "->method()". * "*arg" points to "method". * Returns FAIL or OK. "*arg" is advanced to after the ')'. 
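 * Illustrative: for "mylist->filter(F)" this is entered with "*arg" at
 * "filter" and the value of "mylist" already in "rettv"; that value is
 * saved in "base" and passed to the function as its first argument.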
*/ static int eval_method( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { char_u *name; long len; char_u *alias; char_u *tofree = NULL; typval_T base = *rettv; int ret = OK; int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); rettv->v_type = VAR_UNKNOWN; name = *arg; len = get_name_len(arg, &alias, evaluate, TRUE); if (alias != NULL) name = alias; if (len <= 0) { if (verbose) emsg(_(e_missing_name_after_method)); ret = FAIL; } else { char_u *paren; // If there is no "(" immediately following, but there is further on, // it can be "import.Func()", "dict.Func()", "list[nr]", etc. // Does not handle anything where "(" is part of the expression. *arg = skipwhite(*arg); if (**arg != '(' && alias == NULL && (paren = vim_strchr(*arg, '(')) != NULL) { char_u *deref; *arg = name; *paren = NUL; deref = deref_function_name(arg, &tofree, evalarg, verbose); if (deref == NULL) { *arg = name + len; ret = FAIL; } else { name = deref; len = (long)STRLEN(name); } *paren = '('; } if (ret == OK) { *arg = skipwhite(*arg); if (**arg != '(') { if (verbose) semsg(_(e_missing_parenthesis_str), name); ret = FAIL; } else if (VIM_ISWHITE((*arg)[-1])) { if (verbose) emsg(_(e_no_white_space_allowed_before_parenthesis)); ret = FAIL; } else ret = eval_func(arg, evalarg, name, len, rettv, evaluate ? EVAL_EVALUATE : 0, &base); } } // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&base); vim_free(tofree); return ret; } /* * Evaluate an "[expr]" or "[expr:expr]" index. Also "dict.key". * "*arg" points to the '[' or '.'. * Returns FAIL or OK. "*arg" is advanced to after the ']'. */ static int eval_index( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int empty1 = FALSE, empty2 = FALSE; typval_T var1, var2; int range = FALSE; char_u *key = NULL; int keylen = -1; int vim9script = in_vim9script(); if (check_can_index(rettv, evaluate, verbose) == FAIL) return FAIL; init_tv(&var1); init_tv(&var2); if (**arg == '.') { /* * dict.name */ key = *arg + 1; for (keylen = 0; eval_isdictc(key[keylen]); ++keylen) ; if (keylen == 0) return FAIL; *arg = key + keylen; } else { /* * something[idx] * * Get the (first) variable from inside the []. */ *arg = skipwhite_and_linebreak(*arg + 1, evalarg); if (**arg == ':') empty1 = TRUE; else if (eval1(arg, &var1, evalarg) == FAIL) // recursive! return FAIL; else if (vim9script && **arg == ':') { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", *arg); clear_tv(&var1); return FAIL; } else if (evaluate) { int error = FALSE; #ifdef FEAT_FLOAT // allow for indexing with float if (vim9script && rettv->v_type == VAR_DICT && var1.v_type == VAR_FLOAT) { var1.vval.v_string = typval_tostring(&var1, TRUE); var1.v_type = VAR_STRING; } #endif if (vim9script && rettv->v_type == VAR_LIST) tv_get_number_chk(&var1, &error); else error = tv_get_string_chk(&var1) == NULL; if (error) { // not a number or string clear_tv(&var1); return FAIL; } } /* * Get the second variable from inside the [:]. 
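	 * For example (illustrative): in "x[1 : 3]" this parses the "3";
	 * in "x[1 :]" the second index is absent and "empty2" is set.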
*/ *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ':') { range = TRUE; ++*arg; if (vim9script && !IS_WHITE_OR_NUL(**arg) && **arg != ']') { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", *arg - 1); if (!empty1) clear_tv(&var1); return FAIL; } *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ']') empty2 = TRUE; else if (eval1(arg, &var2, evalarg) == FAIL) // recursive! { if (!empty1) clear_tv(&var1); return FAIL; } else if (evaluate && tv_get_string_chk(&var2) == NULL) { // not a number or string if (!empty1) clear_tv(&var1); clear_tv(&var2); return FAIL; } } // Check for the ']'. *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg != ']') { if (verbose) emsg(_(e_missing_closing_square_brace)); clear_tv(&var1); if (range) clear_tv(&var2); return FAIL; } *arg = *arg + 1; // skip over the ']' } if (evaluate) { int res = eval_index_inner(rettv, range, empty1 ? NULL : &var1, empty2 ? NULL : &var2, FALSE, key, keylen, verbose); if (!empty1) clear_tv(&var1); if (range) clear_tv(&var2); return res; } return OK; } /* * Check if "rettv" can have an [index] or [sli:ce] */ int check_can_index(typval_T *rettv, int evaluate, int verbose) { switch (rettv->v_type) { case VAR_FUNC: case VAR_PARTIAL: if (verbose) emsg(_(e_cannot_index_a_funcref)); return FAIL; case VAR_FLOAT: #ifdef FEAT_FLOAT if (verbose) emsg(_(e_using_float_as_string)); return FAIL; #endif case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: if (verbose) emsg(_(e_cannot_index_special_variable)); return FAIL; case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: if (evaluate) { emsg(_(e_cannot_index_special_variable)); return FAIL; } // FALLTHROUGH case VAR_STRING: case VAR_LIST: case VAR_DICT: case VAR_BLOB: break; case VAR_NUMBER: if (in_vim9script()) emsg(_(e_cannot_index_number)); break; } return OK; } /* * slice() function */ void f_slice(typval_T *argvars, typval_T *rettv) { if (in_vim9script() && ((argvars[0].v_type != VAR_STRING && argvars[0].v_type != VAR_LIST && argvars[0].v_type != VAR_BLOB && check_for_list_arg(argvars, 0) == FAIL) || check_for_number_arg(argvars, 1) == FAIL || check_for_opt_number_arg(argvars, 2) == FAIL)) return; if (check_can_index(argvars, TRUE, FALSE) == OK) { copy_tv(argvars, rettv); eval_index_inner(rettv, TRUE, argvars + 1, argvars[2].v_type == VAR_UNKNOWN ? NULL : argvars + 2, TRUE, NULL, 0, FALSE); } } /* * Apply index or range to "rettv". * "var1" is the first index, NULL for [:expr]. * "var2" is the second index, NULL for [expr] and [expr: ] * "exclusive" is TRUE for slice(): second index is exclusive, use character * index for string. * Alternatively, "key" is not NULL, then key[keylen] is the dict index. 
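 * Examples (illustrative, legacy semantics): "abcd"[1] gives "b" and
 * "abcd"[1:2] gives "bc" (inclusive range); for "d.key" the "key"/"keylen"
 * arguments are used instead of "var1".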
*/ int eval_index_inner( typval_T *rettv, int is_range, typval_T *var1, typval_T *var2, int exclusive, char_u *key, int keylen, int verbose) { varnumber_T n1, n2 = 0; long len; n1 = 0; if (var1 != NULL && rettv->v_type != VAR_DICT) n1 = tv_get_number(var1); if (is_range) { if (rettv->v_type == VAR_DICT) { if (verbose) emsg(_(e_cannot_slice_dictionary)); return FAIL; } if (var2 != NULL) n2 = tv_get_number(var2); else n2 = VARNUM_MAX; } switch (rettv->v_type) { case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: case VAR_FUNC: case VAR_PARTIAL: case VAR_FLOAT: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: break; // not evaluating, skipping over subscript case VAR_NUMBER: case VAR_STRING: { char_u *s = tv_get_string(rettv); len = (long)STRLEN(s); if (in_vim9script() || exclusive) { if (is_range) s = string_slice(s, n1, n2, exclusive); else s = char_from_string(s, n1); } else if (is_range) { // The resulting variable is a substring. If the indexes // are out of range the result is empty. if (n1 < 0) { n1 = len + n1; if (n1 < 0) n1 = 0; } if (n2 < 0) n2 = len + n2; else if (n2 >= len) n2 = len; if (n1 >= len || n2 < 0 || n1 > n2) s = NULL; else s = vim_strnsave(s + n1, n2 - n1 + 1); } else { // The resulting variable is a string of a single // character. If the index is too big or negative the // result is empty. if (n1 >= len || n1 < 0) s = NULL; else s = vim_strnsave(s + n1, 1); } clear_tv(rettv); rettv->v_type = VAR_STRING; rettv->vval.v_string = s; } break; case VAR_BLOB: blob_slice_or_index(rettv->vval.v_blob, is_range, n1, n2, exclusive, rettv); break; case VAR_LIST: if (var1 == NULL) n1 = 0; if (var2 == NULL) n2 = VARNUM_MAX; if (list_slice_or_index(rettv->vval.v_list, is_range, n1, n2, exclusive, rettv, verbose) == FAIL) return FAIL; break; case VAR_DICT: { dictitem_T *item; typval_T tmp; if (key == NULL) { key = tv_get_string_chk(var1); if (key == NULL) return FAIL; } item = dict_find(rettv->vval.v_dict, key, keylen); if (item == NULL) { if (verbose) { if (keylen > 0) key[keylen] = NUL; semsg(_(e_key_not_present_in_dictionary), key); } return FAIL; } copy_tv(&item->di_tv, &tmp); clear_tv(rettv); *rettv = tmp; } break; } return OK; } /* * Return the function name of partial "pt". */ char_u * partial_name(partial_T *pt) { if (pt != NULL) { if (pt->pt_name != NULL) return pt->pt_name; if (pt->pt_func != NULL) return pt->pt_func->uf_name; } return (char_u *)""; } static void partial_free(partial_T *pt) { int i; for (i = 0; i < pt->pt_argc; ++i) clear_tv(&pt->pt_argv[i]); vim_free(pt->pt_argv); dict_unref(pt->pt_dict); if (pt->pt_name != NULL) { func_unref(pt->pt_name); vim_free(pt->pt_name); } else func_ptr_unref(pt->pt_func); // "out_up" is no longer used, decrement refcount on partial that owns it. partial_unref(pt->pt_outer.out_up_partial); // Using pt_outer from another partial. partial_unref(pt->pt_outer_partial); // Decrease the reference count for the context of a closure. If down // to the minimum it may be time to free it. if (pt->pt_funcstack != NULL) { --pt->pt_funcstack->fs_refcount; funcstack_check_refcount(pt->pt_funcstack); } vim_free(pt); } /* * Unreference a closure: decrement the reference count and free it when it * becomes zero. */ void partial_unref(partial_T *pt) { if (pt != NULL) { if (--pt->pt_refcount <= 0) partial_free(pt); // If the reference count goes down to one, the funcstack may be the // only reference and can be freed if no other partials reference it. 
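	// (That situation arises when a closure's funcstack holds the only
	// remaining reference back to the partial; without this check such a
	// cycle would never be freed.)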
else if (pt->pt_refcount == 1 && pt->pt_funcstack != NULL) funcstack_check_refcount(pt->pt_funcstack); } } /* * Return the next (unique) copy ID. * Used for serializing nested structures. */ int get_copyID(void) { current_copyID += COPYID_INC; return current_copyID; } /* * Garbage collection for lists and dictionaries. * * We use reference counts to be able to free most items right away when they * are no longer used. But for composite items it's possible that it becomes * unused while the reference count is > 0: When there is a recursive * reference. Example: * :let l = [1, 2, 3] * :let d = {9: l} * :let l[1] = d * * Since this is quite unusual we handle this with garbage collection: every * once in a while find out which lists and dicts are not referenced from any * variable. * * Here is a good reference text about garbage collection (refers to Python * but it applies to all reference-counting mechanisms): * http://python.ca/nas/python/gc/ */ /* * Do garbage collection for lists and dicts. * When "testing" is TRUE this is called from test_garbagecollect_now(). * Return TRUE if some memory was freed. */ int garbage_collect(int testing) { int copyID; int abort = FALSE; buf_T *buf; win_T *wp; int did_free = FALSE; tabpage_T *tp; if (!testing) { // Only do this once. want_garbage_collect = FALSE; may_garbage_collect = FALSE; garbage_collect_at_exit = FALSE; } // The execution stack can grow big, limit the size. if (exestack.ga_maxlen - exestack.ga_len > 500) { size_t new_len; char_u *pp; int n; // Keep 150% of the current size, with a minimum of the growth size. n = exestack.ga_len / 2; if (n < exestack.ga_growsize) n = exestack.ga_growsize; // Don't make it bigger though. if (exestack.ga_len + n < exestack.ga_maxlen) { new_len = (size_t)exestack.ga_itemsize * (exestack.ga_len + n); pp = vim_realloc(exestack.ga_data, new_len); if (pp == NULL) return FAIL; exestack.ga_maxlen = exestack.ga_len + n; exestack.ga_data = pp; } } // We advance by two because we add one for items referenced through // previous_funccal. copyID = get_copyID(); /* * 1. Go through all accessible variables and mark all lists and dicts * with copyID. */ // Don't free variables in the previous_funccal list unless they are only // referenced through previous_funccal. This must be first, because if // the item is referenced elsewhere the funccal must not be freed. abort = abort || set_ref_in_previous_funccal(copyID); // script-local variables abort = abort || garbage_collect_scriptvars(copyID); // buffer-local variables FOR_ALL_BUFFERS(buf) abort = abort || set_ref_in_item(&buf->b_bufvar.di_tv, copyID, NULL, NULL); // window-local variables FOR_ALL_TAB_WINDOWS(tp, wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); if (aucmd_win != NULL) abort = abort || set_ref_in_item(&aucmd_win->w_winvar.di_tv, copyID, NULL, NULL); #ifdef FEAT_PROP_POPUP FOR_ALL_POPUPWINS(wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); FOR_ALL_TABPAGES(tp) FOR_ALL_POPUPWINS_IN_TAB(tp, wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); #endif // tabpage-local variables FOR_ALL_TABPAGES(tp) abort = abort || set_ref_in_item(&tp->tp_winvar.di_tv, copyID, NULL, NULL); // global variables abort = abort || garbage_collect_globvars(copyID); // function-local variables abort = abort || set_ref_in_call_stack(copyID); // named functions (matters for closures) abort = abort || set_ref_in_functions(copyID); // function call arguments, if v:testing is set. 
abort = abort || set_ref_in_func_args(copyID); // funcstacks keep variables for closures abort = abort || set_ref_in_funcstacks(copyID); // v: vars abort = abort || garbage_collect_vimvars(copyID); // callbacks in buffers abort = abort || set_ref_in_buffers(copyID); // 'completefunc', 'omnifunc' and 'thesaurusfunc' callbacks abort = abort || set_ref_in_insexpand_funcs(copyID); // 'operatorfunc' callback abort = abort || set_ref_in_opfunc(copyID); // 'tagfunc' callback abort = abort || set_ref_in_tagfunc(copyID); // 'imactivatefunc' and 'imstatusfunc' callbacks abort = abort || set_ref_in_im_funcs(copyID); #ifdef FEAT_LUA abort = abort || set_ref_in_lua(copyID); #endif #ifdef FEAT_PYTHON abort = abort || set_ref_in_python(copyID); #endif #ifdef FEAT_PYTHON3 abort = abort || set_ref_in_python3(copyID); #endif #ifdef FEAT_JOB_CHANNEL abort = abort || set_ref_in_channel(copyID); abort = abort || set_ref_in_job(copyID); #endif #ifdef FEAT_NETBEANS_INTG abort = abort || set_ref_in_nb_channel(copyID); #endif #ifdef FEAT_TIMERS abort = abort || set_ref_in_timer(copyID); #endif #ifdef FEAT_QUICKFIX abort = abort || set_ref_in_quickfix(copyID); #endif #ifdef FEAT_TERMINAL abort = abort || set_ref_in_term(copyID); #endif #ifdef FEAT_PROP_POPUP abort = abort || set_ref_in_popups(copyID); #endif if (!abort) { /* * 2. Free lists and dictionaries that are not referenced. */ did_free = free_unref_items(copyID); /* * 3. Check if any funccal can be freed now. * This may call us back recursively. */ free_unref_funccal(copyID, testing); } else if (p_verbose > 0) { verb_msg(_("Not enough memory to set references, garbage collection aborted!")); } return did_free; } /* * Free lists, dictionaries, channels and jobs that are no longer referenced. */ static int free_unref_items(int copyID) { int did_free = FALSE; // Let all "free" functions know that we are here. This means no // dictionaries, lists, channels or jobs are to be freed, because we will // do that here. in_free_unref_items = TRUE; /* * PASS 1: free the contents of the items. We don't free the items * themselves yet, so that it is possible to decrement refcount counters */ // Go through the list of dicts and free items without the copyID. did_free |= dict_free_nonref(copyID); // Go through the list of lists and free items without the copyID. did_free |= list_free_nonref(copyID); #ifdef FEAT_JOB_CHANNEL // Go through the list of jobs and free items without the copyID. This // must happen before doing channels, because jobs refer to channels, but // the reference from the channel to the job isn't tracked. did_free |= free_unused_jobs_contents(copyID, COPYID_MASK); // Go through the list of channels and free items without the copyID. did_free |= free_unused_channels_contents(copyID, COPYID_MASK); #endif /* * PASS 2: free the items themselves. */ dict_free_items(copyID); list_free_items(copyID); #ifdef FEAT_JOB_CHANNEL // Go through the list of jobs and free items without the copyID. This // must happen before doing channels, because jobs refer to channels, but // the reference from the channel to the job isn't tracked. free_unused_jobs(copyID, COPYID_MASK); // Go through the list of channels and free items without the copyID. free_unused_channels(copyID, COPYID_MASK); #endif in_free_unref_items = FALSE; return did_free; } /* * Mark all lists and dicts referenced through hashtab "ht" with "copyID". * "list_stack" is used to add lists to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. 
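 * Iterates with an explicit "ht_stack" instead of recursing, so that deeply
 * nested structures cannot overflow the C stack.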
*/ int set_ref_in_ht(hashtab_T *ht, int copyID, list_stack_T **list_stack) { int todo; int abort = FALSE; hashitem_T *hi; hashtab_T *cur_ht; ht_stack_T *ht_stack = NULL; ht_stack_T *tempitem; cur_ht = ht; for (;;) { if (!abort) { // Mark each item in the hashtab. If the item contains a hashtab // it is added to ht_stack, if it contains a list it is added to // list_stack. todo = (int)cur_ht->ht_used; for (hi = cur_ht->ht_array; todo > 0; ++hi) if (!HASHITEM_EMPTY(hi)) { --todo; abort = abort || set_ref_in_item(&HI2DI(hi)->di_tv, copyID, &ht_stack, list_stack); } } if (ht_stack == NULL) break; // take an item from the stack cur_ht = ht_stack->ht; tempitem = ht_stack; ht_stack = ht_stack->prev; free(tempitem); } return abort; } #if defined(FEAT_LUA) || defined(FEAT_PYTHON) || defined(FEAT_PYTHON3) \ || defined(PROTO) /* * Mark a dict and its items with "copyID". * Returns TRUE if setting references failed somehow. */ int set_ref_in_dict(dict_T *d, int copyID) { if (d != NULL && d->dv_copyID != copyID) { d->dv_copyID = copyID; return set_ref_in_ht(&d->dv_hashtab, copyID, NULL); } return FALSE; } #endif /* * Mark a list and its items with "copyID". * Returns TRUE if setting references failed somehow. */ int set_ref_in_list(list_T *ll, int copyID) { if (ll != NULL && ll->lv_copyID != copyID) { ll->lv_copyID = copyID; return set_ref_in_list_items(ll, copyID, NULL); } return FALSE; } /* * Mark all lists and dicts referenced through list "l" with "copyID". * "ht_stack" is used to add hashtabs to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_list_items(list_T *l, int copyID, ht_stack_T **ht_stack) { listitem_T *li; int abort = FALSE; list_T *cur_l; list_stack_T *list_stack = NULL; list_stack_T *tempitem; cur_l = l; for (;;) { if (!abort && cur_l->lv_first != &range_list_item) // Mark each item in the list. If the item contains a hashtab // it is added to ht_stack, if it contains a list it is added to // list_stack. for (li = cur_l->lv_first; !abort && li != NULL; li = li->li_next) abort = abort || set_ref_in_item(&li->li_tv, copyID, ht_stack, &list_stack); if (list_stack == NULL) break; // take an item from the stack cur_l = list_stack->list; tempitem = list_stack; list_stack = list_stack->prev; free(tempitem); } return abort; } /* * Mark the partial in callback 'cb' with "copyID". */ int set_ref_in_callback(callback_T *cb, int copyID) { typval_T tv; if (cb->cb_name == NULL || *cb->cb_name == NUL || cb->cb_partial == NULL) return FALSE; tv.v_type = VAR_PARTIAL; tv.vval.v_partial = cb->cb_partial; return set_ref_in_item(&tv, copyID, NULL, NULL); } /* * Mark all lists and dicts referenced through typval "tv" with "copyID". * "list_stack" is used to add lists to be marked. Can be NULL. * "ht_stack" is used to add hashtabs to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_item( typval_T *tv, int copyID, ht_stack_T **ht_stack, list_stack_T **list_stack) { int abort = FALSE; if (tv->v_type == VAR_DICT) { dict_T *dd = tv->vval.v_dict; if (dd != NULL && dd->dv_copyID != copyID) { // Didn't see this dict yet. 
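	    // Setting the copyID before descending is what terminates
	    // cycles, e.g. after ":let d = {} | let d.self = d".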
	    dd->dv_copyID = copyID;
	    if (ht_stack == NULL)
	    {
		abort = set_ref_in_ht(&dd->dv_hashtab, copyID, list_stack);
	    }
	    else
	    {
		ht_stack_T *newitem = ALLOC_ONE(ht_stack_T);

		if (newitem == NULL)
		    abort = TRUE;
		else
		{
		    newitem->ht = &dd->dv_hashtab;
		    newitem->prev = *ht_stack;
		    *ht_stack = newitem;
		}
	    }
	}
    }
    else if (tv->v_type == VAR_LIST)
    {
	list_T *ll = tv->vval.v_list;

	if (ll != NULL && ll->lv_copyID != copyID)
	{
	    // Didn't see this list yet.
	    ll->lv_copyID = copyID;
	    if (list_stack == NULL)
	    {
		abort = set_ref_in_list_items(ll, copyID, ht_stack);
	    }
	    else
	    {
		list_stack_T *newitem = ALLOC_ONE(list_stack_T);

		if (newitem == NULL)
		    abort = TRUE;
		else
		{
		    newitem->list = ll;
		    newitem->prev = *list_stack;
		    *list_stack = newitem;
		}
	    }
	}
    }
    else if (tv->v_type == VAR_FUNC)
    {
	abort = set_ref_in_func(tv->vval.v_string, NULL, copyID);
    }
    else if (tv->v_type == VAR_PARTIAL)
    {
	partial_T *pt = tv->vval.v_partial;
	int i;

	if (pt != NULL && pt->pt_copyID != copyID)
	{
	    // Didn't see this partial yet.
	    pt->pt_copyID = copyID;

	    abort = set_ref_in_func(pt->pt_name, pt->pt_func, copyID);

	    if (pt->pt_dict != NULL)
	    {
		typval_T dtv;

		dtv.v_type = VAR_DICT;
		dtv.vval.v_dict = pt->pt_dict;
		set_ref_in_item(&dtv, copyID, ht_stack, list_stack);
	    }

	    for (i = 0; i < pt->pt_argc; ++i)
		abort = abort || set_ref_in_item(&pt->pt_argv[i], copyID,
							ht_stack, list_stack);
	    // pt_funcstack is handled in set_ref_in_funcstacks()
	}
    }
#ifdef FEAT_JOB_CHANNEL
    else if (tv->v_type == VAR_JOB)
    {
	job_T *job = tv->vval.v_job;
	typval_T dtv;

	if (job != NULL && job->jv_copyID != copyID)
	{
	    job->jv_copyID = copyID;
	    if (job->jv_channel != NULL)
	    {
		dtv.v_type = VAR_CHANNEL;
		dtv.vval.v_channel = job->jv_channel;
		set_ref_in_item(&dtv, copyID, ht_stack, list_stack);
	    }
	    if (job->jv_exit_cb.cb_partial != NULL)
	    {
		dtv.v_type = VAR_PARTIAL;
		dtv.vval.v_partial = job->jv_exit_cb.cb_partial;
		set_ref_in_item(&dtv, copyID, ht_stack, list_stack);
	    }
	}
    }
    else if (tv->v_type == VAR_CHANNEL)
    {
	channel_T *ch = tv->vval.v_channel;
	ch_part_T part;
	typval_T dtv;
	jsonq_T *jq;
	cbq_T *cq;

	if (ch != NULL && ch->ch_copyID != copyID)
	{
	    ch->ch_copyID = copyID;
	    for (part = PART_SOCK; part < PART_COUNT; ++part)
	    {
		for (jq = ch->ch_part[part].ch_json_head.jq_next; jq != NULL;
							     jq = jq->jq_next)
		    set_ref_in_item(jq->jq_value, copyID, ht_stack, list_stack);
		for (cq = ch->ch_part[part].ch_cb_head.cq_next; cq != NULL;
							     cq = cq->cq_next)
		    if (cq->cq_callback.cb_partial != NULL)
		    {
			dtv.v_type = VAR_PARTIAL;
			dtv.vval.v_partial = cq->cq_callback.cb_partial;
			set_ref_in_item(&dtv, copyID, ht_stack, list_stack);
		    }
		if (ch->ch_part[part].ch_callback.cb_partial != NULL)
		{
		    dtv.v_type = VAR_PARTIAL;
		    dtv.vval.v_partial = ch->ch_part[part].ch_callback.cb_partial;
		    set_ref_in_item(&dtv, copyID, ht_stack, list_stack);
		}
	    }
	    if (ch->ch_callback.cb_partial != NULL)
	    {
		dtv.v_type = VAR_PARTIAL;
		dtv.vval.v_partial = ch->ch_callback.cb_partial;
		set_ref_in_item(&dtv, copyID, ht_stack, list_stack);
	    }
	    if (ch->ch_close_cb.cb_partial != NULL)
	    {
		dtv.v_type = VAR_PARTIAL;
		dtv.vval.v_partial = ch->ch_close_cb.cb_partial;
		set_ref_in_item(&dtv, copyID, ht_stack, list_stack);
	    }
	}
    }
#endif

    return abort;
}

/*
 * Return a string with the string representation of a variable.
 * If the memory is allocated "tofree" is set to it, otherwise NULL.
 * "numbuf" is used for a number.
 * When "copyID" is not NULL replace recursive lists and dicts with "...".
 * When both "echo_style" and "composite_val" are FALSE, put quotes around
 * strings as "string()", otherwise does not put quotes around strings, as
 * ":echo" displays values.
* When "restore_copyID" is FALSE, repeated items in dictionaries and lists * are replaced with "...". * May return NULL. */ char_u * echo_string_core( typval_T *tv, char_u **tofree, char_u *numbuf, int copyID, int echo_style, int restore_copyID, int composite_val) { static int recurse = 0; char_u *r = NULL; if (recurse >= DICT_MAXNEST) { if (!did_echo_string_emsg) { // Only give this message once for a recursive call to avoid // flooding the user with errors. And stop iterating over lists // and dicts. did_echo_string_emsg = TRUE; emsg(_(e_variable_nested_too_deep_for_displaying)); } *tofree = NULL; return (char_u *)"{E724}"; } ++recurse; switch (tv->v_type) { case VAR_STRING: if (echo_style && !composite_val) { *tofree = NULL; r = tv->vval.v_string; if (r == NULL) r = (char_u *)""; } else { *tofree = string_quote(tv->vval.v_string, FALSE); r = *tofree; } break; case VAR_FUNC: { char_u buf[MAX_FUNC_NAME_LEN]; if (echo_style) { r = tv->vval.v_string == NULL ? (char_u *)"function()" : make_ufunc_name_readable(tv->vval.v_string, buf, MAX_FUNC_NAME_LEN); if (r == buf) { r = vim_strsave(buf); *tofree = r; } else *tofree = NULL; } else { *tofree = string_quote(tv->vval.v_string == NULL ? NULL : make_ufunc_name_readable( tv->vval.v_string, buf, MAX_FUNC_NAME_LEN), TRUE); r = *tofree; } } break; case VAR_PARTIAL: { partial_T *pt = tv->vval.v_partial; char_u *fname = string_quote(pt == NULL ? NULL : partial_name(pt), FALSE); garray_T ga; int i; char_u *tf; ga_init2(&ga, 1, 100); ga_concat(&ga, (char_u *)"function("); if (fname != NULL) { // When using uf_name prepend "g:" for a global function. if (pt != NULL && pt->pt_name == NULL && fname[0] == '\'' && vim_isupper(fname[1])) { ga_concat(&ga, (char_u *)"'g:"); ga_concat(&ga, fname + 1); } else ga_concat(&ga, fname); vim_free(fname); } if (pt != NULL && pt->pt_argc > 0) { ga_concat(&ga, (char_u *)", ["); for (i = 0; i < pt->pt_argc; ++i) { if (i > 0) ga_concat(&ga, (char_u *)", "); ga_concat(&ga, tv2string(&pt->pt_argv[i], &tf, numbuf, copyID)); vim_free(tf); } ga_concat(&ga, (char_u *)"]"); } if (pt != NULL && pt->pt_dict != NULL) { typval_T dtv; ga_concat(&ga, (char_u *)", "); dtv.v_type = VAR_DICT; dtv.vval.v_dict = pt->pt_dict; ga_concat(&ga, tv2string(&dtv, &tf, numbuf, copyID)); vim_free(tf); } // terminate with ')' and a NUL ga_concat_len(&ga, (char_u *)")", 2); *tofree = ga.ga_data; r = *tofree; break; } case VAR_BLOB: r = blob2string(tv->vval.v_blob, tofree, numbuf); break; case VAR_LIST: if (tv->vval.v_list == NULL) { // NULL list is equivalent to empty list. *tofree = NULL; r = (char_u *)"[]"; } else if (copyID != 0 && tv->vval.v_list->lv_copyID == copyID && tv->vval.v_list->lv_len > 0) { *tofree = NULL; r = (char_u *)"[...]"; } else { int old_copyID = tv->vval.v_list->lv_copyID; tv->vval.v_list->lv_copyID = copyID; *tofree = list2string(tv, copyID, restore_copyID); if (restore_copyID) tv->vval.v_list->lv_copyID = old_copyID; r = *tofree; } break; case VAR_DICT: if (tv->vval.v_dict == NULL) { // NULL dict is equivalent to empty dict. 
*tofree = NULL; r = (char_u *)"{}"; } else if (copyID != 0 && tv->vval.v_dict->dv_copyID == copyID && tv->vval.v_dict->dv_hashtab.ht_used != 0) { *tofree = NULL; r = (char_u *)"{...}"; } else { int old_copyID = tv->vval.v_dict->dv_copyID; tv->vval.v_dict->dv_copyID = copyID; *tofree = dict2string(tv, copyID, restore_copyID); if (restore_copyID) tv->vval.v_dict->dv_copyID = old_copyID; r = *tofree; } break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: *tofree = NULL; r = tv_get_string_buf(tv, numbuf); break; case VAR_JOB: case VAR_CHANNEL: #ifdef FEAT_JOB_CHANNEL *tofree = NULL; r = tv->v_type == VAR_JOB ? job_to_string_buf(tv, numbuf) : channel_to_string_buf(tv, numbuf); if (composite_val) { *tofree = string_quote(r, FALSE); r = *tofree; } #endif break; case VAR_INSTR: *tofree = NULL; r = (char_u *)"instructions"; break; case VAR_FLOAT: #ifdef FEAT_FLOAT *tofree = NULL; vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float); r = numbuf; break; #endif case VAR_BOOL: case VAR_SPECIAL: *tofree = NULL; r = (char_u *)get_var_special_name(tv->vval.v_number); break; } if (--recurse == 0) did_echo_string_emsg = FALSE; return r; } /* * Return a string with the string representation of a variable. * If the memory is allocated "tofree" is set to it, otherwise NULL. * "numbuf" is used for a number. * Does not put quotes around strings, as ":echo" displays values. * When "copyID" is not NULL replace recursive lists and dicts with "...". * May return NULL. */ char_u * echo_string( typval_T *tv, char_u **tofree, char_u *numbuf, int copyID) { return echo_string_core(tv, tofree, numbuf, copyID, TRUE, FALSE, FALSE); } /* * Convert the specified byte index of line 'lnum' in buffer 'buf' to a * character index. Works only for loaded buffers. Returns -1 on failure. * The index of the first byte and the first character is zero. */ int buf_byteidx_to_charidx(buf_T *buf, int lnum, int byteidx) { char_u *str; char_u *t; int count; if (buf == NULL || buf->b_ml.ml_mfp == NULL) return -1; if (lnum > buf->b_ml.ml_line_count) lnum = buf->b_ml.ml_line_count; str = ml_get_buf(buf, lnum, FALSE); if (str == NULL) return -1; if (*str == NUL) return 0; // count the number of characters t = str; for (count = 0; *t != NUL && t <= str + byteidx; count++) t += mb_ptr2len(t); // In insert mode, when the cursor is at the end of a non-empty line, // byteidx points to the NUL character immediately past the end of the // string. In this case, add one to the character count. if (*t == NUL && byteidx != 0 && t == str + byteidx) count++; return count - 1; } /* * Convert the specified character index of line 'lnum' in buffer 'buf' to a * byte index. Works only for loaded buffers. Returns -1 on failure. * The index of the first byte and the first character is zero. */ int buf_charidx_to_byteidx(buf_T *buf, int lnum, int charidx) { char_u *str; char_u *t; if (buf == NULL || buf->b_ml.ml_mfp == NULL) return -1; if (lnum > buf->b_ml.ml_line_count) lnum = buf->b_ml.ml_line_count; str = ml_get_buf(buf, lnum, FALSE); if (str == NULL) return -1; // Convert the character offset to a byte offset t = str; while (*t != NUL && --charidx > 0) t += mb_ptr2len(t); return t - str; } /* * Translate a String variable into a position. * Returns NULL when there is an error. */ pos_T * var2fpos( typval_T *varp, int dollar_lnum, // TRUE when $ is last line int *fnum, // set to fnum for '0, 'A, etc. int charcol) // return character column { char_u *name; static pos_T pos; pos_T *pp; // Argument can be [lnum, col, coladd]. 
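    // E.g. [42, 1, 0] addresses line 42, column 1, with no virtual offset.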
if (varp->v_type == VAR_LIST) { list_T *l; int len; int error = FALSE; listitem_T *li; l = varp->vval.v_list; if (l == NULL) return NULL; // Get the line number pos.lnum = list_find_nr(l, 0L, &error); if (error || pos.lnum <= 0 || pos.lnum > curbuf->b_ml.ml_line_count) return NULL; // invalid line number if (charcol) len = (long)mb_charlen(ml_get(pos.lnum)); else len = (long)STRLEN(ml_get(pos.lnum)); // Get the column number // We accept "$" for the column number: last column. li = list_find(l, 1L); if (li != NULL && li->li_tv.v_type == VAR_STRING && li->li_tv.vval.v_string != NULL && STRCMP(li->li_tv.vval.v_string, "$") == 0) { pos.col = len + 1; } else { pos.col = list_find_nr(l, 1L, &error); if (error) return NULL; } // Accept a position up to the NUL after the line. if (pos.col == 0 || (int)pos.col > len + 1) return NULL; // invalid column number --pos.col; // Get the virtual offset. Defaults to zero. pos.coladd = list_find_nr(l, 2L, &error); if (error) pos.coladd = 0; return &pos; } if (in_vim9script() && check_for_string_arg(varp, 0) == FAIL) return NULL; name = tv_get_string_chk(varp); if (name == NULL) return NULL; pos.lnum = 0; if (name[0] == '.' && (!in_vim9script() || name[1] == NUL)) { // cursor pos = curwin->w_cursor; } else if (name[0] == 'v' && name[1] == NUL) { // Visual start if (VIsual_active) pos = VIsual; else pos = curwin->w_cursor; } else if (name[0] == '\'' && (!in_vim9script() || (name[1] != NUL && name[2] == NUL))) { // mark pp = getmark_buf_fnum(curbuf, name[1], FALSE, fnum); if (pp == NULL || pp == (pos_T *)-1 || pp->lnum <= 0) return NULL; pos = *pp; } if (pos.lnum != 0) { if (charcol) pos.col = buf_byteidx_to_charidx(curbuf, pos.lnum, pos.col); return &pos; } pos.coladd = 0; if (name[0] == 'w' && dollar_lnum) { pos.col = 0; if (name[1] == '0') // "w0": first visible line { update_topline(); // In silent Ex mode topline is zero, but that's not a valid line // number; use one instead. pos.lnum = curwin->w_topline > 0 ? curwin->w_topline : 1; return &pos; } else if (name[1] == '$') // "w$": last visible line { validate_botline(); // In silent Ex mode botline is zero, return zero then. pos.lnum = curwin->w_botline > 0 ? curwin->w_botline - 1 : 0; return &pos; } } else if (name[0] == '$') // last column or line { if (dollar_lnum) { pos.lnum = curbuf->b_ml.ml_line_count; pos.col = 0; } else { pos.lnum = curwin->w_cursor.lnum; if (charcol) pos.col = (colnr_T)mb_charlen(ml_get_curline()); else pos.col = (colnr_T)STRLEN(ml_get_curline()); } return &pos; } if (in_vim9script()) semsg(_(e_invalid_value_for_line_number_str), name); return NULL; } /* * Convert list in "arg" into a position and optional file number. * When "fnump" is NULL there is no file number, only 3 items. * Note that the column is passed on as-is, the caller may want to decrement * it to use 1 for the first column. * Return FAIL when conversion is not possible, doesn't check the position for * validity. */ int list2fpos( typval_T *arg, pos_T *posp, int *fnump, colnr_T *curswantp, int charcol) { list_T *l = arg->vval.v_list; long i = 0; long n; // List must be: [fnum, lnum, col, coladd, curswant], where "fnum" is only // there when "fnump" isn't NULL; "coladd" and "curswant" are optional. if (arg->v_type != VAR_LIST || l == NULL || l->lv_len < (fnump == NULL ? 2 : 3) || l->lv_len > (fnump == NULL ? 
4 : 5)) return FAIL; if (fnump != NULL) { n = list_find_nr(l, i++, NULL); // fnum if (n < 0) return FAIL; if (n == 0) n = curbuf->b_fnum; // current buffer *fnump = n; } n = list_find_nr(l, i++, NULL); // lnum if (n < 0) return FAIL; posp->lnum = n; n = list_find_nr(l, i++, NULL); // col if (n < 0) return FAIL; // If character position is specified, then convert to byte position if (charcol) { buf_T *buf; // Get the text for the specified line in a loaded buffer buf = buflist_findnr(fnump == NULL ? curbuf->b_fnum : *fnump); if (buf == NULL || buf->b_ml.ml_mfp == NULL) return FAIL; n = buf_charidx_to_byteidx(buf, posp->lnum, n) + 1; } posp->col = n; n = list_find_nr(l, i, NULL); // off if (n < 0) posp->coladd = 0; else posp->coladd = n; if (curswantp != NULL) *curswantp = list_find_nr(l, i + 1, NULL); // curswant return OK; } /* * Get the length of an environment variable name. * Advance "arg" to the first character after the name. * Return 0 for error. */ int get_env_len(char_u **arg) { char_u *p; int len; for (p = *arg; vim_isIDc(*p); ++p) ; if (p == *arg) // no name found return 0; len = (int)(p - *arg); *arg = p; return len; } /* * Get the length of the name of a function or internal variable. * "arg" is advanced to after the name. * Return 0 if something is wrong. */ int get_id_len(char_u **arg) { char_u *p; int len; // Find the end of the name. for (p = *arg; eval_isnamec(*p); ++p) { if (*p == ':') { // "s:" is start of "s:var", but "n:" is not and can be used in // slice "[n:]". Also "xx:" is not a namespace. len = (int)(p - *arg); if ((len == 1 && vim_strchr(NAMESPACE_CHAR, **arg) == NULL) || len > 1) break; } } if (p == *arg) // no name found return 0; len = (int)(p - *arg); *arg = p; return len; } /* * Get the length of the name of a variable or function. * Only the name is recognized, does not handle ".key" or "[idx]". * "arg" is advanced to the first non-white character after the name. * Return -1 if curly braces expansion failed. * Return 0 if something else is wrong. * If the name contains 'magic' {}'s, expand them and return the * expanded name in an allocated string via 'alias' - caller must free. */ int get_name_len( char_u **arg, char_u **alias, int evaluate, int verbose) { int len; char_u *p; char_u *expr_start; char_u *expr_end; *alias = NULL; // default to no alias if ((*arg)[0] == K_SPECIAL && (*arg)[1] == KS_EXTRA && (*arg)[2] == (int)KE_SNR) { // hard coded <SNR>, already translated *arg += 3; return get_id_len(arg) + 3; } len = eval_fname_script(*arg); if (len > 0) { // literal "<SID>", "s:" or "<SNR>" *arg += len; } /* * Find the end of the name; check for {} construction. */ p = find_name_end(*arg, &expr_start, &expr_end, len > 0 ? 0 : FNE_CHECK_START); if (expr_start != NULL) { char_u *temp_string; if (!evaluate) { len += (int)(p - *arg); *arg = skipwhite(p); return len; } /* * Include any <SID> etc in the expanded string: * Thus the -len here. */ temp_string = make_expanded_name(*arg - len, expr_start, expr_end, p); if (temp_string == NULL) return -1; *alias = temp_string; *arg = skipwhite(p); return (int)STRLEN(temp_string); } len += get_id_len(arg); // Only give an error when there is something, otherwise it will be // reported at a higher level. if (len == 0 && verbose && **arg != NUL) semsg(_(e_invalid_expression_str), *arg); return len; } /* * Find the end of a variable or function name, taking care of magic braces. * If "expr_start" is not NULL then "expr_start" and "expr_end" are set to the * start and end of the first magic braces item. 
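 * E.g. in "my_{adjective}_variable" the first magic braces item runs from
 * the '{' to the matching '}'.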
* "flags" can have FNE_INCL_BR and FNE_CHECK_START. * Return a pointer to just after the name. Equal to "arg" if there is no * valid name. */ char_u * find_name_end( char_u *arg, char_u **expr_start, char_u **expr_end, int flags) { int mb_nest = 0; int br_nest = 0; char_u *p; int len; int vim9script = in_vim9script(); if (expr_start != NULL) { *expr_start = NULL; *expr_end = NULL; } // Quick check for valid starting character. if ((flags & FNE_CHECK_START) && !eval_isnamec1(*arg) && (*arg != '{' || vim9script)) return arg; for (p = arg; *p != NUL && (eval_isnamec(*p) || (*p == '{' && !vim9script) || ((flags & FNE_INCL_BR) && (*p == '[' || (*p == '.' && eval_isdictc(p[1])))) || mb_nest != 0 || br_nest != 0); MB_PTR_ADV(p)) { if (*p == '\'') { // skip over 'string' to avoid counting [ and ] inside it. for (p = p + 1; *p != NUL && *p != '\''; MB_PTR_ADV(p)) ; if (*p == NUL) break; } else if (*p == '"') { // skip over "str\"ing" to avoid counting [ and ] inside it. for (p = p + 1; *p != NUL && *p != '"'; MB_PTR_ADV(p)) if (*p == '\\' && p[1] != NUL) ++p; if (*p == NUL) break; } else if (br_nest == 0 && mb_nest == 0 && *p == ':') { // "s:" is start of "s:var", but "n:" is not and can be used in // slice "[n:]". Also "xx:" is not a namespace. But {ns}: is. len = (int)(p - arg); if ((len == 1 && vim_strchr(NAMESPACE_CHAR, *arg) == NULL) || (len > 1 && p[-1] != '}')) break; } if (mb_nest == 0) { if (*p == '[') ++br_nest; else if (*p == ']') --br_nest; } if (br_nest == 0 && !vim9script) { if (*p == '{') { mb_nest++; if (expr_start != NULL && *expr_start == NULL) *expr_start = p; } else if (*p == '}') { mb_nest--; if (expr_start != NULL && mb_nest == 0 && *expr_end == NULL) *expr_end = p; } } } return p; } /* * Expands out the 'magic' {}'s in a variable/function name. * Note that this can call itself recursively, to deal with * constructs like foo{bar}{baz}{bam} * The four pointer arguments point to "foo{expre}ss{ion}bar" * "in_start" ^ * "expr_start" ^ * "expr_end" ^ * "in_end" ^ * * Returns a new allocated string, which the caller must free. * Returns NULL for failure. */ static char_u * make_expanded_name( char_u *in_start, char_u *expr_start, char_u *expr_end, char_u *in_end) { char_u c1; char_u *retval = NULL; char_u *temp_result; if (expr_end == NULL || in_end == NULL) return NULL; *expr_start = NUL; *expr_end = NUL; c1 = *in_end; *in_end = NUL; temp_result = eval_to_string(expr_start + 1, FALSE); if (temp_result != NULL) { retval = alloc(STRLEN(temp_result) + (expr_start - in_start) + (in_end - expr_end) + 1); if (retval != NULL) { STRCPY(retval, in_start); STRCAT(retval, temp_result); STRCAT(retval, expr_end + 1); } } vim_free(temp_result); *in_end = c1; // put char back for error messages *expr_start = '{'; *expr_end = '}'; if (retval != NULL) { temp_result = find_name_end(retval, &expr_start, &expr_end, 0); if (expr_start != NULL) { // Further expansion! temp_result = make_expanded_name(retval, expr_start, expr_end, temp_result); vim_free(retval); retval = temp_result; } } return retval; } /* * Return TRUE if character "c" can be used in a variable or function name. * Does not include '{' or '}' for magic braces. */ int eval_isnamec(int c) { return ASCII_ISALNUM(c) || c == '_' || c == ':' || c == AUTOLOAD_CHAR; } /* * Return TRUE if character "c" can be used as the first character in a * variable or function name (excluding '{' and '}'). 
*/ int eval_isnamec1(int c) { return ASCII_ISALPHA(c) || c == '_'; } /* * Return TRUE if character "c" can be used as the first character of a * dictionary key. */ int eval_isdictc(int c) { return ASCII_ISALNUM(c) || c == '_'; } /* * Handle: * - expr[expr], expr[expr:expr] subscript * - ".name" lookup * - function call with Funcref variable: func(expr) * - method call: var->method() * * Can all be combined in any order: dict.func(expr)[idx]['func'](expr)->len() * "name_start" points to a variable before the subscript or is NULL. */ int handle_subscript( char_u **arg, char_u *name_start, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int ret = OK; dict_T *selfdict = NULL; int check_white = TRUE; int getnext; char_u *p; while (ret == OK) { // When at the end of the line and ".name" or "->{" or "->X" follows in // the next line then consume the line break. p = eval_next_non_blank(*arg, evalarg, &getnext); if (getnext && ((rettv->v_type == VAR_DICT && *p == '.' && eval_isdictc(p[1])) || (p[0] == '-' && p[1] == '>' && (p[2] == '{' || ASCII_ISALPHA(in_vim9script() ? *skipwhite(p + 2) : p[2]))))) { *arg = eval_next_line(*arg, evalarg); p = *arg; check_white = FALSE; } if (rettv->v_type == VAR_ANY) { char_u *exp_name; int cc; int idx; ufunc_T *ufunc; type_T *type; // Found script from "import {name} as name", script item name must // follow. "rettv->vval.v_number" has the script ID. if (**arg != '.') { if (verbose) semsg(_(e_expected_dot_after_name_str), name_start != NULL ? name_start: *arg); ret = FAIL; break; } ++*arg; if (IS_WHITE_OR_NUL(**arg)) { if (verbose) emsg(_(e_no_white_space_allowed_after_dot)); ret = FAIL; break; } // isolate the name exp_name = *arg; while (eval_isnamec(**arg)) ++*arg; cc = **arg; **arg = NUL; idx = find_exported(rettv->vval.v_number, exp_name, &ufunc, &type, evalarg->eval_cctx, evalarg->eval_cstack, verbose); **arg = cc; if (idx < 0 && ufunc == NULL) { ret = FAIL; break; } if (idx >= 0) { scriptitem_T *si = SCRIPT_ITEM(rettv->vval.v_number); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; copy_tv(sv->sv_tv, rettv); } else { rettv->v_type = VAR_FUNC; rettv->vval.v_string = vim_strsave(ufunc->uf_name); } continue; } if ((**arg == '(' && (!evaluate || rettv->v_type == VAR_FUNC || rettv->v_type == VAR_PARTIAL)) && (!check_white || !VIM_ISWHITE(*(*arg - 1)))) { ret = call_func_rettv(arg, evalarg, rettv, evaluate, selfdict, NULL); // Stop the expression evaluation when immediately aborting on // error, or when an interrupt occurred or an exception was thrown // but not caught. if (aborting()) { if (ret == OK) clear_tv(rettv); ret = FAIL; } dict_unref(selfdict); selfdict = NULL; } else if (p[0] == '-' && p[1] == '>') { if (in_vim9script()) *arg = skipwhite(p + 2); else *arg = p + 2; if (ret == OK) { if (VIM_ISWHITE(**arg)) { emsg(_(e_no_white_space_allowed_before_parenthesis)); ret = FAIL; } else if ((**arg == '{' && !in_vim9script()) || **arg == '(') // expr->{lambda}() or expr->(lambda)() ret = eval_lambda(arg, rettv, evalarg, verbose); else // expr->name() ret = eval_method(arg, rettv, evalarg, verbose); } } // "." is ".name" lookup when we found a dict or when evaluating and // scriptversion is at least 2, where string concatenation is "..". else if (**arg == '[' || (**arg == '.' && (rettv->v_type == VAR_DICT || (!evaluate && (*arg)[1] != '.' 
&& !in_old_script(2))))) { dict_unref(selfdict); if (rettv->v_type == VAR_DICT) { selfdict = rettv->vval.v_dict; if (selfdict != NULL) ++selfdict->dv_refcount; } else selfdict = NULL; if (eval_index(arg, rettv, evalarg, verbose) == FAIL) { clear_tv(rettv); ret = FAIL; } } else break; } // Turn "dict.Func" into a partial for "Func" bound to "dict". // Don't do this when "Func" is already a partial that was bound // explicitly (pt_auto is FALSE). if (selfdict != NULL && (rettv->v_type == VAR_FUNC || (rettv->v_type == VAR_PARTIAL && (rettv->vval.v_partial->pt_auto || rettv->vval.v_partial->pt_dict == NULL)))) selfdict = make_partial(selfdict, rettv); dict_unref(selfdict); return ret; } /* * Make a copy of an item. * Lists and Dictionaries are also copied. A deep copy if "deep" is set. * "top" is TRUE for the toplevel of copy(). * For deepcopy() "copyID" is zero for a full copy or the ID for when a * reference to an already copied list/dict can be used. * Returns FAIL or OK. */ int item_copy( typval_T *from, typval_T *to, int deep, int top, int copyID) { static int recurse = 0; int ret = OK; if (recurse >= DICT_MAXNEST) { emsg(_(e_variable_nested_too_deep_for_making_copy)); return FAIL; } ++recurse; switch (from->v_type) { case VAR_NUMBER: case VAR_FLOAT: case VAR_STRING: case VAR_FUNC: case VAR_PARTIAL: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: copy_tv(from, to); break; case VAR_LIST: to->v_type = VAR_LIST; to->v_lock = 0; if (from->vval.v_list == NULL) to->vval.v_list = NULL; else if (copyID != 0 && from->vval.v_list->lv_copyID == copyID) { // use the copy made earlier to->vval.v_list = from->vval.v_list->lv_copylist; ++to->vval.v_list->lv_refcount; } else to->vval.v_list = list_copy(from->vval.v_list, deep, top, copyID); if (to->vval.v_list == NULL) ret = FAIL; break; case VAR_BLOB: ret = blob_copy(from->vval.v_blob, to); break; case VAR_DICT: to->v_type = VAR_DICT; to->v_lock = 0; if (from->vval.v_dict == NULL) to->vval.v_dict = NULL; else if (copyID != 0 && from->vval.v_dict->dv_copyID == copyID) { // use the copy made earlier to->vval.v_dict = from->vval.v_dict->dv_copydict; ++to->vval.v_dict->dv_refcount; } else to->vval.v_dict = dict_copy(from->vval.v_dict, deep, top, copyID); if (to->vval.v_dict == NULL) ret = FAIL; break; case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: internal_error_no_abort("item_copy(UNKNOWN)"); ret = FAIL; } --recurse; return ret; } void echo_one(typval_T *rettv, int with_space, int *atstart, int *needclr) { char_u *tofree; char_u numbuf[NUMBUFLEN]; char_u *p = echo_string(rettv, &tofree, numbuf, get_copyID()); if (*atstart) { *atstart = FALSE; // Call msg_start() after eval1(), evaluating the expression // may cause a message to appear. if (with_space) { // Mark the saved text as finishing the line, so that what // follows is displayed on a new line when scrolling back // at the more prompt. msg_sb_eol(); msg_start(); } } else if (with_space) msg_puts_attr(" ", echo_attr); if (p != NULL) for ( ; *p != NUL && !got_int; ++p) { if (*p == '\n' || *p == '\r' || *p == TAB) { if (*p != TAB && *needclr) { // remove any text still there from the command msg_clr_eos(); *needclr = FALSE; } msg_putchar_attr(*p, echo_attr); } else { if (has_mbyte) { int i = (*mb_ptr2len)(p); (void)msg_outtrans_len_attr(p, i, echo_attr); p += i - 1; } else (void)msg_outtrans_len_attr(p, 1, echo_attr); } } vim_free(tofree); } /* * ":echo expr1 ..." print each argument separated with a space, add a * newline at the end. * ":echon expr1 ..." 
print each argument plain. */ void ex_echo(exarg_T *eap) { char_u *arg = eap->arg; typval_T rettv; char_u *arg_start; int needclr = TRUE; int atstart = TRUE; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap->skip); if (eap->skip) ++emsg_skip; while ((!ends_excmd2(eap->cmd, arg) || *arg == '"') && !got_int) { // If eval1() causes an error message the text from the command may // still need to be cleared. E.g., "echo 22,44". need_clr_eos = needclr; arg_start = arg; if (eval1(&arg, &rettv, &evalarg) == FAIL) { /* * Report the invalid expression unless the expression evaluation * has been cancelled due to an aborting error, an interrupt, or an * exception. */ if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before) semsg(_(e_invalid_expression_str), arg_start); need_clr_eos = FALSE; break; } need_clr_eos = FALSE; if (!eap->skip) { if (rettv.v_type == VAR_VOID) { semsg(_(e_expression_does_not_result_in_value_str), arg_start); break; } echo_one(&rettv, eap->cmdidx == CMD_echo, &atstart, &needclr); } clear_tv(&rettv); arg = skipwhite(arg); } set_nextcmd(eap, arg); clear_evalarg(&evalarg, eap); if (eap->skip) --emsg_skip; else { // remove text that may still be there from the command if (needclr) msg_clr_eos(); if (eap->cmdidx == CMD_echo) msg_end(); } } /* * ":echohl {name}". */ void ex_echohl(exarg_T *eap) { echo_attr = syn_name2attr(eap->arg); } /* * Returns the :echo attribute */ int get_echo_attr(void) { return echo_attr; } /* * ":execute expr1 ..." execute the result of an expression. * ":echomsg expr1 ..." Print a message * ":echoerr expr1 ..." Print an error * ":echoconsole expr1 ..." Print a message on stdout * Each gets spaces around each argument and a newline at the end for * echo commands */ void ex_execute(exarg_T *eap) { char_u *arg = eap->arg; typval_T rettv; int ret = OK; char_u *p; garray_T ga; int len; long start_lnum = SOURCING_LNUM; ga_init2(&ga, 1, 80); if (eap->skip) ++emsg_skip; while (!ends_excmd2(eap->cmd, arg) || *arg == '"') { ret = eval1_emsg(&arg, &rettv, eap); if (ret == FAIL) break; if (!eap->skip) { char_u buf[NUMBUFLEN]; if (eap->cmdidx == CMD_execute) { if (rettv.v_type == VAR_CHANNEL || rettv.v_type == VAR_JOB) { semsg(_(e_using_invalid_value_as_string_str), vartype_name(rettv.v_type)); p = NULL; } else p = tv_get_string_buf(&rettv, buf); } else p = tv_stringify(&rettv, buf); if (p == NULL) { clear_tv(&rettv); ret = FAIL; break; } len = (int)STRLEN(p); if (ga_grow(&ga, len + 2) == FAIL) { clear_tv(&rettv); ret = FAIL; break; } if (ga.ga_len) ((char_u *)(ga.ga_data))[ga.ga_len++] = ' '; STRCPY((char_u *)(ga.ga_data) + ga.ga_len, p); ga.ga_len += len; } clear_tv(&rettv); arg = skipwhite(arg); } if (ret != FAIL && ga.ga_data != NULL) { // use the first line of continuation lines for messages SOURCING_LNUM = start_lnum; if (eap->cmdidx == CMD_echomsg || eap->cmdidx == CMD_echoerr) { // Mark the already saved text as finishing the line, so that what // follows is displayed on a new line when scrolling back at the // more prompt. msg_sb_eol(); } if (eap->cmdidx == CMD_echomsg) { msg_attr(ga.ga_data, echo_attr); out_flush(); } else if (eap->cmdidx == CMD_echoconsole) { ui_write(ga.ga_data, (int)STRLEN(ga.ga_data), TRUE); ui_write((char_u *)"\r\n", 2, TRUE); } else if (eap->cmdidx == CMD_echoerr) { int save_did_emsg = did_emsg; // We don't want to abort following commands, restore did_emsg. 
emsg(ga.ga_data); if (!force_abort) did_emsg = save_did_emsg; } else if (eap->cmdidx == CMD_execute) { int save_sticky_cmdmod_flags = sticky_cmdmod_flags; // "legacy exe cmd" and "vim9cmd exe cmd" applies to "cmd". sticky_cmdmod_flags = cmdmod.cmod_flags & (CMOD_LEGACY | CMOD_VIM9CMD); do_cmdline((char_u *)ga.ga_data, eap->getline, eap->cookie, DOCMD_NOWAIT|DOCMD_VERBOSE); sticky_cmdmod_flags = save_sticky_cmdmod_flags; } } ga_clear(&ga); if (eap->skip) --emsg_skip; set_nextcmd(eap, arg); } /* * Skip over the name of an option: "&option", "&g:option" or "&l:option". * "arg" points to the "&" or '+' when called, to "option" when returning. * Returns NULL when no option name found. Otherwise pointer to the char * after the option name. */ char_u * find_option_end(char_u **arg, int *scope) { char_u *p = *arg; ++p; if (*p == 'g' && p[1] == ':') { *scope = OPT_GLOBAL; p += 2; } else if (*p == 'l' && p[1] == ':') { *scope = OPT_LOCAL; p += 2; } else *scope = 0; if (!ASCII_ISALPHA(*p)) return NULL; *arg = p; if (p[0] == 't' && p[1] == '_' && p[2] != NUL && p[3] != NUL) p += 4; // termcap option else while (ASCII_ISALPHA(*p)) ++p; return p; } /* * Display script name where an item was last set. * Should only be invoked when 'verbose' is non-zero. */ void last_set_msg(sctx_T script_ctx) { char_u *p; if (script_ctx.sc_sid != 0) { p = home_replace_save(NULL, get_scriptname(script_ctx.sc_sid)); if (p != NULL) { verbose_enter(); msg_puts(_("\n\tLast set from ")); msg_puts((char *)p); if (script_ctx.sc_lnum > 0) { msg_puts(_(line_msg)); msg_outnum((long)script_ctx.sc_lnum); } verbose_leave(); vim_free(p); } } } #endif // FEAT_EVAL /* * Perform a substitution on "str" with pattern "pat" and substitute "sub". * When "sub" is NULL "expr" is used, must be a VAR_FUNC or VAR_PARTIAL. * "flags" can be "g" to do a global substitute. * Returns an allocated string, NULL for error. */ char_u * do_string_sub( char_u *str, char_u *pat, char_u *sub, typval_T *expr, char_u *flags) { int sublen; regmatch_T regmatch; int i; int do_all; char_u *tail; char_u *end; garray_T ga; char_u *ret; char_u *save_cpo; char_u *zero_width = NULL; // Make 'cpoptions' empty, so that the 'l' flag doesn't work here save_cpo = p_cpo; p_cpo = empty_option; ga_init2(&ga, 1, 200); do_all = (flags[0] == 'g'); regmatch.rm_ic = p_ic; regmatch.regprog = vim_regcomp(pat, RE_MAGIC + RE_STRING); if (regmatch.regprog != NULL) { tail = str; end = str + STRLEN(str); while (vim_regexec_nl(&regmatch, str, (colnr_T)(tail - str))) { // Skip empty match except for first match. if (regmatch.startp[0] == regmatch.endp[0]) { if (zero_width == regmatch.startp[0]) { // avoid getting stuck on a match with an empty string i = mb_ptr2len(tail); mch_memmove((char_u *)ga.ga_data + ga.ga_len, tail, (size_t)i); ga.ga_len += i; tail += i; continue; } zero_width = regmatch.startp[0]; } /* * Get some space for a temporary buffer to do the substitution * into. It will contain: * - The text up to where the match is. * - The substituted text. * - The text after the match. 
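	     * The first vim_regsub() call below, made without REGSUB_COPY,
	     * only computes the length of the substitution, so that the
	     * growarray can be sized before the second call copies the text.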
*/ sublen = vim_regsub(&regmatch, sub, expr, tail, 0, REGSUB_MAGIC); if (ga_grow(&ga, (int)((end - tail) + sublen - (regmatch.endp[0] - regmatch.startp[0]))) == FAIL) { ga_clear(&ga); break; } // copy the text up to where the match is i = (int)(regmatch.startp[0] - tail); mch_memmove((char_u *)ga.ga_data + ga.ga_len, tail, (size_t)i); // add the substituted text (void)vim_regsub(&regmatch, sub, expr, (char_u *)ga.ga_data + ga.ga_len + i, sublen, REGSUB_COPY | REGSUB_MAGIC); ga.ga_len += i + sublen - 1; tail = regmatch.endp[0]; if (*tail == NUL) break; if (!do_all) break; } if (ga.ga_data != NULL) STRCPY((char *)ga.ga_data + ga.ga_len, tail); vim_regfree(regmatch.regprog); } ret = vim_strsave(ga.ga_data == NULL ? str : (char_u *)ga.ga_data); ga_clear(&ga); if (p_cpo == empty_option) p_cpo = save_cpo; else { // Darn, evaluating {sub} expression or {expr} changed the value. // If it's still empty it was changed and restored, need to restore in // the complicated way. if (*p_cpo == NUL) set_option_value_give_err((char_u *)"cpo", 0L, save_cpo, 0); free_string_option(save_cpo); } return ret; }
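
/*
 * A minimal sketch of calling do_string_sub() (illustrative only; real
 * callers, such as the implementation of substitute(), validate the
 * arguments first):
 *
 *	char_u	*res = do_string_sub((char_u *)"foo bar foo",
 *			(char_u *)"foo", (char_u *)"X", NULL, (char_u *)"g");
 *	// "res" is an allocated string holding "X bar X"; it must be
 *	// freed with vim_free().
 */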
#if defined(FEAT_EVAL) || defined(PROTO)

    void
fill_evalarg_from_eap(evalarg_T *evalarg, exarg_T *eap, int skip)
{
    init_evalarg(evalarg);
    evalarg->eval_flags = skip ?
0 : EVAL_EVALUATE; if (eap != NULL) { evalarg->eval_cstack = eap->cstack; if (sourcing_a_script(eap) || eap->getline == get_list_line) { evalarg->eval_getline = eap->getline; evalarg->eval_cookie = eap->cookie; } } } /* * Top level evaluation function, returning a boolean. * Sets "error" to TRUE if there was an error. * Return TRUE or FALSE. */ int eval_to_bool( char_u *arg, int *error, exarg_T *eap, int skip) // only parse, don't execute { typval_T tv; varnumber_T retval = FALSE; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, skip); if (skip) ++emsg_skip; if (eval0(arg, &tv, eap, &evalarg) == FAIL) *error = TRUE; else { *error = FALSE; if (!skip) { if (in_vim9script()) retval = tv_get_bool_chk(&tv, error); else retval = (tv_get_number_chk(&tv, error) != 0); clear_tv(&tv); } } if (skip) --emsg_skip; clear_evalarg(&evalarg, eap); return (int)retval; } /* * Call eval1() and give an error message if not done at a lower level. */ static int eval1_emsg(char_u **arg, typval_T *rettv, exarg_T *eap) { char_u *start = *arg; int ret; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); ret = eval1(arg, rettv, &evalarg); if (ret == FAIL) { // Report the invalid expression unless the expression evaluation has // been cancelled due to an aborting error, an interrupt, or an // exception, or we already gave a more specific error. // Also check called_emsg for when using assert_fails(). if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before) semsg(_(e_invalid_expression_str), start); } clear_evalarg(&evalarg, eap); return ret; } /* * Return whether a typval is a valid expression to pass to eval_expr_typval() * or eval_expr_to_bool(). An empty string returns FALSE; */ int eval_expr_valid_arg(typval_T *tv) { return tv->v_type != VAR_UNKNOWN && (tv->v_type != VAR_STRING || (tv->vval.v_string != NULL && *tv->vval.v_string != NUL)); } /* * Evaluate an expression, which can be a function, partial or string. * Pass arguments "argv[argc]". * Return the result in "rettv" and OK or FAIL. */ int eval_expr_typval(typval_T *expr, typval_T *argv, int argc, typval_T *rettv) { char_u *s; char_u buf[NUMBUFLEN]; funcexe_T funcexe; if (expr->v_type == VAR_FUNC) { s = expr->vval.v_string; if (s == NULL || *s == NUL) return FAIL; CLEAR_FIELD(funcexe); funcexe.fe_evaluate = TRUE; if (call_func(s, -1, rettv, argc, argv, &funcexe) == FAIL) return FAIL; } else if (expr->v_type == VAR_PARTIAL) { partial_T *partial = expr->vval.v_partial; if (partial == NULL) return FAIL; if (partial->pt_func != NULL && partial->pt_func->uf_def_status != UF_NOT_COMPILED) { if (call_def_function(partial->pt_func, argc, argv, partial, rettv) == FAIL) return FAIL; } else { s = partial_name(partial); if (s == NULL || *s == NUL) return FAIL; CLEAR_FIELD(funcexe); funcexe.fe_evaluate = TRUE; funcexe.fe_partial = partial; if (call_func(s, -1, rettv, argc, argv, &funcexe) == FAIL) return FAIL; } } else if (expr->v_type == VAR_INSTR) { return exe_typval_instr(expr, rettv); } else { s = tv_get_string_buf_chk_strict(expr, buf, in_vim9script()); if (s == NULL) return FAIL; s = skipwhite(s); if (eval1_emsg(&s, rettv, NULL) == FAIL) return FAIL; if (*skipwhite(s) != NUL) // check for trailing chars after expr { clear_tv(rettv); semsg(_(e_invalid_expression_str), s); return FAIL; } } return OK; } /* * Like eval_to_bool() but using a typval_T instead of a string. * Works for string, funcref and partial. 
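 *
 * A minimal sketch (illustrative; "tv" stands for a typval that
 * eval_expr_valid_arg() accepted):
 *
 *	int	error = FALSE;
 *	int	res = eval_expr_to_bool(&tv, &error);
 *
 * When "error" is set the result must be ignored.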
*/ int eval_expr_to_bool(typval_T *expr, int *error) { typval_T rettv; int res; if (eval_expr_typval(expr, NULL, 0, &rettv) == FAIL) { *error = TRUE; return FALSE; } res = (tv_get_bool_chk(&rettv, error) != 0); clear_tv(&rettv); return res; } /* * Top level evaluation function, returning a string. If "skip" is TRUE, * only parsing to "nextcmd" is done, without reporting errors. Return * pointer to allocated memory, or NULL for failure or when "skip" is TRUE. */ char_u * eval_to_string_skip( char_u *arg, exarg_T *eap, int skip) // only parse, don't execute { typval_T tv; char_u *retval; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, skip); if (skip) ++emsg_skip; if (eval0(arg, &tv, eap, &evalarg) == FAIL || skip) retval = NULL; else { retval = vim_strsave(tv_get_string(&tv)); clear_tv(&tv); } if (skip) --emsg_skip; clear_evalarg(&evalarg, eap); return retval; } /* * Initialize "evalarg" for use. */ void init_evalarg(evalarg_T *evalarg) { CLEAR_POINTER(evalarg); ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20); } /* * If "evalarg->eval_tofree" is not NULL free it later. * Caller is expected to overwrite "evalarg->eval_tofree" next. */ static void free_eval_tofree_later(evalarg_T *evalarg) { if (evalarg->eval_tofree != NULL) { if (ga_grow(&evalarg->eval_tofree_ga, 1) == OK) ((char_u **)evalarg->eval_tofree_ga.ga_data) [evalarg->eval_tofree_ga.ga_len++] = evalarg->eval_tofree; else vim_free(evalarg->eval_tofree); } } /* * After using "evalarg" filled from "eap": free the memory. */ void clear_evalarg(evalarg_T *evalarg, exarg_T *eap) { if (evalarg != NULL) { if (evalarg->eval_tofree != NULL) { if (eap != NULL) { // We may need to keep the original command line, e.g. for // ":let" it has the variable names. But we may also need the // new one, "nextcmd" points into it. Keep both. vim_free(eap->cmdline_tofree); eap->cmdline_tofree = *eap->cmdlinep; *eap->cmdlinep = evalarg->eval_tofree; } else vim_free(evalarg->eval_tofree); evalarg->eval_tofree = NULL; } ga_clear_strings(&evalarg->eval_tofree_ga); VIM_CLEAR(evalarg->eval_tofree_lambda); } } /* * Skip over an expression at "*pp". * Return FAIL for an error, OK otherwise. */ int skip_expr(char_u **pp, evalarg_T *evalarg) { typval_T rettv; *pp = skipwhite(*pp); return eval1(pp, &rettv, evalarg); } /* * Skip over an expression at "*arg". * If in Vim9 script and line breaks are encountered, the lines are * concatenated. "evalarg->eval_tofree" will be set accordingly. * "arg" is advanced to just after the expression. * "start" is set to the start of the expression, "end" to just after the end. * Also when the expression is copied to allocated memory. * Return FAIL for an error, OK otherwise. */ int skip_expr_concatenate( char_u **arg, char_u **start, char_u **end, evalarg_T *evalarg) { typval_T rettv; int res; int vim9script = in_vim9script(); garray_T *gap = evalarg == NULL ? NULL : &evalarg->eval_ga; garray_T *freegap = evalarg == NULL ? NULL : &evalarg->eval_freega; int save_flags = evalarg == NULL ? 0 : evalarg->eval_flags; int evaluate = evalarg == NULL ? FALSE : (evalarg->eval_flags & EVAL_EVALUATE); if (vim9script && evaluate && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL)) { ga_init2(gap, sizeof(char_u *), 10); // leave room for "start" if (ga_grow(gap, 1) == OK) ++gap->ga_len; ga_init2(freegap, sizeof(char_u *), 10); } *start = *arg; // Don't evaluate the expression. 
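    // Clearing EVAL_EVALUATE below makes eval1() only parse the expression,
    // which is all that is needed to find its end.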
if (evalarg != NULL) evalarg->eval_flags &= ~EVAL_EVALUATE; *arg = skipwhite(*arg); res = eval1(arg, &rettv, evalarg); *end = *arg; if (evalarg != NULL) evalarg->eval_flags = save_flags; if (vim9script && evaluate && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL)) { if (evalarg->eval_ga.ga_len == 1) { // just the one line, no need to concatenate ga_clear(gap); gap->ga_itemsize = 0; } else { char_u *p; size_t endoff = STRLEN(*arg); // Line breaks encountered, concatenate all the lines. *((char_u **)gap->ga_data) = *start; p = ga_concat_strings(gap, " "); // free the lines only when using getsourceline() if (evalarg->eval_cookie != NULL) { // Do not free the first line, the caller can still use it. *((char_u **)gap->ga_data) = NULL; // Do not free the last line, "arg" points into it, free it // later. Also free "eval_tofree" later if needed. free_eval_tofree_later(evalarg); evalarg->eval_tofree = ((char_u **)gap->ga_data)[gap->ga_len - 1]; ((char_u **)gap->ga_data)[gap->ga_len - 1] = NULL; ga_clear_strings(gap); } else { ga_clear(gap); // free lines that were explicitly marked for freeing ga_clear_strings(freegap); } gap->ga_itemsize = 0; if (p == NULL) return FAIL; *start = p; vim_free(evalarg->eval_tofree_lambda); evalarg->eval_tofree_lambda = p; // Compute "end" relative to the end. *end = *start + STRLEN(*start) - endoff; } } return res; } /* * Convert "tv" to a string. * When "convert" is TRUE convert a List into a sequence of lines and convert * a Float to a String. * Returns an allocated string (NULL when out of memory). */ char_u * typval2string(typval_T *tv, int convert) { garray_T ga; char_u *retval; #ifdef FEAT_FLOAT char_u numbuf[NUMBUFLEN]; #endif if (convert && tv->v_type == VAR_LIST) { ga_init2(&ga, sizeof(char), 80); if (tv->vval.v_list != NULL) { list_join(&ga, tv->vval.v_list, (char_u *)"\n", TRUE, FALSE, 0); if (tv->vval.v_list->lv_len > 0) ga_append(&ga, NL); } ga_append(&ga, NUL); retval = (char_u *)ga.ga_data; } #ifdef FEAT_FLOAT else if (convert && tv->v_type == VAR_FLOAT) { vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float); retval = vim_strsave(numbuf); } #endif else retval = vim_strsave(tv_get_string(tv)); return retval; } /* * Top level evaluation function, returning a string. Does not handle line * breaks. * When "convert" is TRUE convert a List into a sequence of lines and convert * a Float to a String. * Return pointer to allocated memory, or NULL for failure. */ char_u * eval_to_string_eap( char_u *arg, int convert, exarg_T *eap) { typval_T tv; char_u *retval; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); if (eval0(arg, &tv, NULL, &evalarg) == FAIL) retval = NULL; else { retval = typval2string(&tv, convert); clear_tv(&tv); } clear_evalarg(&evalarg, NULL); return retval; } char_u * eval_to_string( char_u *arg, int convert) { return eval_to_string_eap(arg, convert, NULL); } /* * Call eval_to_string() without using current local variables and using * textlock. When "use_sandbox" is TRUE use the sandbox. * Use legacy Vim script syntax. 
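 *
 * A minimal sketch (illustrative; "expr" and "use_sandbox" stand for values
 * the caller derives, e.g. from an option and was_set_insecurely()):
 *
 *	char_u	*res = eval_to_string_safe(expr, use_sandbox, FALSE);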
*/ char_u * eval_to_string_safe( char_u *arg, int use_sandbox, int keep_script_version) { char_u *retval; funccal_entry_T funccal_entry; int save_sc_version = current_sctx.sc_version; int save_garbage = may_garbage_collect; if (!keep_script_version) current_sctx.sc_version = 1; save_funccal(&funccal_entry); if (use_sandbox) ++sandbox; ++textlock; may_garbage_collect = FALSE; retval = eval_to_string(arg, FALSE); if (use_sandbox) --sandbox; --textlock; may_garbage_collect = save_garbage; restore_funccal(); current_sctx.sc_version = save_sc_version; return retval; } /* * Top level evaluation function, returning a number. * Evaluates "expr" silently. * Returns -1 for an error. */ varnumber_T eval_to_number(char_u *expr) { typval_T rettv; varnumber_T retval; char_u *p = skipwhite(expr); ++emsg_off; if (eval1(&p, &rettv, &EVALARG_EVALUATE) == FAIL) retval = -1; else { retval = tv_get_number_chk(&rettv, NULL); clear_tv(&rettv); } --emsg_off; return retval; } /* * Top level evaluation function. * Returns an allocated typval_T with the result. * Returns NULL when there is an error. */ typval_T * eval_expr(char_u *arg, exarg_T *eap) { typval_T *tv; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap != NULL && eap->skip); tv = ALLOC_ONE(typval_T); if (tv != NULL && eval0(arg, tv, eap, &evalarg) == FAIL) VIM_CLEAR(tv); clear_evalarg(&evalarg, eap); return tv; } /* * "*arg" points to what can be a function name in the form of "import.Name" or * "Funcref". Return the name of the function. Set "tofree" to something that * was allocated. * If "verbose" is FALSE no errors are given. * Return NULL for any failure. */ static char_u * deref_function_name( char_u **arg, char_u **tofree, evalarg_T *evalarg, int verbose) { typval_T ref; char_u *name = *arg; ref.v_type = VAR_UNKNOWN; if (eval9(arg, &ref, evalarg, FALSE) == FAIL) { dictitem_T *v; // If <SID>VarName was used it would not be found, try another way. v = find_var_also_in_script(name, NULL, FALSE); if (v == NULL) return NULL; copy_tv(&v->di_tv, &ref); } if (*skipwhite(*arg) != NUL) { if (verbose) semsg(_(e_trailing_characters_str), *arg); name = NULL; } else if (ref.v_type == VAR_FUNC && ref.vval.v_string != NULL) { name = ref.vval.v_string; ref.vval.v_string = NULL; *tofree = name; } else if (ref.v_type == VAR_PARTIAL && ref.vval.v_partial != NULL) { if (ref.vval.v_partial->pt_argc > 0 || ref.vval.v_partial->pt_dict != NULL) { if (verbose) emsg(_(e_cannot_use_partial_here)); name = NULL; } else { name = vim_strsave(partial_name(ref.vval.v_partial)); *tofree = name; } } else { if (verbose) semsg(_(e_not_callable_type_str), name); name = NULL; } clear_tv(&ref); return name; } /* * Call some Vim script function and return the result in "*rettv". * Uses argv[0] to argv[argc - 1] for the function arguments. argv[argc] * should have type VAR_UNKNOWN. * Returns OK or FAIL. */ int call_vim_function( char_u *func, int argc, typval_T *argv, typval_T *rettv) { int ret; funcexe_T funcexe; char_u *arg; char_u *name; char_u *tofree = NULL; int ignore_errors; rettv->v_type = VAR_UNKNOWN; // clear_tv() uses this CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = TRUE; // The name might be "import.Func" or "Funcref". We don't know, we need to // ignore errors for an undefined name. But we do want errors when an // autoload script has errors. Guess that when there is a dot in the name // showing errors is the right choice. 
ignore_errors = vim_strchr(func, '.') == NULL; arg = func; if (ignore_errors) ++emsg_off; name = deref_function_name(&arg, &tofree, &EVALARG_EVALUATE, FALSE); if (ignore_errors) --emsg_off; if (name == NULL) name = func; ret = call_func(name, -1, rettv, argc, argv, &funcexe); if (ret == FAIL) clear_tv(rettv); vim_free(tofree); return ret; } /* * Call Vim script function "func" and return the result as a string. * Uses "argv[0]" to "argv[argc - 1]" for the function arguments. "argv[argc]" * should have type VAR_UNKNOWN. * Returns NULL when calling the function fails. */ void * call_func_retstr( char_u *func, int argc, typval_T *argv) { typval_T rettv; char_u *retval; if (call_vim_function(func, argc, argv, &rettv) == FAIL) return NULL; retval = vim_strsave(tv_get_string(&rettv)); clear_tv(&rettv); return retval; } /* * Call Vim script function "func" and return the result as a List. * Uses "argv" and "argc" as call_func_retstr(). * Returns NULL when there is something wrong. */ void * call_func_retlist( char_u *func, int argc, typval_T *argv) { typval_T rettv; if (call_vim_function(func, argc, argv, &rettv) == FAIL) return NULL; if (rettv.v_type != VAR_LIST) { clear_tv(&rettv); return NULL; } return rettv.vval.v_list; } #if defined(FEAT_FOLDING) || defined(PROTO) /* * Evaluate "arg", which is 'foldexpr'. * Note: caller must set "curwin" to match "arg". * Returns the foldlevel, and any character preceding it in "*cp". Doesn't * give error messages. */ int eval_foldexpr(win_T *wp, int *cp) { char_u *arg; typval_T tv; varnumber_T retval; char_u *s; sctx_T saved_sctx = current_sctx; int use_sandbox = was_set_insecurely((char_u *)"foldexpr", OPT_LOCAL); arg = wp->w_p_fde; current_sctx = wp->w_p_script_ctx[WV_FDE]; ++emsg_off; if (use_sandbox) ++sandbox; ++textlock; *cp = NUL; if (eval0(arg, &tv, NULL, &EVALARG_EVALUATE) == FAIL) retval = 0; else { // If the result is a number, just return the number. if (tv.v_type == VAR_NUMBER) retval = tv.vval.v_number; else if (tv.v_type != VAR_STRING || tv.vval.v_string == NULL) retval = 0; else { // If the result is a string, check if there is a non-digit before // the number. s = tv.vval.v_string; if (!VIM_ISDIGIT(*s) && *s != '-') *cp = *s++; retval = atol((char *)s); } clear_tv(&tv); } --emsg_off; if (use_sandbox) --sandbox; --textlock; clear_evalarg(&EVALARG_EVALUATE, NULL); current_sctx = saved_sctx; return (int)retval; } #endif /* * Get an lval: variable, Dict item or List item that can be assigned a value * to: "name", "na{me}", "name[expr]", "name[expr:expr]", "name[expr][expr]", * "name.key", "name.key[expr]" etc. * Indexing only works if "name" is an existing List or Dictionary. * "name" points to the start of the name. * If "rettv" is not NULL it points to the value to be assigned. * "unlet" is TRUE for ":unlet": slightly different behavior when something is * wrong; must end in space or cmd separator. * * flags: * GLV_QUIET: do not give error messages * GLV_READ_ONLY: will not change the variable * GLV_NO_AUTOLOAD: do not use script autoloading * * Returns a pointer to just after the name, including indexes. * When an evaluation error occurs "lp->ll_name" is NULL; * Returns NULL for a parsing error. Still need to free items in "lp"! 
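 *
 * E.g. for ":let d.one[2] = 9" this is called with "name" pointing at
 * "d.one[2] = 9"; the returned pointer is just past "d.one[2]" and "lp"
 * identifies the list item that is to receive the value.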
*/ char_u * get_lval( char_u *name, typval_T *rettv, lval_T *lp, int unlet, int skip, int flags, // GLV_ values int fne_flags) // flags for find_name_end() { char_u *p; char_u *expr_start, *expr_end; int cc; dictitem_T *v; typval_T var1; typval_T var2; int empty1 = FALSE; char_u *key = NULL; int len; hashtab_T *ht = NULL; int quiet = flags & GLV_QUIET; int writing; int vim9script = in_vim9script(); // Clear everything in "lp". CLEAR_POINTER(lp); if (skip || (flags & GLV_COMPILING)) { // When skipping or compiling just find the end of the name. lp->ll_name = name; lp->ll_name_end = find_name_end(name, NULL, NULL, FNE_INCL_BR | fne_flags); return lp->ll_name_end; } // Cannot use "s:var" at the Vim9 script level. "s: type" is OK. if (vim9script && at_script_level() && name[0] == 's' && name[1] == ':' && !VIM_ISWHITE(name[2])) { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), name); return NULL; } // Find the end of the name. p = find_name_end(name, &expr_start, &expr_end, fne_flags); lp->ll_name_end = p; if (expr_start != NULL) { // Don't expand the name when we already know there is an error. if (unlet && !VIM_ISWHITE(*p) && !ends_excmd(*p) && *p != '[' && *p != '.') { semsg(_(e_trailing_characters_str), p); return NULL; } lp->ll_exp_name = make_expanded_name(name, expr_start, expr_end, p); if (lp->ll_exp_name == NULL) { // Report an invalid expression in braces, unless the // expression evaluation has been cancelled due to an // aborting error, an interrupt, or an exception. if (!aborting() && !quiet) { emsg_severe = TRUE; semsg(_(e_invalid_argument_str), name); return NULL; } } lp->ll_name = lp->ll_exp_name; } else { lp->ll_name = name; if (vim9script) { // "a: type" is declaring variable "a" with a type, not "a:". // However, "g:[key]" is indexing a dictionary. if (p == name + 2 && p[-1] == ':' && *p != '[') { --p; lp->ll_name_end = p; } if (*p == ':') { char_u *tp = skipwhite(p + 1); if (tp == p + 1 && !quiet) { semsg(_(e_white_space_required_after_str_str), ":", p); return NULL; } if (!SCRIPT_ID_VALID(current_sctx.sc_sid)) { semsg(_(e_using_type_not_in_script_context_str), p); return NULL; } // parse the type after the name lp->ll_type = parse_type(&tp, &SCRIPT_ITEM(current_sctx.sc_sid)->sn_type_list, !quiet); if (lp->ll_type == NULL && !quiet) return NULL; lp->ll_name_end = tp; } } } if (lp->ll_name == NULL) return p; if (*p == '.') { imported_T *import = find_imported(lp->ll_name, p - lp->ll_name, TRUE); if (import != NULL) { ufunc_T *ufunc; type_T *type; lp->ll_sid = import->imp_sid; lp->ll_name = skipwhite(p + 1); p = find_name_end(lp->ll_name, NULL, NULL, fne_flags); lp->ll_name_end = p; // check the item is exported cc = *p; *p = NUL; if (find_exported(import->imp_sid, lp->ll_name, &ufunc, &type, NULL, NULL, TRUE) == -1) { *p = cc; return NULL; } *p = cc; } } // Without [idx] or .key we are done. if ((*p != '[' && *p != '.')) return p; if (vim9script && lval_root != NULL) { // using local variable lp->ll_tv = lval_root; v = NULL; } else { cc = *p; *p = NUL; // When we would write to the variable pass &ht and prevent autoload. writing = !(flags & GLV_READ_ONLY); v = find_var(lp->ll_name, writing ? &ht : NULL, (flags & GLV_NO_AUTOLOAD) || writing); if (v == NULL && !quiet) semsg(_(e_undefined_variable_str), lp->ll_name); *p = cc; if (v == NULL) return NULL; lp->ll_tv = &v->di_tv; } if (vim9script && (flags & GLV_NO_DECL) == 0) { if (!quiet) semsg(_(e_variable_already_declared), lp->ll_name); return NULL; } /* * Loop until no more [idx] or .key is following. 
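     * E.g. "name[1].key[2]" takes three iterations: "[1]", ".key" and "[2]".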
*/ var1.v_type = VAR_UNKNOWN; var2.v_type = VAR_UNKNOWN; while (*p == '[' || (*p == '.' && p[1] != '=' && p[1] != '.')) { if (*p == '.' && lp->ll_tv->v_type != VAR_DICT) { if (!quiet) semsg(_(e_dot_can_only_be_used_on_dictionary_str), name); return NULL; } if (lp->ll_tv->v_type != VAR_LIST && lp->ll_tv->v_type != VAR_DICT && lp->ll_tv->v_type != VAR_BLOB) { if (!quiet) emsg(_(e_can_only_index_list_dictionary_or_blob)); return NULL; } // a NULL list/blob works like an empty list/blob, allocate one now. if (lp->ll_tv->v_type == VAR_LIST && lp->ll_tv->vval.v_list == NULL) rettv_list_alloc(lp->ll_tv); else if (lp->ll_tv->v_type == VAR_BLOB && lp->ll_tv->vval.v_blob == NULL) rettv_blob_alloc(lp->ll_tv); if (lp->ll_range) { if (!quiet) emsg(_(e_slice_must_come_last)); return NULL; } if (vim9script && lp->ll_valtype == NULL && v != NULL && lp->ll_tv == &v->di_tv && ht != NULL && ht == get_script_local_ht()) { svar_T *sv = find_typval_in_script(lp->ll_tv, 0, TRUE); // Vim9 script local variable: get the type if (sv != NULL) lp->ll_valtype = sv->sv_type; } len = -1; if (*p == '.') { key = p + 1; for (len = 0; ASCII_ISALNUM(key[len]) || key[len] == '_'; ++len) ; if (len == 0) { if (!quiet) emsg(_(e_cannot_use_empty_key_for_dictionary)); return NULL; } p = key + len; } else { // Get the index [expr] or the first index [expr: ]. p = skipwhite(p + 1); if (*p == ':') empty1 = TRUE; else { empty1 = FALSE; if (eval1(&p, &var1, &EVALARG_EVALUATE) == FAIL) // recursive! return NULL; if (tv_get_string_chk(&var1) == NULL) { // not a number or string clear_tv(&var1); return NULL; } p = skipwhite(p); } // Optionally get the second index [ :expr]. if (*p == ':') { if (lp->ll_tv->v_type == VAR_DICT) { if (!quiet) emsg(_(e_cannot_slice_dictionary)); clear_tv(&var1); return NULL; } if (rettv != NULL && !(rettv->v_type == VAR_LIST && rettv->vval.v_list != NULL) && !(rettv->v_type == VAR_BLOB && rettv->vval.v_blob != NULL)) { if (!quiet) emsg(_(e_slice_requires_list_or_blob_value)); clear_tv(&var1); return NULL; } p = skipwhite(p + 1); if (*p == ']') lp->ll_empty2 = TRUE; else { lp->ll_empty2 = FALSE; // recursive! if (eval1(&p, &var2, &EVALARG_EVALUATE) == FAIL) { clear_tv(&var1); return NULL; } if (tv_get_string_chk(&var2) == NULL) { // not a number or string clear_tv(&var1); clear_tv(&var2); return NULL; } } lp->ll_range = TRUE; } else lp->ll_range = FALSE; if (*p != ']') { if (!quiet) emsg(_(e_missing_closing_square_brace)); clear_tv(&var1); clear_tv(&var2); return NULL; } // Skip to past ']'. ++p; } if (lp->ll_tv->v_type == VAR_DICT) { if (len == -1) { // "[key]": get key from "var1" key = tv_get_string_chk(&var1); // is number or string if (key == NULL) { clear_tv(&var1); return NULL; } } lp->ll_list = NULL; // a NULL dict is equivalent with an empty dict if (lp->ll_tv->vval.v_dict == NULL) { lp->ll_tv->vval.v_dict = dict_alloc(); if (lp->ll_tv->vval.v_dict == NULL) { clear_tv(&var1); return NULL; } ++lp->ll_tv->vval.v_dict->dv_refcount; } lp->ll_dict = lp->ll_tv->vval.v_dict; lp->ll_di = dict_find(lp->ll_dict, key, len); // When assigning to a scope dictionary check that a function and // variable name is valid (only variable name unless it is l: or // g: dictionary). Disallow overwriting a builtin function. 
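	// Illustrative examples of what is rejected here (not from the
	// source): "let g:['a b'] = 1" fails the variable name check and
	// "let g:foo = function('tr')" fails the Funcref name check.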
if (rettv != NULL && lp->ll_dict->dv_scope != 0) { int prevval; int wrong; if (len != -1) { prevval = key[len]; key[len] = NUL; } else prevval = 0; // avoid compiler warning wrong = (lp->ll_dict->dv_scope == VAR_DEF_SCOPE && rettv->v_type == VAR_FUNC && var_wrong_func_name(key, lp->ll_di == NULL)) || !valid_varname(key, -1, TRUE); if (len != -1) key[len] = prevval; if (wrong) { clear_tv(&var1); return NULL; } } if (lp->ll_valtype != NULL) // use the type of the member lp->ll_valtype = lp->ll_valtype->tt_member; if (lp->ll_di == NULL) { // Can't add "v:" or "a:" variable. if (lp->ll_dict == get_vimvar_dict() || &lp->ll_dict->dv_hashtab == get_funccal_args_ht()) { semsg(_(e_illegal_variable_name_str), name); clear_tv(&var1); return NULL; } // Key does not exist in dict: may need to add it. if (*p == '[' || *p == '.' || unlet) { if (!quiet) semsg(_(e_key_not_present_in_dictionary), key); clear_tv(&var1); return NULL; } if (len == -1) lp->ll_newkey = vim_strsave(key); else lp->ll_newkey = vim_strnsave(key, len); clear_tv(&var1); if (lp->ll_newkey == NULL) p = NULL; break; } // existing variable, need to check if it can be changed else if ((flags & GLV_READ_ONLY) == 0 && (var_check_ro(lp->ll_di->di_flags, name, FALSE) || var_check_lock(lp->ll_di->di_flags, name, FALSE))) { clear_tv(&var1); return NULL; } clear_tv(&var1); lp->ll_tv = &lp->ll_di->di_tv; } else if (lp->ll_tv->v_type == VAR_BLOB) { long bloblen = blob_len(lp->ll_tv->vval.v_blob); /* * Get the number and item for the only or first index of the List. */ if (empty1) lp->ll_n1 = 0; else // is number or string lp->ll_n1 = (long)tv_get_number(&var1); clear_tv(&var1); if (check_blob_index(bloblen, lp->ll_n1, quiet) == FAIL) { clear_tv(&var2); return NULL; } if (lp->ll_range && !lp->ll_empty2) { lp->ll_n2 = (long)tv_get_number(&var2); clear_tv(&var2); if (check_blob_range(bloblen, lp->ll_n1, lp->ll_n2, quiet) == FAIL) return NULL; } lp->ll_blob = lp->ll_tv->vval.v_blob; lp->ll_tv = NULL; break; } else { /* * Get the number and item for the only or first index of the List. */ if (empty1) lp->ll_n1 = 0; else // is number or string lp->ll_n1 = (long)tv_get_number(&var1); clear_tv(&var1); lp->ll_dict = NULL; lp->ll_list = lp->ll_tv->vval.v_list; lp->ll_li = check_range_index_one(lp->ll_list, &lp->ll_n1, (flags & GLV_ASSIGN_WITH_OP) == 0, quiet); if (lp->ll_li == NULL) { clear_tv(&var2); return NULL; } if (lp->ll_valtype != NULL) // use the type of the member lp->ll_valtype = lp->ll_valtype->tt_member; /* * May need to find the item or absolute index for the second * index of a range. * When no index given: "lp->ll_empty2" is TRUE. * Otherwise "lp->ll_n2" is set to the second index. */ if (lp->ll_range && !lp->ll_empty2) { lp->ll_n2 = (long)tv_get_number(&var2); // is number or string clear_tv(&var2); if (check_range_index_two(lp->ll_list, &lp->ll_n1, lp->ll_li, &lp->ll_n2, quiet) == FAIL) return NULL; } lp->ll_tv = &lp->ll_li->li_tv; } } clear_tv(&var1); lp->ll_name_end = p; return p; } /* * Clear lval "lp" that was filled by get_lval(). */ void clear_lval(lval_T *lp) { vim_free(lp->ll_exp_name); vim_free(lp->ll_newkey); } /* * Set a variable that was parsed by get_lval() to "rettv". * "endp" points to just after the parsed name. * "op" is NULL, "+" for "+=", "-" for "-=", "*" for "*=", "/" for "/=", * "%" for "%=", "." for ".=" or "=" for "=". 
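 *
 * Illustrative examples (not from the source):
 *	let var = 1		" op is "="
 *	let var += 2		" op is "+"
 *	let b[1 : 2] = 0z0102	" blob range; uses lp->ll_n1 and lp->ll_n2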
*/ void set_var_lval( lval_T *lp, char_u *endp, typval_T *rettv, int copy, int flags, // ASSIGN_CONST, ASSIGN_NO_DECL char_u *op, int var_idx) // index for "let [a, b] = list" { int cc; dictitem_T *di; if (lp->ll_tv == NULL) { cc = *endp; *endp = NUL; if (in_vim9script() && check_reserved_name(lp->ll_name) == FAIL) return; if (lp->ll_blob != NULL) { int error = FALSE, val; if (op != NULL && *op != '=') { semsg(_(e_wrong_variable_type_for_str_equal), op); return; } if (value_check_lock(lp->ll_blob->bv_lock, lp->ll_name, FALSE)) return; if (lp->ll_range && rettv->v_type == VAR_BLOB) { if (lp->ll_empty2) lp->ll_n2 = blob_len(lp->ll_blob) - 1; if (blob_set_range(lp->ll_blob, lp->ll_n1, lp->ll_n2, rettv) == FAIL) return; } else { val = (int)tv_get_number_chk(rettv, &error); if (!error) blob_set_append(lp->ll_blob, lp->ll_n1, val); } } else if (op != NULL && *op != '=') { typval_T tv; if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_modify_existing_variable)); *endp = cc; return; } // handle +=, -=, *=, /=, %= and .= di = NULL; if (eval_variable(lp->ll_name, (int)STRLEN(lp->ll_name), lp->ll_sid, &tv, &di, EVAL_VAR_VERBOSE) == OK) { if ((di == NULL || (!var_check_ro(di->di_flags, lp->ll_name, FALSE) && !tv_check_lock(&di->di_tv, lp->ll_name, FALSE))) && tv_op(&tv, rettv, op) == OK) set_var_const(lp->ll_name, lp->ll_sid, NULL, &tv, FALSE, ASSIGN_NO_DECL, 0); clear_tv(&tv); } } else { if (lp->ll_type != NULL && check_typval_arg_type(lp->ll_type, rettv, NULL, 0) == FAIL) return; set_var_const(lp->ll_name, lp->ll_sid, lp->ll_type, rettv, copy, flags, var_idx); } *endp = cc; } else if (value_check_lock(lp->ll_newkey == NULL ? lp->ll_tv->v_lock : lp->ll_tv->vval.v_dict->dv_lock, lp->ll_name, FALSE)) ; else if (lp->ll_range) { if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_range)); return; } (void)list_assign_range(lp->ll_list, rettv->vval.v_list, lp->ll_n1, lp->ll_n2, lp->ll_empty2, op, lp->ll_name); } else { /* * Assign to a List or Dictionary item. */ if ((flags & (ASSIGN_CONST | ASSIGN_FINAL)) && (flags & ASSIGN_FOR_LOOP) == 0) { emsg(_(e_cannot_lock_list_or_dict)); return; } if (lp->ll_valtype != NULL && check_typval_arg_type(lp->ll_valtype, rettv, NULL, 0) == FAIL) return; if (lp->ll_newkey != NULL) { if (op != NULL && *op != '=') { semsg(_(e_key_not_present_in_dictionary), lp->ll_newkey); return; } if (dict_wrong_func_name(lp->ll_tv->vval.v_dict, rettv, lp->ll_newkey)) return; // Need to add an item to the Dictionary. di = dictitem_alloc(lp->ll_newkey); if (di == NULL) return; if (dict_add(lp->ll_tv->vval.v_dict, di) == FAIL) { vim_free(di); return; } lp->ll_tv = &di->di_tv; } else if (op != NULL && *op != '=') { tv_op(lp->ll_tv, rettv, op); return; } else clear_tv(lp->ll_tv); /* * Assign the value to the variable or list item. */ if (copy) copy_tv(rettv, lp->ll_tv); else { *lp->ll_tv = *rettv; lp->ll_tv->v_lock = 0; init_tv(rettv); } } } /* * Handle "tv1 += tv2", "tv1 -= tv2", "tv1 *= tv2", "tv1 /= tv2", "tv1 %= tv2" * and "tv1 .= tv2" * Returns OK or FAIL. */ int tv_op(typval_T *tv1, typval_T *tv2, char_u *op) { varnumber_T n; char_u numbuf[NUMBUFLEN]; char_u *s; int failed = FALSE; // Can't do anything with a Funcref or Dict on the right. // v:true and friends only work with "..=". 
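    // Illustrative examples of the results (not from the source):
    //	  let n = 10 | let n %= 3	" n == 1
    //	  let s = 'ab' | let s .= 'cd'	" s == 'abcd'
    //	  let b = 0z01 | let b += 0z02	" b == 0z0102 (bytes appended)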
if (tv2->v_type != VAR_FUNC && tv2->v_type != VAR_DICT && ((tv2->v_type != VAR_BOOL && tv2->v_type != VAR_SPECIAL) || *op == '.')) { switch (tv1->v_type) { case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: case VAR_DICT: case VAR_FUNC: case VAR_PARTIAL: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: break; case VAR_BLOB: if (*op != '+' || tv2->v_type != VAR_BLOB) break; // BLOB += BLOB if (tv1->vval.v_blob != NULL && tv2->vval.v_blob != NULL) { blob_T *b1 = tv1->vval.v_blob; blob_T *b2 = tv2->vval.v_blob; int i, len = blob_len(b2); for (i = 0; i < len; i++) ga_append(&b1->bv_ga, blob_get(b2, i)); } return OK; case VAR_LIST: if (*op != '+' || tv2->v_type != VAR_LIST) break; // List += List if (tv2->vval.v_list != NULL) { if (tv1->vval.v_list == NULL) { tv1->vval.v_list = tv2->vval.v_list; ++tv1->vval.v_list->lv_refcount; } else list_extend(tv1->vval.v_list, tv2->vval.v_list, NULL); } return OK; case VAR_NUMBER: case VAR_STRING: if (tv2->v_type == VAR_LIST) break; if (vim_strchr((char_u *)"+-*/%", *op) != NULL) { // nr += nr , nr -= nr , nr *=nr , nr /= nr , nr %= nr n = tv_get_number(tv1); #ifdef FEAT_FLOAT if (tv2->v_type == VAR_FLOAT) { float_T f = n; if (*op == '%') break; switch (*op) { case '+': f += tv2->vval.v_float; break; case '-': f -= tv2->vval.v_float; break; case '*': f *= tv2->vval.v_float; break; case '/': f /= tv2->vval.v_float; break; } clear_tv(tv1); tv1->v_type = VAR_FLOAT; tv1->vval.v_float = f; } else #endif { switch (*op) { case '+': n += tv_get_number(tv2); break; case '-': n -= tv_get_number(tv2); break; case '*': n *= tv_get_number(tv2); break; case '/': n = num_divide(n, tv_get_number(tv2), &failed); break; case '%': n = num_modulus(n, tv_get_number(tv2), &failed); break; } clear_tv(tv1); tv1->v_type = VAR_NUMBER; tv1->vval.v_number = n; } } else { if (tv2->v_type == VAR_FLOAT) break; // str .= str s = tv_get_string(tv1); s = concat_str(s, tv_get_string_buf(tv2, numbuf)); clear_tv(tv1); tv1->v_type = VAR_STRING; tv1->vval.v_string = s; } return failed ? FAIL : OK; case VAR_FLOAT: #ifdef FEAT_FLOAT { float_T f; if (*op == '%' || *op == '.' || (tv2->v_type != VAR_FLOAT && tv2->v_type != VAR_NUMBER && tv2->v_type != VAR_STRING)) break; if (tv2->v_type == VAR_FLOAT) f = tv2->vval.v_float; else f = tv_get_number(tv2); switch (*op) { case '+': tv1->vval.v_float += f; break; case '-': tv1->vval.v_float -= f; break; case '*': tv1->vval.v_float *= f; break; case '/': tv1->vval.v_float /= f; break; } } #endif return OK; } } semsg(_(e_wrong_variable_type_for_str_equal), op); return FAIL; } /* * Evaluate the expression used in a ":for var in expr" command. * "arg" points to "var". * Set "*errp" to TRUE for an error, FALSE otherwise; * Return a pointer that holds the info. Null when there is an error. 
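 *
 * Illustrative forms this sets up (a sketch, not from the source):
 *	for x in [1, 2, 3]	" List: iterate over the items
 *	for b in 0z0102		" Blob: iterate over the bytes as Numbers
 *	for c in "abc"		" String: iterate over the characters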
*/ void * eval_for_line( char_u *arg, int *errp, exarg_T *eap, evalarg_T *evalarg) { forinfo_T *fi; char_u *var_list_end; char_u *expr; typval_T tv; list_T *l; int skip = !(evalarg->eval_flags & EVAL_EVALUATE); *errp = TRUE; // default: there is an error fi = ALLOC_CLEAR_ONE(forinfo_T); if (fi == NULL) return NULL; var_list_end = skip_var_list(arg, TRUE, &fi->fi_varcount, &fi->fi_semicolon, FALSE); if (var_list_end == NULL) return fi; expr = skipwhite_and_linebreak(var_list_end, evalarg); if (expr[0] != 'i' || expr[1] != 'n' || !(expr[2] == NUL || VIM_ISWHITE(expr[2]))) { if (in_vim9script() && *expr == ':' && expr != var_list_end) semsg(_(e_no_white_space_allowed_before_colon_str), expr); else emsg(_(e_missing_in_after_for)); return fi; } if (skip) ++emsg_skip; expr = skipwhite_and_linebreak(expr + 2, evalarg); if (eval0(expr, &tv, eap, evalarg) == OK) { *errp = FALSE; if (!skip) { if (tv.v_type == VAR_LIST) { l = tv.vval.v_list; if (l == NULL) { // a null list is like an empty list: do nothing clear_tv(&tv); } else { // Need a real list here. CHECK_LIST_MATERIALIZE(l); // No need to increment the refcount, it's already set for // the list being used in "tv". fi->fi_list = l; list_add_watch(l, &fi->fi_lw); fi->fi_lw.lw_item = l->lv_first; } } else if (tv.v_type == VAR_BLOB) { fi->fi_bi = 0; if (tv.vval.v_blob != NULL) { typval_T btv; // Make a copy, so that the iteration still works when the // blob is changed. blob_copy(tv.vval.v_blob, &btv); fi->fi_blob = btv.vval.v_blob; } clear_tv(&tv); } else if (tv.v_type == VAR_STRING) { fi->fi_byte_idx = 0; fi->fi_string = tv.vval.v_string; tv.vval.v_string = NULL; if (fi->fi_string == NULL) fi->fi_string = vim_strsave((char_u *)""); } else { emsg(_(e_string_list_or_blob_required)); clear_tv(&tv); } } } if (skip) --emsg_skip; fi->fi_break_count = evalarg->eval_break_count; return fi; } /* * Used when looping over a :for line, skip the "in expr" part. */ void skip_for_lines(void *fi_void, evalarg_T *evalarg) { forinfo_T *fi = (forinfo_T *)fi_void; int i; for (i = 0; i < fi->fi_break_count; ++i) eval_next_line(NULL, evalarg); } /* * Use the first item in a ":for" list. Advance to the next. * Assign the values to the variable (list). "arg" points to the first one. * Return TRUE when a valid item was found, FALSE when at end of list or * something wrong. */ int next_for_item(void *fi_void, char_u *arg) { forinfo_T *fi = (forinfo_T *)fi_void; int result; int flag = ASSIGN_FOR_LOOP | (in_vim9script() ? (ASSIGN_FINAL // first round: error if variable exists | (fi->fi_bi == 0 ? 
0 : ASSIGN_DECL) | ASSIGN_NO_MEMBER_TYPE) : 0); listitem_T *item; int skip_assign = in_vim9script() && arg[0] == '_' && !eval_isnamec(arg[1]); if (fi->fi_blob != NULL) { typval_T tv; if (fi->fi_bi >= blob_len(fi->fi_blob)) return FALSE; tv.v_type = VAR_NUMBER; tv.v_lock = VAR_FIXED; tv.vval.v_number = blob_get(fi->fi_blob, fi->fi_bi); ++fi->fi_bi; if (skip_assign) return TRUE; return ex_let_vars(arg, &tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK; } if (fi->fi_string != NULL) { typval_T tv; int len; len = mb_ptr2len(fi->fi_string + fi->fi_byte_idx); if (len == 0) return FALSE; tv.v_type = VAR_STRING; tv.v_lock = VAR_FIXED; tv.vval.v_string = vim_strnsave(fi->fi_string + fi->fi_byte_idx, len); fi->fi_byte_idx += len; ++fi->fi_bi; if (skip_assign) result = TRUE; else result = ex_let_vars(arg, &tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK; vim_free(tv.vval.v_string); return result; } item = fi->fi_lw.lw_item; if (item == NULL) result = FALSE; else { fi->fi_lw.lw_item = item->li_next; ++fi->fi_bi; if (skip_assign) result = TRUE; else result = (ex_let_vars(arg, &item->li_tv, TRUE, fi->fi_semicolon, fi->fi_varcount, flag, NULL) == OK); } return result; } /* * Free the structure used to store info used by ":for". */ void free_for_info(void *fi_void) { forinfo_T *fi = (forinfo_T *)fi_void; if (fi == NULL) return; if (fi->fi_list != NULL) { list_rem_watch(fi->fi_list, &fi->fi_lw); list_unref(fi->fi_list); } else if (fi->fi_blob != NULL) blob_unref(fi->fi_blob); else vim_free(fi->fi_string); vim_free(fi); } void set_context_for_expression( expand_T *xp, char_u *arg, cmdidx_T cmdidx) { int has_expr = cmdidx != CMD_let && cmdidx != CMD_var; int c; char_u *p; if (cmdidx == CMD_let || cmdidx == CMD_var || cmdidx == CMD_const || cmdidx == CMD_final) { xp->xp_context = EXPAND_USER_VARS; if (vim_strpbrk(arg, (char_u *)"\"'+-*/%.=!?~|&$([<>,#") == NULL) { // ":let var1 var2 ...": find last space. for (p = arg + STRLEN(arg); p >= arg; ) { xp->xp_pattern = p; MB_PTR_BACK(arg, p); if (VIM_ISWHITE(*p)) break; } return; } } else xp->xp_context = cmdidx == CMD_call ? EXPAND_FUNCTIONS : EXPAND_EXPRESSION; while ((xp->xp_pattern = vim_strpbrk(arg, (char_u *)"\"'+-*/%.=!?~|&$([<>,#")) != NULL) { c = *xp->xp_pattern; if (c == '&') { c = xp->xp_pattern[1]; if (c == '&') { ++xp->xp_pattern; xp->xp_context = has_expr ? EXPAND_EXPRESSION : EXPAND_NOTHING; } else if (c != ' ') { xp->xp_context = EXPAND_SETTINGS; if ((c == 'l' || c == 'g') && xp->xp_pattern[2] == ':') xp->xp_pattern += 2; } } else if (c == '$') { // environment variable xp->xp_context = EXPAND_ENV_VARS; } else if (c == '=') { has_expr = TRUE; xp->xp_context = EXPAND_EXPRESSION; } else if (c == '#' && xp->xp_context == EXPAND_EXPRESSION) { // Autoload function/variable contains '#'. break; } else if ((c == '<' || c == '#') && xp->xp_context == EXPAND_FUNCTIONS && vim_strchr(xp->xp_pattern, '(') == NULL) { // Function name can start with "<SNR>" and contain '#'. break; } else if (has_expr) { if (c == '"') // string { while ((c = *++xp->xp_pattern) != NUL && c != '"') if (c == '\\' && xp->xp_pattern[1] != NUL) ++xp->xp_pattern; xp->xp_context = EXPAND_NOTHING; } else if (c == '\'') // literal string { // Trick: '' is like stopping and starting a literal string. 
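		// Illustrative example (not from the source): in 'it''s'
		// the doubled quote continues the literal string.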
while ((c = *++xp->xp_pattern) != NUL && c != '\'') /* skip */ ; xp->xp_context = EXPAND_NOTHING; } else if (c == '|') { if (xp->xp_pattern[1] == '|') { ++xp->xp_pattern; xp->xp_context = EXPAND_EXPRESSION; } else xp->xp_context = EXPAND_COMMANDS; } else xp->xp_context = EXPAND_EXPRESSION; } else // Doesn't look like something valid, expand as an expression // anyway. xp->xp_context = EXPAND_EXPRESSION; arg = xp->xp_pattern; if (*arg != NUL) while ((c = *++arg) != NUL && (c == ' ' || c == '\t')) /* skip */ ; } // ":exe one two" completes "two" if ((cmdidx == CMD_execute || cmdidx == CMD_echo || cmdidx == CMD_echon || cmdidx == CMD_echomsg) && xp->xp_context == EXPAND_EXPRESSION) { for (;;) { char_u *n = skiptowhite(arg); if (n == arg || IS_WHITE_OR_NUL(*skipwhite(n))) break; arg = skipwhite(n); } } xp->xp_pattern = arg; } /* * Return TRUE if "pat" matches "text". * Does not use 'cpo' and always uses 'magic'. */ int pattern_match(char_u *pat, char_u *text, int ic) { int matches = FALSE; char_u *save_cpo; regmatch_T regmatch; // avoid 'l' flag in 'cpoptions' save_cpo = p_cpo; p_cpo = empty_option; regmatch.regprog = vim_regcomp(pat, RE_MAGIC + RE_STRING); if (regmatch.regprog != NULL) { regmatch.rm_ic = ic; matches = vim_regexec_nl(&regmatch, text, (colnr_T)0); vim_regfree(regmatch.regprog); } p_cpo = save_cpo; return matches; } /* * Handle a name followed by "(". Both for just "name(arg)" and for * "expr->name(arg)". * Returns OK or FAIL. */ static int eval_func( char_u **arg, // points to "(", will be advanced evalarg_T *evalarg, char_u *name, int name_len, typval_T *rettv, int flags, typval_T *basetv) // "expr" for "expr->name(arg)" { int evaluate = flags & EVAL_EVALUATE; char_u *s = name; int len = name_len; partial_T *partial; int ret = OK; type_T *type = NULL; int found_var = FALSE; if (!evaluate) check_vars(s, len); // If "s" is the name of a variable of type VAR_FUNC // use its contents. s = deref_func_name(s, &len, &partial, in_vim9script() ? &type : NULL, !evaluate, FALSE, &found_var); // Need to make a copy, in case evaluating the arguments makes // the name invalid. s = vim_strsave(s); if (s == NULL || (evaluate && (*s == NUL || (flags & EVAL_CONSTANT)))) ret = FAIL; else { funcexe_T funcexe; // Invoke the function. CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = evaluate; funcexe.fe_partial = partial; funcexe.fe_basetv = basetv; funcexe.fe_check_type = type; funcexe.fe_found_var = found_var; ret = get_func_tv(s, len, rettv, arg, evalarg, &funcexe); } vim_free(s); // If evaluate is FALSE rettv->v_type was not set in // get_func_tv, but it's needed in handle_subscript() to parse // what follows. So set it here. if (rettv->v_type == VAR_UNKNOWN && !evaluate && **arg == '(') { rettv->vval.v_string = NULL; rettv->v_type = VAR_FUNC; } // Stop the expression evaluation when immediately // aborting on error, or when an interrupt occurred or // an exception was thrown but not caught. if (evaluate && aborting()) { if (ret == OK) clear_tv(rettv); ret = FAIL; } return ret; } /* * After a NL, skip over empty lines and comment-only lines. */ static char_u * newline_skip_comments(char_u *arg) { char_u *p = arg + 1; for (;;) { p = skipwhite(p); if (*p == NUL) break; if (vim9_comment_start(p)) { char_u *nl = vim_strchr(p, NL); if (nl == NULL) break; p = nl; } if (*p != NL) break; ++p; // skip another NL } return p; } /* * Get the next line source line without advancing. But do skip over comment * lines. 
 * lines.
* Only called for Vim9 script. */ static char_u * getline_peek_skip_comments(evalarg_T *evalarg) { for (;;) { char_u *next = getline_peek(evalarg->eval_getline, evalarg->eval_cookie); char_u *p; if (next == NULL) break; p = skipwhite(next); if (*p != NUL && !vim9_comment_start(p)) return next; if (eval_next_line(NULL, evalarg) == NULL) break; } return NULL; } /* * If inside Vim9 script, "arg" points to the end of a line (ignoring a # * comment) and there is a next line, return the next line (skipping blanks) * and set "getnext". * Otherwise return the next non-white at or after "arg" and set "getnext" to * FALSE. * "arg" must point somewhere inside a line, not at the start. */ char_u * eval_next_non_blank(char_u *arg, evalarg_T *evalarg, int *getnext) { char_u *p = skipwhite(arg); *getnext = FALSE; if (in_vim9script() && evalarg != NULL && (evalarg->eval_cookie != NULL || evalarg->eval_cctx != NULL || *p == NL) && (*p == NUL || *p == NL || (vim9_comment_start(p) && VIM_ISWHITE(p[-1])))) { char_u *next; if (*p == NL) next = newline_skip_comments(p); else if (evalarg->eval_cookie != NULL) next = getline_peek_skip_comments(evalarg); else next = peek_next_line_from_context(evalarg->eval_cctx); if (next != NULL) { *getnext = TRUE; return skipwhite(next); } } return p; } /* * To be called after eval_next_non_blank() sets "getnext" to TRUE. * Only called for Vim9 script. */ char_u * eval_next_line(char_u *arg, evalarg_T *evalarg) { garray_T *gap = &evalarg->eval_ga; char_u *line; if (arg != NULL) { if (*arg == NL) return newline_skip_comments(arg); // Truncate before a trailing comment, so that concatenating the lines // won't turn the rest into a comment. if (*skipwhite(arg) == '#') *arg = NUL; } if (evalarg->eval_cookie != NULL) line = evalarg->eval_getline(0, evalarg->eval_cookie, 0, GETLINE_CONCAT_ALL); else line = next_line_from_context(evalarg->eval_cctx, TRUE); if (line == NULL) return NULL; ++evalarg->eval_break_count; if (gap->ga_itemsize > 0 && ga_grow(gap, 1) == OK) { char_u *p = skipwhite(line); // Going to concatenate the lines after parsing. For an empty or // comment line use an empty string. if (*p == NUL || vim9_comment_start(p)) { vim_free(line); line = vim_strsave((char_u *)""); } ((char_u **)gap->ga_data)[gap->ga_len] = line; ++gap->ga_len; } else if (evalarg->eval_cookie != NULL) { free_eval_tofree_later(evalarg); evalarg->eval_tofree = line; } // Advanced to the next line, "arg" no longer points into the previous // line. evalarg->eval_using_cmdline = FALSE; return skipwhite(line); } /* * Call eval_next_non_blank() and get the next line if needed. */ char_u * skipwhite_and_linebreak(char_u *arg, evalarg_T *evalarg) { int getnext; char_u *p = skipwhite_and_nl(arg); if (evalarg == NULL) return skipwhite(arg); eval_next_non_blank(p, evalarg, &getnext); if (getnext) return eval_next_line(arg, evalarg); return p; } /* * The "evaluate" argument: When FALSE, the argument is only parsed but not * executed. The function may return OK, but the rettv will be of type * VAR_UNKNOWN. The function still returns FAIL for a syntax error. */ /* * Handle zero level expression. * This calls eval1() and handles error message and nextcmd. * Put the result in "rettv" when returning OK and "evaluate" is TRUE. * Note: "rettv.v_lock" is not set. * "evalarg" can be NULL, EVALARG_EVALUATE or a pointer. * Return OK or FAIL. 
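 *
 * Illustrative example (not from the source): for ":echo 1 + 2 | ls" this
 * evaluates "1 + 2" into "rettv" and sets "eap->nextcmd" to point at "ls".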
*/ int eval0( char_u *arg, typval_T *rettv, exarg_T *eap, evalarg_T *evalarg) { return eval0_retarg(arg, rettv, eap, evalarg, NULL); } /* * Like eval0() but when "retarg" is not NULL store the pointer to after the * expression and don't check what comes after the expression. */ int eval0_retarg( char_u *arg, typval_T *rettv, exarg_T *eap, evalarg_T *evalarg, char_u **retarg) { int ret; char_u *p; char_u *expr_end; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; int flags = evalarg == NULL ? 0 : evalarg->eval_flags; int check_for_end = retarg == NULL; int end_error = FALSE; p = skipwhite(arg); ret = eval1(&p, rettv, evalarg); if (ret != FAIL) { expr_end = p; p = skipwhite(p); // In Vim9 script a command block is not split at NL characters for // commands using an expression argument. Skip over a '#' comment to // check for a following NL. Require white space before the '#'. if (in_vim9script() && p > expr_end && retarg == NULL) while (*p == '#') { char_u *nl = vim_strchr(p, NL); if (nl == NULL) break; p = skipwhite(nl + 1); if (eap != NULL && *p != NUL) eap->nextcmd = p; check_for_end = FALSE; } if (check_for_end) end_error = !ends_excmd2(arg, p); } if (ret == FAIL || end_error) { if (ret != FAIL) clear_tv(rettv); /* * Report the invalid expression unless the expression evaluation has * been cancelled due to an aborting error, an interrupt, or an * exception, or we already gave a more specific error. * Also check called_emsg for when using assert_fails(). */ if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before && (flags & EVAL_CONSTANT) == 0 && (!in_vim9script() || !vim9_bad_comment(p))) { if (end_error) semsg(_(e_trailing_characters_str), p); else semsg(_(e_invalid_expression_str), arg); } // Some of the expression may not have been consumed. Do not check for // a next command to avoid more errors, unless "|" is following, which // could only be a command separator. if (eap != NULL && p != NULL && skipwhite(p)[0] == '|' && skipwhite(p)[1] != '|') eap->nextcmd = check_nextcmd(p); return FAIL; } if (retarg != NULL) *retarg = p; else if (check_for_end && eap != NULL) set_nextcmd(eap, p); return ret; } /* * Handle top level expression: * expr2 ? expr1 : expr1 * expr2 ?? expr1 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Note: "rettv.v_lock" is not set. * * Return OK or FAIL. */ int eval1(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; CLEAR_POINTER(rettv); /* * Get the first variable. */ if (eval2(arg, rettv, evalarg) == FAIL) return FAIL; p = eval_next_non_blank(*arg, evalarg, &getnext); if (*p == '?') { int op_falsy = p[1] == '?'; int result; typval_T var2; evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int orig_flags; int evaluate; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = evalarg_used->eval_flags & EVAL_EVALUATE; if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, op_falsy ? 
2 : 1); clear_tv(rettv); return FAIL; } *arg = p; } result = FALSE; if (evaluate) { int error = FALSE; if (op_falsy) result = tv2bool(rettv); else if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) != 0) result = TRUE; if (error || !op_falsy || !result) clear_tv(rettv); if (error) return FAIL; } /* * Get the second variable. Recursive! */ if (op_falsy) ++*arg; if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg - (op_falsy ? 1 : 0), op_falsy ? 2 : 1); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg_used); evalarg_used->eval_flags = (op_falsy ? !result : result) ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval1(arg, &var2, evalarg_used) == FAIL) { evalarg_used->eval_flags = orig_flags; return FAIL; } if (!op_falsy || !result) *rettv = var2; if (!op_falsy) { /* * Check for the ":". */ p = eval_next_non_blank(*arg, evalarg_used, &getnext); if (*p != ':') { emsg(_(e_missing_colon_after_questionmark)); if (evaluate && result) clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 1); clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } *arg = p; } /* * Get the third variable. Recursive! */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg, 1); clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg_used); evalarg_used->eval_flags = !result ? orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval1(arg, &var2, evalarg_used) == FAIL) { if (evaluate && result) clear_tv(rettv); evalarg_used->eval_flags = orig_flags; return FAIL; } if (evaluate && !result) *rettv = var2; } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle first level expression: * expr2 || expr2 || expr2 logical OR * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval2(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; /* * Get the first expression. */ if (eval3(arg, rettv, evalarg) == FAIL) return FAIL; /* * Handle the "||" operator. */ p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '|' && p[1] == '|') { evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int evaluate; int orig_flags; long result = FALSE; typval_T var2; int error = FALSE; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = orig_flags & EVAL_EVALUATE; if (evaluate) { if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) != 0) result = TRUE; clear_tv(rettv); if (error) return FAIL; } /* * Repeat until there is no following "||". */ while (p[0] == '|' && p[1] == '|') { if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = p; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[2])) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 2, evalarg_used); evalarg_used->eval_flags = !result ? 
orig_flags : orig_flags & ~EVAL_EVALUATE; if (eval3(arg, &var2, evalarg_used) == FAIL) return FAIL; /* * Compute the result. */ if (evaluate && !result) { if (vim9script) result = tv_get_bool_chk(&var2, &error); else if (tv_get_number_chk(&var2, &error) != 0) result = TRUE; clear_tv(&var2); if (error) return FAIL; } if (evaluate) { if (vim9script) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = result ? VVAL_TRUE : VVAL_FALSE; } else { rettv->v_type = VAR_NUMBER; rettv->vval.v_number = result; } } p = eval_next_non_blank(*arg, evalarg_used, &getnext); } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle second level expression: * expr3 && expr3 && expr3 logical AND * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval3(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; /* * Get the first expression. */ if (eval4(arg, rettv, evalarg) == FAIL) return FAIL; /* * Handle the "&&" operator. */ p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '&' && p[1] == '&') { evalarg_T *evalarg_used = evalarg; evalarg_T local_evalarg; int orig_flags; int evaluate; long result = TRUE; typval_T var2; int error = FALSE; int vim9script = in_vim9script(); if (evalarg == NULL) { init_evalarg(&local_evalarg); evalarg_used = &local_evalarg; } orig_flags = evalarg_used->eval_flags; evaluate = orig_flags & EVAL_EVALUATE; if (evaluate) { if (vim9script) result = tv_get_bool_chk(rettv, &error); else if (tv_get_number_chk(rettv, &error) == 0) result = FALSE; clear_tv(rettv); if (error) return FAIL; } /* * Repeat until there is no following "&&". */ while (p[0] == '&' && p[1] == '&') { if (getnext) *arg = eval_next_line(*arg, evalarg_used); else { if (evaluate && vim9script && !VIM_ISWHITE(p[-1])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = p; } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[2])) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 2, evalarg_used); evalarg_used->eval_flags = result ? orig_flags : orig_flags & ~EVAL_EVALUATE; CLEAR_FIELD(var2); if (eval4(arg, &var2, evalarg_used) == FAIL) return FAIL; /* * Compute the result. */ if (evaluate && result) { if (vim9script) result = tv_get_bool_chk(&var2, &error); else if (tv_get_number_chk(&var2, &error) == 0) result = FALSE; clear_tv(&var2); if (error) return FAIL; } if (evaluate) { if (vim9script) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = result ? VVAL_TRUE : VVAL_FALSE; } else { rettv->v_type = VAR_NUMBER; rettv->vval.v_number = result; } } p = eval_next_non_blank(*arg, evalarg_used, &getnext); } if (evalarg == NULL) clear_evalarg(&local_evalarg, NULL); else evalarg->eval_flags = orig_flags; } return OK; } /* * Handle third level expression: * var1 == var2 * var1 =~ var2 * var1 != var2 * var1 !~ var2 * var1 > var2 * var1 >= var2 * var1 < var2 * var1 <= var2 * var1 is var2 * var1 isnot var2 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval4(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { char_u *p; int getnext; exprtype_T type = EXPR_UNKNOWN; int len = 2; int type_is = FALSE; /* * Get the first expression. 
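 * (Illustrative, not from the source: in "name =~? pat" the first
 * expression is "name"; the "=~" with appended '?' is detected below and
 * makes the match ignore case.)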
*/ if (eval5(arg, rettv, evalarg) == FAIL) return FAIL; p = eval_next_non_blank(*arg, evalarg, &getnext); type = get_compare_type(p, &len, &type_is); /* * If there is a comparative operator, use it. */ if (type != EXPR_UNKNOWN) { typval_T var2; int ic; int vim9script = in_vim9script(); int evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); long comp_lnum = SOURCING_LNUM; if (getnext) { *arg = eval_next_line(*arg, evalarg); p = *arg; } else if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, len); clear_tv(rettv); return FAIL; } if (vim9script && type_is && (p[len] == '?' || p[len] == '#')) { semsg(_(e_invalid_expression_str), p); clear_tv(rettv); return FAIL; } // extra question mark appended: ignore case if (p[len] == '?') { ic = TRUE; ++len; } // extra '#' appended: match case else if (p[len] == '#') { ic = FALSE; ++len; } // nothing appended: use 'ignorecase' if not in Vim script else ic = vim9script ? FALSE : p_ic; /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL(p[len])) { error_white_both(p, len); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(p + len, evalarg); if (eval5(arg, &var2, evalarg) == FAIL) { clear_tv(rettv); return FAIL; } if (evaluate) { int ret; // use the line of the comparison for messages SOURCING_LNUM = comp_lnum; if (vim9script && check_compare_types(type, rettv, &var2) == FAIL) { ret = FAIL; clear_tv(rettv); } else ret = typval_compare(rettv, &var2, type, ic); clear_tv(&var2); return ret; } } return OK; } /* * Make a copy of blob "tv1" and append blob "tv2". */ void eval_addblob(typval_T *tv1, typval_T *tv2) { blob_T *b1 = tv1->vval.v_blob; blob_T *b2 = tv2->vval.v_blob; blob_T *b = blob_alloc(); int i; if (b != NULL) { for (i = 0; i < blob_len(b1); i++) ga_append(&b->bv_ga, blob_get(b1, i)); for (i = 0; i < blob_len(b2); i++) ga_append(&b->bv_ga, blob_get(b2, i)); clear_tv(tv1); rettv_blob_set(tv1, b); } } /* * Make a copy of list "tv1" and append list "tv2". */ int eval_addlist(typval_T *tv1, typval_T *tv2) { typval_T var3; // concatenate Lists if (list_concat(tv1->vval.v_list, tv2->vval.v_list, &var3) == FAIL) { clear_tv(tv1); clear_tv(tv2); return FAIL; } clear_tv(tv1); *tv1 = var3; return OK; } /* * Handle the bitwise left/right shift operator expression: * var1 << var2 * var1 >> var2 * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval5(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { /* * Get the first expression. */ if (eval6(arg, rettv, evalarg) == FAIL) return FAIL; /* * Repeat computing, until no '<<' or '>>' is following. */ for (;;) { char_u *p; int getnext; exprtype_T type; int evaluate; typval_T var2; int vim9script; p = eval_next_non_blank(*arg, evalarg, &getnext); if (p[0] == '<' && p[1] == '<') type = EXPR_LSHIFT; else if (p[0] == '>' && p[1] == '>') type = EXPR_RSHIFT; else return OK; // Handle a bitwise left or right shift operator if (rettv->v_type != VAR_NUMBER) { // left operand should be a number emsg(_(e_bitshift_ops_must_be_number)); clear_tv(rettv); return FAIL; } evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); vim9script = in_vim9script(); if (getnext) { *arg = eval_next_line(*arg, evalarg); p = *arg; } else if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, 2); clear_tv(rettv); return FAIL; } /* * Get the second variable. 
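 * (Illustrative, not from the source: "1 << 2" yields 4, and shifting by
 * more than MAX_LSHIFT_BITS always yields zero.)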
*/ if (evaluate && vim9script && !IS_WHITE_OR_NUL(p[2])) { error_white_both(p, 2); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(p + 2, evalarg); if (eval6(arg, &var2, evalarg) == FAIL) { clear_tv(rettv); return FAIL; } if (var2.v_type != VAR_NUMBER || var2.vval.v_number < 0) { // right operand should be a positive number if (var2.v_type != VAR_NUMBER) emsg(_(e_bitshift_ops_must_be_number)); else emsg(_(e_bitshift_ops_must_be_postive)); clear_tv(rettv); clear_tv(&var2); return FAIL; } if (evaluate) { if (var2.vval.v_number > MAX_LSHIFT_BITS) // shifting more bits than we have always results in zero rettv->vval.v_number = 0; else if (type == EXPR_LSHIFT) rettv->vval.v_number = (uvarnumber_T)rettv->vval.v_number << var2.vval.v_number; else rettv->vval.v_number = (uvarnumber_T)rettv->vval.v_number >> var2.vval.v_number; } clear_tv(&var2); } return OK; } /* * Handle fifth level expression: * + number addition, concatenation of list or blob * - number subtraction * . string concatenation (if script version is 1) * .. string concatenation * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval6(char_u **arg, typval_T *rettv, evalarg_T *evalarg) { /* * Get the first expression. */ if (eval7(arg, rettv, evalarg, FALSE) == FAIL) return FAIL; /* * Repeat computing, until no '+', '-' or '.' is following. */ for (;;) { int evaluate; int getnext; char_u *p; int op; int oplen; int concat; typval_T var2; int vim9script = in_vim9script(); // "." is only string concatenation when scriptversion is 1 // "+=", "-=" and "..=" are assignments // "++" and "--" on the next line are a separate command. p = eval_next_non_blank(*arg, evalarg, &getnext); op = *p; concat = op == '.' && (*(p + 1) == '.' || in_old_script(2)); if ((op != '+' && op != '-' && !concat) || p[1] == '=' || (p[1] == '.' && p[2] == '=')) break; if (getnext && (op == '+' || op == '-') && p[0] == p[1]) break; evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); oplen = (concat && p[1] == '.') ? 2 : 1; if (getnext) *arg = eval_next_line(*arg, evalarg); else { if (evaluate && vim9script && !VIM_ISWHITE(**arg)) { error_white_both(*arg, oplen); clear_tv(rettv); return FAIL; } *arg = p; } if ((op != '+' || (rettv->v_type != VAR_LIST && rettv->v_type != VAR_BLOB)) #ifdef FEAT_FLOAT && (op == '.' || rettv->v_type != VAR_FLOAT) #endif && evaluate) { int error = FALSE; // For "list + ...", an illegal use of the first operand as // a number cannot be determined before evaluating the 2nd // operand: if this is also a list, all is ok. // For "something . ...", "something - ..." or "non-list + ...", // we know that the first operand needs to be a string or number // without evaluating the 2nd operand. So check before to avoid // side effects after an error. if (op != '.') tv_get_number_chk(rettv, &error); if ((op == '.' && tv_get_string_chk(rettv) == NULL) || error) { clear_tv(rettv); return FAIL; } } /* * Get the second variable. */ if (evaluate && vim9script && !IS_WHITE_OR_NUL((*arg)[oplen])) { error_white_both(*arg, oplen); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + oplen, evalarg); if (eval7(arg, &var2, evalarg, !vim9script && op == '.') == FAIL) { clear_tv(rettv); return FAIL; } if (evaluate) { /* * Compute the result. 
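 * (Illustrative, not from the source: "1 + 2" -> 3, "'a' .. 'b'" -> 'ab',
 * "[1] + [2]" -> [1, 2]; a float on either side makes the result a float.)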
*/ if (op == '.') { char_u buf1[NUMBUFLEN], buf2[NUMBUFLEN]; char_u *s1 = tv_get_string_buf(rettv, buf1); char_u *s2 = NULL; if (vim9script && (var2.v_type == VAR_VOID || var2.v_type == VAR_CHANNEL || var2.v_type == VAR_JOB)) semsg(_(e_using_invalid_value_as_string_str), vartype_name(var2.v_type)); #ifdef FEAT_FLOAT else if (vim9script && var2.v_type == VAR_FLOAT) { vim_snprintf((char *)buf2, NUMBUFLEN, "%g", var2.vval.v_float); s2 = buf2; } #endif else s2 = tv_get_string_buf_chk(&var2, buf2); if (s2 == NULL) // type error ? { clear_tv(rettv); clear_tv(&var2); return FAIL; } p = concat_str(s1, s2); clear_tv(rettv); rettv->v_type = VAR_STRING; rettv->vval.v_string = p; } else if (op == '+' && rettv->v_type == VAR_BLOB && var2.v_type == VAR_BLOB) eval_addblob(rettv, &var2); else if (op == '+' && rettv->v_type == VAR_LIST && var2.v_type == VAR_LIST) { if (eval_addlist(rettv, &var2) == FAIL) return FAIL; } else { int error = FALSE; varnumber_T n1, n2; #ifdef FEAT_FLOAT float_T f1 = 0, f2 = 0; if (rettv->v_type == VAR_FLOAT) { f1 = rettv->vval.v_float; n1 = 0; } else #endif { n1 = tv_get_number_chk(rettv, &error); if (error) { // This can only happen for "list + non-list" or // "blob + non-blob". For "non-list + ..." or // "something - ...", we returned before evaluating the // 2nd operand. clear_tv(rettv); clear_tv(&var2); return FAIL; } #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) f1 = n1; #endif } #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) { f2 = var2.vval.v_float; n2 = 0; } else #endif { n2 = tv_get_number_chk(&var2, &error); if (error) { clear_tv(rettv); clear_tv(&var2); return FAIL; } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) f2 = n2; #endif } clear_tv(rettv); #ifdef FEAT_FLOAT // If there is a float on either side the result is a float. if (rettv->v_type == VAR_FLOAT || var2.v_type == VAR_FLOAT) { if (op == '+') f1 = f1 + f2; else f1 = f1 - f2; rettv->v_type = VAR_FLOAT; rettv->vval.v_float = f1; } else #endif { if (op == '+') n1 = n1 + n2; else n1 = n1 - n2; rettv->v_type = VAR_NUMBER; rettv->vval.v_number = n1; } } clear_tv(&var2); } } return OK; } /* * Handle sixth level expression: * * number multiplication * / number division * % number modulo * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval7( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { #ifdef FEAT_FLOAT int use_float = FALSE; #endif /* * Get the first expression. */ if (eval8(arg, rettv, evalarg, want_string) == FAIL) return FAIL; /* * Repeat computing, until no '*', '/' or '%' is following. */ for (;;) { int evaluate; int getnext; typval_T var2; char_u *p; int op; varnumber_T n1, n2; #ifdef FEAT_FLOAT float_T f1, f2; #endif int error; // "*=", "/=" and "%=" are assignments p = eval_next_non_blank(*arg, evalarg, &getnext); op = *p; if ((op != '*' && op != '/' && op != '%') || p[1] == '=') break; evaluate = evalarg == NULL ? 
0 : (evalarg->eval_flags & EVAL_EVALUATE); if (getnext) *arg = eval_next_line(*arg, evalarg); else { if (evaluate && in_vim9script() && !VIM_ISWHITE(**arg)) { error_white_both(*arg, 1); clear_tv(rettv); return FAIL; } *arg = p; } #ifdef FEAT_FLOAT f1 = 0; f2 = 0; #endif error = FALSE; if (evaluate) { #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { f1 = rettv->vval.v_float; use_float = TRUE; n1 = 0; } else #endif n1 = tv_get_number_chk(rettv, &error); clear_tv(rettv); if (error) return FAIL; } else n1 = 0; /* * Get the second variable. */ if (evaluate && in_vim9script() && !IS_WHITE_OR_NUL((*arg)[1])) { error_white_both(*arg, 1); clear_tv(rettv); return FAIL; } *arg = skipwhite_and_linebreak(*arg + 1, evalarg); if (eval8(arg, &var2, evalarg, FALSE) == FAIL) return FAIL; if (evaluate) { #ifdef FEAT_FLOAT if (var2.v_type == VAR_FLOAT) { if (!use_float) { f1 = n1; use_float = TRUE; } f2 = var2.vval.v_float; n2 = 0; } else #endif { n2 = tv_get_number_chk(&var2, &error); clear_tv(&var2); if (error) return FAIL; #ifdef FEAT_FLOAT if (use_float) f2 = n2; #endif } /* * Compute the result. * When either side is a float the result is a float. */ #ifdef FEAT_FLOAT if (use_float) { if (op == '*') f1 = f1 * f2; else if (op == '/') { # ifdef VMS // VMS crashes on divide by zero, work around it if (f2 == 0.0) { if (f1 == 0) f1 = -1 * __F_FLT_MAX - 1L; // similar to NaN else if (f1 < 0) f1 = -1 * __F_FLT_MAX; else f1 = __F_FLT_MAX; } else f1 = f1 / f2; # else // We rely on the floating point library to handle divide // by zero to result in "inf" and not a crash. f1 = f1 / f2; # endif } else { emsg(_(e_cannot_use_percent_with_float)); return FAIL; } rettv->v_type = VAR_FLOAT; rettv->vval.v_float = f1; } else #endif { int failed = FALSE; if (op == '*') n1 = n1 * n2; else if (op == '/') n1 = num_divide(n1, n2, &failed); else n1 = num_modulus(n1, n2, &failed); if (failed) return FAIL; rettv->v_type = VAR_NUMBER; rettv->vval.v_number = n1; } } } return OK; } /* * Handle a type cast before a base level expression. * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * Return OK or FAIL. */ static int eval8( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." operator { type_T *want_type = NULL; garray_T type_list; // list of pointers to allocated types int res; int evaluate = evalarg == NULL ? 0 : (evalarg->eval_flags & EVAL_EVALUATE); // Recognize <type> in Vim9 script only. if (in_vim9script() && **arg == '<' && eval_isnamec1((*arg)[1]) && STRNCMP(*arg, "<SNR>", 5) != 0) { ++*arg; ga_init2(&type_list, sizeof(type_T *), 10); want_type = parse_type(arg, &type_list, TRUE); if (want_type == NULL && (evaluate || **arg != '>')) { clear_type_list(&type_list); return FAIL; } if (**arg != '>') { if (*skipwhite(*arg) == '>') semsg(_(e_no_white_space_allowed_before_str_str), ">", *arg); else emsg(_(e_missing_gt)); clear_type_list(&type_list); return FAIL; } ++*arg; *arg = skipwhite_and_linebreak(*arg, evalarg); } res = eval9(arg, rettv, evalarg, want_string); if (want_type != NULL && evaluate) { if (res == OK) { type_T *actual = typval2type(rettv, get_copyID(), &type_list, TVTT_DO_MEMBER); if (!equal_type(want_type, actual, 0)) { if (want_type == &t_bool && actual != &t_bool && (actual->tt_flags & TTFLAG_BOOL_OK)) { int n = tv2bool(rettv); // can use "0" and "1" for boolean in some places clear_tv(rettv); rettv->v_type = VAR_BOOL; rettv->vval.v_number = n ? 
VVAL_TRUE : VVAL_FALSE; } else { where_T where = WHERE_INIT; where.wt_variable = TRUE; res = check_type(want_type, actual, TRUE, where); } } } clear_type_list(&type_list); } return res; } int eval_leader(char_u **arg, int vim9) { char_u *s = *arg; char_u *p = *arg; while (*p == '!' || *p == '-' || *p == '+') { char_u *n = skipwhite(p + 1); // ++, --, -+ and +- are not accepted in Vim9 script if (vim9 && (*p == '-' || *p == '+') && (*n == '-' || *n == '+')) { semsg(_(e_invalid_expression_str), s); return FAIL; } p = n; } *arg = p; return OK; } /* * Check for a predefined value "true", "false" and "null.*". * Return OK when recognized. */ int handle_predefined(char_u *s, int len, typval_T *rettv) { switch (len) { case 4: if (STRNCMP(s, "true", 4) == 0) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = VVAL_TRUE; return OK; } if (STRNCMP(s, "null", 4) == 0) { rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; return OK; } break; case 5: if (STRNCMP(s, "false", 5) == 0) { rettv->v_type = VAR_BOOL; rettv->vval.v_number = VVAL_FALSE; return OK; } break; case 8: if (STRNCMP(s, "null_job", 8) == 0) { #ifdef FEAT_JOB_CHANNEL rettv->v_type = VAR_JOB; rettv->vval.v_job = NULL; #else rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; #endif return OK; } break; case 9: if (STRNCMP(s, "null_", 5) != 0) break; if (STRNCMP(s + 5, "list", 4) == 0) { rettv->v_type = VAR_LIST; rettv->vval.v_list = NULL; return OK; } if (STRNCMP(s + 5, "dict", 4) == 0) { rettv->v_type = VAR_DICT; rettv->vval.v_dict = NULL; return OK; } if (STRNCMP(s + 5, "blob", 4) == 0) { rettv->v_type = VAR_BLOB; rettv->vval.v_blob = NULL; return OK; } break; case 11: if (STRNCMP(s, "null_string", 11) == 0) { rettv->v_type = VAR_STRING; rettv->vval.v_string = NULL; return OK; } break; case 12: if (STRNCMP(s, "null_channel", 12) == 0) { #ifdef FEAT_JOB_CHANNEL rettv->v_type = VAR_CHANNEL; rettv->vval.v_channel = NULL; #else rettv->v_type = VAR_SPECIAL; rettv->vval.v_number = VVAL_NULL; #endif return OK; } if (STRNCMP(s, "null_partial", 12) == 0) { rettv->v_type = VAR_PARTIAL; rettv->vval.v_partial = NULL; return OK; } break; case 13: if (STRNCMP(s, "null_function", 13) == 0) { rettv->v_type = VAR_FUNC; rettv->vval.v_string = NULL; return OK; } break; } return FAIL; } /* * Handle sixth level expression: * number number constant * 0zFFFFFFFF Blob constant * "string" string constant * 'string' literal string constant * &option-name option value * @r register contents * identifier variable value * function() function call * $VAR environment variable * (expression) nested expression * [expr, expr] List * {arg, arg -> expr} Lambda * {key: val, key: val} Dictionary * #{key: val, key: val} Dictionary with literal keys * * Also handle: * ! in front logical NOT * - in front unary minus * + in front unary plus (ignored) * trailing [] subscript in String or List * trailing .name entry in Dictionary * trailing ->name() method call * * "arg" must point to the first non-white of the expression. * "arg" is advanced to just after the recognized expression. * * Return OK or FAIL. */ static int eval9( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int want_string) // after "." 
operator { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int len; char_u *s; char_u *name_start = NULL; char_u *start_leader, *end_leader; int ret = OK; char_u *alias; static int recurse = 0; int vim9script = in_vim9script(); /* * Initialise variable so that clear_tv() can't mistake this for a * string and free a string that isn't there. */ rettv->v_type = VAR_UNKNOWN; /* * Skip '!', '-' and '+' characters. They are handled later. */ start_leader = *arg; if (eval_leader(arg, vim9script) == FAIL) return FAIL; end_leader = *arg; if (**arg == '.' && (!isdigit(*(*arg + 1)) #ifdef FEAT_FLOAT || in_old_script(2) #endif )) { semsg(_(e_invalid_expression_str), *arg); ++*arg; return FAIL; } // Limit recursion to 1000 levels. At least at 10000 we run out of stack // and crash. With MSVC the stack is smaller. if (recurse == #ifdef _MSC_VER 300 #else 1000 #endif ) { semsg(_(e_expression_too_recursive_str), *arg); return FAIL; } ++recurse; switch (**arg) { /* * Number constant. */ case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case '.': ret = eval_number(arg, rettv, evaluate, want_string); // Apply prefixed "-" and "+" now. Matters especially when // "->" follows. if (ret == OK && evaluate && end_leader > start_leader && rettv->v_type != VAR_BLOB) ret = eval9_leader(rettv, TRUE, start_leader, &end_leader); break; /* * String constant: "string". */ case '"': ret = eval_string(arg, rettv, evaluate, FALSE); break; /* * Literal string constant: 'str''ing'. */ case '\'': ret = eval_lit_string(arg, rettv, evaluate, FALSE); break; /* * List: [expr, expr] */ case '[': ret = eval_list(arg, rettv, evalarg, TRUE); break; /* * Dictionary: #{key: val, key: val} */ case '#': if (vim9script) { ret = vim9_bad_comment(*arg) ? FAIL : NOTDONE; } else if ((*arg)[1] == '{') { ++*arg; ret = eval_dict(arg, rettv, evalarg, TRUE); } else ret = NOTDONE; break; /* * Lambda: {arg, arg -> expr} * Dictionary: {'key': val, 'key': val} */ case '{': if (vim9script) ret = NOTDONE; else ret = get_lambda_tv(arg, rettv, vim9script, evalarg); if (ret == NOTDONE) ret = eval_dict(arg, rettv, evalarg, FALSE); break; /* * Option value: &name */ case '&': ret = eval_option(arg, rettv, evaluate); break; /* * Environment variable: $VAR. * Interpolated string: $"string" or $'string'. */ case '$': if ((*arg)[1] == '"' || (*arg)[1] == '\'') ret = eval_interp_string(arg, rettv, evaluate); else ret = eval_env_var(arg, rettv, evaluate); break; /* * Register contents: @r. */ case '@': ++*arg; if (evaluate) { if (vim9script && IS_WHITE_OR_NUL(**arg)) semsg(_(e_syntax_error_at_str), *arg); else if (vim9script && !valid_yank_reg(**arg, FALSE)) emsg_invreg(**arg); else { rettv->v_type = VAR_STRING; rettv->vval.v_string = get_reg_contents(**arg, GREG_EXPR_SRC); } } if (**arg != NUL) ++*arg; break; /* * nested expression: (expression). * or lambda: (arg) => expr */ case '(': ret = NOTDONE; if (vim9script) { ret = get_lambda_tv(arg, rettv, TRUE, evalarg); if (ret == OK && evaluate) { ufunc_T *ufunc = rettv->vval.v_partial->pt_func; // Compile it here to get the return type. The return // type is optional, when it's missing use t_unknown. // This is recognized in compile_return(). 
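		    // Illustrative example (not from the source):
		    //	    var F = (x: number) => x + 1
		    // get_lambda_tv() parsed the lambda above; compiling it
		    // here infers the "number" return type from "x + 1".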
if (ufunc->uf_ret_type->tt_type == VAR_VOID) ufunc->uf_ret_type = &t_unknown; if (compile_def_function(ufunc, FALSE, get_compile_type(ufunc), NULL) == FAIL) { clear_tv(rettv); ret = FAIL; } } } if (ret == NOTDONE) { *arg = skipwhite_and_linebreak(*arg + 1, evalarg); ret = eval1(arg, rettv, evalarg); // recursive! *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ')') ++*arg; else if (ret == OK) { emsg(_(e_missing_closing_paren)); clear_tv(rettv); ret = FAIL; } } break; default: ret = NOTDONE; break; } if (ret == NOTDONE) { /* * Must be a variable or function name. * Can also be a curly-braces kind of name: {expr}. */ s = *arg; len = get_name_len(arg, &alias, evaluate, TRUE); if (alias != NULL) s = alias; if (len <= 0) ret = FAIL; else { int flags = evalarg == NULL ? 0 : evalarg->eval_flags; if (evaluate && vim9script && len == 1 && *s == '_') { emsg(_(e_cannot_use_underscore_here)); ret = FAIL; } else if (evaluate && vim9script && len > 2 && s[0] == 's' && s[1] == ':') { semsg(_(e_cannot_use_s_colon_in_vim9_script_str), s); ret = FAIL; } else if ((vim9script ? **arg : *skipwhite(*arg)) == '(') { // "name(..." recursive! *arg = skipwhite(*arg); ret = eval_func(arg, evalarg, s, len, rettv, flags, NULL); } else if (flags & EVAL_CONSTANT) ret = FAIL; else if (evaluate) { // get the value of "true", "false", etc. or a variable ret = FAIL; if (vim9script) ret = handle_predefined(s, len, rettv); if (ret == FAIL) { name_start = s; ret = eval_variable(s, len, 0, rettv, NULL, EVAL_VAR_VERBOSE + EVAL_VAR_IMPORT); } } else { // skip the name check_vars(s, len); ret = OK; } } vim_free(alias); } // Handle following '[', '(' and '.' for expr[expr], expr.name, // expr(expr), expr->name(expr) if (ret == OK) ret = handle_subscript(arg, name_start, rettv, evalarg, TRUE); /* * Apply logical NOT and unary '-', from right to left, ignore '+'. */ if (ret == OK && evaluate && end_leader > start_leader) ret = eval9_leader(rettv, FALSE, start_leader, &end_leader); --recurse; return ret; } /* * Apply the leading "!" and "-" before an eval9 expression to "rettv". * When "numeric_only" is TRUE only handle "+" and "-". * Adjusts "end_leaderp" until it is at "start_leader". */ static int eval9_leader( typval_T *rettv, int numeric_only, char_u *start_leader, char_u **end_leaderp) { char_u *end_leader = *end_leaderp; int ret = OK; int error = FALSE; varnumber_T val = 0; vartype_T type = rettv->v_type; int vim9script = in_vim9script(); #ifdef FEAT_FLOAT float_T f = 0.0; if (rettv->v_type == VAR_FLOAT) f = rettv->vval.v_float; else #endif { while (VIM_ISWHITE(end_leader[-1])) --end_leader; if (vim9script && end_leader[-1] == '!') val = tv2bool(rettv); else val = tv_get_number_chk(rettv, &error); } if (error) { clear_tv(rettv); ret = FAIL; } else { while (end_leader > start_leader) { --end_leader; if (*end_leader == '!') { if (numeric_only) { ++end_leader; break; } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { if (vim9script) { rettv->v_type = VAR_BOOL; val = f == 0.0 ? VVAL_TRUE : VVAL_FALSE; } else f = !f; } else #endif { val = !val; type = VAR_BOOL; } } else if (*end_leader == '-') { #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) f = -f; else #endif { val = -val; type = VAR_NUMBER; } } } #ifdef FEAT_FLOAT if (rettv->v_type == VAR_FLOAT) { clear_tv(rettv); rettv->vval.v_float = f; } else #endif { clear_tv(rettv); if (vim9script) rettv->v_type = type; else rettv->v_type = VAR_NUMBER; rettv->vval.v_number = val; } } *end_leaderp = end_leader; return ret; } /* * Call the function referred to in "rettv". 
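 *
 * Illustrative example (not from the source): for "Funcs[0](arg)" the
 * subscript left a Funcref in "rettv"; this consumes "(arg)" and replaces
 * "rettv" with the result of the call.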
*/ static int call_func_rettv( char_u **arg, evalarg_T *evalarg, typval_T *rettv, int evaluate, dict_T *selfdict, typval_T *basetv) { partial_T *pt = NULL; funcexe_T funcexe; typval_T functv; char_u *s; int ret; // need to copy the funcref so that we can clear rettv if (evaluate) { functv = *rettv; rettv->v_type = VAR_UNKNOWN; // Invoke the function. Recursive! if (functv.v_type == VAR_PARTIAL) { pt = functv.vval.v_partial; s = partial_name(pt); } else { s = functv.vval.v_string; if (s == NULL || *s == NUL) { emsg(_(e_empty_function_name)); ret = FAIL; goto theend; } } } else s = (char_u *)""; CLEAR_FIELD(funcexe); funcexe.fe_firstline = curwin->w_cursor.lnum; funcexe.fe_lastline = curwin->w_cursor.lnum; funcexe.fe_evaluate = evaluate; funcexe.fe_partial = pt; funcexe.fe_selfdict = selfdict; funcexe.fe_basetv = basetv; ret = get_func_tv(s, -1, rettv, arg, evalarg, &funcexe); theend: // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&functv); return ret; } /* * Evaluate "->method()". * "*arg" points to "method". * Returns FAIL or OK. "*arg" is advanced to after the ')'. */ static int eval_lambda( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); typval_T base = *rettv; int ret; rettv->v_type = VAR_UNKNOWN; if (**arg == '{') { // ->{lambda}() ret = get_lambda_tv(arg, rettv, FALSE, evalarg); } else { // ->(lambda)() ++*arg; ret = eval1(arg, rettv, evalarg); *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg != ')') { emsg(_(e_missing_closing_paren)); return FAIL; } if (rettv->v_type != VAR_STRING && rettv->v_type != VAR_FUNC && rettv->v_type != VAR_PARTIAL) { emsg(_(e_string_or_function_required_for_arrow_parens_expr)); return FAIL; } ++*arg; } if (ret != OK) return FAIL; if (**arg != '(') { if (verbose) { if (*skipwhite(*arg) == '(') emsg(_(e_no_white_space_allowed_before_parenthesis)); else semsg(_(e_missing_parenthesis_str), "lambda"); } clear_tv(rettv); ret = FAIL; } else ret = call_func_rettv(arg, evalarg, rettv, evaluate, NULL, &base); // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&base); return ret; } /* * Evaluate "->method()". * "*arg" points to "method". * Returns FAIL or OK. "*arg" is advanced to after the ')'. */ static int eval_method( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { char_u *name; long len; char_u *alias; char_u *tofree = NULL; typval_T base = *rettv; int ret = OK; int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); rettv->v_type = VAR_UNKNOWN; name = *arg; len = get_name_len(arg, &alias, evaluate, TRUE); if (alias != NULL) name = alias; if (len <= 0) { if (verbose) emsg(_(e_missing_name_after_method)); ret = FAIL; } else { char_u *paren; // If there is no "(" immediately following, but there is further on, // it can be "import.Func()", "dict.Func()", "list[nr]", etc. // Does not handle anything where "(" is part of the expression. 
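	// Temporarily putting a NUL at the "(" below isolates the name, so
	// only that part is resolved; the "(" is restored after the lookup.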
*arg = skipwhite(*arg); if (**arg != '(' && alias == NULL && (paren = vim_strchr(*arg, '(')) != NULL) { char_u *deref; *arg = name; *paren = NUL; deref = deref_function_name(arg, &tofree, evalarg, verbose); if (deref == NULL) { *arg = name + len; ret = FAIL; } else { name = deref; len = (long)STRLEN(name); } *paren = '('; } if (ret == OK) { *arg = skipwhite(*arg); if (**arg != '(') { if (verbose) semsg(_(e_missing_parenthesis_str), name); ret = FAIL; } else if (VIM_ISWHITE((*arg)[-1])) { if (verbose) emsg(_(e_no_white_space_allowed_before_parenthesis)); ret = FAIL; } else ret = eval_func(arg, evalarg, name, len, rettv, evaluate ? EVAL_EVALUATE : 0, &base); } } // Clear the funcref afterwards, so that deleting it while // evaluating the arguments is possible (see test55). if (evaluate) clear_tv(&base); vim_free(tofree); return ret; } /* * Evaluate an "[expr]" or "[expr:expr]" index. Also "dict.key". * "*arg" points to the '[' or '.'. * Returns FAIL or OK. "*arg" is advanced to after the ']'. */ static int eval_index( char_u **arg, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int empty1 = FALSE, empty2 = FALSE; typval_T var1, var2; int range = FALSE; char_u *key = NULL; int keylen = -1; int vim9script = in_vim9script(); if (check_can_index(rettv, evaluate, verbose) == FAIL) return FAIL; init_tv(&var1); init_tv(&var2); if (**arg == '.') { /* * dict.name */ key = *arg + 1; for (keylen = 0; eval_isdictc(key[keylen]); ++keylen) ; if (keylen == 0) return FAIL; *arg = key + keylen; } else { /* * something[idx] * * Get the (first) variable from inside the []. */ *arg = skipwhite_and_linebreak(*arg + 1, evalarg); if (**arg == ':') empty1 = TRUE; else if (eval1(arg, &var1, evalarg) == FAIL) // recursive! return FAIL; else if (vim9script && **arg == ':') { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", *arg); clear_tv(&var1); return FAIL; } else if (evaluate) { int error = FALSE; #ifdef FEAT_FLOAT // allow for indexing with float if (vim9script && rettv->v_type == VAR_DICT && var1.v_type == VAR_FLOAT) { var1.vval.v_string = typval_tostring(&var1, TRUE); var1.v_type = VAR_STRING; } #endif if (vim9script && rettv->v_type == VAR_LIST) tv_get_number_chk(&var1, &error); else error = tv_get_string_chk(&var1) == NULL; if (error) { // not a number or string clear_tv(&var1); return FAIL; } } /* * Get the second variable from inside the [:]. */ *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ':') { range = TRUE; ++*arg; if (vim9script && !IS_WHITE_OR_NUL(**arg) && **arg != ']') { semsg(_(e_white_space_required_before_and_after_str_at_str), ":", *arg - 1); if (!empty1) clear_tv(&var1); return FAIL; } *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg == ']') empty2 = TRUE; else if (eval1(arg, &var2, evalarg) == FAIL) // recursive! { if (!empty1) clear_tv(&var1); return FAIL; } else if (evaluate && tv_get_string_chk(&var2) == NULL) { // not a number or string if (!empty1) clear_tv(&var1); clear_tv(&var2); return FAIL; } } // Check for the ']'. *arg = skipwhite_and_linebreak(*arg, evalarg); if (**arg != ']') { if (verbose) emsg(_(e_missing_closing_square_brace)); clear_tv(&var1); if (range) clear_tv(&var2); return FAIL; } *arg = *arg + 1; // skip over the ']' } if (evaluate) { int res = eval_index_inner(rettv, range, empty1 ? NULL : &var1, empty2 ? 
NULL : &var2, FALSE, key, keylen, verbose); if (!empty1) clear_tv(&var1); if (range) clear_tv(&var2); return res; } return OK; } /* * Check if "rettv" can have an [index] or [sli:ce] */ int check_can_index(typval_T *rettv, int evaluate, int verbose) { switch (rettv->v_type) { case VAR_FUNC: case VAR_PARTIAL: if (verbose) emsg(_(e_cannot_index_a_funcref)); return FAIL; case VAR_FLOAT: #ifdef FEAT_FLOAT if (verbose) emsg(_(e_using_float_as_string)); return FAIL; #endif case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: if (verbose) emsg(_(e_cannot_index_special_variable)); return FAIL; case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: if (evaluate) { emsg(_(e_cannot_index_special_variable)); return FAIL; } // FALLTHROUGH case VAR_STRING: case VAR_LIST: case VAR_DICT: case VAR_BLOB: break; case VAR_NUMBER: if (in_vim9script()) emsg(_(e_cannot_index_number)); break; } return OK; } /* * slice() function */ void f_slice(typval_T *argvars, typval_T *rettv) { if (in_vim9script() && ((argvars[0].v_type != VAR_STRING && argvars[0].v_type != VAR_LIST && argvars[0].v_type != VAR_BLOB && check_for_list_arg(argvars, 0) == FAIL) || check_for_number_arg(argvars, 1) == FAIL || check_for_opt_number_arg(argvars, 2) == FAIL)) return; if (check_can_index(argvars, TRUE, FALSE) == OK) { copy_tv(argvars, rettv); eval_index_inner(rettv, TRUE, argvars + 1, argvars[2].v_type == VAR_UNKNOWN ? NULL : argvars + 2, TRUE, NULL, 0, FALSE); } } /* * Apply index or range to "rettv". * "var1" is the first index, NULL for [:expr]. * "var2" is the second index, NULL for [expr] and [expr: ] * "exclusive" is TRUE for slice(): second index is exclusive, use character * index for string. * Alternatively, "key" is not NULL, then key[keylen] is the dict index. */ int eval_index_inner( typval_T *rettv, int is_range, typval_T *var1, typval_T *var2, int exclusive, char_u *key, int keylen, int verbose) { varnumber_T n1, n2 = 0; long len; n1 = 0; if (var1 != NULL && rettv->v_type != VAR_DICT) n1 = tv_get_number(var1); if (is_range) { if (rettv->v_type == VAR_DICT) { if (verbose) emsg(_(e_cannot_slice_dictionary)); return FAIL; } if (var2 != NULL) n2 = tv_get_number(var2); else n2 = VARNUM_MAX; } switch (rettv->v_type) { case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: case VAR_FUNC: case VAR_PARTIAL: case VAR_FLOAT: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: break; // not evaluating, skipping over subscript case VAR_NUMBER: case VAR_STRING: { char_u *s = tv_get_string(rettv); len = (long)STRLEN(s); if (in_vim9script() || exclusive) { if (is_range) s = string_slice(s, n1, n2, exclusive); else s = char_from_string(s, n1); } else if (is_range) { // The resulting variable is a substring. If the indexes // are out of range the result is empty. if (n1 < 0) { n1 = len + n1; if (n1 < 0) n1 = 0; } if (n2 < 0) n2 = len + n2; else if (n2 >= len) n2 = len; if (n1 >= len || n2 < 0 || n1 > n2) s = NULL; else s = vim_strnsave(s + n1, n2 - n1 + 1); } else { // The resulting variable is a string of a single // character. If the index is too big or negative the // result is empty. 
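		// Note: in legacy Vim script this is byte indexing, so a
		// multibyte character is not returned whole here.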
if (n1 >= len || n1 < 0) s = NULL; else s = vim_strnsave(s + n1, 1); } clear_tv(rettv); rettv->v_type = VAR_STRING; rettv->vval.v_string = s; } break; case VAR_BLOB: blob_slice_or_index(rettv->vval.v_blob, is_range, n1, n2, exclusive, rettv); break; case VAR_LIST: if (var1 == NULL) n1 = 0; if (var2 == NULL) n2 = VARNUM_MAX; if (list_slice_or_index(rettv->vval.v_list, is_range, n1, n2, exclusive, rettv, verbose) == FAIL) return FAIL; break; case VAR_DICT: { dictitem_T *item; typval_T tmp; if (key == NULL) { key = tv_get_string_chk(var1); if (key == NULL) return FAIL; } item = dict_find(rettv->vval.v_dict, key, keylen); if (item == NULL) { if (verbose) { if (keylen > 0) key[keylen] = NUL; semsg(_(e_key_not_present_in_dictionary), key); } return FAIL; } copy_tv(&item->di_tv, &tmp); clear_tv(rettv); *rettv = tmp; } break; } return OK; } /* * Return the function name of partial "pt". */ char_u * partial_name(partial_T *pt) { if (pt != NULL) { if (pt->pt_name != NULL) return pt->pt_name; if (pt->pt_func != NULL) return pt->pt_func->uf_name; } return (char_u *)""; } static void partial_free(partial_T *pt) { int i; for (i = 0; i < pt->pt_argc; ++i) clear_tv(&pt->pt_argv[i]); vim_free(pt->pt_argv); dict_unref(pt->pt_dict); if (pt->pt_name != NULL) { func_unref(pt->pt_name); vim_free(pt->pt_name); } else func_ptr_unref(pt->pt_func); // "out_up" is no longer used, decrement refcount on partial that owns it. partial_unref(pt->pt_outer.out_up_partial); // Using pt_outer from another partial. partial_unref(pt->pt_outer_partial); // Decrease the reference count for the context of a closure. If down // to the minimum it may be time to free it. if (pt->pt_funcstack != NULL) { --pt->pt_funcstack->fs_refcount; funcstack_check_refcount(pt->pt_funcstack); } vim_free(pt); } /* * Unreference a closure: decrement the reference count and free it when it * becomes zero. */ void partial_unref(partial_T *pt) { if (pt != NULL) { if (--pt->pt_refcount <= 0) partial_free(pt); // If the reference count goes down to one, the funcstack may be the // only reference and can be freed if no other partials reference it. else if (pt->pt_refcount == 1 && pt->pt_funcstack != NULL) funcstack_check_refcount(pt->pt_funcstack); } } /* * Return the next (unique) copy ID. * Used for serializing nested structures. */ int get_copyID(void) { current_copyID += COPYID_INC; return current_copyID; } /* * Garbage collection for lists and dictionaries. * * We use reference counts to be able to free most items right away when they * are no longer used. But for composite items it's possible that it becomes * unused while the reference count is > 0: When there is a recursive * reference. Example: * :let l = [1, 2, 3] * :let d = {9: l} * :let l[1] = d * * Since this is quite unusual we handle this with garbage collection: every * once in a while find out which lists and dicts are not referenced from any * variable. * * Here is a good reference text about garbage collection (refers to Python * but it applies to all reference-counting mechanisms): * http://python.ca/nas/python/gc/ */ /* * Do garbage collection for lists and dicts. * When "testing" is TRUE this is called from test_garbagecollect_now(). * Return TRUE if some memory was freed. */ int garbage_collect(int testing) { int copyID; int abort = FALSE; buf_T *buf; win_T *wp; int did_free = FALSE; tabpage_T *tp; if (!testing) { // Only do this once. want_garbage_collect = FALSE; may_garbage_collect = FALSE; garbage_collect_at_exit = FALSE; } // The execution stack can grow big, limit the size. 
if (exestack.ga_maxlen - exestack.ga_len > 500) { size_t new_len; char_u *pp; int n; // Keep 150% of the current size, with a minimum of the growth size. n = exestack.ga_len / 2; if (n < exestack.ga_growsize) n = exestack.ga_growsize; // Don't make it bigger though. if (exestack.ga_len + n < exestack.ga_maxlen) { new_len = (size_t)exestack.ga_itemsize * (exestack.ga_len + n); pp = vim_realloc(exestack.ga_data, new_len); if (pp == NULL) return FAIL; exestack.ga_maxlen = exestack.ga_len + n; exestack.ga_data = pp; } } // We advance by two because we add one for items referenced through // previous_funccal. copyID = get_copyID(); /* * 1. Go through all accessible variables and mark all lists and dicts * with copyID. */ // Don't free variables in the previous_funccal list unless they are only // referenced through previous_funccal. This must be first, because if // the item is referenced elsewhere the funccal must not be freed. abort = abort || set_ref_in_previous_funccal(copyID); // script-local variables abort = abort || garbage_collect_scriptvars(copyID); // buffer-local variables FOR_ALL_BUFFERS(buf) abort = abort || set_ref_in_item(&buf->b_bufvar.di_tv, copyID, NULL, NULL); // window-local variables FOR_ALL_TAB_WINDOWS(tp, wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); if (aucmd_win != NULL) abort = abort || set_ref_in_item(&aucmd_win->w_winvar.di_tv, copyID, NULL, NULL); #ifdef FEAT_PROP_POPUP FOR_ALL_POPUPWINS(wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); FOR_ALL_TABPAGES(tp) FOR_ALL_POPUPWINS_IN_TAB(tp, wp) abort = abort || set_ref_in_item(&wp->w_winvar.di_tv, copyID, NULL, NULL); #endif // tabpage-local variables FOR_ALL_TABPAGES(tp) abort = abort || set_ref_in_item(&tp->tp_winvar.di_tv, copyID, NULL, NULL); // global variables abort = abort || garbage_collect_globvars(copyID); // function-local variables abort = abort || set_ref_in_call_stack(copyID); // named functions (matters for closures) abort = abort || set_ref_in_functions(copyID); // function call arguments, if v:testing is set. abort = abort || set_ref_in_func_args(copyID); // funcstacks keep variables for closures abort = abort || set_ref_in_funcstacks(copyID); // v: vars abort = abort || garbage_collect_vimvars(copyID); // callbacks in buffers abort = abort || set_ref_in_buffers(copyID); // 'completefunc', 'omnifunc' and 'thesaurusfunc' callbacks abort = abort || set_ref_in_insexpand_funcs(copyID); // 'operatorfunc' callback abort = abort || set_ref_in_opfunc(copyID); // 'tagfunc' callback abort = abort || set_ref_in_tagfunc(copyID); // 'imactivatefunc' and 'imstatusfunc' callbacks abort = abort || set_ref_in_im_funcs(copyID); #ifdef FEAT_LUA abort = abort || set_ref_in_lua(copyID); #endif #ifdef FEAT_PYTHON abort = abort || set_ref_in_python(copyID); #endif #ifdef FEAT_PYTHON3 abort = abort || set_ref_in_python3(copyID); #endif #ifdef FEAT_JOB_CHANNEL abort = abort || set_ref_in_channel(copyID); abort = abort || set_ref_in_job(copyID); #endif #ifdef FEAT_NETBEANS_INTG abort = abort || set_ref_in_nb_channel(copyID); #endif #ifdef FEAT_TIMERS abort = abort || set_ref_in_timer(copyID); #endif #ifdef FEAT_QUICKFIX abort = abort || set_ref_in_quickfix(copyID); #endif #ifdef FEAT_TERMINAL abort = abort || set_ref_in_term(copyID); #endif #ifdef FEAT_PROP_POPUP abort = abort || set_ref_in_popups(copyID); #endif if (!abort) { /* * 2. Free lists and dictionaries that are not referenced. */ did_free = free_unref_items(copyID); /* * 3. 
Check if any funccal can be freed now. * This may call us back recursively. */ free_unref_funccal(copyID, testing); } else if (p_verbose > 0) { verb_msg(_("Not enough memory to set references, garbage collection aborted!")); } return did_free; } /* * Free lists, dictionaries, channels and jobs that are no longer referenced. */ static int free_unref_items(int copyID) { int did_free = FALSE; // Let all "free" functions know that we are here. This means no // dictionaries, lists, channels or jobs are to be freed, because we will // do that here. in_free_unref_items = TRUE; /* * PASS 1: free the contents of the items. We don't free the items * themselves yet, so that it is possible to decrement refcount counters */ // Go through the list of dicts and free items without the copyID. did_free |= dict_free_nonref(copyID); // Go through the list of lists and free items without the copyID. did_free |= list_free_nonref(copyID); #ifdef FEAT_JOB_CHANNEL // Go through the list of jobs and free items without the copyID. This // must happen before doing channels, because jobs refer to channels, but // the reference from the channel to the job isn't tracked. did_free |= free_unused_jobs_contents(copyID, COPYID_MASK); // Go through the list of channels and free items without the copyID. did_free |= free_unused_channels_contents(copyID, COPYID_MASK); #endif /* * PASS 2: free the items themselves. */ dict_free_items(copyID); list_free_items(copyID); #ifdef FEAT_JOB_CHANNEL // Go through the list of jobs and free items without the copyID. This // must happen before doing channels, because jobs refer to channels, but // the reference from the channel to the job isn't tracked. free_unused_jobs(copyID, COPYID_MASK); // Go through the list of channels and free items without the copyID. free_unused_channels(copyID, COPYID_MASK); #endif in_free_unref_items = FALSE; return did_free; } /* * Mark all lists and dicts referenced through hashtab "ht" with "copyID". * "list_stack" is used to add lists to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_ht(hashtab_T *ht, int copyID, list_stack_T **list_stack) { int todo; int abort = FALSE; hashitem_T *hi; hashtab_T *cur_ht; ht_stack_T *ht_stack = NULL; ht_stack_T *tempitem; cur_ht = ht; for (;;) { if (!abort) { // Mark each item in the hashtab. If the item contains a hashtab // it is added to ht_stack, if it contains a list it is added to // list_stack. todo = (int)cur_ht->ht_used; for (hi = cur_ht->ht_array; todo > 0; ++hi) if (!HASHITEM_EMPTY(hi)) { --todo; abort = abort || set_ref_in_item(&HI2DI(hi)->di_tv, copyID, &ht_stack, list_stack); } } if (ht_stack == NULL) break; // take an item from the stack cur_ht = ht_stack->ht; tempitem = ht_stack; ht_stack = ht_stack->prev; free(tempitem); } return abort; } #if defined(FEAT_LUA) || defined(FEAT_PYTHON) || defined(FEAT_PYTHON3) \ || defined(PROTO) /* * Mark a dict and its items with "copyID". * Returns TRUE if setting references failed somehow. */ int set_ref_in_dict(dict_T *d, int copyID) { if (d != NULL && d->dv_copyID != copyID) { d->dv_copyID = copyID; return set_ref_in_ht(&d->dv_hashtab, copyID, NULL); } return FALSE; } #endif /* * Mark a list and its items with "copyID". * Returns TRUE if setting references failed somehow. 
*/ int set_ref_in_list(list_T *ll, int copyID) { if (ll != NULL && ll->lv_copyID != copyID) { ll->lv_copyID = copyID; return set_ref_in_list_items(ll, copyID, NULL); } return FALSE; } /* * Mark all lists and dicts referenced through list "l" with "copyID". * "ht_stack" is used to add hashtabs to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_list_items(list_T *l, int copyID, ht_stack_T **ht_stack) { listitem_T *li; int abort = FALSE; list_T *cur_l; list_stack_T *list_stack = NULL; list_stack_T *tempitem; cur_l = l; for (;;) { if (!abort && cur_l->lv_first != &range_list_item) // Mark each item in the list. If the item contains a hashtab // it is added to ht_stack, if it contains a list it is added to // list_stack. for (li = cur_l->lv_first; !abort && li != NULL; li = li->li_next) abort = abort || set_ref_in_item(&li->li_tv, copyID, ht_stack, &list_stack); if (list_stack == NULL) break; // take an item from the stack cur_l = list_stack->list; tempitem = list_stack; list_stack = list_stack->prev; free(tempitem); } return abort; } /* * Mark the partial in callback 'cb' with "copyID". */ int set_ref_in_callback(callback_T *cb, int copyID) { typval_T tv; if (cb->cb_name == NULL || *cb->cb_name == NUL || cb->cb_partial == NULL) return FALSE; tv.v_type = VAR_PARTIAL; tv.vval.v_partial = cb->cb_partial; return set_ref_in_item(&tv, copyID, NULL, NULL); } /* * Mark all lists and dicts referenced through typval "tv" with "copyID". * "list_stack" is used to add lists to be marked. Can be NULL. * "ht_stack" is used to add hashtabs to be marked. Can be NULL. * * Returns TRUE if setting references failed somehow. */ int set_ref_in_item( typval_T *tv, int copyID, ht_stack_T **ht_stack, list_stack_T **list_stack) { int abort = FALSE; if (tv->v_type == VAR_DICT) { dict_T *dd = tv->vval.v_dict; if (dd != NULL && dd->dv_copyID != copyID) { // Didn't see this dict yet. dd->dv_copyID = copyID; if (ht_stack == NULL) { abort = set_ref_in_ht(&dd->dv_hashtab, copyID, list_stack); } else { ht_stack_T *newitem = ALLOC_ONE(ht_stack_T); if (newitem == NULL) abort = TRUE; else { newitem->ht = &dd->dv_hashtab; newitem->prev = *ht_stack; *ht_stack = newitem; } } } } else if (tv->v_type == VAR_LIST) { list_T *ll = tv->vval.v_list; if (ll != NULL && ll->lv_copyID != copyID) { // Didn't see this list yet. ll->lv_copyID = copyID; if (list_stack == NULL) { abort = set_ref_in_list_items(ll, copyID, ht_stack); } else { list_stack_T *newitem = ALLOC_ONE(list_stack_T); if (newitem == NULL) abort = TRUE; else { newitem->list = ll; newitem->prev = *list_stack; *list_stack = newitem; } } } } else if (tv->v_type == VAR_FUNC) { abort = set_ref_in_func(tv->vval.v_string, NULL, copyID); } else if (tv->v_type == VAR_PARTIAL) { partial_T *pt = tv->vval.v_partial; int i; if (pt != NULL && pt->pt_copyID != copyID) { // Didn't see this partial yet. 
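	    // Mark the function itself, then the bound dict and each bound
	    // argument, so that none of them are garbage collected.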
pt->pt_copyID = copyID; abort = set_ref_in_func(pt->pt_name, pt->pt_func, copyID); if (pt->pt_dict != NULL) { typval_T dtv; dtv.v_type = VAR_DICT; dtv.vval.v_dict = pt->pt_dict; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } for (i = 0; i < pt->pt_argc; ++i) abort = abort || set_ref_in_item(&pt->pt_argv[i], copyID, ht_stack, list_stack); // pt_funcstack is handled in set_ref_in_funcstacks() } } #ifdef FEAT_JOB_CHANNEL else if (tv->v_type == VAR_JOB) { job_T *job = tv->vval.v_job; typval_T dtv; if (job != NULL && job->jv_copyID != copyID) { job->jv_copyID = copyID; if (job->jv_channel != NULL) { dtv.v_type = VAR_CHANNEL; dtv.vval.v_channel = job->jv_channel; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } if (job->jv_exit_cb.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = job->jv_exit_cb.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } } } else if (tv->v_type == VAR_CHANNEL) { channel_T *ch =tv->vval.v_channel; ch_part_T part; typval_T dtv; jsonq_T *jq; cbq_T *cq; if (ch != NULL && ch->ch_copyID != copyID) { ch->ch_copyID = copyID; for (part = PART_SOCK; part < PART_COUNT; ++part) { for (jq = ch->ch_part[part].ch_json_head.jq_next; jq != NULL; jq = jq->jq_next) set_ref_in_item(jq->jq_value, copyID, ht_stack, list_stack); for (cq = ch->ch_part[part].ch_cb_head.cq_next; cq != NULL; cq = cq->cq_next) if (cq->cq_callback.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = cq->cq_callback.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } if (ch->ch_part[part].ch_callback.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = ch->ch_part[part].ch_callback.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } } if (ch->ch_callback.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = ch->ch_callback.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } if (ch->ch_close_cb.cb_partial != NULL) { dtv.v_type = VAR_PARTIAL; dtv.vval.v_partial = ch->ch_close_cb.cb_partial; set_ref_in_item(&dtv, copyID, ht_stack, list_stack); } } } #endif return abort; } /* * Return a string with the string representation of a variable. * If the memory is allocated "tofree" is set to it, otherwise NULL. * "numbuf" is used for a number. * When "copyID" is not NULL replace recursive lists and dicts with "...". * When both "echo_style" and "composite_val" are FALSE, put quotes around * strings as "string()", otherwise does not put quotes around strings, as * ":echo" displays values. * When "restore_copyID" is FALSE, repeated items in dictionaries and lists * are replaced with "...". * May return NULL. */ char_u * echo_string_core( typval_T *tv, char_u **tofree, char_u *numbuf, int copyID, int echo_style, int restore_copyID, int composite_val) { static int recurse = 0; char_u *r = NULL; if (recurse >= DICT_MAXNEST) { if (!did_echo_string_emsg) { // Only give this message once for a recursive call to avoid // flooding the user with errors. And stop iterating over lists // and dicts. did_echo_string_emsg = TRUE; emsg(_(e_variable_nested_too_deep_for_displaying)); } *tofree = NULL; return (char_u *)"{E724}"; } ++recurse; switch (tv->v_type) { case VAR_STRING: if (echo_style && !composite_val) { *tofree = NULL; r = tv->vval.v_string; if (r == NULL) r = (char_u *)""; } else { *tofree = string_quote(tv->vval.v_string, FALSE); r = *tofree; } break; case VAR_FUNC: { char_u buf[MAX_FUNC_NAME_LEN]; if (echo_style) { r = tv->vval.v_string == NULL ? 
(char_u *)"function()" : make_ufunc_name_readable(tv->vval.v_string, buf, MAX_FUNC_NAME_LEN); if (r == buf) { r = vim_strsave(buf); *tofree = r; } else *tofree = NULL; } else { *tofree = string_quote(tv->vval.v_string == NULL ? NULL : make_ufunc_name_readable( tv->vval.v_string, buf, MAX_FUNC_NAME_LEN), TRUE); r = *tofree; } } break; case VAR_PARTIAL: { partial_T *pt = tv->vval.v_partial; char_u *fname = string_quote(pt == NULL ? NULL : partial_name(pt), FALSE); garray_T ga; int i; char_u *tf; ga_init2(&ga, 1, 100); ga_concat(&ga, (char_u *)"function("); if (fname != NULL) { // When using uf_name prepend "g:" for a global function. if (pt != NULL && pt->pt_name == NULL && fname[0] == '\'' && vim_isupper(fname[1])) { ga_concat(&ga, (char_u *)"'g:"); ga_concat(&ga, fname + 1); } else ga_concat(&ga, fname); vim_free(fname); } if (pt != NULL && pt->pt_argc > 0) { ga_concat(&ga, (char_u *)", ["); for (i = 0; i < pt->pt_argc; ++i) { if (i > 0) ga_concat(&ga, (char_u *)", "); ga_concat(&ga, tv2string(&pt->pt_argv[i], &tf, numbuf, copyID)); vim_free(tf); } ga_concat(&ga, (char_u *)"]"); } if (pt != NULL && pt->pt_dict != NULL) { typval_T dtv; ga_concat(&ga, (char_u *)", "); dtv.v_type = VAR_DICT; dtv.vval.v_dict = pt->pt_dict; ga_concat(&ga, tv2string(&dtv, &tf, numbuf, copyID)); vim_free(tf); } // terminate with ')' and a NUL ga_concat_len(&ga, (char_u *)")", 2); *tofree = ga.ga_data; r = *tofree; break; } case VAR_BLOB: r = blob2string(tv->vval.v_blob, tofree, numbuf); break; case VAR_LIST: if (tv->vval.v_list == NULL) { // NULL list is equivalent to empty list. *tofree = NULL; r = (char_u *)"[]"; } else if (copyID != 0 && tv->vval.v_list->lv_copyID == copyID && tv->vval.v_list->lv_len > 0) { *tofree = NULL; r = (char_u *)"[...]"; } else { int old_copyID = tv->vval.v_list->lv_copyID; tv->vval.v_list->lv_copyID = copyID; *tofree = list2string(tv, copyID, restore_copyID); if (restore_copyID) tv->vval.v_list->lv_copyID = old_copyID; r = *tofree; } break; case VAR_DICT: if (tv->vval.v_dict == NULL) { // NULL dict is equivalent to empty dict. *tofree = NULL; r = (char_u *)"{}"; } else if (copyID != 0 && tv->vval.v_dict->dv_copyID == copyID && tv->vval.v_dict->dv_hashtab.ht_used != 0) { *tofree = NULL; r = (char_u *)"{...}"; } else { int old_copyID = tv->vval.v_dict->dv_copyID; tv->vval.v_dict->dv_copyID = copyID; *tofree = dict2string(tv, copyID, restore_copyID); if (restore_copyID) tv->vval.v_dict->dv_copyID = old_copyID; r = *tofree; } break; case VAR_NUMBER: case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: *tofree = NULL; r = tv_get_string_buf(tv, numbuf); break; case VAR_JOB: case VAR_CHANNEL: #ifdef FEAT_JOB_CHANNEL *tofree = NULL; r = tv->v_type == VAR_JOB ? job_to_string_buf(tv, numbuf) : channel_to_string_buf(tv, numbuf); if (composite_val) { *tofree = string_quote(r, FALSE); r = *tofree; } #endif break; case VAR_INSTR: *tofree = NULL; r = (char_u *)"instructions"; break; case VAR_FLOAT: #ifdef FEAT_FLOAT *tofree = NULL; vim_snprintf((char *)numbuf, NUMBUFLEN, "%g", tv->vval.v_float); r = numbuf; break; #endif case VAR_BOOL: case VAR_SPECIAL: *tofree = NULL; r = (char_u *)get_var_special_name(tv->vval.v_number); break; } if (--recurse == 0) did_echo_string_emsg = FALSE; return r; } /* * Return a string with the string representation of a variable. * If the memory is allocated "tofree" is set to it, otherwise NULL. * "numbuf" is used for a number. * Does not put quotes around strings, as ":echo" displays values. * When "copyID" is not NULL replace recursive lists and dicts with "...". 
* May return NULL. */ char_u * echo_string( typval_T *tv, char_u **tofree, char_u *numbuf, int copyID) { return echo_string_core(tv, tofree, numbuf, copyID, TRUE, FALSE, FALSE); } /* * Convert the specified byte index of line 'lnum' in buffer 'buf' to a * character index. Works only for loaded buffers. Returns -1 on failure. * The index of the first byte and the first character is zero. */ int buf_byteidx_to_charidx(buf_T *buf, int lnum, int byteidx) { char_u *str; char_u *t; int count; if (buf == NULL || buf->b_ml.ml_mfp == NULL) return -1; if (lnum > buf->b_ml.ml_line_count) lnum = buf->b_ml.ml_line_count; str = ml_get_buf(buf, lnum, FALSE); if (str == NULL) return -1; if (*str == NUL) return 0; // count the number of characters t = str; for (count = 0; *t != NUL && t <= str + byteidx; count++) t += mb_ptr2len(t); // In insert mode, when the cursor is at the end of a non-empty line, // byteidx points to the NUL character immediately past the end of the // string. In this case, add one to the character count. if (*t == NUL && byteidx != 0 && t == str + byteidx) count++; return count - 1; } /* * Convert the specified character index of line 'lnum' in buffer 'buf' to a * byte index. Works only for loaded buffers. Returns -1 on failure. * The index of the first byte and the first character is zero. */ int buf_charidx_to_byteidx(buf_T *buf, int lnum, int charidx) { char_u *str; char_u *t; if (buf == NULL || buf->b_ml.ml_mfp == NULL) return -1; if (lnum > buf->b_ml.ml_line_count) lnum = buf->b_ml.ml_line_count; str = ml_get_buf(buf, lnum, FALSE); if (str == NULL) return -1; // Convert the character offset to a byte offset t = str; while (*t != NUL && --charidx > 0) t += mb_ptr2len(t); return t - str; } /* * Translate a String variable into a position. * Returns NULL when there is an error. */ pos_T * var2fpos( typval_T *varp, int dollar_lnum, // TRUE when $ is last line int *fnum, // set to fnum for '0, 'A, etc. int charcol) // return character column { char_u *name; static pos_T pos; pos_T *pp; // Argument can be [lnum, col, coladd]. if (varp->v_type == VAR_LIST) { list_T *l; int len; int error = FALSE; listitem_T *li; l = varp->vval.v_list; if (l == NULL) return NULL; // Get the line number pos.lnum = list_find_nr(l, 0L, &error); if (error || pos.lnum <= 0 || pos.lnum > curbuf->b_ml.ml_line_count) return NULL; // invalid line number if (charcol) len = (long)mb_charlen(ml_get(pos.lnum)); else len = (long)STRLEN(ml_get(pos.lnum)); // Get the column number // We accept "$" for the column number: last column. li = list_find(l, 1L); if (li != NULL && li->li_tv.v_type == VAR_STRING && li->li_tv.vval.v_string != NULL && STRCMP(li->li_tv.vval.v_string, "$") == 0) { pos.col = len + 1; } else { pos.col = list_find_nr(l, 1L, &error); if (error) return NULL; } // Accept a position up to the NUL after the line. if (pos.col == 0 || (int)pos.col > len + 1) return NULL; // invalid column number --pos.col; // Get the virtual offset. Defaults to zero. pos.coladd = list_find_nr(l, 2L, &error); if (error) pos.coladd = 0; return &pos; } if (in_vim9script() && check_for_string_arg(varp, 0) == FAIL) return NULL; name = tv_get_string_chk(varp); if (name == NULL) return NULL; pos.lnum = 0; if (name[0] == '.' 
&& (!in_vim9script() || name[1] == NUL)) { // cursor pos = curwin->w_cursor; } else if (name[0] == 'v' && name[1] == NUL) { // Visual start if (VIsual_active) pos = VIsual; else pos = curwin->w_cursor; } else if (name[0] == '\'' && (!in_vim9script() || (name[1] != NUL && name[2] == NUL))) { // mark pp = getmark_buf_fnum(curbuf, name[1], FALSE, fnum); if (pp == NULL || pp == (pos_T *)-1 || pp->lnum <= 0) return NULL; pos = *pp; } if (pos.lnum != 0) { if (charcol) pos.col = buf_byteidx_to_charidx(curbuf, pos.lnum, pos.col); return &pos; } pos.coladd = 0; if (name[0] == 'w' && dollar_lnum) { pos.col = 0; if (name[1] == '0') // "w0": first visible line { update_topline(); // In silent Ex mode topline is zero, but that's not a valid line // number; use one instead. pos.lnum = curwin->w_topline > 0 ? curwin->w_topline : 1; return &pos; } else if (name[1] == '$') // "w$": last visible line { validate_botline(); // In silent Ex mode botline is zero, return zero then. pos.lnum = curwin->w_botline > 0 ? curwin->w_botline - 1 : 0; return &pos; } } else if (name[0] == '$') // last column or line { if (dollar_lnum) { pos.lnum = curbuf->b_ml.ml_line_count; pos.col = 0; } else { pos.lnum = curwin->w_cursor.lnum; if (charcol) pos.col = (colnr_T)mb_charlen(ml_get_curline()); else pos.col = (colnr_T)STRLEN(ml_get_curline()); } return &pos; } if (in_vim9script()) semsg(_(e_invalid_value_for_line_number_str), name); return NULL; } /* * Convert list in "arg" into a position and optional file number. * When "fnump" is NULL there is no file number, only 3 items. * Note that the column is passed on as-is, the caller may want to decrement * it to use 1 for the first column. * Return FAIL when conversion is not possible, doesn't check the position for * validity. */ int list2fpos( typval_T *arg, pos_T *posp, int *fnump, colnr_T *curswantp, int charcol) { list_T *l = arg->vval.v_list; long i = 0; long n; // List must be: [fnum, lnum, col, coladd, curswant], where "fnum" is only // there when "fnump" isn't NULL; "coladd" and "curswant" are optional. if (arg->v_type != VAR_LIST || l == NULL || l->lv_len < (fnump == NULL ? 2 : 3) || l->lv_len > (fnump == NULL ? 4 : 5)) return FAIL; if (fnump != NULL) { n = list_find_nr(l, i++, NULL); // fnum if (n < 0) return FAIL; if (n == 0) n = curbuf->b_fnum; // current buffer *fnump = n; } n = list_find_nr(l, i++, NULL); // lnum if (n < 0) return FAIL; posp->lnum = n; n = list_find_nr(l, i++, NULL); // col if (n < 0) return FAIL; // If character position is specified, then convert to byte position if (charcol) { buf_T *buf; // Get the text for the specified line in a loaded buffer buf = buflist_findnr(fnump == NULL ? curbuf->b_fnum : *fnump); if (buf == NULL || buf->b_ml.ml_mfp == NULL) return FAIL; n = buf_charidx_to_byteidx(buf, posp->lnum, n) + 1; } posp->col = n; n = list_find_nr(l, i, NULL); // off if (n < 0) posp->coladd = 0; else posp->coladd = n; if (curswantp != NULL) *curswantp = list_find_nr(l, i + 1, NULL); // curswant return OK; } /* * Get the length of an environment variable name. * Advance "arg" to the first character after the name. * Return 0 for error. */ int get_env_len(char_u **arg) { char_u *p; int len; for (p = *arg; vim_isIDc(*p); ++p) ; if (p == *arg) // no name found return 0; len = (int)(p - *arg); *arg = p; return len; } /* * Get the length of the name of a function or internal variable. * "arg" is advanced to after the name. * Return 0 if something is wrong. */ int get_id_len(char_u **arg) { char_u *p; int len; // Find the end of the name. 
for (p = *arg; eval_isnamec(*p); ++p) { if (*p == ':') { // "s:" is start of "s:var", but "n:" is not and can be used in // slice "[n:]". Also "xx:" is not a namespace. len = (int)(p - *arg); if ((len == 1 && vim_strchr(NAMESPACE_CHAR, **arg) == NULL) || len > 1) break; } } if (p == *arg) // no name found return 0; len = (int)(p - *arg); *arg = p; return len; } /* * Get the length of the name of a variable or function. * Only the name is recognized, does not handle ".key" or "[idx]". * "arg" is advanced to the first non-white character after the name. * Return -1 if curly braces expansion failed. * Return 0 if something else is wrong. * If the name contains 'magic' {}'s, expand them and return the * expanded name in an allocated string via 'alias' - caller must free. */ int get_name_len( char_u **arg, char_u **alias, int evaluate, int verbose) { int len; char_u *p; char_u *expr_start; char_u *expr_end; *alias = NULL; // default to no alias if ((*arg)[0] == K_SPECIAL && (*arg)[1] == KS_EXTRA && (*arg)[2] == (int)KE_SNR) { // hard coded <SNR>, already translated *arg += 3; return get_id_len(arg) + 3; } len = eval_fname_script(*arg); if (len > 0) { // literal "<SID>", "s:" or "<SNR>" *arg += len; } /* * Find the end of the name; check for {} construction. */ p = find_name_end(*arg, &expr_start, &expr_end, len > 0 ? 0 : FNE_CHECK_START); if (expr_start != NULL) { char_u *temp_string; if (!evaluate) { len += (int)(p - *arg); *arg = skipwhite(p); return len; } /* * Include any <SID> etc in the expanded string: * Thus the -len here. */ temp_string = make_expanded_name(*arg - len, expr_start, expr_end, p); if (temp_string == NULL) return -1; *alias = temp_string; *arg = skipwhite(p); return (int)STRLEN(temp_string); } len += get_id_len(arg); // Only give an error when there is something, otherwise it will be // reported at a higher level. if (len == 0 && verbose && **arg != NUL) semsg(_(e_invalid_expression_str), *arg); return len; } /* * Find the end of a variable or function name, taking care of magic braces. * If "expr_start" is not NULL then "expr_start" and "expr_end" are set to the * start and end of the first magic braces item. * "flags" can have FNE_INCL_BR and FNE_CHECK_START. * Return a pointer to just after the name. Equal to "arg" if there is no * valid name. */ char_u * find_name_end( char_u *arg, char_u **expr_start, char_u **expr_end, int flags) { int mb_nest = 0; int br_nest = 0; char_u *p; int len; int vim9script = in_vim9script(); if (expr_start != NULL) { *expr_start = NULL; *expr_end = NULL; } // Quick check for valid starting character. if ((flags & FNE_CHECK_START) && !eval_isnamec1(*arg) && (*arg != '{' || vim9script)) return arg; for (p = arg; *p != NUL && (eval_isnamec(*p) || (*p == '{' && !vim9script) || ((flags & FNE_INCL_BR) && (*p == '[' || (*p == '.' && eval_isdictc(p[1])))) || mb_nest != 0 || br_nest != 0); MB_PTR_ADV(p)) { if (*p == '\'') { // skip over 'string' to avoid counting [ and ] inside it. for (p = p + 1; *p != NUL && *p != '\''; MB_PTR_ADV(p)) ; if (*p == NUL) break; } else if (*p == '"') { // skip over "str\"ing" to avoid counting [ and ] inside it. for (p = p + 1; *p != NUL && *p != '"'; MB_PTR_ADV(p)) if (*p == '\\' && p[1] != NUL) ++p; if (*p == NUL) break; } else if (br_nest == 0 && mb_nest == 0 && *p == ':') { // "s:" is start of "s:var", but "n:" is not and can be used in // slice "[n:]". Also "xx:" is not a namespace. But {ns}: is. 
len = (int)(p - arg); if ((len == 1 && vim_strchr(NAMESPACE_CHAR, *arg) == NULL) || (len > 1 && p[-1] != '}')) break; } if (mb_nest == 0) { if (*p == '[') ++br_nest; else if (*p == ']') --br_nest; } if (br_nest == 0 && !vim9script) { if (*p == '{') { mb_nest++; if (expr_start != NULL && *expr_start == NULL) *expr_start = p; } else if (*p == '}') { mb_nest--; if (expr_start != NULL && mb_nest == 0 && *expr_end == NULL) *expr_end = p; } } } return p; } /* * Expands out the 'magic' {}'s in a variable/function name. * Note that this can call itself recursively, to deal with * constructs like foo{bar}{baz}{bam} * The four pointer arguments point to "foo{expre}ss{ion}bar" * "in_start" ^ * "expr_start" ^ * "expr_end" ^ * "in_end" ^ * * Returns a new allocated string, which the caller must free. * Returns NULL for failure. */ static char_u * make_expanded_name( char_u *in_start, char_u *expr_start, char_u *expr_end, char_u *in_end) { char_u c1; char_u *retval = NULL; char_u *temp_result; if (expr_end == NULL || in_end == NULL) return NULL; *expr_start = NUL; *expr_end = NUL; c1 = *in_end; *in_end = NUL; temp_result = eval_to_string(expr_start + 1, FALSE); if (temp_result != NULL) { retval = alloc(STRLEN(temp_result) + (expr_start - in_start) + (in_end - expr_end) + 1); if (retval != NULL) { STRCPY(retval, in_start); STRCAT(retval, temp_result); STRCAT(retval, expr_end + 1); } } vim_free(temp_result); *in_end = c1; // put char back for error messages *expr_start = '{'; *expr_end = '}'; if (retval != NULL) { temp_result = find_name_end(retval, &expr_start, &expr_end, 0); if (expr_start != NULL) { // Further expansion! temp_result = make_expanded_name(retval, expr_start, expr_end, temp_result); vim_free(retval); retval = temp_result; } } return retval; } /* * Return TRUE if character "c" can be used in a variable or function name. * Does not include '{' or '}' for magic braces. */ int eval_isnamec(int c) { return ASCII_ISALNUM(c) || c == '_' || c == ':' || c == AUTOLOAD_CHAR; } /* * Return TRUE if character "c" can be used as the first character in a * variable or function name (excluding '{' and '}'). */ int eval_isnamec1(int c) { return ASCII_ISALPHA(c) || c == '_'; } /* * Return TRUE if character "c" can be used as the first character of a * dictionary key. */ int eval_isdictc(int c) { return ASCII_ISALNUM(c) || c == '_'; } /* * Handle: * - expr[expr], expr[expr:expr] subscript * - ".name" lookup * - function call with Funcref variable: func(expr) * - method call: var->method() * * Can all be combined in any order: dict.func(expr)[idx]['func'](expr)->len() * "name_start" points to a variable before the subscript or is NULL. */ int handle_subscript( char_u **arg, char_u *name_start, typval_T *rettv, evalarg_T *evalarg, int verbose) // give error messages { int evaluate = evalarg != NULL && (evalarg->eval_flags & EVAL_EVALUATE); int ret = OK; dict_T *selfdict = NULL; int check_white = TRUE; int getnext; char_u *p; while (ret == OK) { // When at the end of the line and ".name" or "->{" or "->X" follows in // the next line then consume the line break. p = eval_next_non_blank(*arg, evalarg, &getnext); if (getnext && ((rettv->v_type == VAR_DICT && *p == '.' && eval_isdictc(p[1])) || (p[0] == '-' && p[1] == '>' && (p[2] == '{' || ASCII_ISALPHA(in_vim9script() ? 
*skipwhite(p + 2) : p[2]))))) { *arg = eval_next_line(*arg, evalarg); p = *arg; check_white = FALSE; } if (rettv->v_type == VAR_ANY) { char_u *exp_name; int cc; int idx; ufunc_T *ufunc; type_T *type; // Found script from "import {name} as name", script item name must // follow. "rettv->vval.v_number" has the script ID. if (**arg != '.') { if (verbose) semsg(_(e_expected_dot_after_name_str), name_start != NULL ? name_start: *arg); ret = FAIL; break; } ++*arg; if (IS_WHITE_OR_NUL(**arg)) { if (verbose) emsg(_(e_no_white_space_allowed_after_dot)); ret = FAIL; break; } // isolate the name exp_name = *arg; while (eval_isnamec(**arg)) ++*arg; cc = **arg; **arg = NUL; idx = find_exported(rettv->vval.v_number, exp_name, &ufunc, &type, evalarg->eval_cctx, evalarg->eval_cstack, verbose); **arg = cc; if (idx < 0 && ufunc == NULL) { ret = FAIL; break; } if (idx >= 0) { scriptitem_T *si = SCRIPT_ITEM(rettv->vval.v_number); svar_T *sv = ((svar_T *)si->sn_var_vals.ga_data) + idx; copy_tv(sv->sv_tv, rettv); } else { rettv->v_type = VAR_FUNC; rettv->vval.v_string = vim_strsave(ufunc->uf_name); } continue; } if ((**arg == '(' && (!evaluate || rettv->v_type == VAR_FUNC || rettv->v_type == VAR_PARTIAL)) && (!check_white || !VIM_ISWHITE(*(*arg - 1)))) { ret = call_func_rettv(arg, evalarg, rettv, evaluate, selfdict, NULL); // Stop the expression evaluation when immediately aborting on // error, or when an interrupt occurred or an exception was thrown // but not caught. if (aborting()) { if (ret == OK) clear_tv(rettv); ret = FAIL; } dict_unref(selfdict); selfdict = NULL; } else if (p[0] == '-' && p[1] == '>') { if (in_vim9script()) *arg = skipwhite(p + 2); else *arg = p + 2; if (ret == OK) { if (VIM_ISWHITE(**arg)) { emsg(_(e_no_white_space_allowed_before_parenthesis)); ret = FAIL; } else if ((**arg == '{' && !in_vim9script()) || **arg == '(') // expr->{lambda}() or expr->(lambda)() ret = eval_lambda(arg, rettv, evalarg, verbose); else // expr->name() ret = eval_method(arg, rettv, evalarg, verbose); } } // "." is ".name" lookup when we found a dict or when evaluating and // scriptversion is at least 2, where string concatenation is "..". else if (**arg == '[' || (**arg == '.' && (rettv->v_type == VAR_DICT || (!evaluate && (*arg)[1] != '.' && !in_old_script(2))))) { dict_unref(selfdict); if (rettv->v_type == VAR_DICT) { selfdict = rettv->vval.v_dict; if (selfdict != NULL) ++selfdict->dv_refcount; } else selfdict = NULL; if (eval_index(arg, rettv, evalarg, verbose) == FAIL) { clear_tv(rettv); ret = FAIL; } } else break; } // Turn "dict.Func" into a partial for "Func" bound to "dict". // Don't do this when "Func" is already a partial that was bound // explicitly (pt_auto is FALSE). if (selfdict != NULL && (rettv->v_type == VAR_FUNC || (rettv->v_type == VAR_PARTIAL && (rettv->vval.v_partial->pt_auto || rettv->vval.v_partial->pt_dict == NULL)))) selfdict = make_partial(selfdict, rettv); dict_unref(selfdict); return ret; } /* * Make a copy of an item. * Lists and Dictionaries are also copied. A deep copy if "deep" is set. * "top" is TRUE for the toplevel of copy(). * For deepcopy() "copyID" is zero for a full copy or the ID for when a * reference to an already copied list/dict can be used. * Returns FAIL or OK. 
*/ int item_copy( typval_T *from, typval_T *to, int deep, int top, int copyID) { static int recurse = 0; int ret = OK; if (recurse >= DICT_MAXNEST) { emsg(_(e_variable_nested_too_deep_for_making_copy)); return FAIL; } ++recurse; switch (from->v_type) { case VAR_NUMBER: case VAR_FLOAT: case VAR_STRING: case VAR_FUNC: case VAR_PARTIAL: case VAR_BOOL: case VAR_SPECIAL: case VAR_JOB: case VAR_CHANNEL: case VAR_INSTR: copy_tv(from, to); break; case VAR_LIST: to->v_type = VAR_LIST; to->v_lock = 0; if (from->vval.v_list == NULL) to->vval.v_list = NULL; else if (copyID != 0 && from->vval.v_list->lv_copyID == copyID) { // use the copy made earlier to->vval.v_list = from->vval.v_list->lv_copylist; ++to->vval.v_list->lv_refcount; } else to->vval.v_list = list_copy(from->vval.v_list, deep, top, copyID); if (to->vval.v_list == NULL) ret = FAIL; break; case VAR_BLOB: ret = blob_copy(from->vval.v_blob, to); break; case VAR_DICT: to->v_type = VAR_DICT; to->v_lock = 0; if (from->vval.v_dict == NULL) to->vval.v_dict = NULL; else if (copyID != 0 && from->vval.v_dict->dv_copyID == copyID) { // use the copy made earlier to->vval.v_dict = from->vval.v_dict->dv_copydict; ++to->vval.v_dict->dv_refcount; } else to->vval.v_dict = dict_copy(from->vval.v_dict, deep, top, copyID); if (to->vval.v_dict == NULL) ret = FAIL; break; case VAR_UNKNOWN: case VAR_ANY: case VAR_VOID: internal_error_no_abort("item_copy(UNKNOWN)"); ret = FAIL; } --recurse; return ret; } void echo_one(typval_T *rettv, int with_space, int *atstart, int *needclr) { char_u *tofree; char_u numbuf[NUMBUFLEN]; char_u *p = echo_string(rettv, &tofree, numbuf, get_copyID()); if (*atstart) { *atstart = FALSE; // Call msg_start() after eval1(), evaluating the expression // may cause a message to appear. if (with_space) { // Mark the saved text as finishing the line, so that what // follows is displayed on a new line when scrolling back // at the more prompt. msg_sb_eol(); msg_start(); } } else if (with_space) msg_puts_attr(" ", echo_attr); if (p != NULL) for ( ; *p != NUL && !got_int; ++p) { if (*p == '\n' || *p == '\r' || *p == TAB) { if (*p != TAB && *needclr) { // remove any text still there from the command msg_clr_eos(); *needclr = FALSE; } msg_putchar_attr(*p, echo_attr); } else { if (has_mbyte) { int i = (*mb_ptr2len)(p); (void)msg_outtrans_len_attr(p, i, echo_attr); p += i - 1; } else (void)msg_outtrans_len_attr(p, 1, echo_attr); } } vim_free(tofree); } /* * ":echo expr1 ..." print each argument separated with a space, add a * newline at the end. * ":echon expr1 ..." print each argument plain. */ void ex_echo(exarg_T *eap) { char_u *arg = eap->arg; typval_T rettv; char_u *arg_start; int needclr = TRUE; int atstart = TRUE; int did_emsg_before = did_emsg; int called_emsg_before = called_emsg; evalarg_T evalarg; fill_evalarg_from_eap(&evalarg, eap, eap->skip); if (eap->skip) ++emsg_skip; while ((!ends_excmd2(eap->cmd, arg) || *arg == '"') && !got_int) { // If eval1() causes an error message the text from the command may // still need to be cleared. E.g., "echo 22,44". need_clr_eos = needclr; arg_start = arg; if (eval1(&arg, &rettv, &evalarg) == FAIL) { /* * Report the invalid expression unless the expression evaluation * has been cancelled due to an aborting error, an interrupt, or an * exception. 
*/ if (!aborting() && did_emsg == did_emsg_before && called_emsg == called_emsg_before) semsg(_(e_invalid_expression_str), arg_start); need_clr_eos = FALSE; break; } need_clr_eos = FALSE; if (!eap->skip) { if (rettv.v_type == VAR_VOID) { semsg(_(e_expression_does_not_result_in_value_str), arg_start); break; } echo_one(&rettv, eap->cmdidx == CMD_echo, &atstart, &needclr); } clear_tv(&rettv); arg = skipwhite(arg); } set_nextcmd(eap, arg); clear_evalarg(&evalarg, eap); if (eap->skip) --emsg_skip; else { // remove text that may still be there from the command if (needclr) msg_clr_eos(); if (eap->cmdidx == CMD_echo) msg_end(); } } /* * ":echohl {name}". */ void ex_echohl(exarg_T *eap) { echo_attr = syn_name2attr(eap->arg); } /* * Returns the :echo attribute */ int get_echo_attr(void) { return echo_attr; } /* * ":execute expr1 ..." execute the result of an expression. * ":echomsg expr1 ..." Print a message * ":echoerr expr1 ..." Print an error * ":echoconsole expr1 ..." Print a message on stdout * Each gets spaces around each argument and a newline at the end for * echo commands */ void ex_execute(exarg_T *eap) { char_u *arg = eap->arg; typval_T rettv; int ret = OK; char_u *p; garray_T ga; int len; long start_lnum = SOURCING_LNUM; ga_init2(&ga, 1, 80); if (eap->skip) ++emsg_skip; while (!ends_excmd2(eap->cmd, arg) || *arg == '"') { ret = eval1_emsg(&arg, &rettv, eap); if (ret == FAIL) break; if (!eap->skip) { char_u buf[NUMBUFLEN]; if (eap->cmdidx == CMD_execute) { if (rettv.v_type == VAR_CHANNEL || rettv.v_type == VAR_JOB) { semsg(_(e_using_invalid_value_as_string_str), vartype_name(rettv.v_type)); p = NULL; } else p = tv_get_string_buf(&rettv, buf); } else p = tv_stringify(&rettv, buf); if (p == NULL) { clear_tv(&rettv); ret = FAIL; break; } len = (int)STRLEN(p); if (ga_grow(&ga, len + 2) == FAIL) { clear_tv(&rettv); ret = FAIL; break; } if (ga.ga_len) ((char_u *)(ga.ga_data))[ga.ga_len++] = ' '; STRCPY((char_u *)(ga.ga_data) + ga.ga_len, p); ga.ga_len += len; } clear_tv(&rettv); arg = skipwhite(arg); } if (ret != FAIL && ga.ga_data != NULL) { // use the first line of continuation lines for messages SOURCING_LNUM = start_lnum; if (eap->cmdidx == CMD_echomsg || eap->cmdidx == CMD_echoerr) { // Mark the already saved text as finishing the line, so that what // follows is displayed on a new line when scrolling back at the // more prompt. msg_sb_eol(); } if (eap->cmdidx == CMD_echomsg) { msg_attr(ga.ga_data, echo_attr); out_flush(); } else if (eap->cmdidx == CMD_echoconsole) { ui_write(ga.ga_data, (int)STRLEN(ga.ga_data), TRUE); ui_write((char_u *)"\r\n", 2, TRUE); } else if (eap->cmdidx == CMD_echoerr) { int save_did_emsg = did_emsg; // We don't want to abort following commands, restore did_emsg. emsg(ga.ga_data); if (!force_abort) did_emsg = save_did_emsg; } else if (eap->cmdidx == CMD_execute) { int save_sticky_cmdmod_flags = sticky_cmdmod_flags; // "legacy exe cmd" and "vim9cmd exe cmd" applies to "cmd". sticky_cmdmod_flags = cmdmod.cmod_flags & (CMOD_LEGACY | CMOD_VIM9CMD); do_cmdline((char_u *)ga.ga_data, eap->getline, eap->cookie, DOCMD_NOWAIT|DOCMD_VERBOSE); sticky_cmdmod_flags = save_sticky_cmdmod_flags; } } ga_clear(&ga); if (eap->skip) --emsg_skip; set_nextcmd(eap, arg); } /* * Skip over the name of an option: "&option", "&g:option" or "&l:option". * "arg" points to the "&" or '+' when called, to "option" when returning. * Returns NULL when no option name found. Otherwise pointer to the char * after the option name. 
*/ char_u * find_option_end(char_u **arg, int *scope) { char_u *p = *arg; ++p; if (*p == 'g' && p[1] == ':') { *scope = OPT_GLOBAL; p += 2; } else if (*p == 'l' && p[1] == ':') { *scope = OPT_LOCAL; p += 2; } else *scope = 0; if (!ASCII_ISALPHA(*p)) return NULL; *arg = p; if (p[0] == 't' && p[1] == '_' && p[2] != NUL && p[3] != NUL) p += 4; // termcap option else while (ASCII_ISALPHA(*p)) ++p; return p; } /* * Display script name where an item was last set. * Should only be invoked when 'verbose' is non-zero. */ void last_set_msg(sctx_T script_ctx) { char_u *p; if (script_ctx.sc_sid != 0) { p = home_replace_save(NULL, get_scriptname(script_ctx.sc_sid)); if (p != NULL) { verbose_enter(); msg_puts(_("\n\tLast set from ")); msg_puts((char *)p); if (script_ctx.sc_lnum > 0) { msg_puts(_(line_msg)); msg_outnum((long)script_ctx.sc_lnum); } verbose_leave(); vim_free(p); } } } #endif // FEAT_EVAL /* * Perform a substitution on "str" with pattern "pat" and substitute "sub". * When "sub" is NULL "expr" is used, must be a VAR_FUNC or VAR_PARTIAL. * "flags" can be "g" to do a global substitute. * Returns an allocated string, NULL for error. */ char_u * do_string_sub( char_u *str, char_u *pat, char_u *sub, typval_T *expr, char_u *flags) { int sublen; regmatch_T regmatch; int i; int do_all; char_u *tail; char_u *end; garray_T ga; char_u *ret; char_u *save_cpo; char_u *zero_width = NULL; // Make 'cpoptions' empty, so that the 'l' flag doesn't work here save_cpo = p_cpo; p_cpo = empty_option; ga_init2(&ga, 1, 200); do_all = (flags[0] == 'g'); regmatch.rm_ic = p_ic; regmatch.regprog = vim_regcomp(pat, RE_MAGIC + RE_STRING); if (regmatch.regprog != NULL) { tail = str; end = str + STRLEN(str); while (vim_regexec_nl(&regmatch, str, (colnr_T)(tail - str))) { // Skip empty match except for first match. if (regmatch.startp[0] == regmatch.endp[0]) { if (zero_width == regmatch.startp[0]) { // avoid getting stuck on a match with an empty string i = mb_ptr2len(tail); mch_memmove((char_u *)ga.ga_data + ga.ga_len, tail, (size_t)i); ga.ga_len += i; tail += i; continue; } zero_width = regmatch.startp[0]; } /* * Get some space for a temporary buffer to do the substitution * into. It will contain: * - The text up to where the match is. * - The substituted text. * - The text after the match. */ sublen = vim_regsub(&regmatch, sub, expr, tail, 0, REGSUB_MAGIC); if (ga_grow(&ga, (int)((end - tail) + sublen - (regmatch.endp[0] - regmatch.startp[0]))) == FAIL) { ga_clear(&ga); break; } // copy the text up to where the match is i = (int)(regmatch.startp[0] - tail); mch_memmove((char_u *)ga.ga_data + ga.ga_len, tail, (size_t)i); // add the substituted text (void)vim_regsub(&regmatch, sub, expr, (char_u *)ga.ga_data + ga.ga_len + i, sublen, REGSUB_COPY | REGSUB_MAGIC); ga.ga_len += i + sublen - 1; tail = regmatch.endp[0]; if (*tail == NUL) break; if (!do_all) break; } if (ga.ga_data != NULL) STRCPY((char *)ga.ga_data + ga.ga_len, tail); vim_regfree(regmatch.regprog); } ret = vim_strsave(ga.ga_data == NULL ? str : (char_u *)ga.ga_data); ga_clear(&ga); if (p_cpo == empty_option) p_cpo = save_cpo; else { // Darn, evaluating {sub} expression or {expr} changed the value. // If it's still empty it was changed and restored, need to restore in // the complicated way. if (*p_cpo == NUL) set_option_value_give_err((char_u *)"cpo", 0L, save_cpo, 0); free_string_option(save_cpo); } return ret; }
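// Below are the two versions of eval_next_line(): first as it was before the
// patch, then as patched.  The only difference is that "eval_tofree" is no
// longer freed immediately with vim_free(); it is queued on
// "evalarg->eval_tofree_ga" by free_eval_tofree_later() and only released in
// clear_evalarg() via ga_clear_strings(), so that the previous line is not
// freed while something may still point into it.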
func_before:
eval_next_line(char_u *arg, evalarg_T *evalarg)
{
    garray_T	*gap = &evalarg->eval_ga;
    char_u	*line;

    if (arg != NULL)
    {
	if (*arg == NL)
	    return newline_skip_comments(arg);
	// Truncate before a trailing comment, so that concatenating the lines
	// won't turn the rest into a comment.
	if (*skipwhite(arg) == '#')
	    *arg = NUL;
    }

    if (evalarg->eval_cookie != NULL)
	line = evalarg->eval_getline(0, evalarg->eval_cookie, 0,
							GETLINE_CONCAT_ALL);
    else
	line = next_line_from_context(evalarg->eval_cctx, TRUE);
    if (line == NULL)
	return NULL;

    ++evalarg->eval_break_count;
    if (gap->ga_itemsize > 0 && ga_grow(gap, 1) == OK)
    {
	char_u *p = skipwhite(line);

	// Going to concatenate the lines after parsing. For an empty or
	// comment line use an empty string.
	if (*p == NUL || vim9_comment_start(p))
	{
	    vim_free(line);
	    line = vim_strsave((char_u *)"");
	}

	((char_u **)gap->ga_data)[gap->ga_len] = line;
	++gap->ga_len;
    }
    else if (evalarg->eval_cookie != NULL)
    {
	vim_free(evalarg->eval_tofree);
	evalarg->eval_tofree = line;
    }

    // Advanced to the next line, "arg" no longer points into the previous
    // line.
    evalarg->eval_using_cmdline = FALSE;
    return skipwhite(line);
}
func_after:
eval_next_line(char_u *arg, evalarg_T *evalarg)
{
    garray_T	*gap = &evalarg->eval_ga;
    char_u	*line;

    if (arg != NULL)
    {
	if (*arg == NL)
	    return newline_skip_comments(arg);
	// Truncate before a trailing comment, so that concatenating the lines
	// won't turn the rest into a comment.
	if (*skipwhite(arg) == '#')
	    *arg = NUL;
    }

    if (evalarg->eval_cookie != NULL)
	line = evalarg->eval_getline(0, evalarg->eval_cookie, 0,
							GETLINE_CONCAT_ALL);
    else
	line = next_line_from_context(evalarg->eval_cctx, TRUE);
    if (line == NULL)
	return NULL;

    ++evalarg->eval_break_count;
    if (gap->ga_itemsize > 0 && ga_grow(gap, 1) == OK)
    {
	char_u *p = skipwhite(line);

	// Going to concatenate the lines after parsing. For an empty or
	// comment line use an empty string.
	if (*p == NUL || vim9_comment_start(p))
	{
	    vim_free(line);
	    line = vim_strsave((char_u *)"");
	}

	((char_u **)gap->ga_data)[gap->ga_len] = line;
	++gap->ga_len;
    }
    else if (evalarg->eval_cookie != NULL)
    {
	free_eval_tofree_later(evalarg);
	evalarg->eval_tofree = line;
    }

    // Advanced to the next line, "arg" no longer points into the previous
    // line.
    evalarg->eval_using_cmdline = FALSE;
    return skipwhite(line);
}

diff:
{'added': [(356, '/*'), (357, ' * Initialize "evalarg" for use.'), (358, ' */'), (359, ' void'), (360, 'init_evalarg(evalarg_T *evalarg)'), (361, '{'), (362, ' CLEAR_POINTER(evalarg);'), (363, ' ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20);'), (364, '}'), (365, ''), (366, '/*'), (367, ' * If "evalarg->eval_tofree" is not NULL free it later.'), (368, ' * Caller is expected to overwrite "evalarg->eval_tofree" next.'), (369, ' */'), (370, ' static void'), (371, 'free_eval_tofree_later(evalarg_T *evalarg)'), (372, '{'), (373, ' if (evalarg->eval_tofree != NULL)'), (374, ' {'), (375, '\tif (ga_grow(&evalarg->eval_tofree_ga, 1) == OK)'), (376, '\t ((char_u **)evalarg->eval_tofree_ga.ga_data)'), (377, '\t\t[evalarg->eval_tofree_ga.ga_len++]'), (378, '\t\t= evalarg->eval_tofree;'), (379, '\telse'), (380, '\t vim_free(evalarg->eval_tofree);'), (381, ' }'), (382, '}'), (383, ''), (384, '/*'), (385, ' * After using "evalarg" filled from "eap": free the memory.'), (386, ' */'), (387, ' void'), (388, 'clear_evalarg(evalarg_T *evalarg, exarg_T *eap)'), (389, '{'), (390, ' if (evalarg != NULL)'), (391, ' {'), (392, '\tif (evalarg->eval_tofree != NULL)'), (393, '\t{'), (394, '\t if (eap != NULL)'), (395, '\t {'), (396, '\t\t// We may need to keep the original command line, e.g. for'), (397, '\t\t// ":let" it has the variable names. But we may also need the'), (398, '\t\t// new one, "nextcmd" points into it. Keep both.'), (399, '\t\tvim_free(eap->cmdline_tofree);'), (400, '\t\teap->cmdline_tofree = *eap->cmdlinep;'), (401, '\t\t*eap->cmdlinep = evalarg->eval_tofree;'), (402, '\t }'), (403, '\t else'), (404, '\t\tvim_free(evalarg->eval_tofree);'), (405, '\t evalarg->eval_tofree = NULL;'), (406, '\t}'), (407, ''), (408, '\tga_clear_strings(&evalarg->eval_tofree_ga);'), (409, '\tVIM_CLEAR(evalarg->eval_tofree_lambda);'), (410, ' }'), (411, '}'), (412, ''), (495, '\t\t// later. Also free "eval_tofree" later if needed.'), (496, '\t\tfree_eval_tofree_later(evalarg);'), (2334, '\tfree_eval_tofree_later(evalarg);')], 'deleted': [(438, '\t\t// later.'), (439, '\t\tvim_free(evalarg->eval_tofree);'), (2277, '\tvim_free(evalarg->eval_tofree);'), (2304, '/*'), (2305, ' * Initialize "evalarg" for use.'), (2306, ' */'), (2307, ' void'), (2308, 'init_evalarg(evalarg_T *evalarg)'), (2309, '{'), (2310, ' CLEAR_POINTER(evalarg);'), (2311, ' ga_init2(&evalarg->eval_tofree_ga, sizeof(char_u *), 20);'), (2312, '}'), (2313, ''), (2314, '/*'), (2315, ' * After using "evalarg" filled from "eap": free the memory.'), (2316, ' */'), (2317, ' void'), (2318, 'clear_evalarg(evalarg_T *evalarg, exarg_T *eap)'), (2319, '{'), (2320, ' if (evalarg != NULL)'), (2321, ' {'), (2322, '\tif (evalarg->eval_tofree != NULL)'), (2323, '\t{'), (2324, '\t if (eap != NULL)'), (2325, '\t {'), (2326, '\t\t// We may need to keep the original command line, e.g. for'), (2327, '\t\t// ":let" it has the variable names. But we may also need the'), (2328, '\t\t// new one, "nextcmd" points into it. Keep both.'), (2329, '\t\tvim_free(eap->cmdline_tofree);'), (2330, '\t\teap->cmdline_tofree = *eap->cmdlinep;'), (2331, '\t\t*eap->cmdlinep = evalarg->eval_tofree;'), (2332, '\t }'), (2333, '\t else'), (2334, '\t\tvim_free(evalarg->eval_tofree);'), (2335, '\t evalarg->eval_tofree = NULL;'), (2336, '\t}'), (2337, ''), (2338, '\tga_clear_strings(&evalarg->eval_tofree_ga);'), (2339, '\tVIM_CLEAR(evalarg->eval_tofree_lambda);'), (2340, ' }'), (2341, '}'), (2342, '')]}
num_lines_added: 60
num_lines_deleted: 42
num_lines_in_file: 5,145
num_tokens_in_file: 27,604
num_lines_in_method: 38
num_tokens_in_method: 228
method_complexity: 11
repo: https://github.com/vim/vim
cve_id: CVE-2022-2889
cwe_id: CWE-416
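The fix captured in this record's diff swaps the immediate vim_free(evalarg->eval_tofree) in eval_next_line() for free_eval_tofree_later(), which parks the old buffer in the eval_tofree_ga growable array; clear_evalarg() then releases everything once no parser state can still point into a previous line, closing the CWE-416 use-after-free. Below is a minimal self-contained C sketch of that deferred-free pattern; freelist_T, deferred_free and freelist_clear are illustrative stand-ins for vim's garray_T machinery, not its actual API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    char **items;          /* superseded buffers kept alive until teardown */
    size_t len, cap;
} freelist_T;

/* Instead of free(old) while callers may still hold pointers into it,
 * park the buffer; on allocation failure, freeing immediately is the
 * fallback (matching the else branch of free_eval_tofree_later above). */
static void deferred_free(freelist_T *fl, char *old)
{
    if (old == NULL)
        return;
    if (fl->len == fl->cap) {
        size_t ncap = fl->cap ? fl->cap * 2 : 8;
        char **grown = realloc(fl->items, ncap * sizeof(char *));
        if (grown == NULL) {
            free(old);     /* out of memory: free now, as vim does */
            return;
        }
        fl->items = grown;
        fl->cap = ncap;
    }
    fl->items[fl->len++] = old;
}

/* Counterpart of clear_evalarg()/ga_clear_strings(): one bulk release. */
static void freelist_clear(freelist_T *fl)
{
    for (size_t i = 0; i < fl->len; i++)
        free(fl->items[i]);
    free(fl->items);
    fl->items = NULL;
    fl->len = fl->cap = 0;
}

int main(void)
{
    freelist_T fl = {0};

    char *line = strdup("first continuation line");
    char *stale = line;               /* parser state still aliases "line" */

    deferred_free(&fl, line);         /* the buggy version did free(line) */
    line = strdup("next continuation line");

    printf("%s / %s\n", stale, line); /* safe: the old buffer is still alive */

    free(line);
    freelist_clear(&fl);
    return 0;
}

The design choice mirrors vim's: freeing each superseded buffer on the spot is unsafe while other pointers may alias it, so ownership moves into a container whose single teardown (clear_evalarg() in vim, freelist_clear() here) runs only after all aliases are dead.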
id: 20
file_name: scene_dump.c
programming_language: C
method_name: gf_dump_vrml_field

code_before:
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2022 * All rights reserved * * This file is part of GPAC / Scene Management sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/scene_manager.h> #include <gpac/constants.h> #include <gpac/utf.h> #include <gpac/internal/scenegraph_dev.h> #include <gpac/nodes_x3d.h> #include <gpac/nodes_svg.h> #include <gpac/events.h> #include <gpac/base_coding.h> #ifndef __SYMBIAN32__ #include <wchar.h> #endif #ifndef GPAC_DISABLE_SCENE_DUMP /*for QP types*/ #include "../bifs/quant.h" struct _scenedump { /*the scene we're dumping - set at each SceneReplace or mannually*/ GF_SceneGraph *sg; #ifndef GPAC_DISABLE_VRML /*the proto we're dumping*/ GF_Proto *current_proto; #endif FILE *trace; u32 indent; char *filename; GF_SceneDumpFormat dump_mode; u16 CurrentESID; u8 ind_char; Bool XMLDump, X3DDump, LSRDump; GF_List *dump_nodes; /*nodes created through conditionals while parsing but not applied*/ GF_List *mem_def_nodes; Bool skip_scene_replace; /*for route insert/replace in conditionals in current scene replace*/ GF_List *current_com_list; GF_List *inserted_routes; Bool in_text; }; static GF_Err gf_dump_vrml_route(GF_SceneDumper *sdump, GF_Route *r, u32 dump_type); static void gf_dump_vrml_node(GF_SceneDumper *sdump, GF_Node *node, Bool in_list, char *fieldContainer); #ifndef GPAC_DISABLE_SVG void gf_dump_svg_element(GF_SceneDumper *sdump, GF_Node *n, GF_Node *parent, Bool is_root); #endif GF_EXPORT GF_SceneDumper *gf_sm_dumper_new(GF_SceneGraph *graph, char *_rad_name, Bool is_final_name, char indent_char, GF_SceneDumpFormat dump_mode) { GF_SceneDumper *tmp; if (!graph) return NULL; GF_SAFEALLOC(tmp, GF_SceneDumper); if (!tmp) return NULL; /*store original*/ tmp->dump_mode = dump_mode; #ifndef GPAC_DISABLE_SVG if ((graph->RootNode && (graph->RootNode->sgprivate->tag>=GF_NODE_RANGE_LAST_VRML) ) || (dump_mode==GF_SM_DUMP_LASER) || (dump_mode==GF_SM_DUMP_SVG)) { tmp->XMLDump = GF_TRUE; if (dump_mode==GF_SM_DUMP_LASER) { tmp->LSRDump = GF_TRUE; } if (_rad_name) { const char* ext_name = tmp->LSRDump ? 
".xsr" : ".svg"; tmp->filename = (char *)gf_malloc(strlen(_rad_name) + strlen(ext_name) + 1); strcpy(tmp->filename, _rad_name); if (!is_final_name) strcat(tmp->filename, ext_name); tmp->trace = gf_fopen(tmp->filename, "wt"); if (!tmp->trace) { gf_free(tmp); return NULL; } } else { tmp->trace = stdout; } } else #endif { if (dump_mode==GF_SM_DUMP_AUTO_TXT) { if (!graph->RootNode || (graph->RootNode->sgprivate->tag<=GF_NODE_RANGE_LAST_MPEG4) ) { dump_mode = GF_SM_DUMP_BT; } else if (graph->RootNode->sgprivate->tag<=GF_NODE_RANGE_LAST_X3D) { dump_mode = GF_SM_DUMP_X3D_VRML; } } else if (dump_mode==GF_SM_DUMP_AUTO_XML) { if (!graph->RootNode || (graph->RootNode->sgprivate->tag<=GF_NODE_RANGE_LAST_MPEG4) ) { dump_mode = GF_SM_DUMP_XMTA; } else { dump_mode = GF_SM_DUMP_X3D_XML; } } if (_rad_name) { const char* ext_name; switch (dump_mode) { case GF_SM_DUMP_X3D_XML: ext_name = ".x3d"; tmp->XMLDump = GF_TRUE; tmp->X3DDump = GF_TRUE; break; case GF_SM_DUMP_XMTA: ext_name = ".xmt"; tmp->XMLDump = GF_TRUE; break; case GF_SM_DUMP_X3D_VRML: ext_name = ".x3dv"; tmp->X3DDump = GF_TRUE; break; case GF_SM_DUMP_VRML: ext_name = ".wrl"; break; default: ext_name = ".bt"; break; } tmp->filename = (char *)gf_malloc(strlen(_rad_name ? _rad_name : "") + strlen(ext_name) + 1); strcpy(tmp->filename, _rad_name ? _rad_name : ""); if (!is_final_name) strcat(tmp->filename, ext_name); tmp->trace = gf_fopen(tmp->filename, "wt"); if (!tmp->trace) { gf_free(tmp); return NULL; } } else { tmp->trace = stdout; switch (dump_mode) { case GF_SM_DUMP_X3D_XML: tmp->XMLDump = GF_TRUE; tmp->X3DDump = GF_TRUE; break; case GF_SM_DUMP_XMTA: tmp->XMLDump = GF_TRUE; break; case GF_SM_DUMP_X3D_VRML: tmp->X3DDump = GF_TRUE; break; default: break; } } } tmp->ind_char = indent_char; tmp->dump_nodes = gf_list_new(); tmp->mem_def_nodes = gf_list_new(); tmp->inserted_routes = gf_list_new(); tmp->sg = graph; return tmp; } GF_EXPORT void gf_sm_dumper_set_extra_graph(GF_SceneDumper *sdump, GF_SceneGraph *extra) { sdump->sg = extra; } GF_EXPORT void gf_sm_dumper_del(GF_SceneDumper *sdump) { gf_list_del(sdump->dump_nodes); while (gf_list_count(sdump->mem_def_nodes)) { GF_Node *tmp = (GF_Node *)gf_list_get(sdump->mem_def_nodes, 0); gf_list_rem(sdump->mem_def_nodes, 0); gf_node_unregister(tmp, NULL); } gf_list_del(sdump->mem_def_nodes); gf_list_del(sdump->inserted_routes); if (sdump->trace != stdout) gf_fclose(sdump->trace); if (sdump->filename) { gf_free(sdump->filename); sdump->filename = NULL; } gf_free(sdump); } char *gf_sm_dump_get_name(GF_SceneDumper *bd) { if (!bd) return NULL; return bd->filename; } static void gf_dump_setup(GF_SceneDumper *sdump, GF_Descriptor *root_od) { if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); if (sdump->dump_mode==GF_SM_DUMP_XML) return; gf_fprintf(sdump->trace, "<!-- %s Scene Dump -->\n", (sdump->dump_mode==GF_SM_DUMP_SVG) ? "SVG" : (sdump->dump_mode==GF_SM_DUMP_LASER) ? "LASeR" : sdump->X3DDump ? 
"X3D" : "XMT-A" ); } if (sdump->dump_mode==GF_SM_DUMP_SVG) return; if (sdump->LSRDump) { gf_fprintf(sdump->trace, "<saf:SAFSession xmlns:saf=\"urn:mpeg:mpeg4:SAF:2005\" >\n"); #ifndef GPAC_DISABLE_OD_DUMP if (root_od) { GF_ObjectDescriptor *iod = (GF_ObjectDescriptor *)root_od; u32 i, count; gf_fprintf(sdump->trace, "<saf:sceneHeader>\n"); count = gf_list_count(iod->ESDescriptors); for (i=0; i<count; i++) { GF_LASERConfig lsrcfg; GF_ESD *esd = (GF_ESD *)gf_list_get(iod->ESDescriptors, i); if (!esd || !esd->decoderConfig) continue; if (esd->decoderConfig->streamType != GF_STREAM_SCENE) continue; if (esd->decoderConfig->objectTypeIndication != 0x09) continue; if (!esd->decoderConfig->decoderSpecificInfo || !esd->decoderConfig->decoderSpecificInfo->data) continue; gf_odf_get_laser_config(esd->decoderConfig->decoderSpecificInfo, &lsrcfg); gf_odf_dump_desc((GF_Descriptor*)&lsrcfg, sdump->trace, 1, 1); } gf_fprintf(sdump->trace, "</saf:sceneHeader>\n"); } #endif return; } if (!sdump->X3DDump) { /*setup XMT*/ if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<XMT-A xmlns=\"urn:mpeg:mpeg4:xmta:schema:2002\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"urn:mpeg:mpeg4:xmta:schema:2002 xmt-a.xsd\">\n"); gf_fprintf(sdump->trace, " <Header>\n"); #ifndef GPAC_DISABLE_OD_DUMP if (root_od) gf_odf_dump_desc(root_od, sdump->trace, 1, 1); #endif gf_fprintf(sdump->trace, " </Header>\n"); gf_fprintf(sdump->trace, " <Body>\n"); if (!root_od) { gf_fprintf(sdump->trace, " <Replace>\n"); } } else { if (sdump->dump_mode==GF_SM_DUMP_VRML) { gf_fprintf(sdump->trace, "#VRML V2.0\n"); } else { /*dump root OD*/ #ifndef GPAC_DISABLE_OD_DUMP if (root_od) gf_odf_dump_desc(root_od, sdump->trace, 0, 0); #endif } gf_fprintf(sdump->trace, "\n"); } } else { if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<!DOCTYPE X3D PUBLIC \"ISO//Web3D//DTD X3D 3.0//EN\" \"http://www.web3d.org/specifications/x3d-3.0.dtd\">\n"); gf_fprintf(sdump->trace, "<X3D xmlns:xsd=\"http://www.w3.org/2001/XMLSchema-instance\" xsd:noNamespaceSchemaLocation=\"http://www.web3d.org/specifications/x3d-3.0.xsd\" version=\"3.0\">\n"); gf_fprintf(sdump->trace, "<head>\n"); gf_fprintf(sdump->trace, "<meta content=\"X3D File Converted/Dumped by GPAC Version %s - %s\" name=\"generator\"/>\n", gf_gpac_version(), gf_gpac_copyright() ); gf_fprintf(sdump->trace, "</head>\n"); gf_fprintf(sdump->trace, " <Scene>\n"); } else { gf_fprintf(sdump->trace, "#X3D V3.0\n\n"); } } } static void gf_dump_finalize(GF_SceneDumper *sdump, GF_Descriptor *root_od) { if (sdump->dump_mode==GF_SM_DUMP_SVG) return; if (sdump->LSRDump) { gf_fprintf(sdump->trace, "<saf:endOfSAFSession/>\n</saf:SAFSession>\n"); return; } if (!sdump->XMLDump) return; if (!sdump->X3DDump) { if (!root_od) { gf_fprintf(sdump->trace, " </Replace>\n"); } gf_fprintf(sdump->trace, " </Body>\n"); gf_fprintf(sdump->trace, "</XMT-A>\n"); } else { gf_fprintf(sdump->trace, " </Scene>\n"); gf_fprintf(sdump->trace, "</X3D>\n"); } } static Bool gf_dump_vrml_is_def_node(GF_SceneDumper *sdump, GF_Node *node) { s32 i = gf_list_find(sdump->dump_nodes, node); if (i>=0) return 0; gf_list_add(sdump->dump_nodes, node); return 1; } static GF_Node *gf_dump_find_node(GF_SceneDumper *sdump, u32 ID) { GF_Node *ret = gf_sg_find_node(sdump->sg, ID); if (ret) return ret; return NULL; } #define DUMP_IND(sdump) \ if (sdump->trace) { \ u32 z; \ for (z=0; z<sdump->indent; z++) gf_fprintf(sdump->trace, "%c", sdump->ind_char); \ } static void StartElement(GF_SceneDumper *sdump, const char *name) { if (!sdump->trace) 
return; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "%s {\n", name); } else { gf_fprintf(sdump->trace, "<%s", name); } } static void EndElementHeader(GF_SceneDumper *sdump, Bool has_sub_el) { if (!sdump->trace) return; if (sdump->XMLDump) { if (has_sub_el) { gf_fprintf(sdump->trace, ">\n"); } else { gf_fprintf(sdump->trace, "/>\n"); } } } static void EndElement(GF_SceneDumper *sdump, const char *name, Bool had_sub_el) { if (!sdump->trace) return; if (!sdump->XMLDump) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "}\n"); } else { if (had_sub_el) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "</%s>\n", name); } } } static void StartAttribute(GF_SceneDumper *sdump, const char *name) { if (!sdump->trace) return; if (!sdump->XMLDump) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "%s ", name); } else { gf_fprintf(sdump->trace, " %s=\"", name); } } static void EndAttribute(GF_SceneDumper *sdump) { if (!sdump->trace) return; if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "\n"); } else { gf_fprintf(sdump->trace, "\""); } } static void StartList(GF_SceneDumper *sdump, const char *name) { if (!sdump->trace) return; DUMP_IND(sdump); if (!sdump->XMLDump) { if (name) gf_fprintf(sdump->trace, "%s [\n", name); else gf_fprintf(sdump->trace, "[\n"); } else { gf_fprintf(sdump->trace, "<%s>\n", name); } } static void EndList(GF_SceneDumper *sdump, const char *name) { if (!sdump->trace) return; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "]\n"); } else { gf_fprintf(sdump->trace, "</%s>\n", name); } } static void scene_dump_utf_string(GF_SceneDumper *sdump, Bool escape_xml, char *str) { u32 len, i; u16 *uniLine; if (!str) return; len = (u32) strlen(str); if (!len) return; uniLine = (u16*)gf_malloc(sizeof(u16) * len*4); len = gf_utf8_mbstowcs(uniLine, len, (const char **) &str); if (len != GF_UTF8_FAIL) { for (i=0; i<len; i++) { //if (uniLine[i] == (u16) '\"') gf_fprintf(sdump->trace, "\\"); switch (uniLine[i]) { case '\'': if (escape_xml) gf_fprintf(sdump->trace, "&apos;"); else gf_fprintf(sdump->trace, "'"); break; case '\"': if (escape_xml) gf_fprintf(sdump->trace, "&quot;"); else gf_fprintf(sdump->trace, "\""); break; case '&': gf_fprintf(sdump->trace, "&amp;"); break; case '>': gf_fprintf(sdump->trace, "&gt;"); break; case '<': gf_fprintf(sdump->trace, "&lt;"); break; case '\r': case '\n': /* Does nothing : gf_fprintf(sdump->trace, "");, fflush instead ?*/ break; default: if (uniLine[i]<128) { gf_fprintf(sdump->trace, "%c", (u8) uniLine[i]); } else { gf_fprintf(sdump->trace, "&#%d;", uniLine[i]); } break; } } } gf_free(uniLine); } #ifndef GPAC_DISABLE_VRML static void scene_dump_vrml_id(GF_SceneDumper *sdump, GF_Node *node) { u32 id; const char *node_name; if (!sdump->trace) return; /*FIXME - optimize id/name fetch*/ node_name = gf_node_get_name_and_id(node, &id); if (node_name) gf_fprintf(sdump->trace, "%s", node_name); else gf_fprintf(sdump->trace, "N%d", id - 1); } static Bool scene_dump_vrml_find_route_name(GF_SceneDumper *sdump, u32 ID, const char **outName) { GF_Route *r; u32 i; GF_Command *com; r = gf_sg_route_find(sdump->sg, ID); if (r) { (*outName) = r->name; return 1; } i=0; while ((com = (GF_Command *)gf_list_enum(sdump->inserted_routes, &i))) { if (com->tag == GF_SG_ROUTE_INSERT) { if (com->RouteID==ID) { (*outName) = com->def_name; return 1; } } } if (!sdump->current_com_list) return 0; i=1; while ((com = (GF_Command *)gf_list_enum(sdump->current_com_list, &i))) { if ((com->tag == GF_SG_ROUTE_INSERT) || (com->tag == GF_SG_ROUTE_REPLACE)) { if 
(com->RouteID==ID) { (*outName) = com->def_name; return 1; } } else return 0; } return 0; } static void scene_dump_vrml_route_id(GF_SceneDumper *sdump, u32 routeID, char *rName) { if (!sdump->trace) return; if (!rName) scene_dump_vrml_find_route_name(sdump, routeID, (const char **) &rName); if (rName) gf_fprintf(sdump->trace, "%s", rName); else gf_fprintf(sdump->trace, "R%d", routeID - 1); } static void gf_dump_vrml_sffield(GF_SceneDumper *sdump, u32 type, void *ptr, Bool is_mf, GF_Node *node) { switch (type) { case GF_SG_VRML_SFBOOL: gf_fprintf(sdump->trace, "%s", * ((SFBool *)ptr) ? "true" : "false"); break; case GF_SG_VRML_SFINT32: gf_fprintf(sdump->trace, "%d", * ((SFInt32 *)ptr) ); break; case GF_SG_VRML_SFFLOAT: gf_fprintf(sdump->trace, "%g", FIX2FLT( * ((SFFloat *)ptr) ) ); break; case GF_SG_VRML_SFDOUBLE: gf_fprintf(sdump->trace, "%g", * ((SFDouble *)ptr) ); break; case GF_SG_VRML_SFTIME: gf_fprintf(sdump->trace, "%g", * ((SFTime *)ptr) ); break; case GF_SG_VRML_SFCOLOR: gf_fprintf(sdump->trace, "%g %g %g", FIX2FLT( ((SFColor *)ptr)->red ), FIX2FLT( ((SFColor *)ptr)->green ), FIX2FLT( ((SFColor *)ptr)->blue )); break; case GF_SG_VRML_SFCOLORRGBA: gf_fprintf(sdump->trace, "%g %g %g %g", FIX2FLT( ((SFColorRGBA *)ptr)->red ), FIX2FLT( ((SFColorRGBA *)ptr)->green ), FIX2FLT( ((SFColorRGBA *)ptr)->blue ), FIX2FLT( ((SFColorRGBA *)ptr)->alpha )); break; case GF_SG_VRML_SFVEC2F: gf_fprintf(sdump->trace, "%g %g", FIX2FLT( ((SFVec2f *)ptr)->x ), FIX2FLT( ((SFVec2f *)ptr)->y )); break; case GF_SG_VRML_SFVEC2D: gf_fprintf(sdump->trace, "%g %g", ((SFVec2d *)ptr)->x, ((SFVec2d *)ptr)->y); break; case GF_SG_VRML_SFVEC3F: gf_fprintf(sdump->trace, "%g %g %g", FIX2FLT( ((SFVec3f *)ptr)->x ), FIX2FLT( ((SFVec3f *)ptr)->y ), FIX2FLT( ((SFVec3f *)ptr)->z )); break; case GF_SG_VRML_SFVEC3D: gf_fprintf(sdump->trace, "%g %g %g", ((SFVec3d *)ptr)->x, ((SFVec3d *)ptr)->y, ((SFVec3d *)ptr)->z); break; case GF_SG_VRML_SFROTATION: gf_fprintf(sdump->trace, "%g %g %g %g", FIX2FLT( ((SFRotation *)ptr)->x ), FIX2FLT( ((SFRotation *)ptr)->y ), FIX2FLT( ((SFRotation *)ptr)->z ), FIX2FLT( ((SFRotation *)ptr)->q ) ); break; case GF_SG_VRML_SFATTRREF: { SFAttrRef *ar = (SFAttrRef *)ptr; if (ar->node) { GF_FieldInfo pinfo; gf_node_get_field(ar->node, ar->fieldIndex, &pinfo); scene_dump_vrml_id(sdump, ar->node); gf_fprintf(sdump->trace, ".%s", pinfo.name); } } break; case GF_SG_VRML_SFSCRIPT: { u32 len, i; char *str; str = (char*)((SFScript *)ptr)->script_text; if (!str) { if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "\"\""); } break; } len = (u32)strlen(str); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "\"%s\"", str); } else { u16 *uniLine; uniLine = (u16*)gf_malloc(sizeof(short) * (len + 1)); len = gf_utf8_mbstowcs(uniLine, len, (const char **)&str); if (len != GF_UTF8_FAIL) { for (i = 0; i<len; i++) { switch (uniLine[i]) { case '&': gf_fprintf(sdump->trace, "&amp;"); break; case '<': gf_fprintf(sdump->trace, "&lt;"); break; case '>': gf_fprintf(sdump->trace, "&gt;"); break; case '\'': case '"': gf_fprintf(sdump->trace, "&apos;"); break; case 0: break; /*FIXME: how the heck can we preserve newlines and spaces of JavaScript in an XML attribute in any viewer ? */ default: if (uniLine[i]<128) { gf_fprintf(sdump->trace, "%c", (u8)uniLine[i]); } else { gf_fprintf(sdump->trace, "&#%d;", uniLine[i]); } break; } } } gf_free(uniLine); } DUMP_IND(sdump); } break; case GF_SG_VRML_SFSTRING: { char *str; if (sdump->XMLDump) { if (is_mf) gf_fprintf(sdump->trace, sdump->X3DDump ? 
"\"" : "&quot;"); } else { gf_fprintf(sdump->trace, "\""); } /*dump in unicode*/ str = ((SFString *)ptr)->buffer; if (node && (gf_node_get_tag(node)==TAG_MPEG4_BitWrapper)) { u32 bufsize = 37 + ((M_BitWrapper*)node)->buffer_len * 2 + 3; str = gf_malloc(sizeof(char) * bufsize); if (str) { s32 res; strcpy(str, "data:application/octet-string;base64,"); res = gf_base64_encode(((M_BitWrapper*)node)->buffer.buffer, ((M_BitWrapper*)node)->buffer_len, str+37, bufsize-37); if (res<0) { gf_free(str); str = NULL; } else { str[res+37] = 0; } } } if (str && str[0]) { if (sdump->XMLDump) { scene_dump_utf_string(sdump, 1, str); } else if (!strchr(str, '\"')) { gf_fprintf(sdump->trace, "%s", str); } else { u32 i, len = (u32)strlen(str); for (i=0; i<len; i++) { if (str[i]=='\"') gf_fputc('\\', sdump->trace); gf_fputc(str[i], sdump->trace); } } } if (node && (gf_node_get_tag(node)==TAG_MPEG4_BitWrapper)) { if (str) gf_free(str); } if (sdump->XMLDump) { if (is_mf) gf_fprintf(sdump->trace, sdump->X3DDump ? "\"" : "&quot;"); } else { gf_fprintf(sdump->trace, "\""); } } break; case GF_SG_VRML_SFURL: if (((SFURL *)ptr)->url) { #if 0 u32 len; char *str; short uniLine[5000]; str = ((SFURL *)ptr)->url; len = gf_utf8_mbstowcs(uniLine, 5000, (const char **) &str); if (len != GF_UTF8_FAIL) { gf_fprintf(sdump->trace, sdump->XMLDump ? (sdump->X3DDump ? "'" : "&quot;") : "\""); fwprintf(sdump->trace, (unsigned short *) uniLine); gf_fprintf(sdump->trace, sdump->XMLDump ? (sdump->X3DDump ? "'" : "&quot;") : "\""); } #else gf_fprintf(sdump->trace, sdump->XMLDump ? (sdump->X3DDump ? "'" : "&quot;") : "\""); gf_fprintf(sdump->trace, "%s", ((SFURL *)ptr)->url); gf_fprintf(sdump->trace, sdump->XMLDump ? (sdump->X3DDump ? "'" : "&quot;") : "\""); #endif } else { if (sdump->XMLDump) { gf_fprintf(sdump->trace, "&quot;od://od%d&quot;", ((SFURL *)ptr)->OD_ID); } else { gf_fprintf(sdump->trace, "od:%d", ((SFURL *)ptr)->OD_ID); } } break; case GF_SG_VRML_SFIMAGE: { u32 i, count; SFImage *img = (SFImage *)ptr; gf_fprintf(sdump->trace, "%d %d %d", img->width, img->height, img->numComponents); count = img->width * img->height * img->numComponents; for (i=0; i<count; ) { switch (img->numComponents) { case 1: gf_fprintf(sdump->trace, " 0x%02X", img->pixels[i]); i++; break; case 2: gf_fprintf(sdump->trace, " 0x%02X%02X", img->pixels[i], img->pixels[i+1]); i+=2; break; case 3: gf_fprintf(sdump->trace, " 0x%02X%02X%02X", img->pixels[i], img->pixels[i+1], img->pixels[i+2]); i+=3; break; case 4: gf_fprintf(sdump->trace, " 0x%02X%02X%02X%02X", img->pixels[i], img->pixels[i+1], img->pixels[i+2], img->pixels[i+3]); i+=4; break; } } } break; } } static void gf_dump_vrml_simple_field(GF_SceneDumper *sdump, GF_FieldInfo field, GF_Node *parent) { u32 i, sf_type; GF_ChildNodeItem *list; void *slot_ptr; switch (field.fieldType) { case GF_SG_VRML_SFNODE: gf_dump_vrml_node(sdump, field.far_ptr ? 
*(GF_Node **)field.far_ptr : NULL, 0, NULL); return; case GF_SG_VRML_MFNODE: list = * ((GF_ChildNodeItem **) field.far_ptr); assert( list ); sdump->indent++; while (list) { gf_dump_vrml_node(sdump, list->node, 1, NULL); list = list->next; } sdump->indent--; return; case GF_SG_VRML_SFCOMMANDBUFFER: return; } if (gf_sg_vrml_is_sf_field(field.fieldType)) { if (sdump->XMLDump) StartAttribute(sdump, "value"); gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, parent); if (sdump->XMLDump) EndAttribute(sdump); } else { GenMFField *mffield; mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "["); } else if (sf_type==GF_SG_VRML_SFSTRING) { gf_fprintf(sdump->trace, " value=\'"); } else { StartAttribute(sdump, "value"); } for (i=0; i<mffield->count; i++) { if (i) gf_fprintf(sdump->trace, " "); gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); /*this is to cope with single MFString which shall appear as SF in XMT*/ gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, parent); } if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "]"); } else if (sf_type==GF_SG_VRML_SFSTRING) { gf_fprintf(sdump->trace, "\'"); } else { EndAttribute(sdump); } } } static void gf_dump_vrml_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field) { u32 i, sf_type; Bool needs_field_container; GF_ChildNodeItem *list; void *slot_ptr; switch (field.fieldType) { case GF_SG_VRML_SFNODE: assert ( *(GF_Node **)field.far_ptr); if (sdump->XMLDump) { if (!sdump->X3DDump) { StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; } } else { StartAttribute(sdump, field.name); } gf_dump_vrml_node(sdump, *(GF_Node **)field.far_ptr, 0, NULL); if (sdump->XMLDump) { if (!sdump->X3DDump) { sdump->indent--; EndElement(sdump, (char *) field.name, 1); } } else { EndAttribute(sdump); } return; case GF_SG_VRML_MFNODE: needs_field_container = 0; if (sdump->XMLDump && sdump->X3DDump) { u32 count, nb_ndt; GF_FieldInfo info; if (!strcmp(field.name, "children")) { needs_field_container = 0; } else { nb_ndt = 0; count = gf_node_get_field_count(node); for (i=0; i<count; i++) { gf_node_get_field(node, i, &info); if ((info.eventType==GF_SG_EVENT_IN) || (info.eventType==GF_SG_EVENT_OUT)) continue; if (info.NDTtype==field.NDTtype) nb_ndt++; } needs_field_container = (nb_ndt>1) ? 1 : 0; } } #ifndef GPAC_DISABLE_X3D if (!sdump->X3DDump) { if (gf_node_get_tag(node)==TAG_X3D_Switch) field.name = "choice"; } #endif list = * ((GF_ChildNodeItem **) field.far_ptr); assert(list); if (!sdump->XMLDump || !sdump->X3DDump) StartList(sdump, field.name); sdump->indent++; while (list) { gf_dump_vrml_node(sdump, list->node, 1, needs_field_container ? 
(char *) field.name : NULL); list = list->next; } sdump->indent--; if (!sdump->XMLDump || !sdump->X3DDump) EndList(sdump, field.name); return; case GF_SG_VRML_SFCOMMANDBUFFER: { SFCommandBuffer *cb = (SFCommandBuffer *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; if (!gf_list_count(cb->commandList)) { /*the arch does not allow for that (we would need a codec and so on, or decompress the command list in all cases...)*/ if (sdump->trace && cb->bufferSize) { if (sdump->XMLDump) gf_fprintf(sdump->trace, "<!--SFCommandBuffer cannot be dumped while playing - use MP4Box instead-->\n"); else gf_fprintf(sdump->trace, "#SFCommandBuffer cannot be dumped while playing - use MP4Box instead\n"); } } else { gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent, 0); } sdump->indent--; EndElement(sdump, (char *) field.name, 1); } return; case GF_SG_VRML_MFATTRREF: if (sdump->XMLDump) { MFAttrRef *ar = (MFAttrRef *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; for (i=0; i<ar->count; i++) { if (ar->vals[i].node) { GF_FieldInfo pinfo; DUMP_IND(sdump); gf_node_get_field(ar->vals[i].node, ar->vals[i].fieldIndex, &pinfo); gf_fprintf(sdump->trace, "<store node=\""); scene_dump_vrml_id(sdump, ar->vals[i].node); gf_fprintf(sdump->trace, "\" field=\"%s\"/>\n", pinfo.name); } } sdump->indent--; EndElement(sdump, (char *) field.name, 1); return; } break; } if (gf_sg_vrml_is_sf_field(field.fieldType)) { StartAttribute(sdump, field.name); gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node); EndAttribute(sdump); } else { GenMFField *mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, " %s=\'", (char *) field.name); break; default: StartAttribute(sdump, field.name); break; } } else { StartAttribute(sdump, field.name); } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "["); if (mffield) { for (i=0; i<mffield->count; i++) { if (i) gf_fprintf(sdump->trace, " "); gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, node); } } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "]"); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, "\'"); break; default: EndAttribute(sdump); break; } } else { EndAttribute(sdump); } } } static const char *GetXMTFieldTypeName(u32 fieldType) { switch (fieldType) { case GF_SG_VRML_SFBOOL: return "Boolean"; case GF_SG_VRML_SFINT32: return "Integer"; case GF_SG_VRML_SFCOLOR: return "Color"; case GF_SG_VRML_SFVEC2F: return "Vector2"; case GF_SG_VRML_SFIMAGE: return "Image"; case GF_SG_VRML_SFTIME: return "Time"; case GF_SG_VRML_SFFLOAT: return "Float"; case GF_SG_VRML_SFVEC3F: return "Vector3"; case GF_SG_VRML_SFROTATION: return "Rotation"; case GF_SG_VRML_SFSTRING: return "String"; case GF_SG_VRML_SFNODE: return "Node"; case GF_SG_VRML_MFBOOL: return "Booleans"; case GF_SG_VRML_MFINT32: return "Integers"; case GF_SG_VRML_MFCOLOR: return "Colors"; case GF_SG_VRML_MFVEC2F: return "Vector2Array"; case GF_SG_VRML_MFIMAGE: return "Images"; case GF_SG_VRML_MFTIME: return "Times"; case GF_SG_VRML_MFFLOAT: return "Floats"; case GF_SG_VRML_MFVEC3F: return "Vector3Array"; case GF_SG_VRML_MFROTATION: return 
"Rotations"; case GF_SG_VRML_MFSTRING: return "Strings"; case GF_SG_VRML_MFNODE: return "Nodes"; default: return "unknown"; } } static const char *GetXMTFieldTypeValueName(u32 fieldType) { switch (fieldType) { case GF_SG_VRML_SFBOOL: return "booleanValue"; case GF_SG_VRML_SFINT32: return "intValue"; case GF_SG_VRML_SFCOLOR: return "colorValue"; case GF_SG_VRML_SFVEC2F: return "vector2Value"; case GF_SG_VRML_SFIMAGE: return "imageValue"; case GF_SG_VRML_SFTIME: return "timeValue"; case GF_SG_VRML_SFFLOAT: return "floatValue"; case GF_SG_VRML_SFVEC3F: return "vector3Value"; case GF_SG_VRML_SFROTATION: return "rotationValue"; case GF_SG_VRML_SFSTRING: return "stringValue"; case GF_SG_VRML_MFBOOL: return "booleanArrayValue"; case GF_SG_VRML_MFINT32: return "intArrayValue"; case GF_SG_VRML_MFCOLOR: return "colorArrayValue"; case GF_SG_VRML_MFVEC2F: return "vector2ArrayValue"; case GF_SG_VRML_MFIMAGE: return "imageArrayValue"; case GF_SG_VRML_MFTIME: return "timeArrayValue"; case GF_SG_VRML_MFFLOAT: return "floatArrayValue"; case GF_SG_VRML_MFVEC3F: return "vector3ArrayValue"; case GF_SG_VRML_MFROTATION: return "rotationArrayValue"; case GF_SG_VRML_MFSTRING: return "stringArrayValue"; default: return "unknown"; } } /*field dumping for proto declaration and Script*/ static void gf_dump_vrml_dyn_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field, Bool has_sublist) { u32 i, sf_type; void *slot_ptr; if (gf_sg_vrml_is_sf_field(field.fieldType)) { DUMP_IND(sdump); if (sdump->XMLDump) { if (sdump->X3DDump) { gf_fprintf(sdump->trace, "<field name=\"%s\" type=\"%s\" accessType=\"%s\"", field.name, gf_sg_vrml_get_field_type_name(field.fieldType), gf_sg_vrml_get_event_type_name(field.eventType, 1)); } else { gf_fprintf(sdump->trace, "<field name=\"%s\" type=\"%s\" vrml97Hint=\"%s\"", field.name, GetXMTFieldTypeName(field.fieldType), gf_sg_vrml_get_event_type_name(field.eventType, 0)); } if ((field.eventType == GF_SG_EVENT_FIELD) || (field.eventType == GF_SG_EVENT_EXPOSED_FIELD)) { if (field.fieldType == GF_SG_VRML_SFNODE) { if (!sdump->X3DDump) { gf_fprintf(sdump->trace, ">\n"); sdump->indent++; gf_fprintf(sdump->trace, "<node>"); gf_dump_vrml_node(sdump, field.far_ptr ? *(GF_Node **)field.far_ptr : NULL, 0, NULL); gf_fprintf(sdump->trace, "</node>"); sdump->indent--; if (!has_sublist) gf_fprintf(sdump->trace, "</field>\n"); } else { if (field.far_ptr) { gf_fprintf(sdump->trace, ">\n"); gf_dump_vrml_node(sdump, *(GF_Node **)field.far_ptr, 0, NULL); gf_fprintf(sdump->trace, "</field>\n"); } else { gf_fprintf(sdump->trace, "/>\n"); } } DUMP_IND(sdump); } else { if (sdump->X3DDump) { gf_fprintf(sdump->trace, " value=\""); } else { gf_fprintf(sdump->trace, " %s=\"", GetXMTFieldTypeValueName(field.fieldType)); } if (field.far_ptr) gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node); if (has_sublist) gf_fprintf(sdump->trace, "\">\n"); else gf_fprintf(sdump->trace, "\"/>\n"); } } else { gf_fprintf(sdump->trace, "/>\n"); } } else { gf_fprintf(sdump->trace, "%s %s %s", gf_sg_vrml_get_event_type_name(field.eventType, sdump->X3DDump), gf_sg_vrml_get_field_type_name(field.fieldType), field.name); if ((field.eventType==GF_SG_EVENT_FIELD) || (field.eventType==GF_SG_EVENT_EXPOSED_FIELD)) { gf_fprintf(sdump->trace, " "); if (field.fieldType == GF_SG_VRML_SFNODE) { gf_dump_vrml_node(sdump, field.far_ptr ? 
*(GF_Node **)field.far_ptr : NULL, 0, NULL); } else if (field.far_ptr) { gf_dump_vrml_simple_field(sdump, field, node); } } gf_fprintf(sdump->trace, "\n"); } } else if (field.far_ptr) { GenMFField *mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "%s %s %s", gf_sg_vrml_get_event_type_name(field.eventType, sdump->X3DDump), gf_sg_vrml_get_field_type_name(field.fieldType), field.name); if ((field.eventType==GF_SG_EVENT_FIELD) || (field.eventType==GF_SG_EVENT_EXPOSED_FIELD)) { gf_fprintf(sdump->trace, " ["); if (sf_type == GF_SG_VRML_SFNODE) { GF_ChildNodeItem *l = *(GF_ChildNodeItem **)field.far_ptr; gf_fprintf(sdump->trace, "\n"); sdump->indent++; while (l) { gf_dump_vrml_node(sdump, l->node, 1, NULL); l = l->next; } sdump->indent--; DUMP_IND(sdump); } else { for (i=0; i<mffield->count; i++) { if (i) gf_fprintf(sdump->trace, " "); if (field.fieldType != GF_SG_VRML_MFNODE) { gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, (mffield->count>1) ? 1 : 0, node); } } } gf_fprintf(sdump->trace, "]"); } gf_fprintf(sdump->trace, "\n"); } else { if (sdump->X3DDump) { gf_fprintf(sdump->trace, "<field name=\"%s\" type=\"%s\" accessType=\"%s\"", field.name, gf_sg_vrml_get_field_type_name(field.fieldType), gf_sg_vrml_get_event_type_name(field.eventType, 1)); } else { gf_fprintf(sdump->trace, "<field name=\"%s\" type=\"%s\" vrml97Hint=\"%s\"", field.name, GetXMTFieldTypeName(field.fieldType), gf_sg_vrml_get_event_type_name(field.eventType, 0)); } if ((field.eventType==GF_SG_EVENT_FIELD) || (field.eventType==GF_SG_EVENT_EXPOSED_FIELD)) { if (sf_type == GF_SG_VRML_SFNODE) { GF_ChildNodeItem *list = *(GF_ChildNodeItem **)field.far_ptr; gf_fprintf(sdump->trace, ">\n"); sdump->indent++; if (!sdump->X3DDump) gf_fprintf(sdump->trace, "<nodes>"); while (list) { gf_dump_vrml_node(sdump, list->node, 1, NULL); list = list->next; } if (!sdump->X3DDump) gf_fprintf(sdump->trace, "</nodes>"); sdump->indent++; DUMP_IND(sdump); if (!has_sublist) gf_fprintf(sdump->trace, "</field>\n"); } else { if (sdump->X3DDump) { gf_fprintf(sdump->trace, " value=\""); } else { gf_fprintf(sdump->trace, " %s=\"", GetXMTFieldTypeValueName(field.fieldType)); } for (i=0; i<mffield->count; i++) { if (i) gf_fprintf(sdump->trace, " "); if (field.fieldType != GF_SG_VRML_MFNODE) { gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, (mffield->count>1) ? 1 : 0, node); } } if (has_sublist) gf_fprintf(sdump->trace, "\">\n"); else gf_fprintf(sdump->trace, "\"/>\n"); } } else { gf_fprintf(sdump->trace, "/>\n"); } } } } /*field dumping for proto instance*/ static void gf_dump_vrml_proto_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field) { u32 i, sf_type; void *slot_ptr; DUMP_IND(sdump); gf_fprintf(sdump->trace, "<fieldValue name=\"%s\" ", field.name); if (gf_sg_vrml_is_sf_field(field.fieldType)) { if (field.fieldType == GF_SG_VRML_SFNODE) { gf_fprintf(sdump->trace, ">\n"); sdump->indent++; if (!sdump->X3DDump) gf_fprintf(sdump->trace, "<node>"); gf_dump_vrml_node(sdump, field.far_ptr ? 
*(GF_Node **)field.far_ptr : NULL, 0, NULL); if (!sdump->X3DDump) gf_fprintf(sdump->trace, "</node>"); sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "</fieldValue>\n"); } else { if (sdump->X3DDump) { gf_fprintf(sdump->trace, " value=\""); } else { gf_fprintf(sdump->trace, " %s=\"", GetXMTFieldTypeValueName(field.fieldType)); } gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node); gf_fprintf(sdump->trace, "\"/>\n"); } } else { GenMFField *mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); if ((field.eventType==GF_SG_EVENT_FIELD) || (field.eventType==GF_SG_EVENT_EXPOSED_FIELD)) { if (sf_type == GF_SG_VRML_SFNODE) { GF_ChildNodeItem *list = *(GF_ChildNodeItem **)field.far_ptr; gf_fprintf(sdump->trace, ">\n"); sdump->indent++; if (!sdump->X3DDump) gf_fprintf(sdump->trace, "<nodes>"); while (list) { gf_dump_vrml_node(sdump, list->node, 1, NULL); list = list->next; } if (!sdump->X3DDump) gf_fprintf(sdump->trace, "</nodes>"); sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "</fieldValue>\n"); } else { if (sdump->X3DDump) { gf_fprintf(sdump->trace, " value=\""); } else { gf_fprintf(sdump->trace, " %s=\"", GetXMTFieldTypeValueName(field.fieldType)); } if (mffield) { for (i=0; i<mffield->count; i++) { if (i) gf_fprintf(sdump->trace, " "); if (field.fieldType != GF_SG_VRML_MFNODE) { gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, (mffield->count>1) ? 1 : 0, node); } } } gf_fprintf(sdump->trace, "\"/>\n"); } } } } static GF_Route *gf_dump_vrml_get_IS(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo *field) { u32 i; GF_Route *r; i=0; while ((r = (GF_Route*)gf_list_enum(sdump->current_proto->sub_graph->Routes, &i))) { if (!r->IS_route) continue; if ((r->ToNode==node) && (r->ToField.fieldIndex==field->fieldIndex)) return r; } if (!node || !node->sgprivate->interact || !node->sgprivate->interact->routes) return NULL; i=0; while ((r = (GF_Route*)gf_list_enum(node->sgprivate->interact->routes, &i))) { if (!r->IS_route) continue; if (r->FromField.fieldIndex == field->fieldIndex) return r; } return NULL; } static void gf_dump_vrml_IS_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field, Bool isScript, Bool skip_is) { GF_FieldInfo pfield; GF_Route *r = gf_dump_vrml_get_IS(sdump, node, &field); if (r->FromNode) { pfield.fieldIndex = r->ToField.fieldIndex; gf_sg_proto_get_field(sdump->current_proto, NULL, &pfield); } else { pfield.fieldIndex = r->FromField.fieldIndex; gf_sg_proto_get_field(sdump->current_proto, NULL, &pfield); } if (!sdump->XMLDump) { DUMP_IND(sdump); if (isScript) gf_fprintf(sdump->trace, "%s %s ", gf_sg_vrml_get_event_type_name(field.eventType, sdump->X3DDump), gf_sg_vrml_get_field_type_name(field.fieldType)); gf_fprintf(sdump->trace, "%s IS %s\n", field.name, pfield.name); } else { if (!skip_is) { StartElement(sdump, "IS"); EndElementHeader(sdump, 1); sdump->indent++; } DUMP_IND(sdump); gf_fprintf(sdump->trace, "<connect nodeField=\"%s\" protoField=\"%s\"/>\n", field.name, pfield.name); if (!skip_is) { sdump->indent--; EndElement(sdump, "IS", 1); } } } static Bool scene_dump_vrml_can_dump(GF_SceneDumper *sdump, GF_Node *node) { #ifndef GPAC_DISABLE_VRML u32 tag; if (node->sgprivate->tag==TAG_ProtoNode) return 1; if (sdump->X3DDump || (sdump->dump_mode==GF_SM_DUMP_VRML)) { if (node->sgprivate->tag>=GF_NODE_RANGE_FIRST_X3D) return 1; if (node->sgprivate->tag==TAG_MPEG4_Rectangle) return 1; if (node->sgprivate->tag==TAG_MPEG4_Circle) 
return 1; #ifndef GPAC_DISABLE_X3D tag = gf_node_x3d_type_by_class_name(gf_node_get_class_name(node)); return tag ? 1 : 0; #else return 0; #endif } else { if (node->sgprivate->tag<=GF_NODE_RANGE_LAST_MPEG4) return 1; #ifndef GPAC_DISABLE_X3D if (node->sgprivate->tag==TAG_X3D_Rectangle2D) return 1; if (node->sgprivate->tag==TAG_X3D_Circle2D) return 1; #endif tag = gf_node_mpeg4_type_by_class_name(gf_node_get_class_name(node)); return tag ? 1 : 0; } #else return 1; #endif } static void gf_dump_vrml_node(GF_SceneDumper *sdump, GF_Node *node, Bool in_list, char *fieldContainer) { u32 i, count, to_dump, sub_el, ID; u32 *def_fields; Bool isDEF, isScript, isProto, hasISed; char *name; GF_Node *base; GF_FieldInfo field, base_field; if (!node) { gf_fprintf(sdump->trace, "NULL"); return; } /*this dumper works only for VRML like graphs*/ if (node->sgprivate->tag>GF_NODE_RANGE_LAST_X3D) return; if (!scene_dump_vrml_can_dump(sdump, node)) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[Scene Dump] node %s not part of %s standard - removing\n", gf_node_get_class_name(node), sdump->X3DDump ? "X3D" : (sdump->dump_mode==GF_SM_DUMP_VRML) ? "VRML" : "MPEG4")); if (!in_list) gf_fprintf(sdump->trace, "NULL"); return; } /*convert whatever possible*/ name = (char*)gf_node_get_class_name(node); #ifndef GPAC_DISABLE_VRML if (sdump->X3DDump) { if (node->sgprivate->tag == TAG_MPEG4_Circle) name = "Circle2D"; else if (node->sgprivate->tag == TAG_MPEG4_Rectangle) name = "Rectangle2D"; #ifndef GPAC_DISABLE_X3D } else { if (node->sgprivate->tag == TAG_X3D_Circle2D) name = "Circle"; else if (node->sgprivate->tag == TAG_X3D_Rectangle2D) name = "Rectangle"; #endif } #endif isProto = (gf_node_get_tag(node) == TAG_ProtoNode) ? 1 : 0; ID = gf_node_get_id(node); isDEF = 0; if (ID) { isDEF = gf_dump_vrml_is_def_node(sdump, node); if (!isDEF) { if (!sdump->XMLDump) { if (in_list) DUMP_IND(sdump); gf_fprintf(sdump->trace, "USE "); scene_dump_vrml_id(sdump, node); if (in_list) gf_fprintf(sdump->trace, "\n"); } else { if (isProto) { StartElement(sdump, "ProtoInstance"); StartAttribute(sdump, "name"); gf_fprintf(sdump->trace, "%s", name); EndAttribute(sdump); } else { StartElement(sdump, name); } StartAttribute(sdump, "USE"); scene_dump_vrml_id(sdump, node); EndAttribute(sdump); EndElementHeader(sdump, 0); } return; } } /*get all fields*/ count = gf_node_get_field_count(node); def_fields = (u32*)gf_malloc(sizeof(u32) * count); base = NULL; switch (gf_node_get_tag(node)) { #ifndef GPAC_DISABLE_VRML #ifndef GPAC_DISABLE_X3D case TAG_X3D_Script: #endif case TAG_MPEG4_Script: isScript = 1; break; #endif default: isScript = 0; break; } if (!isScript) { if (isProto) { base = gf_sg_proto_create_instance(node->sgprivate->scenegraph, ((GF_ProtoInstance *)node)->proto_interface); } else { base = gf_node_new(node->sgprivate->scenegraph, node->sgprivate->tag); } } if (base) gf_node_register(base, NULL); hasISed = 0; to_dump = sub_el = 0; for (i=0; i<count; i++) { if (isScript) { /*dyn script fields are complex types*/ def_fields[i] = (i>2) ? 2 : 1; } else { def_fields[i] = 0; } gf_node_get_field(node, i, &field); if (sdump->current_proto) { if (gf_dump_vrml_get_IS(sdump, node, &field) != NULL) { def_fields[i] = 3; if ((field.fieldType == GF_SG_VRML_SFNODE) || (field.fieldType == GF_SG_VRML_MFNODE)) def_fields[i] = sdump->XMLDump ? 
4 : 3; /*in XMT the ISed is not an attribute*/ if (sdump->XMLDump) sub_el++; to_dump++; hasISed = 1; continue; } } if (!isScript && ((field.eventType == GF_SG_EVENT_IN) || (field.eventType == GF_SG_EVENT_OUT)) ) { continue; } /*proto instance in XMT lists all fields as elements*/ if (sdump->XMLDump && isProto) { def_fields[i] = 2; to_dump++; sub_el++; continue; } switch (field.fieldType) { case GF_SG_VRML_SFNODE: if (* (GF_Node **) field.far_ptr) { def_fields[i] = 2; to_dump++; sub_el++; } break; case GF_SG_VRML_MFNODE: if (* (GF_ChildNodeItem**) field.far_ptr) { def_fields[i] = 2; to_dump++; sub_el++; } break; case GF_SG_VRML_SFCOMMANDBUFFER: { SFCommandBuffer *p = (SFCommandBuffer *)field.far_ptr; if (p->bufferSize || gf_list_count(p->commandList)) { def_fields[i] = 2; to_dump++; sub_el++; } } break; case GF_SG_VRML_MFATTRREF: { MFAttrRef *p = (MFAttrRef*)field.far_ptr; if (p->count) { def_fields[i] = 2; to_dump++; sub_el++; } } break; default: if (isScript) { to_dump++; } else { gf_node_get_field(base, i, &base_field); if (!gf_sg_vrml_field_equal(base_field.far_ptr, field.far_ptr, field.fieldType)) { def_fields[i] = 1; to_dump++; } } break; } } if (base) gf_node_unregister(base, NULL); if (!to_dump) { if (in_list) DUMP_IND(sdump); if (!sdump->XMLDump) { if (isDEF) { gf_fprintf(sdump->trace, "DEF "); scene_dump_vrml_id(sdump, node); gf_fprintf(sdump->trace, " "); } gf_fprintf(sdump->trace, "%s {}\n", name); } else { if (isDEF) { if (isProto) { gf_fprintf(sdump->trace, "<ProtoInstance name=\"%s\" DEF=\"", name); } else { gf_fprintf(sdump->trace, "<%s DEF=\"", name); } scene_dump_vrml_id(sdump, node); gf_fprintf(sdump->trace, "\"/>\n"); } else { if (isProto) { gf_fprintf(sdump->trace, "<ProtoInstance name=\"%s\"/>\n", name); } else { gf_fprintf(sdump->trace, "<%s/>\n", name); } } } gf_free(def_fields); return; } if (!sdump->XMLDump) { if (in_list) DUMP_IND(sdump); if (isDEF) { gf_fprintf(sdump->trace, "DEF "); scene_dump_vrml_id(sdump, node); gf_fprintf(sdump->trace, " "); } gf_fprintf(sdump->trace, "%s {\n", name); } else { if (isProto) { StartElement(sdump, "ProtoInstance"); StartAttribute(sdump, "name"); gf_fprintf(sdump->trace, "%s", name); EndAttribute(sdump); } else { StartElement(sdump, name); } if (isDEF) { StartAttribute(sdump, "DEF"); scene_dump_vrml_id(sdump, node); EndAttribute(sdump); } } sdump->indent ++; for (i=0; i<count; i++) { switch (def_fields[i]) { /*regular field*/ case 1: gf_node_get_field(node, i, &field); if (!isScript) { gf_dump_vrml_field(sdump, node, field); } /*special script dump case, static fields except url*/ else if (i==1 || i==2) { if (*((SFBool *)field.far_ptr)) gf_dump_vrml_field(sdump, node, field); } /*in bt first dump fields - in XMT first dump url*/ else if (i && !sdump->XMLDump) { gf_dump_vrml_dyn_field(sdump, node, field, 0); } else if (!i && sdump->XMLDump) { gf_dump_vrml_field(sdump, node, field); } break; /*IS field*/ case 3: if (sdump->XMLDump) break; gf_node_get_field(node, i, &field); gf_dump_vrml_IS_field(sdump, node, field, isScript, 0); def_fields[i] = 0; break; default: break; } } if (fieldContainer) gf_fprintf(sdump->trace, " fieldContainer=\"%s\"", fieldContainer); if (isScript) sub_el = 1; EndElementHeader(sdump, sub_el ? 
1 : 0); if (sub_el) { /*dump all normal IS elements for XMT*/ if (hasISed && sdump->XMLDump) { StartElement(sdump, "IS"); EndElementHeader(sdump, 1); sdump->indent++; } for (i=0; i<count; i++) { if (def_fields[i]==3) { gf_node_get_field(node, i, &field); gf_dump_vrml_IS_field(sdump, node, field, isScript, 1); } } if (hasISed && sdump->XMLDump) { sdump->indent--; EndElement(sdump, "IS", 1); } /*dump all sub elements and complex IS*/ for (i=0; i<count; i++) { switch (def_fields[i]) { case 2: gf_node_get_field(node, i, &field); if (!isScript) { if (isProto && sdump->XMLDump) { gf_dump_vrml_proto_field(sdump, node, field); } else { gf_dump_vrml_field(sdump, node, field); } } else { #ifndef GPAC_DISABLE_X3D /*X3D script metadata, NOT DYN*/ if ((i==3) && (node->sgprivate->tag==TAG_X3D_Script) ) { if (*((GF_Node **)field.far_ptr)) gf_dump_vrml_field(sdump, node, field); } else #endif { gf_dump_vrml_dyn_field(sdump, node, field, 0); } } break; case 4: gf_node_get_field(node, i, &field); gf_dump_vrml_IS_field(sdump, node, field, isScript, 0); break; } } } /*finally dump script - XMT dumping is broken!!*/ if (isScript && !sdump->XMLDump) { gf_node_get_field(node, 0, &field); gf_dump_vrml_field(sdump, node, field); } sdump->indent --; if (!sdump->XMLDump && !in_list) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "}"); } else { EndElement(sdump, isProto ? "ProtoInstance" : name, sub_el); } gf_free(def_fields); } static GF_Err DumpMultipleIndexedReplace(GF_SceneDumper *sdump, GF_Command *com) { u32 i; GF_FieldInfo field; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); gf_node_get_field(com->node, inf->fieldIndex, &field); field.fieldType = inf->fieldType; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace extended=\"indices\" atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\">\n", field.name); } else { gf_fprintf(sdump->trace, "MULTIPLEINDREPLACE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s [\n", field.name); } sdump->indent++; i=0; while ((inf = (GF_CommandField *) gf_list_enum(com->command_fields, &i))) { field.far_ptr = inf->field_ptr; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<repValue position=\"%d\" ", inf->pos); } else { gf_fprintf(sdump->trace, "%d BY ", inf->pos); } gf_dump_vrml_simple_field(sdump, field, com->node); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "/>"); } else { gf_fprintf(sdump->trace, "\n"); } } sdump->indent--; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "</Replace>\n"); } else { gf_fprintf(sdump->trace, "]\n"); } return GF_OK; } static GF_Err DumpMultipleReplace(GF_SceneDumper *sdump, GF_Command *com) { u32 i; GF_FieldInfo info; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace extended=\"fields\" atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\">\n"); sdump->indent++; i=0; while ((inf = (GF_CommandField *) gf_list_enum(com->command_fields, &i))) { gf_node_get_field(com->node, inf->fieldIndex, &info); info.far_ptr = inf->field_ptr; DUMP_IND(sdump); if (gf_sg_vrml_get_sf_type(info.fieldType) != GF_SG_VRML_SFNODE) { gf_fprintf(sdump->trace, "<repField atField=\"%s\" ", info.name); gf_dump_vrml_simple_field(sdump, info, com->node); gf_fprintf(sdump->trace, "/>\n"); } else { gf_fprintf(sdump->trace, "<repField>"); 
gf_dump_vrml_field(sdump, com->node, info); gf_fprintf(sdump->trace, "</repField>\n"); } } sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "</Replace>\n"); } else { gf_fprintf(sdump->trace, "MULTIPLEREPLACE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, " {\n"); sdump->indent++; i=0; while ((inf = (GF_CommandField *) gf_list_enum(com->command_fields, &i))) { gf_node_get_field(com->node, inf->fieldIndex, &info); info.far_ptr = inf->field_ptr; gf_dump_vrml_field(sdump, com->node, info); } sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "}\n"); } return GF_OK; } static GF_Err DumpGlobalQP(GF_SceneDumper *sdump, GF_Command *com) { GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace extended=\"globalQuant\">\n"); } else { gf_fprintf(sdump->trace, "GLOBALQP "); } gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Replace>\n"); else gf_fprintf(sdump->trace, "\n"); return GF_OK; } static GF_Err DumpNodeInsert(GF_SceneDumper *sdump, GF_Command *com) { GF_CommandField *inf; char posname[20]; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); switch (inf->pos) { case 0: strcpy(posname, "BEGIN"); break; case -1: strcpy(posname, "END"); break; default: sprintf(posname, "%d", inf->pos); break; } DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Insert atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" position=\"%s\">", posname); } else { if (inf->pos==-1) { gf_fprintf(sdump->trace, "APPEND TO "); } else gf_fprintf(sdump->trace, "INSERT AT "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".children"); if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); gf_fprintf(sdump->trace, " "); } gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Insert>"); gf_fprintf(sdump->trace, "\n"); return GF_OK; } static GF_Err DumpRouteInsert(GF_SceneDumper *sdump, GF_Command *com, Bool is_scene_replace) { GF_Route r; memset(&r, 0, sizeof(GF_Route)); r.ID = com->RouteID; r.name = com->def_name; r.FromNode = gf_dump_find_node(sdump, com->fromNodeID); r.FromField.fieldIndex = com->fromFieldIndex; r.ToNode = gf_dump_find_node(sdump, com->toNodeID); r.ToField.fieldIndex = com->toFieldIndex; gf_list_add(sdump->inserted_routes, com); if (is_scene_replace) { gf_dump_vrml_route(sdump, &r, 0); } else { DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Insert>\n"); } else { gf_fprintf(sdump->trace, "INSERT "); } gf_dump_vrml_route(sdump, &r, 2); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Insert>"); } return GF_OK; } static GF_Err DumpIndexInsert(GF_SceneDumper *sdump, GF_Command *com) { GF_Err e; GF_FieldInfo field, sffield; GF_CommandField *inf; char posname[20]; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); switch (inf->pos) { case 0: strcpy(posname, "BEGIN"); break; case -1: strcpy(posname, "END"); break; default: sprintf(posname, "%d", inf->pos); break; } e = gf_node_get_field(com->node, inf->fieldIndex, &field); if (e) return e; if (gf_sg_vrml_is_sf_field(field.fieldType)) return GF_NON_COMPLIANT_BITSTREAM; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Insert atNode=\""); 
scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\" position=\"%s\"", field.name, posname); } else { if (inf->pos==-1) { gf_fprintf(sdump->trace, "APPEND TO "); } else gf_fprintf(sdump->trace, "INSERT AT "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s", field.name); if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); gf_fprintf(sdump->trace, " "); } memcpy(&sffield, &field, sizeof(GF_FieldInfo)); sffield.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); if (field.fieldType==GF_SG_VRML_MFNODE) { if (sdump->XMLDump) gf_fprintf(sdump->trace, ">\n"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Insert>"); gf_fprintf(sdump->trace, "\n"); } else { sffield.far_ptr = inf->field_ptr; gf_dump_vrml_simple_field(sdump, sffield, com->node); if (sdump->XMLDump) gf_fprintf(sdump->trace, "/>"); gf_fprintf(sdump->trace, "\n"); } return e; } static GF_Err DumpIndexDelete(GF_SceneDumper *sdump, GF_Command *com) { char posname[20]; GF_FieldInfo field; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); switch (inf->pos) { case -1: strcpy(posname, sdump->XMLDump ? "END" : "LAST"); break; case 0: strcpy(posname, "BEGIN"); break; default: sprintf(posname, "%d", inf->pos); break; } gf_node_get_field(com->node, inf->fieldIndex, &field); DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Delete atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\" position=\"%s\"/>", field.name, posname); } else { gf_fprintf(sdump->trace, "DELETE "); if (inf->pos==-1) gf_fprintf(sdump->trace, "%s ", posname); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s", field.name); if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); gf_fprintf(sdump->trace, "\n"); } return GF_OK; } static GF_Err DumpNodeDelete(GF_SceneDumper *sdump, GF_Command *com) { DUMP_IND(sdump); if (sdump->XMLDump) { if (com->tag==GF_SG_NODE_DELETE_EX) { gf_fprintf(sdump->trace, "<Delete extended=\"deleteOrder\" atNode=\""); } else { gf_fprintf(sdump->trace, "<Delete atNode=\""); } scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\"/>\n"); } else { if (com->tag==GF_SG_NODE_DELETE_EX) gf_fprintf(sdump->trace, "X"); gf_fprintf(sdump->trace, "DELETE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\n"); } return GF_OK; } static GF_Err DumpRouteDelete(GF_SceneDumper *sdump, GF_Command *com) { DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Delete atRoute=\""); scene_dump_vrml_route_id(sdump, com->RouteID, com->def_name); gf_fprintf(sdump->trace, "\"/>\n"); } else { gf_fprintf(sdump->trace, "DELETE ROUTE "); scene_dump_vrml_route_id(sdump, com->RouteID, com->def_name); gf_fprintf(sdump->trace, "\n"); } return GF_OK; } static GF_Err DumpNodeReplace(GF_SceneDumper *sdump, GF_Command *com) { GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\">"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); gf_fprintf(sdump->trace, "</Replace>\n"); } else { gf_fprintf(sdump->trace, "REPLACE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, " BY "); gf_dump_vrml_node(sdump, inf->new_node, 0, 
NULL); gf_fprintf(sdump->trace, "\n"); } return GF_OK; } static GF_Err DumpFieldReplace(GF_SceneDumper *sdump, GF_Command *com) { GF_Err e; GF_FieldInfo field; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); e = gf_node_get_field(com->node, inf->fieldIndex, &field); DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\" ", field.name); } else { gf_fprintf(sdump->trace, "REPLACE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s BY ", field.name); } switch (field.fieldType) { case GF_SG_VRML_SFNODE: if (sdump->XMLDump) gf_fprintf(sdump->trace, ">"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Replace>"); else gf_fprintf(sdump->trace, "\n"); break; case GF_SG_VRML_MFNODE: { GF_ChildNodeItem *tmp; if (sdump->XMLDump) { gf_fprintf(sdump->trace, ">"); } else { gf_fprintf(sdump->trace, " [\n"); } sdump->indent++; tmp = inf->node_list; while (tmp) { gf_dump_vrml_node(sdump, tmp->node, 1, NULL); tmp = tmp->next; } sdump->indent--; if (sdump->XMLDump) { gf_fprintf(sdump->trace, "</Replace>"); } else { EndList(sdump, NULL); } } break; case GF_SG_VRML_SFCOMMANDBUFFER: if (sdump->XMLDump) { SFCommandBuffer *cb = (SFCommandBuffer*)inf->field_ptr; gf_fprintf(sdump->trace, ">\n"); gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent+1, 0); DUMP_IND(sdump); gf_fprintf(sdump->trace, "</Replace>\n"); } else { SFCommandBuffer *cb = (SFCommandBuffer*)inf->field_ptr; gf_fprintf(sdump->trace, " {\n"); gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent+1, 0); DUMP_IND(sdump); gf_fprintf(sdump->trace, "}\n"); } break; default: field.far_ptr = inf->field_ptr; gf_dump_vrml_simple_field(sdump, field, com->node); if (sdump->XMLDump) gf_fprintf(sdump->trace, "/>"); gf_fprintf(sdump->trace, "\n"); } return e; } static GF_Err DumpIndexReplace(GF_SceneDumper *sdump, GF_Command *com) { char posname[20]; GF_Err e; GF_FieldInfo field; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); e = gf_node_get_field(com->node, inf->fieldIndex, &field); if (e) return e; if (gf_sg_vrml_is_sf_field(field.fieldType)) return GF_NON_COMPLIANT_BITSTREAM; switch (inf->pos) { case 0: strcpy(posname, "BEGIN"); break; case -1: strcpy(posname, sdump->XMLDump ? "END" : "LAST"); break; default: sprintf(posname, "%d", inf->pos); break; } DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\" position=\"%s\"", field.name, posname); } else { gf_fprintf(sdump->trace, "REPLACE "); if (inf->pos==-1) gf_fprintf(sdump->trace, "%s ", posname); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s", field.name); if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); gf_fprintf(sdump->trace, " BY "); } if (field.fieldType == GF_SG_VRML_MFNODE) { if (sdump->XMLDump) gf_fprintf(sdump->trace, ">\n"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); gf_fprintf(sdump->trace, (sdump->XMLDump) ? "</Replace>\n" : "\n"); } else { field.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); field.far_ptr = inf->field_ptr; gf_dump_vrml_simple_field(sdump, field, com->node); gf_fprintf(sdump->trace, sdump->XMLDump ? 
"/>\n" : "\n"); } return GF_OK; } static GF_Err DumpXReplace(GF_SceneDumper *sdump, GF_Command *com) { char posname[20]; GF_Err e; GF_FieldInfo field, idxField; GF_Node *toNode, *target; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); e = gf_node_get_field(com->node, inf->fieldIndex, &field); if (e) return e; toNode = target = NULL; /*indexed replacement with index given by other node field*/ if (com->toNodeID) { toNode = gf_sg_find_node(com->in_scene, com->toNodeID); if (!toNode) return GF_NON_COMPLIANT_BITSTREAM; e = gf_node_get_field(toNode, com->toFieldIndex, &idxField); if (e) return e; } else { /*indexed replacement */ if (inf->pos>=-1) { if (gf_sg_vrml_is_sf_field(field.fieldType)) return GF_NON_COMPLIANT_BITSTREAM; switch (inf->pos) { case 0: strcpy(posname, "BEGIN"); break; case -1: strcpy(posname, sdump->XMLDump ? "END" : "LAST"); break; default: sprintf(posname, "%d", inf->pos); break; } field.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); } } field.far_ptr = inf->field_ptr; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\"", field.name); if (toNode) { gf_fprintf(sdump->trace, " atIndexNode=\""); scene_dump_vrml_id(sdump, toNode); gf_fprintf(sdump->trace, "\" atIndexField=\"%s\"", idxField.name); field.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); } if (com->ChildNodeTag) { GF_FieldInfo cfield; GF_Node *cnode; if (com->ChildNodeTag>0) { cnode = gf_node_new(com->in_scene, com->ChildNodeTag); } else { GF_Proto *proto = gf_sg_find_proto(com->in_scene, -com->ChildNodeTag , NULL); if (!proto) return GF_SG_UNKNOWN_NODE; cnode = gf_sg_proto_create_instance(com->in_scene, proto); } if (!cnode) return GF_SG_UNKNOWN_NODE; gf_node_register(cnode, NULL); gf_node_get_field(cnode, com->child_field, &cfield); gf_fprintf(sdump->trace, " atChildField=\"%s\"", cfield.name); gf_node_unregister(cnode, NULL); field.fieldType = cfield.fieldType; } if (com->fromNodeID) { target = gf_sg_find_node(com->in_scene, com->fromNodeID); if (!target) return GF_NON_COMPLIANT_BITSTREAM; e = gf_node_get_field(target, com->fromFieldIndex, &idxField); if (e) return e; gf_fprintf(sdump->trace, " fromNode=\""); scene_dump_vrml_id(sdump, target); gf_fprintf(sdump->trace, "\" fromField=\"%s\">\n", idxField.name); return GF_OK; } else { if (inf->pos>=-1) gf_fprintf(sdump->trace, " position=\"%s\"", posname); } } else { gf_fprintf(sdump->trace, "XREPLACE "); if (inf->pos==-1) gf_fprintf(sdump->trace, "%s ", posname); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s", field.name); if (toNode) { gf_fprintf(sdump->trace, "["); scene_dump_vrml_id(sdump, toNode); gf_fprintf(sdump->trace, ".%s]", idxField.name); field.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); } else if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); if (com->ChildNodeTag) { GF_FieldInfo cfield; GF_Node *cnode; if (com->ChildNodeTag>0) { cnode = gf_node_new(com->in_scene, com->ChildNodeTag); } else { GF_Proto *proto = gf_sg_find_proto(com->in_scene, -com->ChildNodeTag , NULL); if (!proto) return GF_SG_UNKNOWN_NODE; cnode = gf_sg_proto_create_instance(com->in_scene, proto); } if (!cnode) return GF_SG_UNKNOWN_NODE; gf_node_register(cnode, NULL); gf_node_get_field(cnode, com->child_field, &cfield); gf_fprintf(sdump->trace, ".%s", cfield.name); gf_node_unregister(cnode, NULL); field.fieldType = 
cfield.fieldType; } gf_fprintf(sdump->trace, " BY "); } if (field.fieldType == GF_SG_VRML_MFNODE) { if (sdump->XMLDump) gf_fprintf(sdump->trace, ">\n"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); gf_fprintf(sdump->trace, (sdump->XMLDump) ? "</Replace>\n" : "\n"); } else { gf_dump_vrml_simple_field(sdump, field, com->node); gf_fprintf(sdump->trace, sdump->XMLDump ? "/>\n" : "\n"); } return GF_OK; } static GF_Err DumpRouteReplace(GF_SceneDumper *sdump, GF_Command *com) { const char *name; GF_Route r2; if (!scene_dump_vrml_find_route_name(sdump, com->RouteID, &name)) return GF_BAD_PARAM; memset(&r2, 0, sizeof(GF_Route)); r2.FromNode = gf_dump_find_node(sdump, com->fromNodeID); r2.FromField.fieldIndex = com->fromFieldIndex; r2.ToNode = gf_dump_find_node(sdump, com->toNodeID); r2.ToField.fieldIndex = com->toFieldIndex; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atRoute=\""); scene_dump_vrml_route_id(sdump, com->RouteID, (char *) name); gf_fprintf(sdump->trace, "\">\n"); } else { gf_fprintf(sdump->trace, "REPLACE ROUTE "); scene_dump_vrml_route_id(sdump, com->RouteID, (char *) name); gf_fprintf(sdump->trace, " BY "); } gf_dump_vrml_route(sdump, &r2, 1); if (sdump->XMLDump ) gf_fprintf(sdump->trace, "</Replace>"); return GF_OK; } static GF_Err gf_dump_vrml_route(GF_SceneDumper *sdump, GF_Route *r, u32 dump_type) { char toNodeBuf[100], fromNodeBuf[100], *to_node_p, *from_node_p; const char *node_name; u32 id; if (!r->is_setup) { gf_node_get_field(r->FromNode, r->FromField.fieldIndex, &r->FromField); gf_node_get_field(r->ToNode, r->ToField.fieldIndex, &r->ToField); r->is_setup = 1; } if (!r->FromNode || !r->ToNode) return GF_BAD_PARAM; if (sdump->XMLDump || !dump_type) DUMP_IND(sdump); to_node_p = toNodeBuf; from_node_p = fromNodeBuf; node_name = gf_node_get_name_and_id(r->FromNode, &id); if (node_name) { const char *to_name; from_node_p = (char *)node_name; to_name = gf_node_get_name(r->ToNode); if (to_name) { to_node_p = (char *) to_name; } else { id = gf_node_get_id(r->ToNode); sprintf(toNodeBuf, "node_%d", id); } } else { sprintf(fromNodeBuf, "N%d", id-1); sprintf(toNodeBuf, "N%d", gf_node_get_id(r->ToNode) - 1); } if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<ROUTE"); if (r->ID) { StartAttribute(sdump, "DEF"); scene_dump_vrml_route_id(sdump, r->ID, r->name); EndAttribute(sdump); } gf_fprintf(sdump->trace, " fromNode=\"%s\" fromField=\"%s\" toNode=\"%s\" toField=\"%s\"/>\n", from_node_p, r->FromField.name, to_node_p, r->ToField.name); } else { if (dump_type==2) gf_fprintf(sdump->trace, "ROUTE "); if (r->ID) { gf_fprintf(sdump->trace, "DEF "); scene_dump_vrml_route_id(sdump, r->ID, r->name); gf_fprintf(sdump->trace, " "); } if (dump_type==1) { gf_fprintf(sdump->trace, "%s.%s TO %s.%s\n", from_node_p, r->FromField.name, to_node_p, r->ToField.name); } else { if (dump_type!=2) gf_fprintf(sdump->trace, "ROUTE "); gf_fprintf(sdump->trace, "%s.%s TO %s.%s\n", from_node_p, r->FromField.name, to_node_p, r->ToField.name); } } return GF_OK; } static GF_Err DumpProtos(GF_SceneDumper *sdump, GF_List *protoList) { #ifdef GPAC_DISABLE_VRML return GF_OK; #else u32 i, j, count; GF_FieldInfo field; GF_Err e; GF_SceneGraph *prev_sg; GF_Proto *proto, *prev_proto; prev_proto = sdump->current_proto; i=0; while ((proto = (GF_Proto*)gf_list_enum(protoList, &i))) { sdump->current_proto = proto; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, proto->ExternProto.count ? 
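/* gf_dump_vrml_route above synthesizes names for anonymous endpoints: a
   node with internal ID n but no DEF name is printed as "N<n-1>" (IDs are
   1-based internally, dumped 0-based). With named endpoints the two
   syntaxes come out as follows (hypothetical node and field names):
     BT : ROUTE TS.fraction_changed TO PI.set_fraction
     XML: <ROUTE fromNode="TS" fromField="fraction_changed" toNode="PI" toField="set_fraction"/>
*/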
"EXTERNPROTO " : "PROTO "); gf_fprintf(sdump->trace, "%s [\n", proto->Name); } else { gf_fprintf(sdump->trace, "<ProtoDeclare name=\"%s\" protoID=\"%d\"", proto->Name, proto->ID); if (proto->ExternProto.count) { gf_fprintf(sdump->trace, " locations=\""); gf_dump_vrml_sffield(sdump, GF_SG_VRML_SFURL, &proto->ExternProto.vals[0], 0, NULL); gf_fprintf(sdump->trace, "\""); } gf_fprintf(sdump->trace, ">\n"); } if (sdump->XMLDump && sdump->X3DDump) gf_fprintf(sdump->trace, "<ProtoInterface>"); sdump->indent++; count = gf_list_count(proto->proto_fields); for (j=0; j<count; j++) { GF_ProtoFieldInterface *pf = (GF_ProtoFieldInterface *)gf_list_get(proto->proto_fields, j); field.fieldIndex = pf->ALL_index; field.eventType = pf->EventType; field.far_ptr = pf->def_value; field.fieldType = pf->FieldType; field.name = pf->FieldName; field.NDTtype = NDT_SFWorldNode; field.on_event_in = NULL; gf_dump_vrml_dyn_field(sdump, NULL, field, pf->QP_Type ? 1 : 0); if (!pf->QP_Type) continue; /*dump interface coding - BT/TXT extensions, not supported by any other tool*/ sdump->indent++; DUMP_IND(sdump); if (sdump->XMLDump) { const char *quant_catname = "unknown"; #ifndef GPAC_DISABLE_BIFS switch (pf->QP_Type) { case QC_3DPOS: quant_catname = "position3D"; break; case QC_2DPOS: quant_catname = "position2D"; break; case QC_ORDER: quant_catname = "drawingOrder"; break; case QC_COLOR: quant_catname = "color"; break; case QC_TEXTURE_COORD: quant_catname = "textureCoordinate"; break; case QC_ANGLE: quant_catname = "angle"; break; case QC_SCALE: quant_catname = "scale"; break; case QC_INTERPOL_KEYS: quant_catname = "keys"; break; case QC_NORMALS: quant_catname = "normals"; break; case QC_ROTATION: quant_catname = "rotations"; break; case QC_SIZE_3D: quant_catname = "size3D"; break; case QC_SIZE_2D: quant_catname = "size2D"; break; case QC_LINEAR_SCALAR: quant_catname = "linear"; break; case QC_COORD_INDEX:quant_catname = "coordIndex"; break; } #endif gf_fprintf(sdump->trace, "<InterfaceCodingParameters quantCategoy=\"%s\"", quant_catname); } else { gf_fprintf(sdump->trace, "{QP %d", pf->QP_Type); } #ifndef GPAC_DISABLE_BIFS if (pf->QP_Type==QC_LINEAR_SCALAR) gf_fprintf(sdump->trace, sdump->XMLDump ? " nbBits=\"%d\"" : " nbBits %d", pf->NumBits); if (pf->hasMinMax) { switch (pf->QP_Type) { case QC_LINEAR_SCALAR: case QC_COORD_INDEX: if (sdump->XMLDump) { gf_fprintf(sdump->trace, " intMin=\"%d\" intMax=\"%d\"", *((SFInt32 *)pf->qp_min_value), *((SFInt32 *)pf->qp_max_value)); } else { gf_fprintf(sdump->trace, " b {%d %d}", *((SFInt32 *)pf->qp_min_value), *((SFInt32 *)pf->qp_max_value)); } break; default: if (sdump->XMLDump) { gf_fprintf(sdump->trace, " floatMin=\"%g\" floatMax=\"%g\"", FIX2FLT( *((SFFloat *)pf->qp_min_value) ), FIX2FLT( *((SFFloat *)pf->qp_max_value) )); } else { gf_fprintf(sdump->trace, " b {%g %g}", FIX2FLT( *((SFFloat *)pf->qp_min_value) ), FIX2FLT( *((SFFloat *)pf->qp_max_value) ) ); } break; } } #endif gf_fprintf(sdump->trace, sdump->XMLDump ? 
"/>\n" : "}\n"); sdump->indent--; if (sdump->XMLDump) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "</field>\n"); } } sdump->indent--; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "]"); } else if (sdump->X3DDump) gf_fprintf(sdump->trace, "</ProtoInterface>\n"); if (proto->ExternProto.count) { if (!sdump->XMLDump) { gf_fprintf(sdump->trace, " \""); gf_dump_vrml_sffield(sdump, GF_SG_VRML_SFURL, &proto->ExternProto.vals[0], 0, NULL); gf_fprintf(sdump->trace, "\"\n\n"); } else { gf_fprintf(sdump->trace, "</ProtoDeclare>\n"); } continue; } if (!sdump->XMLDump) gf_fprintf(sdump->trace, " {\n"); sdump->indent++; if (sdump->XMLDump && sdump->X3DDump) gf_fprintf(sdump->trace, "<ProtoBody>\n"); e = DumpProtos(sdump, proto->sub_graph->protos); if (e) return e; /*set namespace to the proto one*/ prev_sg = sdump->sg; sdump->sg = gf_sg_proto_get_graph(proto); count = gf_list_count(proto->node_code); for (j=0; j<count; j++) { GF_Node *n = (GF_Node*)gf_list_get(proto->node_code, j); gf_dump_vrml_node(sdump, n, 1, NULL); } count = gf_list_count(proto->sub_graph->Routes); for (j=0; j<count; j++) { GF_Route *r = (GF_Route *)gf_list_get(proto->sub_graph->Routes, j); if (r->IS_route) continue; gf_dump_vrml_route(sdump, r, 0); } if (sdump->XMLDump && sdump->X3DDump) gf_fprintf(sdump->trace, "</ProtoBody>\n"); /*restore namespace*/ sdump->sg = prev_sg; sdump->indent--; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "}\n"); } else { gf_fprintf(sdump->trace, "</ProtoDeclare>\n"); } } sdump->current_proto = prev_proto; return GF_OK; #endif } static GF_Err DumpSceneReplace(GF_SceneDumper *sdump, GF_Command *com) { if (sdump->XMLDump) { if (!sdump->X3DDump) { StartElement(sdump, "Replace"); EndElementHeader(sdump, 1); sdump->indent++; } //scene tag is already dumped with X3D header if (!sdump->X3DDump) StartElement(sdump, "Scene"); if (!sdump->X3DDump && com->use_names) { StartAttribute(sdump, "USENAMES"); gf_fprintf(sdump->trace, "%s", com->use_names ? 
"true" : "false"); EndAttribute(sdump); } if (!sdump->X3DDump) EndElementHeader(sdump, 1); sdump->indent++; } else { if (!sdump->skip_scene_replace) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "REPLACE SCENE BY "); } } DumpProtos(sdump, com->new_proto_list); gf_dump_vrml_node(sdump, com->node, 0, NULL); if (!sdump->XMLDump) gf_fprintf(sdump->trace, "\n\n"); if (com->aggregated) { u32 i, count; count = gf_list_count(com->node->sgprivate->scenegraph->Routes); for (i=0; i<count; i++) { GF_Route *r = (GF_Route *)gf_list_get(com->node->sgprivate->scenegraph->Routes, i); if (r->IS_route) continue; gf_dump_vrml_route(sdump, r, 0); } } return GF_OK; } static GF_Err DumpProtoInsert(GF_SceneDumper *sdump, GF_Command *com) { DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Insert extended=\"proto\">\n"); } else { gf_fprintf(sdump->trace, "INSERTPROTO [\n"); } sdump->indent++; DumpProtos(sdump, com->new_proto_list); sdump->indent--; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "</Insert>\n"); } else { gf_fprintf(sdump->trace, "]\n"); } return GF_OK; } #endif /*GPAC_DISABLE_VRML*/ #ifndef GPAC_DISABLE_SVG static char *lsr_format_node_id(GF_Node *n, u32 NodeID, char *str) { if (!n) sprintf(str, "N%d", NodeID-1); else { const char *name = gf_node_get_name_and_id(n, &NodeID); if (name) sprintf(str, "%s", name); else sprintf(str, "N%d", NodeID - 1); } return str; } static char szLSRName[1024]; static char *sd_get_lsr_namespace(GF_SceneGraph *sg) { char *lsrns = (char *) gf_sg_get_namespace_qname(sg, GF_XMLNS_LASER); if (lsrns) { sprintf(szLSRName, "%s:", lsrns); return szLSRName; } return ""; } static GF_Err DumpLSRNewScene(GF_SceneDumper *sdump, GF_Command *com) { char *lsrns = sd_get_lsr_namespace(com->in_scene); gf_fprintf(sdump->trace, "<%sNewScene>\n", lsrns); gf_dump_svg_element(sdump, com->node, NULL, 0); gf_fprintf(sdump->trace, "</%sNewScene>\n", lsrns); return GF_OK; } static GF_Err DumpLSRAddReplaceInsert(GF_SceneDumper *sdump, GF_Command *com) { char szID[100]; Bool is_text = 0; GF_CommandField *f; char *lsrns = sd_get_lsr_namespace(com->in_scene); const char *com_name = (com->tag==GF_SG_LSR_REPLACE) ? "Replace" : ( (com->tag==GF_SG_LSR_ADD) ? "Add" : "Insert" ); DUMP_IND(sdump); gf_fprintf(sdump->trace, "<%s%s ref=\"%s\" ", lsrns, com_name, lsr_format_node_id(com->node, com->RouteID, szID)); f = (GF_CommandField *) gf_list_get(com->command_fields, 0); if (f && (f->pos>=0) ) gf_fprintf(sdump->trace, "index=\"%d\" ", f->pos); if (f) { GF_FieldInfo info; if (!f->new_node && !f->node_list) { char *att_name = NULL; if (f->fieldType==SVG_Transform_Scale_datatype) att_name = "scale"; else if (f->fieldType==SVG_Transform_Rotate_datatype) att_name = "rotation"; else if (f->fieldType==SVG_Transform_Translate_datatype) att_name = "translation"; else if (f->fieldIndex==(u32) -1) att_name = "textContent"; else { if (!com->node) return GF_NON_COMPLIANT_BITSTREAM; att_name = (char*) gf_svg_get_attribute_name(com->node, f->fieldIndex); } gf_fprintf(sdump->trace, "attributeName=\"%s\" ", att_name); if (f->field_ptr) { char *att; info.far_ptr = f->field_ptr; info.fieldIndex = f->fieldIndex; info.fieldType = f->fieldType; info.name = att_name; if ((s32) f->pos >= 0) { att = gf_svg_dump_attribute_indexed(com->node, &info); } else { att = gf_svg_dump_attribute(com->node, &info); } gf_fprintf(sdump->trace, "value=\"%s\" ", att ? 
att : ""); if (att) gf_free(att); } if (com->fromNodeID) { GF_FieldInfo op_info; GF_Node *op = gf_sg_find_node(sdump->sg, com->fromNodeID); gf_fprintf(sdump->trace, "operandElementId=\"%s\" ", lsr_format_node_id(op, com->RouteID, szID)); gf_node_get_field(op, com->fromFieldIndex, &op_info); gf_fprintf(sdump->trace, "operandAttributeName=\"%s\" ", op_info.name); } gf_fprintf(sdump->trace, "/>\n"); return GF_OK; } if (f->new_node && f->new_node->sgprivate->tag==TAG_DOMText) is_text = 1; /*if fieldIndex (eg attributeName) is set, this is children replacement*/ if (f->fieldIndex>0) gf_fprintf(sdump->trace, "attributeName=\"children\" "); } gf_fprintf(sdump->trace, ">"); if (!is_text) { gf_fprintf(sdump->trace, "\n"); sdump->indent++; } if (f) { if (f->new_node) { gf_dump_svg_element(sdump, f->new_node, com->node, 0); } else if (f->node_list) { GF_ChildNodeItem *list = f->node_list; while (list) { gf_dump_svg_element(sdump, list->node, com->node, 0); list = list->next; } } } if (!is_text) { sdump->indent--; DUMP_IND(sdump); } gf_fprintf(sdump->trace, "</%s%s>\n", lsrns, com_name); return GF_OK; } static GF_Err DumpLSRDelete(GF_SceneDumper *sdump, GF_Command *com) { char szID[1024]; GF_CommandField *f; char *lsrns = sd_get_lsr_namespace(com->in_scene); DUMP_IND(sdump); gf_fprintf(sdump->trace, "<%sDelete ref=\"%s\" ", lsrns, lsr_format_node_id(com->node, com->RouteID, szID)); f = (GF_CommandField *) gf_list_get(com->command_fields, 0); if (f && (f->pos>=0) ) gf_fprintf(sdump->trace, "index=\"%d\" ", f->pos); gf_fprintf(sdump->trace, "/>\n"); return GF_OK; } #ifdef GPAC_UNUSED_FUNC static GF_Err DumpLSRInsert(GF_SceneDumper *sdump, GF_Command *com) { return GF_OK; } static GF_Err SD_SetSceneGraph(GF_SceneDumper *sdump, GF_SceneGraph *sg) { if (sdump) sdump->sg = sg; return GF_OK; } static GF_Err DumpLSRClean(GF_SceneDumper *sdump, GF_Command *com) { return GF_OK; } static GF_Err DumpLSRRestore(GF_SceneDumper *sdump, GF_Command *com) { return GF_OK; } static GF_Err DumpLSRSave(GF_SceneDumper *sdump, GF_Command *com) { return GF_OK; } #endif /*GPAC_UNUSED_FUNC*/ static GF_Err DumpLSRSendEvent(GF_SceneDumper *sdump, GF_Command *com) { char szID[1024]; char *lsrns = sd_get_lsr_namespace(com->in_scene); DUMP_IND(sdump); gf_fprintf(sdump->trace, "<%sSendEvent ref=\"%s\" event=\"%s\"", lsrns, lsr_format_node_id(com->node, com->RouteID, szID), gf_dom_event_get_name(com->send_event_name) ); if (com->send_event_name <= GF_EVENT_MOUSEWHEEL) gf_fprintf(sdump->trace, " pointvalue=\"%g %g\"", FIX2FLT(com->send_event_x), FIX2FLT(com->send_event_y) ); switch (com->send_event_name) { case GF_EVENT_KEYDOWN: case GF_EVENT_LONGKEYPRESS: case GF_EVENT_REPEAT_KEY: case GF_EVENT_SHORT_ACCESSKEY: if (com->send_event_integer) { gf_fprintf(sdump->trace, " stringvalue=\"%s\"", gf_dom_get_key_name(com->send_event_integer) ); break; } default: if (com->send_event_integer) gf_fprintf(sdump->trace, " intvalue=\"%d\"", com->send_event_integer); if (com->send_event_string) gf_fprintf(sdump->trace, " stringvalue=\"%s\"", com->send_event_string); break; } gf_fprintf(sdump->trace, "/>\n"); return GF_OK; } static GF_Err DumpLSRActivate(GF_SceneDumper *sdump, GF_Command *com) { char szID[1024]; char *lsrns = sd_get_lsr_namespace(com->in_scene); DUMP_IND(sdump); if (com->tag==GF_SG_LSR_ACTIVATE) { gf_fprintf(sdump->trace, "<%sActivate ref=\"%s\" />\n", lsrns, lsr_format_node_id(com->node, com->RouteID, szID)); } else { gf_fprintf(sdump->trace, "<%sDeactivate ref=\"%s\" />\n", lsrns, lsr_format_node_id(com->node, com->RouteID, szID)); 
} return GF_OK; } #endif GF_EXPORT GF_Err gf_sm_dump_command_list(GF_SceneDumper *sdump, GF_List *comList, u32 indent, Bool skip_first_replace) { GF_Err e; u32 i, count; u32 prev_ind; #ifndef GPAC_DISABLE_VRML u32 remain = 0, has_scene_replace = 0; #endif Bool prev_skip; if (!sdump || !sdump->trace|| !comList || !sdump->sg) return GF_BAD_PARAM; prev_skip = sdump->skip_scene_replace; sdump->skip_scene_replace = skip_first_replace; prev_ind = sdump->indent; sdump->indent = indent; e = GF_OK; count = gf_list_count(comList); for (i=0; i<count; i++) { GF_Command *com = (GF_Command *) gf_list_get(comList, i); if (i #ifndef GPAC_DISABLE_VRML && !remain #endif && (sdump->X3DDump || (sdump->dump_mode==GF_SM_DUMP_VRML)) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[Scene Dump] MPEG-4 Commands found, not supported in %s - skipping\n", sdump->X3DDump ? "X3D" : "VRML")); break; } #ifndef GPAC_DISABLE_VRML if (has_scene_replace && (com->tag != GF_SG_ROUTE_INSERT)) { has_scene_replace = 0; if (sdump->XMLDump) { sdump->indent--; EndElement(sdump, "Scene", 1); sdump->indent--; EndElement(sdump, "Replace", 1); } else { DUMP_IND(sdump); gf_fprintf(sdump->trace, "\nAT 0 {\n"); sdump->indent++; } } #endif switch (com->tag) { #ifndef GPAC_DISABLE_VRML /*insert commands*/ case GF_SG_NODE_INSERT: e = DumpNodeInsert(sdump, com); break; case GF_SG_INDEXED_INSERT: e = DumpIndexInsert(sdump, com); break; case GF_SG_ROUTE_INSERT: e = DumpRouteInsert(sdump, com, has_scene_replace); if (remain) remain--; break; /*delete commands*/ case GF_SG_NODE_DELETE: e = DumpNodeDelete(sdump, com); break; case GF_SG_INDEXED_DELETE: e = DumpIndexDelete(sdump, com); break; case GF_SG_ROUTE_DELETE: e = DumpRouteDelete(sdump, com); break; /*replace commands*/ case GF_SG_NODE_REPLACE: e = DumpNodeReplace(sdump, com); break; case GF_SG_FIELD_REPLACE: e = DumpFieldReplace(sdump, com); break; case GF_SG_INDEXED_REPLACE: e = DumpIndexReplace(sdump, com); break; case GF_SG_ROUTE_REPLACE: e = DumpRouteReplace(sdump, com); break; case GF_SG_XREPLACE: e = DumpXReplace(sdump, com); break; case GF_SG_SCENE_REPLACE: /*we don't support replace scene in conditional*/ assert(!sdump->current_com_list); sdump->current_com_list = comList; e = DumpSceneReplace(sdump, com); sdump->current_com_list = NULL; has_scene_replace = 1; remain = count - i - 1; break; /*extended commands*/ case GF_SG_PROTO_INSERT: e = DumpProtoInsert(sdump, com); break; case GF_SG_PROTO_DELETE_ALL: DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Delete extended=\"allProtos\"/>\n"); } else { gf_fprintf(sdump->trace, "DELETEPROTO ALL\n"); } e = GF_OK; break; case GF_SG_PROTO_DELETE: { u32 j; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Delete extended=\"protos\" value=\""); } else { gf_fprintf(sdump->trace, "DELETEPROTO ["); } for (j=0; j<com->del_proto_list_size; j++) { if (j) gf_fprintf(sdump->trace, " "); gf_fprintf(sdump->trace, "%d", com->del_proto_list[j]); } if (sdump->XMLDump) { gf_fprintf(sdump->trace, "\"/>\n"); } else { gf_fprintf(sdump->trace, "]\n"); } e = GF_OK; } break; case GF_SG_GLOBAL_QUANTIZER: e = DumpGlobalQP(sdump, com); break; case GF_SG_MULTIPLE_REPLACE: e = DumpMultipleReplace(sdump, com); break; case GF_SG_MULTIPLE_INDEXED_REPLACE: e = DumpMultipleIndexedReplace(sdump, com); break; case GF_SG_NODE_DELETE_EX: e = DumpNodeDelete(sdump, com); break; #endif #ifndef GPAC_DISABLE_SVG /*laser commands*/ case GF_SG_LSR_NEW_SCENE: e = DumpLSRNewScene(sdump, com); break; case GF_SG_LSR_ADD: e = DumpLSRAddReplaceInsert(sdump, com); 
break; case GF_SG_LSR_CLEAN: //e = DumpLSRClean(sdump, com); break; case GF_SG_LSR_REPLACE: e = DumpLSRAddReplaceInsert(sdump, com); break; case GF_SG_LSR_DELETE: e = DumpLSRDelete(sdump, com); break; case GF_SG_LSR_INSERT: e = DumpLSRAddReplaceInsert(sdump, com); break; case GF_SG_LSR_RESTORE: //e = DumpLSRRestore(sdump, com); break; case GF_SG_LSR_SAVE: //e = DumpLSRSave(sdump, com); break; case GF_SG_LSR_SEND_EVENT: e = DumpLSRSendEvent(sdump, com); break; case GF_SG_LSR_ACTIVATE: case GF_SG_LSR_DEACTIVATE: e = DumpLSRActivate(sdump, com); break; #endif } if (e) break; if (sdump->skip_scene_replace #ifndef GPAC_DISABLE_VRML && !has_scene_replace #endif ) { sdump->skip_scene_replace = 0; if (!sdump->XMLDump && (i+1<count)) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "\nAT 0 {\n"); sdump->indent++; } } } #ifndef GPAC_DISABLE_VRML if (remain && !sdump->XMLDump) { sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "}\n"); } #endif if (has_scene_replace && sdump->XMLDump) { sdump->indent--; if (!sdump->X3DDump) { EndElement(sdump, "Scene", 1); sdump->indent--; EndElement(sdump, "Replace", 1); } } sdump->indent = prev_ind; sdump->skip_scene_replace = prev_skip; return e; } #ifndef GPAC_DISABLE_SVG void gf_dump_svg_element(GF_SceneDumper *sdump, GF_Node *n, GF_Node *parent, Bool is_root) { GF_ChildNodeItem *list; char attName[100], *attValue; u32 nID; SVG_Element *svg = (SVG_Element *)n; GF_FieldInfo info; SVGAttribute *att; u32 tag, ns; if (!n) return; nID = gf_node_get_id(n); tag = n->sgprivate->tag; /*remove undef listener/handlers*/ if (!nID) { switch (tag) { case TAG_SVG_listener: if ((0) && gf_node_get_attribute_by_tag(n, TAG_XMLEV_ATT_handler, 0, 0, &info)==GF_OK) { if (((XMLRI*)info.far_ptr)->target && !gf_node_get_id(((XMLRI*)info.far_ptr)->target) ) return; } break; case TAG_SVG_handler: /*this handler was not declared in the graph*/ if (!n->sgprivate->parents || (n->sgprivate->parents->node != parent)) return; break; case TAG_DOMText: { GF_DOMText *txt = (GF_DOMText *)n; if (txt->textContent) { if ((txt->type==GF_DOM_TEXT_CDATA) || (parent && (parent->sgprivate->tag == TAG_SVG_script)) || (parent && (parent->sgprivate->tag == TAG_SVG_handler)) ) { gf_fprintf(sdump->trace, "<![CDATA["); gf_fprintf(sdump->trace, "%s", txt->textContent); gf_fprintf(sdump->trace, "]]>"); } else if (txt->type==GF_DOM_TEXT_REGULAR) { scene_dump_utf_string(sdump, 0, txt->textContent); } } } return; } } if (!sdump->in_text) { DUMP_IND(sdump); } /*register all namespaces specified on this element */ gf_xml_push_namespaces((GF_DOMNode *)n); gf_fprintf(sdump->trace, "<%s", gf_node_get_class_name(n)); ns = gf_xml_get_element_namespace(n); if (nID) { char attID[100]; gf_fprintf(sdump->trace, " id=\"%s\"", lsr_format_node_id(n, 0, attID)); } att = svg->attributes; while (att) { if (att->data_type==SVG_ID_datatype) { att = att->next; continue; } info.fieldIndex = att->tag; info.fieldType = att->data_type; if (att->tag==TAG_DOM_ATT_any) { u32 att_ns = ((GF_DOMFullAttribute*)att)->xmlns; info.name = ((GF_DOMFullAttribute*)att)->name; if ((att_ns != ns) && strncmp(info.name, "xmlns", 5)) { sprintf(attName, "%s:%s", gf_sg_get_namespace_qname(gf_node_get_graph(n), att_ns), ((GF_DOMFullAttribute*)att)->name); info.name = attName; } } else { info.name = gf_svg_get_attribute_name(n, att->tag); } if (att->data_type==XMLRI_datatype) { XMLRI *xlink = (XMLRI *)att->data; if (xlink->type==XMLRI_ELEMENTID) { if (!xlink->target || !gf_node_get_id((GF_Node*)xlink->target) ) { att = att->next; continue; } if (parent && 
(parent == (GF_Node *) xlink->target)) { att = att->next; continue; } } else if (xlink->type==XMLRI_STREAMID) { gf_fprintf(sdump->trace, " %s=\"#stream%d\"", info.name, xlink->lsr_stream_id); att = att->next; continue; } else { gf_fprintf(sdump->trace, " %s=\"%s\"", info.name, xlink->string); att = att->next; continue; } } info.far_ptr = att->data; attValue = gf_svg_dump_attribute((GF_Node*)svg, &info); if (attValue) { if (/*strcmp(info.name, "xmlns") &&*/ (info.fieldType = (u32) strlen(attValue))) gf_fprintf(sdump->trace, " %s=\"%s\"", info.name, attValue); gf_free(attValue); } att = att->next; } gf_dom_event_dump_listeners(n, sdump->trace); if (svg->children) { gf_fprintf(sdump->trace, ">"); } else { gf_fprintf(sdump->trace, "/>"); return; } if (n->sgprivate->tag==TAG_LSR_conditional) { GF_DOMUpdates *up = svg->children ? (GF_DOMUpdates *)svg->children->node : NULL; sdump->indent++; if (up && (up->sgprivate->tag==TAG_DOMUpdates)) { if (gf_list_count(up->updates)) { gf_fprintf(sdump->trace, "\n"); gf_sm_dump_command_list(sdump, up->updates, sdump->indent, 0); } else if (up->data) { gf_fprintf(sdump->trace, "<!-- WARNING: LASeR scripts cannot be dumped at run-time -->\n"); } } sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "</%s>\n", gf_node_get_class_name(n)); return; } if (tag==TAG_SVG_text || tag==TAG_SVG_textArea) sdump->in_text = 1; sdump->indent++; list = svg->children; while (list) { if (!sdump->in_text) gf_fprintf(sdump->trace, "\n"); gf_dump_svg_element(sdump, list->node, n, 0); list = list->next; } if (!sdump->in_text) gf_fprintf(sdump->trace, "\n"); sdump->indent--; if (!sdump->in_text) DUMP_IND(sdump); gf_fprintf(sdump->trace, "</%s>", gf_node_get_class_name(n)); if (tag==TAG_SVG_text || tag==TAG_SVG_textArea) sdump->in_text = 0; /*removes all namespaces specified on this element */ gf_xml_pop_namespaces((GF_DOMNode *)n); } #endif static void gf_sm_dump_saf_hdr(GF_SceneDumper *dumper, char *unit_name, u64 au_time, Bool is_rap) { gf_fprintf(dumper->trace, "<saf:%s", unit_name); if (au_time) gf_fprintf(dumper->trace, " time=\""LLD"\"", au_time); if (is_rap) gf_fprintf(dumper->trace, " rap=\"true\""); gf_fprintf(dumper->trace, ">\n"); } static void dump_od_to_saf(GF_SceneDumper *dumper, GF_AUContext *au, u32 indent) { u32 i, count; count = gf_list_count(au->commands); for (i=0; i<count; i++) { u32 j, c2; GF_ODUpdate *com = (GF_ODUpdate *)gf_list_get(au->commands, i); if (com->tag != GF_ODF_OD_UPDATE_TAG) continue; c2 = gf_list_count(com->objectDescriptors); for (j=0; j<c2; j++) { GF_ObjectDescriptor *od = (GF_ObjectDescriptor *)gf_list_get(com->objectDescriptors, j); GF_ESD *esd = (GF_ESD *) gf_list_get(od->ESDescriptors, 0); GF_MuxInfo *mux; if (!esd || (esd->tag != GF_ODF_ESD_TAG)) { if (od->URLString) { gf_fprintf(dumper->trace, "<saf:RemoteStreamHeader streamID=\"stream%d\" url=\"%s\"", au->owner->ESID, od->URLString); if (au->timing) gf_fprintf(dumper->trace, " time=\""LLD"\"", au->timing); gf_fprintf(dumper->trace, "/>\n"); } continue; } mux = (GF_MuxInfo *)gf_list_get(esd->extensionDescriptors, 0); if (!mux || (mux->tag!=GF_ODF_MUXINFO_TAG)) mux = NULL; gf_fprintf(dumper->trace, "<saf:mediaHeader streamID=\"stream%d\"", esd->ESID); if (esd->decoderConfig) { gf_fprintf(dumper->trace, " streamType=\"%d\" objectTypeIndication=\"%d\" timeStampResolution=\"%d\"", esd->decoderConfig->streamType, esd->decoderConfig->objectTypeIndication, au->owner->timeScale); } if (au->timing) gf_fprintf(dumper->trace, " time=\""LLD"\"", au->timing); if (mux && mux->file_name) 
gf_fprintf(dumper->trace, " source=\"%s\"", mux->file_name); gf_fprintf(dumper->trace, "/>\n"); } } gf_fprintf(dumper->trace, "</saf:mediaUnit>\n"); } #ifndef GPAC_DISABLE_SVG static GF_Err SD_DumpDOMElement(GF_SceneDumper *sdump, GF_DOMFullNode *node) { const char *ns; u32 child_type = 0; GF_DOMFullAttribute *att; GF_ChildNodeItem *child; GF_DOMText *txt; ns = gf_sg_get_namespace_qname(node->sgprivate->scenegraph, node->ns); DUMP_IND(sdump); if (ns) gf_fprintf(sdump->trace, "<%s:%s", ns, node->name); else gf_fprintf(sdump->trace, "<%s", node->name); att = (GF_DOMFullAttribute *)node->attributes; while (att) { gf_fprintf(sdump->trace, " %s=\"%s\"", att->name, (char *) att->data); att = (GF_DOMFullAttribute *)att->next; } if (!node->children) { gf_fprintf(sdump->trace, "/>\n"); return GF_OK; } gf_fprintf(sdump->trace, ">"); sdump->indent++; child = node->children; while (child) { switch(child->node->sgprivate->tag) { case TAG_DOMFullNode: if (!child_type) gf_fprintf(sdump->trace, "\n"); child_type = 1; SD_DumpDOMElement(sdump, (GF_DOMFullNode*)child->node); break; case TAG_DOMText: child_type = 2; txt = (GF_DOMText *)child->node; if (txt->type==GF_DOM_TEXT_REGULAR) { scene_dump_utf_string(sdump, 0, txt->textContent); } else if (txt->type==GF_DOM_TEXT_CDATA) { gf_fprintf(sdump->trace, "<![CDATA["); gf_fprintf(sdump->trace, "%s", txt->textContent); gf_fprintf(sdump->trace, "]]>"); } break; } child = child->next; } sdump->indent--; if (child_type!=2) { DUMP_IND(sdump); } if (ns) gf_fprintf(sdump->trace, "</%s:%s>\n", ns, node->name); else gf_fprintf(sdump->trace, "</%s>\n", node->name); return GF_OK; } #endif GF_EXPORT GF_Err gf_sm_dump_graph(GF_SceneDumper *sdump, Bool skip_proto, Bool skip_routes) { u32 tag; if (!sdump->trace || !sdump->sg || !sdump->sg->RootNode) return GF_BAD_PARAM; tag = sdump->sg->RootNode->sgprivate->tag; if (tag<=GF_NODE_RANGE_LAST_X3D) { gf_dump_setup(sdump, NULL); if (sdump->XMLDump) { StartElement(sdump, "Scene"); EndElementHeader(sdump, 1); sdump->indent++; } #ifndef GPAC_DISABLE_VRML GF_Err e; if (!skip_proto) { e = DumpProtos(sdump, sdump->sg->protos); if (e) return e; } if (sdump->X3DDump) { GF_ChildNodeItem *list = ((GF_ParentNode *)sdump->sg->RootNode)->children; while (list) { gf_dump_vrml_node(sdump, list->node, 0, NULL); list = list->next; } } else { gf_dump_vrml_node(sdump, sdump->sg->RootNode, 0, NULL); } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "\n\n"); if (!skip_routes) { GF_Route *r; u32 i=0; while ((r = (GF_Route*)gf_list_enum(sdump->sg->Routes, &i))) { if (r->IS_route || (r->graph!=sdump->sg)) continue; e = gf_dump_vrml_route(sdump, r, 0); if (e) return e; } } if (sdump->XMLDump) { sdump->indent--; EndElement(sdump, "Scene", 1); } #endif /*GPAC_DISABLE_VRML*/ gf_dump_finalize(sdump, NULL); return GF_OK; } #ifndef GPAC_DISABLE_SVG else if ((tag>=GF_NODE_RANGE_FIRST_SVG) && (tag<=GF_NODE_RANGE_LAST_SVG)) { sdump->dump_mode = GF_SM_DUMP_SVG; gf_dump_setup(sdump, NULL); gf_dump_svg_element(sdump, sdump->sg->RootNode, NULL, 1); return GF_OK; } else if (tag==TAG_DOMFullNode) { sdump->dump_mode = GF_SM_DUMP_XML; gf_dump_setup(sdump, NULL); SD_DumpDOMElement(sdump, (GF_DOMFullNode*)sdump->sg->RootNode); } #endif return GF_OK; } static void ReorderAUContext(GF_List *sample_list, GF_AUContext *au, Bool lsr_dump) { u64 autime, time; u32 i; Bool has_base; GF_AUContext *ptr; /* this happens when converting from bt to xmt NOTE: Comment is wrong? 
this happens when just loading BT */ if (!au->timing_sec) { au->timing_sec = (Double) (s64) au->timing; /* Hack to avoid timescale=0 which happens when loading a BT with no SLConfig*/ if (!au->owner->timeScale) au->owner->timeScale = 1000; au->timing_sec /= au->owner->timeScale; } /*this happens when converting from xmt to bt*/ if (!au->timing) { assert(au->owner->timeScale); au->timing = (u64) (au->timing_sec * au->owner->timeScale); } autime = au->timing + au->owner->imp_exp_time; has_base = 0; i=0; while ((ptr = (GF_AUContext*)gf_list_enum(sample_list, &i))) { time = ptr->timing + ptr->owner->imp_exp_time; if ( /*time ordered*/ (time > autime) /*set bifs first for first AU*/ || (!has_base && (time == autime) && (ptr->owner->streamType < au->owner->streamType) ) /*set OD first for laser*/ || (lsr_dump && (au->owner->streamType==GF_STREAM_OD)) ) { gf_list_insert(sample_list, au, i-1); return; } has_base = 0; if ( (ptr->owner->streamType == au->owner->streamType) && (time == autime) ) has_base = 1; } gf_list_add(sample_list, au); } GF_EXPORT GF_Err gf_sm_dump(GF_SceneManager *ctx, char *rad_name, Bool is_final_name, GF_SceneDumpFormat dump_mode) { GF_Err e; GF_List *sample_list; Bool first_par; u32 i, j, indent, num_scene, num_od, first_bifs, num_tracks; Double time; GF_SceneDumper *dumper; GF_StreamContext *sc; GF_AUContext *au; Bool no_root_found = 1; sample_list = gf_list_new(); num_scene = num_od = 0; num_tracks = 0; indent = 0; dumper = gf_sm_dumper_new(ctx->scene_graph, rad_name, is_final_name, ' ', dump_mode); e = GF_OK; /*configure all systems streams we're dumping*/ i=0; while ((sc = (GF_StreamContext*)gf_list_enum(ctx->streams, &i))) { switch (sc->streamType) { case GF_STREAM_SCENE: num_scene ++; num_tracks ++; break; case GF_STREAM_OD: num_od ++; num_tracks ++; break; default: continue; } j=0; while ((au = (GF_AUContext*)gf_list_enum(sc->AUs, &j))) { ReorderAUContext(sample_list, au, dumper->LSRDump); if (dumper->dump_mode==GF_SM_DUMP_SVG) break; } if (dumper->dump_mode==GF_SM_DUMP_SVG) break; } first_bifs = (num_scene==1) ? 1 : 0; num_scene = (num_scene>1) ? 1 : 0; num_od = (num_od>1) ? 1 : 0; gf_dump_setup(dumper, (GF_Descriptor *) ctx->root_od); #ifndef GPAC_DISABLE_SVG if (dumper->dump_mode==GF_SM_DUMP_SVG) { au = (GF_AUContext*)gf_list_get(sample_list, 0); GF_Command *com = NULL; if (au) com = (GF_Command*)gf_list_get(au->commands, 0); if (!au) { gf_dump_svg_element(dumper, dumper->sg->RootNode, NULL, 1); } else if (!com || (com->tag!=GF_SG_LSR_NEW_SCENE) || !com->node) { e = GF_NOT_SUPPORTED; } else { gf_dump_svg_element(dumper, com->node, NULL, 1); } gf_dump_finalize(dumper, (GF_Descriptor *) ctx->root_od); gf_sm_dumper_del(dumper); gf_list_del(sample_list); return e; } #endif time = dumper->LSRDump ? 
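/* ReorderAUContext above keeps the two AU clocks in sync before sorting:
   au->timing is in timescale units, au->timing_sec in seconds, and a
   missing timescale (BT files loaded with no SLConfig) falls back to 1000.
   Worked example with hypothetical values: timing=90000 at timeScale=30000
   gives timing_sec=3.0. The conversion in isolation: */
#if 0
static Double au_time_in_seconds(u64 timing, u32 timeScale)
{
	if (!timeScale) timeScale = 1000;	/* same fallback as the loader hack above */
	return ((Double) (s64) timing) / timeScale;
}
#endif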
-1 : 0; first_par = 0; while (gf_list_count(sample_list)) { au = (GF_AUContext*)gf_list_get(sample_list, 0); gf_list_rem(sample_list, 0); if (!dumper->XMLDump) { if (!first_bifs || (au->owner->streamType != GF_STREAM_SCENE) ) { if (au->flags & GF_SM_AU_RAP) gf_fprintf(dumper->trace, "RAP "); gf_fprintf(dumper->trace, "AT "LLD" ", au->timing); if ( (au->owner->streamType==GF_STREAM_OD && num_od) || (au->owner->streamType==GF_STREAM_SCENE && num_scene)) { gf_fprintf(dumper->trace, "IN %d ", au->owner->ESID); } gf_fprintf(dumper->trace, "{\n"); indent++; } switch (au->owner->streamType) { case GF_STREAM_OD: if (dumper->LSRDump) { dump_od_to_saf(dumper, au, indent); } else { #ifndef GPAC_DISABLE_OD_DUMP e = gf_odf_dump_com_list(au->commands, dumper->trace, indent+1, 0); #endif } break; case GF_STREAM_SCENE: e = gf_sm_dump_command_list(dumper, au->commands, indent, first_bifs); break; } if (first_bifs) { first_bifs = 0; gf_fprintf(dumper->trace, "\n"); } else { indent--; gf_fprintf(dumper->trace, "}\n\n"); } } else { if (dumper->LSRDump) { /* if (time != au->timing_sec) { time = au->timing_sec; } */ } else if (!time && !num_scene && first_bifs) { } else if (num_scene || num_od) { if (!first_par) { first_par = 1; indent += 1; } else { gf_fprintf(dumper->trace, " </par>\n"); } gf_fprintf(dumper->trace, " <par begin=\"%g\" atES_ID=\"es%d\" isRAP=\"%s\">\n", au->timing_sec, au->owner->ESID, (au->flags & GF_SM_AU_RAP) ? "yes" : "no"); } else if (au->timing_sec>time) { if (!first_par) { first_par = 1; indent += 1; } else { gf_fprintf(dumper->trace, " </par>\n"); } gf_fprintf(dumper->trace, "<par begin=\"%g\">\n", au->timing_sec); } switch (au->owner->streamType) { case GF_STREAM_OD: if (dumper->LSRDump) { dump_od_to_saf(dumper, au, indent+1); } else { #ifndef GPAC_DISABLE_OD_DUMP e = gf_odf_dump_com_list(au->commands, dumper->trace, indent+1, 1); #endif } break; case GF_STREAM_SCENE: if (gf_list_count(au->commands)) { if (dumper->LSRDump) gf_sm_dump_saf_hdr(dumper, "sceneUnit", au->timing, au->flags & GF_SM_AU_RAP); e = gf_sm_dump_command_list(dumper, au->commands, indent+1, first_bifs); first_bifs = 0; no_root_found = 0; if (dumper->LSRDump) gf_fprintf(dumper->trace, "</saf:sceneUnit>\n"); } break; } time = au->timing_sec; } if (dumper->X3DDump || (dumper->dump_mode==GF_SM_DUMP_VRML)) break; } #ifndef GPAC_DISABLE_VRML if (no_root_found && ctx->scene_graph->RootNode) { GF_Route *r; DumpProtos(dumper, ctx->scene_graph->protos); gf_dump_vrml_node(dumper, ctx->scene_graph->RootNode, 0, NULL); i=0; gf_fprintf(dumper->trace, "\n"); while ((r = (GF_Route*)gf_list_enum(dumper->sg->Routes, &i))) { if (r->IS_route || (r->graph!=dumper->sg)) continue; e = gf_dump_vrml_route(dumper, r, 0); if (e) return e; } } #endif /*close command*/ if (!dumper->X3DDump && first_par) gf_fprintf(dumper->trace, " </par>\n"); if (gf_list_count(sample_list) && (dumper->X3DDump || (dumper->dump_mode==GF_SM_DUMP_VRML)) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[Scene Dump] MPEG-4 Commands found, not supported in %s - skipping\n", dumper->X3DDump ? "X3D" : "VRML")); } gf_dump_finalize(dumper, (GF_Descriptor *) ctx->root_od); gf_sm_dumper_del(dumper); gf_list_del(sample_list); return e; } #endif /*GPAC_DISABLE_SCENE_DUMP*/
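/* A typical driver for the dumpers in this file - usage sketch only, with
   an arbitrary output basename and dump mode: */
#if 0
static GF_Err dump_scene_to_xmt(GF_SceneGraph *graph)
{
	GF_Err e;
	GF_SceneDumper *d = gf_sm_dumper_new(graph, "dump", GF_FALSE, ' ', GF_SM_DUMP_XMTA);
	if (!d) return GF_OUT_OF_MEM;
	e = gf_sm_dump_graph(d, GF_FALSE, GF_FALSE);	/* keep protos and routes */
	gf_sm_dumper_del(d);
	return e;
}
#endif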
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2022 * All rights reserved * * This file is part of GPAC / Scene Management sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/scene_manager.h> #include <gpac/constants.h> #include <gpac/utf.h> #include <gpac/internal/scenegraph_dev.h> #include <gpac/nodes_x3d.h> #include <gpac/nodes_svg.h> #include <gpac/events.h> #include <gpac/base_coding.h> #ifndef __SYMBIAN32__ #include <wchar.h> #endif #ifndef GPAC_DISABLE_SCENE_DUMP /*for QP types*/ #include "../bifs/quant.h" struct _scenedump { /*the scene we're dumping - set at each SceneReplace or mannually*/ GF_SceneGraph *sg; #ifndef GPAC_DISABLE_VRML /*the proto we're dumping*/ GF_Proto *current_proto; #endif FILE *trace; u32 indent; char *filename; GF_SceneDumpFormat dump_mode; u16 CurrentESID; u8 ind_char; Bool XMLDump, X3DDump, LSRDump; GF_List *dump_nodes; /*nodes created through conditionals while parsing but not applied*/ GF_List *mem_def_nodes; Bool skip_scene_replace; /*for route insert/replace in conditionals in current scene replace*/ GF_List *current_com_list; GF_List *inserted_routes; Bool in_text; }; static GF_Err gf_dump_vrml_route(GF_SceneDumper *sdump, GF_Route *r, u32 dump_type); static void gf_dump_vrml_node(GF_SceneDumper *sdump, GF_Node *node, Bool in_list, char *fieldContainer); #ifndef GPAC_DISABLE_SVG void gf_dump_svg_element(GF_SceneDumper *sdump, GF_Node *n, GF_Node *parent, Bool is_root); #endif GF_EXPORT GF_SceneDumper *gf_sm_dumper_new(GF_SceneGraph *graph, char *_rad_name, Bool is_final_name, char indent_char, GF_SceneDumpFormat dump_mode) { GF_SceneDumper *tmp; if (!graph) return NULL; GF_SAFEALLOC(tmp, GF_SceneDumper); if (!tmp) return NULL; /*store original*/ tmp->dump_mode = dump_mode; #ifndef GPAC_DISABLE_SVG if ((graph->RootNode && (graph->RootNode->sgprivate->tag>=GF_NODE_RANGE_LAST_VRML) ) || (dump_mode==GF_SM_DUMP_LASER) || (dump_mode==GF_SM_DUMP_SVG)) { tmp->XMLDump = GF_TRUE; if (dump_mode==GF_SM_DUMP_LASER) { tmp->LSRDump = GF_TRUE; } if (_rad_name) { const char* ext_name = tmp->LSRDump ? 
".xsr" : ".svg"; tmp->filename = (char *)gf_malloc(strlen(_rad_name) + strlen(ext_name) + 1); strcpy(tmp->filename, _rad_name); if (!is_final_name) strcat(tmp->filename, ext_name); tmp->trace = gf_fopen(tmp->filename, "wt"); if (!tmp->trace) { gf_free(tmp); return NULL; } } else { tmp->trace = stdout; } } else #endif { if (dump_mode==GF_SM_DUMP_AUTO_TXT) { if (!graph->RootNode || (graph->RootNode->sgprivate->tag<=GF_NODE_RANGE_LAST_MPEG4) ) { dump_mode = GF_SM_DUMP_BT; } else if (graph->RootNode->sgprivate->tag<=GF_NODE_RANGE_LAST_X3D) { dump_mode = GF_SM_DUMP_X3D_VRML; } } else if (dump_mode==GF_SM_DUMP_AUTO_XML) { if (!graph->RootNode || (graph->RootNode->sgprivate->tag<=GF_NODE_RANGE_LAST_MPEG4) ) { dump_mode = GF_SM_DUMP_XMTA; } else { dump_mode = GF_SM_DUMP_X3D_XML; } } if (_rad_name) { const char* ext_name; switch (dump_mode) { case GF_SM_DUMP_X3D_XML: ext_name = ".x3d"; tmp->XMLDump = GF_TRUE; tmp->X3DDump = GF_TRUE; break; case GF_SM_DUMP_XMTA: ext_name = ".xmt"; tmp->XMLDump = GF_TRUE; break; case GF_SM_DUMP_X3D_VRML: ext_name = ".x3dv"; tmp->X3DDump = GF_TRUE; break; case GF_SM_DUMP_VRML: ext_name = ".wrl"; break; default: ext_name = ".bt"; break; } tmp->filename = (char *)gf_malloc(strlen(_rad_name ? _rad_name : "") + strlen(ext_name) + 1); strcpy(tmp->filename, _rad_name ? _rad_name : ""); if (!is_final_name) strcat(tmp->filename, ext_name); tmp->trace = gf_fopen(tmp->filename, "wt"); if (!tmp->trace) { gf_free(tmp); return NULL; } } else { tmp->trace = stdout; switch (dump_mode) { case GF_SM_DUMP_X3D_XML: tmp->XMLDump = GF_TRUE; tmp->X3DDump = GF_TRUE; break; case GF_SM_DUMP_XMTA: tmp->XMLDump = GF_TRUE; break; case GF_SM_DUMP_X3D_VRML: tmp->X3DDump = GF_TRUE; break; default: break; } } } tmp->ind_char = indent_char; tmp->dump_nodes = gf_list_new(); tmp->mem_def_nodes = gf_list_new(); tmp->inserted_routes = gf_list_new(); tmp->sg = graph; return tmp; } GF_EXPORT void gf_sm_dumper_set_extra_graph(GF_SceneDumper *sdump, GF_SceneGraph *extra) { sdump->sg = extra; } GF_EXPORT void gf_sm_dumper_del(GF_SceneDumper *sdump) { gf_list_del(sdump->dump_nodes); while (gf_list_count(sdump->mem_def_nodes)) { GF_Node *tmp = (GF_Node *)gf_list_get(sdump->mem_def_nodes, 0); gf_list_rem(sdump->mem_def_nodes, 0); gf_node_unregister(tmp, NULL); } gf_list_del(sdump->mem_def_nodes); gf_list_del(sdump->inserted_routes); if (sdump->trace != stdout) gf_fclose(sdump->trace); if (sdump->filename) { gf_free(sdump->filename); sdump->filename = NULL; } gf_free(sdump); } char *gf_sm_dump_get_name(GF_SceneDumper *bd) { if (!bd) return NULL; return bd->filename; } static void gf_dump_setup(GF_SceneDumper *sdump, GF_Descriptor *root_od) { if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); if (sdump->dump_mode==GF_SM_DUMP_XML) return; gf_fprintf(sdump->trace, "<!-- %s Scene Dump -->\n", (sdump->dump_mode==GF_SM_DUMP_SVG) ? "SVG" : (sdump->dump_mode==GF_SM_DUMP_LASER) ? "LASeR" : sdump->X3DDump ? 
"X3D" : "XMT-A" ); } if (sdump->dump_mode==GF_SM_DUMP_SVG) return; if (sdump->LSRDump) { gf_fprintf(sdump->trace, "<saf:SAFSession xmlns:saf=\"urn:mpeg:mpeg4:SAF:2005\" >\n"); #ifndef GPAC_DISABLE_OD_DUMP if (root_od) { GF_ObjectDescriptor *iod = (GF_ObjectDescriptor *)root_od; u32 i, count; gf_fprintf(sdump->trace, "<saf:sceneHeader>\n"); count = gf_list_count(iod->ESDescriptors); for (i=0; i<count; i++) { GF_LASERConfig lsrcfg; GF_ESD *esd = (GF_ESD *)gf_list_get(iod->ESDescriptors, i); if (!esd || !esd->decoderConfig) continue; if (esd->decoderConfig->streamType != GF_STREAM_SCENE) continue; if (esd->decoderConfig->objectTypeIndication != 0x09) continue; if (!esd->decoderConfig->decoderSpecificInfo || !esd->decoderConfig->decoderSpecificInfo->data) continue; gf_odf_get_laser_config(esd->decoderConfig->decoderSpecificInfo, &lsrcfg); gf_odf_dump_desc((GF_Descriptor*)&lsrcfg, sdump->trace, 1, 1); } gf_fprintf(sdump->trace, "</saf:sceneHeader>\n"); } #endif return; } if (!sdump->X3DDump) { /*setup XMT*/ if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<XMT-A xmlns=\"urn:mpeg:mpeg4:xmta:schema:2002\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"urn:mpeg:mpeg4:xmta:schema:2002 xmt-a.xsd\">\n"); gf_fprintf(sdump->trace, " <Header>\n"); #ifndef GPAC_DISABLE_OD_DUMP if (root_od) gf_odf_dump_desc(root_od, sdump->trace, 1, 1); #endif gf_fprintf(sdump->trace, " </Header>\n"); gf_fprintf(sdump->trace, " <Body>\n"); if (!root_od) { gf_fprintf(sdump->trace, " <Replace>\n"); } } else { if (sdump->dump_mode==GF_SM_DUMP_VRML) { gf_fprintf(sdump->trace, "#VRML V2.0\n"); } else { /*dump root OD*/ #ifndef GPAC_DISABLE_OD_DUMP if (root_od) gf_odf_dump_desc(root_od, sdump->trace, 0, 0); #endif } gf_fprintf(sdump->trace, "\n"); } } else { if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<!DOCTYPE X3D PUBLIC \"ISO//Web3D//DTD X3D 3.0//EN\" \"http://www.web3d.org/specifications/x3d-3.0.dtd\">\n"); gf_fprintf(sdump->trace, "<X3D xmlns:xsd=\"http://www.w3.org/2001/XMLSchema-instance\" xsd:noNamespaceSchemaLocation=\"http://www.web3d.org/specifications/x3d-3.0.xsd\" version=\"3.0\">\n"); gf_fprintf(sdump->trace, "<head>\n"); gf_fprintf(sdump->trace, "<meta content=\"X3D File Converted/Dumped by GPAC Version %s - %s\" name=\"generator\"/>\n", gf_gpac_version(), gf_gpac_copyright() ); gf_fprintf(sdump->trace, "</head>\n"); gf_fprintf(sdump->trace, " <Scene>\n"); } else { gf_fprintf(sdump->trace, "#X3D V3.0\n\n"); } } } static void gf_dump_finalize(GF_SceneDumper *sdump, GF_Descriptor *root_od) { if (sdump->dump_mode==GF_SM_DUMP_SVG) return; if (sdump->LSRDump) { gf_fprintf(sdump->trace, "<saf:endOfSAFSession/>\n</saf:SAFSession>\n"); return; } if (!sdump->XMLDump) return; if (!sdump->X3DDump) { if (!root_od) { gf_fprintf(sdump->trace, " </Replace>\n"); } gf_fprintf(sdump->trace, " </Body>\n"); gf_fprintf(sdump->trace, "</XMT-A>\n"); } else { gf_fprintf(sdump->trace, " </Scene>\n"); gf_fprintf(sdump->trace, "</X3D>\n"); } } static Bool gf_dump_vrml_is_def_node(GF_SceneDumper *sdump, GF_Node *node) { s32 i = gf_list_find(sdump->dump_nodes, node); if (i>=0) return 0; gf_list_add(sdump->dump_nodes, node); return 1; } static GF_Node *gf_dump_find_node(GF_SceneDumper *sdump, u32 ID) { GF_Node *ret = gf_sg_find_node(sdump->sg, ID); if (ret) return ret; return NULL; } #define DUMP_IND(sdump) \ if (sdump->trace) { \ u32 z; \ for (z=0; z<sdump->indent; z++) gf_fprintf(sdump->trace, "%c", sdump->ind_char); \ } static void StartElement(GF_SceneDumper *sdump, const char *name) { if (!sdump->trace) 
return; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "%s {\n", name); } else { gf_fprintf(sdump->trace, "<%s", name); } } static void EndElementHeader(GF_SceneDumper *sdump, Bool has_sub_el) { if (!sdump->trace) return; if (sdump->XMLDump) { if (has_sub_el) { gf_fprintf(sdump->trace, ">\n"); } else { gf_fprintf(sdump->trace, "/>\n"); } } } static void EndElement(GF_SceneDumper *sdump, const char *name, Bool had_sub_el) { if (!sdump->trace) return; if (!sdump->XMLDump) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "}\n"); } else { if (had_sub_el) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "</%s>\n", name); } } } static void StartAttribute(GF_SceneDumper *sdump, const char *name) { if (!sdump->trace) return; if (!sdump->XMLDump) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "%s ", name); } else { gf_fprintf(sdump->trace, " %s=\"", name); } } static void EndAttribute(GF_SceneDumper *sdump) { if (!sdump->trace) return; if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "\n"); } else { gf_fprintf(sdump->trace, "\""); } } static void StartList(GF_SceneDumper *sdump, const char *name) { if (!sdump->trace) return; DUMP_IND(sdump); if (!sdump->XMLDump) { if (name) gf_fprintf(sdump->trace, "%s [\n", name); else gf_fprintf(sdump->trace, "[\n"); } else { gf_fprintf(sdump->trace, "<%s>\n", name); } } static void EndList(GF_SceneDumper *sdump, const char *name) { if (!sdump->trace) return; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "]\n"); } else { gf_fprintf(sdump->trace, "</%s>\n", name); } } static void scene_dump_utf_string(GF_SceneDumper *sdump, Bool escape_xml, char *str) { u32 len, i; u16 *uniLine; if (!str) return; len = (u32) strlen(str); if (!len) return; uniLine = (u16*)gf_malloc(sizeof(u16) * len*4); len = gf_utf8_mbstowcs(uniLine, len, (const char **) &str); if (len != GF_UTF8_FAIL) { for (i=0; i<len; i++) { //if (uniLine[i] == (u16) '\"') gf_fprintf(sdump->trace, "\\"); switch (uniLine[i]) { case '\'': if (escape_xml) gf_fprintf(sdump->trace, "&apos;"); else gf_fprintf(sdump->trace, "'"); break; case '\"': if (escape_xml) gf_fprintf(sdump->trace, "&quot;"); else gf_fprintf(sdump->trace, "\""); break; case '&': gf_fprintf(sdump->trace, "&amp;"); break; case '>': gf_fprintf(sdump->trace, "&gt;"); break; case '<': gf_fprintf(sdump->trace, "&lt;"); break; case '\r': case '\n': /* Does nothing : gf_fprintf(sdump->trace, "");, fflush instead ?*/ break; default: if (uniLine[i]<128) { gf_fprintf(sdump->trace, "%c", (u8) uniLine[i]); } else { gf_fprintf(sdump->trace, "&#%d;", uniLine[i]); } break; } } } gf_free(uniLine); } #ifndef GPAC_DISABLE_VRML static void scene_dump_vrml_id(GF_SceneDumper *sdump, GF_Node *node) { u32 id; const char *node_name; if (!sdump->trace) return; /*FIXME - optimize id/name fetch*/ node_name = gf_node_get_name_and_id(node, &id); if (node_name) gf_fprintf(sdump->trace, "%s", node_name); else gf_fprintf(sdump->trace, "N%d", id - 1); } static Bool scene_dump_vrml_find_route_name(GF_SceneDumper *sdump, u32 ID, const char **outName) { GF_Route *r; u32 i; GF_Command *com; r = gf_sg_route_find(sdump->sg, ID); if (r) { (*outName) = r->name; return 1; } i=0; while ((com = (GF_Command *)gf_list_enum(sdump->inserted_routes, &i))) { if (com->tag == GF_SG_ROUTE_INSERT) { if (com->RouteID==ID) { (*outName) = com->def_name; return 1; } } } if (!sdump->current_com_list) return 0; i=1; while ((com = (GF_Command *)gf_list_enum(sdump->current_com_list, &i))) { if ((com->tag == GF_SG_ROUTE_INSERT) || (com->tag == GF_SG_ROUTE_REPLACE)) { if 
(com->RouteID==ID) { (*outName) = com->def_name; return 1; } } else return 0; } return 0; } static void scene_dump_vrml_route_id(GF_SceneDumper *sdump, u32 routeID, char *rName) { if (!sdump->trace) return; if (!rName) scene_dump_vrml_find_route_name(sdump, routeID, (const char **) &rName); if (rName) gf_fprintf(sdump->trace, "%s", rName); else gf_fprintf(sdump->trace, "R%d", routeID - 1); } static void gf_dump_vrml_sffield(GF_SceneDumper *sdump, u32 type, void *ptr, Bool is_mf, GF_Node *node) { switch (type) { case GF_SG_VRML_SFBOOL: gf_fprintf(sdump->trace, "%s", * ((SFBool *)ptr) ? "true" : "false"); break; case GF_SG_VRML_SFINT32: gf_fprintf(sdump->trace, "%d", * ((SFInt32 *)ptr) ); break; case GF_SG_VRML_SFFLOAT: gf_fprintf(sdump->trace, "%g", FIX2FLT( * ((SFFloat *)ptr) ) ); break; case GF_SG_VRML_SFDOUBLE: gf_fprintf(sdump->trace, "%g", * ((SFDouble *)ptr) ); break; case GF_SG_VRML_SFTIME: gf_fprintf(sdump->trace, "%g", * ((SFTime *)ptr) ); break; case GF_SG_VRML_SFCOLOR: gf_fprintf(sdump->trace, "%g %g %g", FIX2FLT( ((SFColor *)ptr)->red ), FIX2FLT( ((SFColor *)ptr)->green ), FIX2FLT( ((SFColor *)ptr)->blue )); break; case GF_SG_VRML_SFCOLORRGBA: gf_fprintf(sdump->trace, "%g %g %g %g", FIX2FLT( ((SFColorRGBA *)ptr)->red ), FIX2FLT( ((SFColorRGBA *)ptr)->green ), FIX2FLT( ((SFColorRGBA *)ptr)->blue ), FIX2FLT( ((SFColorRGBA *)ptr)->alpha )); break; case GF_SG_VRML_SFVEC2F: gf_fprintf(sdump->trace, "%g %g", FIX2FLT( ((SFVec2f *)ptr)->x ), FIX2FLT( ((SFVec2f *)ptr)->y )); break; case GF_SG_VRML_SFVEC2D: gf_fprintf(sdump->trace, "%g %g", ((SFVec2d *)ptr)->x, ((SFVec2d *)ptr)->y); break; case GF_SG_VRML_SFVEC3F: gf_fprintf(sdump->trace, "%g %g %g", FIX2FLT( ((SFVec3f *)ptr)->x ), FIX2FLT( ((SFVec3f *)ptr)->y ), FIX2FLT( ((SFVec3f *)ptr)->z )); break; case GF_SG_VRML_SFVEC3D: gf_fprintf(sdump->trace, "%g %g %g", ((SFVec3d *)ptr)->x, ((SFVec3d *)ptr)->y, ((SFVec3d *)ptr)->z); break; case GF_SG_VRML_SFROTATION: gf_fprintf(sdump->trace, "%g %g %g %g", FIX2FLT( ((SFRotation *)ptr)->x ), FIX2FLT( ((SFRotation *)ptr)->y ), FIX2FLT( ((SFRotation *)ptr)->z ), FIX2FLT( ((SFRotation *)ptr)->q ) ); break; case GF_SG_VRML_SFATTRREF: { SFAttrRef *ar = (SFAttrRef *)ptr; if (ar->node) { GF_FieldInfo pinfo; gf_node_get_field(ar->node, ar->fieldIndex, &pinfo); scene_dump_vrml_id(sdump, ar->node); gf_fprintf(sdump->trace, ".%s", pinfo.name); } } break; case GF_SG_VRML_SFSCRIPT: { u32 len, i; char *str; str = (char*)((SFScript *)ptr)->script_text; if (!str) { if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "\"\""); } break; } len = (u32)strlen(str); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "\"%s\"", str); } else { u16 *uniLine; uniLine = (u16*)gf_malloc(sizeof(short) * (len + 1)); len = gf_utf8_mbstowcs(uniLine, len, (const char **)&str); if (len != GF_UTF8_FAIL) { for (i = 0; i<len; i++) { switch (uniLine[i]) { case '&': gf_fprintf(sdump->trace, "&amp;"); break; case '<': gf_fprintf(sdump->trace, "&lt;"); break; case '>': gf_fprintf(sdump->trace, "&gt;"); break; case '\'': case '"': gf_fprintf(sdump->trace, "&apos;"); break; case 0: break; /*FIXME: how the heck can we preserve newlines and spaces of JavaScript in an XML attribute in any viewer ? */ default: if (uniLine[i]<128) { gf_fprintf(sdump->trace, "%c", (u8)uniLine[i]); } else { gf_fprintf(sdump->trace, "&#%d;", uniLine[i]); } break; } } } gf_free(uniLine); } DUMP_IND(sdump); } break; case GF_SG_VRML_SFSTRING: { char *str; if (sdump->XMLDump) { if (is_mf) gf_fprintf(sdump->trace, sdump->X3DDump ? 
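/* MFString items are quoted per dialect by the SFSTRING branch here: BT
   wraps each item in plain double quotes, X3D XML keeps literal quotes
   inside the single-quoted attribute opened by gf_dump_vrml_simple_field,
   and XMT-A escapes them as &quot; so several strings fit in one attribute.
   A two-item url field would dump roughly as (hypothetical values):
     BT   : ["movie.mp4" "backup.mp4"]
     X3D  : value='"movie.mp4" "backup.mp4"'
     XMT-A: value='&quot;movie.mp4&quot; &quot;backup.mp4&quot;' */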
"\"" : "&quot;"); } else { gf_fprintf(sdump->trace, "\""); } /*dump in unicode*/ str = ((SFString *)ptr)->buffer; if (node && (gf_node_get_tag(node)==TAG_MPEG4_BitWrapper)) { u32 bufsize = 37 + ((M_BitWrapper*)node)->buffer_len * 2 + 3; str = gf_malloc(sizeof(char) * bufsize); if (str) { s32 res; strcpy(str, "data:application/octet-string;base64,"); res = gf_base64_encode(((M_BitWrapper*)node)->buffer.buffer, ((M_BitWrapper*)node)->buffer_len, str+37, bufsize-37); if (res<0) { gf_free(str); str = NULL; } else { str[res+37] = 0; } } } if (str && str[0]) { if (sdump->XMLDump) { scene_dump_utf_string(sdump, 1, str); } else if (!strchr(str, '\"')) { gf_fprintf(sdump->trace, "%s", str); } else { u32 i, len = (u32)strlen(str); for (i=0; i<len; i++) { if (str[i]=='\"') gf_fputc('\\', sdump->trace); gf_fputc(str[i], sdump->trace); } } } if (node && (gf_node_get_tag(node)==TAG_MPEG4_BitWrapper)) { if (str) gf_free(str); } if (sdump->XMLDump) { if (is_mf) gf_fprintf(sdump->trace, sdump->X3DDump ? "\"" : "&quot;"); } else { gf_fprintf(sdump->trace, "\""); } } break; case GF_SG_VRML_SFURL: if (((SFURL *)ptr)->url) { #if 0 u32 len; char *str; short uniLine[5000]; str = ((SFURL *)ptr)->url; len = gf_utf8_mbstowcs(uniLine, 5000, (const char **) &str); if (len != GF_UTF8_FAIL) { gf_fprintf(sdump->trace, sdump->XMLDump ? (sdump->X3DDump ? "'" : "&quot;") : "\""); fwprintf(sdump->trace, (unsigned short *) uniLine); gf_fprintf(sdump->trace, sdump->XMLDump ? (sdump->X3DDump ? "'" : "&quot;") : "\""); } #else gf_fprintf(sdump->trace, sdump->XMLDump ? (sdump->X3DDump ? "'" : "&quot;") : "\""); gf_fprintf(sdump->trace, "%s", ((SFURL *)ptr)->url); gf_fprintf(sdump->trace, sdump->XMLDump ? (sdump->X3DDump ? "'" : "&quot;") : "\""); #endif } else { if (sdump->XMLDump) { gf_fprintf(sdump->trace, "&quot;od://od%d&quot;", ((SFURL *)ptr)->OD_ID); } else { gf_fprintf(sdump->trace, "od:%d", ((SFURL *)ptr)->OD_ID); } } break; case GF_SG_VRML_SFIMAGE: { u32 i, count; SFImage *img = (SFImage *)ptr; gf_fprintf(sdump->trace, "%d %d %d", img->width, img->height, img->numComponents); count = img->width * img->height * img->numComponents; for (i=0; i<count; ) { switch (img->numComponents) { case 1: gf_fprintf(sdump->trace, " 0x%02X", img->pixels[i]); i++; break; case 2: gf_fprintf(sdump->trace, " 0x%02X%02X", img->pixels[i], img->pixels[i+1]); i+=2; break; case 3: gf_fprintf(sdump->trace, " 0x%02X%02X%02X", img->pixels[i], img->pixels[i+1], img->pixels[i+2]); i+=3; break; case 4: gf_fprintf(sdump->trace, " 0x%02X%02X%02X%02X", img->pixels[i], img->pixels[i+1], img->pixels[i+2], img->pixels[i+3]); i+=4; break; } } } break; } } static void gf_dump_vrml_simple_field(GF_SceneDumper *sdump, GF_FieldInfo field, GF_Node *parent) { u32 i, sf_type; GF_ChildNodeItem *list; void *slot_ptr; switch (field.fieldType) { case GF_SG_VRML_SFNODE: gf_dump_vrml_node(sdump, field.far_ptr ? 
*(GF_Node **)field.far_ptr : NULL, 0, NULL); return; case GF_SG_VRML_MFNODE: list = * ((GF_ChildNodeItem **) field.far_ptr); assert( list ); sdump->indent++; while (list) { gf_dump_vrml_node(sdump, list->node, 1, NULL); list = list->next; } sdump->indent--; return; case GF_SG_VRML_SFCOMMANDBUFFER: return; } if (gf_sg_vrml_is_sf_field(field.fieldType)) { if (sdump->XMLDump) StartAttribute(sdump, "value"); gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, parent); if (sdump->XMLDump) EndAttribute(sdump); } else { GenMFField *mffield; mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "["); } else if (sf_type==GF_SG_VRML_SFSTRING) { gf_fprintf(sdump->trace, " value=\'"); } else { StartAttribute(sdump, "value"); } for (i=0; mffield && (i<mffield->count); i++) { if (i) gf_fprintf(sdump->trace, " "); gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); /*this is to cope with single MFString which shall appear as SF in XMT*/ gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, parent); } if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "]"); } else if (sf_type==GF_SG_VRML_SFSTRING) { gf_fprintf(sdump->trace, "\'"); } else { EndAttribute(sdump); } } } static void gf_dump_vrml_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field) { u32 i, sf_type; Bool needs_field_container; GF_ChildNodeItem *list; void *slot_ptr; switch (field.fieldType) { case GF_SG_VRML_SFNODE: assert ( *(GF_Node **)field.far_ptr); if (sdump->XMLDump) { if (!sdump->X3DDump) { StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; } } else { StartAttribute(sdump, field.name); } gf_dump_vrml_node(sdump, *(GF_Node **)field.far_ptr, 0, NULL); if (sdump->XMLDump) { if (!sdump->X3DDump) { sdump->indent--; EndElement(sdump, (char *) field.name, 1); } } else { EndAttribute(sdump); } return; case GF_SG_VRML_MFNODE: needs_field_container = 0; if (sdump->XMLDump && sdump->X3DDump) { u32 count, nb_ndt; GF_FieldInfo info; if (!strcmp(field.name, "children")) { needs_field_container = 0; } else { nb_ndt = 0; count = gf_node_get_field_count(node); for (i=0; i<count; i++) { gf_node_get_field(node, i, &info); if ((info.eventType==GF_SG_EVENT_IN) || (info.eventType==GF_SG_EVENT_OUT)) continue; if (info.NDTtype==field.NDTtype) nb_ndt++; } needs_field_container = (nb_ndt>1) ? 1 : 0; } } #ifndef GPAC_DISABLE_X3D if (!sdump->X3DDump) { if (gf_node_get_tag(node)==TAG_X3D_Switch) field.name = "choice"; } #endif list = * ((GF_ChildNodeItem **) field.far_ptr); assert(list); if (!sdump->XMLDump || !sdump->X3DDump) StartList(sdump, field.name); sdump->indent++; while (list) { gf_dump_vrml_node(sdump, list->node, 1, needs_field_container ? 
(char *) field.name : NULL); list = list->next; } sdump->indent--; if (!sdump->XMLDump || !sdump->X3DDump) EndList(sdump, field.name); return; case GF_SG_VRML_SFCOMMANDBUFFER: { SFCommandBuffer *cb = (SFCommandBuffer *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; if (!gf_list_count(cb->commandList)) { /*the arch does not allow for that (we would need a codec and so on, or decompress the command list in all cases...)*/ if (sdump->trace && cb->bufferSize) { if (sdump->XMLDump) gf_fprintf(sdump->trace, "<!--SFCommandBuffer cannot be dumped while playing - use MP4Box instead-->\n"); else gf_fprintf(sdump->trace, "#SFCommandBuffer cannot be dumped while playing - use MP4Box instead\n"); } } else { gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent, 0); } sdump->indent--; EndElement(sdump, (char *) field.name, 1); } return; case GF_SG_VRML_MFATTRREF: if (sdump->XMLDump) { MFAttrRef *ar = (MFAttrRef *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; for (i=0; i<ar->count; i++) { if (ar->vals[i].node) { GF_FieldInfo pinfo; DUMP_IND(sdump); gf_node_get_field(ar->vals[i].node, ar->vals[i].fieldIndex, &pinfo); gf_fprintf(sdump->trace, "<store node=\""); scene_dump_vrml_id(sdump, ar->vals[i].node); gf_fprintf(sdump->trace, "\" field=\"%s\"/>\n", pinfo.name); } } sdump->indent--; EndElement(sdump, (char *) field.name, 1); return; } break; } if (gf_sg_vrml_is_sf_field(field.fieldType)) { StartAttribute(sdump, field.name); gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node); EndAttribute(sdump); } else { GenMFField *mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, " %s=\'", (char *) field.name); break; default: StartAttribute(sdump, field.name); break; } } else { StartAttribute(sdump, field.name); } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "["); for (i=0; mffield && (i<mffield->count); i++) { if (i) gf_fprintf(sdump->trace, " "); gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, node); } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "]"); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, "\'"); break; default: EndAttribute(sdump); break; } } else { EndAttribute(sdump); } } } static const char *GetXMTFieldTypeName(u32 fieldType) { switch (fieldType) { case GF_SG_VRML_SFBOOL: return "Boolean"; case GF_SG_VRML_SFINT32: return "Integer"; case GF_SG_VRML_SFCOLOR: return "Color"; case GF_SG_VRML_SFVEC2F: return "Vector2"; case GF_SG_VRML_SFIMAGE: return "Image"; case GF_SG_VRML_SFTIME: return "Time"; case GF_SG_VRML_SFFLOAT: return "Float"; case GF_SG_VRML_SFVEC3F: return "Vector3"; case GF_SG_VRML_SFROTATION: return "Rotation"; case GF_SG_VRML_SFSTRING: return "String"; case GF_SG_VRML_SFNODE: return "Node"; case GF_SG_VRML_MFBOOL: return "Booleans"; case GF_SG_VRML_MFINT32: return "Integers"; case GF_SG_VRML_MFCOLOR: return "Colors"; case GF_SG_VRML_MFVEC2F: return "Vector2Array"; case GF_SG_VRML_MFIMAGE: return "Images"; case GF_SG_VRML_MFTIME: return "Times"; case GF_SG_VRML_MFFLOAT: return "Floats"; case GF_SG_VRML_MFVEC3F: return "Vector3Array"; case GF_SG_VRML_MFROTATION: return 
"Rotations"; case GF_SG_VRML_MFSTRING: return "Strings"; case GF_SG_VRML_MFNODE: return "Nodes"; default: return "unknown"; } } static const char *GetXMTFieldTypeValueName(u32 fieldType) { switch (fieldType) { case GF_SG_VRML_SFBOOL: return "booleanValue"; case GF_SG_VRML_SFINT32: return "intValue"; case GF_SG_VRML_SFCOLOR: return "colorValue"; case GF_SG_VRML_SFVEC2F: return "vector2Value"; case GF_SG_VRML_SFIMAGE: return "imageValue"; case GF_SG_VRML_SFTIME: return "timeValue"; case GF_SG_VRML_SFFLOAT: return "floatValue"; case GF_SG_VRML_SFVEC3F: return "vector3Value"; case GF_SG_VRML_SFROTATION: return "rotationValue"; case GF_SG_VRML_SFSTRING: return "stringValue"; case GF_SG_VRML_MFBOOL: return "booleanArrayValue"; case GF_SG_VRML_MFINT32: return "intArrayValue"; case GF_SG_VRML_MFCOLOR: return "colorArrayValue"; case GF_SG_VRML_MFVEC2F: return "vector2ArrayValue"; case GF_SG_VRML_MFIMAGE: return "imageArrayValue"; case GF_SG_VRML_MFTIME: return "timeArrayValue"; case GF_SG_VRML_MFFLOAT: return "floatArrayValue"; case GF_SG_VRML_MFVEC3F: return "vector3ArrayValue"; case GF_SG_VRML_MFROTATION: return "rotationArrayValue"; case GF_SG_VRML_MFSTRING: return "stringArrayValue"; default: return "unknown"; } } /*field dumping for proto declaration and Script*/ static void gf_dump_vrml_dyn_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field, Bool has_sublist) { u32 i, sf_type; void *slot_ptr; if (gf_sg_vrml_is_sf_field(field.fieldType)) { DUMP_IND(sdump); if (sdump->XMLDump) { if (sdump->X3DDump) { gf_fprintf(sdump->trace, "<field name=\"%s\" type=\"%s\" accessType=\"%s\"", field.name, gf_sg_vrml_get_field_type_name(field.fieldType), gf_sg_vrml_get_event_type_name(field.eventType, 1)); } else { gf_fprintf(sdump->trace, "<field name=\"%s\" type=\"%s\" vrml97Hint=\"%s\"", field.name, GetXMTFieldTypeName(field.fieldType), gf_sg_vrml_get_event_type_name(field.eventType, 0)); } if ((field.eventType == GF_SG_EVENT_FIELD) || (field.eventType == GF_SG_EVENT_EXPOSED_FIELD)) { if (field.fieldType == GF_SG_VRML_SFNODE) { if (!sdump->X3DDump) { gf_fprintf(sdump->trace, ">\n"); sdump->indent++; gf_fprintf(sdump->trace, "<node>"); gf_dump_vrml_node(sdump, field.far_ptr ? *(GF_Node **)field.far_ptr : NULL, 0, NULL); gf_fprintf(sdump->trace, "</node>"); sdump->indent--; if (!has_sublist) gf_fprintf(sdump->trace, "</field>\n"); } else { if (field.far_ptr) { gf_fprintf(sdump->trace, ">\n"); gf_dump_vrml_node(sdump, *(GF_Node **)field.far_ptr, 0, NULL); gf_fprintf(sdump->trace, "</field>\n"); } else { gf_fprintf(sdump->trace, "/>\n"); } } DUMP_IND(sdump); } else { if (sdump->X3DDump) { gf_fprintf(sdump->trace, " value=\""); } else { gf_fprintf(sdump->trace, " %s=\"", GetXMTFieldTypeValueName(field.fieldType)); } if (field.far_ptr) gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node); if (has_sublist) gf_fprintf(sdump->trace, "\">\n"); else gf_fprintf(sdump->trace, "\"/>\n"); } } else { gf_fprintf(sdump->trace, "/>\n"); } } else { gf_fprintf(sdump->trace, "%s %s %s", gf_sg_vrml_get_event_type_name(field.eventType, sdump->X3DDump), gf_sg_vrml_get_field_type_name(field.fieldType), field.name); if ((field.eventType==GF_SG_EVENT_FIELD) || (field.eventType==GF_SG_EVENT_EXPOSED_FIELD)) { gf_fprintf(sdump->trace, " "); if (field.fieldType == GF_SG_VRML_SFNODE) { gf_dump_vrml_node(sdump, field.far_ptr ? 
*(GF_Node **)field.far_ptr : NULL, 0, NULL); } else if (field.far_ptr) { gf_dump_vrml_simple_field(sdump, field, node); } } gf_fprintf(sdump->trace, "\n"); } } else if (field.far_ptr) { GenMFField *mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "%s %s %s", gf_sg_vrml_get_event_type_name(field.eventType, sdump->X3DDump), gf_sg_vrml_get_field_type_name(field.fieldType), field.name); if ((field.eventType==GF_SG_EVENT_FIELD) || (field.eventType==GF_SG_EVENT_EXPOSED_FIELD)) { gf_fprintf(sdump->trace, " ["); if (sf_type == GF_SG_VRML_SFNODE) { GF_ChildNodeItem *l = *(GF_ChildNodeItem **)field.far_ptr; gf_fprintf(sdump->trace, "\n"); sdump->indent++; while (l) { gf_dump_vrml_node(sdump, l->node, 1, NULL); l = l->next; } sdump->indent--; DUMP_IND(sdump); } else { for (i=0; mffield && (i<mffield->count); i++) { if (i) gf_fprintf(sdump->trace, " "); if (field.fieldType != GF_SG_VRML_MFNODE) { gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, (mffield->count>1) ? 1 : 0, node); } } } gf_fprintf(sdump->trace, "]"); } gf_fprintf(sdump->trace, "\n"); } else { if (sdump->X3DDump) { gf_fprintf(sdump->trace, "<field name=\"%s\" type=\"%s\" accessType=\"%s\"", field.name, gf_sg_vrml_get_field_type_name(field.fieldType), gf_sg_vrml_get_event_type_name(field.eventType, 1)); } else { gf_fprintf(sdump->trace, "<field name=\"%s\" type=\"%s\" vrml97Hint=\"%s\"", field.name, GetXMTFieldTypeName(field.fieldType), gf_sg_vrml_get_event_type_name(field.eventType, 0)); } if ((field.eventType==GF_SG_EVENT_FIELD) || (field.eventType==GF_SG_EVENT_EXPOSED_FIELD)) { if (sf_type == GF_SG_VRML_SFNODE) { GF_ChildNodeItem *list = *(GF_ChildNodeItem **)field.far_ptr; gf_fprintf(sdump->trace, ">\n"); sdump->indent++; if (!sdump->X3DDump) gf_fprintf(sdump->trace, "<nodes>"); while (list) { gf_dump_vrml_node(sdump, list->node, 1, NULL); list = list->next; } if (!sdump->X3DDump) gf_fprintf(sdump->trace, "</nodes>"); sdump->indent++; DUMP_IND(sdump); if (!has_sublist) gf_fprintf(sdump->trace, "</field>\n"); } else { if (sdump->X3DDump) { gf_fprintf(sdump->trace, " value=\""); } else { gf_fprintf(sdump->trace, " %s=\"", GetXMTFieldTypeValueName(field.fieldType)); } for (i=0; mffield && (i<mffield->count); i++) { if (i) gf_fprintf(sdump->trace, " "); if (field.fieldType != GF_SG_VRML_MFNODE) { gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, (mffield->count>1) ? 1 : 0, node); } } if (has_sublist) gf_fprintf(sdump->trace, "\">\n"); else gf_fprintf(sdump->trace, "\"/>\n"); } } else { gf_fprintf(sdump->trace, "/>\n"); } } } } /*field dumping for proto instance*/ static void gf_dump_vrml_proto_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field) { u32 i, sf_type; void *slot_ptr; DUMP_IND(sdump); gf_fprintf(sdump->trace, "<fieldValue name=\"%s\" ", field.name); if (gf_sg_vrml_is_sf_field(field.fieldType)) { if (field.fieldType == GF_SG_VRML_SFNODE) { gf_fprintf(sdump->trace, ">\n"); sdump->indent++; if (!sdump->X3DDump) gf_fprintf(sdump->trace, "<node>"); gf_dump_vrml_node(sdump, field.far_ptr ? 
*(GF_Node **)field.far_ptr : NULL, 0, NULL);
		if (!sdump->X3DDump) gf_fprintf(sdump->trace, "</node>");
		sdump->indent--;
		DUMP_IND(sdump);
		gf_fprintf(sdump->trace, "</fieldValue>\n");
	} else {
		if (sdump->X3DDump) {
			gf_fprintf(sdump->trace, " value=\"");
		} else {
			gf_fprintf(sdump->trace, " %s=\"", GetXMTFieldTypeValueName(field.fieldType));
		}
		gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node);
		gf_fprintf(sdump->trace, "\"/>\n");
	}
} else {
	GenMFField *mffield = (GenMFField *) field.far_ptr;
	sf_type = gf_sg_vrml_get_sf_type(field.fieldType);
	if ((field.eventType==GF_SG_EVENT_FIELD) || (field.eventType==GF_SG_EVENT_EXPOSED_FIELD)) {
		if (sf_type == GF_SG_VRML_SFNODE) {
			GF_ChildNodeItem *list = *(GF_ChildNodeItem **)field.far_ptr;
			gf_fprintf(sdump->trace, ">\n");
			sdump->indent++;
			if (!sdump->X3DDump) gf_fprintf(sdump->trace, "<nodes>");
			while (list) {
				gf_dump_vrml_node(sdump, list->node, 1, NULL);
				list = list->next;
			}
			if (!sdump->X3DDump) gf_fprintf(sdump->trace, "</nodes>");
			sdump->indent--;
			DUMP_IND(sdump);
			gf_fprintf(sdump->trace, "</fieldValue>\n");
		} else {
			if (sdump->X3DDump) {
				gf_fprintf(sdump->trace, " value=\"");
			} else {
				gf_fprintf(sdump->trace, " %s=\"", GetXMTFieldTypeValueName(field.fieldType));
			}
			for (i=0; mffield && (i<mffield->count); i++) {
				if (i) gf_fprintf(sdump->trace, " ");
				if (field.fieldType != GF_SG_VRML_MFNODE) {
					gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i);
					gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, (mffield->count>1) ? 1 : 0, node);
				}
			}
			gf_fprintf(sdump->trace, "\"/>\n");
		}
	}
}

static GF_Route *gf_dump_vrml_get_IS(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo *field)
{
	u32 i;
	GF_Route *r;
	/*look for an IS route in the current proto sub-graph first, then on the node itself*/
	i=0;
	while ((r = (GF_Route*)gf_list_enum(sdump->current_proto->sub_graph->Routes, &i))) {
		if (!r->IS_route) continue;
		if ((r->ToNode==node) && (r->ToField.fieldIndex==field->fieldIndex)) return r;
	}
	if (!node || !node->sgprivate->interact || !node->sgprivate->interact->routes) return NULL;
	i=0;
	while ((r = (GF_Route*)gf_list_enum(node->sgprivate->interact->routes, &i))) {
		if (!r->IS_route) continue;
		if (r->FromField.fieldIndex == field->fieldIndex) return r;
	}
	return NULL;
}

static void gf_dump_vrml_IS_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field, Bool isScript, Bool skip_is)
{
	GF_FieldInfo pfield;
	GF_Route *r = gf_dump_vrml_get_IS(sdump, node, &field);
	/*no IS route found - can happen on malformed content, bail out rather than dereferencing NULL*/
	if (!r) return;
	if (r->FromNode) {
		pfield.fieldIndex = r->ToField.fieldIndex;
		gf_sg_proto_get_field(sdump->current_proto, NULL, &pfield);
	} else {
		pfield.fieldIndex = r->FromField.fieldIndex;
		gf_sg_proto_get_field(sdump->current_proto, NULL, &pfield);
	}
	if (!sdump->XMLDump) {
		DUMP_IND(sdump);
		if (isScript) gf_fprintf(sdump->trace, "%s %s ", gf_sg_vrml_get_event_type_name(field.eventType, sdump->X3DDump), gf_sg_vrml_get_field_type_name(field.fieldType));
		gf_fprintf(sdump->trace, "%s IS %s\n", field.name, pfield.name);
	} else {
		if (!skip_is) {
			StartElement(sdump, "IS");
			EndElementHeader(sdump, 1);
			sdump->indent++;
		}
		DUMP_IND(sdump);
		gf_fprintf(sdump->trace, "<connect nodeField=\"%s\" protoField=\"%s\"/>\n", field.name, pfield.name);
		if (!skip_is) {
			sdump->indent--;
			EndElement(sdump, "IS", 1);
		}
	}
}

static Bool scene_dump_vrml_can_dump(GF_SceneDumper *sdump, GF_Node *node)
{
#ifndef GPAC_DISABLE_VRML
	u32 tag;
	if (node->sgprivate->tag==TAG_ProtoNode) return 1;
	if (sdump->X3DDump || (sdump->dump_mode==GF_SM_DUMP_VRML)) {
		if (node->sgprivate->tag>=GF_NODE_RANGE_FIRST_X3D) return 1;
		if (node->sgprivate->tag==TAG_MPEG4_Rectangle) return 1;
		if (node->sgprivate->tag==TAG_MPEG4_Circle) return
1; #ifndef GPAC_DISABLE_X3D tag = gf_node_x3d_type_by_class_name(gf_node_get_class_name(node)); return tag ? 1 : 0; #else return 0; #endif } else { if (node->sgprivate->tag<=GF_NODE_RANGE_LAST_MPEG4) return 1; #ifndef GPAC_DISABLE_X3D if (node->sgprivate->tag==TAG_X3D_Rectangle2D) return 1; if (node->sgprivate->tag==TAG_X3D_Circle2D) return 1; #endif tag = gf_node_mpeg4_type_by_class_name(gf_node_get_class_name(node)); return tag ? 1 : 0; } #else return 1; #endif } static void gf_dump_vrml_node(GF_SceneDumper *sdump, GF_Node *node, Bool in_list, char *fieldContainer) { u32 i, count, to_dump, sub_el, ID; u32 *def_fields; Bool isDEF, isScript, isProto, hasISed; char *name; GF_Node *base; GF_FieldInfo field, base_field; if (!node) { gf_fprintf(sdump->trace, "NULL"); return; } /*this dumper works only for VRML like graphs*/ if (node->sgprivate->tag>GF_NODE_RANGE_LAST_X3D) return; if (!scene_dump_vrml_can_dump(sdump, node)) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[Scene Dump] node %s not part of %s standard - removing\n", gf_node_get_class_name(node), sdump->X3DDump ? "X3D" : (sdump->dump_mode==GF_SM_DUMP_VRML) ? "VRML" : "MPEG4")); if (!in_list) gf_fprintf(sdump->trace, "NULL"); return; } /*convert whatever possible*/ name = (char*)gf_node_get_class_name(node); #ifndef GPAC_DISABLE_VRML if (sdump->X3DDump) { if (node->sgprivate->tag == TAG_MPEG4_Circle) name = "Circle2D"; else if (node->sgprivate->tag == TAG_MPEG4_Rectangle) name = "Rectangle2D"; #ifndef GPAC_DISABLE_X3D } else { if (node->sgprivate->tag == TAG_X3D_Circle2D) name = "Circle"; else if (node->sgprivate->tag == TAG_X3D_Rectangle2D) name = "Rectangle"; #endif } #endif isProto = (gf_node_get_tag(node) == TAG_ProtoNode) ? 1 : 0; ID = gf_node_get_id(node); isDEF = 0; if (ID) { isDEF = gf_dump_vrml_is_def_node(sdump, node); if (!isDEF) { if (!sdump->XMLDump) { if (in_list) DUMP_IND(sdump); gf_fprintf(sdump->trace, "USE "); scene_dump_vrml_id(sdump, node); if (in_list) gf_fprintf(sdump->trace, "\n"); } else { if (isProto) { StartElement(sdump, "ProtoInstance"); StartAttribute(sdump, "name"); gf_fprintf(sdump->trace, "%s", name); EndAttribute(sdump); } else { StartElement(sdump, name); } StartAttribute(sdump, "USE"); scene_dump_vrml_id(sdump, node); EndAttribute(sdump); EndElementHeader(sdump, 0); } return; } } /*get all fields*/ count = gf_node_get_field_count(node); def_fields = (u32*)gf_malloc(sizeof(u32) * count); base = NULL; switch (gf_node_get_tag(node)) { #ifndef GPAC_DISABLE_VRML #ifndef GPAC_DISABLE_X3D case TAG_X3D_Script: #endif case TAG_MPEG4_Script: isScript = 1; break; #endif default: isScript = 0; break; } if (!isScript) { if (isProto) { base = gf_sg_proto_create_instance(node->sgprivate->scenegraph, ((GF_ProtoInstance *)node)->proto_interface); } else { base = gf_node_new(node->sgprivate->scenegraph, node->sgprivate->tag); } } if (base) gf_node_register(base, NULL); hasISed = 0; to_dump = sub_el = 0; for (i=0; i<count; i++) { if (isScript) { /*dyn script fields are complex types*/ def_fields[i] = (i>2) ? 2 : 1; } else { def_fields[i] = 0; } gf_node_get_field(node, i, &field); if (sdump->current_proto) { if (gf_dump_vrml_get_IS(sdump, node, &field) != NULL) { def_fields[i] = 3; if ((field.fieldType == GF_SG_VRML_SFNODE) || (field.fieldType == GF_SG_VRML_MFNODE)) def_fields[i] = sdump->XMLDump ? 
4 : 3; /*in XMT the ISed is not an attribute*/ if (sdump->XMLDump) sub_el++; to_dump++; hasISed = 1; continue; } } if (!isScript && ((field.eventType == GF_SG_EVENT_IN) || (field.eventType == GF_SG_EVENT_OUT)) ) { continue; } /*proto instance in XMT lists all fields as elements*/ if (sdump->XMLDump && isProto) { def_fields[i] = 2; to_dump++; sub_el++; continue; } switch (field.fieldType) { case GF_SG_VRML_SFNODE: if (* (GF_Node **) field.far_ptr) { def_fields[i] = 2; to_dump++; sub_el++; } break; case GF_SG_VRML_MFNODE: if (* (GF_ChildNodeItem**) field.far_ptr) { def_fields[i] = 2; to_dump++; sub_el++; } break; case GF_SG_VRML_SFCOMMANDBUFFER: { SFCommandBuffer *p = (SFCommandBuffer *)field.far_ptr; if (p->bufferSize || gf_list_count(p->commandList)) { def_fields[i] = 2; to_dump++; sub_el++; } } break; case GF_SG_VRML_MFATTRREF: { MFAttrRef *p = (MFAttrRef*)field.far_ptr; if (p->count) { def_fields[i] = 2; to_dump++; sub_el++; } } break; default: if (isScript) { to_dump++; } else { gf_node_get_field(base, i, &base_field); if (!gf_sg_vrml_field_equal(base_field.far_ptr, field.far_ptr, field.fieldType)) { def_fields[i] = 1; to_dump++; } } break; } } if (base) gf_node_unregister(base, NULL); if (!to_dump) { if (in_list) DUMP_IND(sdump); if (!sdump->XMLDump) { if (isDEF) { gf_fprintf(sdump->trace, "DEF "); scene_dump_vrml_id(sdump, node); gf_fprintf(sdump->trace, " "); } gf_fprintf(sdump->trace, "%s {}\n", name); } else { if (isDEF) { if (isProto) { gf_fprintf(sdump->trace, "<ProtoInstance name=\"%s\" DEF=\"", name); } else { gf_fprintf(sdump->trace, "<%s DEF=\"", name); } scene_dump_vrml_id(sdump, node); gf_fprintf(sdump->trace, "\"/>\n"); } else { if (isProto) { gf_fprintf(sdump->trace, "<ProtoInstance name=\"%s\"/>\n", name); } else { gf_fprintf(sdump->trace, "<%s/>\n", name); } } } gf_free(def_fields); return; } if (!sdump->XMLDump) { if (in_list) DUMP_IND(sdump); if (isDEF) { gf_fprintf(sdump->trace, "DEF "); scene_dump_vrml_id(sdump, node); gf_fprintf(sdump->trace, " "); } gf_fprintf(sdump->trace, "%s {\n", name); } else { if (isProto) { StartElement(sdump, "ProtoInstance"); StartAttribute(sdump, "name"); gf_fprintf(sdump->trace, "%s", name); EndAttribute(sdump); } else { StartElement(sdump, name); } if (isDEF) { StartAttribute(sdump, "DEF"); scene_dump_vrml_id(sdump, node); EndAttribute(sdump); } } sdump->indent ++; for (i=0; i<count; i++) { switch (def_fields[i]) { /*regular field*/ case 1: gf_node_get_field(node, i, &field); if (!isScript) { gf_dump_vrml_field(sdump, node, field); } /*special script dump case, static fields except url*/ else if (i==1 || i==2) { if (*((SFBool *)field.far_ptr)) gf_dump_vrml_field(sdump, node, field); } /*in bt first dump fields - in XMT first dump url*/ else if (i && !sdump->XMLDump) { gf_dump_vrml_dyn_field(sdump, node, field, 0); } else if (!i && sdump->XMLDump) { gf_dump_vrml_field(sdump, node, field); } break; /*IS field*/ case 3: if (sdump->XMLDump) break; gf_node_get_field(node, i, &field); gf_dump_vrml_IS_field(sdump, node, field, isScript, 0); def_fields[i] = 0; break; default: break; } } if (fieldContainer) gf_fprintf(sdump->trace, " fieldContainer=\"%s\"", fieldContainer); if (isScript) sub_el = 1; EndElementHeader(sdump, sub_el ? 
1 : 0); if (sub_el) { /*dump all normal IS elements for XMT*/ if (hasISed && sdump->XMLDump) { StartElement(sdump, "IS"); EndElementHeader(sdump, 1); sdump->indent++; } for (i=0; i<count; i++) { if (def_fields[i]==3) { gf_node_get_field(node, i, &field); gf_dump_vrml_IS_field(sdump, node, field, isScript, 1); } } if (hasISed && sdump->XMLDump) { sdump->indent--; EndElement(sdump, "IS", 1); } /*dump all sub elements and complex IS*/ for (i=0; i<count; i++) { switch (def_fields[i]) { case 2: gf_node_get_field(node, i, &field); if (!isScript) { if (isProto && sdump->XMLDump) { gf_dump_vrml_proto_field(sdump, node, field); } else { gf_dump_vrml_field(sdump, node, field); } } else { #ifndef GPAC_DISABLE_X3D /*X3D script metadata, NOT DYN*/ if ((i==3) && (node->sgprivate->tag==TAG_X3D_Script) ) { if (*((GF_Node **)field.far_ptr)) gf_dump_vrml_field(sdump, node, field); } else #endif { gf_dump_vrml_dyn_field(sdump, node, field, 0); } } break; case 4: gf_node_get_field(node, i, &field); gf_dump_vrml_IS_field(sdump, node, field, isScript, 0); break; } } } /*finally dump script - XMT dumping is broken!!*/ if (isScript && !sdump->XMLDump) { gf_node_get_field(node, 0, &field); gf_dump_vrml_field(sdump, node, field); } sdump->indent --; if (!sdump->XMLDump && !in_list) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "}"); } else { EndElement(sdump, isProto ? "ProtoInstance" : name, sub_el); } gf_free(def_fields); } static GF_Err DumpMultipleIndexedReplace(GF_SceneDumper *sdump, GF_Command *com) { u32 i; GF_FieldInfo field; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); gf_node_get_field(com->node, inf->fieldIndex, &field); field.fieldType = inf->fieldType; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace extended=\"indices\" atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\">\n", field.name); } else { gf_fprintf(sdump->trace, "MULTIPLEINDREPLACE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s [\n", field.name); } sdump->indent++; i=0; while ((inf = (GF_CommandField *) gf_list_enum(com->command_fields, &i))) { field.far_ptr = inf->field_ptr; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<repValue position=\"%d\" ", inf->pos); } else { gf_fprintf(sdump->trace, "%d BY ", inf->pos); } gf_dump_vrml_simple_field(sdump, field, com->node); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "/>"); } else { gf_fprintf(sdump->trace, "\n"); } } sdump->indent--; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "</Replace>\n"); } else { gf_fprintf(sdump->trace, "]\n"); } return GF_OK; } static GF_Err DumpMultipleReplace(GF_SceneDumper *sdump, GF_Command *com) { u32 i; GF_FieldInfo info; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace extended=\"fields\" atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\">\n"); sdump->indent++; i=0; while ((inf = (GF_CommandField *) gf_list_enum(com->command_fields, &i))) { gf_node_get_field(com->node, inf->fieldIndex, &info); info.far_ptr = inf->field_ptr; DUMP_IND(sdump); if (gf_sg_vrml_get_sf_type(info.fieldType) != GF_SG_VRML_SFNODE) { gf_fprintf(sdump->trace, "<repField atField=\"%s\" ", info.name); gf_dump_vrml_simple_field(sdump, info, com->node); gf_fprintf(sdump->trace, "/>\n"); } else { gf_fprintf(sdump->trace, "<repField>"); 
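/*an SFNode value cannot be carried in an XML attribute, so the replacement node is dumped as a child of <repField>*/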
gf_dump_vrml_field(sdump, com->node, info); gf_fprintf(sdump->trace, "</repField>\n"); } } sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "</Replace>\n"); } else { gf_fprintf(sdump->trace, "MULTIPLEREPLACE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, " {\n"); sdump->indent++; i=0; while ((inf = (GF_CommandField *) gf_list_enum(com->command_fields, &i))) { gf_node_get_field(com->node, inf->fieldIndex, &info); info.far_ptr = inf->field_ptr; gf_dump_vrml_field(sdump, com->node, info); } sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "}\n"); } return GF_OK; } static GF_Err DumpGlobalQP(GF_SceneDumper *sdump, GF_Command *com) { GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace extended=\"globalQuant\">\n"); } else { gf_fprintf(sdump->trace, "GLOBALQP "); } gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Replace>\n"); else gf_fprintf(sdump->trace, "\n"); return GF_OK; } static GF_Err DumpNodeInsert(GF_SceneDumper *sdump, GF_Command *com) { GF_CommandField *inf; char posname[20]; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); switch (inf->pos) { case 0: strcpy(posname, "BEGIN"); break; case -1: strcpy(posname, "END"); break; default: sprintf(posname, "%d", inf->pos); break; } DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Insert atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" position=\"%s\">", posname); } else { if (inf->pos==-1) { gf_fprintf(sdump->trace, "APPEND TO "); } else gf_fprintf(sdump->trace, "INSERT AT "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".children"); if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); gf_fprintf(sdump->trace, " "); } gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Insert>"); gf_fprintf(sdump->trace, "\n"); return GF_OK; } static GF_Err DumpRouteInsert(GF_SceneDumper *sdump, GF_Command *com, Bool is_scene_replace) { GF_Route r; memset(&r, 0, sizeof(GF_Route)); r.ID = com->RouteID; r.name = com->def_name; r.FromNode = gf_dump_find_node(sdump, com->fromNodeID); r.FromField.fieldIndex = com->fromFieldIndex; r.ToNode = gf_dump_find_node(sdump, com->toNodeID); r.ToField.fieldIndex = com->toFieldIndex; gf_list_add(sdump->inserted_routes, com); if (is_scene_replace) { gf_dump_vrml_route(sdump, &r, 0); } else { DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Insert>\n"); } else { gf_fprintf(sdump->trace, "INSERT "); } gf_dump_vrml_route(sdump, &r, 2); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Insert>"); } return GF_OK; } static GF_Err DumpIndexInsert(GF_SceneDumper *sdump, GF_Command *com) { GF_Err e; GF_FieldInfo field, sffield; GF_CommandField *inf; char posname[20]; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); switch (inf->pos) { case 0: strcpy(posname, "BEGIN"); break; case -1: strcpy(posname, "END"); break; default: sprintf(posname, "%d", inf->pos); break; } e = gf_node_get_field(com->node, inf->fieldIndex, &field); if (e) return e; if (gf_sg_vrml_is_sf_field(field.fieldType)) return GF_NON_COMPLIANT_BITSTREAM; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Insert atNode=\""); 
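/*atNode carries the target's DEF name when one is set, otherwise the synthetic N<id-1> form emitted by scene_dump_vrml_id*/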
scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\" position=\"%s\"", field.name, posname); } else { if (inf->pos==-1) { gf_fprintf(sdump->trace, "APPEND TO "); } else gf_fprintf(sdump->trace, "INSERT AT "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s", field.name); if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); gf_fprintf(sdump->trace, " "); } memcpy(&sffield, &field, sizeof(GF_FieldInfo)); sffield.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); if (field.fieldType==GF_SG_VRML_MFNODE) { if (sdump->XMLDump) gf_fprintf(sdump->trace, ">\n"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Insert>"); gf_fprintf(sdump->trace, "\n"); } else { sffield.far_ptr = inf->field_ptr; gf_dump_vrml_simple_field(sdump, sffield, com->node); if (sdump->XMLDump) gf_fprintf(sdump->trace, "/>"); gf_fprintf(sdump->trace, "\n"); } return e; } static GF_Err DumpIndexDelete(GF_SceneDumper *sdump, GF_Command *com) { char posname[20]; GF_FieldInfo field; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); switch (inf->pos) { case -1: strcpy(posname, sdump->XMLDump ? "END" : "LAST"); break; case 0: strcpy(posname, "BEGIN"); break; default: sprintf(posname, "%d", inf->pos); break; } gf_node_get_field(com->node, inf->fieldIndex, &field); DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Delete atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\" position=\"%s\"/>", field.name, posname); } else { gf_fprintf(sdump->trace, "DELETE "); if (inf->pos==-1) gf_fprintf(sdump->trace, "%s ", posname); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s", field.name); if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); gf_fprintf(sdump->trace, "\n"); } return GF_OK; } static GF_Err DumpNodeDelete(GF_SceneDumper *sdump, GF_Command *com) { DUMP_IND(sdump); if (sdump->XMLDump) { if (com->tag==GF_SG_NODE_DELETE_EX) { gf_fprintf(sdump->trace, "<Delete extended=\"deleteOrder\" atNode=\""); } else { gf_fprintf(sdump->trace, "<Delete atNode=\""); } scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\"/>\n"); } else { if (com->tag==GF_SG_NODE_DELETE_EX) gf_fprintf(sdump->trace, "X"); gf_fprintf(sdump->trace, "DELETE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\n"); } return GF_OK; } static GF_Err DumpRouteDelete(GF_SceneDumper *sdump, GF_Command *com) { DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Delete atRoute=\""); scene_dump_vrml_route_id(sdump, com->RouteID, com->def_name); gf_fprintf(sdump->trace, "\"/>\n"); } else { gf_fprintf(sdump->trace, "DELETE ROUTE "); scene_dump_vrml_route_id(sdump, com->RouteID, com->def_name); gf_fprintf(sdump->trace, "\n"); } return GF_OK; } static GF_Err DumpNodeReplace(GF_SceneDumper *sdump, GF_Command *com) { GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\">"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); gf_fprintf(sdump->trace, "</Replace>\n"); } else { gf_fprintf(sdump->trace, "REPLACE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, " BY "); gf_dump_vrml_node(sdump, inf->new_node, 0, 
NULL); gf_fprintf(sdump->trace, "\n"); } return GF_OK; } static GF_Err DumpFieldReplace(GF_SceneDumper *sdump, GF_Command *com) { GF_Err e; GF_FieldInfo field; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); e = gf_node_get_field(com->node, inf->fieldIndex, &field); DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\" ", field.name); } else { gf_fprintf(sdump->trace, "REPLACE "); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s BY ", field.name); } switch (field.fieldType) { case GF_SG_VRML_SFNODE: if (sdump->XMLDump) gf_fprintf(sdump->trace, ">"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); if (sdump->XMLDump) gf_fprintf(sdump->trace, "</Replace>"); else gf_fprintf(sdump->trace, "\n"); break; case GF_SG_VRML_MFNODE: { GF_ChildNodeItem *tmp; if (sdump->XMLDump) { gf_fprintf(sdump->trace, ">"); } else { gf_fprintf(sdump->trace, " [\n"); } sdump->indent++; tmp = inf->node_list; while (tmp) { gf_dump_vrml_node(sdump, tmp->node, 1, NULL); tmp = tmp->next; } sdump->indent--; if (sdump->XMLDump) { gf_fprintf(sdump->trace, "</Replace>"); } else { EndList(sdump, NULL); } } break; case GF_SG_VRML_SFCOMMANDBUFFER: if (sdump->XMLDump) { SFCommandBuffer *cb = (SFCommandBuffer*)inf->field_ptr; gf_fprintf(sdump->trace, ">\n"); gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent+1, 0); DUMP_IND(sdump); gf_fprintf(sdump->trace, "</Replace>\n"); } else { SFCommandBuffer *cb = (SFCommandBuffer*)inf->field_ptr; gf_fprintf(sdump->trace, " {\n"); gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent+1, 0); DUMP_IND(sdump); gf_fprintf(sdump->trace, "}\n"); } break; default: field.far_ptr = inf->field_ptr; gf_dump_vrml_simple_field(sdump, field, com->node); if (sdump->XMLDump) gf_fprintf(sdump->trace, "/>"); gf_fprintf(sdump->trace, "\n"); } return e; } static GF_Err DumpIndexReplace(GF_SceneDumper *sdump, GF_Command *com) { char posname[20]; GF_Err e; GF_FieldInfo field; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); e = gf_node_get_field(com->node, inf->fieldIndex, &field); if (e) return e; if (gf_sg_vrml_is_sf_field(field.fieldType)) return GF_NON_COMPLIANT_BITSTREAM; switch (inf->pos) { case 0: strcpy(posname, "BEGIN"); break; case -1: strcpy(posname, sdump->XMLDump ? "END" : "LAST"); break; default: sprintf(posname, "%d", inf->pos); break; } DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\" position=\"%s\"", field.name, posname); } else { gf_fprintf(sdump->trace, "REPLACE "); if (inf->pos==-1) gf_fprintf(sdump->trace, "%s ", posname); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s", field.name); if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); gf_fprintf(sdump->trace, " BY "); } if (field.fieldType == GF_SG_VRML_MFNODE) { if (sdump->XMLDump) gf_fprintf(sdump->trace, ">\n"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); gf_fprintf(sdump->trace, (sdump->XMLDump) ? "</Replace>\n" : "\n"); } else { field.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); field.far_ptr = inf->field_ptr; gf_dump_vrml_simple_field(sdump, field, com->node); gf_fprintf(sdump->trace, sdump->XMLDump ? 
"/>\n" : "\n"); } return GF_OK; } static GF_Err DumpXReplace(GF_SceneDumper *sdump, GF_Command *com) { char posname[20]; GF_Err e; GF_FieldInfo field, idxField; GF_Node *toNode, *target; GF_CommandField *inf; if (!gf_list_count(com->command_fields)) return GF_OK; inf = (GF_CommandField *) gf_list_get(com->command_fields, 0); e = gf_node_get_field(com->node, inf->fieldIndex, &field); if (e) return e; toNode = target = NULL; /*indexed replacement with index given by other node field*/ if (com->toNodeID) { toNode = gf_sg_find_node(com->in_scene, com->toNodeID); if (!toNode) return GF_NON_COMPLIANT_BITSTREAM; e = gf_node_get_field(toNode, com->toFieldIndex, &idxField); if (e) return e; } else { /*indexed replacement */ if (inf->pos>=-1) { if (gf_sg_vrml_is_sf_field(field.fieldType)) return GF_NON_COMPLIANT_BITSTREAM; switch (inf->pos) { case 0: strcpy(posname, "BEGIN"); break; case -1: strcpy(posname, sdump->XMLDump ? "END" : "LAST"); break; default: sprintf(posname, "%d", inf->pos); break; } field.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); } } field.far_ptr = inf->field_ptr; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atNode=\""); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, "\" atField=\"%s\"", field.name); if (toNode) { gf_fprintf(sdump->trace, " atIndexNode=\""); scene_dump_vrml_id(sdump, toNode); gf_fprintf(sdump->trace, "\" atIndexField=\"%s\"", idxField.name); field.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); } if (com->ChildNodeTag) { GF_FieldInfo cfield; GF_Node *cnode; if (com->ChildNodeTag>0) { cnode = gf_node_new(com->in_scene, com->ChildNodeTag); } else { GF_Proto *proto = gf_sg_find_proto(com->in_scene, -com->ChildNodeTag , NULL); if (!proto) return GF_SG_UNKNOWN_NODE; cnode = gf_sg_proto_create_instance(com->in_scene, proto); } if (!cnode) return GF_SG_UNKNOWN_NODE; gf_node_register(cnode, NULL); gf_node_get_field(cnode, com->child_field, &cfield); gf_fprintf(sdump->trace, " atChildField=\"%s\"", cfield.name); gf_node_unregister(cnode, NULL); field.fieldType = cfield.fieldType; } if (com->fromNodeID) { target = gf_sg_find_node(com->in_scene, com->fromNodeID); if (!target) return GF_NON_COMPLIANT_BITSTREAM; e = gf_node_get_field(target, com->fromFieldIndex, &idxField); if (e) return e; gf_fprintf(sdump->trace, " fromNode=\""); scene_dump_vrml_id(sdump, target); gf_fprintf(sdump->trace, "\" fromField=\"%s\">\n", idxField.name); return GF_OK; } else { if (inf->pos>=-1) gf_fprintf(sdump->trace, " position=\"%s\"", posname); } } else { gf_fprintf(sdump->trace, "XREPLACE "); if (inf->pos==-1) gf_fprintf(sdump->trace, "%s ", posname); scene_dump_vrml_id(sdump, com->node); gf_fprintf(sdump->trace, ".%s", field.name); if (toNode) { gf_fprintf(sdump->trace, "["); scene_dump_vrml_id(sdump, toNode); gf_fprintf(sdump->trace, ".%s]", idxField.name); field.fieldType = gf_sg_vrml_get_sf_type(field.fieldType); } else if (inf->pos!=-1) gf_fprintf(sdump->trace, "[%d]", inf->pos); if (com->ChildNodeTag) { GF_FieldInfo cfield; GF_Node *cnode; if (com->ChildNodeTag>0) { cnode = gf_node_new(com->in_scene, com->ChildNodeTag); } else { GF_Proto *proto = gf_sg_find_proto(com->in_scene, -com->ChildNodeTag , NULL); if (!proto) return GF_SG_UNKNOWN_NODE; cnode = gf_sg_proto_create_instance(com->in_scene, proto); } if (!cnode) return GF_SG_UNKNOWN_NODE; gf_node_register(cnode, NULL); gf_node_get_field(cnode, com->child_field, &cfield); gf_fprintf(sdump->trace, ".%s", cfield.name); gf_node_unregister(cnode, NULL); field.fieldType = 
cfield.fieldType; } gf_fprintf(sdump->trace, " BY "); } if (field.fieldType == GF_SG_VRML_MFNODE) { if (sdump->XMLDump) gf_fprintf(sdump->trace, ">\n"); gf_dump_vrml_node(sdump, inf->new_node, 0, NULL); gf_fprintf(sdump->trace, (sdump->XMLDump) ? "</Replace>\n" : "\n"); } else { gf_dump_vrml_simple_field(sdump, field, com->node); gf_fprintf(sdump->trace, sdump->XMLDump ? "/>\n" : "\n"); } return GF_OK; } static GF_Err DumpRouteReplace(GF_SceneDumper *sdump, GF_Command *com) { const char *name; GF_Route r2; if (!scene_dump_vrml_find_route_name(sdump, com->RouteID, &name)) return GF_BAD_PARAM; memset(&r2, 0, sizeof(GF_Route)); r2.FromNode = gf_dump_find_node(sdump, com->fromNodeID); r2.FromField.fieldIndex = com->fromFieldIndex; r2.ToNode = gf_dump_find_node(sdump, com->toNodeID); r2.ToField.fieldIndex = com->toFieldIndex; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Replace atRoute=\""); scene_dump_vrml_route_id(sdump, com->RouteID, (char *) name); gf_fprintf(sdump->trace, "\">\n"); } else { gf_fprintf(sdump->trace, "REPLACE ROUTE "); scene_dump_vrml_route_id(sdump, com->RouteID, (char *) name); gf_fprintf(sdump->trace, " BY "); } gf_dump_vrml_route(sdump, &r2, 1); if (sdump->XMLDump ) gf_fprintf(sdump->trace, "</Replace>"); return GF_OK; } static GF_Err gf_dump_vrml_route(GF_SceneDumper *sdump, GF_Route *r, u32 dump_type) { char toNodeBuf[100], fromNodeBuf[100], *to_node_p, *from_node_p; const char *node_name; u32 id; if (!r->is_setup) { gf_node_get_field(r->FromNode, r->FromField.fieldIndex, &r->FromField); gf_node_get_field(r->ToNode, r->ToField.fieldIndex, &r->ToField); r->is_setup = 1; } if (!r->FromNode || !r->ToNode) return GF_BAD_PARAM; if (sdump->XMLDump || !dump_type) DUMP_IND(sdump); to_node_p = toNodeBuf; from_node_p = fromNodeBuf; node_name = gf_node_get_name_and_id(r->FromNode, &id); if (node_name) { const char *to_name; from_node_p = (char *)node_name; to_name = gf_node_get_name(r->ToNode); if (to_name) { to_node_p = (char *) to_name; } else { id = gf_node_get_id(r->ToNode); sprintf(toNodeBuf, "node_%d", id); } } else { sprintf(fromNodeBuf, "N%d", id-1); sprintf(toNodeBuf, "N%d", gf_node_get_id(r->ToNode) - 1); } if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<ROUTE"); if (r->ID) { StartAttribute(sdump, "DEF"); scene_dump_vrml_route_id(sdump, r->ID, r->name); EndAttribute(sdump); } gf_fprintf(sdump->trace, " fromNode=\"%s\" fromField=\"%s\" toNode=\"%s\" toField=\"%s\"/>\n", from_node_p, r->FromField.name, to_node_p, r->ToField.name); } else { if (dump_type==2) gf_fprintf(sdump->trace, "ROUTE "); if (r->ID) { gf_fprintf(sdump->trace, "DEF "); scene_dump_vrml_route_id(sdump, r->ID, r->name); gf_fprintf(sdump->trace, " "); } if (dump_type==1) { gf_fprintf(sdump->trace, "%s.%s TO %s.%s\n", from_node_p, r->FromField.name, to_node_p, r->ToField.name); } else { if (dump_type!=2) gf_fprintf(sdump->trace, "ROUTE "); gf_fprintf(sdump->trace, "%s.%s TO %s.%s\n", from_node_p, r->FromField.name, to_node_p, r->ToField.name); } } return GF_OK; } static GF_Err DumpProtos(GF_SceneDumper *sdump, GF_List *protoList) { #ifdef GPAC_DISABLE_VRML return GF_OK; #else u32 i, j, count; GF_FieldInfo field; GF_Err e; GF_SceneGraph *prev_sg; GF_Proto *proto, *prev_proto; prev_proto = sdump->current_proto; i=0; while ((proto = (GF_Proto*)gf_list_enum(protoList, &i))) { sdump->current_proto = proto; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, proto->ExternProto.count ? 
"EXTERNPROTO " : "PROTO "); gf_fprintf(sdump->trace, "%s [\n", proto->Name); } else { gf_fprintf(sdump->trace, "<ProtoDeclare name=\"%s\" protoID=\"%d\"", proto->Name, proto->ID); if (proto->ExternProto.count) { gf_fprintf(sdump->trace, " locations=\""); gf_dump_vrml_sffield(sdump, GF_SG_VRML_SFURL, &proto->ExternProto.vals[0], 0, NULL); gf_fprintf(sdump->trace, "\""); } gf_fprintf(sdump->trace, ">\n"); } if (sdump->XMLDump && sdump->X3DDump) gf_fprintf(sdump->trace, "<ProtoInterface>"); sdump->indent++; count = gf_list_count(proto->proto_fields); for (j=0; j<count; j++) { GF_ProtoFieldInterface *pf = (GF_ProtoFieldInterface *)gf_list_get(proto->proto_fields, j); field.fieldIndex = pf->ALL_index; field.eventType = pf->EventType; field.far_ptr = pf->def_value; field.fieldType = pf->FieldType; field.name = pf->FieldName; field.NDTtype = NDT_SFWorldNode; field.on_event_in = NULL; gf_dump_vrml_dyn_field(sdump, NULL, field, pf->QP_Type ? 1 : 0); if (!pf->QP_Type) continue; /*dump interface coding - BT/TXT extensions, not supported by any other tool*/ sdump->indent++; DUMP_IND(sdump); if (sdump->XMLDump) { const char *quant_catname = "unknown"; #ifndef GPAC_DISABLE_BIFS switch (pf->QP_Type) { case QC_3DPOS: quant_catname = "position3D"; break; case QC_2DPOS: quant_catname = "position2D"; break; case QC_ORDER: quant_catname = "drawingOrder"; break; case QC_COLOR: quant_catname = "color"; break; case QC_TEXTURE_COORD: quant_catname = "textureCoordinate"; break; case QC_ANGLE: quant_catname = "angle"; break; case QC_SCALE: quant_catname = "scale"; break; case QC_INTERPOL_KEYS: quant_catname = "keys"; break; case QC_NORMALS: quant_catname = "normals"; break; case QC_ROTATION: quant_catname = "rotations"; break; case QC_SIZE_3D: quant_catname = "size3D"; break; case QC_SIZE_2D: quant_catname = "size2D"; break; case QC_LINEAR_SCALAR: quant_catname = "linear"; break; case QC_COORD_INDEX:quant_catname = "coordIndex"; break; } #endif gf_fprintf(sdump->trace, "<InterfaceCodingParameters quantCategoy=\"%s\"", quant_catname); } else { gf_fprintf(sdump->trace, "{QP %d", pf->QP_Type); } #ifndef GPAC_DISABLE_BIFS if (pf->QP_Type==QC_LINEAR_SCALAR) gf_fprintf(sdump->trace, sdump->XMLDump ? " nbBits=\"%d\"" : " nbBits %d", pf->NumBits); if (pf->hasMinMax) { switch (pf->QP_Type) { case QC_LINEAR_SCALAR: case QC_COORD_INDEX: if (sdump->XMLDump) { gf_fprintf(sdump->trace, " intMin=\"%d\" intMax=\"%d\"", *((SFInt32 *)pf->qp_min_value), *((SFInt32 *)pf->qp_max_value)); } else { gf_fprintf(sdump->trace, " b {%d %d}", *((SFInt32 *)pf->qp_min_value), *((SFInt32 *)pf->qp_max_value)); } break; default: if (sdump->XMLDump) { gf_fprintf(sdump->trace, " floatMin=\"%g\" floatMax=\"%g\"", FIX2FLT( *((SFFloat *)pf->qp_min_value) ), FIX2FLT( *((SFFloat *)pf->qp_max_value) )); } else { gf_fprintf(sdump->trace, " b {%g %g}", FIX2FLT( *((SFFloat *)pf->qp_min_value) ), FIX2FLT( *((SFFloat *)pf->qp_max_value) ) ); } break; } } #endif gf_fprintf(sdump->trace, sdump->XMLDump ? 
"/>\n" : "}\n"); sdump->indent--; if (sdump->XMLDump) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "</field>\n"); } } sdump->indent--; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "]"); } else if (sdump->X3DDump) gf_fprintf(sdump->trace, "</ProtoInterface>\n"); if (proto->ExternProto.count) { if (!sdump->XMLDump) { gf_fprintf(sdump->trace, " \""); gf_dump_vrml_sffield(sdump, GF_SG_VRML_SFURL, &proto->ExternProto.vals[0], 0, NULL); gf_fprintf(sdump->trace, "\"\n\n"); } else { gf_fprintf(sdump->trace, "</ProtoDeclare>\n"); } continue; } if (!sdump->XMLDump) gf_fprintf(sdump->trace, " {\n"); sdump->indent++; if (sdump->XMLDump && sdump->X3DDump) gf_fprintf(sdump->trace, "<ProtoBody>\n"); e = DumpProtos(sdump, proto->sub_graph->protos); if (e) return e; /*set namespace to the proto one*/ prev_sg = sdump->sg; sdump->sg = gf_sg_proto_get_graph(proto); count = gf_list_count(proto->node_code); for (j=0; j<count; j++) { GF_Node *n = (GF_Node*)gf_list_get(proto->node_code, j); gf_dump_vrml_node(sdump, n, 1, NULL); } count = gf_list_count(proto->sub_graph->Routes); for (j=0; j<count; j++) { GF_Route *r = (GF_Route *)gf_list_get(proto->sub_graph->Routes, j); if (r->IS_route) continue; gf_dump_vrml_route(sdump, r, 0); } if (sdump->XMLDump && sdump->X3DDump) gf_fprintf(sdump->trace, "</ProtoBody>\n"); /*restore namespace*/ sdump->sg = prev_sg; sdump->indent--; DUMP_IND(sdump); if (!sdump->XMLDump) { gf_fprintf(sdump->trace, "}\n"); } else { gf_fprintf(sdump->trace, "</ProtoDeclare>\n"); } } sdump->current_proto = prev_proto; return GF_OK; #endif } static GF_Err DumpSceneReplace(GF_SceneDumper *sdump, GF_Command *com) { if (sdump->XMLDump) { if (!sdump->X3DDump) { StartElement(sdump, "Replace"); EndElementHeader(sdump, 1); sdump->indent++; } //scene tag is already dumped with X3D header if (!sdump->X3DDump) StartElement(sdump, "Scene"); if (!sdump->X3DDump && com->use_names) { StartAttribute(sdump, "USENAMES"); gf_fprintf(sdump->trace, "%s", com->use_names ? 
"true" : "false"); EndAttribute(sdump); } if (!sdump->X3DDump) EndElementHeader(sdump, 1); sdump->indent++; } else { if (!sdump->skip_scene_replace) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "REPLACE SCENE BY "); } } DumpProtos(sdump, com->new_proto_list); gf_dump_vrml_node(sdump, com->node, 0, NULL); if (!sdump->XMLDump) gf_fprintf(sdump->trace, "\n\n"); if (com->aggregated) { u32 i, count; count = gf_list_count(com->node->sgprivate->scenegraph->Routes); for (i=0; i<count; i++) { GF_Route *r = (GF_Route *)gf_list_get(com->node->sgprivate->scenegraph->Routes, i); if (r->IS_route) continue; gf_dump_vrml_route(sdump, r, 0); } } return GF_OK; } static GF_Err DumpProtoInsert(GF_SceneDumper *sdump, GF_Command *com) { DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Insert extended=\"proto\">\n"); } else { gf_fprintf(sdump->trace, "INSERTPROTO [\n"); } sdump->indent++; DumpProtos(sdump, com->new_proto_list); sdump->indent--; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "</Insert>\n"); } else { gf_fprintf(sdump->trace, "]\n"); } return GF_OK; } #endif /*GPAC_DISABLE_VRML*/ #ifndef GPAC_DISABLE_SVG static char *lsr_format_node_id(GF_Node *n, u32 NodeID, char *str) { if (!n) sprintf(str, "N%d", NodeID-1); else { const char *name = gf_node_get_name_and_id(n, &NodeID); if (name) sprintf(str, "%s", name); else sprintf(str, "N%d", NodeID - 1); } return str; } static char szLSRName[1024]; static char *sd_get_lsr_namespace(GF_SceneGraph *sg) { char *lsrns = (char *) gf_sg_get_namespace_qname(sg, GF_XMLNS_LASER); if (lsrns) { sprintf(szLSRName, "%s:", lsrns); return szLSRName; } return ""; } static GF_Err DumpLSRNewScene(GF_SceneDumper *sdump, GF_Command *com) { char *lsrns = sd_get_lsr_namespace(com->in_scene); gf_fprintf(sdump->trace, "<%sNewScene>\n", lsrns); gf_dump_svg_element(sdump, com->node, NULL, 0); gf_fprintf(sdump->trace, "</%sNewScene>\n", lsrns); return GF_OK; } static GF_Err DumpLSRAddReplaceInsert(GF_SceneDumper *sdump, GF_Command *com) { char szID[100]; Bool is_text = 0; GF_CommandField *f; char *lsrns = sd_get_lsr_namespace(com->in_scene); const char *com_name = (com->tag==GF_SG_LSR_REPLACE) ? "Replace" : ( (com->tag==GF_SG_LSR_ADD) ? "Add" : "Insert" ); DUMP_IND(sdump); gf_fprintf(sdump->trace, "<%s%s ref=\"%s\" ", lsrns, com_name, lsr_format_node_id(com->node, com->RouteID, szID)); f = (GF_CommandField *) gf_list_get(com->command_fields, 0); if (f && (f->pos>=0) ) gf_fprintf(sdump->trace, "index=\"%d\" ", f->pos); if (f) { GF_FieldInfo info; if (!f->new_node && !f->node_list) { char *att_name = NULL; if (f->fieldType==SVG_Transform_Scale_datatype) att_name = "scale"; else if (f->fieldType==SVG_Transform_Rotate_datatype) att_name = "rotation"; else if (f->fieldType==SVG_Transform_Translate_datatype) att_name = "translation"; else if (f->fieldIndex==(u32) -1) att_name = "textContent"; else { if (!com->node) return GF_NON_COMPLIANT_BITSTREAM; att_name = (char*) gf_svg_get_attribute_name(com->node, f->fieldIndex); } gf_fprintf(sdump->trace, "attributeName=\"%s\" ", att_name); if (f->field_ptr) { char *att; info.far_ptr = f->field_ptr; info.fieldIndex = f->fieldIndex; info.fieldType = f->fieldType; info.name = att_name; if ((s32) f->pos >= 0) { att = gf_svg_dump_attribute_indexed(com->node, &info); } else { att = gf_svg_dump_attribute(com->node, &info); } gf_fprintf(sdump->trace, "value=\"%s\" ", att ? 
att : "");
				if (att) gf_free(att);
			}
			if (com->fromNodeID) {
				GF_FieldInfo op_info;
				GF_Node *op = gf_sg_find_node(sdump->sg, com->fromNodeID);
				/*the operand node may be absent on broken/truncated streams - skip the operand attributes rather than dereferencing NULL*/
				if (op) {
					gf_fprintf(sdump->trace, "operandElementId=\"%s\" ", lsr_format_node_id(op, com->RouteID, szID));
					gf_node_get_field(op, com->fromFieldIndex, &op_info);
					gf_fprintf(sdump->trace, "operandAttributeName=\"%s\" ", op_info.name);
				}
			}
			gf_fprintf(sdump->trace, "/>\n");
			return GF_OK;
		}
		if (f->new_node && f->new_node->sgprivate->tag==TAG_DOMText) is_text = 1;
		/*if fieldIndex (eg attributeName) is set, this is children replacement*/
		if (f->fieldIndex>0) gf_fprintf(sdump->trace, "attributeName=\"children\" ");
	}
	gf_fprintf(sdump->trace, ">");
	if (!is_text) {
		gf_fprintf(sdump->trace, "\n");
		sdump->indent++;
	}
	if (f) {
		if (f->new_node) {
			gf_dump_svg_element(sdump, f->new_node, com->node, 0);
		} else if (f->node_list) {
			GF_ChildNodeItem *list = f->node_list;
			while (list) {
				gf_dump_svg_element(sdump, list->node, com->node, 0);
				list = list->next;
			}
		}
	}
	if (!is_text) {
		sdump->indent--;
		DUMP_IND(sdump);
	}
	gf_fprintf(sdump->trace, "</%s%s>\n", lsrns, com_name);
	return GF_OK;
}

static GF_Err DumpLSRDelete(GF_SceneDumper *sdump, GF_Command *com)
{
	char szID[1024];
	GF_CommandField *f;
	char *lsrns = sd_get_lsr_namespace(com->in_scene);
	DUMP_IND(sdump);
	gf_fprintf(sdump->trace, "<%sDelete ref=\"%s\" ", lsrns, lsr_format_node_id(com->node, com->RouteID, szID));
	f = (GF_CommandField *) gf_list_get(com->command_fields, 0);
	if (f && (f->pos>=0) ) gf_fprintf(sdump->trace, "index=\"%d\" ", f->pos);
	gf_fprintf(sdump->trace, "/>\n");
	return GF_OK;
}

#ifdef GPAC_UNUSED_FUNC
static GF_Err DumpLSRInsert(GF_SceneDumper *sdump, GF_Command *com)
{
	return GF_OK;
}
static GF_Err SD_SetSceneGraph(GF_SceneDumper *sdump, GF_SceneGraph *sg)
{
	if (sdump) sdump->sg = sg;
	return GF_OK;
}
static GF_Err DumpLSRClean(GF_SceneDumper *sdump, GF_Command *com)
{
	return GF_OK;
}
static GF_Err DumpLSRRestore(GF_SceneDumper *sdump, GF_Command *com)
{
	return GF_OK;
}
static GF_Err DumpLSRSave(GF_SceneDumper *sdump, GF_Command *com)
{
	return GF_OK;
}
#endif /*GPAC_UNUSED_FUNC*/

static GF_Err DumpLSRSendEvent(GF_SceneDumper *sdump, GF_Command *com)
{
	char szID[1024];
	char *lsrns = sd_get_lsr_namespace(com->in_scene);
	DUMP_IND(sdump);
	gf_fprintf(sdump->trace, "<%sSendEvent ref=\"%s\" event=\"%s\"", lsrns, lsr_format_node_id(com->node, com->RouteID, szID), gf_dom_event_get_name(com->send_event_name) );
	if (com->send_event_name <= GF_EVENT_MOUSEWHEEL)
		gf_fprintf(sdump->trace, " pointvalue=\"%g %g\"", FIX2FLT(com->send_event_x), FIX2FLT(com->send_event_y) );
	switch (com->send_event_name) {
	case GF_EVENT_KEYDOWN:
	case GF_EVENT_LONGKEYPRESS:
	case GF_EVENT_REPEAT_KEY:
	case GF_EVENT_SHORT_ACCESSKEY:
		if (com->send_event_integer) {
			gf_fprintf(sdump->trace, " stringvalue=\"%s\"", gf_dom_get_key_name(com->send_event_integer) );
			break;
		}
		/*fall through: key events without a key code are dumped like generic events*/
	default:
		if (com->send_event_integer) gf_fprintf(sdump->trace, " intvalue=\"%d\"", com->send_event_integer);
		if (com->send_event_string) gf_fprintf(sdump->trace, " stringvalue=\"%s\"", com->send_event_string);
		break;
	}
	gf_fprintf(sdump->trace, "/>\n");
	return GF_OK;
}

static GF_Err DumpLSRActivate(GF_SceneDumper *sdump, GF_Command *com)
{
	char szID[1024];
	char *lsrns = sd_get_lsr_namespace(com->in_scene);
	DUMP_IND(sdump);
	if (com->tag==GF_SG_LSR_ACTIVATE) {
		gf_fprintf(sdump->trace, "<%sActivate ref=\"%s\" />\n", lsrns, lsr_format_node_id(com->node, com->RouteID, szID));
	} else {
		gf_fprintf(sdump->trace, "<%sDeactivate ref=\"%s\" />\n", lsrns, lsr_format_node_id(com->node, com->RouteID, szID));
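		/*Deactivate mirrors Activate exactly - only the element name differs*/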
} return GF_OK; } #endif GF_EXPORT GF_Err gf_sm_dump_command_list(GF_SceneDumper *sdump, GF_List *comList, u32 indent, Bool skip_first_replace) { GF_Err e; u32 i, count; u32 prev_ind; #ifndef GPAC_DISABLE_VRML u32 remain = 0, has_scene_replace = 0; #endif Bool prev_skip; if (!sdump || !sdump->trace|| !comList || !sdump->sg) return GF_BAD_PARAM; prev_skip = sdump->skip_scene_replace; sdump->skip_scene_replace = skip_first_replace; prev_ind = sdump->indent; sdump->indent = indent; e = GF_OK; count = gf_list_count(comList); for (i=0; i<count; i++) { GF_Command *com = (GF_Command *) gf_list_get(comList, i); if (i #ifndef GPAC_DISABLE_VRML && !remain #endif && (sdump->X3DDump || (sdump->dump_mode==GF_SM_DUMP_VRML)) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[Scene Dump] MPEG-4 Commands found, not supported in %s - skipping\n", sdump->X3DDump ? "X3D" : "VRML")); break; } #ifndef GPAC_DISABLE_VRML if (has_scene_replace && (com->tag != GF_SG_ROUTE_INSERT)) { has_scene_replace = 0; if (sdump->XMLDump) { sdump->indent--; EndElement(sdump, "Scene", 1); sdump->indent--; EndElement(sdump, "Replace", 1); } else { DUMP_IND(sdump); gf_fprintf(sdump->trace, "\nAT 0 {\n"); sdump->indent++; } } #endif switch (com->tag) { #ifndef GPAC_DISABLE_VRML /*insert commands*/ case GF_SG_NODE_INSERT: e = DumpNodeInsert(sdump, com); break; case GF_SG_INDEXED_INSERT: e = DumpIndexInsert(sdump, com); break; case GF_SG_ROUTE_INSERT: e = DumpRouteInsert(sdump, com, has_scene_replace); if (remain) remain--; break; /*delete commands*/ case GF_SG_NODE_DELETE: e = DumpNodeDelete(sdump, com); break; case GF_SG_INDEXED_DELETE: e = DumpIndexDelete(sdump, com); break; case GF_SG_ROUTE_DELETE: e = DumpRouteDelete(sdump, com); break; /*replace commands*/ case GF_SG_NODE_REPLACE: e = DumpNodeReplace(sdump, com); break; case GF_SG_FIELD_REPLACE: e = DumpFieldReplace(sdump, com); break; case GF_SG_INDEXED_REPLACE: e = DumpIndexReplace(sdump, com); break; case GF_SG_ROUTE_REPLACE: e = DumpRouteReplace(sdump, com); break; case GF_SG_XREPLACE: e = DumpXReplace(sdump, com); break; case GF_SG_SCENE_REPLACE: /*we don't support replace scene in conditional*/ assert(!sdump->current_com_list); sdump->current_com_list = comList; e = DumpSceneReplace(sdump, com); sdump->current_com_list = NULL; has_scene_replace = 1; remain = count - i - 1; break; /*extended commands*/ case GF_SG_PROTO_INSERT: e = DumpProtoInsert(sdump, com); break; case GF_SG_PROTO_DELETE_ALL: DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Delete extended=\"allProtos\"/>\n"); } else { gf_fprintf(sdump->trace, "DELETEPROTO ALL\n"); } e = GF_OK; break; case GF_SG_PROTO_DELETE: { u32 j; DUMP_IND(sdump); if (sdump->XMLDump) { gf_fprintf(sdump->trace, "<Delete extended=\"protos\" value=\""); } else { gf_fprintf(sdump->trace, "DELETEPROTO ["); } for (j=0; j<com->del_proto_list_size; j++) { if (j) gf_fprintf(sdump->trace, " "); gf_fprintf(sdump->trace, "%d", com->del_proto_list[j]); } if (sdump->XMLDump) { gf_fprintf(sdump->trace, "\"/>\n"); } else { gf_fprintf(sdump->trace, "]\n"); } e = GF_OK; } break; case GF_SG_GLOBAL_QUANTIZER: e = DumpGlobalQP(sdump, com); break; case GF_SG_MULTIPLE_REPLACE: e = DumpMultipleReplace(sdump, com); break; case GF_SG_MULTIPLE_INDEXED_REPLACE: e = DumpMultipleIndexedReplace(sdump, com); break; case GF_SG_NODE_DELETE_EX: e = DumpNodeDelete(sdump, com); break; #endif #ifndef GPAC_DISABLE_SVG /*laser commands*/ case GF_SG_LSR_NEW_SCENE: e = DumpLSRNewScene(sdump, com); break; case GF_SG_LSR_ADD: e = DumpLSRAddReplaceInsert(sdump, com); 
break; case GF_SG_LSR_CLEAN: //e = DumpLSRClean(sdump, com); break; case GF_SG_LSR_REPLACE: e = DumpLSRAddReplaceInsert(sdump, com); break; case GF_SG_LSR_DELETE: e = DumpLSRDelete(sdump, com); break; case GF_SG_LSR_INSERT: e = DumpLSRAddReplaceInsert(sdump, com); break; case GF_SG_LSR_RESTORE: //e = DumpLSRRestore(sdump, com); break; case GF_SG_LSR_SAVE: //e = DumpLSRSave(sdump, com); break; case GF_SG_LSR_SEND_EVENT: e = DumpLSRSendEvent(sdump, com); break; case GF_SG_LSR_ACTIVATE: case GF_SG_LSR_DEACTIVATE: e = DumpLSRActivate(sdump, com); break; #endif } if (e) break; if (sdump->skip_scene_replace #ifndef GPAC_DISABLE_VRML && !has_scene_replace #endif ) { sdump->skip_scene_replace = 0; if (!sdump->XMLDump && (i+1<count)) { DUMP_IND(sdump); gf_fprintf(sdump->trace, "\nAT 0 {\n"); sdump->indent++; } } } #ifndef GPAC_DISABLE_VRML if (remain && !sdump->XMLDump) { sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "}\n"); } #endif if (has_scene_replace && sdump->XMLDump) { sdump->indent--; if (!sdump->X3DDump) { EndElement(sdump, "Scene", 1); sdump->indent--; EndElement(sdump, "Replace", 1); } } sdump->indent = prev_ind; sdump->skip_scene_replace = prev_skip; return e; } #ifndef GPAC_DISABLE_SVG void gf_dump_svg_element(GF_SceneDumper *sdump, GF_Node *n, GF_Node *parent, Bool is_root) { GF_ChildNodeItem *list; char attName[100], *attValue; u32 nID; SVG_Element *svg = (SVG_Element *)n; GF_FieldInfo info; SVGAttribute *att; u32 tag, ns; if (!n) return; nID = gf_node_get_id(n); tag = n->sgprivate->tag; /*remove undef listener/handlers*/ if (!nID) { switch (tag) { case TAG_SVG_listener: if ((0) && gf_node_get_attribute_by_tag(n, TAG_XMLEV_ATT_handler, 0, 0, &info)==GF_OK) { if (((XMLRI*)info.far_ptr)->target && !gf_node_get_id(((XMLRI*)info.far_ptr)->target) ) return; } break; case TAG_SVG_handler: /*this handler was not declared in the graph*/ if (!n->sgprivate->parents || (n->sgprivate->parents->node != parent)) return; break; case TAG_DOMText: { GF_DOMText *txt = (GF_DOMText *)n; if (txt->textContent) { if ((txt->type==GF_DOM_TEXT_CDATA) || (parent && (parent->sgprivate->tag == TAG_SVG_script)) || (parent && (parent->sgprivate->tag == TAG_SVG_handler)) ) { gf_fprintf(sdump->trace, "<![CDATA["); gf_fprintf(sdump->trace, "%s", txt->textContent); gf_fprintf(sdump->trace, "]]>"); } else if (txt->type==GF_DOM_TEXT_REGULAR) { scene_dump_utf_string(sdump, 0, txt->textContent); } } } return; } } if (!sdump->in_text) { DUMP_IND(sdump); } /*register all namespaces specified on this element */ gf_xml_push_namespaces((GF_DOMNode *)n); gf_fprintf(sdump->trace, "<%s", gf_node_get_class_name(n)); ns = gf_xml_get_element_namespace(n); if (nID) { char attID[100]; gf_fprintf(sdump->trace, " id=\"%s\"", lsr_format_node_id(n, 0, attID)); } att = svg->attributes; while (att) { if (att->data_type==SVG_ID_datatype) { att = att->next; continue; } info.fieldIndex = att->tag; info.fieldType = att->data_type; if (att->tag==TAG_DOM_ATT_any) { u32 att_ns = ((GF_DOMFullAttribute*)att)->xmlns; info.name = ((GF_DOMFullAttribute*)att)->name; if ((att_ns != ns) && strncmp(info.name, "xmlns", 5)) { sprintf(attName, "%s:%s", gf_sg_get_namespace_qname(gf_node_get_graph(n), att_ns), ((GF_DOMFullAttribute*)att)->name); info.name = attName; } } else { info.name = gf_svg_get_attribute_name(n, att->tag); } if (att->data_type==XMLRI_datatype) { XMLRI *xlink = (XMLRI *)att->data; if (xlink->type==XMLRI_ELEMENTID) { if (!xlink->target || !gf_node_get_id((GF_Node*)xlink->target) ) { att = att->next; continue; } if (parent && 
(parent == (GF_Node *) xlink->target)) { att = att->next; continue; } } else if (xlink->type==XMLRI_STREAMID) { gf_fprintf(sdump->trace, " %s=\"#stream%d\"", info.name, xlink->lsr_stream_id); att = att->next; continue; } else { gf_fprintf(sdump->trace, " %s=\"%s\"", info.name, xlink->string); att = att->next; continue; } } info.far_ptr = att->data; attValue = gf_svg_dump_attribute((GF_Node*)svg, &info); if (attValue) { if (/*strcmp(info.name, "xmlns") &&*/ (info.fieldType = (u32) strlen(attValue))) gf_fprintf(sdump->trace, " %s=\"%s\"", info.name, attValue); gf_free(attValue); } att = att->next; } gf_dom_event_dump_listeners(n, sdump->trace); if (svg->children) { gf_fprintf(sdump->trace, ">"); } else { gf_fprintf(sdump->trace, "/>"); return; } if (n->sgprivate->tag==TAG_LSR_conditional) { GF_DOMUpdates *up = svg->children ? (GF_DOMUpdates *)svg->children->node : NULL; sdump->indent++; if (up && (up->sgprivate->tag==TAG_DOMUpdates)) { if (gf_list_count(up->updates)) { gf_fprintf(sdump->trace, "\n"); gf_sm_dump_command_list(sdump, up->updates, sdump->indent, 0); } else if (up->data) { gf_fprintf(sdump->trace, "<!-- WARNING: LASeR scripts cannot be dumped at run-time -->\n"); } } sdump->indent--; DUMP_IND(sdump); gf_fprintf(sdump->trace, "</%s>\n", gf_node_get_class_name(n)); return; } if (tag==TAG_SVG_text || tag==TAG_SVG_textArea) sdump->in_text = 1; sdump->indent++; list = svg->children; while (list) { if (!sdump->in_text) gf_fprintf(sdump->trace, "\n"); gf_dump_svg_element(sdump, list->node, n, 0); list = list->next; } if (!sdump->in_text) gf_fprintf(sdump->trace, "\n"); sdump->indent--; if (!sdump->in_text) DUMP_IND(sdump); gf_fprintf(sdump->trace, "</%s>", gf_node_get_class_name(n)); if (tag==TAG_SVG_text || tag==TAG_SVG_textArea) sdump->in_text = 0; /*removes all namespaces specified on this element */ gf_xml_pop_namespaces((GF_DOMNode *)n); } #endif static void gf_sm_dump_saf_hdr(GF_SceneDumper *dumper, char *unit_name, u64 au_time, Bool is_rap) { gf_fprintf(dumper->trace, "<saf:%s", unit_name); if (au_time) gf_fprintf(dumper->trace, " time=\""LLD"\"", au_time); if (is_rap) gf_fprintf(dumper->trace, " rap=\"true\""); gf_fprintf(dumper->trace, ">\n"); } static void dump_od_to_saf(GF_SceneDumper *dumper, GF_AUContext *au, u32 indent) { u32 i, count; count = gf_list_count(au->commands); for (i=0; i<count; i++) { u32 j, c2; GF_ODUpdate *com = (GF_ODUpdate *)gf_list_get(au->commands, i); if (com->tag != GF_ODF_OD_UPDATE_TAG) continue; c2 = gf_list_count(com->objectDescriptors); for (j=0; j<c2; j++) { GF_ObjectDescriptor *od = (GF_ObjectDescriptor *)gf_list_get(com->objectDescriptors, j); GF_ESD *esd = (GF_ESD *) gf_list_get(od->ESDescriptors, 0); GF_MuxInfo *mux; if (!esd || (esd->tag != GF_ODF_ESD_TAG)) { if (od->URLString) { gf_fprintf(dumper->trace, "<saf:RemoteStreamHeader streamID=\"stream%d\" url=\"%s\"", au->owner->ESID, od->URLString); if (au->timing) gf_fprintf(dumper->trace, " time=\""LLD"\"", au->timing); gf_fprintf(dumper->trace, "/>\n"); } continue; } mux = (GF_MuxInfo *)gf_list_get(esd->extensionDescriptors, 0); if (!mux || (mux->tag!=GF_ODF_MUXINFO_TAG)) mux = NULL; gf_fprintf(dumper->trace, "<saf:mediaHeader streamID=\"stream%d\"", esd->ESID); if (esd->decoderConfig) { gf_fprintf(dumper->trace, " streamType=\"%d\" objectTypeIndication=\"%d\" timeStampResolution=\"%d\"", esd->decoderConfig->streamType, esd->decoderConfig->objectTypeIndication, au->owner->timeScale); } if (au->timing) gf_fprintf(dumper->trace, " time=\""LLD"\"", au->timing); if (mux && mux->file_name) 
gf_fprintf(dumper->trace, " source=\"%s\"", mux->file_name); gf_fprintf(dumper->trace, "/>\n"); } } gf_fprintf(dumper->trace, "</saf:mediaUnit>\n"); } #ifndef GPAC_DISABLE_SVG static GF_Err SD_DumpDOMElement(GF_SceneDumper *sdump, GF_DOMFullNode *node) { const char *ns; u32 child_type = 0; GF_DOMFullAttribute *att; GF_ChildNodeItem *child; GF_DOMText *txt; ns = gf_sg_get_namespace_qname(node->sgprivate->scenegraph, node->ns); DUMP_IND(sdump); if (ns) gf_fprintf(sdump->trace, "<%s:%s", ns, node->name); else gf_fprintf(sdump->trace, "<%s", node->name); att = (GF_DOMFullAttribute *)node->attributes; while (att) { gf_fprintf(sdump->trace, " %s=\"%s\"", att->name, (char *) att->data); att = (GF_DOMFullAttribute *)att->next; } if (!node->children) { gf_fprintf(sdump->trace, "/>\n"); return GF_OK; } gf_fprintf(sdump->trace, ">"); sdump->indent++; child = node->children; while (child) { switch(child->node->sgprivate->tag) { case TAG_DOMFullNode: if (!child_type) gf_fprintf(sdump->trace, "\n"); child_type = 1; SD_DumpDOMElement(sdump, (GF_DOMFullNode*)child->node); break; case TAG_DOMText: child_type = 2; txt = (GF_DOMText *)child->node; if (txt->type==GF_DOM_TEXT_REGULAR) { scene_dump_utf_string(sdump, 0, txt->textContent); } else if (txt->type==GF_DOM_TEXT_CDATA) { gf_fprintf(sdump->trace, "<![CDATA["); gf_fprintf(sdump->trace, "%s", txt->textContent); gf_fprintf(sdump->trace, "]]>"); } break; } child = child->next; } sdump->indent--; if (child_type!=2) { DUMP_IND(sdump); } if (ns) gf_fprintf(sdump->trace, "</%s:%s>\n", ns, node->name); else gf_fprintf(sdump->trace, "</%s>\n", node->name); return GF_OK; } #endif GF_EXPORT GF_Err gf_sm_dump_graph(GF_SceneDumper *sdump, Bool skip_proto, Bool skip_routes) { u32 tag; if (!sdump->trace || !sdump->sg || !sdump->sg->RootNode) return GF_BAD_PARAM; tag = sdump->sg->RootNode->sgprivate->tag; if (tag<=GF_NODE_RANGE_LAST_X3D) { gf_dump_setup(sdump, NULL); if (sdump->XMLDump) { StartElement(sdump, "Scene"); EndElementHeader(sdump, 1); sdump->indent++; } #ifndef GPAC_DISABLE_VRML GF_Err e; if (!skip_proto) { e = DumpProtos(sdump, sdump->sg->protos); if (e) return e; } if (sdump->X3DDump) { GF_ChildNodeItem *list = ((GF_ParentNode *)sdump->sg->RootNode)->children; while (list) { gf_dump_vrml_node(sdump, list->node, 0, NULL); list = list->next; } } else { gf_dump_vrml_node(sdump, sdump->sg->RootNode, 0, NULL); } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "\n\n"); if (!skip_routes) { GF_Route *r; u32 i=0; while ((r = (GF_Route*)gf_list_enum(sdump->sg->Routes, &i))) { if (r->IS_route || (r->graph!=sdump->sg)) continue; e = gf_dump_vrml_route(sdump, r, 0); if (e) return e; } } if (sdump->XMLDump) { sdump->indent--; EndElement(sdump, "Scene", 1); } #endif /*GPAC_DISABLE_VRML*/ gf_dump_finalize(sdump, NULL); return GF_OK; } #ifndef GPAC_DISABLE_SVG else if ((tag>=GF_NODE_RANGE_FIRST_SVG) && (tag<=GF_NODE_RANGE_LAST_SVG)) { sdump->dump_mode = GF_SM_DUMP_SVG; gf_dump_setup(sdump, NULL); gf_dump_svg_element(sdump, sdump->sg->RootNode, NULL, 1); return GF_OK; } else if (tag==TAG_DOMFullNode) { sdump->dump_mode = GF_SM_DUMP_XML; gf_dump_setup(sdump, NULL); SD_DumpDOMElement(sdump, (GF_DOMFullNode*)sdump->sg->RootNode); } #endif return GF_OK; } static void ReorderAUContext(GF_List *sample_list, GF_AUContext *au, Bool lsr_dump) { u64 autime, time; u32 i; Bool has_base; GF_AUContext *ptr; /* this happens when converting from bt to xmt NOTE: Comment is wrong? 
this happens when just loading BT */ if (!au->timing_sec) { au->timing_sec = (Double) (s64) au->timing; /* Hack to avoid timescale=0 which happens when loading a BT with no SLConfig*/ if (!au->owner->timeScale) au->owner->timeScale = 1000; au->timing_sec /= au->owner->timeScale; } /*this happens when converting from xmt to bt*/ if (!au->timing) { assert(au->owner->timeScale); au->timing = (u64) (au->timing_sec * au->owner->timeScale); } autime = au->timing + au->owner->imp_exp_time; has_base = 0; i=0; while ((ptr = (GF_AUContext*)gf_list_enum(sample_list, &i))) { time = ptr->timing + ptr->owner->imp_exp_time; if ( /*time ordered*/ (time > autime) /*set bifs first for first AU*/ || (!has_base && (time == autime) && (ptr->owner->streamType < au->owner->streamType) ) /*set OD first for laser*/ || (lsr_dump && (au->owner->streamType==GF_STREAM_OD)) ) { gf_list_insert(sample_list, au, i-1); return; } has_base = 0; if ( (ptr->owner->streamType == au->owner->streamType) && (time == autime) ) has_base = 1; } gf_list_add(sample_list, au); } GF_EXPORT GF_Err gf_sm_dump(GF_SceneManager *ctx, char *rad_name, Bool is_final_name, GF_SceneDumpFormat dump_mode) { GF_Err e; GF_List *sample_list; Bool first_par; u32 i, j, indent, num_scene, num_od, first_bifs, num_tracks; Double time; GF_SceneDumper *dumper; GF_StreamContext *sc; GF_AUContext *au; Bool no_root_found = 1; sample_list = gf_list_new(); num_scene = num_od = 0; num_tracks = 0; indent = 0; dumper = gf_sm_dumper_new(ctx->scene_graph, rad_name, is_final_name, ' ', dump_mode); e = GF_OK; /*configure all systems streams we're dumping*/ i=0; while ((sc = (GF_StreamContext*)gf_list_enum(ctx->streams, &i))) { switch (sc->streamType) { case GF_STREAM_SCENE: num_scene ++; num_tracks ++; break; case GF_STREAM_OD: num_od ++; num_tracks ++; break; default: continue; } j=0; while ((au = (GF_AUContext*)gf_list_enum(sc->AUs, &j))) { ReorderAUContext(sample_list, au, dumper->LSRDump); if (dumper->dump_mode==GF_SM_DUMP_SVG) break; } if (dumper->dump_mode==GF_SM_DUMP_SVG) break; } first_bifs = (num_scene==1) ? 1 : 0; num_scene = (num_scene>1) ? 1 : 0; num_od = (num_od>1) ? 1 : 0; gf_dump_setup(dumper, (GF_Descriptor *) ctx->root_od); #ifndef GPAC_DISABLE_SVG if (dumper->dump_mode==GF_SM_DUMP_SVG) { au = (GF_AUContext*)gf_list_get(sample_list, 0); GF_Command *com = NULL; if (au) com = (GF_Command*)gf_list_get(au->commands, 0); if (!au) { gf_dump_svg_element(dumper, dumper->sg->RootNode, NULL, 1); } else if (!com || (com->tag!=GF_SG_LSR_NEW_SCENE) || !com->node) { e = GF_NOT_SUPPORTED; } else { gf_dump_svg_element(dumper, com->node, NULL, 1); } gf_dump_finalize(dumper, (GF_Descriptor *) ctx->root_od); gf_sm_dumper_del(dumper); gf_list_del(sample_list); return e; } #endif time = dumper->LSRDump ? 
-1 : 0; first_par = 0; while (gf_list_count(sample_list)) { au = (GF_AUContext*)gf_list_get(sample_list, 0); gf_list_rem(sample_list, 0); if (!dumper->XMLDump) { if (!first_bifs || (au->owner->streamType != GF_STREAM_SCENE) ) { if (au->flags & GF_SM_AU_RAP) gf_fprintf(dumper->trace, "RAP "); gf_fprintf(dumper->trace, "AT "LLD" ", au->timing); if ( (au->owner->streamType==GF_STREAM_OD && num_od) || (au->owner->streamType==GF_STREAM_SCENE && num_scene)) { gf_fprintf(dumper->trace, "IN %d ", au->owner->ESID); } gf_fprintf(dumper->trace, "{\n"); indent++; } switch (au->owner->streamType) { case GF_STREAM_OD: if (dumper->LSRDump) { dump_od_to_saf(dumper, au, indent); } else { #ifndef GPAC_DISABLE_OD_DUMP e = gf_odf_dump_com_list(au->commands, dumper->trace, indent+1, 0); #endif } break; case GF_STREAM_SCENE: e = gf_sm_dump_command_list(dumper, au->commands, indent, first_bifs); break; } if (first_bifs) { first_bifs = 0; gf_fprintf(dumper->trace, "\n"); } else { indent--; gf_fprintf(dumper->trace, "}\n\n"); } } else { if (dumper->LSRDump) { /* if (time != au->timing_sec) { time = au->timing_sec; } */ } else if (!time && !num_scene && first_bifs) { } else if (num_scene || num_od) { if (!first_par) { first_par = 1; indent += 1; } else { gf_fprintf(dumper->trace, " </par>\n"); } gf_fprintf(dumper->trace, " <par begin=\"%g\" atES_ID=\"es%d\" isRAP=\"%s\">\n", au->timing_sec, au->owner->ESID, (au->flags & GF_SM_AU_RAP) ? "yes" : "no"); } else if (au->timing_sec>time) { if (!first_par) { first_par = 1; indent += 1; } else { gf_fprintf(dumper->trace, " </par>\n"); } gf_fprintf(dumper->trace, "<par begin=\"%g\">\n", au->timing_sec); } switch (au->owner->streamType) { case GF_STREAM_OD: if (dumper->LSRDump) { dump_od_to_saf(dumper, au, indent+1); } else { #ifndef GPAC_DISABLE_OD_DUMP e = gf_odf_dump_com_list(au->commands, dumper->trace, indent+1, 1); #endif } break; case GF_STREAM_SCENE: if (gf_list_count(au->commands)) { if (dumper->LSRDump) gf_sm_dump_saf_hdr(dumper, "sceneUnit", au->timing, au->flags & GF_SM_AU_RAP); e = gf_sm_dump_command_list(dumper, au->commands, indent+1, first_bifs); first_bifs = 0; no_root_found = 0; if (dumper->LSRDump) gf_fprintf(dumper->trace, "</saf:sceneUnit>\n"); } break; } time = au->timing_sec; } if (dumper->X3DDump || (dumper->dump_mode==GF_SM_DUMP_VRML)) break; } #ifndef GPAC_DISABLE_VRML if (no_root_found && ctx->scene_graph->RootNode) { GF_Route *r; DumpProtos(dumper, ctx->scene_graph->protos); gf_dump_vrml_node(dumper, ctx->scene_graph->RootNode, 0, NULL); i=0; gf_fprintf(dumper->trace, "\n"); while ((r = (GF_Route*)gf_list_enum(dumper->sg->Routes, &i))) { if (r->IS_route || (r->graph!=dumper->sg)) continue; e = gf_dump_vrml_route(dumper, r, 0); if (e) return e; } } #endif /*close command*/ if (!dumper->X3DDump && first_par) gf_fprintf(dumper->trace, " </par>\n"); if (gf_list_count(sample_list) && (dumper->X3DDump || (dumper->dump_mode==GF_SM_DUMP_VRML)) ) { GF_LOG(GF_LOG_WARNING, GF_LOG_PARSER, ("[Scene Dump] MPEG-4 Commands found, not supported in %s - skipping\n", dumper->X3DDump ? "X3D" : "VRML")); } gf_dump_finalize(dumper, (GF_Descriptor *) ctx->root_od); gf_sm_dumper_del(dumper); gf_list_del(sample_list); return e; } #endif /*GPAC_DISABLE_SCENE_DUMP*/
static void gf_dump_vrml_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field) { u32 i, sf_type; Bool needs_field_container; GF_ChildNodeItem *list; void *slot_ptr; switch (field.fieldType) { case GF_SG_VRML_SFNODE: assert ( *(GF_Node **)field.far_ptr); if (sdump->XMLDump) { if (!sdump->X3DDump) { StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; } } else { StartAttribute(sdump, field.name); } gf_dump_vrml_node(sdump, *(GF_Node **)field.far_ptr, 0, NULL); if (sdump->XMLDump) { if (!sdump->X3DDump) { sdump->indent--; EndElement(sdump, (char *) field.name, 1); } } else { EndAttribute(sdump); } return; case GF_SG_VRML_MFNODE: needs_field_container = 0; if (sdump->XMLDump && sdump->X3DDump) { u32 count, nb_ndt; GF_FieldInfo info; if (!strcmp(field.name, "children")) { needs_field_container = 0; } else { nb_ndt = 0; count = gf_node_get_field_count(node); for (i=0; i<count; i++) { gf_node_get_field(node, i, &info); if ((info.eventType==GF_SG_EVENT_IN) || (info.eventType==GF_SG_EVENT_OUT)) continue; if (info.NDTtype==field.NDTtype) nb_ndt++; } needs_field_container = (nb_ndt>1) ? 1 : 0; } } #ifndef GPAC_DISABLE_X3D if (!sdump->X3DDump) { if (gf_node_get_tag(node)==TAG_X3D_Switch) field.name = "choice"; } #endif list = * ((GF_ChildNodeItem **) field.far_ptr); assert(list); if (!sdump->XMLDump || !sdump->X3DDump) StartList(sdump, field.name); sdump->indent++; while (list) { gf_dump_vrml_node(sdump, list->node, 1, needs_field_container ? (char *) field.name : NULL); list = list->next; } sdump->indent--; if (!sdump->XMLDump || !sdump->X3DDump) EndList(sdump, field.name); return; case GF_SG_VRML_SFCOMMANDBUFFER: { SFCommandBuffer *cb = (SFCommandBuffer *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; if (!gf_list_count(cb->commandList)) { /*the arch does not allow for that (we would need a codec and so on, or decompress the command list in all cases...)*/ if (sdump->trace && cb->bufferSize) { if (sdump->XMLDump) gf_fprintf(sdump->trace, "<!--SFCommandBuffer cannot be dumped while playing - use MP4Box instead-->\n"); else gf_fprintf(sdump->trace, "#SFCommandBuffer cannot be dumped while playing - use MP4Box instead\n"); } } else { gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent, 0); } sdump->indent--; EndElement(sdump, (char *) field.name, 1); } return; case GF_SG_VRML_MFATTRREF: if (sdump->XMLDump) { MFAttrRef *ar = (MFAttrRef *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; for (i=0; i<ar->count; i++) { if (ar->vals[i].node) { GF_FieldInfo pinfo; DUMP_IND(sdump); gf_node_get_field(ar->vals[i].node, ar->vals[i].fieldIndex, &pinfo); gf_fprintf(sdump->trace, "<store node=\""); scene_dump_vrml_id(sdump, ar->vals[i].node); gf_fprintf(sdump->trace, "\" field=\"%s\"/>\n", pinfo.name); } } sdump->indent--; EndElement(sdump, (char *) field.name, 1); return; } break; } if (gf_sg_vrml_is_sf_field(field.fieldType)) { StartAttribute(sdump, field.name); gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node); EndAttribute(sdump); } else { GenMFField *mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, " %s=\'", (char *) field.name); break; default: StartAttribute(sdump, field.name); break; } } else { StartAttribute(sdump, field.name); } if 
(!sdump->XMLDump) gf_fprintf(sdump->trace, "["); if (mffield) { for (i=0; i<mffield->count; i++) { if (i) gf_fprintf(sdump->trace, " "); gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, node); } } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "]"); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, "\'"); break; default: EndAttribute(sdump); break; } } else { EndAttribute(sdump); } } }
static void gf_dump_vrml_field(GF_SceneDumper *sdump, GF_Node *node, GF_FieldInfo field) { u32 i, sf_type; Bool needs_field_container; GF_ChildNodeItem *list; void *slot_ptr; switch (field.fieldType) { case GF_SG_VRML_SFNODE: assert ( *(GF_Node **)field.far_ptr); if (sdump->XMLDump) { if (!sdump->X3DDump) { StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; } } else { StartAttribute(sdump, field.name); } gf_dump_vrml_node(sdump, *(GF_Node **)field.far_ptr, 0, NULL); if (sdump->XMLDump) { if (!sdump->X3DDump) { sdump->indent--; EndElement(sdump, (char *) field.name, 1); } } else { EndAttribute(sdump); } return; case GF_SG_VRML_MFNODE: needs_field_container = 0; if (sdump->XMLDump && sdump->X3DDump) { u32 count, nb_ndt; GF_FieldInfo info; if (!strcmp(field.name, "children")) { needs_field_container = 0; } else { nb_ndt = 0; count = gf_node_get_field_count(node); for (i=0; i<count; i++) { gf_node_get_field(node, i, &info); if ((info.eventType==GF_SG_EVENT_IN) || (info.eventType==GF_SG_EVENT_OUT)) continue; if (info.NDTtype==field.NDTtype) nb_ndt++; } needs_field_container = (nb_ndt>1) ? 1 : 0; } } #ifndef GPAC_DISABLE_X3D if (!sdump->X3DDump) { if (gf_node_get_tag(node)==TAG_X3D_Switch) field.name = "choice"; } #endif list = * ((GF_ChildNodeItem **) field.far_ptr); assert(list); if (!sdump->XMLDump || !sdump->X3DDump) StartList(sdump, field.name); sdump->indent++; while (list) { gf_dump_vrml_node(sdump, list->node, 1, needs_field_container ? (char *) field.name : NULL); list = list->next; } sdump->indent--; if (!sdump->XMLDump || !sdump->X3DDump) EndList(sdump, field.name); return; case GF_SG_VRML_SFCOMMANDBUFFER: { SFCommandBuffer *cb = (SFCommandBuffer *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; if (!gf_list_count(cb->commandList)) { /*the arch does not allow for that (we would need a codec and so on, or decompress the command list in all cases...)*/ if (sdump->trace && cb->bufferSize) { if (sdump->XMLDump) gf_fprintf(sdump->trace, "<!--SFCommandBuffer cannot be dumped while playing - use MP4Box instead-->\n"); else gf_fprintf(sdump->trace, "#SFCommandBuffer cannot be dumped while playing - use MP4Box instead\n"); } } else { gf_sm_dump_command_list(sdump, cb->commandList, sdump->indent, 0); } sdump->indent--; EndElement(sdump, (char *) field.name, 1); } return; case GF_SG_VRML_MFATTRREF: if (sdump->XMLDump) { MFAttrRef *ar = (MFAttrRef *)field.far_ptr; StartElement(sdump, (char *) field.name); EndElementHeader(sdump, 1); sdump->indent++; for (i=0; i<ar->count; i++) { if (ar->vals[i].node) { GF_FieldInfo pinfo; DUMP_IND(sdump); gf_node_get_field(ar->vals[i].node, ar->vals[i].fieldIndex, &pinfo); gf_fprintf(sdump->trace, "<store node=\""); scene_dump_vrml_id(sdump, ar->vals[i].node); gf_fprintf(sdump->trace, "\" field=\"%s\"/>\n", pinfo.name); } } sdump->indent--; EndElement(sdump, (char *) field.name, 1); return; } break; } if (gf_sg_vrml_is_sf_field(field.fieldType)) { StartAttribute(sdump, field.name); gf_dump_vrml_sffield(sdump, field.fieldType, field.far_ptr, 0, node); EndAttribute(sdump); } else { GenMFField *mffield = (GenMFField *) field.far_ptr; sf_type = gf_sg_vrml_get_sf_type(field.fieldType); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, " %s=\'", (char *) field.name); break; default: StartAttribute(sdump, field.name); break; } } else { StartAttribute(sdump, field.name); } if 
(!sdump->XMLDump) gf_fprintf(sdump->trace, "["); for (i=0; mffield && (i<mffield->count); i++) { if (i) gf_fprintf(sdump->trace, " "); gf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i); gf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, node); } if (!sdump->XMLDump) gf_fprintf(sdump->trace, "]"); if (sdump->XMLDump && sdump->X3DDump) { switch (sf_type) { case GF_SG_VRML_SFSTRING: case GF_SG_VRML_SFSCRIPT: case GF_SG_VRML_SFURL: gf_fprintf(sdump->trace, "\'"); break; default: EndAttribute(sdump); break; } } else { EndAttribute(sdump); } } }
{'added': [(788, '\t\tfor (i=0; mffield && (i<mffield->count); i++) {'), (941, '\t\tfor (i=0; mffield && (i<mffield->count); i++) {'), (942, '\t\t\tif (i) gf_fprintf(sdump->trace, " ");'), (943, '\t\t\tgf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i);'), (944, '\t\t\tgf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, node);'), (1151, '\t\t\t\t\tfor (i=0; mffield && (i<mffield->count); i++) {'), (1192, '\t\t\t\t\tfor (i=0; mffield && (i<mffield->count); i++) {'), (1263, '\t\t\t\tfor (i=0; mffield && (i<mffield->count); i++) {'), (1264, '\t\t\t\t\tif (i) gf_fprintf(sdump->trace, " ");'), (1265, '\t\t\t\t\tif (field.fieldType != GF_SG_VRML_MFNODE) {'), (1266, '\t\t\t\t\t\tgf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i);'), (1267, '\t\t\t\t\t\tgf_dump_vrml_sffield(sdump, sf_type, slot_ptr, (mffield->count>1) ? 1 : 0, node);')], 'deleted': [(788, '\t\tfor (i=0; i<mffield->count; i++) {'), (941, '\t\tif (mffield) {'), (942, '\t\t\tfor (i=0; i<mffield->count; i++) {'), (943, '\t\t\t\tif (i) gf_fprintf(sdump->trace, " ");'), (944, '\t\t\t\tgf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i);'), (945, '\t\t\t\tgf_dump_vrml_sffield(sdump, sf_type, slot_ptr, 1, node);'), (946, '\t\t\t}'), (1153, '\t\t\t\t\tfor (i=0; i<mffield->count; i++) {'), (1194, '\t\t\t\t\tfor (i=0; i<mffield->count; i++) {'), (1265, '\t\t\t\tif (mffield) {'), (1266, '\t\t\t\t\tfor (i=0; i<mffield->count; i++) {'), (1267, '\t\t\t\t\t\tif (i) gf_fprintf(sdump->trace, " ");'), (1268, '\t\t\t\t\t\tif (field.fieldType != GF_SG_VRML_MFNODE) {'), (1269, '\t\t\t\t\t\t\tgf_sg_vrml_mf_get_item(field.far_ptr, field.fieldType, &slot_ptr, i);'), (1270, '\t\t\t\t\t\t\tgf_dump_vrml_sffield(sdump, sf_type, slot_ptr, (mffield->count>1) ? 1 : 0, node);'), (1271, '\t\t\t\t\t\t}')]}
12
16
3131
22359
146
1023
48
https://github.com/gpac/gpac
CVE-2022-2549
CWE-476
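The diff in this record (CVE-2022-2549, CWE-476) converts unguarded loops of the form for (i=0; i<mffield->count; i++) into for (i=0; mffield && (i<mffield->count); i++), folding the NULL test into the loop condition so gf_dump_vrml_field() never reads count through a NULL MF-field pointer. Below is a minimal, self-contained C sketch of that pattern; GenMFField is reduced here to a counted int array and dump_item() is a hypothetical stand-in for gf_dump_vrml_sffield(), so this illustrates the guard idiom rather than reproducing the GPAC code.

#include <stdio.h>

/* Reduced stand-in for GPAC's GenMFField: a counted array of values.
 * The field pointer itself can legitimately be NULL when the node
 * carries no data for that field. */
typedef struct {
    unsigned int count;
    int *vals;
} GenMFField;

/* Hypothetical printer standing in for gf_dump_vrml_sffield(). */
static void dump_item(int v, int is_first)
{
    printf("%s%d", is_first ? "" : " ", v);
}

static void dump_mf_field(const GenMFField *mffield)
{
    unsigned int i;
    printf("[");
    /* The fix pattern: test the pointer inside the loop condition so
     * mffield->count is never read through NULL. The vulnerable shape
     * was for (i=0; i<mffield->count; i++) with no guard. */
    for (i = 0; mffield && (i < mffield->count); i++)
        dump_item(mffield->vals[i], i == 0);
    printf("]\n");
}

int main(void)
{
    int vals[] = { 1, 2, 3 };
    GenMFField f = { 3, vals };
    dump_mf_field(&f);   /* [1 2 3] */
    dump_mf_field(NULL); /* [] -- no dereference of the NULL field */
    return 0;
}

Folding the guard into the loop condition, rather than returning early, keeps the surrounding output balanced even when the field is absent, which matches how the patched dumper still emits its enclosing "[" and "]" markers around an empty value.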
1286
tightDecode.h
C++
rfb::TightDecoder::FilterGradient
/* Copyright (C) 2000-2003 Constantin Kaplinsky. All Rights Reserved. * Copyright 2004-2005 Cendio AB. * Copyright 2009-2015 Pierre Ossman for Cendio AB * Copyright (C) 2011 D. R. Commander. All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ // // Tight decoding functions. // // This file is #included after having set the following macro: // BPP - 8, 16 or 32 namespace rfb { // CONCAT2E concatenates its arguments, expanding them if they are macros #ifndef CONCAT2E #define CONCAT2(a,b) a##b #define CONCAT2E(a,b) CONCAT2(a,b) #endif #define PIXEL_T rdr::CONCAT2E(U,BPP) #if BPP == 32 void TightDecoder::FilterGradient24(const rdr::U8 *inbuf, const PixelFormat& pf, PIXEL_T* outbuf, int stride, const Rect& r) { int x, y, c; rdr::U8 prevRow[TIGHT_MAX_WIDTH*3]; rdr::U8 thisRow[TIGHT_MAX_WIDTH*3]; rdr::U8 pix[3]; int est[3]; memset(prevRow, 0, sizeof(prevRow)); // Set up shortcut variables int rectHeight = r.height(); int rectWidth = r.width(); for (y = 0; y < rectHeight; y++) { /* First pixel in a row */ for (c = 0; c < 3; c++) { pix[c] = inbuf[y*rectWidth*3+c] + prevRow[c]; thisRow[c] = pix[c]; } pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1); /* Remaining pixels of a row */ for (x = 1; x < rectWidth; x++) { for (c = 0; c < 3; c++) { est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c]; if (est[c] > 0xff) { est[c] = 0xff; } else if (est[c] < 0) { est[c] = 0; } pix[c] = inbuf[(y*rectWidth+x)*3+c] + est[c]; thisRow[x*3+c] = pix[c]; } pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1); } memcpy(prevRow, thisRow, sizeof(prevRow)); } } #endif #if BPP != 8 void TightDecoder::FilterGradient(const rdr::U8* inbuf, const PixelFormat& pf, PIXEL_T* outbuf, int stride, const Rect& r) { int x, y, c; static rdr::U8 prevRow[TIGHT_MAX_WIDTH*3]; static rdr::U8 thisRow[TIGHT_MAX_WIDTH*3]; rdr::U8 pix[3]; int est[3]; memset(prevRow, 0, sizeof(prevRow)); // Set up shortcut variables int rectHeight = r.height(); int rectWidth = r.width(); for (y = 0; y < rectHeight; y++) { /* First pixel in a row */ pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1); for (c = 0; c < 3; c++) pix[c] += prevRow[c]; memcpy(thisRow, pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1); /* Remaining pixels of a row */ for (x = 1; x < rectWidth; x++) { for (c = 0; c < 3; c++) { est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c]; if (est[c] > 255) { est[c] = 255; } else if (est[c] < 0) { est[c] = 0; } } pf.rgbFromBuffer(pix, &inbuf[y*rectWidth+x], 1); for (c = 0; c < 3; c++) pix[c] += est[c]; memcpy(&thisRow[x*3], pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1); } memcpy(prevRow, thisRow, sizeof(prevRow)); } } #endif void TightDecoder::FilterPalette(const PIXEL_T* palette, int palSize, const rdr::U8* inbuf, PIXEL_T* outbuf, int stride, const Rect& r) { // Indexed color int x, h = r.height(), w = r.width(), b, pad = stride - w; 
PIXEL_T* ptr = outbuf; rdr::U8 bits; const rdr::U8* srcPtr = inbuf; if (palSize <= 2) { // 2-color palette while (h > 0) { for (x = 0; x < w / 8; x++) { bits = *srcPtr++; for (b = 7; b >= 0; b--) { *ptr++ = palette[bits >> b & 1]; } } if (w % 8 != 0) { bits = *srcPtr++; for (b = 7; b >= 8 - w % 8; b--) { *ptr++ = palette[bits >> b & 1]; } } ptr += pad; h--; } } else { // 256-color palette while (h > 0) { PIXEL_T *endOfRow = ptr + w; while (ptr < endOfRow) { *ptr++ = palette[*srcPtr++]; } ptr += pad; h--; } } } #undef PIXEL_T }
/* Copyright (C) 2000-2003 Constantin Kaplinsky. All Rights Reserved. * Copyright 2004-2005 Cendio AB. * Copyright 2009-2015 Pierre Ossman for Cendio AB * Copyright (C) 2011 D. R. Commander. All Rights Reserved. * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ // // Tight decoding functions. // // This file is #included after having set the following macro: // BPP - 8, 16 or 32 namespace rfb { // CONCAT2E concatenates its arguments, expanding them if they are macros #ifndef CONCAT2E #define CONCAT2(a,b) a##b #define CONCAT2E(a,b) CONCAT2(a,b) #endif #define PIXEL_T rdr::CONCAT2E(U,BPP) #if BPP == 32 void TightDecoder::FilterGradient24(const rdr::U8 *inbuf, const PixelFormat& pf, PIXEL_T* outbuf, int stride, const Rect& r) { int x, y, c; rdr::U8 prevRow[TIGHT_MAX_WIDTH*3]; rdr::U8 thisRow[TIGHT_MAX_WIDTH*3]; rdr::U8 pix[3]; int est[3]; memset(prevRow, 0, sizeof(prevRow)); // Set up shortcut variables int rectHeight = r.height(); int rectWidth = r.width(); for (y = 0; y < rectHeight; y++) { for (x = 0; x < rectWidth; x++) { /* First pixel in a row */ if (x == 0) { for (c = 0; c < 3; c++) { pix[c] = inbuf[y*rectWidth*3+c] + prevRow[c]; thisRow[c] = pix[c]; } pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1); continue; } for (c = 0; c < 3; c++) { est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c]; if (est[c] > 0xff) { est[c] = 0xff; } else if (est[c] < 0) { est[c] = 0; } pix[c] = inbuf[(y*rectWidth+x)*3+c] + est[c]; thisRow[x*3+c] = pix[c]; } pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1); } memcpy(prevRow, thisRow, sizeof(prevRow)); } } #endif #if BPP != 8 void TightDecoder::FilterGradient(const rdr::U8* inbuf, const PixelFormat& pf, PIXEL_T* outbuf, int stride, const Rect& r) { int x, y, c; static rdr::U8 prevRow[TIGHT_MAX_WIDTH*3]; static rdr::U8 thisRow[TIGHT_MAX_WIDTH*3]; rdr::U8 pix[3]; int est[3]; memset(prevRow, 0, sizeof(prevRow)); // Set up shortcut variables int rectHeight = r.height(); int rectWidth = r.width(); for (y = 0; y < rectHeight; y++) { for (x = 0; x < rectWidth; x++) { /* First pixel in a row */ if (x == 0) { pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1); for (c = 0; c < 3; c++) pix[c] += prevRow[c]; memcpy(thisRow, pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1); continue; } for (c = 0; c < 3; c++) { est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c]; if (est[c] > 255) { est[c] = 255; } else if (est[c] < 0) { est[c] = 0; } } pf.rgbFromBuffer(pix, &inbuf[y*rectWidth+x], 1); for (c = 0; c < 3; c++) pix[c] += est[c]; memcpy(&thisRow[x*3], pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1); } memcpy(prevRow, thisRow, sizeof(prevRow)); } } #endif void TightDecoder::FilterPalette(const PIXEL_T* palette, int palSize, const rdr::U8* inbuf, PIXEL_T* outbuf, int stride, const Rect& r) { // Indexed color int x, h = r.height(), w = r.width(), b, pad = stride - w; PIXEL_T* ptr 
= outbuf; rdr::U8 bits; const rdr::U8* srcPtr = inbuf; if (palSize <= 2) { // 2-color palette while (h > 0) { for (x = 0; x < w / 8; x++) { bits = *srcPtr++; for (b = 7; b >= 0; b--) { *ptr++ = palette[bits >> b & 1]; } } if (w % 8 != 0) { bits = *srcPtr++; for (b = 7; b >= 8 - w % 8; b--) { *ptr++ = palette[bits >> b & 1]; } } ptr += pad; h--; } } else { // 256-color palette while (h > 0) { PIXEL_T *endOfRow = ptr + w; while (ptr < endOfRow) { *ptr++ = palette[*srcPtr++]; } ptr += pad; h--; } } } #undef PIXEL_T }
void TightDecoder::FilterGradient(const rdr::U8* inbuf, const PixelFormat& pf, PIXEL_T* outbuf, int stride, const Rect& r) { int x, y, c; static rdr::U8 prevRow[TIGHT_MAX_WIDTH*3]; static rdr::U8 thisRow[TIGHT_MAX_WIDTH*3]; rdr::U8 pix[3]; int est[3]; memset(prevRow, 0, sizeof(prevRow)); // Set up shortcut variables int rectHeight = r.height(); int rectWidth = r.width(); for (y = 0; y < rectHeight; y++) { /* First pixel in a row */ pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1); for (c = 0; c < 3; c++) pix[c] += prevRow[c]; memcpy(thisRow, pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1); /* Remaining pixels of a row */ for (x = 1; x < rectWidth; x++) { for (c = 0; c < 3; c++) { est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c]; if (est[c] > 255) { est[c] = 255; } else if (est[c] < 0) { est[c] = 0; } } pf.rgbFromBuffer(pix, &inbuf[y*rectWidth+x], 1); for (c = 0; c < 3; c++) pix[c] += est[c]; memcpy(&thisRow[x*3], pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1); } memcpy(prevRow, thisRow, sizeof(prevRow)); } }
void TightDecoder::FilterGradient(const rdr::U8* inbuf, const PixelFormat& pf, PIXEL_T* outbuf, int stride, const Rect& r) { int x, y, c; static rdr::U8 prevRow[TIGHT_MAX_WIDTH*3]; static rdr::U8 thisRow[TIGHT_MAX_WIDTH*3]; rdr::U8 pix[3]; int est[3]; memset(prevRow, 0, sizeof(prevRow)); // Set up shortcut variables int rectHeight = r.height(); int rectWidth = r.width(); for (y = 0; y < rectHeight; y++) { for (x = 0; x < rectWidth; x++) { /* First pixel in a row */ if (x == 0) { pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1); for (c = 0; c < 3; c++) pix[c] += prevRow[c]; memcpy(thisRow, pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1); continue; } for (c = 0; c < 3; c++) { est[c] = prevRow[x*3+c] + pix[c] - prevRow[(x-1)*3+c]; if (est[c] > 255) { est[c] = 255; } else if (est[c] < 0) { est[c] = 0; } } pf.rgbFromBuffer(pix, &inbuf[y*rectWidth+x], 1); for (c = 0; c < 3; c++) pix[c] += est[c]; memcpy(&thisRow[x*3], pix, sizeof(pix)); pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride+x], pix, 1); } memcpy(prevRow, thisRow, sizeof(prevRow)); } }
{'added': [(59, ' for (x = 0; x < rectWidth; x++) {'), (60, ' /* First pixel in a row */'), (61, ' if (x == 0) {'), (62, ' for (c = 0; c < 3; c++) {'), (63, ' pix[c] = inbuf[y*rectWidth*3+c] + prevRow[c];'), (64, ' thisRow[c] = pix[c];'), (65, ' }'), (66, ' pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1);'), (67, ' continue;'), (68, ' }'), (108, ' for (x = 0; x < rectWidth; x++) {'), (109, ' /* First pixel in a row */'), (110, ' if (x == 0) {'), (111, ' pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1);'), (112, ' for (c = 0; c < 3; c++)'), (113, ' pix[c] += prevRow[c];'), (115, ' memcpy(thisRow, pix, sizeof(pix));'), (117, ' pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1);'), (118, ''), (119, ' continue;'), (120, ' }')], 'deleted': [(59, ' /* First pixel in a row */'), (60, ' for (c = 0; c < 3; c++) {'), (61, ' pix[c] = inbuf[y*rectWidth*3+c] + prevRow[c];'), (62, ' thisRow[c] = pix[c];'), (63, ' }'), (64, ' pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1);'), (66, ' /* Remaining pixels of a row */'), (67, ' for (x = 1; x < rectWidth; x++) {'), (106, ' /* First pixel in a row */'), (107, ' pf.rgbFromBuffer(pix, &inbuf[y*rectWidth], 1);'), (108, ' for (c = 0; c < 3; c++)'), (109, ' pix[c] += prevRow[c];'), (111, ' memcpy(thisRow, pix, sizeof(pix));'), (113, ' pf.bufferFromRGB((rdr::U8*)&outbuf[y*stride], pix, 1);'), (115, ' /* Remaining pixels of a row */'), (116, ' for (x = 1; x < rectWidth; x++) {')]}
21
16
115
1025
36
389
8
https://github.com/CendioOssman/tigervnc
CVE-2019-15693
CWE-787
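The diff in this record (CVE-2019-15693, CWE-787) restructures rfb::TightDecoder::FilterGradient so the first-pixel special case moves inside the bounded for (x = 0; x < rectWidth; x++) loop as an if (x == 0) { ...; continue; } branch, instead of running unconditionally before a separate for (x = 1; ...) loop; every per-pixel write is then reached only under the x < rectWidth test. The C sketch below shows that control-flow shape on a simplified single-channel gradient predictor; filter_gradient, MAX_WIDTH, and the one-byte pixel model are stand-ins (the real decoder works on 3-channel RGB through a PixelFormat and relies on callers keeping rectWidth within TIGHT_MAX_WIDTH), so treat it as an illustration of the restructuring, not the complete TigerVNC mitigation.

#include <stdio.h>
#include <string.h>

#define MAX_WIDTH 8 /* stand-in for TIGHT_MAX_WIDTH; row buffers are this size */

/* Simplified one-channel gradient predictor: each output pixel is the
 * input delta plus a clamped estimate from the left (thisRow[x-1]),
 * top (prevRow[x]) and top-left (prevRow[x-1]) neighbours. Callers
 * must keep width <= MAX_WIDTH -- the row buffers are fixed-size,
 * which is exactly where an out-of-bounds write (CWE-787) would
 * occur otherwise. */
static void filter_gradient(const unsigned char *in, unsigned char *out,
                            int width, int height)
{
    unsigned char prevRow[MAX_WIDTH] = { 0 };
    unsigned char thisRow[MAX_WIDTH] = { 0 };
    int x, y;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            int est;
            /* Post-fix shape: the first-pixel case sits inside the
             * x-bounded loop, so it cannot execute when width == 0 and
             * every thisRow[]/out[] write is guarded by x < width. */
            if (x == 0) {
                thisRow[0] = (unsigned char)(in[y * width] + prevRow[0]);
                out[y * width] = thisRow[0];
                continue;
            }
            est = prevRow[x] + thisRow[x - 1] - prevRow[x - 1];
            if (est > 255) est = 255;
            else if (est < 0) est = 0;
            thisRow[x] = (unsigned char)(in[y * width + x] + est);
            out[y * width + x] = thisRow[x];
        }
        memcpy(prevRow, thisRow, sizeof(prevRow));
    }
}

int main(void)
{
    unsigned char in[8] = { 1, 1, 1, 1, 2, 2, 2, 2 };
    unsigned char out[8];
    int i;
    filter_gradient(in, out, 4, 2);
    for (i = 0; i < 8; i++)
        printf("%d ", out[i]);
    printf("\n");
    return 0;
}

One observable consequence of the restructuring, visible in the diff above: for a rectangle with rectWidth == 0 the loop body never runs, whereas the pre-fix code still executed its unconditional first-pixel block for every row.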
1574
regcomp.c
C
S_grok_bslash_N
/* regcomp.c */ /* * 'A fair jaw-cracker dwarf-language must be.' --Samwise Gamgee * * [p.285 of _The Lord of the Rings_, II/iii: "The Ring Goes South"] */ /* This file contains functions for compiling a regular expression. See * also regexec.c which funnily enough, contains functions for executing * a regular expression. * * This file is also copied at build time to ext/re/re_comp.c, where * it's built with -DPERL_EXT_RE_BUILD -DPERL_EXT_RE_DEBUG -DPERL_EXT. * This causes the main functions to be compiled under new names and with * debugging support added, which makes "use re 'debug'" work. */ /* NOTE: this is derived from Henry Spencer's regexp code, and should not * confused with the original package (see point 3 below). Thanks, Henry! */ /* Additional note: this code is very heavily munged from Henry's version * in places. In some spots I've traded clarity for efficiency, so don't * blame Henry for some of the lack of readability. */ /* The names of the functions have been changed from regcomp and * regexec to pregcomp and pregexec in order to avoid conflicts * with the POSIX routines of the same names. */ #ifdef PERL_EXT_RE_BUILD #include "re_top.h" #endif /* * pregcomp and pregexec -- regsub and regerror are not used in perl * * Copyright (c) 1986 by University of Toronto. * Written by Henry Spencer. Not derived from licensed software. * * Permission is granted to anyone to use this software for any * purpose on any computer system, and to redistribute it freely, * subject to the following restrictions: * * 1. The author is not responsible for the consequences of use of * this software, no matter how awful, even if they arise * from defects in it. * * 2. The origin of this software must not be misrepresented, either * by explicit claim or by omission. * * 3. Altered versions must be plainly marked as such, and must not * be misrepresented as being the original software. * * **** Alterations to Henry's code are... **** **** Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, **** 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 **** by Larry Wall and others **** **** You may distribute under the terms of either the GNU General Public **** License or the Artistic License, as specified in the README file. * * Beware that some of this code is subtly aware of the way operator * precedence is structured in regular expressions. Serious changes in * regular-expression syntax might require a total rethink. */ #include "EXTERN.h" #define PERL_IN_REGCOMP_C #include "perl.h" #ifndef PERL_IN_XSUB_RE # include "INTERN.h" #endif #define REG_COMP_C #ifdef PERL_IN_XSUB_RE # include "re_comp.h" EXTERN_C const struct regexp_engine my_reg_engine; #else # include "regcomp.h" #endif #include "dquote_inline.h" #include "invlist_inline.h" #include "unicode_constants.h" #define HAS_NONLATIN1_FOLD_CLOSURE(i) \ _HAS_NONLATIN1_FOLD_CLOSURE_ONLY_FOR_USE_BY_REGCOMP_DOT_C_AND_REGEXEC_DOT_C(i) #define HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(i) \ _HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE_ONLY_FOR_USE_BY_REGCOMP_DOT_C_AND_REGEXEC_DOT_C(i) #define IS_NON_FINAL_FOLD(c) _IS_NON_FINAL_FOLD_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c) #define IS_IN_SOME_FOLD_L1(c) _IS_IN_SOME_FOLD_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c) #ifndef STATIC #define STATIC static #endif /* this is a chain of data about sub patterns we are processing that need to be handled separately/specially in study_chunk. Its so we can simulate recursion without losing state. 
*/ struct scan_frame; typedef struct scan_frame { regnode *last_regnode; /* last node to process in this frame */ regnode *next_regnode; /* next node to process when last is reached */ U32 prev_recursed_depth; I32 stopparen; /* what stopparen do we use */ U32 is_top_frame; /* what flags do we use? */ struct scan_frame *this_prev_frame; /* this previous frame */ struct scan_frame *prev_frame; /* previous frame */ struct scan_frame *next_frame; /* next frame */ } scan_frame; /* Certain characters are output as a sequence with the first being a * backslash. */ #define isBACKSLASHED_PUNCT(c) strchr("-[]\\^", c) struct RExC_state_t { U32 flags; /* RXf_* are we folding, multilining? */ U32 pm_flags; /* PMf_* stuff from the calling PMOP */ char *precomp; /* uncompiled string. */ char *precomp_end; /* pointer to end of uncompiled string. */ REGEXP *rx_sv; /* The SV that is the regexp. */ regexp *rx; /* perl core regexp structure */ regexp_internal *rxi; /* internal data for regexp object pprivate field */ char *start; /* Start of input for compile */ char *end; /* End of input for compile */ char *parse; /* Input-scan pointer. */ char *adjusted_start; /* 'start', adjusted. See code use */ STRLEN precomp_adj; /* an offset beyond precomp. See code use */ SSize_t whilem_seen; /* number of WHILEM in this expr */ regnode *emit_start; /* Start of emitted-code area */ regnode *emit_bound; /* First regnode outside of the allocated space */ regnode *emit; /* Code-emit pointer; if = &emit_dummy, implies compiling, so don't emit */ regnode_ssc emit_dummy; /* placeholder for emit to point to; large enough for the largest non-EXACTish node, so can use it as scratch in pass1 */ I32 naughty; /* How bad is this pattern? */ I32 sawback; /* Did we see \1, ...? */ U32 seen; SSize_t size; /* Code size. */ I32 npar; /* Capture buffer count, (OPEN) plus one. ("par" 0 is the whole pattern)*/ I32 nestroot; /* root parens we are in - used by accept */ I32 extralen; I32 seen_zerolen; regnode **open_parens; /* pointers to open parens */ regnode **close_parens; /* pointers to close parens */ regnode *end_op; /* END node in program */ I32 utf8; /* whether the pattern is utf8 or not */ I32 orig_utf8; /* whether the pattern was originally in utf8 */ /* XXX use this for future optimisation of case * where pattern must be upgraded to utf8. */ I32 uni_semantics; /* If a d charset modifier should use unicode rules, even if the pattern is not in utf8 */ HV *paren_names; /* Paren names */ regnode **recurse; /* Recurse regops */ I32 recurse_count; /* Number of recurse regops we have generated */ U8 *study_chunk_recursed; /* bitmap of which subs we have moved through */ U32 study_chunk_recursed_bytes; /* bytes in bitmap */ I32 in_lookbehind; I32 contains_locale; I32 override_recoding; #ifdef EBCDIC I32 recode_x_to_native; #endif I32 in_multi_char_class; struct reg_code_blocks *code_blocks;/* positions of literal (?{}) within pattern */ int code_index; /* next code_blocks[] slot */ SSize_t maxlen; /* mininum possible number of chars in string to match */ scan_frame *frame_head; scan_frame *frame_last; U32 frame_count; AV *warn_text; #ifdef ADD_TO_REGEXEC char *starttry; /* -Dr: where regtry was called. 
*/ #define RExC_starttry (pRExC_state->starttry) #endif SV *runtime_code_qr; /* qr with the runtime code blocks */ #ifdef DEBUGGING const char *lastparse; I32 lastnum; AV *paren_name_list; /* idx -> name */ U32 study_chunk_recursed_count; SV *mysv1; SV *mysv2; #define RExC_lastparse (pRExC_state->lastparse) #define RExC_lastnum (pRExC_state->lastnum) #define RExC_paren_name_list (pRExC_state->paren_name_list) #define RExC_study_chunk_recursed_count (pRExC_state->study_chunk_recursed_count) #define RExC_mysv (pRExC_state->mysv1) #define RExC_mysv1 (pRExC_state->mysv1) #define RExC_mysv2 (pRExC_state->mysv2) #endif bool seen_unfolded_sharp_s; bool strict; bool study_started; }; #define RExC_flags (pRExC_state->flags) #define RExC_pm_flags (pRExC_state->pm_flags) #define RExC_precomp (pRExC_state->precomp) #define RExC_precomp_adj (pRExC_state->precomp_adj) #define RExC_adjusted_start (pRExC_state->adjusted_start) #define RExC_precomp_end (pRExC_state->precomp_end) #define RExC_rx_sv (pRExC_state->rx_sv) #define RExC_rx (pRExC_state->rx) #define RExC_rxi (pRExC_state->rxi) #define RExC_start (pRExC_state->start) #define RExC_end (pRExC_state->end) #define RExC_parse (pRExC_state->parse) #define RExC_whilem_seen (pRExC_state->whilem_seen) /* Set during the sizing pass when there is a LATIN SMALL LETTER SHARP S in any * EXACTF node, hence was parsed under /di rules. If later in the parse, * something forces the pattern into using /ui rules, the sharp s should be * folded into the sequence 'ss', which takes up more space than previously * calculated. This means that the sizing pass needs to be restarted. (The * node also becomes an EXACTFU_SS.) For all other characters, an EXACTF node * that gets converted to /ui (and EXACTFU) occupies the same amount of space, * so there is no need to resize [perl #125990]. 
*/ #define RExC_seen_unfolded_sharp_s (pRExC_state->seen_unfolded_sharp_s) #ifdef RE_TRACK_PATTERN_OFFSETS #define RExC_offsets (pRExC_state->rxi->u.offsets) /* I am not like the others */ #endif #define RExC_emit (pRExC_state->emit) #define RExC_emit_dummy (pRExC_state->emit_dummy) #define RExC_emit_start (pRExC_state->emit_start) #define RExC_emit_bound (pRExC_state->emit_bound) #define RExC_sawback (pRExC_state->sawback) #define RExC_seen (pRExC_state->seen) #define RExC_size (pRExC_state->size) #define RExC_maxlen (pRExC_state->maxlen) #define RExC_npar (pRExC_state->npar) #define RExC_nestroot (pRExC_state->nestroot) #define RExC_extralen (pRExC_state->extralen) #define RExC_seen_zerolen (pRExC_state->seen_zerolen) #define RExC_utf8 (pRExC_state->utf8) #define RExC_uni_semantics (pRExC_state->uni_semantics) #define RExC_orig_utf8 (pRExC_state->orig_utf8) #define RExC_open_parens (pRExC_state->open_parens) #define RExC_close_parens (pRExC_state->close_parens) #define RExC_end_op (pRExC_state->end_op) #define RExC_paren_names (pRExC_state->paren_names) #define RExC_recurse (pRExC_state->recurse) #define RExC_recurse_count (pRExC_state->recurse_count) #define RExC_study_chunk_recursed (pRExC_state->study_chunk_recursed) #define RExC_study_chunk_recursed_bytes \ (pRExC_state->study_chunk_recursed_bytes) #define RExC_in_lookbehind (pRExC_state->in_lookbehind) #define RExC_contains_locale (pRExC_state->contains_locale) #ifdef EBCDIC # define RExC_recode_x_to_native (pRExC_state->recode_x_to_native) #endif #define RExC_in_multi_char_class (pRExC_state->in_multi_char_class) #define RExC_frame_head (pRExC_state->frame_head) #define RExC_frame_last (pRExC_state->frame_last) #define RExC_frame_count (pRExC_state->frame_count) #define RExC_strict (pRExC_state->strict) #define RExC_study_started (pRExC_state->study_started) #define RExC_warn_text (pRExC_state->warn_text) /* Heuristic check on the complexity of the pattern: if TOO_NAUGHTY, we set * a flag to disable back-off on the fixed/floating substrings - if it's * a high complexity pattern we assume the benefit of avoiding a full match * is worth the cost of checking for the substrings even if they rarely help. */ #define RExC_naughty (pRExC_state->naughty) #define TOO_NAUGHTY (10) #define MARK_NAUGHTY(add) \ if (RExC_naughty < TOO_NAUGHTY) \ RExC_naughty += (add) #define MARK_NAUGHTY_EXP(exp, add) \ if (RExC_naughty < TOO_NAUGHTY) \ RExC_naughty += RExC_naughty / (exp) + (add) #define ISMULT1(c) ((c) == '*' || (c) == '+' || (c) == '?') #define ISMULT2(s) ((*s) == '*' || (*s) == '+' || (*s) == '?' || \ ((*s) == '{' && regcurly(s))) /* * Flags to be passed up and down. */ #define WORST 0 /* Worst case. */ #define HASWIDTH 0x01 /* Known to match non-null strings. */ /* Simple enough to be STAR/PLUS operand; in an EXACTish node must be a single * character. (There needs to be a case: in the switch statement in regexec.c * for any node marked SIMPLE.) Note that this is not the same thing as * REGNODE_SIMPLE */ #define SIMPLE 0x02 #define SPSTART 0x04 /* Starts with * or + */ #define POSTPONED 0x08 /* (?1),(?&name), (??{...}) or similar */ #define TRYAGAIN 0x10 /* Weeded out a declaration. */ #define RESTART_PASS1 0x20 /* Need to restart sizing pass */ #define NEED_UTF8 0x40 /* In conjunction with RESTART_PASS1, need to calcuate sizes as UTF-8 */ #define REG_NODE_NUM(x) ((x) ? 
(int)((x)-RExC_emit_start) : -1) /* whether trie related optimizations are enabled */ #if PERL_ENABLE_EXTENDED_TRIE_OPTIMISATION #define TRIE_STUDY_OPT #define FULL_TRIE_STUDY #define TRIE_STCLASS #endif #define PBYTE(u8str,paren) ((U8*)(u8str))[(paren) >> 3] #define PBITVAL(paren) (1 << ((paren) & 7)) #define PAREN_TEST(u8str,paren) ( PBYTE(u8str,paren) & PBITVAL(paren)) #define PAREN_SET(u8str,paren) PBYTE(u8str,paren) |= PBITVAL(paren) #define PAREN_UNSET(u8str,paren) PBYTE(u8str,paren) &= (~PBITVAL(paren)) #define REQUIRE_UTF8(flagp) STMT_START { \ if (!UTF) { \ assert(PASS1); \ *flagp = RESTART_PASS1|NEED_UTF8; \ return NULL; \ } \ } STMT_END /* Change from /d into /u rules, and restart the parse if we've already seen * something whose size would increase as a result, by setting *flagp and * returning 'restart_retval'. RExC_uni_semantics is a flag that indicates * we've change to /u during the parse. */ #define REQUIRE_UNI_RULES(flagp, restart_retval) \ STMT_START { \ if (DEPENDS_SEMANTICS) { \ assert(PASS1); \ set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET); \ RExC_uni_semantics = 1; \ if (RExC_seen_unfolded_sharp_s) { \ *flagp |= RESTART_PASS1; \ return restart_retval; \ } \ } \ } STMT_END /* This converts the named class defined in regcomp.h to its equivalent class * number defined in handy.h. */ #define namedclass_to_classnum(class) ((int) ((class) / 2)) #define classnum_to_namedclass(classnum) ((classnum) * 2) #define _invlist_union_complement_2nd(a, b, output) \ _invlist_union_maybe_complement_2nd(a, b, TRUE, output) #define _invlist_intersection_complement_2nd(a, b, output) \ _invlist_intersection_maybe_complement_2nd(a, b, TRUE, output) /* About scan_data_t. During optimisation we recurse through the regexp program performing various inplace (keyhole style) optimisations. In addition study_chunk and scan_commit populate this data structure with information about what strings MUST appear in the pattern. We look for the longest string that must appear at a fixed location, and we look for the longest string that may appear at a floating location. So for instance in the pattern: /FOO[xX]A.*B[xX]BAR/ Both 'FOO' and 'A' are fixed strings. Both 'B' and 'BAR' are floating strings (because they follow a .* construct). study_chunk will identify both FOO and BAR as being the longest fixed and floating strings respectively. The strings can be composites, for instance /(f)(o)(o)/ will result in a composite fixed substring 'foo'. For each string some basic information is maintained: - min_offset This is the position the string must appear at, or not before. It also implicitly (when combined with minlenp) tells us how many characters must match before the string we are searching for. Likewise when combined with minlenp and the length of the string it tells us how many characters must appear after the string we have found. - max_offset Only used for floating strings. This is the rightmost point that the string can appear at. If set to SSize_t_MAX it indicates that the string can occur infinitely far to the right. For fixed strings, it is equal to min_offset. - minlenp A pointer to the minimum number of characters of the pattern that the string was found inside. This is important as in the case of positive lookahead or positive lookbehind we can have multiple patterns involved. Consider /(?=FOO).*F/ The minimum length of the pattern overall is 3, the minimum length of the lookahead part is 3, but the minimum length of the part that will actually match is 1. 
So 'FOO's minimum length is 3, but the minimum length for the F is 1. This is important as the minimum length is used to determine offsets in front of and behind the string being looked for. Since strings can be composites this is the length of the pattern at the time it was committed with a scan_commit. Note that the length is calculated by study_chunk, so that the minimum lengths are not known until the full pattern has been compiled, thus the pointer to the value. - lookbehind In the case of lookbehind the string being searched for can be offset past the start point of the final matching string. If this value was just blithely removed from the min_offset it would invalidate some of the calculations for how many chars must match before or after (as they are derived from min_offset and minlen and the length of the string being searched for). When the final pattern is compiled and the data is moved from the scan_data_t structure into the regexp structure the information about lookbehind is factored in, with the information that would have been lost precalculated in the end_shift field for the associated string. The fields pos_min and pos_delta are used to store the minimum offset and the delta to the maximum offset at the current point in the pattern. */ struct scan_data_substrs { SV *str; /* longest substring found in pattern */ SSize_t min_offset; /* earliest point in string it can appear */ SSize_t max_offset; /* latest point in string it can appear */ SSize_t *minlenp; /* pointer to the minlen relevant to the string */ SSize_t lookbehind; /* is the pos of the string modified by LB */ I32 flags; /* per substring SF_* and SCF_* flags */ }; typedef struct scan_data_t { /*I32 len_min; unused */ /*I32 len_delta; unused */ SSize_t pos_min; SSize_t pos_delta; SV *last_found; SSize_t last_end; /* min value, <0 unless valid. */ SSize_t last_start_min; SSize_t last_start_max; U8 cur_is_floating; /* whether the last_* values should be set as * the next fixed (0) or floating (1) * substring */ /* [0] is longest fixed substring so far, [1] is longest float so far */ struct scan_data_substrs substrs[2]; I32 flags; /* common SF_* and SCF_* flags */ I32 whilem_c; SSize_t *last_closep; regnode_ssc *start_class; } scan_data_t; /* * Forward declarations for pregcomp()'s friends. */ static const scan_data_t zero_scan_data = { 0, 0, NULL, 0, 0, 0, 0, { { NULL, 0, 0, 0, 0, 0 }, { NULL, 0, 0, 0, 0, 0 }, }, 0, 0, NULL, NULL }; /* study flags */ #define SF_BEFORE_SEOL 0x0001 #define SF_BEFORE_MEOL 0x0002 #define SF_BEFORE_EOL (SF_BEFORE_SEOL|SF_BEFORE_MEOL) #define SF_IS_INF 0x0040 #define SF_HAS_PAR 0x0080 #define SF_IN_PAR 0x0100 #define SF_HAS_EVAL 0x0200 /* SCF_DO_SUBSTR is the flag that tells the regexp analyzer to track the * longest substring in the pattern. When it is not set the optimiser keeps * track of position, but does not keep track of the actual strings seen, * * So for instance /foo/ will be parsed with SCF_DO_SUBSTR being true, but * /foo/i will not. * * Similarly, /foo.*(blah|erm|huh).*fnorble/ will have "foo" and "fnorble" * parsed with SCF_DO_SUBSTR on, but while processing the (...) it will be * turned off because of the alternation (BRANCH). */ #define SCF_DO_SUBSTR 0x0400 #define SCF_DO_STCLASS_AND 0x0800 #define SCF_DO_STCLASS_OR 0x1000 #define SCF_DO_STCLASS (SCF_DO_STCLASS_AND|SCF_DO_STCLASS_OR) #define SCF_WHILEM_VISITED_POS 0x2000 #define SCF_TRIE_RESTUDY 0x4000 /* Do restudy? 
*/ #define SCF_SEEN_ACCEPT 0x8000 #define SCF_TRIE_DOING_RESTUDY 0x10000 #define SCF_IN_DEFINE 0x20000 #define UTF cBOOL(RExC_utf8) /* The enums for all these are ordered so things work out correctly */ #define LOC (get_regex_charset(RExC_flags) == REGEX_LOCALE_CHARSET) #define DEPENDS_SEMANTICS (get_regex_charset(RExC_flags) \ == REGEX_DEPENDS_CHARSET) #define UNI_SEMANTICS (get_regex_charset(RExC_flags) == REGEX_UNICODE_CHARSET) #define AT_LEAST_UNI_SEMANTICS (get_regex_charset(RExC_flags) \ >= REGEX_UNICODE_CHARSET) #define ASCII_RESTRICTED (get_regex_charset(RExC_flags) \ == REGEX_ASCII_RESTRICTED_CHARSET) #define AT_LEAST_ASCII_RESTRICTED (get_regex_charset(RExC_flags) \ >= REGEX_ASCII_RESTRICTED_CHARSET) #define ASCII_FOLD_RESTRICTED (get_regex_charset(RExC_flags) \ == REGEX_ASCII_MORE_RESTRICTED_CHARSET) #define FOLD cBOOL(RExC_flags & RXf_PMf_FOLD) /* For programs that want to be strictly Unicode compatible by dying if any * attempt is made to match a non-Unicode code point against a Unicode * property. */ #define ALWAYS_WARN_SUPER ckDEAD(packWARN(WARN_NON_UNICODE)) #define OOB_NAMEDCLASS -1 /* There is no code point that is out-of-bounds, so this is problematic. But * its only current use is to initialize a variable that is always set before * looked at. */ #define OOB_UNICODE 0xDEADBEEF #define CHR_SVLEN(sv) (UTF ? sv_len_utf8(sv) : SvCUR(sv)) /* length of regex to show in messages that don't mark a position within */ #define RegexLengthToShowInErrorMessages 127 /* * If MARKER[12] are adjusted, be sure to adjust the constants at the top * of t/op/regmesg.t, the tests in t/op/re_tests, and those in * op/pragma/warn/regcomp. */ #define MARKER1 "<-- HERE" /* marker as it appears in the description */ #define MARKER2 " <-- HERE " /* marker as it appears within the regex */ #define REPORT_LOCATION " in regex; marked by " MARKER1 \ " in m/%" UTF8f MARKER2 "%" UTF8f "/" /* The code in this file in places uses one level of recursion with parsing * rebased to an alternate string constructed by us in memory. This can take * the form of something that is completely different from the input, or * something that uses the input as part of the alternate. In the first case, * there should be no possibility of an error, as we are in complete control of * the alternate string. But in the second case we don't control the input * portion, so there may be errors in that. Here's an example: * /[abc\x{DF}def]/ui * is handled specially because \x{df} folds to a sequence of more than one * character, 'ss'. What is done is to create and parse an alternate string, * which looks like this: * /(?:\x{DF}|[abc\x{DF}def])/ui * where it uses the input unchanged in the middle of something it constructs, * which is a branch for the DF outside the character class, and clustering * parens around the whole thing. (It knows enough to skip the DF inside the * class while in this substitute parse.) 'abc' and 'def' may have errors that * need to be reported. The general situation looks like this: * * sI tI xI eI * Input: ---------------------------------------------------- * Constructed: --------------------------------------------------- * sC tC xC eC EC * * The input string sI..eI is the input pattern. The string sC..EC is the * constructed substitute parse string. The portions sC..tC and eC..EC are * constructed by us. The portion tC..eC is an exact duplicate of the input * pattern tI..eI. In the diagram, these are vertically aligned. Suppose that * while parsing, we find an error at xC. 
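(With the /[abc\x{DF}def]/ui example above, an error at, say, the 'd' is found at a position xC inside the constructed string, yet the message must point at the corresponding 'd' in the pattern the user actually wrote.)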
We want to display a message showing * the real input string. Thus we need to find the point xI in it which * corresponds to xC. xC >= tC, since the portion of the string sC..tC has * been constructed by us, and so shouldn't have errors. We get: * * xI = sI + (tI - sI) + (xC - tC) * * and, the offset into sI is: * * (xI - sI) = (tI - sI) + (xC - tC) * * When the substitute is constructed, we save (tI -sI) as RExC_precomp_adj, * and we save tC as RExC_adjusted_start. * * During normal processing of the input pattern, everything points to that, * with RExC_precomp_adj set to 0, and RExC_adjusted_start set to sI. */ #define tI_sI RExC_precomp_adj #define tC RExC_adjusted_start #define sC RExC_precomp #define xI_offset(xC) ((IV) (tI_sI + (xC - tC))) #define xI(xC) (sC + xI_offset(xC)) #define eC RExC_precomp_end #define REPORT_LOCATION_ARGS(xC) \ UTF8fARG(UTF, \ (xI(xC) > eC) /* Don't run off end */ \ ? eC - sC /* Length before the <--HERE */ \ : ( __ASSERT_(xI_offset(xC) >= 0) xI_offset(xC) ), \ sC), /* The input pattern printed up to the <--HERE */ \ UTF8fARG(UTF, \ (xI(xC) > eC) ? 0 : eC - xI(xC), /* Length after <--HERE */ \ (xI(xC) > eC) ? eC : xI(xC)) /* pattern after <--HERE */ /* Used to point after bad bytes for an error message, but avoid skipping * past a nul byte. */ #define SKIP_IF_CHAR(s) (!*(s) ? 0 : UTF ? UTF8SKIP(s) : 1) /* * Calls SAVEDESTRUCTOR_X if needed, then calls Perl_croak with the given * arg. Show regex, up to a maximum length. If it's too long, chop and add * "...". */ #define _FAIL(code) STMT_START { \ const char *ellipses = ""; \ IV len = RExC_precomp_end - RExC_precomp; \ \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ if (len > RegexLengthToShowInErrorMessages) { \ /* chop 10 shorter than the max, to ensure meaning of "..." */ \ len = RegexLengthToShowInErrorMessages - 10; \ ellipses = "..."; \ } \ code; \ } STMT_END #define FAIL(msg) _FAIL( \ Perl_croak(aTHX_ "%s in regex m/%" UTF8f "%s/", \ msg, UTF8fARG(UTF, len, RExC_precomp), ellipses)) #define FAIL2(msg,arg) _FAIL( \ Perl_croak(aTHX_ msg " in regex m/%" UTF8f "%s/", \ arg, UTF8fARG(UTF, len, RExC_precomp), ellipses)) /* * Simple_vFAIL -- like FAIL, but marks the current location in the scan */ #define Simple_vFAIL(m) STMT_START { \ Perl_croak(aTHX_ "%s" REPORT_LOCATION, \ m, REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END /* * Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL() */ #define vFAIL(m) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ Simple_vFAIL(m); \ } STMT_END /* * Like Simple_vFAIL(), but accepts two arguments. */ #define Simple_vFAIL2(m,a1) STMT_START { \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END /* * Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL2(). */ #define vFAIL2(m,a1) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ Simple_vFAIL2(m, a1); \ } STMT_END /* * Like Simple_vFAIL(), but accepts three arguments. */ #define Simple_vFAIL3(m, a1, a2) STMT_START { \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END /* * Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL3(). */ #define vFAIL3(m,a1,a2) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ Simple_vFAIL3(m, a1, a2); \ } STMT_END /* * Like Simple_vFAIL(), but accepts four arguments. 
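All of these croaking variants format their message through REPORT_LOCATION above, so the report shows the pattern split at the error point by the <-- HERE marker.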
*/ #define Simple_vFAIL4(m, a1, a2, a3) STMT_START { \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, a3, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END #define vFAIL4(m,a1,a2,a3) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ Simple_vFAIL4(m, a1, a2, a3); \ } STMT_END /* A specialized version of vFAIL2 that works with UTF8f */ #define vFAIL2utf8f(m, a1) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END #define vFAIL3utf8f(m, a1, a2) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END /* These have asserts in them because of [perl #122671] Many warnings in * regcomp.c can occur twice. If they get output in pass1 and later in that * pass, the pattern has to be converted to UTF-8 and the pass restarted, they * would get output again. So they should be output in pass2, and these * asserts make sure new warnings follow that paradigm. */ /* m is not necessarily a "literal string", in this macro */ #define reg_warn_non_literal_string(loc, m) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ "%s" REPORT_LOCATION, \ m, REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARNreg(loc,m) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN(loc, m) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN_dep(loc, m) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_DEPRECATED), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARNdep(loc,m) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner_d(aTHX_ packWARN(WARN_DEPRECATED), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARNregdep(loc,m) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner_d(aTHX_ packWARN2(WARN_DEPRECATED, \ WARN_REGEXP), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARN2reg_d(loc,m, a1) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner_d(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARN2reg(loc, m, a1) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN3(loc, m, a1, a2) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARN3reg(loc, m, a1, a2) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN4(loc, m, a1, a2, a3) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, a3, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARN4reg(loc, m, a1, a2, a3) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, a3, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN5(loc, m, a1, a2, a3, a4) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, a3, a4, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END /* Macros for recording node offsets. 
20001227 mjd@plover.com * Nodes are numbered 1, 2, 3, 4. Node #n's position is recorded in * element 2*n-1 of the array. Element #2n holds the byte length of node #n. * Element 0 holds the number of nodes, n; so for two nodes the array is laid out as [2, offset1, length1, offset2, length2]. * Position is 1 indexed. */ #ifndef RE_TRACK_PATTERN_OFFSETS #define Set_Node_Offset_To_R(node,byte) #define Set_Node_Offset(node,byte) #define Set_Cur_Node_Offset #define Set_Node_Length_To_R(node,len) #define Set_Node_Length(node,len) #define Set_Node_Cur_Length(node,start) #define Node_Offset(n) #define Node_Length(n) #define Set_Node_Offset_Length(node,offset,len) #define ProgLen(ri) ri->u.proglen #define SetProgLen(ri,x) ri->u.proglen = x #else #define ProgLen(ri) ri->u.offsets[0] #define SetProgLen(ri,x) ri->u.offsets[0] = x #define Set_Node_Offset_To_R(node,byte) STMT_START { \ if (! SIZE_ONLY) { \ MJD_OFFSET_DEBUG(("** (%d) offset of node %d is %d.\n", \ __LINE__, (int)(node), (int)(byte))); \ if((node) < 0) { \ Perl_croak(aTHX_ "value of node is %d in Offset macro", \ (int)(node)); \ } else { \ RExC_offsets[2*(node)-1] = (byte); \ } \ } \ } STMT_END #define Set_Node_Offset(node,byte) \ Set_Node_Offset_To_R((node)-RExC_emit_start, (byte)-RExC_start) #define Set_Cur_Node_Offset Set_Node_Offset(RExC_emit, RExC_parse) #define Set_Node_Length_To_R(node,len) STMT_START { \ if (! SIZE_ONLY) { \ MJD_OFFSET_DEBUG(("** (%d) size of node %d is %d.\n", \ __LINE__, (int)(node), (int)(len))); \ if((node) < 0) { \ Perl_croak(aTHX_ "value of node is %d in Length macro", \ (int)(node)); \ } else { \ RExC_offsets[2*(node)] = (len); \ } \ } \ } STMT_END #define Set_Node_Length(node,len) \ Set_Node_Length_To_R((node)-RExC_emit_start, len) #define Set_Node_Cur_Length(node, start) \ Set_Node_Length(node, RExC_parse - start) /* Get offsets and lengths */ #define Node_Offset(n) (RExC_offsets[2*((n)-RExC_emit_start)-1]) #define Node_Length(n) (RExC_offsets[2*((n)-RExC_emit_start)]) #define Set_Node_Offset_Length(node,offset,len) STMT_START { \ Set_Node_Offset_To_R((node)-RExC_emit_start, (offset)); \ Set_Node_Length_To_R((node)-RExC_emit_start, (len)); \ } STMT_END #endif #if PERL_ENABLE_EXPERIMENTAL_REGEX_OPTIMISATIONS #define EXPERIMENTAL_INPLACESCAN #endif /*PERL_ENABLE_EXPERIMENTAL_REGEX_OPTIMISATIONS*/ #ifdef DEBUGGING int Perl_re_printf(pTHX_ const char *fmt, ...) { va_list ap; int result; PerlIO *f= Perl_debug_log; PERL_ARGS_ASSERT_RE_PRINTF; va_start(ap, fmt); result = PerlIO_vprintf(f, fmt, ap); va_end(ap); return result; } int Perl_re_indentf(pTHX_ const char *fmt, U32 depth, ...)
{ va_list ap; int result; PerlIO *f= Perl_debug_log; PERL_ARGS_ASSERT_RE_INDENTF; va_start(ap, depth); PerlIO_printf(f, "%*s", ( (int)depth % 20 ) * 2, ""); result = PerlIO_vprintf(f, fmt, ap); va_end(ap); return result; } #endif /* DEBUGGING */ #define DEBUG_RExC_seen() \ DEBUG_OPTIMISE_MORE_r({ \ Perl_re_printf( aTHX_ "RExC_seen: "); \ \ if (RExC_seen & REG_ZERO_LEN_SEEN) \ Perl_re_printf( aTHX_ "REG_ZERO_LEN_SEEN "); \ \ if (RExC_seen & REG_LOOKBEHIND_SEEN) \ Perl_re_printf( aTHX_ "REG_LOOKBEHIND_SEEN "); \ \ if (RExC_seen & REG_GPOS_SEEN) \ Perl_re_printf( aTHX_ "REG_GPOS_SEEN "); \ \ if (RExC_seen & REG_RECURSE_SEEN) \ Perl_re_printf( aTHX_ "REG_RECURSE_SEEN "); \ \ if (RExC_seen & REG_TOP_LEVEL_BRANCHES_SEEN) \ Perl_re_printf( aTHX_ "REG_TOP_LEVEL_BRANCHES_SEEN "); \ \ if (RExC_seen & REG_VERBARG_SEEN) \ Perl_re_printf( aTHX_ "REG_VERBARG_SEEN "); \ \ if (RExC_seen & REG_CUTGROUP_SEEN) \ Perl_re_printf( aTHX_ "REG_CUTGROUP_SEEN "); \ \ if (RExC_seen & REG_RUN_ON_COMMENT_SEEN) \ Perl_re_printf( aTHX_ "REG_RUN_ON_COMMENT_SEEN "); \ \ if (RExC_seen & REG_UNFOLDED_MULTI_SEEN) \ Perl_re_printf( aTHX_ "REG_UNFOLDED_MULTI_SEEN "); \ \ if (RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN) \ Perl_re_printf( aTHX_ "REG_UNBOUNDED_QUANTIFIER_SEEN "); \ \ Perl_re_printf( aTHX_ "\n"); \ }); #define DEBUG_SHOW_STUDY_FLAG(flags,flag) \ if ((flags) & flag) Perl_re_printf( aTHX_ "%s ", #flag) #ifdef DEBUGGING static void S_debug_show_study_flags(pTHX_ U32 flags, const char *open_str, const char *close_str) { if (!flags) return; Perl_re_printf( aTHX_ "%s", open_str); DEBUG_SHOW_STUDY_FLAG(flags, SF_BEFORE_SEOL); DEBUG_SHOW_STUDY_FLAG(flags, SF_BEFORE_MEOL); DEBUG_SHOW_STUDY_FLAG(flags, SF_IS_INF); DEBUG_SHOW_STUDY_FLAG(flags, SF_HAS_PAR); DEBUG_SHOW_STUDY_FLAG(flags, SF_IN_PAR); DEBUG_SHOW_STUDY_FLAG(flags, SF_HAS_EVAL); DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_SUBSTR); DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS_AND); DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS_OR); DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS); DEBUG_SHOW_STUDY_FLAG(flags, SCF_WHILEM_VISITED_POS); DEBUG_SHOW_STUDY_FLAG(flags, SCF_TRIE_RESTUDY); DEBUG_SHOW_STUDY_FLAG(flags, SCF_SEEN_ACCEPT); DEBUG_SHOW_STUDY_FLAG(flags, SCF_TRIE_DOING_RESTUDY); DEBUG_SHOW_STUDY_FLAG(flags, SCF_IN_DEFINE); Perl_re_printf( aTHX_ "%s", close_str); } static void S_debug_studydata(pTHX_ const char *where, scan_data_t *data, U32 depth, int is_inf) { GET_RE_DEBUG_FLAGS_DECL; DEBUG_OPTIMISE_MORE_r({ if (!data) return; Perl_re_indentf(aTHX_ "%s: Pos:%" IVdf "/%" IVdf " Flags: 0x%" UVXf, depth, where, (IV)data->pos_min, (IV)data->pos_delta, (UV)data->flags ); S_debug_show_study_flags(aTHX_ data->flags," [","]"); Perl_re_printf( aTHX_ " Whilem_c: %" IVdf " Lcp: %" IVdf " %s", (IV)data->whilem_c, (IV)(data->last_closep ? *((data)->last_closep) : -1), is_inf ? "INF " : "" ); if (data->last_found) { int i; Perl_re_printf(aTHX_ "Last:'%s' %" IVdf ":%" IVdf "/%" IVdf, SvPVX_const(data->last_found), (IV)data->last_end, (IV)data->last_start_min, (IV)data->last_start_max ); for (i = 0; i < 2; i++) { Perl_re_printf(aTHX_ " %s%s: '%s' @ %" IVdf "/%" IVdf, data->cur_is_floating == i ? "*" : "", i ? 
"Float" : "Fixed", SvPVX_const(data->substrs[i].str), (IV)data->substrs[i].min_offset, (IV)data->substrs[i].max_offset ); S_debug_show_study_flags(aTHX_ data->substrs[i].flags," [","]"); } } Perl_re_printf( aTHX_ "\n"); }); } static void S_debug_peep(pTHX_ const char *str, const RExC_state_t *pRExC_state, regnode *scan, U32 depth, U32 flags) { GET_RE_DEBUG_FLAGS_DECL; DEBUG_OPTIMISE_r({ regnode *Next; if (!scan) return; Next = regnext(scan); regprop(RExC_rx, RExC_mysv, scan, NULL, pRExC_state); Perl_re_indentf( aTHX_ "%s>%3d: %s (%d)", depth, str, REG_NODE_NUM(scan), SvPV_nolen_const(RExC_mysv), Next ? (REG_NODE_NUM(Next)) : 0 ); S_debug_show_study_flags(aTHX_ flags," [ ","]"); Perl_re_printf( aTHX_ "\n"); }); } # define DEBUG_STUDYDATA(where, data, depth, is_inf) \ S_debug_studydata(aTHX_ where, data, depth, is_inf) # define DEBUG_PEEP(str, scan, depth, flags) \ S_debug_peep(aTHX_ str, pRExC_state, scan, depth, flags) #else # define DEBUG_STUDYDATA(where, data, depth, is_inf) NOOP # define DEBUG_PEEP(str, scan, depth, flags) NOOP #endif /* ========================================================= * BEGIN edit_distance stuff. * * This calculates how many single character changes of any type are needed to * transform a string into another one. It is taken from version 3.1 of * * https://metacpan.org/pod/Text::Levenshtein::Damerau::XS */ /* Our unsorted dictionary linked list. */ /* Note we use UVs, not chars. */ struct dictionary{ UV key; UV value; struct dictionary* next; }; typedef struct dictionary item; PERL_STATIC_INLINE item* push(UV key,item* curr) { item* head; Newxz(head, 1, item); head->key = key; head->value = 0; head->next = curr; return head; } PERL_STATIC_INLINE item* find(item* head, UV key) { item* iterator = head; while (iterator){ if (iterator->key == key){ return iterator; } iterator = iterator->next; } return NULL; } PERL_STATIC_INLINE item* uniquePush(item* head,UV key) { item* iterator = head; while (iterator){ if (iterator->key == key) { return head; } iterator = iterator->next; } return push(key,head); } PERL_STATIC_INLINE void dict_free(item* head) { item* iterator = head; while (iterator) { item* temp = iterator; iterator = iterator->next; Safefree(temp); } head = NULL; } /* End of Dictionary Stuff */ /* All calculations/work are done here */ STATIC int S_edit_distance(const UV* src, const UV* tgt, const STRLEN x, /* length of src[] */ const STRLEN y, /* length of tgt[] */ const SSize_t maxDistance ) { item *head = NULL; UV swapCount,swapScore,targetCharCount,i,j; UV *scores; UV score_ceil = x + y; PERL_ARGS_ASSERT_EDIT_DISTANCE; /* intialize matrix start values */ Newxz(scores, ( (x + 2) * (y + 2)), UV); scores[0] = score_ceil; scores[1 * (y + 2) + 0] = score_ceil; scores[0 * (y + 2) + 1] = score_ceil; scores[1 * (y + 2) + 1] = 0; head = uniquePush(uniquePush(head,src[0]),tgt[0]); /* work loops */ /* i = src index */ /* j = tgt index */ for (i=1;i<=x;i++) { if (i < x) head = uniquePush(head,src[i]); scores[(i+1) * (y + 2) + 1] = i; scores[(i+1) * (y + 2) + 0] = score_ceil; swapCount = 0; for (j=1;j<=y;j++) { if (i == 1) { if(j < y) head = uniquePush(head,tgt[j]); scores[1 * (y + 2) + (j + 1)] = j; scores[0 * (y + 2) + (j + 1)] = score_ceil; } targetCharCount = find(head,tgt[j-1])->value; swapScore = scores[targetCharCount * (y + 2) + swapCount] + i - targetCharCount - 1 + j - swapCount; if (src[i-1] != tgt[j-1]){ scores[(i+1) * (y + 2) + (j + 1)] = MIN(swapScore,(MIN(scores[i * (y + 2) + j], MIN(scores[(i+1) * (y + 2) + j], scores[i * (y + 2) + (j + 1)])) + 1)); } 
else { swapCount = j; scores[(i+1) * (y + 2) + (j + 1)] = MIN(scores[i * (y + 2) + j], swapScore); } } find(head,src[i-1])->value = i; } { IV score = scores[(x+1) * (y + 2) + (y + 1)]; dict_free(head); Safefree(scores); return (maxDistance != 0 && maxDistance < score)?(-1):score; } } /* END of edit_distance() stuff * ========================================================= */ /* is c a control character for which we have a mnemonic? */ #define isMNEMONIC_CNTRL(c) _IS_MNEMONIC_CNTRL_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c) STATIC const char * S_cntrl_to_mnemonic(const U8 c) { /* Returns the mnemonic string that represents character 'c', if one * exists; NULL otherwise. The only ones that exist for the purposes of * this routine are a few control characters */ switch (c) { case '\a': return "\\a"; case '\b': return "\\b"; case ESC_NATIVE: return "\\e"; case '\f': return "\\f"; case '\n': return "\\n"; case '\r': return "\\r"; case '\t': return "\\t"; } return NULL; } /* Mark that we cannot extend a found fixed substring at this point. Update the longest found anchored substring or the longest found floating substrings if needed. */ STATIC void S_scan_commit(pTHX_ const RExC_state_t *pRExC_state, scan_data_t *data, SSize_t *minlenp, int is_inf) { const STRLEN l = CHR_SVLEN(data->last_found); SV * const longest_sv = data->substrs[data->cur_is_floating].str; const STRLEN old_l = CHR_SVLEN(longest_sv); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_SCAN_COMMIT; if ((l >= old_l) && ((l > old_l) || (data->flags & SF_BEFORE_EOL))) { const U8 i = data->cur_is_floating; SvSetMagicSV(longest_sv, data->last_found); data->substrs[i].min_offset = l ? data->last_start_min : data->pos_min; if (!i) /* fixed */ data->substrs[0].max_offset = data->substrs[0].min_offset; else { /* float */ data->substrs[1].max_offset = (l ? data->last_start_max : (data->pos_delta > SSize_t_MAX - data->pos_min ? SSize_t_MAX : data->pos_min + data->pos_delta)); if (is_inf || (STRLEN)data->substrs[1].max_offset > (STRLEN)SSize_t_MAX) data->substrs[1].max_offset = SSize_t_MAX; } if (data->flags & SF_BEFORE_EOL) data->substrs[i].flags |= (data->flags & SF_BEFORE_EOL); else data->substrs[i].flags &= ~SF_BEFORE_EOL; data->substrs[i].minlenp = minlenp; data->substrs[i].lookbehind = 0; } SvCUR_set(data->last_found, 0); { SV * const sv = data->last_found; if (SvUTF8(sv) && SvMAGICAL(sv)) { MAGIC * const mg = mg_find(sv, PERL_MAGIC_utf8); if (mg) mg->mg_len = 0; } } data->last_end = -1; data->flags &= ~SF_BEFORE_EOL; DEBUG_STUDYDATA("commit", data, 0, is_inf); } /* An SSC is just a regnode_charclass_posix with an extra field: the inversion * list that describes which code points it matches */ STATIC void S_ssc_anything(pTHX_ regnode_ssc *ssc) { /* Set the SSC 'ssc' to match an empty string or any code point */ PERL_ARGS_ASSERT_SSC_ANYTHING; assert(is_ANYOF_SYNTHETIC(ssc)); /* mortalize so won't leak */ ssc->invlist = sv_2mortal(_add_range_to_invlist(NULL, 0, UV_MAX)); ANYOF_FLAGS(ssc) |= SSC_MATCHES_EMPTY_STRING; /* Plus matches empty */ } STATIC int S_ssc_is_anything(const regnode_ssc *ssc) { /* Returns TRUE if the SSC 'ssc' can match the empty string and any code * point; FALSE otherwise. Thus, this is used to see if using 'ssc' buys * us anything: if the function returns TRUE, 'ssc' hasn't been restricted * in any way, so there's no point in using it */ UV start, end; bool ret; PERL_ARGS_ASSERT_SSC_IS_ANYTHING; assert(is_ANYOF_SYNTHETIC(ssc)); if (! 
(ANYOF_FLAGS(ssc) & SSC_MATCHES_EMPTY_STRING)) { return FALSE; } /* See if the list consists solely of the range 0 - Infinity */ invlist_iterinit(ssc->invlist); ret = invlist_iternext(ssc->invlist, &start, &end) && start == 0 && end == UV_MAX; invlist_iterfinish(ssc->invlist); if (ret) { return TRUE; } /* If e.g., both \w and \W are set, matches everything */ if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) { int i; for (i = 0; i < ANYOF_POSIXL_MAX; i += 2) { if (ANYOF_POSIXL_TEST(ssc, i) && ANYOF_POSIXL_TEST(ssc, i+1)) { return TRUE; } } } return FALSE; } STATIC void S_ssc_init(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc) { /* Initializes the SSC 'ssc'. This includes setting it to match an empty * string, any code point, or any posix class under locale */ PERL_ARGS_ASSERT_SSC_INIT; Zero(ssc, 1, regnode_ssc); set_ANYOF_SYNTHETIC(ssc); ARG_SET(ssc, ANYOF_ONLY_HAS_BITMAP); ssc_anything(ssc); /* If any portion of the regex is to operate under locale rules that aren't * fully known at compile time, initialization includes it. The reason * this isn't done for all regexes is that the optimizer was written under * the assumption that locale was all-or-nothing. Given the complexity and * lack of documentation in the optimizer, and that there are inadequate * test cases for locale, many parts of it may not work properly, it is * safest to avoid locale unless necessary. */ if (RExC_contains_locale) { ANYOF_POSIXL_SETALL(ssc); } else { ANYOF_POSIXL_ZERO(ssc); } } STATIC int S_ssc_is_cp_posixl_init(const RExC_state_t *pRExC_state, const regnode_ssc *ssc) { /* Returns TRUE if the SSC 'ssc' is in its initial state with regard only * to the list of code points matched, and locale posix classes; hence does * not check its flags) */ UV start, end; bool ret; PERL_ARGS_ASSERT_SSC_IS_CP_POSIXL_INIT; assert(is_ANYOF_SYNTHETIC(ssc)); invlist_iterinit(ssc->invlist); ret = invlist_iternext(ssc->invlist, &start, &end) && start == 0 && end == UV_MAX; invlist_iterfinish(ssc->invlist); if (! ret) { return FALSE; } if (RExC_contains_locale && ! ANYOF_POSIXL_SSC_TEST_ALL_SET(ssc)) { return FALSE; } return TRUE; } STATIC SV* S_get_ANYOF_cp_list_for_ssc(pTHX_ const RExC_state_t *pRExC_state, const regnode_charclass* const node) { /* Returns a mortal inversion list defining which code points are matched * by 'node', which is of type ANYOF. Handles complementing the result if * appropriate. If some code points aren't knowable at this time, the * returned list must, and will, contain every code point that is a * possibility. */ SV* invlist = NULL; SV* only_utf8_locale_invlist = NULL; unsigned int i; const U32 n = ARG(node); bool new_node_has_latin1 = FALSE; PERL_ARGS_ASSERT_GET_ANYOF_CP_LIST_FOR_SSC; /* Look at the data structure created by S_set_ANYOF_arg() */ if (n != ANYOF_ONLY_HAS_BITMAP) { SV * const rv = MUTABLE_SV(RExC_rxi->data->data[n]); AV * const av = MUTABLE_AV(SvRV(rv)); SV **const ary = AvARRAY(av); assert(RExC_rxi->data->what[n] == 's'); if (ary[1] && ary[1] != &PL_sv_undef) { /* Has compile-time swash */ invlist = sv_2mortal(invlist_clone(_get_swash_invlist(ary[1]))); } else if (ary[0] && ary[0] != &PL_sv_undef) { /* Here, no compile-time swash, and there are things that won't be * known until runtime -- we have to assume it could be anything */ invlist = sv_2mortal(_new_invlist(1)); return _add_range_to_invlist(invlist, 0, UV_MAX); } else if (ary[3] && ary[3] != &PL_sv_undef) { /* Here no compile-time swash, and no run-time only data. 
Use the * node's inversion list */ invlist = sv_2mortal(invlist_clone(ary[3])); } /* Get the code points valid only under UTF-8 locales */ if ((ANYOF_FLAGS(node) & ANYOFL_FOLD) && ary[2] && ary[2] != &PL_sv_undef) { only_utf8_locale_invlist = ary[2]; } } if (! invlist) { invlist = sv_2mortal(_new_invlist(0)); } /* An ANYOF node contains a bitmap for the first NUM_ANYOF_CODE_POINTS * code points, and an inversion list for the others, but if there are code * points that should match only conditionally on the target string being * UTF-8, those are placed in the inversion list, and not the bitmap. * Since there are circumstances under which they could match, they are * included in the SSC. But if the ANYOF node is to be inverted, we have * to exclude them here, so that when we invert below, the end result * actually does include them. (Think about "\xe0" =~ /[^\xc0]/di;). We * have to do this here before we add the unconditionally matched code * points */ if (ANYOF_FLAGS(node) & ANYOF_INVERT) { _invlist_intersection_complement_2nd(invlist, PL_UpperLatin1, &invlist); } /* Add in the points from the bit map */ for (i = 0; i < NUM_ANYOF_CODE_POINTS; i++) { if (ANYOF_BITMAP_TEST(node, i)) { unsigned int start = i++; for (; i < NUM_ANYOF_CODE_POINTS && ANYOF_BITMAP_TEST(node, i); ++i) { /* empty */ } invlist = _add_range_to_invlist(invlist, start, i-1); new_node_has_latin1 = TRUE; } } /* If this can match all upper Latin1 code points, have to add them * as well. But don't add them if inverting, as when that gets done below, * it would exclude all these characters, including the ones it shouldn't * that were added just above */ if (! (ANYOF_FLAGS(node) & ANYOF_INVERT) && OP(node) == ANYOFD && (ANYOF_FLAGS(node) & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER)) { _invlist_union(invlist, PL_UpperLatin1, &invlist); } /* Similarly for these */ if (ANYOF_FLAGS(node) & ANYOF_MATCHES_ALL_ABOVE_BITMAP) { _invlist_union_complement_2nd(invlist, PL_InBitmap, &invlist); } if (ANYOF_FLAGS(node) & ANYOF_INVERT) { _invlist_invert(invlist); } else if (new_node_has_latin1 && ANYOF_FLAGS(node) & ANYOFL_FOLD) { /* Under /li, any 0-255 could fold to any other 0-255, depending on the * locale. We can skip this if there are no 0-255 at all. */ _invlist_union(invlist, PL_Latin1, &invlist); } /* Similarly add the UTF-8 locale possible matches. These have to be * deferred until after the non-UTF-8 locale ones are taken care of just * above, or it leads to wrong results under ANYOF_INVERT */ if (only_utf8_locale_invlist) { _invlist_union_maybe_complement_2nd(invlist, only_utf8_locale_invlist, ANYOF_FLAGS(node) & ANYOF_INVERT, &invlist); } return invlist; } /* These two functions currently do the exact same thing */ #define ssc_init_zero ssc_init #define ssc_add_cp(ssc, cp) ssc_add_range((ssc), (cp), (cp)) #define ssc_match_all_cp(ssc) ssc_add_range(ssc, 0, UV_MAX) /* 'AND' a given class with another one. Can create false positives. 'ssc' * should not be inverted. 'and_with->flags & ANYOF_MATCHES_POSIXL' should be * 0 if 'and_with' is a regnode_charclass instead of a regnode_ssc. */ STATIC void S_ssc_and(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc, const regnode_charclass *and_with) { /* Accumulate into SSC 'ssc' its 'AND' with 'and_with', which is either * another SSC or a regular ANYOF class. Can create false positives. 
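As a small illustration (made-up sets, not engine data): ANDing a class whose code point list is {a,b} with one whose list is {b,c} can be computed exactly as {b}, but locale-dependent POSIX classes are unknowable until match time, so the set arithmetic below settles for supersets such as ((C1 & C2) | P1 | P2) rather than an exact answer.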
*/ SV* anded_cp_list; U8 anded_flags; PERL_ARGS_ASSERT_SSC_AND; assert(is_ANYOF_SYNTHETIC(ssc)); /* 'and_with' is used as-is if it too is an SSC; otherwise have to extract * the code point inversion list and just the relevant flags */ if (is_ANYOF_SYNTHETIC(and_with)) { anded_cp_list = ((regnode_ssc *)and_with)->invlist; anded_flags = ANYOF_FLAGS(and_with); /* XXX This is a kludge around what appears to be deficiencies in the * optimizer. If we make S_ssc_anything() add in the WARN_SUPER flag, * there are paths through the optimizer where it doesn't get weeded * out when it should. And if we don't make some extra provision for * it like the code just below, it doesn't get added when it should. * This solution is to add it only when AND'ing, which is here, and * only when what is being AND'ed is the pristine, original node * matching anything. Thus it is like adding it to ssc_anything() but * only when the result is to be AND'ed. Probably the same solution * could be adopted for the same problem we have with /l matching, * which is solved differently in S_ssc_init(), and that would lead to * fewer false positives than that solution has. But if this solution * creates bugs, the consequences are only that a warning isn't raised * that should be; while the consequences for having /l bugs is * incorrect matches */ if (ssc_is_anything((regnode_ssc *)and_with)) { anded_flags |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER; } } else { anded_cp_list = get_ANYOF_cp_list_for_ssc(pRExC_state, and_with); if (OP(and_with) == ANYOFD) { anded_flags = ANYOF_FLAGS(and_with) & ANYOF_COMMON_FLAGS; } else { anded_flags = ANYOF_FLAGS(and_with) &( ANYOF_COMMON_FLAGS |ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER |ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP); if (ANYOFL_UTF8_LOCALE_REQD(ANYOF_FLAGS(and_with))) { anded_flags &= ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD; } } } ANYOF_FLAGS(ssc) &= anded_flags; /* Below, C1 is the list of code points in 'ssc'; P1, its posix classes. * C2 is the list of code points in 'and-with'; P2, its posix classes. * 'and_with' may be inverted. When not inverted, we have the situation of * computing: * (C1 | P1) & (C2 | P2) * = (C1 & (C2 | P2)) | (P1 & (C2 | P2)) * = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2)) * <= ((C1 & C2) | P2)) | ( P1 | (P1 & P2)) * <= ((C1 & C2) | P1 | P2) * Alternatively, the last few steps could be: * = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2)) * <= ((C1 & C2) | C1 ) | ( C2 | (P1 & P2)) * <= (C1 | C2 | (P1 & P2)) * We favor the second approach if either P1 or P2 is non-empty. This is * because these components are a barrier to doing optimizations, as what * they match cannot be known until the moment of matching as they are * dependent on the current locale, 'AND"ing them likely will reduce or * eliminate them. * But we can do better if we know that C1,P1 are in their initial state (a * frequent occurrence), each matching everything: * (<everything>) & (C2 | P2) = C2 | P2 * Similarly, if C2,P2 are in their initial state (again a frequent * occurrence), the result is a no-op * (C1 | P1) & (<everything>) = C1 | P1 * * Inverted, we have * (C1 | P1) & ~(C2 | P2) = (C1 | P1) & (~C2 & ~P2) * = (C1 & (~C2 & ~P2)) | (P1 & (~C2 & ~P2)) * <= (C1 & ~C2) | (P1 & ~P2) * */ if ((ANYOF_FLAGS(and_with) & ANYOF_INVERT) && ! 
is_ANYOF_SYNTHETIC(and_with)) { unsigned int i; ssc_intersection(ssc, anded_cp_list, FALSE /* Has already been inverted */ ); /* If either P1 or P2 is empty, the intersection will be also; can skip * the loop */ if (! (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL)) { ANYOF_POSIXL_ZERO(ssc); } else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) { /* Note that the Posix class component P from 'and_with' actually * looks like: * P = Pa | Pb | ... | Pn * where each component is one posix class, such as in [\w\s]. * Thus * ~P = ~(Pa | Pb | ... | Pn) * = ~Pa & ~Pb & ... & ~Pn * <= ~Pa | ~Pb | ... | ~Pn * The last is something we can easily calculate, but unfortunately * is likely to have many false positives. We could do better * in some (but certainly not all) instances if two classes in * P have known relationships. For example * :lower: <= :alpha: <= :alnum: <= \w <= :graph: <= :print: * So * :lower: & :print: = :lower: * And similarly for classes that must be disjoint. For example, * since \s and \w can have no elements in common based on rules in * the POSIX standard, * \w & ^\S = nothing * Unfortunately, some vendor locales do not meet the Posix * standard, in particular almost everything by Microsoft. * The loop below just changes e.g., \w into \W and vice versa */ regnode_charclass_posixl temp; int add = 1; /* To calculate the index of the complement */ ANYOF_POSIXL_ZERO(&temp); for (i = 0; i < ANYOF_MAX; i++) { assert(i % 2 != 0 || ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i) || ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i + 1)); if (ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i)) { ANYOF_POSIXL_SET(&temp, i + add); } add = 0 - add; /* 1 goes to -1; -1 goes to 1 */ } ANYOF_POSIXL_AND(&temp, ssc); } /* else ssc already has no posixes */ } /* else: Not inverted. This routine is a no-op if 'and_with' is an SSC in its initial state */ else if (! is_ANYOF_SYNTHETIC(and_with) || ! ssc_is_cp_posixl_init(pRExC_state, (regnode_ssc *)and_with)) { /* But if 'ssc' is in its initial state, the result is just 'and_with'; * copy it over 'ssc' */ if (ssc_is_cp_posixl_init(pRExC_state, ssc)) { if (is_ANYOF_SYNTHETIC(and_with)) { StructCopy(and_with, ssc, regnode_ssc); } else { ssc->invlist = anded_cp_list; ANYOF_POSIXL_ZERO(ssc); if (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL) { ANYOF_POSIXL_OR((regnode_charclass_posixl*) and_with, ssc); } } } else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc) || (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL)) { /* One or the other of P1, P2 is non-empty. */ if (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL) { ANYOF_POSIXL_AND((regnode_charclass_posixl*) and_with, ssc); } ssc_union(ssc, anded_cp_list, FALSE); } else { /* P1 = P2 = empty */ ssc_intersection(ssc, anded_cp_list, FALSE); } } } STATIC void S_ssc_or(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc, const regnode_charclass *or_with) { /* Accumulate into SSC 'ssc' its 'OR' with 'or_with', which is either * another SSC or a regular ANYOF class. Can create false positives if * 'or_with' is to be inverted. 
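For instance (illustrative only), when 'or_with' is inverted the computation below keeps just (C1 | ~C2) | P1 and drops the unknowable ~P2 term entirely, which can only widen what the SSC admits, never wrongly narrow it.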
*/ SV* ored_cp_list; U8 ored_flags; PERL_ARGS_ASSERT_SSC_OR; assert(is_ANYOF_SYNTHETIC(ssc)); /* 'or_with' is used as-is if it too is an SSC; otherwise have to extract * the code point inversion list and just the relevant flags */ if (is_ANYOF_SYNTHETIC(or_with)) { ored_cp_list = ((regnode_ssc*) or_with)->invlist; ored_flags = ANYOF_FLAGS(or_with); } else { ored_cp_list = get_ANYOF_cp_list_for_ssc(pRExC_state, or_with); ored_flags = ANYOF_FLAGS(or_with) & ANYOF_COMMON_FLAGS; if (OP(or_with) != ANYOFD) { ored_flags |= ANYOF_FLAGS(or_with) & ( ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER |ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP); if (ANYOFL_UTF8_LOCALE_REQD(ANYOF_FLAGS(or_with))) { ored_flags |= ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD; } } } ANYOF_FLAGS(ssc) |= ored_flags; /* Below, C1 is the list of code points in 'ssc'; P1, its posix classes. * C2 is the list of code points in 'or-with'; P2, its posix classes. * 'or_with' may be inverted. When not inverted, we have the simple * situation of computing: * (C1 | P1) | (C2 | P2) = (C1 | C2) | (P1 | P2) * If P1|P2 yields a situation with both a class and its complement are * set, like having both \w and \W, this matches all code points, and we * can delete these from the P component of the ssc going forward. XXX We * might be able to delete all the P components, but I (khw) am not certain * about this, and it is better to be safe. * * Inverted, we have * (C1 | P1) | ~(C2 | P2) = (C1 | P1) | (~C2 & ~P2) * <= (C1 | P1) | ~C2 * <= (C1 | ~C2) | P1 * (which results in actually simpler code than the non-inverted case) * */ if ((ANYOF_FLAGS(or_with) & ANYOF_INVERT) && ! is_ANYOF_SYNTHETIC(or_with)) { /* We ignore P2, leaving P1 going forward */ } /* else Not inverted */ else if (ANYOF_FLAGS(or_with) & ANYOF_MATCHES_POSIXL) { ANYOF_POSIXL_OR((regnode_charclass_posixl*)or_with, ssc); if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) { unsigned int i; for (i = 0; i < ANYOF_MAX; i += 2) { if (ANYOF_POSIXL_TEST(ssc, i) && ANYOF_POSIXL_TEST(ssc, i + 1)) { ssc_match_all_cp(ssc); ANYOF_POSIXL_CLEAR(ssc, i); ANYOF_POSIXL_CLEAR(ssc, i+1); } } } } ssc_union(ssc, ored_cp_list, FALSE /* Already has been inverted */ ); } PERL_STATIC_INLINE void S_ssc_union(pTHX_ regnode_ssc *ssc, SV* const invlist, const bool invert2nd) { PERL_ARGS_ASSERT_SSC_UNION; assert(is_ANYOF_SYNTHETIC(ssc)); _invlist_union_maybe_complement_2nd(ssc->invlist, invlist, invert2nd, &ssc->invlist); } PERL_STATIC_INLINE void S_ssc_intersection(pTHX_ regnode_ssc *ssc, SV* const invlist, const bool invert2nd) { PERL_ARGS_ASSERT_SSC_INTERSECTION; assert(is_ANYOF_SYNTHETIC(ssc)); _invlist_intersection_maybe_complement_2nd(ssc->invlist, invlist, invert2nd, &ssc->invlist); } PERL_STATIC_INLINE void S_ssc_add_range(pTHX_ regnode_ssc *ssc, const UV start, const UV end) { PERL_ARGS_ASSERT_SSC_ADD_RANGE; assert(is_ANYOF_SYNTHETIC(ssc)); ssc->invlist = _add_range_to_invlist(ssc->invlist, start, end); } PERL_STATIC_INLINE void S_ssc_cp_and(pTHX_ regnode_ssc *ssc, const UV cp) { /* AND just the single code point 'cp' into the SSC 'ssc' */ SV* cp_list = _new_invlist(2); PERL_ARGS_ASSERT_SSC_CP_AND; assert(is_ANYOF_SYNTHETIC(ssc)); cp_list = add_cp_to_invlist(cp_list, cp); ssc_intersection(ssc, cp_list, FALSE /* Not inverted */ ); SvREFCNT_dec_NN(cp_list); } PERL_STATIC_INLINE void S_ssc_clear_locale(regnode_ssc *ssc) { /* Set the SSC 'ssc' to not match any locale things */ PERL_ARGS_ASSERT_SSC_CLEAR_LOCALE; assert(is_ANYOF_SYNTHETIC(ssc)); 
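/* Zero the locale-dependent POSIX class bits and strip the locale flags
 * below; the code point inversion list itself is left untouched. */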
ANYOF_POSIXL_ZERO(ssc); ANYOF_FLAGS(ssc) &= ~ANYOF_LOCALE_FLAGS; } #define NON_OTHER_COUNT NON_OTHER_COUNT_FOR_USE_ONLY_BY_REGCOMP_DOT_C STATIC bool S_is_ssc_worth_it(const RExC_state_t * pRExC_state, const regnode_ssc * ssc) { /* The synthetic start class is used to hopefully quickly winnow down * places where a pattern could start a match in the target string. If it * doesn't really narrow things down that much, there isn't much point to * having the overhead of using it. This function uses some very crude * heuristics to decide whether to use the ssc or not. * * It returns TRUE if 'ssc' rules out more than half what it considers to * be the "likely" possible matches, but of course it doesn't know what the * actual things being matched are going to be; these are only guesses. * * For /l matches, it assumes that the only likely matches are going to be * in the 0-255 range, uniformly distributed, so half of that is 127 * For /a and /d matches, it assumes that the likely matches will be just * the ASCII range, so half of that is 63 * For /u, when there isn't anything matching above the Latin1 range, it * assumes that that is the only range likely to be matched, and uses * half that as the cut-off: 127. If anything matches above Latin1, * it assumes that all of Unicode could match (uniformly), except for * non-Unicode code points and things in the General Category "Other" * (unassigned, private use, surrogates, controls and formats). This * is a much larger number. */ U32 count = 0; /* Running total of number of code points matched by 'ssc' */ UV start, end; /* Start and end points of current range in inversion list */ const U32 max_code_points = (LOC) ? 256 : (( ! UNI_SEMANTICS || invlist_highest(ssc->invlist) < 256) ? 128 : NON_OTHER_COUNT); const U32 max_match = max_code_points / 2; PERL_ARGS_ASSERT_IS_SSC_WORTH_IT; invlist_iterinit(ssc->invlist); while (invlist_iternext(ssc->invlist, &start, &end)) { if (start >= max_code_points) { break; } end = MIN(end, max_code_points - 1); count += end - start + 1; if (count >= max_match) { invlist_iterfinish(ssc->invlist); return FALSE; } } return TRUE; } STATIC void S_ssc_finalize(pTHX_ RExC_state_t *pRExC_state, regnode_ssc *ssc) { /* The inversion list in the SSC is marked mortal; now we need a more * permanent copy, which is stored the same way that is done in a regular * ANYOF node, with the first NUM_ANYOF_CODE_POINTS code points in a bit * map */ SV* invlist = invlist_clone(ssc->invlist); PERL_ARGS_ASSERT_SSC_FINALIZE; assert(is_ANYOF_SYNTHETIC(ssc)); /* The code in this file assumes that all but these flags aren't relevant * to the SSC, except SSC_MATCHES_EMPTY_STRING, which should be cleared * by the time we reach here */ assert(! (ANYOF_FLAGS(ssc) & ~( ANYOF_COMMON_FLAGS |ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER |ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP))); populate_ANYOF_from_invlist( (regnode *) ssc, &invlist); set_ANYOF_arg(pRExC_state, (regnode *) ssc, invlist, NULL, NULL, NULL, FALSE); /* Make sure is clone-safe */ ssc->invlist = NULL; if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) { ANYOF_FLAGS(ssc) |= ANYOF_MATCHES_POSIXL; } if (RExC_contains_locale) { OP(ssc) = ANYOFL; } assert(!
(ANYOF_FLAGS(ssc) & ANYOF_LOCALE_FLAGS) || RExC_contains_locale); } #define TRIE_LIST_ITEM(state,idx) (trie->states[state].trans.list)[ idx ] #define TRIE_LIST_CUR(state) ( TRIE_LIST_ITEM( state, 0 ).forid ) #define TRIE_LIST_LEN(state) ( TRIE_LIST_ITEM( state, 0 ).newstate ) #define TRIE_LIST_USED(idx) ( trie->states[state].trans.list \ ? (TRIE_LIST_CUR( idx ) - 1) \ : 0 ) #ifdef DEBUGGING /* dump_trie(trie,widecharmap,revcharmap) dump_trie_interim_list(trie,widecharmap,revcharmap,next_alloc) dump_trie_interim_table(trie,widecharmap,revcharmap,next_alloc) These routines dump out a trie in a somewhat readable format. The _interim_ variants are used for debugging the interim tables that are used to generate the final compressed representation which is what dump_trie expects. Part of the reason for their existence is to provide a form of documentation as to how the different representations function. */ /* Dumps the final compressed table form of the trie to Perl_debug_log. Used for debugging make_trie(). */ STATIC void S_dump_trie(pTHX_ const struct _reg_trie_data *trie, HV *widecharmap, AV *revcharmap, U32 depth) { U32 state; SV *sv=sv_newmortal(); int colwidth= widecharmap ? 6 : 4; U16 word; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_DUMP_TRIE; Perl_re_indentf( aTHX_ "Char : %-6s%-6s%-4s ", depth+1, "Match","Base","Ofs" ); for( state = 0 ; state < trie->uniquecharcount ; state++ ) { SV ** const tmp = av_fetch( revcharmap, state, 0); if ( tmp ) { Perl_re_printf( aTHX_ "%*s", colwidth, pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth, PL_colors[0], PL_colors[1], (SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_ESCAPE_FIRSTCHAR ) ); } } Perl_re_printf( aTHX_ "\n"); Perl_re_indentf( aTHX_ "State|-----------------------", depth+1); for( state = 0 ; state < trie->uniquecharcount ; state++ ) Perl_re_printf( aTHX_ "%.*s", colwidth, "--------"); Perl_re_printf( aTHX_ "\n"); for( state = 1 ; state < trie->statecount ; state++ ) { const U32 base = trie->states[ state ].trans.base; Perl_re_indentf( aTHX_ "#%4" UVXf "|", depth+1, (UV)state); if ( trie->states[ state ].wordnum ) { Perl_re_printf( aTHX_ " W%4X", trie->states[ state ].wordnum ); } else { Perl_re_printf( aTHX_ "%6s", "" ); } Perl_re_printf( aTHX_ " @%4" UVXf " ", (UV)base ); if ( base ) { U32 ofs = 0; while( ( base + ofs < trie->uniquecharcount ) || ( base + ofs - trie->uniquecharcount < trie->lasttrans && trie->trans[ base + ofs - trie->uniquecharcount ].check != state)) ofs++; Perl_re_printf( aTHX_ "+%2" UVXf "[ ", (UV)ofs); for ( ofs = 0 ; ofs < trie->uniquecharcount ; ofs++ ) { if ( ( base + ofs >= trie->uniquecharcount ) && ( base + ofs - trie->uniquecharcount < trie->lasttrans ) && trie->trans[ base + ofs - trie->uniquecharcount ].check == state ) { Perl_re_printf( aTHX_ "%*" UVXf, colwidth, (UV)trie->trans[ base + ofs - trie->uniquecharcount ].next ); } else { Perl_re_printf( aTHX_ "%*s",colwidth," ." ); } } Perl_re_printf( aTHX_ "]"); } Perl_re_printf( aTHX_ "\n" ); } Perl_re_indentf( aTHX_ "word_info N:(prev,len)=", depth); for (word=1; word <= trie->wordcount; word++) { Perl_re_printf( aTHX_ " %d:(%d,%d)", (int)word, (int)(trie->wordinfo[word].prev), (int)(trie->wordinfo[word].len)); } Perl_re_printf( aTHX_ "\n" ); } /* Dumps a fully constructed but uncompressed trie in list form. List tries normally only are used for construction when the number of possible chars (trie->uniquecharcount) is very high. Used for debugging make_trie(). 
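(Roughly: a list stores only the transitions that actually exist, so its memory cost is proportional to the number of edges, while the table form used for the final trie costs on the order of states * uniquecharcount entries but gives constant-time transitions; hence construction may start as a list and end as a compressed table.)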
*/ STATIC void S_dump_trie_interim_list(pTHX_ const struct _reg_trie_data *trie, HV *widecharmap, AV *revcharmap, U32 next_alloc, U32 depth) { U32 state; SV *sv=sv_newmortal(); int colwidth= widecharmap ? 6 : 4; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_DUMP_TRIE_INTERIM_LIST; /* print out the table precompression. */ Perl_re_indentf( aTHX_ "State :Word | Transition Data\n", depth+1 ); Perl_re_indentf( aTHX_ "%s", depth+1, "------:-----+-----------------\n" ); for( state=1 ; state < next_alloc ; state ++ ) { U16 charid; Perl_re_indentf( aTHX_ " %4" UVXf " :", depth+1, (UV)state ); if ( ! trie->states[ state ].wordnum ) { Perl_re_printf( aTHX_ "%5s| ",""); } else { Perl_re_printf( aTHX_ "W%4x| ", trie->states[ state ].wordnum ); } for( charid = 1 ; charid <= TRIE_LIST_USED( state ) ; charid++ ) { SV ** const tmp = av_fetch( revcharmap, TRIE_LIST_ITEM(state,charid).forid, 0); if ( tmp ) { Perl_re_printf( aTHX_ "%*s:%3X=%4" UVXf " | ", colwidth, pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth, PL_colors[0], PL_colors[1], (SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_ESCAPE_FIRSTCHAR ) , TRIE_LIST_ITEM(state,charid).forid, (UV)TRIE_LIST_ITEM(state,charid).newstate ); if (!(charid % 10)) Perl_re_printf( aTHX_ "\n%*s| ", (int)((depth * 2) + 14), ""); } } Perl_re_printf( aTHX_ "\n"); } } /* Dumps a fully constructed but uncompressed trie in table form. This is the normal DFA style state transition table, with a few twists to facilitate compression later. Used for debugging make_trie(). */ STATIC void S_dump_trie_interim_table(pTHX_ const struct _reg_trie_data *trie, HV *widecharmap, AV *revcharmap, U32 next_alloc, U32 depth) { U32 state; U16 charid; SV *sv=sv_newmortal(); int colwidth= widecharmap ? 6 : 4; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_DUMP_TRIE_INTERIM_TABLE; /* print out the table precompression so that we can do a visual check that they are identical. */ Perl_re_indentf( aTHX_ "Char : ", depth+1 ); for( charid = 0 ; charid < trie->uniquecharcount ; charid++ ) { SV ** const tmp = av_fetch( revcharmap, charid, 0); if ( tmp ) { Perl_re_printf( aTHX_ "%*s", colwidth, pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth, PL_colors[0], PL_colors[1], (SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_ESCAPE_FIRSTCHAR ) ); } } Perl_re_printf( aTHX_ "\n"); Perl_re_indentf( aTHX_ "State+-", depth+1 ); for( charid=0 ; charid < trie->uniquecharcount ; charid++ ) { Perl_re_printf( aTHX_ "%.*s", colwidth,"--------"); } Perl_re_printf( aTHX_ "\n" ); for( state=1 ; state < next_alloc ; state += trie->uniquecharcount ) { Perl_re_indentf( aTHX_ "%4" UVXf " : ", depth+1, (UV)TRIE_NODENUM( state ) ); for( charid = 0 ; charid < trie->uniquecharcount ; charid++ ) { UV v=(UV)SAFE_TRIE_NODENUM( trie->trans[ state + charid ].next ); if (v) Perl_re_printf( aTHX_ "%*" UVXf, colwidth, v ); else Perl_re_printf( aTHX_ "%*s", colwidth, "." ); } if ( ! trie->states[ TRIE_NODENUM( state ) ].wordnum ) { Perl_re_printf( aTHX_ " (%4" UVXf ")\n", (UV)trie->trans[ state ].check ); } else { Perl_re_printf( aTHX_ " (%4" UVXf ") W%4X\n", (UV)trie->trans[ state ].check, trie->states[ TRIE_NODENUM( state ) ].wordnum ); } } } #endif /* make_trie(startbranch,first,last,tail,word_count,flags,depth) startbranch: the first branch in the whole branch sequence first : start branch of sequence of branch-exact nodes. May be the same as startbranch last : Thing following the last branch. May be the same as tail. 
tail : item following the branch sequence
count : words in the sequence
flags : currently the OP() type we will be building one of /EXACT(|F|FA|FU|FU_SS|L|FLU8)/
depth : indent depth

In place, this optimizes a sequence of 2 or more Branch-Exact nodes into a TRIE node. A trie is an N'ary tree where the branches are determined by digital decomposition of the key. I.e., at the root node you look up the 1st character and follow that branch; repeat until you find the end of the branches. Nodes can be marked as "accepting", meaning they represent a complete word. E.g.:

   /he|she|his|hers/

would convert into the following structure. Numbers represent states, letters following numbers represent valid transitions on the letter from that state; if the number is in square brackets it represents an accepting state, otherwise it will be in parentheses.

     +-h->+-e->[3]-+-r->(8)-+-s->[9]
     |    |
     |   (2)
     |    |
    (1)   +-i->(6)-+-s->[7]
     |
     +-s->(3)-+-h->(4)-+-e->[5]

Accept Word Mapping: 3=>1 (he), 5=>2 (she), 7=>3 (his), 9=>4 (hers)

This shows that when matching against the string 'hers' we will begin at state 1, read 'h' and move to state 2, read 'e' and move to state 3, which is accepting; then read 'r' and go to state 8, followed by 's', which takes us to state 9, also accepting. Thus we know that we can match both 'he' and 'hers' with a single traversal. We store a mapping from accepting state to which word was matched, and then when we have multiple possibilities we try to complete the rest of the regex in the order in which they occurred in the alternation. The only prior NFA-like behaviour that would be changed by the TRIE support is the silent ignoring of duplicate alternations which are of the form: / (DUPE|DUPE) X? (?{ ... }) Y /x Thus EVAL blocks following a trie may be called a different number of times with and without the optimisation. With the optimisation, dupes will be silently ignored. This inconsistent behaviour of EVAL type nodes is well established, as the following demonstrates: 'words'=~/(word|word|word)(?{ print $1 })[xyz]/ which prints out 'word' three times, but 'words'=~/(word|word|word)(?{ print $1 })S/ which doesn't print it out at all. This is due to other optimisations kicking in.
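As a rough sketch of the matching walk described above (hypothetical names; the real engine uses the compressed base/check transition table built below):

      U32 state = 1;                                  (start at the root)
      while (*input && state) {
          U16 charid = charmap[(U8)*input++];         (digital decomposition)
          state = charid ? trans[state][charid] : 0;  (follow the edge, or fail)
          if (state && wordnum[state])
              matched = wordnum[state];               (accepting: remember the word)
      }

Here charmap, trans, wordnum and matched are illustrative stand-ins only.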
Example of what happens on a structural level: The regexp /(ac|ad|ab)+/ will produce the following debug output: 1: CURLYM[1] {1,32767}(18) 5: BRANCH(8) 6: EXACT <ac>(16) 8: BRANCH(11) 9: EXACT <ad>(16) 11: BRANCH(14) 12: EXACT <ab>(16) 16: SUCCEED(0) 17: NOTHING(18) 18: END(0) This would be optimizable with startbranch=5, first=5, last=16, tail=16 and should turn into: 1: CURLYM[1] {1,32767}(18) 5: TRIE(16) [Words:3 Chars Stored:6 Unique Chars:4 States:5 NCP:1] <ac> <ad> <ab> 16: SUCCEED(0) 17: NOTHING(18) 18: END(0) Cases where tail != last would be like /(?foo|bar)baz/: 1: BRANCH(4) 2: EXACT <foo>(8) 4: BRANCH(7) 5: EXACT <bar>(8) 7: TAIL(8) 8: EXACT <baz>(10) 10: END(0) which would be optimizable with startbranch=1, first=1, last=7, tail=8 and would end up looking like: 1: TRIE(8) [Words:2 Chars Stored:6 Unique Chars:5 States:7 NCP:1] <foo> <bar> 7: TAIL(8) 8: EXACT <baz>(10) 10: END(0) d = uvchr_to_utf8_flags(d, uv, 0); is the recommended Unicode-aware way of saying *(d++) = uv; */ #define TRIE_STORE_REVCHAR(val) \ STMT_START { \ if (UTF) { \ SV *zlopp = newSV(UTF8_MAXBYTES); \ unsigned char *flrbbbbb = (unsigned char *) SvPVX(zlopp); \ unsigned const char *const kapow = uvchr_to_utf8(flrbbbbb, val); \ SvCUR_set(zlopp, kapow - flrbbbbb); \ SvPOK_on(zlopp); \ SvUTF8_on(zlopp); \ av_push(revcharmap, zlopp); \ } else { \ char ooooff = (char)val; \ av_push(revcharmap, newSVpvn(&ooooff, 1)); \ } \ } STMT_END /* This gets the next character from the input, folding it if not already * folded. */ #define TRIE_READ_CHAR STMT_START { \ wordlen++; \ if ( UTF ) { \ /* if it is UTF then it is either already folded, or does not need \ * folding */ \ uvc = valid_utf8_to_uvchr( (const U8*) uc, &len); \ } \ else if (folder == PL_fold_latin1) { \ /* This folder implies Unicode rules, which in the range expressible \ * by not UTF is the lower case, with the two exceptions, one of \ * which should have been taken care of before calling this */ \ assert(*uc != LATIN_SMALL_LETTER_SHARP_S); \ uvc = toLOWER_L1(*uc); \ if (UNLIKELY(uvc == MICRO_SIGN)) uvc = GREEK_SMALL_LETTER_MU; \ len = 1; \ } else { \ /* raw data, will be folded later if needed */ \ uvc = (U32)*uc; \ len = 1; \ } \ } STMT_END #define TRIE_LIST_PUSH(state,fid,ns) STMT_START { \ if ( TRIE_LIST_CUR( state ) >=TRIE_LIST_LEN( state ) ) { \ U32 ging = TRIE_LIST_LEN( state ) * 2; \ Renew( trie->states[ state ].trans.list, ging, reg_trie_trans_le ); \ TRIE_LIST_LEN( state ) = ging; \ } \ TRIE_LIST_ITEM( state, TRIE_LIST_CUR( state ) ).forid = fid; \ TRIE_LIST_ITEM( state, TRIE_LIST_CUR( state ) ).newstate = ns; \ TRIE_LIST_CUR( state )++; \ } STMT_END #define TRIE_LIST_NEW(state) STMT_START { \ Newxz( trie->states[ state ].trans.list, \ 4, reg_trie_trans_le ); \ TRIE_LIST_CUR( state ) = 1; \ TRIE_LIST_LEN( state ) = 4; \ } STMT_END #define TRIE_HANDLE_WORD(state) STMT_START { \ U16 dupe= trie->states[ state ].wordnum; \ regnode * const noper_next = regnext( noper ); \ \ DEBUG_r({ \ /* store the word for dumping */ \ SV* tmp; \ if (OP(noper) != NOTHING) \ tmp = newSVpvn_utf8(STRING(noper), STR_LEN(noper), UTF); \ else \ tmp = newSVpvn_utf8( "", 0, UTF ); \ av_push( trie_words, tmp ); \ }); \ \ curword++; \ trie->wordinfo[curword].prev = 0; \ trie->wordinfo[curword].len = wordlen; \ trie->wordinfo[curword].accept = state; \ \ if ( noper_next < tail ) { \ if (!trie->jump) \ trie->jump = (U16 *) PerlMemShared_calloc( word_count + 1, \ sizeof(U16) ); \ trie->jump[curword] = (U16)(noper_next - convert); \ if (!jumper) \ jumper = noper_next; \ if 
(!nextbranch) \ nextbranch= regnext(cur); \ } \ \ if ( dupe ) { \ /* It's a dupe. Pre-insert into the wordinfo[].prev */\ /* chain, so that when the bits of chain are later */\ /* linked together, the dups appear in the chain */\ trie->wordinfo[curword].prev = trie->wordinfo[dupe].prev; \ trie->wordinfo[dupe].prev = curword; \ } else { \ /* we haven't inserted this word yet. */ \ trie->states[ state ].wordnum = curword; \ } \ } STMT_END #define TRIE_TRANS_STATE(state,base,ucharcount,charid,special) \ ( ( base + charid >= ucharcount \ && base + charid < ubound \ && state == trie->trans[ base - ucharcount + charid ].check \ && trie->trans[ base - ucharcount + charid ].next ) \ ? trie->trans[ base - ucharcount + charid ].next \ : ( state==1 ? special : 0 ) \ ) #define TRIE_BITMAP_SET_FOLDED(trie, uvc, folder) \ STMT_START { \ TRIE_BITMAP_SET(trie, uvc); \ /* store the folded codepoint */ \ if ( folder ) \ TRIE_BITMAP_SET(trie, folder[(U8) uvc ]); \ \ if ( !UTF ) { \ /* store first byte of utf8 representation of */ \ /* variant codepoints */ \ if (! UVCHR_IS_INVARIANT(uvc)) { \ TRIE_BITMAP_SET(trie, UTF8_TWO_BYTE_HI(uvc)); \ } \ } \ } STMT_END #define MADE_TRIE 1 #define MADE_JUMP_TRIE 2 #define MADE_EXACT_TRIE 4 STATIC I32 S_make_trie(pTHX_ RExC_state_t *pRExC_state, regnode *startbranch, regnode *first, regnode *last, regnode *tail, U32 word_count, U32 flags, U32 depth) { /* first pass, loop through and scan words */ reg_trie_data *trie; HV *widecharmap = NULL; AV *revcharmap = newAV(); regnode *cur; STRLEN len = 0; UV uvc = 0; U16 curword = 0; U32 next_alloc = 0; regnode *jumper = NULL; regnode *nextbranch = NULL; regnode *convert = NULL; U32 *prev_states; /* temp array mapping each state to previous one */ /* we just use folder as a flag in utf8 */ const U8 * folder = NULL; /* in the below add_data call we are storing either 'tu' or 'tuaa' * which stands for one trie structure, one hash, optionally followed * by two arrays */ #ifdef DEBUGGING const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("tuaa")); AV *trie_words = NULL; /* along with revcharmap, this only used during construction but both are * useful during debugging so we store them in the struct when debugging. */ #else const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("tu")); STRLEN trie_charcount=0; #endif SV *re_trie_maxbuff; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_MAKE_TRIE; #ifndef DEBUGGING PERL_UNUSED_ARG(depth); #endif switch (flags) { case EXACT: case EXACTL: break; case EXACTFA: case EXACTFU_SS: case EXACTFU: case EXACTFLU8: folder = PL_fold_latin1; break; case EXACTF: folder = PL_fold; break; default: Perl_croak( aTHX_ "panic! 
In trie construction, unknown node type %u %s", (unsigned) flags, PL_reg_name[flags] ); } trie = (reg_trie_data *) PerlMemShared_calloc( 1, sizeof(reg_trie_data) ); trie->refcount = 1; trie->startstate = 1; trie->wordcount = word_count; RExC_rxi->data->data[ data_slot ] = (void*)trie; trie->charmap = (U16 *) PerlMemShared_calloc( 256, sizeof(U16) ); if (flags == EXACT || flags == EXACTL) trie->bitmap = (char *) PerlMemShared_calloc( ANYOF_BITMAP_SIZE, 1 ); trie->wordinfo = (reg_trie_wordinfo *) PerlMemShared_calloc( trie->wordcount+1, sizeof(reg_trie_wordinfo)); DEBUG_r({ trie_words = newAV(); }); re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, 1); assert(re_trie_maxbuff); if (!SvIOK(re_trie_maxbuff)) { sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT); } DEBUG_TRIE_COMPILE_r({ Perl_re_indentf( aTHX_ "make_trie start==%d, first==%d, last==%d, tail==%d depth=%d\n", depth+1, REG_NODE_NUM(startbranch),REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(tail), (int)depth); }); /* Find the node we are going to overwrite */ if ( first == startbranch && OP( last ) != BRANCH ) { /* whole branch chain */ convert = first; } else { /* branch sub-chain */ convert = NEXTOPER( first ); } /* -- First loop and Setup -- We first traverse the branches and scan each word to determine if it contains widechars, and how many unique chars there are, this is important as we have to build a table with at least as many columns as we have unique chars. We use an array of integers to represent the character codes 0..255 (trie->charmap) and we use a an HV* to store Unicode characters. We use the native representation of the character value as the key and IV's for the coded index. *TODO* If we keep track of how many times each character is used we can remap the columns so that the table compression later on is more efficient in terms of memory by ensuring the most common value is in the middle and the least common are on the outside. IMO this would be better than a most to least common mapping as theres a decent chance the most common letter will share a node with the least common, meaning the node will not be compressible. With a middle is most common approach the worst case is when we have the least common nodes twice. */ for ( cur = first ; cur < last ; cur = regnext( cur ) ) { regnode *noper = NEXTOPER( cur ); const U8 *uc; const U8 *e; int foldlen = 0; U32 wordlen = 0; /* required init */ STRLEN minchars = 0; STRLEN maxchars = 0; bool set_bit = trie->bitmap ? 1 : 0; /*store the first char in the bitmap?*/ if (OP(noper) == NOTHING) { /* skip past a NOTHING at the start of an alternation * eg, /(?:)a|(?:b)/ should be the same as /a|b/ */ regnode *noper_next= regnext(noper); if (noper_next < tail) noper= noper_next; } if ( noper < tail && ( OP(noper) == flags || ( flags == EXACTFU && OP(noper) == EXACTFU_SS ) ) ) { uc= (U8*)STRING(noper); e= uc + STR_LEN(noper); } else { trie->minlen= 0; continue; } if ( set_bit ) { /* bitmap only alloced when !(UTF&&Folding) */ TRIE_BITMAP_SET(trie,*uc); /* store the raw first byte regardless of encoding */ if (OP( noper ) == EXACTFU_SS) { /* false positives are ok, so just set this */ TRIE_BITMAP_SET(trie, LATIN_SMALL_LETTER_SHARP_S); } } for ( ; uc < e ; uc += len ) { /* Look at each char in the current branch */ TRIE_CHARCOUNT(trie)++; TRIE_READ_CHAR; /* TRIE_READ_CHAR returns the current character, or its fold if /i * is in effect. Under /i, this character can match itself, or * anything that folds to it. If not under /i, it can match just * itself. 
Most folds are 1-1, for example k, K, and KELVIN SIGN * all fold to k, and all are single characters. But some folds * expand to more than one character, so for example LATIN SMALL * LIGATURE FFI folds to the three character sequence 'ffi'. If * the string beginning at 'uc' is 'ffi', it could be matched by * three characters, or just by the one ligature character. (It * could also be matched by two characters: LATIN SMALL LIGATURE FF * followed by 'i', or by 'f' followed by LATIN SMALL LIGATURE FI). * (Of course 'I' and/or 'F' instead of 'i' and 'f' can also * match.) The trie needs to know the minimum and maximum number * of characters that could match so that it can use size alone to * quickly reject many match attempts. The max is simple: it is * the number of folded characters in this branch (since a fold is * never shorter than what folds to it. */ maxchars++; /* And the min is equal to the max if not under /i (indicated by * 'folder' being NULL), or there are no multi-character folds. If * there is a multi-character fold, the min is incremented just * once, for the character that folds to the sequence. Each * character in the sequence needs to be added to the list below of * characters in the trie, but we count only the first towards the * min number of characters needed. This is done through the * variable 'foldlen', which is returned by the macros that look * for these sequences as the number of bytes the sequence * occupies. Each time through the loop, we decrement 'foldlen' by * how many bytes the current char occupies. Only when it reaches * 0 do we increment 'minchars' or look for another multi-character * sequence. */ if (folder == NULL) { minchars++; } else if (foldlen > 0) { foldlen -= (UTF) ? UTF8SKIP(uc) : 1; } else { minchars++; /* See if *uc is the beginning of a multi-character fold. If * so, we decrement the length remaining to look at, to account * for the current character this iteration. (We can use 'uc' * instead of the fold returned by TRIE_READ_CHAR because for * non-UTF, the latin1_safe macro is smart enough to account * for all the unfolded characters, and because for UTF, the * string will already have been folded earlier in the * compilation process */ if (UTF) { if ((foldlen = is_MULTI_CHAR_FOLD_utf8_safe(uc, e))) { foldlen -= UTF8SKIP(uc); } } else if ((foldlen = is_MULTI_CHAR_FOLD_latin1_safe(uc, e))) { foldlen--; } } /* The current character (and any potential folds) should be added * to the possible matching characters for this position in this * branch */ if ( uvc < 256 ) { if ( folder ) { U8 folded= folder[ (U8) uvc ]; if ( !trie->charmap[ folded ] ) { trie->charmap[ folded ]=( ++trie->uniquecharcount ); TRIE_STORE_REVCHAR( folded ); } } if ( !trie->charmap[ uvc ] ) { trie->charmap[ uvc ]=( ++trie->uniquecharcount ); TRIE_STORE_REVCHAR( uvc ); } if ( set_bit ) { /* store the codepoint in the bitmap, and its folded * equivalent. 
*/ TRIE_BITMAP_SET_FOLDED(trie, uvc, folder); set_bit = 0; /* We've done our bit :-) */ } } else { /* XXX We could come up with the list of code points that fold * to this using PL_utf8_foldclosures, except not for * multi-char folds, as there may be multiple combinations * there that could work, which needs to wait until runtime to * resolve (The comment about LIGATURE FFI above is such an * example */ SV** svpp; if ( !widecharmap ) widecharmap = newHV(); svpp = hv_fetch( widecharmap, (char*)&uvc, sizeof( UV ), 1 ); if ( !svpp ) Perl_croak( aTHX_ "error creating/fetching widecharmap entry for 0x%" UVXf, uvc ); if ( !SvTRUE( *svpp ) ) { sv_setiv( *svpp, ++trie->uniquecharcount ); TRIE_STORE_REVCHAR(uvc); } } } /* end loop through characters in this branch of the trie */ /* We take the min and max for this branch and combine to find the min * and max for all branches processed so far */ if( cur == first ) { trie->minlen = minchars; trie->maxlen = maxchars; } else if (minchars < trie->minlen) { trie->minlen = minchars; } else if (maxchars > trie->maxlen) { trie->maxlen = maxchars; } } /* end first pass */ DEBUG_TRIE_COMPILE_r( Perl_re_indentf( aTHX_ "TRIE(%s): W:%d C:%d Uq:%d Min:%d Max:%d\n", depth+1, ( widecharmap ? "UTF8" : "NATIVE" ), (int)word_count, (int)TRIE_CHARCOUNT(trie), trie->uniquecharcount, (int)trie->minlen, (int)trie->maxlen ) ); /* We now know what we are dealing with in terms of unique chars and string sizes so we can calculate how much memory a naive representation using a flat table will take. If it's over a reasonable limit (as specified by ${^RE_TRIE_MAXBUF}) we use a more memory conservative but potentially much slower representation using an array of lists. At the end we convert both representations into the same compressed form that will be used in regexec.c for matching with. The latter is a form that cannot be used to construct with but has memory properties similar to the list form and access properties similar to the table form making it both suitable for fast searches and small enough that its feasable to store for the duration of a program. See the comment in the code where the compressed table is produced inplace from the flat tabe representation for an explanation of how the compression works. */ Newx(prev_states, TRIE_CHARCOUNT(trie) + 2, U32); prev_states[1] = 0; if ( (IV)( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount + 1) > SvIV(re_trie_maxbuff) ) { /* Second Pass -- Array Of Lists Representation Each state will be represented by a list of charid:state records (reg_trie_trans_le) the first such element holds the CUR and LEN points of the allocated array. (See defines above). We build the initial structure using the lists, and then convert it into the compressed table form which allows faster lookups (but cant be modified once converted). 
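   Schematically the list form looks like this (illustrative only -- the
   real record type is reg_trie_trans_le, and the first slot of each list
   actually holds the CUR and LEN bookkeeping rather than a transition):

       state 1: [ charid('h') -> state 2, charid('s') -> state 4 ]
       state 2: [ charid('e') -> state 3, charid('i') -> state 6 ]
       ...

   Adding a word is then a linear scan of the current state's list for the
   charid, appending a freshly allocated state when it is not found.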
*/ STRLEN transcount = 1; DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Compiling trie using list compiler\n", depth+1)); trie->states = (reg_trie_state *) PerlMemShared_calloc( TRIE_CHARCOUNT(trie) + 2, sizeof(reg_trie_state) ); TRIE_LIST_NEW(1); next_alloc = 2; for ( cur = first ; cur < last ; cur = regnext( cur ) ) { regnode *noper = NEXTOPER( cur ); U32 state = 1; /* required init */ U16 charid = 0; /* sanity init */ U32 wordlen = 0; /* required init */ if (OP(noper) == NOTHING) { regnode *noper_next= regnext(noper); if (noper_next < tail) noper= noper_next; } if ( noper < tail && ( OP(noper) == flags || ( flags == EXACTFU && OP(noper) == EXACTFU_SS ) ) ) { const U8 *uc= (U8*)STRING(noper); const U8 *e= uc + STR_LEN(noper); for ( ; uc < e ; uc += len ) { TRIE_READ_CHAR; if ( uvc < 256 ) { charid = trie->charmap[ uvc ]; } else { SV** const svpp = hv_fetch( widecharmap, (char*)&uvc, sizeof( UV ), 0); if ( !svpp ) { charid = 0; } else { charid=(U16)SvIV( *svpp ); } } /* charid is now 0 if we dont know the char read, or * nonzero if we do */ if ( charid ) { U16 check; U32 newstate = 0; charid--; if ( !trie->states[ state ].trans.list ) { TRIE_LIST_NEW( state ); } for ( check = 1; check <= TRIE_LIST_USED( state ); check++ ) { if ( TRIE_LIST_ITEM( state, check ).forid == charid ) { newstate = TRIE_LIST_ITEM( state, check ).newstate; break; } } if ( ! newstate ) { newstate = next_alloc++; prev_states[newstate] = state; TRIE_LIST_PUSH( state, charid, newstate ); transcount++; } state = newstate; } else { Perl_croak( aTHX_ "panic! In trie construction, no char mapping for %" IVdf, uvc ); } } } TRIE_HANDLE_WORD(state); } /* end second pass */ /* next alloc is the NEXT state to be allocated */ trie->statecount = next_alloc; trie->states = (reg_trie_state *) PerlMemShared_realloc( trie->states, next_alloc * sizeof(reg_trie_state) ); /* and now dump it out before we compress it */ DEBUG_TRIE_COMPILE_MORE_r(dump_trie_interim_list(trie, widecharmap, revcharmap, next_alloc, depth+1) ); trie->trans = (reg_trie_trans *) PerlMemShared_calloc( transcount, sizeof(reg_trie_trans) ); { U32 state; U32 tp = 0; U32 zp = 0; for( state=1 ; state < next_alloc ; state ++ ) { U32 base=0; /* DEBUG_TRIE_COMPILE_MORE_r( Perl_re_printf( aTHX_ "tp: %d zp: %d ",tp,zp) ); */ if (trie->states[state].trans.list) { U16 minid=TRIE_LIST_ITEM( state, 1).forid; U16 maxid=minid; U16 idx; for( idx = 2 ; idx <= TRIE_LIST_USED( state ) ; idx++ ) { const U16 forid = TRIE_LIST_ITEM( state, idx).forid; if ( forid < minid ) { minid=forid; } else if ( forid > maxid ) { maxid=forid; } } if ( transcount < tp + maxid - minid + 1) { transcount *= 2; trie->trans = (reg_trie_trans *) PerlMemShared_realloc( trie->trans, transcount * sizeof(reg_trie_trans) ); Zero( trie->trans + (transcount / 2), transcount / 2, reg_trie_trans ); } base = trie->uniquecharcount + tp - minid; if ( maxid == minid ) { U32 set = 0; for ( ; zp < tp ; zp++ ) { if ( ! 
trie->trans[ zp ].next ) { base = trie->uniquecharcount + zp - minid; trie->trans[ zp ].next = TRIE_LIST_ITEM( state, 1).newstate; trie->trans[ zp ].check = state; set = 1; break; } } if ( !set ) { trie->trans[ tp ].next = TRIE_LIST_ITEM( state, 1).newstate; trie->trans[ tp ].check = state; tp++; zp = tp; } } else { for ( idx=1; idx <= TRIE_LIST_USED( state ) ; idx++ ) { const U32 tid = base - trie->uniquecharcount + TRIE_LIST_ITEM( state, idx ).forid; trie->trans[ tid ].next = TRIE_LIST_ITEM( state, idx ).newstate; trie->trans[ tid ].check = state; } tp += ( maxid - minid + 1 ); } Safefree(trie->states[ state ].trans.list); } /* DEBUG_TRIE_COMPILE_MORE_r( Perl_re_printf( aTHX_ " base: %d\n",base); ); */ trie->states[ state ].trans.base=base; } trie->lasttrans = tp + 1; } } else { /* Second Pass -- Flat Table Representation. we dont use the 0 slot of either trans[] or states[] so we add 1 to each. We know that we will need Charcount+1 trans at most to store the data (one row per char at worst case) So we preallocate both structures assuming worst case. We then construct the trie using only the .next slots of the entry structs. We use the .check field of the first entry of the node temporarily to make compression both faster and easier by keeping track of how many non zero fields are in the node. Since trans are numbered from 1 any 0 pointer in the table is a FAIL transition. There are two terms at use here: state as a TRIE_NODEIDX() which is a number representing the first entry of the node, and state as a TRIE_NODENUM() which is the trans number. state 1 is TRIE_NODEIDX(1) and TRIE_NODENUM(1), state 2 is TRIE_NODEIDX(2) and TRIE_NODENUM(3) if there are 2 entrys per node. eg: A B A B 1. 2 4 1. 3 7 2. 0 3 3. 0 5 3. 0 0 5. 0 0 4. 0 0 7. 0 0 The table is internally in the right hand, idx form. However as we also have to deal with the states array which is indexed by nodenum we have to use TRIE_NODENUM() to convert. */ DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Compiling trie using table compiler\n", depth+1)); trie->trans = (reg_trie_trans *) PerlMemShared_calloc( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount + 1, sizeof(reg_trie_trans) ); trie->states = (reg_trie_state *) PerlMemShared_calloc( TRIE_CHARCOUNT(trie) + 2, sizeof(reg_trie_state) ); next_alloc = trie->uniquecharcount + 1; for ( cur = first ; cur < last ; cur = regnext( cur ) ) { regnode *noper = NEXTOPER( cur ); U32 state = 1; /* required init */ U16 charid = 0; /* sanity init */ U32 accept_state = 0; /* sanity init */ U32 wordlen = 0; /* required init */ if (OP(noper) == NOTHING) { regnode *noper_next= regnext(noper); if (noper_next < tail) noper= noper_next; } if ( noper < tail && ( OP(noper) == flags || ( flags == EXACTFU && OP(noper) == EXACTFU_SS ) ) ) { const U8 *uc= (U8*)STRING(noper); const U8 *e= uc + STR_LEN(noper); for ( ; uc < e ; uc += len ) { TRIE_READ_CHAR; if ( uvc < 256 ) { charid = trie->charmap[ uvc ]; } else { SV* const * const svpp = hv_fetch( widecharmap, (char*)&uvc, sizeof( UV ), 0); charid = svpp ? (U16)SvIV(*svpp) : 0; } if ( charid ) { charid--; if ( !trie->trans[ state + charid ].next ) { trie->trans[ state + charid ].next = next_alloc; trie->trans[ state ].check++; prev_states[TRIE_NODENUM(next_alloc)] = TRIE_NODENUM(state); next_alloc += trie->uniquecharcount; } state = trie->trans[ state + charid ].next; } else { Perl_croak( aTHX_ "panic! 
In trie construction, no char mapping for %" IVdf, uvc ); } /* charid is now 0 if we dont know the char read, or * nonzero if we do */ } } accept_state = TRIE_NODENUM( state ); TRIE_HANDLE_WORD(accept_state); } /* end second pass */ /* and now dump it out before we compress it */ DEBUG_TRIE_COMPILE_MORE_r(dump_trie_interim_table(trie, widecharmap, revcharmap, next_alloc, depth+1)); { /* * Inplace compress the table.* For sparse data sets the table constructed by the trie algorithm will be mostly 0/FAIL transitions or to put it another way mostly empty. (Note that leaf nodes will not contain any transitions.) This algorithm compresses the tables by eliminating most such transitions, at the cost of a modest bit of extra work during lookup: - Each states[] entry contains a .base field which indicates the index in the state[] array wheres its transition data is stored. - If .base is 0 there are no valid transitions from that node. - If .base is nonzero then charid is added to it to find an entry in the trans array. -If trans[states[state].base+charid].check!=state then the transition is taken to be a 0/Fail transition. Thus if there are fail transitions at the front of the node then the .base offset will point somewhere inside the previous nodes data (or maybe even into a node even earlier), but the .check field determines if the transition is valid. XXX - wrong maybe? The following process inplace converts the table to the compressed table: We first do not compress the root node 1,and mark all its .check pointers as 1 and set its .base pointer as 1 as well. This allows us to do a DFA construction from the compressed table later, and ensures that any .base pointers we calculate later are greater than 0. - We set 'pos' to indicate the first entry of the second node. - We then iterate over the columns of the node, finding the first and last used entry at l and m. We then copy l..m into pos..(pos+m-l), and set the .check pointers accordingly, and advance pos appropriately and repreat for the next node. Note that when we copy the next pointers we have to convert them from the original NODEIDX form to NODENUM form as the former is not valid post compression. - If a node has no transitions used we mark its base as 0 and do not advance the pos pointer. - If a node only has one transition we use a second pointer into the structure to fill in allocated fail transitions from other states. This pointer is independent of the main pointer and scans forward looking for null transitions that are allocated to a state. When it finds one it writes the single transition into the "hole". If the pointer doesnt find one the single transition is appended as normal. - Once compressed we can Renew/realloc the structures to release the excess space. See "Table-Compression Methods" in sec 3.9 of the Red Dragon, specifically Fig 3.47 and the associated pseudocode. demq */ const U32 laststate = TRIE_NODENUM( next_alloc ); U32 state, charid; U32 pos = 0, zp=0; trie->statecount = laststate; for ( state = 1 ; state < laststate ; state++ ) { U8 flag = 0; const U32 stateidx = TRIE_NODEIDX( state ); const U32 o_used = trie->trans[ stateidx ].check; U32 used = trie->trans[ stateidx ].check; trie->trans[ stateidx ].check = 0; for ( charid = 0; used && charid < trie->uniquecharcount; charid++ ) { if ( flag || trie->trans[ stateidx + charid ].next ) { if ( trie->trans[ stateidx + charid ].next ) { if (o_used == 1) { for ( ; zp < pos ; zp++ ) { if ( ! 
trie->trans[ zp ].next ) { break; } } trie->states[ state ].trans.base = zp + trie->uniquecharcount - charid ; trie->trans[ zp ].next = SAFE_TRIE_NODENUM( trie->trans[ stateidx + charid ].next ); trie->trans[ zp ].check = state; if ( ++zp > pos ) pos = zp; break; } used--; } if ( !flag ) { flag = 1; trie->states[ state ].trans.base = pos + trie->uniquecharcount - charid ; } trie->trans[ pos ].next = SAFE_TRIE_NODENUM( trie->trans[ stateidx + charid ].next ); trie->trans[ pos ].check = state; pos++; } } } trie->lasttrans = pos + 1; trie->states = (reg_trie_state *) PerlMemShared_realloc( trie->states, laststate * sizeof(reg_trie_state) ); DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Alloc: %d Orig: %" IVdf " elements, Final:%" IVdf ". Savings of %%%5.2f\n", depth+1, (int)( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount + 1 ), (IV)next_alloc, (IV)pos, ( ( next_alloc - pos ) * 100 ) / (double)next_alloc ); ); } /* end table compress */ } DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Statecount:%" UVxf " Lasttrans:%" UVxf "\n", depth+1, (UV)trie->statecount, (UV)trie->lasttrans) ); /* resize the trans array to remove unused space */ trie->trans = (reg_trie_trans *) PerlMemShared_realloc( trie->trans, trie->lasttrans * sizeof(reg_trie_trans) ); { /* Modify the program and insert the new TRIE node */ U8 nodetype =(U8)(flags & 0xFF); char *str=NULL; #ifdef DEBUGGING regnode *optimize = NULL; #ifdef RE_TRACK_PATTERN_OFFSETS U32 mjd_offset = 0; U32 mjd_nodelen = 0; #endif /* RE_TRACK_PATTERN_OFFSETS */ #endif /* DEBUGGING */ /* This means we convert either the first branch or the first Exact, depending on whether the thing following (in 'last') is a branch or not and whther first is the startbranch (ie is it a sub part of the alternation or is it the whole thing.) Assuming its a sub part we convert the EXACT otherwise we convert the whole branch sequence, including the first. */ /* Find the node we are going to overwrite */ if ( first != startbranch || OP( last ) == BRANCH ) { /* branch sub-chain */ NEXT_OFF( first ) = (U16)(last - first); #ifdef RE_TRACK_PATTERN_OFFSETS DEBUG_r({ mjd_offset= Node_Offset((convert)); mjd_nodelen= Node_Length((convert)); }); #endif /* whole branch chain */ } #ifdef RE_TRACK_PATTERN_OFFSETS else { DEBUG_r({ const regnode *nop = NEXTOPER( convert ); mjd_offset= Node_Offset((nop)); mjd_nodelen= Node_Length((nop)); }); } DEBUG_OPTIMISE_r( Perl_re_indentf( aTHX_ "MJD offset:%" UVuf " MJD length:%" UVuf "\n", depth+1, (UV)mjd_offset, (UV)mjd_nodelen) ); #endif /* But first we check to see if there is a common prefix we can split out as an EXACT and put in front of the TRIE node. */ trie->startstate= 1; if ( trie->bitmap && !widecharmap && !trie->jump ) { /* we want to find the first state that has more than * one transition, if that state is not the first state * then we have a common prefix which we can remove. */ U32 state; for ( state = 1 ; state < trie->statecount-1 ; state++ ) { U32 ofs = 0; I32 first_ofs = -1; /* keeps track of the ofs of the first transition, -1 means none */ U32 count = 0; const U32 base = trie->states[ state ].trans.base; /* does this state terminate an alternation? 
*/ if ( trie->states[state].wordnum ) count = 1; for ( ofs = 0 ; ofs < trie->uniquecharcount ; ofs++ ) { if ( ( base + ofs >= trie->uniquecharcount ) && ( base + ofs - trie->uniquecharcount < trie->lasttrans ) && trie->trans[ base + ofs - trie->uniquecharcount ].check == state ) { if ( ++count > 1 ) { /* we have more than one transition */ SV **tmp; U8 *ch; /* if this is the first state there is no common prefix * to extract, so we can exit */ if ( state == 1 ) break; tmp = av_fetch( revcharmap, ofs, 0); ch = (U8*)SvPV_nolen_const( *tmp ); /* if we are on count 2 then we need to initialize the * bitmap, and store the previous char if there was one * in it*/ if ( count == 2 ) { /* clear the bitmap */ Zero(trie->bitmap, ANYOF_BITMAP_SIZE, char); DEBUG_OPTIMISE_r( Perl_re_indentf( aTHX_ "New Start State=%" UVuf " Class: [", depth+1, (UV)state)); if (first_ofs >= 0) { SV ** const tmp = av_fetch( revcharmap, first_ofs, 0); const U8 * const ch = (U8*)SvPV_nolen_const( *tmp ); TRIE_BITMAP_SET_FOLDED(trie,*ch,folder); DEBUG_OPTIMISE_r( Perl_re_printf( aTHX_ "%s", (char*)ch) ); } } /* store the current firstchar in the bitmap */ TRIE_BITMAP_SET_FOLDED(trie,*ch,folder); DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "%s", ch)); } first_ofs = ofs; } } if ( count == 1 ) { /* This state has only one transition, its transition is part * of a common prefix - we need to concatenate the char it * represents to what we have so far. */ SV **tmp = av_fetch( revcharmap, first_ofs, 0); STRLEN len; char *ch = SvPV( *tmp, len ); DEBUG_OPTIMISE_r({ SV *sv=sv_newmortal(); Perl_re_indentf( aTHX_ "Prefix State: %" UVuf " Ofs:%" UVuf " Char='%s'\n", depth+1, (UV)state, (UV)first_ofs, pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), 6, PL_colors[0], PL_colors[1], (SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_ESCAPE_FIRSTCHAR ) ); }); if ( state==1 ) { OP( convert ) = nodetype; str=STRING(convert); STR_LEN(convert)=0; } STR_LEN(convert) += len; while (len--) *str++ = *ch++; } else { #ifdef DEBUGGING if (state>1) DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "]\n")); #endif break; } } trie->prefixlen = (state-1); if (str) { regnode *n = convert+NODE_SZ_STR(convert); NEXT_OFF(convert) = NODE_SZ_STR(convert); trie->startstate = state; trie->minlen -= (state - 1); trie->maxlen -= (state - 1); #ifdef DEBUGGING /* At least the UNICOS C compiler choked on this * being argument to DEBUG_r(), so let's just have * it right here. */ if ( #ifdef PERL_EXT_RE_BUILD 1 #else DEBUG_r_TEST #endif ) { regnode *fix = convert; U32 word = trie->wordcount; mjd_nodelen++; Set_Node_Offset_Length(convert, mjd_offset, state - 1); while( ++fix < n ) { Set_Node_Offset_Length(fix, 0, 0); } while (word--) { SV ** const tmp = av_fetch( trie_words, word, 0 ); if (tmp) { if ( STR_LEN(convert) <= SvCUR(*tmp) ) sv_chop(*tmp, SvPV_nolen(*tmp) + STR_LEN(convert)); else sv_chop(*tmp, SvPV_nolen(*tmp) + SvCUR(*tmp)); } } } #endif if (trie->maxlen) { convert = n; } else { NEXT_OFF(convert) = (U16)(tail - convert); DEBUG_r(optimize= n); } } } if (!jumper) jumper = last; if ( trie->maxlen ) { NEXT_OFF( convert ) = (U16)(tail - convert); ARG_SET( convert, data_slot ); /* Store the offset to the first unabsorbed branch in jump[0], which is otherwise unused by the jump logic. We use this when dumping a trie and during optimisation. 
*/ if (trie->jump) trie->jump[0] = (U16)(nextbranch - convert); /* If the start state is not accepting (meaning there is no empty string/NOTHING) * and there is a bitmap * and the first "jump target" node we found leaves enough room * then convert the TRIE node into a TRIEC node, with the bitmap * embedded inline in the opcode - this is hypothetically faster. */ if ( !trie->states[trie->startstate].wordnum && trie->bitmap && ( (char *)jumper - (char *)convert) >= (int)sizeof(struct regnode_charclass) ) { OP( convert ) = TRIEC; Copy(trie->bitmap, ((struct regnode_charclass *)convert)->bitmap, ANYOF_BITMAP_SIZE, char); PerlMemShared_free(trie->bitmap); trie->bitmap= NULL; } else OP( convert ) = TRIE; /* store the type in the flags */ convert->flags = nodetype; DEBUG_r({ optimize = convert + NODE_STEP_REGNODE + regarglen[ OP( convert ) ]; }); /* XXX We really should free up the resource in trie now, as we won't use them - (which resources?) dmq */ } /* needed for dumping*/ DEBUG_r(if (optimize) { regnode *opt = convert; while ( ++opt < optimize) { Set_Node_Offset_Length(opt,0,0); } /* Try to clean up some of the debris left after the optimisation. */ while( optimize < jumper ) { mjd_nodelen += Node_Length((optimize)); OP( optimize ) = OPTIMIZED; Set_Node_Offset_Length(optimize,0,0); optimize++; } Set_Node_Offset_Length(convert,mjd_offset,mjd_nodelen); }); } /* end node insert */ /* Finish populating the prev field of the wordinfo array. Walk back * from each accept state until we find another accept state, and if * so, point the first word's .prev field at the second word. If the * second already has a .prev field set, stop now. This will be the * case either if we've already processed that word's accept state, * or that state had multiple words, and the overspill words were * already linked up earlier. */ { U16 word; U32 state; U16 prev; for (word=1; word <= trie->wordcount; word++) { prev = 0; if (trie->wordinfo[word].prev) continue; state = trie->wordinfo[word].accept; while (state) { state = prev_states[state]; if (!state) break; prev = trie->states[state].wordnum; if (prev) break; } trie->wordinfo[word].prev = prev; } Safefree(prev_states); } /* and now dump out the compressed format */ DEBUG_TRIE_COMPILE_r(dump_trie(trie, widecharmap, revcharmap, depth+1)); RExC_rxi->data->data[ data_slot + 1 ] = (void*)widecharmap; #ifdef DEBUGGING RExC_rxi->data->data[ data_slot + TRIE_WORDS_OFFSET ] = (void*)trie_words; RExC_rxi->data->data[ data_slot + 3 ] = (void*)revcharmap; #else SvREFCNT_dec_NN(revcharmap); #endif return trie->jump ? MADE_JUMP_TRIE : trie->startstate>1 ? MADE_EXACT_TRIE : MADE_TRIE; } STATIC regnode * S_construct_ahocorasick_from_trie(pTHX_ RExC_state_t *pRExC_state, regnode *source, U32 depth) { /* The Trie is constructed and compressed now so we can build a fail array if * it's needed This is basically the Aho-Corasick algorithm. Its from exercise 3.31 and 3.32 in the "Red Dragon" -- Compilers, principles, techniques, and tools. Aho, Sethi, Ullman 1985/88 ISBN 0-201-10088-6 We find the fail state for each state in the trie, this state is the longest proper suffix of the current state's 'word' that is also a proper prefix of another word in our trie. State 1 represents the word '' and is thus the default fail state. This allows the DFA not to have to restart after its tried and failed a word at a given point, it simply continues as though it had been matching the other word in the first place. 
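   In outline, the fail links are computed breadth-first, which is what the
   loop below does (a sketch only; in the real code the transition lookup is
   the TRIE_TRANS_STATE() macro and the queue is the q[] array):

       fail[root] = root
       for each child c of root: fail[c] = root; enqueue c
       while queue not empty:
           cur = dequeue
           for each char ch with a transition cur --ch--> nxt:
               f = fail[cur]
               while f has no transition on ch: f = fail[f]
               fail[nxt] = the state f moves to on ch
               enqueue nxt

   As a concrete example of why this helps: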
Consider 'abcdgu'=~/abcdefg|cdgu/ When we get to 'd' we are still matching the first word, we would encounter 'g' which would fail, which would bring us to the state representing 'd' in the second word where we would try 'g' and succeed, proceeding to match 'cdgu'. */ /* add a fail transition */ const U32 trie_offset = ARG(source); reg_trie_data *trie=(reg_trie_data *)RExC_rxi->data->data[trie_offset]; U32 *q; const U32 ucharcount = trie->uniquecharcount; const U32 numstates = trie->statecount; const U32 ubound = trie->lasttrans + ucharcount; U32 q_read = 0; U32 q_write = 0; U32 charid; U32 base = trie->states[ 1 ].trans.base; U32 *fail; reg_ac_data *aho; const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("T")); regnode *stclass; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_CONSTRUCT_AHOCORASICK_FROM_TRIE; PERL_UNUSED_CONTEXT; #ifndef DEBUGGING PERL_UNUSED_ARG(depth); #endif if ( OP(source) == TRIE ) { struct regnode_1 *op = (struct regnode_1 *) PerlMemShared_calloc(1, sizeof(struct regnode_1)); StructCopy(source,op,struct regnode_1); stclass = (regnode *)op; } else { struct regnode_charclass *op = (struct regnode_charclass *) PerlMemShared_calloc(1, sizeof(struct regnode_charclass)); StructCopy(source,op,struct regnode_charclass); stclass = (regnode *)op; } OP(stclass)+=2; /* convert the TRIE type to its AHO-CORASICK equivalent */ ARG_SET( stclass, data_slot ); aho = (reg_ac_data *) PerlMemShared_calloc( 1, sizeof(reg_ac_data) ); RExC_rxi->data->data[ data_slot ] = (void*)aho; aho->trie=trie_offset; aho->states=(reg_trie_state *)PerlMemShared_malloc( numstates * sizeof(reg_trie_state) ); Copy( trie->states, aho->states, numstates, reg_trie_state ); Newxz( q, numstates, U32); aho->fail = (U32 *) PerlMemShared_calloc( numstates, sizeof(U32) ); aho->refcount = 1; fail = aho->fail; /* initialize fail[0..1] to be 1 so that we always have a valid final fail state */ fail[ 0 ] = fail[ 1 ] = 1; for ( charid = 0; charid < ucharcount ; charid++ ) { const U32 newstate = TRIE_TRANS_STATE( 1, base, ucharcount, charid, 0 ); if ( newstate ) { q[ q_write ] = newstate; /* set to point at the root */ fail[ q[ q_write++ ] ]=1; } } while ( q_read < q_write) { const U32 cur = q[ q_read++ % numstates ]; base = trie->states[ cur ].trans.base; for ( charid = 0 ; charid < ucharcount ; charid++ ) { const U32 ch_state = TRIE_TRANS_STATE( cur, base, ucharcount, charid, 1 ); if (ch_state) { U32 fail_state = cur; U32 fail_base; do { fail_state = fail[ fail_state ]; fail_base = aho->states[ fail_state ].trans.base; } while ( !TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 ) ); fail_state = TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 ); fail[ ch_state ] = fail_state; if ( !aho->states[ ch_state ].wordnum && aho->states[ fail_state ].wordnum ) { aho->states[ ch_state ].wordnum = aho->states[ fail_state ].wordnum; } q[ q_write++ % numstates] = ch_state; } } } /* restore fail[0..1] to 0 so that we "fall out" of the AC loop when we fail in state 1, this allows us to use the charclass scan to find a valid start char. This is based on the principle that theres a good chance the string being searched contains lots of stuff that cant be a start char. 
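   (Illustration: when matching /foo|bar/ against "xxxxxxfoo", every 'x'
   fails straight out of state 1, so control returns to the start-class
   scan, which can skip ahead to the next byte that could begin a word
   instead of stepping the automaton through each 'x'.)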
*/ fail[ 0 ] = fail[ 1 ] = 0; DEBUG_TRIE_COMPILE_r({ Perl_re_indentf( aTHX_ "Stclass Failtable (%" UVuf " states): 0", depth, (UV)numstates ); for( q_read=1; q_read<numstates; q_read++ ) { Perl_re_printf( aTHX_ ", %" UVuf, (UV)fail[q_read]); } Perl_re_printf( aTHX_ "\n"); }); Safefree(q); /*RExC_seen |= REG_TRIEDFA_SEEN;*/ return stclass; } /* The below joins as many adjacent EXACTish nodes as possible into a single * one. The regop may be changed if the node(s) contain certain sequences that * require special handling. The joining is only done if: * 1) there is room in the current conglomerated node to entirely contain the * next one. * 2) they are the exact same node type * * The adjacent nodes actually may be separated by NOTHING-kind nodes, and * these get optimized out * * XXX khw thinks this should be enhanced to fill EXACT (at least) nodes as full * as possible, even if that means splitting an existing node so that its first * part is moved to the preceeding node. This would maximise the efficiency of * memEQ during matching. Elsewhere in this file, khw proposes splitting * EXACTFish nodes into portions that don't change under folding vs those that * do. Those portions that don't change may be the only things in the pattern that * could be used to find fixed and floating strings. * * If a node is to match under /i (folded), the number of characters it matches * can be different than its character length if it contains a multi-character * fold. *min_subtract is set to the total delta number of characters of the * input nodes. * * And *unfolded_multi_char is set to indicate whether or not the node contains * an unfolded multi-char fold. This happens when whether the fold is valid or * not won't be known until runtime; namely for EXACTF nodes that contain LATIN * SMALL LETTER SHARP S, as only if the target string being matched against * turns out to be UTF-8 is that fold valid; and also for EXACTFL nodes whose * folding rules depend on the locale in force at runtime. (Multi-char folds * whose components are all above the Latin1 range are not run-time locale * dependent, and have already been folded by the time this function is * called.) * * This is as good a place as any to discuss the design of handling these * multi-character fold sequences. It's been wrong in Perl for a very long * time. There are three code points in Unicode whose multi-character folds * were long ago discovered to mess things up. The previous designs for * dealing with these involved assigning a special node for them. This * approach doesn't always work, as evidenced by this example: * "\xDFs" =~ /s\xDF/ui # Used to fail before these patches * Both sides fold to "sss", but if the pattern is parsed to create a node that * would match just the \xDF, it won't be able to handle the case where a * successful match would have to cross the node's boundary. The new approach * that hopefully generally solves the problem generates an EXACTFU_SS node * that is "sss" in this case. * * It turns out that there are problems with all multi-character folds, and not * just these three. Now the code is general, for all such cases. The * approach taken is: * 1) This routine examines each EXACTFish node that could contain multi- * character folded sequences. Since a single character can fold into * such a sequence, the minimum match length for this node is less than * the number of characters in the node. 
This routine returns in
 *    *min_subtract how many characters to subtract from the actual
 *    length of the string to get a real minimum match length; it is 0 if
 *    there are no multi-char folds.  This delta is used by the caller to
 *    adjust the min length of the match, and the delta between min and max,
 *    so that the optimizer doesn't reject these possibilities based on size
 *    constraints.
 * 2) For the sequence involving the Sharp s (\xDF), the node type EXACTFU_SS
 *    is used for an EXACTFU node that contains at least one "ss" sequence in
 *    it.  For non-UTF-8 patterns and strings, this is the only case where
 *    there is a possible fold length change.  That means that a regular
 *    EXACTFU node without UTF-8 involvement doesn't have to concern itself
 *    with length changes, and so can be processed faster.  regexec.c takes
 *    advantage of this.  Generally, an EXACTFish node that is in UTF-8 is
 *    pre-folded by regcomp.c (except EXACTFL, some of whose folds aren't
 *    known until runtime).  This saves effort in regex matching.  However,
 *    the pre-folding isn't done for non-UTF8 patterns because the fold of
 *    the MICRO SIGN requires UTF-8, and we don't want to slow things down by
 *    forcing the pattern into UTF8 unless necessary.  Also, what EXACTF (and,
 *    again, EXACTFL) nodes fold to isn't known until runtime.  The fold
 *    possibilities for the non-UTF8 patterns are quite simple, except for
 *    the sharp s.  All the ones that don't involve a UTF-8 target string are
 *    members of a fold-pair, and arrays are set up for all of them so that
 *    the other member of the pair can be found quickly.  Code elsewhere in
 *    this file makes sure that in EXACTFU nodes, the sharp s gets folded to
 *    'ss', even if the pattern isn't UTF-8.  This avoids the issues
 *    described in the next item.
 * 3) A problem remains for unfolded multi-char folds.  (These occur when the
 *    validity of the fold won't be known until runtime, and so must remain
 *    unfolded for now.  This happens for the sharp s in EXACTF and EXACTFA
 *    nodes when the pattern isn't in UTF-8.  (Note, BTW, that there cannot
 *    be an EXACTF node with a UTF-8 pattern.)  They also occur for various
 *    folds in EXACTFL nodes, regardless of the UTF-ness of the pattern.)
 *    The reason this is a problem is that the optimizer part of regexec.c
 *    (probably unwittingly, in Perl_regexec_flags()) makes an assumption
 *    that a character in the pattern corresponds to at most a single
 *    character in the target string.  (And I do mean character, and not byte
 *    here, unlike other parts of the documentation that have never been
 *    updated to account for multibyte Unicode.)  Sharp s in EXACTF and
 *    EXACTFL nodes can match the two character string 'ss'; in EXACTFA nodes
 *    it can match "\x{17F}\x{17F}".  These, along with other ones in EXACTFL
 *    nodes, violate the assumption, and they are the only instances where it
 *    is violated.  I'm reluctant to try to change the assumption, as the
 *    code involved is impenetrable to me (khw), so instead the code here
 *    punts.  This routine examines EXACTFL nodes, and (when the pattern
 *    isn't UTF-8) EXACTF and EXACTFA for such unfolded folds, and returns a
 *    boolean indicating whether or not the node contains such a fold.  When
 *    it is true, the caller sets a flag that later causes the optimizer in
 *    this file to not set values for the floating and fixed string lengths,
 *    and thus avoids the optimizer code in regexec.c that makes the invalid
 *    assumption.
Thus, there is no optimization based on string lengths for * EXACTFL nodes that contain these few folds, nor for non-UTF8-pattern * EXACTF and EXACTFA nodes that contain the sharp s. (The reason the * assumption is wrong only in these cases is that all other non-UTF-8 * folds are 1-1; and, for UTF-8 patterns, we pre-fold all other folds to * their expanded versions. (Again, we can't prefold sharp s to 'ss' in * EXACTF nodes because we don't know at compile time if it actually * matches 'ss' or not. For EXACTF nodes it will match iff the target * string is in UTF-8. This is in contrast to EXACTFU nodes, where it * always matches; and EXACTFA where it never does. In an EXACTFA node in * a UTF-8 pattern, sharp s is folded to "\x{17F}\x{17F}, avoiding the * problem; but in a non-UTF8 pattern, folding it to that above-Latin1 * string would require the pattern to be forced into UTF-8, the overhead * of which we want to avoid. Similarly the unfolded multi-char folds in * EXACTFL nodes will match iff the locale at the time of match is a UTF-8 * locale.) * * Similarly, the code that generates tries doesn't currently handle * not-already-folded multi-char folds, and it looks like a pain to change * that. Therefore, trie generation of EXACTFA nodes with the sharp s * doesn't work. Instead, such an EXACTFA is turned into a new regnode, * EXACTFA_NO_TRIE, which the trie code knows not to handle. Most people * using /iaa matching will be doing so almost entirely with ASCII * strings, so this should rarely be encountered in practice */ #define JOIN_EXACT(scan,min_subtract,unfolded_multi_char, flags) \ if (PL_regkind[OP(scan)] == EXACT) \ join_exact(pRExC_state,(scan),(min_subtract),unfolded_multi_char, (flags),NULL,depth+1) STATIC U32 S_join_exact(pTHX_ RExC_state_t *pRExC_state, regnode *scan, UV *min_subtract, bool *unfolded_multi_char, U32 flags,regnode *val, U32 depth) { /* Merge several consecutive EXACTish nodes into one. */ regnode *n = regnext(scan); U32 stringok = 1; regnode *next = scan + NODE_SZ_STR(scan); U32 merged = 0; U32 stopnow = 0; #ifdef DEBUGGING regnode *stop = scan; GET_RE_DEBUG_FLAGS_DECL; #else PERL_UNUSED_ARG(depth); #endif PERL_ARGS_ASSERT_JOIN_EXACT; #ifndef EXPERIMENTAL_INPLACESCAN PERL_UNUSED_ARG(flags); PERL_UNUSED_ARG(val); #endif DEBUG_PEEP("join", scan, depth, 0); /* Look through the subsequent nodes in the chain. Skip NOTHING, merge * EXACT ones that are mergeable to the current one. 
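    * For example (schematic, using debug-dump notation), a pattern like
    * /(?:f)(?:o)(?:o)/ initially compiles each group separately, roughly
    *
    *     EXACT <f>  NOTHING  EXACT <o>  NOTHING  EXACT <o>
    *
    * and this loop rewrites the chain in place into the single node
    *
    *     EXACT <foo>
    *
    * provided the combined string still fits in one node (the STR_LEN sum
    * must stay <= U8_MAX; see the check below).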
*/ while (n && (PL_regkind[OP(n)] == NOTHING || (stringok && OP(n) == OP(scan))) && NEXT_OFF(n) && NEXT_OFF(scan) + NEXT_OFF(n) < I16_MAX) { if (OP(n) == TAIL || n > next) stringok = 0; if (PL_regkind[OP(n)] == NOTHING) { DEBUG_PEEP("skip:", n, depth, 0); NEXT_OFF(scan) += NEXT_OFF(n); next = n + NODE_STEP_REGNODE; #ifdef DEBUGGING if (stringok) stop = n; #endif n = regnext(n); } else if (stringok) { const unsigned int oldl = STR_LEN(scan); regnode * const nnext = regnext(n); /* XXX I (khw) kind of doubt that this works on platforms (should * Perl ever run on one) where U8_MAX is above 255 because of lots * of other assumptions */ /* Don't join if the sum can't fit into a single node */ if (oldl + STR_LEN(n) > U8_MAX) break; DEBUG_PEEP("merg", n, depth, 0); merged++; NEXT_OFF(scan) += NEXT_OFF(n); STR_LEN(scan) += STR_LEN(n); next = n + NODE_SZ_STR(n); /* Now we can overwrite *n : */ Move(STRING(n), STRING(scan) + oldl, STR_LEN(n), char); #ifdef DEBUGGING stop = next - 1; #endif n = nnext; if (stopnow) break; } #ifdef EXPERIMENTAL_INPLACESCAN if (flags && !NEXT_OFF(n)) { DEBUG_PEEP("atch", val, depth, 0); if (reg_off_by_arg[OP(n)]) { ARG_SET(n, val - n); } else { NEXT_OFF(n) = val - n; } stopnow = 1; } #endif } *min_subtract = 0; *unfolded_multi_char = FALSE; /* Here, all the adjacent mergeable EXACTish nodes have been merged. We * can now analyze for sequences of problematic code points. (Prior to * this final joining, sequences could have been split over boundaries, and * hence missed). The sequences only happen in folding, hence for any * non-EXACT EXACTish node */ if (OP(scan) != EXACT && OP(scan) != EXACTL) { U8* s0 = (U8*) STRING(scan); U8* s = s0; U8* s_end = s0 + STR_LEN(scan); int total_count_delta = 0; /* Total delta number of characters that multi-char folds expand to */ /* One pass is made over the node's string looking for all the * possibilities. To avoid some tests in the loop, there are two main * cases, for UTF-8 patterns (which can't have EXACTF nodes) and * non-UTF-8 */ if (UTF) { U8* folded = NULL; if (OP(scan) == EXACTFL) { U8 *d; /* An EXACTFL node would already have been changed to another * node type unless there is at least one character in it that * is problematic; likely a character whose fold definition * won't be known until runtime, and so has yet to be folded. * For all but the UTF-8 locale, folds are 1-1 in length, but * to handle the UTF-8 case, we need to create a temporary * folded copy using UTF-8 locale rules in order to analyze it. * This is because our macros that look to see if a sequence is * a multi-char fold assume everything is folded (otherwise the * tests in those macros would be too complicated and slow). * Note that here, the non-problematic folds will have already * been done, so we can just copy such characters. We actually * don't completely fold the EXACTFL string. We skip the * unfolded multi-char folds, as that would just create work * below to figure out the size they already are */ Newx(folded, UTF8_MAX_FOLD_CHAR_EXPAND * STR_LEN(scan) + 1, U8); d = folded; while (s < s_end) { STRLEN s_len = UTF8SKIP(s); if (! 
is_PROBLEMATIC_LOCALE_FOLD_utf8(s)) { Copy(s, d, s_len, U8); d += s_len; } else if (is_FOLDS_TO_MULTI_utf8(s)) { *unfolded_multi_char = TRUE; Copy(s, d, s_len, U8); d += s_len; } else if (isASCII(*s)) { *(d++) = toFOLD(*s); } else { STRLEN len; _toFOLD_utf8_flags(s, s_end, d, &len, FOLD_FLAGS_FULL); d += len; } s += s_len; } /* Point the remainder of the routine to look at our temporary * folded copy */ s = folded; s_end = d; } /* End of creating folded copy of EXACTFL string */ /* Examine the string for a multi-character fold sequence. UTF-8 * patterns have all characters pre-folded by the time this code is * executed */ while (s < s_end - 1) /* Can stop 1 before the end, as minimum length sequence we are looking for is 2 */ { int count = 0; /* How many characters in a multi-char fold */ int len = is_MULTI_CHAR_FOLD_utf8_safe(s, s_end); if (! len) { /* Not a multi-char fold: get next char */ s += UTF8SKIP(s); continue; } /* Nodes with 'ss' require special handling, except for * EXACTFA-ish for which there is no multi-char fold to this */ if (len == 2 && *s == 's' && *(s+1) == 's' && OP(scan) != EXACTFA && OP(scan) != EXACTFA_NO_TRIE) { count = 2; if (OP(scan) != EXACTFL) { OP(scan) = EXACTFU_SS; } s += 2; } else { /* Here is a generic multi-char fold. */ U8* multi_end = s + len; /* Count how many characters are in it. In the case of * /aa, no folds which contain ASCII code points are * allowed, so check for those, and skip if found. */ if (OP(scan) != EXACTFA && OP(scan) != EXACTFA_NO_TRIE) { count = utf8_length(s, multi_end); s = multi_end; } else { while (s < multi_end) { if (isASCII(*s)) { s++; goto next_iteration; } else { s += UTF8SKIP(s); } count++; } } } /* The delta is how long the sequence is minus 1 (1 is how long * the character that folds to the sequence is) */ total_count_delta += count - 1; next_iteration: ; } /* We created a temporary folded copy of the string in EXACTFL * nodes. Therefore we need to be sure it doesn't go below zero, * as the real string could be shorter */ if (OP(scan) == EXACTFL) { int total_chars = utf8_length((U8*) STRING(scan), (U8*) STRING(scan) + STR_LEN(scan)); if (total_count_delta > total_chars) { total_count_delta = total_chars; } } *min_subtract += total_count_delta; Safefree(folded); } else if (OP(scan) == EXACTFA) { /* Non-UTF-8 pattern, EXACTFA node. There can't be a multi-char * fold to the ASCII range (and there are no existing ones in the * upper latin1 range). But, as outlined in the comments preceding * this function, we need to flag any occurrences of the sharp s. * This character forbids trie formation (because of added * complexity) */ #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) while (s < s_end) { if (*s == LATIN_SMALL_LETTER_SHARP_S) { OP(scan) = EXACTFA_NO_TRIE; *unfolded_multi_char = TRUE; break; } s++; } } else { /* Non-UTF-8 pattern, not EXACTFA node. Look for the multi-char * folds that are all Latin1. As explained in the comments * preceding this function, we look also for the sharp s in EXACTF * and EXACTFL nodes; it can be in the final position. Otherwise * we can stop looking 1 byte earlier because have to find at least * two characters for a multi-fold */ const U8* upper = (OP(scan) == EXACTF || OP(scan) == EXACTFL) ? s_end : s_end -1; while (s < upper) { int len = is_MULTI_CHAR_FOLD_latin1_safe(s, s_end); if (! len) { /* Not a multi-char fold. 
*/ if (*s == LATIN_SMALL_LETTER_SHARP_S && (OP(scan) == EXACTF || OP(scan) == EXACTFL)) { *unfolded_multi_char = TRUE; } s++; continue; } if (len == 2 && isALPHA_FOLD_EQ(*s, 's') && isALPHA_FOLD_EQ(*(s+1), 's')) { /* EXACTF nodes need to know that the minimum length * changed so that a sharp s in the string can match this * ss in the pattern, but they remain EXACTF nodes, as they * won't match this unless the target string is is UTF-8, * which we don't know until runtime. EXACTFL nodes can't * transform into EXACTFU nodes */ if (OP(scan) != EXACTF && OP(scan) != EXACTFL) { OP(scan) = EXACTFU_SS; } } *min_subtract += len - 1; s += len; } #endif } } #ifdef DEBUGGING /* Allow dumping but overwriting the collection of skipped * ops and/or strings with fake optimized ops */ n = scan + NODE_SZ_STR(scan); while (n <= stop) { OP(n) = OPTIMIZED; FLAGS(n) = 0; NEXT_OFF(n) = 0; n++; } #endif DEBUG_OPTIMISE_r(if (merged){DEBUG_PEEP("finl", scan, depth, 0);}); return stopnow; } /* REx optimizer. Converts nodes into quicker variants "in place". Finds fixed substrings. */ /* Stops at toplevel WHILEM as well as at "last". At end *scanp is set to the position after last scanned or to NULL. */ #define INIT_AND_WITHP \ assert(!and_withp); \ Newx(and_withp,1, regnode_ssc); \ SAVEFREEPV(and_withp) static void S_unwind_scan_frames(pTHX_ const void *p) { scan_frame *f= (scan_frame *)p; do { scan_frame *n= f->next_frame; Safefree(f); f= n; } while (f); } STATIC SSize_t S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp, SSize_t *minlenp, SSize_t *deltap, regnode *last, scan_data_t *data, I32 stopparen, U32 recursed_depth, regnode_ssc *and_withp, U32 flags, U32 depth) /* scanp: Start here (read-write). */ /* deltap: Write maxlen-minlen here. */ /* last: Stop before this one. */ /* data: string data about the pattern */ /* stopparen: treat close N as END */ /* recursed: which subroutines have we recursed into */ /* and_withp: Valid if flags & SCF_DO_STCLASS_OR */ { /* There must be at least this number of characters to match */ SSize_t min = 0; I32 pars = 0, code; regnode *scan = *scanp, *next; SSize_t delta = 0; int is_inf = (flags & SCF_DO_SUBSTR) && (data->flags & SF_IS_INF); int is_inf_internal = 0; /* The studied chunk is infinite */ I32 is_par = OP(scan) == OPEN ? 
ARG(scan) : 0; scan_data_t data_fake; SV *re_trie_maxbuff = NULL; regnode *first_non_open = scan; SSize_t stopmin = SSize_t_MAX; scan_frame *frame = NULL; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_STUDY_CHUNK; RExC_study_started= 1; if ( depth == 0 ) { while (first_non_open && OP(first_non_open) == OPEN) first_non_open=regnext(first_non_open); } fake_study_recurse: DEBUG_r( RExC_study_chunk_recursed_count++; ); DEBUG_OPTIMISE_MORE_r( { Perl_re_indentf( aTHX_ "study_chunk stopparen=%ld recursed_count=%lu depth=%lu recursed_depth=%lu scan=%p last=%p", depth, (long)stopparen, (unsigned long)RExC_study_chunk_recursed_count, (unsigned long)depth, (unsigned long)recursed_depth, scan, last); if (recursed_depth) { U32 i; U32 j; for ( j = 0 ; j < recursed_depth ; j++ ) { for ( i = 0 ; i < (U32)RExC_npar ; i++ ) { if ( PAREN_TEST(RExC_study_chunk_recursed + ( j * RExC_study_chunk_recursed_bytes), i ) && ( !j || !PAREN_TEST(RExC_study_chunk_recursed + (( j - 1 ) * RExC_study_chunk_recursed_bytes), i) ) ) { Perl_re_printf( aTHX_ " %d",(int)i); break; } } if ( j + 1 < recursed_depth ) { Perl_re_printf( aTHX_ ","); } } } Perl_re_printf( aTHX_ "\n"); } ); while ( scan && OP(scan) != END && scan < last ){ UV min_subtract = 0; /* How mmany chars to subtract from the minimum node length to get a real minimum (because the folded version may be shorter) */ bool unfolded_multi_char = FALSE; /* Peephole optimizer: */ DEBUG_STUDYDATA("Peep", data, depth, is_inf); DEBUG_PEEP("Peep", scan, depth, flags); /* The reason we do this here is that we need to deal with things like * /(?:f)(?:o)(?:o)/ which cant be dealt with by the normal EXACT * parsing code, as each (?:..) is handled by a different invocation of * reg() -- Yves */ JOIN_EXACT(scan,&min_subtract, &unfolded_multi_char, 0); /* Follow the next-chain of the current node and optimize away all the NOTHINGs from it. */ if (OP(scan) != CURLYX) { const int max = (reg_off_by_arg[OP(scan)] ? I32_MAX /* I32 may be smaller than U16 on CRAYs! */ : (I32_MAX < U16_MAX ? I32_MAX : U16_MAX)); int off = (reg_off_by_arg[OP(scan)] ? ARG(scan) : NEXT_OFF(scan)); int noff; regnode *n = scan; /* Skip NOTHING and LONGJMP. */ while ((n = regnext(n)) && ((PL_regkind[OP(n)] == NOTHING && (noff = NEXT_OFF(n))) || ((OP(n) == LONGJMP) && (noff = ARG(n)))) && off + noff < max) off += noff; if (reg_off_by_arg[OP(scan)]) ARG(scan) = off; else NEXT_OFF(scan) = off; } /* The principal pseudo-switch. Cannot be a switch, since we look into several different things. */ if ( OP(scan) == DEFINEP ) { SSize_t minlen = 0; SSize_t deltanext = 0; SSize_t fake_last_close = 0; I32 f = SCF_IN_DEFINE; StructCopy(&zero_scan_data, &data_fake, scan_data_t); scan = regnext(scan); assert( OP(scan) == IFTHEN ); DEBUG_PEEP("expect IFTHEN", scan, depth, flags); data_fake.last_closep= &fake_last_close; minlen = *minlenp; next = regnext(scan); scan = NEXTOPER(NEXTOPER(scan)); DEBUG_PEEP("scan", scan, depth, flags); DEBUG_PEEP("next", next, depth, flags); /* we suppose the run is continuous, last=next... * NOTE we dont use the return here! */ (void)study_chunk(pRExC_state, &scan, &minlen, &deltanext, next, &data_fake, stopparen, recursed_depth, NULL, f, depth+1); scan = next; } else if ( OP(scan) == BRANCH || OP(scan) == BRANCHJ || OP(scan) == IFTHEN ) { next = regnext(scan); code = OP(scan); /* The op(next)==code check below is to see if we * have "BRANCH-BRANCH", "BRANCHJ-BRANCHJ", "IFTHEN-IFTHEN" * IFTHEN is special as it might not appear in pairs. 
* Not sure whether BRANCH-BRANCHJ is possible, regardless * we dont handle it cleanly. */ if (OP(next) == code || code == IFTHEN) { /* NOTE - There is similar code to this block below for * handling TRIE nodes on a re-study. If you change stuff here * check there too. */ SSize_t max1 = 0, min1 = SSize_t_MAX, num = 0; regnode_ssc accum; regnode * const startbranch=scan; if (flags & SCF_DO_SUBSTR) { /* Cannot merge strings after this. */ scan_commit(pRExC_state, data, minlenp, is_inf); } if (flags & SCF_DO_STCLASS) ssc_init_zero(pRExC_state, &accum); while (OP(scan) == code) { SSize_t deltanext, minnext, fake; I32 f = 0; regnode_ssc this_class; DEBUG_PEEP("Branch", scan, depth, flags); num++; StructCopy(&zero_scan_data, &data_fake, scan_data_t); if (data) { data_fake.whilem_c = data->whilem_c; data_fake.last_closep = data->last_closep; } else data_fake.last_closep = &fake; data_fake.pos_delta = delta; next = regnext(scan); scan = NEXTOPER(scan); /* everything */ if (code != BRANCH) /* everything but BRANCH */ scan = NEXTOPER(scan); if (flags & SCF_DO_STCLASS) { ssc_init(pRExC_state, &this_class); data_fake.start_class = &this_class; f = SCF_DO_STCLASS_AND; } if (flags & SCF_WHILEM_VISITED_POS) f |= SCF_WHILEM_VISITED_POS; /* we suppose the run is continuous, last=next...*/ minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext, next, &data_fake, stopparen, recursed_depth, NULL, f,depth+1); if (min1 > minnext) min1 = minnext; if (deltanext == SSize_t_MAX) { is_inf = is_inf_internal = 1; max1 = SSize_t_MAX; } else if (max1 < minnext + deltanext) max1 = minnext + deltanext; scan = next; if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (data_fake.flags & SCF_SEEN_ACCEPT) { if ( stopmin > minnext) stopmin = min + min1; flags &= ~SCF_DO_SUBSTR; if (data) data->flags |= SCF_SEEN_ACCEPT; } if (data) { if (data_fake.flags & SF_HAS_EVAL) data->flags |= SF_HAS_EVAL; data->whilem_c = data_fake.whilem_c; } if (flags & SCF_DO_STCLASS) ssc_or(pRExC_state, &accum, (regnode_charclass*)&this_class); } if (code == IFTHEN && num < 2) /* Empty ELSE branch */ min1 = 0; if (flags & SCF_DO_SUBSTR) { data->pos_min += min1; if (data->pos_delta >= SSize_t_MAX - (max1 - min1)) data->pos_delta = SSize_t_MAX; else data->pos_delta += max1 - min1; if (max1 != min1 || is_inf) data->cur_is_floating = 1; } min += min1; if (delta == SSize_t_MAX || SSize_t_MAX - delta - (max1 - min1) < 0) delta = SSize_t_MAX; else delta += max1 - min1; if (flags & SCF_DO_STCLASS_OR) { ssc_or(pRExC_state, data->start_class, (regnode_charclass*) &accum); if (min1) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); flags &= ~SCF_DO_STCLASS; } } else if (flags & SCF_DO_STCLASS_AND) { if (min1) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum); flags &= ~SCF_DO_STCLASS; } else { /* Switch to OR mode: cache the old value of * data->start_class */ INIT_AND_WITHP; StructCopy(data->start_class, and_withp, regnode_ssc); flags &= ~SCF_DO_STCLASS_AND; StructCopy(&accum, data->start_class, regnode_ssc); flags |= SCF_DO_STCLASS_OR; } } if (PERL_ENABLE_TRIE_OPTIMISATION && OP( startbranch ) == BRANCH ) { /* demq. Assuming this was/is a branch we are dealing with: 'scan' now points at the item that follows the branch sequence, whatever it is. 
We now start at the beginning of the sequence and look for subsequences of BRANCH->EXACT=>x1 BRANCH->EXACT=>x2 tail which would be constructed from a pattern like /A|LIST|OF|WORDS/ If we can find such a subsequence we need to turn the first element into a trie and then add the subsequent branch exact strings to the trie. We have two cases 1. patterns where the whole set of branches can be converted. 2. patterns where only a subset can be converted. In case 1 we can replace the whole set with a single regop for the trie. In case 2 we need to keep the start and end branches so 'BRANCH EXACT; BRANCH EXACT; BRANCH X' becomes BRANCH TRIE; BRANCH X; There is an additional case, that being where there is a common prefix, which gets split out into an EXACT like node preceding the TRIE node. If x(1..n)==tail then we can do a simple trie, if not we make a "jump" trie, such that when we match the appropriate word we "jump" to the appropriate tail node. Essentially we turn a nested if into a case structure of sorts. */ int made=0; if (!re_trie_maxbuff) { re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, 1); if (!SvIOK(re_trie_maxbuff)) sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT); } if ( SvIV(re_trie_maxbuff)>=0 ) { regnode *cur; regnode *first = (regnode *)NULL; regnode *last = (regnode *)NULL; regnode *tail = scan; U8 trietype = 0; U32 count=0; /* var tail is used because there may be a TAIL regop in the way. Ie, the exacts will point to the thing following the TAIL, but the last branch will point at the TAIL. So we advance tail. If we have nested (?:) we may have to move through several tails. */ while ( OP( tail ) == TAIL ) { /* this is the TAIL generated by (?:) */ tail = regnext( tail ); } DEBUG_TRIE_COMPILE_r({ regprop(RExC_rx, RExC_mysv, tail, NULL, pRExC_state); Perl_re_indentf( aTHX_ "%s %" UVuf ":%s\n", depth+1, "Looking for TRIE'able sequences. Tail node is ", (UV)(tail - RExC_emit_start), SvPV_nolen_const( RExC_mysv ) ); }); /* Step through the branches cur represents each branch, noper is the first thing to be matched as part of that branch noper_next is the regnext() of that node. We normally handle a case like this /FOO[xyz]|BAR[pqr]/ via a "jump trie" but we also support building with NOJUMPTRIE, which restricts the trie logic to structures like /FOO|BAR/. If noper is a trieable nodetype then the branch is a possible optimization target. If we are building under NOJUMPTRIE then we require that noper_next is the same as scan (our current position in the regex program). Once we have two or more consecutive such branches we can create a trie of the EXACT's contents and stitch it in place into the program. If the sequence represents all of the branches in the alternation we replace the entire thing with a single TRIE node. Otherwise when it is a subsequence we need to stitch it in place and replace only the relevant branches. This means the first branch has to remain as it is used by the alternation logic, and its next pointer, and needs to be repointed at the item on the branch chain following the last branch we have optimized away. This could be either a BRANCH, in which case the subsequence is internal, or it could be the item following the branch sequence in which case the subsequence is at the end (which does not necessarily mean the first node is the start of the alternation). TRIE_TYPE(X) is a define which maps the optype to a trietype. 
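          Illustrative aside (an assumption-flagged example, not part of the
          original table): under /iu an EXACTFU_SS node, an exact string
          containing "ss" that can also match U+00DF, collapses to the
          EXACTFU trie type below, so such a branch can share one trie with
          plain EXACTFU branches.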
optype | trietype ----------------+----------- NOTHING | NOTHING EXACT | EXACT EXACTFU | EXACTFU EXACTFU_SS | EXACTFU EXACTFA | EXACTFA EXACTL | EXACTL EXACTFLU8 | EXACTFLU8 */ #define TRIE_TYPE(X) ( ( NOTHING == (X) ) \ ? NOTHING \ : ( EXACT == (X) ) \ ? EXACT \ : ( EXACTFU == (X) || EXACTFU_SS == (X) ) \ ? EXACTFU \ : ( EXACTFA == (X) ) \ ? EXACTFA \ : ( EXACTL == (X) ) \ ? EXACTL \ : ( EXACTFLU8 == (X) ) \ ? EXACTFLU8 \ : 0 ) /* dont use tail as the end marker for this traverse */ for ( cur = startbranch ; cur != scan ; cur = regnext( cur ) ) { regnode * const noper = NEXTOPER( cur ); U8 noper_type = OP( noper ); U8 noper_trietype = TRIE_TYPE( noper_type ); #if defined(DEBUGGING) || defined(NOJUMPTRIE) regnode * const noper_next = regnext( noper ); U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0; U8 noper_next_trietype = (noper_next && noper_next < tail) ? TRIE_TYPE( noper_next_type ) :0; #endif DEBUG_TRIE_COMPILE_r({ regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state); Perl_re_indentf( aTHX_ "- %d:%s (%d)", depth+1, REG_NODE_NUM(cur), SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur) ); regprop(RExC_rx, RExC_mysv, noper, NULL, pRExC_state); Perl_re_printf( aTHX_ " -> %d:%s", REG_NODE_NUM(noper), SvPV_nolen_const(RExC_mysv)); if ( noper_next ) { regprop(RExC_rx, RExC_mysv, noper_next, NULL, pRExC_state); Perl_re_printf( aTHX_ "\t=> %d:%s\t", REG_NODE_NUM(noper_next), SvPV_nolen_const(RExC_mysv)); } Perl_re_printf( aTHX_ "(First==%d,Last==%d,Cur==%d,tt==%s,ntt==%s,nntt==%s)\n", REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur), PL_reg_name[trietype], PL_reg_name[noper_trietype], PL_reg_name[noper_next_trietype] ); }); /* Is noper a trieable nodetype that can be merged * with the current trie (if there is one)? */ if ( noper_trietype && ( ( noper_trietype == NOTHING ) || ( trietype == NOTHING ) || ( trietype == noper_trietype ) ) #ifdef NOJUMPTRIE && noper_next >= tail #endif && count < U16_MAX) { /* Handle mergable triable node Either we are * the first node in a new trieable sequence, * in which case we do some bookkeeping, * otherwise we update the end pointer. */ if ( !first ) { first = cur; if ( noper_trietype == NOTHING ) { #if !defined(DEBUGGING) && !defined(NOJUMPTRIE) regnode * const noper_next = regnext( noper ); U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0; U8 noper_next_trietype = noper_next_type ? TRIE_TYPE( noper_next_type ) :0; #endif if ( noper_next_trietype ) { trietype = noper_next_trietype; } else if (noper_next_type) { /* a NOTHING regop is 1 regop wide. * We need at least two for a trie * so we can't merge this in */ first = NULL; } } else { trietype = noper_trietype; } } else { if ( trietype == NOTHING ) trietype = noper_trietype; last = cur; } if (first) count++; } /* end handle mergable triable node */ else { /* handle unmergable node - * noper may either be a triable node which can * not be tried together with the current trie, * or a non triable node */ if ( last ) { /* If last is set and trietype is not * NOTHING then we have found at least two * triable branch sequences in a row of a * similar trietype so we can turn them * into a trie. If/when we allow NOTHING to * start a trie sequence this condition * will be required, and it isn't expensive * so we leave it in for now. 
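                                 * (Hypothetical walk-through: in /ab|cd|[ef]g/
                                 * the first two branches start with trieable
                                 * EXACT nodes and the third does not, so
                                 * 'last' stops on the second branch and the
                                 * make_trie() call just below folds only
                                 * those two, leaving the [ef]g branch alone.)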
*/ if ( trietype && trietype != NOTHING ) make_trie( pRExC_state, startbranch, first, cur, tail, count, trietype, depth+1 ); last = NULL; /* note: we clear/update first, trietype etc below, so we dont do it here */ } if ( noper_trietype #ifdef NOJUMPTRIE && noper_next >= tail #endif ){ /* noper is triable, so we can start a new * trie sequence */ count = 1; first = cur; trietype = noper_trietype; } else if (first) { /* if we already saw a first but the * current node is not triable then we have * to reset the first information. */ count = 0; first = NULL; trietype = 0; } } /* end handle unmergable node */ } /* loop over branches */ DEBUG_TRIE_COMPILE_r({ regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state); Perl_re_indentf( aTHX_ "- %s (%d) <SCAN FINISHED> ", depth+1, SvPV_nolen_const( RExC_mysv ),REG_NODE_NUM(cur)); Perl_re_printf( aTHX_ "(First==%d, Last==%d, Cur==%d, tt==%s)\n", REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur), PL_reg_name[trietype] ); }); if ( last && trietype ) { if ( trietype != NOTHING ) { /* the last branch of the sequence was part of * a trie, so we have to construct it here * outside of the loop */ made= make_trie( pRExC_state, startbranch, first, scan, tail, count, trietype, depth+1 ); #ifdef TRIE_STUDY_OPT if ( ((made == MADE_EXACT_TRIE && startbranch == first) || ( first_non_open == first )) && depth==0 ) { flags |= SCF_TRIE_RESTUDY; if ( startbranch == first && scan >= tail ) { RExC_seen &=~REG_TOP_LEVEL_BRANCHES_SEEN; } } #endif } else { /* at this point we know whatever we have is a * NOTHING sequence/branch AND if 'startbranch' * is 'first' then we can turn the whole thing * into a NOTHING */ if ( startbranch == first ) { regnode *opt; /* the entire thing is a NOTHING sequence, * something like this: (?:|) So we can * turn it into a plain NOTHING op. */ DEBUG_TRIE_COMPILE_r({ regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state); Perl_re_indentf( aTHX_ "- %s (%d) <NOTHING BRANCH SEQUENCE>\n", depth+1, SvPV_nolen_const( RExC_mysv ),REG_NODE_NUM(cur)); }); OP(startbranch)= NOTHING; NEXT_OFF(startbranch)= tail - startbranch; for ( opt= startbranch + 1; opt < tail ; opt++ ) OP(opt)= OPTIMIZED; } } } /* end if ( last) */ } /* TRIE_MAXBUF is non zero */ } /* do trie */ } else if ( code == BRANCHJ ) { /* single branch is optimized. */ scan = NEXTOPER(NEXTOPER(scan)); } else /* single branch is optimized. */ scan = NEXTOPER(scan); continue; } else if (OP(scan) == SUSPEND || OP(scan) == GOSUB) { I32 paren = 0; regnode *start = NULL; regnode *end = NULL; U32 my_recursed_depth= recursed_depth; if (OP(scan) != SUSPEND) { /* GOSUB */ /* Do setup, note this code has side effects beyond * the rest of this block. Specifically setting * RExC_recurse[] must happen at least once during * study_chunk(). */ paren = ARG(scan); RExC_recurse[ARG2L(scan)] = scan; start = RExC_open_parens[paren]; end = RExC_close_parens[paren]; /* NOTE we MUST always execute the above code, even * if we do nothing with a GOSUB */ if ( ( flags & SCF_IN_DEFINE ) || ( (is_inf_internal || is_inf || (data && data->flags & SF_IS_INF)) && ( (flags & (SCF_DO_STCLASS | SCF_DO_SUBSTR)) == 0 ) ) ) { /* no need to do anything here if we are in a define. */ /* or we are after some kind of infinite construct * so we can skip recursing into this item. * Since it is infinite we will not change the maxlen * or delta, and if we miss something that might raise * the minlen it will merely pessimise a little. 
* * Iow /(?(DEFINE)(?<foo>foo|food))a+(?&foo)/ * might result in a minlen of 1 and not of 4, * but this doesn't make us mismatch, just try a bit * harder than we should. * */ scan= regnext(scan); continue; } if ( !recursed_depth || !PAREN_TEST(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes), paren) ) { /* it is quite possible that there are more efficient ways * to do this. We maintain a bitmap per level of recursion * of which patterns we have entered so we can detect if a * pattern creates a possible infinite loop. When we * recurse down a level we copy the previous levels bitmap * down. When we are at recursion level 0 we zero the top * level bitmap. It would be nice to implement a different * more efficient way of doing this. In particular the top * level bitmap may be unnecessary. */ if (!recursed_depth) { Zero(RExC_study_chunk_recursed, RExC_study_chunk_recursed_bytes, U8); } else { Copy(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes), RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes), RExC_study_chunk_recursed_bytes, U8); } /* we havent recursed into this paren yet, so recurse into it */ DEBUG_STUDYDATA("gosub-set", data, depth, is_inf); PAREN_SET(RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes), paren); my_recursed_depth= recursed_depth + 1; } else { DEBUG_STUDYDATA("gosub-inf", data, depth, is_inf); /* some form of infinite recursion, assume infinite length * */ if (flags & SCF_DO_SUBSTR) { scan_commit(pRExC_state, data, minlenp, is_inf); data->cur_is_floating = 1; } is_inf = is_inf_internal = 1; if (flags & SCF_DO_STCLASS_OR) /* Allow everything */ ssc_anything(data->start_class); flags &= ~SCF_DO_STCLASS; start= NULL; /* reset start so we dont recurse later on. */ } } else { paren = stopparen; start = scan + 2; end = regnext(scan); } if (start) { scan_frame *newframe; assert(end); if (!RExC_frame_last) { Newxz(newframe, 1, scan_frame); SAVEDESTRUCTOR_X(S_unwind_scan_frames, newframe); RExC_frame_head= newframe; RExC_frame_count++; } else if (!RExC_frame_last->next_frame) { Newxz(newframe,1,scan_frame); RExC_frame_last->next_frame= newframe; newframe->prev_frame= RExC_frame_last; RExC_frame_count++; } else { newframe= RExC_frame_last->next_frame; } RExC_frame_last= newframe; newframe->next_regnode = regnext(scan); newframe->last_regnode = last; newframe->stopparen = stopparen; newframe->prev_recursed_depth = recursed_depth; newframe->this_prev_frame= frame; DEBUG_STUDYDATA("frame-new", data, depth, is_inf); DEBUG_PEEP("fnew", scan, depth, flags); frame = newframe; scan = start; stopparen = paren; last = end; depth = depth + 1; recursed_depth= my_recursed_depth; continue; } } else if (OP(scan) == EXACT || OP(scan) == EXACTL) { SSize_t l = STR_LEN(scan); UV uc; assert(l); if (UTF) { const U8 * const s = (U8*)STRING(scan); uc = utf8_to_uvchr_buf(s, s + l, NULL); l = utf8_length(s, s + l); } else { uc = *((U8*)STRING(scan)); } min += l; if (flags & SCF_DO_SUBSTR) { /* Update longest substr. */ /* The code below prefers earlier match for fixed offset, later match for variable offset. */ if (data->last_end == -1) { /* Update the start info. */ data->last_start_min = data->pos_min; data->last_start_max = is_inf ? SSize_t_MAX : data->pos_min + data->pos_delta; } sv_catpvn(data->last_found, STRING(scan), STR_LEN(scan)); if (UTF) SvUTF8_on(data->last_found); { SV * const sv = data->last_found; MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ? 
mg_find(sv, PERL_MAGIC_utf8) : NULL; if (mg && mg->mg_len >= 0) mg->mg_len += utf8_length((U8*)STRING(scan), (U8*)STRING(scan)+STR_LEN(scan)); } data->last_end = data->pos_min + l; data->pos_min += l; /* As in the first entry. */ data->flags &= ~SF_BEFORE_EOL; } /* ANDing the code point leaves at most it, and not in locale, and * can't match null string */ if (flags & SCF_DO_STCLASS_AND) { ssc_cp_and(data->start_class, uc); ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; ssc_clear_locale(data->start_class); } else if (flags & SCF_DO_STCLASS_OR) { ssc_add_cp(data->start_class, uc); ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); /* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */ ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; } flags &= ~SCF_DO_STCLASS; } else if (PL_regkind[OP(scan)] == EXACT) { /* But OP != EXACT!, so is EXACTFish */ SSize_t l = STR_LEN(scan); const U8 * s = (U8*)STRING(scan); /* Search for fixed substrings supports EXACT only. */ if (flags & SCF_DO_SUBSTR) { assert(data); scan_commit(pRExC_state, data, minlenp, is_inf); } if (UTF) { l = utf8_length(s, s + l); } if (unfolded_multi_char) { RExC_seen |= REG_UNFOLDED_MULTI_SEEN; } min += l - min_subtract; assert (min >= 0); delta += min_subtract; if (flags & SCF_DO_SUBSTR) { data->pos_min += l - min_subtract; if (data->pos_min < 0) { data->pos_min = 0; } data->pos_delta += min_subtract; if (min_subtract) { data->cur_is_floating = 1; /* float */ } } if (flags & SCF_DO_STCLASS) { SV* EXACTF_invlist = _make_exactf_invlist(pRExC_state, scan); assert(EXACTF_invlist); if (flags & SCF_DO_STCLASS_AND) { if (OP(scan) != EXACTFL) ssc_clear_locale(data->start_class); ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; ANYOF_POSIXL_ZERO(data->start_class); ssc_intersection(data->start_class, EXACTF_invlist, FALSE); } else { /* SCF_DO_STCLASS_OR */ ssc_union(data->start_class, EXACTF_invlist, FALSE); ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); /* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */ ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; } flags &= ~SCF_DO_STCLASS; SvREFCNT_dec(EXACTF_invlist); } } else if (REGNODE_VARIES(OP(scan))) { SSize_t mincount, maxcount, minnext, deltanext, pos_before = 0; I32 fl = 0, f = flags; regnode * const oscan = scan; regnode_ssc this_class; regnode_ssc *oclass = NULL; I32 next_is_eval = 0; switch (PL_regkind[OP(scan)]) { case WHILEM: /* End of (?:...)* . */ scan = NEXTOPER(scan); goto finish; case PLUS: if (flags & (SCF_DO_SUBSTR | SCF_DO_STCLASS)) { next = NEXTOPER(scan); if (OP(next) == EXACT || OP(next) == EXACTL || (flags & SCF_DO_STCLASS)) { mincount = 1; maxcount = REG_INFTY; next = regnext(scan); scan = NEXTOPER(scan); goto do_curly; } } if (flags & SCF_DO_SUBSTR) data->pos_min++; min++; /* FALLTHROUGH */ case STAR: if (flags & SCF_DO_STCLASS) { mincount = 0; maxcount = REG_INFTY; next = regnext(scan); scan = NEXTOPER(scan); goto do_curly; } if (flags & SCF_DO_SUBSTR) { scan_commit(pRExC_state, data, minlenp, is_inf); /* Cannot extend fixed substrings */ data->cur_is_floating = 1; /* float */ } is_inf = is_inf_internal = 1; scan = regnext(scan); goto optimize_curly_tail; case CURLY: if (stopparen>0 && (OP(scan)==CURLYN || OP(scan)==CURLYM) && (scan->flags == stopparen)) { mincount = 1; maxcount = 1; } else { mincount = ARG1(scan); maxcount = ARG2(scan); } next = regnext(scan); if (OP(scan) == CURLYX) { I32 lp = (data ? 
*(data->last_closep) : 0); scan->flags = ((lp <= (I32)U8_MAX) ? (U8)lp : U8_MAX); } scan = NEXTOPER(scan) + EXTRA_STEP_2ARGS; next_is_eval = (OP(scan) == EVAL); do_curly: if (flags & SCF_DO_SUBSTR) { if (mincount == 0) scan_commit(pRExC_state, data, minlenp, is_inf); /* Cannot extend fixed substrings */ pos_before = data->pos_min; } if (data) { fl = data->flags; data->flags &= ~(SF_HAS_PAR|SF_IN_PAR|SF_HAS_EVAL); if (is_inf) data->flags |= SF_IS_INF; } if (flags & SCF_DO_STCLASS) { ssc_init(pRExC_state, &this_class); oclass = data->start_class; data->start_class = &this_class; f |= SCF_DO_STCLASS_AND; f &= ~SCF_DO_STCLASS_OR; } /* Exclude from super-linear cache processing any {n,m} regops for which the combination of input pos and regex pos is not enough information to determine if a match will be possible. For example, in the regex /foo(bar\s*){4,8}baz/ with the regex pos at the \s*, the prospects for a match depend not only on the input position but also on how many (bar\s*) repeats into the {4,8} we are. */ if ((mincount > 1) || (maxcount > 1 && maxcount != REG_INFTY)) f &= ~SCF_WHILEM_VISITED_POS; /* This will finish on WHILEM, setting scan, or on NULL: */ minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext, last, data, stopparen, recursed_depth, NULL, (mincount == 0 ? (f & ~SCF_DO_SUBSTR) : f) ,depth+1); if (flags & SCF_DO_STCLASS) data->start_class = oclass; if (mincount == 0 || minnext == 0) { if (flags & SCF_DO_STCLASS_OR) { ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class); } else if (flags & SCF_DO_STCLASS_AND) { /* Switch to OR mode: cache the old value of * data->start_class */ INIT_AND_WITHP; StructCopy(data->start_class, and_withp, regnode_ssc); flags &= ~SCF_DO_STCLASS_AND; StructCopy(&this_class, data->start_class, regnode_ssc); flags |= SCF_DO_STCLASS_OR; ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING; } } else { /* Non-zero len */ if (flags & SCF_DO_STCLASS_OR) { ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class); ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); } else if (flags & SCF_DO_STCLASS_AND) ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &this_class); flags &= ~SCF_DO_STCLASS; } if (!scan) /* It was not CURLYX, but CURLY. */ scan = next; if (((flags & (SCF_TRIE_DOING_RESTUDY|SCF_DO_SUBSTR))==SCF_DO_SUBSTR) /* ? quantifier ok, except for (?{ ... }) */ && (next_is_eval || !(mincount == 0 && maxcount == 1)) && (minnext == 0) && (deltanext == 0) && data && !(data->flags & (SF_HAS_PAR|SF_IN_PAR)) && maxcount <= REG_INFTY/3) /* Complement check for big count */ { /* Fatal warnings may leak the regexp without this: */ SAVEFREESV(RExC_rx_sv); Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), "Quantifier unexpected on zero-length expression " "in regex m/%" UTF8f "/", UTF8fARG(UTF, RExC_precomp_end - RExC_precomp, RExC_precomp)); (void)ReREFCNT_inc(RExC_rx_sv); } min += minnext * mincount; is_inf_internal |= deltanext == SSize_t_MAX || (maxcount == REG_INFTY && minnext + deltanext > 0); is_inf |= is_inf_internal; if (is_inf) { delta = SSize_t_MAX; } else { delta += (minnext + deltanext) * maxcount - minnext * mincount; } /* Try powerful optimization CURLYX => CURLYN. */ if ( OP(oscan) == CURLYX && data && data->flags & SF_IN_PAR && !(data->flags & SF_HAS_EVAL) && !deltanext && minnext == 1 ) { /* Try to optimize to CURLYN. */ regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; regnode * const nxt1 = nxt; #ifdef DEBUGGING regnode *nxt2; #endif /* Skip open. 
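                       * (Sketch of the node layout assumed here, roughly:
                       *    CURLYX -> OPEN -> <single one-char node> -> CLOSE
                       * regnext() hops the OPEN so nxt lands on the body
                       * node, which must be a simple or one-char EXACT node
                       * for the CURLYN rewrite to apply.)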
*/ nxt = regnext(nxt); if (!REGNODE_SIMPLE(OP(nxt)) && !(PL_regkind[OP(nxt)] == EXACT && STR_LEN(nxt) == 1)) goto nogo; #ifdef DEBUGGING nxt2 = nxt; #endif nxt = regnext(nxt); if (OP(nxt) != CLOSE) goto nogo; if (RExC_open_parens) { RExC_open_parens[ARG(nxt1)]=oscan; /*open->CURLYM*/ RExC_close_parens[ARG(nxt1)]=nxt+2; /*close->while*/ } /* Now we know that nxt2 is the only contents: */ oscan->flags = (U8)ARG(nxt); OP(oscan) = CURLYN; OP(nxt1) = NOTHING; /* was OPEN. */ #ifdef DEBUGGING OP(nxt1 + 1) = OPTIMIZED; /* was count. */ NEXT_OFF(nxt1+ 1) = 0; /* just for consistency. */ NEXT_OFF(nxt2) = 0; /* just for consistency with CURLY. */ OP(nxt) = OPTIMIZED; /* was CLOSE. */ OP(nxt + 1) = OPTIMIZED; /* was count. */ NEXT_OFF(nxt+ 1) = 0; /* just for consistency. */ #endif } nogo: /* Try optimization CURLYX => CURLYM. */ if ( OP(oscan) == CURLYX && data && !(data->flags & SF_HAS_PAR) && !(data->flags & SF_HAS_EVAL) && !deltanext /* atom is fixed width */ && minnext != 0 /* CURLYM can't handle zero width */ /* Nor characters whose fold at run-time may be * multi-character */ && ! (RExC_seen & REG_UNFOLDED_MULTI_SEEN) ) { /* XXXX How to optimize if data == 0? */ /* Optimize to a simpler form. */ regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN */ regnode *nxt2; OP(oscan) = CURLYM; while ( (nxt2 = regnext(nxt)) /* skip over embedded stuff*/ && (OP(nxt2) != WHILEM)) nxt = nxt2; OP(nxt2) = SUCCEED; /* Whas WHILEM */ /* Need to optimize away parenths. */ if ((data->flags & SF_IN_PAR) && OP(nxt) == CLOSE) { /* Set the parenth number. */ regnode *nxt1 = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN*/ oscan->flags = (U8)ARG(nxt); if (RExC_open_parens) { RExC_open_parens[ARG(nxt1)]=oscan; /*open->CURLYM*/ RExC_close_parens[ARG(nxt1)]=nxt2+1; /*close->NOTHING*/ } OP(nxt1) = OPTIMIZED; /* was OPEN. */ OP(nxt) = OPTIMIZED; /* was CLOSE. */ #ifdef DEBUGGING OP(nxt1 + 1) = OPTIMIZED; /* was count. */ OP(nxt + 1) = OPTIMIZED; /* was count. */ NEXT_OFF(nxt1 + 1) = 0; /* just for consistency. */ NEXT_OFF(nxt + 1) = 0; /* just for consistency. */ #endif #if 0 while ( nxt1 && (OP(nxt1) != WHILEM)) { regnode *nnxt = regnext(nxt1); if (nnxt == nxt) { if (reg_off_by_arg[OP(nxt1)]) ARG_SET(nxt1, nxt2 - nxt1); else if (nxt2 - nxt1 < U16_MAX) NEXT_OFF(nxt1) = nxt2 - nxt1; else OP(nxt) = NOTHING; /* Cannot beautify */ } nxt1 = nnxt; } #endif /* Optimize again: */ study_chunk(pRExC_state, &nxt1, minlenp, &deltanext, nxt, NULL, stopparen, recursed_depth, NULL, 0,depth+1); } else oscan->flags = 0; } else if ((OP(oscan) == CURLYX) && (flags & SCF_WHILEM_VISITED_POS) /* See the comment on a similar expression above. However, this time it's not a subexpression we care about, but the expression itself. */ && (maxcount == REG_INFTY) && data) { /* This stays as CURLYX, we can put the count/of pair. */ /* Find WHILEM (as in regexec.c) */ regnode *nxt = oscan + NEXT_OFF(oscan); if (OP(PREVOPER(nxt)) == NOTHING) /* LONGJMP */ nxt += ARG(nxt); nxt = PREVOPER(nxt); if (nxt->flags & 0xf) { /* we've already set whilem count on this node */ } else if (++data->whilem_c < 16) { assert(data->whilem_c <= RExC_whilem_seen); nxt->flags = (U8)(data->whilem_c | (RExC_whilem_seen << 4)); /* On WHILEM */ } } if (data && fl & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (flags & SCF_DO_SUBSTR) { SV *last_str = NULL; STRLEN last_chrs = 0; int counted = mincount != 0; if (data->last_end > 0 && mincount != 0) { /* Ends with a string. */ SSize_t b = pos_before >= data->last_start_min ? 
pos_before : data->last_start_min; STRLEN l; const char * const s = SvPV_const(data->last_found, l); SSize_t old = b - data->last_start_min; if (UTF) old = utf8_hop((U8*)s, old) - (U8*)s; l -= old; /* Get the added string: */ last_str = newSVpvn_utf8(s + old, l, UTF); last_chrs = UTF ? utf8_length((U8*)(s + old), (U8*)(s + old + l)) : l; if (deltanext == 0 && pos_before == b) { /* What was added is a constant string */ if (mincount > 1) { SvGROW(last_str, (mincount * l) + 1); repeatcpy(SvPVX(last_str) + l, SvPVX_const(last_str), l, mincount - 1); SvCUR_set(last_str, SvCUR(last_str) * mincount); /* Add additional parts. */ SvCUR_set(data->last_found, SvCUR(data->last_found) - l); sv_catsv(data->last_found, last_str); { SV * sv = data->last_found; MAGIC *mg = SvUTF8(sv) && SvMAGICAL(sv) ? mg_find(sv, PERL_MAGIC_utf8) : NULL; if (mg && mg->mg_len >= 0) mg->mg_len += last_chrs * (mincount-1); } last_chrs *= mincount; data->last_end += l * (mincount - 1); } } else { /* start offset must point into the last copy */ data->last_start_min += minnext * (mincount - 1); data->last_start_max = is_inf ? SSize_t_MAX : data->last_start_max + (maxcount - 1) * (minnext + data->pos_delta); } } /* It is counted once already... */ data->pos_min += minnext * (mincount - counted); #if 0 Perl_re_printf( aTHX_ "counted=%" UVuf " deltanext=%" UVuf " SSize_t_MAX=%" UVuf " minnext=%" UVuf " maxcount=%" UVuf " mincount=%" UVuf "\n", (UV)counted, (UV)deltanext, (UV)SSize_t_MAX, (UV)minnext, (UV)maxcount, (UV)mincount); if (deltanext != SSize_t_MAX) Perl_re_printf( aTHX_ "LHS=%" UVuf " RHS=%" UVuf "\n", (UV)(-counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount), (UV)(SSize_t_MAX - data->pos_delta)); #endif if (deltanext == SSize_t_MAX || -counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount >= SSize_t_MAX - data->pos_delta) data->pos_delta = SSize_t_MAX; else data->pos_delta += - counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount; if (mincount != maxcount) { /* Cannot extend fixed substrings found inside the group. */ scan_commit(pRExC_state, data, minlenp, is_inf); if (mincount && last_str) { SV * const sv = data->last_found; MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ? mg_find(sv, PERL_MAGIC_utf8) : NULL; if (mg) mg->mg_len = -1; sv_setsv(sv, last_str); data->last_end = data->pos_min; data->last_start_min = data->pos_min - last_chrs; data->last_start_max = is_inf ? SSize_t_MAX : data->pos_min + data->pos_delta - last_chrs; } data->cur_is_floating = 1; /* float */ } SvREFCNT_dec(last_str); } if (data && (fl & SF_HAS_EVAL)) data->flags |= SF_HAS_EVAL; optimize_curly_tail: if (OP(oscan) != CURLYX) { while (PL_regkind[OP(next = regnext(oscan))] == NOTHING && NEXT_OFF(next)) NEXT_OFF(oscan) += NEXT_OFF(next); } continue; default: #ifdef DEBUGGING Perl_croak(aTHX_ "panic: unexpected varying REx opcode %d", OP(scan)); #endif case REF: case CLUMP: if (flags & SCF_DO_SUBSTR) { /* Cannot expect anything... 
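                     * (e.g. a backreference \1 matches whatever group 1
                     * captured at run time, and \X matches a whole grapheme
                     * cluster, so no fixed substring and no finite maximum
                     * length survives past this node.)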
*/ scan_commit(pRExC_state, data, minlenp, is_inf); data->cur_is_floating = 1; /* float */ } is_inf = is_inf_internal = 1; if (flags & SCF_DO_STCLASS_OR) { if (OP(scan) == CLUMP) { /* Actually is any start char, but very few code points * aren't start characters */ ssc_match_all_cp(data->start_class); } else { ssc_anything(data->start_class); } } flags &= ~SCF_DO_STCLASS; break; } } else if (OP(scan) == LNBREAK) { if (flags & SCF_DO_STCLASS) { if (flags & SCF_DO_STCLASS_AND) { ssc_intersection(data->start_class, PL_XPosix_ptrs[_CC_VERTSPACE], FALSE); ssc_clear_locale(data->start_class); ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; } else if (flags & SCF_DO_STCLASS_OR) { ssc_union(data->start_class, PL_XPosix_ptrs[_CC_VERTSPACE], FALSE); ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); /* See commit msg for * 749e076fceedeb708a624933726e7989f2302f6a */ ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; } flags &= ~SCF_DO_STCLASS; } min++; if (delta != SSize_t_MAX) delta++; /* Because of the 2 char string cr-lf */ if (flags & SCF_DO_SUBSTR) { /* Cannot expect anything... */ scan_commit(pRExC_state, data, minlenp, is_inf); data->pos_min += 1; data->pos_delta += 1; data->cur_is_floating = 1; /* float */ } } else if (REGNODE_SIMPLE(OP(scan))) { if (flags & SCF_DO_SUBSTR) { scan_commit(pRExC_state, data, minlenp, is_inf); data->pos_min++; } min++; if (flags & SCF_DO_STCLASS) { bool invert = 0; SV* my_invlist = NULL; U8 namedclass; /* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */ ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; /* Some of the logic below assumes that switching locale on will only add false positives. */ switch (OP(scan)) { default: #ifdef DEBUGGING Perl_croak(aTHX_ "panic: unexpected simple REx opcode %d", OP(scan)); #endif case SANY: if (flags & SCF_DO_STCLASS_OR) /* Allow everything */ ssc_match_all_cp(data->start_class); break; case REG_ANY: { SV* REG_ANY_invlist = _new_invlist(2); REG_ANY_invlist = add_cp_to_invlist(REG_ANY_invlist, '\n'); if (flags & SCF_DO_STCLASS_OR) { ssc_union(data->start_class, REG_ANY_invlist, TRUE /* TRUE => invert, hence all but \n */ ); } else if (flags & SCF_DO_STCLASS_AND) { ssc_intersection(data->start_class, REG_ANY_invlist, TRUE /* TRUE => invert */ ); ssc_clear_locale(data->start_class); } SvREFCNT_dec_NN(REG_ANY_invlist); } break; case ANYOFD: case ANYOFL: case ANYOF: if (flags & SCF_DO_STCLASS_AND) ssc_and(pRExC_state, data->start_class, (regnode_charclass *) scan); else ssc_or(pRExC_state, data->start_class, (regnode_charclass *) scan); break; case NPOSIXL: invert = 1; /* FALLTHROUGH */ case POSIXL: namedclass = classnum_to_namedclass(FLAGS(scan)) + invert; if (flags & SCF_DO_STCLASS_AND) { bool was_there = cBOOL( ANYOF_POSIXL_TEST(data->start_class, namedclass)); ANYOF_POSIXL_ZERO(data->start_class); if (was_there) { /* Do an AND */ ANYOF_POSIXL_SET(data->start_class, namedclass); } /* No individual code points can now match */ data->start_class->invlist = sv_2mortal(_new_invlist(0)); } else { int complement = namedclass + ((invert) ? -1 : 1); assert(flags & SCF_DO_STCLASS_OR); /* If the complement of this class was already there, * the result is that they match all code points, * (\d + \D == everything). Remove the classes from * future consideration. 
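                         * (Concrete case, offered as an illustration: under
                         * /l the branches of /\w|\W/ first record the \w
                         * class, then meet its complement, at which point
                         * the start class degrades to "any code point".)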
Locale is not relevant in * this case */ if (ANYOF_POSIXL_TEST(data->start_class, complement)) { ssc_match_all_cp(data->start_class); ANYOF_POSIXL_CLEAR(data->start_class, namedclass); ANYOF_POSIXL_CLEAR(data->start_class, complement); } else { /* The usual case; just add this class to the existing set */ ANYOF_POSIXL_SET(data->start_class, namedclass); } } break; case NPOSIXA: /* For these, we always know the exact set of what's matched */ invert = 1; /* FALLTHROUGH */ case POSIXA: if (FLAGS(scan) == _CC_ASCII) { my_invlist = invlist_clone(PL_XPosix_ptrs[_CC_ASCII]); } else { _invlist_intersection(PL_XPosix_ptrs[FLAGS(scan)], PL_XPosix_ptrs[_CC_ASCII], &my_invlist); } goto join_posix; case NPOSIXD: case NPOSIXU: invert = 1; /* FALLTHROUGH */ case POSIXD: case POSIXU: my_invlist = invlist_clone(PL_XPosix_ptrs[FLAGS(scan)]); /* NPOSIXD matches all upper Latin1 code points unless the * target string being matched is UTF-8, which is * unknowable until match time. Since we are going to * invert, we want to get rid of all of them so that the * inversion will match all */ if (OP(scan) == NPOSIXD) { _invlist_subtract(my_invlist, PL_UpperLatin1, &my_invlist); } join_posix: if (flags & SCF_DO_STCLASS_AND) { ssc_intersection(data->start_class, my_invlist, invert); ssc_clear_locale(data->start_class); } else { assert(flags & SCF_DO_STCLASS_OR); ssc_union(data->start_class, my_invlist, invert); } SvREFCNT_dec(my_invlist); } if (flags & SCF_DO_STCLASS_OR) ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); flags &= ~SCF_DO_STCLASS; } } else if (PL_regkind[OP(scan)] == EOL && flags & SCF_DO_SUBSTR) { data->flags |= (OP(scan) == MEOL ? SF_BEFORE_MEOL : SF_BEFORE_SEOL); scan_commit(pRExC_state, data, minlenp, is_inf); } else if ( PL_regkind[OP(scan)] == BRANCHJ /* Lookbehind, or need to calculate parens/evals/stclass: */ && (scan->flags || data || (flags & SCF_DO_STCLASS)) && (OP(scan) == IFMATCH || OP(scan) == UNLESSM)) { if ( !PERL_ENABLE_POSITIVE_ASSERTION_STUDY || OP(scan) == UNLESSM ) { /* Negative Lookahead/lookbehind In this case we can't do fixed string optimisation. */ SSize_t deltanext, minnext, fake = 0; regnode *nscan; regnode_ssc intrnl; int f = 0; StructCopy(&zero_scan_data, &data_fake, scan_data_t); if (data) { data_fake.whilem_c = data->whilem_c; data_fake.last_closep = data->last_closep; } else data_fake.last_closep = &fake; data_fake.pos_delta = delta; if ( flags & SCF_DO_STCLASS && !scan->flags && OP(scan) == IFMATCH ) { /* Lookahead */ ssc_init(pRExC_state, &intrnl); data_fake.start_class = &intrnl; f |= SCF_DO_STCLASS_AND; } if (flags & SCF_WHILEM_VISITED_POS) f |= SCF_WHILEM_VISITED_POS; next = regnext(scan); nscan = NEXTOPER(NEXTOPER(scan)); minnext = study_chunk(pRExC_state, &nscan, minlenp, &deltanext, last, &data_fake, stopparen, recursed_depth, NULL, f, depth+1); if (scan->flags) { if (deltanext) { FAIL("Variable length lookbehind not implemented"); } else if (minnext > (I32)U8_MAX) { FAIL2("Lookbehind longer than %" UVuf " not implemented", (UV)U8_MAX); } scan->flags = (U8)minnext; } if (data) { if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (data_fake.flags & SF_HAS_EVAL) data->flags |= SF_HAS_EVAL; data->whilem_c = data_fake.whilem_c; } if (f & SCF_DO_STCLASS_AND) { if (flags & SCF_DO_STCLASS_OR) { /* OR before, AND after: ideally we would recurse with * data_fake to get the AND applied by study of the * remainder of the pattern, and then derecurse; * *** HACK *** for now just treat as "no information". * See [perl #56690]. 
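                     * (Sketch of the precision lost: for a branch set like
                     * /x|(?=ab)./ the second alternative could in principle
                     * contribute just {a} to the start class, but deriving
                     * that needs the assertion's AND class combined with a
                     * study of what follows, so we fall back to treating it
                     * as "no information".)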
*/ ssc_init(pRExC_state, data->start_class); } else { /* AND before and after: combine and continue. These * assertions are zero-length, so can match an EMPTY * string */ ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl); ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING; } } } #if PERL_ENABLE_POSITIVE_ASSERTION_STUDY else { /* Positive Lookahead/lookbehind In this case we can do fixed string optimisation, but we must be careful about it. Note in the case of lookbehind the positions will be offset by the minimum length of the pattern, something we won't know about until after the recurse. */ SSize_t deltanext, fake = 0; regnode *nscan; regnode_ssc intrnl; int f = 0; /* We use SAVEFREEPV so that when the full compile is finished perl will clean up the allocated minlens when it's all done. This way we don't have to worry about freeing them when we know they wont be used, which would be a pain. */ SSize_t *minnextp; Newx( minnextp, 1, SSize_t ); SAVEFREEPV(minnextp); if (data) { StructCopy(data, &data_fake, scan_data_t); if ((flags & SCF_DO_SUBSTR) && data->last_found) { f |= SCF_DO_SUBSTR; if (scan->flags) scan_commit(pRExC_state, &data_fake, minlenp, is_inf); data_fake.last_found=newSVsv(data->last_found); } } else data_fake.last_closep = &fake; data_fake.flags = 0; data_fake.substrs[0].flags = 0; data_fake.substrs[1].flags = 0; data_fake.pos_delta = delta; if (is_inf) data_fake.flags |= SF_IS_INF; if ( flags & SCF_DO_STCLASS && !scan->flags && OP(scan) == IFMATCH ) { /* Lookahead */ ssc_init(pRExC_state, &intrnl); data_fake.start_class = &intrnl; f |= SCF_DO_STCLASS_AND; } if (flags & SCF_WHILEM_VISITED_POS) f |= SCF_WHILEM_VISITED_POS; next = regnext(scan); nscan = NEXTOPER(NEXTOPER(scan)); *minnextp = study_chunk(pRExC_state, &nscan, minnextp, &deltanext, last, &data_fake, stopparen, recursed_depth, NULL, f,depth+1); if (scan->flags) { if (deltanext) { FAIL("Variable length lookbehind not implemented"); } else if (*minnextp > (I32)U8_MAX) { FAIL2("Lookbehind longer than %" UVuf " not implemented", (UV)U8_MAX); } scan->flags = (U8)*minnextp; } *minnextp += min; if (f & SCF_DO_STCLASS_AND) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl); ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING; } if (data) { if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (data_fake.flags & SF_HAS_EVAL) data->flags |= SF_HAS_EVAL; data->whilem_c = data_fake.whilem_c; if ((flags & SCF_DO_SUBSTR) && data_fake.last_found) { int i; if (RExC_rx->minlen<*minnextp) RExC_rx->minlen=*minnextp; scan_commit(pRExC_state, &data_fake, minnextp, is_inf); SvREFCNT_dec_NN(data_fake.last_found); for (i = 0; i < 2; i++) { if (data_fake.substrs[i].minlenp != minlenp) { data->substrs[i].min_offset = data_fake.substrs[i].min_offset; data->substrs[i].max_offset = data_fake.substrs[i].max_offset; data->substrs[i].minlenp = data_fake.substrs[i].minlenp; data->substrs[i].lookbehind += scan->flags; } } } } } #endif } else if (OP(scan) == OPEN) { if (stopparen != (I32)ARG(scan)) pars++; } else if (OP(scan) == CLOSE) { if (stopparen == (I32)ARG(scan)) { break; } if ((I32)ARG(scan) == is_par) { next = regnext(scan); if ( next && (OP(next) != WHILEM) && next < last) is_par = 0; /* Disable optimization */ } if (data) *(data->last_closep) = ARG(scan); } else if (OP(scan) == EVAL) { if (data) data->flags |= SF_HAS_EVAL; } else if ( PL_regkind[OP(scan)] == ENDLIKE ) { if (flags & SCF_DO_SUBSTR) { scan_commit(pRExC_state, data, minlenp, is_inf); flags &= ~SCF_DO_SUBSTR; } if 
                (data && OP(scan) == ACCEPT) {
                data->flags |= SCF_SEEN_ACCEPT;
                if (stopmin > min)
                    stopmin = min;
            }
        }
        else if (OP(scan) == LOGICAL && scan->flags == 2) /* Embedded follows */
        {
                if (flags & SCF_DO_SUBSTR) {
                    scan_commit(pRExC_state, data, minlenp, is_inf);
                    data->cur_is_floating = 1; /* float */
                }
                is_inf = is_inf_internal = 1;
                if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
                    ssc_anything(data->start_class);
                flags &= ~SCF_DO_STCLASS;
        }
        else if (OP(scan) == GPOS) {
            if (!(RExC_rx->intflags & PREGf_GPOS_FLOAT) &&
                !(delta || is_inf || (data && data->pos_delta)))
            {
                if (!(RExC_rx->intflags & PREGf_ANCH) && (flags & SCF_DO_SUBSTR))
                    RExC_rx->intflags |= PREGf_ANCH_GPOS;
                if (RExC_rx->gofs < (STRLEN)min)
                    RExC_rx->gofs = min;
            } else {
                RExC_rx->intflags |= PREGf_GPOS_FLOAT;
                RExC_rx->gofs = 0;
            }
        }
#ifdef TRIE_STUDY_OPT
#ifdef FULL_TRIE_STUDY
        else if (PL_regkind[OP(scan)] == TRIE) {
            /* NOTE - There is similar code to this block above for handling
               BRANCH nodes on the initial study.  If you change stuff here
               check there too. */
            regnode *trie_node = scan;
            regnode *tail = regnext(scan);
            reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
            SSize_t max1 = 0, min1 = SSize_t_MAX;
            regnode_ssc accum;

            if (flags & SCF_DO_SUBSTR) { /* XXXX Add !SUSPEND? */
                /* Cannot merge strings after this. */
                scan_commit(pRExC_state, data, minlenp, is_inf);
            }
            if (flags & SCF_DO_STCLASS)
                ssc_init_zero(pRExC_state, &accum);

            if (!trie->jump) {
                min1 = trie->minlen;
                max1 = trie->maxlen;
            } else {
                const regnode *nextbranch = NULL;
                U32 word;

                for ( word = 1 ; word <= trie->wordcount ; word++)
                {
                    SSize_t deltanext = 0, minnext = 0, f = 0, fake;
                    regnode_ssc this_class;

                    StructCopy(&zero_scan_data, &data_fake, scan_data_t);
                    if (data) {
                        data_fake.whilem_c = data->whilem_c;
                        data_fake.last_closep = data->last_closep;
                    }
                    else
                        data_fake.last_closep = &fake;
                    data_fake.pos_delta = delta;
                    if (flags & SCF_DO_STCLASS) {
                        ssc_init(pRExC_state, &this_class);
                        data_fake.start_class = &this_class;
                        f = SCF_DO_STCLASS_AND;
                    }
                    if (flags & SCF_WHILEM_VISITED_POS)
                        f |= SCF_WHILEM_VISITED_POS;

                    if (trie->jump[word]) {
                        if (!nextbranch)
                            nextbranch = trie_node + trie->jump[0];
                        scan = trie_node + trie->jump[word];
                        /* We go from the jump point to the branch that follows
                           it. Note this means we need the vestigial unused
                           branches even though they aren't otherwise used.
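                           (A sketch, using a hypothetical pattern: for a jump
                           trie built from /foox+|bary+/, trie->jump[word]
                           offsets from the trie node to that word's tail
                           program; studying from there to the next branch
                           folds each tail's min/max into min1 and max1.)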
*/ minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext, (regnode *)nextbranch, &data_fake, stopparen, recursed_depth, NULL, f,depth+1); } if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH) nextbranch= regnext((regnode*)nextbranch); if (min1 > (SSize_t)(minnext + trie->minlen)) min1 = minnext + trie->minlen; if (deltanext == SSize_t_MAX) { is_inf = is_inf_internal = 1; max1 = SSize_t_MAX; } else if (max1 < (SSize_t)(minnext + deltanext + trie->maxlen)) max1 = minnext + deltanext + trie->maxlen; if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (data_fake.flags & SCF_SEEN_ACCEPT) { if ( stopmin > min + min1) stopmin = min + min1; flags &= ~SCF_DO_SUBSTR; if (data) data->flags |= SCF_SEEN_ACCEPT; } if (data) { if (data_fake.flags & SF_HAS_EVAL) data->flags |= SF_HAS_EVAL; data->whilem_c = data_fake.whilem_c; } if (flags & SCF_DO_STCLASS) ssc_or(pRExC_state, &accum, (regnode_charclass *) &this_class); } } if (flags & SCF_DO_SUBSTR) { data->pos_min += min1; data->pos_delta += max1 - min1; if (max1 != min1 || is_inf) data->cur_is_floating = 1; /* float */ } min += min1; if (delta != SSize_t_MAX) delta += max1 - min1; if (flags & SCF_DO_STCLASS_OR) { ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &accum); if (min1) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); flags &= ~SCF_DO_STCLASS; } } else if (flags & SCF_DO_STCLASS_AND) { if (min1) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum); flags &= ~SCF_DO_STCLASS; } else { /* Switch to OR mode: cache the old value of * data->start_class */ INIT_AND_WITHP; StructCopy(data->start_class, and_withp, regnode_ssc); flags &= ~SCF_DO_STCLASS_AND; StructCopy(&accum, data->start_class, regnode_ssc); flags |= SCF_DO_STCLASS_OR; } } scan= tail; continue; } #else else if (PL_regkind[OP(scan)] == TRIE) { reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ]; U8*bang=NULL; min += trie->minlen; delta += (trie->maxlen - trie->minlen); flags &= ~SCF_DO_STCLASS; /* xxx */ if (flags & SCF_DO_SUBSTR) { /* Cannot expect anything... */ scan_commit(pRExC_state, data, minlenp, is_inf); data->pos_min += trie->minlen; data->pos_delta += (trie->maxlen - trie->minlen); if (trie->maxlen != trie->minlen) data->cur_is_floating = 1; /* float */ } if (trie->jump) /* no more substrings -- for now /grr*/ flags &= ~SCF_DO_SUBSTR; } #endif /* old or new */ #endif /* TRIE_STUDY_OPT */ /* Else: zero-length, ignore. */ scan = regnext(scan); } finish: if (frame) { /* we need to unwind recursion. */ depth = depth - 1; DEBUG_STUDYDATA("frame-end", data, depth, is_inf); DEBUG_PEEP("fend", scan, depth, flags); /* restore previous context */ last = frame->last_regnode; scan = frame->next_regnode; stopparen = frame->stopparen; recursed_depth = frame->prev_recursed_depth; RExC_frame_last = frame->prev_frame; frame = frame->this_prev_frame; goto fake_study_recurse; } assert(!frame); DEBUG_STUDYDATA("pre-fin", data, depth, is_inf); *scanp = scan; *deltap = is_inf_internal ? 
SSize_t_MAX : delta; if (flags & SCF_DO_SUBSTR && is_inf) data->pos_delta = SSize_t_MAX - data->pos_min; if (is_par > (I32)U8_MAX) is_par = 0; if (is_par && pars==1 && data) { data->flags |= SF_IN_PAR; data->flags &= ~SF_HAS_PAR; } else if (pars && data) { data->flags |= SF_HAS_PAR; data->flags &= ~SF_IN_PAR; } if (flags & SCF_DO_STCLASS_OR) ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); if (flags & SCF_TRIE_RESTUDY) data->flags |= SCF_TRIE_RESTUDY; DEBUG_STUDYDATA("post-fin", data, depth, is_inf); { SSize_t final_minlen= min < stopmin ? min : stopmin; if (!(RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN)) { if (final_minlen > SSize_t_MAX - delta) RExC_maxlen = SSize_t_MAX; else if (RExC_maxlen < final_minlen + delta) RExC_maxlen = final_minlen + delta; } return final_minlen; } NOT_REACHED; /* NOTREACHED */ } STATIC U32 S_add_data(RExC_state_t* const pRExC_state, const char* const s, const U32 n) { U32 count = RExC_rxi->data ? RExC_rxi->data->count : 0; PERL_ARGS_ASSERT_ADD_DATA; Renewc(RExC_rxi->data, sizeof(*RExC_rxi->data) + sizeof(void*) * (count + n - 1), char, struct reg_data); if(count) Renew(RExC_rxi->data->what, count + n, U8); else Newx(RExC_rxi->data->what, n, U8); RExC_rxi->data->count = count + n; Copy(s, RExC_rxi->data->what + count, n, U8); return count; } /*XXX: todo make this not included in a non debugging perl, but appears to be * used anyway there, in 'use re' */ #ifndef PERL_IN_XSUB_RE void Perl_reginitcolors(pTHX) { const char * const s = PerlEnv_getenv("PERL_RE_COLORS"); if (s) { char *t = savepv(s); int i = 0; PL_colors[0] = t; while (++i < 6) { t = strchr(t, '\t'); if (t) { *t = '\0'; PL_colors[i] = ++t; } else PL_colors[i] = t = (char *)""; } } else { int i = 0; while (i < 6) PL_colors[i++] = (char *)""; } PL_colorset = 1; } #endif #ifdef TRIE_STUDY_OPT #define CHECK_RESTUDY_GOTO_butfirst(dOsomething) \ STMT_START { \ if ( \ (data.flags & SCF_TRIE_RESTUDY) \ && ! restudied++ \ ) { \ dOsomething; \ goto reStudy; \ } \ } STMT_END #else #define CHECK_RESTUDY_GOTO_butfirst #endif /* * pregcomp - compile a regular expression into internal code * * Decides which engine's compiler to call based on the hint currently in * scope */ #ifndef PERL_IN_XSUB_RE /* return the currently in-scope regex engine (or the default if none) */ regexp_engine const * Perl_current_re_engine(pTHX) { if (IN_PERL_COMPILETIME) { HV * const table = GvHV(PL_hintgv); SV **ptr; if (!table || !(PL_hints & HINT_LOCALIZE_HH)) return &PL_core_reg_engine; ptr = hv_fetchs(table, "regcomp", FALSE); if ( !(ptr && SvIOK(*ptr) && SvIV(*ptr))) return &PL_core_reg_engine; return INT2PTR(regexp_engine*,SvIV(*ptr)); } else { SV *ptr; if (!PL_curcop->cop_hints_hash) return &PL_core_reg_engine; ptr = cop_hints_fetch_pvs(PL_curcop, "regcomp", 0); if ( !(ptr && SvIOK(ptr) && SvIV(ptr))) return &PL_core_reg_engine; return INT2PTR(regexp_engine*,SvIV(ptr)); } } REGEXP * Perl_pregcomp(pTHX_ SV * const pattern, const U32 flags) { regexp_engine const *eng = current_re_engine(); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_PREGCOMP; /* Dispatch a request to compile a regexp to correct regexp engine. */ DEBUG_COMPILE_r({ Perl_re_printf( aTHX_ "Using engine %" UVxf "\n", PTR2UV(eng)); }); return CALLREGCOMP_ENG(eng, pattern, flags); } #endif /* public(ish) entry point for the perl core's own regex compiling code. 
* It's actually a wrapper for Perl_re_op_compile that only takes an SV * pattern rather than a list of OPs, and uses the internal engine rather * than the current one */ REGEXP * Perl_re_compile(pTHX_ SV * const pattern, U32 rx_flags) { SV *pat = pattern; /* defeat constness! */ PERL_ARGS_ASSERT_RE_COMPILE; return Perl_re_op_compile(aTHX_ &pat, 1, NULL, #ifdef PERL_IN_XSUB_RE &my_reg_engine, #else &PL_core_reg_engine, #endif NULL, NULL, rx_flags, 0); } static void S_free_codeblocks(pTHX_ struct reg_code_blocks *cbs) { int n; if (--cbs->refcnt > 0) return; for (n = 0; n < cbs->count; n++) { REGEXP *rx = cbs->cb[n].src_regex; cbs->cb[n].src_regex = NULL; SvREFCNT_dec(rx); } Safefree(cbs->cb); Safefree(cbs); } static struct reg_code_blocks * S_alloc_code_blocks(pTHX_ int ncode) { struct reg_code_blocks *cbs; Newx(cbs, 1, struct reg_code_blocks); cbs->count = ncode; cbs->refcnt = 1; SAVEDESTRUCTOR_X(S_free_codeblocks, cbs); if (ncode) Newx(cbs->cb, ncode, struct reg_code_block); else cbs->cb = NULL; return cbs; } /* upgrade pattern pat_p of length plen_p to UTF8, and if there are code * blocks, recalculate the indices. Update pat_p and plen_p in-place to * point to the realloced string and length. * * This is essentially a copy of Perl_bytes_to_utf8() with the code index * stuff added */ static void S_pat_upgrade_to_utf8(pTHX_ RExC_state_t * const pRExC_state, char **pat_p, STRLEN *plen_p, int num_code_blocks) { U8 *const src = (U8*)*pat_p; U8 *dst, *d; int n=0; STRLEN s = 0; bool do_end = 0; GET_RE_DEBUG_FLAGS_DECL; DEBUG_PARSE_r(Perl_re_printf( aTHX_ "UTF8 mismatch! Converting to utf8 for resizing and compile\n")); Newx(dst, *plen_p * 2 + 1, U8); d = dst; while (s < *plen_p) { append_utf8_from_native_byte(src[s], &d); if (n < num_code_blocks) { assert(pRExC_state->code_blocks); if (!do_end && pRExC_state->code_blocks->cb[n].start == s) { pRExC_state->code_blocks->cb[n].start = d - dst - 1; assert(*(d - 1) == '('); do_end = 1; } else if (do_end && pRExC_state->code_blocks->cb[n].end == s) { pRExC_state->code_blocks->cb[n].end = d - dst - 1; assert(*(d - 1) == ')'); do_end = 0; n++; } } s++; } *d = '\0'; *plen_p = d - dst; *pat_p = (char*) dst; SAVEFREEPV(*pat_p); RExC_orig_utf8 = RExC_utf8 = 1; } /* S_concat_pat(): concatenate a list of args to the pattern string pat, * while recording any code block indices, and handling overloading, * nested qr// objects etc. If pat is null, it will allocate a new * string, or just return the first arg, if there's only one. * * Returns the malloced/updated pat. * patternp and pat_count is the array of SVs to be concatted; * oplist is the optional list of ops that generated the SVs; * recompile_p is a pointer to a boolean that will be set if * the regex will need to be recompiled. * delim, if non-null is an SV that will be inserted between each element */ static SV* S_concat_pat(pTHX_ RExC_state_t * const pRExC_state, SV *pat, SV ** const patternp, int pat_count, OP *oplist, bool *recompile_p, SV *delim) { SV **svp; int n = 0; bool use_delim = FALSE; bool alloced = FALSE; /* if we know we have at least two args, create an empty string, * then concatenate args to that. For no args, return an empty string */ if (!pat && pat_count != 1) { pat = newSVpvs(""); SAVEFREESV(pat); alloced = TRUE; } for (svp = patternp; svp < patternp + pat_count; svp++) { SV *sv; SV *rx = NULL; STRLEN orig_patlen = 0; bool code = 0; SV *msv = use_delim ? 
                      delim : *svp;
        if (!msv)
            msv = &PL_sv_undef;

        /* if we've got a delimiter, we go round the loop twice for each
         * svp slot (except the last), using the delimiter the second
         * time round */
        if (use_delim) {
            svp--;
            use_delim = FALSE;
        }
        else if (delim)
            use_delim = TRUE;

        if (SvTYPE(msv) == SVt_PVAV) {
            /* we've encountered an interpolated array within
             * the pattern, e.g. /...@a..../. Expand the list of elements,
             * then recursively append elements.
             * The code in this block is based on S_pushav() */

            AV *const av = (AV*)msv;
            const SSize_t maxarg = AvFILL(av) + 1;
            SV **array;

            if (oplist) {
                assert(oplist->op_type == OP_PADAV
                    || oplist->op_type == OP_RV2AV);
                oplist = OpSIBLING(oplist);
            }

            if (SvRMAGICAL(av)) {
                SSize_t i;

                Newx(array, maxarg, SV*);
                SAVEFREEPV(array);
                for (i = 0; i < maxarg; i++) {
                    SV ** const svp = av_fetch(av, i, FALSE);
                    array[i] = svp ? *svp : &PL_sv_undef;
                }
            }
            else
                array = AvARRAY(av);

            pat = S_concat_pat(aTHX_ pRExC_state, pat,
                                array, maxarg, NULL, recompile_p,
                                /* $" */
                                GvSV((gv_fetchpvs("\"", GV_ADDMULTI, SVt_PV))));

            continue;
        }

        /* we make the assumption here that each op in the list of
         * op_siblings maps to one SV pushed onto the stack,
         * except for code blocks, which have both an OP_NULL and
         * an OP_CONST.
         * This allows us to match up the list of SVs against the
         * list of OPs to find the next code block.
         *
         * Note that       PUSHMARK PADSV PADSV ..
         * is optimised to
         *                 PADRANGE PADSV  PADSV  ..
         * so the alignment still works. */

        if (oplist) {
            if (oplist->op_type == OP_NULL
                && (oplist->op_flags & OPf_SPECIAL))
            {
                assert(n < pRExC_state->code_blocks->count);
                pRExC_state->code_blocks->cb[n].start = pat ? SvCUR(pat) : 0;
                pRExC_state->code_blocks->cb[n].block = oplist;
                pRExC_state->code_blocks->cb[n].src_regex = NULL;
                n++;
                code = 1;
                oplist = OpSIBLING(oplist); /* skip CONST */
                assert(oplist);
            }

            oplist = OpSIBLING(oplist);
        }

        /* apply magic and QR overloading to arg */

        SvGETMAGIC(msv);
        if (SvROK(msv) && SvAMAGIC(msv)) {
            SV *sv = AMG_CALLunary(msv, regexp_amg);
            if (sv) {
                if (SvROK(sv))
                    sv = SvRV(sv);
                if (SvTYPE(sv) != SVt_REGEXP)
                    Perl_croak(aTHX_ "Overloaded qr did not return a REGEXP");
                msv = sv;
            }
        }

        /* try concatenation overload ... */
        if (pat && (SvAMAGIC(pat) || SvAMAGIC(msv)) &&
                (sv = amagic_call(pat, msv, concat_amg, AMGf_assign)))
        {
            sv_setsv(pat, sv);
            /* overloading involved: all bets are off over literal
             * code. Pretend we haven't seen it */
            if (n)
                pRExC_state->code_blocks->count -= n;
            n = 0;
        }
        else {
            /* ... or failing that, try "" overload */
            while (SvAMAGIC(msv)
                    && (sv = AMG_CALLunary(msv, string_amg))
                    && sv != msv
                    && !(   SvROK(msv)
                         && SvROK(sv)
                         && SvRV(msv) == SvRV(sv))
            ) {
                msv = sv;
                SvGETMAGIC(msv);
            }
            if (SvROK(msv) && SvTYPE(SvRV(msv)) == SVt_REGEXP)
                msv = SvRV(msv);

            if (pat) {
                /* this is a partially unrolled
                 *     sv_catsv_nomg(pat, msv);
                 * that allows us to adjust code block indices if
                 * needed */
                STRLEN dlen;
                char *dst = SvPV_force_nomg(pat, dlen);
                orig_patlen = dlen;
                if (SvUTF8(msv) && !SvUTF8(pat)) {
                    S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &dst, &dlen, n);
                    sv_setpvn(pat, dst, dlen);
                    SvUTF8_on(pat);
                }
                sv_catsv_nomg(pat, msv);
                rx = msv;
            }
            else {
                /* We have only one SV to process, but we need to verify
                 * it is properly NUL terminated or we will fail asserts
                 * later. In theory we probably shouldn't get such SV's,
                 * but if we do we should handle it gracefully.
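                 * (Background note, a sketch of intent: a normal PV keeps a
                 * spare NUL at SvEND, but an SV built by XS code or of a
                 * non-PV type may not, hence the SvLEN/SvCUR probe and the
                 * defensive copy just below.)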
                 */
                if ( SvTYPE(msv) != SVt_PV
                     || (SvLEN(msv) > SvCUR(msv) && *(SvEND(msv)) == 0) )
                {
                    /* not a string, or a string with a trailing null */
                    pat = msv;
                }
                else {
                    /* a string with no trailing null, we need to copy it
                     * so that we have a trailing null */
                    pat = newSVsv(msv);
                }
            }

            if (code)
                pRExC_state->code_blocks->cb[n-1].end = SvCUR(pat)-1;
        }

        /* extract any code blocks within any embedded qr//'s */
        if (rx && SvTYPE(rx) == SVt_REGEXP
            && RX_ENGINE((REGEXP*)rx)->op_comp)
        {
            RXi_GET_DECL(ReANY((REGEXP *)rx), ri);
            if (ri->code_blocks && ri->code_blocks->count) {
                int i;
                /* the presence of an embedded qr// with code means
                 * we should always recompile: the text of the
                 * qr// may not have changed, but it may be a
                 * different closure than last time */
                *recompile_p = 1;
                if (pRExC_state->code_blocks) {
                    int new_count = pRExC_state->code_blocks->count
                            + ri->code_blocks->count;
                    Renew(pRExC_state->code_blocks->cb,
                            new_count, struct reg_code_block);
                    pRExC_state->code_blocks->count = new_count;
                }
                else
                    pRExC_state->code_blocks = S_alloc_code_blocks(aTHX_
                                                    ri->code_blocks->count);

                for (i = 0; i < ri->code_blocks->count; i++) {
                    struct reg_code_block *src, *dst;
                    STRLEN offset = orig_patlen
                        + ReANY((REGEXP *)rx)->pre_prefix;
                    assert(n < pRExC_state->code_blocks->count);
                    src = &ri->code_blocks->cb[i];
                    dst = &pRExC_state->code_blocks->cb[n];
                    dst->start      = src->start + offset;
                    dst->end        = src->end   + offset;
                    dst->block      = src->block;
                    dst->src_regex  = (REGEXP*) SvREFCNT_inc( (SV*)
                                            src->src_regex
                                                ? src->src_regex
                                                : (REGEXP*)rx);
                    n++;
                }
            }
        }
    }
    /* avoid calling magic multiple times on a single element e.g. =~ $qr */
    if (alloced)
        SvSETMAGIC(pat);

    return pat;
}


/* see if there are any run-time code blocks in the pattern.
 * False positives are allowed */

static bool
S_has_runtime_code(pTHX_ RExC_state_t * const pRExC_state,
                    char *pat, STRLEN plen)
{
    int n = 0;
    STRLEN s;

    PERL_UNUSED_CONTEXT;

    for (s = 0; s < plen; s++) {
        if (   pRExC_state->code_blocks
            && n < pRExC_state->code_blocks->count
            && s == pRExC_state->code_blocks->cb[n].start)
        {
            s = pRExC_state->code_blocks->cb[n].end;
            n++;
            continue;
        }
        /* TODO ideally should handle [..], (#..), /#.../x to reduce false
         * positives here */
        if (pat[s] == '(' && s+2 <= plen && pat[s+1] == '?'
            && (pat[s+2] == '{'
                || (s + 2 <= plen && pat[s+2] == '?' && pat[s+3] == '{'))
        )
            return 1;
    }
    return 0;
}

/* Handle run-time code blocks. We will already have compiled any direct
 * or indirect literal code blocks. Now, take the pattern 'pat' and make a
 * copy of it, but with any literal code blocks blanked out and
 * appropriate chars escaped; then feed it into
 *
 *    eval "qr'modified_pattern'"
 *
 * For example,
 *
 *    a\bc(?{"this was literal"})def'ghi\\jkl(?{"this is runtime"})mno
 *
 * becomes
 *
 *    qr'a\\bc_______________________def\'ghi\\\\jkl(?{"this is runtime"})mno'
 *
 * After eval_sv()-ing that, grab any new code blocks from the returned qr
 * and merge them with any code blocks of the original regexp.
 *
 * If the pat is non-UTF8, while the evalled qr is UTF8, don't merge;
 * instead, just save the qr and return FALSE; this tells our caller that
 * the original pattern needs upgrading to utf8.
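 *
 * (Size arithmetic sketch for the rewrite below: newlen = plen + 7 reserves
 * room for the fixed "qr''xx\0" wrapper, and the first scan over the pattern
 * adds one extra byte for every ' or \ that will need escaping.)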
*/ static bool S_compile_runtime_code(pTHX_ RExC_state_t * const pRExC_state, char *pat, STRLEN plen) { SV *qr; GET_RE_DEBUG_FLAGS_DECL; if (pRExC_state->runtime_code_qr) { /* this is the second time we've been called; this should * only happen if the main pattern got upgraded to utf8 * during compilation; re-use the qr we compiled first time * round (which should be utf8 too) */ qr = pRExC_state->runtime_code_qr; pRExC_state->runtime_code_qr = NULL; assert(RExC_utf8 && SvUTF8(qr)); } else { int n = 0; STRLEN s; char *p, *newpat; int newlen = plen + 7; /* allow for "qr''xx\0" extra chars */ SV *sv, *qr_ref; dSP; /* determine how many extra chars we need for ' and \ escaping */ for (s = 0; s < plen; s++) { if (pat[s] == '\'' || pat[s] == '\\') newlen++; } Newx(newpat, newlen, char); p = newpat; *p++ = 'q'; *p++ = 'r'; *p++ = '\''; for (s = 0; s < plen; s++) { if ( pRExC_state->code_blocks && n < pRExC_state->code_blocks->count && s == pRExC_state->code_blocks->cb[n].start) { /* blank out literal code block */ assert(pat[s] == '('); while (s <= pRExC_state->code_blocks->cb[n].end) { *p++ = '_'; s++; } s--; n++; continue; } if (pat[s] == '\'' || pat[s] == '\\') *p++ = '\\'; *p++ = pat[s]; } *p++ = '\''; if (pRExC_state->pm_flags & RXf_PMf_EXTENDED) { *p++ = 'x'; if (pRExC_state->pm_flags & RXf_PMf_EXTENDED_MORE) { *p++ = 'x'; } } *p++ = '\0'; DEBUG_COMPILE_r({ Perl_re_printf( aTHX_ "%sre-parsing pattern for runtime code:%s %s\n", PL_colors[4],PL_colors[5],newpat); }); sv = newSVpvn_flags(newpat, p-newpat-1, RExC_utf8 ? SVf_UTF8 : 0); Safefree(newpat); ENTER; SAVETMPS; save_re_context(); PUSHSTACKi(PERLSI_REQUIRE); /* G_RE_REPARSING causes the toker to collapse \\ into \ when * parsing qr''; normally only q'' does this. It also alters * hints handling */ eval_sv(sv, G_SCALAR|G_RE_REPARSING); SvREFCNT_dec_NN(sv); SPAGAIN; qr_ref = POPs; PUTBACK; { SV * const errsv = ERRSV; if (SvTRUE_NN(errsv)) /* use croak_sv ? */ Perl_croak_nocontext("%" SVf, SVfARG(errsv)); } assert(SvROK(qr_ref)); qr = SvRV(qr_ref); assert(SvTYPE(qr) == SVt_REGEXP && RX_ENGINE((REGEXP*)qr)->op_comp); /* the leaving below frees the tmp qr_ref. 
* Give qr a life of its own */ SvREFCNT_inc(qr); POPSTACK; FREETMPS; LEAVE; } if (!RExC_utf8 && SvUTF8(qr)) { /* first time through; the pattern got upgraded; save the * qr for the next time through */ assert(!pRExC_state->runtime_code_qr); pRExC_state->runtime_code_qr = qr; return 0; } /* extract any code blocks within the returned qr// */ /* merge the main (r1) and run-time (r2) code blocks into one */ { RXi_GET_DECL(ReANY((REGEXP *)qr), r2); struct reg_code_block *new_block, *dst; RExC_state_t * const r1 = pRExC_state; /* convenient alias */ int i1 = 0, i2 = 0; int r1c, r2c; if (!r2->code_blocks || !r2->code_blocks->count) /* we guessed wrong */ { SvREFCNT_dec_NN(qr); return 1; } if (!r1->code_blocks) r1->code_blocks = S_alloc_code_blocks(aTHX_ 0); r1c = r1->code_blocks->count; r2c = r2->code_blocks->count; Newx(new_block, r1c + r2c, struct reg_code_block); dst = new_block; while (i1 < r1c || i2 < r2c) { struct reg_code_block *src; bool is_qr = 0; if (i1 == r1c) { src = &r2->code_blocks->cb[i2++]; is_qr = 1; } else if (i2 == r2c) src = &r1->code_blocks->cb[i1++]; else if ( r1->code_blocks->cb[i1].start < r2->code_blocks->cb[i2].start) { src = &r1->code_blocks->cb[i1++]; assert(src->end < r2->code_blocks->cb[i2].start); } else { assert( r1->code_blocks->cb[i1].start > r2->code_blocks->cb[i2].start); src = &r2->code_blocks->cb[i2++]; is_qr = 1; assert(src->end < r1->code_blocks->cb[i1].start); } assert(pat[src->start] == '('); assert(pat[src->end] == ')'); dst->start = src->start; dst->end = src->end; dst->block = src->block; dst->src_regex = is_qr ? (REGEXP*) SvREFCNT_inc( (SV*) qr) : src->src_regex; dst++; } r1->code_blocks->count += r2c; Safefree(r1->code_blocks->cb); r1->code_blocks->cb = new_block; } SvREFCNT_dec_NN(qr); return 1; } STATIC bool S_setup_longest(pTHX_ RExC_state_t *pRExC_state, struct reg_substr_datum *rsd, struct scan_data_substrs *sub, STRLEN longest_length) { /* This is the common code for setting up the floating and fixed length * string data extracted from Perl_re_op_compile() below. Returns a boolean * as to whether it succeeded or not */ I32 t; SSize_t ml; bool eol = cBOOL(sub->flags & SF_BEFORE_EOL); bool meol = cBOOL(sub->flags & SF_BEFORE_MEOL); if (! (longest_length || (eol /* Can't have SEOL and MULTI */ && (! meol || (RExC_flags & RXf_PMf_MULTILINE))) ) /* See comments for join_exact for why REG_UNFOLDED_MULTI_SEEN */ || (RExC_seen & REG_UNFOLDED_MULTI_SEEN)) { return FALSE; } /* copy the information about the longest from the reg_scan_data over to the program. */ if (SvUTF8(sub->str)) { rsd->substr = NULL; rsd->utf8_substr = sub->str; } else { rsd->substr = sub->str; rsd->utf8_substr = NULL; } /* end_shift is how many chars must be matched that follow this item. We calculate it ahead of time as once the lookbehind offset is added in we lose the ability to correctly calculate it.*/ ml = sub->minlenp ? *(sub->minlenp) : (SSize_t)longest_length; rsd->end_shift = ml - sub->min_offset - longest_length /* XXX SvTAIL is always false here - did you mean FBMcf_TAIL * instead? - DAPM + (SvTAIL(sub->str) != 0) */ + sub->lookbehind; t = (eol/* Can't have SEOL and MULTI */ && (! meol || (RExC_flags & RXf_PMf_MULTILINE))); fbm_compile(sub->str, t ? FBMcf_TAIL : 0); return TRUE; } /* * Perl_re_op_compile - the perl internal RE engine's function to compile a * regular expression into internal code.
* The pattern may be passed either as: * a list of SVs (patternp plus pat_count) * a list of OPs (expr) * If both are passed, the SV list is used, but the OP list indicates * which SVs are actually pre-compiled code blocks * * The SVs in the list have magic and qr overloading applied to them (and * the list may be modified in-place with replacement SVs in the latter * case). * * If the pattern hasn't changed from old_re, then old_re will be * returned. * * eng is the current engine. If that engine has an op_comp method, then * handle directly (i.e. we assume that op_comp was us); otherwise, just * do the initial concatenation of arguments and pass on to the external * engine. * * If is_bare_re is not null, set it to a boolean indicating whether the * arg list reduced (after overloading) to a single bare regex which has * been returned (i.e. /$qr/). * * orig_rx_flags contains RXf_* flags. See perlreapi.pod for more details. * * pm_flags contains the PMf_* flags, typically based on those from the * pm_flags field of the related PMOP. Currently we're only interested in * PMf_HAS_CV, PMf_IS_QR, PMf_USE_RE_EVAL. * * We can't allocate space until we know how big the compiled form will be, * but we can't compile it (and thus know how big it is) until we've got a * place to put the code. So we cheat: we compile it twice, once with code * generation turned off and size counting turned on, and once "for real". * This also means that we don't allocate space until we are sure that the * thing really will compile successfully, and we never have to move the * code and thus invalidate pointers into it. (Note that it has to be in * one piece because free() must be able to free it all.) [NB: not true in perl] * * Beware that the optimization-preparation code in here knows about some * of the structure of the compiled regexp. [I'll say.] */ REGEXP * Perl_re_op_compile(pTHX_ SV ** const patternp, int pat_count, OP *expr, const regexp_engine* eng, REGEXP *old_re, bool *is_bare_re, U32 orig_rx_flags, U32 pm_flags) { REGEXP *rx; struct regexp *r; regexp_internal *ri; STRLEN plen; char *exp; regnode *scan; I32 flags; SSize_t minlen = 0; U32 rx_flags; SV *pat; SV** new_patternp = patternp; /* these are all flags - maybe they should be turned * into a single int with different bit masks */ I32 sawlookahead = 0; I32 sawplus = 0; I32 sawopen = 0; I32 sawminmod = 0; regex_charset initial_charset = get_regex_charset(orig_rx_flags); bool recompile = 0; bool runtime_code = 0; scan_data_t data; RExC_state_t RExC_state; RExC_state_t * const pRExC_state = &RExC_state; #ifdef TRIE_STUDY_OPT int restudied = 0; RExC_state_t copyRExC_state; #endif GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_RE_OP_COMPILE; DEBUG_r(if (!PL_colorset) reginitcolors()); /* Initialize these here instead of as-needed, as is quick and avoids * having to test them each time otherwise */ if (! 
PL_AboveLatin1) { #ifdef DEBUGGING char * dump_len_string; #endif PL_AboveLatin1 = _new_invlist_C_array(AboveLatin1_invlist); PL_Latin1 = _new_invlist_C_array(Latin1_invlist); PL_UpperLatin1 = _new_invlist_C_array(UpperLatin1_invlist); PL_utf8_foldable = _new_invlist_C_array(_Perl_Any_Folds_invlist); PL_HasMultiCharFold = _new_invlist_C_array(_Perl_Folds_To_Multi_Char_invlist); /* This is calculated here, because the Perl program that generates the * static global ones doesn't currently have access to * NUM_ANYOF_CODE_POINTS */ PL_InBitmap = _new_invlist(2); PL_InBitmap = _add_range_to_invlist(PL_InBitmap, 0, NUM_ANYOF_CODE_POINTS - 1); #ifdef DEBUGGING dump_len_string = PerlEnv_getenv("PERL_DUMP_RE_MAX_LEN"); if ( ! dump_len_string || ! grok_atoUV(dump_len_string, (UV *)&PL_dump_re_max_len, NULL)) { PL_dump_re_max_len = 60; /* A reasonable default */ } #endif } pRExC_state->warn_text = NULL; pRExC_state->code_blocks = NULL; if (is_bare_re) *is_bare_re = FALSE; if (expr && (expr->op_type == OP_LIST || (expr->op_type == OP_NULL && expr->op_targ == OP_LIST))) { /* allocate code_blocks if needed */ OP *o; int ncode = 0; for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) if (o->op_type == OP_NULL && (o->op_flags & OPf_SPECIAL)) ncode++; /* count of DO blocks */ if (ncode) pRExC_state->code_blocks = S_alloc_code_blocks(aTHX_ ncode); } if (!pat_count) { /* compile-time pattern with just OP_CONSTs and DO blocks */ int n; OP *o; /* find how many CONSTs there are */ assert(expr); n = 0; if (expr->op_type == OP_CONST) n = 1; else for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) { if (o->op_type == OP_CONST) n++; } /* fake up an SV array */ assert(!new_patternp); Newx(new_patternp, n, SV*); SAVEFREEPV(new_patternp); pat_count = n; n = 0; if (expr->op_type == OP_CONST) new_patternp[n] = cSVOPx_sv(expr); else for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) { if (o->op_type == OP_CONST) new_patternp[n++] = cSVOPo_sv; } } DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Assembling pattern from %d elements%s\n", pat_count, orig_rx_flags & RXf_SPLIT ? " for split" : "")); /* set expr to the first arg op */ if (pRExC_state->code_blocks && pRExC_state->code_blocks->count && expr->op_type != OP_CONST) { expr = cLISTOPx(expr)->op_first; assert( expr->op_type == OP_PUSHMARK || (expr->op_type == OP_NULL && expr->op_targ == OP_PUSHMARK) || expr->op_type == OP_PADRANGE); expr = OpSIBLING(expr); } pat = S_concat_pat(aTHX_ pRExC_state, NULL, new_patternp, pat_count, expr, &recompile, NULL); /* handle bare (possibly after overloading) regex: foo =~ $re */ { SV *re = pat; if (SvROK(re)) re = SvRV(re); if (SvTYPE(re) == SVt_REGEXP) { if (is_bare_re) *is_bare_re = TRUE; SvREFCNT_inc(re); DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Precompiled pattern%s\n", orig_rx_flags & RXf_SPLIT ? " for split" : "")); return (REGEXP*)re; } } exp = SvPV_nomg(pat, plen); if (!eng->op_comp) { if ((SvUTF8(pat) && IN_BYTES) || SvGMAGICAL(pat) || SvAMAGIC(pat)) { /* make a temporary copy; either to convert to bytes, * or to avoid repeating get-magic / overloaded stringify */ pat = newSVpvn_flags(exp, plen, SVs_TEMP | (IN_BYTES ? 0 : SvUTF8(pat))); } return CALLREGCOMP_ENG(eng, pat, orig_rx_flags); } /* ignore the utf8ness if the pattern is 0 length */ RExC_utf8 = RExC_orig_utf8 = (plen == 0 || IN_BYTES) ? 
0 : SvUTF8(pat); RExC_uni_semantics = 0; RExC_seen_unfolded_sharp_s = 0; RExC_contains_locale = 0; RExC_strict = cBOOL(pm_flags & RXf_PMf_STRICT); RExC_study_started = 0; pRExC_state->runtime_code_qr = NULL; RExC_frame_head= NULL; RExC_frame_last= NULL; RExC_frame_count= 0; DEBUG_r({ RExC_mysv1= sv_newmortal(); RExC_mysv2= sv_newmortal(); }); DEBUG_COMPILE_r({ SV *dsv= sv_newmortal(); RE_PV_QUOTED_DECL(s, RExC_utf8, dsv, exp, plen, PL_dump_re_max_len); Perl_re_printf( aTHX_ "%sCompiling REx%s %s\n", PL_colors[4],PL_colors[5],s); }); redo_first_pass: /* we jump here if we have to recompile, e.g., from upgrading the pattern * to utf8 */ if ((pm_flags & PMf_USE_RE_EVAL) /* this second condition covers the non-regex literal case, * i.e. $foo =~ '(?{})'. */ || (IN_PERL_COMPILETIME && (PL_hints & HINT_RE_EVAL)) ) runtime_code = S_has_runtime_code(aTHX_ pRExC_state, exp, plen); /* return old regex if pattern hasn't changed */ /* XXX: note in the below we have to check the flags as well as the * pattern. * * Things get a touch tricky as we have to compare the utf8 flag * independently from the compile flags. */ if ( old_re && !recompile && !!RX_UTF8(old_re) == !!RExC_utf8 && ( RX_COMPFLAGS(old_re) == ( orig_rx_flags & RXf_PMf_FLAGCOPYMASK ) ) && RX_PRECOMP(old_re) && RX_PRELEN(old_re) == plen && memEQ(RX_PRECOMP(old_re), exp, plen) && !runtime_code /* with runtime code, always recompile */ ) { return old_re; } rx_flags = orig_rx_flags; if ( initial_charset == REGEX_DEPENDS_CHARSET && (RExC_utf8 ||RExC_uni_semantics)) { /* Set to use unicode semantics if the pattern is in utf8 and has the * 'depends' charset specified, as it means unicode when utf8 */ set_regex_charset(&rx_flags, REGEX_UNICODE_CHARSET); } RExC_precomp = exp; RExC_precomp_adj = 0; RExC_flags = rx_flags; RExC_pm_flags = pm_flags; if (runtime_code) { assert(TAINTING_get || !TAINT_get); if (TAINT_get) Perl_croak(aTHX_ "Eval-group in insecure regular expression"); if (!S_compile_runtime_code(aTHX_ pRExC_state, exp, plen)) { /* whoops, we have a non-utf8 pattern, whilst run-time code * got compiled as utf8. Try again with a utf8 pattern */ S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &exp, &plen, pRExC_state->code_blocks ? pRExC_state->code_blocks->count : 0); goto redo_first_pass; } } assert(!pRExC_state->runtime_code_qr); RExC_sawback = 0; RExC_seen = 0; RExC_maxlen = 0; RExC_in_lookbehind = 0; RExC_seen_zerolen = *exp == '^' ? -1 : 0; RExC_extralen = 0; #ifdef EBCDIC RExC_recode_x_to_native = 0; #endif RExC_in_multi_char_class = 0; /* First pass: determine size, legality. */ RExC_parse = exp; RExC_start = RExC_adjusted_start = exp; RExC_end = exp + plen; RExC_precomp_end = RExC_end; RExC_naughty = 0; RExC_npar = 1; RExC_nestroot = 0; RExC_size = 0L; RExC_emit = (regnode *) &RExC_emit_dummy; RExC_whilem_seen = 0; RExC_open_parens = NULL; RExC_close_parens = NULL; RExC_end_op = NULL; RExC_paren_names = NULL; #ifdef DEBUGGING RExC_paren_name_list = NULL; #endif RExC_recurse = NULL; RExC_study_chunk_recursed = NULL; RExC_study_chunk_recursed_bytes= 0; RExC_recurse_count = 0; pRExC_state->code_index = 0; /* This NUL is guaranteed because the pattern comes from an SV*, and the sv * code makes sure the final byte is an uncounted NUL. But should this * ever not be the case, lots of things could read beyond the end of the * buffer: loops like * while(isFOO(*RExC_parse)) RExC_parse++; * strchr(RExC_parse, "foo"); * etc. So it is worth noting. 
*/ assert(*RExC_end == '\0'); DEBUG_PARSE_r( Perl_re_printf( aTHX_ "Starting first pass (sizing)\n"); RExC_lastnum=0; RExC_lastparse=NULL; ); if (reg(pRExC_state, 0, &flags,1) == NULL) { /* It's possible to write a regexp in ascii that represents Unicode codepoints outside of the byte range, such as via \x{100}. If we detect such a sequence we have to convert the entire pattern to utf8 and then recompile, as our sizing calculation will have been based on 1 byte == 1 character, but we will need to use utf8 to encode at least some part of the pattern, and therefore must convert the whole thing. -- dmq */ if (flags & RESTART_PASS1) { if (flags & NEED_UTF8) { S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &exp, &plen, pRExC_state->code_blocks ? pRExC_state->code_blocks->count : 0); } else { DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Need to redo pass 1\n")); } goto redo_first_pass; } Perl_croak(aTHX_ "panic: reg returned NULL to re_op_compile for sizing pass, flags=%#" UVxf, (UV) flags); } DEBUG_PARSE_r({ Perl_re_printf( aTHX_ "Required size %" IVdf " nodes\n" "Starting second pass (creation)\n", (IV)RExC_size); RExC_lastnum=0; RExC_lastparse=NULL; }); /* The first pass could have found things that force Unicode semantics */ if ((RExC_utf8 || RExC_uni_semantics) && get_regex_charset(rx_flags) == REGEX_DEPENDS_CHARSET) { set_regex_charset(&rx_flags, REGEX_UNICODE_CHARSET); } /* Small enough for pointer-storage convention? If extralen==0, this means that we will not need long jumps. */ if (RExC_size >= 0x10000L && RExC_extralen) RExC_size += RExC_extralen; else RExC_extralen = 0; if (RExC_whilem_seen > 15) RExC_whilem_seen = 15; /* Allocate space and zero-initialize. Note the two-step process of zeroing when in debug mode; thus anything assigned has to happen after that */ rx = (REGEXP*) newSV_type(SVt_REGEXP); r = ReANY(rx); Newxc(ri, sizeof(regexp_internal) + (unsigned)RExC_size * sizeof(regnode), char, regexp_internal); if ( r == NULL || ri == NULL ) FAIL("Regexp out of space");
#ifdef DEBUGGING
/* avoid reading uninitialized memory in DEBUGGING code in study_chunk() */ Zero(ri, sizeof(regexp_internal) + (unsigned)RExC_size * sizeof(regnode), char);
#else
/* bulk initialize base fields with 0. */ Zero(ri, sizeof(regexp_internal), char);
#endif
/* non-zero initialization begins here */ RXi_SET( r, ri ); r->engine= eng; r->extflags = rx_flags; RXp_COMPFLAGS(r) = orig_rx_flags & RXf_PMf_FLAGCOPYMASK; if (pm_flags & PMf_IS_QR) { ri->code_blocks = pRExC_state->code_blocks; if (ri->code_blocks) ri->code_blocks->refcnt++; } { bool has_p = ((r->extflags & RXf_PMf_KEEPCOPY) == RXf_PMf_KEEPCOPY); bool has_charset = (get_regex_charset(r->extflags) != REGEX_DEPENDS_CHARSET); /* The caret is output if there are any defaults: if not all the STD * flags are set, or if no character set specifier is needed */ bool has_default = (((r->extflags & RXf_PMf_STD_PMMOD) != RXf_PMf_STD_PMMOD) || ! has_charset); bool has_runon = ((RExC_seen & REG_RUN_ON_COMMENT_SEEN) == REG_RUN_ON_COMMENT_SEEN); U8 reganch = (U8)((r->extflags & RXf_PMf_STD_PMMOD) >> RXf_PMf_STD_PMMOD_SHIFT); const char *fptr = STD_PAT_MODS; /*"msixxn"*/ char *p; /* We output all the necessary flags; we never output a minus, as all * those are defaults, so are * covered by the caret */ const STRLEN wraplen = plen + has_p + has_runon + has_default /* If needs a caret */ + PL_bitcount[reganch] /* 1 char for each set standard flag */ /* If needs a character set specifier */ + ((has_charset) ?
MAX_CHARSET_NAME_LENGTH : 0) + (sizeof("(?:)") - 1); /* make sure PL_bitcount bounds not exceeded */ assert(sizeof(STD_PAT_MODS) <= 8); p = sv_grow(MUTABLE_SV(rx), wraplen + 1); /* +1 for the ending NUL */ SvPOK_on(rx); if (RExC_utf8) SvFLAGS(rx) |= SVf_UTF8; *p++='('; *p++='?'; /* If a default, cover it using the caret */ if (has_default) { *p++= DEFAULT_PAT_MOD; } if (has_charset) { STRLEN len; const char* const name = get_regex_charset_name(r->extflags, &len); Copy(name, p, len, char); p += len; } if (has_p) *p++ = KEEPCOPY_PAT_MOD; /*'p'*/ { char ch; while((ch = *fptr++)) { if(reganch & 1) *p++ = ch; reganch >>= 1; } } *p++ = ':'; Copy(RExC_precomp, p, plen, char); assert ((RX_WRAPPED(rx) - p) < 16); r->pre_prefix = p - RX_WRAPPED(rx); p += plen; if (has_runon) *p++ = '\n'; *p++ = ')'; *p = 0; SvCUR_set(rx, p - RX_WRAPPED(rx)); } r->intflags = 0; r->nparens = RExC_npar - 1; /* set early to validate backrefs */ /* Useful during FAIL. */
#ifdef RE_TRACK_PATTERN_OFFSETS
Newxz(ri->u.offsets, 2*RExC_size+1, U32); /* MJD 20001228 */ DEBUG_OFFSETS_r(Perl_re_printf( aTHX_ "%s %" UVuf " bytes for offset annotations.\n", ri->u.offsets ? "Got" : "Couldn't get", (UV)((2*RExC_size+1) * sizeof(U32))));
#endif
SetProgLen(ri,RExC_size); RExC_rx_sv = rx; RExC_rx = r; RExC_rxi = ri; /* Second pass: emit code. */ RExC_flags = rx_flags; /* don't let top level (?i) bleed */ RExC_pm_flags = pm_flags; RExC_parse = exp; RExC_end = exp + plen; RExC_naughty = 0; RExC_emit_start = ri->program; RExC_emit = ri->program; RExC_emit_bound = ri->program + RExC_size + 1; pRExC_state->code_index = 0; *((char*) RExC_emit++) = (char) REG_MAGIC; /* setup various meta data about recursion, this all requires * RExC_npar to be correctly set, and a bit later on we clear it */ if (RExC_seen & REG_RECURSE_SEEN) { DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_ "%*s%*s Setting up open/close parens\n", 22, "| |", (int)(0 * 2 + 1), "")); /* setup RExC_open_parens, which holds the address of each * OPEN tag, and to make things simpler for the 0 index * the start of the program - this is used later for offsets */ Newxz(RExC_open_parens, RExC_npar,regnode *); SAVEFREEPV(RExC_open_parens); RExC_open_parens[0] = RExC_emit; /* setup RExC_close_parens, which holds the address of each * CLOSE tag, and to make things simpler for the 0 index * the end of the program - this is used later for offsets */ Newxz(RExC_close_parens, RExC_npar,regnode *); SAVEFREEPV(RExC_close_parens); /* we don't know where end op starts yet, so we don't * need to set RExC_close_parens[0] like we do RExC_open_parens[0] above */ /* Note, RExC_npar is 1 + the number of parens in a pattern. * So it's 1 if there are no parens. */ RExC_study_chunk_recursed_bytes= (RExC_npar >> 3) + ((RExC_npar & 0x07) != 0); Newx(RExC_study_chunk_recursed, RExC_study_chunk_recursed_bytes * RExC_npar, U8); SAVEFREEPV(RExC_study_chunk_recursed); } RExC_npar = 1; if (reg(pRExC_state, 0, &flags,1) == NULL) { ReREFCNT_dec(rx); Perl_croak(aTHX_ "panic: reg returned NULL to re_op_compile for generation pass, flags=%#" UVxf, (UV) flags); } DEBUG_OPTIMISE_r( Perl_re_printf( aTHX_ "Starting post parse optimization\n"); ); /* XXXX To minimize changes to RE engine we always allocate 3-units-long substrs field.
*/ Newx(r->substrs, 1, struct reg_substr_data); if (RExC_recurse_count) { Newxz(RExC_recurse,RExC_recurse_count,regnode *); SAVEFREEPV(RExC_recurse); } reStudy: r->minlen = minlen = sawlookahead = sawplus = sawopen = sawminmod = 0; DEBUG_r( RExC_study_chunk_recursed_count= 0; ); Zero(r->substrs, 1, struct reg_substr_data); if (RExC_study_chunk_recursed) { Zero(RExC_study_chunk_recursed, RExC_study_chunk_recursed_bytes * RExC_npar, U8); }
#ifdef TRIE_STUDY_OPT
if (!restudied) { StructCopy(&zero_scan_data, &data, scan_data_t); copyRExC_state = RExC_state; } else { U32 seen=RExC_seen; DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "Restudying\n")); RExC_state = copyRExC_state; if (seen & REG_TOP_LEVEL_BRANCHES_SEEN) RExC_seen |= REG_TOP_LEVEL_BRANCHES_SEEN; else RExC_seen &= ~REG_TOP_LEVEL_BRANCHES_SEEN; StructCopy(&zero_scan_data, &data, scan_data_t); }
#else
StructCopy(&zero_scan_data, &data, scan_data_t);
#endif
/* Dig out information for optimizations. */ r->extflags = RExC_flags; /* was pm_op */ /*dmq: removed as part of de-PMOP: pm->op_pmflags = RExC_flags; */ if (UTF) SvUTF8_on(rx); /* Unicode in it? */ ri->regstclass = NULL; if (RExC_naughty >= TOO_NAUGHTY) /* Probably an expensive pattern. */ r->intflags |= PREGf_NAUGHTY; scan = ri->program + 1; /* First BRANCH. */ /* testing for BRANCH here tells us whether there is "must appear" data in the pattern. If there is then we can use it for optimisations */ if (!(RExC_seen & REG_TOP_LEVEL_BRANCHES_SEEN)) { /* Only one top-level choice. */ SSize_t fake; STRLEN longest_length[2]; regnode_ssc ch_class; /* pointed to by data */ int stclass_flag; SSize_t last_close = 0; /* pointed to by data */ regnode *first= scan; regnode *first_next= regnext(first); int i; /* * Skip introductions and multiplicators >= 1 * so that we can extract the 'meat' of the pattern that must * match in the large if() sequence following. * NOTE that EXACT is NOT covered here, as it is normally * picked up by the optimiser separately. * * This is unfortunate as the optimiser isn't handling lookahead * properly currently. * */ while ((OP(first) == OPEN && (sawopen = 1)) || /* An OR of *one* alternative - should not happen now. */ (OP(first) == BRANCH && OP(first_next) != BRANCH) || /* for now we can't handle lookbehind IFMATCH*/ (OP(first) == IFMATCH && !first->flags && (sawlookahead = 1)) || (OP(first) == PLUS) || (OP(first) == MINMOD) || /* An {n,m} with n>0 */ (PL_regkind[OP(first)] == CURLY && ARG1(first) > 0) || (OP(first) == NOTHING && PL_regkind[OP(first_next)] != END )) { /* * the only op that could be a regnode is PLUS, all the rest * will be regnode_1 or regnode_2. * * (yves doesn't think this is true) */ if (OP(first) == PLUS) sawplus = 1; else { if (OP(first) == MINMOD) sawminmod = 1; first += regarglen[OP(first)]; } first = NEXTOPER(first); first_next= regnext(first); } /* Starting-point info. */ again: DEBUG_PEEP("first:", first, 0, 0); /* Ignore EXACT as we deal with it later. */ if (PL_regkind[OP(first)] == EXACT) { if (OP(first) == EXACT || OP(first) == EXACTL) NOOP; /* Empty, get anchored substr later.
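                      * (EXACT/EXACTL nodes are deliberately not installed
                      * as regstclass here: they feed the anchored/floating
                      * substring optimisation computed by study_chunk()
                      * below, which should be a stronger filter than a
                      * start class.)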
*/ else ri->regstclass = first; } #ifdef TRIE_STCLASS else if (PL_regkind[OP(first)] == TRIE && ((reg_trie_data *)ri->data->data[ ARG(first) ])->minlen>0) { /* this can happen only on restudy */ ri->regstclass = construct_ahocorasick_from_trie(pRExC_state, (regnode *)first, 0); } #endif else if (REGNODE_SIMPLE(OP(first))) ri->regstclass = first; else if (PL_regkind[OP(first)] == BOUND || PL_regkind[OP(first)] == NBOUND) ri->regstclass = first; else if (PL_regkind[OP(first)] == BOL) { r->intflags |= (OP(first) == MBOL ? PREGf_ANCH_MBOL : PREGf_ANCH_SBOL); first = NEXTOPER(first); goto again; } else if (OP(first) == GPOS) { r->intflags |= PREGf_ANCH_GPOS; first = NEXTOPER(first); goto again; } else if ((!sawopen || !RExC_sawback) && !sawlookahead && (OP(first) == STAR && PL_regkind[OP(NEXTOPER(first))] == REG_ANY) && !(r->intflags & PREGf_ANCH) && !pRExC_state->code_blocks) { /* turn .* into ^.* with an implied $*=1 */ const int type = (OP(NEXTOPER(first)) == REG_ANY) ? PREGf_ANCH_MBOL : PREGf_ANCH_SBOL; r->intflags |= (type | PREGf_IMPLICIT); first = NEXTOPER(first); goto again; } if (sawplus && !sawminmod && !sawlookahead && (!sawopen || !RExC_sawback) && !pRExC_state->code_blocks) /* May examine pos and $& */ /* x+ must match at the 1st pos of run of x's */ r->intflags |= PREGf_SKIP; /* Scan is after the zeroth branch, first is atomic matcher. */ #ifdef TRIE_STUDY_OPT DEBUG_PARSE_r( if (!restudied) Perl_re_printf( aTHX_ "first at %" IVdf "\n", (IV)(first - scan + 1)) ); #else DEBUG_PARSE_r( Perl_re_printf( aTHX_ "first at %" IVdf "\n", (IV)(first - scan + 1)) ); #endif /* * If there's something expensive in the r.e., find the * longest literal string that must appear and make it the * regmust. Resolve ties in favor of later strings, since * the regstart check works with the beginning of the r.e. * and avoiding duplication strengthens checking. Not a * strong reason, but sufficient in the absence of others. * [Now we resolve ties in favor of the earlier string if * it happens that c_offset_min has been invalidated, since the * earlier string may buy us something the later one won't.] */ data.substrs[0].str = newSVpvs(""); data.substrs[1].str = newSVpvs(""); data.last_found = newSVpvs(""); data.cur_is_floating = 0; /* initially any found substring is fixed */ ENTER_with_name("study_chunk"); SAVEFREESV(data.substrs[0].str); SAVEFREESV(data.substrs[1].str); SAVEFREESV(data.last_found); first = scan; if (!ri->regstclass) { ssc_init(pRExC_state, &ch_class); data.start_class = &ch_class; stclass_flag = SCF_DO_STCLASS_AND; } else /* XXXX Check for BOUND? */ stclass_flag = 0; data.last_closep = &last_close; DEBUG_RExC_seen(); minlen = study_chunk(pRExC_state, &first, &minlen, &fake, scan + RExC_size, /* Up to end */ &data, -1, 0, NULL, SCF_DO_SUBSTR | SCF_WHILEM_VISITED_POS | stclass_flag | (restudied ? SCF_TRIE_DOING_RESTUDY : 0), 0); CHECK_RESTUDY_GOTO_butfirst(LEAVE_with_name("study_chunk")); if ( RExC_npar == 1 && !data.cur_is_floating && data.last_start_min == 0 && data.last_end > 0 && !RExC_seen_zerolen && !(RExC_seen & REG_VERBARG_SEEN) && !(RExC_seen & REG_GPOS_SEEN) ){ r->extflags |= RXf_CHECK_ALL; } scan_commit(pRExC_state, &data,&minlen,0); /* XXX this is done in reverse order because that's the way the * code was before it was parameterised. Don't know whether it * actually needs doing in reverse order. 
DAPM */ for (i = 1; i >= 0; i--) { longest_length[i] = CHR_SVLEN(data.substrs[i].str); if ( !( i && SvCUR(data.substrs[0].str) /* ok to leave SvCUR */ && data.substrs[0].min_offset == data.substrs[1].min_offset && SvCUR(data.substrs[0].str) == SvCUR(data.substrs[1].str) ) && S_setup_longest (aTHX_ pRExC_state, &(r->substrs->data[i]), &(data.substrs[i]), longest_length[i])) { r->substrs->data[i].min_offset = data.substrs[i].min_offset - data.substrs[i].lookbehind; r->substrs->data[i].max_offset = data.substrs[i].max_offset; /* Don't offset infinity */ if (data.substrs[i].max_offset < SSize_t_MAX) r->substrs->data[i].max_offset -= data.substrs[i].lookbehind; SvREFCNT_inc_simple_void_NN(data.substrs[i].str); } else { r->substrs->data[i].substr = NULL; r->substrs->data[i].utf8_substr = NULL; longest_length[i] = 0; } } LEAVE_with_name("study_chunk"); if (ri->regstclass && (OP(ri->regstclass) == REG_ANY || OP(ri->regstclass) == SANY)) ri->regstclass = NULL; if ((!(r->substrs->data[0].substr || r->substrs->data[0].utf8_substr) || r->substrs->data[0].min_offset) && stclass_flag && ! (ANYOF_FLAGS(data.start_class) & SSC_MATCHES_EMPTY_STRING) && is_ssc_worth_it(pRExC_state, data.start_class)) { const U32 n = add_data(pRExC_state, STR_WITH_LEN("f")); ssc_finalize(pRExC_state, data.start_class); Newx(RExC_rxi->data->data[n], 1, regnode_ssc); StructCopy(data.start_class, (regnode_ssc*)RExC_rxi->data->data[n], regnode_ssc); ri->regstclass = (regnode*)RExC_rxi->data->data[n]; r->intflags &= ~PREGf_SKIP; /* Used in find_byclass(). */ DEBUG_COMPILE_r({ SV *sv = sv_newmortal(); regprop(r, sv, (regnode*)data.start_class, NULL, pRExC_state); Perl_re_printf( aTHX_ "synthetic stclass \"%s\".\n", SvPVX_const(sv));}); data.start_class = NULL; } /* A temporary algorithm prefers a floating substr to a fixed one of the * same length, to dig up more info. */ i = (longest_length[0] <= longest_length[1]); r->substrs->check_ix = i; r->check_end_shift = r->substrs->data[i].end_shift; r->check_substr = r->substrs->data[i].substr; r->check_utf8 = r->substrs->data[i].utf8_substr; r->check_offset_min = r->substrs->data[i].min_offset; r->check_offset_max = r->substrs->data[i].max_offset; if (!i && (r->intflags & (PREGf_ANCH_SBOL|PREGf_ANCH_GPOS))) r->intflags |= PREGf_NOSCAN; if ((r->check_substr || r->check_utf8) ) { r->extflags |= RXf_USE_INTUIT; if (SvTAIL(r->check_substr ? r->check_substr : r->check_utf8)) r->extflags |= RXf_INTUIT_TAIL; } /* XXX Unneeded? dmq (shouldn't as this is handled elsewhere) if ( (STRLEN)minlen < longest_length[1] ) minlen= longest_length[1]; if ( (STRLEN)minlen < longest_length[0] ) minlen= longest_length[0]; */ } else { /* Several toplevels. Best we can do is to set minlen. */ SSize_t fake; regnode_ssc ch_class; SSize_t last_close = 0; DEBUG_PARSE_r(Perl_re_printf( aTHX_ "\nMulti Top Level\n")); scan = ri->program + 1; ssc_init(pRExC_state, &ch_class); data.start_class = &ch_class; data.last_closep = &last_close; DEBUG_RExC_seen(); minlen = study_chunk(pRExC_state, &scan, &minlen, &fake, scan + RExC_size, &data, -1, 0, NULL, SCF_DO_STCLASS_AND|SCF_WHILEM_VISITED_POS|(restudied ? SCF_TRIE_DOING_RESTUDY : 0), 0); CHECK_RESTUDY_GOTO_butfirst(NOOP); r->check_substr = NULL; r->check_utf8 = NULL; r->substrs->data[0].substr = NULL; r->substrs->data[0].utf8_substr = NULL; r->substrs->data[1].substr = NULL; r->substrs->data[1].utf8_substr = NULL; if (!
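            /* install a synthetic start class only when it cannot match
             * the empty string and is restrictive enough to be worth
             * testing on every match attempt: */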
(ANYOF_FLAGS(data.start_class) & SSC_MATCHES_EMPTY_STRING) && is_ssc_worth_it(pRExC_state, data.start_class)) { const U32 n = add_data(pRExC_state, STR_WITH_LEN("f")); ssc_finalize(pRExC_state, data.start_class); Newx(RExC_rxi->data->data[n], 1, regnode_ssc); StructCopy(data.start_class, (regnode_ssc*)RExC_rxi->data->data[n], regnode_ssc); ri->regstclass = (regnode*)RExC_rxi->data->data[n]; r->intflags &= ~PREGf_SKIP; /* Used in find_byclass(). */ DEBUG_COMPILE_r({ SV* sv = sv_newmortal(); regprop(r, sv, (regnode*)data.start_class, NULL, pRExC_state); Perl_re_printf( aTHX_ "synthetic stclass \"%s\".\n", SvPVX_const(sv));}); data.start_class = NULL; } } if (RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN) { r->extflags |= RXf_UNBOUNDED_QUANTIFIER_SEEN; r->maxlen = REG_INFTY; } else { r->maxlen = RExC_maxlen; } /* Guard against an embedded (?=) or (?<=) with a longer minlen than the "real" pattern. */ DEBUG_OPTIMISE_r({ Perl_re_printf( aTHX_ "minlen: %" IVdf " r->minlen:%" IVdf " maxlen:%" IVdf "\n", (IV)minlen, (IV)r->minlen, (IV)RExC_maxlen); }); r->minlenret = minlen; if (r->minlen < minlen) r->minlen = minlen; if (RExC_seen & REG_RECURSE_SEEN ) { r->intflags |= PREGf_RECURSE_SEEN; Newxz(r->recurse_locinput, r->nparens + 1, char *); } if (RExC_seen & REG_GPOS_SEEN) r->intflags |= PREGf_GPOS_SEEN; if (RExC_seen & REG_LOOKBEHIND_SEEN) r->extflags |= RXf_NO_INPLACE_SUBST; /* in-place might break the lookbehind */ if (pRExC_state->code_blocks) r->extflags |= RXf_EVAL_SEEN; if (RExC_seen & REG_VERBARG_SEEN) { r->intflags |= PREGf_VERBARG_SEEN; r->extflags |= RXf_NO_INPLACE_SUBST; /* don't understand this! Yves */ } if (RExC_seen & REG_CUTGROUP_SEEN) r->intflags |= PREGf_CUTGROUP_SEEN; if (pm_flags & PMf_USE_RE_EVAL) r->intflags |= PREGf_USE_RE_EVAL; if (RExC_paren_names) RXp_PAREN_NAMES(r) = MUTABLE_HV(SvREFCNT_inc(RExC_paren_names)); else RXp_PAREN_NAMES(r) = NULL; /* If we have seen an anchor in our pattern then we set the extflag RXf_IS_ANCHORED * so it can be used in pp.c */ if (r->intflags & PREGf_ANCH) r->extflags |= RXf_IS_ANCHORED; { /* this is used to identify "special" patterns that might result * in Perl NOT calling the regex engine and instead doing the match "itself", * particularly special cases in split//. By having the regex compiler * do this pattern matching at a regop level (instead of by inspecting the pattern) * we avoid weird issues with equivalent patterns resulting in different behavior, * AND we allow non-Perl engines to get the same optimizations by setting the * flags appropriately - Yves */ regnode *first = ri->program + 1; U8 fop = OP(first); regnode *next = regnext(first); U8 nop = OP(next); if (PL_regkind[fop] == NOTHING && nop == END) r->extflags |= RXf_NULL; else if ((fop == MBOL || (fop == SBOL && !first->flags)) && nop == END) /* when fop is SBOL first->flags will be true only when it was * produced by parsing /\A/, and not when parsing /^/. This is * very important for the split code as there we want to * treat /^/ as /^/m, but we do not want to treat /\A/ as /^/m. * See rt #122761 for more details.
-- Yves */ r->extflags |= RXf_START_ONLY; else if (fop == PLUS && PL_regkind[nop] == POSIXD && FLAGS(next) == _CC_SPACE && nop == END) r->extflags |= RXf_WHITE; else if ( r->extflags & RXf_SPLIT && (fop == EXACT || fop == EXACTL) && STR_LEN(first) == 1 && *(STRING(first)) == ' ' && nop == END ) r->extflags |= (RXf_SKIPWHITE|RXf_WHITE); } if (RExC_contains_locale) { RXp_EXTFLAGS(r) |= RXf_TAINTED; } #ifdef DEBUGGING if (RExC_paren_names) { ri->name_list_idx = add_data( pRExC_state, STR_WITH_LEN("a")); ri->data->data[ri->name_list_idx] = (void*)SvREFCNT_inc(RExC_paren_name_list); } else #endif ri->name_list_idx = 0; while ( RExC_recurse_count > 0 ) { const regnode *scan = RExC_recurse[ --RExC_recurse_count ]; /* * This data structure is set up in study_chunk() and is used * to calculate the distance between a GOSUB regopcode and * the OPEN/CURLYM (CURLYM's are special and can act like OPEN's) * it refers to. * * If for some reason someone writes code that optimises * away a GOSUB opcode then the assert should be changed to * an if(scan) to guard the ARG2L_SET() - Yves * */ assert(scan && OP(scan) == GOSUB); ARG2L_SET( scan, RExC_open_parens[ARG(scan)] - scan ); } Newxz(r->offs, RExC_npar, regexp_paren_pair); /* assume we don't need to swap parens around before we match */ DEBUG_TEST_r({ Perl_re_printf( aTHX_ "study_chunk_recursed_count: %lu\n", (unsigned long)RExC_study_chunk_recursed_count); }); DEBUG_DUMP_r({ DEBUG_RExC_seen(); Perl_re_printf( aTHX_ "Final program:\n"); regdump(r); }); #ifdef RE_TRACK_PATTERN_OFFSETS DEBUG_OFFSETS_r(if (ri->u.offsets) { const STRLEN len = ri->u.offsets[0]; STRLEN i; GET_RE_DEBUG_FLAGS_DECL; Perl_re_printf( aTHX_ "Offsets: [%" UVuf "]\n\t", (UV)ri->u.offsets[0]); for (i = 1; i <= len; i++) { if (ri->u.offsets[i*2-1] || ri->u.offsets[i*2]) Perl_re_printf( aTHX_ "%" UVuf ":%" UVuf "[%" UVuf "] ", (UV)i, (UV)ri->u.offsets[i*2-1], (UV)ri->u.offsets[i*2]); } Perl_re_printf( aTHX_ "\n"); }); #endif #ifdef USE_ITHREADS /* under ithreads the ?pat? PMf_USED flag on the pmop is simulated * by setting the regexp SV to readonly-only instead. If the * pattern's been recompiled, the USEDness should remain. */ if (old_re && SvREADONLY(old_re)) SvREADONLY_on(rx); #endif return rx; } SV* Perl_reg_named_buff(pTHX_ REGEXP * const rx, SV * const key, SV * const value, const U32 flags) { PERL_ARGS_ASSERT_REG_NAMED_BUFF; PERL_UNUSED_ARG(value); if (flags & RXapif_FETCH) { return reg_named_buff_fetch(rx, key, flags); } else if (flags & (RXapif_STORE | RXapif_DELETE | RXapif_CLEAR)) { Perl_croak_no_modify(); return NULL; } else if (flags & RXapif_EXISTS) { return reg_named_buff_exists(rx, key, flags) ? 
&PL_sv_yes : &PL_sv_no; } else if (flags & RXapif_REGNAMES) { return reg_named_buff_all(rx, flags); } else if (flags & (RXapif_SCALAR | RXapif_REGNAMES_COUNT)) { return reg_named_buff_scalar(rx, flags); } else { Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff", (int)flags); return NULL; } } SV* Perl_reg_named_buff_iter(pTHX_ REGEXP * const rx, const SV * const lastkey, const U32 flags) { PERL_ARGS_ASSERT_REG_NAMED_BUFF_ITER; PERL_UNUSED_ARG(lastkey); if (flags & RXapif_FIRSTKEY) return reg_named_buff_firstkey(rx, flags); else if (flags & RXapif_NEXTKEY) return reg_named_buff_nextkey(rx, flags); else { Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff_iter", (int)flags); return NULL; } } SV* Perl_reg_named_buff_fetch(pTHX_ REGEXP * const r, SV * const namesv, const U32 flags) { SV *ret; struct regexp *const rx = ReANY(r); PERL_ARGS_ASSERT_REG_NAMED_BUFF_FETCH; if (rx && RXp_PAREN_NAMES(rx)) { HE *he_str = hv_fetch_ent( RXp_PAREN_NAMES(rx), namesv, 0, 0 ); if (he_str) { IV i; SV* sv_dat=HeVAL(he_str); I32 *nums=(I32*)SvPVX(sv_dat); AV * const retarray = (flags & RXapif_ALL) ? newAV() : NULL; for ( i=0; i<SvIVX(sv_dat); i++ ) { if ((I32)(rx->nparens) >= nums[i] && rx->offs[nums[i]].start != -1 && rx->offs[nums[i]].end != -1) { ret = newSVpvs(""); CALLREG_NUMBUF_FETCH(r,nums[i],ret); if (!retarray) return ret; } else { if (retarray) ret = newSVsv(&PL_sv_undef); } if (retarray) av_push(retarray, ret); } if (retarray) return newRV_noinc(MUTABLE_SV(retarray)); } } return NULL; } bool Perl_reg_named_buff_exists(pTHX_ REGEXP * const r, SV * const key, const U32 flags) { struct regexp *const rx = ReANY(r); PERL_ARGS_ASSERT_REG_NAMED_BUFF_EXISTS; if (rx && RXp_PAREN_NAMES(rx)) { if (flags & RXapif_ALL) { return hv_exists_ent(RXp_PAREN_NAMES(rx), key, 0); } else { SV *sv = CALLREG_NAMED_BUFF_FETCH(r, key, flags); if (sv) { SvREFCNT_dec_NN(sv); return TRUE; } else { return FALSE; } } } else { return FALSE; } } SV* Perl_reg_named_buff_firstkey(pTHX_ REGEXP * const r, const U32 flags) { struct regexp *const rx = ReANY(r); PERL_ARGS_ASSERT_REG_NAMED_BUFF_FIRSTKEY; if ( rx && RXp_PAREN_NAMES(rx) ) { (void)hv_iterinit(RXp_PAREN_NAMES(rx)); return CALLREG_NAMED_BUFF_NEXTKEY(r, NULL, flags & ~RXapif_FIRSTKEY); } else { return NULL; /* an SV* function, so NULL rather than FALSE */ } } SV* Perl_reg_named_buff_nextkey(pTHX_ REGEXP * const r, const U32 flags) { struct regexp *const rx = ReANY(r); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REG_NAMED_BUFF_NEXTKEY; if (rx && RXp_PAREN_NAMES(rx)) { HV *hv = RXp_PAREN_NAMES(rx); HE *temphe; while ( (temphe = hv_iternext_flags(hv,0)) ) { IV i; IV parno = 0; SV* sv_dat = HeVAL(temphe); I32 *nums = (I32*)SvPVX(sv_dat); for ( i = 0; i < SvIVX(sv_dat); i++ ) { if ((I32)(rx->lastparen) >= nums[i] && rx->offs[nums[i]].start != -1 && rx->offs[nums[i]].end != -1) { parno = nums[i]; break; } } if (parno || flags & RXapif_ALL) { return newSVhek(HeKEY_hek(temphe)); } } } return NULL; } SV* Perl_reg_named_buff_scalar(pTHX_ REGEXP * const r, const U32 flags) { SV *ret; AV *av; SSize_t length; struct regexp *const rx = ReANY(r); PERL_ARGS_ASSERT_REG_NAMED_BUFF_SCALAR; if (rx && RXp_PAREN_NAMES(rx)) { if (flags & (RXapif_ALL | RXapif_REGNAMES_COUNT)) { return newSViv(HvTOTALKEYS(RXp_PAREN_NAMES(rx))); } else if (flags & RXapif_ONE) { ret = CALLREG_NAMED_BUFF_ALL(r, (flags | RXapif_REGNAMES)); av = MUTABLE_AV(SvRV(ret)); length = av_tindex(av); SvREFCNT_dec_NN(ret); return newSViv(length + 1); } else { Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff_scalar", (int)flags); return NULL; } } return &PL_sv_undef; }
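/* Taken together, the Perl_reg_named_buff*() functions above form the
 * engine side of the named-capture API; the %+ and %- tied hashes reach
 * them through the RXapif_* dispatch in Perl_reg_named_buff().  As an
 * illustrative sketch (hypothetical caller code, not copied from the
 * core), fetching one named capture versus all captures of that name
 * differs only in the RXapif_ALL bit:
 *
 *     SV *one = Perl_reg_named_buff(aTHX_ rx, namesv, NULL, RXapif_FETCH);
 *     SV *all = Perl_reg_named_buff(aTHX_ rx, namesv, NULL,
 *                                   RXapif_FETCH | RXapif_ALL);
 *
 * where "one" is the single capture's value (or NULL if no group of that
 * name matched) and "all" is a reference to an AV holding one entry per
 * group of that name, as %- returns.
 */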
SV* Perl_reg_named_buff_all(pTHX_ REGEXP * const r, const U32 flags) { struct regexp *const rx = ReANY(r); AV *av = newAV(); PERL_ARGS_ASSERT_REG_NAMED_BUFF_ALL; if (rx && RXp_PAREN_NAMES(rx)) { HV *hv= RXp_PAREN_NAMES(rx); HE *temphe; (void)hv_iterinit(hv); while ( (temphe = hv_iternext_flags(hv,0)) ) { IV i; IV parno = 0; SV* sv_dat = HeVAL(temphe); I32 *nums = (I32*)SvPVX(sv_dat); for ( i = 0; i < SvIVX(sv_dat); i++ ) { if ((I32)(rx->lastparen) >= nums[i] && rx->offs[nums[i]].start != -1 && rx->offs[nums[i]].end != -1) { parno = nums[i]; break; } } if (parno || flags & RXapif_ALL) { av_push(av, newSVhek(HeKEY_hek(temphe))); } } } return newRV_noinc(MUTABLE_SV(av)); } void Perl_reg_numbered_buff_fetch(pTHX_ REGEXP * const r, const I32 paren, SV * const sv) { struct regexp *const rx = ReANY(r); char *s = NULL; SSize_t i = 0; SSize_t s1, t1; I32 n = paren; PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_FETCH; if ( n == RX_BUFF_IDX_CARET_PREMATCH || n == RX_BUFF_IDX_CARET_FULLMATCH || n == RX_BUFF_IDX_CARET_POSTMATCH ) { bool keepcopy = cBOOL(rx->extflags & RXf_PMf_KEEPCOPY); if (!keepcopy) { /* on something like * $r = qr/.../; * /$qr/p; * the KEEPCOPY is set on the PMOP rather than the regex */ if (PL_curpm && r == PM_GETRE(PL_curpm)) keepcopy = cBOOL(PL_curpm->op_pmflags & PMf_KEEPCOPY); } if (!keepcopy) goto ret_undef; } if (!rx->subbeg) goto ret_undef; if (n == RX_BUFF_IDX_CARET_FULLMATCH) /* no need to distinguish between them any more */ n = RX_BUFF_IDX_FULLMATCH; if ((n == RX_BUFF_IDX_PREMATCH || n == RX_BUFF_IDX_CARET_PREMATCH) && rx->offs[0].start != -1) { /* $`, ${^PREMATCH} */ i = rx->offs[0].start; s = rx->subbeg; } else if ((n == RX_BUFF_IDX_POSTMATCH || n == RX_BUFF_IDX_CARET_POSTMATCH) && rx->offs[0].end != -1) { /* $', ${^POSTMATCH} */ s = rx->subbeg - rx->suboffset + rx->offs[0].end; i = rx->sublen + rx->suboffset - rx->offs[0].end; } else if ( 0 <= n && n <= (I32)rx->nparens && (s1 = rx->offs[n].start) != -1 && (t1 = rx->offs[n].end) != -1) { /* $&, ${^MATCH}, $1 ... 
*/ i = t1 - s1; s = rx->subbeg + s1 - rx->suboffset; } else { goto ret_undef; } assert(s >= rx->subbeg); assert((STRLEN)rx->sublen >= (STRLEN)((s - rx->subbeg) + i) ); if (i >= 0) {
#ifdef NO_TAINT_SUPPORT
sv_setpvn(sv, s, i);
#else
const int oldtainted = TAINT_get; TAINT_NOT; sv_setpvn(sv, s, i); TAINT_set(oldtainted);
#endif
if (RXp_MATCH_UTF8(rx)) SvUTF8_on(sv); else SvUTF8_off(sv); if (TAINTING_get) { if (RXp_MATCH_TAINTED(rx)) { if (SvTYPE(sv) >= SVt_PVMG) { MAGIC* const mg = SvMAGIC(sv); MAGIC* mgt; TAINT; SvMAGIC_set(sv, mg->mg_moremagic); SvTAINT(sv); if ((mgt = SvMAGIC(sv))) { mg->mg_moremagic = mgt; SvMAGIC_set(sv, mg); } } else { TAINT; SvTAINT(sv); } } else SvTAINTED_off(sv); } } else { ret_undef: sv_set_undef(sv); return; } } void Perl_reg_numbered_buff_store(pTHX_ REGEXP * const rx, const I32 paren, SV const * const value) { PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_STORE; PERL_UNUSED_ARG(rx); PERL_UNUSED_ARG(paren); PERL_UNUSED_ARG(value); if (!PL_localizing) Perl_croak_no_modify(); } I32 Perl_reg_numbered_buff_length(pTHX_ REGEXP * const r, const SV * const sv, const I32 paren) { struct regexp *const rx = ReANY(r); I32 i; I32 s1, t1; PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_LENGTH; if ( paren == RX_BUFF_IDX_CARET_PREMATCH || paren == RX_BUFF_IDX_CARET_FULLMATCH || paren == RX_BUFF_IDX_CARET_POSTMATCH ) { bool keepcopy = cBOOL(rx->extflags & RXf_PMf_KEEPCOPY); if (!keepcopy) { /* on something like * $r = qr/.../; * /$qr/p; * the KEEPCOPY is set on the PMOP rather than the regex */ if (PL_curpm && r == PM_GETRE(PL_curpm)) keepcopy = cBOOL(PL_curpm->op_pmflags & PMf_KEEPCOPY); } if (!keepcopy) goto warn_undef; } /* Some of this code was originally in C<Perl_magic_len> in F<mg.c> */ switch (paren) { case RX_BUFF_IDX_CARET_PREMATCH: /* ${^PREMATCH} */ case RX_BUFF_IDX_PREMATCH: /* $` */ if (rx->offs[0].start != -1) { i = rx->offs[0].start; if (i > 0) { s1 = 0; t1 = i; goto getlen; } } return 0; case RX_BUFF_IDX_CARET_POSTMATCH: /* ${^POSTMATCH} */ case RX_BUFF_IDX_POSTMATCH: /* $' */ if (rx->offs[0].end != -1) { i = rx->sublen - rx->offs[0].end; if (i > 0) { s1 = rx->offs[0].end; t1 = rx->sublen; goto getlen; } } return 0; default: /* $& / ${^MATCH}, $1, $2, ... */ if (paren <= (I32)rx->nparens && (s1 = rx->offs[paren].start) != -1 && (t1 = rx->offs[paren].end) != -1) { i = t1 - s1; goto getlen; } else { warn_undef: if (ckWARN(WARN_UNINITIALIZED)) report_uninit((const SV *)sv); return 0; } } getlen: if (i > 0 && RXp_MATCH_UTF8(rx)) { const char * const s = rx->subbeg - rx->suboffset + s1; const U8 *ep; STRLEN el; i = t1 - s1; if (is_utf8_string_loclen((U8*)s, i, &ep, &el)) i = el; } return i; } SV* Perl_reg_qr_package(pTHX_ REGEXP * const rx) { PERL_ARGS_ASSERT_REG_QR_PACKAGE; PERL_UNUSED_ARG(rx); if (0) return NULL; else return newSVpvs("Regexp"); } /* Scans the name of a named buffer from the pattern. * If flags is REG_RSN_RETURN_NULL returns null. * If flags is REG_RSN_RETURN_NAME returns an SV* containing the name * If flags is REG_RSN_RETURN_DATA returns the data SV* corresponding * to the parsed name as looked up in the RExC_paren_names hash. * If there is an error it throws a vFAIL()-type exception.
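 *
 * For example, when parsing /(?<word>\w+)/ this is reached with
 * RExC_parse pointing at the 'w' of "word"; it advances RExC_parse past
 * the name and returns the name (or its paren data) according to flags.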
*/ #define REG_RSN_RETURN_NULL 0 #define REG_RSN_RETURN_NAME 1 #define REG_RSN_RETURN_DATA 2 STATIC SV* S_reg_scan_name(pTHX_ RExC_state_t *pRExC_state, U32 flags) { char *name_start = RExC_parse; PERL_ARGS_ASSERT_REG_SCAN_NAME; assert (RExC_parse <= RExC_end); if (RExC_parse == RExC_end) NOOP; else if (isIDFIRST_lazy_if_safe(RExC_parse, RExC_end, UTF)) { /* Note that the code here assumes well-formed UTF-8. Skip IDFIRST by * using do...while */ if (UTF) do { RExC_parse += UTF8SKIP(RExC_parse); } while ( RExC_parse < RExC_end && isWORDCHAR_utf8_safe((U8*)RExC_parse, (U8*) RExC_end)); else do { RExC_parse++; } while (RExC_parse < RExC_end && isWORDCHAR(*RExC_parse)); } else { RExC_parse++; /* so the <- from the vFAIL is after the offending character */ vFAIL("Group name must start with a non-digit word character"); } if ( flags ) { SV* sv_name = newSVpvn_flags(name_start, (int)(RExC_parse - name_start), SVs_TEMP | (UTF ? SVf_UTF8 : 0)); if ( flags == REG_RSN_RETURN_NAME) return sv_name; else if (flags==REG_RSN_RETURN_DATA) { HE *he_str = NULL; SV *sv_dat = NULL; if ( ! sv_name ) /* should not happen*/ Perl_croak(aTHX_ "panic: no svname in reg_scan_name"); if (RExC_paren_names) he_str = hv_fetch_ent( RExC_paren_names, sv_name, 0, 0 ); if ( he_str ) sv_dat = HeVAL(he_str); if ( ! sv_dat ) vFAIL("Reference to nonexistent named group"); return sv_dat; } else { Perl_croak(aTHX_ "panic: bad flag %lx in reg_scan_name", (unsigned long) flags); } NOT_REACHED; /* NOTREACHED */ } return NULL; } #define DEBUG_PARSE_MSG(funcname) DEBUG_PARSE_r({ \ int num; \ if (RExC_lastparse!=RExC_parse) { \ Perl_re_printf( aTHX_ "%s", \ Perl_pv_pretty(aTHX_ RExC_mysv1, RExC_parse, \ RExC_end - RExC_parse, 16, \ "", "", \ PERL_PV_ESCAPE_UNI_DETECT | \ PERL_PV_PRETTY_ELLIPSES | \ PERL_PV_PRETTY_LTGT | \ PERL_PV_ESCAPE_RE | \ PERL_PV_PRETTY_EXACTSIZE \ ) \ ); \ } else \ Perl_re_printf( aTHX_ "%16s",""); \ \ if (SIZE_ONLY) \ num = RExC_size + 1; \ else \ num=REG_NODE_NUM(RExC_emit); \ if (RExC_lastnum!=num) \ Perl_re_printf( aTHX_ "|%4d",num); \ else \ Perl_re_printf( aTHX_ "|%4s",""); \ Perl_re_printf( aTHX_ "|%*s%-4s", \ (int)((depth*2)), "", \ (funcname) \ ); \ RExC_lastnum=num; \ RExC_lastparse=RExC_parse; \ }) #define DEBUG_PARSE(funcname) DEBUG_PARSE_r({ \ DEBUG_PARSE_MSG((funcname)); \ Perl_re_printf( aTHX_ "%4s","\n"); \ }) #define DEBUG_PARSE_FMT(funcname,fmt,args) DEBUG_PARSE_r({\ DEBUG_PARSE_MSG((funcname)); \ Perl_re_printf( aTHX_ fmt "\n",args); \ }) /* This section of code defines the inversion list object and its methods. The * interfaces are highly subject to change, so as much as possible is static to * this file. An inversion list is here implemented as a malloc'd C UV array * as an SVt_INVLIST scalar. * * An inversion list for Unicode is an array of code points, sorted by ordinal * number. Each element gives the code point that begins a range that extends * up-to but not including the code point given by the next element. The final * element gives the first code point of a range that extends to the platform's * infinity. The even-numbered elements (invlist[0], invlist[2], invlist[4], * ...) give ranges whose code points are all in the inversion list. We say * that those ranges are in the set. The odd-numbered elements give ranges * whose code points are not in the inversion list, and hence not in the set. * Thus, element [0] is the first code point in the list. Element [1] * is the first code point beyond that not in the list; and element [2] is the * first code point beyond that that is in the list. 
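 * As a concrete illustration, the set of ASCII letters
 *      { 'A'..'Z', 'a'..'z' }
 * is the four-element inversion list
 *      invlist[] = { 0x41, 0x5B, 0x61, 0x7B }
 * that is, { 'A', 'Z'+1, 'a', 'z'+1 }: the even elements [0] and [2] open
 * in-set ranges, and the odd elements [1] and [3] close them.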
In other words, the first * range is invlist[0]..(invlist[1]-1), and all code points in that range are * in the inversion list. The second range is invlist[1]..(invlist[2]-1), and * all code points in that range are not in the inversion list. The third * range invlist[2]..(invlist[3]-1) gives code points that are in the inversion * list, and so forth. Thus every element whose index is divisible by two * gives the beginning of a range that is in the list, and every element whose * index is not divisible by two gives the beginning of a range not in the * list. If the final element's index is divisible by two, the inversion list * extends to the platform's infinity; otherwise the highest code point in the * inversion list is the contents of that element minus 1. * * A range that contains just a single code point N will look like * invlist[i] == N * invlist[i+1] == N+1 * * If N is UV_MAX (the highest representable code point on the machine), N+1 is * impossible to represent, so element [i+1] is omitted. The single element * inversion list * invlist[0] == UV_MAX * contains just UV_MAX, but is interpreted as matching to infinity. * * Taking the complement of (inverting) an inversion list is quite simple: if * the first element is 0, remove it; otherwise add a 0 element at the * beginning. * This implementation reserves an element at the beginning of each inversion * list to always contain 0; there is an additional flag in the header which * indicates if the list begins at the 0, or is offset to begin at the next * element. This means that the inversion list can be inverted without any * copying; just flip the flag. * * More about inversion lists can be found in "Unicode Demystified" * Chapter 13 by Richard Gillam, published by Addison-Wesley. * * The inversion list data structure is currently implemented as an SV pointing * to an array of UVs that the SV thinks are bytes. This allows us to have an * array of UV whose memory management is automatically handled by the existing * facilities for SV's. * * Some of the methods should always be private to the implementation, and some * should eventually be made public */ /* The header definitions are in F<invlist_inline.h> */
#ifndef PERL_IN_XSUB_RE
PERL_STATIC_INLINE UV* S__invlist_array_init(SV* const invlist, const bool will_have_0) { /* Returns a pointer to the first element in the inversion list's array. * This is called upon initialization of an inversion list. Where the * array begins depends on whether the list has the code point U+0000 in it * or not. The other parameter tells it whether the code that follows this * call is about to put a 0 in the inversion list or not. The first * element is either the element reserved for 0, if TRUE, or the element * after it, if FALSE */ bool* offset = get_invlist_offset_addr(invlist); UV* zero_addr = (UV *) SvPVX(invlist); PERL_ARGS_ASSERT__INVLIST_ARRAY_INIT; /* Must be empty */ assert(! _invlist_len(invlist)); *zero_addr = 0; /* 1^1 = 0; 1^0 = 1 */ *offset = 1 ^ will_have_0; return zero_addr + *offset; }
#endif
PERL_STATIC_INLINE void S_invlist_set_len(pTHX_ SV* const invlist, const UV len, const bool offset) { /* Sets the current number of elements stored in the inversion list. * Updates SvCUR correspondingly */ PERL_UNUSED_CONTEXT; PERL_ARGS_ASSERT_INVLIST_SET_LEN; assert(SvTYPE(invlist) == SVt_INVLIST); SvCUR_set(invlist, (len == 0) ?
0 : TO_INTERNAL_SIZE(len + offset)); assert(SvLEN(invlist) == 0 || SvCUR(invlist) <= SvLEN(invlist)); } #ifndef PERL_IN_XSUB_RE STATIC void S_invlist_replace_list_destroys_src(pTHX_ SV * dest, SV * src) { /* Replaces the inversion list in 'dest' with the one from 'src'. It * steals the list from 'src', so 'src' is made to have a NULL list. This * is similar to what SvSetMagicSV() would do, if it were implemented on * inversion lists, though this routine avoids a copy */ const UV src_len = _invlist_len(src); const bool src_offset = *get_invlist_offset_addr(src); const STRLEN src_byte_len = SvLEN(src); char * array = SvPVX(src); const int oldtainted = TAINT_get; PERL_ARGS_ASSERT_INVLIST_REPLACE_LIST_DESTROYS_SRC; assert(SvTYPE(src) == SVt_INVLIST); assert(SvTYPE(dest) == SVt_INVLIST); assert(! invlist_is_iterating(src)); assert(SvCUR(src) == 0 || SvCUR(src) < SvLEN(src)); /* Make sure it ends in the right place with a NUL, as our inversion list * manipulations aren't careful to keep this true, but sv_usepvn_flags() * asserts it */ array[src_byte_len - 1] = '\0'; TAINT_NOT; /* Otherwise it breaks */ sv_usepvn_flags(dest, (char *) array, src_byte_len - 1, /* This flag is documented to cause a copy to be avoided */ SV_HAS_TRAILING_NUL); TAINT_set(oldtainted); SvPV_set(src, 0); SvLEN_set(src, 0); SvCUR_set(src, 0); /* Finish up copying over the other fields in an inversion list */ *get_invlist_offset_addr(dest) = src_offset; invlist_set_len(dest, src_len, src_offset); *get_invlist_previous_index_addr(dest) = 0; invlist_iterfinish(dest); } PERL_STATIC_INLINE IV* S_get_invlist_previous_index_addr(SV* invlist) { /* Return the address of the IV that is reserved to hold the cached index * */ PERL_ARGS_ASSERT_GET_INVLIST_PREVIOUS_INDEX_ADDR; assert(SvTYPE(invlist) == SVt_INVLIST); return &(((XINVLIST*) SvANY(invlist))->prev_index); } PERL_STATIC_INLINE IV S_invlist_previous_index(SV* const invlist) { /* Returns cached index of previous search */ PERL_ARGS_ASSERT_INVLIST_PREVIOUS_INDEX; return *get_invlist_previous_index_addr(invlist); } PERL_STATIC_INLINE void S_invlist_set_previous_index(SV* const invlist, const IV index) { /* Caches <index> for later retrieval */ PERL_ARGS_ASSERT_INVLIST_SET_PREVIOUS_INDEX; assert(index == 0 || index < (int) _invlist_len(invlist)); *get_invlist_previous_index_addr(invlist) = index; } PERL_STATIC_INLINE void S_invlist_trim(SV* invlist) { /* Free the not currently-being-used space in an inversion list */ /* But don't free up the space needed for the 0 UV that is always at the * beginning of the list, nor the trailing NUL */ const UV min_size = TO_INTERNAL_SIZE(1) + 1; PERL_ARGS_ASSERT_INVLIST_TRIM; assert(SvTYPE(invlist) == SVt_INVLIST); SvPV_renew(invlist, MAX(min_size, SvCUR(invlist) + 1)); } PERL_STATIC_INLINE void S_invlist_clear(pTHX_ SV* invlist) /* Empty the inversion list */ { PERL_ARGS_ASSERT_INVLIST_CLEAR; assert(SvTYPE(invlist) == SVt_INVLIST); invlist_set_len(invlist, 0, 0); invlist_trim(invlist); } #endif /* ifndef PERL_IN_XSUB_RE */ PERL_STATIC_INLINE bool S_invlist_is_iterating(SV* const invlist) { PERL_ARGS_ASSERT_INVLIST_IS_ITERATING; return *(get_invlist_iter_addr(invlist)) < (STRLEN) UV_MAX; } #ifndef PERL_IN_XSUB_RE PERL_STATIC_INLINE UV S_invlist_max(SV* const invlist) { /* Returns the maximum number of elements storable in the inversion list's * array, without having to realloc() */ PERL_ARGS_ASSERT_INVLIST_MAX; assert(SvTYPE(invlist) == SVt_INVLIST); /* Assumes worst case, in which the 0 element is not counted in the * inversion list, so 
subtracts 1 for that */ return SvLEN(invlist) == 0 /* This happens under _new_invlist_C_array */ ? FROM_INTERNAL_SIZE(SvCUR(invlist)) - 1 : FROM_INTERNAL_SIZE(SvLEN(invlist)) - 1; } SV* Perl__new_invlist(pTHX_ IV initial_size) { /* Return a pointer to a newly constructed inversion list, with enough * space to store 'initial_size' elements. If that number is negative, a * system default is used instead */ SV* new_list; if (initial_size < 0) { initial_size = 10; } /* Allocate the initial space */ new_list = newSV_type(SVt_INVLIST); /* First 1 is in case the zero element isn't in the list; second 1 is for * trailing NUL */ SvGROW(new_list, TO_INTERNAL_SIZE(initial_size + 1) + 1); invlist_set_len(new_list, 0, 0); /* Force iterinit() to be used to get iteration to work */ *get_invlist_iter_addr(new_list) = (STRLEN) UV_MAX; *get_invlist_previous_index_addr(new_list) = 0; return new_list; } SV* Perl__new_invlist_C_array(pTHX_ const UV* const list) { /* Return a pointer to a newly constructed inversion list, initialized to * point to <list>, which has to be in the exact correct inversion list * form, including internal fields. Thus this is a dangerous routine that * should not be used in the wrong hands. The passed in 'list' contains * several header fields at the beginning that are not part of the * inversion list body proper */ const STRLEN length = (STRLEN) list[0]; const UV version_id = list[1]; const bool offset = cBOOL(list[2]); #define HEADER_LENGTH 3 /* If any of the above changes in any way, you must change HEADER_LENGTH * (if appropriate) and regenerate INVLIST_VERSION_ID by running * perl -E 'say int(rand 2**31-1)' */ #define INVLIST_VERSION_ID 148565664 /* This is a combination of a version and data structure type, so that one being passed in can be validated to be an inversion list of the correct vintage. */ SV* invlist = newSV_type(SVt_INVLIST); PERL_ARGS_ASSERT__NEW_INVLIST_C_ARRAY; if (version_id != INVLIST_VERSION_ID) { Perl_croak(aTHX_ "panic: Incorrect version for previously generated inversion list"); } /* The generated array passed in includes header elements that aren't part * of the list proper, so start it just after them */ SvPV_set(invlist, (char *) (list + HEADER_LENGTH)); SvLEN_set(invlist, 0); /* Means we own the contents, and the system shouldn't touch it */ *(get_invlist_offset_addr(invlist)) = offset; /* The 'length' passed to us is the physical number of elements in the * inversion list. But if there is an offset the logical number is one * less than that */ invlist_set_len(invlist, length - offset, offset); invlist_set_previous_index(invlist, 0); /* Initialize the iteration pointer. */ invlist_iterfinish(invlist); SvREADONLY_on(invlist); return invlist; } STATIC void S_invlist_extend(pTHX_ SV* const invlist, const UV new_max) { /* Grow the maximum size of an inversion list */ PERL_ARGS_ASSERT_INVLIST_EXTEND; assert(SvTYPE(invlist) == SVt_INVLIST); /* Add one to account for the zero element at the beginning which may not * be counted by the calling parameters */ SvGROW((SV *)invlist, TO_INTERNAL_SIZE(new_max + 1)); } STATIC void S__append_range_to_invlist(pTHX_ SV* const invlist, const UV start, const UV end) { /* Subject to change or removal. Append the range from 'start' to 'end' at * the end of the inversion list. The range must be above any existing * ones. 
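 * For instance, appending 'a'..'z' (0x61..0x7A) to a list currently
 * holding only 'A'..'Z' -- array contents { 0x41, 0x5B } -- yields
 * { 0x41, 0x5B, 0x61, 0x7B }, while appending a range whose start equals
 * the current final element simply extends the last range in place.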
*/ UV* array; UV max = invlist_max(invlist); UV len = _invlist_len(invlist); bool offset; PERL_ARGS_ASSERT__APPEND_RANGE_TO_INVLIST;
if (len == 0) { /* Empty lists must be initialized */ offset = start != 0; array = _invlist_array_init(invlist, ! offset); }
else { /* Here, the existing list is non-empty. The current max entry in the * list is generally the first value not in the set, except when the * set extends to the end of permissible values, in which case it is * the first entry in that final set, and so this call is an attempt to * append out-of-order */ UV final_element = len - 1; array = invlist_array(invlist); if ( array[final_element] > start || ELEMENT_RANGE_MATCHES_INVLIST(final_element)) { Perl_croak(aTHX_ "panic: attempting to append to an inversion list, but wasn't at the end of the list, final=%" UVuf ", start=%" UVuf ", match=%c", array[final_element], start, ELEMENT_RANGE_MATCHES_INVLIST(final_element) ? 't' : 'f'); }
/* Here, it is a legal append. If the new range begins 1 above the end * of the range below it, it is extending the range below it, so the * new first value not in the set is one greater than the newly * extended range. */ offset = *get_invlist_offset_addr(invlist); if (array[final_element] == start) { if (end != UV_MAX) { array[final_element] = end + 1; } else { /* But if the end is the maximum representable on the machine, * assume that infinity was actually what was meant. Just let * the range that this would extend to have no end */ invlist_set_len(invlist, len - 1, offset); } return; } }
/* Here the new range doesn't extend any existing set. Add it */ len += 2; /* Includes an element each for the start and end of range */
/* If it will overflow the existing space, extend, which may cause the array * to be moved */ if (max < len) { invlist_extend(invlist, len); /* Have to set len here to avoid assert failure in invlist_array() */ invlist_set_len(invlist, len, offset); array = invlist_array(invlist); } else { invlist_set_len(invlist, len, offset); }
/* The next item on the list starts the range, the one after that is * one past the new range. */ array[len - 2] = start; if (end != UV_MAX) { array[len - 1] = end + 1; } else { /* But if the end is the maximum representable on the machine, just let * the range have no end */ invlist_set_len(invlist, len - 1, offset); } }
SSize_t Perl__invlist_search(SV* const invlist, const UV cp) { /* Searches the inversion list for the entry that contains the input code * point <cp>. If <cp> is not in the list, -1 is returned. Otherwise, the * return value is the index into the list's array of the range that * contains <cp>, that is, 'i' such that * array[i] <= cp < array[i+1] */ IV low = 0; IV mid; IV high = _invlist_len(invlist); const IV highest_element = high - 1; const UV* array; PERL_ARGS_ASSERT__INVLIST_SEARCH;
/* If list is empty, return failure. */ if (high == 0) { return -1; }
/* (We can't get the array unless we know the list is non-empty) */ array = invlist_array(invlist); mid = invlist_previous_index(invlist); assert(mid >= 0); if (mid > highest_element) { mid = highest_element; }
/* <mid> contains the cache of the result of the previous call to this * function (0 the first time). See if this call is for the same result, * or if it is for mid-1. This is under the theory that calls to this * function will often be for related code points that are near each other. * And benchmarks show that caching gives better results. We also test * here if the code point is within the bounds of the list.
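* For instance (an illustrative sketch, not from the original source): for the
* list { 10, 20, 30 } -- matching 10..19 and 30..infinity -- a search for 5
* fails the lower-bound test and returns -1, a search for 40 falls in the
* final range and returns 2, and a search for 25 returns 1, an index whose
* range is not in the set.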
These cache and bounds tests * replace others that would have had to be made anyway to make sure that * the array bounds were not exceeded, and these give us extra information * at the same time */
if (cp >= array[mid]) { if (cp >= array[highest_element]) { return highest_element; }
/* Here, array[mid] <= cp < array[highest_element]. This means that * the final element is not the answer, so we can exclude it; it also * means that <mid> is not the final element, so we can refer to 'mid + 1' * safely */ if (cp < array[mid + 1]) { return mid; } high--; low = mid + 1; }
else { /* cp < array[mid] */ if (cp < array[0]) { /* Fail if outside the array */ return -1; } high = mid; if (cp >= array[mid - 1]) { goto found_entry; } }
/* Binary search. What we are looking for is <i> such that * array[i] <= cp < array[i+1] * The loop below converges on the i+1. Note that there may not be an * (i+1)th element in the array, and things work nonetheless */ while (low < high) { mid = (low + high) / 2; assert(mid <= highest_element); if (array[mid] <= cp) { /* cp >= array[mid] */ low = mid + 1; /* We could do this extra test to exit the loop early. if (cp < array[low]) { return mid; } */ } else { /* cp < array[mid] */ high = mid; } }
found_entry: high--; invlist_set_previous_index(invlist, high); return high; }
void Perl__invlist_populate_swatch(SV* const invlist, const UV start, const UV end, U8* swatch) { /* populates a swatch of a swash the same way swatch_get() does in utf8.c, * but is used when the swash has an inversion list. This makes this much * faster, as it uses a binary search instead of a linear one. This is * intimately tied to that function, and perhaps should be in utf8.c, * except it is intimately tied to inversion lists as well. It assumes * that <swatch> is all 0's on input */ UV current = start; const IV len = _invlist_len(invlist); IV i; const UV * array; PERL_ARGS_ASSERT__INVLIST_POPULATE_SWATCH;
if (len == 0) { /* Empty inversion list */ return; } array = invlist_array(invlist);
/* Find which element it is */ i = _invlist_search(invlist, start);
/* We populate from <start> to <end> */ while (current < end) { UV upper;
/* The inversion list gives the results for every possible code point * after the first one in the list. Only those ranges whose index is * even are ones that the inversion list matches. For the odd ones, * and if the initial code point is not in the list, we have to skip * forward to the next element */ if (i == -1 || ! ELEMENT_RANGE_MATCHES_INVLIST(i)) { i++; if (i >= len) { /* Finished if beyond the end of the array */ return; } current = array[i]; if (current >= end) { /* Finished if beyond the end of what we are populating */ if (LIKELY(end < UV_MAX)) { return; }
/* We get here when the upper bound is the maximum * representable on the machine, and we are looking for just * that code point. Have to special case it */ i = len; goto join_end_of_list; } } assert(current >= start);
/* The current range ends one below the next one, except don't go past * <end> */ i++; upper = (i < len && array[i] < end) ? array[i] : end;
/* Here we are in a range that matches. Populate a bit in the swatch * for each code point in it */ for (; current < upper; current++) { const STRLEN offset = (STRLEN)(current - start); swatch[offset >> 3] |= 1 << (offset & 7); }
join_end_of_list: /* Quit if at the end of the list */ if (i >= len) {
/* But first, we have to deal with the highest possible code point on * the platform.
The previous code assumes that <end> is one * beyond where we want to populate, but that is impossible at the * platform's infinity, so we have to handle it specially */ if (UNLIKELY(end == UV_MAX && ELEMENT_RANGE_MATCHES_INVLIST(len-1))) { const STRLEN offset = (STRLEN)(end - start); swatch[offset >> 3] |= 1 << (offset & 7); } return; }
/* Advance to the next range, which will be for code points not in the * inversion list */ current = array[i]; } return; }
void Perl__invlist_union_maybe_complement_2nd(pTHX_ SV* const a, SV* const b, const bool complement_b, SV** output) { /* Take the union of two inversion lists and point '*output' to it. On * input, '*output' MUST POINT TO NULL OR TO AN SV* INVERSION LIST (possibly * even 'a' or 'b'). If to an inversion list, the contents of the original * list will be replaced by the union. The first list, 'a', may be * NULL, in which case a copy of the second list is placed in '*output'. * If 'complement_b' is TRUE, the union is taken of the complement * (inversion) of 'b' instead of b itself. * * The basis for this comes from "Unicode Demystified" Chapter 13 by * Richard Gillam, published by Addison-Wesley, and explained at some * length there. The preface says to incorporate its examples into your * code at your own risk. * * The algorithm is like a merge sort. */
const UV* array_a; /* a's array */ const UV* array_b; UV len_a; /* length of a's array */ UV len_b; SV* u; /* the resulting union */ UV* array_u; UV len_u = 0; UV i_a = 0; /* current index into a's array */ UV i_b = 0; UV i_u = 0;
/* running count, as explained in the algorithm source book; items are * stopped accumulating and are output when the count changes to/from 0. * The count is incremented when we start a range that's in an input's set, * and decremented when we start a range that's not in a set. So this * variable can be 0, 1, or 2. When it is 0 neither input is in its set, * and hence nothing goes into the union; 1, just one of the inputs is in * its set (and its current range gets added to the union); and 2 when both * inputs are in their sets. */ UV count = 0;
PERL_ARGS_ASSERT__INVLIST_UNION_MAYBE_COMPLEMENT_2ND; assert(a != b); assert(*output == NULL || SvTYPE(*output) == SVt_INVLIST);
len_b = _invlist_len(b); if (len_b == 0) {
/* Here, 'b' is empty, hence its complement is all possible code * points. So if the union includes the complement of 'b', it includes * everything, and we need not even look at 'a'. It's easiest to * create a new inversion list that matches everything. */ if (complement_b) { SV* everything = _add_range_to_invlist(NULL, 0, UV_MAX);
if (*output == NULL) { /* If the output didn't exist, just point it at the new list */ *output = everything; } else { /* Otherwise, replace its contents with the new list */ invlist_replace_list_destroys_src(*output, everything); SvREFCNT_dec_NN(everything); } return; }
/* Here, we don't want the complement of 'b', and since 'b' is empty, * the union will come entirely from 'a'. If 'a' is NULL or empty, the * output will be empty */ if (a == NULL || _invlist_len(a) == 0) { if (*output == NULL) { *output = _new_invlist(0); } else { invlist_clear(*output); } return; }
/* Here, 'a' is not empty, but 'b' is, so 'a' entirely determines the * union.
We can just return a copy of 'a' if '*output' doesn't point * to an existing list */ if (*output == NULL) { *output = invlist_clone(a); return; }
/* If the output is to overwrite 'a', we have a no-op, as it's * already in 'a' */ if (*output == a) { return; }
/* Here, '*output' is to be overwritten by 'a' */ u = invlist_clone(a); invlist_replace_list_destroys_src(*output, u); SvREFCNT_dec_NN(u); return; }
/* Here 'b' is not empty. See about 'a' */ if (a == NULL || ((len_a = _invlist_len(a)) == 0)) {
/* Here, 'a' is empty (and b is not). That means the union will come * entirely from 'b'. If '*output' is NULL, we can directly return a * clone of 'b'. Otherwise, we replace the contents of '*output' with * the clone */ SV ** dest = (*output == NULL) ? output : &u; *dest = invlist_clone(b); if (complement_b) { _invlist_invert(*dest); }
if (dest == &u) { invlist_replace_list_destroys_src(*output, u); SvREFCNT_dec_NN(u); } return; }
/* Here both lists exist and are non-empty */ array_a = invlist_array(a); array_b = invlist_array(b);
/* If we are to take the union of 'a' with the complement of 'b', set it * up so we are looking at b's complement. */ if (complement_b) {
/* To complement, we invert: if the first element is 0, remove it. To * do this, we just pretend the array starts one later */ if (array_b[0] == 0) { array_b++; len_b--; }
else { /* But if the first element is not zero, we pretend the list starts * at the 0 that is always stored immediately before the array. */ array_b--; len_b++; } }
/* Size the union for the worst case: that the sets are completely * disjoint */ u = _new_invlist(len_a + len_b);
/* Will contain U+0000 if either component does */ array_u = _invlist_array_init(u, ( len_a > 0 && array_a[0] == 0) || (len_b > 0 && array_b[0] == 0));
/* Go through each input list item by item, stopping when we have exhausted * one of them */ while (i_a < len_a && i_b < len_b) { UV cp; /* The element to potentially add to the union's array */ bool cp_in_set; /* is it in the input list's set or not */
/* We need to take one or the other of the two inputs for the union. * Since we are merging two sorted lists, we take the smaller of the * next items. In case of a tie, we take first the one that is in its * set. If we first took the one not in its set, it would decrement * the count, possibly to 0 which would cause it to be output as ending * the range, and the next time through we would take the same number, * and output it again as beginning the next range. By doing it the * opposite way, there is no possibility that the count will be * momentarily decremented to 0, and thus the two adjoining ranges will * be seamlessly merged. (In a tie where both are in the set or both * are not in the set, it doesn't matter which we take first.) */ if ( array_a[i_a] < array_b[i_b] || ( array_a[i_a] == array_b[i_b] && ELEMENT_RANGE_MATCHES_INVLIST(i_a))) { cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_a); cp = array_a[i_a++]; } else { cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_b); cp = array_b[i_b++]; }
/* Here, we have chosen which of the two inputs to look at. Only output * if the running count changes to/from 0, which marks the * beginning/end of a range that's in the set */ if (cp_in_set) { if (count == 0) { array_u[i_u++] = cp; } count++; } else { count--; if (count == 0) { array_u[i_u++] = cp; } } }
/* The loop above increments the index into exactly one of the input lists * each iteration, and ends when either index gets to its list end.
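* An illustrative walk-through (not from the original source): for
* a = { 1, 5 } (matching 1..4) and b = { 3, 10 } (matching 3..9), 'count'
* goes 0->1 at 1 (output), 1->2 at 3, and 2->1 at 5, at which point 'a'
* runs out and the loop stops; the fix-up below then drops 'count' to 0
* and copies b's remaining 10, giving the union { 1, 10 }, i.e. 1..9.
* In general the loop ends when one input list is exhausted.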
That * means the other index is lower than its end, and so something is * remaining in that one. We decrement 'count', as explained below, if * that list is in its set. (i_a and i_b each currently index the element * beyond the one we care about.) */ if ( (i_a != len_a && PREV_RANGE_MATCHES_INVLIST(i_a)) || (i_b != len_b && PREV_RANGE_MATCHES_INVLIST(i_b))) { count--; } /* Above we decremented 'count' if the list that had unexamined elements in * it was in its set. This has made it so that 'count' being non-zero * means there isn't anything left to output; and 'count' equal to 0 means * that what is left to output is precisely that which is left in the * non-exhausted input list. * * To see why, note first that the exhausted input obviously has nothing * left to add to the union. If it was in its set at its end, that means * the set extends from here to the platform's infinity, and hence so does * the union and the non-exhausted set is irrelevant. The exhausted set * also contributed 1 to 'count'. If 'count' was 2, it got decremented to * 1, but if it was 1, the non-exhausted set wasn't in its set, and so * 'count' remains at 1. This is consistent with the decremented 'count' * != 0 meaning there's nothing left to add to the union. * * But if the exhausted input wasn't in its set, it contributed 0 to * 'count', and the rest of the union will be whatever the other input is. * If 'count' was 0, neither list was in its set, and 'count' remains 0; * otherwise it gets decremented to 0. This is consistent with 'count' * == 0 meaning the remainder of the union is whatever is left in the * non-exhausted list. */ if (count != 0) { len_u = i_u; } else { IV copy_count = len_a - i_a; if (copy_count > 0) { /* The non-exhausted input is 'a' */ Copy(array_a + i_a, array_u + i_u, copy_count, UV); } else { /* The non-exhausted input is b */ copy_count = len_b - i_b; Copy(array_b + i_b, array_u + i_u, copy_count, UV); } len_u = i_u + copy_count; } /* Set the result to the final length, which can change the pointer to * array_u, so re-find it. (Note that it is unlikely that this will * change, as we are shrinking the space, not enlarging it) */ if (len_u != _invlist_len(u)) { invlist_set_len(u, len_u, *get_invlist_offset_addr(u)); invlist_trim(u); array_u = invlist_array(u); } if (*output == NULL) { /* Simply return the new inversion list */ *output = u; } else { /* Otherwise, overwrite the inversion list that was in '*output'. We * could instead free '*output', and then set it to 'u', but experience * has shown [perl #127392] that if the input is a mortal, we can get a * huge build-up of these during regex compilation before they get * freed. */ invlist_replace_list_destroys_src(*output, u); SvREFCNT_dec_NN(u); } return; } void Perl__invlist_intersection_maybe_complement_2nd(pTHX_ SV* const a, SV* const b, const bool complement_b, SV** i) { /* Take the intersection of two inversion lists and point '*i' to it. On * input, '*i' MUST POINT TO NULL OR TO AN SV* INVERSION LIST (possibly * even 'a' or 'b'). If to an inversion list, the contents of the original * list will be replaced by the intersection. The first list, 'a', may be * NULL, in which case '*i' will be an empty list. If 'complement_b' is * TRUE, the result will be the intersection of 'a' and the complement (or * inversion) of 'b' instead of 'b' directly. * * The basis for this comes from "Unicode Demystified" Chapter 13 by * Richard Gillam, published by Addison-Wesley, and explained at some * length there. 
The preface says to incorporate its examples into your * code at your own risk. In fact, it had bugs. * * The algorithm is like a merge sort, and is essentially the same as the * union above */
const UV* array_a; /* a's array */ const UV* array_b; UV len_a; /* length of a's array */ UV len_b; SV* r; /* the resulting intersection */ UV* array_r; UV len_r = 0; UV i_a = 0; /* current index into a's array */ UV i_b = 0; UV i_r = 0;
/* running count of how many of the two inputs are positioned at ranges * that are in their sets. As explained in the algorithm source book, * items are stopped accumulating and are output when the count changes * to/from 2. The count is incremented when we start a range that's in an * input's set, and decremented when we start a range that's not in a set. * Only when it is 2 are we in the intersection. */ UV count = 0;
PERL_ARGS_ASSERT__INVLIST_INTERSECTION_MAYBE_COMPLEMENT_2ND; assert(a != b); assert(*i == NULL || SvTYPE(*i) == SVt_INVLIST);
/* Special case if either one is empty */ len_a = (a == NULL) ? 0 : _invlist_len(a); if ((len_a == 0) || ((len_b = _invlist_len(b)) == 0)) { if (len_a != 0 && complement_b) {
/* Here, 'a' is not empty, therefore from the enclosing 'if', 'b' * must be empty. Here, also we are using 'b's complement, which * hence must be every possible code point. Thus the intersection * is simply 'a'. */ if (*i == a) { /* No-op */ return; }
if (*i == NULL) { *i = invlist_clone(a); return; }
r = invlist_clone(a); invlist_replace_list_destroys_src(*i, r); SvREFCNT_dec_NN(r); return; }
/* Here, 'a' or 'b' is empty and not using the complement of 'b'. The * intersection must be empty */ if (*i == NULL) { *i = _new_invlist(0); return; }
invlist_clear(*i); return; }
/* Here both lists exist and are non-empty */ array_a = invlist_array(a); array_b = invlist_array(b);
/* If we are to take the intersection of 'a' with the complement of 'b', * set it up so we are looking at b's complement. */ if (complement_b) {
/* To complement, we invert: if the first element is 0, remove it. To * do this, we just pretend the array starts one later */ if (array_b[0] == 0) { array_b++; len_b--; }
else { /* But if the first element is not zero, we pretend the list starts * at the 0 that is always stored immediately before the array. */ array_b--; len_b++; } }
/* Size the intersection for the worst case: that the intersection ends up * fragmenting everything to be completely disjoint */ r = _new_invlist(len_a + len_b);
/* Will contain U+0000 iff both components do */ array_r = _invlist_array_init(r, len_a > 0 && array_a[0] == 0 && len_b > 0 && array_b[0] == 0);
/* Go through each list item by item, stopping when we have exhausted one of * them */ while (i_a < len_a && i_b < len_b) { UV cp; /* The element to potentially add to the intersection's array */ bool cp_in_set; /* Is it in the input list's set or not */
/* We need to take one or the other of the two inputs for the * intersection. Since we are merging two sorted lists, we take the * smaller of the next items. In case of a tie, we take first the one * that is not in its set (a difference from the union algorithm). If * we first took the one in its set, it would increment the count, * possibly to 2 which would cause it to be output as starting a range * in the intersection, and the next time through we would take that * same number, and output it again as ending the set. By doing the * opposite of this, there is no possibility that the count will be * momentarily incremented to 2.
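* An illustrative walk-through (not from the original source): for
* a = { 1, 10 } (matching 1..9) and b = { 5, 20 } (matching 5..19),
* 'count' goes 0->1 at 1, 1->2 at 5 (output), and 2->1 at 10 (output);
* 'a' is then exhausted and nothing further qualifies, giving the
* intersection { 5, 10 }, which matches 5..9.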
(In a tie where both are in the set or * both are not in the set, it doesn't matter which we take first.) */ if ( array_a[i_a] < array_b[i_b] || ( array_a[i_a] == array_b[i_b] && ! ELEMENT_RANGE_MATCHES_INVLIST(i_a))) { cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_a); cp = array_a[i_a++]; } else { cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_b); cp = array_b[i_b++]; }
/* Here, we have chosen which of the two inputs to look at. Only output * if the running count changes to/from 2, which marks the * beginning/end of a range that's in the intersection */ if (cp_in_set) { count++; if (count == 2) { array_r[i_r++] = cp; } } else { if (count == 2) { array_r[i_r++] = cp; } count--; } }
/* The loop above increments the index into exactly one of the input lists * each iteration, and ends when either index gets to its list end. That * means the other index is lower than its end, and so something is * remaining in that one. We increment 'count', as explained below, if the * exhausted list was in its set. (i_a and i_b each currently index the * element beyond the one we care about.) */ if ( (i_a == len_a && PREV_RANGE_MATCHES_INVLIST(i_a)) || (i_b == len_b && PREV_RANGE_MATCHES_INVLIST(i_b))) { count++; }
/* Above we incremented 'count' if the exhausted list was in its set. This * has made it so that 'count' being below 2 means there is nothing left to * output; otherwise what's left to add to the intersection is precisely * that which is left in the non-exhausted input list. * * To see why, note first that the exhausted input obviously has nothing * left to affect the intersection. If it was in its set at its end, that * means the set extends from here to the platform's infinity, and hence * anything in the non-exhausted's list will be in the intersection, and * anything not in it won't be. Hence, the rest of the intersection is * precisely what's in the non-exhausted list. The exhausted set also * contributed 1 to 'count', meaning 'count' was at least 1. Incrementing * it means 'count' is now at least 2. This is consistent with the * incremented 'count' being >= 2 meaning to add the non-exhausted list to * the intersection. * * But if the exhausted input wasn't in its set, it contributed 0 to * 'count', and the intersection can't include anything further; the * non-exhausted set is irrelevant. 'count' was at most 1, and doesn't get * incremented. This is consistent with 'count' being < 2 meaning nothing * further to add to the intersection. */ if (count < 2) { /* Nothing left to put in the intersection. */ len_r = i_r; }
else { /* copy the non-exhausted list, unchanged. */ IV copy_count = len_a - i_a; if (copy_count > 0) { /* a is the one with stuff left */ Copy(array_a + i_a, array_r + i_r, copy_count, UV); } else { /* b is the one with stuff left */ copy_count = len_b - i_b; Copy(array_b + i_b, array_r + i_r, copy_count, UV); } len_r = i_r + copy_count; }
/* Set the result to the final length, which can change the pointer to * array_r, so re-find it. (Note that it is unlikely that this will * change, as we are shrinking the space, not enlarging it) */ if (len_r != _invlist_len(r)) { invlist_set_len(r, len_r, *get_invlist_offset_addr(r)); invlist_trim(r); array_r = invlist_array(r); }
if (*i == NULL) { /* Simply return the calculated intersection */ *i = r; }
else { /* Otherwise, replace the existing inversion list in '*i'.
We could instead free '*i', and then set it to 'r', but experience has shown [perl #127392] that if the input is a mortal, we can get a huge build-up of these during regex compilation before they get freed. */ if (len_r) { invlist_replace_list_destroys_src(*i, r); } else { invlist_clear(*i); } SvREFCNT_dec_NN(r); } return; } SV* Perl__add_range_to_invlist(pTHX_ SV* invlist, UV start, UV end) { /* Add the range from 'start' to 'end' inclusive to the inversion list's * set. A pointer to the inversion list is returned. This may actually be * a new list, in which case the passed in one has been destroyed. The * passed-in inversion list can be NULL, in which case a new one is created * with just the one range in it. The new list is not necessarily * NUL-terminated. Space is not freed if the inversion list shrinks as a * result of this function. The gain would not be large, and in many * cases, this is called multiple times on a single inversion list, so * anything freed may almost immediately be needed again. * * This used to mostly call the 'union' routine, but that is much more * heavyweight than really needed for a single range addition */ UV* array; /* The array implementing the inversion list */ UV len; /* How many elements in 'array' */ SSize_t i_s; /* index into the invlist array where 'start' should go */ SSize_t i_e = 0; /* And the index where 'end' should go */ UV cur_highest; /* The highest code point in the inversion list upon entry to this function */ /* This range becomes the whole inversion list if none already existed */ if (invlist == NULL) { invlist = _new_invlist(2); _append_range_to_invlist(invlist, start, end); return invlist; } /* Likewise, if the inversion list is currently empty */ len = _invlist_len(invlist); if (len == 0) { _append_range_to_invlist(invlist, start, end); return invlist; } /* Starting here, we have to know the internals of the list */ array = invlist_array(invlist); /* If the new range ends higher than the current highest ... */ cur_highest = invlist_highest(invlist); if (end > cur_highest) { /* If the whole range is higher, we can just append it */ if (start > cur_highest) { _append_range_to_invlist(invlist, start, end); return invlist; } /* Otherwise, add the portion that is higher ... */ _append_range_to_invlist(invlist, cur_highest + 1, end); /* ... and continue on below to handle the rest. As a result of the * above append, we know that the index of the end of the range is the * final even numbered one of the array. Recall that the final element * always starts a range that extends to infinity. If that range is in * the set (meaning the set goes from here to infinity), it will be an * even index, but if it isn't in the set, it's odd, and the final * range in the set is one less, which is even. */ if (end == UV_MAX) { i_e = len; } else { i_e = len - 2; } } /* We have dealt with appending, now see about prepending. If the new * range starts lower than the current lowest ... */ if (start < array[0]) { /* Adding something which has 0 in it is somewhat tricky, and uncommon. * Let the union code handle it, rather than having to know the * trickiness in two code places. 
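* For instance (an illustrative sketch, not from the original source):
* adding [0,3] to the list { 5, 10 } (matching 5..9) is routed through the
* union code and yields { 0, 4, 5, 10 }, matching 0..3 and 5..9.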
*/ if (UNLIKELY(start == 0)) { SV* range_invlist; range_invlist = _new_invlist(2); _append_range_to_invlist(range_invlist, start, end); _invlist_union(invlist, range_invlist, &invlist); SvREFCNT_dec_NN(range_invlist); return invlist; } /* If the whole new range comes before the first entry, and doesn't * extend it, we have to insert it as an additional range */ if (end < array[0] - 1) { i_s = i_e = -1; goto splice_in_new_range; } /* Here the new range adjoins the existing first range, extending it * downwards. */ array[0] = start; /* And continue on below to handle the rest. We know that the index of * the beginning of the range is the first one of the array */ i_s = 0; } else { /* Not prepending any part of the new range to the existing list. * Find where in the list it should go. This finds i_s, such that: * invlist[i_s] <= start < array[i_s+1] */ i_s = _invlist_search(invlist, start); } /* At this point, any extending before the beginning of the inversion list * and/or after the end has been done. This has made it so that, in the * code below, each endpoint of the new range is either in a range that is * in the set, or is in a gap between two ranges that are. This means we * don't have to worry about exceeding the array bounds. * * Find where in the list the new range ends (but we can skip this if we * have already determined what it is, or if it will be the same as i_s, * which we already have computed) */ if (i_e == 0) { i_e = (start == end) ? i_s : _invlist_search(invlist, end); } /* Here generally invlist[i_e] <= end < array[i_e+1]. But if invlist[i_e] * is a range that goes to infinity there is no element at invlist[i_e+1], * so only the first relation holds. */ if ( ! ELEMENT_RANGE_MATCHES_INVLIST(i_s)) { /* Here, the ranges on either side of the beginning of the new range * are in the set, and this range starts in the gap between them. * * The new range extends the range above it downwards if the new range * ends at or above that range's start */ const bool extends_the_range_above = ( end == UV_MAX || end + 1 >= array[i_s+1]); /* The new range extends the range below it upwards if it begins just * after where that range ends */ if (start == array[i_s]) { /* If the new range fills the entire gap between the other ranges, * they will get merged together. Other ranges may also get * merged, depending on how many of them the new range spans. In * the general case, we do the merge later, just once, after we * figure out how many to merge. But in the case where the new * range exactly spans just this one gap (possibly extending into * the one above), we do the merge here, and an early exit. This * is done here to avoid having to special case later. */ if (i_e - i_s <= 1) { /* If i_e - i_s == 1, it means that the new range terminates * within the range above, and hence 'extends_the_range_above' * must be true. (If the range above it extends to infinity, * 'i_s+2' will be above the array's limit, but 'len-i_s-2' * will be 0, so no harm done.) */ if (extends_the_range_above) { Move(array + i_s + 2, array + i_s, len - i_s - 2, UV); invlist_set_len(invlist, len - 2, *(get_invlist_offset_addr(invlist))); return invlist; } /* Here, i_e must == i_s. We keep them in sync, as they apply * to the same range, and below we are about to decrement i_s * */ i_e--; } /* Here, the new range is adjacent to the one below. (It may also * span beyond the range above, but that will get resolved later.) * Extend the range below to include this one. */ array[i_s] = (end == UV_MAX) ? 
UV_MAX : end + 1; i_s--; start = array[i_s]; } else if (extends_the_range_above) { /* Here the new range only extends the range above it, but not the * one below. It merges with the one above. Again, we keep i_e * and i_s in sync if they point to the same range */ if (i_e == i_s) { i_e++; } i_s++; array[i_s] = start; } } /* Here, we've dealt with the new range start extending any adjoining * existing ranges. * * If the new range extends to infinity, it is now the final one, * regardless of what was there before */ if (UNLIKELY(end == UV_MAX)) { invlist_set_len(invlist, i_s + 1, *(get_invlist_offset_addr(invlist))); return invlist; } /* If i_e started as == i_s, it has also been dealt with, * and been updated to the new i_s, which will fail the following if */ if (! ELEMENT_RANGE_MATCHES_INVLIST(i_e)) { /* Here, the ranges on either side of the end of the new range are in * the set, and this range ends in the gap between them. * * If this range is adjacent to (hence extends) the range above it, it * becomes part of that range; likewise if it extends the range below, * it becomes part of that range */ if (end + 1 == array[i_e+1]) { i_e++; array[i_e] = start; } else if (start <= array[i_e]) { array[i_e] = end + 1; i_e--; } } if (i_s == i_e) { /* If the range fits entirely in an existing range (as possibly already * extended above), it doesn't add anything new */ if (ELEMENT_RANGE_MATCHES_INVLIST(i_s)) { return invlist; } /* Here, no part of the range is in the list. Must add it. It will * occupy 2 more slots */ splice_in_new_range: invlist_extend(invlist, len + 2); array = invlist_array(invlist); /* Move the rest of the array down two slots. Don't include any * trailing NUL */ Move(array + i_e + 1, array + i_e + 3, len - i_e - 1, UV); /* Do the actual splice */ array[i_e+1] = start; array[i_e+2] = end + 1; invlist_set_len(invlist, len + 2, *(get_invlist_offset_addr(invlist))); return invlist; } /* Here the new range crossed the boundaries of a pre-existing range. The * code above has adjusted things so that both ends are in ranges that are * in the set. This means everything in between must also be in the set. * Just squash things together */ Move(array + i_e + 1, array + i_s + 1, len - i_e - 1, UV); invlist_set_len(invlist, len - i_e + i_s, *(get_invlist_offset_addr(invlist))); return invlist; } SV* Perl__setup_canned_invlist(pTHX_ const STRLEN size, const UV element0, UV** other_elements_ptr) { /* Create and return an inversion list whose contents are to be populated * by the caller. The caller gives the number of elements (in 'size') and * the very first element ('element0'). This function will set * '*other_elements_ptr' to an array of UVs, where the remaining elements * are to be placed. * * Obviously there is some trust involved that the caller will properly * fill in the other elements of the array. * * (The first element needs to be passed in, as the underlying code does * things differently depending on whether it is zero or non-zero) */ SV* invlist = _new_invlist(size); bool offset; PERL_ARGS_ASSERT__SETUP_CANNED_INVLIST; invlist = add_cp_to_invlist(invlist, element0); offset = *get_invlist_offset_addr(invlist); invlist_set_len(invlist, size, offset); *other_elements_ptr = invlist_array(invlist) + 1; return invlist; } #endif PERL_STATIC_INLINE SV* S_add_cp_to_invlist(pTHX_ SV* invlist, const UV cp) { return _add_range_to_invlist(invlist, cp, cp); } #ifndef PERL_IN_XSUB_RE void Perl__invlist_invert(pTHX_ SV* const invlist) { /* Complement the input inversion list. 
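* For example (an illustrative sketch, not from the original source): the
* list { 5, 10 }, matching 5..9, becomes { 0, 5, 10 }, matching 0..4 and
* 10..infinity; inverting again restores the original.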
This adds a 0 if the list didn't * have a zero; removes it otherwise. As described above, the data * structure is set up so that this is very efficient */ PERL_ARGS_ASSERT__INVLIST_INVERT; assert(! invlist_is_iterating(invlist));
/* The inverse of matching nothing is matching everything */ if (_invlist_len(invlist) == 0) { _append_range_to_invlist(invlist, 0, UV_MAX); return; }
*get_invlist_offset_addr(invlist) = ! *get_invlist_offset_addr(invlist); } #endif
PERL_STATIC_INLINE SV* S_invlist_clone(pTHX_ SV* const invlist) { /* Return a new inversion list that is a copy of the input one, which is * unchanged. The new list will not be mortal even if the old one was. */
/* Need to allocate extra space to accommodate Perl's addition of a * trailing NUL to SvPV's, since it thinks they are always strings */ SV* new_invlist = _new_invlist(_invlist_len(invlist) + 1); STRLEN physical_length = SvCUR(invlist); bool offset = *(get_invlist_offset_addr(invlist));
PERL_ARGS_ASSERT_INVLIST_CLONE; *(get_invlist_offset_addr(new_invlist)) = offset; invlist_set_len(new_invlist, _invlist_len(invlist), offset); Copy(SvPVX(invlist), SvPVX(new_invlist), physical_length, char); return new_invlist; }
PERL_STATIC_INLINE STRLEN* S_get_invlist_iter_addr(SV* invlist) { /* Return the address of the STRLEN that contains the current iteration * position */ PERL_ARGS_ASSERT_GET_INVLIST_ITER_ADDR; assert(SvTYPE(invlist) == SVt_INVLIST); return &(((XINVLIST*) SvANY(invlist))->iterator); }
PERL_STATIC_INLINE void S_invlist_iterinit(SV* invlist) /* Initialize iterator for invlist */ { PERL_ARGS_ASSERT_INVLIST_ITERINIT; *get_invlist_iter_addr(invlist) = 0; }
PERL_STATIC_INLINE void S_invlist_iterfinish(SV* invlist) { /* Terminate iterator for invlist. This is to catch development errors. * Any iteration that is interrupted before completed should call this * function. Functions that add code points anywhere else but to the end * of an inversion list assert that they are not in the middle of an * iteration. If they were, the addition would make the iteration * problematic: if the iteration hadn't reached the place where things * were being added, it would be ok */ PERL_ARGS_ASSERT_INVLIST_ITERFINISH; *get_invlist_iter_addr(invlist) = (STRLEN) UV_MAX; }
STATIC bool S_invlist_iternext(SV* invlist, UV* start, UV* end) { /* An C<invlist_iterinit> call on <invlist> must be used to set this up. * This call sets in <*start> and <*end>, the next range in <invlist>. * Returns <TRUE> if successful and the next call will return the next * range; <FALSE> if it was already at the end of the list. If the latter, * <*start> and <*end> are unchanged, and the next call to this function * will start over at the beginning of the list */ STRLEN* pos = get_invlist_iter_addr(invlist); UV len = _invlist_len(invlist); UV *array; PERL_ARGS_ASSERT_INVLIST_ITERNEXT;
if (*pos >= len) { *pos = (STRLEN) UV_MAX; /* Force iterinit() to be required next time */ return FALSE; }
array = invlist_array(invlist); *start = array[(*pos)++]; if (*pos >= len) { *end = UV_MAX; } else { *end = array[(*pos)++] - 1; } return TRUE; }
PERL_STATIC_INLINE UV S_invlist_highest(SV* const invlist) { /* Returns the highest code point that matches an inversion list. This API * has an ambiguity: it returns 0 both when the highest code point in the * list is actually 0 and when the list is empty.
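* (For example -- an illustrative sketch, not from the original source --
* the list { 0, 1 }, matching only U+0000, and the empty list both return
* 0, while { 5 }, matching 5..infinity, returns UV_MAX.)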
If this distinction matters to you, check * for emptiness before calling this function */ UV len = _invlist_len(invlist); UV *array; PERL_ARGS_ASSERT_INVLIST_HIGHEST; if (len == 0) { return 0; } array = invlist_array(invlist); /* The last element in the array in the inversion list always starts a * range that goes to infinity. That range may be for code points that are * matched in the inversion list, or it may be for ones that aren't * matched. In the latter case, the highest code point in the set is one * less than the beginning of this range; otherwise it is the final element * of this range: infinity */ return (ELEMENT_RANGE_MATCHES_INVLIST(len - 1)) ? UV_MAX : array[len - 1] - 1; } STATIC SV * S_invlist_contents(pTHX_ SV* const invlist, const bool traditional_style) { /* Get the contents of an inversion list into a string SV so that they can * be printed out. If 'traditional_style' is TRUE, it uses the format * traditionally done for debug tracing; otherwise it uses a format * suitable for just copying to the output, with blanks between ranges and * a dash between range components */ UV start, end; SV* output; const char intra_range_delimiter = (traditional_style ? '\t' : '-'); const char inter_range_delimiter = (traditional_style ? '\n' : ' '); if (traditional_style) { output = newSVpvs("\n"); } else { output = newSVpvs(""); } PERL_ARGS_ASSERT_INVLIST_CONTENTS; assert(! invlist_is_iterating(invlist)); invlist_iterinit(invlist); while (invlist_iternext(invlist, &start, &end)) { if (end == UV_MAX) { Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%cINFINITY%c", start, intra_range_delimiter, inter_range_delimiter); } else if (end != start) { Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%c%04" UVXf "%c", start, intra_range_delimiter, end, inter_range_delimiter); } else { Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%c", start, inter_range_delimiter); } } if (SvCUR(output) && ! traditional_style) {/* Get rid of trailing blank */ SvCUR_set(output, SvCUR(output) - 1); } return output; } #ifndef PERL_IN_XSUB_RE void Perl__invlist_dump(pTHX_ PerlIO *file, I32 level, const char * const indent, SV* const invlist) { /* Designed to be called only by do_sv_dump(). Dumps out the ranges of the * inversion list 'invlist' to 'file' at 'level' Each line is prefixed by * the string 'indent'. The output looks like this: [0] 0x000A .. 0x000D [2] 0x0085 [4] 0x2028 .. 0x2029 [6] 0x3104 .. INFINITY * This means that the first range of code points matched by the list are * 0xA through 0xD; the second range contains only the single code point * 0x85, etc. An inversion list is an array of UVs. Two array elements * are used to define each range (except if the final range extends to * infinity, only a single element is needed). The array index of the * first element for the corresponding range is given in brackets. */ UV start, end; STRLEN count = 0; PERL_ARGS_ASSERT__INVLIST_DUMP; if (invlist_is_iterating(invlist)) { Perl_dump_indent(aTHX_ level, file, "%sCan't dump inversion list because is in middle of iterating\n", indent); return; } invlist_iterinit(invlist); while (invlist_iternext(invlist, &start, &end)) { if (end == UV_MAX) { Perl_dump_indent(aTHX_ level, file, "%s[%" UVuf "] 0x%04" UVXf " .. INFINITY\n", indent, (UV)count, start); } else if (end != start) { Perl_dump_indent(aTHX_ level, file, "%s[%" UVuf "] 0x%04" UVXf " .. 
0x%04" UVXf "\n", indent, (UV)count, start, end); } else { Perl_dump_indent(aTHX_ level, file, "%s[%" UVuf "] 0x%04" UVXf "\n", indent, (UV)count, start); } count += 2; } } void Perl__load_PL_utf8_foldclosures (pTHX) { assert(! PL_utf8_foldclosures); /* If the folds haven't been read in, call a fold function * to force that */ if (! PL_utf8_tofold) { U8 dummy[UTF8_MAXBYTES_CASE+1]; const U8 hyphen[] = HYPHEN_UTF8; /* This string is just a short named one above \xff */ toFOLD_utf8_safe(hyphen, hyphen + sizeof(hyphen) - 1, dummy, NULL); assert(PL_utf8_tofold); /* Verify that worked */ } PL_utf8_foldclosures = _swash_inversion_hash(PL_utf8_tofold); } #endif #if defined(PERL_ARGS_ASSERT__INVLISTEQ) && !defined(PERL_IN_XSUB_RE) bool Perl__invlistEQ(pTHX_ SV* const a, SV* const b, const bool complement_b) { /* Return a boolean as to if the two passed in inversion lists are * identical. The final argument, if TRUE, says to take the complement of * the second inversion list before doing the comparison */ const UV* array_a = invlist_array(a); const UV* array_b = invlist_array(b); UV len_a = _invlist_len(a); UV len_b = _invlist_len(b); PERL_ARGS_ASSERT__INVLISTEQ; /* If are to compare 'a' with the complement of b, set it * up so are looking at b's complement. */ if (complement_b) { /* The complement of nothing is everything, so <a> would have to have * just one element, starting at zero (ending at infinity) */ if (len_b == 0) { return (len_a == 1 && array_a[0] == 0); } else if (array_b[0] == 0) { /* Otherwise, to complement, we invert. Here, the first element is * 0, just remove it. To do this, we just pretend the array starts * one later */ array_b++; len_b--; } else { /* But if the first element is not zero, we pretend the list starts * at the 0 that is always stored immediately before the array. */ array_b--; len_b++; } } return len_a == len_b && memEQ(array_a, array_b, len_a * sizeof(array_a[0])); } #endif /* * As best we can, determine the characters that can match the start of * the given EXACTF-ish node. * * Returns the invlist as a new SV*; it is the caller's responsibility to * call SvREFCNT_dec() when done with it. */ STATIC SV* S__make_exactf_invlist(pTHX_ RExC_state_t *pRExC_state, regnode *node) { const U8 * s = (U8*)STRING(node); SSize_t bytelen = STR_LEN(node); UV uc; /* Start out big enough for 2 separate code points */ SV* invlist = _new_invlist(4); PERL_ARGS_ASSERT__MAKE_EXACTF_INVLIST; if (! UTF) { uc = *s; /* We punt and assume can match anything if the node begins * with a multi-character fold. Things are complicated. For * example, /ffi/i could match any of: * "\N{LATIN SMALL LIGATURE FFI}" * "\N{LATIN SMALL LIGATURE FF}I" * "F\N{LATIN SMALL LIGATURE FI}" * plus several other things; and making sure we have all the * possibilities is hard. */ if (is_MULTI_CHAR_FOLD_latin1_safe(s, s + bytelen)) { invlist = _add_range_to_invlist(invlist, 0, UV_MAX); } else { /* Any Latin1 range character can potentially match any * other depending on the locale */ if (OP(node) == EXACTFL) { _invlist_union(invlist, PL_Latin1, &invlist); } else { /* But otherwise, it matches at least itself. We can * quickly tell if it has a distinct fold, and if so, * it matches that as well */ invlist = add_cp_to_invlist(invlist, uc); if (IS_IN_SOME_FOLD_L1(uc)) invlist = add_cp_to_invlist(invlist, PL_fold_latin1[uc]); } /* Some characters match above-Latin1 ones under /i. This * is true of EXACTFL ones when the locale is UTF-8 */ if (HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(uc) && (! 
isASCII(uc) || (OP(node) != EXACTFA && OP(node) != EXACTFA_NO_TRIE))) { add_above_Latin1_folds(pRExC_state, (U8) uc, &invlist); } } }
else { /* Pattern is UTF-8 */ U8 folded[UTF8_MAX_FOLD_CHAR_EXPAND * UTF8_MAXBYTES_CASE + 1] = { '\0' }; STRLEN foldlen = UTF8SKIP(s); const U8* e = s + bytelen; SV** listp; uc = utf8_to_uvchr_buf(s, s + bytelen, NULL);
/* The only code points that aren't folded in a UTF EXACTFish * node are the problematic ones in EXACTFL nodes */ if (OP(node) == EXACTFL && is_PROBLEMATIC_LOCALE_FOLDEDS_START_cp(uc)) {
/* We need to check for the possibility that this EXACTFL * node begins with a multi-char fold. Therefore we fold * the first few characters of it so that we can make that * check */ U8 *d = folded; int i;
for (i = 0; i < UTF8_MAX_FOLD_CHAR_EXPAND && s < e; i++) { if (isASCII(*s)) { *(d++) = (U8) toFOLD(*s); s++; } else { STRLEN len; toFOLD_utf8_safe(s, e, d, &len); d += len; s += UTF8SKIP(s); } }
/* And set up so the code below looks in this folded * buffer instead of the node's string */ e = d; foldlen = UTF8SKIP(folded); s = folded; }
/* When we reach here 's' points to the fold of the first * character(s) of the node; and 'e' points to far enough along * the folded string to be just past any possible multi-char * fold. 'foldlen' is the length in bytes of the first * character in 's' * * Unlike the non-UTF-8 case, the macro for determining if a * string is a multi-char fold requires all the characters to * already be folded. This is because of all the complications * if not. Note that they are folded anyway, except in EXACTFL * nodes. Like the non-UTF case above, we punt if the node * begins with a multi-char fold */ if (is_MULTI_CHAR_FOLD_utf8_safe(s, e)) { invlist = _add_range_to_invlist(invlist, 0, UV_MAX); }
else { /* Single char fold */
/* It matches all the things that fold to it, which are * found in PL_utf8_foldclosures (including itself) */ invlist = add_cp_to_invlist(invlist, uc); if (! PL_utf8_foldclosures) _load_PL_utf8_foldclosures();
if ((listp = hv_fetch(PL_utf8_foldclosures, (char *) s, foldlen, FALSE))) { AV* list = (AV*) *listp; IV k; for (k = 0; k <= av_tindex_skip_len_mg(list); k++) { SV** c_p = av_fetch(list, k, FALSE); UV c; assert(c_p); c = SvUV(*c_p);
/* /aa doesn't allow folds between ASCII and non- */ if ((OP(node) == EXACTFA || OP(node) == EXACTFA_NO_TRIE) && isASCII(c) != isASCII(uc)) { continue; }
invlist = add_cp_to_invlist(invlist, c); } } } }
return invlist; }
#undef HEADER_LENGTH #undef TO_INTERNAL_SIZE #undef FROM_INTERNAL_SIZE #undef INVLIST_VERSION_ID
/* End of inversion list object */
STATIC void S_parse_lparen_question_flags(pTHX_ RExC_state_t *pRExC_state) { /* This parses the flags that are in either the '(?foo)' or '(?foo:bar)' * constructs, and updates RExC_flags with them. On input, RExC_parse * should point to the first flag; it is updated on output to point to the * final ')' or ':'. There needs to be at least one flag, or this will * abort */
/* for (?g), (?gc), and (?o) warnings; warning about (?c) will warn about (?g) -- japhy */ #define WASTED_O 0x01 #define WASTED_G 0x02 #define WASTED_C 0x04 #define WASTED_GC (WASTED_G|WASTED_C) I32 wastedflags = 0x00; U32 posflags = 0, negflags = 0; U32 *flagsp = &posflags; char has_charset_modifier = '\0'; regex_charset cs; bool has_use_defaults = FALSE; const char* const seqstart = RExC_parse - 1; /* Point to the '?'
*/ int x_mod_count = 0; PERL_ARGS_ASSERT_PARSE_LPAREN_QUESTION_FLAGS; /* '^' as an initial flag sets certain defaults */ if (UCHARAT(RExC_parse) == '^') { RExC_parse++; has_use_defaults = TRUE; STD_PMMOD_FLAGS_CLEAR(&RExC_flags); set_regex_charset(&RExC_flags, (RExC_utf8 || RExC_uni_semantics) ? REGEX_UNICODE_CHARSET : REGEX_DEPENDS_CHARSET); } cs = get_regex_charset(RExC_flags); if (cs == REGEX_DEPENDS_CHARSET && (RExC_utf8 || RExC_uni_semantics)) { cs = REGEX_UNICODE_CHARSET; } while (RExC_parse < RExC_end) { /* && strchr("iogcmsx", *RExC_parse) */ /* (?g), (?gc) and (?o) are useless here and must be globally applied -- japhy */ switch (*RExC_parse) { /* Code for the imsxn flags */ CASE_STD_PMMOD_FLAGS_PARSE_SET(flagsp, x_mod_count); case LOCALE_PAT_MOD: if (has_charset_modifier) { goto excess_modifier; } else if (flagsp == &negflags) { goto neg_modifier; } cs = REGEX_LOCALE_CHARSET; has_charset_modifier = LOCALE_PAT_MOD; break; case UNICODE_PAT_MOD: if (has_charset_modifier) { goto excess_modifier; } else if (flagsp == &negflags) { goto neg_modifier; } cs = REGEX_UNICODE_CHARSET; has_charset_modifier = UNICODE_PAT_MOD; break; case ASCII_RESTRICT_PAT_MOD: if (flagsp == &negflags) { goto neg_modifier; } if (has_charset_modifier) { if (cs != REGEX_ASCII_RESTRICTED_CHARSET) { goto excess_modifier; } /* Doubled modifier implies more restricted */ cs = REGEX_ASCII_MORE_RESTRICTED_CHARSET; } else { cs = REGEX_ASCII_RESTRICTED_CHARSET; } has_charset_modifier = ASCII_RESTRICT_PAT_MOD; break; case DEPENDS_PAT_MOD: if (has_use_defaults) { goto fail_modifiers; } else if (flagsp == &negflags) { goto neg_modifier; } else if (has_charset_modifier) { goto excess_modifier; } /* The dual charset means unicode semantics if the * pattern (or target, not known until runtime) are * utf8, or something in the pattern indicates unicode * semantics */ cs = (RExC_utf8 || RExC_uni_semantics) ? REGEX_UNICODE_CHARSET : REGEX_DEPENDS_CHARSET; has_charset_modifier = DEPENDS_PAT_MOD; break; excess_modifier: RExC_parse++; if (has_charset_modifier == ASCII_RESTRICT_PAT_MOD) { vFAIL2("Regexp modifier \"%c\" may appear a maximum of twice", ASCII_RESTRICT_PAT_MOD); } else if (has_charset_modifier == *(RExC_parse - 1)) { vFAIL2("Regexp modifier \"%c\" may not appear twice", *(RExC_parse - 1)); } else { vFAIL3("Regexp modifiers \"%c\" and \"%c\" are mutually exclusive", has_charset_modifier, *(RExC_parse - 1)); } NOT_REACHED; /*NOTREACHED*/ neg_modifier: RExC_parse++; vFAIL2("Regexp modifier \"%c\" may not appear after the \"-\"", *(RExC_parse - 1)); NOT_REACHED; /*NOTREACHED*/ case ONCE_PAT_MOD: /* 'o' */ case GLOBAL_PAT_MOD: /* 'g' */ if (PASS2 && ckWARN(WARN_REGEXP)) { const I32 wflagbit = *RExC_parse == 'o' ? WASTED_O : WASTED_G; if (! (wastedflags & wflagbit) ) { wastedflags |= wflagbit; /* diag_listed_as: Useless (?-%s) - don't use /%s modifier in regex; marked by <-- HERE in m/%s/ */ vWARN5( RExC_parse + 1, "Useless (%s%c) - %suse /%c modifier", flagsp == &negflags ? "?-" : "?", *RExC_parse, flagsp == &negflags ? "don't " : "", *RExC_parse ); } } break; case CONTINUE_PAT_MOD: /* 'c' */ if (PASS2 && ckWARN(WARN_REGEXP)) { if (! (wastedflags & WASTED_C) ) { wastedflags |= WASTED_GC; /* diag_listed_as: Useless (?-%s) - don't use /%s modifier in regex; marked by <-- HERE in m/%s/ */ vWARN3( RExC_parse + 1, "Useless (%sc) - %suse /gc modifier", flagsp == &negflags ? "?-" : "?", flagsp == &negflags ? 
"don't " : "" ); } } break; case KEEPCOPY_PAT_MOD: /* 'p' */ if (flagsp == &negflags) { if (PASS2) ckWARNreg(RExC_parse + 1,"Useless use of (?-p)"); } else { *flagsp |= RXf_PMf_KEEPCOPY; } break; case '-': /* A flag is a default iff it is following a minus, so * if there is a minus, it means will be trying to * re-specify a default which is an error */ if (has_use_defaults || flagsp == &negflags) { goto fail_modifiers; } flagsp = &negflags; wastedflags = 0; /* reset so (?g-c) warns twice */ x_mod_count = 0; break; case ':': case ')': if ((posflags & (RXf_PMf_EXTENDED|RXf_PMf_EXTENDED_MORE)) == RXf_PMf_EXTENDED) { negflags |= RXf_PMf_EXTENDED_MORE; } RExC_flags |= posflags; if (negflags & RXf_PMf_EXTENDED) { negflags |= RXf_PMf_EXTENDED_MORE; } RExC_flags &= ~negflags; set_regex_charset(&RExC_flags, cs); return; default: fail_modifiers: RExC_parse += SKIP_IF_CHAR(RExC_parse); /* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */ vFAIL2utf8f("Sequence (%" UTF8f "...) not recognized", UTF8fARG(UTF, RExC_parse-seqstart, seqstart)); NOT_REACHED; /*NOTREACHED*/ } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; } vFAIL("Sequence (?... not terminated"); } /* - reg - regular expression, i.e. main body or parenthesized thing * * Caller must absorb opening parenthesis. * * Combining parenthesis handling with the base level of regular expression * is a trifle forced, but the need to tie the tails of the branches to what * follows makes it hard to avoid. */ #define REGTAIL(x,y,z) regtail((x),(y),(z),depth+1) #ifdef DEBUGGING #define REGTAIL_STUDY(x,y,z) regtail_study((x),(y),(z),depth+1) #else #define REGTAIL_STUDY(x,y,z) regtail((x),(y),(z),depth+1) #endif PERL_STATIC_INLINE regnode * S_handle_named_backref(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, char * parse_start, char ch ) { regnode *ret; char* name_start = RExC_parse; U32 num = 0; SV *sv_dat = reg_scan_name(pRExC_state, SIZE_ONLY ? REG_RSN_RETURN_NULL : REG_RSN_RETURN_DATA); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_HANDLE_NAMED_BACKREF; if (RExC_parse == name_start || *RExC_parse != ch) { /* diag_listed_as: Sequence \%s... not terminated in regex; marked by <-- HERE in m/%s/ */ vFAIL2("Sequence %.3s... not terminated",parse_start); } if (!SIZE_ONLY) { num = add_data( pRExC_state, STR_WITH_LEN("S")); RExC_rxi->data->data[num]=(void*)sv_dat; SvREFCNT_inc_simple_void(sv_dat); } RExC_sawback = 1; ret = reganode(pRExC_state, ((! FOLD) ? NREF : (ASCII_FOLD_RESTRICTED) ? NREFFA : (AT_LEAST_UNI_SEMANTICS) ? NREFFU : (LOC) ? NREFFL : NREFF), num); *flagp |= HASWIDTH; Set_Node_Offset(ret, parse_start+1); Set_Node_Cur_Length(ret, parse_start); nextchar(pRExC_state); return ret; } /* Returns NULL, setting *flagp to TRYAGAIN at the end of (?) that only sets flags. Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8. Otherwise would only return NULL if regbranch() returns NULL, which cannot happen. */ STATIC regnode * S_reg(pTHX_ RExC_state_t *pRExC_state, I32 paren, I32 *flagp,U32 depth) /* paren: Parenthesized? 0=top; 1,2=inside '(': changed to letter. * 2 is like 1, but indicates that nextchar() has been called to advance * RExC_parse beyond the '('. Things like '(?' are indivisible tokens, and * this flag alerts us to the need to check for that */ { regnode *ret; /* Will be the head of the group. 
*/ regnode *br; regnode *lastbr; regnode *ender = NULL; I32 parno = 0; I32 flags; U32 oregflags = RExC_flags; bool have_branch = 0; bool is_open = 0; I32 freeze_paren = 0; I32 after_freeze = 0; I32 num; /* numeric backreferences */ char * parse_start = RExC_parse; /* MJD */ char * const oregcomp_parse = RExC_parse; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REG; DEBUG_PARSE("reg "); *flagp = 0; /* Tentatively. */ /* Having this true makes it feasible to have a lot fewer tests for the * parse pointer being in scope. For example, we can write * while(isFOO(*RExC_parse)) RExC_parse++; * instead of * while(RExC_parse < RExC_end && isFOO(*RExC_parse)) RExC_parse++; */ assert(*RExC_end == '\0'); /* Make an OPEN node, if parenthesized. */ if (paren) { /* Under /x, space and comments can be gobbled up between the '(' and * here (if paren ==2). The forms '(*VERB' and '(?...' disallow such * intervening space, as the sequence is a token, and a token should be * indivisible */ bool has_intervening_patws = paren == 2 && *(RExC_parse - 1) != '('; if (RExC_parse >= RExC_end) { vFAIL("Unmatched ("); } if ( *RExC_parse == '*') { /* (*VERB:ARG) */ char *start_verb = RExC_parse + 1; STRLEN verb_len; char *start_arg = NULL; unsigned char op = 0; int arg_required = 0; int internal_argval = -1; /* if >-1 we are not allowed an argument*/ if (has_intervening_patws) { RExC_parse++; /* past the '*' */ vFAIL("In '(*VERB...)', the '(' and '*' must be adjacent"); } while (RExC_parse < RExC_end && *RExC_parse != ')' ) { if ( *RExC_parse == ':' ) { start_arg = RExC_parse + 1; break; } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; } verb_len = RExC_parse - start_verb; if ( start_arg ) { if (RExC_parse >= RExC_end) { goto unterminated_verb_pattern; } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; while ( RExC_parse < RExC_end && *RExC_parse != ')' ) RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; if ( RExC_parse >= RExC_end || *RExC_parse != ')' ) unterminated_verb_pattern: vFAIL("Unterminated verb pattern argument"); if ( RExC_parse == start_arg ) start_arg = NULL; } else { if ( RExC_parse >= RExC_end || *RExC_parse != ')' ) vFAIL("Unterminated verb pattern"); } /* Here, we know that RExC_parse < RExC_end */ switch ( *start_verb ) { case 'A': /* (*ACCEPT) */ if ( memEQs(start_verb,verb_len,"ACCEPT") ) { op = ACCEPT; internal_argval = RExC_nestroot; } break; case 'C': /* (*COMMIT) */ if ( memEQs(start_verb,verb_len,"COMMIT") ) op = COMMIT; break; case 'F': /* (*FAIL) */ if ( verb_len==1 || memEQs(start_verb,verb_len,"FAIL") ) { op = OPFAIL; } break; case ':': /* (*:NAME) */ case 'M': /* (*MARK:NAME) */ if ( verb_len==0 || memEQs(start_verb,verb_len,"MARK") ) { op = MARKPOINT; arg_required = 1; } break; case 'P': /* (*PRUNE) */ if ( memEQs(start_verb,verb_len,"PRUNE") ) op = PRUNE; break; case 'S': /* (*SKIP) */ if ( memEQs(start_verb,verb_len,"SKIP") ) op = SKIP; break; case 'T': /* (*THEN) */ /* [19:06] <TimToady> :: is then */ if ( memEQs(start_verb,verb_len,"THEN") ) { op = CUTGROUP; RExC_seen |= REG_CUTGROUP_SEEN; } break; } if ( ! op ) { RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; vFAIL2utf8f( "Unknown verb pattern '%" UTF8f "'", UTF8fARG(UTF, verb_len, start_verb)); } if ( arg_required && !start_arg ) { vFAIL3("Verb pattern '%.*s' has a mandatory argument", verb_len, start_verb); } if (internal_argval == -1) { ret = reganode(pRExC_state, op, 0); } else { ret = reg2Lanode(pRExC_state, op, 0, internal_argval); } RExC_seen |= REG_VERBARG_SEEN; if ( ! 
SIZE_ONLY ) { if (start_arg) { SV *sv = newSVpvn( start_arg, RExC_parse - start_arg); ARG(ret) = add_data( pRExC_state, STR_WITH_LEN("S")); RExC_rxi->data->data[ARG(ret)]=(void*)sv; ret->flags = 1; } else { ret->flags = 0; } if ( internal_argval != -1 ) ARG2L_SET(ret, internal_argval); } nextchar(pRExC_state); return ret; } else if (*RExC_parse == '?') { /* (?...) */ bool is_logical = 0; const char * const seqstart = RExC_parse; const char * endptr; if (has_intervening_patws) { RExC_parse++; vFAIL("In '(?...)', the '(' and '?' must be adjacent"); } RExC_parse++; /* past the '?' */ paren = *RExC_parse; /* might be a trailing NUL, if not well-formed */ RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; if (RExC_parse > RExC_end) { paren = '\0'; } ret = NULL; /* For look-ahead/behind. */ switch (paren) { case 'P': /* (?P...) variants for those used to PCRE/Python */ paren = *RExC_parse; if ( paren == '<') { /* (?P<...>) named capture */ RExC_parse++; if (RExC_parse >= RExC_end) { vFAIL("Sequence (?P<... not terminated"); } goto named_capture; } else if (paren == '>') { /* (?P>name) named recursion */ RExC_parse++; if (RExC_parse >= RExC_end) { vFAIL("Sequence (?P>... not terminated"); } goto named_recursion; } else if (paren == '=') { /* (?P=...) named backref */ RExC_parse++; return handle_named_backref(pRExC_state, flagp, parse_start, ')'); } RExC_parse += SKIP_IF_CHAR(RExC_parse); /* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */ vFAIL3("Sequence (%.*s...) not recognized", RExC_parse-seqstart, seqstart); NOT_REACHED; /*NOTREACHED*/ case '<': /* (?<...) */ if (*RExC_parse == '!') paren = ','; else if (*RExC_parse != '=') named_capture: { /* (?<...>) */ char *name_start; SV *svname; paren= '>'; /* FALLTHROUGH */ case '\'': /* (?'...') */ name_start = RExC_parse; svname = reg_scan_name(pRExC_state, SIZE_ONLY /* reverse test from the others */ ? REG_RSN_RETURN_NAME : REG_RSN_RETURN_NULL); if ( RExC_parse == name_start || RExC_parse >= RExC_end || *RExC_parse != paren) { vFAIL2("Sequence (?%c... not terminated", paren=='>' ? '<' : paren); } if (SIZE_ONLY) { HE *he_str; SV *sv_dat = NULL; if (!svname) /* shouldn't happen */ Perl_croak(aTHX_ "panic: reg_scan_name returned NULL"); if (!RExC_paren_names) { RExC_paren_names= newHV(); sv_2mortal(MUTABLE_SV(RExC_paren_names)); #ifdef DEBUGGING RExC_paren_name_list= newAV(); sv_2mortal(MUTABLE_SV(RExC_paren_name_list)); #endif } he_str = hv_fetch_ent( RExC_paren_names, svname, 1, 0 ); if ( he_str ) sv_dat = HeVAL(he_str); if ( ! sv_dat ) { /* croak baby croak */ Perl_croak(aTHX_ "panic: paren_name hash element allocation failed"); } else if ( SvPOK(sv_dat) ) { /* (?|...) can mean we have dupes so scan to check its already been stored. 
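
// --- Illustrative sketch, not part of the original source ---
// The (*VERB:ARG) recognizer above switches on the verb's first byte
// and then confirms the full spelling with a length-aware comparison
// (memEQs).  This toy version uses plain strlen/memcmp; the opcode
// names are hypothetical stand-ins.
#include <string.h>

enum toy_op { TOY_NONE, TOY_ACCEPT, TOY_COMMIT, TOY_FAIL, TOY_PRUNE };

static int toy_verb_eq(const char *verb, size_t len, const char *want)
{
    return len == strlen(want) && memcmp(verb, want, len) == 0;
}

static enum toy_op toy_verb_op(const char *verb, size_t len)
{
    switch (verb[0]) {
        case 'A': if (toy_verb_eq(verb, len, "ACCEPT")) return TOY_ACCEPT;
                  break;
        case 'C': if (toy_verb_eq(verb, len, "COMMIT")) return TOY_COMMIT;
                  break;
        case 'F': // (*F) is accepted as shorthand for (*FAIL)
                  if (len == 1 || toy_verb_eq(verb, len, "FAIL"))
                      return TOY_FAIL;
                  break;
        case 'P': if (toy_verb_eq(verb, len, "PRUNE")) return TOY_PRUNE;
                  break;
    }
    return TOY_NONE;   // caller reports "Unknown verb pattern"
}

int main(void)
{
    return toy_verb_op("FAIL", 4) == TOY_FAIL ? 0 : 1;
}
// --- end sketch ---
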
Maybe a flag indicating we are inside such a construct would be useful, but the arrays are likely to be quite small, so for now we punt -- dmq */ IV count = SvIV(sv_dat); I32 *pv = (I32*)SvPVX(sv_dat); IV i; for ( i = 0 ; i < count ; i++ ) { if ( pv[i] == RExC_npar ) { count = 0; break; } } if ( count ) { pv = (I32*)SvGROW(sv_dat, SvCUR(sv_dat) + sizeof(I32)+1); SvCUR_set(sv_dat, SvCUR(sv_dat) + sizeof(I32)); pv[count] = RExC_npar; SvIV_set(sv_dat, SvIVX(sv_dat) + 1); } } else { (void)SvUPGRADE(sv_dat,SVt_PVNV); sv_setpvn(sv_dat, (char *)&(RExC_npar), sizeof(I32)); SvIOK_on(sv_dat); SvIV_set(sv_dat, 1); } #ifdef DEBUGGING /* Yes this does cause a memory leak in debugging Perls * */ if (!av_store(RExC_paren_name_list, RExC_npar, SvREFCNT_inc(svname))) SvREFCNT_dec_NN(svname); #endif /*sv_dump(sv_dat);*/ } nextchar(pRExC_state); paren = 1; goto capturing_parens; } RExC_seen |= REG_LOOKBEHIND_SEEN; RExC_in_lookbehind++; RExC_parse++; if (RExC_parse >= RExC_end) { vFAIL("Sequence (?... not terminated"); } /* FALLTHROUGH */ case '=': /* (?=...) */ RExC_seen_zerolen++; break; case '!': /* (?!...) */ RExC_seen_zerolen++; /* check if we're really just a "FAIL" assertion */ skip_to_be_ignored_text(pRExC_state, &RExC_parse, FALSE /* Don't force to /x */ ); if (*RExC_parse == ')') { ret=reganode(pRExC_state, OPFAIL, 0); nextchar(pRExC_state); return ret; } break; case '|': /* (?|...) */ /* branch reset, behave like a (?:...) except that buffers in alternations share the same numbers */ paren = ':'; after_freeze = freeze_paren = RExC_npar; break; case ':': /* (?:...) */ case '>': /* (?>...) */ break; case '$': /* (?$...) */ case '@': /* (?@...) */ vFAIL2("Sequence (?%c...) not implemented", (int)paren); break; case '0' : /* (?0) */ case 'R' : /* (?R) */ if (RExC_parse == RExC_end || *RExC_parse != ')') FAIL("Sequence (?R) not terminated"); num = 0; RExC_seen |= REG_RECURSE_SEEN; *flagp |= POSTPONED; goto gen_recurse_regop; /*notreached*/ /* named and numeric backreferences */ case '&': /* (?&NAME) */ parse_start = RExC_parse - 1; named_recursion: { SV *sv_dat = reg_scan_name(pRExC_state, SIZE_ONLY ? REG_RSN_RETURN_NULL : REG_RSN_RETURN_DATA); num = sv_dat ? *((I32 *)SvPVX(sv_dat)) : 0; } if (RExC_parse >= RExC_end || *RExC_parse != ')') vFAIL("Sequence (?&... not terminated"); goto gen_recurse_regop; /* NOTREACHED */ case '+': if (!(RExC_parse[0] >= '1' && RExC_parse[0] <= '9')) { RExC_parse++; vFAIL("Illegal pattern"); } goto parse_recursion; /* NOTREACHED*/ case '-': /* (?-1) */ if (!(RExC_parse[0] >= '1' && RExC_parse[0] <= '9')) { RExC_parse--; /* rewind to let it be handled later */ goto parse_flags; } /* FALLTHROUGH */ case '1': case '2': case '3': case '4': /* (?1) */ case '5': case '6': case '7': case '8': case '9': RExC_parse = (char *) seqstart + 1; /* Point to the digit */ parse_recursion: { bool is_neg = FALSE; UV unum; parse_start = RExC_parse - 1; /* MJD */ if (*RExC_parse == '-') { RExC_parse++; is_neg = TRUE; } if (grok_atoUV(RExC_parse, &unum, &endptr) && unum <= I32_MAX ) { num = (I32)unum; RExC_parse = (char*)endptr; } else num = I32_MAX; if (is_neg) { /* Some limit for num? */ num = -num; } } if (*RExC_parse!=')') vFAIL("Expecting close bracket"); gen_recurse_regop: if ( paren == '-' ) { /* Diagram of capture buffer numbering. 
Top line is the normal capture buffer numbers Bottom line is the negative indexing as from the X (the (?-2)) + 1 2 3 4 5 X 6 7 /(a(x)y)(a(b(c(?-2)d)e)f)(g(h))/ - 5 4 3 2 1 X x x */ num = RExC_npar + num; if (num < 1) { RExC_parse++; vFAIL("Reference to nonexistent group"); } } else if ( paren == '+' ) { num = RExC_npar + num - 1; } /* We keep track how many GOSUB items we have produced. To start off the ARG2L() of the GOSUB holds its "id", which is used later in conjunction with RExC_recurse to calculate the offset we need to jump for the GOSUB, which it will store in the final representation. We have to defer the actual calculation until much later as the regop may move. */ ret = reg2Lanode(pRExC_state, GOSUB, num, RExC_recurse_count); if (!SIZE_ONLY) { if (num > (I32)RExC_rx->nparens) { RExC_parse++; vFAIL("Reference to nonexistent group"); } RExC_recurse_count++; DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_ "%*s%*s Recurse #%" UVuf " to %" IVdf "\n", 22, "| |", (int)(depth * 2 + 1), "", (UV)ARG(ret), (IV)ARG2L(ret))); } RExC_seen |= REG_RECURSE_SEEN; Set_Node_Length(ret, 1 + regarglen[OP(ret)]); /* MJD */ Set_Node_Offset(ret, parse_start); /* MJD */ *flagp |= POSTPONED; assert(*RExC_parse == ')'); nextchar(pRExC_state); return ret; /* NOTREACHED */ case '?': /* (??...) */ is_logical = 1; if (*RExC_parse != '{') { RExC_parse += SKIP_IF_CHAR(RExC_parse); /* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */ vFAIL2utf8f( "Sequence (%" UTF8f "...) not recognized", UTF8fARG(UTF, RExC_parse-seqstart, seqstart)); NOT_REACHED; /*NOTREACHED*/ } *flagp |= POSTPONED; paren = '{'; RExC_parse++; /* FALLTHROUGH */ case '{': /* (?{...}) */ { U32 n = 0; struct reg_code_block *cb; RExC_seen_zerolen++; if ( !pRExC_state->code_blocks || pRExC_state->code_index >= pRExC_state->code_blocks->count || pRExC_state->code_blocks->cb[pRExC_state->code_index].start != (STRLEN)((RExC_parse -3 - (is_logical ? 1 : 0)) - RExC_start) ) { if (RExC_pm_flags & PMf_USE_RE_EVAL) FAIL("panic: Sequence (?{...}): no code block found\n"); FAIL("Eval-group not allowed at runtime, use re 'eval'"); } /* this is a pre-compiled code block (?{...}) */ cb = &pRExC_state->code_blocks->cb[pRExC_state->code_index]; RExC_parse = RExC_start + cb->end; if (!SIZE_ONLY) { OP *o = cb->block; if (cb->src_regex) { n = add_data(pRExC_state, STR_WITH_LEN("rl")); RExC_rxi->data->data[n] = (void*)SvREFCNT_inc((SV*)cb->src_regex); RExC_rxi->data->data[n+1] = (void*)o; } else { n = add_data(pRExC_state, (RExC_pm_flags & PMf_HAS_CV) ? "L" : "l", 1); RExC_rxi->data->data[n] = (void*)o; } } pRExC_state->code_index++; nextchar(pRExC_state); if (is_logical) { regnode *eval; ret = reg_node(pRExC_state, LOGICAL); eval = reg2Lanode(pRExC_state, EVAL, n, /* for later propagation into (??{}) * return value */ RExC_flags & RXf_PMf_COMPILETIME ); if (!SIZE_ONLY) { ret->flags = 2; } REGTAIL(pRExC_state, ret, eval); /* deal with the length of this later - MJD */ return ret; } ret = reg2Lanode(pRExC_state, EVAL, n, 0); Set_Node_Length(ret, RExC_parse - parse_start + 1); Set_Node_Offset(ret, parse_start); return ret; } case '(': /* (?(?{...})...) and (?(?=...)...) */ { int is_define= 0; const int DEFINE_len = sizeof("DEFINE") - 1; if (RExC_parse[0] == '?') { /* (?(?...)) */ if ( RExC_parse < RExC_end - 1 && ( RExC_parse[1] == '=' || RExC_parse[1] == '!' || RExC_parse[1] == '<' || RExC_parse[1] == '{') ) { /* Lookahead or eval. 
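
// --- Illustrative sketch, not part of the original source ---
// Resolving relative group references, as in the code above: at the
// point where "(?-N)" or "(?+N)" is seen, RExC_npar holds the number
// the *next* capture group would get.  "(?-N)" therefore resolves to
// npar - N and "(?+N)" to npar + N - 1.  Using the pattern from the
// diagram, /(a(x)y)(a(b(c(?-2)d)e)f)(g(h))/, npar is 6 at the "(?-2)".
#include <stdio.h>

// Returns the absolute group number, or 0 for "nonexistent group".
static int toy_resolve_relative(int npar, int sign, int n)
{
    int num = (sign < 0) ? npar - n : npar + n - 1;
    return num >= 1 ? num : 0;
}

int main(void)
{
    printf("%d\n", toy_resolve_relative(6, -1, 2));  // (?-2) -> group 4
    printf("%d\n", toy_resolve_relative(6, +1, 1));  // (?+1) -> group 6
    printf("%d\n", toy_resolve_relative(2, -1, 5));  // (?-5) -> 0, error
    return 0;
}
// --- end sketch ---
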
*/ I32 flag; regnode *tail; ret = reg_node(pRExC_state, LOGICAL); if (!SIZE_ONLY) ret->flags = 1; tail = reg(pRExC_state, 1, &flag, depth+1); if (flag & (RESTART_PASS1|NEED_UTF8)) { *flagp = flag & (RESTART_PASS1|NEED_UTF8); return NULL; } REGTAIL(pRExC_state, ret, tail); goto insert_if; } /* Fall through to ‘Unknown switch condition’ at the end of the if/else chain. */ } else if ( RExC_parse[0] == '<' /* (?(<NAME>)...) */ || RExC_parse[0] == '\'' ) /* (?('NAME')...) */ { char ch = RExC_parse[0] == '<' ? '>' : '\''; char *name_start= RExC_parse++; U32 num = 0; SV *sv_dat=reg_scan_name(pRExC_state, SIZE_ONLY ? REG_RSN_RETURN_NULL : REG_RSN_RETURN_DATA); if ( RExC_parse == name_start || RExC_parse >= RExC_end || *RExC_parse != ch) { vFAIL2("Sequence (?(%c... not terminated", (ch == '>' ? '<' : ch)); } RExC_parse++; if (!SIZE_ONLY) { num = add_data( pRExC_state, STR_WITH_LEN("S")); RExC_rxi->data->data[num]=(void*)sv_dat; SvREFCNT_inc_simple_void(sv_dat); } ret = reganode(pRExC_state,NGROUPP,num); goto insert_if_check_paren; } else if (memBEGINs(RExC_parse, (STRLEN) (RExC_end - RExC_parse), "DEFINE")) { ret = reganode(pRExC_state,DEFINEP,0); RExC_parse += DEFINE_len; is_define = 1; goto insert_if_check_paren; } else if (RExC_parse[0] == 'R') { RExC_parse++; /* parno == 0 => /(?(R)YES|NO)/ "in any form of recursion OR eval" * parno == 1 => /(?(R0)YES|NO)/ "in GOSUB (?0) / (?R)" * parno == 2 => /(?(R1)YES|NO)/ "in GOSUB (?1) (parno-1)" */ parno = 0; if (RExC_parse[0] == '0') { parno = 1; RExC_parse++; } else if (RExC_parse[0] >= '1' && RExC_parse[0] <= '9' ) { UV uv; if (grok_atoUV(RExC_parse, &uv, &endptr) && uv <= I32_MAX ) { parno = (I32)uv + 1; RExC_parse = (char*)endptr; } /* else "Switch condition not recognized" below */ } else if (RExC_parse[0] == '&') { SV *sv_dat; RExC_parse++; sv_dat = reg_scan_name(pRExC_state, SIZE_ONLY ? REG_RSN_RETURN_NULL : REG_RSN_RETURN_DATA); /* we should only have a false sv_dat when * SIZE_ONLY is true, and we always have false * sv_dat when SIZE_ONLY is true. * reg_scan_name() will VFAIL() if the name is * unknown when SIZE_ONLY is false, and otherwise * will return something, and when SIZE_ONLY is * true, reg_scan_name() just parses the string, * and doesnt return anything. (in theory) */ assert(SIZE_ONLY ? !sv_dat : !!sv_dat); if (sv_dat) parno = 1 + *((I32 *)SvPVX(sv_dat)); } ret = reganode(pRExC_state,INSUBP,parno); goto insert_if_check_paren; } else if (RExC_parse[0] >= '1' && RExC_parse[0] <= '9' ) { /* (?(1)...) */ char c; UV uv; if (grok_atoUV(RExC_parse, &uv, &endptr) && uv <= I32_MAX ) { parno = (I32)uv; RExC_parse = (char*)endptr; } else { vFAIL("panic: grok_atoUV returned FALSE"); } ret = reganode(pRExC_state, GROUPP, parno); insert_if_check_paren: if (UCHARAT(RExC_parse) != ')') { RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; vFAIL("Switch condition not recognized"); } nextchar(pRExC_state); insert_if: REGTAIL(pRExC_state, ret, reganode(pRExC_state, IFTHEN, 0)); br = regbranch(pRExC_state, &flags, 1,depth+1); if (br == NULL) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regbranch returned NULL, flags=%#" UVxf, (UV) flags); } else REGTAIL(pRExC_state, br, reganode(pRExC_state, LONGJMP, 0)); c = UCHARAT(RExC_parse); nextchar(pRExC_state); if (flags&HASWIDTH) *flagp |= HASWIDTH; if (c == '|') { if (is_define) vFAIL("(?(DEFINE)....) does not allow branches"); /* Fake one for optimizer. 
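
// --- Illustrative sketch, not part of the original source ---
// A condensed view of how the dispatch above classifies the condition
// in "(?(condition)yes|no)".  Each syntactic form selects a different
// regop; the enum names mirror the real ones, but the classifier
// itself is a simplification (it only peeks at leading characters).
#include <ctype.h>
#include <string.h>

enum toy_cond { TOY_LOOKAROUND,  // (?(?=...)), (?(?!...)), (?(?{...}))
                TOY_NGROUPP,     // (?(<name>)...) or (?('name')...)
                TOY_DEFINEP,     // (?(DEFINE)...)
                TOY_INSUBP,      // (?(R)...), (?(R1)...), (?(R&name)...)
                TOY_GROUPP,      // (?(1)...) numbered group test
                TOY_UNKNOWN };

static enum toy_cond toy_classify_cond(const char *p)
{
    if (p[0] == '?')                    return TOY_LOOKAROUND;
    if (p[0] == '<' || p[0] == '\'')    return TOY_NGROUPP;
    if (strncmp(p, "DEFINE", 6) == 0)   return TOY_DEFINEP;
    if (p[0] == 'R')                    return TOY_INSUBP;
    if (isdigit((unsigned char)p[0]))   return TOY_GROUPP;
    return TOY_UNKNOWN;  // "Switch condition not recognized"
}

int main(void)
{
    return toy_classify_cond("DEFINE") == TOY_DEFINEP ? 0 : 1;
}
// --- end sketch ---
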
*/ lastbr = reganode(pRExC_state, IFTHEN, 0); if (!regbranch(pRExC_state, &flags, 1,depth+1)) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regbranch returned NULL, flags=%#" UVxf, (UV) flags); } REGTAIL(pRExC_state, ret, lastbr); if (flags&HASWIDTH) *flagp |= HASWIDTH; c = UCHARAT(RExC_parse); nextchar(pRExC_state); } else lastbr = NULL; if (c != ')') { if (RExC_parse >= RExC_end) vFAIL("Switch (?(condition)... not terminated"); else vFAIL("Switch (?(condition)... contains too many branches"); } ender = reg_node(pRExC_state, TAIL); REGTAIL(pRExC_state, br, ender); if (lastbr) { REGTAIL(pRExC_state, lastbr, ender); REGTAIL(pRExC_state, NEXTOPER(NEXTOPER(lastbr)), ender); } else REGTAIL(pRExC_state, ret, ender); RExC_size++; /* XXX WHY do we need this?!! For large programs it seems to be required but I can't figure out why. -- dmq*/ return ret; } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; vFAIL("Unknown switch condition (?(...))"); } case '[': /* (?[ ... ]) */ return handle_regex_sets(pRExC_state, NULL, flagp, depth, oregcomp_parse); case 0: /* A NUL */ RExC_parse--; /* for vFAIL to print correctly */ vFAIL("Sequence (? incomplete"); break; default: /* e.g., (?i) */ RExC_parse = (char *) seqstart + 1; parse_flags: parse_lparen_question_flags(pRExC_state); if (UCHARAT(RExC_parse) != ':') { if (RExC_parse < RExC_end) nextchar(pRExC_state); *flagp = TRYAGAIN; return NULL; } paren = ':'; nextchar(pRExC_state); ret = NULL; goto parse_rest; } /* end switch */ } else if (!(RExC_flags & RXf_PMf_NOCAPTURE)) { /* (...) */ capturing_parens: parno = RExC_npar; RExC_npar++; ret = reganode(pRExC_state, OPEN, parno); if (!SIZE_ONLY ){ if (!RExC_nestroot) RExC_nestroot = parno; if (RExC_open_parens && !RExC_open_parens[parno]) { DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_ "%*s%*s Setting open paren #%" IVdf " to %d\n", 22, "| |", (int)(depth * 2 + 1), "", (IV)parno, REG_NODE_NUM(ret))); RExC_open_parens[parno]= ret; } } Set_Node_Length(ret, 1); /* MJD */ Set_Node_Offset(ret, RExC_parse); /* MJD */ is_open = 1; } else { /* with RXf_PMf_NOCAPTURE treat (...) as (?:...) */ paren = ':'; ret = NULL; } } else /* ! paren */ ret = NULL; parse_rest: /* Pick up the branches, linking them together. */ parse_start = RExC_parse; /* MJD */ br = regbranch(pRExC_state, &flags, 1,depth+1); /* branch_len = (paren != 0); */ if (br == NULL) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regbranch returned NULL, flags=%#" UVxf, (UV) flags); } if (*RExC_parse == '|') { if (!SIZE_ONLY && RExC_extralen) { reginsert(pRExC_state, BRANCHJ, br, depth+1); } else { /* MJD */ reginsert(pRExC_state, BRANCH, br, depth+1); Set_Node_Length(br, paren != 0); Set_Node_Offset_To_R(br-RExC_emit_start, parse_start-RExC_start); } have_branch = 1; if (SIZE_ONLY) RExC_extralen += 1; /* For BRANCHJ-BRANCH. */ } else if (paren == ':') { *flagp |= flags&SIMPLE; } if (is_open) { /* Starts with OPEN. */ REGTAIL(pRExC_state, ret, br); /* OPEN -> first. */ } else if (paren != '?') /* Not Conditional */ ret = br; *flagp |= flags & (SPSTART | HASWIDTH | POSTPONED); lastbr = br; while (*RExC_parse == '|') { if (!SIZE_ONLY && RExC_extralen) { ender = reganode(pRExC_state, LONGJMP,0); /* Append to the previous. */ REGTAIL(pRExC_state, NEXTOPER(NEXTOPER(lastbr)), ender); } if (SIZE_ONLY) RExC_extralen += 2; /* Account for LONGJMP. 
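
// --- Illustrative sketch, not part of the original source ---
// The alternation loop here stitches each new BRANCH onto the previous
// one with REGTAIL, keeping 'lastbr' as the chain's tail.  The same
// shape, reduced to a plain singly-linked list:
#include <stddef.h>

struct toy_branch { struct toy_branch *next; };

// Append 'br' after 'lastbr' (what REGTAIL does for BRANCH nodes) and
// return the new tail of the chain.
static struct toy_branch *
toy_link_branch(struct toy_branch *lastbr, struct toy_branch *br)
{
    if (lastbr)
        lastbr->next = br;   // BRANCH -> BRANCH
    br->next = NULL;
    return br;               // becomes 'lastbr' for the next '|'
}

int main(void)
{
    struct toy_branch a = {0}, b = {0};
    struct toy_branch *tail = toy_link_branch(NULL, &a);
    tail = toy_link_branch(tail, &b);   // second alternative after '|'
    return tail == &b ? 0 : 1;
}
// --- end sketch ---
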
*/ nextchar(pRExC_state); if (freeze_paren) { if (RExC_npar > after_freeze) after_freeze = RExC_npar; RExC_npar = freeze_paren; } br = regbranch(pRExC_state, &flags, 0, depth+1); if (br == NULL) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regbranch returned NULL, flags=%#" UVxf, (UV) flags); } REGTAIL(pRExC_state, lastbr, br); /* BRANCH -> BRANCH. */ lastbr = br; *flagp |= flags & (SPSTART | HASWIDTH | POSTPONED); } if (have_branch || paren != ':') { /* Make a closing node, and hook it on the end. */ switch (paren) { case ':': ender = reg_node(pRExC_state, TAIL); break; case 1: case 2: ender = reganode(pRExC_state, CLOSE, parno); if ( RExC_close_parens ) { DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_ "%*s%*s Setting close paren #%" IVdf " to %d\n", 22, "| |", (int)(depth * 2 + 1), "", (IV)parno, REG_NODE_NUM(ender))); RExC_close_parens[parno]= ender; if (RExC_nestroot == parno) RExC_nestroot = 0; } Set_Node_Offset(ender,RExC_parse+1); /* MJD */ Set_Node_Length(ender,1); /* MJD */ break; case '<': case ',': case '=': case '!': *flagp &= ~HASWIDTH; /* FALLTHROUGH */ case '>': ender = reg_node(pRExC_state, SUCCEED); break; case 0: ender = reg_node(pRExC_state, END); if (!SIZE_ONLY) { assert(!RExC_end_op); /* there can only be one! */ RExC_end_op = ender; if (RExC_close_parens) { DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_ "%*s%*s Setting close paren #0 (END) to %d\n", 22, "| |", (int)(depth * 2 + 1), "", REG_NODE_NUM(ender))); RExC_close_parens[0]= ender; } } break; } DEBUG_PARSE_r(if (!SIZE_ONLY) { DEBUG_PARSE_MSG("lsbr"); regprop(RExC_rx, RExC_mysv1, lastbr, NULL, pRExC_state); regprop(RExC_rx, RExC_mysv2, ender, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ tying lastbr %s (%" IVdf ") to ender %s (%" IVdf ") offset %" IVdf "\n", SvPV_nolen_const(RExC_mysv1), (IV)REG_NODE_NUM(lastbr), SvPV_nolen_const(RExC_mysv2), (IV)REG_NODE_NUM(ender), (IV)(ender - lastbr) ); }); REGTAIL(pRExC_state, lastbr, ender); if (have_branch && !SIZE_ONLY) { char is_nothing= 1; if (depth==1) RExC_seen |= REG_TOP_LEVEL_BRANCHES_SEEN; /* Hook the tails of the branches to the closing node. */ for (br = ret; br; br = regnext(br)) { const U8 op = PL_regkind[OP(br)]; if (op == BRANCH) { REGTAIL_STUDY(pRExC_state, NEXTOPER(br), ender); if ( OP(NEXTOPER(br)) != NOTHING || regnext(NEXTOPER(br)) != ender) is_nothing= 0; } else if (op == BRANCHJ) { REGTAIL_STUDY(pRExC_state, NEXTOPER(NEXTOPER(br)), ender); /* for now we always disable this optimisation * / if ( OP(NEXTOPER(NEXTOPER(br))) != NOTHING || regnext(NEXTOPER(NEXTOPER(br))) != ender) */ is_nothing= 0; } } if (is_nothing) { br= PL_regkind[OP(ret)] != BRANCH ? regnext(ret) : ret; DEBUG_PARSE_r(if (!SIZE_ONLY) { DEBUG_PARSE_MSG("NADA"); regprop(RExC_rx, RExC_mysv1, ret, NULL, pRExC_state); regprop(RExC_rx, RExC_mysv2, ender, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ converting ret %s (%" IVdf ") to ender %s (%" IVdf ") offset %" IVdf "\n", SvPV_nolen_const(RExC_mysv1), (IV)REG_NODE_NUM(ret), SvPV_nolen_const(RExC_mysv2), (IV)REG_NODE_NUM(ender), (IV)(ender - ret) ); }); OP(br)= NOTHING; if (OP(ender) == TAIL) { NEXT_OFF(br)= 0; RExC_emit= br + 1; } else { regnode *opt; for ( opt= br + 1; opt < ender ; opt++ ) OP(opt)= OPTIMIZED; NEXT_OFF(br)= ender - br; } } } } { const char *p; static const char parens[] = "=!<,>"; if (paren && (p = strchr(parens, paren))) { U8 node = ((p - parens) % 2) ? 
UNLESSM : IFMATCH; int flag = (p - parens) > 1; if (paren == '>') node = SUSPEND, flag = 0; reginsert(pRExC_state, node,ret, depth+1); Set_Node_Cur_Length(ret, parse_start); Set_Node_Offset(ret, parse_start + 1); ret->flags = flag; REGTAIL_STUDY(pRExC_state, ret, reg_node(pRExC_state, TAIL)); } } /* Check for proper termination. */ if (paren) { /* restore original flags, but keep (?p) and, if we've changed from /d * rules to /u, keep the /u */ RExC_flags = oregflags | (RExC_flags & RXf_PMf_KEEPCOPY); if (DEPENDS_SEMANTICS && RExC_uni_semantics) { set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET); } if (RExC_parse >= RExC_end || UCHARAT(RExC_parse) != ')') { RExC_parse = oregcomp_parse; vFAIL("Unmatched ("); } nextchar(pRExC_state); } else if (!paren && RExC_parse < RExC_end) { if (*RExC_parse == ')') { RExC_parse++; vFAIL("Unmatched )"); } else FAIL("Junk on end of regexp"); /* "Can't happen". */ NOT_REACHED; /* NOTREACHED */ } if (RExC_in_lookbehind) { RExC_in_lookbehind--; } if (after_freeze > RExC_npar) RExC_npar = after_freeze; return(ret); } /* - regbranch - one alternative of an | operator * * Implements the concatenation operator. * * Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs to be * restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8 */ STATIC regnode * S_regbranch(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, I32 first, U32 depth) { regnode *ret; regnode *chain = NULL; regnode *latest; I32 flags = 0, c = 0; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGBRANCH; DEBUG_PARSE("brnc"); if (first) ret = NULL; else { if (!SIZE_ONLY && RExC_extralen) ret = reganode(pRExC_state, BRANCHJ,0); else { ret = reg_node(pRExC_state, BRANCH); Set_Node_Length(ret, 1); } } if (!first && SIZE_ONLY) RExC_extralen += 1; /* BRANCHJ */ *flagp = WORST; /* Tentatively. */ skip_to_be_ignored_text(pRExC_state, &RExC_parse, FALSE /* Don't force to /x */ ); while (RExC_parse < RExC_end && *RExC_parse != '|' && *RExC_parse != ')') { flags &= ~TRYAGAIN; latest = regpiece(pRExC_state, &flags,depth+1); if (latest == NULL) { if (flags & TRYAGAIN) continue; if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regpiece returned NULL, flags=%#" UVxf, (UV) flags); } else if (ret == NULL) ret = latest; *flagp |= flags&(HASWIDTH|POSTPONED); if (chain == NULL) /* First piece. */ *flagp |= flags&SPSTART; else { /* FIXME adding one for every branch after the first is probably * excessive now we have TRIE support. (hv) */ MARK_NAUGHTY(1); REGTAIL(pRExC_state, chain, latest); } chain = latest; c++; } if (chain == NULL) { /* Loop ran zero times. */ chain = reg_node(pRExC_state, NOTHING); if (ret == NULL) ret = chain; } if (c == 1) { *flagp |= flags&SIMPLE; } return ret; } /* - regpiece - something followed by possible quantifier * + ? {n,m} * * Note that the branching code sequences used for ? and the general cases * of * and + are somewhat optimized: they use the same NOTHING node as * both the endmarker for their branch list and the body of the last branch. * It might seem that this node could be dispensed with entirely, but the * endmarker role is not redundant. * * Returns NULL, setting *flagp to TRYAGAIN if regatom() returns NULL with * TRYAGAIN. 
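
// --- Illustrative sketch, not part of the original source ---
// How the "=!<,>" index trick above decodes a lookaround: the parity of
// the index picks positive (IFMATCH) vs negative (UNLESSM), and any
// index past 1 marks a lookbehind ('<' is the positive form, ',' stands
// in for the negative one).  '>' is the odd one out: an atomic group,
// which becomes SUSPEND.  Opcode names are kept; everything else here
// is toy code.
#include <stdio.h>

int main(void)
{
    static const char parens[] = "=!<,>";
    static const char *const names[] = { "IFMATCH", "UNLESSM", "SUSPEND" };
    const char *p;
    char c;

    for (p = parens; (c = *p) != '\0'; p++) {
        int idx  = (int)(p - parens);
        int node = (c == '>') ? 2 : (idx % 2);   // 0=IFMATCH, 1=UNLESSM
        int flag = (c == '>') ? 0 : (idx > 1);   // 1 => lookbehind
        printf("'%c' -> %s, lookbehind=%d\n", c, names[node], flag);
    }
    return 0;
}
// --- end sketch ---
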
* Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs to be * restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8 */ STATIC regnode * S_regpiece(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth) { regnode *ret; char op; char *next; I32 flags; const char * const origparse = RExC_parse; I32 min; I32 max = REG_INFTY; #ifdef RE_TRACK_PATTERN_OFFSETS char *parse_start; #endif const char *maxpos = NULL; UV uv; /* Save the original in case we change the emitted regop to a FAIL. */ regnode * const orig_emit = RExC_emit; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGPIECE; DEBUG_PARSE("piec"); ret = regatom(pRExC_state, &flags,depth+1); if (ret == NULL) { if (flags & (TRYAGAIN|RESTART_PASS1|NEED_UTF8)) *flagp |= flags & (TRYAGAIN|RESTART_PASS1|NEED_UTF8); else FAIL2("panic: regatom returned NULL, flags=%#" UVxf, (UV) flags); return(NULL); } op = *RExC_parse; if (op == '{' && regcurly(RExC_parse)) { maxpos = NULL; #ifdef RE_TRACK_PATTERN_OFFSETS parse_start = RExC_parse; /* MJD */ #endif next = RExC_parse + 1; while (isDIGIT(*next) || *next == ',') { if (*next == ',') { if (maxpos) break; else maxpos = next; } next++; } if (*next == '}') { /* got one */ const char* endptr; if (!maxpos) maxpos = next; RExC_parse++; if (isDIGIT(*RExC_parse)) { if (!grok_atoUV(RExC_parse, &uv, &endptr)) vFAIL("Invalid quantifier in {,}"); if (uv >= REG_INFTY) vFAIL2("Quantifier in {,} bigger than %d", REG_INFTY - 1); min = (I32)uv; } else { min = 0; } if (*maxpos == ',') maxpos++; else maxpos = RExC_parse; if (isDIGIT(*maxpos)) { if (!grok_atoUV(maxpos, &uv, &endptr)) vFAIL("Invalid quantifier in {,}"); if (uv >= REG_INFTY) vFAIL2("Quantifier in {,} bigger than %d", REG_INFTY - 1); max = (I32)uv; } else { max = REG_INFTY; /* meaning "infinity" */ } RExC_parse = next; nextchar(pRExC_state); if (max < min) { /* If can't match, warn and optimize to fail unconditionally */ reginsert(pRExC_state, OPFAIL, orig_emit, depth+1); if (PASS2) { ckWARNreg(RExC_parse, "Quantifier {n,m} with n > m can't match"); NEXT_OFF(orig_emit)= regarglen[OPFAIL] + NODE_STEP_REGNODE; } return ret; } else if (min == max && *RExC_parse == '?') { if (PASS2) { ckWARN2reg(RExC_parse + 1, "Useless use of greediness modifier '%c'", *RExC_parse); } } do_curly: if ((flags&SIMPLE)) { if (min == 0 && max == REG_INFTY) { reginsert(pRExC_state, STAR, ret, depth+1); MARK_NAUGHTY(4); RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN; goto nest_check; } if (min == 1 && max == REG_INFTY) { reginsert(pRExC_state, PLUS, ret, depth+1); MARK_NAUGHTY(3); RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN; goto nest_check; } MARK_NAUGHTY_EXP(2, 2); reginsert(pRExC_state, CURLY, ret, depth+1); Set_Node_Offset(ret, parse_start+1); /* MJD */ Set_Node_Cur_Length(ret, parse_start); } else { regnode * const w = reg_node(pRExC_state, WHILEM); w->flags = 0; REGTAIL(pRExC_state, ret, w); if (!SIZE_ONLY && RExC_extralen) { reginsert(pRExC_state, LONGJMP,ret, depth+1); reginsert(pRExC_state, NOTHING,ret, depth+1); NEXT_OFF(ret) = 3; /* Go over LONGJMP. */ } reginsert(pRExC_state, CURLYX,ret, depth+1); /* MJD hk */ Set_Node_Offset(ret, parse_start+1); Set_Node_Length(ret, op == '{' ? (RExC_parse - parse_start) : 1); if (!SIZE_ONLY && RExC_extralen) NEXT_OFF(ret) = 3; /* Go over NOTHING to LONGJMP. 
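
// --- Illustrative sketch, not part of the original source ---
// A stripped-down version of the "{m,n}" scan above: at most one comma,
// digits on either side, min defaulting to 0 and max to "infinity" when
// omitted, and "{n}" meaning exactly n.  The overflow and error
// handling of the real grok_atoUV path is elided.
#include <ctype.h>
#include <stdlib.h>

#define TOY_INFTY 0x7fff   // stand-in for REG_INFTY

// Parse "{m,n}" starting at s; return 1 and fill min/max on success.
static int toy_parse_curly(const char *s, long *min, long *max)
{
    const char *comma = NULL, *p = s + 1;

    if (*s != '{')
        return 0;
    while (isdigit((unsigned char)*p) || *p == ',') {
        if (*p == ',') {
            if (comma) return 0;    // at most one comma
            comma = p;
        }
        p++;
    }
    if (*p != '}')
        return 0;                   // not a quantifier after all

    *min = isdigit((unsigned char)s[1]) ? strtol(s + 1, NULL, 10) : 0;
    if (!comma)
        *max = *min;                // "{n}" means exactly n
    else if (isdigit((unsigned char)comma[1]))
        *max = strtol(comma + 1, NULL, 10);
    else
        *max = TOY_INFTY;           // "{n,}" is unbounded
    return 1;
}

int main(void)
{
    long mn, mx;
    return toy_parse_curly("{2,}", &mn, &mx) && mn == 2 && mx == TOY_INFTY
           ? 0 : 1;
}
// --- end sketch ---
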
*/ REGTAIL(pRExC_state, ret, reg_node(pRExC_state, NOTHING)); if (SIZE_ONLY) RExC_whilem_seen++, RExC_extralen += 3; MARK_NAUGHTY_EXP(1, 4); /* compound interest */ } ret->flags = 0; if (min > 0) *flagp = WORST; if (max > 0) *flagp |= HASWIDTH; if (!SIZE_ONLY) { ARG1_SET(ret, (U16)min); ARG2_SET(ret, (U16)max); } if (max == REG_INFTY) RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN; goto nest_check; } } if (!ISMULT1(op)) { *flagp = flags; return(ret); } #if 0 /* Now runtime fix should be reliable. */ /* if this is reinstated, don't forget to put this back into perldiag: =item Regexp *+ operand could be empty at {#} in regex m/%s/ (F) The part of the regexp subject to either the * or + quantifier could match an empty string. The {#} shows in the regular expression about where the problem was discovered. */ if (!(flags&HASWIDTH) && op != '?') vFAIL("Regexp *+ operand could be empty"); #endif #ifdef RE_TRACK_PATTERN_OFFSETS parse_start = RExC_parse; #endif nextchar(pRExC_state); *flagp = (op != '+') ? (WORST|SPSTART|HASWIDTH) : (WORST|HASWIDTH); if (op == '*') { min = 0; goto do_curly; } else if (op == '+') { min = 1; goto do_curly; } else if (op == '?') { min = 0; max = 1; goto do_curly; } nest_check: if (!SIZE_ONLY && !(flags&(HASWIDTH|POSTPONED)) && max > REG_INFTY/3) { SAVEFREESV(RExC_rx_sv); /* in case of fatal warnings */ ckWARN2reg(RExC_parse, "%" UTF8f " matches null string many times", UTF8fARG(UTF, (RExC_parse >= origparse ? RExC_parse - origparse : 0), origparse)); (void)ReREFCNT_inc(RExC_rx_sv); } if (*RExC_parse == '?') { nextchar(pRExC_state); reginsert(pRExC_state, MINMOD, ret, depth+1); REGTAIL(pRExC_state, ret, ret + NODE_STEP_REGNODE); } else if (*RExC_parse == '+') { regnode *ender; nextchar(pRExC_state); ender = reg_node(pRExC_state, SUCCEED); REGTAIL(pRExC_state, ret, ender); reginsert(pRExC_state, SUSPEND, ret, depth+1); ender = reg_node(pRExC_state, TAIL); REGTAIL(pRExC_state, ret, ender); } if (ISMULT2(RExC_parse)) { RExC_parse++; vFAIL("Nested quantifiers"); } return(ret); } STATIC bool S_grok_bslash_N(pTHX_ RExC_state_t *pRExC_state, regnode ** node_p, UV * code_point_p, int * cp_count, I32 * flagp, const bool strict, const U32 depth ) { /* This routine teases apart the various meanings of \N and returns * accordingly. The input parameters constrain which meaning(s) is/are valid * in the current context. * * Exactly one of <node_p> and <code_point_p> must be non-NULL. * * If <code_point_p> is not NULL, the context is expecting the result to be a * single code point. If this \N instance turns out to a single code point, * the function returns TRUE and sets *code_point_p to that code point. * * If <node_p> is not NULL, the context is expecting the result to be one of * the things representable by a regnode. If this \N instance turns out to be * one such, the function generates the regnode, returns TRUE and sets *node_p * to point to that regnode. * * If this instance of \N isn't legal in any context, this function will * generate a fatal error and not return. * * On input, RExC_parse should point to the first char following the \N at the * time of the call. On successful return, RExC_parse will have been updated * to point to just after the sequence identified by this routine. Also * *flagp has been updated as needed. * * When there is some problem with the current context and this \N instance, * the function returns FALSE, without advancing RExC_parse, nor setting * *node_p, nor *code_point_p, nor *flagp. 
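
// --- Illustrative sketch, not part of the original source ---
// The op selection above, flattened into one function.  Note that '*',
// '+' and '?' all funnel into the same do_curly path as {0,}, {1,} and
// {0,1}: a simple operand gets STAR, PLUS or CURLY depending on
// (min,max); an impossible range (min > max) is compiled to an
// unconditional OPFAIL; anything the engine cannot repeat in place
// falls back to the CURLYX/WHILEM machinery.
enum toy_quant { TOY_OPFAIL, TOY_STAR, TOY_PLUS, TOY_CURLY, TOY_CURLYX };

#define TOY_INFTY 0x7fff   // stand-in for REG_INFTY

static enum toy_quant
toy_pick_quant_op(long min, long max, int operand_is_simple)
{
    if (min > max)
        return TOY_OPFAIL;       // "{n,m} with n > m can't match"
    if (!operand_is_simple)
        return TOY_CURLYX;       // needs WHILEM backtracking state
    if (min == 0 && max == TOY_INFTY)
        return TOY_STAR;         // x*  ==  x{0,}
    if (min == 1 && max == TOY_INFTY)
        return TOY_PLUS;         // x+  ==  x{1,}
    return TOY_CURLY;            // general counted repeat
}

int main(void)
{
    return toy_pick_quant_op(0, TOY_INFTY, 1) == TOY_STAR ? 0 : 1;
}
// --- end sketch ---
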
* * If <cp_count> is not NULL, the caller wants to know the length (in code * points) that this \N sequence matches. This is set even if the function * returns FALSE, as detailed below. * * There are 5 possibilities here, as detailed in the next 5 paragraphs. * * Probably the most common case is for the \N to specify a single code point. * *cp_count will be set to 1, and *code_point_p will be set to that code * point. * * Another possibility is for the input to be an empty \N{}, which for * backwards compatibility we accept. *cp_count will be set to 0. *node_p * will be set to a generated NOTHING node. * * Still another possibility is for the \N to mean [^\n]. *cp_count will be * set to 0. *node_p will be set to a generated REG_ANY node. * * The fourth possibility is that \N resolves to a sequence of more than one * code points. *cp_count will be set to the number of code points in the * sequence. *node_p * will be set to a generated node returned by this * function calling S_reg(). * * The final possibility is that it is premature to be calling this function; * that pass1 needs to be restarted. This can happen when this changes from * /d to /u rules, or when the pattern needs to be upgraded to UTF-8. The * latter occurs only when the fourth possibility would otherwise be in * effect, and is because one of those code points requires the pattern to be * recompiled as UTF-8. The function returns FALSE, and sets the * RESTART_PASS1 and NEED_UTF8 flags in *flagp, as appropriate. When this * happens, the caller needs to desist from continuing parsing, and return * this information to its caller. This is not set for when there is only one * code point, as this can be called as part of an ANYOF node, and they can * store above-Latin1 code points without the pattern having to be in UTF-8. * * For non-single-quoted regexes, the tokenizer has resolved character and * sequence names inside \N{...} into their Unicode values, normalizing the * result into what we should see here: '\N{U+c1.c2...}', where c1... are the * hex-represented code points in the sequence. This is done there because * the names can vary based on what charnames pragma is in scope at the time, * so we need a way to take a snapshot of what they resolve to at the time of * the original parse. [perl #56444]. * * That parsing is skipped for single-quoted regexes, so we may here get * '\N{NAME}'. This is a fatal error. These names have to be resolved by the * parser. But if the single-quoted regex is something like '\N{U+41}', that * is legal and handled here. The code point is Unicode, and has to be * translated into the native character set for non-ASCII platforms. */ char * endbrace; /* points to '}' following the name */ char *endchar; /* Points to '.' or '}' ending cur char in the input stream */ char* p = RExC_parse; /* Temporary */ GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_GROK_BSLASH_N; GET_RE_DEBUG_FLAGS; assert(cBOOL(node_p) ^ cBOOL(code_point_p)); /* Exactly one should be set */ assert(! (node_p && cp_count)); /* At most 1 should be set */ if (cp_count) { /* Initialize return for the most common case */ *cp_count = 1; } /* The [^\n] meaning of \N ignores spaces and comments under the /x * modifier. The other meanings do not, so use a temporary until we find * out which we are being called with */ skip_to_be_ignored_text(pRExC_state, &p, FALSE /* Don't force to /x */ ); /* Disambiguate between \N meaning a named character versus \N meaning * [^\n]. 
The latter is assumed when the {...} following the \N is a legal * quantifier, or there is no '{' at all */ if (*p != '{' || regcurly(p)) { RExC_parse = p; if (cp_count) { *cp_count = -1; } if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state, REG_ANY); *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); Set_Node_Length(*node_p, 1); /* MJD */ return TRUE; } /* Here, we have decided it should be a named character or sequence */ /* The test above made sure that the next real character is a '{', but * under the /x modifier, it could be separated by space (or a comment and * \n) and this is not allowed (for consistency with \x{...} and the * tokenizer handling of \N{NAME}). */ if (*RExC_parse != '{') { vFAIL("Missing braces on \\N{}"); } RExC_parse++; /* Skip past the '{' */ endbrace = strchr(RExC_parse, '}'); if (! endbrace) { /* no trailing brace */ vFAIL2("Missing right brace on \\%c{}", 'N'); } else if (!( endbrace == RExC_parse /* nothing between the {} */ || memBEGINs(RExC_parse, /* U+ (bad hex is checked below for a better error msg) */ (STRLEN) (RExC_end - RExC_parse), "U+"))) { RExC_parse = endbrace; /* position msg's '<--HERE' */ vFAIL("\\N{NAME} must be resolved by the lexer"); } REQUIRE_UNI_RULES(flagp, FALSE); /* Unicode named chars imply Unicode semantics */ if (endbrace == RExC_parse) { /* empty: \N{} */ if (strict) { RExC_parse++; /* Position after the "}" */ vFAIL("Zero length \\N{}"); } if (cp_count) { *cp_count = 0; } nextchar(pRExC_state); if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state,NOTHING); return TRUE; } RExC_parse += 2; /* Skip past the 'U+' */ /* Because toke.c has generated a special construct for us guaranteed not * to have NULs, we can use a str function */ endchar = RExC_parse + strcspn(RExC_parse, ".}"); /* Code points are separated by dots. If none, there is only one code * point, and is terminated by the brace */ if (endchar >= endbrace) { STRLEN length_of_hex; I32 grok_hex_flags; /* Here, exactly one code point. If that isn't what is wanted, fail */ if (! code_point_p) { RExC_parse = p; return FALSE; } /* Convert code point from hex */ length_of_hex = (STRLEN)(endchar - RExC_parse); grok_hex_flags = PERL_SCAN_ALLOW_UNDERSCORES | PERL_SCAN_DISALLOW_PREFIX /* No errors in the first pass (See [perl * #122671].) We let the code below find the * errors when there are multiple chars. */ | ((SIZE_ONLY) ? PERL_SCAN_SILENT_ILLDIGIT : 0); /* This routine is the one place where both single- and double-quotish * \N{U+xxxx} are evaluated. The value is a Unicode code point which * must be converted to native. */ *code_point_p = UNI_TO_NATIVE(grok_hex(RExC_parse, &length_of_hex, &grok_hex_flags, NULL)); /* The tokenizer should have guaranteed validity, but it's possible to * bypass it by using single quoting, so check. Don't do the check * here when there are multiple chars; we do it below anyway. */ if (length_of_hex == 0 || length_of_hex != (STRLEN)(endchar - RExC_parse) ) { RExC_parse += length_of_hex; /* Includes all the valid */ RExC_parse += (RExC_orig_utf8) /* point to after 1st invalid */ ? 
UTF8SKIP(RExC_parse) : 1; /* Guard against malformed utf8 */ if (RExC_parse >= endchar) { RExC_parse = endchar; } vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = endbrace + 1; return TRUE; } else { /* Is a multiple character sequence */ SV * substitute_parse; STRLEN len; char *orig_end = RExC_end; char *save_start = RExC_start; I32 flags; /* Count the code points, if desired, in the sequence */ if (cp_count) { *cp_count = 0; while (RExC_parse < endbrace) { /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); (*cp_count)++; } } /* Fail if caller doesn't want to handle a multi-code-point sequence. * But don't backup up the pointer if the caller wants to know how many * code points there are (they can then handle things) */ if (! node_p) { if (! cp_count) { RExC_parse = p; } return FALSE; } /* What is done here is to convert this to a sub-pattern of the form * \x{char1}\x{char2}... and then call reg recursively to parse it * (enclosing in "(?: ... )" ). That way, it retains its atomicness, * while not having to worry about special handling that some code * points may have. */ substitute_parse = newSVpvs("?:"); while (RExC_parse < endbrace) { /* Convert to notation the rest of the code understands */ sv_catpv(substitute_parse, "\\x{"); sv_catpvn(substitute_parse, RExC_parse, endchar - RExC_parse); sv_catpv(substitute_parse, "}"); /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); } sv_catpv(substitute_parse, ")"); len = SvCUR(substitute_parse); /* Don't allow empty number */ if (len < (STRLEN) 8) { RExC_parse = endbrace; vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = RExC_start = RExC_adjusted_start = SvPV_nolen(substitute_parse); RExC_end = RExC_parse + len; /* The values are Unicode, and therefore not subject to recoding, but * have to be converted to native on a non-Unicode (meaning non-ASCII) * platform. */ #ifdef EBCDIC RExC_recode_x_to_native = 1; #endif *node_p = reg(pRExC_state, 1, &flags, depth+1); /* Restore the saved values */ RExC_start = RExC_adjusted_start = save_start; RExC_parse = endbrace; RExC_end = orig_end; #ifdef EBCDIC RExC_recode_x_to_native = 0; #endif SvREFCNT_dec_NN(substitute_parse); if (! *node_p) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return FALSE; } FAIL2("panic: reg returned NULL to grok_bslash_N, flags=%#" UVxf, (UV) flags); } *flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED); nextchar(pRExC_state); return TRUE; } } PERL_STATIC_INLINE U8 S_compute_EXACTish(RExC_state_t *pRExC_state) { U8 op; PERL_ARGS_ASSERT_COMPUTE_EXACTISH; if (! FOLD) { return (LOC) ? EXACTL : EXACT; } op = get_regex_charset(RExC_flags); if (op >= REGEX_ASCII_RESTRICTED_CHARSET) { op--; /* /a is same as /u, and map /aa's offset to what /a's would have been, so there is no hole */ } return op + EXACTF; } PERL_STATIC_INLINE void S_alloc_maybe_populate_EXACT(pTHX_ RExC_state_t *pRExC_state, regnode *node, I32* flagp, STRLEN len, UV code_point, bool downgradable) { /* This knows the details about sizing an EXACTish node, setting flags for * it (by setting <*flagp>, and potentially populating it with a single * character. * * If <len> (the length in bytes) is non-zero, this function assumes that * the node has already been populated, and just does the sizing. 
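
// --- Illustrative sketch, not part of the original source ---
// Multi-code-point \N{U+c1.c2...} is handled above by rewriting it into
// an equivalent sub-pattern "?:\x{c1}\x{c2}...)" and recursively
// parsing that as a group, which keeps the sequence atomic under
// quantifiers.  This sketch performs just the string rewrite, with
// snprintf standing in for the SV catenation.
#include <stdio.h>

// Write the substitute pattern for 'n' code points into buf.
// Returns the length written, or -1 if buf is too small.
static int toy_substitute_parse(const unsigned long *cps, int n,
                                char *buf, size_t buflen)
{
    size_t used;
    int i, w;

    w = snprintf(buf, buflen, "?:");
    if (w < 0 || (size_t)w >= buflen) return -1;
    used = (size_t)w;

    for (i = 0; i < n; i++) {
        w = snprintf(buf + used, buflen - used, "\\x{%lx}", cps[i]);
        if (w < 0 || (size_t)w >= buflen - used) return -1;
        used += (size_t)w;
    }
    w = snprintf(buf + used, buflen - used, ")");
    if (w < 0 || (size_t)w >= buflen - used) return -1;
    return (int)(used + (size_t)w);
}

int main(void)
{
    unsigned long cps[2] = { 0x100, 0x10FFFF };
    char buf[64];
    if (toy_substitute_parse(cps, 2, buf, sizeof buf) < 0)
        return 1;
    puts(buf);   // prints ?:\x{100}\x{10ffff})
    return 0;
}
// --- end sketch ---
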
In this * case <code_point> should be the final code point that has already been * placed into the node. This value will be ignored except that under some * circumstances <*flagp> is set based on it. * * If <len> is zero, the function assumes that the node is to contain only * the single character given by <code_point> and calculates what <len> * should be. In pass 1, it sizes the node appropriately. In pass 2, it * additionally will populate the node's STRING with <code_point> or its * fold if folding. * * In both cases <*flagp> is appropriately set * * It knows that under FOLD, the Latin Sharp S and UTF characters above * 255, must be folded (the former only when the rules indicate it can * match 'ss') * * When it does the populating, it looks at the flag 'downgradable'. If * true with a node that folds, it checks if the single code point * participates in a fold, and if not downgrades the node to an EXACT. * This helps the optimizer */ bool len_passed_in = cBOOL(len != 0); U8 character[UTF8_MAXBYTES_CASE+1]; PERL_ARGS_ASSERT_ALLOC_MAYBE_POPULATE_EXACT; /* Don't bother to check for downgrading in PASS1, as it doesn't make any * sizing difference, and is extra work that is thrown away */ if (downgradable && ! PASS2) { downgradable = FALSE; } if (! len_passed_in) { if (UTF) { if (UVCHR_IS_INVARIANT(code_point)) { if (LOC || ! FOLD) { /* /l defers folding until runtime */ *character = (U8) code_point; } else { /* Here is /i and not /l. (toFOLD() is defined on just ASCII, which isn't the same thing as INVARIANT on EBCDIC, but it works there, as the extra invariants fold to themselves) */ *character = toFOLD((U8) code_point); /* We can downgrade to an EXACT node if this character * isn't a folding one. Note that this assumes that * nothing above Latin1 folds to some other invariant than * one of these alphabetics; otherwise we would also have * to check: * && (! HAS_NONLATIN1_FOLD_CLOSURE(code_point) * || ASCII_FOLD_RESTRICTED)) */ if (downgradable && PL_fold[code_point] == code_point) { OP(node) = EXACT; } } len = 1; } else if (FOLD && (! LOC || ! is_PROBLEMATIC_LOCALE_FOLD_cp(code_point))) { /* Folding, and ok to do so now */ UV folded = _to_uni_fold_flags( code_point, character, &len, FOLD_FLAGS_FULL | ((ASCII_FOLD_RESTRICTED) ? FOLD_FLAGS_NOMIX_ASCII : 0)); if (downgradable && folded == code_point /* This quickly rules out many cases, avoiding the _invlist_contains_cp() overhead for those. */ && ! _invlist_contains_cp(PL_utf8_foldable, code_point)) { OP(node) = (LOC) ? EXACTL : EXACT; } } else if (code_point <= MAX_UTF8_TWO_BYTE) { /* Not folding this cp, and can output it directly */ *character = UTF8_TWO_BYTE_HI(code_point); *(character + 1) = UTF8_TWO_BYTE_LO(code_point); len = 2; } else { uvchr_to_utf8( character, code_point); len = UTF8SKIP(character); } } /* Else pattern isn't UTF8. */ else if (! FOLD) { *character = (U8) code_point; len = 1; } /* Else is folded non-UTF8 */ #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) else if (LIKELY(code_point != LATIN_SMALL_LETTER_SHARP_S)) { #else else if (1) { #endif /* We don't fold any non-UTF8 except possibly the Sharp s (see * comments at join_exact()); */ *character = (U8) code_point; len = 1; /* Can turn into an EXACT node if we know the fold at compile time, * and it folds to itself and doesn't particpate in other folds */ if (downgradable && ! LOC && PL_fold_latin1[code_point] == code_point && (! 
HAS_NONLATIN1_FOLD_CLOSURE(code_point) || (isASCII(code_point) && ASCII_FOLD_RESTRICTED))) { OP(node) = EXACT; } } /* else is Sharp s. May need to fold it */ else if (AT_LEAST_UNI_SEMANTICS && ! ASCII_FOLD_RESTRICTED) { *character = 's'; *(character + 1) = 's'; len = 2; } else { *character = LATIN_SMALL_LETTER_SHARP_S; len = 1; } } if (SIZE_ONLY) { RExC_size += STR_SZ(len); } else { RExC_emit += STR_SZ(len); STR_LEN(node) = len; if (! len_passed_in) { Copy((char *) character, STRING(node), len, char); } } *flagp |= HASWIDTH; /* A single character node is SIMPLE, except for the special-cased SHARP S * under /di. */ if ((len == 1 || (UTF && len == UVCHR_SKIP(code_point))) #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) && ( code_point != LATIN_SMALL_LETTER_SHARP_S || ! FOLD || ! DEPENDS_SEMANTICS) #endif ) { *flagp |= SIMPLE; } /* The OP may not be well defined in PASS1 */ if (PASS2 && OP(node) == EXACTFL) { RExC_contains_locale = 1; } } STATIC bool S_new_regcurly(const char *s, const char *e) { /* This is a temporary function designed to match the most lenient form of * a {m,n} quantifier we ever envision, with either number omitted, and * spaces anywhere between/before/after them. * * If this function fails, then the string it matches is very unlikely to * ever be considered a valid quantifier, so we can allow the '{' that * begins it to be considered as a literal */ bool has_min = FALSE; bool has_max = FALSE; PERL_ARGS_ASSERT_NEW_REGCURLY; if (s >= e || *s++ != '{') return FALSE; while (s < e && isSPACE(*s)) { s++; } while (s < e && isDIGIT(*s)) { has_min = TRUE; s++; } while (s < e && isSPACE(*s)) { s++; } if (*s == ',') { s++; while (s < e && isSPACE(*s)) { s++; } while (s < e && isDIGIT(*s)) { has_max = TRUE; s++; } while (s < e && isSPACE(*s)) { s++; } } return s < e && *s == '}' && (has_min || has_max); } /* Parse backref decimal value, unless it's too big to sensibly be a backref, * in which case return I32_MAX (rather than possibly 32-bit wrapping) */ static I32 S_backref_value(char *p) { const char* endptr; UV val; if (grok_atoUV(p, &val, &endptr) && val <= I32_MAX) return (I32)val; return I32_MAX; } /* - regatom - the lowest level Try to identify anything special at the start of the current parse position. If there is, then handle it as required. This may involve generating a single regop, such as for an assertion; or it may involve recursing, such as to handle a () structure. If the string doesn't start with something special then we gobble up as much literal text as we can. If we encounter a quantifier, we have to back off the final literal character, as that quantifier applies to just it and not to the whole string of literals. Once we have been able to handle whatever type of thing started the sequence, we return. Note: we have to be careful with escapes, as they can be both literal and special, and in the case of \10 and friends, context determines which. 
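
// --- Illustrative sketch, not part of the original source ---
// A standalone restatement of S_new_regcurly() above, plus a few probes
// showing what counts as "quantifier-like" (and therefore makes a
// leading '{' meaningful) versus a literal '{'.  ctype is used in place
// of the isSPACE/isDIGIT macros.
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int toy_regcurly(const char *s, const char *e)
{
    int has_min = 0, has_max = 0;

    if (s >= e || *s++ != '{') return 0;
    while (s < e && isspace((unsigned char)*s)) s++;
    while (s < e && isdigit((unsigned char)*s)) { has_min = 1; s++; }
    while (s < e && isspace((unsigned char)*s)) s++;
    if (s < e && *s == ',') {
        s++;
        while (s < e && isspace((unsigned char)*s)) s++;
        while (s < e && isdigit((unsigned char)*s)) { has_max = 1; s++; }
        while (s < e && isspace((unsigned char)*s)) s++;
    }
    return s < e && *s == '}' && (has_min || has_max);
}

int main(void)
{
    const char *probes[] = { "{3,5}", "{ 3 , }", "{,5}", "{ , }", "{a}" };
    size_t i;
    // first three are quantifier-like; "{ , }" and "{a}" stay literal
    for (i = 0; i < sizeof probes / sizeof probes[0]; i++)
        printf("%-10s -> %s\n", probes[i],
               toy_regcurly(probes[i], probes[i] + strlen(probes[i]))
                   ? "quantifier-like" : "literal '{'");
    return 0;
}
// --- end sketch ---
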
A summary of the code structure is: switch (first_byte) { cases for each special: handle this special; break; case '\\': switch (2nd byte) { cases for each unambiguous special: handle this special; break; cases for each ambigous special/literal: disambiguate; if (special) handle here else goto defchar; default: // unambiguously literal: goto defchar; } default: // is a literal char // FALL THROUGH defchar: create EXACTish node for literal; while (more input and node isn't full) { switch (input_byte) { cases for each special; make sure parse pointer is set so that the next call to regatom will see this special first goto loopdone; // EXACTish node terminated by prev. char default: append char to EXACTISH node; } get next input byte; } loopdone: } return the generated node; Specifically there are two separate switches for handling escape sequences, with the one for handling literal escapes requiring a dummy entry for all of the special escapes that are actually handled by the other. Returns NULL, setting *flagp to TRYAGAIN if reg() returns NULL with TRYAGAIN. Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8 Otherwise does not return NULL. */ STATIC regnode * S_regatom(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth) { regnode *ret = NULL; I32 flags = 0; char *parse_start; U8 op; int invert = 0; U8 arg; GET_RE_DEBUG_FLAGS_DECL; *flagp = WORST; /* Tentatively. */ DEBUG_PARSE("atom"); PERL_ARGS_ASSERT_REGATOM; tryagain: parse_start = RExC_parse; assert(RExC_parse < RExC_end); switch ((U8)*RExC_parse) { case '^': RExC_seen_zerolen++; nextchar(pRExC_state); if (RExC_flags & RXf_PMf_MULTILINE) ret = reg_node(pRExC_state, MBOL); else ret = reg_node(pRExC_state, SBOL); Set_Node_Length(ret, 1); /* MJD */ break; case '$': nextchar(pRExC_state); if (*RExC_parse) RExC_seen_zerolen++; if (RExC_flags & RXf_PMf_MULTILINE) ret = reg_node(pRExC_state, MEOL); else ret = reg_node(pRExC_state, SEOL); Set_Node_Length(ret, 1); /* MJD */ break; case '.': nextchar(pRExC_state); if (RExC_flags & RXf_PMf_SINGLELINE) ret = reg_node(pRExC_state, SANY); else ret = reg_node(pRExC_state, REG_ANY); *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); Set_Node_Length(ret, 1); /* MJD */ break; case '[': { char * const oregcomp_parse = ++RExC_parse; ret = regclass(pRExC_state, flagp,depth+1, FALSE, /* means parse the whole char class */ TRUE, /* allow multi-char folds */ FALSE, /* don't silence non-portable warnings. */ (bool) RExC_strict, TRUE, /* Allow an optimized regnode result */ NULL, NULL); if (ret == NULL) { if (*flagp & (RESTART_PASS1|NEED_UTF8)) return NULL; FAIL2("panic: regclass returned NULL to regatom, flags=%#" UVxf, (UV) *flagp); } if (*RExC_parse != ']') { RExC_parse = oregcomp_parse; vFAIL("Unmatched ["); } nextchar(pRExC_state); Set_Node_Length(ret, RExC_parse - oregcomp_parse + 1); /* MJD */ break; } case '(': nextchar(pRExC_state); ret = reg(pRExC_state, 2, &flags,depth+1); if (ret == NULL) { if (flags & TRYAGAIN) { if (RExC_parse >= RExC_end) { /* Make parent create an empty node if needed. 
*/ *flagp |= TRYAGAIN; return(NULL); } goto tryagain; } if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: reg returned NULL to regatom, flags=%#" UVxf, (UV) flags); } *flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED); break; case '|': case ')': if (flags & TRYAGAIN) { *flagp |= TRYAGAIN; return NULL; } vFAIL("Internal urp"); /* Supposed to be caught earlier. */ break; case '?': case '+': case '*': RExC_parse++; vFAIL("Quantifier follows nothing"); break; case '\\': /* Special Escapes This switch handles escape sequences that resolve to some kind of special regop and not to literal text. Escape sequnces that resolve to literal text are handled below in the switch marked "Literal Escapes". Every entry in this switch *must* have a corresponding entry in the literal escape switch. However, the opposite is not required, as the default for this switch is to jump to the literal text handling code. */ RExC_parse++; switch ((U8)*RExC_parse) { /* Special Escapes */ case 'A': RExC_seen_zerolen++; ret = reg_node(pRExC_state, SBOL); /* SBOL is shared with /^/ so we set the flags so we can tell * /\A/ from /^/ in split. We check ret because first pass we * have no regop struct to set the flags on. */ if (PASS2) ret->flags = 1; *flagp |= SIMPLE; goto finish_meta_pat; case 'G': ret = reg_node(pRExC_state, GPOS); RExC_seen |= REG_GPOS_SEEN; *flagp |= SIMPLE; goto finish_meta_pat; case 'K': RExC_seen_zerolen++; ret = reg_node(pRExC_state, KEEPS); *flagp |= SIMPLE; /* XXX:dmq : disabling in-place substitution seems to * be necessary here to avoid cases of memory corruption, as * with: C<$_="x" x 80; s/x\K/y/> -- rgs */ RExC_seen |= REG_LOOKBEHIND_SEEN; goto finish_meta_pat; case 'Z': ret = reg_node(pRExC_state, SEOL); *flagp |= SIMPLE; RExC_seen_zerolen++; /* Do not optimize RE away */ goto finish_meta_pat; case 'z': ret = reg_node(pRExC_state, EOS); *flagp |= SIMPLE; RExC_seen_zerolen++; /* Do not optimize RE away */ goto finish_meta_pat; case 'C': vFAIL("\\C no longer supported"); case 'X': ret = reg_node(pRExC_state, CLUMP); *flagp |= HASWIDTH; goto finish_meta_pat; case 'W': invert = 1; /* FALLTHROUGH */ case 'w': arg = ANYOF_WORDCHAR; goto join_posix; case 'B': invert = 1; /* FALLTHROUGH */ case 'b': { regex_charset charset = get_regex_charset(RExC_flags); RExC_seen_zerolen++; RExC_seen |= REG_LOOKBEHIND_SEEN; op = BOUND + charset; if (op == BOUNDL) { RExC_contains_locale = 1; } ret = reg_node(pRExC_state, op); *flagp |= SIMPLE; if (RExC_parse >= RExC_end || *(RExC_parse + 1) != '{') { FLAGS(ret) = TRADITIONAL_BOUND; if (PASS2 && op > BOUNDA) { /* /aa is same as /a */ OP(ret) = BOUNDA; } } else { STRLEN length; char name = *RExC_parse; char * endbrace; RExC_parse += 2; endbrace = strchr(RExC_parse, '}'); if (! endbrace) { vFAIL2("Missing right brace on \\%c{}", name); } /* XXX Need to decide whether to take spaces or not. 
Should be * consistent with \p{}, but that currently is SPACE, which * means vertical too, which seems wrong * while (isBLANK(*RExC_parse)) { RExC_parse++; }*/ if (endbrace == RExC_parse) { RExC_parse++; /* After the '}' */ vFAIL2("Empty \\%c{}", name); } length = endbrace - RExC_parse; /*while (isBLANK(*(RExC_parse + length - 1))) { length--; }*/ switch (*RExC_parse) { case 'g': if ( length != 1 && (memNEs(RExC_parse + 1, length - 1, "cb"))) { goto bad_bound_type; } FLAGS(ret) = GCB_BOUND; break; case 'l': if (length != 2 || *(RExC_parse + 1) != 'b') { goto bad_bound_type; } FLAGS(ret) = LB_BOUND; break; case 's': if (length != 2 || *(RExC_parse + 1) != 'b') { goto bad_bound_type; } FLAGS(ret) = SB_BOUND; break; case 'w': if (length != 2 || *(RExC_parse + 1) != 'b') { goto bad_bound_type; } FLAGS(ret) = WB_BOUND; break; default: bad_bound_type: RExC_parse = endbrace; vFAIL2utf8f( "'%" UTF8f "' is an unknown bound type", UTF8fARG(UTF, length, endbrace - length)); NOT_REACHED; /*NOTREACHED*/ } RExC_parse = endbrace; REQUIRE_UNI_RULES(flagp, NULL); if (PASS2 && op >= BOUNDA) { /* /aa is same as /a */ OP(ret) = BOUNDU; length += 4; /* Don't have to worry about UTF-8, in this message because * to get here the contents of the \b must be ASCII */ ckWARN4reg(RExC_parse + 1, /* Include the '}' in msg */ "Using /u for '%.*s' instead of /%s", (unsigned) length, endbrace - length + 1, (charset == REGEX_ASCII_RESTRICTED_CHARSET) ? ASCII_RESTRICT_PAT_MODS : ASCII_MORE_RESTRICT_PAT_MODS); } } if (PASS2 && invert) { OP(ret) += NBOUND - BOUND; } goto finish_meta_pat; } case 'D': invert = 1; /* FALLTHROUGH */ case 'd': arg = ANYOF_DIGIT; if (! DEPENDS_SEMANTICS) { goto join_posix; } /* \d doesn't have any matches in the upper Latin1 range, hence /d * is equivalent to /u. Changing to /u saves some branches at * runtime */ op = POSIXU; goto join_posix_op_known; case 'R': ret = reg_node(pRExC_state, LNBREAK); *flagp |= HASWIDTH|SIMPLE; goto finish_meta_pat; case 'H': invert = 1; /* FALLTHROUGH */ case 'h': arg = ANYOF_BLANK; op = POSIXU; goto join_posix_op_known; case 'V': invert = 1; /* FALLTHROUGH */ case 'v': arg = ANYOF_VERTWS; op = POSIXU; goto join_posix_op_known; case 'S': invert = 1; /* FALLTHROUGH */ case 's': arg = ANYOF_SPACE; join_posix: op = POSIXD + get_regex_charset(RExC_flags); if (op > POSIXA) { /* /aa is same as /a */ op = POSIXA; } else if (op == POSIXL) { RExC_contains_locale = 1; } join_posix_op_known: if (invert) { op += NPOSIXD - POSIXD; } ret = reg_node(pRExC_state, op); if (! SIZE_ONLY) { FLAGS(ret) = namedclass_to_classnum(arg); } *flagp |= HASWIDTH|SIMPLE; /* FALLTHROUGH */ finish_meta_pat: if ( UCHARAT(RExC_parse + 1) == '{' && UNLIKELY(! new_regcurly(RExC_parse + 1, RExC_end))) { RExC_parse += 2; vFAIL("Unescaped left brace in regex is illegal here"); } nextchar(pRExC_state); Set_Node_Length(ret, 2); /* MJD */ break; case 'p': case 'P': RExC_parse--; ret = regclass(pRExC_state, flagp,depth+1, TRUE, /* means just parse this element */ FALSE, /* don't allow multi-char folds */ FALSE, /* don't silence non-portable warnings. It would be a bug if these returned non-portables */ (bool) RExC_strict, TRUE, /* Allow an optimized regnode result */ NULL, NULL); if (*flagp & RESTART_PASS1) return NULL; /* regclass() can only return RESTART_PASS1 and NEED_UTF8 if * multi-char folds are allowed. 
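
// --- Illustrative sketch, not part of the original source ---
// The \b{...} type lookup above, restated as a table: four Unicode
// boundary types are accepted, each stored in the node's FLAGS field.
// The *_BOUND names are real; the lookup function is a toy rewrite of
// the first-letter switch (which also allows bare "g" for "gcb").
#include <string.h>

enum toy_bound { TOY_BAD = -1,
                 TOY_GCB_BOUND,   // \b{gcb}: grapheme cluster boundary
                 TOY_LB_BOUND,    // \b{lb}:  line break boundary
                 TOY_SB_BOUND,    // \b{sb}:  sentence boundary
                 TOY_WB_BOUND };  // \b{wb}:  word boundary

static enum toy_bound toy_bound_type(const char *name, size_t len)
{
    if (len && name[0] == 'g' &&
        (len == 1 || (len == 3 && memcmp(name + 1, "cb", 2) == 0)))
        return TOY_GCB_BOUND;
    if (len == 2 && name[1] == 'b') {
        switch (name[0]) {
            case 'l': return TOY_LB_BOUND;
            case 's': return TOY_SB_BOUND;
            case 'w': return TOY_WB_BOUND;
        }
    }
    return TOY_BAD;   // "'...' is an unknown bound type"
}

int main(void)
{
    return toy_bound_type("wb", 2) == TOY_WB_BOUND ? 0 : 1;
}
// --- end sketch ---
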
*/ if (!ret) FAIL2("panic: regclass returned NULL to regatom, flags=%#" UVxf, (UV) *flagp); RExC_parse--; Set_Node_Offset(ret, parse_start); Set_Node_Cur_Length(ret, parse_start - 2); nextchar(pRExC_state); break; case 'N': /* Handle \N, \N{} and \N{NAMED SEQUENCE} (the latter meaning the * \N{...} evaluates to a sequence of more than one code points). * The function call below returns a regnode, which is our result. * The parameters cause it to fail if the \N{} evaluates to a * single code point; we handle those like any other literal. The * reason that the multicharacter case is handled here and not as * part of the EXACtish code is because of quantifiers. In * /\N{BLAH}+/, the '+' applies to the whole thing, and doing it * this way makes that Just Happen. dmq. * join_exact() will join this up with adjacent EXACTish nodes * later on, if appropriate. */ ++RExC_parse; if (grok_bslash_N(pRExC_state, &ret, /* Want a regnode returned */ NULL, /* Fail if evaluates to a single code point */ NULL, /* Don't need a count of how many code points */ flagp, RExC_strict, depth) ) { break; } if (*flagp & RESTART_PASS1) return NULL; /* Here, evaluates to a single code point. Go get that */ RExC_parse = parse_start; goto defchar; case 'k': /* Handle \k<NAME> and \k'NAME' */ parse_named_seq: { char ch; if ( RExC_parse >= RExC_end - 1 || (( ch = RExC_parse[1]) != '<' && ch != '\'' && ch != '{')) { RExC_parse++; /* diag_listed_as: Sequence \%s... not terminated in regex; marked by <-- HERE in m/%s/ */ vFAIL2("Sequence %.2s... not terminated",parse_start); } else { RExC_parse += 2; ret = handle_named_backref(pRExC_state, flagp, parse_start, (ch == '<') ? '>' : (ch == '{') ? '}' : '\''); } break; } case 'g': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { I32 num; bool hasbrace = 0; if (*RExC_parse == 'g') { bool isrel = 0; RExC_parse++; if (*RExC_parse == '{') { RExC_parse++; hasbrace = 1; } if (*RExC_parse == '-') { RExC_parse++; isrel = 1; } if (hasbrace && !isDIGIT(*RExC_parse)) { if (isrel) RExC_parse--; RExC_parse -= 2; goto parse_named_seq; } if (RExC_parse >= RExC_end) { goto unterminated_g; } num = S_backref_value(RExC_parse); if (num == 0) vFAIL("Reference to invalid group 0"); else if (num == I32_MAX) { if (isDIGIT(*RExC_parse)) vFAIL("Reference to nonexistent group"); else unterminated_g: vFAIL("Unterminated \\g... pattern"); } if (isrel) { num = RExC_npar - num; if (num < 1) vFAIL("Reference to nonexistent or unclosed group"); } } else { num = S_backref_value(RExC_parse); /* bare \NNN might be backref or octal - if it is larger * than or equal RExC_npar then it is assumed to be an * octal escape. Note RExC_npar is +1 from the actual * number of parens. */ /* Note we do NOT check if num == I32_MAX here, as that is * handled by the RExC_npar check */ if ( /* any numeric escape < 10 is always a backref */ num > 9 /* any numeric escape < RExC_npar is a backref */ && num >= RExC_npar /* cannot be an octal escape if it starts with 8 */ && *RExC_parse != '8' /* cannot be an octal escape it it starts with 9 */ && *RExC_parse != '9' ) { /* Probably not a backref, instead likely to be an * octal character escape, e.g. \35 or \777. * The above logic should make it obvious why using * octal escapes in patterns is problematic. - Yves */ RExC_parse = parse_start; goto defchar; } } /* At this point RExC_parse points at a numeric escape like * \12 or \88 or something similar, which we should NOT treat * as an octal escape. It may or may not be a valid backref * escape. 
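
// --- Illustrative sketch, not part of the original source ---
// The backref-vs-octal decision above, as a predicate.  A bare \NNN is
// taken as an octal escape only when all of these hold: the value is
// greater than 9, it is not smaller than RExC_npar (which is one more
// than the number of groups opened so far), and its first digit is not
// 8 or 9 (which cannot begin an octal number).  Otherwise it is treated
// as a backreference.
static int toy_is_octal_escape(long num, long npar, char first_digit)
{
    return num > 9                 // \1 .. \9 are always backrefs
        && num >= npar             // no such group exists (yet)
        && first_digit != '8'
        && first_digit != '9';
}

int main(void)
{
    // With two groups opened (npar == 3), \12 is the octal escape for
    // "\n"; in a pattern with twelve groups the same \12 would instead
    // be a backref to group 12.
    return toy_is_octal_escape(12, 3, '1') ? 0 : 1;
}
// --- end sketch ---
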
For instance \88888888 is unlikely to be a valid * backref. */ while (isDIGIT(*RExC_parse)) RExC_parse++; if (hasbrace) { if (*RExC_parse != '}') vFAIL("Unterminated \\g{...} pattern"); RExC_parse++; } if (!SIZE_ONLY) { if (num > (I32)RExC_rx->nparens) vFAIL("Reference to nonexistent group"); } RExC_sawback = 1; ret = reganode(pRExC_state, ((! FOLD) ? REF : (ASCII_FOLD_RESTRICTED) ? REFFA : (AT_LEAST_UNI_SEMANTICS) ? REFFU : (LOC) ? REFFL : REFF), num); *flagp |= HASWIDTH; /* override incorrect value set in reganode MJD */ Set_Node_Offset(ret, parse_start); Set_Node_Cur_Length(ret, parse_start-1); skip_to_be_ignored_text(pRExC_state, &RExC_parse, FALSE /* Don't force to /x */ ); } break; case '\0': if (RExC_parse >= RExC_end) FAIL("Trailing \\"); /* FALLTHROUGH */ default: /* Do not generate "unrecognized" warnings here, we fall back into the quick-grab loop below */ RExC_parse = parse_start; goto defchar; } /* end of switch on a \foo sequence */ break; case '#': /* '#' comments should have been spaced over before this function was * called */ assert((RExC_flags & RXf_PMf_EXTENDED) == 0); /* if (RExC_flags & RXf_PMf_EXTENDED) { RExC_parse = reg_skipcomment( pRExC_state, RExC_parse ); if (RExC_parse < RExC_end) goto tryagain; } */ /* FALLTHROUGH */ default: defchar: { /* Here, we have determined that the next thing is probably a * literal character. RExC_parse points to the first byte of its * definition. (It still may be an escape sequence that evaluates * to a single character) */ STRLEN len = 0; UV ender = 0; char *p; char *s; #define MAX_NODE_STRING_SIZE 127 char foldbuf[MAX_NODE_STRING_SIZE+UTF8_MAXBYTES_CASE]; char *s0; U8 upper_parse = MAX_NODE_STRING_SIZE; U8 node_type = compute_EXACTish(pRExC_state); bool next_is_quantifier; char * oldp = NULL; /* We can convert EXACTF nodes to EXACTFU if they contain only * characters that match identically regardless of the target * string's UTF8ness. The reason to do this is that EXACTF is not * trie-able, EXACTFU is. * * Similarly, we can convert EXACTFL nodes to EXACTFLU8 if they * contain only above-Latin1 characters (hence must be in UTF8), * which don't participate in folds with Latin1-range characters, * as the latter's folds aren't known until runtime. (We don't * need to figure this out until pass 2) */ bool maybe_exactfu = PASS2 && (node_type == EXACTF || node_type == EXACTFL); /* If a folding node contains only code points that don't * participate in folds, it can be changed into an EXACT node, * which allows the optimizer more things to look for */ bool maybe_exact; ret = reg_node(pRExC_state, node_type); /* In pass1, folded, we use a temporary buffer instead of the * actual node, as the node doesn't exist yet */ s = (SIZE_ONLY && FOLD) ? foldbuf : STRING(ret); s0 = s; reparse: /* We look for the EXACTFish to EXACT node optimizaton only if * folding. (And we don't need to figure this out until pass 2). * XXX It might actually make sense to split the node into portions * that are exact and ones that aren't, so that we could later use * the exact ones to find the longest fixed and floating strings. * One would want to join them back into a larger node. One could * use a pseudo regnode like 'EXACT_ORIG_FOLD' */ maybe_exact = FOLD && PASS2; /* XXX The node can hold up to 255 bytes, yet this only goes to * 127. I (khw) do not know why. Keeping it somewhat less than * 255 allows us to not have to worry about overflow due to * converting to utf8 and fold expansion, but that value is * 255-UTF8_MAXBYTES_CASE. 
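             * (Schematically, the safe bound is
             *      max node string size - worst case fold expansion
             *        = 255 - UTF8_MAXBYTES_CASE
             * so that a single character's fold can always still fit;
             * 127 is simply comfortably under that.)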
join_exact() may join adjacent nodes * split up by this limit into a single one using the real max of * 255. Even at 127, this breaks under rare circumstances. If * folding, we do not want to split a node at a character that is a * non-final in a multi-char fold, as an input string could just * happen to want to match across the node boundary. The join * would solve that problem if the join actually happens. But a * series of more than two nodes in a row each of 127 would cause * the first join to succeed to get to 254, but then there wouldn't * be room for the next one, which could at be one of those split * multi-char folds. I don't know of any fool-proof solution. One * could back off to end with only a code point that isn't such a * non-final, but it is possible for there not to be any in the * entire node. */ assert( ! UTF /* Is at the beginning of a character */ || UTF8_IS_INVARIANT(UCHARAT(RExC_parse)) || UTF8_IS_START(UCHARAT(RExC_parse))); /* Here, we have a literal character. Find the maximal string of * them in the input that we can fit into a single EXACTish node. * We quit at the first non-literal or when the node gets full */ for (p = RExC_parse; len < upper_parse && p < RExC_end; len++) { oldp = p; /* White space has already been ignored */ assert( (RExC_flags & RXf_PMf_EXTENDED) == 0 || ! is_PATWS_safe((p), RExC_end, UTF)); switch ((U8)*p) { case '^': case '$': case '.': case '[': case '(': case ')': case '|': goto loopdone; case '\\': /* Literal Escapes Switch This switch is meant to handle escape sequences that resolve to a literal character. Every escape sequence that represents something else, like an assertion or a char class, is handled in the switch marked 'Special Escapes' above in this routine, but also has an entry here as anything that isn't explicitly mentioned here will be treated as an unescaped equivalent literal. */ switch ((U8)*++p) { /* These are all the special escapes. */ case 'A': /* Start assertion */ case 'b': case 'B': /* Word-boundary assertion*/ case 'C': /* Single char !DANGEROUS! */ case 'd': case 'D': /* digit class */ case 'g': case 'G': /* generic-backref, pos assertion */ case 'h': case 'H': /* HORIZWS */ case 'k': case 'K': /* named backref, keep marker */ case 'p': case 'P': /* Unicode property */ case 'R': /* LNBREAK */ case 's': case 'S': /* space class */ case 'v': case 'V': /* VERTWS */ case 'w': case 'W': /* word class */ case 'X': /* eXtended Unicode "combining character sequence" */ case 'z': case 'Z': /* End of line/string assertion */ --p; goto loopdone; /* Anything after here is an escape that resolves to a literal. (Except digits, which may or may not) */ case 'n': ender = '\n'; p++; break; case 'N': /* Handle a single-code point named character. */ RExC_parse = p + 1; if (! grok_bslash_N(pRExC_state, NULL, /* Fail if evaluates to anything other than a single code point */ &ender, /* The returned single code point */ NULL, /* Don't need a count of how many code points */ flagp, RExC_strict, depth) ) { if (*flagp & NEED_UTF8) FAIL("panic: grok_bslash_N set NEED_UTF8"); if (*flagp & RESTART_PASS1) return NULL; /* Here, it wasn't a single code point. Go close * up this EXACTish node. 
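                         * (So, roughly: in a hypothetical
                         * /ab\N{SOME NAMED SEQUENCE}cd/, the 'ab' becomes
                         * its own EXACTish node, the sequence its own
                         * node, and 'cd' then starts another.)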
The switch() prior to * this switch handles the other cases */ RExC_parse = p = oldp; goto loopdone; } p = RExC_parse; RExC_parse = parse_start; if (ender > 0xff) { REQUIRE_UTF8(flagp); } break; case 'r': ender = '\r'; p++; break; case 't': ender = '\t'; p++; break; case 'f': ender = '\f'; p++; break; case 'e': ender = ESC_NATIVE; p++; break; case 'a': ender = '\a'; p++; break; case 'o': { UV result; const char* error_msg; bool valid = grok_bslash_o(&p, RExC_end, &result, &error_msg, PASS2, /* out warnings */ (bool) RExC_strict, TRUE, /* Output warnings for non- portables */ UTF); if (! valid) { RExC_parse = p; /* going to die anyway; point to exact spot of failure */ vFAIL(error_msg); } ender = result; if (ender > 0xff) { REQUIRE_UTF8(flagp); } break; } case 'x': { UV result = UV_MAX; /* initialize to erroneous value */ const char* error_msg; bool valid = grok_bslash_x(&p, RExC_end, &result, &error_msg, PASS2, /* out warnings */ (bool) RExC_strict, TRUE, /* Silence warnings for non- portables */ UTF); if (! valid) { RExC_parse = p; /* going to die anyway; point to exact spot of failure */ vFAIL(error_msg); } ender = result; if (ender < 0x100) { #ifdef EBCDIC if (RExC_recode_x_to_native) { ender = LATIN1_TO_NATIVE(ender); } #endif } else { REQUIRE_UTF8(flagp); } break; } case 'c': p++; ender = grok_bslash_c(*p++, PASS2); break; case '8': case '9': /* must be a backreference */ --p; /* we have an escape like \8 which cannot be an octal escape * so we exit the loop, and let the outer loop handle this * escape which may or may not be a legitimate backref. */ goto loopdone; case '1': case '2': case '3':case '4': case '5': case '6': case '7': /* When we parse backslash escapes there is ambiguity * between backreferences and octal escapes. Any escape * from \1 - \9 is a backreference, any multi-digit * escape which does not start with 0 and which when * evaluated as decimal could refer to an already * parsed capture buffer is a back reference. Anything * else is octal. * * Note this implies that \118 could be interpreted as * 118 OR as "\11" . "8" depending on whether there * were 118 capture buffers defined already in the * pattern. */ /* NOTE, RExC_npar is 1 more than the actual number of * parens we have seen so far, hence the < RExC_npar below. */ if ( !isDIGIT(p[1]) || S_backref_value(p) < RExC_npar) { /* Not to be treated as an octal constant, go find backref */ --p; goto loopdone; } /* FALLTHROUGH */ case '0': { I32 flags = PERL_SCAN_SILENT_ILLDIGIT; STRLEN numlen = 3; ender = grok_oct(p, &numlen, &flags, NULL); if (ender > 0xff) { REQUIRE_UTF8(flagp); } p += numlen; if (PASS2 /* like \08, \178 */ && numlen < 3 && isDIGIT(*p) && ckWARN(WARN_REGEXP)) { reg_warn_non_literal_string( p + 1, form_short_octal_warning(p, numlen)); } } break; case '\0': if (p >= RExC_end) FAIL("Trailing \\"); /* FALLTHROUGH */ default: if (!SIZE_ONLY&& isALPHANUMERIC(*p)) { /* Include any left brace following the alpha to emphasize * that it could be part of an escape at some point * in the future */ int len = (isALPHA(*p) && *(p + 1) == '{') ? 2 : 1; ckWARN3reg(p + len, "Unrecognized escape \\%.*s passed through", len, p); } goto normal_default; } /* End of switch on '\' */ break; case '{': /* Currently we allow an lbrace at the start of a construct * without raising a warning. This is because we think we * will never want such a brace to be meant to be other * than taken literally. 
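                 * (For example, a bare qr/{/ compiles quietly to a
                 * literal '{', whereas in something like qr/a{/ the
                 * brace follows an alpha, where a quantifier could
                 * plausibly have been intended, and so is complained
                 * about below.)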
*/ if (len || (p > RExC_start && isALPHA_A(*(p - 1)))) { /* But, we raise a fatal warning otherwise, as the * deprecation cycle has come and gone. Except that it * turns out that some heavily-relied on upstream * software, notably GNU Autoconf, have failed to fix * their uses. For these, don't make it fatal unless * we anticipate using the '{' for something else. * This happens after any alpha, and for a looser {m,n} * quantifier specification */ if ( RExC_strict || ( p > parse_start + 1 && isALPHA_A(*(p - 1)) && *(p - 2) == '\\') || new_regcurly(p, RExC_end)) { RExC_parse = p + 1; vFAIL("Unescaped left brace in regex is " "illegal here"); } if (PASS2) { ckWARNregdep(p + 1, "Unescaped left brace in regex is " "deprecated here (and will be fatal " "in Perl 5.30), passed through"); } } goto normal_default; case '}': case ']': if (PASS2 && p > RExC_parse && RExC_strict) { ckWARN2reg(p + 1, "Unescaped literal '%c'", *p); } /*FALLTHROUGH*/ default: /* A literal character */ normal_default: if (! UTF8_IS_INVARIANT(*p) && UTF) { STRLEN numlen; ender = utf8n_to_uvchr((U8*)p, RExC_end - p, &numlen, UTF8_ALLOW_DEFAULT); p += numlen; } else ender = (U8) *p++; break; } /* End of switch on the literal */ /* Here, have looked at the literal character and <ender> * contains its ordinal, <p> points to the character after it. * We need to check if the next non-ignored thing is a * quantifier. Move <p> to after anything that should be * ignored, which, as a side effect, positions <p> for the next * loop iteration */ skip_to_be_ignored_text(pRExC_state, &p, FALSE /* Don't force to /x */ ); /* If the next thing is a quantifier, it applies to this * character only, which means that this character has to be in * its own node and can't just be appended to the string in an * existing node, so if there are already other characters in * the node, close the node with just them, and set up to do * this character again next time through, when it will be the * only thing in its new node */ next_is_quantifier = LIKELY(p < RExC_end) && UNLIKELY(ISMULT2(p)); if (next_is_quantifier && LIKELY(len)) { p = oldp; goto loopdone; } /* Ready to add 'ender' to the node */ if (! FOLD) { /* The simple case, just append the literal */ /* In the sizing pass, we need only the size of the * character we are appending, hence we can delay getting * its representation until PASS2. */ if (SIZE_ONLY) { if (UTF && ! UVCHR_IS_INVARIANT(ender)) { const STRLEN unilen = UVCHR_SKIP(ender); s += unilen; /* We have to subtract 1 just below (and again in * the corresponding PASS2 code) because the loop * increments <len> each time, as all but this path * (and one other) through it add a single byte to * the EXACTish node. But these paths would change * len to be the correct final value, so cancel out * the increment that follows */ len += unilen - 1; } else { s++; } } else { /* PASS2 */ not_fold_common: if (UTF && ! UVCHR_IS_INVARIANT(ender)) { U8 * new_s = uvchr_to_utf8((U8*)s, ender); len += (char *) new_s - s - 1; s = (char *) new_s; } else { *(s++) = (char) ender; } } } else if (LOC && is_PROBLEMATIC_LOCALE_FOLD_cp(ender)) { /* Here are folding under /l, and the code point is * problematic. First, we know we can't simplify things */ maybe_exact = FALSE; maybe_exactfu = FALSE; /* A problematic code point in this context means that its * fold isn't known until runtime, so we can't fold it now. * (The non-problematic code points are the above-Latin1 * ones that fold to also all above-Latin1. Their folds * don't vary no matter what the locale is.) 
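                 * (A sigma is the sort of pair meant: GREEK CAPITAL
                 * LETTER SIGMA folds to the small sigma, with both code
                 * points well above Latin1.)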
But here we * have characters whose fold depends on the locale. * Unlike the non-folding case above, we have to keep track * of these in the sizing pass, so that we can make sure we * don't split too-long nodes in the middle of a potential * multi-char fold. And unlike the regular fold case * handled in the else clauses below, we don't actually * fold and don't have special cases to consider. What we * do for both passes is the PASS2 code for non-folding */ goto not_fold_common; } else /* A regular FOLD code point */ if (! ( UTF #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) /* See comments for join_exact() as to why we fold * this non-UTF at compile time */ || ( node_type == EXACTFU && ender == LATIN_SMALL_LETTER_SHARP_S) #endif )) { /* Here, are folding and are not UTF-8 encoded; therefore * the character must be in the range 0-255, and is not /l * (Not /l because we already handled these under /l in * is_PROBLEMATIC_LOCALE_FOLD_cp) */ if (IS_IN_SOME_FOLD_L1(ender)) { maybe_exact = FALSE; /* See if the character's fold differs between /d and * /u. This includes the multi-char fold SHARP S to * 'ss' */ if (UNLIKELY(ender == LATIN_SMALL_LETTER_SHARP_S)) { RExC_seen_unfolded_sharp_s = 1; maybe_exactfu = FALSE; } else if (maybe_exactfu && (PL_fold[ender] != PL_fold_latin1[ender] #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) || ( len > 0 && isALPHA_FOLD_EQ(ender, 's') && isALPHA_FOLD_EQ(*(s-1), 's')) #endif )) { maybe_exactfu = FALSE; } } /* Even when folding, we store just the input character, as * we have an array that finds its fold quickly */ *(s++) = (char) ender; } else { /* FOLD, and UTF (or sharp s) */ /* Unlike the non-fold case, we do actually have to * calculate the results here in pass 1. This is for two * reasons, the folded length may be longer than the * unfolded, and we have to calculate how many EXACTish * nodes it will take; and we may run out of room in a node * in the middle of a potential multi-char fold, and have * to back off accordingly. */ UV folded; if (isASCII_uni(ender)) { folded = toFOLD(ender); *(s)++ = (U8) folded; } else { STRLEN foldlen; folded = _to_uni_fold_flags( ender, (U8 *) s, &foldlen, FOLD_FLAGS_FULL | ((ASCII_FOLD_RESTRICTED) ? FOLD_FLAGS_NOMIX_ASCII : 0)); s += foldlen; /* The loop increments <len> each time, as all but this * path (and one other) through it add a single byte to * the EXACTish node. But this one has changed len to * be the correct final value, so subtract one to * cancel out the increment that follows */ len += foldlen - 1; } /* If this node only contains non-folding code points so * far, see if this new one is also non-folding */ if (maybe_exact) { if (folded != ender) { maybe_exact = FALSE; } else { /* Here the fold is the original; we have to check * further to see if anything folds to it */ if (_invlist_contains_cp(PL_utf8_foldable, ender)) { maybe_exact = FALSE; } } } ender = folded; } if (next_is_quantifier) { /* Here, the next input is a quantifier, and to get here, * the current character is the only one in the node. * Also, here <len> doesn't include the final byte for this * character */ len++; goto loopdone; } } /* End of loop through literal characters */ /* Here we have either exhausted the input or ran out of room in * the node. 
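         * (That is, either <p> reached RExC_end, or <len> hit the
         * MAX_NODE_STRING_SIZE cap of the parse loop above.)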
(If we encountered a character that can't be in the * node, transfer is made directly to <loopdone>, and so we * wouldn't have fallen off the end of the loop.) In the latter * case, we artificially have to split the node into two, because * we just don't have enough space to hold everything. This * creates a problem if the final character participates in a * multi-character fold in the non-final position, as a match that * should have occurred won't, due to the way nodes are matched, * and our artificial boundary. So back off until we find a non- * problematic character -- one that isn't at the beginning or * middle of such a fold. (Either it doesn't participate in any * folds, or appears only in the final position of all the folds it * does participate in.) A better solution with far fewer false * positives, and that would fill the nodes more completely, would * be to actually have available all the multi-character folds to * test against, and to back-off only far enough to be sure that * this node isn't ending with a partial one. <upper_parse> is set * further below (if we need to reparse the node) to include just * up through that final non-problematic character that this code * identifies, so when it is set to less than the full node, we can * skip the rest of this */ if (FOLD && p < RExC_end && upper_parse == MAX_NODE_STRING_SIZE) { const STRLEN full_len = len; assert(len >= MAX_NODE_STRING_SIZE); /* Here, <s> points to the final byte of the final character. * Look backwards through the string until find a non- * problematic character */ if (! UTF) { /* This has no multi-char folds to non-UTF characters */ if (ASCII_FOLD_RESTRICTED) { goto loopdone; } while (--s >= s0 && IS_NON_FINAL_FOLD(*s)) { } len = s - s0 + 1; } else { if (! PL_NonL1NonFinalFold) { PL_NonL1NonFinalFold = _new_invlist_C_array( NonL1_Perl_Non_Final_Folds_invlist); } /* Point to the first byte of the final character */ s = (char *) utf8_hop((U8 *) s, -1); while (s >= s0) { /* Search backwards until find non-problematic char */ if (UTF8_IS_INVARIANT(*s)) { /* There are no ascii characters that participate * in multi-char folds under /aa. In EBCDIC, the * non-ascii invariants are all control characters, * so don't ever participate in any folds. */ if (ASCII_FOLD_RESTRICTED || ! IS_NON_FINAL_FOLD(*s)) { break; } } else if (UTF8_IS_DOWNGRADEABLE_START(*s)) { if (! IS_NON_FINAL_FOLD(EIGHT_BIT_UTF8_TO_NATIVE( *s, *(s+1)))) { break; } } else if (! _invlist_contains_cp( PL_NonL1NonFinalFold, valid_utf8_to_uvchr((U8 *) s, NULL))) { break; } /* Here, the current character is problematic in that * it does occur in the non-final position of some * fold, so try the character before it, but have to * special case the very first byte in the string, so * we don't read outside the string */ s = (s == s0) ? s -1 : (char *) utf8_hop((U8 *) s, -1); } /* End of loop backwards through the string */ /* If there were only problematic characters in the string, * <s> will point to before s0, in which case the length * should be 0, otherwise include the length of the * non-problematic character just found */ len = (s < s0) ? 0 : s - s0 + UTF8SKIP(s); } /* Here, have found the final character, if any, that is * non-problematic as far as ending the node without splitting * it across a potential multi-char fold. <len> contains the * number of bytes in the node up-to and including that * character, or is 0 if there is no such character, meaning * the whole node contains only problematic characters. 
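         * (Conceivable if, say, every character in the node happens to
         * be a non-final constituent of some multi-character fold.)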
In * this case, give up and just take the node as-is. We can't * do any better */ if (len == 0) { len = full_len; /* If the node ends in an 's' we make sure it stays EXACTF, * as if it turns into an EXACTFU, it could later get * joined with another 's' that would then wrongly match * the sharp s */ if (maybe_exactfu && isALPHA_FOLD_EQ(ender, 's')) { maybe_exactfu = FALSE; } } else { /* Here, the node does contain some characters that aren't * problematic. If one such is the final character in the * node, we are done */ if (len == full_len) { goto loopdone; } else if (len + ((UTF) ? UTF8SKIP(s) : 1) == full_len) { /* If the final character is problematic, but the * penultimate is not, back-off that last character to * later start a new node with it */ p = oldp; goto loopdone; } /* Here, the final non-problematic character is earlier * in the input than the penultimate character. What we do * is reparse from the beginning, going up only as far as * this final ok one, thus guaranteeing that the node ends * in an acceptable character. The reason we reparse is * that we know how far in the character is, but we don't * know how to correlate its position with the input parse. * An alternate implementation would be to build that * correlation as we go along during the original parse, * but that would entail extra work for every node, whereas * this code gets executed only when the string is too * large for the node, and the final two characters are * problematic, an infrequent occurrence. Yet another * possible strategy would be to save the tail of the * string, and the next time regatom is called, initialize * with that. The problem with this is that unless you * back off one more character, you won't be guaranteed * regatom will get called again, unless regbranch, * regpiece ... are also changed. If you do back off that * extra character, so that there is input guaranteed to * force calling regatom, you can't handle the case where * just the first character in the node is acceptable. I * (khw) decided to try this method which doesn't have that * pitfall; if performance issues are found, we can do a * combination of the current approach plus that one */ upper_parse = len; len = 0; s = s0; goto reparse; } } /* End of verifying node ends with an appropriate char */ loopdone: /* Jumped to when encounters something that shouldn't be in the node */ /* I (khw) don't know if you can get here with zero length, but the * old code handled this situation by creating a zero-length EXACT * node. Might as well be NOTHING instead */ if (len == 0) { OP(ret) = NOTHING; } else { if (FOLD) { /* If 'maybe_exact' is still set here, means there are no * code points in the node that participate in folds; * similarly for 'maybe_exactfu' and code points that match * differently depending on UTF8ness of the target string * (for /u), or depending on locale for /l */ if (maybe_exact) { OP(ret) = (LOC) ? EXACTL : EXACT; } else if (maybe_exactfu) { OP(ret) = (LOC) ? 
EXACTFLU8 : EXACTFU; } } alloc_maybe_populate_EXACT(pRExC_state, ret, flagp, len, ender, FALSE /* Don't look to see if could be turned into an EXACT node, as we have already computed that */ ); } RExC_parse = p - 1; Set_Node_Cur_Length(ret, parse_start); RExC_parse = p; { /* len is STRLEN which is unsigned, need to copy to signed */ IV iv = len; if (iv < 0) vFAIL("Internal disaster"); } } /* End of label 'defchar:' */ break; } /* End of giant switch on input character */ /* Position parse to next real character */ skip_to_be_ignored_text(pRExC_state, &RExC_parse, FALSE /* Don't force to /x */ ); if (PASS2 && *RExC_parse == '{' && OP(ret) != SBOL && ! regcurly(RExC_parse)) { ckWARNregdep(RExC_parse + 1, "Unescaped left brace in regex is deprecated here (and will be fatal in Perl 5.30), passed through"); } return(ret); } STATIC void S_populate_ANYOF_from_invlist(pTHX_ regnode *node, SV** invlist_ptr) { /* Uses the inversion list '*invlist_ptr' to populate the ANYOF 'node'. It * sets up the bitmap and any flags, removing those code points from the * inversion list, setting it to NULL should it become completely empty */ PERL_ARGS_ASSERT_POPULATE_ANYOF_FROM_INVLIST; assert(PL_regkind[OP(node)] == ANYOF); ANYOF_BITMAP_ZERO(node); if (*invlist_ptr) { /* This gets set if we actually need to modify things */ bool change_invlist = FALSE; UV start, end; /* Start looking through *invlist_ptr */ invlist_iterinit(*invlist_ptr); while (invlist_iternext(*invlist_ptr, &start, &end)) { UV high; int i; if (end == UV_MAX && start <= NUM_ANYOF_CODE_POINTS) { ANYOF_FLAGS(node) |= ANYOF_MATCHES_ALL_ABOVE_BITMAP; } /* Quit if are above what we should change */ if (start >= NUM_ANYOF_CODE_POINTS) { break; } change_invlist = TRUE; /* Set all the bits in the range, up to the max that we are doing */ high = (end < NUM_ANYOF_CODE_POINTS - 1) ? end : NUM_ANYOF_CODE_POINTS - 1; for (i = start; i <= (int) high; i++) { if (! ANYOF_BITMAP_TEST(node, i)) { ANYOF_BITMAP_SET(node, i); } } } invlist_iterfinish(*invlist_ptr); /* Done with loop; remove any code points that are in the bitmap from * *invlist_ptr; similarly for code points above the bitmap if we have * a flag to match all of them anyways */ if (change_invlist) { _invlist_subtract(*invlist_ptr, PL_InBitmap, invlist_ptr); } if (ANYOF_FLAGS(node) & ANYOF_MATCHES_ALL_ABOVE_BITMAP) { _invlist_intersection(*invlist_ptr, PL_InBitmap, invlist_ptr); } /* If have completely emptied it, remove it completely */ if (_invlist_len(*invlist_ptr) == 0) { SvREFCNT_dec_NN(*invlist_ptr); *invlist_ptr = NULL; } } } /* Parse POSIX character classes: [[:foo:]], [[=foo=]], [[.foo.]]. Character classes ([:foo:]) can also be negated ([:^foo:]). Returns a named class id (ANYOF_XXX) if successful, -1 otherwise. Equivalence classes ([=foo=]) and composites ([.foo.]) are parsed, but trigger failures because they are currently unimplemented. */ #define POSIXCC_DONE(c) ((c) == ':') #define POSIXCC_NOTYET(c) ((c) == '=' || (c) == '.') #define POSIXCC(c) (POSIXCC_DONE(c) || POSIXCC_NOTYET(c)) #define MAYBE_POSIXCC(c) (POSIXCC(c) || (c) == '^' || (c) == ';') #define WARNING_PREFIX "Assuming NOT a POSIX class since " #define NO_BLANKS_POSIX_WARNING "no blanks are allowed in one" #define SEMI_COLON_POSIX_WARNING "a semi-colon was found instead of a colon" #define NOT_MEANT_TO_BE_A_POSIX_CLASS (OOB_NAMEDCLASS - 1) /* 'posix_warnings' and 'warn_text' are names of variables in the following * routine. q.v. */ #define ADD_POSIX_WARNING(p, text) STMT_START { \ if (posix_warnings) { \ if (! 
RExC_warn_text ) RExC_warn_text = (AV *) sv_2mortal((SV *) newAV()); \ av_push(RExC_warn_text, Perl_newSVpvf(aTHX_ \ WARNING_PREFIX \ text \ REPORT_LOCATION, \ REPORT_LOCATION_ARGS(p))); \ } \ } STMT_END #define CLEAR_POSIX_WARNINGS() \ STMT_START { \ if (posix_warnings && RExC_warn_text) \ av_clear(RExC_warn_text); \ } STMT_END #define CLEAR_POSIX_WARNINGS_AND_RETURN(ret) \ STMT_START { \ CLEAR_POSIX_WARNINGS(); \ return ret; \ } STMT_END STATIC int S_handle_possible_posix(pTHX_ RExC_state_t *pRExC_state, const char * const s, /* Where the putative posix class begins. Normally, this is one past the '['. This parameter exists so it can be somewhere besides RExC_parse. */ char ** updated_parse_ptr, /* Where to set the updated parse pointer, or NULL */ AV ** posix_warnings, /* Where to place any generated warnings, or NULL */ const bool check_only /* Don't die if error */ ) { /* This parses what the caller thinks may be one of the three POSIX * constructs: * 1) a character class, like [:blank:] * 2) a collating symbol, like [. .] * 3) an equivalence class, like [= =] * In the latter two cases, it croaks if it finds a syntactically legal * one, as these are not handled by Perl. * * The main purpose is to look for a POSIX character class. It returns: * a) the class number * if it is a completely syntactically and semantically legal class. * 'updated_parse_ptr', if not NULL, is set to point to just after the * closing ']' of the class * b) OOB_NAMEDCLASS * if it appears that one of the three POSIX constructs was meant, but * its specification was somehow defective. 'updated_parse_ptr', if * not NULL, is set to point to the character just after the end * character of the class. See below for handling of warnings. * c) NOT_MEANT_TO_BE_A_POSIX_CLASS * if it doesn't appear that a POSIX construct was intended. * 'updated_parse_ptr' is not changed. No warnings nor errors are * raised. * * In b) there may be errors or warnings generated. If 'check_only' is * TRUE, then any errors are discarded. Warnings are returned to the * caller via an AV* created into '*posix_warnings' if it is not NULL. If * instead it is NULL, warnings are suppressed. This is done in all * passes. The reason for this is that the rest of the parsing is heavily * dependent on whether this routine found a valid posix class or not. If * it did, the closing ']' is absorbed as part of the class. If no class, * or an invalid one is found, any ']' will be considered the terminator of * the outer bracketed character class, leading to very different results. * In particular, a '(?[ ])' construct will likely have a syntax error if * the class is parsed other than intended, and this will happen in pass1, * before the warnings would normally be output. This mechanism allows the * caller to output those warnings in pass1 just before dieing, giving a * much better clue as to what is wrong. * * The reason for this function, and its complexity is that a bracketed * character class can contain just about anything. But it's easy to * mistype the very specific posix class syntax but yielding a valid * regular bracketed class, so it silently gets compiled into something * quite unintended. * * The solution adopted here maintains backward compatibility except that * it adds a warning if it looks like a posix class was intended but * improperly specified. The warning is not raised unless what is input * very closely resembles one of the 14 legal posix classes. To do this, * it uses fuzzy parsing. 
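 * (For instance, hypothetical inputs like [[:alpha]], [[alpha:]] or
 * [[:aplha:]] would each be near enough to [[:alpha:]] to draw a
 * warning.)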
It calculates how many single-character edits it * would take to transform what was input into a legal posix class. Only * if that number is quite small does it think that the intention was a * posix class. Obviously these are heuristics, and there will be cases * where it errs on one side or another, and they can be tweaked as * experience informs. * * The syntax for a legal posix class is: * * qr/(?xa: \[ : \^? [[:lower:]]{4,6} : \] )/ * * What this routine considers syntactically to be an intended posix class * is this (the comments indicate some restrictions that the pattern * doesn't show): * * qr/(?x: \[? # The left bracket, possibly * # omitted * \h* # possibly followed by blanks * (?: \^ \h* )? # possibly a misplaced caret * [:;]? # The opening class character, * # possibly omitted. A typo * # semi-colon can also be used. * \h* * \^? # possibly a correctly placed * # caret, but not if there was also * # a misplaced one * \h* * .{3,15} # The class name. If there are * # deviations from the legal syntax, * # its edit distance must be close * # to a real class name in order * # for it to be considered to be * # an intended posix class. * \h* * [[:punct:]]? # The closing class character, * # possibly omitted. If not a colon * # nor semi colon, the class name * # must be even closer to a valid * # one * \h* * \]? # The right bracket, possibly * # omitted. * )/ * * In the above, \h must be ASCII-only. * * These are heuristics, and can be tweaked as field experience dictates. * There will be cases when someone didn't intend to specify a posix class * that this warns as being so. The goal is to minimize these, while * maximizing the catching of things intended to be a posix class that * aren't parsed as such. */ const char* p = s; const char * const e = RExC_end; unsigned complement = 0; /* If to complement the class */ bool found_problem = FALSE; /* Assume OK until proven otherwise */ bool has_opening_bracket = FALSE; bool has_opening_colon = FALSE; int class_number = OOB_NAMEDCLASS; /* Out-of-bounds until find valid class */ const char * possible_end = NULL; /* used for a 2nd parse pass */ const char* name_start; /* ptr to class name first char */ /* If the number of single-character typos the input name is away from a * legal name is no more than this number, it is considered to have meant * the legal name */ int max_distance = 2; /* to store the name. The size determines the maximum length before we * decide that no posix class was intended. Should be at least * sizeof("alphanumeric") */ UV input_text[15]; STATIC_ASSERT_DECL(C_ARRAY_LENGTH(input_text) >= sizeof "alphanumeric"); PERL_ARGS_ASSERT_HANDLE_POSSIBLE_POSIX; CLEAR_POSIX_WARNINGS(); if (p >= e) { return NOT_MEANT_TO_BE_A_POSIX_CLASS; } if (*(p - 1) != '[') { ADD_POSIX_WARNING(p, "it doesn't start with a '['"); found_problem = TRUE; } else { has_opening_bracket = TRUE; } /* They could be confused and think you can put spaces between the * components */ if (isBLANK(*p)) { found_problem = TRUE; do { p++; } while (p < e && isBLANK(*p)); ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } /* For [. .] and [= =]. These are quite different internally from [: :], * so they are handled separately. */ if (POSIXCC_NOTYET(*p) && p < e - 3) /* 1 for the close, and 1 for the ']' and 1 for at least one char in it */ { const char open_char = *p; const char * temp_ptr = p + 1; /* These two constructs are not handled by perl, and if we find a * syntactically valid one, we croak. 
khw, who wrote this code, finds * this explanation of them very unclear: * http://pubs.opengroup.org/onlinepubs/009696899/basedefs/xbd_chap09.html * And searching the rest of the internet wasn't very helpful either. * It looks like just about any byte can be in these constructs, * depending on the locale. But unless the pattern is being compiled * under /l, which is very rare, Perl runs under the C or POSIX locale. * In that case, it looks like [= =] isn't allowed at all, and that * [. .] could be any single code point, but for longer strings the * constituent characters would have to be the ASCII alphabetics plus * the minus-hyphen. Any sensible locale definition would limit itself * to these. And any portable one definitely should. Trying to parse * the general case is a nightmare (see [perl #127604]). So, this code * looks only for interiors of these constructs that match: * qr/.|[-\w]{2,}/ * Using \w relaxes the apparent rules a little, without adding much * danger of mistaking something else for one of these constructs. * * [. .] in some implementations described on the internet is usable to * escape a character that otherwise is special in bracketed character * classes. For example [.].] means a literal right bracket instead of * the ending of the class * * [= =] can legitimately contain a [. .] construct, but we don't * handle this case, as that [. .] construct will later get parsed * itself and croak then. And [= =] is checked for even when not under * /l, as Perl has long done so. * * The code below relies on there being a trailing NUL, so it doesn't * have to keep checking if the parse ptr < e. */ if (temp_ptr[1] == open_char) { temp_ptr++; } else while ( temp_ptr < e && (isWORDCHAR(*temp_ptr) || *temp_ptr == '-')) { temp_ptr++; } if (*temp_ptr == open_char) { temp_ptr++; if (*temp_ptr == ']') { temp_ptr++; if (! found_problem && ! check_only) { RExC_parse = (char *) temp_ptr; vFAIL3("POSIX syntax [%c %c] is reserved for future " "extensions", open_char, open_char); } /* Here, the syntax wasn't completely valid, or else the call * is to check-only */ if (updated_parse_ptr) { *updated_parse_ptr = (char *) temp_ptr; } CLEAR_POSIX_WARNINGS_AND_RETURN(OOB_NAMEDCLASS); } } /* If we find something that started out to look like one of these * constructs, but isn't, we continue below so that it can be checked * for being a class name with a typo of '.' or '=' instead of a colon. * */ } /* Here, we think there is a possibility that a [: :] class was meant, and * we have the first real character. It could be they think the '^' comes * first */ if (*p == '^') { found_problem = TRUE; ADD_POSIX_WARNING(p + 1, "the '^' must come after the colon"); complement = 1; p++; if (isBLANK(*p)) { found_problem = TRUE; do { p++; } while (p < e && isBLANK(*p)); ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } } /* But the first character should be a colon, which they could have easily * mistyped on a qwerty keyboard as a semi-colon (and which may be hard to * distinguish from a colon, so treat that as a colon). 
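     * (So a guessed-at [[;alpha;]] is still flagged as an attempted
     * 'alpha' class, with warnings pointing out each defect.)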
*/ if (*p == ':') { p++; has_opening_colon = TRUE; } else if (*p == ';') { found_problem = TRUE; p++; ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING); has_opening_colon = TRUE; } else { found_problem = TRUE; ADD_POSIX_WARNING(p, "there must be a starting ':'"); /* Consider an initial punctuation (not one of the recognized ones) to * be a left terminator */ if (*p != '^' && *p != ']' && isPUNCT(*p)) { p++; } } /* They may think that you can put spaces between the components */ if (isBLANK(*p)) { found_problem = TRUE; do { p++; } while (p < e && isBLANK(*p)); ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } if (*p == '^') { /* We consider something like [^:^alnum:]] to not have been intended to * be a posix class, but XXX maybe we should */ if (complement) { CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } complement = 1; p++; } /* Again, they may think that you can put spaces between the components */ if (isBLANK(*p)) { found_problem = TRUE; do { p++; } while (p < e && isBLANK(*p)); ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } if (*p == ']') { /* XXX This ']' may be a typo, and something else was meant. But * treating it as such creates enough complications, that that * possibility isn't currently considered here. So we assume that the * ']' is what is intended, and if we've already found an initial '[', * this leaves this construct looking like [:] or [:^], which almost * certainly weren't intended to be posix classes */ if (has_opening_bracket) { CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } /* But this function can be called when we parse the colon for * something like qr/[alpha:]]/, so we back up to look for the * beginning */ p--; if (*p == ';') { found_problem = TRUE; ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING); } else if (*p != ':') { /* XXX We are currently very restrictive here, so this code doesn't * consider the possibility that, say, /[alpha.]]/ was intended to * be a posix class. */ CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } /* Here we have something like 'foo:]'. There was no initial colon, * and we back up over 'foo. XXX Unlike the going forward case, we * don't handle typos of non-word chars in the middle */ has_opening_colon = FALSE; p--; while (p > RExC_start && isWORDCHAR(*p)) { p--; } p++; /* Here, we have positioned ourselves to where we think the first * character in the potential class is */ } /* Now the interior really starts. There are certain key characters that * can end the interior, or these could just be typos. To catch both * cases, we may have to do two passes. In the first pass, we keep on * going unless we come to a sequence that matches * qr/ [[:punct:]] [[:blank:]]* \] /xa * This means it takes a sequence to end the pass, so two typos in a row if * that wasn't what was intended. If the class is perfectly formed, just * this one pass is needed. We also stop if there are too many characters * being accumulated, but this number is deliberately set higher than any * real class. It is set high enough so that someone who thinks that * 'alphanumeric' is a correct name would get warned that it wasn't. * While doing the pass, we keep track of where the key characters were in * it. If we don't find an end to the class, and one of the key characters * was found, we redo the pass, but stop when we get to that character. * Thus the key character was considered a typo in the first pass, but a * terminator in the second. If two key characters are found, we stop at * the second one in the first pass. 
Again this can miss two typos, but * catches a single one * * In the first pass, 'possible_end' starts as NULL, and then gets set to * point to the first key character. For the second pass, it starts as -1. * */ name_start = p; parse_name: { bool has_blank = FALSE; bool has_upper = FALSE; bool has_terminating_colon = FALSE; bool has_terminating_bracket = FALSE; bool has_semi_colon = FALSE; unsigned int name_len = 0; int punct_count = 0; while (p < e) { /* Squeeze out blanks when looking up the class name below */ if (isBLANK(*p) ) { has_blank = TRUE; found_problem = TRUE; p++; continue; } /* The name will end with a punctuation */ if (isPUNCT(*p)) { const char * peek = p + 1; /* Treat any non-']' punctuation followed by a ']' (possibly * with intervening blanks) as trying to terminate the class. * ']]' is very likely to mean a class was intended (but * missing the colon), but the warning message that gets * generated shows the error position better if we exit the * loop at the bottom (eventually), so skip it here. */ if (*p != ']') { if (peek < e && isBLANK(*peek)) { has_blank = TRUE; found_problem = TRUE; do { peek++; } while (peek < e && isBLANK(*peek)); } if (peek < e && *peek == ']') { has_terminating_bracket = TRUE; if (*p == ':') { has_terminating_colon = TRUE; } else if (*p == ';') { has_semi_colon = TRUE; has_terminating_colon = TRUE; } else { found_problem = TRUE; } p = peek + 1; goto try_posix; } } /* Here we have punctuation we thought didn't end the class. * Keep track of the position of the key characters that are * more likely to have been class-enders */ if (*p == ']' || *p == '[' || *p == ':' || *p == ';') { /* Allow just one such possible class-ender not actually * ending the class. */ if (possible_end) { break; } possible_end = p; } /* If we have too many punctuation characters, no use in * keeping going */ if (++punct_count > max_distance) { break; } /* Treat the punctuation as a typo. */ input_text[name_len++] = *p; p++; } else if (isUPPER(*p)) { /* Use lowercase for lookup */ input_text[name_len++] = toLOWER(*p); has_upper = TRUE; found_problem = TRUE; p++; } else if (! UTF || UTF8_IS_INVARIANT(*p)) { input_text[name_len++] = *p; p++; } else { input_text[name_len++] = utf8_to_uvchr_buf((U8 *) p, e, NULL); p+= UTF8SKIP(p); } /* The declaration of 'input_text' is how long we allow a potential * class name to be, before saying they didn't mean a class name at * all */ if (name_len >= C_ARRAY_LENGTH(input_text)) { break; } } /* We get to here when the possible class name hasn't been properly * terminated before: * 1) we ran off the end of the pattern; or * 2) found two characters, each of which might have been intended to * be the name's terminator * 3) found so many punctuation characters in the purported name, * that the edit distance to a valid one is exceeded * 4) we decided it was more characters than anyone could have * intended to be one. */ found_problem = TRUE; /* In the final two cases, we know that looking up what we've * accumulated won't lead to a match, even a fuzzy one. 
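         * (Hypothetical instances: something like [[:a.b.c.d:]] trips
         * the punctuation limit, and a name of fifteen or more
         * characters the length limit.)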
*/ if ( name_len >= C_ARRAY_LENGTH(input_text) || punct_count > max_distance) { /* If there was an intermediate key character that could have been * an intended end, redo the parse, but stop there */ if (possible_end && possible_end != (char *) -1) { possible_end = (char *) -1; /* Special signal value to say we've done a first pass */ p = name_start; goto parse_name; } /* Otherwise, it can't have meant to have been a class */ CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } /* If we ran off the end, and the final character was a punctuation * one, back up one, to look at that final one just below. Later, we * will restore the parse pointer if appropriate */ if (name_len && p == e && isPUNCT(*(p-1))) { p--; name_len--; } if (p < e && isPUNCT(*p)) { if (*p == ']') { has_terminating_bracket = TRUE; /* If this is a 2nd ']', and the first one is just below this * one, consider that to be the real terminator. This gives a * uniform and better positioning for the warning message */ if ( possible_end && possible_end != (char *) -1 && *possible_end == ']' && name_len && input_text[name_len - 1] == ']') { name_len--; p = possible_end; /* And this is actually equivalent to having done the 2nd * pass now, so set it to not try again */ possible_end = (char *) -1; } } else { if (*p == ':') { has_terminating_colon = TRUE; } else if (*p == ';') { has_semi_colon = TRUE; has_terminating_colon = TRUE; } p++; } } try_posix: /* Here, we have a class name to look up. We can short circuit the * stuff below for short names that can't possibly be meant to be a * class name. (We can do this on the first pass, as any second pass * will yield an even shorter name) */ if (name_len < 3) { CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } /* Find which class it is. Initially switch on the length of the name. * */ switch (name_len) { case 4: if (memEQs(name_start, 4, "word")) { /* this is not POSIX, this is the Perl \w */ class_number = ANYOF_WORDCHAR; } break; case 5: /* Names all of length 5: alnum alpha ascii blank cntrl digit * graph lower print punct space upper * Offset 4 gives the best switch position. */ switch (name_start[4]) { case 'a': if (memBEGINs(name_start, 5, "alph")) /* alpha */ class_number = ANYOF_ALPHA; break; case 'e': if (memBEGINs(name_start, 5, "spac")) /* space */ class_number = ANYOF_SPACE; break; case 'h': if (memBEGINs(name_start, 5, "grap")) /* graph */ class_number = ANYOF_GRAPH; break; case 'i': if (memBEGINs(name_start, 5, "asci")) /* ascii */ class_number = ANYOF_ASCII; break; case 'k': if (memBEGINs(name_start, 5, "blan")) /* blank */ class_number = ANYOF_BLANK; break; case 'l': if (memBEGINs(name_start, 5, "cntr")) /* cntrl */ class_number = ANYOF_CNTRL; break; case 'm': if (memBEGINs(name_start, 5, "alnu")) /* alnum */ class_number = ANYOF_ALPHANUMERIC; break; case 'r': if (memBEGINs(name_start, 5, "lowe")) /* lower */ class_number = (FOLD) ? ANYOF_CASED : ANYOF_LOWER; else if (memBEGINs(name_start, 5, "uppe")) /* upper */ class_number = (FOLD) ? 
ANYOF_CASED : ANYOF_UPPER; break; case 't': if (memBEGINs(name_start, 5, "digi")) /* digit */ class_number = ANYOF_DIGIT; else if (memBEGINs(name_start, 5, "prin")) /* print */ class_number = ANYOF_PRINT; else if (memBEGINs(name_start, 5, "punc")) /* punct */ class_number = ANYOF_PUNCT; break; } break; case 6: if (memEQs(name_start, 6, "xdigit")) class_number = ANYOF_XDIGIT; break; } /* If the name exactly matches a posix class name the class number will * here be set to it, and the input almost certainly was meant to be a * posix class, so we can skip further checking. If instead the syntax * is exactly correct, but the name isn't one of the legal ones, we * will return that as an error below. But if neither of these apply, * it could be that no posix class was intended at all, or that one * was, but there was a typo. We tease these apart by doing fuzzy * matching on the name */ if (class_number == OOB_NAMEDCLASS && found_problem) { const UV posix_names[][6] = { { 'a', 'l', 'n', 'u', 'm' }, { 'a', 'l', 'p', 'h', 'a' }, { 'a', 's', 'c', 'i', 'i' }, { 'b', 'l', 'a', 'n', 'k' }, { 'c', 'n', 't', 'r', 'l' }, { 'd', 'i', 'g', 'i', 't' }, { 'g', 'r', 'a', 'p', 'h' }, { 'l', 'o', 'w', 'e', 'r' }, { 'p', 'r', 'i', 'n', 't' }, { 'p', 'u', 'n', 'c', 't' }, { 's', 'p', 'a', 'c', 'e' }, { 'u', 'p', 'p', 'e', 'r' }, { 'w', 'o', 'r', 'd' }, { 'x', 'd', 'i', 'g', 'i', 't' } }; /* The names of the above all have added NULs to make them the same * size, so we need to also have the real lengths */ const UV posix_name_lengths[] = { sizeof("alnum") - 1, sizeof("alpha") - 1, sizeof("ascii") - 1, sizeof("blank") - 1, sizeof("cntrl") - 1, sizeof("digit") - 1, sizeof("graph") - 1, sizeof("lower") - 1, sizeof("print") - 1, sizeof("punct") - 1, sizeof("space") - 1, sizeof("upper") - 1, sizeof("word") - 1, sizeof("xdigit")- 1 }; unsigned int i; int temp_max = max_distance; /* Use a temporary, so if we reparse, we haven't changed the outer one */ /* Use a smaller max edit distance if we are missing one of the * delimiters */ if ( has_opening_bracket + has_opening_colon < 2 || has_terminating_bracket + has_terminating_colon < 2) { temp_max--; } /* See if the input name is close to a legal one */ for (i = 0; i < C_ARRAY_LENGTH(posix_names); i++) { /* Short circuit call if the lengths are too far apart to be * able to match */ if (abs( (int) (name_len - posix_name_lengths[i])) > temp_max) { continue; } if (edit_distance(input_text, posix_names[i], name_len, posix_name_lengths[i], temp_max ) > -1) { /* If it is close, it probably was intended to be a class */ goto probably_meant_to_be; } } /* Here the input name is not close enough to a valid class name * for us to consider it to be intended to be a posix class. If * we haven't already done so, and the parse found a character that * could have been terminators for the name, but which we absorbed * as typos during the first pass, repeat the parse, signalling it * to stop at that character */ if (possible_end && possible_end != (char *) -1) { possible_end = (char *) -1; p = name_start; goto parse_name; } /* Here neither pass found a close-enough class name */ CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } probably_meant_to_be: /* Here we think that a posix specification was intended. 
Update any * parse pointer */ if (updated_parse_ptr) { *updated_parse_ptr = (char *) p; } /* If a posix class name was intended but incorrectly specified, we * output or return the warnings */ if (found_problem) { /* We set flags for these issues in the parse loop above instead of * adding them to the list of warnings, because we can parse it * twice, and we only want one warning instance */ if (has_upper) { ADD_POSIX_WARNING(p, "the name must be all lowercase letters"); } if (has_blank) { ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } if (has_semi_colon) { ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING); } else if (! has_terminating_colon) { ADD_POSIX_WARNING(p, "there is no terminating ':'"); } if (! has_terminating_bracket) { ADD_POSIX_WARNING(p, "there is no terminating ']'"); } if (posix_warnings && RExC_warn_text && av_top_index(RExC_warn_text) > -1) { *posix_warnings = RExC_warn_text; } } else if (class_number != OOB_NAMEDCLASS) { /* If it is a known class, return the class. The class number * #defines are structured so each complement is +1 to the normal * one */ CLEAR_POSIX_WARNINGS_AND_RETURN(class_number + complement); } else if (! check_only) { /* Here, it is an unrecognized class. This is an error (unless the * call is to check only, which we've already handled above) */ const char * const complement_string = (complement) ? "^" : ""; RExC_parse = (char *) p; vFAIL3utf8f("POSIX class [:%s%" UTF8f ":] unknown", complement_string, UTF8fARG(UTF, RExC_parse - name_start - 2, name_start)); } } return OOB_NAMEDCLASS; } #undef ADD_POSIX_WARNING STATIC unsigned int S_regex_set_precedence(const U8 my_operator) { /* Returns the precedence in the (?[...]) construct of the input operator, * specified by its character representation. The precedence follows * general Perl rules, but it extends this so that ')' and ']' have (low) * precedence even though they aren't really operators */ switch (my_operator) { case '!': return 5; case '&': return 4; case '^': case '|': case '+': case '-': return 3; case ')': return 2; case ']': return 1; } NOT_REACHED; /* NOTREACHED */ return 0; /* Silence compiler warning */ } STATIC regnode * S_handle_regex_sets(pTHX_ RExC_state_t *pRExC_state, SV** return_invlist, I32 *flagp, U32 depth, char * const oregcomp_parse) { /* Handle the (?[...]) construct to do set operations */ U8 curchar; /* Current character being parsed */ UV start, end; /* End points of code point ranges */ SV* final = NULL; /* The end result inversion list */ SV* result_string; /* 'final' stringified */ AV* stack; /* stack of operators and operands not yet resolved */ AV* fence_stack = NULL; /* A stack containing the positions in 'stack' of where the undealt-with left parens would be if they were actually put there */ /* The 'volatile' is a workaround for an optimiser bug * in Solaris Studio 12.3. See RT #127455 */ volatile IV fence = 0; /* Position of where most recent undealt- with left paren in stack is; -1 if none. */ STRLEN len; /* Temporary */ regnode* node; /* Temporary, and final regnode returned by this function */ const bool save_fold = FOLD; /* Temporary */ char *save_end, *save_parse; /* Temporaries */ const bool in_locale = LOC; /* we turn off /l during processing */ AV* posix_warnings = NULL; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_HANDLE_REGEX_SETS; if (in_locale) { set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET); } REQUIRE_UNI_RULES(flagp, NULL); /* The use of this operator implies /u. 
This is required so that the compile time values are valid in all runtime cases */ /* This will return only an ANYOF regnode, or (unlikely) something smaller * (such as EXACT). Thus we can skip most everything if just sizing. We * call regclass to handle '[]' so as to not have to reinvent its parsing * rules here (throwing away the size it computes each time). And, we exit * upon an unescaped ']' that isn't one ending a regclass. To do both * these things, we need to realize that something preceded by a backslash * is escaped, so we have to keep track of backslashes */ if (SIZE_ONLY) { UV depth = 0; /* how many nested (?[...]) constructs */ while (RExC_parse < RExC_end) { SV* current = NULL; skip_to_be_ignored_text(pRExC_state, &RExC_parse, TRUE /* Force /x */ ); switch (*RExC_parse) { case '?': if (RExC_parse[1] == '[') depth++, RExC_parse++; /* FALLTHROUGH */ default: break; case '\\': /* Skip past this, so the next character gets skipped, after * the switch */ RExC_parse++; if (*RExC_parse == 'c') { /* Skip the \cX notation for control characters */ RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; } break; case '[': { /* See if this is a [:posix:] class. */ bool is_posix_class = (OOB_NAMEDCLASS < handle_possible_posix(pRExC_state, RExC_parse + 1, NULL, NULL, TRUE /* checking only */)); /* If it is a posix class, leave the parse pointer at the * '[' to fool regclass() into thinking it is part of a * '[[:posix:]]'. */ if (! is_posix_class) { RExC_parse++; } /* regclass() can only return RESTART_PASS1 and NEED_UTF8 * if multi-char folds are allowed. */ if (!regclass(pRExC_state, flagp,depth+1, is_posix_class, /* parse the whole char class only if not a posix class */ FALSE, /* don't allow multi-char folds */ TRUE, /* silence non-portable warnings. */ TRUE, /* strict */ FALSE, /* Require return to be an ANYOF */ &current, &posix_warnings )) FAIL2("panic: regclass returned NULL to handle_sets, " "flags=%#" UVxf, (UV) *flagp); /* function call leaves parse pointing to the ']', except * if we faked it */ if (is_posix_class) { RExC_parse--; } SvREFCNT_dec(current); /* In case it returned something */ break; } case ']': if (depth--) break; RExC_parse++; if (*RExC_parse == ')') { node = reganode(pRExC_state, ANYOF, 0); RExC_size += ANYOF_SKIP; nextchar(pRExC_state); Set_Node_Length(node, RExC_parse - oregcomp_parse + 1); /* MJD */ if (in_locale) { set_regex_charset(&RExC_flags, REGEX_LOCALE_CHARSET); } return node; } goto no_close; } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; } no_close: /* We output the messages even if warnings are off, because we'll fail * the very next thing, and these give a likely diagnosis for that */ if (posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0) { output_or_return_posix_warnings(pRExC_state, posix_warnings, NULL); } FAIL("Syntax error in (?[...])"); } /* Pass 2 only after this. */ Perl_ck_warner_d(aTHX_ packWARN(WARN_EXPERIMENTAL__REGEX_SETS), "The regex_sets feature is experimental" REPORT_LOCATION, REPORT_LOCATION_ARGS(RExC_parse)); /* Everything in this construct is a metacharacter. Operands begin with * either a '\' (for an escape sequence), or a '[' for a bracketed * character class. Any other character should be an operator, or * parenthesis for grouping. Both types of operands are handled by calling * regclass() to parse them. It is called with a parameter to indicate to * return the computed inversion list. The parsing here is implemented via * a stack. 
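     * (Essentially a small operator-precedence evaluator of the
     * shunting-yard family.)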
Each entry on the stack is a single character representing one
     * of the operators; or else a pointer to an operand inversion list. */

#define IS_OPERATOR(a) SvIOK(a)
#define IS_OPERAND(a)  (! IS_OPERATOR(a))

    /* The stack is kept in Łukasiewicz order.  (That's pronounced similar
     * to luke-a-shave-itch (or -itz), but people who didn't want to bother
     * with pronouncing it called it Reverse Polish instead, but now that YOU
     * know how to pronounce it you can use the correct term, thus giving due
     * credit to the person who invented it, and impressing your geek friends.
     * Wikipedia says that the pronunciation of "Ł" has been changing so that
     * it is now more like an English initial W (as in wonk) than an L.)
     *
     * This means that, for example, 'a | b & c' is stored on the stack as
     *
     * c  [4]
     * b  [3]
     * &  [2]
     * a  [1]
     * |  [0]
     *
     * where the numbers in brackets give the stack [array] element number.
     * In this implementation, parentheses are not stored on the stack.
     * Instead a '(' creates a "fence" so that the part of the stack below the
     * fence is invisible except to the corresponding ')' (this allows us to
     * replace testing for parens, by using instead subtraction of the fence
     * position).  As new operands are processed they are pushed onto the stack
     * (except as noted in the next paragraph).  New operators of higher
     * precedence than the current final one are inserted on the stack before
     * the lhs operand (so that when the rhs is pushed next, everything will be
     * in the correct positions shown above).  When an operator of equal or
     * lower precedence is encountered in parsing, all the stacked operations
     * of equal or higher precedence are evaluated, leaving the result as the
     * top entry on the stack.  This makes higher precedence operations
     * evaluate before lower precedence ones, and causes operations of equal
     * precedence to left associate.
     *
     * The only unary operator '!' is immediately pushed onto the stack when
     * encountered.  When an operand is encountered, if the top of the stack is
     * a '!', the complement is immediately performed, and the '!' popped.  The
     * resulting value is treated as a new operand, and the logic in the
     * previous paragraph is executed.  Thus in the expression
     *      [a] + ! [b]
     * the stack looks like
     *
     * !
     * a
     * +
     *
     * as 'b' gets parsed, the latter gets evaluated to '!b', and the stack
     * becomes
     *
     * !b
     * a
     * +
     *
     * A ')' is treated as an operator with lower precedence than all the
     * aforementioned ones, which causes all operations on the stack above the
     * corresponding '(' to be evaluated down to a single resultant operand.
     * Then the fence for the '(' is removed, and the operand goes through the
     * algorithm above, without the fence.
     *
     * A separate stack is kept of the fence positions, so that the position of
     * the latest so-far unbalanced '(' is at the top of it.
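     *
     * (An illustrative trace, not part of the original comment: for an
     * assumed input '[a] & ( [b] + [c] )' the stacks evolve roughly as
     *
     *   after [a]:  stack: a                (fence = 0)
     *   after '&':  stack: & a              (fence = 0)
     *   after '(':  stack: & a              (old fence pushed; fence = 2)
     *   after [b]:  stack: & a b
     *   after '+':  stack: & a + b
     *   after [c]:  stack: & a + b c
     *   at ')':     'b + c' is evaluated to one operand, the fence is
     *               popped, and that operand is re-processed as if newly
     *               encountered, leaving:  & a (b+c)
     *
     * The final ']' then reduces this to the single resulting operand.)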
* * The ']' ending the construct is treated as the lowest operator of all, * so that everything gets evaluated down to a single operand, which is the * result */ sv_2mortal((SV *)(stack = newAV())); sv_2mortal((SV *)(fence_stack = newAV())); while (RExC_parse < RExC_end) { I32 top_index; /* Index of top-most element in 'stack' */ SV** top_ptr; /* Pointer to top 'stack' element */ SV* current = NULL; /* To contain the current inversion list operand */ SV* only_to_avoid_leaks; skip_to_be_ignored_text(pRExC_state, &RExC_parse, TRUE /* Force /x */ ); if (RExC_parse >= RExC_end) { Perl_croak(aTHX_ "panic: Read past end of '(?[ ])'"); } curchar = UCHARAT(RExC_parse); redo_curchar: #ifdef ENABLE_REGEX_SETS_DEBUGGING /* Enable with -Accflags=-DENABLE_REGEX_SETS_DEBUGGING */ DEBUG_U(dump_regex_sets_structures(pRExC_state, stack, fence, fence_stack)); #endif top_index = av_tindex_skip_len_mg(stack); switch (curchar) { SV** stacked_ptr; /* Ptr to something already on 'stack' */ char stacked_operator; /* The topmost operator on the 'stack'. */ SV* lhs; /* Operand to the left of the operator */ SV* rhs; /* Operand to the right of the operator */ SV* fence_ptr; /* Pointer to top element of the fence stack */ case '(': if ( RExC_parse < RExC_end - 1 && (UCHARAT(RExC_parse + 1) == '?')) { /* If is a '(?', could be an embedded '(?flags:(?[...])'. * This happens when we have some thing like * * my $thai_or_lao = qr/(?[ \p{Thai} + \p{Lao} ])/; * ... * qr/(?[ \p{Digit} & $thai_or_lao ])/; * * Here we would be handling the interpolated * '$thai_or_lao'. We handle this by a recursive call to * ourselves which returns the inversion list the * interpolated expression evaluates to. We use the flags * from the interpolated pattern. */ U32 save_flags = RExC_flags; const char * save_parse; RExC_parse += 2; /* Skip past the '(?' */ save_parse = RExC_parse; /* Parse any flags for the '(?' */ parse_lparen_question_flags(pRExC_state); if (RExC_parse == save_parse /* Makes sure there was at least one flag (or else this embedding wasn't compiled) */ || RExC_parse >= RExC_end - 4 || UCHARAT(RExC_parse) != ':' || UCHARAT(++RExC_parse) != '(' || UCHARAT(++RExC_parse) != '?' || UCHARAT(++RExC_parse) != '[') { /* In combination with the above, this moves the * pointer to the point just after the first erroneous * character (or if there are no flags, to where they * should have been) */ if (RExC_parse >= RExC_end - 4) { RExC_parse = RExC_end; } else if (RExC_parse != save_parse) { RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; } vFAIL("Expecting '(?flags:(?[...'"); } /* Recurse, with the meat of the embedded expression */ RExC_parse++; (void) handle_regex_sets(pRExC_state, &current, flagp, depth+1, oregcomp_parse); /* Here, 'current' contains the embedded expression's * inversion list, and RExC_parse points to the trailing * ']'; the next character should be the ')' */ RExC_parse++; assert(UCHARAT(RExC_parse) == ')'); /* Then the ')' matching the original '(' handled by this * case: statement */ RExC_parse++; assert(UCHARAT(RExC_parse) == ')'); RExC_parse++; RExC_flags = save_flags; goto handle_operand; } /* A regular '('. Look behind for illegal syntax */ if (top_index - fence >= 0) { /* If the top entry on the stack is an operator, it had * better be a '!', otherwise the entry below the top * operand should be an operator */ if ( ! (top_ptr = av_fetch(stack, top_index, FALSE)) || (IS_OPERATOR(*top_ptr) && SvUV(*top_ptr) != '!') || ( IS_OPERAND(*top_ptr) && ( top_index - fence < 1 || ! 
(stacked_ptr = av_fetch(stack, top_index - 1, FALSE)) || ! IS_OPERATOR(*stacked_ptr)))) { RExC_parse++; vFAIL("Unexpected '(' with no preceding operator"); } } /* Stack the position of this undealt-with left paren */ av_push(fence_stack, newSViv(fence)); fence = top_index + 1; break; case '\\': /* regclass() can only return RESTART_PASS1 and NEED_UTF8 if * multi-char folds are allowed. */ if (!regclass(pRExC_state, flagp,depth+1, TRUE, /* means parse just the next thing */ FALSE, /* don't allow multi-char folds */ FALSE, /* don't silence non-portable warnings. */ TRUE, /* strict */ FALSE, /* Require return to be an ANYOF */ &current, NULL)) { FAIL2("panic: regclass returned NULL to handle_sets, " "flags=%#" UVxf, (UV) *flagp); } /* regclass() will return with parsing just the \ sequence, * leaving the parse pointer at the next thing to parse */ RExC_parse--; goto handle_operand; case '[': /* Is a bracketed character class */ { /* See if this is a [:posix:] class. */ bool is_posix_class = (OOB_NAMEDCLASS < handle_possible_posix(pRExC_state, RExC_parse + 1, NULL, NULL, TRUE /* checking only */)); /* If it is a posix class, leave the parse pointer at the '[' * to fool regclass() into thinking it is part of a * '[[:posix:]]'. */ if (! is_posix_class) { RExC_parse++; } /* regclass() can only return RESTART_PASS1 and NEED_UTF8 if * multi-char folds are allowed. */ if (!regclass(pRExC_state, flagp,depth+1, is_posix_class, /* parse the whole char class only if not a posix class */ FALSE, /* don't allow multi-char folds */ TRUE, /* silence non-portable warnings. */ TRUE, /* strict */ FALSE, /* Require return to be an ANYOF */ &current, NULL )) { FAIL2("panic: regclass returned NULL to handle_sets, " "flags=%#" UVxf, (UV) *flagp); } /* function call leaves parse pointing to the ']', except if we * faked it */ if (is_posix_class) { RExC_parse--; } goto handle_operand; } case ']': if (top_index >= 1) { goto join_operators; } /* Only a single operand on the stack: are done */ goto done; case ')': if (av_tindex_skip_len_mg(fence_stack) < 0) { RExC_parse++; vFAIL("Unexpected ')'"); } /* If nothing after the fence, is missing an operand */ if (top_index - fence < 0) { RExC_parse++; goto bad_syntax; } /* If at least two things on the stack, treat this as an * operator */ if (top_index - fence >= 1) { goto join_operators; } /* Here only a single thing on the fenced stack, and there is a * fence. Get rid of it */ fence_ptr = av_pop(fence_stack); assert(fence_ptr); fence = SvIV(fence_ptr) - 1; SvREFCNT_dec_NN(fence_ptr); fence_ptr = NULL; if (fence < 0) { fence = 0; } /* Having gotten rid of the fence, we pop the operand at the * stack top and process it as a newly encountered operand */ current = av_pop(stack); if (IS_OPERAND(current)) { goto handle_operand; } RExC_parse++; goto bad_syntax; case '&': case '|': case '+': case '-': case '^': /* These binary operators should have a left operand already * parsed */ if ( top_index - fence < 0 || top_index - fence == 1 || ( ! (top_ptr = av_fetch(stack, top_index, FALSE))) || ! 
IS_OPERAND(*top_ptr)) { goto unexpected_binary; } /* If only the one operand is on the part of the stack visible * to us, we just place this operator in the proper position */ if (top_index - fence < 2) { /* Place the operator before the operand */ SV* lhs = av_pop(stack); av_push(stack, newSVuv(curchar)); av_push(stack, lhs); break; } /* But if there is something else on the stack, we need to * process it before this new operator if and only if the * stacked operation has equal or higher precedence than the * new one */ join_operators: /* The operator on the stack is supposed to be below both its * operands */ if ( ! (stacked_ptr = av_fetch(stack, top_index - 2, FALSE)) || IS_OPERAND(*stacked_ptr)) { /* But if not, it's legal and indicates we are completely * done if and only if we're currently processing a ']', * which should be the final thing in the expression */ if (curchar == ']') { goto done; } unexpected_binary: RExC_parse++; vFAIL2("Unexpected binary operator '%c' with no " "preceding operand", curchar); } stacked_operator = (char) SvUV(*stacked_ptr); if (regex_set_precedence(curchar) > regex_set_precedence(stacked_operator)) { /* Here, the new operator has higher precedence than the * stacked one. This means we need to add the new one to * the stack to await its rhs operand (and maybe more * stuff). We put it before the lhs operand, leaving * untouched the stacked operator and everything below it * */ lhs = av_pop(stack); assert(IS_OPERAND(lhs)); av_push(stack, newSVuv(curchar)); av_push(stack, lhs); break; } /* Here, the new operator has equal or lower precedence than * what's already there. This means the operation already * there should be performed now, before the new one. */ rhs = av_pop(stack); if (! IS_OPERAND(rhs)) { /* This can happen when a ! is not followed by an operand, * like in /(?[\t &!])/ */ goto bad_syntax; } lhs = av_pop(stack); if (! IS_OPERAND(lhs)) { /* This can happen when there is an empty (), like in * /(?[[0]+()+])/ */ goto bad_syntax; } switch (stacked_operator) { case '&': _invlist_intersection(lhs, rhs, &rhs); break; case '|': case '+': _invlist_union(lhs, rhs, &rhs); break; case '-': _invlist_subtract(lhs, rhs, &rhs); break; case '^': /* The union minus the intersection */ { SV* i = NULL; SV* u = NULL; _invlist_union(lhs, rhs, &u); _invlist_intersection(lhs, rhs, &i); _invlist_subtract(u, i, &rhs); SvREFCNT_dec_NN(i); SvREFCNT_dec_NN(u); break; } } SvREFCNT_dec(lhs); /* Here, the higher precedence operation has been done, and the * result is in 'rhs'. We overwrite the stacked operator with * the result. Then we redo this code to either push the new * operator onto the stack or perform any higher precedence * stacked operation */ only_to_avoid_leaks = av_pop(stack); SvREFCNT_dec(only_to_avoid_leaks); av_push(stack, rhs); goto redo_curchar; case '!': /* Highest priority, right associative */ /* If what's already at the top of the stack is another '!", * they just cancel each other out */ if ( (top_ptr = av_fetch(stack, top_index, FALSE)) && (IS_OPERATOR(*top_ptr) && SvUV(*top_ptr) == '!')) { only_to_avoid_leaks = av_pop(stack); SvREFCNT_dec(only_to_avoid_leaks); } else { /* Otherwise, since it's right associative, just push onto the stack */ av_push(stack, newSVuv(curchar)); } break; default: RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; vFAIL("Unexpected character"); handle_operand: /* Here 'current' is the operand. If something is already on the * stack, we have to check if it is a !. 
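             * (Illustrative: in a hypothetical /(?[ ! \w ])/ the '!' is
             * already stacked when the \w operand arrives; the code below
             * complements the operand in place and pops the '!'.)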
But first, the code above * may have altered the stack in the time since we earlier set * 'top_index'. */ top_index = av_tindex_skip_len_mg(stack); if (top_index - fence >= 0) { /* If the top entry on the stack is an operator, it had better * be a '!', otherwise the entry below the top operand should * be an operator */ top_ptr = av_fetch(stack, top_index, FALSE); assert(top_ptr); if (IS_OPERATOR(*top_ptr)) { /* The only permissible operator at the top of the stack is * '!', which is applied immediately to this operand. */ curchar = (char) SvUV(*top_ptr); if (curchar != '!') { SvREFCNT_dec(current); vFAIL2("Unexpected binary operator '%c' with no " "preceding operand", curchar); } _invlist_invert(current); only_to_avoid_leaks = av_pop(stack); SvREFCNT_dec(only_to_avoid_leaks); /* And we redo with the inverted operand. This allows * handling multiple ! in a row */ goto handle_operand; } /* Single operand is ok only for the non-binary ')' * operator */ else if ((top_index - fence == 0 && curchar != ')') || (top_index - fence > 0 && (! (stacked_ptr = av_fetch(stack, top_index - 1, FALSE)) || IS_OPERAND(*stacked_ptr)))) { SvREFCNT_dec(current); vFAIL("Operand with no preceding operator"); } } /* Here there was nothing on the stack or the top element was * another operand. Just add this new one */ av_push(stack, current); } /* End of switch on next parse token */ RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; } /* End of loop parsing through the construct */ done: if (av_tindex_skip_len_mg(fence_stack) >= 0) { vFAIL("Unmatched ("); } if (av_tindex_skip_len_mg(stack) < 0 /* Was empty */ || ((final = av_pop(stack)) == NULL) || ! IS_OPERAND(final) || SvTYPE(final) != SVt_INVLIST || av_tindex_skip_len_mg(stack) >= 0) /* More left on stack */ { bad_syntax: SvREFCNT_dec(final); vFAIL("Incomplete expression within '(?[ ])'"); } /* Here, 'final' is the resultant inversion list from evaluating the * expression. Return it if so requested */ if (return_invlist) { *return_invlist = final; return END; } /* Otherwise generate a resultant node, based on 'final'. regclass() is * expecting a string of ranges and individual code points */ invlist_iterinit(final); result_string = newSVpvs(""); while (invlist_iternext(final, &start, &end)) { if (start == end) { Perl_sv_catpvf(aTHX_ result_string, "\\x{%" UVXf "}", start); } else { Perl_sv_catpvf(aTHX_ result_string, "\\x{%" UVXf "}-\\x{%" UVXf "}", start, end); } } /* About to generate an ANYOF (or similar) node from the inversion list we * have calculated */ save_parse = RExC_parse; RExC_parse = SvPV(result_string, len); save_end = RExC_end; RExC_end = RExC_parse + len; /* We turn off folding around the call, as the class we have constructed * already has all folding taken into consideration, and we don't want * regclass() to add to that */ RExC_flags &= ~RXf_PMf_FOLD; /* regclass() can only return RESTART_PASS1 and NEED_UTF8 if multi-char * folds are allowed. */ node = regclass(pRExC_state, flagp,depth+1, FALSE, /* means parse the whole char class */ FALSE, /* don't allow multi-char folds */ TRUE, /* silence non-portable warnings. The above may very well have generated non-portable code points, but they're valid on this machine */ FALSE, /* similarly, no need for strict */ FALSE, /* Require return to be an ANYOF */ NULL, NULL ); if (!node) FAIL2("panic: regclass returned NULL to handle_sets, flags=%#" UVxf, PTR2UV(flagp)); /* Fix up the node type if we are in locale. 
(We have pretended we are
     * under /u for the purposes of regclass(), as this construct will only
     * work under UTF-8 locales.  But now we change the opcode to be ANYOFL (so
     * as to cause any warnings about bad locales to be output in regexec.c),
     * and add the flag that indicates to check if not in a UTF-8 locale.  The
     * reason we above forbid optimization into something other than an ANYOF
     * node is simply to minimize the number of code changes in regexec.c.
     * Otherwise we would have to create new EXACTish node types and deal with
     * them.  This decision could be revisited should this construct become
     * popular.
     *
     * (One might think we could look at the resulting ANYOF node and suppress
     * the flag if everything is above 255, as those would be UTF-8 only,
     * but this isn't true, as the components that led to that result could
     * have been locale-affected, and just happen to cancel each other out
     * under UTF-8 locales.) */
    if (in_locale) {
        set_regex_charset(&RExC_flags, REGEX_LOCALE_CHARSET);

        assert(OP(node) == ANYOF);

        OP(node) = ANYOFL;
        ANYOF_FLAGS(node)
                |= ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD;
    }

    if (save_fold) {
        RExC_flags |= RXf_PMf_FOLD;
    }

    RExC_parse = save_parse + 1;
    RExC_end = save_end;
    SvREFCNT_dec_NN(final);
    SvREFCNT_dec_NN(result_string);

    nextchar(pRExC_state);

    Set_Node_Length(node, RExC_parse - oregcomp_parse + 1); /* MJD */

    return node;
}

#ifdef ENABLE_REGEX_SETS_DEBUGGING

STATIC void
S_dump_regex_sets_structures(pTHX_ RExC_state_t *pRExC_state,
                             AV * stack, const IV fence, AV * fence_stack)
{
    /* Dumps the stacks in handle_regex_sets() */

    const SSize_t stack_top = av_tindex_skip_len_mg(stack);
    const SSize_t fence_stack_top = av_tindex_skip_len_mg(fence_stack);
    SSize_t i;

    PERL_ARGS_ASSERT_DUMP_REGEX_SETS_STRUCTURES;

    PerlIO_printf(Perl_debug_log, "\nParse position is:%s\n", RExC_parse);

    if (stack_top < 0) {
        PerlIO_printf(Perl_debug_log, "Nothing on stack\n");
    }
    else {
        PerlIO_printf(Perl_debug_log, "Stack: (fence=%d)\n", (int) fence);
        for (i = stack_top; i >= 0; i--) {
            SV ** element_ptr = av_fetch(stack, i, FALSE);
            if (! element_ptr) {  /* Shouldn't happen; avoid dereferencing
                                     a NULL pointer below */
                continue;
            }

            if (IS_OPERATOR(*element_ptr)) {
                PerlIO_printf(Perl_debug_log, "[%d]: %c\n",
                                            (int) i, (int) SvIV(*element_ptr));
            }
            else {
                PerlIO_printf(Perl_debug_log, "[%d] ", (int) i);
                sv_dump(*element_ptr);
            }
        }
    }

    if (fence_stack_top < 0) {
        PerlIO_printf(Perl_debug_log, "Nothing on fence_stack\n");
    }
    else {
        PerlIO_printf(Perl_debug_log, "Fence_stack: \n");
        for (i = fence_stack_top; i >= 0; i--) {
            SV ** element_ptr = av_fetch(fence_stack, i, FALSE);
            if (! element_ptr) {  /* Shouldn't happen; avoid dereferencing
                                     a NULL pointer below */
                continue;
            }

            PerlIO_printf(Perl_debug_log, "[%d]: %d\n",
                                            (int) i, (int) SvIV(*element_ptr));
        }
    }
}

#endif

#undef IS_OPERATOR
#undef IS_OPERAND

STATIC void
S_add_above_Latin1_folds(pTHX_ RExC_state_t *pRExC_state, const U8 cp, SV** invlist)
{
    /* This hard-codes the Latin1/above-Latin1 folding rules, so that an
     * innocent-looking character class, like /[ks]/i won't have to go out to
     * disk to find the possible matches.
     *
     * This should be called only for a Latin1-range code point, cp, which is
     * known to be involved in a simple fold with other code points above
     * Latin1.  It would give false results if /aa has been specified.
     * Multi-char folds are outside the scope of this, and must be handled
     * specially.
     *
     * XXX It would be better to generate these via regen, in case a new
     * version of the Unicode standard adds new mappings, though that is not
     * really likely, and may be caught by the default: case of the switch
     * below.
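     *
     * (Illustrative, based on the switch just below: for cp == 's' under
     * /i, U+017F LATIN SMALL LETTER LONG S gets appended to *invlist,
     * which is how /[ks]/i can match that character without consulting
     * the Unicode tables on disk.)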
*/ PERL_ARGS_ASSERT_ADD_ABOVE_LATIN1_FOLDS; assert(HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(cp)); switch (cp) { case 'k': case 'K': *invlist = add_cp_to_invlist(*invlist, KELVIN_SIGN); break; case 's': case 'S': *invlist = add_cp_to_invlist(*invlist, LATIN_SMALL_LETTER_LONG_S); break; case MICRO_SIGN: *invlist = add_cp_to_invlist(*invlist, GREEK_CAPITAL_LETTER_MU); *invlist = add_cp_to_invlist(*invlist, GREEK_SMALL_LETTER_MU); break; case LATIN_CAPITAL_LETTER_A_WITH_RING_ABOVE: case LATIN_SMALL_LETTER_A_WITH_RING_ABOVE: *invlist = add_cp_to_invlist(*invlist, ANGSTROM_SIGN); break; case LATIN_SMALL_LETTER_Y_WITH_DIAERESIS: *invlist = add_cp_to_invlist(*invlist, LATIN_CAPITAL_LETTER_Y_WITH_DIAERESIS); break; #ifdef LATIN_CAPITAL_LETTER_SHARP_S /* not defined in early Unicode releases */ case LATIN_SMALL_LETTER_SHARP_S: *invlist = add_cp_to_invlist(*invlist, LATIN_CAPITAL_LETTER_SHARP_S); break; #endif #if UNICODE_MAJOR_VERSION < 3 \ || (UNICODE_MAJOR_VERSION == 3 && UNICODE_DOT_VERSION == 0) /* In 3.0 and earlier, U+0130 folded simply to 'i'; and in 3.0.1 so did * U+0131. */ case 'i': case 'I': *invlist = add_cp_to_invlist(*invlist, LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE); # if UNICODE_DOT_DOT_VERSION == 1 *invlist = add_cp_to_invlist(*invlist, LATIN_SMALL_LETTER_DOTLESS_I); # endif break; #endif default: /* Use deprecated warning to increase the chances of this being * output */ if (PASS2) { ckWARN2reg_d(RExC_parse, "Perl folding rules are not up-to-date for 0x%02X; please use the perlbug utility to report;", cp); } break; } } STATIC void S_output_or_return_posix_warnings(pTHX_ RExC_state_t *pRExC_state, AV* posix_warnings, AV** return_posix_warnings) { /* If the final parameter is NULL, output the elements of the array given * by '*posix_warnings' as REGEXP warnings. Otherwise, the elements are * pushed onto it, (creating if necessary) */ SV * msg; const bool first_is_fatal = ! return_posix_warnings && ckDEAD(packWARN(WARN_REGEXP)); PERL_ARGS_ASSERT_OUTPUT_OR_RETURN_POSIX_WARNINGS; while ((msg = av_shift(posix_warnings)) != &PL_sv_undef) { if (return_posix_warnings) { if (! *return_posix_warnings) { /* mortalize to not leak if warnings are fatal */ *return_posix_warnings = (AV *) sv_2mortal((SV *) newAV()); } av_push(*return_posix_warnings, msg); } else { if (first_is_fatal) { /* Avoid leaking this */ av_undef(posix_warnings); /* This isn't necessary if the array is mortal, but is a fail-safe */ (void) sv_2mortal(msg); if (PASS2) { SAVEFREESV(RExC_rx_sv); } } Perl_warner(aTHX_ packWARN(WARN_REGEXP), "%s", SvPVX(msg)); SvREFCNT_dec_NN(msg); } } } STATIC AV * S_add_multi_match(pTHX_ AV* multi_char_matches, SV* multi_string, const STRLEN cp_count) { /* This adds the string scalar <multi_string> to the array * <multi_char_matches>. <multi_string> is known to have exactly * <cp_count> code points in it. This is used when constructing a * bracketed character class and we find something that needs to match more * than a single character. * * <multi_char_matches> is actually an array of arrays. Each top-level * element is an array that contains all the strings known so far that are * the same length. And that length (in number of code points) is the same * as the index of the top-level array. Hence, the [2] element is an * array, each element thereof is a string containing TWO code points; * while element [3] is for strings of THREE characters, and so on. Since * this is for multi-char strings there can never be a [0] nor [1] element. 
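 *
 * (A sketch of the layout, using assumed example folds "ss" (two code
 * points) and "ffi" (three code points):
 *
 *     multi_char_matches[2]  -->  [ "ss", ... ]
 *     multi_char_matches[3]  -->  [ "ffi" ]
 *
 * so the index of each inner array is the common code-point length of the
 * strings it holds.)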
* * When we rewrite the character class below, we will do so such that the * longest strings are written first, so that it prefers the longest * matching strings first. This is done even if it turns out that any * quantifier is non-greedy, out of this programmer's (khw) laziness. Tom * Christiansen has agreed that this is ok. This makes the test for the * ligature 'ffi' come before the test for 'ff', for example */ AV* this_array; AV** this_array_ptr; PERL_ARGS_ASSERT_ADD_MULTI_MATCH; if (! multi_char_matches) { multi_char_matches = newAV(); } if (av_exists(multi_char_matches, cp_count)) { this_array_ptr = (AV**) av_fetch(multi_char_matches, cp_count, FALSE); this_array = *this_array_ptr; } else { this_array = newAV(); av_store(multi_char_matches, cp_count, (SV*) this_array); } av_push(this_array, multi_string); return multi_char_matches; } /* The names of properties whose definitions are not known at compile time are * stored in this SV, after a constant heading. So if the length has been * changed since initialization, then there is a run-time definition. */ #define HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION \ (SvCUR(listsv) != initial_listsv_len) /* There is a restricted set of white space characters that are legal when * ignoring white space in a bracketed character class. This generates the * code to skip them. * * There is a line below that uses the same white space criteria but is outside * this macro. Both here and there must use the same definition */ #define SKIP_BRACKETED_WHITE_SPACE(do_skip, p) \ STMT_START { \ if (do_skip) { \ while (isBLANK_A(UCHARAT(p))) \ { \ p++; \ } \ } \ } STMT_END STATIC regnode * S_regclass(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth, const bool stop_at_1, /* Just parse the next thing, don't look for a full character class */ bool allow_multi_folds, const bool silence_non_portable, /* Don't output warnings about too large characters */ const bool strict, bool optimizable, /* ? Allow a non-ANYOF return node */ SV** ret_invlist, /* Return an inversion list, not a node */ AV** return_posix_warnings ) { /* parse a bracketed class specification. Most of these will produce an * ANYOF node; but something like [a] will produce an EXACT node; [aA], an * EXACTFish node; [[:ascii:]], a POSIXA node; etc. It is more complex * under /i with multi-character folds: it will be rewritten following the * paradigm of this example, where the <multi-fold>s are characters which * fold to multiple character sequences: * /[abc\x{multi-fold1}def\x{multi-fold2}ghi]/i * gets effectively rewritten as: * /(?:\x{multi-fold1}|\x{multi-fold2}|[abcdefghi]/i * reg() gets called (recursively) on the rewritten version, and this * function will return what it constructs. (Actually the <multi-fold>s * aren't physically removed from the [abcdefghi], it's just that they are * ignored in the recursion by means of a flag: * <RExC_in_multi_char_class>.) * * ANYOF nodes contain a bit map for the first NUM_ANYOF_CODE_POINTS * characters, with the corresponding bit set if that character is in the * list. For characters above this, a range list or swash is used. There * are extra bits for \w, etc. in locale ANYOFs, as what these match is not * determinable at compile time * * Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs * to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded * to UTF-8. This can only happen if ret_invlist is non-NULL. 
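 *
 * (A worked illustration of the rewriting paradigm described above, using
 * the sharp s, whose fold is the two-character sequence "ss": /[a\xDF]/i
 * is treated roughly as if it were /(?:\x{DF}|[a\xDF])/i, with the \xDF
 * inside the recursed bracketed class ignored via
 * <RExC_in_multi_char_class>.)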
*/

    UV prevvalue = OOB_UNICODE, save_prevvalue = OOB_UNICODE;
    IV range = 0;
    UV value = OOB_UNICODE, save_value = OOB_UNICODE;
    regnode *ret;
    STRLEN numlen;
    int namedclass = OOB_NAMEDCLASS;
    char *rangebegin = NULL;
    bool need_class = 0;
    SV *listsv = NULL;
    STRLEN initial_listsv_len = 0; /* Kind of a kludge to see if it is more
                                      than just initialized. */
    SV* properties = NULL;    /* Code points that match \p{} \P{} */
    SV* posixes = NULL;     /* Code points that match classes like [:word:],
                               extended beyond the Latin1 range.  These have
                               to be kept separate from other code points for
                               much of this function because their handling is
                               different under /i, and for most classes under
                               /d as well */
    SV* nposixes = NULL;    /* Similarly for [:^word:].  These are kept
                               separate for a while from the non-complemented
                               versions because of complications with /d
                               matching */
    SV* simple_posixes = NULL; /* But under some conditions, the classes can be
                                  treated more simply than the general case,
                                  leading to less compilation and execution
                                  work */
    UV element_count = 0;   /* Number of distinct elements in the class.
                               Optimizations may be possible if this is tiny */
    AV * multi_char_matches = NULL; /* Code points that fold to more than one
                                       character; used under /i */
    UV n;
    char * stop_ptr = RExC_end;    /* where to stop parsing */

    /* ignore unescaped whitespace? */
    const bool skip_white = cBOOL(   ret_invlist
                                  || (RExC_flags & RXf_PMf_EXTENDED_MORE));

    /* Unicode properties are stored in a swash; this holds the current one
     * being parsed.  If this swash is the only above-latin1 component of the
     * character class, an optimization is to pass it directly on to the
     * execution engine.  Otherwise, it is set to NULL to indicate that there
     * are other things in the class that have to be dealt with at execution
     * time */
    SV* swash = NULL;       /* Code points that match \p{} \P{} */

    /* Set if a component of this character class is user-defined; just passed
     * on to the engine */
    bool has_user_defined_property = FALSE;

    /* inversion list of code points this node matches only when the target
     * string is in UTF-8.  These are all non-ASCII, < 256.  (Because is under
     * /d) */
    SV* has_upper_latin1_only_utf8_matches = NULL;

    /* Inversion list of code points this node matches regardless of things
     * like locale, folding, utf8ness of the target string */
    SV* cp_list = NULL;

    /* Like cp_list, but code points on this list need to be checked for things
     * that fold to/from them under /i */
    SV* cp_foldable_list = NULL;

    /* Like cp_list, but code points on this list are valid only when the
     * runtime locale is UTF-8 */
    SV* only_utf8_locale_list = NULL;

    /* In a range, if one of the endpoints is non-character-set portable,
     * meaning that it hard-codes a code point that may mean a different
     * character in ASCII vs. EBCDIC, as opposed to, say, a literal 'A' or a
     * mnemonic '\t' which each mean the same character no matter which
     * character set the platform is on. */
    unsigned int non_portable_endpoint = 0;

    /* Is the range unicode? which means on a platform that isn't 1-1 native
     * to Unicode (i.e. non-ASCII), each code point in it should be considered
     * to be a Unicode value.  */
    bool unicode_range = FALSE;
    bool invert = FALSE;    /* Is this class to be complemented */

    bool warn_super = ALWAYS_WARN_SUPER;

    regnode * const orig_emit = RExC_emit; /* Save the original RExC_emit in
        case we need to change the emitted regop to an EXACT. */
    const char * orig_parse = RExC_parse;
    const SSize_t orig_size = RExC_size;
    bool posixl_matches_all = FALSE; /* Does /l class have both e.g. \W,\w ?
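                                      * (If so, as in a hypothetical
                                      * qr/[\w\W]/l, the class matches every
                                      * possible character, and the code at
                                      * the end of this function can optimize
                                      * the node into the equivalent of
                                      * qr/./s.)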
*/ /* This variable is used to mark where the end in the input is of something * that looks like a POSIX construct but isn't. During the parse, when * something looks like it could be such a construct is encountered, it is * checked for being one, but not if we've already checked this area of the * input. Only after this position is reached do we check again */ char *not_posix_region_end = RExC_parse - 1; AV* posix_warnings = NULL; const bool do_posix_warnings = return_posix_warnings || (PASS2 && ckWARN(WARN_REGEXP)); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGCLASS; #ifndef DEBUGGING PERL_UNUSED_ARG(depth); #endif DEBUG_PARSE("clas"); #if UNICODE_MAJOR_VERSION < 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && UNICODE_DOT_VERSION == 0 \ && UNICODE_DOT_DOT_VERSION == 0) allow_multi_folds = FALSE; #endif /* Assume we are going to generate an ANYOF node. */ ret = reganode(pRExC_state, (LOC) ? ANYOFL : ANYOF, 0); if (SIZE_ONLY) { RExC_size += ANYOF_SKIP; listsv = &PL_sv_undef; /* For code scanners: listsv always non-NULL. */ } else { ANYOF_FLAGS(ret) = 0; RExC_emit += ANYOF_SKIP; listsv = newSVpvs_flags("# comment\n", SVs_TEMP); initial_listsv_len = SvCUR(listsv); SvTEMP_off(listsv); /* Grr, TEMPs and mortals are conflated. */ } SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse); assert(RExC_parse <= RExC_end); if (UCHARAT(RExC_parse) == '^') { /* Complement the class */ RExC_parse++; invert = TRUE; allow_multi_folds = FALSE; MARK_NAUGHTY(1); SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse); } /* Check that they didn't say [:posix:] instead of [[:posix:]] */ if (! ret_invlist && MAYBE_POSIXCC(UCHARAT(RExC_parse))) { int maybe_class = handle_possible_posix(pRExC_state, RExC_parse, &not_posix_region_end, NULL, TRUE /* checking only */); if (PASS2 && maybe_class >= OOB_NAMEDCLASS && do_posix_warnings) { SAVEFREESV(RExC_rx_sv); ckWARN4reg(not_posix_region_end, "POSIX syntax [%c %c] belongs inside character classes%s", *RExC_parse, *RExC_parse, (maybe_class == OOB_NAMEDCLASS) ? ((POSIXCC_NOTYET(*RExC_parse)) ? " (but this one isn't implemented)" : " (but this one isn't fully valid)") : "" ); (void)ReREFCNT_inc(RExC_rx_sv); } } /* If the caller wants us to just parse a single element, accomplish this * by faking the loop ending condition */ if (stop_at_1 && RExC_end > RExC_parse) { stop_ptr = RExC_parse + 1; } /* allow 1st char to be ']' (allowing it to be '-' is dealt with later) */ if (UCHARAT(RExC_parse) == ']') goto charclassloop; while (1) { if ( posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0 && RExC_parse > not_posix_region_end) { /* Warnings about posix class issues are considered tentative until * we are far enough along in the parse that we can no longer * change our mind, at which point we either output them or add * them, if it has so specified, to what gets returned to the * caller. This is done each time through the loop so that a later * class won't zap them before they have been dealt with. */ output_or_return_posix_warnings(pRExC_state, posix_warnings, return_posix_warnings); } if (RExC_parse >= stop_ptr) { break; } SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse); if (UCHARAT(RExC_parse) == ']') { break; } charclassloop: namedclass = OOB_NAMEDCLASS; /* initialize as illegal */ save_value = value; save_prevvalue = prevvalue; if (!range) { rangebegin = RExC_parse; element_count++; non_portable_endpoint = 0; } if (UTF && ! 
UTF8_IS_INVARIANT(* RExC_parse)) { value = utf8n_to_uvchr((U8*)RExC_parse, RExC_end - RExC_parse, &numlen, UTF8_ALLOW_DEFAULT); RExC_parse += numlen; } else value = UCHARAT(RExC_parse++); if (value == '[') { char * posix_class_end; namedclass = handle_possible_posix(pRExC_state, RExC_parse, &posix_class_end, do_posix_warnings ? &posix_warnings : NULL, FALSE /* die if error */); if (namedclass > OOB_NAMEDCLASS) { /* If there was an earlier attempt to parse this particular * posix class, and it failed, it was a false alarm, as this * successful one proves */ if ( posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0 && not_posix_region_end >= RExC_parse && not_posix_region_end <= posix_class_end) { av_undef(posix_warnings); } RExC_parse = posix_class_end; } else if (namedclass == OOB_NAMEDCLASS) { not_posix_region_end = posix_class_end; } else { namedclass = OOB_NAMEDCLASS; } } else if ( RExC_parse - 1 > not_posix_region_end && MAYBE_POSIXCC(value)) { (void) handle_possible_posix( pRExC_state, RExC_parse - 1, /* -1 because parse has already been advanced */ &not_posix_region_end, do_posix_warnings ? &posix_warnings : NULL, TRUE /* checking only */); } else if (value == '\\') { /* Is a backslash; get the code point of the char after it */ if (RExC_parse >= RExC_end) { vFAIL("Unmatched ["); } if (UTF && ! UTF8_IS_INVARIANT(UCHARAT(RExC_parse))) { value = utf8n_to_uvchr((U8*)RExC_parse, RExC_end - RExC_parse, &numlen, UTF8_ALLOW_DEFAULT); RExC_parse += numlen; } else value = UCHARAT(RExC_parse++); /* Some compilers cannot handle switching on 64-bit integer * values, therefore value cannot be an UV. Yes, this will * be a problem later if we want switch on Unicode. * A similar issue a little bit later when switching on * namedclass. --jhi */ /* If the \ is escaping white space when white space is being * skipped, it means that that white space is wanted literally, and * is already in 'value'. Otherwise, need to translate the escape * into what it signifies. */ if (! skip_white || ! isBLANK_A(value)) switch ((I32)value) { case 'w': namedclass = ANYOF_WORDCHAR; break; case 'W': namedclass = ANYOF_NWORDCHAR; break; case 's': namedclass = ANYOF_SPACE; break; case 'S': namedclass = ANYOF_NSPACE; break; case 'd': namedclass = ANYOF_DIGIT; break; case 'D': namedclass = ANYOF_NDIGIT; break; case 'v': namedclass = ANYOF_VERTWS; break; case 'V': namedclass = ANYOF_NVERTWS; break; case 'h': namedclass = ANYOF_HORIZWS; break; case 'H': namedclass = ANYOF_NHORIZWS; break; case 'N': /* Handle \N{NAME} in class */ { const char * const backslash_N_beg = RExC_parse - 2; int cp_count; if (! grok_bslash_N(pRExC_state, NULL, /* No regnode */ &value, /* Yes single value */ &cp_count, /* Multiple code pt count */ flagp, strict, depth) ) { if (*flagp & NEED_UTF8) FAIL("panic: grok_bslash_N set NEED_UTF8"); if (*flagp & RESTART_PASS1) return NULL; if (cp_count < 0) { vFAIL("\\N in a character class must be a named character: \\N{...}"); } else if (cp_count == 0) { if (PASS2) { ckWARNreg(RExC_parse, "Ignoring zero length \\N{} in character class"); } } else { /* cp_count > 1 */ if (! RExC_in_multi_char_class) { if (invert || range || *RExC_parse == '-') { if (strict) { RExC_parse--; vFAIL("\\N{} in inverted character class or as a range end-point is restricted to one character"); } else if (PASS2) { ckWARNreg(RExC_parse, "Using just the first character returned by \\N{} in character class"); } break; /* <value> contains the first code point. 
Drop out of the switch to process it */
                        }
                        else {
                            SV * multi_char_N = newSVpvn(backslash_N_beg,
                                                 RExC_parse - backslash_N_beg);
                            multi_char_matches
                                = add_multi_match(multi_char_matches,
                                                  multi_char_N,
                                                  cp_count);
                        }
                    }
                } /* End of cp_count != 1 */

                /* This element should not be processed further in this
                 * class */
                element_count--;
                value = save_value;
                prevvalue = save_prevvalue;
                continue;   /* Back to top of loop to get next char */
            }

            /* Here, is a single code point, and <value> contains it */
            unicode_range = TRUE;   /* \N{} are Unicode */
            }
            break;
        case 'p':
        case 'P':
            {
            char *e;

            /* We will handle any undefined properties ourselves */
            U8 swash_init_flags = _CORE_SWASH_INIT_RETURN_IF_UNDEF
                                  /* And we actually would prefer to get
                                   * the straight inversion list of the
                                   * swash, since we will be accessing it
                                   * anyway, to save a little time */
                                  |_CORE_SWASH_INIT_ACCEPT_INVLIST;

            if (RExC_parse >= RExC_end)
                vFAIL2("Empty \\%c", (U8)value);
            if (*RExC_parse == '{') {
                const U8 c = (U8)value;
                e = strchr(RExC_parse, '}');
                if (!e) {
                    RExC_parse++;
                    vFAIL2("Missing right brace on \\%c{}", c);
                }
                RExC_parse++;
                while (isSPACE(*RExC_parse)) {
                    RExC_parse++;
                }
                if (UCHARAT(RExC_parse) == '^') {

                    /* toggle.  (The rhs xor gets the single bit that
                     * differs between P and p; the other xor inverts just
                     * that bit) */
                    value ^= 'P' ^ 'p';

                    RExC_parse++;
                    while (isSPACE(*RExC_parse)) {
                        RExC_parse++;
                    }
                }
                if (e == RExC_parse)
                    vFAIL2("Empty \\%c{}", c);
                n = e - RExC_parse;
                while (isSPACE(*(RExC_parse + n - 1)))
                    n--;
            }   /* The \p isn't immediately followed by a '{' */
            else if (! isALPHA(*RExC_parse)) {
                RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1;
                vFAIL2("Character following \\%c must be '{' or a "
                       "single-character Unicode property name",
                       (U8) value);
            }
            else {
                e = RExC_parse;
                n = 1;
            }
            if (!SIZE_ONLY) {
                SV* invlist;
                char* name;
                char* base_name;    /* name after any packages are stripped */
                char* lookup_name = NULL;
                const char * const colon_colon = "::";

                /* Try to get the definition of the property into
                 * <invlist>.  If /i is in effect, the effective property
                 * will have its name be <__NAME_i>.  The design is
                 * discussed in commit
                 * 2f833f5208e26b208886e51e09e2c072b5eabb46 */
                name = savepv(Perl_form(aTHX_ "%.*s", (int)n, RExC_parse));
                SAVEFREEPV(name);
                if (FOLD) {
                    lookup_name = savepv(Perl_form(aTHX_ "__%s_i", name));

                    /* The function call just below that uses this can fail
                     * to return, leaking memory if we don't do this */
                    SAVEFREEPV(lookup_name);
                }

                /* Look up the property name, and get its swash and
                 * inversion list, if the property is found  */
                SvREFCNT_dec(swash); /* Free any left-overs */
                swash = _core_swash_init("utf8",
                                         (lookup_name)
                                          ? lookup_name
                                          : name,
                                         &PL_sv_undef,
                                         1, /* binary */
                                         0, /* not tr/// */
                                         NULL, /* No inversion list */
                                         &swash_init_flags
                                        );
                if (! swash || ! (invlist = _get_swash_invlist(swash))) {
                    HV* curpkg = (IN_PERL_COMPILETIME)
                                  ? PL_curstash
                                  : CopSTASH(PL_curcop);
                    UV final_n = n;
                    bool has_pkg;

                    if (swash) {    /* Got a swash but no inversion list.
                                       Something is likely wrong that will
                                       be sorted-out later */
                        SvREFCNT_dec_NN(swash);
                        swash = NULL;
                    }

                    /* Here didn't find it.  It could be an error (like a
                     * typo) in specifying a Unicode property, or it could
                     * be a user-defined property that will be available at
                     * run-time.  The names of these must begin with 'In'
                     * or 'Is' (after any packages are stripped off).  So
                     * if not one of those, or if we accept only
                     * compile-time properties, is an error; otherwise add
                     * it to the list for run-time look up. */
                    if ((base_name = rninstr(name, name + n,
                                             colon_colon, colon_colon + 2)))
                    {   /* Has ::.
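                         * (Say, a hypothetical 'MyModule::IsVowel'.)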
We know this must be a user-defined property */ base_name += 2; final_n -= base_name - name; has_pkg = TRUE; } else { base_name = name; has_pkg = FALSE; } if ( final_n < 3 || base_name[0] != 'I' || (base_name[1] != 's' && base_name[1] != 'n') || ret_invlist) { const char * const msg = (has_pkg) ? "Illegal user-defined property name" : "Can't find Unicode property definition"; RExC_parse = e + 1; /* diag_listed_as: Can't find Unicode property definition "%s" */ vFAIL3utf8f("%s \"%" UTF8f "\"", msg, UTF8fARG(UTF, n, name)); } /* If the property name doesn't already have a package * name, add the current one to it so that it can be * referred to outside it. [perl #121777] */ if (! has_pkg && curpkg) { char* pkgname = HvNAME(curpkg); if (memNEs(pkgname, HvNAMELEN(curpkg), "main")) { char* full_name = Perl_form(aTHX_ "%s::%s", pkgname, name); n = strlen(full_name); name = savepvn(full_name, n); SAVEFREEPV(name); } } Perl_sv_catpvf(aTHX_ listsv, "%cutf8::%s%" UTF8f "%s\n", (value == 'p' ? '+' : '!'), (FOLD) ? "__" : "", UTF8fARG(UTF, n, name), (FOLD) ? "_i" : ""); has_user_defined_property = TRUE; optimizable = FALSE; /* Will have to leave this an ANYOF node */ /* We don't know yet what this matches, so have to flag * it */ ANYOF_FLAGS(ret) |= ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP; } else { /* Here, did get the swash and its inversion list. If * the swash is from a user-defined property, then this * whole character class should be regarded as such */ if (swash_init_flags & _CORE_SWASH_INIT_USER_DEFINED_PROPERTY) { has_user_defined_property = TRUE; } else if /* We warn on matching an above-Unicode code point * if the match would return true, except don't * warn for \p{All}, which has exactly one element * = 0 */ (_invlist_contains_cp(invlist, 0x110000) && (! (_invlist_len(invlist) == 1 && *invlist_array(invlist) == 0))) { warn_super = TRUE; } /* Invert if asking for the complement */ if (value == 'P') { _invlist_union_complement_2nd(properties, invlist, &properties); /* The swash can't be used as-is, because we've * inverted things; delay removing it to here after * have copied its invlist above */ SvREFCNT_dec_NN(swash); swash = NULL; } else { _invlist_union(properties, invlist, &properties); } } } RExC_parse = e + 1; namedclass = ANYOF_UNIPROP; /* no official name, but it's named */ /* \p means they want Unicode semantics */ REQUIRE_UNI_RULES(flagp, NULL); } break; case 'n': value = '\n'; break; case 'r': value = '\r'; break; case 't': value = '\t'; break; case 'f': value = '\f'; break; case 'b': value = '\b'; break; case 'e': value = ESC_NATIVE; break; case 'a': value = '\a'; break; case 'o': RExC_parse--; /* function expects to be pointed at the 'o' */ { const char* error_msg; bool valid = grok_bslash_o(&RExC_parse, RExC_end, &value, &error_msg, PASS2, /* warnings only in pass 2 */ strict, silence_non_portable, UTF); if (! valid) { vFAIL(error_msg); } } non_portable_endpoint++; break; case 'x': RExC_parse--; /* function expects to be pointed at the 'x' */ { const char* error_msg; bool valid = grok_bslash_x(&RExC_parse, RExC_end, &value, &error_msg, PASS2, /* Output warnings */ strict, silence_non_portable, UTF); if (! valid) { vFAIL(error_msg); } } non_portable_endpoint++; break; case 'c': value = grok_bslash_c(*RExC_parse++, PASS2); non_portable_endpoint++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': { /* Take 1-3 octal digits */ I32 flags = PERL_SCAN_SILENT_ILLDIGIT; numlen = (strict) ? 
4 : 3; value = grok_oct(--RExC_parse, &numlen, &flags, NULL); RExC_parse += numlen; if (numlen != 3) { if (strict) { RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; vFAIL("Need exactly 3 octal digits"); } else if (! SIZE_ONLY /* like \08, \178 */ && numlen < 3 && RExC_parse < RExC_end && isDIGIT(*RExC_parse) && ckWARN(WARN_REGEXP)) { SAVEFREESV(RExC_rx_sv); reg_warn_non_literal_string( RExC_parse + 1, form_short_octal_warning(RExC_parse, numlen)); (void)ReREFCNT_inc(RExC_rx_sv); } } non_portable_endpoint++; break; } default: /* Allow \_ to not give an error */ if (!SIZE_ONLY && isWORDCHAR(value) && value != '_') { if (strict) { vFAIL2("Unrecognized escape \\%c in character class", (int)value); } else { SAVEFREESV(RExC_rx_sv); ckWARN2reg(RExC_parse, "Unrecognized escape \\%c in character class passed through", (int)value); (void)ReREFCNT_inc(RExC_rx_sv); } } break; } /* End of switch on char following backslash */ } /* end of handling backslash escape sequences */ /* Here, we have the current token in 'value' */ if (namedclass > OOB_NAMEDCLASS) { /* this is a named class \blah */ U8 classnum; /* a bad range like a-\d, a-[:digit:]. The '-' is taken as a * literal, as is the character that began the false range, i.e. * the 'a' in the examples */ if (range) { if (!SIZE_ONLY) { const int w = (RExC_parse >= rangebegin) ? RExC_parse - rangebegin : 0; if (strict) { vFAIL2utf8f( "False [] range \"%" UTF8f "\"", UTF8fARG(UTF, w, rangebegin)); } else { SAVEFREESV(RExC_rx_sv); /* in case of fatal warnings */ ckWARN2reg(RExC_parse, "False [] range \"%" UTF8f "\"", UTF8fARG(UTF, w, rangebegin)); (void)ReREFCNT_inc(RExC_rx_sv); cp_list = add_cp_to_invlist(cp_list, '-'); cp_foldable_list = add_cp_to_invlist(cp_foldable_list, prevvalue); } } range = 0; /* this was not a true range */ element_count += 2; /* So counts for three values */ } classnum = namedclass_to_classnum(namedclass); if (LOC && namedclass < ANYOF_POSIXL_MAX #ifndef HAS_ISASCII && classnum != _CC_ASCII #endif ) { /* What the Posix classes (like \w, [:space:]) match in locale * isn't knowable under locale until actual match time. Room * must be reserved (one time per outer bracketed class) to * store such classes. The space will contain a bit for each * named class that is to be matched against. This isn't * needed for \p{} and pseudo-classes, as they are not affected * by locale, and hence are dealt with separately */ if (! need_class) { need_class = 1; if (SIZE_ONLY) { RExC_size += ANYOF_POSIXL_SKIP - ANYOF_SKIP; } else { RExC_emit += ANYOF_POSIXL_SKIP - ANYOF_SKIP; } ANYOF_FLAGS(ret) |= ANYOF_MATCHES_POSIXL; ANYOF_POSIXL_ZERO(ret); /* We can't change this into some other type of node * (unless this is the only element, in which case there * are nodes that mean exactly this) as has runtime * dependencies */ optimizable = FALSE; } /* Coverity thinks it is possible for this to be negative; both * jhi and khw think it's not, but be safer */ assert(! (ANYOF_FLAGS(ret) & ANYOF_MATCHES_POSIXL) || (namedclass + ((namedclass % 2) ? -1 : 1)) >= 0); /* See if it already matches the complement of this POSIX * class */ if ((ANYOF_FLAGS(ret) & ANYOF_MATCHES_POSIXL) && ANYOF_POSIXL_TEST(ret, namedclass + ((namedclass % 2) ? -1 : 1))) { posixl_matches_all = TRUE; break; /* No need to continue. 
Since it matches both e.g., \w and \W, it matches everything, and the bracketed class can be optimized into qr/./s */ } /* Add this class to those that should be checked at runtime */ ANYOF_POSIXL_SET(ret, namedclass); /* The above-Latin1 characters are not subject to locale rules. * Just add them, in the second pass, to the * unconditionally-matched list */ if (! SIZE_ONLY) { SV* scratch_list = NULL; /* Get the list of the above-Latin1 code points this * matches */ _invlist_intersection_maybe_complement_2nd(PL_AboveLatin1, PL_XPosix_ptrs[classnum], /* Odd numbers are complements, like * NDIGIT, NASCII, ... */ namedclass % 2 != 0, &scratch_list); /* Checking if 'cp_list' is NULL first saves an extra * clone. Its reference count will be decremented at the * next union, etc, or if this is the only instance, at the * end of the routine */ if (! cp_list) { cp_list = scratch_list; } else { _invlist_union(cp_list, scratch_list, &cp_list); SvREFCNT_dec_NN(scratch_list); } continue; /* Go get next character */ } } else if (! SIZE_ONLY) { /* Here, not in pass1 (in that pass we skip calculating the * contents of this class), and is not /l, or is a POSIX class * for which /l doesn't matter (or is a Unicode property, which * is skipped here). */ if (namedclass >= ANYOF_POSIXL_MAX) { /* If a special class */ if (namedclass != ANYOF_UNIPROP) { /* UNIPROP = \p and \P */ /* Here, should be \h, \H, \v, or \V. None of /d, /i * nor /l make a difference in what these match, * therefore we just add what they match to cp_list. */ if (classnum != _CC_VERTSPACE) { assert( namedclass == ANYOF_HORIZWS || namedclass == ANYOF_NHORIZWS); /* It turns out that \h is just a synonym for * XPosixBlank */ classnum = _CC_BLANK; } _invlist_union_maybe_complement_2nd( cp_list, PL_XPosix_ptrs[classnum], namedclass % 2 != 0, /* Complement if odd (NHORIZWS, NVERTWS) */ &cp_list); } } else if ( UNI_SEMANTICS || classnum == _CC_ASCII || (DEPENDS_SEMANTICS && ( classnum == _CC_DIGIT || classnum == _CC_XDIGIT))) { /* We usually have to worry about /d and /a affecting what * POSIX classes match, with special code needed for /d * because we won't know until runtime what all matches. * But there is no extra work needed under /u, and * [:ascii:] is unaffected by /a and /d; and :digit: and * :xdigit: don't have runtime differences under /d. So we * can special case these, and avoid some extra work below, * and at runtime. */ _invlist_union_maybe_complement_2nd( simple_posixes, PL_XPosix_ptrs[classnum], namedclass % 2 != 0, &simple_posixes); } else { /* Garden variety class. If is NUPPER, NALPHA, ... complement and use nposixes */ SV** posixes_ptr = namedclass % 2 == 0 ? &posixes : &nposixes; _invlist_union_maybe_complement_2nd( *posixes_ptr, PL_XPosix_ptrs[classnum], namedclass % 2 != 0, posixes_ptr); } } } /* end of namedclass \blah */ SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse); /* If 'range' is set, 'value' is the ending of a range--check its * validity. (If value isn't a single code point in the case of a * range, we should have figured that out above in the code that * catches false ranges). Later, we will handle each individual code * point in the range. If 'range' isn't set, this could be the * beginning of a range, so check for that by looking ahead to see if * the next real character to be processed is the range indicator--the * minus sign */ if (range) { #ifdef EBCDIC /* For unicode ranges, we have to test that the Unicode as opposed * to the native values are not decreasing. 
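             * (Illustrative: native EBCDIC 'i' is 0x89 and 'j' is 0x91, yet
             * their Unicode/Latin-1 values are the adjacent 0x69 and 0x6A,
             * which is why the comparison below goes through
             * NATIVE_TO_LATIN1.)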
(Above 255, there is * no difference between native and Unicode) */ if (unicode_range && prevvalue < 255 && value < 255) { if (NATIVE_TO_LATIN1(prevvalue) > NATIVE_TO_LATIN1(value)) { goto backwards_range; } } else #endif if (prevvalue > value) /* b-a */ { int w; #ifdef EBCDIC backwards_range: #endif w = RExC_parse - rangebegin; vFAIL2utf8f( "Invalid [] range \"%" UTF8f "\"", UTF8fARG(UTF, w, rangebegin)); NOT_REACHED; /* NOTREACHED */ } } else { prevvalue = value; /* save the beginning of the potential range */ if (! stop_at_1 /* Can't be a range if parsing just one thing */ && *RExC_parse == '-') { char* next_char_ptr = RExC_parse + 1; /* Get the next real char after the '-' */ SKIP_BRACKETED_WHITE_SPACE(skip_white, next_char_ptr); /* If the '-' is at the end of the class (just before the ']', * it is a literal minus; otherwise it is a range */ if (next_char_ptr < RExC_end && *next_char_ptr != ']') { RExC_parse = next_char_ptr; /* a bad range like \w-, [:word:]- ? */ if (namedclass > OOB_NAMEDCLASS) { if (strict || (PASS2 && ckWARN(WARN_REGEXP))) { const int w = RExC_parse >= rangebegin ? RExC_parse - rangebegin : 0; if (strict) { vFAIL4("False [] range \"%*.*s\"", w, w, rangebegin); } else if (PASS2) { vWARN4(RExC_parse, "False [] range \"%*.*s\"", w, w, rangebegin); } } if (!SIZE_ONLY) { cp_list = add_cp_to_invlist(cp_list, '-'); } element_count++; } else range = 1; /* yeah, it's a range! */ continue; /* but do it the next time */ } } } if (namedclass > OOB_NAMEDCLASS) { continue; } /* Here, we have a single value this time through the loop, and * <prevvalue> is the beginning of the range, if any; or <value> if * not. */ /* non-Latin1 code point implies unicode semantics. Must be set in * pass1 so is there for the whole of pass 2 */ if (value > 255) { REQUIRE_UNI_RULES(flagp, NULL); } /* Ready to process either the single value, or the completed range. * For single-valued non-inverted ranges, we consider the possibility * of multi-char folds. (We made a conscious decision to not do this * for the other cases because it can often lead to non-intuitive * results. For example, you have the peculiar case that: * "s s" =~ /^[^\xDF]+$/i => Y * "ss" =~ /^[^\xDF]+$/i => N * * See [perl #89750] */ if (FOLD && allow_multi_folds && value == prevvalue) { if (value == LATIN_SMALL_LETTER_SHARP_S || (value > 255 && _invlist_contains_cp(PL_HasMultiCharFold, value))) { /* Here <value> is indeed a multi-char fold. Get what it is */ U8 foldbuf[UTF8_MAXBYTES_CASE]; STRLEN foldlen; UV folded = _to_uni_fold_flags( value, foldbuf, &foldlen, FOLD_FLAGS_FULL | (ASCII_FOLD_RESTRICTED ? FOLD_FLAGS_NOMIX_ASCII : 0) ); /* Here, <folded> should be the first character of the * multi-char fold of <value>, with <foldbuf> containing the * whole thing. But, if this fold is not allowed (because of * the flags), <fold> will be the same as <value>, and should * be processed like any other character, so skip the special * handling */ if (folded != value) { /* Skip if we are recursed, currently parsing the class * again. Otherwise add this character to the list of * multi-char folds. */ if (! 
RExC_in_multi_char_class) {
                        STRLEN cp_count = utf8_length(foldbuf,
                                                      foldbuf + foldlen);
                        SV* multi_fold = sv_2mortal(newSVpvs(""));

                        Perl_sv_catpvf(aTHX_ multi_fold, "\\x{%" UVXf "}",
                                                                        value);

                        multi_char_matches
                                        = add_multi_match(multi_char_matches,
                                                          multi_fold,
                                                          cp_count);
                    }

                    /* This element should not be processed further in this
                     * class */
                    element_count--;
                    value = save_value;
                    prevvalue = save_prevvalue;
                    continue;
                }
            }
        }

        if (strict && PASS2 && ckWARN(WARN_REGEXP)) {
            if (range) {

                /* If the range starts above 255, everything is portable and
                 * likely to be so for any foreseeable character set, so don't
                 * warn. */
                if (unicode_range && non_portable_endpoint && prevvalue < 256)
                {
                    vWARN(RExC_parse,
                          "Both or neither range ends should be Unicode");
                }
                else if (prevvalue != value) {

                    /* Under strict, ranges that start and/or end in an ASCII
                     * printable should have each end point be a portable value
                     * for it (preferably like 'A', but we don't warn if it is
                     * a (portable) Unicode name or code point), and the range
                     * must be all digits or all letters of the same case.
                     * Otherwise, the range is non-portable and unclear as to
                     * what it contains */
                    if (  (isPRINT_A(prevvalue) || isPRINT_A(value))
                        && (  non_portable_endpoint
                            || ! (  (isDIGIT_A(prevvalue) && isDIGIT_A(value))
                                 || (isLOWER_A(prevvalue) && isLOWER_A(value))
                                 || (isUPPER_A(prevvalue) && isUPPER_A(value))
                              )))
                    {
                        vWARN(RExC_parse, "Ranges of ASCII printables should"
                                          " be some subset of \"0-9\","
                                          " \"A-Z\", or \"a-z\"");
                    }
                    else if (prevvalue >= 0x660) { /* ARABIC_INDIC_DIGIT_ZERO */
                        SSize_t index_start;
                        SSize_t index_final;

                        /* But the nature of Unicode and languages means we
                         * can't do the same checks for above-ASCII ranges,
                         * except in the case of digit ones.  These should
                         * contain only digits from the same group of 10.  The
                         * ASCII case is handled just above.  0x660 is the
                         * first digit character beyond ASCII.  Hence here, the
                         * range could be a range of digits.  First some
                         * unlikely special cases.  Grandfather in that a range
                         * ending in 19DA (NEW TAI LUE THAM DIGIT ONE) is bad
                         * if its starting value is one of the 10 digits prior
                         * to it.  This is because it is an alternate way of
                         * writing 19D1, and some people may expect it to be in
                         * that group.  But it is bad, because it won't give
                         * the expected results.  In Unicode 5.2 it was
                         * considered to be in that group (of 11, hence), but
                         * this was fixed in the next version */
                        if (UNLIKELY(value == 0x19DA && prevvalue >= 0x19D0)) {
                            goto warn_bad_digit_range;
                        }
                        else if (UNLIKELY(   prevvalue >= 0x1D7CE
                                          &&     value <= 0x1D7FF))
                        {
                            /* This is the only other case currently in Unicode
                             * where the algorithm below fails.  The code
                             * points just above are the end points of a single
                             * range containing only decimal digits.  It is 5
                             * different series of 0-9.  All other ranges of
                             * digits currently in Unicode are just a single
                             * series.  (And mktables will notify us if a later
                             * Unicode version breaks this.)
                             *
                             * If the range being checked is at most 9 long,
                             * and the digit values represented are in
                             * numerical order, they are from the same series.
                             * */
                            if (         value - prevvalue > 9
                                ||    (((    value - 0x1D7CE) % 10)
                                     <= (prevvalue - 0x1D7CE) % 10))
                            {
                                goto warn_bad_digit_range;
                            }
                        }
                        else {

                            /* For all other ranges of digits in Unicode, the
                             * algorithm is just to check if both end points
                             * are in the same series, which is the same range.
                             * */
                            index_start = _invlist_search(
                                                    PL_XPosix_ptrs[_CC_DIGIT],
                                                    prevvalue);

                            /* Warn if the range starts and ends with a digit,
                             * and they are not in the same group of 10.
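                             * (Illustrative: 0x966-0x96F, the DEVANAGARI
                             * digits, share one group of 10 and pass, while a
                             * range from a DEVANAGARI digit to a BENGALI one
                             * (0x9E6 onward) spans two groups and draws the
                             * warning below.)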
                             */
                            if (   index_start >= 0
                                && ELEMENT_RANGE_MATCHES_INVLIST(index_start)
                                && (index_final =
                                    _invlist_search(PL_XPosix_ptrs[_CC_DIGIT],
                                                    value)) != index_start
                                && index_final >= 0
                                && ELEMENT_RANGE_MATCHES_INVLIST(index_final))
                            {
                              warn_bad_digit_range:
                                vWARN(RExC_parse,
                                      "Ranges of digits should be"
                                      " from the same group of"
                                      " 10");
                            }
                        }
                    }
                }
            }
            if ((! range || prevvalue == value) && non_portable_endpoint) {
                if (isPRINT_A(value)) {
                    char literal[3];
                    unsigned d = 0;
                    if (isBACKSLASHED_PUNCT(value)) {
                        literal[d++] = '\\';
                    }
                    literal[d++] = (char) value;
                    literal[d++] = '\0';
                    vWARN4(RExC_parse,
                           "\"%.*s\" is more clearly written simply as \"%s\"",
                           (int) (RExC_parse - rangebegin),
                           rangebegin,
                           literal
                        );
                }
                else if (isMNEMONIC_CNTRL(value)) {
                    vWARN4(RExC_parse,
                           "\"%.*s\" is more clearly written simply as \"%s\"",
                           (int) (RExC_parse - rangebegin),
                           rangebegin,
                           cntrl_to_mnemonic((U8) value)
                        );
                }
            }
        }

        /* Deal with this element of the class */
        if (! SIZE_ONLY) {
#ifndef EBCDIC
            cp_foldable_list = _add_range_to_invlist(cp_foldable_list,
                                                     prevvalue, value);
#else
            /* On non-ASCII platforms, for ranges that span all of 0..255,
             * and ones that don't require special handling, we can just add
             * the range like we do for ASCII platforms */
            if ((UNLIKELY(prevvalue == 0) && value >= 255)
                || ! (prevvalue < 256
                      && (unicode_range
                          || (! non_portable_endpoint
                              && ((isLOWER_A(prevvalue) && isLOWER_A(value))
                                  || (isUPPER_A(prevvalue)
                                      && isUPPER_A(value)))))))
            {
                cp_foldable_list = _add_range_to_invlist(cp_foldable_list,
                                                         prevvalue, value);
            }
            else {
                /* Here, requires special handling.  This can be because it
                 * is a range whose code points are considered to be Unicode,
                 * and so must be individually translated into native, or
                 * because it's a subrange of 'A-Z' or 'a-z', neither of
                 * which is contiguous in EBCDIC, but we have defined them to
                 * include only the "expected" upper or lower case ASCII
                 * alphabetics.  Subranges above 255 are the same in native
                 * and Unicode, so can be added as a range */
                U8 start = NATIVE_TO_LATIN1(prevvalue);
                unsigned j;
                U8 end = (value < 256) ? NATIVE_TO_LATIN1(value) : 255;
                for (j = start; j <= end; j++) {
                    cp_foldable_list = add_cp_to_invlist(cp_foldable_list,
                                                         LATIN1_TO_NATIVE(j));
                }
                if (value > 255) {
                    cp_foldable_list = _add_range_to_invlist(cp_foldable_list,
                                                             256, value);
                }
            }
#endif
        }

        range = 0; /* this range (if it was one) is done now */
    } /* End of loop through all the text within the brackets */

    if (   posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0) {
        output_or_return_posix_warnings(pRExC_state, posix_warnings,
                                        return_posix_warnings);
    }

    /* If anything in the class expands to more than one character, we have
     * to deal with them by building up a substitute parse string, and
     * recursively calling reg() on it, instead of proceeding */
    if (multi_char_matches) {
        SV * substitute_parse = newSVpvn_flags("?:", 2, SVs_TEMP);
        I32 cp_count;
        STRLEN len;
        char *save_end = RExC_end;
        char *save_parse = RExC_parse;
        char *save_start = RExC_start;
        STRLEN prefix_end = 0;      /* We copy the character class after a
                                       prefix supplied here.  This is the
                                       size + 1 of that prefix */
        bool first_time = TRUE;     /* First multi-char occurrence doesn't get
                                       a "|" */
        I32 reg_flags;

        assert(! 
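        /* (Roughly what gets built here, as an illustration only: for a
         * class like [\x{DF}a-z] under /i, the multi-char-fold code point
         * is pulled out into an alternation, giving a substitute string
         * along the lines of "?:\x{DF}|[a-z])", which the recursive reg()
         * call below parses in place of the original bracketed class.) */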
invert); assert(RExC_precomp_adj == 0); /* Only one level of recursion allowed */ #if 0 /* Have decided not to deal with multi-char folds in inverted classes, because too confusing */ if (invert) { sv_catpv(substitute_parse, "(?:"); } #endif /* Look at the longest folds first */ for (cp_count = av_tindex_skip_len_mg(multi_char_matches); cp_count > 0; cp_count--) { if (av_exists(multi_char_matches, cp_count)) { AV** this_array_ptr; SV* this_sequence; this_array_ptr = (AV**) av_fetch(multi_char_matches, cp_count, FALSE); while ((this_sequence = av_pop(*this_array_ptr)) != &PL_sv_undef) { if (! first_time) { sv_catpv(substitute_parse, "|"); } first_time = FALSE; sv_catpv(substitute_parse, SvPVX(this_sequence)); } } } /* If the character class contains anything else besides these * multi-character folds, have to include it in recursive parsing */ if (element_count) { sv_catpv(substitute_parse, "|["); prefix_end = SvCUR(substitute_parse); sv_catpvn(substitute_parse, orig_parse, RExC_parse - orig_parse); /* Put in a closing ']' only if not going off the end, as otherwise * we are adding something that really isn't there */ if (RExC_parse < RExC_end) { sv_catpv(substitute_parse, "]"); } } sv_catpv(substitute_parse, ")"); #if 0 if (invert) { /* This is a way to get the parse to skip forward a whole named * sequence instead of matching the 2nd character when it fails the * first */ sv_catpv(substitute_parse, "(*THEN)(*SKIP)(*FAIL)|.)"); } #endif /* Set up the data structure so that any errors will be properly * reported. See the comments at the definition of * REPORT_LOCATION_ARGS for details */ RExC_precomp_adj = orig_parse - RExC_precomp; RExC_start = RExC_parse = SvPV(substitute_parse, len); RExC_adjusted_start = RExC_start + prefix_end; RExC_end = RExC_parse + len; RExC_in_multi_char_class = 1; RExC_emit = (regnode *)orig_emit; ret = reg(pRExC_state, 1, &reg_flags, depth+1); *flagp |= reg_flags&(HASWIDTH|SIMPLE|SPSTART|POSTPONED|RESTART_PASS1|NEED_UTF8); /* And restore so can parse the rest of the pattern */ RExC_parse = save_parse; RExC_start = RExC_adjusted_start = save_start; RExC_precomp_adj = 0; RExC_end = save_end; RExC_in_multi_char_class = 0; SvREFCNT_dec_NN(multi_char_matches); return ret; } /* Here, we've gone through the entire class and dealt with multi-char * folds. We are now in a position that we can do some checks to see if we * can optimize this ANYOF node into a simpler one, even in Pass 1. * Currently we only do two checks: * 1) is in the unlikely event that the user has specified both, eg. \w and * \W under /l, then the class matches everything. (This optimization * is done only to make the optimizer code run later work.) * 2) if the character class contains only a single element (including a * single range), we see if there is an equivalent node for it. * Other checks are possible */ if ( optimizable && ! ret_invlist /* Can't optimize if returning the constructed inversion list */ && (UNLIKELY(posixl_matches_all) || element_count == 1)) { U8 op = END; U8 arg = 0; if (UNLIKELY(posixl_matches_all)) { op = SANY; } else if (namedclass > OOB_NAMEDCLASS) { /* this is a single named class, like \w or [:digit:] or \p{foo} */ /* All named classes are mapped into POSIXish nodes, with its FLAG * argument giving which class it is */ switch ((I32)namedclass) { case ANYOF_UNIPROP: break; /* These don't depend on the charset modifiers. 
They always * match under /u rules */ case ANYOF_NHORIZWS: case ANYOF_HORIZWS: namedclass = ANYOF_BLANK + namedclass - ANYOF_HORIZWS; /* FALLTHROUGH */ case ANYOF_NVERTWS: case ANYOF_VERTWS: op = POSIXU; goto join_posix; /* The actual POSIXish node for all the rest depends on the * charset modifier. The ones in the first set depend only on * ASCII or, if available on this platform, also locale */ case ANYOF_ASCII: case ANYOF_NASCII: #ifdef HAS_ISASCII op = (LOC) ? POSIXL : POSIXA; #else op = POSIXA; #endif goto join_posix; /* The following don't have any matches in the upper Latin1 * range, hence /d is equivalent to /u for them. Making it /u * saves some branches at runtime */ case ANYOF_DIGIT: case ANYOF_NDIGIT: case ANYOF_XDIGIT: case ANYOF_NXDIGIT: if (! DEPENDS_SEMANTICS) { goto treat_as_default; } op = POSIXU; goto join_posix; /* The following change to CASED under /i */ case ANYOF_LOWER: case ANYOF_NLOWER: case ANYOF_UPPER: case ANYOF_NUPPER: if (FOLD) { namedclass = ANYOF_CASED + (namedclass % 2); } /* FALLTHROUGH */ /* The rest have more possibilities depending on the charset. * We take advantage of the enum ordering of the charset * modifiers to get the exact node type, */ default: treat_as_default: op = POSIXD + get_regex_charset(RExC_flags); if (op > POSIXA) { /* /aa is same as /a */ op = POSIXA; } join_posix: /* The odd numbered ones are the complements of the * next-lower even number one */ if (namedclass % 2 == 1) { invert = ! invert; namedclass--; } arg = namedclass_to_classnum(namedclass); break; } } else if (value == prevvalue) { /* Here, the class consists of just a single code point */ if (invert) { if (! LOC && value == '\n') { op = REG_ANY; /* Optimize [^\n] */ *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); } } else if (value < 256 || UTF) { /* Optimize a single value into an EXACTish node, but not if it * would require converting the pattern to UTF-8. */ op = compute_EXACTish(pRExC_state); } } /* Otherwise is a range */ else if (! LOC) { /* locale could vary these */ if (prevvalue == '0') { if (value == '9') { arg = _CC_DIGIT; op = POSIXA; } } else if (! FOLD || ASCII_FOLD_RESTRICTED) { /* We can optimize A-Z or a-z, but not if they could match * something like the KELVIN SIGN under /i. */ if (prevvalue == 'A') { if (value == 'Z' #ifdef EBCDIC && ! non_portable_endpoint #endif ) { arg = (FOLD) ? _CC_ALPHA : _CC_UPPER; op = POSIXA; } } else if (prevvalue == 'a') { if (value == 'z' #ifdef EBCDIC && ! non_portable_endpoint #endif ) { arg = (FOLD) ? _CC_ALPHA : _CC_LOWER; op = POSIXA; } } } } /* Here, we have changed <op> away from its initial value iff we found * an optimization */ if (op != END) { /* Throw away this ANYOF regnode, and emit the calculated one, * which should correspond to the beginning, not current, state of * the parse */ const char * cur_parse = RExC_parse; RExC_parse = (char *)orig_parse; if ( SIZE_ONLY) { if (! LOC) { /* To get locale nodes to not use the full ANYOF size would * require moving the code above that writes the portions * of it that aren't in other nodes to after this point. * e.g. ANYOF_POSIXL_SET */ RExC_size = orig_size; } } else { RExC_emit = (regnode *)orig_emit; if (PL_regkind[op] == POSIXD) { if (op == POSIXL) { RExC_contains_locale = 1; } if (invert) { op += NPOSIXD - POSIXD; } } } ret = reg_node(pRExC_state, op); if (PL_regkind[op] == POSIXD || PL_regkind[op] == NPOSIXD) { if (! 
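                /* SIZE_ONLY is true just during the sizing pass; the class
                 * number is written into the node's FLAGS field only in
                 * pass 2, once the node has actually been emitted */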
SIZE_ONLY) { FLAGS(ret) = arg; } *flagp |= HASWIDTH|SIMPLE; } else if (PL_regkind[op] == EXACT) { alloc_maybe_populate_EXACT(pRExC_state, ret, flagp, 0, value, TRUE /* downgradable to EXACT */ ); } RExC_parse = (char *) cur_parse; SvREFCNT_dec(posixes); SvREFCNT_dec(nposixes); SvREFCNT_dec(simple_posixes); SvREFCNT_dec(cp_list); SvREFCNT_dec(cp_foldable_list); return ret; } } if (SIZE_ONLY) return ret; /****** !SIZE_ONLY (Pass 2) AFTER HERE *********/ /* If folding, we calculate all characters that could fold to or from the * ones already on the list */ if (cp_foldable_list) { if (FOLD) { UV start, end; /* End points of code point ranges */ SV* fold_intersection = NULL; SV** use_list; /* Our calculated list will be for Unicode rules. For locale * matching, we have to keep a separate list that is consulted at * runtime only when the locale indicates Unicode rules. For * non-locale, we just use the general list */ if (LOC) { use_list = &only_utf8_locale_list; } else { use_list = &cp_list; } /* Only the characters in this class that participate in folds need * be checked. Get the intersection of this class and all the * possible characters that are foldable. This can quickly narrow * down a large class */ _invlist_intersection(PL_utf8_foldable, cp_foldable_list, &fold_intersection); /* The folds for all the Latin1 characters are hard-coded into this * program, but we have to go out to disk to get the others. */ if (invlist_highest(cp_foldable_list) >= 256) { /* This is a hash that for a particular fold gives all * characters that are involved in it */ if (! PL_utf8_foldclosures) { _load_PL_utf8_foldclosures(); } } /* Now look at the foldable characters in this class individually */ invlist_iterinit(fold_intersection); while (invlist_iternext(fold_intersection, &start, &end)) { UV j; /* Look at every character in the range */ for (j = start; j <= end; j++) { U8 foldbuf[UTF8_MAXBYTES_CASE+1]; STRLEN foldlen; SV** listp; if (j < 256) { if (IS_IN_SOME_FOLD_L1(j)) { /* ASCII is always matched; non-ASCII is matched * only under Unicode rules (which could happen * under /l if the locale is a UTF-8 one */ if (isASCII(j) || ! DEPENDS_SEMANTICS) { *use_list = add_cp_to_invlist(*use_list, PL_fold_latin1[j]); } else { has_upper_latin1_only_utf8_matches = add_cp_to_invlist( has_upper_latin1_only_utf8_matches, PL_fold_latin1[j]); } } if (HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(j) && (! isASCII(j) || ! ASCII_FOLD_RESTRICTED)) { add_above_Latin1_folds(pRExC_state, (U8) j, use_list); } continue; } /* Here is an above Latin1 character. We don't have the * rules hard-coded for it. First, get its fold. This is * the simple fold, as the multi-character folds have been * handled earlier and separated out */ _to_uni_fold_flags(j, foldbuf, &foldlen, (ASCII_FOLD_RESTRICTED) ? FOLD_FLAGS_NOMIX_ASCII : 0); /* Single character fold of above Latin1. Add everything in * its fold closure to the list that this node should match. * The fold closures data structure is a hash with the keys * being the UTF-8 of every character that is folded to, like * 'k', and the values each an array of all code points that * fold to its key. e.g. [ 'k', 'K', KELVIN_SIGN ]. 
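                     * As a further illustration: the entry keyed by the
                     * UTF-8 of 's' is [ 's', 'S', U+017F LATIN SMALL LETTER
                     * LONG S ], since all three of those fold to 's'.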
                     * Multi-character folds are not included */
                    if ((listp = hv_fetch(PL_utf8_foldclosures,
                                          (char *) foldbuf, foldlen, FALSE)))
                    {
                        AV* list = (AV*) *listp;
                        IV k;
                        for (k = 0; k <= av_tindex_skip_len_mg(list); k++) {
                            SV** c_p = av_fetch(list, k, FALSE);
                            UV c;
                            assert(c_p);

                            c = SvUV(*c_p);

                            /* /aa doesn't allow folds between ASCII and
                             * non- */
                            if ((ASCII_FOLD_RESTRICTED
                                 && (isASCII(c) != isASCII(j))))
                            {
                                continue;
                            }

                            /* Folds under /l which cross the 255/256
                             * boundary are added to a separate list.  (These
                             * are valid only when the locale is UTF-8.) */
                            if (c < 256 && LOC) {
                                *use_list = add_cp_to_invlist(*use_list, c);
                                continue;
                            }

                            if (isASCII(c) || c > 255 || AT_LEAST_UNI_SEMANTICS)
                            {
                                cp_list = add_cp_to_invlist(cp_list, c);
                            }
                            else {
                                /* Similarly folds involving non-ascii Latin1
                                 * characters under /d are added to their
                                 * list */
                                has_upper_latin1_only_utf8_matches
                                        = add_cp_to_invlist(
                                           has_upper_latin1_only_utf8_matches,
                                           c);
                            }
                        }
                    }
                }
            }
            SvREFCNT_dec_NN(fold_intersection);
        }

        /* Now that we have finished adding all the folds, there is no reason
         * to keep the foldable list separate */
        _invlist_union(cp_list, cp_foldable_list, &cp_list);
        SvREFCNT_dec_NN(cp_foldable_list);
    }

    /* And combine the result (if any) with any inversion lists from posix
     * classes.  The lists are kept separate up to now because we don't want
     * to fold the classes (folding of those is automatically handled by the
     * swash fetching code) */
    if (simple_posixes) {   /* These are the classes known to be unaffected
                               by /a, /aa, and /d */
        if (cp_list) {
            _invlist_union(cp_list, simple_posixes, &cp_list);
            SvREFCNT_dec_NN(simple_posixes);
        }
        else {
            cp_list = simple_posixes;
        }
    }
    if (posixes || nposixes) {

        /* We have to adjust /a and /aa */
        if (AT_LEAST_ASCII_RESTRICTED) {

            /* Under /a and /aa, nothing above ASCII matches these */
            if (posixes) {
                _invlist_intersection(posixes,
                                      PL_XPosix_ptrs[_CC_ASCII],
                                      &posixes);
            }

            /* Under /a and /aa, everything above ASCII matches these
             * complements */
            if (nposixes) {
                _invlist_union_complement_2nd(nposixes,
                                              PL_XPosix_ptrs[_CC_ASCII],
                                              &nposixes);
            }
        }

        if (! DEPENDS_SEMANTICS) {

            /* For everything but /d, we can just add the current 'posixes'
             * and 'nposixes' to the main list */
            if (posixes) {
                if (cp_list) {
                    _invlist_union(cp_list, posixes, &cp_list);
                    SvREFCNT_dec_NN(posixes);
                }
                else {
                    cp_list = posixes;
                }
            }
            if (nposixes) {
                if (cp_list) {
                    _invlist_union(cp_list, nposixes, &cp_list);
                    SvREFCNT_dec_NN(nposixes);
                }
                else {
                    cp_list = nposixes;
                }
            }
        }
        else {
            /* Under /d, things like \w match upper Latin1 characters only if
             * the target string is in UTF-8.  But things like \W match all
             * the upper Latin1 characters if the target string is not in
             * UTF-8.
             *
             * Handle the case where there is something like \W separately */
            if (nposixes) {
                SV* only_non_utf8_list = invlist_clone(PL_UpperLatin1);

                /* A complemented posix class matches all upper Latin1
                 * characters if not in UTF-8.  And it matches just certain
                 * ones when in UTF-8.  That means those certain ones are
                 * matched regardless, so can just be added to the
                 * unconditional list */
                if (cp_list) {
                    _invlist_union(cp_list, nposixes, &cp_list);
                    SvREFCNT_dec_NN(nposixes);
                    nposixes = NULL;
                }
                else {
                    cp_list = nposixes;
                }

                /* Likewise for 'posixes' */
                _invlist_union(posixes, cp_list, &cp_list);

                /* Likewise for anything else in the range that matched only
                 * under UTF-8 */
                if (has_upper_latin1_only_utf8_matches) {
                    _invlist_union(cp_list,
                                   has_upper_latin1_only_utf8_matches,
                                   &cp_list);
                    SvREFCNT_dec_NN(has_upper_latin1_only_utf8_matches);
                    has_upper_latin1_only_utf8_matches = NULL;
                }

                /* If we don't match all the upper Latin1 characters
                 * regardless of UTF-8ness, we have to set a flag to match
                 * the rest when not in UTF-8 */
                _invlist_subtract(only_non_utf8_list, cp_list,
                                  &only_non_utf8_list);
                if (_invlist_len(only_non_utf8_list) != 0) {
                    ANYOF_FLAGS(ret) |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER;
                }
            }
            else {
                /* Here there were no complemented posix classes.  That means
                 * the upper Latin1 characters in 'posixes' match only when
                 * the target string is in UTF-8.  So we have to add them to
                 * the list of those types of code points, while adding the
                 * remainder to the unconditional list.
                 *
                 * First calculate what they are */
                SV* nonascii_but_latin1_properties = NULL;
                _invlist_intersection(posixes, PL_UpperLatin1,
                                      &nonascii_but_latin1_properties);

                /* And add them to the final list of such characters. */
                _invlist_union(has_upper_latin1_only_utf8_matches,
                               nonascii_but_latin1_properties,
                               &has_upper_latin1_only_utf8_matches);

                /* Remove them from what now becomes the unconditional
                 * list */
                _invlist_subtract(posixes, nonascii_but_latin1_properties,
                                  &posixes);

                /* And add those unconditional ones to the final list */
                if (cp_list) {
                    _invlist_union(cp_list, posixes, &cp_list);
                    SvREFCNT_dec_NN(posixes);
                    posixes = NULL;
                }
                else {
                    cp_list = posixes;
                }

                SvREFCNT_dec(nonascii_but_latin1_properties);

                /* Get rid of any characters that we now know are matched
                 * unconditionally from the conditional list, which may make
                 * that list empty */
                _invlist_subtract(has_upper_latin1_only_utf8_matches,
                                  cp_list,
                                  &has_upper_latin1_only_utf8_matches);
                if (_invlist_len(has_upper_latin1_only_utf8_matches) == 0) {
                    SvREFCNT_dec_NN(has_upper_latin1_only_utf8_matches);
                    has_upper_latin1_only_utf8_matches = NULL;
                }
            }
        }
    }

    /* And combine the result (if any) with any inversion list from
     * properties.  The lists are kept separate up to now so that we can
     * distinguish the two in regard to matching above-Unicode.  A run-time
     * warning is generated if a Unicode property is matched against a
     * non-Unicode code point.  But, we allow user-defined properties to
     * match anything, without any warning, and we also suppress the warning
     * if there is a portion of the character class that isn't a Unicode
     * property, and which matches above Unicode, \W or [\x{110000}] for
     * example.
     * (Note that in this case, unlike the Posix one above, there is no
     * <has_upper_latin1_only_utf8_matches>, because having a Unicode
     * property forces Unicode semantics.) */
    if (properties) {
        if (cp_list) {

            /* If it matters to the final outcome, see if a non-property
             * component of the class matches above Unicode.  If so, the
             * warning gets suppressed.  This is true even if just a single
             * such code point is specified, as, though not strictly correct
             * if another such code point is matched against, the fact that
             * they are using above-Unicode code points indicates they should
             * know the issues involved */
            if (warn_super) {
                warn_super = ! 
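                     /* (The warning thus survives exactly when 'invert' and
                      * "the list reaches above Unicode" agree; when exactly
                      * one of the two holds, the non-property part of the
                      * class -- directly, or through the complement taken
                      * further down -- matches above-Unicode code points
                      * itself, and the warning would just be noise) */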
(invert ^ (invlist_highest(cp_list) > PERL_UNICODE_MAX)); } _invlist_union(properties, cp_list, &cp_list); SvREFCNT_dec_NN(properties); } else { cp_list = properties; } if (warn_super) { ANYOF_FLAGS(ret) |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER; /* Because an ANYOF node is the only one that warns, this node * can't be optimized into something else */ optimizable = FALSE; } } /* Here, we have calculated what code points should be in the character * class. * * Now we can see about various optimizations. Fold calculation (which we * did above) needs to take place before inversion. Otherwise /[^k]/i * would invert to include K, which under /i would match k, which it * shouldn't. Therefore we can't invert folded locale now, as it won't be * folded until runtime */ /* If we didn't do folding, it's because some information isn't available * until runtime; set the run-time fold flag for these. (We don't have to * worry about properties folding, as that is taken care of by the swash * fetching). We know to set the flag if we have a non-NULL list for UTF-8 * locales, or the class matches at least one 0-255 range code point */ if (LOC && FOLD) { /* Some things on the list might be unconditionally included because of * other components. Remove them, and clean up the list if it goes to * 0 elements */ if (only_utf8_locale_list && cp_list) { _invlist_subtract(only_utf8_locale_list, cp_list, &only_utf8_locale_list); if (_invlist_len(only_utf8_locale_list) == 0) { SvREFCNT_dec_NN(only_utf8_locale_list); only_utf8_locale_list = NULL; } } if (only_utf8_locale_list) { ANYOF_FLAGS(ret) |= ANYOFL_FOLD |ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD; } else if (cp_list) { /* Look to see if a 0-255 code point is in list */ UV start, end; invlist_iterinit(cp_list); if (invlist_iternext(cp_list, &start, &end) && start < 256) { ANYOF_FLAGS(ret) |= ANYOFL_FOLD; } invlist_iterfinish(cp_list); } } else if ( DEPENDS_SEMANTICS && ( has_upper_latin1_only_utf8_matches || (ANYOF_FLAGS(ret) & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER))) { OP(ret) = ANYOFD; optimizable = FALSE; } /* Optimize inverted simple patterns (e.g. [^a-z]) when everything is known * at compile time. Besides not inverting folded locale now, we can't * invert if there are things such as \w, which aren't known until runtime * */ if (cp_list && invert && OP(ret) != ANYOFD && ! (ANYOF_FLAGS(ret) & (ANYOF_LOCALE_FLAGS)) && ! HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION) { _invlist_invert(cp_list); /* Any swash can't be used as-is, because we've inverted things */ if (swash) { SvREFCNT_dec_NN(swash); swash = NULL; } /* Clear the invert flag since have just done it here */ invert = FALSE; } if (ret_invlist) { assert(cp_list); *ret_invlist = cp_list; SvREFCNT_dec(swash); /* Discard the generated node */ if (SIZE_ONLY) { RExC_size = orig_size; } else { RExC_emit = orig_emit; } return orig_emit; } /* Some character classes are equivalent to other nodes. Such nodes take * up less room and generally fewer operations to execute than ANYOF nodes. * Above, we checked for and optimized into some such equivalents for * certain common classes that are easy to test. Getting to this point in * the code means that the class didn't get optimized there. Since this * code is only executed in Pass 2, it is too late to save space--it has * been allocated in Pass 1, and currently isn't given back. But turning * things into an EXACTish node can allow the optimizer to join it to any * adjacent such nodes. 
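 * (For instance, once the [y] in /x[y]z/ has been turned into an EXACT
 * node, the joining logic can in principle fuse it with its neighbors into
 * a single EXACT "xyz" node; left as an ANYOF it stays a hard boundary.)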
 * And if the class is equivalent to things like /./, expensive run-time
 * swashes can be avoided.  Now that we have more complete information, we
 * can find things necessarily missed by the earlier code.  Another possible
 * "optimization" that isn't done is that something like [Ee] could be
 * changed into an EXACTFU.  khw tried this and found that the ANYOF is
 * faster, including for code points not in the bitmap.  This still might
 * make sense to do, provided it got joined with an adjacent node(s) to
 * create a longer EXACTFU one.  This could be accomplished by creating a
 * pseudo ANYOF_EXACTFU node type that the join routine would know is
 * joinable.  If that didn't happen, the node type could then be made a
 * straight ANYOF */

    if (optimizable && cp_list && ! invert) {
        UV start, end;
        U8 op = END;  /* The optimization node-type */
        int posix_class = -1;   /* Illegal value */
        const char * cur_parse= RExC_parse;

        invlist_iterinit(cp_list);
        if (! invlist_iternext(cp_list, &start, &end)) {

            /* Here, the list is empty.  This happens, for example, when a
             * Unicode property that doesn't match anything is the only
             * element in the character class (perluniprops.pod notes such
             * properties). */
            op = OPFAIL;
            *flagp |= HASWIDTH|SIMPLE;
        }
        else if (start == end) {    /* The range is a single code point */
            if (! invlist_iternext(cp_list, &start, &end)

                    /* Don't do this optimization if it would require
                     * changing the pattern to UTF-8 */
                && (start < 256 || UTF))
            {
                /* Here, the list contains a single code point.  Can optimize
                 * into an EXACTish node */

                value = start;

                if (! FOLD) {
                    op = (LOC) ? EXACTL : EXACT;
                }
                else if (LOC) {

                    /* A locale node under folding with one code point can be
                     * an EXACTFL, as its fold won't be calculated until
                     * runtime */
                    op = EXACTFL;
                }
                else {

                    /* Here, we are generally folding, but there is only one
                     * code point to match.  If we have to, we use an EXACT
                     * node, but it would be better for joining with adjacent
                     * nodes in the optimization pass if we used the same
                     * EXACTFish node that any such are likely to be.  We can
                     * do this iff the code point doesn't participate in any
                     * folds.  For example, an EXACTF of a colon is the same
                     * as an EXACT one, since nothing folds to or from a
                     * colon. */
                    if (value < 256) {
                        if (IS_IN_SOME_FOLD_L1(value)) {
                            op = EXACT;
                        }
                    }
                    else {
                        if (_invlist_contains_cp(PL_utf8_foldable, value)) {
                            op = EXACT;
                        }
                    }

                    /* If we haven't found the node type, above, it means we
                     * can use the prevailing one */
                    if (op == END) {
                        op = compute_EXACTish(pRExC_state);
                    }
                }
            }
        }   /* End of first range contains just a single code point */
        else if (start == 0) {
            if (end == UV_MAX) {
                op = SANY;
                *flagp |= HASWIDTH|SIMPLE;
                MARK_NAUGHTY(1);
            }
            else if (end == '\n' - 1
                    && invlist_iternext(cp_list, &start, &end)
                    && start == '\n' + 1 && end == UV_MAX)
            {
                op = REG_ANY;
                *flagp |= HASWIDTH|SIMPLE;
                MARK_NAUGHTY(1);
            }
        }
        invlist_iterfinish(cp_list);

        if (op == END) {
            const UV cp_list_len = _invlist_len(cp_list);
            const UV* cp_list_array = invlist_array(cp_list);

            /* Here, didn't find an optimization.  See if this matches any of
             * the POSIX classes.  These run slightly faster for
             * above-Unicode code points, so don't bother with POSIXA ones
             * nor the 2 that have no above-Unicode matches.  We can avoid
             * these checks unless the ANYOF matches at least as high as the
             * lowest POSIX one (which was manually found to be \v.  The
             * actual code point may increase in later Unicode releases, if a
             * higher code point is assigned to be \v, but this code will
             * never break.
It would * just mean we could execute the checks for posix optimizations * unnecessarily) */ if (cp_list_array[cp_list_len-1] > 0x2029) { for (posix_class = 0; posix_class <= _HIGHEST_REGCOMP_DOT_H_SYNC; posix_class++) { int try_inverted; if (posix_class == _CC_ASCII || posix_class == _CC_CNTRL) { continue; } for (try_inverted = 0; try_inverted < 2; try_inverted++) { /* Check if matches normal or inverted */ if (_invlistEQ(cp_list, PL_XPosix_ptrs[posix_class], try_inverted)) { op = (try_inverted) ? NPOSIXU : POSIXU; *flagp |= HASWIDTH|SIMPLE; goto found_posix; } } } found_posix: ; } } if (op != END) { RExC_parse = (char *)orig_parse; RExC_emit = (regnode *)orig_emit; if (regarglen[op]) { ret = reganode(pRExC_state, op, 0); } else { ret = reg_node(pRExC_state, op); } RExC_parse = (char *)cur_parse; if (PL_regkind[op] == EXACT) { alloc_maybe_populate_EXACT(pRExC_state, ret, flagp, 0, value, TRUE /* downgradable to EXACT */ ); } else if (PL_regkind[op] == POSIXD || PL_regkind[op] == NPOSIXD) { FLAGS(ret) = posix_class; } SvREFCNT_dec_NN(cp_list); return ret; } } /* Here, <cp_list> contains all the code points we can determine at * compile time that match under all conditions. Go through it, and * for things that belong in the bitmap, put them there, and delete from * <cp_list>. While we are at it, see if everything above 255 is in the * list, and if so, set a flag to speed up execution */ populate_ANYOF_from_invlist(ret, &cp_list); if (invert) { ANYOF_FLAGS(ret) |= ANYOF_INVERT; } /* Here, the bitmap has been populated with all the Latin1 code points that * always match. Can now add to the overall list those that match only * when the target string is UTF-8 (<has_upper_latin1_only_utf8_matches>). * */ if (has_upper_latin1_only_utf8_matches) { if (cp_list) { _invlist_union(cp_list, has_upper_latin1_only_utf8_matches, &cp_list); SvREFCNT_dec_NN(has_upper_latin1_only_utf8_matches); } else { cp_list = has_upper_latin1_only_utf8_matches; } ANYOF_FLAGS(ret) |= ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP; } /* If there is a swash and more than one element, we can't use the swash in * the optimization below. */ if (swash && element_count > 1) { SvREFCNT_dec_NN(swash); swash = NULL; } /* Note that the optimization of using 'swash' if it is the only thing in * the class doesn't have us change swash at all, so it can include things * that are also in the bitmap; otherwise we have purposely deleted that * duplicate information */ set_ANYOF_arg(pRExC_state, ret, cp_list, (HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION) ? listsv : NULL, only_utf8_locale_list, swash, has_user_defined_property); *flagp |= HASWIDTH|SIMPLE; if (ANYOF_FLAGS(ret) & ANYOF_LOCALE_FLAGS) { RExC_contains_locale = 1; } return ret; } #undef HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION STATIC void S_set_ANYOF_arg(pTHX_ RExC_state_t* const pRExC_state, regnode* const node, SV* const cp_list, SV* const runtime_defns, SV* const only_utf8_locale_list, SV* const swash, const bool has_user_defined_property) { /* Sets the arg field of an ANYOF-type node 'node', using information about * the node passed-in. If there is nothing outside the node's bitmap, the * arg is set to ANYOF_ONLY_HAS_BITMAP. Otherwise, it sets the argument to * the count returned by add_data(), having allocated and stored an array, * av, that that count references, as follows: * av[0] stores the character class description in its textual form. 
* This is used later (regexec.c:Perl_regclass_swash()) to * initialize the appropriate swash, and is also useful for dumping * the regnode. This is set to &PL_sv_undef if the textual * description is not needed at run-time (as happens if the other * elements completely define the class) * av[1] if &PL_sv_undef, is a placeholder to later contain the swash * computed from av[0]. But if no further computation need be done, * the swash is stored here now (and av[0] is &PL_sv_undef). * av[2] stores the inversion list of code points that match only if the * current locale is UTF-8 * av[3] stores the cp_list inversion list for use in addition or instead * of av[0]; used only if cp_list exists and av[1] is &PL_sv_undef. * (Otherwise everything needed is already in av[0] and av[1]) * av[4] is set if any component of the class is from a user-defined * property; used only if av[3] exists */ UV n; PERL_ARGS_ASSERT_SET_ANYOF_ARG; if (! cp_list && ! runtime_defns && ! only_utf8_locale_list) { assert(! (ANYOF_FLAGS(node) & ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP)); ARG_SET(node, ANYOF_ONLY_HAS_BITMAP); } else { AV * const av = newAV(); SV *rv; av_store(av, 0, (runtime_defns) ? SvREFCNT_inc(runtime_defns) : &PL_sv_undef); if (swash) { assert(cp_list); av_store(av, 1, swash); SvREFCNT_dec_NN(cp_list); } else { av_store(av, 1, &PL_sv_undef); if (cp_list) { av_store(av, 3, cp_list); av_store(av, 4, newSVuv(has_user_defined_property)); } } if (only_utf8_locale_list) { av_store(av, 2, only_utf8_locale_list); } else { av_store(av, 2, &PL_sv_undef); } rv = newRV_noinc(MUTABLE_SV(av)); n = add_data(pRExC_state, STR_WITH_LEN("s")); RExC_rxi->data->data[n] = (void*)rv; ARG_SET(node, n); } } #if !defined(PERL_IN_XSUB_RE) || defined(PLUGGABLE_RE_EXTENSION) SV * Perl__get_regclass_nonbitmap_data(pTHX_ const regexp *prog, const regnode* node, bool doinit, SV** listsvp, SV** only_utf8_locale_ptr, SV** output_invlist) { /* For internal core use only. * Returns the swash for the input 'node' in the regex 'prog'. * If <doinit> is 'true', will attempt to create the swash if not already * done. * If <listsvp> is non-null, will return the printable contents of the * swash. This can be used to get debugging information even before the * swash exists, by calling this function with 'doinit' set to false, in * which case the components that will be used to eventually create the * swash are returned (in a printable form). * If <only_utf8_locale_ptr> is not NULL, it is where this routine is to * store an inversion list of code points that should match only if the * execution-time locale is a UTF-8 one. * If <output_invlist> is not NULL, it is where this routine is to store an * inversion list of the code points that would be instead returned in * <listsvp> if this were NULL. Thus, what gets output in <listsvp> * when this parameter is used, is just the non-code point data that * will go into creating the swash. This currently should be just * user-defined properties whose definitions were not known at compile * time. Using this parameter allows for easier manipulation of the * swash's data by the caller. It is illegal to call this function with * this parameter set, but not <listsvp> * * Tied intimately to how S_set_ANYOF_arg sets up the data structure. 
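 * (In terms of the layout built above by S_set_ANYOF_arg: ary[0] below is
 * 'si', the textual description; ary[1] the cached swash; ary[2] the
 * UTF-8-locale-only inversion list; and ary[3]/ary[4] the compile-time
 * inversion list plus its user-defined-property flag.)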
Note * that, in spite of this function's name, the swash it returns may include * the bitmap data as well */ SV *sw = NULL; SV *si = NULL; /* Input swash initialization string */ SV* invlist = NULL; RXi_GET_DECL(prog,progi); const struct reg_data * const data = prog ? progi->data : NULL; PERL_ARGS_ASSERT__GET_REGCLASS_NONBITMAP_DATA; assert(! output_invlist || listsvp); if (data && data->count) { const U32 n = ARG(node); if (data->what[n] == 's') { SV * const rv = MUTABLE_SV(data->data[n]); AV * const av = MUTABLE_AV(SvRV(rv)); SV **const ary = AvARRAY(av); U8 swash_init_flags = _CORE_SWASH_INIT_ACCEPT_INVLIST; si = *ary; /* ary[0] = the string to initialize the swash with */ if (av_tindex_skip_len_mg(av) >= 2) { if (only_utf8_locale_ptr && ary[2] && ary[2] != &PL_sv_undef) { *only_utf8_locale_ptr = ary[2]; } else { assert(only_utf8_locale_ptr); *only_utf8_locale_ptr = NULL; } /* Elements 3 and 4 are either both present or both absent. [3] * is any inversion list generated at compile time; [4] * indicates if that inversion list has any user-defined * properties in it. */ if (av_tindex_skip_len_mg(av) >= 3) { invlist = ary[3]; if (SvUV(ary[4])) { swash_init_flags |= _CORE_SWASH_INIT_USER_DEFINED_PROPERTY; } } else { invlist = NULL; } } /* Element [1] is reserved for the set-up swash. If already there, * return it; if not, create it and store it there */ if (ary[1] && SvROK(ary[1])) { sw = ary[1]; } else if (doinit && ((si && si != &PL_sv_undef) || (invlist && invlist != &PL_sv_undef))) { assert(si); sw = _core_swash_init("utf8", /* the utf8 package */ "", /* nameless */ si, 1, /* binary */ 0, /* not from tr/// */ invlist, &swash_init_flags); (void)av_store(av, 1, sw); } } } /* If requested, return a printable version of what this swash matches */ if (listsvp) { SV* matches_string = NULL; /* The swash should be used, if possible, to get the data, as it * contains the resolved data. But this function can be called at * compile-time, before everything gets resolved, in which case we * return the currently best available information, which is the string * that will eventually be used to do that resolving, 'si' */ if ((! sw || (invlist = _get_swash_invlist(sw)) == NULL) && (si && si != &PL_sv_undef)) { /* Here, we only have 'si' (and possibly some passed-in data in * 'invlist', which is handled below) If the caller only wants * 'si', use that. */ if (! output_invlist) { matches_string = newSVsv(si); } else { /* But if the caller wants an inversion list of the node, we * need to parse 'si' and place as much as possible in the * desired output inversion list, making 'matches_string' only * contain the currently unresolvable things */ const char *si_string = SvPVX(si); STRLEN remaining = SvCUR(si); UV prev_cp = 0; U8 count = 0; /* Ignore everything before the first new-line */ while (*si_string != '\n' && remaining > 0) { si_string++; remaining--; } assert(remaining > 0); si_string++; remaining--; while (remaining > 0) { /* The data consists of just strings defining user-defined * property names, but in prior incarnations, and perhaps * somehow from pluggable regex engines, it could still * hold hex code point definitions. Each component of a * range would be separated by a tab, and each range by a * new-line. 
If these are found, instead add them to the * inversion list */ I32 grok_flags = PERL_SCAN_SILENT_ILLDIGIT |PERL_SCAN_SILENT_NON_PORTABLE; STRLEN len = remaining; UV cp = grok_hex(si_string, &len, &grok_flags, NULL); /* If the hex decode routine found something, it should go * up to the next \n */ if ( *(si_string + len) == '\n') { if (count) { /* 2nd code point on line */ *output_invlist = _add_range_to_invlist(*output_invlist, prev_cp, cp); } else { *output_invlist = add_cp_to_invlist(*output_invlist, cp); } count = 0; goto prepare_for_next_iteration; } /* If the hex decode was instead for the lower range limit, * save it, and go parse the upper range limit */ if (*(si_string + len) == '\t') { assert(count == 0); prev_cp = cp; count = 1; prepare_for_next_iteration: si_string += len + 1; remaining -= len + 1; continue; } /* Here, didn't find a legal hex number. Just add it from * here to the next \n */ remaining -= len; while (*(si_string + len) != '\n' && remaining > 0) { remaining--; len++; } if (*(si_string + len) == '\n') { len++; remaining--; } if (matches_string) { sv_catpvn(matches_string, si_string, len - 1); } else { matches_string = newSVpvn(si_string, len - 1); } si_string += len; sv_catpvs(matches_string, " "); } /* end of loop through the text */ assert(matches_string); if (SvCUR(matches_string)) { /* Get rid of trailing blank */ SvCUR_set(matches_string, SvCUR(matches_string) - 1); } } /* end of has an 'si' but no swash */ } /* If we have a swash in place, its equivalent inversion list was above * placed into 'invlist'. If not, this variable may contain a stored * inversion list which is information beyond what is in 'si' */ if (invlist) { /* Again, if the caller doesn't want the output inversion list, put * everything in 'matches-string' */ if (! output_invlist) { if ( ! matches_string) { matches_string = newSVpvs("\n"); } sv_catsv(matches_string, invlist_contents(invlist, TRUE /* traditional style */ )); } else if (! *output_invlist) { *output_invlist = invlist_clone(invlist); } else { _invlist_union(*output_invlist, invlist, output_invlist); } } *listsvp = matches_string; } return sw; } #endif /* !defined(PERL_IN_XSUB_RE) || defined(PLUGGABLE_RE_EXTENSION) */ /* reg_skipcomment() Absorbs an /x style # comment from the input stream, returning a pointer to the first character beyond the comment, or if the comment terminates the pattern without anything following it, this returns one past the final character of the pattern (in other words, RExC_end) and sets the REG_RUN_ON_COMMENT_SEEN flag. Note it's the callers responsibility to ensure that we are actually in /x mode */ PERL_STATIC_INLINE char* S_reg_skipcomment(RExC_state_t *pRExC_state, char* p) { PERL_ARGS_ASSERT_REG_SKIPCOMMENT; assert(*p == '#'); while (p < RExC_end) { if (*(++p) == '\n') { return p+1; } } /* we ran off the end of the pattern without ending the comment, so we have * to add an \n when wrapping */ RExC_seen |= REG_RUN_ON_COMMENT_SEEN; return p; } STATIC void S_skip_to_be_ignored_text(pTHX_ RExC_state_t *pRExC_state, char ** p, const bool force_to_xmod ) { /* If the text at the current parse position '*p' is a '(?#...)' comment, * or if we are under /x or 'force_to_xmod' is TRUE, and the text at '*p' * is /x whitespace, advance '*p' so that on exit it points to the first * byte past all such white space and comments */ const bool use_xmod = force_to_xmod || (RExC_flags & RXf_PMf_EXTENDED); PERL_ARGS_ASSERT_SKIP_TO_BE_IGNORED_TEXT; assert( ! 
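    /* i.e. '*p' must sit on a character boundary: in UTF-8 that is either a
     * one-byte (invariant) character or the start byte of a multi-byte
     * sequence, never a continuation byte */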
UTF || UTF8_IS_INVARIANT(**p) || UTF8_IS_START(**p)); for (;;) { if (RExC_end - (*p) >= 3 && *(*p) == '(' && *(*p + 1) == '?' && *(*p + 2) == '#') { while (*(*p) != ')') { if ((*p) == RExC_end) FAIL("Sequence (?#... not terminated"); (*p)++; } (*p)++; continue; } if (use_xmod) { const char * save_p = *p; while ((*p) < RExC_end) { STRLEN len; if ((len = is_PATWS_safe((*p), RExC_end, UTF))) { (*p) += len; } else if (*(*p) == '#') { (*p) = reg_skipcomment(pRExC_state, (*p)); } else { break; } } if (*p != save_p) { continue; } } break; } return; } /* nextchar() Advances the parse position by one byte, unless that byte is the beginning of a '(?#...)' style comment, or is /x whitespace and /x is in effect. In those two cases, the parse position is advanced beyond all such comments and white space. This is the UTF, (?#...), and /x friendly way of saying RExC_parse++. */ STATIC void S_nextchar(pTHX_ RExC_state_t *pRExC_state) { PERL_ARGS_ASSERT_NEXTCHAR; if (RExC_parse < RExC_end) { assert( ! UTF || UTF8_IS_INVARIANT(*RExC_parse) || UTF8_IS_START(*RExC_parse)); RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; skip_to_be_ignored_text(pRExC_state, &RExC_parse, FALSE /* Don't force /x */ ); } } STATIC regnode * S_regnode_guts(pTHX_ RExC_state_t *pRExC_state, const U8 op, const STRLEN extra_size, const char* const name) { /* Allocate a regnode for 'op' and returns it, with 'extra_size' extra * space. In pass1, it aligns and increments RExC_size; in pass2, * RExC_emit */ regnode * const ret = RExC_emit; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGNODE_GUTS; assert(extra_size >= regarglen[op]); if (SIZE_ONLY) { SIZE_ALIGN(RExC_size); RExC_size += 1 + extra_size; return(ret); } if (RExC_emit >= RExC_emit_bound) Perl_croak(aTHX_ "panic: reg_node overrun trying to emit %d, %p>=%p", op, (void*)RExC_emit, (void*)RExC_emit_bound); NODE_ALIGN_FILL(ret); #ifndef RE_TRACK_PATTERN_OFFSETS PERL_UNUSED_ARG(name); #else if (RExC_offsets) { /* MJD */ MJD_OFFSET_DEBUG( ("%s:%d: (op %s) %s %" UVuf " (len %" UVuf ") (max %" UVuf ").\n", name, __LINE__, PL_reg_name[op], (UV)(RExC_emit - RExC_emit_start) > RExC_offsets[0] ? "Overwriting end of array!\n" : "OK", (UV)(RExC_emit - RExC_emit_start), (UV)(RExC_parse - RExC_start), (UV)RExC_offsets[0])); Set_Node_Offset(RExC_emit, RExC_parse + (op == END)); } #endif return(ret); } /* - reg_node - emit a node */ STATIC regnode * /* Location. */ S_reg_node(pTHX_ RExC_state_t *pRExC_state, U8 op) { regnode * const ret = regnode_guts(pRExC_state, op, regarglen[op], "reg_node"); PERL_ARGS_ASSERT_REG_NODE; assert(regarglen[op] == 0); if (PASS2) { regnode *ptr = ret; FILL_ADVANCE_NODE(ptr, op); RExC_emit = ptr; } return(ret); } /* - reganode - emit a node with an argument */ STATIC regnode * /* Location. 
*/ S_reganode(pTHX_ RExC_state_t *pRExC_state, U8 op, U32 arg) { regnode * const ret = regnode_guts(pRExC_state, op, regarglen[op], "reganode"); PERL_ARGS_ASSERT_REGANODE; assert(regarglen[op] == 1); if (PASS2) { regnode *ptr = ret; FILL_ADVANCE_NODE_ARG(ptr, op, arg); RExC_emit = ptr; } return(ret); } STATIC regnode * S_reg2Lanode(pTHX_ RExC_state_t *pRExC_state, const U8 op, const U32 arg1, const I32 arg2) { /* emit a node with U32 and I32 arguments */ regnode * const ret = regnode_guts(pRExC_state, op, regarglen[op], "reg2Lanode"); PERL_ARGS_ASSERT_REG2LANODE; assert(regarglen[op] == 2); if (PASS2) { regnode *ptr = ret; FILL_ADVANCE_NODE_2L_ARG(ptr, op, arg1, arg2); RExC_emit = ptr; } return(ret); } /* - reginsert - insert an operator in front of already-emitted operand * * Means relocating the operand. * * IMPORTANT NOTE - it is the *callers* responsibility to correctly * set up NEXT_OFF() of the inserted node if needed. Something like this: * * reginsert(pRExC, OPFAIL, orig_emit, depth+1); * if (PASS2) * NEXT_OFF(orig_emit) = regarglen[OPFAIL] + NODE_STEP_REGNODE; * * ALSO NOTE - operand->flags will be set to 0 as well. */ STATIC void S_reginsert(pTHX_ RExC_state_t *pRExC_state, U8 op, regnode *operand, U32 depth) { regnode *src; regnode *dst; regnode *place; const int offset = regarglen[(U8)op]; const int size = NODE_STEP_REGNODE + offset; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGINSERT; PERL_UNUSED_CONTEXT; PERL_UNUSED_ARG(depth); /* (PL_regkind[(U8)op] == CURLY ? EXTRA_STEP_2ARGS : 0); */ DEBUG_PARSE_FMT("inst"," - %s",PL_reg_name[op]); if (SIZE_ONLY) { RExC_size += size; return; } assert(!RExC_study_started); /* I believe we should never use reginsert once we have started studying. If this is wrong then we need to adjust RExC_recurse below like we do with RExC_open_parens/RExC_close_parens. */ src = RExC_emit; RExC_emit += size; dst = RExC_emit; if (RExC_open_parens) { int paren; /*DEBUG_PARSE_FMT("inst"," - %" IVdf, (IV)RExC_npar);*/ /* remember that RExC_npar is rex->nparens + 1, * iow it is 1 more than the number of parens seen in * the pattern so far. */ for ( paren=0 ; paren < RExC_npar ; paren++ ) { /* note, RExC_open_parens[0] is the start of the * regex, it can't move. RExC_close_parens[0] is the end * of the regex, it *can* move. */ if ( paren && RExC_open_parens[paren] >= operand ) { /*DEBUG_PARSE_FMT("open"," - %d",size);*/ RExC_open_parens[paren] += size; } else { /*DEBUG_PARSE_FMT("open"," - %s","ok");*/ } if ( RExC_close_parens[paren] >= operand ) { /*DEBUG_PARSE_FMT("close"," - %d",size);*/ RExC_close_parens[paren] += size; } else { /*DEBUG_PARSE_FMT("close"," - %s","ok");*/ } } } if (RExC_end_op) RExC_end_op += size; while (src > operand) { StructCopy(--src, --dst, regnode); #ifdef RE_TRACK_PATTERN_OFFSETS if (RExC_offsets) { /* MJD 20010112 */ MJD_OFFSET_DEBUG( ("%s(%d): (op %s) %s copy %" UVuf " -> %" UVuf " (max %" UVuf ").\n", "reg_insert", __LINE__, PL_reg_name[op], (UV)(dst - RExC_emit_start) > RExC_offsets[0] ? "Overwriting end of array!\n" : "OK", (UV)(src - RExC_emit_start), (UV)(dst - RExC_emit_start), (UV)RExC_offsets[0])); Set_Node_Offset_To_R(dst-RExC_emit_start, Node_Offset(src)); Set_Node_Length_To_R(dst-RExC_emit_start, Node_Length(src)); } #endif } place = operand; /* Op node, where operand used to be. */ #ifdef RE_TRACK_PATTERN_OFFSETS if (RExC_offsets) { /* MJD */ MJD_OFFSET_DEBUG( ("%s(%d): (op %s) %s %" UVuf " <- %" UVuf " (max %" UVuf ").\n", "reginsert", __LINE__, PL_reg_name[op], (UV)(place - RExC_emit_start) > RExC_offsets[0] ? 
"Overwriting end of array!\n" : "OK", (UV)(place - RExC_emit_start), (UV)(RExC_parse - RExC_start), (UV)RExC_offsets[0])); Set_Node_Offset(place, RExC_parse); Set_Node_Length(place, 1); } #endif src = NEXTOPER(place); place->flags = 0; FILL_ADVANCE_NODE(place, op); Zero(src, offset, regnode); } /* - regtail - set the next-pointer at the end of a node chain of p to val. - SEE ALSO: regtail_study */ STATIC void S_regtail(pTHX_ RExC_state_t * pRExC_state, const regnode * const p, const regnode * const val, const U32 depth) { regnode *scan; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGTAIL; #ifndef DEBUGGING PERL_UNUSED_ARG(depth); #endif if (SIZE_ONLY) return; /* Find last node. */ scan = (regnode *) p; for (;;) { regnode * const temp = regnext(scan); DEBUG_PARSE_r({ DEBUG_PARSE_MSG((scan==p ? "tail" : "")); regprop(RExC_rx, RExC_mysv, scan, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ %s (%d) %s %s\n", SvPV_nolen_const(RExC_mysv), REG_NODE_NUM(scan), (temp == NULL ? "->" : ""), (temp == NULL ? PL_reg_name[OP(val)] : "") ); }); if (temp == NULL) break; scan = temp; } if (reg_off_by_arg[OP(scan)]) { ARG_SET(scan, val - scan); } else { NEXT_OFF(scan) = val - scan; } } #ifdef DEBUGGING /* - regtail_study - set the next-pointer at the end of a node chain of p to val. - Look for optimizable sequences at the same time. - currently only looks for EXACT chains. This is experimental code. The idea is to use this routine to perform in place optimizations on branches and groups as they are constructed, with the long term intention of removing optimization from study_chunk so that it is purely analytical. Currently only used when in DEBUG mode. The macro REGTAIL_STUDY() is used to control which is which. */ /* TODO: All four parms should be const */ STATIC U8 S_regtail_study(pTHX_ RExC_state_t *pRExC_state, regnode *p, const regnode *val,U32 depth) { regnode *scan; U8 exact = PSEUDO; #ifdef EXPERIMENTAL_INPLACESCAN I32 min = 0; #endif GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGTAIL_STUDY; if (SIZE_ONLY) return exact; /* Find last node. */ scan = p; for (;;) { regnode * const temp = regnext(scan); #ifdef EXPERIMENTAL_INPLACESCAN if (PL_regkind[OP(scan)] == EXACT) { bool unfolded_multi_char; /* Unexamined in this routine */ if (join_exact(pRExC_state, scan, &min, &unfolded_multi_char, 1, val, depth+1)) return EXACT; } #endif if ( exact ) { switch (OP(scan)) { case EXACT: case EXACTL: case EXACTF: case EXACTFA_NO_TRIE: case EXACTFA: case EXACTFU: case EXACTFLU8: case EXACTFU_SS: case EXACTFL: if( exact == PSEUDO ) exact= OP(scan); else if ( exact != OP(scan) ) exact= 0; case NOTHING: break; default: exact= 0; } } DEBUG_PARSE_r({ DEBUG_PARSE_MSG((scan==p ? 
"tsdy" : "")); regprop(RExC_rx, RExC_mysv, scan, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ %s (%d) -> %s\n", SvPV_nolen_const(RExC_mysv), REG_NODE_NUM(scan), PL_reg_name[exact]); }); if (temp == NULL) break; scan = temp; } DEBUG_PARSE_r({ DEBUG_PARSE_MSG(""); regprop(RExC_rx, RExC_mysv, val, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ attach to %s (%" IVdf ") offset to %" IVdf "\n", SvPV_nolen_const(RExC_mysv), (IV)REG_NODE_NUM(val), (IV)(val - scan) ); }); if (reg_off_by_arg[OP(scan)]) { ARG_SET(scan, val - scan); } else { NEXT_OFF(scan) = val - scan; } return exact; } #endif /* - regdump - dump a regexp onto Perl_debug_log in vaguely comprehensible form */ #ifdef DEBUGGING static void S_regdump_intflags(pTHX_ const char *lead, const U32 flags) { int bit; int set=0; ASSUME(REG_INTFLAGS_NAME_SIZE <= sizeof(flags)*8); for (bit=0; bit<REG_INTFLAGS_NAME_SIZE; bit++) { if (flags & (1<<bit)) { if (!set++ && lead) Perl_re_printf( aTHX_ "%s",lead); Perl_re_printf( aTHX_ "%s ",PL_reg_intflags_name[bit]); } } if (lead) { if (set) Perl_re_printf( aTHX_ "\n"); else Perl_re_printf( aTHX_ "%s[none-set]\n",lead); } } static void S_regdump_extflags(pTHX_ const char *lead, const U32 flags) { int bit; int set=0; regex_charset cs; ASSUME(REG_EXTFLAGS_NAME_SIZE <= sizeof(flags)*8); for (bit=0; bit<REG_EXTFLAGS_NAME_SIZE; bit++) { if (flags & (1<<bit)) { if ((1<<bit) & RXf_PMf_CHARSET) { /* Output separately, below */ continue; } if (!set++ && lead) Perl_re_printf( aTHX_ "%s",lead); Perl_re_printf( aTHX_ "%s ",PL_reg_extflags_name[bit]); } } if ((cs = get_regex_charset(flags)) != REGEX_DEPENDS_CHARSET) { if (!set++ && lead) { Perl_re_printf( aTHX_ "%s",lead); } switch (cs) { case REGEX_UNICODE_CHARSET: Perl_re_printf( aTHX_ "UNICODE"); break; case REGEX_LOCALE_CHARSET: Perl_re_printf( aTHX_ "LOCALE"); break; case REGEX_ASCII_RESTRICTED_CHARSET: Perl_re_printf( aTHX_ "ASCII-RESTRICTED"); break; case REGEX_ASCII_MORE_RESTRICTED_CHARSET: Perl_re_printf( aTHX_ "ASCII-MORE_RESTRICTED"); break; default: Perl_re_printf( aTHX_ "UNKNOWN CHARACTER SET"); break; } } if (lead) { if (set) Perl_re_printf( aTHX_ "\n"); else Perl_re_printf( aTHX_ "%s[none-set]\n",lead); } } #endif void Perl_regdump(pTHX_ const regexp *r) { #ifdef DEBUGGING int i; SV * const sv = sv_newmortal(); SV *dsv= sv_newmortal(); RXi_GET_DECL(r,ri); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGDUMP; (void)dumpuntil(r, ri->program, ri->program + 1, NULL, NULL, sv, 0, 0); /* Header fields of interest. */ for (i = 0; i < 2; i++) { if (r->substrs->data[i].substr) { RE_PV_QUOTED_DECL(s, 0, dsv, SvPVX_const(r->substrs->data[i].substr), RE_SV_DUMPLEN(r->substrs->data[i].substr), PL_dump_re_max_len); Perl_re_printf( aTHX_ "%s %s%s at %" IVdf "..%" UVuf " ", i ? "floating" : "anchored", s, RE_SV_TAIL(r->substrs->data[i].substr), (IV)r->substrs->data[i].min_offset, (UV)r->substrs->data[i].max_offset); } else if (r->substrs->data[i].utf8_substr) { RE_PV_QUOTED_DECL(s, 1, dsv, SvPVX_const(r->substrs->data[i].utf8_substr), RE_SV_DUMPLEN(r->substrs->data[i].utf8_substr), 30); Perl_re_printf( aTHX_ "%s utf8 %s%s at %" IVdf "..%" UVuf " ", i ? "floating" : "anchored", s, RE_SV_TAIL(r->substrs->data[i].utf8_substr), (IV)r->substrs->data[i].min_offset, (UV)r->substrs->data[i].max_offset); } } if (r->check_substr || r->check_utf8) Perl_re_printf( aTHX_ (const char *) ( r->check_substr == r->substrs->data[1].substr && r->check_utf8 == r->substrs->data[1].utf8_substr ? 
"(checking floating" : "(checking anchored")); if (r->intflags & PREGf_NOSCAN) Perl_re_printf( aTHX_ " noscan"); if (r->extflags & RXf_CHECK_ALL) Perl_re_printf( aTHX_ " isall"); if (r->check_substr || r->check_utf8) Perl_re_printf( aTHX_ ") "); if (ri->regstclass) { regprop(r, sv, ri->regstclass, NULL, NULL); Perl_re_printf( aTHX_ "stclass %s ", SvPVX_const(sv)); } if (r->intflags & PREGf_ANCH) { Perl_re_printf( aTHX_ "anchored"); if (r->intflags & PREGf_ANCH_MBOL) Perl_re_printf( aTHX_ "(MBOL)"); if (r->intflags & PREGf_ANCH_SBOL) Perl_re_printf( aTHX_ "(SBOL)"); if (r->intflags & PREGf_ANCH_GPOS) Perl_re_printf( aTHX_ "(GPOS)"); Perl_re_printf( aTHX_ " "); } if (r->intflags & PREGf_GPOS_SEEN) Perl_re_printf( aTHX_ "GPOS:%" UVuf " ", (UV)r->gofs); if (r->intflags & PREGf_SKIP) Perl_re_printf( aTHX_ "plus "); if (r->intflags & PREGf_IMPLICIT) Perl_re_printf( aTHX_ "implicit "); Perl_re_printf( aTHX_ "minlen %" IVdf " ", (IV)r->minlen); if (r->extflags & RXf_EVAL_SEEN) Perl_re_printf( aTHX_ "with eval "); Perl_re_printf( aTHX_ "\n"); DEBUG_FLAGS_r({ regdump_extflags("r->extflags: ",r->extflags); regdump_intflags("r->intflags: ",r->intflags); }); #else PERL_ARGS_ASSERT_REGDUMP; PERL_UNUSED_CONTEXT; PERL_UNUSED_ARG(r); #endif /* DEBUGGING */ } /* Should be synchronized with ANYOF_ #defines in regcomp.h */ #ifdef DEBUGGING # if _CC_WORDCHAR != 0 || _CC_DIGIT != 1 || _CC_ALPHA != 2 \ || _CC_LOWER != 3 || _CC_UPPER != 4 || _CC_PUNCT != 5 \ || _CC_PRINT != 6 || _CC_ALPHANUMERIC != 7 || _CC_GRAPH != 8 \ || _CC_CASED != 9 || _CC_SPACE != 10 || _CC_BLANK != 11 \ || _CC_XDIGIT != 12 || _CC_CNTRL != 13 || _CC_ASCII != 14 \ || _CC_VERTSPACE != 15 # error Need to adjust order of anyofs[] # endif static const char * const anyofs[] = { "\\w", "\\W", "\\d", "\\D", "[:alpha:]", "[:^alpha:]", "[:lower:]", "[:^lower:]", "[:upper:]", "[:^upper:]", "[:punct:]", "[:^punct:]", "[:print:]", "[:^print:]", "[:alnum:]", "[:^alnum:]", "[:graph:]", "[:^graph:]", "[:cased:]", "[:^cased:]", "\\s", "\\S", "[:blank:]", "[:^blank:]", "[:xdigit:]", "[:^xdigit:]", "[:cntrl:]", "[:^cntrl:]", "[:ascii:]", "[:^ascii:]", "\\v", "\\V" }; #endif /* - regprop - printable representation of opcode, with run time support */ void Perl_regprop(pTHX_ const regexp *prog, SV *sv, const regnode *o, const regmatch_info *reginfo, const RExC_state_t *pRExC_state) { #ifdef DEBUGGING int k; RXi_GET_DECL(prog,progi); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGPROP; SvPVCLEAR(sv); if (OP(o) > REGNODE_MAX) /* regnode.type is unsigned */ /* It would be nice to FAIL() here, but this may be called from regexec.c, and it would be hard to supply pRExC_state. */ Perl_croak(aTHX_ "Corrupted regexp opcode %d > %d", (int)OP(o), (int)REGNODE_MAX); sv_catpv(sv, PL_reg_name[OP(o)]); /* Take off const! */ k = PL_regkind[OP(o)]; if (k == EXACT) { sv_catpvs(sv, " "); /* Using is_utf8_string() (via PERL_PV_UNI_DETECT) * is a crude hack but it may be the best for now since * we have no flag "this EXACTish node was UTF-8" * --jhi */ pv_pretty(sv, STRING(o), STR_LEN(o), PL_dump_re_max_len, PL_colors[0], PL_colors[1], PERL_PV_ESCAPE_UNI_DETECT | PERL_PV_ESCAPE_NONASCII | PERL_PV_PRETTY_ELLIPSES | PERL_PV_PRETTY_LTGT | PERL_PV_PRETTY_NOCLEAR ); } else if (k == TRIE) { /* print the details of the trie in dumpuntil instead, as * progi->data isn't available here */ const char op = OP(o); const U32 n = ARG(o); const reg_ac_data * const ac = IS_TRIE_AC(op) ? 
(reg_ac_data *)progi->data->data[n] : NULL; const reg_trie_data * const trie = (reg_trie_data*)progi->data->data[!IS_TRIE_AC(op) ? n : ac->trie]; Perl_sv_catpvf(aTHX_ sv, "-%s",PL_reg_name[o->flags]); DEBUG_TRIE_COMPILE_r({ if (trie->jump) sv_catpvs(sv, "(JUMP)"); Perl_sv_catpvf(aTHX_ sv, "<S:%" UVuf "/%" IVdf " W:%" UVuf " L:%" UVuf "/%" UVuf " C:%" UVuf "/%" UVuf ">", (UV)trie->startstate, (IV)trie->statecount-1, /* -1 because of the unused 0 element */ (UV)trie->wordcount, (UV)trie->minlen, (UV)trie->maxlen, (UV)TRIE_CHARCOUNT(trie), (UV)trie->uniquecharcount ); }); if ( IS_ANYOF_TRIE(op) || trie->bitmap ) { sv_catpvs(sv, "["); (void) put_charclass_bitmap_innards(sv, ((IS_ANYOF_TRIE(op)) ? ANYOF_BITMAP(o) : TRIE_BITMAP(trie)), NULL, NULL, NULL, FALSE ); sv_catpvs(sv, "]"); } } else if (k == CURLY) { U32 lo = ARG1(o), hi = ARG2(o); if (OP(o) == CURLYM || OP(o) == CURLYN || OP(o) == CURLYX) Perl_sv_catpvf(aTHX_ sv, "[%d]", o->flags); /* Parenth number */ Perl_sv_catpvf(aTHX_ sv, "{%u,", (unsigned) lo); if (hi == REG_INFTY) sv_catpvs(sv, "INFTY"); else Perl_sv_catpvf(aTHX_ sv, "%u", (unsigned) hi); sv_catpvs(sv, "}"); } else if (k == WHILEM && o->flags) /* Ordinal/of */ Perl_sv_catpvf(aTHX_ sv, "[%d/%d]", o->flags & 0xf, o->flags>>4); else if (k == REF || k == OPEN || k == CLOSE || k == GROUPP || OP(o)==ACCEPT) { AV *name_list= NULL; U32 parno= OP(o) == ACCEPT ? (U32)ARG2L(o) : ARG(o); Perl_sv_catpvf(aTHX_ sv, "%" UVuf, (UV)parno); /* Parenth number */ if ( RXp_PAREN_NAMES(prog) ) { name_list= MUTABLE_AV(progi->data->data[progi->name_list_idx]); } else if ( pRExC_state ) { name_list= RExC_paren_name_list; } if (name_list) { if ( k != REF || (OP(o) < NREF)) { SV **name= av_fetch(name_list, parno, 0 ); if (name) Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name)); } else { SV *sv_dat= MUTABLE_SV(progi->data->data[ parno ]); I32 *nums=(I32*)SvPVX(sv_dat); SV **name= av_fetch(name_list, nums[0], 0 ); I32 n; if (name) { for ( n=0; n<SvIVX(sv_dat); n++ ) { Perl_sv_catpvf(aTHX_ sv, "%s%" IVdf, (n ? "," : ""), (IV)nums[n]); } Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name)); } } } if ( k == REF && reginfo) { U32 n = ARG(o); /* which paren pair */ I32 ln = prog->offs[n].start; if (prog->lastparen < n || ln == -1) Perl_sv_catpvf(aTHX_ sv, ": FAIL"); else if (ln == prog->offs[n].end) Perl_sv_catpvf(aTHX_ sv, ": ACCEPT - EMPTY STRING"); else { const char *s = reginfo->strbeg + ln; Perl_sv_catpvf(aTHX_ sv, ": "); Perl_pv_pretty( aTHX_ sv, s, prog->offs[n].end - prog->offs[n].start, 32, 0, 0, PERL_PV_ESCAPE_UNI_DETECT|PERL_PV_PRETTY_NOCLEAR|PERL_PV_PRETTY_ELLIPSES|PERL_PV_PRETTY_QUOTE ); } } } else if (k == GOSUB) { AV *name_list= NULL; if ( RXp_PAREN_NAMES(prog) ) { name_list= MUTABLE_AV(progi->data->data[progi->name_list_idx]); } else if ( pRExC_state ) { name_list= RExC_paren_name_list; } /* Paren and offset */ Perl_sv_catpvf(aTHX_ sv, "%d[%+d:%d]", (int)ARG(o),(int)ARG2L(o), (int)((o + (int)ARG2L(o)) - progi->program) ); if (name_list) { SV **name= av_fetch(name_list, ARG(o), 0 ); if (name) Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name)); } } else if (k == LOGICAL) /* 2: embedded, otherwise 1 */ Perl_sv_catpvf(aTHX_ sv, "[%d]", o->flags); else if (k == ANYOF) { const U8 flags = ANYOF_FLAGS(o); bool do_sep = FALSE; /* Do we need to separate various components of the output? 
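                           * (i.e. whether to emit a "][" between the bitmap
                           * innards and the components that get dumped after
                           * them)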
*/ /* Set if there is still an unresolved user-defined property */ SV *unresolved = NULL; /* Things that are ignored except when the runtime locale is UTF-8 */ SV *only_utf8_locale_invlist = NULL; /* Code points that don't fit in the bitmap */ SV *nonbitmap_invlist = NULL; /* And things that aren't in the bitmap, but are small enough to be */ SV* bitmap_range_not_in_bitmap = NULL; const bool inverted = flags & ANYOF_INVERT; if (OP(o) == ANYOFL) { if (ANYOFL_UTF8_LOCALE_REQD(flags)) { sv_catpvs(sv, "{utf8-locale-reqd}"); } if (flags & ANYOFL_FOLD) { sv_catpvs(sv, "{i}"); } } /* If there is stuff outside the bitmap, get it */ if (ARG(o) != ANYOF_ONLY_HAS_BITMAP) { (void) _get_regclass_nonbitmap_data(prog, o, FALSE, &unresolved, &only_utf8_locale_invlist, &nonbitmap_invlist); /* The non-bitmap data may contain stuff that could fit in the * bitmap. This could come from a user-defined property being * finally resolved when this call was done; or much more likely * because there are matches that require UTF-8 to be valid, and so * aren't in the bitmap. This is teased apart later */ _invlist_intersection(nonbitmap_invlist, PL_InBitmap, &bitmap_range_not_in_bitmap); /* Leave just the things that don't fit into the bitmap */ _invlist_subtract(nonbitmap_invlist, PL_InBitmap, &nonbitmap_invlist); } /* Obey this flag to add all above-the-bitmap code points */ if (flags & ANYOF_MATCHES_ALL_ABOVE_BITMAP) { nonbitmap_invlist = _add_range_to_invlist(nonbitmap_invlist, NUM_ANYOF_CODE_POINTS, UV_MAX); } /* Ready to start outputting. First, the initial left bracket */ Perl_sv_catpvf(aTHX_ sv, "[%s", PL_colors[0]); /* Then all the things that could fit in the bitmap */ do_sep = put_charclass_bitmap_innards(sv, ANYOF_BITMAP(o), bitmap_range_not_in_bitmap, only_utf8_locale_invlist, o, /* Can't try inverting for a * better display if there are * things that haven't been * resolved */ unresolved != NULL); SvREFCNT_dec(bitmap_range_not_in_bitmap); /* If there are user-defined properties which haven't been defined yet, * output them. If the result is not to be inverted, it is clearest to * output them in a separate [] from the bitmap range stuff. If the * result is to be complemented, we have to show everything in one [], * as the inversion applies to the whole thing. Use {braces} to * separate them from anything in the bitmap and anything above the * bitmap. */ if (unresolved) { if (inverted) { if (! do_sep) { /* If didn't output anything in the bitmap */ sv_catpvs(sv, "^"); } sv_catpvs(sv, "{"); } else if (do_sep) { Perl_sv_catpvf(aTHX_ sv,"%s][%s",PL_colors[1],PL_colors[0]); } sv_catsv(sv, unresolved); if (inverted) { sv_catpvs(sv, "}"); } do_sep = ! inverted; } /* And, finally, add the above-the-bitmap stuff */ if (nonbitmap_invlist && _invlist_len(nonbitmap_invlist)) { SV* contents; /* See if truncation size is overridden */ const STRLEN dump_len = (PL_dump_re_max_len > 256) ? PL_dump_re_max_len : 256; /* This is output in a separate [] */ if (do_sep) { Perl_sv_catpvf(aTHX_ sv,"%s][%s",PL_colors[1],PL_colors[0]); } /* And, for easy of understanding, it is shown in the * uncomplemented form if possible. The one exception being if * there are unresolved items, where the inversion has to be * delayed until runtime */ if (inverted && ! unresolved) { _invlist_invert(nonbitmap_invlist); _invlist_subtract(nonbitmap_invlist, PL_InBitmap, &nonbitmap_invlist); } contents = invlist_contents(nonbitmap_invlist, FALSE /* output suitable for catsv */ ); /* If the output is shorter than the permissible maximum, just do it. 
*/ if (SvCUR(contents) <= dump_len) { sv_catsv(sv, contents); } else { const char * contents_string = SvPVX(contents); STRLEN i = dump_len; /* Otherwise, start at the permissible max and work back to the * first break possibility */ while (i > 0 && contents_string[i] != ' ') { i--; } if (i == 0) { /* Fail-safe. Use the max if we couldn't find a legal break */ i = dump_len; } sv_catpvn(sv, contents_string, i); sv_catpvs(sv, "..."); } SvREFCNT_dec_NN(contents); SvREFCNT_dec_NN(nonbitmap_invlist); } /* And finally the matching, closing ']' */ Perl_sv_catpvf(aTHX_ sv, "%s]", PL_colors[1]); SvREFCNT_dec(unresolved); } else if (k == POSIXD || k == NPOSIXD) { U8 index = FLAGS(o) * 2; if (index < C_ARRAY_LENGTH(anyofs)) { if (*anyofs[index] != '[') { sv_catpv(sv, "["); } sv_catpv(sv, anyofs[index]); if (*anyofs[index] != '[') { sv_catpv(sv, "]"); } } else { Perl_sv_catpvf(aTHX_ sv, "[illegal type=%d])", index); } } else if (k == BOUND || k == NBOUND) { /* Must be synced with order of 'bound_type' in regcomp.h */ const char * const bounds[] = { "", /* Traditional */ "{gcb}", "{lb}", "{sb}", "{wb}" }; assert(FLAGS(o) < C_ARRAY_LENGTH(bounds)); sv_catpv(sv, bounds[FLAGS(o)]); } else if (k == BRANCHJ && (OP(o) == UNLESSM || OP(o) == IFMATCH)) Perl_sv_catpvf(aTHX_ sv, "[%d]", -(o->flags)); else if (OP(o) == SBOL) Perl_sv_catpvf(aTHX_ sv, " /%s/", o->flags ? "\\A" : "^"); /* add on the verb argument if there is one */ if ( ( k == VERB || OP(o) == ACCEPT || OP(o) == OPFAIL ) && o->flags) { if ( ARG(o) ) Perl_sv_catpvf(aTHX_ sv, ":%" SVf, SVfARG((MUTABLE_SV(progi->data->data[ ARG( o ) ])))); else sv_catpvs(sv, ":NULL"); } #else PERL_UNUSED_CONTEXT; PERL_UNUSED_ARG(sv); PERL_UNUSED_ARG(o); PERL_UNUSED_ARG(prog); PERL_UNUSED_ARG(reginfo); PERL_UNUSED_ARG(pRExC_state); #endif /* DEBUGGING */ } SV * Perl_re_intuit_string(pTHX_ REGEXP * const r) { /* Assume that RE_INTUIT is set */ struct regexp *const prog = ReANY(r); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_RE_INTUIT_STRING; PERL_UNUSED_CONTEXT; DEBUG_COMPILE_r( { const char * const s = SvPV_nolen_const(RX_UTF8(r) ? prog->check_utf8 : prog->check_substr); if (!PL_colorset) reginitcolors(); Perl_re_printf( aTHX_ "%sUsing REx %ssubstr:%s \"%s%.60s%s%s\"\n", PL_colors[4], RX_UTF8(r) ? "utf8 " : "", PL_colors[5],PL_colors[0], s, PL_colors[1], (strlen(s) > PL_dump_re_max_len ? "..." : "")); } ); /* use UTF8 check substring if regexp pattern itself is in UTF8 */ return RX_UTF8(r) ? prog->check_utf8 : prog->check_substr; } /* pregfree() handles refcounting and freeing the perl core regexp structure. When it is necessary to actually free the structure the first thing it does is call the 'free' method of the regexp_engine associated to the regexp, allowing the handling of the void *pprivate; member first. (This routine is not overridable by extensions, which is why the extensions free is called first.) See regdupe and regdupe_internal if you change anything here. 
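
   In rough outline, for the core engine with no extensions loaded:

       Perl_pregfree(rx)        just SvREFCNT_dec()s the REGEXP SV;
                                when the count reaches zero sv_clear()
                                ends up in
       Perl_pregfree2(rx)       which first calls
       CALLREGFREE_PVT(rx)      the engine 'free' hook - here
                                Perl_regfree_internal() - for ->pprivate,
                                and then releases the core fields
                                (substrs, offs, qr_anoncv, ...).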
 */
#ifndef PERL_IN_XSUB_RE
void
Perl_pregfree(pTHX_ REGEXP *r)
{
    SvREFCNT_dec(r);
}

void
Perl_pregfree2(pTHX_ REGEXP *rx)
{
    struct regexp *const r = ReANY(rx);
    GET_RE_DEBUG_FLAGS_DECL;

    PERL_ARGS_ASSERT_PREGFREE2;

    if (r->mother_re) {
        ReREFCNT_dec(r->mother_re);
    } else {
        CALLREGFREE_PVT(rx); /* free the private data */
        SvREFCNT_dec(RXp_PAREN_NAMES(r));
    }
    if (r->substrs) {
        int i;
        for (i = 0; i < 2; i++) {
            SvREFCNT_dec(r->substrs->data[i].substr);
            SvREFCNT_dec(r->substrs->data[i].utf8_substr);
        }
        Safefree(r->substrs);
    }
    RX_MATCH_COPY_FREE(rx);
#ifdef PERL_ANY_COW
    SvREFCNT_dec(r->saved_copy);
#endif
    Safefree(r->offs);
    SvREFCNT_dec(r->qr_anoncv);
    if (r->recurse_locinput)
        Safefree(r->recurse_locinput);
}

/*  reg_temp_copy()

    Copy ssv to dsv, both of which should be of type SVt_REGEXP or
    SVt_PVLV, except that dsv will be created if NULL.

    This function is used in two main ways. First to implement

        $r = qr/....; $s = $$r;

    Secondly, it is used as a hacky workaround to the structural issue of
    match results being stored in the regexp structure which is in turn
    stored in PL_curpm/PL_reg_curpm. The problem is that due to qr// the
    pattern could be PL_curpm in multiple contexts, and could require
    multiple result sets being associated with the pattern simultaneously,
    such as when doing a recursive match with (??{$qr}).

    The solution is to make a lightweight copy of the regexp structure
    when a qr// is returned from the code executed by (??{$qr}); this
    lightweight copy doesn't actually own any of its data except for
    the starp/end and the actual regexp structure itself.

*/

REGEXP *
Perl_reg_temp_copy(pTHX_ REGEXP *dsv, REGEXP *ssv)
{
    struct regexp *drx;
    struct regexp *const srx = ReANY(ssv);
    const bool islv = dsv && SvTYPE(dsv) == SVt_PVLV;

    PERL_ARGS_ASSERT_REG_TEMP_COPY;

    if (!dsv)
        dsv = (REGEXP*) newSV_type(SVt_REGEXP);
    else {
        SvOK_off((SV *)dsv);
        if (islv) {
            /* For PVLVs, the head (sv_any) points to an XPVLV, while
             * the LV's xpvlenu_rx will point to a regexp body, which
             * we allocate here */
            REGEXP *temp = (REGEXP *)newSV_type(SVt_REGEXP);
            assert(!SvPVX(dsv));
            ((XPV*)SvANY(dsv))->xpv_len_u.xpvlenu_rx = temp->sv_any;
            temp->sv_any = NULL;
            SvFLAGS(temp) = (SvFLAGS(temp) & ~SVTYPEMASK) | SVt_NULL;
            SvREFCNT_dec_NN(temp);
            /* SvCUR still resides in the xpvlv struct, so the regexp
               copying below will not set it. */
            SvCUR_set(dsv, SvCUR(ssv));
        }
    }
    /* This ensures that SvTHINKFIRST(sv) is true, and hence that
       sv_force_normal(sv) is called.  */
    SvFAKE_on(dsv);
    drx = ReANY(dsv);

    SvFLAGS(dsv) |= SvFLAGS(ssv) & (SVf_POK|SVp_POK|SVf_UTF8);
    SvPV_set(dsv, RX_WRAPPED(ssv));
    /* We share the same string buffer as the original regexp, on which we
       hold a reference count, incremented when mother_re is set below.
       The string pointer is copied here, being part of the regexp struct.
     */
    memcpy(&(drx->xpv_cur), &(srx->xpv_cur),
           sizeof(regexp) - STRUCT_OFFSET(regexp, xpv_cur));
    if (!islv)
        SvLEN_set(dsv, 0);
    if (srx->offs) {
        const I32 npar = srx->nparens+1;
        Newx(drx->offs, npar, regexp_paren_pair);
        Copy(srx->offs, drx->offs, npar, regexp_paren_pair);
    }
    if (srx->substrs) {
        int i;
        Newx(drx->substrs, 1, struct reg_substr_data);
        StructCopy(srx->substrs, drx->substrs, struct reg_substr_data);

        for (i = 0; i < 2; i++) {
            SvREFCNT_inc_void(drx->substrs->data[i].substr);
            SvREFCNT_inc_void(drx->substrs->data[i].utf8_substr);
        }

        /* check_substr and check_utf8, if non-NULL, point to either their
           anchored or float namesakes, and don't hold a second reference.
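           (Perl_re_dup_guts() relies on this aliasing: rather than
           duplicating check_substr/check_utf8 it works out which namesake
           each pointed at and re-aliases them in the copy.)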
         */
    }
    RX_MATCH_COPIED_off(dsv);
#ifdef PERL_ANY_COW
    drx->saved_copy = NULL;
#endif
    drx->mother_re = ReREFCNT_inc(srx->mother_re ? srx->mother_re : ssv);
    SvREFCNT_inc_void(drx->qr_anoncv);
    if (srx->recurse_locinput)
        Newxz(drx->recurse_locinput,srx->nparens + 1,char *);

    return dsv;
}
#endif

/* regfree_internal()

   Free the private data in a regexp. This is overloadable by
   extensions. Perl takes care of the regexp structure in pregfree(),
   this covers the *pprivate pointer which technically perl doesn't
   know about, however of course we have to handle the
   regexp_internal structure when no extension is in use.

   Note this is called before freeing anything in the regexp
   structure.
 */

void
Perl_regfree_internal(pTHX_ REGEXP * const rx)
{
    struct regexp *const r = ReANY(rx);
    RXi_GET_DECL(r,ri);
    GET_RE_DEBUG_FLAGS_DECL;

    PERL_ARGS_ASSERT_REGFREE_INTERNAL;

    DEBUG_COMPILE_r({
        if (!PL_colorset)
            reginitcolors();
        {
            SV *dsv= sv_newmortal();
            RE_PV_QUOTED_DECL(s, RX_UTF8(rx),
                dsv, RX_PRECOMP(rx), RX_PRELEN(rx), PL_dump_re_max_len);
            Perl_re_printf( aTHX_ "%sFreeing REx:%s %s\n",
                PL_colors[4],PL_colors[5],s);
        }
    });

#ifdef RE_TRACK_PATTERN_OFFSETS
    if (ri->u.offsets)
        Safefree(ri->u.offsets);             /* 20010421 MJD */
#endif
    if (ri->code_blocks)
        S_free_codeblocks(aTHX_ ri->code_blocks);

    if (ri->data) {
        int n = ri->data->count;

        while (--n >= 0) {
          /* If you add a ->what type here, update the comment in regcomp.h */
            switch (ri->data->what[n]) {
            case 'a':
            case 'r':
            case 's':
            case 'S':
            case 'u':
                SvREFCNT_dec(MUTABLE_SV(ri->data->data[n]));
                break;
            case 'f':
                Safefree(ri->data->data[n]);
                break;
            case 'l':
            case 'L':
                break;
            case 'T':
                { /* Aho Corasick add-on structure for a trie node.
                     Used in stclass optimization only */
                    U32 refcount;
                    reg_ac_data *aho=(reg_ac_data*)ri->data->data[n];
#ifdef USE_ITHREADS
                    dVAR;
#endif
                    OP_REFCNT_LOCK;
                    refcount = --aho->refcount;
                    OP_REFCNT_UNLOCK;
                    if ( !refcount ) {
                        PerlMemShared_free(aho->states);
                        PerlMemShared_free(aho->fail);
                        /* do this last!!!! */
                        PerlMemShared_free(ri->data->data[n]);
                        /* we should only ever get called once, so
                         * assert as much, and also guard the free
                         * which /might/ happen twice. At the least
                         * it will make code analyzers happy and it
                         * doesn't cost much. - Yves */
                        assert(ri->regstclass);
                        if (ri->regstclass) {
                            PerlMemShared_free(ri->regstclass);
                            ri->regstclass = 0;
                        }
                    }
                }
                break;
            case 't':
                {
                    /* trie structure. */
                    U32 refcount;
                    reg_trie_data *trie=(reg_trie_data*)ri->data->data[n];
#ifdef USE_ITHREADS
                    dVAR;
#endif
                    OP_REFCNT_LOCK;
                    refcount = --trie->refcount;
                    OP_REFCNT_UNLOCK;
                    if ( !refcount ) {
                        PerlMemShared_free(trie->charmap);
                        PerlMemShared_free(trie->states);
                        PerlMemShared_free(trie->trans);
                        if (trie->bitmap)
                            PerlMemShared_free(trie->bitmap);
                        if (trie->jump)
                            PerlMemShared_free(trie->jump);
                        PerlMemShared_free(trie->wordinfo);
                        /* do this last!!!! */
                        PerlMemShared_free(ri->data->data[n]);
                    }
                }
                break;
            default:
                Perl_croak(aTHX_ "panic: regfree data code '%c'",
                                                    ri->data->what[n]);
            }
        }
        Safefree(ri->data->what);
        Safefree(ri->data);
    }

    Safefree(ri);
}

#define av_dup_inc(s,t) MUTABLE_AV(sv_dup_inc((const SV *)s,t))
#define hv_dup_inc(s,t) MUTABLE_HV(sv_dup_inc((const SV *)s,t))
#define SAVEPVN(p,n) ((p) ? savepvn(p,n) : NULL)

/*
   re_dup_guts - duplicate a regexp.

   This routine is expected to clone a given regexp structure. It is only
   compiled under USE_ITHREADS.

   After all of the core data stored in struct regexp is duplicated
   the regexp_engine.dupe method is used to copy any private data
   stored in the *pprivate pointer. This allows extensions to handle
   any duplication they need to do.
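
   For the core engine the dupe hook is Perl_regdupe_internal(), below.
   Very roughly, the flow during ithreads cloning is:

       sv_dup()                   copies the struct regexp body
         -> Perl_re_dup_guts()    fixes up offs, substrs, paren names, ...
              -> CALLREGDUPE_PVT(dstr,param)
                                  i.e. regexp_engine.dupe, which for the
                                  core engine clones the node program and
                                  the ->data items.
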
See pregfree() and regfree_internal() if you change anything here. */ #if defined(USE_ITHREADS) #ifndef PERL_IN_XSUB_RE void Perl_re_dup_guts(pTHX_ const REGEXP *sstr, REGEXP *dstr, CLONE_PARAMS *param) { dVAR; I32 npar; const struct regexp *r = ReANY(sstr); struct regexp *ret = ReANY(dstr); PERL_ARGS_ASSERT_RE_DUP_GUTS; npar = r->nparens+1; Newx(ret->offs, npar, regexp_paren_pair); Copy(r->offs, ret->offs, npar, regexp_paren_pair); if (ret->substrs) { /* Do it this way to avoid reading from *r after the StructCopy(). That way, if any of the sv_dup_inc()s dislodge *r from the L1 cache, it doesn't matter. */ int i; const bool anchored = r->check_substr ? r->check_substr == r->substrs->data[0].substr : r->check_utf8 == r->substrs->data[0].utf8_substr; Newx(ret->substrs, 1, struct reg_substr_data); StructCopy(r->substrs, ret->substrs, struct reg_substr_data); for (i = 0; i < 2; i++) { ret->substrs->data[i].substr = sv_dup_inc(ret->substrs->data[i].substr, param); ret->substrs->data[i].utf8_substr = sv_dup_inc(ret->substrs->data[i].utf8_substr, param); } /* check_substr and check_utf8, if non-NULL, point to either their anchored or float namesakes, and don't hold a second reference. */ if (ret->check_substr) { if (anchored) { assert(r->check_utf8 == r->substrs->data[0].utf8_substr); ret->check_substr = ret->substrs->data[0].substr; ret->check_utf8 = ret->substrs->data[0].utf8_substr; } else { assert(r->check_substr == r->substrs->data[1].substr); assert(r->check_utf8 == r->substrs->data[1].utf8_substr); ret->check_substr = ret->substrs->data[1].substr; ret->check_utf8 = ret->substrs->data[1].utf8_substr; } } else if (ret->check_utf8) { if (anchored) { ret->check_utf8 = ret->substrs->data[0].utf8_substr; } else { ret->check_utf8 = ret->substrs->data[1].utf8_substr; } } } RXp_PAREN_NAMES(ret) = hv_dup_inc(RXp_PAREN_NAMES(ret), param); ret->qr_anoncv = MUTABLE_CV(sv_dup_inc((const SV *)ret->qr_anoncv, param)); if (r->recurse_locinput) Newxz(ret->recurse_locinput,r->nparens + 1,char *); if (ret->pprivate) RXi_SET(ret,CALLREGDUPE_PVT(dstr,param)); if (RX_MATCH_COPIED(dstr)) ret->subbeg = SAVEPVN(ret->subbeg, ret->sublen); else ret->subbeg = NULL; #ifdef PERL_ANY_COW ret->saved_copy = NULL; #endif /* Whether mother_re be set or no, we need to copy the string. We cannot refrain from copying it when the storage points directly to our mother regexp, because that's 1: a buffer in a different thread 2: something we no longer hold a reference on so we need to copy it locally. */ RX_WRAPPED(dstr) = SAVEPVN(RX_WRAPPED_const(sstr), SvCUR(sstr)+1); ret->mother_re = NULL; } #endif /* PERL_IN_XSUB_RE */ /* regdupe_internal() This is the internal complement to regdupe() which is used to copy the structure pointed to by the *pprivate pointer in the regexp. This is the core version of the extension overridable cloning hook. The regexp structure being duplicated will be copied by perl prior to this and will be provided as the regexp *r argument, however with the /old/ structures pprivate pointer value. Thus this routine may override any copying normally done by perl. It returns a pointer to the new regexp_internal structure. 
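
   The ->data items are cloned with the same per-type rules that
   regfree_internal() uses to free them: 'a', 'r', 's', 'S' and 'u'
   entries are SVs and get sv_dup_inc(); 'f' (the synthetic start class)
   is struct-copied; 't' and 'T' (trie and Aho-Corasick tables) live in
   shared memory and only have their reference counts bumped; 'l' and 'L'
   code entries are shared as-is.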
*/ void * Perl_regdupe_internal(pTHX_ REGEXP * const rx, CLONE_PARAMS *param) { dVAR; struct regexp *const r = ReANY(rx); regexp_internal *reti; int len; RXi_GET_DECL(r,ri); PERL_ARGS_ASSERT_REGDUPE_INTERNAL; len = ProgLen(ri); Newxc(reti, sizeof(regexp_internal) + len*sizeof(regnode), char, regexp_internal); Copy(ri->program, reti->program, len+1, regnode); if (ri->code_blocks) { int n; Newx(reti->code_blocks, 1, struct reg_code_blocks); Newx(reti->code_blocks->cb, ri->code_blocks->count, struct reg_code_block); Copy(ri->code_blocks->cb, reti->code_blocks->cb, ri->code_blocks->count, struct reg_code_block); for (n = 0; n < ri->code_blocks->count; n++) reti->code_blocks->cb[n].src_regex = (REGEXP*) sv_dup_inc((SV*)(ri->code_blocks->cb[n].src_regex), param); reti->code_blocks->count = ri->code_blocks->count; reti->code_blocks->refcnt = 1; } else reti->code_blocks = NULL; reti->regstclass = NULL; if (ri->data) { struct reg_data *d; const int count = ri->data->count; int i; Newxc(d, sizeof(struct reg_data) + count*sizeof(void *), char, struct reg_data); Newx(d->what, count, U8); d->count = count; for (i = 0; i < count; i++) { d->what[i] = ri->data->what[i]; switch (d->what[i]) { /* see also regcomp.h and regfree_internal() */ case 'a': /* actually an AV, but the dup function is identical. values seem to be "plain sv's" generally. */ case 'r': /* a compiled regex (but still just another SV) */ case 's': /* an RV (currently only used for an RV to an AV by the ANYOF code) this use case should go away, the code could have used 'a' instead - see S_set_ANYOF_arg() for array contents. */ case 'S': /* actually an SV, but the dup function is identical. */ case 'u': /* actually an HV, but the dup function is identical. values are "plain sv's" */ d->data[i] = sv_dup_inc((const SV *)ri->data->data[i], param); break; case 'f': /* Synthetic Start Class - "Fake" charclass we generate to optimize * patterns which could start with several different things. Pre-TRIE * this was more important than it is now, however this still helps * in some places, for instance /x?a+/ might produce a SSC equivalent * to [xa]. This is used by Perl_re_intuit_start() and S_find_byclass() * in regexec.c */ /* This is cheating. */ Newx(d->data[i], 1, regnode_ssc); StructCopy(ri->data->data[i], d->data[i], regnode_ssc); reti->regstclass = (regnode*)d->data[i]; break; case 'T': /* AHO-CORASICK fail table */ /* Trie stclasses are readonly and can thus be shared * without duplication. We free the stclass in pregfree * when the corresponding reg_ac_data struct is freed. */ reti->regstclass= ri->regstclass; /* FALLTHROUGH */ case 't': /* TRIE transition table */ OP_REFCNT_LOCK; ((reg_trie_data*)ri->data->data[i])->refcount++; OP_REFCNT_UNLOCK; /* FALLTHROUGH */ case 'l': /* (?{...}) or (??{ ... 
}) code (cb->block) */ case 'L': /* same when RExC_pm_flags & PMf_HAS_CV and code is not from another regexp */ d->data[i] = ri->data->data[i]; break; default: Perl_croak(aTHX_ "panic: re_dup_guts unknown data code '%c'", ri->data->what[i]); } } reti->data = d; } else reti->data = NULL; reti->name_list_idx = ri->name_list_idx; #ifdef RE_TRACK_PATTERN_OFFSETS if (ri->u.offsets) { Newx(reti->u.offsets, 2*len+1, U32); Copy(ri->u.offsets, reti->u.offsets, 2*len+1, U32); } #else SetProgLen(reti,len); #endif return (void*)reti; } #endif /* USE_ITHREADS */ #ifndef PERL_IN_XSUB_RE /* - regnext - dig the "next" pointer out of a node */ regnode * Perl_regnext(pTHX_ regnode *p) { I32 offset; if (!p) return(NULL); if (OP(p) > REGNODE_MAX) { /* regnode.type is unsigned */ Perl_croak(aTHX_ "Corrupted regexp opcode %d > %d", (int)OP(p), (int)REGNODE_MAX); } offset = (reg_off_by_arg[OP(p)] ? ARG(p) : NEXT_OFF(p)); if (offset == 0) return(NULL); return(p+offset); } #endif STATIC void S_re_croak2(pTHX_ bool utf8, const char* pat1,const char* pat2,...) { va_list args; STRLEN l1 = strlen(pat1); STRLEN l2 = strlen(pat2); char buf[512]; SV *msv; const char *message; PERL_ARGS_ASSERT_RE_CROAK2; if (l1 > 510) l1 = 510; if (l1 + l2 > 510) l2 = 510 - l1; Copy(pat1, buf, l1 , char); Copy(pat2, buf + l1, l2 , char); buf[l1 + l2] = '\n'; buf[l1 + l2 + 1] = '\0'; va_start(args, pat2); msv = vmess(buf, &args); va_end(args); message = SvPV_const(msv,l1); if (l1 > 512) l1 = 512; Copy(message, buf, l1 , char); /* l1-1 to avoid \n */ Perl_croak(aTHX_ "%" UTF8f, UTF8fARG(utf8, l1-1, buf)); } /* XXX Here's a total kludge. But we need to re-enter for swash routines. */ #ifndef PERL_IN_XSUB_RE void Perl_save_re_context(pTHX) { I32 nparens = -1; I32 i; /* Save $1..$n (#18107: UTF-8 s/(\w+)/uc($1)/e); AMS 20021106. */ if (PL_curpm) { const REGEXP * const rx = PM_GETRE(PL_curpm); if (rx) nparens = RX_NPARENS(rx); } /* RT #124109. This is a complete hack; in the SWASHNEW case we know * that PL_curpm will be null, but that utf8.pm and the modules it * loads will only use $1..$3. * The t/porting/re_context.t test file checks this assumption. */ if (nparens == -1) nparens = 3; for (i = 1; i <= nparens; i++) { char digits[TYPE_CHARS(long)]; const STRLEN len = my_snprintf(digits, sizeof(digits), "%lu", (long)i); GV *const *const gvp = (GV**)hv_fetch(PL_defstash, digits, len, 0); if (gvp) { GV * const gv = *gvp; if (SvTYPE(gv) == SVt_PVGV && GvSV(gv)) save_scalar(gv); } } } #endif #ifdef DEBUGGING STATIC void S_put_code_point(pTHX_ SV *sv, UV c) { PERL_ARGS_ASSERT_PUT_CODE_POINT; if (c > 255) { Perl_sv_catpvf(aTHX_ sv, "\\x{%04" UVXf "}", c); } else if (isPRINT(c)) { const char string = (char) c; /* We use {phrase} as metanotation in the class, so also escape literal * braces */ if (isBACKSLASHED_PUNCT(c) || c == '{' || c == '}') sv_catpvs(sv, "\\"); sv_catpvn(sv, &string, 1); } else if (isMNEMONIC_CNTRL(c)) { Perl_sv_catpvf(aTHX_ sv, "%s", cntrl_to_mnemonic((U8) c)); } else { Perl_sv_catpvf(aTHX_ sv, "\\x%02X", (U8) c); } } #define MAX_PRINT_A MAX_PRINT_A_FOR_USE_ONLY_BY_REGCOMP_DOT_C STATIC void S_put_range(pTHX_ SV *sv, UV start, const UV end, const bool allow_literals) { /* Appends to 'sv' a displayable version of the range of code points from * 'start' to 'end'. Mnemonics (like '\r') are used for the few controls * that have them, when they occur at the beginning or end of the range. 
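 * (Roughly: 'a' through 'z' comes out as "a-z"; a range of fewer than
 * three code points is printed as the individual characters; and a run of
 * controls ending in a carriage return would end in "\r".)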
* It uses hex to output the remaining code points, unless 'allow_literals' * is true, in which case the printable ASCII ones are output as-is (though * some of these will be escaped by put_code_point()). * * NOTE: This is designed only for printing ranges of code points that fit * inside an ANYOF bitmap. Higher code points are simply suppressed */ const unsigned int min_range_count = 3; assert(start <= end); PERL_ARGS_ASSERT_PUT_RANGE; while (start <= end) { UV this_end; const char * format; if (end - start < min_range_count) { /* Output chars individually when they occur in short ranges */ for (; start <= end; start++) { put_code_point(sv, start); } break; } /* If permitted by the input options, and there is a possibility that * this range contains a printable literal, look to see if there is * one. */ if (allow_literals && start <= MAX_PRINT_A) { /* If the character at the beginning of the range isn't an ASCII * printable, effectively split the range into two parts: * 1) the portion before the first such printable, * 2) the rest * and output them separately. */ if (! isPRINT_A(start)) { UV temp_end = start + 1; /* There is no point looking beyond the final possible * printable, in MAX_PRINT_A */ UV max = MIN(end, MAX_PRINT_A); while (temp_end <= max && ! isPRINT_A(temp_end)) { temp_end++; } /* Here, temp_end points to one beyond the first printable if * found, or to one beyond 'max' if not. If none found, make * sure that we use the entire range */ if (temp_end > MAX_PRINT_A) { temp_end = end + 1; } /* Output the first part of the split range: the part that * doesn't have printables, with the parameter set to not look * for literals (otherwise we would infinitely recurse) */ put_range(sv, start, temp_end - 1, FALSE); /* The 2nd part of the range (if any) starts here. */ start = temp_end; /* We do a continue, instead of dropping down, because even if * the 2nd part is non-empty, it could be so short that we want * to output it as individual characters, as tested for at the * top of this loop. */ continue; } /* Here, 'start' is a printable ASCII. If it is an alphanumeric, * output a sub-range of just the digits or letters, then process * the remaining portion as usual. */ if (isALPHANUMERIC_A(start)) { UV mask = (isDIGIT_A(start)) ? _CC_DIGIT : isUPPER_A(start) ? _CC_UPPER : _CC_LOWER; UV temp_end = start + 1; /* Find the end of the sub-range that includes just the * characters in the same class as the first character in it */ while (temp_end <= end && _generic_isCC_A(temp_end, mask)) { temp_end++; } temp_end--; /* For short ranges, don't duplicate the code above to output * them; just call recursively */ if (temp_end - start < min_range_count) { put_range(sv, start, temp_end, FALSE); } else { /* Output as a range */ put_code_point(sv, start); sv_catpvs(sv, "-"); put_code_point(sv, temp_end); } start = temp_end + 1; continue; } /* We output any other printables as individual characters */ if (isPUNCT_A(start) || isSPACE_A(start)) { while (start <= end && (isPUNCT_A(start) || isSPACE_A(start))) { put_code_point(sv, start); start++; } continue; } } /* End of looking for literals */ /* Here is not to output as a literal. Some control characters have * mnemonic names. Split off any of those at the beginning and end of * the range to print mnemonically. 
It isn't possible for many of * these to be in a row, so this won't overwhelm with output */ if ( start <= end && (isMNEMONIC_CNTRL(start) || isMNEMONIC_CNTRL(end))) { while (isMNEMONIC_CNTRL(start) && start <= end) { put_code_point(sv, start); start++; } /* If this didn't take care of the whole range ... */ if (start <= end) { /* Look backwards from the end to find the final non-mnemonic * */ UV temp_end = end; while (isMNEMONIC_CNTRL(temp_end)) { temp_end--; } /* And separately output the interior range that doesn't start * or end with mnemonics */ put_range(sv, start, temp_end, FALSE); /* Then output the mnemonic trailing controls */ start = temp_end + 1; while (start <= end) { put_code_point(sv, start); start++; } break; } } /* As a final resort, output the range or subrange as hex. */ this_end = (end < NUM_ANYOF_CODE_POINTS) ? end : NUM_ANYOF_CODE_POINTS - 1; #if NUM_ANYOF_CODE_POINTS > 256 format = (this_end < 256) ? "\\x%02" UVXf "-\\x%02" UVXf : "\\x{%04" UVXf "}-\\x{%04" UVXf "}"; #else format = "\\x%02" UVXf "-\\x%02" UVXf; #endif GCC_DIAG_IGNORE(-Wformat-nonliteral); Perl_sv_catpvf(aTHX_ sv, format, start, this_end); GCC_DIAG_RESTORE; break; } } STATIC void S_put_charclass_bitmap_innards_invlist(pTHX_ SV *sv, SV* invlist) { /* Concatenate onto the PV in 'sv' a displayable form of the inversion list * 'invlist' */ UV start, end; bool allow_literals = TRUE; PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS_INVLIST; /* Generally, it is more readable if printable characters are output as * literals, but if a range (nearly) spans all of them, it's best to output * it as a single range. This code will use a single range if all but 2 * ASCII printables are in it */ invlist_iterinit(invlist); while (invlist_iternext(invlist, &start, &end)) { /* If the range starts beyond the final printable, it doesn't have any * in it */ if (start > MAX_PRINT_A) { break; } /* In both ASCII and EBCDIC, a SPACE is the lowest printable. To span * all but two, the range must start and end no later than 2 from * either end */ if (start < ' ' + 2 && end > MAX_PRINT_A - 2) { if (end > MAX_PRINT_A) { end = MAX_PRINT_A; } if (start < ' ') { start = ' '; } if (end - start >= MAX_PRINT_A - ' ' - 2) { allow_literals = FALSE; } break; } } invlist_iterfinish(invlist); /* Here we have figured things out. Output each range */ invlist_iterinit(invlist); while (invlist_iternext(invlist, &start, &end)) { if (start >= NUM_ANYOF_CODE_POINTS) { break; } put_range(sv, start, end, allow_literals); } invlist_iterfinish(invlist); return; } STATIC SV* S_put_charclass_bitmap_innards_common(pTHX_ SV* invlist, /* The bitmap */ SV* posixes, /* Under /l, things like [:word:], \S */ SV* only_utf8, /* Under /d, matches iff the target is UTF-8 */ SV* not_utf8, /* /d, matches iff the target isn't UTF-8 */ SV* only_utf8_locale, /* Under /l, matches if the locale is UTF-8 */ const bool invert /* Is the result to be inverted? */ ) { /* Create and return an SV containing a displayable version of the bitmap * and associated information determined by the input parameters. If the * output would have been only the inversion indicator '^', NULL is instead * returned. 
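 *
 * As a made-up illustration, an inverted class might be displayed
 * something like
 *      ^abc{utf8}\x{E9}
 * i.e. a leading '^' for the inversion, then the unconditional matches,
 * then each conditional group introduced by its {...} marker.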
*/ SV * output; PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS_COMMON; if (invert) { output = newSVpvs("^"); } else { output = newSVpvs(""); } /* First, the code points in the bitmap that are unconditionally there */ put_charclass_bitmap_innards_invlist(output, invlist); /* Traditionally, these have been placed after the main code points */ if (posixes) { sv_catsv(output, posixes); } if (only_utf8 && _invlist_len(only_utf8)) { Perl_sv_catpvf(aTHX_ output, "%s{utf8}%s", PL_colors[1], PL_colors[0]); put_charclass_bitmap_innards_invlist(output, only_utf8); } if (not_utf8 && _invlist_len(not_utf8)) { Perl_sv_catpvf(aTHX_ output, "%s{not utf8}%s", PL_colors[1], PL_colors[0]); put_charclass_bitmap_innards_invlist(output, not_utf8); } if (only_utf8_locale && _invlist_len(only_utf8_locale)) { Perl_sv_catpvf(aTHX_ output, "%s{utf8 locale}%s", PL_colors[1], PL_colors[0]); put_charclass_bitmap_innards_invlist(output, only_utf8_locale); /* This is the only list in this routine that can legally contain code * points outside the bitmap range. The call just above to * 'put_charclass_bitmap_innards_invlist' will simply suppress them, so * output them here. There's about a half-dozen possible, and none in * contiguous ranges longer than 2 */ if (invlist_highest(only_utf8_locale) >= NUM_ANYOF_CODE_POINTS) { UV start, end; SV* above_bitmap = NULL; _invlist_subtract(only_utf8_locale, PL_InBitmap, &above_bitmap); invlist_iterinit(above_bitmap); while (invlist_iternext(above_bitmap, &start, &end)) { UV i; for (i = start; i <= end; i++) { put_code_point(output, i); } } invlist_iterfinish(above_bitmap); SvREFCNT_dec_NN(above_bitmap); } } if (invert && SvCUR(output) == 1) { return NULL; } return output; } STATIC bool S_put_charclass_bitmap_innards(pTHX_ SV *sv, char *bitmap, SV *nonbitmap_invlist, SV *only_utf8_locale_invlist, const regnode * const node, const bool force_as_is_display) { /* Appends to 'sv' a displayable version of the innards of the bracketed * character class defined by the other arguments: * 'bitmap' points to the bitmap. * 'nonbitmap_invlist' is an inversion list of the code points that are in * the bitmap range, but for some reason aren't in the bitmap; NULL if * none. The reasons for this could be that they require some * condition such as the target string being or not being in UTF-8 * (under /d), or because they came from a user-defined property that * was not resolved at the time of the regex compilation (under /u) * 'only_utf8_locale_invlist' is an inversion list of the code points that * are valid only if the runtime locale is a UTF-8 one; NULL if none * 'node' is the regex pattern node. It is needed only when the above two * parameters are not null, and is passed so that this routine can * tease apart the various reasons for them. * 'force_as_is_display' is TRUE if this routine should definitely NOT try * to invert things to see if that leads to a cleaner display. If * FALSE, this routine is free to use its judgment about doing this. * * It returns TRUE if there was actually something output. (It may be that * the bitmap, etc is empty.) * * When called for outputting the bitmap of a non-ANYOF node, just pass the * bitmap, with the succeeding parameters set to NULL, and the final one to * FALSE. */ /* In general, it tries to display the 'cleanest' representation of the * innards, choosing whether to display them inverted or not, regardless of * whether the class itself is to be inverted. 
However, there are some * cases where it can't try inverting, as what actually matches isn't known * until runtime, and hence the inversion isn't either. */ bool inverting_allowed = ! force_as_is_display; int i; STRLEN orig_sv_cur = SvCUR(sv); SV* invlist; /* Inversion list we accumulate of code points that are unconditionally matched */ SV* only_utf8 = NULL; /* Under /d, list of matches iff the target is UTF-8 */ SV* not_utf8 = NULL; /* /d, list of matches iff the target isn't UTF-8 */ SV* posixes = NULL; /* Under /l, string of things like [:word:], \D */ SV* only_utf8_locale = NULL; /* Under /l, list of matches if the locale is UTF-8 */ SV* as_is_display; /* The output string when we take the inputs literally */ SV* inverted_display; /* The output string when we invert the inputs */ U8 flags = (node) ? ANYOF_FLAGS(node) : 0; bool invert = cBOOL(flags & ANYOF_INVERT); /* Is the input to be inverted to match? */ /* We are biased in favor of displaying things without them being inverted, * as that is generally easier to understand */ const int bias = 5; PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS; /* Start off with whatever code points are passed in. (We clone, so we * don't change the caller's list) */ if (nonbitmap_invlist) { assert(invlist_highest(nonbitmap_invlist) < NUM_ANYOF_CODE_POINTS); invlist = invlist_clone(nonbitmap_invlist); } else { /* Worst case size is every other code point is matched */ invlist = _new_invlist(NUM_ANYOF_CODE_POINTS / 2); } if (flags) { if (OP(node) == ANYOFD) { /* This flag indicates that the code points below 0x100 in the * nonbitmap list are precisely the ones that match only when the * target is UTF-8 (they should all be non-ASCII). */ if (flags & ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP) { _invlist_intersection(invlist, PL_UpperLatin1, &only_utf8); _invlist_subtract(invlist, only_utf8, &invlist); } /* And this flag for matching all non-ASCII 0xFF and below */ if (flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER) { not_utf8 = invlist_clone(PL_UpperLatin1); } } else if (OP(node) == ANYOFL) { /* If either of these flags are set, what matches isn't * determinable except during execution, so don't know enough here * to invert */ if (flags & (ANYOFL_FOLD|ANYOF_MATCHES_POSIXL)) { inverting_allowed = FALSE; } /* What the posix classes match also varies at runtime, so these * will be output symbolically. */ if (ANYOF_POSIXL_TEST_ANY_SET(node)) { int i; posixes = newSVpvs(""); for (i = 0; i < ANYOF_POSIXL_MAX; i++) { if (ANYOF_POSIXL_TEST(node,i)) { sv_catpv(posixes, anyofs[i]); } } } } } /* Accumulate the bit map into the unconditional match list */ for (i = 0; i < NUM_ANYOF_CODE_POINTS; i++) { if (BITMAP_TEST(bitmap, i)) { int start = i++; for (; i < NUM_ANYOF_CODE_POINTS && BITMAP_TEST(bitmap, i); i++) { /* empty */ } invlist = _add_range_to_invlist(invlist, start, i-1); } } /* Make sure that the conditional match lists don't have anything in them * that match unconditionally; otherwise the output is quite confusing. * This could happen if the code that populates these misses some * duplication. 
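 * (For example, if a code point were both in the bitmap and in
 * 'only_utf8', it would otherwise be displayed twice, once plain and once
 * under the {utf8} marker; the subtractions below make the conditional
 * lists disjoint from 'invlist'.)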
     */
    if (only_utf8) {
        _invlist_subtract(only_utf8, invlist, &only_utf8);
    }

    if (not_utf8) {
        _invlist_subtract(not_utf8, invlist, &not_utf8);
    }

    if (only_utf8_locale_invlist) {

        /* Since this list is passed in, we have to make a copy before
         * modifying it */
        only_utf8_locale = invlist_clone(only_utf8_locale_invlist);
        _invlist_subtract(only_utf8_locale, invlist, &only_utf8_locale);

        /* And, it can get really weird for us to try outputting an inverted
         * form of this list when it has things above the bitmap, so don't
         * even try */
        if (invlist_highest(only_utf8_locale) >= NUM_ANYOF_CODE_POINTS) {
            inverting_allowed = FALSE;
        }
    }

    /* Calculate what the output would be if we take the input as-is */
    as_is_display = put_charclass_bitmap_innards_common(invlist,
                                                        posixes,
                                                        only_utf8,
                                                        not_utf8,
                                                        only_utf8_locale,
                                                        invert);

    /* If have to take the output as-is, just do that */
    if (! inverting_allowed) {
        if (as_is_display) {
            sv_catsv(sv, as_is_display);
            SvREFCNT_dec_NN(as_is_display);
        }
    }
    else { /* But otherwise, create the output again on the inverted input,
              and use whichever version is shorter */

        int inverted_bias, as_is_bias;

        /* We will apply our bias to whichever of the results doesn't have
         * the '^' */
        if (invert) {
            invert = FALSE;
            as_is_bias = bias;
            inverted_bias = 0;
        }
        else {
            invert = TRUE;
            as_is_bias = 0;
            inverted_bias = bias;
        }

        /* Now invert each of the lists that contribute to the output,
         * excluding from the result things outside the possible range */

        /* For the unconditional inversion list, we have to add in all the
         * conditional code points, so that when inverted, they will be gone
         * from it */
        _invlist_union(only_utf8, invlist, &invlist);
        _invlist_union(not_utf8, invlist, &invlist);
        _invlist_union(only_utf8_locale, invlist, &invlist);
        _invlist_invert(invlist);
        _invlist_intersection(invlist, PL_InBitmap, &invlist);

        if (only_utf8) {
            _invlist_invert(only_utf8);
            _invlist_intersection(only_utf8, PL_UpperLatin1, &only_utf8);
        }
        else if (not_utf8) {

            /* If a code point matches iff the target string is not in UTF-8,
             * then complementing the result has it not match iff not in
             * UTF-8, which is the same thing as matching iff it is UTF-8. */
            only_utf8 = not_utf8;
            not_utf8 = NULL;
        }

        if (only_utf8_locale) {
            _invlist_invert(only_utf8_locale);
            _invlist_intersection(only_utf8_locale,
                                  PL_InBitmap,
                                  &only_utf8_locale);
        }

        inverted_display = put_charclass_bitmap_innards_common(
                                            invlist,
                                            posixes,
                                            only_utf8,
                                            not_utf8,
                                            only_utf8_locale,
                                            invert);

        /* Use the shortest representation, taking into account our bias
         * against showing it inverted */
        if (   inverted_display
            && (   ! as_is_display
                || (  SvCUR(inverted_display) + inverted_bias
                    < SvCUR(as_is_display) + as_is_bias)))
        {
            sv_catsv(sv, inverted_display);
        }
        else if (as_is_display) {
            sv_catsv(sv, as_is_display);
        }

        SvREFCNT_dec(as_is_display);
        SvREFCNT_dec(inverted_display);
    }

    SvREFCNT_dec_NN(invlist);
    SvREFCNT_dec(only_utf8);
    SvREFCNT_dec(not_utf8);
    SvREFCNT_dec(posixes);
    SvREFCNT_dec(only_utf8_locale);

    return SvCUR(sv) > orig_sv_cur;
}

#define CLEAR_OPTSTART                                                       \
    if (optstart) STMT_START {                                               \
        DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_                               \
                          " (%" IVdf " nodes)\n", (IV)(node - optstart)));   \
        optstart=NULL;                                                       \
    } STMT_END

#define DUMPUNTIL(b,e)                                                       \
                    CLEAR_OPTSTART;                                          \
                    node=dumpuntil(r,start,(b),(e),last,sv,indent+1,depth+1);

STATIC const regnode *
S_dumpuntil(pTHX_ const regexp *r, const regnode *start, const regnode *node,
            const regnode *last, const regnode *plast,
            SV* sv, I32 indent, U32 depth)
{
    U8 op = PSEUDO;     /* Arbitrary non-END op.
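                           PSEUDO just primes the while() test below; it
                           is overwritten by OP(node) on the first
                           iteration.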
*/ const regnode *next; const regnode *optstart= NULL; RXi_GET_DECL(r,ri); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_DUMPUNTIL; #ifdef DEBUG_DUMPUNTIL Perl_re_printf( aTHX_ "--- %d : %d - %d - %d\n",indent,node-start, last ? last-start : 0,plast ? plast-start : 0); #endif if (plast && plast < last) last= plast; while (PL_regkind[op] != END && (!last || node < last)) { assert(node); /* While that wasn't END last time... */ NODE_ALIGN(node); op = OP(node); if (op == CLOSE || op == WHILEM) indent--; next = regnext((regnode *)node); /* Where, what. */ if (OP(node) == OPTIMIZED) { if (!optstart && RE_DEBUG_FLAG(RE_DEBUG_COMPILE_OPTIMISE)) optstart = node; else goto after_print; } else CLEAR_OPTSTART; regprop(r, sv, node, NULL, NULL); Perl_re_printf( aTHX_ "%4" IVdf ":%*s%s", (IV)(node - start), (int)(2*indent + 1), "", SvPVX_const(sv)); if (OP(node) != OPTIMIZED) { if (next == NULL) /* Next ptr. */ Perl_re_printf( aTHX_ " (0)"); else if (PL_regkind[(U8)op] == BRANCH && PL_regkind[OP(next)] != BRANCH ) Perl_re_printf( aTHX_ " (FAIL)"); else Perl_re_printf( aTHX_ " (%" IVdf ")", (IV)(next - start)); Perl_re_printf( aTHX_ "\n"); } after_print: if (PL_regkind[(U8)op] == BRANCHJ) { assert(next); { const regnode *nnode = (OP(next) == LONGJMP ? regnext((regnode *)next) : next); if (last && nnode > last) nnode = last; DUMPUNTIL(NEXTOPER(NEXTOPER(node)), nnode); } } else if (PL_regkind[(U8)op] == BRANCH) { assert(next); DUMPUNTIL(NEXTOPER(node), next); } else if ( PL_regkind[(U8)op] == TRIE ) { const regnode *this_trie = node; const char op = OP(node); const U32 n = ARG(node); const reg_ac_data * const ac = op>=AHOCORASICK ? (reg_ac_data *)ri->data->data[n] : NULL; const reg_trie_data * const trie = (reg_trie_data*)ri->data->data[op<AHOCORASICK ? n : ac->trie]; #ifdef DEBUGGING AV *const trie_words = MUTABLE_AV(ri->data->data[n + TRIE_WORDS_OFFSET]); #endif const regnode *nextbranch= NULL; I32 word_idx; SvPVCLEAR(sv); for (word_idx= 0; word_idx < (I32)trie->wordcount; word_idx++) { SV ** const elem_ptr = av_fetch(trie_words,word_idx,0); Perl_re_indentf( aTHX_ "%s ", indent+3, elem_ptr ? pv_pretty(sv, SvPV_nolen_const(*elem_ptr), SvCUR(*elem_ptr), PL_dump_re_max_len, PL_colors[0], PL_colors[1], (SvUTF8(*elem_ptr) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_PRETTY_ELLIPSES | PERL_PV_PRETTY_LTGT ) : "???" ); if (trie->jump) { U16 dist= trie->jump[word_idx+1]; Perl_re_printf( aTHX_ "(%" UVuf ")\n", (UV)((dist ? this_trie + dist : next) - start)); if (dist) { if (!nextbranch) nextbranch= this_trie + trie->jump[0]; DUMPUNTIL(this_trie + dist, nextbranch); } if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH) nextbranch= regnext((regnode *)nextbranch); } else { Perl_re_printf( aTHX_ "\n"); } } if (last && next > last) node= last; else node= next; } else if ( op == CURLY ) { /* "next" might be very big: optimizer */ DUMPUNTIL(NEXTOPER(node) + EXTRA_STEP_2ARGS, NEXTOPER(node) + EXTRA_STEP_2ARGS + 1); } else if (PL_regkind[(U8)op] == CURLY && op != CURLYX) { assert(next); DUMPUNTIL(NEXTOPER(node) + EXTRA_STEP_2ARGS, next); } else if ( op == PLUS || op == STAR) { DUMPUNTIL(NEXTOPER(node), NEXTOPER(node) + 1); } else if (PL_regkind[(U8)op] == ANYOF) { /* arglen 1 + class block */ node += 1 + ((ANYOF_FLAGS(node) & ANYOF_MATCHES_POSIXL) ? ANYOF_POSIXL_SKIP : ANYOF_SKIP); node = NEXTOPER(node); } else if (PL_regkind[(U8)op] == EXACT) { /* Literal string, where present. 
*/ node += NODE_SZ_STR(node) - 1; node = NEXTOPER(node); } else { node = NEXTOPER(node); node += regarglen[(U8)op]; } if (op == CURLYX || op == OPEN) indent++; } CLEAR_OPTSTART; #ifdef DEBUG_DUMPUNTIL Perl_re_printf( aTHX_ "--- %d\n", (int)indent); #endif return node; } #endif /* DEBUGGING */ /* * ex: set ts=8 sts=4 sw=4 et: */
/* regcomp.c */

/*
 * 'A fair jaw-cracker dwarf-language must be.'          --Samwise Gamgee
 *
 *     [p.285 of _The Lord of the Rings_, II/iii: "The Ring Goes South"]
 */

/* This file contains functions for compiling a regular expression.  See
 * also regexec.c which, funnily enough, contains functions for executing
 * a regular expression.
 *
 * This file is also copied at build time to ext/re/re_comp.c, where
 * it's built with -DPERL_EXT_RE_BUILD -DPERL_EXT_RE_DEBUG -DPERL_EXT.
 * This causes the main functions to be compiled under new names and with
 * debugging support added, which makes "use re 'debug'" work.
 */

/* NOTE: this is derived from Henry Spencer's regexp code, and should not
 * be confused with the original package (see point 3 below).  Thanks, Henry!
 */

/* Additional note: this code is very heavily munged from Henry's version
 * in places.  In some spots I've traded clarity for efficiency, so don't
 * blame Henry for some of the lack of readability.
 */

/* The names of the functions have been changed from regcomp and
 * regexec to pregcomp and pregexec in order to avoid conflicts
 * with the POSIX routines of the same names.
 */

#ifdef PERL_EXT_RE_BUILD
#include "re_top.h"
#endif

/*
 * pregcomp and pregexec -- regsub and regerror are not used in perl
 *
 *      Copyright (c) 1986 by University of Toronto.
 *      Written by Henry Spencer.  Not derived from licensed software.
 *
 *      Permission is granted to anyone to use this software for any
 *      purpose on any computer system, and to redistribute it freely,
 *      subject to the following restrictions:
 *
 *      1. The author is not responsible for the consequences of use of
 *              this software, no matter how awful, even if they arise
 *              from defects in it.
 *
 *      2. The origin of this software must not be misrepresented, either
 *              by explicit claim or by omission.
 *
 *      3. Altered versions must be plainly marked as such, and must not
 *              be misrepresented as being the original software.
 *
 ****    Alterations to Henry's code are...
 ****
 ****    Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
 ****    2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
 ****    by Larry Wall and others
 ****
 ****    You may distribute under the terms of either the GNU General Public
 ****    License or the Artistic License, as specified in the README file.
 *
 * Beware that some of this code is subtly aware of the way operator
 * precedence is structured in regular expressions.  Serious changes in
 * regular-expression syntax might require a total rethink.
 */

#include "EXTERN.h"
#define PERL_IN_REGCOMP_C
#include "perl.h"

#ifndef PERL_IN_XSUB_RE
#  include "INTERN.h"
#endif

#define REG_COMP_C
#ifdef PERL_IN_XSUB_RE
#  include "re_comp.h"
EXTERN_C const struct regexp_engine my_reg_engine;
#else
#  include "regcomp.h"
#endif

#include "dquote_inline.h"
#include "invlist_inline.h"
#include "unicode_constants.h"

#define HAS_NONLATIN1_FOLD_CLOSURE(i) \
 _HAS_NONLATIN1_FOLD_CLOSURE_ONLY_FOR_USE_BY_REGCOMP_DOT_C_AND_REGEXEC_DOT_C(i)
#define HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(i) \
 _HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE_ONLY_FOR_USE_BY_REGCOMP_DOT_C_AND_REGEXEC_DOT_C(i)
#define IS_NON_FINAL_FOLD(c) _IS_NON_FINAL_FOLD_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c)
#define IS_IN_SOME_FOLD_L1(c) _IS_IN_SOME_FOLD_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c)

#ifndef STATIC
#define STATIC  static
#endif

/* this is a chain of data about sub patterns we are processing that
   need to be handled separately/specially in study_chunk. It's so we
   can simulate recursion without losing state.
 */
struct scan_frame;
typedef struct scan_frame {
    regnode *last_regnode;      /* last node to process in this frame */
    regnode *next_regnode;      /* next node to process when last is reached */
    U32 prev_recursed_depth;
    I32 stopparen;              /* what stopparen do we use */
    U32 is_top_frame;           /* what flags do we use? */

    struct scan_frame *this_prev_frame; /* this previous frame */
    struct scan_frame *prev_frame;      /* previous frame */
    struct scan_frame *next_frame;      /* next frame */
} scan_frame;

/* Certain characters are output as a sequence with the first being a
 * backslash. */
#define isBACKSLASHED_PUNCT(c)  strchr("-[]\\^", c)


struct RExC_state_t {
    U32         flags;                  /* RXf_* are we folding, multilining? */
    U32         pm_flags;               /* PMf_* stuff from the calling PMOP */
    char        *precomp;               /* uncompiled string. */
    char        *precomp_end;           /* pointer to end of uncompiled string. */
    REGEXP      *rx_sv;                 /* The SV that is the regexp. */
    regexp      *rx;                    /* perl core regexp structure */
    regexp_internal     *rxi;           /* internal data for regexp object
                                           pprivate field */
    char        *start;                 /* Start of input for compile */
    char        *end;                   /* End of input for compile */
    char        *parse;                 /* Input-scan pointer. */
    char        *adjusted_start;        /* 'start', adjusted.  See code use */
    STRLEN      precomp_adj;            /* an offset beyond precomp.  See code use */
    SSize_t     whilem_seen;            /* number of WHILEM in this expr */
    regnode     *emit_start;            /* Start of emitted-code area */
    regnode     *emit_bound;            /* First regnode outside of the
                                           allocated space */
    regnode     *emit;                  /* Code-emit pointer; if = &emit_dummy,
                                           implies compiling, so don't emit */
    regnode_ssc emit_dummy;             /* placeholder for emit to point to;
                                           large enough for the largest
                                           non-EXACTish node, so can use it as
                                           scratch in pass1 */
    I32         naughty;                /* How bad is this pattern? */
    I32         sawback;                /* Did we see \1, ...? */
    U32         seen;
    SSize_t     size;                   /* Code size. */
    I32         npar;                   /* Capture buffer count, (OPEN) plus
                                           one. ("par" 0 is the whole
                                           pattern)*/
    I32         nestroot;               /* root parens we are in - used by
                                           accept */
    I32         extralen;
    I32         seen_zerolen;
    regnode     **open_parens;          /* pointers to open parens */
    regnode     **close_parens;         /* pointers to close parens */
    regnode     *end_op;                /* END node in program */
    I32         utf8;           /* whether the pattern is utf8 or not */
    I32         orig_utf8;      /* whether the pattern was originally in utf8 */
                                /* XXX use this for future optimisation of case
                                 * where pattern must be upgraded to utf8. */
    I32         uni_semantics;  /* If a d charset modifier should use unicode
                                   rules, even if the pattern is not in
                                   utf8 */
    HV          *paren_names;           /* Paren names */

    regnode     **recurse;              /* Recurse regops */
    I32         recurse_count;          /* Number of recurse regops we have generated */
    U8          *study_chunk_recursed;  /* bitmap of which subs we have moved
                                           through */
    U32         study_chunk_recursed_bytes;  /* bytes in bitmap */
    I32         in_lookbehind;
    I32         contains_locale;
    I32         override_recoding;
#ifdef EBCDIC
    I32         recode_x_to_native;
#endif
    I32         in_multi_char_class;
    struct reg_code_blocks *code_blocks;/* positions of literal (?{})
                                            within pattern */
    int         code_index;             /* next code_blocks[] slot */
    SSize_t     maxlen;                 /* minimum possible number of chars in string to match */
    scan_frame *frame_head;
    scan_frame *frame_last;
    U32         frame_count;
    AV         *warn_text;
#ifdef ADD_TO_REGEXEC
    char        *starttry;              /* -Dr: where regtry was called.
*/ #define RExC_starttry (pRExC_state->starttry) #endif SV *runtime_code_qr; /* qr with the runtime code blocks */ #ifdef DEBUGGING const char *lastparse; I32 lastnum; AV *paren_name_list; /* idx -> name */ U32 study_chunk_recursed_count; SV *mysv1; SV *mysv2; #define RExC_lastparse (pRExC_state->lastparse) #define RExC_lastnum (pRExC_state->lastnum) #define RExC_paren_name_list (pRExC_state->paren_name_list) #define RExC_study_chunk_recursed_count (pRExC_state->study_chunk_recursed_count) #define RExC_mysv (pRExC_state->mysv1) #define RExC_mysv1 (pRExC_state->mysv1) #define RExC_mysv2 (pRExC_state->mysv2) #endif bool seen_unfolded_sharp_s; bool strict; bool study_started; }; #define RExC_flags (pRExC_state->flags) #define RExC_pm_flags (pRExC_state->pm_flags) #define RExC_precomp (pRExC_state->precomp) #define RExC_precomp_adj (pRExC_state->precomp_adj) #define RExC_adjusted_start (pRExC_state->adjusted_start) #define RExC_precomp_end (pRExC_state->precomp_end) #define RExC_rx_sv (pRExC_state->rx_sv) #define RExC_rx (pRExC_state->rx) #define RExC_rxi (pRExC_state->rxi) #define RExC_start (pRExC_state->start) #define RExC_end (pRExC_state->end) #define RExC_parse (pRExC_state->parse) #define RExC_whilem_seen (pRExC_state->whilem_seen) /* Set during the sizing pass when there is a LATIN SMALL LETTER SHARP S in any * EXACTF node, hence was parsed under /di rules. If later in the parse, * something forces the pattern into using /ui rules, the sharp s should be * folded into the sequence 'ss', which takes up more space than previously * calculated. This means that the sizing pass needs to be restarted. (The * node also becomes an EXACTFU_SS.) For all other characters, an EXACTF node * that gets converted to /ui (and EXACTFU) occupies the same amount of space, * so there is no need to resize [perl #125990]. 
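 * (Concretely: /\xDF/i sized under /di fits in a single EXACTF character;
 * if a later construct forces /u, the fold becomes the two-character 'ss'
 * and the already-computed size would be too small.)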
 */
#define RExC_seen_unfolded_sharp_s (pRExC_state->seen_unfolded_sharp_s)

#ifdef RE_TRACK_PATTERN_OFFSETS
#define RExC_offsets    (pRExC_state->rxi->u.offsets) /* I am not like the
                                                         others */
#endif
#define RExC_emit       (pRExC_state->emit)
#define RExC_emit_dummy (pRExC_state->emit_dummy)
#define RExC_emit_start (pRExC_state->emit_start)
#define RExC_emit_bound (pRExC_state->emit_bound)
#define RExC_sawback    (pRExC_state->sawback)
#define RExC_seen       (pRExC_state->seen)
#define RExC_size       (pRExC_state->size)
#define RExC_maxlen     (pRExC_state->maxlen)
#define RExC_npar       (pRExC_state->npar)
#define RExC_nestroot   (pRExC_state->nestroot)
#define RExC_extralen   (pRExC_state->extralen)
#define RExC_seen_zerolen       (pRExC_state->seen_zerolen)
#define RExC_utf8       (pRExC_state->utf8)
#define RExC_uni_semantics      (pRExC_state->uni_semantics)
#define RExC_orig_utf8  (pRExC_state->orig_utf8)
#define RExC_open_parens        (pRExC_state->open_parens)
#define RExC_close_parens       (pRExC_state->close_parens)
#define RExC_end_op     (pRExC_state->end_op)
#define RExC_paren_names        (pRExC_state->paren_names)
#define RExC_recurse    (pRExC_state->recurse)
#define RExC_recurse_count      (pRExC_state->recurse_count)
#define RExC_study_chunk_recursed        (pRExC_state->study_chunk_recursed)
#define RExC_study_chunk_recursed_bytes  \
                                   (pRExC_state->study_chunk_recursed_bytes)
#define RExC_in_lookbehind      (pRExC_state->in_lookbehind)
#define RExC_contains_locale    (pRExC_state->contains_locale)
#ifdef EBCDIC
#   define RExC_recode_x_to_native (pRExC_state->recode_x_to_native)
#endif
#define RExC_in_multi_char_class (pRExC_state->in_multi_char_class)
#define RExC_frame_head (pRExC_state->frame_head)
#define RExC_frame_last (pRExC_state->frame_last)
#define RExC_frame_count (pRExC_state->frame_count)
#define RExC_strict (pRExC_state->strict)
#define RExC_study_started      (pRExC_state->study_started)
#define RExC_warn_text (pRExC_state->warn_text)

/* Heuristic check on the complexity of the pattern: if TOO_NAUGHTY, we set
 * a flag to disable back-off on the fixed/floating substrings - if it's
 * a high complexity pattern we assume the benefit of avoiding a full match
 * is worth the cost of checking for the substrings even if they rarely help.
 */
#define RExC_naughty    (pRExC_state->naughty)
#define TOO_NAUGHTY (10)
#define MARK_NAUGHTY(add) \
    if (RExC_naughty < TOO_NAUGHTY) \
        RExC_naughty += (add)
#define MARK_NAUGHTY_EXP(exp, add) \
    if (RExC_naughty < TOO_NAUGHTY) \
        RExC_naughty += RExC_naughty / (exp) + (add)

#define ISMULT1(c)      ((c) == '*' || (c) == '+' || (c) == '?')
#define ISMULT2(s)      ((*s) == '*' || (*s) == '+' || (*s) == '?' || \
        ((*s) == '{' && regcurly(s)))

/*
 * Flags to be passed up and down.
 */
#define WORST           0       /* Worst case. */
#define HASWIDTH        0x01    /* Known to match non-null strings. */

/* Simple enough to be STAR/PLUS operand; in an EXACTish node must be a single
 * character.  (There needs to be a case: in the switch statement in regexec.c
 * for any node marked SIMPLE.)  Note that this is not the same thing as
 * REGNODE_SIMPLE */
#define SIMPLE          0x02
#define SPSTART         0x04    /* Starts with * or + */
#define POSTPONED       0x08    /* (?1),(?&name), (??{...}) or similar */
#define TRYAGAIN        0x10    /* Weeded out a declaration. */
#define RESTART_PASS1   0x20    /* Need to restart sizing pass */
#define NEED_UTF8       0x40    /* In conjunction with RESTART_PASS1, need to
                                   calculate sizes as UTF-8 */

#define REG_NODE_NUM(x) ((x) ? (int)((x)-RExC_emit_start) : -1)

/* whether trie related optimizations are enabled */
#if PERL_ENABLE_EXTENDED_TRIE_OPTIMISATION
#define TRIE_STUDY_OPT
#define FULL_TRIE_STUDY
#define TRIE_STCLASS
#endif


#define PBYTE(u8str,paren) ((U8*)(u8str))[(paren) >> 3]
#define PBITVAL(paren) (1 << ((paren) & 7))
#define PAREN_TEST(u8str,paren) ( PBYTE(u8str,paren) & PBITVAL(paren))
#define PAREN_SET(u8str,paren) PBYTE(u8str,paren) |= PBITVAL(paren)
#define PAREN_UNSET(u8str,paren) PBYTE(u8str,paren) &= (~PBITVAL(paren))

#define REQUIRE_UTF8(flagp) STMT_START {                                   \
                                     if (!UTF) {                           \
                                         assert(PASS1);                    \
                                         *flagp = RESTART_PASS1|NEED_UTF8; \
                                         return NULL;                      \
                                     }                                     \
                             } STMT_END

/* Change from /d into /u rules, and restart the parse if we've already seen
 * something whose size would increase as a result, by setting *flagp and
 * returning 'restart_retval'.  RExC_uni_semantics is a flag that indicates
 * we've changed to /u during the parse.  */
#define REQUIRE_UNI_RULES(flagp, restart_retval)                            \
    STMT_START {                                                            \
            if (DEPENDS_SEMANTICS) {                                        \
                assert(PASS1);                                              \
                set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET);      \
                RExC_uni_semantics = 1;                                     \
                if (RExC_seen_unfolded_sharp_s) {                           \
                    *flagp |= RESTART_PASS1;                                \
                    return restart_retval;                                  \
                }                                                           \
            }                                                               \
    } STMT_END

/* This converts the named class defined in regcomp.h to its equivalent class
 * number defined in handy.h. */
#define namedclass_to_classnum(class)  ((int) ((class) / 2))
#define classnum_to_namedclass(classnum)  ((classnum) * 2)

#define _invlist_union_complement_2nd(a, b, output) \
                        _invlist_union_maybe_complement_2nd(a, b, TRUE, output)
#define _invlist_intersection_complement_2nd(a, b, output) \
                 _invlist_intersection_maybe_complement_2nd(a, b, TRUE, output)

/* About scan_data_t.

  During optimisation we recurse through the regexp program performing
  various inplace (keyhole style) optimisations. In addition study_chunk
  and scan_commit populate this data structure with information about
  what strings MUST appear in the pattern. We look for the longest
  string that must appear at a fixed location, and we look for the
  longest string that may appear at a floating location. So for instance
  in the pattern:

    /FOO[xX]A.*B[xX]BAR/

  Both 'FOO' and 'A' are fixed strings. Both 'B' and 'BAR' are floating
  strings (because they follow a .* construct). study_chunk will identify
  both FOO and BAR as being the longest fixed and floating strings
  respectively.

  The strings can be composites, for instance

     /(f)(o)(o)/

  will result in a composite fixed substring 'foo'.

  For each string some basic information is maintained:

  - min_offset
    This is the position the string must appear at, or not before.
    It also implicitly (when combined with minlenp) tells us how many
    characters must match before the string we are searching for.
    Likewise when combined with minlenp and the length of the string it
    tells us how many characters must appear after the string we have
    found.

  - max_offset
    Only used for floating strings. This is the rightmost point that
    the string can appear at. If set to SSize_t_MAX it indicates that the
    string can occur infinitely far to the right. For fixed strings, it is
    equal to min_offset.

  - minlenp
    A pointer to the minimum number of characters of the pattern that the
    string was found inside. This is important as in the case of positive
    lookahead or positive lookbehind we can have multiple patterns
    involved. Consider

    /(?=FOO).*F/

    The minimum length of the pattern overall is 3, the minimum length
    of the lookahead part is 3, but the minimum length of the part that
    will actually match is 1.
So 'FOO's minimum length is 3, but the minimum length for the F is 1. This is important as the minimum length is used to determine offsets in front of and behind the string being looked for. Since strings can be composites this is the length of the pattern at the time it was committed with a scan_commit. Note that the length is calculated by study_chunk, so that the minimum lengths are not known until the full pattern has been compiled, thus the pointer to the value. - lookbehind In the case of lookbehind the string being searched for can be offset past the start point of the final matching string. If this value was just blithely removed from the min_offset it would invalidate some of the calculations for how many chars must match before or after (as they are derived from min_offset and minlen and the length of the string being searched for). When the final pattern is compiled and the data is moved from the scan_data_t structure into the regexp structure the information about lookbehind is factored in, with the information that would have been lost precalculated in the end_shift field for the associated string. The fields pos_min and pos_delta are used to store the minimum offset and the delta to the maximum offset at the current point in the pattern. */ struct scan_data_substrs { SV *str; /* longest substring found in pattern */ SSize_t min_offset; /* earliest point in string it can appear */ SSize_t max_offset; /* latest point in string it can appear */ SSize_t *minlenp; /* pointer to the minlen relevant to the string */ SSize_t lookbehind; /* is the pos of the string modified by LB */ I32 flags; /* per substring SF_* and SCF_* flags */ }; typedef struct scan_data_t { /*I32 len_min; unused */ /*I32 len_delta; unused */ SSize_t pos_min; SSize_t pos_delta; SV *last_found; SSize_t last_end; /* min value, <0 unless valid. */ SSize_t last_start_min; SSize_t last_start_max; U8 cur_is_floating; /* whether the last_* values should be set as * the next fixed (0) or floating (1) * substring */ /* [0] is longest fixed substring so far, [1] is longest float so far */ struct scan_data_substrs substrs[2]; I32 flags; /* common SF_* and SCF_* flags */ I32 whilem_c; SSize_t *last_closep; regnode_ssc *start_class; } scan_data_t; /* * Forward declarations for pregcomp()'s friends. */ static const scan_data_t zero_scan_data = { 0, 0, NULL, 0, 0, 0, 0, { { NULL, 0, 0, 0, 0, 0 }, { NULL, 0, 0, 0, 0, 0 }, }, 0, 0, NULL, NULL }; /* study flags */ #define SF_BEFORE_SEOL 0x0001 #define SF_BEFORE_MEOL 0x0002 #define SF_BEFORE_EOL (SF_BEFORE_SEOL|SF_BEFORE_MEOL) #define SF_IS_INF 0x0040 #define SF_HAS_PAR 0x0080 #define SF_IN_PAR 0x0100 #define SF_HAS_EVAL 0x0200 /* SCF_DO_SUBSTR is the flag that tells the regexp analyzer to track the * longest substring in the pattern. When it is not set the optimiser keeps * track of position, but does not keep track of the actual strings seen, * * So for instance /foo/ will be parsed with SCF_DO_SUBSTR being true, but * /foo/i will not. * * Similarly, /foo.*(blah|erm|huh).*fnorble/ will have "foo" and "fnorble" * parsed with SCF_DO_SUBSTR on, but while processing the (...) it will be * turned off because of the alternation (BRANCH). */ #define SCF_DO_SUBSTR 0x0400 #define SCF_DO_STCLASS_AND 0x0800 #define SCF_DO_STCLASS_OR 0x1000 #define SCF_DO_STCLASS (SCF_DO_STCLASS_AND|SCF_DO_STCLASS_OR) #define SCF_WHILEM_VISITED_POS 0x2000 #define SCF_TRIE_RESTUDY 0x4000 /* Do restudy? 
*/ #define SCF_SEEN_ACCEPT 0x8000 #define SCF_TRIE_DOING_RESTUDY 0x10000 #define SCF_IN_DEFINE 0x20000 #define UTF cBOOL(RExC_utf8) /* The enums for all these are ordered so things work out correctly */ #define LOC (get_regex_charset(RExC_flags) == REGEX_LOCALE_CHARSET) #define DEPENDS_SEMANTICS (get_regex_charset(RExC_flags) \ == REGEX_DEPENDS_CHARSET) #define UNI_SEMANTICS (get_regex_charset(RExC_flags) == REGEX_UNICODE_CHARSET) #define AT_LEAST_UNI_SEMANTICS (get_regex_charset(RExC_flags) \ >= REGEX_UNICODE_CHARSET) #define ASCII_RESTRICTED (get_regex_charset(RExC_flags) \ == REGEX_ASCII_RESTRICTED_CHARSET) #define AT_LEAST_ASCII_RESTRICTED (get_regex_charset(RExC_flags) \ >= REGEX_ASCII_RESTRICTED_CHARSET) #define ASCII_FOLD_RESTRICTED (get_regex_charset(RExC_flags) \ == REGEX_ASCII_MORE_RESTRICTED_CHARSET) #define FOLD cBOOL(RExC_flags & RXf_PMf_FOLD) /* For programs that want to be strictly Unicode compatible by dying if any * attempt is made to match a non-Unicode code point against a Unicode * property. */ #define ALWAYS_WARN_SUPER ckDEAD(packWARN(WARN_NON_UNICODE)) #define OOB_NAMEDCLASS -1 /* There is no code point that is out-of-bounds, so this is problematic. But * its only current use is to initialize a variable that is always set before * looked at. */ #define OOB_UNICODE 0xDEADBEEF #define CHR_SVLEN(sv) (UTF ? sv_len_utf8(sv) : SvCUR(sv)) /* length of regex to show in messages that don't mark a position within */ #define RegexLengthToShowInErrorMessages 127 /* * If MARKER[12] are adjusted, be sure to adjust the constants at the top * of t/op/regmesg.t, the tests in t/op/re_tests, and those in * op/pragma/warn/regcomp. */ #define MARKER1 "<-- HERE" /* marker as it appears in the description */ #define MARKER2 " <-- HERE " /* marker as it appears within the regex */ #define REPORT_LOCATION " in regex; marked by " MARKER1 \ " in m/%" UTF8f MARKER2 "%" UTF8f "/" /* The code in this file in places uses one level of recursion with parsing * rebased to an alternate string constructed by us in memory. This can take * the form of something that is completely different from the input, or * something that uses the input as part of the alternate. In the first case, * there should be no possibility of an error, as we are in complete control of * the alternate string. But in the second case we don't control the input * portion, so there may be errors in that. Here's an example: * /[abc\x{DF}def]/ui * is handled specially because \x{df} folds to a sequence of more than one * character, 'ss'. What is done is to create and parse an alternate string, * which looks like this: * /(?:\x{DF}|[abc\x{DF}def])/ui * where it uses the input unchanged in the middle of something it constructs, * which is a branch for the DF outside the character class, and clustering * parens around the whole thing. (It knows enough to skip the DF inside the * class while in this substitute parse.) 'abc' and 'def' may have errors that * need to be reported. The general situation looks like this: * * sI tI xI eI * Input: ---------------------------------------------------- * Constructed: --------------------------------------------------- * sC tC xC eC EC * * The input string sI..eI is the input pattern. The string sC..EC is the * constructed substitute parse string. The portions sC..tC and eC..EC are * constructed by us. The portion tC..eC is an exact duplicate of the input * pattern tI..eI. In the diagram, these are vertically aligned. Suppose that * while parsing, we find an error at xC. 
We want to display a message showing * the real input string. Thus we need to find the point xI in it which * corresponds to xC. xC >= tC, since the portion of the string sC..tC has * been constructed by us, and so shouldn't have errors. We get: * * xI = sI + (tI - sI) + (xC - tC) * * and, the offset into sI is: * * (xI - sI) = (tI - sI) + (xC - tC) * * When the substitute is constructed, we save (tI -sI) as RExC_precomp_adj, * and we save tC as RExC_adjusted_start. * * During normal processing of the input pattern, everything points to that, * with RExC_precomp_adj set to 0, and RExC_adjusted_start set to sI. */ #define tI_sI RExC_precomp_adj #define tC RExC_adjusted_start #define sC RExC_precomp #define xI_offset(xC) ((IV) (tI_sI + (xC - tC))) #define xI(xC) (sC + xI_offset(xC)) #define eC RExC_precomp_end #define REPORT_LOCATION_ARGS(xC) \ UTF8fARG(UTF, \ (xI(xC) > eC) /* Don't run off end */ \ ? eC - sC /* Length before the <--HERE */ \ : ( __ASSERT_(xI_offset(xC) >= 0) xI_offset(xC) ), \ sC), /* The input pattern printed up to the <--HERE */ \ UTF8fARG(UTF, \ (xI(xC) > eC) ? 0 : eC - xI(xC), /* Length after <--HERE */ \ (xI(xC) > eC) ? eC : xI(xC)) /* pattern after <--HERE */ /* Used to point after bad bytes for an error message, but avoid skipping * past a nul byte. */ #define SKIP_IF_CHAR(s) (!*(s) ? 0 : UTF ? UTF8SKIP(s) : 1) /* * Calls SAVEDESTRUCTOR_X if needed, then calls Perl_croak with the given * arg. Show regex, up to a maximum length. If it's too long, chop and add * "...". */ #define _FAIL(code) STMT_START { \ const char *ellipses = ""; \ IV len = RExC_precomp_end - RExC_precomp; \ \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ if (len > RegexLengthToShowInErrorMessages) { \ /* chop 10 shorter than the max, to ensure meaning of "..." */ \ len = RegexLengthToShowInErrorMessages - 10; \ ellipses = "..."; \ } \ code; \ } STMT_END #define FAIL(msg) _FAIL( \ Perl_croak(aTHX_ "%s in regex m/%" UTF8f "%s/", \ msg, UTF8fARG(UTF, len, RExC_precomp), ellipses)) #define FAIL2(msg,arg) _FAIL( \ Perl_croak(aTHX_ msg " in regex m/%" UTF8f "%s/", \ arg, UTF8fARG(UTF, len, RExC_precomp), ellipses)) /* * Simple_vFAIL -- like FAIL, but marks the current location in the scan */ #define Simple_vFAIL(m) STMT_START { \ Perl_croak(aTHX_ "%s" REPORT_LOCATION, \ m, REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END /* * Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL() */ #define vFAIL(m) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ Simple_vFAIL(m); \ } STMT_END /* * Like Simple_vFAIL(), but accepts two arguments. */ #define Simple_vFAIL2(m,a1) STMT_START { \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END /* * Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL2(). */ #define vFAIL2(m,a1) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ Simple_vFAIL2(m, a1); \ } STMT_END /* * Like Simple_vFAIL(), but accepts three arguments. */ #define Simple_vFAIL3(m, a1, a2) STMT_START { \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END /* * Calls SAVEDESTRUCTOR_X if needed, then Simple_vFAIL3(). */ #define vFAIL3(m,a1,a2) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ Simple_vFAIL3(m, a1, a2); \ } STMT_END /* * Like Simple_vFAIL(), but accepts four arguments. 
*/ #define Simple_vFAIL4(m, a1, a2, a3) STMT_START { \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, a3, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END #define vFAIL4(m,a1,a2,a3) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ Simple_vFAIL4(m, a1, a2, a3); \ } STMT_END /* A specialized version of vFAIL2 that works with UTF8f */ #define vFAIL2utf8f(m, a1) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END #define vFAIL3utf8f(m, a1, a2) STMT_START { \ if (!SIZE_ONLY) \ SAVEFREESV(RExC_rx_sv); \ S_re_croak2(aTHX_ UTF, m, REPORT_LOCATION, a1, a2, \ REPORT_LOCATION_ARGS(RExC_parse)); \ } STMT_END /* These have asserts in them because of [perl #122671] Many warnings in * regcomp.c can occur twice. If they get output in pass1 and, later in that * pass, the pattern has to be converted to UTF-8 and the pass restarted, they * would get output again. So they should be output in pass2, and these * asserts make sure new warnings follow that paradigm. */ /* m is not necessarily a "literal string", in this macro */ #define reg_warn_non_literal_string(loc, m) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ "%s" REPORT_LOCATION, \ m, REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARNreg(loc,m) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN(loc, m) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN_dep(loc, m) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_DEPRECATED), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARNdep(loc,m) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner_d(aTHX_ packWARN(WARN_DEPRECATED), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARNregdep(loc,m) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner_d(aTHX_ packWARN2(WARN_DEPRECATED, \ WARN_REGEXP), \ m REPORT_LOCATION, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARN2reg_d(loc,m, a1) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner_d(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARN2reg(loc, m, a1) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN3(loc, m, a1, a2) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARN3reg(loc, m, a1, a2) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN4(loc, m, a1, a2, a3) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, a3, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define ckWARN4reg(loc, m, a1, a2, a3) STMT_START { \ __ASSERT_(PASS2) Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, a3, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END #define vWARN5(loc, m, a1, a2, a3, a4) STMT_START { \ __ASSERT_(PASS2) Perl_warner(aTHX_ packWARN(WARN_REGEXP), \ m REPORT_LOCATION, \ a1, a2, a3, a4, \ REPORT_LOCATION_ARGS(loc)); \ } STMT_END /* Macros for recording node offsets.
20001227 mjd@plover.com * Nodes are numbered 1, 2, 3, 4. Node #n's position is recorded in * element 2*n-1 of the array. Element #2n holds the byte length of node #n. * Element 0 holds the number n. * Position is 1 indexed. */ #ifndef RE_TRACK_PATTERN_OFFSETS #define Set_Node_Offset_To_R(node,byte) #define Set_Node_Offset(node,byte) #define Set_Cur_Node_Offset #define Set_Node_Length_To_R(node,len) #define Set_Node_Length(node,len) #define Set_Node_Cur_Length(node,start) #define Node_Offset(n) #define Node_Length(n) #define Set_Node_Offset_Length(node,offset,len) #define ProgLen(ri) ri->u.proglen #define SetProgLen(ri,x) ri->u.proglen = x #else #define ProgLen(ri) ri->u.offsets[0] #define SetProgLen(ri,x) ri->u.offsets[0] = x #define Set_Node_Offset_To_R(node,byte) STMT_START { \ if (! SIZE_ONLY) { \ MJD_OFFSET_DEBUG(("** (%d) offset of node %d is %d.\n", \ __LINE__, (int)(node), (int)(byte))); \ if((node) < 0) { \ Perl_croak(aTHX_ "value of node is %d in Offset macro", \ (int)(node)); \ } else { \ RExC_offsets[2*(node)-1] = (byte); \ } \ } \ } STMT_END #define Set_Node_Offset(node,byte) \ Set_Node_Offset_To_R((node)-RExC_emit_start, (byte)-RExC_start) #define Set_Cur_Node_Offset Set_Node_Offset(RExC_emit, RExC_parse) #define Set_Node_Length_To_R(node,len) STMT_START { \ if (! SIZE_ONLY) { \ MJD_OFFSET_DEBUG(("** (%d) size of node %d is %d.\n", \ __LINE__, (int)(node), (int)(len))); \ if((node) < 0) { \ Perl_croak(aTHX_ "value of node is %d in Length macro", \ (int)(node)); \ } else { \ RExC_offsets[2*(node)] = (len); \ } \ } \ } STMT_END #define Set_Node_Length(node,len) \ Set_Node_Length_To_R((node)-RExC_emit_start, len) #define Set_Node_Cur_Length(node, start) \ Set_Node_Length(node, RExC_parse - start) /* Get offsets and lengths */ #define Node_Offset(n) (RExC_offsets[2*((n)-RExC_emit_start)-1]) #define Node_Length(n) (RExC_offsets[2*((n)-RExC_emit_start)]) #define Set_Node_Offset_Length(node,offset,len) STMT_START { \ Set_Node_Offset_To_R((node)-RExC_emit_start, (offset)); \ Set_Node_Length_To_R((node)-RExC_emit_start, (len)); \ } STMT_END #endif #if PERL_ENABLE_EXPERIMENTAL_REGEX_OPTIMISATIONS #define EXPERIMENTAL_INPLACESCAN #endif /*PERL_ENABLE_EXPERIMENTAL_REGEX_OPTIMISATIONS*/ #ifdef DEBUGGING int Perl_re_printf(pTHX_ const char *fmt, ...) { va_list ap; int result; PerlIO *f= Perl_debug_log; PERL_ARGS_ASSERT_RE_PRINTF; va_start(ap, fmt); result = PerlIO_vprintf(f, fmt, ap); va_end(ap); return result; } int Perl_re_indentf(pTHX_ const char *fmt, U32 depth, ...)
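/* Like Perl_re_printf(), but first emits two spaces of indentation per level of "depth" (wrapped modulo 20) so nested debug output lines up. */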
{ va_list ap; int result; PerlIO *f= Perl_debug_log; PERL_ARGS_ASSERT_RE_INDENTF; va_start(ap, depth); PerlIO_printf(f, "%*s", ( (int)depth % 20 ) * 2, ""); result = PerlIO_vprintf(f, fmt, ap); va_end(ap); return result; } #endif /* DEBUGGING */ #define DEBUG_RExC_seen() \ DEBUG_OPTIMISE_MORE_r({ \ Perl_re_printf( aTHX_ "RExC_seen: "); \ \ if (RExC_seen & REG_ZERO_LEN_SEEN) \ Perl_re_printf( aTHX_ "REG_ZERO_LEN_SEEN "); \ \ if (RExC_seen & REG_LOOKBEHIND_SEEN) \ Perl_re_printf( aTHX_ "REG_LOOKBEHIND_SEEN "); \ \ if (RExC_seen & REG_GPOS_SEEN) \ Perl_re_printf( aTHX_ "REG_GPOS_SEEN "); \ \ if (RExC_seen & REG_RECURSE_SEEN) \ Perl_re_printf( aTHX_ "REG_RECURSE_SEEN "); \ \ if (RExC_seen & REG_TOP_LEVEL_BRANCHES_SEEN) \ Perl_re_printf( aTHX_ "REG_TOP_LEVEL_BRANCHES_SEEN "); \ \ if (RExC_seen & REG_VERBARG_SEEN) \ Perl_re_printf( aTHX_ "REG_VERBARG_SEEN "); \ \ if (RExC_seen & REG_CUTGROUP_SEEN) \ Perl_re_printf( aTHX_ "REG_CUTGROUP_SEEN "); \ \ if (RExC_seen & REG_RUN_ON_COMMENT_SEEN) \ Perl_re_printf( aTHX_ "REG_RUN_ON_COMMENT_SEEN "); \ \ if (RExC_seen & REG_UNFOLDED_MULTI_SEEN) \ Perl_re_printf( aTHX_ "REG_UNFOLDED_MULTI_SEEN "); \ \ if (RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN) \ Perl_re_printf( aTHX_ "REG_UNBOUNDED_QUANTIFIER_SEEN "); \ \ Perl_re_printf( aTHX_ "\n"); \ }); #define DEBUG_SHOW_STUDY_FLAG(flags,flag) \ if ((flags) & flag) Perl_re_printf( aTHX_ "%s ", #flag) #ifdef DEBUGGING static void S_debug_show_study_flags(pTHX_ U32 flags, const char *open_str, const char *close_str) { if (!flags) return; Perl_re_printf( aTHX_ "%s", open_str); DEBUG_SHOW_STUDY_FLAG(flags, SF_BEFORE_SEOL); DEBUG_SHOW_STUDY_FLAG(flags, SF_BEFORE_MEOL); DEBUG_SHOW_STUDY_FLAG(flags, SF_IS_INF); DEBUG_SHOW_STUDY_FLAG(flags, SF_HAS_PAR); DEBUG_SHOW_STUDY_FLAG(flags, SF_IN_PAR); DEBUG_SHOW_STUDY_FLAG(flags, SF_HAS_EVAL); DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_SUBSTR); DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS_AND); DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS_OR); DEBUG_SHOW_STUDY_FLAG(flags, SCF_DO_STCLASS); DEBUG_SHOW_STUDY_FLAG(flags, SCF_WHILEM_VISITED_POS); DEBUG_SHOW_STUDY_FLAG(flags, SCF_TRIE_RESTUDY); DEBUG_SHOW_STUDY_FLAG(flags, SCF_SEEN_ACCEPT); DEBUG_SHOW_STUDY_FLAG(flags, SCF_TRIE_DOING_RESTUDY); DEBUG_SHOW_STUDY_FLAG(flags, SCF_IN_DEFINE); Perl_re_printf( aTHX_ "%s", close_str); } static void S_debug_studydata(pTHX_ const char *where, scan_data_t *data, U32 depth, int is_inf) { GET_RE_DEBUG_FLAGS_DECL; DEBUG_OPTIMISE_MORE_r({ if (!data) return; Perl_re_indentf(aTHX_ "%s: Pos:%" IVdf "/%" IVdf " Flags: 0x%" UVXf, depth, where, (IV)data->pos_min, (IV)data->pos_delta, (UV)data->flags ); S_debug_show_study_flags(aTHX_ data->flags," [","]"); Perl_re_printf( aTHX_ " Whilem_c: %" IVdf " Lcp: %" IVdf " %s", (IV)data->whilem_c, (IV)(data->last_closep ? *((data)->last_closep) : -1), is_inf ? "INF " : "" ); if (data->last_found) { int i; Perl_re_printf(aTHX_ "Last:'%s' %" IVdf ":%" IVdf "/%" IVdf, SvPVX_const(data->last_found), (IV)data->last_end, (IV)data->last_start_min, (IV)data->last_start_max ); for (i = 0; i < 2; i++) { Perl_re_printf(aTHX_ " %s%s: '%s' @ %" IVdf "/%" IVdf, data->cur_is_floating == i ? "*" : "", i ? 
"Float" : "Fixed", SvPVX_const(data->substrs[i].str), (IV)data->substrs[i].min_offset, (IV)data->substrs[i].max_offset ); S_debug_show_study_flags(aTHX_ data->substrs[i].flags," [","]"); } } Perl_re_printf( aTHX_ "\n"); }); } static void S_debug_peep(pTHX_ const char *str, const RExC_state_t *pRExC_state, regnode *scan, U32 depth, U32 flags) { GET_RE_DEBUG_FLAGS_DECL; DEBUG_OPTIMISE_r({ regnode *Next; if (!scan) return; Next = regnext(scan); regprop(RExC_rx, RExC_mysv, scan, NULL, pRExC_state); Perl_re_indentf( aTHX_ "%s>%3d: %s (%d)", depth, str, REG_NODE_NUM(scan), SvPV_nolen_const(RExC_mysv), Next ? (REG_NODE_NUM(Next)) : 0 ); S_debug_show_study_flags(aTHX_ flags," [ ","]"); Perl_re_printf( aTHX_ "\n"); }); } # define DEBUG_STUDYDATA(where, data, depth, is_inf) \ S_debug_studydata(aTHX_ where, data, depth, is_inf) # define DEBUG_PEEP(str, scan, depth, flags) \ S_debug_peep(aTHX_ str, pRExC_state, scan, depth, flags) #else # define DEBUG_STUDYDATA(where, data, depth, is_inf) NOOP # define DEBUG_PEEP(str, scan, depth, flags) NOOP #endif /* ========================================================= * BEGIN edit_distance stuff. * * This calculates how many single character changes of any type are needed to * transform a string into another one. It is taken from version 3.1 of * * https://metacpan.org/pod/Text::Levenshtein::Damerau::XS */ /* Our unsorted dictionary linked list. */ /* Note we use UVs, not chars. */ struct dictionary{ UV key; UV value; struct dictionary* next; }; typedef struct dictionary item; PERL_STATIC_INLINE item* push(UV key,item* curr) { item* head; Newxz(head, 1, item); head->key = key; head->value = 0; head->next = curr; return head; } PERL_STATIC_INLINE item* find(item* head, UV key) { item* iterator = head; while (iterator){ if (iterator->key == key){ return iterator; } iterator = iterator->next; } return NULL; } PERL_STATIC_INLINE item* uniquePush(item* head,UV key) { item* iterator = head; while (iterator){ if (iterator->key == key) { return head; } iterator = iterator->next; } return push(key,head); } PERL_STATIC_INLINE void dict_free(item* head) { item* iterator = head; while (iterator) { item* temp = iterator; iterator = iterator->next; Safefree(temp); } head = NULL; } /* End of Dictionary Stuff */ /* All calculations/work are done here */ STATIC int S_edit_distance(const UV* src, const UV* tgt, const STRLEN x, /* length of src[] */ const STRLEN y, /* length of tgt[] */ const SSize_t maxDistance ) { item *head = NULL; UV swapCount,swapScore,targetCharCount,i,j; UV *scores; UV score_ceil = x + y; PERL_ARGS_ASSERT_EDIT_DISTANCE; /* intialize matrix start values */ Newxz(scores, ( (x + 2) * (y + 2)), UV); scores[0] = score_ceil; scores[1 * (y + 2) + 0] = score_ceil; scores[0 * (y + 2) + 1] = score_ceil; scores[1 * (y + 2) + 1] = 0; head = uniquePush(uniquePush(head,src[0]),tgt[0]); /* work loops */ /* i = src index */ /* j = tgt index */ for (i=1;i<=x;i++) { if (i < x) head = uniquePush(head,src[i]); scores[(i+1) * (y + 2) + 1] = i; scores[(i+1) * (y + 2) + 0] = score_ceil; swapCount = 0; for (j=1;j<=y;j++) { if (i == 1) { if(j < y) head = uniquePush(head,tgt[j]); scores[1 * (y + 2) + (j + 1)] = j; scores[0 * (y + 2) + (j + 1)] = score_ceil; } targetCharCount = find(head,tgt[j-1])->value; swapScore = scores[targetCharCount * (y + 2) + swapCount] + i - targetCharCount - 1 + j - swapCount; if (src[i-1] != tgt[j-1]){ scores[(i+1) * (y + 2) + (j + 1)] = MIN(swapScore,(MIN(scores[i * (y + 2) + j], MIN(scores[(i+1) * (y + 2) + j], scores[i * (y + 2) + (j + 1)])) + 1)); } 
else { swapCount = j; scores[(i+1) * (y + 2) + (j + 1)] = MIN(scores[i * (y + 2) + j], swapScore); } } find(head,src[i-1])->value = i; } { IV score = scores[(x+1) * (y + 2) + (y + 1)]; dict_free(head); Safefree(scores); return (maxDistance != 0 && maxDistance < score)?(-1):score; } } /* END of edit_distance() stuff * ========================================================= */ /* is c a control character for which we have a mnemonic? */ #define isMNEMONIC_CNTRL(c) _IS_MNEMONIC_CNTRL_ONLY_FOR_USE_BY_REGCOMP_DOT_C(c) STATIC const char * S_cntrl_to_mnemonic(const U8 c) { /* Returns the mnemonic string that represents character 'c', if one * exists; NULL otherwise. The only ones that exist for the purposes of * this routine are a few control characters */ switch (c) { case '\a': return "\\a"; case '\b': return "\\b"; case ESC_NATIVE: return "\\e"; case '\f': return "\\f"; case '\n': return "\\n"; case '\r': return "\\r"; case '\t': return "\\t"; } return NULL; } /* Mark that we cannot extend a found fixed substring at this point. Update the longest found anchored substring or the longest found floating substrings if needed. */ STATIC void S_scan_commit(pTHX_ const RExC_state_t *pRExC_state, scan_data_t *data, SSize_t *minlenp, int is_inf) { const STRLEN l = CHR_SVLEN(data->last_found); SV * const longest_sv = data->substrs[data->cur_is_floating].str; const STRLEN old_l = CHR_SVLEN(longest_sv); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_SCAN_COMMIT; if ((l >= old_l) && ((l > old_l) || (data->flags & SF_BEFORE_EOL))) { const U8 i = data->cur_is_floating; SvSetMagicSV(longest_sv, data->last_found); data->substrs[i].min_offset = l ? data->last_start_min : data->pos_min; if (!i) /* fixed */ data->substrs[0].max_offset = data->substrs[0].min_offset; else { /* float */ data->substrs[1].max_offset = (l ? data->last_start_max : (data->pos_delta > SSize_t_MAX - data->pos_min ? SSize_t_MAX : data->pos_min + data->pos_delta)); if (is_inf || (STRLEN)data->substrs[1].max_offset > (STRLEN)SSize_t_MAX) data->substrs[1].max_offset = SSize_t_MAX; } if (data->flags & SF_BEFORE_EOL) data->substrs[i].flags |= (data->flags & SF_BEFORE_EOL); else data->substrs[i].flags &= ~SF_BEFORE_EOL; data->substrs[i].minlenp = minlenp; data->substrs[i].lookbehind = 0; } SvCUR_set(data->last_found, 0); { SV * const sv = data->last_found; if (SvUTF8(sv) && SvMAGICAL(sv)) { MAGIC * const mg = mg_find(sv, PERL_MAGIC_utf8); if (mg) mg->mg_len = 0; } } data->last_end = -1; data->flags &= ~SF_BEFORE_EOL; DEBUG_STUDYDATA("commit", data, 0, is_inf); } /* An SSC is just a regnode_charclass_posix with an extra field: the inversion * list that describes which code points it matches */ STATIC void S_ssc_anything(pTHX_ regnode_ssc *ssc) { /* Set the SSC 'ssc' to match an empty string or any code point */ PERL_ARGS_ASSERT_SSC_ANYTHING; assert(is_ANYOF_SYNTHETIC(ssc)); /* mortalize so won't leak */ ssc->invlist = sv_2mortal(_add_range_to_invlist(NULL, 0, UV_MAX)); ANYOF_FLAGS(ssc) |= SSC_MATCHES_EMPTY_STRING; /* Plus matches empty */ } STATIC int S_ssc_is_anything(const regnode_ssc *ssc) { /* Returns TRUE if the SSC 'ssc' can match the empty string and any code * point; FALSE otherwise. Thus, this is used to see if using 'ssc' buys * us anything: if the function returns TRUE, 'ssc' hasn't been restricted * in any way, so there's no point in using it */ UV start, end; bool ret; PERL_ARGS_ASSERT_SSC_IS_ANYTHING; assert(is_ANYOF_SYNTHETIC(ssc)); if (! 
(ANYOF_FLAGS(ssc) & SSC_MATCHES_EMPTY_STRING)) { return FALSE; } /* See if the list consists solely of the range 0 - Infinity */ invlist_iterinit(ssc->invlist); ret = invlist_iternext(ssc->invlist, &start, &end) && start == 0 && end == UV_MAX; invlist_iterfinish(ssc->invlist); if (ret) { return TRUE; } /* If e.g., both \w and \W are set, matches everything */ if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) { int i; for (i = 0; i < ANYOF_POSIXL_MAX; i += 2) { if (ANYOF_POSIXL_TEST(ssc, i) && ANYOF_POSIXL_TEST(ssc, i+1)) { return TRUE; } } } return FALSE; } STATIC void S_ssc_init(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc) { /* Initializes the SSC 'ssc'. This includes setting it to match an empty * string, any code point, or any posix class under locale */ PERL_ARGS_ASSERT_SSC_INIT; Zero(ssc, 1, regnode_ssc); set_ANYOF_SYNTHETIC(ssc); ARG_SET(ssc, ANYOF_ONLY_HAS_BITMAP); ssc_anything(ssc); /* If any portion of the regex is to operate under locale rules that aren't * fully known at compile time, initialization includes it. The reason * this isn't done for all regexes is that the optimizer was written under * the assumption that locale was all-or-nothing. Given the complexity and * lack of documentation in the optimizer, and that there are inadequate * test cases for locale, many parts of it may not work properly, so it is * safest to avoid locale unless necessary. */ if (RExC_contains_locale) { ANYOF_POSIXL_SETALL(ssc); } else { ANYOF_POSIXL_ZERO(ssc); } } STATIC int S_ssc_is_cp_posixl_init(const RExC_state_t *pRExC_state, const regnode_ssc *ssc) { /* Returns TRUE if the SSC 'ssc' is in its initial state with regard only * to the list of code points matched, and locale posix classes; hence does * not check its flags */ UV start, end; bool ret; PERL_ARGS_ASSERT_SSC_IS_CP_POSIXL_INIT; assert(is_ANYOF_SYNTHETIC(ssc)); invlist_iterinit(ssc->invlist); ret = invlist_iternext(ssc->invlist, &start, &end) && start == 0 && end == UV_MAX; invlist_iterfinish(ssc->invlist); if (! ret) { return FALSE; } if (RExC_contains_locale && ! ANYOF_POSIXL_SSC_TEST_ALL_SET(ssc)) { return FALSE; } return TRUE; } STATIC SV* S_get_ANYOF_cp_list_for_ssc(pTHX_ const RExC_state_t *pRExC_state, const regnode_charclass* const node) { /* Returns a mortal inversion list defining which code points are matched * by 'node', which is of type ANYOF. Handles complementing the result if * appropriate. If some code points aren't knowable at this time, the * returned list must, and will, contain every code point that is a * possibility. */ SV* invlist = NULL; SV* only_utf8_locale_invlist = NULL; unsigned int i; const U32 n = ARG(node); bool new_node_has_latin1 = FALSE; PERL_ARGS_ASSERT_GET_ANYOF_CP_LIST_FOR_SSC; /* Look at the data structure created by S_set_ANYOF_arg() */ if (n != ANYOF_ONLY_HAS_BITMAP) { SV * const rv = MUTABLE_SV(RExC_rxi->data->data[n]); AV * const av = MUTABLE_AV(SvRV(rv)); SV **const ary = AvARRAY(av); assert(RExC_rxi->data->what[n] == 's'); if (ary[1] && ary[1] != &PL_sv_undef) { /* Has compile-time swash */ invlist = sv_2mortal(invlist_clone(_get_swash_invlist(ary[1]))); } else if (ary[0] && ary[0] != &PL_sv_undef) { /* Here, no compile-time swash, and there are things that won't be * known until runtime -- we have to assume it could be anything */ invlist = sv_2mortal(_new_invlist(1)); return _add_range_to_invlist(invlist, 0, UV_MAX); } else if (ary[3] && ary[3] != &PL_sv_undef) { /* Here no compile-time swash, and no run-time only data.
Use the * node's inversion list */ invlist = sv_2mortal(invlist_clone(ary[3])); } /* Get the code points valid only under UTF-8 locales */ if ((ANYOF_FLAGS(node) & ANYOFL_FOLD) && ary[2] && ary[2] != &PL_sv_undef) { only_utf8_locale_invlist = ary[2]; } } if (! invlist) { invlist = sv_2mortal(_new_invlist(0)); } /* An ANYOF node contains a bitmap for the first NUM_ANYOF_CODE_POINTS * code points, and an inversion list for the others, but if there are code * points that should match only conditionally on the target string being * UTF-8, those are placed in the inversion list, and not the bitmap. * Since there are circumstances under which they could match, they are * included in the SSC. But if the ANYOF node is to be inverted, we have * to exclude them here, so that when we invert below, the end result * actually does include them. (Think about "\xe0" =~ /[^\xc0]/di;). We * have to do this here before we add the unconditionally matched code * points */ if (ANYOF_FLAGS(node) & ANYOF_INVERT) { _invlist_intersection_complement_2nd(invlist, PL_UpperLatin1, &invlist); } /* Add in the points from the bit map */ for (i = 0; i < NUM_ANYOF_CODE_POINTS; i++) { if (ANYOF_BITMAP_TEST(node, i)) { unsigned int start = i++; for (; i < NUM_ANYOF_CODE_POINTS && ANYOF_BITMAP_TEST(node, i); ++i) { /* empty */ } invlist = _add_range_to_invlist(invlist, start, i-1); new_node_has_latin1 = TRUE; } } /* If this can match all upper Latin1 code points, have to add them * as well. But don't add them if inverting, as when that gets done below, * it would exclude all these characters, including the ones it shouldn't * that were added just above */ if (! (ANYOF_FLAGS(node) & ANYOF_INVERT) && OP(node) == ANYOFD && (ANYOF_FLAGS(node) & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER)) { _invlist_union(invlist, PL_UpperLatin1, &invlist); } /* Similarly for these */ if (ANYOF_FLAGS(node) & ANYOF_MATCHES_ALL_ABOVE_BITMAP) { _invlist_union_complement_2nd(invlist, PL_InBitmap, &invlist); } if (ANYOF_FLAGS(node) & ANYOF_INVERT) { _invlist_invert(invlist); } else if (new_node_has_latin1 && ANYOF_FLAGS(node) & ANYOFL_FOLD) { /* Under /li, any 0-255 could fold to any other 0-255, depending on the * locale. We can skip this if there are no 0-255 at all. */ _invlist_union(invlist, PL_Latin1, &invlist); } /* Similarly add the UTF-8 locale possible matches. These have to be * deferred until after the non-UTF-8 locale ones are taken care of just * above, or it leads to wrong results under ANYOF_INVERT */ if (only_utf8_locale_invlist) { _invlist_union_maybe_complement_2nd(invlist, only_utf8_locale_invlist, ANYOF_FLAGS(node) & ANYOF_INVERT, &invlist); } return invlist; } /* These two functions currently do the exact same thing */ #define ssc_init_zero ssc_init #define ssc_add_cp(ssc, cp) ssc_add_range((ssc), (cp), (cp)) #define ssc_match_all_cp(ssc) ssc_add_range(ssc, 0, UV_MAX) /* 'AND' a given class with another one. Can create false positives. 'ssc' * should not be inverted. 'and_with->flags & ANYOF_MATCHES_POSIXL' should be * 0 if 'and_with' is a regnode_charclass instead of a regnode_ssc. */ STATIC void S_ssc_and(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc, const regnode_charclass *and_with) { /* Accumulate into SSC 'ssc' its 'AND' with 'and_with', which is either * another SSC or a regular ANYOF class. Can create false positives. 
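* Such false positives are acceptable: the SSC is only a pre-filter for where a match could start, so matching too much merely costs time and can never cause an incorrect match.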
*/ SV* anded_cp_list; U8 anded_flags; PERL_ARGS_ASSERT_SSC_AND; assert(is_ANYOF_SYNTHETIC(ssc)); /* 'and_with' is used as-is if it too is an SSC; otherwise have to extract * the code point inversion list and just the relevant flags */ if (is_ANYOF_SYNTHETIC(and_with)) { anded_cp_list = ((regnode_ssc *)and_with)->invlist; anded_flags = ANYOF_FLAGS(and_with); /* XXX This is a kludge around what appears to be deficiencies in the * optimizer. If we make S_ssc_anything() add in the WARN_SUPER flag, * there are paths through the optimizer where it doesn't get weeded * out when it should. And if we don't make some extra provision for * it like the code just below, it doesn't get added when it should. * This solution is to add it only when AND'ing, which is here, and * only when what is being AND'ed is the pristine, original node * matching anything. Thus it is like adding it to ssc_anything() but * only when the result is to be AND'ed. Probably the same solution * could be adopted for the same problem we have with /l matching, * which is solved differently in S_ssc_init(), and that would lead to * fewer false positives than that solution has. But if this solution * creates bugs, the consequences are only that a warning isn't raised * that should be; while the consequences for having /l bugs is * incorrect matches */ if (ssc_is_anything((regnode_ssc *)and_with)) { anded_flags |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER; } } else { anded_cp_list = get_ANYOF_cp_list_for_ssc(pRExC_state, and_with); if (OP(and_with) == ANYOFD) { anded_flags = ANYOF_FLAGS(and_with) & ANYOF_COMMON_FLAGS; } else { anded_flags = ANYOF_FLAGS(and_with) &( ANYOF_COMMON_FLAGS |ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER |ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP); if (ANYOFL_UTF8_LOCALE_REQD(ANYOF_FLAGS(and_with))) { anded_flags &= ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD; } } } ANYOF_FLAGS(ssc) &= anded_flags; /* Below, C1 is the list of code points in 'ssc'; P1, its posix classes. * C2 is the list of code points in 'and_with'; P2, its posix classes. * 'and_with' may be inverted. When not inverted, we have the situation of * computing: * (C1 | P1) & (C2 | P2) * = (C1 & (C2 | P2)) | (P1 & (C2 | P2)) * = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2)) * <= ((C1 & C2) | P2)) | ( P1 | (P1 & P2)) * <= ((C1 & C2) | P1 | P2) * Alternatively, the last few steps could be: * = ((C1 & C2) | (C1 & P2)) | ((P1 & C2) | (P1 & P2)) * <= ((C1 & C2) | C1 ) | ( C2 | (P1 & P2)) * <= (C1 | C2 | (P1 & P2)) * We favor the second approach if either P1 or P2 is non-empty. This is * because these components are a barrier to doing optimizations, as what * they match cannot be known until the moment of matching, since they are * dependent on the current locale; 'AND'ing them likely will reduce or * eliminate them. * But we can do better if we know that C1,P1 are in their initial state (a * frequent occurrence), each matching everything: * (<everything>) & (C2 | P2) = C2 | P2 * Similarly, if C2,P2 are in their initial state (again a frequent * occurrence), the result is a no-op * (C1 | P1) & (<everything>) = C1 | P1 * * Inverted, we have * (C1 | P1) & ~(C2 | P2) = (C1 | P1) & (~C2 & ~P2) * = (C1 & (~C2 & ~P2)) | (P1 & (~C2 & ~P2)) * <= (C1 & ~C2) | (P1 & ~P2) * */ if ((ANYOF_FLAGS(and_with) & ANYOF_INVERT) && !
is_ANYOF_SYNTHETIC(and_with)) { unsigned int i; ssc_intersection(ssc, anded_cp_list, FALSE /* Has already been inverted */ ); /* If either P1 or P2 is empty, the intersection will be also; can skip * the loop */ if (! (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL)) { ANYOF_POSIXL_ZERO(ssc); } else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) { /* Note that the Posix class component P from 'and_with' actually * looks like: * P = Pa | Pb | ... | Pn * where each component is one posix class, such as in [\w\s]. * Thus * ~P = ~(Pa | Pb | ... | Pn) * = ~Pa & ~Pb & ... & ~Pn * <= ~Pa | ~Pb | ... | ~Pn * The last is something we can easily calculate, but unfortunately * is likely to have many false positives. We could do better * in some (but certainly not all) instances if two classes in * P have known relationships. For example * :lower: <= :alpha: <= :alnum: <= \w <= :graph: <= :print: * So * :lower: & :print: = :lower: * And similarly for classes that must be disjoint. For example, * since \s and \w can have no elements in common based on rules in * the POSIX standard, * \w & ^\S = nothing * Unfortunately, some vendor locales do not meet the Posix * standard, in particular almost everything by Microsoft. * The loop below just changes e.g., \w into \W and vice versa */ regnode_charclass_posixl temp; int add = 1; /* To calculate the index of the complement */ ANYOF_POSIXL_ZERO(&temp); for (i = 0; i < ANYOF_MAX; i++) { assert(i % 2 != 0 || ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i) || ! ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i + 1)); if (ANYOF_POSIXL_TEST((regnode_charclass_posixl*) and_with, i)) { ANYOF_POSIXL_SET(&temp, i + add); } add = 0 - add; /* 1 goes to -1; -1 goes to 1 */ } ANYOF_POSIXL_AND(&temp, ssc); } /* else ssc already has no posixes */ } /* else: Not inverted. This routine is a no-op if 'and_with' is an SSC in its initial state */ else if (! is_ANYOF_SYNTHETIC(and_with) || ! ssc_is_cp_posixl_init(pRExC_state, (regnode_ssc *)and_with)) { /* But if 'ssc' is in its initial state, the result is just 'and_with'; * copy it over 'ssc' */ if (ssc_is_cp_posixl_init(pRExC_state, ssc)) { if (is_ANYOF_SYNTHETIC(and_with)) { StructCopy(and_with, ssc, regnode_ssc); } else { ssc->invlist = anded_cp_list; ANYOF_POSIXL_ZERO(ssc); if (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL) { ANYOF_POSIXL_OR((regnode_charclass_posixl*) and_with, ssc); } } } else if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc) || (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL)) { /* One or the other of P1, P2 is non-empty. */ if (ANYOF_FLAGS(and_with) & ANYOF_MATCHES_POSIXL) { ANYOF_POSIXL_AND((regnode_charclass_posixl*) and_with, ssc); } ssc_union(ssc, anded_cp_list, FALSE); } else { /* P1 = P2 = empty */ ssc_intersection(ssc, anded_cp_list, FALSE); } } } STATIC void S_ssc_or(pTHX_ const RExC_state_t *pRExC_state, regnode_ssc *ssc, const regnode_charclass *or_with) { /* Accumulate into SSC 'ssc' its 'OR' with 'or_with', which is either * another SSC or a regular ANYOF class. Can create false positives if * 'or_with' is to be inverted. 
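* (The false positives come from discarding the ~P2 term in the derivation shown in the body below, leaving only an upper bound.)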
*/ SV* ored_cp_list; U8 ored_flags; PERL_ARGS_ASSERT_SSC_OR; assert(is_ANYOF_SYNTHETIC(ssc)); /* 'or_with' is used as-is if it too is an SSC; otherwise have to extract * the code point inversion list and just the relevant flags */ if (is_ANYOF_SYNTHETIC(or_with)) { ored_cp_list = ((regnode_ssc*) or_with)->invlist; ored_flags = ANYOF_FLAGS(or_with); } else { ored_cp_list = get_ANYOF_cp_list_for_ssc(pRExC_state, or_with); ored_flags = ANYOF_FLAGS(or_with) & ANYOF_COMMON_FLAGS; if (OP(or_with) != ANYOFD) { ored_flags |= ANYOF_FLAGS(or_with) & ( ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER |ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP); if (ANYOFL_UTF8_LOCALE_REQD(ANYOF_FLAGS(or_with))) { ored_flags |= ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD; } } } ANYOF_FLAGS(ssc) |= ored_flags; /* Below, C1 is the list of code points in 'ssc'; P1, its posix classes. * C2 is the list of code points in 'or_with'; P2, its posix classes. * 'or_with' may be inverted. When not inverted, we have the simple * situation of computing: * (C1 | P1) | (C2 | P2) = (C1 | C2) | (P1 | P2) * If P1|P2 yields a situation where both a class and its complement are * set, like having both \w and \W, this matches all code points, and we * can delete these from the P component of the ssc going forward. XXX We * might be able to delete all the P components, but I (khw) am not certain * about this, and it is better to be safe. * * Inverted, we have * (C1 | P1) | ~(C2 | P2) = (C1 | P1) | (~C2 & ~P2) * <= (C1 | P1) | ~C2 * <= (C1 | ~C2) | P1 * (which results in actually simpler code than the non-inverted case) * */ if ((ANYOF_FLAGS(or_with) & ANYOF_INVERT) && ! is_ANYOF_SYNTHETIC(or_with)) { /* We ignore P2, leaving P1 going forward */ } /* else Not inverted */ else if (ANYOF_FLAGS(or_with) & ANYOF_MATCHES_POSIXL) { ANYOF_POSIXL_OR((regnode_charclass_posixl*)or_with, ssc); if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) { unsigned int i; for (i = 0; i < ANYOF_MAX; i += 2) { if (ANYOF_POSIXL_TEST(ssc, i) && ANYOF_POSIXL_TEST(ssc, i + 1)) { ssc_match_all_cp(ssc); ANYOF_POSIXL_CLEAR(ssc, i); ANYOF_POSIXL_CLEAR(ssc, i+1); } } } } ssc_union(ssc, ored_cp_list, FALSE /* Already has been inverted */ ); } PERL_STATIC_INLINE void S_ssc_union(pTHX_ regnode_ssc *ssc, SV* const invlist, const bool invert2nd) { PERL_ARGS_ASSERT_SSC_UNION; assert(is_ANYOF_SYNTHETIC(ssc)); _invlist_union_maybe_complement_2nd(ssc->invlist, invlist, invert2nd, &ssc->invlist); } PERL_STATIC_INLINE void S_ssc_intersection(pTHX_ regnode_ssc *ssc, SV* const invlist, const bool invert2nd) { PERL_ARGS_ASSERT_SSC_INTERSECTION; assert(is_ANYOF_SYNTHETIC(ssc)); _invlist_intersection_maybe_complement_2nd(ssc->invlist, invlist, invert2nd, &ssc->invlist); } PERL_STATIC_INLINE void S_ssc_add_range(pTHX_ regnode_ssc *ssc, const UV start, const UV end) { PERL_ARGS_ASSERT_SSC_ADD_RANGE; assert(is_ANYOF_SYNTHETIC(ssc)); ssc->invlist = _add_range_to_invlist(ssc->invlist, start, end); } PERL_STATIC_INLINE void S_ssc_cp_and(pTHX_ regnode_ssc *ssc, const UV cp) { /* AND just the single code point 'cp' into the SSC 'ssc' */ SV* cp_list = _new_invlist(2); PERL_ARGS_ASSERT_SSC_CP_AND; assert(is_ANYOF_SYNTHETIC(ssc)); cp_list = add_cp_to_invlist(cp_list, cp); ssc_intersection(ssc, cp_list, FALSE /* Not inverted */ ); SvREFCNT_dec_NN(cp_list); } PERL_STATIC_INLINE void S_ssc_clear_locale(regnode_ssc *ssc) { /* Set the SSC 'ssc' to not match any locale things */ PERL_ARGS_ASSERT_SSC_CLEAR_LOCALE; assert(is_ANYOF_SYNTHETIC(ssc));
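/* Drop every posix class bit and all locale-dependent flags. */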
ANYOF_POSIXL_ZERO(ssc); ANYOF_FLAGS(ssc) &= ~ANYOF_LOCALE_FLAGS; } #define NON_OTHER_COUNT NON_OTHER_COUNT_FOR_USE_ONLY_BY_REGCOMP_DOT_C STATIC bool S_is_ssc_worth_it(const RExC_state_t * pRExC_state, const regnode_ssc * ssc) { /* The synthetic start class is used to hopefully quickly winnow down * places where a pattern could start a match in the target string. If it * doesn't really narrow things down that much, there isn't much point to * having the overhead of using it. This function uses some very crude * heuristics to decide whether to use the ssc or not. * * It returns TRUE if 'ssc' rules out more than half of what it considers to * be the "likely" possible matches, but of course it doesn't know what the * actual things being matched are going to be; these are only guesses. * * For /l matches, it assumes that the only likely matches are going to be * in the 0-255 range, uniformly distributed, so half of that is 127 * For /a and /d matches, it assumes that the likely matches will be just * the ASCII range, so half of that is 63 * For /u, when there isn't anything matching above the Latin1 range, it * assumes that that is the only range likely to be matched, and uses * half that as the cut-off: 127. If anything matches above Latin1, * it assumes that all of Unicode could match (uniformly), except for * non-Unicode code points and things in the General Category "Other" * (unassigned, private use, surrogates, controls and formats). This * is a much larger number. */ U32 count = 0; /* Running total of number of code points matched by 'ssc' */ UV start, end; /* Start and end points of current range in inversion list */ const U32 max_code_points = (LOC) ? 256 : (( ! UNI_SEMANTICS || invlist_highest(ssc->invlist) < 256) ? 128 : NON_OTHER_COUNT); const U32 max_match = max_code_points / 2; PERL_ARGS_ASSERT_IS_SSC_WORTH_IT; invlist_iterinit(ssc->invlist); while (invlist_iternext(ssc->invlist, &start, &end)) { if (start >= max_code_points) { break; } end = MIN(end, max_code_points - 1); count += end - start + 1; if (count >= max_match) { invlist_iterfinish(ssc->invlist); return FALSE; } } return TRUE; } STATIC void S_ssc_finalize(pTHX_ RExC_state_t *pRExC_state, regnode_ssc *ssc) { /* The inversion list in the SSC is marked mortal; now we need a more * permanent copy, which is stored the same way that is done in a regular * ANYOF node, with the first NUM_ANYOF_CODE_POINTS code points in a bit * map */ SV* invlist = invlist_clone(ssc->invlist); PERL_ARGS_ASSERT_SSC_FINALIZE; assert(is_ANYOF_SYNTHETIC(ssc)); /* The code in this file assumes that all but these flags aren't relevant * to the SSC, except SSC_MATCHES_EMPTY_STRING, which should be cleared * by the time we reach here */ assert(! (ANYOF_FLAGS(ssc) & ~( ANYOF_COMMON_FLAGS |ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER |ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP))); populate_ANYOF_from_invlist( (regnode *) ssc, &invlist); set_ANYOF_arg(pRExC_state, (regnode *) ssc, invlist, NULL, NULL, NULL, FALSE); /* Make sure is clone-safe */ ssc->invlist = NULL; if (ANYOF_POSIXL_SSC_TEST_ANY_SET(ssc)) { ANYOF_FLAGS(ssc) |= ANYOF_MATCHES_POSIXL; } if (RExC_contains_locale) { OP(ssc) = ANYOFL; } assert(!
(ANYOF_FLAGS(ssc) & ANYOF_LOCALE_FLAGS) || RExC_contains_locale); } #define TRIE_LIST_ITEM(state,idx) (trie->states[state].trans.list)[ idx ] #define TRIE_LIST_CUR(state) ( TRIE_LIST_ITEM( state, 0 ).forid ) #define TRIE_LIST_LEN(state) ( TRIE_LIST_ITEM( state, 0 ).newstate ) #define TRIE_LIST_USED(idx) ( trie->states[state].trans.list \ ? (TRIE_LIST_CUR( idx ) - 1) \ : 0 ) #ifdef DEBUGGING /* dump_trie(trie,widecharmap,revcharmap) dump_trie_interim_list(trie,widecharmap,revcharmap,next_alloc) dump_trie_interim_table(trie,widecharmap,revcharmap,next_alloc) These routines dump out a trie in a somewhat readable format. The _interim_ variants are used for debugging the interim tables that are used to generate the final compressed representation which is what dump_trie expects. Part of the reason for their existence is to provide a form of documentation as to how the different representations function. */ /* Dumps the final compressed table form of the trie to Perl_debug_log. Used for debugging make_trie(). */ STATIC void S_dump_trie(pTHX_ const struct _reg_trie_data *trie, HV *widecharmap, AV *revcharmap, U32 depth) { U32 state; SV *sv=sv_newmortal(); int colwidth= widecharmap ? 6 : 4; U16 word; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_DUMP_TRIE; Perl_re_indentf( aTHX_ "Char : %-6s%-6s%-4s ", depth+1, "Match","Base","Ofs" ); for( state = 0 ; state < trie->uniquecharcount ; state++ ) { SV ** const tmp = av_fetch( revcharmap, state, 0); if ( tmp ) { Perl_re_printf( aTHX_ "%*s", colwidth, pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth, PL_colors[0], PL_colors[1], (SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_ESCAPE_FIRSTCHAR ) ); } } Perl_re_printf( aTHX_ "\n"); Perl_re_indentf( aTHX_ "State|-----------------------", depth+1); for( state = 0 ; state < trie->uniquecharcount ; state++ ) Perl_re_printf( aTHX_ "%.*s", colwidth, "--------"); Perl_re_printf( aTHX_ "\n"); for( state = 1 ; state < trie->statecount ; state++ ) { const U32 base = trie->states[ state ].trans.base; Perl_re_indentf( aTHX_ "#%4" UVXf "|", depth+1, (UV)state); if ( trie->states[ state ].wordnum ) { Perl_re_printf( aTHX_ " W%4X", trie->states[ state ].wordnum ); } else { Perl_re_printf( aTHX_ "%6s", "" ); } Perl_re_printf( aTHX_ " @%4" UVXf " ", (UV)base ); if ( base ) { U32 ofs = 0; while( ( base + ofs < trie->uniquecharcount ) || ( base + ofs - trie->uniquecharcount < trie->lasttrans && trie->trans[ base + ofs - trie->uniquecharcount ].check != state)) ofs++; Perl_re_printf( aTHX_ "+%2" UVXf "[ ", (UV)ofs); for ( ofs = 0 ; ofs < trie->uniquecharcount ; ofs++ ) { if ( ( base + ofs >= trie->uniquecharcount ) && ( base + ofs - trie->uniquecharcount < trie->lasttrans ) && trie->trans[ base + ofs - trie->uniquecharcount ].check == state ) { Perl_re_printf( aTHX_ "%*" UVXf, colwidth, (UV)trie->trans[ base + ofs - trie->uniquecharcount ].next ); } else { Perl_re_printf( aTHX_ "%*s",colwidth," ." ); } } Perl_re_printf( aTHX_ "]"); } Perl_re_printf( aTHX_ "\n" ); } Perl_re_indentf( aTHX_ "word_info N:(prev,len)=", depth); for (word=1; word <= trie->wordcount; word++) { Perl_re_printf( aTHX_ " %d:(%d,%d)", (int)word, (int)(trie->wordinfo[word].prev), (int)(trie->wordinfo[word].len)); } Perl_re_printf( aTHX_ "\n" ); } /* Dumps a fully constructed but uncompressed trie in list form. List tries normally only are used for construction when the number of possible chars (trie->uniquecharcount) is very high. Used for debugging make_trie(). 
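(The table-form dumper further below serves the same purpose for the common case where trie->uniquecharcount is small.)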
*/ STATIC void S_dump_trie_interim_list(pTHX_ const struct _reg_trie_data *trie, HV *widecharmap, AV *revcharmap, U32 next_alloc, U32 depth) { U32 state; SV *sv=sv_newmortal(); int colwidth= widecharmap ? 6 : 4; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_DUMP_TRIE_INTERIM_LIST; /* print out the table precompression. */ Perl_re_indentf( aTHX_ "State :Word | Transition Data\n", depth+1 ); Perl_re_indentf( aTHX_ "%s", depth+1, "------:-----+-----------------\n" ); for( state=1 ; state < next_alloc ; state ++ ) { U16 charid; Perl_re_indentf( aTHX_ " %4" UVXf " :", depth+1, (UV)state ); if ( ! trie->states[ state ].wordnum ) { Perl_re_printf( aTHX_ "%5s| ",""); } else { Perl_re_printf( aTHX_ "W%4x| ", trie->states[ state ].wordnum ); } for( charid = 1 ; charid <= TRIE_LIST_USED( state ) ; charid++ ) { SV ** const tmp = av_fetch( revcharmap, TRIE_LIST_ITEM(state,charid).forid, 0); if ( tmp ) { Perl_re_printf( aTHX_ "%*s:%3X=%4" UVXf " | ", colwidth, pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth, PL_colors[0], PL_colors[1], (SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_ESCAPE_FIRSTCHAR ) , TRIE_LIST_ITEM(state,charid).forid, (UV)TRIE_LIST_ITEM(state,charid).newstate ); if (!(charid % 10)) Perl_re_printf( aTHX_ "\n%*s| ", (int)((depth * 2) + 14), ""); } } Perl_re_printf( aTHX_ "\n"); } } /* Dumps a fully constructed but uncompressed trie in table form. This is the normal DFA style state transition table, with a few twists to facilitate compression later. Used for debugging make_trie(). */ STATIC void S_dump_trie_interim_table(pTHX_ const struct _reg_trie_data *trie, HV *widecharmap, AV *revcharmap, U32 next_alloc, U32 depth) { U32 state; U16 charid; SV *sv=sv_newmortal(); int colwidth= widecharmap ? 6 : 4; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_DUMP_TRIE_INTERIM_TABLE; /* print out the table precompression so that we can do a visual check that they are identical. */ Perl_re_indentf( aTHX_ "Char : ", depth+1 ); for( charid = 0 ; charid < trie->uniquecharcount ; charid++ ) { SV ** const tmp = av_fetch( revcharmap, charid, 0); if ( tmp ) { Perl_re_printf( aTHX_ "%*s", colwidth, pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), colwidth, PL_colors[0], PL_colors[1], (SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_ESCAPE_FIRSTCHAR ) ); } } Perl_re_printf( aTHX_ "\n"); Perl_re_indentf( aTHX_ "State+-", depth+1 ); for( charid=0 ; charid < trie->uniquecharcount ; charid++ ) { Perl_re_printf( aTHX_ "%.*s", colwidth,"--------"); } Perl_re_printf( aTHX_ "\n" ); for( state=1 ; state < next_alloc ; state += trie->uniquecharcount ) { Perl_re_indentf( aTHX_ "%4" UVXf " : ", depth+1, (UV)TRIE_NODENUM( state ) ); for( charid = 0 ; charid < trie->uniquecharcount ; charid++ ) { UV v=(UV)SAFE_TRIE_NODENUM( trie->trans[ state + charid ].next ); if (v) Perl_re_printf( aTHX_ "%*" UVXf, colwidth, v ); else Perl_re_printf( aTHX_ "%*s", colwidth, "." ); } if ( ! trie->states[ TRIE_NODENUM( state ) ].wordnum ) { Perl_re_printf( aTHX_ " (%4" UVXf ")\n", (UV)trie->trans[ state ].check ); } else { Perl_re_printf( aTHX_ " (%4" UVXf ") W%4X\n", (UV)trie->trans[ state ].check, trie->states[ TRIE_NODENUM( state ) ].wordnum ); } } } #endif /* make_trie(startbranch,first,last,tail,word_count,flags,depth) startbranch: the first branch in the whole branch sequence first : start branch of sequence of branch-exact nodes. May be the same as startbranch last : Thing following the last branch. May be the same as tail. 
tail : item following the branch sequence count : words in the sequence flags : currently the OP() type we will be building one of /EXACT(|F|FA|FU|FU_SS|L|FLU8)/ depth : indent depth Inplace optimizes a sequence of 2 or more Branch-Exact nodes into a TRIE node. A trie is an N'ary tree where the branches are determined by digital decomposition of the key. IE, at the root node you look up the 1st character and follow that branch; repeat until you find the end of the branches. Nodes can be marked as "accepting" meaning they represent a complete word. Eg: /he|she|his|hers/ would convert into the following structure. Numbers represent states, letters following numbers represent valid transitions on the letter from that state, if the number is in square brackets it represents an accepting state, otherwise it will be in parentheses. +-h->+-e->[3]-+-r->(8)-+-s->[9] | | | (2) | | (1) +-i->(6)-+-s->[7] | +-s->(3)-+-h->(4)-+-e->[5] Accept Word Mapping: 3=>1 (he),5=>2 (she), 7=>3 (his), 9=>4 (hers) This shows that when matching against the string 'hers' we will begin at state 1, read 'h' and move to state 2, read 'e' and move to state 3 which is accepting, then read 'r' and go to state 8 followed by 's' which takes us to state 9 which is also accepting. Thus we know that we can match both 'he' and 'hers' with a single traverse. We store a mapping from each accepting state to the word that was matched, and then when we have multiple possibilities we try to complete the rest of the regex in the order in which they occurred in the alternation. The only prior NFA like behaviour that would be changed by the TRIE support is the silent ignoring of duplicate alternations which are of the form: / (DUPE|DUPE) X? (?{ ... }) Y /x Thus EVAL blocks following a trie may be called a different number of times with and without the optimisation. With the optimisations dupes will be silently ignored. This inconsistent behaviour of EVAL type nodes is well established as the following demonstrates: 'words'=~/(word|word|word)(?{ print $1 })[xyz]/ which prints out 'word' three times, but 'words'=~/(word|word|word)(?{ print $1 })S/ which doesn't print it out at all. This is due to other optimisations kicking in.
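As a rough illustrative sketch only (this is not the engine's code -- the real matcher lives in regexec.c and additionally handles UTF-8, folding, the first-byte bitmap and the jump table), walking the compressed transition table built below looks something like:

    state = 1;
    while (state && (charid = charmap[ *uc++ ])) {
        ofs   = states[ state ].trans.base + charid - uniquecharcount;
        state = (trans[ ofs ].check == state) ? trans[ ofs ].next : 0;
        if (state && states[ state ].wordnum)
            note_accepted_word(states[ state ].wordnum);
    }

where note_accepted_word() is a hypothetical stand-in for recording an accepting state, and the bounds checks and state==1 special case of TRIE_TRANS_STATE below are omitted.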
Example of what happens on a structural level: The regexp /(ac|ad|ab)+/ will produce the following debug output: 1: CURLYM[1] {1,32767}(18) 5: BRANCH(8) 6: EXACT <ac>(16) 8: BRANCH(11) 9: EXACT <ad>(16) 11: BRANCH(14) 12: EXACT <ab>(16) 16: SUCCEED(0) 17: NOTHING(18) 18: END(0) This would be optimizable with startbranch=5, first=5, last=16, tail=16 and should turn into: 1: CURLYM[1] {1,32767}(18) 5: TRIE(16) [Words:3 Chars Stored:6 Unique Chars:4 States:5 NCP:1] <ac> <ad> <ab> 16: SUCCEED(0) 17: NOTHING(18) 18: END(0) Cases where tail != last would be like /(?foo|bar)baz/: 1: BRANCH(4) 2: EXACT <foo>(8) 4: BRANCH(7) 5: EXACT <bar>(8) 7: TAIL(8) 8: EXACT <baz>(10) 10: END(0) which would be optimizable with startbranch=1, first=1, last=7, tail=8 and would end up looking like: 1: TRIE(8) [Words:2 Chars Stored:6 Unique Chars:5 States:7 NCP:1] <foo> <bar> 7: TAIL(8) 8: EXACT <baz>(10) 10: END(0) d = uvchr_to_utf8_flags(d, uv, 0); is the recommended Unicode-aware way of saying *(d++) = uv; */ #define TRIE_STORE_REVCHAR(val) \ STMT_START { \ if (UTF) { \ SV *zlopp = newSV(UTF8_MAXBYTES); \ unsigned char *flrbbbbb = (unsigned char *) SvPVX(zlopp); \ unsigned const char *const kapow = uvchr_to_utf8(flrbbbbb, val); \ SvCUR_set(zlopp, kapow - flrbbbbb); \ SvPOK_on(zlopp); \ SvUTF8_on(zlopp); \ av_push(revcharmap, zlopp); \ } else { \ char ooooff = (char)val; \ av_push(revcharmap, newSVpvn(&ooooff, 1)); \ } \ } STMT_END /* This gets the next character from the input, folding it if not already * folded. */ #define TRIE_READ_CHAR STMT_START { \ wordlen++; \ if ( UTF ) { \ /* if it is UTF then it is either already folded, or does not need \ * folding */ \ uvc = valid_utf8_to_uvchr( (const U8*) uc, &len); \ } \ else if (folder == PL_fold_latin1) { \ /* This folder implies Unicode rules, which in the range expressible \ * by not UTF is the lower case, with the two exceptions, one of \ * which should have been taken care of before calling this */ \ assert(*uc != LATIN_SMALL_LETTER_SHARP_S); \ uvc = toLOWER_L1(*uc); \ if (UNLIKELY(uvc == MICRO_SIGN)) uvc = GREEK_SMALL_LETTER_MU; \ len = 1; \ } else { \ /* raw data, will be folded later if needed */ \ uvc = (U32)*uc; \ len = 1; \ } \ } STMT_END #define TRIE_LIST_PUSH(state,fid,ns) STMT_START { \ if ( TRIE_LIST_CUR( state ) >=TRIE_LIST_LEN( state ) ) { \ U32 ging = TRIE_LIST_LEN( state ) * 2; \ Renew( trie->states[ state ].trans.list, ging, reg_trie_trans_le ); \ TRIE_LIST_LEN( state ) = ging; \ } \ TRIE_LIST_ITEM( state, TRIE_LIST_CUR( state ) ).forid = fid; \ TRIE_LIST_ITEM( state, TRIE_LIST_CUR( state ) ).newstate = ns; \ TRIE_LIST_CUR( state )++; \ } STMT_END #define TRIE_LIST_NEW(state) STMT_START { \ Newxz( trie->states[ state ].trans.list, \ 4, reg_trie_trans_le ); \ TRIE_LIST_CUR( state ) = 1; \ TRIE_LIST_LEN( state ) = 4; \ } STMT_END #define TRIE_HANDLE_WORD(state) STMT_START { \ U16 dupe= trie->states[ state ].wordnum; \ regnode * const noper_next = regnext( noper ); \ \ DEBUG_r({ \ /* store the word for dumping */ \ SV* tmp; \ if (OP(noper) != NOTHING) \ tmp = newSVpvn_utf8(STRING(noper), STR_LEN(noper), UTF); \ else \ tmp = newSVpvn_utf8( "", 0, UTF ); \ av_push( trie_words, tmp ); \ }); \ \ curword++; \ trie->wordinfo[curword].prev = 0; \ trie->wordinfo[curword].len = wordlen; \ trie->wordinfo[curword].accept = state; \ \ if ( noper_next < tail ) { \ if (!trie->jump) \ trie->jump = (U16 *) PerlMemShared_calloc( word_count + 1, \ sizeof(U16) ); \ trie->jump[curword] = (U16)(noper_next - convert); \ if (!jumper) \ jumper = noper_next; \ if 
(!nextbranch) \ nextbranch= regnext(cur); \ } \ \ if ( dupe ) { \ /* It's a dupe. Pre-insert into the wordinfo[].prev */\ /* chain, so that when the bits of chain are later */\ /* linked together, the dups appear in the chain */\ trie->wordinfo[curword].prev = trie->wordinfo[dupe].prev; \ trie->wordinfo[dupe].prev = curword; \ } else { \ /* we haven't inserted this word yet. */ \ trie->states[ state ].wordnum = curword; \ } \ } STMT_END #define TRIE_TRANS_STATE(state,base,ucharcount,charid,special) \ ( ( base + charid >= ucharcount \ && base + charid < ubound \ && state == trie->trans[ base - ucharcount + charid ].check \ && trie->trans[ base - ucharcount + charid ].next ) \ ? trie->trans[ base - ucharcount + charid ].next \ : ( state==1 ? special : 0 ) \ ) #define TRIE_BITMAP_SET_FOLDED(trie, uvc, folder) \ STMT_START { \ TRIE_BITMAP_SET(trie, uvc); \ /* store the folded codepoint */ \ if ( folder ) \ TRIE_BITMAP_SET(trie, folder[(U8) uvc ]); \ \ if ( !UTF ) { \ /* store first byte of utf8 representation of */ \ /* variant codepoints */ \ if (! UVCHR_IS_INVARIANT(uvc)) { \ TRIE_BITMAP_SET(trie, UTF8_TWO_BYTE_HI(uvc)); \ } \ } \ } STMT_END #define MADE_TRIE 1 #define MADE_JUMP_TRIE 2 #define MADE_EXACT_TRIE 4 STATIC I32 S_make_trie(pTHX_ RExC_state_t *pRExC_state, regnode *startbranch, regnode *first, regnode *last, regnode *tail, U32 word_count, U32 flags, U32 depth) { /* first pass, loop through and scan words */ reg_trie_data *trie; HV *widecharmap = NULL; AV *revcharmap = newAV(); regnode *cur; STRLEN len = 0; UV uvc = 0; U16 curword = 0; U32 next_alloc = 0; regnode *jumper = NULL; regnode *nextbranch = NULL; regnode *convert = NULL; U32 *prev_states; /* temp array mapping each state to previous one */ /* we just use folder as a flag in utf8 */ const U8 * folder = NULL; /* in the below add_data call we are storing either 'tu' or 'tuaa' * which stands for one trie structure, one hash, optionally followed * by two arrays */ #ifdef DEBUGGING const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("tuaa")); AV *trie_words = NULL; /* along with revcharmap, this only used during construction but both are * useful during debugging so we store them in the struct when debugging. */ #else const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("tu")); STRLEN trie_charcount=0; #endif SV *re_trie_maxbuff; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_MAKE_TRIE; #ifndef DEBUGGING PERL_UNUSED_ARG(depth); #endif switch (flags) { case EXACT: case EXACTL: break; case EXACTFA: case EXACTFU_SS: case EXACTFU: case EXACTFLU8: folder = PL_fold_latin1; break; case EXACTF: folder = PL_fold; break; default: Perl_croak( aTHX_ "panic! 
In trie construction, unknown node type %u %s", (unsigned) flags, PL_reg_name[flags] ); } trie = (reg_trie_data *) PerlMemShared_calloc( 1, sizeof(reg_trie_data) ); trie->refcount = 1; trie->startstate = 1; trie->wordcount = word_count; RExC_rxi->data->data[ data_slot ] = (void*)trie; trie->charmap = (U16 *) PerlMemShared_calloc( 256, sizeof(U16) ); if (flags == EXACT || flags == EXACTL) trie->bitmap = (char *) PerlMemShared_calloc( ANYOF_BITMAP_SIZE, 1 ); trie->wordinfo = (reg_trie_wordinfo *) PerlMemShared_calloc( trie->wordcount+1, sizeof(reg_trie_wordinfo)); DEBUG_r({ trie_words = newAV(); }); re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, 1); assert(re_trie_maxbuff); if (!SvIOK(re_trie_maxbuff)) { sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT); } DEBUG_TRIE_COMPILE_r({ Perl_re_indentf( aTHX_ "make_trie start==%d, first==%d, last==%d, tail==%d depth=%d\n", depth+1, REG_NODE_NUM(startbranch),REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(tail), (int)depth); }); /* Find the node we are going to overwrite */ if ( first == startbranch && OP( last ) != BRANCH ) { /* whole branch chain */ convert = first; } else { /* branch sub-chain */ convert = NEXTOPER( first ); } /* -- First loop and Setup -- We first traverse the branches and scan each word to determine if it contains widechars, and how many unique chars there are, this is important as we have to build a table with at least as many columns as we have unique chars. We use an array of integers to represent the character codes 0..255 (trie->charmap) and we use a an HV* to store Unicode characters. We use the native representation of the character value as the key and IV's for the coded index. *TODO* If we keep track of how many times each character is used we can remap the columns so that the table compression later on is more efficient in terms of memory by ensuring the most common value is in the middle and the least common are on the outside. IMO this would be better than a most to least common mapping as theres a decent chance the most common letter will share a node with the least common, meaning the node will not be compressible. With a middle is most common approach the worst case is when we have the least common nodes twice. */ for ( cur = first ; cur < last ; cur = regnext( cur ) ) { regnode *noper = NEXTOPER( cur ); const U8 *uc; const U8 *e; int foldlen = 0; U32 wordlen = 0; /* required init */ STRLEN minchars = 0; STRLEN maxchars = 0; bool set_bit = trie->bitmap ? 1 : 0; /*store the first char in the bitmap?*/ if (OP(noper) == NOTHING) { /* skip past a NOTHING at the start of an alternation * eg, /(?:)a|(?:b)/ should be the same as /a|b/ */ regnode *noper_next= regnext(noper); if (noper_next < tail) noper= noper_next; } if ( noper < tail && ( OP(noper) == flags || ( flags == EXACTFU && OP(noper) == EXACTFU_SS ) ) ) { uc= (U8*)STRING(noper); e= uc + STR_LEN(noper); } else { trie->minlen= 0; continue; } if ( set_bit ) { /* bitmap only alloced when !(UTF&&Folding) */ TRIE_BITMAP_SET(trie,*uc); /* store the raw first byte regardless of encoding */ if (OP( noper ) == EXACTFU_SS) { /* false positives are ok, so just set this */ TRIE_BITMAP_SET(trie, LATIN_SMALL_LETTER_SHARP_S); } } for ( ; uc < e ; uc += len ) { /* Look at each char in the current branch */ TRIE_CHARCOUNT(trie)++; TRIE_READ_CHAR; /* TRIE_READ_CHAR returns the current character, or its fold if /i * is in effect. Under /i, this character can match itself, or * anything that folds to it. If not under /i, it can match just * itself. 
Most folds are 1-1, for example k, K, and KELVIN SIGN * all fold to k, and all are single characters. But some folds * expand to more than one character, so for example LATIN SMALL * LIGATURE FFI folds to the three character sequence 'ffi'. If * the string beginning at 'uc' is 'ffi', it could be matched by * three characters, or just by the one ligature character. (It * could also be matched by two characters: LATIN SMALL LIGATURE FF * followed by 'i', or by 'f' followed by LATIN SMALL LIGATURE FI). * (Of course 'I' and/or 'F' instead of 'i' and 'f' can also * match.) The trie needs to know the minimum and maximum number * of characters that could match so that it can use size alone to * quickly reject many match attempts. The max is simple: it is * the number of folded characters in this branch (since a fold is * never shorter than what folds to it. */ maxchars++; /* And the min is equal to the max if not under /i (indicated by * 'folder' being NULL), or there are no multi-character folds. If * there is a multi-character fold, the min is incremented just * once, for the character that folds to the sequence. Each * character in the sequence needs to be added to the list below of * characters in the trie, but we count only the first towards the * min number of characters needed. This is done through the * variable 'foldlen', which is returned by the macros that look * for these sequences as the number of bytes the sequence * occupies. Each time through the loop, we decrement 'foldlen' by * how many bytes the current char occupies. Only when it reaches * 0 do we increment 'minchars' or look for another multi-character * sequence. */ if (folder == NULL) { minchars++; } else if (foldlen > 0) { foldlen -= (UTF) ? UTF8SKIP(uc) : 1; } else { minchars++; /* See if *uc is the beginning of a multi-character fold. If * so, we decrement the length remaining to look at, to account * for the current character this iteration. (We can use 'uc' * instead of the fold returned by TRIE_READ_CHAR because for * non-UTF, the latin1_safe macro is smart enough to account * for all the unfolded characters, and because for UTF, the * string will already have been folded earlier in the * compilation process */ if (UTF) { if ((foldlen = is_MULTI_CHAR_FOLD_utf8_safe(uc, e))) { foldlen -= UTF8SKIP(uc); } } else if ((foldlen = is_MULTI_CHAR_FOLD_latin1_safe(uc, e))) { foldlen--; } } /* The current character (and any potential folds) should be added * to the possible matching characters for this position in this * branch */ if ( uvc < 256 ) { if ( folder ) { U8 folded= folder[ (U8) uvc ]; if ( !trie->charmap[ folded ] ) { trie->charmap[ folded ]=( ++trie->uniquecharcount ); TRIE_STORE_REVCHAR( folded ); } } if ( !trie->charmap[ uvc ] ) { trie->charmap[ uvc ]=( ++trie->uniquecharcount ); TRIE_STORE_REVCHAR( uvc ); } if ( set_bit ) { /* store the codepoint in the bitmap, and its folded * equivalent. 
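                         * (As a hedged aside, not this file's macro: an
                         * ANYOF-style bitmap is one bit per byte value,
                         * so set and test are roughly
                         *     bitmap[uvc >> 3] |= 1 << (uvc & 7);
                         *     bitmap[uvc >> 3] &  1 << (uvc & 7);
                         * letting the matcher reject impossible first
                         * bytes with a single load and mask.)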
*/ TRIE_BITMAP_SET_FOLDED(trie, uvc, folder); set_bit = 0; /* We've done our bit :-) */ } } else { /* XXX We could come up with the list of code points that fold * to this using PL_utf8_foldclosures, except not for * multi-char folds, as there may be multiple combinations * there that could work, which needs to wait until runtime to * resolve (The comment about LIGATURE FFI above is such an * example */ SV** svpp; if ( !widecharmap ) widecharmap = newHV(); svpp = hv_fetch( widecharmap, (char*)&uvc, sizeof( UV ), 1 ); if ( !svpp ) Perl_croak( aTHX_ "error creating/fetching widecharmap entry for 0x%" UVXf, uvc ); if ( !SvTRUE( *svpp ) ) { sv_setiv( *svpp, ++trie->uniquecharcount ); TRIE_STORE_REVCHAR(uvc); } } } /* end loop through characters in this branch of the trie */ /* We take the min and max for this branch and combine to find the min * and max for all branches processed so far */ if( cur == first ) { trie->minlen = minchars; trie->maxlen = maxchars; } else if (minchars < trie->minlen) { trie->minlen = minchars; } else if (maxchars > trie->maxlen) { trie->maxlen = maxchars; } } /* end first pass */ DEBUG_TRIE_COMPILE_r( Perl_re_indentf( aTHX_ "TRIE(%s): W:%d C:%d Uq:%d Min:%d Max:%d\n", depth+1, ( widecharmap ? "UTF8" : "NATIVE" ), (int)word_count, (int)TRIE_CHARCOUNT(trie), trie->uniquecharcount, (int)trie->minlen, (int)trie->maxlen ) ); /* We now know what we are dealing with in terms of unique chars and string sizes so we can calculate how much memory a naive representation using a flat table will take. If it's over a reasonable limit (as specified by ${^RE_TRIE_MAXBUF}) we use a more memory conservative but potentially much slower representation using an array of lists. At the end we convert both representations into the same compressed form that will be used in regexec.c for matching with. The latter is a form that cannot be used to construct with but has memory properties similar to the list form and access properties similar to the table form making it both suitable for fast searches and small enough that its feasable to store for the duration of a program. See the comment in the code where the compressed table is produced inplace from the flat tabe representation for an explanation of how the compression works. */ Newx(prev_states, TRIE_CHARCOUNT(trie) + 2, U32); prev_states[1] = 0; if ( (IV)( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount + 1) > SvIV(re_trie_maxbuff) ) { /* Second Pass -- Array Of Lists Representation Each state will be represented by a list of charid:state records (reg_trie_trans_le) the first such element holds the CUR and LEN points of the allocated array. (See defines above). We build the initial structure using the lists, and then convert it into the compressed table form which allows faster lookups (but cant be modified once converted). 
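         (A hedged sketch of the shape this builds -- reg_trie_trans_le
         is the real type, but the layout shown here is illustrative
         rather than quoted from its definition:

             struct reg_trie_trans_le_sketch {
                 U16 forid;      // char id consumed by the transition
                 U32 newstate;   // state the transition leads to
             };

         states[n].trans.list is a growable array of these; the first
         slot carries the CUR/LEN bookkeeping that TRIE_LIST_NEW sets up
         and TRIE_LIST_PUSH maintains, so a lookup is a linear scan of
         at most TRIE_LIST_USED(n) entries.)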
*/ STRLEN transcount = 1; DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Compiling trie using list compiler\n", depth+1)); trie->states = (reg_trie_state *) PerlMemShared_calloc( TRIE_CHARCOUNT(trie) + 2, sizeof(reg_trie_state) ); TRIE_LIST_NEW(1); next_alloc = 2; for ( cur = first ; cur < last ; cur = regnext( cur ) ) { regnode *noper = NEXTOPER( cur ); U32 state = 1; /* required init */ U16 charid = 0; /* sanity init */ U32 wordlen = 0; /* required init */ if (OP(noper) == NOTHING) { regnode *noper_next= regnext(noper); if (noper_next < tail) noper= noper_next; } if ( noper < tail && ( OP(noper) == flags || ( flags == EXACTFU && OP(noper) == EXACTFU_SS ) ) ) { const U8 *uc= (U8*)STRING(noper); const U8 *e= uc + STR_LEN(noper); for ( ; uc < e ; uc += len ) { TRIE_READ_CHAR; if ( uvc < 256 ) { charid = trie->charmap[ uvc ]; } else { SV** const svpp = hv_fetch( widecharmap, (char*)&uvc, sizeof( UV ), 0); if ( !svpp ) { charid = 0; } else { charid=(U16)SvIV( *svpp ); } } /* charid is now 0 if we dont know the char read, or * nonzero if we do */ if ( charid ) { U16 check; U32 newstate = 0; charid--; if ( !trie->states[ state ].trans.list ) { TRIE_LIST_NEW( state ); } for ( check = 1; check <= TRIE_LIST_USED( state ); check++ ) { if ( TRIE_LIST_ITEM( state, check ).forid == charid ) { newstate = TRIE_LIST_ITEM( state, check ).newstate; break; } } if ( ! newstate ) { newstate = next_alloc++; prev_states[newstate] = state; TRIE_LIST_PUSH( state, charid, newstate ); transcount++; } state = newstate; } else { Perl_croak( aTHX_ "panic! In trie construction, no char mapping for %" IVdf, uvc ); } } } TRIE_HANDLE_WORD(state); } /* end second pass */ /* next alloc is the NEXT state to be allocated */ trie->statecount = next_alloc; trie->states = (reg_trie_state *) PerlMemShared_realloc( trie->states, next_alloc * sizeof(reg_trie_state) ); /* and now dump it out before we compress it */ DEBUG_TRIE_COMPILE_MORE_r(dump_trie_interim_list(trie, widecharmap, revcharmap, next_alloc, depth+1) ); trie->trans = (reg_trie_trans *) PerlMemShared_calloc( transcount, sizeof(reg_trie_trans) ); { U32 state; U32 tp = 0; U32 zp = 0; for( state=1 ; state < next_alloc ; state ++ ) { U32 base=0; /* DEBUG_TRIE_COMPILE_MORE_r( Perl_re_printf( aTHX_ "tp: %d zp: %d ",tp,zp) ); */ if (trie->states[state].trans.list) { U16 minid=TRIE_LIST_ITEM( state, 1).forid; U16 maxid=minid; U16 idx; for( idx = 2 ; idx <= TRIE_LIST_USED( state ) ; idx++ ) { const U16 forid = TRIE_LIST_ITEM( state, idx).forid; if ( forid < minid ) { minid=forid; } else if ( forid > maxid ) { maxid=forid; } } if ( transcount < tp + maxid - minid + 1) { transcount *= 2; trie->trans = (reg_trie_trans *) PerlMemShared_realloc( trie->trans, transcount * sizeof(reg_trie_trans) ); Zero( trie->trans + (transcount / 2), transcount / 2, reg_trie_trans ); } base = trie->uniquecharcount + tp - minid; if ( maxid == minid ) { U32 set = 0; for ( ; zp < tp ; zp++ ) { if ( ! 
trie->trans[ zp ].next ) { base = trie->uniquecharcount + zp - minid; trie->trans[ zp ].next = TRIE_LIST_ITEM( state, 1).newstate; trie->trans[ zp ].check = state; set = 1; break; } } if ( !set ) { trie->trans[ tp ].next = TRIE_LIST_ITEM( state, 1).newstate; trie->trans[ tp ].check = state; tp++; zp = tp; } } else { for ( idx=1; idx <= TRIE_LIST_USED( state ) ; idx++ ) { const U32 tid = base - trie->uniquecharcount + TRIE_LIST_ITEM( state, idx ).forid; trie->trans[ tid ].next = TRIE_LIST_ITEM( state, idx ).newstate; trie->trans[ tid ].check = state; } tp += ( maxid - minid + 1 ); } Safefree(trie->states[ state ].trans.list); } /* DEBUG_TRIE_COMPILE_MORE_r( Perl_re_printf( aTHX_ " base: %d\n",base); ); */ trie->states[ state ].trans.base=base; } trie->lasttrans = tp + 1; } } else { /* Second Pass -- Flat Table Representation. we dont use the 0 slot of either trans[] or states[] so we add 1 to each. We know that we will need Charcount+1 trans at most to store the data (one row per char at worst case) So we preallocate both structures assuming worst case. We then construct the trie using only the .next slots of the entry structs. We use the .check field of the first entry of the node temporarily to make compression both faster and easier by keeping track of how many non zero fields are in the node. Since trans are numbered from 1 any 0 pointer in the table is a FAIL transition. There are two terms at use here: state as a TRIE_NODEIDX() which is a number representing the first entry of the node, and state as a TRIE_NODENUM() which is the trans number. state 1 is TRIE_NODEIDX(1) and TRIE_NODENUM(1), state 2 is TRIE_NODEIDX(2) and TRIE_NODENUM(3) if there are 2 entrys per node. eg: A B A B 1. 2 4 1. 3 7 2. 0 3 3. 0 5 3. 0 0 5. 0 0 4. 0 0 7. 0 0 The table is internally in the right hand, idx form. However as we also have to deal with the states array which is indexed by nodenum we have to use TRIE_NODENUM() to convert. */ DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Compiling trie using table compiler\n", depth+1)); trie->trans = (reg_trie_trans *) PerlMemShared_calloc( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount + 1, sizeof(reg_trie_trans) ); trie->states = (reg_trie_state *) PerlMemShared_calloc( TRIE_CHARCOUNT(trie) + 2, sizeof(reg_trie_state) ); next_alloc = trie->uniquecharcount + 1; for ( cur = first ; cur < last ; cur = regnext( cur ) ) { regnode *noper = NEXTOPER( cur ); U32 state = 1; /* required init */ U16 charid = 0; /* sanity init */ U32 accept_state = 0; /* sanity init */ U32 wordlen = 0; /* required init */ if (OP(noper) == NOTHING) { regnode *noper_next= regnext(noper); if (noper_next < tail) noper= noper_next; } if ( noper < tail && ( OP(noper) == flags || ( flags == EXACTFU && OP(noper) == EXACTFU_SS ) ) ) { const U8 *uc= (U8*)STRING(noper); const U8 *e= uc + STR_LEN(noper); for ( ; uc < e ; uc += len ) { TRIE_READ_CHAR; if ( uvc < 256 ) { charid = trie->charmap[ uvc ]; } else { SV* const * const svpp = hv_fetch( widecharmap, (char*)&uvc, sizeof( UV ), 0); charid = svpp ? (U16)SvIV(*svpp) : 0; } if ( charid ) { charid--; if ( !trie->trans[ state + charid ].next ) { trie->trans[ state + charid ].next = next_alloc; trie->trans[ state ].check++; prev_states[TRIE_NODENUM(next_alloc)] = TRIE_NODENUM(state); next_alloc += trie->uniquecharcount; } state = trie->trans[ state + charid ].next; } else { Perl_croak( aTHX_ "panic! 
In trie construction, no char mapping for %" IVdf, uvc ); } /* charid is now 0 if we dont know the char read, or * nonzero if we do */ } } accept_state = TRIE_NODENUM( state ); TRIE_HANDLE_WORD(accept_state); } /* end second pass */ /* and now dump it out before we compress it */ DEBUG_TRIE_COMPILE_MORE_r(dump_trie_interim_table(trie, widecharmap, revcharmap, next_alloc, depth+1)); { /* * Inplace compress the table.* For sparse data sets the table constructed by the trie algorithm will be mostly 0/FAIL transitions or to put it another way mostly empty. (Note that leaf nodes will not contain any transitions.) This algorithm compresses the tables by eliminating most such transitions, at the cost of a modest bit of extra work during lookup: - Each states[] entry contains a .base field which indicates the index in the state[] array wheres its transition data is stored. - If .base is 0 there are no valid transitions from that node. - If .base is nonzero then charid is added to it to find an entry in the trans array. -If trans[states[state].base+charid].check!=state then the transition is taken to be a 0/Fail transition. Thus if there are fail transitions at the front of the node then the .base offset will point somewhere inside the previous nodes data (or maybe even into a node even earlier), but the .check field determines if the transition is valid. XXX - wrong maybe? The following process inplace converts the table to the compressed table: We first do not compress the root node 1,and mark all its .check pointers as 1 and set its .base pointer as 1 as well. This allows us to do a DFA construction from the compressed table later, and ensures that any .base pointers we calculate later are greater than 0. - We set 'pos' to indicate the first entry of the second node. - We then iterate over the columns of the node, finding the first and last used entry at l and m. We then copy l..m into pos..(pos+m-l), and set the .check pointers accordingly, and advance pos appropriately and repreat for the next node. Note that when we copy the next pointers we have to convert them from the original NODEIDX form to NODENUM form as the former is not valid post compression. - If a node has no transitions used we mark its base as 0 and do not advance the pos pointer. - If a node only has one transition we use a second pointer into the structure to fill in allocated fail transitions from other states. This pointer is independent of the main pointer and scans forward looking for null transitions that are allocated to a state. When it finds one it writes the single transition into the "hole". If the pointer doesnt find one the single transition is appended as normal. - Once compressed we can Renew/realloc the structures to release the excess space. See "Table-Compression Methods" in sec 3.9 of the Red Dragon, specifically Fig 3.47 and the associated pseudocode. demq */ const U32 laststate = TRIE_NODENUM( next_alloc ); U32 state, charid; U32 pos = 0, zp=0; trie->statecount = laststate; for ( state = 1 ; state < laststate ; state++ ) { U8 flag = 0; const U32 stateidx = TRIE_NODEIDX( state ); const U32 o_used = trie->trans[ stateidx ].check; U32 used = trie->trans[ stateidx ].check; trie->trans[ stateidx ].check = 0; for ( charid = 0; used && charid < trie->uniquecharcount; charid++ ) { if ( flag || trie->trans[ stateidx + charid ].next ) { if ( trie->trans[ stateidx + charid ].next ) { if (o_used == 1) { for ( ; zp < pos ; zp++ ) { if ( ! 
trie->trans[ zp ].next ) { break; } } trie->states[ state ].trans.base = zp + trie->uniquecharcount - charid ; trie->trans[ zp ].next = SAFE_TRIE_NODENUM( trie->trans[ stateidx + charid ].next ); trie->trans[ zp ].check = state; if ( ++zp > pos ) pos = zp; break; } used--; } if ( !flag ) { flag = 1; trie->states[ state ].trans.base = pos + trie->uniquecharcount - charid ; } trie->trans[ pos ].next = SAFE_TRIE_NODENUM( trie->trans[ stateidx + charid ].next ); trie->trans[ pos ].check = state; pos++; } } } trie->lasttrans = pos + 1; trie->states = (reg_trie_state *) PerlMemShared_realloc( trie->states, laststate * sizeof(reg_trie_state) ); DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Alloc: %d Orig: %" IVdf " elements, Final:%" IVdf ". Savings of %%%5.2f\n", depth+1, (int)( ( TRIE_CHARCOUNT(trie) + 1 ) * trie->uniquecharcount + 1 ), (IV)next_alloc, (IV)pos, ( ( next_alloc - pos ) * 100 ) / (double)next_alloc ); ); } /* end table compress */ } DEBUG_TRIE_COMPILE_MORE_r( Perl_re_indentf( aTHX_ "Statecount:%" UVxf " Lasttrans:%" UVxf "\n", depth+1, (UV)trie->statecount, (UV)trie->lasttrans) ); /* resize the trans array to remove unused space */ trie->trans = (reg_trie_trans *) PerlMemShared_realloc( trie->trans, trie->lasttrans * sizeof(reg_trie_trans) ); { /* Modify the program and insert the new TRIE node */ U8 nodetype =(U8)(flags & 0xFF); char *str=NULL; #ifdef DEBUGGING regnode *optimize = NULL; #ifdef RE_TRACK_PATTERN_OFFSETS U32 mjd_offset = 0; U32 mjd_nodelen = 0; #endif /* RE_TRACK_PATTERN_OFFSETS */ #endif /* DEBUGGING */ /* This means we convert either the first branch or the first Exact, depending on whether the thing following (in 'last') is a branch or not and whther first is the startbranch (ie is it a sub part of the alternation or is it the whole thing.) Assuming its a sub part we convert the EXACT otherwise we convert the whole branch sequence, including the first. */ /* Find the node we are going to overwrite */ if ( first != startbranch || OP( last ) == BRANCH ) { /* branch sub-chain */ NEXT_OFF( first ) = (U16)(last - first); #ifdef RE_TRACK_PATTERN_OFFSETS DEBUG_r({ mjd_offset= Node_Offset((convert)); mjd_nodelen= Node_Length((convert)); }); #endif /* whole branch chain */ } #ifdef RE_TRACK_PATTERN_OFFSETS else { DEBUG_r({ const regnode *nop = NEXTOPER( convert ); mjd_offset= Node_Offset((nop)); mjd_nodelen= Node_Length((nop)); }); } DEBUG_OPTIMISE_r( Perl_re_indentf( aTHX_ "MJD offset:%" UVuf " MJD length:%" UVuf "\n", depth+1, (UV)mjd_offset, (UV)mjd_nodelen) ); #endif /* But first we check to see if there is a common prefix we can split out as an EXACT and put in front of the TRIE node. */ trie->startstate= 1; if ( trie->bitmap && !widecharmap && !trie->jump ) { /* we want to find the first state that has more than * one transition, if that state is not the first state * then we have a common prefix which we can remove. */ U32 state; for ( state = 1 ; state < trie->statecount-1 ; state++ ) { U32 ofs = 0; I32 first_ofs = -1; /* keeps track of the ofs of the first transition, -1 means none */ U32 count = 0; const U32 base = trie->states[ state ].trans.base; /* does this state terminate an alternation? 
*/ if ( trie->states[state].wordnum ) count = 1; for ( ofs = 0 ; ofs < trie->uniquecharcount ; ofs++ ) { if ( ( base + ofs >= trie->uniquecharcount ) && ( base + ofs - trie->uniquecharcount < trie->lasttrans ) && trie->trans[ base + ofs - trie->uniquecharcount ].check == state ) { if ( ++count > 1 ) { /* we have more than one transition */ SV **tmp; U8 *ch; /* if this is the first state there is no common prefix * to extract, so we can exit */ if ( state == 1 ) break; tmp = av_fetch( revcharmap, ofs, 0); ch = (U8*)SvPV_nolen_const( *tmp ); /* if we are on count 2 then we need to initialize the * bitmap, and store the previous char if there was one * in it*/ if ( count == 2 ) { /* clear the bitmap */ Zero(trie->bitmap, ANYOF_BITMAP_SIZE, char); DEBUG_OPTIMISE_r( Perl_re_indentf( aTHX_ "New Start State=%" UVuf " Class: [", depth+1, (UV)state)); if (first_ofs >= 0) { SV ** const tmp = av_fetch( revcharmap, first_ofs, 0); const U8 * const ch = (U8*)SvPV_nolen_const( *tmp ); TRIE_BITMAP_SET_FOLDED(trie,*ch,folder); DEBUG_OPTIMISE_r( Perl_re_printf( aTHX_ "%s", (char*)ch) ); } } /* store the current firstchar in the bitmap */ TRIE_BITMAP_SET_FOLDED(trie,*ch,folder); DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "%s", ch)); } first_ofs = ofs; } } if ( count == 1 ) { /* This state has only one transition, its transition is part * of a common prefix - we need to concatenate the char it * represents to what we have so far. */ SV **tmp = av_fetch( revcharmap, first_ofs, 0); STRLEN len; char *ch = SvPV( *tmp, len ); DEBUG_OPTIMISE_r({ SV *sv=sv_newmortal(); Perl_re_indentf( aTHX_ "Prefix State: %" UVuf " Ofs:%" UVuf " Char='%s'\n", depth+1, (UV)state, (UV)first_ofs, pv_pretty(sv, SvPV_nolen_const(*tmp), SvCUR(*tmp), 6, PL_colors[0], PL_colors[1], (SvUTF8(*tmp) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_ESCAPE_FIRSTCHAR ) ); }); if ( state==1 ) { OP( convert ) = nodetype; str=STRING(convert); STR_LEN(convert)=0; } STR_LEN(convert) += len; while (len--) *str++ = *ch++; } else { #ifdef DEBUGGING if (state>1) DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "]\n")); #endif break; } } trie->prefixlen = (state-1); if (str) { regnode *n = convert+NODE_SZ_STR(convert); NEXT_OFF(convert) = NODE_SZ_STR(convert); trie->startstate = state; trie->minlen -= (state - 1); trie->maxlen -= (state - 1); #ifdef DEBUGGING /* At least the UNICOS C compiler choked on this * being argument to DEBUG_r(), so let's just have * it right here. */ if ( #ifdef PERL_EXT_RE_BUILD 1 #else DEBUG_r_TEST #endif ) { regnode *fix = convert; U32 word = trie->wordcount; mjd_nodelen++; Set_Node_Offset_Length(convert, mjd_offset, state - 1); while( ++fix < n ) { Set_Node_Offset_Length(fix, 0, 0); } while (word--) { SV ** const tmp = av_fetch( trie_words, word, 0 ); if (tmp) { if ( STR_LEN(convert) <= SvCUR(*tmp) ) sv_chop(*tmp, SvPV_nolen(*tmp) + STR_LEN(convert)); else sv_chop(*tmp, SvPV_nolen(*tmp) + SvCUR(*tmp)); } } } #endif if (trie->maxlen) { convert = n; } else { NEXT_OFF(convert) = (U16)(tail - convert); DEBUG_r(optimize= n); } } } if (!jumper) jumper = last; if ( trie->maxlen ) { NEXT_OFF( convert ) = (U16)(tail - convert); ARG_SET( convert, data_slot ); /* Store the offset to the first unabsorbed branch in jump[0], which is otherwise unused by the jump logic. We use this when dumping a trie and during optimisation. 
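           (Hedged sketch of the consumer side -- the real logic lives in
           regexec.c and may differ in detail: once the trie accepts word
           w, the matcher effectively resumes at

               regnode *tail_of_w = convert + trie->jump[w];

           so each nonzero jump[w] is a word-relative offset to the tail
           of the branch that word came from, leaving slot 0 free for the
           bookkeeping described here.)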
*/ if (trie->jump) trie->jump[0] = (U16)(nextbranch - convert); /* If the start state is not accepting (meaning there is no empty string/NOTHING) * and there is a bitmap * and the first "jump target" node we found leaves enough room * then convert the TRIE node into a TRIEC node, with the bitmap * embedded inline in the opcode - this is hypothetically faster. */ if ( !trie->states[trie->startstate].wordnum && trie->bitmap && ( (char *)jumper - (char *)convert) >= (int)sizeof(struct regnode_charclass) ) { OP( convert ) = TRIEC; Copy(trie->bitmap, ((struct regnode_charclass *)convert)->bitmap, ANYOF_BITMAP_SIZE, char); PerlMemShared_free(trie->bitmap); trie->bitmap= NULL; } else OP( convert ) = TRIE; /* store the type in the flags */ convert->flags = nodetype; DEBUG_r({ optimize = convert + NODE_STEP_REGNODE + regarglen[ OP( convert ) ]; }); /* XXX We really should free up the resource in trie now, as we won't use them - (which resources?) dmq */ } /* needed for dumping*/ DEBUG_r(if (optimize) { regnode *opt = convert; while ( ++opt < optimize) { Set_Node_Offset_Length(opt,0,0); } /* Try to clean up some of the debris left after the optimisation. */ while( optimize < jumper ) { mjd_nodelen += Node_Length((optimize)); OP( optimize ) = OPTIMIZED; Set_Node_Offset_Length(optimize,0,0); optimize++; } Set_Node_Offset_Length(convert,mjd_offset,mjd_nodelen); }); } /* end node insert */ /* Finish populating the prev field of the wordinfo array. Walk back * from each accept state until we find another accept state, and if * so, point the first word's .prev field at the second word. If the * second already has a .prev field set, stop now. This will be the * case either if we've already processed that word's accept state, * or that state had multiple words, and the overspill words were * already linked up earlier. */ { U16 word; U32 state; U16 prev; for (word=1; word <= trie->wordcount; word++) { prev = 0; if (trie->wordinfo[word].prev) continue; state = trie->wordinfo[word].accept; while (state) { state = prev_states[state]; if (!state) break; prev = trie->states[state].wordnum; if (prev) break; } trie->wordinfo[word].prev = prev; } Safefree(prev_states); } /* and now dump out the compressed format */ DEBUG_TRIE_COMPILE_r(dump_trie(trie, widecharmap, revcharmap, depth+1)); RExC_rxi->data->data[ data_slot + 1 ] = (void*)widecharmap; #ifdef DEBUGGING RExC_rxi->data->data[ data_slot + TRIE_WORDS_OFFSET ] = (void*)trie_words; RExC_rxi->data->data[ data_slot + 3 ] = (void*)revcharmap; #else SvREFCNT_dec_NN(revcharmap); #endif return trie->jump ? MADE_JUMP_TRIE : trie->startstate>1 ? MADE_EXACT_TRIE : MADE_TRIE; } STATIC regnode * S_construct_ahocorasick_from_trie(pTHX_ RExC_state_t *pRExC_state, regnode *source, U32 depth) { /* The Trie is constructed and compressed now so we can build a fail array if * it's needed This is basically the Aho-Corasick algorithm. Its from exercise 3.31 and 3.32 in the "Red Dragon" -- Compilers, principles, techniques, and tools. Aho, Sethi, Ullman 1985/88 ISBN 0-201-10088-6 We find the fail state for each state in the trie, this state is the longest proper suffix of the current state's 'word' that is also a proper prefix of another word in our trie. State 1 represents the word '' and is thus the default fail state. This allows the DFA not to have to restart after its tried and failed a word at a given point, it simply continues as though it had been matching the other word in the first place. 
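   (For reference, a hedged sketch of the textbook fail-link computation
   performed below -- the names and the flat delta[][] table are
   illustrative only; the real code walks the compressed tables via
   TRIE_TRANS_STATE:

       // delta[s][c] == 0 means FAIL; state 1 is the root
       static void compute_fail(U32 delta[][256], U32 *fail, U32 *queue,
                                U32 nstates)
       {
           U32 qr = 0, qw = 0, c;
           for (c = 0; c < 256; c++)
               if (delta[1][c]) {
                   fail[delta[1][c]] = 1;    // depth-1 states fail to root
                   queue[qw++] = delta[1][c];
               }
           while (qr < qw) {
               U32 s = queue[qr++];
               for (c = 0; c < 256; c++) {
                   U32 t = delta[s][c], f = fail[s];
                   if (!t) continue;
                   while (f != 1 && !delta[f][c])   // chase parent's fails
                       f = fail[f];
                   fail[t] = delta[f][c] && delta[f][c] != t ? delta[f][c] : 1;
                   queue[qw++] = t;
               }
           }
       }

   i.e. a breadth-first walk in which each state's fail link is its
   parent's fail chain advanced by the same character.)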
Consider 'abcdgu'=~/abcdefg|cdgu/ When we get to 'd' we are still matching the first word, we would encounter 'g' which would fail, which would bring us to the state representing 'd' in the second word where we would try 'g' and succeed, proceeding to match 'cdgu'. */ /* add a fail transition */ const U32 trie_offset = ARG(source); reg_trie_data *trie=(reg_trie_data *)RExC_rxi->data->data[trie_offset]; U32 *q; const U32 ucharcount = trie->uniquecharcount; const U32 numstates = trie->statecount; const U32 ubound = trie->lasttrans + ucharcount; U32 q_read = 0; U32 q_write = 0; U32 charid; U32 base = trie->states[ 1 ].trans.base; U32 *fail; reg_ac_data *aho; const U32 data_slot = add_data( pRExC_state, STR_WITH_LEN("T")); regnode *stclass; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_CONSTRUCT_AHOCORASICK_FROM_TRIE; PERL_UNUSED_CONTEXT; #ifndef DEBUGGING PERL_UNUSED_ARG(depth); #endif if ( OP(source) == TRIE ) { struct regnode_1 *op = (struct regnode_1 *) PerlMemShared_calloc(1, sizeof(struct regnode_1)); StructCopy(source,op,struct regnode_1); stclass = (regnode *)op; } else { struct regnode_charclass *op = (struct regnode_charclass *) PerlMemShared_calloc(1, sizeof(struct regnode_charclass)); StructCopy(source,op,struct regnode_charclass); stclass = (regnode *)op; } OP(stclass)+=2; /* convert the TRIE type to its AHO-CORASICK equivalent */ ARG_SET( stclass, data_slot ); aho = (reg_ac_data *) PerlMemShared_calloc( 1, sizeof(reg_ac_data) ); RExC_rxi->data->data[ data_slot ] = (void*)aho; aho->trie=trie_offset; aho->states=(reg_trie_state *)PerlMemShared_malloc( numstates * sizeof(reg_trie_state) ); Copy( trie->states, aho->states, numstates, reg_trie_state ); Newxz( q, numstates, U32); aho->fail = (U32 *) PerlMemShared_calloc( numstates, sizeof(U32) ); aho->refcount = 1; fail = aho->fail; /* initialize fail[0..1] to be 1 so that we always have a valid final fail state */ fail[ 0 ] = fail[ 1 ] = 1; for ( charid = 0; charid < ucharcount ; charid++ ) { const U32 newstate = TRIE_TRANS_STATE( 1, base, ucharcount, charid, 0 ); if ( newstate ) { q[ q_write ] = newstate; /* set to point at the root */ fail[ q[ q_write++ ] ]=1; } } while ( q_read < q_write) { const U32 cur = q[ q_read++ % numstates ]; base = trie->states[ cur ].trans.base; for ( charid = 0 ; charid < ucharcount ; charid++ ) { const U32 ch_state = TRIE_TRANS_STATE( cur, base, ucharcount, charid, 1 ); if (ch_state) { U32 fail_state = cur; U32 fail_base; do { fail_state = fail[ fail_state ]; fail_base = aho->states[ fail_state ].trans.base; } while ( !TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 ) ); fail_state = TRIE_TRANS_STATE( fail_state, fail_base, ucharcount, charid, 1 ); fail[ ch_state ] = fail_state; if ( !aho->states[ ch_state ].wordnum && aho->states[ fail_state ].wordnum ) { aho->states[ ch_state ].wordnum = aho->states[ fail_state ].wordnum; } q[ q_write++ % numstates] = ch_state; } } } /* restore fail[0..1] to 0 so that we "fall out" of the AC loop when we fail in state 1, this allows us to use the charclass scan to find a valid start char. This is based on the principle that theres a good chance the string being searched contains lots of stuff that cant be a start char. 
*/ fail[ 0 ] = fail[ 1 ] = 0; DEBUG_TRIE_COMPILE_r({ Perl_re_indentf( aTHX_ "Stclass Failtable (%" UVuf " states): 0", depth, (UV)numstates ); for( q_read=1; q_read<numstates; q_read++ ) { Perl_re_printf( aTHX_ ", %" UVuf, (UV)fail[q_read]); } Perl_re_printf( aTHX_ "\n"); }); Safefree(q); /*RExC_seen |= REG_TRIEDFA_SEEN;*/ return stclass; } /* The below joins as many adjacent EXACTish nodes as possible into a single * one. The regop may be changed if the node(s) contain certain sequences that * require special handling. The joining is only done if: * 1) there is room in the current conglomerated node to entirely contain the * next one. * 2) they are the exact same node type * * The adjacent nodes actually may be separated by NOTHING-kind nodes, and * these get optimized out * * XXX khw thinks this should be enhanced to fill EXACT (at least) nodes as full * as possible, even if that means splitting an existing node so that its first * part is moved to the preceeding node. This would maximise the efficiency of * memEQ during matching. Elsewhere in this file, khw proposes splitting * EXACTFish nodes into portions that don't change under folding vs those that * do. Those portions that don't change may be the only things in the pattern that * could be used to find fixed and floating strings. * * If a node is to match under /i (folded), the number of characters it matches * can be different than its character length if it contains a multi-character * fold. *min_subtract is set to the total delta number of characters of the * input nodes. * * And *unfolded_multi_char is set to indicate whether or not the node contains * an unfolded multi-char fold. This happens when whether the fold is valid or * not won't be known until runtime; namely for EXACTF nodes that contain LATIN * SMALL LETTER SHARP S, as only if the target string being matched against * turns out to be UTF-8 is that fold valid; and also for EXACTFL nodes whose * folding rules depend on the locale in force at runtime. (Multi-char folds * whose components are all above the Latin1 range are not run-time locale * dependent, and have already been folded by the time this function is * called.) * * This is as good a place as any to discuss the design of handling these * multi-character fold sequences. It's been wrong in Perl for a very long * time. There are three code points in Unicode whose multi-character folds * were long ago discovered to mess things up. The previous designs for * dealing with these involved assigning a special node for them. This * approach doesn't always work, as evidenced by this example: * "\xDFs" =~ /s\xDF/ui # Used to fail before these patches * Both sides fold to "sss", but if the pattern is parsed to create a node that * would match just the \xDF, it won't be able to handle the case where a * successful match would have to cross the node's boundary. The new approach * that hopefully generally solves the problem generates an EXACTFU_SS node * that is "sss" in this case. * * It turns out that there are problems with all multi-character folds, and not * just these three. Now the code is general, for all such cases. The * approach taken is: * 1) This routine examines each EXACTFish node that could contain multi- * character folded sequences. Since a single character can fold into * such a sequence, the minimum match length for this node is less than * the number of characters in the node. 
This routine returns in * *min_subtract how many characters to subtract from the the actual * length of the string to get a real minimum match length; it is 0 if * there are no multi-char foldeds. This delta is used by the caller to * adjust the min length of the match, and the delta between min and max, * so that the optimizer doesn't reject these possibilities based on size * constraints. * 2) For the sequence involving the Sharp s (\xDF), the node type EXACTFU_SS * is used for an EXACTFU node that contains at least one "ss" sequence in * it. For non-UTF-8 patterns and strings, this is the only case where * there is a possible fold length change. That means that a regular * EXACTFU node without UTF-8 involvement doesn't have to concern itself * with length changes, and so can be processed faster. regexec.c takes * advantage of this. Generally, an EXACTFish node that is in UTF-8 is * pre-folded by regcomp.c (except EXACTFL, some of whose folds aren't * known until runtime). This saves effort in regex matching. However, * the pre-folding isn't done for non-UTF8 patterns because the fold of * the MICRO SIGN requires UTF-8, and we don't want to slow things down by * forcing the pattern into UTF8 unless necessary. Also what EXACTF (and, * again, EXACTFL) nodes fold to isn't known until runtime. The fold * possibilities for the non-UTF8 patterns are quite simple, except for * the sharp s. All the ones that don't involve a UTF-8 target string are * members of a fold-pair, and arrays are set up for all of them so that * the other member of the pair can be found quickly. Code elsewhere in * this file makes sure that in EXACTFU nodes, the sharp s gets folded to * 'ss', even if the pattern isn't UTF-8. This avoids the issues * described in the next item. * 3) A problem remains for unfolded multi-char folds. (These occur when the * validity of the fold won't be known until runtime, and so must remain * unfolded for now. This happens for the sharp s in EXACTF and EXACTFA * nodes when the pattern isn't in UTF-8. (Note, BTW, that there cannot * be an EXACTF node with a UTF-8 pattern.) They also occur for various * folds in EXACTFL nodes, regardless of the UTF-ness of the pattern.) * The reason this is a problem is that the optimizer part of regexec.c * (probably unwittingly, in Perl_regexec_flags()) makes an assumption * that a character in the pattern corresponds to at most a single * character in the target string. (And I do mean character, and not byte * here, unlike other parts of the documentation that have never been * updated to account for multibyte Unicode.) sharp s in EXACTF and * EXACTFL nodes can match the two character string 'ss'; in EXACTFA nodes * it can match "\x{17F}\x{17F}". These, along with other ones in EXACTFL * nodes, violate the assumption, and they are the only instances where it * is violated. I'm reluctant to try to change the assumption, as the * code involved is impenetrable to me (khw), so instead the code here * punts. This routine examines EXACTFL nodes, and (when the pattern * isn't UTF-8) EXACTF and EXACTFA for such unfolded folds, and returns a * boolean indicating whether or not the node contains such a fold. When * it is true, the caller sets a flag that later causes the optimizer in * this file to not set values for the floating and fixed string lengths, * and thus avoids the optimizer code in regexec.c that makes the invalid * assumption. 
Thus, there is no optimization based on string lengths for * EXACTFL nodes that contain these few folds, nor for non-UTF8-pattern * EXACTF and EXACTFA nodes that contain the sharp s. (The reason the * assumption is wrong only in these cases is that all other non-UTF-8 * folds are 1-1; and, for UTF-8 patterns, we pre-fold all other folds to * their expanded versions. (Again, we can't prefold sharp s to 'ss' in * EXACTF nodes because we don't know at compile time if it actually * matches 'ss' or not. For EXACTF nodes it will match iff the target * string is in UTF-8. This is in contrast to EXACTFU nodes, where it * always matches; and EXACTFA where it never does. In an EXACTFA node in * a UTF-8 pattern, sharp s is folded to "\x{17F}\x{17F}, avoiding the * problem; but in a non-UTF8 pattern, folding it to that above-Latin1 * string would require the pattern to be forced into UTF-8, the overhead * of which we want to avoid. Similarly the unfolded multi-char folds in * EXACTFL nodes will match iff the locale at the time of match is a UTF-8 * locale.) * * Similarly, the code that generates tries doesn't currently handle * not-already-folded multi-char folds, and it looks like a pain to change * that. Therefore, trie generation of EXACTFA nodes with the sharp s * doesn't work. Instead, such an EXACTFA is turned into a new regnode, * EXACTFA_NO_TRIE, which the trie code knows not to handle. Most people * using /iaa matching will be doing so almost entirely with ASCII * strings, so this should rarely be encountered in practice */ #define JOIN_EXACT(scan,min_subtract,unfolded_multi_char, flags) \ if (PL_regkind[OP(scan)] == EXACT) \ join_exact(pRExC_state,(scan),(min_subtract),unfolded_multi_char, (flags),NULL,depth+1) STATIC U32 S_join_exact(pTHX_ RExC_state_t *pRExC_state, regnode *scan, UV *min_subtract, bool *unfolded_multi_char, U32 flags,regnode *val, U32 depth) { /* Merge several consecutive EXACTish nodes into one. */ regnode *n = regnext(scan); U32 stringok = 1; regnode *next = scan + NODE_SZ_STR(scan); U32 merged = 0; U32 stopnow = 0; #ifdef DEBUGGING regnode *stop = scan; GET_RE_DEBUG_FLAGS_DECL; #else PERL_UNUSED_ARG(depth); #endif PERL_ARGS_ASSERT_JOIN_EXACT; #ifndef EXPERIMENTAL_INPLACESCAN PERL_UNUSED_ARG(flags); PERL_UNUSED_ARG(val); #endif DEBUG_PEEP("join", scan, depth, 0); /* Look through the subsequent nodes in the chain. Skip NOTHING, merge * EXACT ones that are mergeable to the current one. 
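     * (Hedged illustration: the merge step below is in essence just
     *
     *     Move(STRING(n), STRING(scan) + oldl, STR_LEN(n), char);
     *     STR_LEN(scan)  += STR_LEN(n);
     *     NEXT_OFF(scan) += NEXT_OFF(n);
     *
     * i.e. append the second node's string payload to the first and
     * grow the first node's next-offset, after which node n is dead
     * and, under DEBUGGING, gets stamped OPTIMIZED so dumps stay
     * readable.)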
*/ while (n && (PL_regkind[OP(n)] == NOTHING || (stringok && OP(n) == OP(scan))) && NEXT_OFF(n) && NEXT_OFF(scan) + NEXT_OFF(n) < I16_MAX) { if (OP(n) == TAIL || n > next) stringok = 0; if (PL_regkind[OP(n)] == NOTHING) { DEBUG_PEEP("skip:", n, depth, 0); NEXT_OFF(scan) += NEXT_OFF(n); next = n + NODE_STEP_REGNODE; #ifdef DEBUGGING if (stringok) stop = n; #endif n = regnext(n); } else if (stringok) { const unsigned int oldl = STR_LEN(scan); regnode * const nnext = regnext(n); /* XXX I (khw) kind of doubt that this works on platforms (should * Perl ever run on one) where U8_MAX is above 255 because of lots * of other assumptions */ /* Don't join if the sum can't fit into a single node */ if (oldl + STR_LEN(n) > U8_MAX) break; DEBUG_PEEP("merg", n, depth, 0); merged++; NEXT_OFF(scan) += NEXT_OFF(n); STR_LEN(scan) += STR_LEN(n); next = n + NODE_SZ_STR(n); /* Now we can overwrite *n : */ Move(STRING(n), STRING(scan) + oldl, STR_LEN(n), char); #ifdef DEBUGGING stop = next - 1; #endif n = nnext; if (stopnow) break; } #ifdef EXPERIMENTAL_INPLACESCAN if (flags && !NEXT_OFF(n)) { DEBUG_PEEP("atch", val, depth, 0); if (reg_off_by_arg[OP(n)]) { ARG_SET(n, val - n); } else { NEXT_OFF(n) = val - n; } stopnow = 1; } #endif } *min_subtract = 0; *unfolded_multi_char = FALSE; /* Here, all the adjacent mergeable EXACTish nodes have been merged. We * can now analyze for sequences of problematic code points. (Prior to * this final joining, sequences could have been split over boundaries, and * hence missed). The sequences only happen in folding, hence for any * non-EXACT EXACTish node */ if (OP(scan) != EXACT && OP(scan) != EXACTL) { U8* s0 = (U8*) STRING(scan); U8* s = s0; U8* s_end = s0 + STR_LEN(scan); int total_count_delta = 0; /* Total delta number of characters that multi-char folds expand to */ /* One pass is made over the node's string looking for all the * possibilities. To avoid some tests in the loop, there are two main * cases, for UTF-8 patterns (which can't have EXACTF nodes) and * non-UTF-8 */ if (UTF) { U8* folded = NULL; if (OP(scan) == EXACTFL) { U8 *d; /* An EXACTFL node would already have been changed to another * node type unless there is at least one character in it that * is problematic; likely a character whose fold definition * won't be known until runtime, and so has yet to be folded. * For all but the UTF-8 locale, folds are 1-1 in length, but * to handle the UTF-8 case, we need to create a temporary * folded copy using UTF-8 locale rules in order to analyze it. * This is because our macros that look to see if a sequence is * a multi-char fold assume everything is folded (otherwise the * tests in those macros would be too complicated and slow). * Note that here, the non-problematic folds will have already * been done, so we can just copy such characters. We actually * don't completely fold the EXACTFL string. We skip the * unfolded multi-char folds, as that would just create work * below to figure out the size they already are */ Newx(folded, UTF8_MAX_FOLD_CHAR_EXPAND * STR_LEN(scan) + 1, U8); d = folded; while (s < s_end) { STRLEN s_len = UTF8SKIP(s); if (! 
is_PROBLEMATIC_LOCALE_FOLD_utf8(s)) { Copy(s, d, s_len, U8); d += s_len; } else if (is_FOLDS_TO_MULTI_utf8(s)) { *unfolded_multi_char = TRUE; Copy(s, d, s_len, U8); d += s_len; } else if (isASCII(*s)) { *(d++) = toFOLD(*s); } else { STRLEN len; _toFOLD_utf8_flags(s, s_end, d, &len, FOLD_FLAGS_FULL); d += len; } s += s_len; } /* Point the remainder of the routine to look at our temporary * folded copy */ s = folded; s_end = d; } /* End of creating folded copy of EXACTFL string */ /* Examine the string for a multi-character fold sequence. UTF-8 * patterns have all characters pre-folded by the time this code is * executed */ while (s < s_end - 1) /* Can stop 1 before the end, as minimum length sequence we are looking for is 2 */ { int count = 0; /* How many characters in a multi-char fold */ int len = is_MULTI_CHAR_FOLD_utf8_safe(s, s_end); if (! len) { /* Not a multi-char fold: get next char */ s += UTF8SKIP(s); continue; } /* Nodes with 'ss' require special handling, except for * EXACTFA-ish for which there is no multi-char fold to this */ if (len == 2 && *s == 's' && *(s+1) == 's' && OP(scan) != EXACTFA && OP(scan) != EXACTFA_NO_TRIE) { count = 2; if (OP(scan) != EXACTFL) { OP(scan) = EXACTFU_SS; } s += 2; } else { /* Here is a generic multi-char fold. */ U8* multi_end = s + len; /* Count how many characters are in it. In the case of * /aa, no folds which contain ASCII code points are * allowed, so check for those, and skip if found. */ if (OP(scan) != EXACTFA && OP(scan) != EXACTFA_NO_TRIE) { count = utf8_length(s, multi_end); s = multi_end; } else { while (s < multi_end) { if (isASCII(*s)) { s++; goto next_iteration; } else { s += UTF8SKIP(s); } count++; } } } /* The delta is how long the sequence is minus 1 (1 is how long * the character that folds to the sequence is) */ total_count_delta += count - 1; next_iteration: ; } /* We created a temporary folded copy of the string in EXACTFL * nodes. Therefore we need to be sure it doesn't go below zero, * as the real string could be shorter */ if (OP(scan) == EXACTFL) { int total_chars = utf8_length((U8*) STRING(scan), (U8*) STRING(scan) + STR_LEN(scan)); if (total_count_delta > total_chars) { total_count_delta = total_chars; } } *min_subtract += total_count_delta; Safefree(folded); } else if (OP(scan) == EXACTFA) { /* Non-UTF-8 pattern, EXACTFA node. There can't be a multi-char * fold to the ASCII range (and there are no existing ones in the * upper latin1 range). But, as outlined in the comments preceding * this function, we need to flag any occurrences of the sharp s. * This character forbids trie formation (because of added * complexity) */ #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) while (s < s_end) { if (*s == LATIN_SMALL_LETTER_SHARP_S) { OP(scan) = EXACTFA_NO_TRIE; *unfolded_multi_char = TRUE; break; } s++; } } else { /* Non-UTF-8 pattern, not EXACTFA node. Look for the multi-char * folds that are all Latin1. As explained in the comments * preceding this function, we look also for the sharp s in EXACTF * and EXACTFL nodes; it can be in the final position. Otherwise * we can stop looking 1 byte earlier because have to find at least * two characters for a multi-fold */ const U8* upper = (OP(scan) == EXACTF || OP(scan) == EXACTFL) ? s_end : s_end -1; while (s < upper) { int len = is_MULTI_CHAR_FOLD_latin1_safe(s, s_end); if (! len) { /* Not a multi-char fold. 
*/ if (*s == LATIN_SMALL_LETTER_SHARP_S && (OP(scan) == EXACTF || OP(scan) == EXACTFL)) { *unfolded_multi_char = TRUE; } s++; continue; } if (len == 2 && isALPHA_FOLD_EQ(*s, 's') && isALPHA_FOLD_EQ(*(s+1), 's')) { /* EXACTF nodes need to know that the minimum length * changed so that a sharp s in the string can match this * ss in the pattern, but they remain EXACTF nodes, as they * won't match this unless the target string is is UTF-8, * which we don't know until runtime. EXACTFL nodes can't * transform into EXACTFU nodes */ if (OP(scan) != EXACTF && OP(scan) != EXACTFL) { OP(scan) = EXACTFU_SS; } } *min_subtract += len - 1; s += len; } #endif } } #ifdef DEBUGGING /* Allow dumping but overwriting the collection of skipped * ops and/or strings with fake optimized ops */ n = scan + NODE_SZ_STR(scan); while (n <= stop) { OP(n) = OPTIMIZED; FLAGS(n) = 0; NEXT_OFF(n) = 0; n++; } #endif DEBUG_OPTIMISE_r(if (merged){DEBUG_PEEP("finl", scan, depth, 0);}); return stopnow; } /* REx optimizer. Converts nodes into quicker variants "in place". Finds fixed substrings. */ /* Stops at toplevel WHILEM as well as at "last". At end *scanp is set to the position after last scanned or to NULL. */ #define INIT_AND_WITHP \ assert(!and_withp); \ Newx(and_withp,1, regnode_ssc); \ SAVEFREEPV(and_withp) static void S_unwind_scan_frames(pTHX_ const void *p) { scan_frame *f= (scan_frame *)p; do { scan_frame *n= f->next_frame; Safefree(f); f= n; } while (f); } STATIC SSize_t S_study_chunk(pTHX_ RExC_state_t *pRExC_state, regnode **scanp, SSize_t *minlenp, SSize_t *deltap, regnode *last, scan_data_t *data, I32 stopparen, U32 recursed_depth, regnode_ssc *and_withp, U32 flags, U32 depth) /* scanp: Start here (read-write). */ /* deltap: Write maxlen-minlen here. */ /* last: Stop before this one. */ /* data: string data about the pattern */ /* stopparen: treat close N as END */ /* recursed: which subroutines have we recursed into */ /* and_withp: Valid if flags & SCF_DO_STCLASS_OR */ { /* There must be at least this number of characters to match */ SSize_t min = 0; I32 pars = 0, code; regnode *scan = *scanp, *next; SSize_t delta = 0; int is_inf = (flags & SCF_DO_SUBSTR) && (data->flags & SF_IS_INF); int is_inf_internal = 0; /* The studied chunk is infinite */ I32 is_par = OP(scan) == OPEN ? 
ARG(scan) : 0; scan_data_t data_fake; SV *re_trie_maxbuff = NULL; regnode *first_non_open = scan; SSize_t stopmin = SSize_t_MAX; scan_frame *frame = NULL; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_STUDY_CHUNK; RExC_study_started= 1; if ( depth == 0 ) { while (first_non_open && OP(first_non_open) == OPEN) first_non_open=regnext(first_non_open); } fake_study_recurse: DEBUG_r( RExC_study_chunk_recursed_count++; ); DEBUG_OPTIMISE_MORE_r( { Perl_re_indentf( aTHX_ "study_chunk stopparen=%ld recursed_count=%lu depth=%lu recursed_depth=%lu scan=%p last=%p", depth, (long)stopparen, (unsigned long)RExC_study_chunk_recursed_count, (unsigned long)depth, (unsigned long)recursed_depth, scan, last); if (recursed_depth) { U32 i; U32 j; for ( j = 0 ; j < recursed_depth ; j++ ) { for ( i = 0 ; i < (U32)RExC_npar ; i++ ) { if ( PAREN_TEST(RExC_study_chunk_recursed + ( j * RExC_study_chunk_recursed_bytes), i ) && ( !j || !PAREN_TEST(RExC_study_chunk_recursed + (( j - 1 ) * RExC_study_chunk_recursed_bytes), i) ) ) { Perl_re_printf( aTHX_ " %d",(int)i); break; } } if ( j + 1 < recursed_depth ) { Perl_re_printf( aTHX_ ","); } } } Perl_re_printf( aTHX_ "\n"); } ); while ( scan && OP(scan) != END && scan < last ){ UV min_subtract = 0; /* How mmany chars to subtract from the minimum node length to get a real minimum (because the folded version may be shorter) */ bool unfolded_multi_char = FALSE; /* Peephole optimizer: */ DEBUG_STUDYDATA("Peep", data, depth, is_inf); DEBUG_PEEP("Peep", scan, depth, flags); /* The reason we do this here is that we need to deal with things like * /(?:f)(?:o)(?:o)/ which cant be dealt with by the normal EXACT * parsing code, as each (?:..) is handled by a different invocation of * reg() -- Yves */ JOIN_EXACT(scan,&min_subtract, &unfolded_multi_char, 0); /* Follow the next-chain of the current node and optimize away all the NOTHINGs from it. */ if (OP(scan) != CURLYX) { const int max = (reg_off_by_arg[OP(scan)] ? I32_MAX /* I32 may be smaller than U16 on CRAYs! */ : (I32_MAX < U16_MAX ? I32_MAX : U16_MAX)); int off = (reg_off_by_arg[OP(scan)] ? ARG(scan) : NEXT_OFF(scan)); int noff; regnode *n = scan; /* Skip NOTHING and LONGJMP. */ while ((n = regnext(n)) && ((PL_regkind[OP(n)] == NOTHING && (noff = NEXT_OFF(n))) || ((OP(n) == LONGJMP) && (noff = ARG(n)))) && off + noff < max) off += noff; if (reg_off_by_arg[OP(scan)]) ARG(scan) = off; else NEXT_OFF(scan) = off; } /* The principal pseudo-switch. Cannot be a switch, since we look into several different things. */ if ( OP(scan) == DEFINEP ) { SSize_t minlen = 0; SSize_t deltanext = 0; SSize_t fake_last_close = 0; I32 f = SCF_IN_DEFINE; StructCopy(&zero_scan_data, &data_fake, scan_data_t); scan = regnext(scan); assert( OP(scan) == IFTHEN ); DEBUG_PEEP("expect IFTHEN", scan, depth, flags); data_fake.last_closep= &fake_last_close; minlen = *minlenp; next = regnext(scan); scan = NEXTOPER(NEXTOPER(scan)); DEBUG_PEEP("scan", scan, depth, flags); DEBUG_PEEP("next", next, depth, flags); /* we suppose the run is continuous, last=next... * NOTE we dont use the return here! */ (void)study_chunk(pRExC_state, &scan, &minlen, &deltanext, next, &data_fake, stopparen, recursed_depth, NULL, f, depth+1); scan = next; } else if ( OP(scan) == BRANCH || OP(scan) == BRANCHJ || OP(scan) == IFTHEN ) { next = regnext(scan); code = OP(scan); /* The op(next)==code check below is to see if we * have "BRANCH-BRANCH", "BRANCHJ-BRANCHJ", "IFTHEN-IFTHEN" * IFTHEN is special as it might not appear in pairs. 
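             * (For example, /foo|bar|baz/ compiles to the chain
             *      BRANCH -> EXACT <foo>
             *      BRANCH -> EXACT <bar>
             *      BRANCH -> EXACT <baz>
             * where each BRANCH's regnext is the following BRANCH,
             * which is the BRANCH-BRANCH shape tested for here.)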
* Not sure whether BRANCH-BRANCHJ is possible, regardless * we dont handle it cleanly. */ if (OP(next) == code || code == IFTHEN) { /* NOTE - There is similar code to this block below for * handling TRIE nodes on a re-study. If you change stuff here * check there too. */ SSize_t max1 = 0, min1 = SSize_t_MAX, num = 0; regnode_ssc accum; regnode * const startbranch=scan; if (flags & SCF_DO_SUBSTR) { /* Cannot merge strings after this. */ scan_commit(pRExC_state, data, minlenp, is_inf); } if (flags & SCF_DO_STCLASS) ssc_init_zero(pRExC_state, &accum); while (OP(scan) == code) { SSize_t deltanext, minnext, fake; I32 f = 0; regnode_ssc this_class; DEBUG_PEEP("Branch", scan, depth, flags); num++; StructCopy(&zero_scan_data, &data_fake, scan_data_t); if (data) { data_fake.whilem_c = data->whilem_c; data_fake.last_closep = data->last_closep; } else data_fake.last_closep = &fake; data_fake.pos_delta = delta; next = regnext(scan); scan = NEXTOPER(scan); /* everything */ if (code != BRANCH) /* everything but BRANCH */ scan = NEXTOPER(scan); if (flags & SCF_DO_STCLASS) { ssc_init(pRExC_state, &this_class); data_fake.start_class = &this_class; f = SCF_DO_STCLASS_AND; } if (flags & SCF_WHILEM_VISITED_POS) f |= SCF_WHILEM_VISITED_POS; /* we suppose the run is continuous, last=next...*/ minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext, next, &data_fake, stopparen, recursed_depth, NULL, f,depth+1); if (min1 > minnext) min1 = minnext; if (deltanext == SSize_t_MAX) { is_inf = is_inf_internal = 1; max1 = SSize_t_MAX; } else if (max1 < minnext + deltanext) max1 = minnext + deltanext; scan = next; if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (data_fake.flags & SCF_SEEN_ACCEPT) { if ( stopmin > minnext) stopmin = min + min1; flags &= ~SCF_DO_SUBSTR; if (data) data->flags |= SCF_SEEN_ACCEPT; } if (data) { if (data_fake.flags & SF_HAS_EVAL) data->flags |= SF_HAS_EVAL; data->whilem_c = data_fake.whilem_c; } if (flags & SCF_DO_STCLASS) ssc_or(pRExC_state, &accum, (regnode_charclass*)&this_class); } if (code == IFTHEN && num < 2) /* Empty ELSE branch */ min1 = 0; if (flags & SCF_DO_SUBSTR) { data->pos_min += min1; if (data->pos_delta >= SSize_t_MAX - (max1 - min1)) data->pos_delta = SSize_t_MAX; else data->pos_delta += max1 - min1; if (max1 != min1 || is_inf) data->cur_is_floating = 1; } min += min1; if (delta == SSize_t_MAX || SSize_t_MAX - delta - (max1 - min1) < 0) delta = SSize_t_MAX; else delta += max1 - min1; if (flags & SCF_DO_STCLASS_OR) { ssc_or(pRExC_state, data->start_class, (regnode_charclass*) &accum); if (min1) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); flags &= ~SCF_DO_STCLASS; } } else if (flags & SCF_DO_STCLASS_AND) { if (min1) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum); flags &= ~SCF_DO_STCLASS; } else { /* Switch to OR mode: cache the old value of * data->start_class */ INIT_AND_WITHP; StructCopy(data->start_class, and_withp, regnode_ssc); flags &= ~SCF_DO_STCLASS_AND; StructCopy(&accum, data->start_class, regnode_ssc); flags |= SCF_DO_STCLASS_OR; } } if (PERL_ENABLE_TRIE_OPTIMISATION && OP( startbranch ) == BRANCH ) { /* demq. Assuming this was/is a branch we are dealing with: 'scan' now points at the item that follows the branch sequence, whatever it is. 
We now start at the beginning of the sequence and look for subsequences of BRANCH->EXACT=>x1 BRANCH->EXACT=>x2 tail which would be constructed from a pattern like /A|LIST|OF|WORDS/ If we can find such a subsequence we need to turn the first element into a trie and then add the subsequent branch exact strings to the trie. We have two cases 1. patterns where the whole set of branches can be converted. 2. patterns where only a subset can be converted. In case 1 we can replace the whole set with a single regop for the trie. In case 2 we need to keep the start and end branches so 'BRANCH EXACT; BRANCH EXACT; BRANCH X' becomes BRANCH TRIE; BRANCH X; There is an additional case, that being where there is a common prefix, which gets split out into an EXACT like node preceding the TRIE node. If x(1..n)==tail then we can do a simple trie, if not we make a "jump" trie, such that when we match the appropriate word we "jump" to the appropriate tail node. Essentially we turn a nested if into a case structure of sorts. */ int made=0; if (!re_trie_maxbuff) { re_trie_maxbuff = get_sv(RE_TRIE_MAXBUF_NAME, 1); if (!SvIOK(re_trie_maxbuff)) sv_setiv(re_trie_maxbuff, RE_TRIE_MAXBUF_INIT); } if ( SvIV(re_trie_maxbuff)>=0 ) { regnode *cur; regnode *first = (regnode *)NULL; regnode *last = (regnode *)NULL; regnode *tail = scan; U8 trietype = 0; U32 count=0; /* var tail is used because there may be a TAIL regop in the way. Ie, the exacts will point to the thing following the TAIL, but the last branch will point at the TAIL. So we advance tail. If we have nested (?:) we may have to move through several tails. */ while ( OP( tail ) == TAIL ) { /* this is the TAIL generated by (?:) */ tail = regnext( tail ); } DEBUG_TRIE_COMPILE_r({ regprop(RExC_rx, RExC_mysv, tail, NULL, pRExC_state); Perl_re_indentf( aTHX_ "%s %" UVuf ":%s\n", depth+1, "Looking for TRIE'able sequences. Tail node is ", (UV)(tail - RExC_emit_start), SvPV_nolen_const( RExC_mysv ) ); }); /* Step through the branches cur represents each branch, noper is the first thing to be matched as part of that branch noper_next is the regnext() of that node. We normally handle a case like this /FOO[xyz]|BAR[pqr]/ via a "jump trie" but we also support building with NOJUMPTRIE, which restricts the trie logic to structures like /FOO|BAR/. If noper is a trieable nodetype then the branch is a possible optimization target. If we are building under NOJUMPTRIE then we require that noper_next is the same as scan (our current position in the regex program). Once we have two or more consecutive such branches we can create a trie of the EXACT's contents and stitch it in place into the program. If the sequence represents all of the branches in the alternation we replace the entire thing with a single TRIE node. Otherwise when it is a subsequence we need to stitch it in place and replace only the relevant branches. This means the first branch has to remain as it is used by the alternation logic, and its next pointer, and needs to be repointed at the item on the branch chain following the last branch we have optimized away. This could be either a BRANCH, in which case the subsequence is internal, or it could be the item following the branch sequence in which case the subsequence is at the end (which does not necessarily mean the first node is the start of the alternation). TRIE_TYPE(X) is a define which maps the optype to a trietype. 
optype | trietype ----------------+----------- NOTHING | NOTHING EXACT | EXACT EXACTFU | EXACTFU EXACTFU_SS | EXACTFU EXACTFA | EXACTFA EXACTL | EXACTL EXACTFLU8 | EXACTFLU8 */ #define TRIE_TYPE(X) ( ( NOTHING == (X) ) \ ? NOTHING \ : ( EXACT == (X) ) \ ? EXACT \ : ( EXACTFU == (X) || EXACTFU_SS == (X) ) \ ? EXACTFU \ : ( EXACTFA == (X) ) \ ? EXACTFA \ : ( EXACTL == (X) ) \ ? EXACTL \ : ( EXACTFLU8 == (X) ) \ ? EXACTFLU8 \ : 0 ) /* dont use tail as the end marker for this traverse */ for ( cur = startbranch ; cur != scan ; cur = regnext( cur ) ) { regnode * const noper = NEXTOPER( cur ); U8 noper_type = OP( noper ); U8 noper_trietype = TRIE_TYPE( noper_type ); #if defined(DEBUGGING) || defined(NOJUMPTRIE) regnode * const noper_next = regnext( noper ); U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0; U8 noper_next_trietype = (noper_next && noper_next < tail) ? TRIE_TYPE( noper_next_type ) :0; #endif DEBUG_TRIE_COMPILE_r({ regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state); Perl_re_indentf( aTHX_ "- %d:%s (%d)", depth+1, REG_NODE_NUM(cur), SvPV_nolen_const( RExC_mysv ), REG_NODE_NUM(cur) ); regprop(RExC_rx, RExC_mysv, noper, NULL, pRExC_state); Perl_re_printf( aTHX_ " -> %d:%s", REG_NODE_NUM(noper), SvPV_nolen_const(RExC_mysv)); if ( noper_next ) { regprop(RExC_rx, RExC_mysv, noper_next, NULL, pRExC_state); Perl_re_printf( aTHX_ "\t=> %d:%s\t", REG_NODE_NUM(noper_next), SvPV_nolen_const(RExC_mysv)); } Perl_re_printf( aTHX_ "(First==%d,Last==%d,Cur==%d,tt==%s,ntt==%s,nntt==%s)\n", REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur), PL_reg_name[trietype], PL_reg_name[noper_trietype], PL_reg_name[noper_next_trietype] ); }); /* Is noper a trieable nodetype that can be merged * with the current trie (if there is one)? */ if ( noper_trietype && ( ( noper_trietype == NOTHING ) || ( trietype == NOTHING ) || ( trietype == noper_trietype ) ) #ifdef NOJUMPTRIE && noper_next >= tail #endif && count < U16_MAX) { /* Handle mergable triable node Either we are * the first node in a new trieable sequence, * in which case we do some bookkeeping, * otherwise we update the end pointer. */ if ( !first ) { first = cur; if ( noper_trietype == NOTHING ) { #if !defined(DEBUGGING) && !defined(NOJUMPTRIE) regnode * const noper_next = regnext( noper ); U8 noper_next_type = (noper_next && noper_next < tail) ? OP(noper_next) : 0; U8 noper_next_trietype = noper_next_type ? TRIE_TYPE( noper_next_type ) :0; #endif if ( noper_next_trietype ) { trietype = noper_next_trietype; } else if (noper_next_type) { /* a NOTHING regop is 1 regop wide. * We need at least two for a trie * so we can't merge this in */ first = NULL; } } else { trietype = noper_trietype; } } else { if ( trietype == NOTHING ) trietype = noper_trietype; last = cur; } if (first) count++; } /* end handle mergable triable node */ else { /* handle unmergable node - * noper may either be a triable node which can * not be tried together with the current trie, * or a non triable node */ if ( last ) { /* If last is set and trietype is not * NOTHING then we have found at least two * triable branch sequences in a row of a * similar trietype so we can turn them * into a trie. If/when we allow NOTHING to * start a trie sequence this condition * will be required, and it isn't expensive * so we leave it in for now. 
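                         * (Editorial example, assumed: in a pattern like
                         * /aa|bb|c+d|ee|ff/ the c+d branch is not trieable,
                         * so reaching it with last set would emit one trie
                         * covering aa|bb here, while ee|ff would later form
                         * a second, separate trie after the loop ends.)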
*/ if ( trietype && trietype != NOTHING ) make_trie( pRExC_state, startbranch, first, cur, tail, count, trietype, depth+1 ); last = NULL; /* note: we clear/update first, trietype etc below, so we dont do it here */ } if ( noper_trietype #ifdef NOJUMPTRIE && noper_next >= tail #endif ){ /* noper is triable, so we can start a new * trie sequence */ count = 1; first = cur; trietype = noper_trietype; } else if (first) { /* if we already saw a first but the * current node is not triable then we have * to reset the first information. */ count = 0; first = NULL; trietype = 0; } } /* end handle unmergable node */ } /* loop over branches */ DEBUG_TRIE_COMPILE_r({ regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state); Perl_re_indentf( aTHX_ "- %s (%d) <SCAN FINISHED> ", depth+1, SvPV_nolen_const( RExC_mysv ),REG_NODE_NUM(cur)); Perl_re_printf( aTHX_ "(First==%d, Last==%d, Cur==%d, tt==%s)\n", REG_NODE_NUM(first), REG_NODE_NUM(last), REG_NODE_NUM(cur), PL_reg_name[trietype] ); }); if ( last && trietype ) { if ( trietype != NOTHING ) { /* the last branch of the sequence was part of * a trie, so we have to construct it here * outside of the loop */ made= make_trie( pRExC_state, startbranch, first, scan, tail, count, trietype, depth+1 ); #ifdef TRIE_STUDY_OPT if ( ((made == MADE_EXACT_TRIE && startbranch == first) || ( first_non_open == first )) && depth==0 ) { flags |= SCF_TRIE_RESTUDY; if ( startbranch == first && scan >= tail ) { RExC_seen &=~REG_TOP_LEVEL_BRANCHES_SEEN; } } #endif } else { /* at this point we know whatever we have is a * NOTHING sequence/branch AND if 'startbranch' * is 'first' then we can turn the whole thing * into a NOTHING */ if ( startbranch == first ) { regnode *opt; /* the entire thing is a NOTHING sequence, * something like this: (?:|) So we can * turn it into a plain NOTHING op. */ DEBUG_TRIE_COMPILE_r({ regprop(RExC_rx, RExC_mysv, cur, NULL, pRExC_state); Perl_re_indentf( aTHX_ "- %s (%d) <NOTHING BRANCH SEQUENCE>\n", depth+1, SvPV_nolen_const( RExC_mysv ),REG_NODE_NUM(cur)); }); OP(startbranch)= NOTHING; NEXT_OFF(startbranch)= tail - startbranch; for ( opt= startbranch + 1; opt < tail ; opt++ ) OP(opt)= OPTIMIZED; } } } /* end if ( last) */ } /* TRIE_MAXBUF is non zero */ } /* do trie */ } else if ( code == BRANCHJ ) { /* single branch is optimized. */ scan = NEXTOPER(NEXTOPER(scan)); } else /* single branch is optimized. */ scan = NEXTOPER(scan); continue; } else if (OP(scan) == SUSPEND || OP(scan) == GOSUB) { I32 paren = 0; regnode *start = NULL; regnode *end = NULL; U32 my_recursed_depth= recursed_depth; if (OP(scan) != SUSPEND) { /* GOSUB */ /* Do setup, note this code has side effects beyond * the rest of this block. Specifically setting * RExC_recurse[] must happen at least once during * study_chunk(). */ paren = ARG(scan); RExC_recurse[ARG2L(scan)] = scan; start = RExC_open_parens[paren]; end = RExC_close_parens[paren]; /* NOTE we MUST always execute the above code, even * if we do nothing with a GOSUB */ if ( ( flags & SCF_IN_DEFINE ) || ( (is_inf_internal || is_inf || (data && data->flags & SF_IS_INF)) && ( (flags & (SCF_DO_STCLASS | SCF_DO_SUBSTR)) == 0 ) ) ) { /* no need to do anything here if we are in a define. */ /* or we are after some kind of infinite construct * so we can skip recursing into this item. * Since it is infinite we will not change the maxlen * or delta, and if we miss something that might raise * the minlen it will merely pessimise a little. 
* * Iow /(?(DEFINE)(?<foo>foo|food))a+(?&foo)/ * might result in a minlen of 1 and not of 4, * but this doesn't make us mismatch, just try a bit * harder than we should. * */ scan= regnext(scan); continue; } if ( !recursed_depth || !PAREN_TEST(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes), paren) ) { /* it is quite possible that there are more efficient ways * to do this. We maintain a bitmap per level of recursion * of which patterns we have entered so we can detect if a * pattern creates a possible infinite loop. When we * recurse down a level we copy the previous levels bitmap * down. When we are at recursion level 0 we zero the top * level bitmap. It would be nice to implement a different * more efficient way of doing this. In particular the top * level bitmap may be unnecessary. */ if (!recursed_depth) { Zero(RExC_study_chunk_recursed, RExC_study_chunk_recursed_bytes, U8); } else { Copy(RExC_study_chunk_recursed + ((recursed_depth-1) * RExC_study_chunk_recursed_bytes), RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes), RExC_study_chunk_recursed_bytes, U8); } /* we havent recursed into this paren yet, so recurse into it */ DEBUG_STUDYDATA("gosub-set", data, depth, is_inf); PAREN_SET(RExC_study_chunk_recursed + (recursed_depth * RExC_study_chunk_recursed_bytes), paren); my_recursed_depth= recursed_depth + 1; } else { DEBUG_STUDYDATA("gosub-inf", data, depth, is_inf); /* some form of infinite recursion, assume infinite length * */ if (flags & SCF_DO_SUBSTR) { scan_commit(pRExC_state, data, minlenp, is_inf); data->cur_is_floating = 1; } is_inf = is_inf_internal = 1; if (flags & SCF_DO_STCLASS_OR) /* Allow everything */ ssc_anything(data->start_class); flags &= ~SCF_DO_STCLASS; start= NULL; /* reset start so we dont recurse later on. */ } } else { paren = stopparen; start = scan + 2; end = regnext(scan); } if (start) { scan_frame *newframe; assert(end); if (!RExC_frame_last) { Newxz(newframe, 1, scan_frame); SAVEDESTRUCTOR_X(S_unwind_scan_frames, newframe); RExC_frame_head= newframe; RExC_frame_count++; } else if (!RExC_frame_last->next_frame) { Newxz(newframe,1,scan_frame); RExC_frame_last->next_frame= newframe; newframe->prev_frame= RExC_frame_last; RExC_frame_count++; } else { newframe= RExC_frame_last->next_frame; } RExC_frame_last= newframe; newframe->next_regnode = regnext(scan); newframe->last_regnode = last; newframe->stopparen = stopparen; newframe->prev_recursed_depth = recursed_depth; newframe->this_prev_frame= frame; DEBUG_STUDYDATA("frame-new", data, depth, is_inf); DEBUG_PEEP("fnew", scan, depth, flags); frame = newframe; scan = start; stopparen = paren; last = end; depth = depth + 1; recursed_depth= my_recursed_depth; continue; } } else if (OP(scan) == EXACT || OP(scan) == EXACTL) { SSize_t l = STR_LEN(scan); UV uc; assert(l); if (UTF) { const U8 * const s = (U8*)STRING(scan); uc = utf8_to_uvchr_buf(s, s + l, NULL); l = utf8_length(s, s + l); } else { uc = *((U8*)STRING(scan)); } min += l; if (flags & SCF_DO_SUBSTR) { /* Update longest substr. */ /* The code below prefers earlier match for fixed offset, later match for variable offset. */ if (data->last_end == -1) { /* Update the start info. */ data->last_start_min = data->pos_min; data->last_start_max = is_inf ? SSize_t_MAX : data->pos_min + data->pos_delta; } sv_catpvn(data->last_found, STRING(scan), STR_LEN(scan)); if (UTF) SvUTF8_on(data->last_found); { SV * const sv = data->last_found; MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ? 
mg_find(sv, PERL_MAGIC_utf8) : NULL; if (mg && mg->mg_len >= 0) mg->mg_len += utf8_length((U8*)STRING(scan), (U8*)STRING(scan)+STR_LEN(scan)); } data->last_end = data->pos_min + l; data->pos_min += l; /* As in the first entry. */ data->flags &= ~SF_BEFORE_EOL; } /* ANDing the code point leaves at most it, and not in locale, and * can't match null string */ if (flags & SCF_DO_STCLASS_AND) { ssc_cp_and(data->start_class, uc); ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; ssc_clear_locale(data->start_class); } else if (flags & SCF_DO_STCLASS_OR) { ssc_add_cp(data->start_class, uc); ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); /* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */ ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; } flags &= ~SCF_DO_STCLASS; } else if (PL_regkind[OP(scan)] == EXACT) { /* But OP != EXACT!, so is EXACTFish */ SSize_t l = STR_LEN(scan); const U8 * s = (U8*)STRING(scan); /* Search for fixed substrings supports EXACT only. */ if (flags & SCF_DO_SUBSTR) { assert(data); scan_commit(pRExC_state, data, minlenp, is_inf); } if (UTF) { l = utf8_length(s, s + l); } if (unfolded_multi_char) { RExC_seen |= REG_UNFOLDED_MULTI_SEEN; } min += l - min_subtract; assert (min >= 0); delta += min_subtract; if (flags & SCF_DO_SUBSTR) { data->pos_min += l - min_subtract; if (data->pos_min < 0) { data->pos_min = 0; } data->pos_delta += min_subtract; if (min_subtract) { data->cur_is_floating = 1; /* float */ } } if (flags & SCF_DO_STCLASS) { SV* EXACTF_invlist = _make_exactf_invlist(pRExC_state, scan); assert(EXACTF_invlist); if (flags & SCF_DO_STCLASS_AND) { if (OP(scan) != EXACTFL) ssc_clear_locale(data->start_class); ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; ANYOF_POSIXL_ZERO(data->start_class); ssc_intersection(data->start_class, EXACTF_invlist, FALSE); } else { /* SCF_DO_STCLASS_OR */ ssc_union(data->start_class, EXACTF_invlist, FALSE); ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); /* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */ ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; } flags &= ~SCF_DO_STCLASS; SvREFCNT_dec(EXACTF_invlist); } } else if (REGNODE_VARIES(OP(scan))) { SSize_t mincount, maxcount, minnext, deltanext, pos_before = 0; I32 fl = 0, f = flags; regnode * const oscan = scan; regnode_ssc this_class; regnode_ssc *oclass = NULL; I32 next_is_eval = 0; switch (PL_regkind[OP(scan)]) { case WHILEM: /* End of (?:...)* . */ scan = NEXTOPER(scan); goto finish; case PLUS: if (flags & (SCF_DO_SUBSTR | SCF_DO_STCLASS)) { next = NEXTOPER(scan); if (OP(next) == EXACT || OP(next) == EXACTL || (flags & SCF_DO_STCLASS)) { mincount = 1; maxcount = REG_INFTY; next = regnext(scan); scan = NEXTOPER(scan); goto do_curly; } } if (flags & SCF_DO_SUBSTR) data->pos_min++; min++; /* FALLTHROUGH */ case STAR: if (flags & SCF_DO_STCLASS) { mincount = 0; maxcount = REG_INFTY; next = regnext(scan); scan = NEXTOPER(scan); goto do_curly; } if (flags & SCF_DO_SUBSTR) { scan_commit(pRExC_state, data, minlenp, is_inf); /* Cannot extend fixed substrings */ data->cur_is_floating = 1; /* float */ } is_inf = is_inf_internal = 1; scan = regnext(scan); goto optimize_curly_tail; case CURLY: if (stopparen>0 && (OP(scan)==CURLYN || OP(scan)==CURLYM) && (scan->flags == stopparen)) { mincount = 1; maxcount = 1; } else { mincount = ARG1(scan); maxcount = ARG2(scan); } next = regnext(scan); if (OP(scan) == CURLYX) { I32 lp = (data ? 
*(data->last_closep) : 0); scan->flags = ((lp <= (I32)U8_MAX) ? (U8)lp : U8_MAX); } scan = NEXTOPER(scan) + EXTRA_STEP_2ARGS; next_is_eval = (OP(scan) == EVAL); do_curly: if (flags & SCF_DO_SUBSTR) { if (mincount == 0) scan_commit(pRExC_state, data, minlenp, is_inf); /* Cannot extend fixed substrings */ pos_before = data->pos_min; } if (data) { fl = data->flags; data->flags &= ~(SF_HAS_PAR|SF_IN_PAR|SF_HAS_EVAL); if (is_inf) data->flags |= SF_IS_INF; } if (flags & SCF_DO_STCLASS) { ssc_init(pRExC_state, &this_class); oclass = data->start_class; data->start_class = &this_class; f |= SCF_DO_STCLASS_AND; f &= ~SCF_DO_STCLASS_OR; } /* Exclude from super-linear cache processing any {n,m} regops for which the combination of input pos and regex pos is not enough information to determine if a match will be possible. For example, in the regex /foo(bar\s*){4,8}baz/ with the regex pos at the \s*, the prospects for a match depend not only on the input position but also on how many (bar\s*) repeats into the {4,8} we are. */ if ((mincount > 1) || (maxcount > 1 && maxcount != REG_INFTY)) f &= ~SCF_WHILEM_VISITED_POS; /* This will finish on WHILEM, setting scan, or on NULL: */ minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext, last, data, stopparen, recursed_depth, NULL, (mincount == 0 ? (f & ~SCF_DO_SUBSTR) : f) ,depth+1); if (flags & SCF_DO_STCLASS) data->start_class = oclass; if (mincount == 0 || minnext == 0) { if (flags & SCF_DO_STCLASS_OR) { ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class); } else if (flags & SCF_DO_STCLASS_AND) { /* Switch to OR mode: cache the old value of * data->start_class */ INIT_AND_WITHP; StructCopy(data->start_class, and_withp, regnode_ssc); flags &= ~SCF_DO_STCLASS_AND; StructCopy(&this_class, data->start_class, regnode_ssc); flags |= SCF_DO_STCLASS_OR; ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING; } } else { /* Non-zero len */ if (flags & SCF_DO_STCLASS_OR) { ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &this_class); ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); } else if (flags & SCF_DO_STCLASS_AND) ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &this_class); flags &= ~SCF_DO_STCLASS; } if (!scan) /* It was not CURLYX, but CURLY. */ scan = next; if (((flags & (SCF_TRIE_DOING_RESTUDY|SCF_DO_SUBSTR))==SCF_DO_SUBSTR) /* ? quantifier ok, except for (?{ ... }) */ && (next_is_eval || !(mincount == 0 && maxcount == 1)) && (minnext == 0) && (deltanext == 0) && data && !(data->flags & (SF_HAS_PAR|SF_IN_PAR)) && maxcount <= REG_INFTY/3) /* Complement check for big count */ { /* Fatal warnings may leak the regexp without this: */ SAVEFREESV(RExC_rx_sv); Perl_ck_warner(aTHX_ packWARN(WARN_REGEXP), "Quantifier unexpected on zero-length expression " "in regex m/%" UTF8f "/", UTF8fARG(UTF, RExC_precomp_end - RExC_precomp, RExC_precomp)); (void)ReREFCNT_inc(RExC_rx_sv); } min += minnext * mincount; is_inf_internal |= deltanext == SSize_t_MAX || (maxcount == REG_INFTY && minnext + deltanext > 0); is_inf |= is_inf_internal; if (is_inf) { delta = SSize_t_MAX; } else { delta += (minnext + deltanext) * maxcount - minnext * mincount; } /* Try powerful optimization CURLYX => CURLYN. */ if ( OP(oscan) == CURLYX && data && data->flags & SF_IN_PAR && !(data->flags & SF_HAS_EVAL) && !deltanext && minnext == 1 ) { /* Try to optimize to CURLYN. */ regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; regnode * const nxt1 = nxt; #ifdef DEBUGGING regnode *nxt2; #endif /* Skip open. 
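                 * (Editorial sketch, assumed: nxt currently points at the
                 * OPEN for the capture; the CURLYX => CURLYN rewrite only
                 * fires when the group body is a single simple node or a
                 * one-character EXACT, e.g. /(a)*/ compiling to
                 * CURLYX { OPEN EXACT <a> CLOSE } -- the checks just below
                 * enforce this.)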
*/ nxt = regnext(nxt); if (!REGNODE_SIMPLE(OP(nxt)) && !(PL_regkind[OP(nxt)] == EXACT && STR_LEN(nxt) == 1)) goto nogo; #ifdef DEBUGGING nxt2 = nxt; #endif nxt = regnext(nxt); if (OP(nxt) != CLOSE) goto nogo; if (RExC_open_parens) { RExC_open_parens[ARG(nxt1)]=oscan; /*open->CURLYM*/ RExC_close_parens[ARG(nxt1)]=nxt+2; /*close->while*/ } /* Now we know that nxt2 is the only contents: */ oscan->flags = (U8)ARG(nxt); OP(oscan) = CURLYN; OP(nxt1) = NOTHING; /* was OPEN. */ #ifdef DEBUGGING OP(nxt1 + 1) = OPTIMIZED; /* was count. */ NEXT_OFF(nxt1+ 1) = 0; /* just for consistency. */ NEXT_OFF(nxt2) = 0; /* just for consistency with CURLY. */ OP(nxt) = OPTIMIZED; /* was CLOSE. */ OP(nxt + 1) = OPTIMIZED; /* was count. */ NEXT_OFF(nxt+ 1) = 0; /* just for consistency. */ #endif } nogo: /* Try optimization CURLYX => CURLYM. */ if ( OP(oscan) == CURLYX && data && !(data->flags & SF_HAS_PAR) && !(data->flags & SF_HAS_EVAL) && !deltanext /* atom is fixed width */ && minnext != 0 /* CURLYM can't handle zero width */ /* Nor characters whose fold at run-time may be * multi-character */ && ! (RExC_seen & REG_UNFOLDED_MULTI_SEEN) ) { /* XXXX How to optimize if data == 0? */ /* Optimize to a simpler form. */ regnode *nxt = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN */ regnode *nxt2; OP(oscan) = CURLYM; while ( (nxt2 = regnext(nxt)) /* skip over embedded stuff*/ && (OP(nxt2) != WHILEM)) nxt = nxt2; OP(nxt2) = SUCCEED; /* Whas WHILEM */ /* Need to optimize away parenths. */ if ((data->flags & SF_IN_PAR) && OP(nxt) == CLOSE) { /* Set the parenth number. */ regnode *nxt1 = NEXTOPER(oscan) + EXTRA_STEP_2ARGS; /* OPEN*/ oscan->flags = (U8)ARG(nxt); if (RExC_open_parens) { RExC_open_parens[ARG(nxt1)]=oscan; /*open->CURLYM*/ RExC_close_parens[ARG(nxt1)]=nxt2+1; /*close->NOTHING*/ } OP(nxt1) = OPTIMIZED; /* was OPEN. */ OP(nxt) = OPTIMIZED; /* was CLOSE. */ #ifdef DEBUGGING OP(nxt1 + 1) = OPTIMIZED; /* was count. */ OP(nxt + 1) = OPTIMIZED; /* was count. */ NEXT_OFF(nxt1 + 1) = 0; /* just for consistency. */ NEXT_OFF(nxt + 1) = 0; /* just for consistency. */ #endif #if 0 while ( nxt1 && (OP(nxt1) != WHILEM)) { regnode *nnxt = regnext(nxt1); if (nnxt == nxt) { if (reg_off_by_arg[OP(nxt1)]) ARG_SET(nxt1, nxt2 - nxt1); else if (nxt2 - nxt1 < U16_MAX) NEXT_OFF(nxt1) = nxt2 - nxt1; else OP(nxt) = NOTHING; /* Cannot beautify */ } nxt1 = nnxt; } #endif /* Optimize again: */ study_chunk(pRExC_state, &nxt1, minlenp, &deltanext, nxt, NULL, stopparen, recursed_depth, NULL, 0,depth+1); } else oscan->flags = 0; } else if ((OP(oscan) == CURLYX) && (flags & SCF_WHILEM_VISITED_POS) /* See the comment on a similar expression above. However, this time it's not a subexpression we care about, but the expression itself. */ && (maxcount == REG_INFTY) && data) { /* This stays as CURLYX, we can put the count/of pair. */ /* Find WHILEM (as in regexec.c) */ regnode *nxt = oscan + NEXT_OFF(oscan); if (OP(PREVOPER(nxt)) == NOTHING) /* LONGJMP */ nxt += ARG(nxt); nxt = PREVOPER(nxt); if (nxt->flags & 0xf) { /* we've already set whilem count on this node */ } else if (++data->whilem_c < 16) { assert(data->whilem_c <= RExC_whilem_seen); nxt->flags = (U8)(data->whilem_c | (RExC_whilem_seen << 4)); /* On WHILEM */ } } if (data && fl & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (flags & SCF_DO_SUBSTR) { SV *last_str = NULL; STRLEN last_chrs = 0; int counted = mincount != 0; if (data->last_end > 0 && mincount != 0) { /* Ends with a string. */ SSize_t b = pos_before >= data->last_start_min ? 
pos_before : data->last_start_min; STRLEN l; const char * const s = SvPV_const(data->last_found, l); SSize_t old = b - data->last_start_min; if (UTF) old = utf8_hop((U8*)s, old) - (U8*)s; l -= old; /* Get the added string: */ last_str = newSVpvn_utf8(s + old, l, UTF); last_chrs = UTF ? utf8_length((U8*)(s + old), (U8*)(s + old + l)) : l; if (deltanext == 0 && pos_before == b) { /* What was added is a constant string */ if (mincount > 1) { SvGROW(last_str, (mincount * l) + 1); repeatcpy(SvPVX(last_str) + l, SvPVX_const(last_str), l, mincount - 1); SvCUR_set(last_str, SvCUR(last_str) * mincount); /* Add additional parts. */ SvCUR_set(data->last_found, SvCUR(data->last_found) - l); sv_catsv(data->last_found, last_str); { SV * sv = data->last_found; MAGIC *mg = SvUTF8(sv) && SvMAGICAL(sv) ? mg_find(sv, PERL_MAGIC_utf8) : NULL; if (mg && mg->mg_len >= 0) mg->mg_len += last_chrs * (mincount-1); } last_chrs *= mincount; data->last_end += l * (mincount - 1); } } else { /* start offset must point into the last copy */ data->last_start_min += minnext * (mincount - 1); data->last_start_max = is_inf ? SSize_t_MAX : data->last_start_max + (maxcount - 1) * (minnext + data->pos_delta); } } /* It is counted once already... */ data->pos_min += minnext * (mincount - counted); #if 0 Perl_re_printf( aTHX_ "counted=%" UVuf " deltanext=%" UVuf " SSize_t_MAX=%" UVuf " minnext=%" UVuf " maxcount=%" UVuf " mincount=%" UVuf "\n", (UV)counted, (UV)deltanext, (UV)SSize_t_MAX, (UV)minnext, (UV)maxcount, (UV)mincount); if (deltanext != SSize_t_MAX) Perl_re_printf( aTHX_ "LHS=%" UVuf " RHS=%" UVuf "\n", (UV)(-counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount), (UV)(SSize_t_MAX - data->pos_delta)); #endif if (deltanext == SSize_t_MAX || -counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount >= SSize_t_MAX - data->pos_delta) data->pos_delta = SSize_t_MAX; else data->pos_delta += - counted * deltanext + (minnext + deltanext) * maxcount - minnext * mincount; if (mincount != maxcount) { /* Cannot extend fixed substrings found inside the group. */ scan_commit(pRExC_state, data, minlenp, is_inf); if (mincount && last_str) { SV * const sv = data->last_found; MAGIC * const mg = SvUTF8(sv) && SvMAGICAL(sv) ? mg_find(sv, PERL_MAGIC_utf8) : NULL; if (mg) mg->mg_len = -1; sv_setsv(sv, last_str); data->last_end = data->pos_min; data->last_start_min = data->pos_min - last_chrs; data->last_start_max = is_inf ? SSize_t_MAX : data->pos_min + data->pos_delta - last_chrs; } data->cur_is_floating = 1; /* float */ } SvREFCNT_dec(last_str); } if (data && (fl & SF_HAS_EVAL)) data->flags |= SF_HAS_EVAL; optimize_curly_tail: if (OP(oscan) != CURLYX) { while (PL_regkind[OP(next = regnext(oscan))] == NOTHING && NEXT_OFF(next)) NEXT_OFF(oscan) += NEXT_OFF(next); } continue; default: #ifdef DEBUGGING Perl_croak(aTHX_ "panic: unexpected varying REx opcode %d", OP(scan)); #endif case REF: case CLUMP: if (flags & SCF_DO_SUBSTR) { /* Cannot expect anything... 
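                       * (Editorial note: REF is a backreference and CLUMP
                       * is \X; neither has a width knowable at compile
                       * time, so the substring scan is committed and the
                       * match length is treated as unbounded below.)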
*/ scan_commit(pRExC_state, data, minlenp, is_inf); data->cur_is_floating = 1; /* float */ } is_inf = is_inf_internal = 1; if (flags & SCF_DO_STCLASS_OR) { if (OP(scan) == CLUMP) { /* Actually is any start char, but very few code points * aren't start characters */ ssc_match_all_cp(data->start_class); } else { ssc_anything(data->start_class); } } flags &= ~SCF_DO_STCLASS; break; } } else if (OP(scan) == LNBREAK) { if (flags & SCF_DO_STCLASS) { if (flags & SCF_DO_STCLASS_AND) { ssc_intersection(data->start_class, PL_XPosix_ptrs[_CC_VERTSPACE], FALSE); ssc_clear_locale(data->start_class); ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; } else if (flags & SCF_DO_STCLASS_OR) { ssc_union(data->start_class, PL_XPosix_ptrs[_CC_VERTSPACE], FALSE); ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); /* See commit msg for * 749e076fceedeb708a624933726e7989f2302f6a */ ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; } flags &= ~SCF_DO_STCLASS; } min++; if (delta != SSize_t_MAX) delta++; /* Because of the 2 char string cr-lf */ if (flags & SCF_DO_SUBSTR) { /* Cannot expect anything... */ scan_commit(pRExC_state, data, minlenp, is_inf); data->pos_min += 1; data->pos_delta += 1; data->cur_is_floating = 1; /* float */ } } else if (REGNODE_SIMPLE(OP(scan))) { if (flags & SCF_DO_SUBSTR) { scan_commit(pRExC_state, data, minlenp, is_inf); data->pos_min++; } min++; if (flags & SCF_DO_STCLASS) { bool invert = 0; SV* my_invlist = NULL; U8 namedclass; /* See commit msg 749e076fceedeb708a624933726e7989f2302f6a */ ANYOF_FLAGS(data->start_class) &= ~SSC_MATCHES_EMPTY_STRING; /* Some of the logic below assumes that switching locale on will only add false positives. */ switch (OP(scan)) { default: #ifdef DEBUGGING Perl_croak(aTHX_ "panic: unexpected simple REx opcode %d", OP(scan)); #endif case SANY: if (flags & SCF_DO_STCLASS_OR) /* Allow everything */ ssc_match_all_cp(data->start_class); break; case REG_ANY: { SV* REG_ANY_invlist = _new_invlist(2); REG_ANY_invlist = add_cp_to_invlist(REG_ANY_invlist, '\n'); if (flags & SCF_DO_STCLASS_OR) { ssc_union(data->start_class, REG_ANY_invlist, TRUE /* TRUE => invert, hence all but \n */ ); } else if (flags & SCF_DO_STCLASS_AND) { ssc_intersection(data->start_class, REG_ANY_invlist, TRUE /* TRUE => invert */ ); ssc_clear_locale(data->start_class); } SvREFCNT_dec_NN(REG_ANY_invlist); } break; case ANYOFD: case ANYOFL: case ANYOF: if (flags & SCF_DO_STCLASS_AND) ssc_and(pRExC_state, data->start_class, (regnode_charclass *) scan); else ssc_or(pRExC_state, data->start_class, (regnode_charclass *) scan); break; case NPOSIXL: invert = 1; /* FALLTHROUGH */ case POSIXL: namedclass = classnum_to_namedclass(FLAGS(scan)) + invert; if (flags & SCF_DO_STCLASS_AND) { bool was_there = cBOOL( ANYOF_POSIXL_TEST(data->start_class, namedclass)); ANYOF_POSIXL_ZERO(data->start_class); if (was_there) { /* Do an AND */ ANYOF_POSIXL_SET(data->start_class, namedclass); } /* No individual code points can now match */ data->start_class->invlist = sv_2mortal(_new_invlist(0)); } else { int complement = namedclass + ((invert) ? -1 : 1); assert(flags & SCF_DO_STCLASS_OR); /* If the complement of this class was already there, * the result is that they match all code points, * (\d + \D == everything). Remove the classes from * future consideration. 
Locale is not relevant in * this case */ if (ANYOF_POSIXL_TEST(data->start_class, complement)) { ssc_match_all_cp(data->start_class); ANYOF_POSIXL_CLEAR(data->start_class, namedclass); ANYOF_POSIXL_CLEAR(data->start_class, complement); } else { /* The usual case; just add this class to the existing set */ ANYOF_POSIXL_SET(data->start_class, namedclass); } } break; case NPOSIXA: /* For these, we always know the exact set of what's matched */ invert = 1; /* FALLTHROUGH */ case POSIXA: if (FLAGS(scan) == _CC_ASCII) { my_invlist = invlist_clone(PL_XPosix_ptrs[_CC_ASCII]); } else { _invlist_intersection(PL_XPosix_ptrs[FLAGS(scan)], PL_XPosix_ptrs[_CC_ASCII], &my_invlist); } goto join_posix; case NPOSIXD: case NPOSIXU: invert = 1; /* FALLTHROUGH */ case POSIXD: case POSIXU: my_invlist = invlist_clone(PL_XPosix_ptrs[FLAGS(scan)]); /* NPOSIXD matches all upper Latin1 code points unless the * target string being matched is UTF-8, which is * unknowable until match time. Since we are going to * invert, we want to get rid of all of them so that the * inversion will match all */ if (OP(scan) == NPOSIXD) { _invlist_subtract(my_invlist, PL_UpperLatin1, &my_invlist); } join_posix: if (flags & SCF_DO_STCLASS_AND) { ssc_intersection(data->start_class, my_invlist, invert); ssc_clear_locale(data->start_class); } else { assert(flags & SCF_DO_STCLASS_OR); ssc_union(data->start_class, my_invlist, invert); } SvREFCNT_dec(my_invlist); } if (flags & SCF_DO_STCLASS_OR) ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); flags &= ~SCF_DO_STCLASS; } } else if (PL_regkind[OP(scan)] == EOL && flags & SCF_DO_SUBSTR) { data->flags |= (OP(scan) == MEOL ? SF_BEFORE_MEOL : SF_BEFORE_SEOL); scan_commit(pRExC_state, data, minlenp, is_inf); } else if ( PL_regkind[OP(scan)] == BRANCHJ /* Lookbehind, or need to calculate parens/evals/stclass: */ && (scan->flags || data || (flags & SCF_DO_STCLASS)) && (OP(scan) == IFMATCH || OP(scan) == UNLESSM)) { if ( !PERL_ENABLE_POSITIVE_ASSERTION_STUDY || OP(scan) == UNLESSM ) { /* Negative Lookahead/lookbehind In this case we can't do fixed string optimisation. */ SSize_t deltanext, minnext, fake = 0; regnode *nscan; regnode_ssc intrnl; int f = 0; StructCopy(&zero_scan_data, &data_fake, scan_data_t); if (data) { data_fake.whilem_c = data->whilem_c; data_fake.last_closep = data->last_closep; } else data_fake.last_closep = &fake; data_fake.pos_delta = delta; if ( flags & SCF_DO_STCLASS && !scan->flags && OP(scan) == IFMATCH ) { /* Lookahead */ ssc_init(pRExC_state, &intrnl); data_fake.start_class = &intrnl; f |= SCF_DO_STCLASS_AND; } if (flags & SCF_WHILEM_VISITED_POS) f |= SCF_WHILEM_VISITED_POS; next = regnext(scan); nscan = NEXTOPER(NEXTOPER(scan)); minnext = study_chunk(pRExC_state, &nscan, minlenp, &deltanext, last, &data_fake, stopparen, recursed_depth, NULL, f, depth+1); if (scan->flags) { if (deltanext) { FAIL("Variable length lookbehind not implemented"); } else if (minnext > (I32)U8_MAX) { FAIL2("Lookbehind longer than %" UVuf " not implemented", (UV)U8_MAX); } scan->flags = (U8)minnext; } if (data) { if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (data_fake.flags & SF_HAS_EVAL) data->flags |= SF_HAS_EVAL; data->whilem_c = data_fake.whilem_c; } if (f & SCF_DO_STCLASS_AND) { if (flags & SCF_DO_STCLASS_OR) { /* OR before, AND after: ideally we would recurse with * data_fake to get the AND applied by study of the * remainder of the pattern, and then derecurse; * *** HACK *** for now just treat as "no information". * See [perl #56690]. 
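                         * (Editorial illustration, assumed: for /a*(?=b)\w/
                         * the ideal start class would be 'a' OR ('b' AND \w),
                         * i.e. [ab]; the hack instead resets the class to
                         * "no information" / match anything, trading
                         * optimisation strength for correctness.)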
*/ ssc_init(pRExC_state, data->start_class); } else { /* AND before and after: combine and continue. These * assertions are zero-length, so can match an EMPTY * string */ ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl); ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING; } } } #if PERL_ENABLE_POSITIVE_ASSERTION_STUDY else { /* Positive Lookahead/lookbehind In this case we can do fixed string optimisation, but we must be careful about it. Note in the case of lookbehind the positions will be offset by the minimum length of the pattern, something we won't know about until after the recurse. */ SSize_t deltanext, fake = 0; regnode *nscan; regnode_ssc intrnl; int f = 0; /* We use SAVEFREEPV so that when the full compile is finished perl will clean up the allocated minlens when it's all done. This way we don't have to worry about freeing them when we know they wont be used, which would be a pain. */ SSize_t *minnextp; Newx( minnextp, 1, SSize_t ); SAVEFREEPV(minnextp); if (data) { StructCopy(data, &data_fake, scan_data_t); if ((flags & SCF_DO_SUBSTR) && data->last_found) { f |= SCF_DO_SUBSTR; if (scan->flags) scan_commit(pRExC_state, &data_fake, minlenp, is_inf); data_fake.last_found=newSVsv(data->last_found); } } else data_fake.last_closep = &fake; data_fake.flags = 0; data_fake.substrs[0].flags = 0; data_fake.substrs[1].flags = 0; data_fake.pos_delta = delta; if (is_inf) data_fake.flags |= SF_IS_INF; if ( flags & SCF_DO_STCLASS && !scan->flags && OP(scan) == IFMATCH ) { /* Lookahead */ ssc_init(pRExC_state, &intrnl); data_fake.start_class = &intrnl; f |= SCF_DO_STCLASS_AND; } if (flags & SCF_WHILEM_VISITED_POS) f |= SCF_WHILEM_VISITED_POS; next = regnext(scan); nscan = NEXTOPER(NEXTOPER(scan)); *minnextp = study_chunk(pRExC_state, &nscan, minnextp, &deltanext, last, &data_fake, stopparen, recursed_depth, NULL, f,depth+1); if (scan->flags) { if (deltanext) { FAIL("Variable length lookbehind not implemented"); } else if (*minnextp > (I32)U8_MAX) { FAIL2("Lookbehind longer than %" UVuf " not implemented", (UV)U8_MAX); } scan->flags = (U8)*minnextp; } *minnextp += min; if (f & SCF_DO_STCLASS_AND) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &intrnl); ANYOF_FLAGS(data->start_class) |= SSC_MATCHES_EMPTY_STRING; } if (data) { if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (data_fake.flags & SF_HAS_EVAL) data->flags |= SF_HAS_EVAL; data->whilem_c = data_fake.whilem_c; if ((flags & SCF_DO_SUBSTR) && data_fake.last_found) { int i; if (RExC_rx->minlen<*minnextp) RExC_rx->minlen=*minnextp; scan_commit(pRExC_state, &data_fake, minnextp, is_inf); SvREFCNT_dec_NN(data_fake.last_found); for (i = 0; i < 2; i++) { if (data_fake.substrs[i].minlenp != minlenp) { data->substrs[i].min_offset = data_fake.substrs[i].min_offset; data->substrs[i].max_offset = data_fake.substrs[i].max_offset; data->substrs[i].minlenp = data_fake.substrs[i].minlenp; data->substrs[i].lookbehind += scan->flags; } } } } } #endif } else if (OP(scan) == OPEN) { if (stopparen != (I32)ARG(scan)) pars++; } else if (OP(scan) == CLOSE) { if (stopparen == (I32)ARG(scan)) { break; } if ((I32)ARG(scan) == is_par) { next = regnext(scan); if ( next && (OP(next) != WHILEM) && next < last) is_par = 0; /* Disable optimization */ } if (data) *(data->last_closep) = ARG(scan); } else if (OP(scan) == EVAL) { if (data) data->flags |= SF_HAS_EVAL; } else if ( PL_regkind[OP(scan)] == ENDLIKE ) { if (flags & SCF_DO_SUBSTR) { scan_commit(pRExC_state, data, minlenp, is_inf); flags &= ~SCF_DO_SUBSTR; } if 
(data && OP(scan)==ACCEPT) {
                data->flags |= SCF_SEEN_ACCEPT;
                if (stopmin > min)
                    stopmin = min;
            }
        }
        else if (OP(scan) == LOGICAL && scan->flags == 2) /* Embedded follows */
        {
            if (flags & SCF_DO_SUBSTR) {
                scan_commit(pRExC_state, data, minlenp, is_inf);
                data->cur_is_floating = 1; /* float */
            }
            is_inf = is_inf_internal = 1;
            if (flags & SCF_DO_STCLASS_OR) /* Allow everything */
                ssc_anything(data->start_class);
            flags &= ~SCF_DO_STCLASS;
        }
        else if (OP(scan) == GPOS) {
            if (!(RExC_rx->intflags & PREGf_GPOS_FLOAT) &&
                !(delta || is_inf || (data && data->pos_delta)))
            {
                if (!(RExC_rx->intflags & PREGf_ANCH) && (flags & SCF_DO_SUBSTR))
                    RExC_rx->intflags |= PREGf_ANCH_GPOS;
                if (RExC_rx->gofs < (STRLEN)min)
                    RExC_rx->gofs = min;
            }
            else {
                RExC_rx->intflags |= PREGf_GPOS_FLOAT;
                RExC_rx->gofs = 0;
            }
        }
#ifdef TRIE_STUDY_OPT
#ifdef FULL_TRIE_STUDY
        else if (PL_regkind[OP(scan)] == TRIE) {
            /* NOTE - There is similar code to this block above for handling
               BRANCH nodes on the initial study.  If you change stuff here
               check there too. */
            regnode *trie_node= scan;
            regnode *tail= regnext(scan);
            reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ];
            SSize_t max1 = 0, min1 = SSize_t_MAX;
            regnode_ssc accum;

            if (flags & SCF_DO_SUBSTR) { /* XXXX Add !SUSPEND? */
                /* Cannot merge strings after this. */
                scan_commit(pRExC_state, data, minlenp, is_inf);
            }
            if (flags & SCF_DO_STCLASS)
                ssc_init_zero(pRExC_state, &accum);

            if (!trie->jump) {
                min1= trie->minlen;
                max1= trie->maxlen;
            } else {
                const regnode *nextbranch= NULL;
                U32 word;

                for ( word=1 ; word <= trie->wordcount ; word++)
                {
                    SSize_t deltanext=0, minnext=0, f = 0, fake;
                    regnode_ssc this_class;

                    StructCopy(&zero_scan_data, &data_fake, scan_data_t);
                    if (data) {
                        data_fake.whilem_c = data->whilem_c;
                        data_fake.last_closep = data->last_closep;
                    }
                    else
                        data_fake.last_closep = &fake;
                    data_fake.pos_delta = delta;
                    if (flags & SCF_DO_STCLASS) {
                        ssc_init(pRExC_state, &this_class);
                        data_fake.start_class = &this_class;
                        f = SCF_DO_STCLASS_AND;
                    }
                    if (flags & SCF_WHILEM_VISITED_POS)
                        f |= SCF_WHILEM_VISITED_POS;

                    if (trie->jump[word]) {
                        if (!nextbranch)
                            nextbranch = trie_node + trie->jump[0];
                        scan= trie_node + trie->jump[word];
                        /* We go from the jump point to the branch that follows
                           it. Note this means we need the vestigial unused
                           branches even though they aren't otherwise used.
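                           (Editorial sketch, assumed: for a jump trie built
                           from something like /FOO[xyz]|BAR[pqr]/ -- the
                           example used earlier in this function --
                           trie->jump[word] locates each word's private tail,
                           so studying the word FOO resumes at its [xyz]
                           node.)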
*/ minnext = study_chunk(pRExC_state, &scan, minlenp, &deltanext, (regnode *)nextbranch, &data_fake, stopparen, recursed_depth, NULL, f,depth+1); } if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH) nextbranch= regnext((regnode*)nextbranch); if (min1 > (SSize_t)(minnext + trie->minlen)) min1 = minnext + trie->minlen; if (deltanext == SSize_t_MAX) { is_inf = is_inf_internal = 1; max1 = SSize_t_MAX; } else if (max1 < (SSize_t)(minnext + deltanext + trie->maxlen)) max1 = minnext + deltanext + trie->maxlen; if (data_fake.flags & (SF_HAS_PAR|SF_IN_PAR)) pars++; if (data_fake.flags & SCF_SEEN_ACCEPT) { if ( stopmin > min + min1) stopmin = min + min1; flags &= ~SCF_DO_SUBSTR; if (data) data->flags |= SCF_SEEN_ACCEPT; } if (data) { if (data_fake.flags & SF_HAS_EVAL) data->flags |= SF_HAS_EVAL; data->whilem_c = data_fake.whilem_c; } if (flags & SCF_DO_STCLASS) ssc_or(pRExC_state, &accum, (regnode_charclass *) &this_class); } } if (flags & SCF_DO_SUBSTR) { data->pos_min += min1; data->pos_delta += max1 - min1; if (max1 != min1 || is_inf) data->cur_is_floating = 1; /* float */ } min += min1; if (delta != SSize_t_MAX) delta += max1 - min1; if (flags & SCF_DO_STCLASS_OR) { ssc_or(pRExC_state, data->start_class, (regnode_charclass *) &accum); if (min1) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); flags &= ~SCF_DO_STCLASS; } } else if (flags & SCF_DO_STCLASS_AND) { if (min1) { ssc_and(pRExC_state, data->start_class, (regnode_charclass *) &accum); flags &= ~SCF_DO_STCLASS; } else { /* Switch to OR mode: cache the old value of * data->start_class */ INIT_AND_WITHP; StructCopy(data->start_class, and_withp, regnode_ssc); flags &= ~SCF_DO_STCLASS_AND; StructCopy(&accum, data->start_class, regnode_ssc); flags |= SCF_DO_STCLASS_OR; } } scan= tail; continue; } #else else if (PL_regkind[OP(scan)] == TRIE) { reg_trie_data *trie = (reg_trie_data*)RExC_rxi->data->data[ ARG(scan) ]; U8*bang=NULL; min += trie->minlen; delta += (trie->maxlen - trie->minlen); flags &= ~SCF_DO_STCLASS; /* xxx */ if (flags & SCF_DO_SUBSTR) { /* Cannot expect anything... */ scan_commit(pRExC_state, data, minlenp, is_inf); data->pos_min += trie->minlen; data->pos_delta += (trie->maxlen - trie->minlen); if (trie->maxlen != trie->minlen) data->cur_is_floating = 1; /* float */ } if (trie->jump) /* no more substrings -- for now /grr*/ flags &= ~SCF_DO_SUBSTR; } #endif /* old or new */ #endif /* TRIE_STUDY_OPT */ /* Else: zero-length, ignore. */ scan = regnext(scan); } finish: if (frame) { /* we need to unwind recursion. */ depth = depth - 1; DEBUG_STUDYDATA("frame-end", data, depth, is_inf); DEBUG_PEEP("fend", scan, depth, flags); /* restore previous context */ last = frame->last_regnode; scan = frame->next_regnode; stopparen = frame->stopparen; recursed_depth = frame->prev_recursed_depth; RExC_frame_last = frame->prev_frame; frame = frame->this_prev_frame; goto fake_study_recurse; } assert(!frame); DEBUG_STUDYDATA("pre-fin", data, depth, is_inf); *scanp = scan; *deltap = is_inf_internal ? 
SSize_t_MAX : delta; if (flags & SCF_DO_SUBSTR && is_inf) data->pos_delta = SSize_t_MAX - data->pos_min; if (is_par > (I32)U8_MAX) is_par = 0; if (is_par && pars==1 && data) { data->flags |= SF_IN_PAR; data->flags &= ~SF_HAS_PAR; } else if (pars && data) { data->flags |= SF_HAS_PAR; data->flags &= ~SF_IN_PAR; } if (flags & SCF_DO_STCLASS_OR) ssc_and(pRExC_state, data->start_class, (regnode_charclass *) and_withp); if (flags & SCF_TRIE_RESTUDY) data->flags |= SCF_TRIE_RESTUDY; DEBUG_STUDYDATA("post-fin", data, depth, is_inf); { SSize_t final_minlen= min < stopmin ? min : stopmin; if (!(RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN)) { if (final_minlen > SSize_t_MAX - delta) RExC_maxlen = SSize_t_MAX; else if (RExC_maxlen < final_minlen + delta) RExC_maxlen = final_minlen + delta; } return final_minlen; } NOT_REACHED; /* NOTREACHED */ } STATIC U32 S_add_data(RExC_state_t* const pRExC_state, const char* const s, const U32 n) { U32 count = RExC_rxi->data ? RExC_rxi->data->count : 0; PERL_ARGS_ASSERT_ADD_DATA; Renewc(RExC_rxi->data, sizeof(*RExC_rxi->data) + sizeof(void*) * (count + n - 1), char, struct reg_data); if(count) Renew(RExC_rxi->data->what, count + n, U8); else Newx(RExC_rxi->data->what, n, U8); RExC_rxi->data->count = count + n; Copy(s, RExC_rxi->data->what + count, n, U8); return count; } /*XXX: todo make this not included in a non debugging perl, but appears to be * used anyway there, in 'use re' */ #ifndef PERL_IN_XSUB_RE void Perl_reginitcolors(pTHX) { const char * const s = PerlEnv_getenv("PERL_RE_COLORS"); if (s) { char *t = savepv(s); int i = 0; PL_colors[0] = t; while (++i < 6) { t = strchr(t, '\t'); if (t) { *t = '\0'; PL_colors[i] = ++t; } else PL_colors[i] = t = (char *)""; } } else { int i = 0; while (i < 6) PL_colors[i++] = (char *)""; } PL_colorset = 1; } #endif #ifdef TRIE_STUDY_OPT #define CHECK_RESTUDY_GOTO_butfirst(dOsomething) \ STMT_START { \ if ( \ (data.flags & SCF_TRIE_RESTUDY) \ && ! restudied++ \ ) { \ dOsomething; \ goto reStudy; \ } \ } STMT_END #else #define CHECK_RESTUDY_GOTO_butfirst #endif /* * pregcomp - compile a regular expression into internal code * * Decides which engine's compiler to call based on the hint currently in * scope */ #ifndef PERL_IN_XSUB_RE /* return the currently in-scope regex engine (or the default if none) */ regexp_engine const * Perl_current_re_engine(pTHX) { if (IN_PERL_COMPILETIME) { HV * const table = GvHV(PL_hintgv); SV **ptr; if (!table || !(PL_hints & HINT_LOCALIZE_HH)) return &PL_core_reg_engine; ptr = hv_fetchs(table, "regcomp", FALSE); if ( !(ptr && SvIOK(*ptr) && SvIV(*ptr))) return &PL_core_reg_engine; return INT2PTR(regexp_engine*,SvIV(*ptr)); } else { SV *ptr; if (!PL_curcop->cop_hints_hash) return &PL_core_reg_engine; ptr = cop_hints_fetch_pvs(PL_curcop, "regcomp", 0); if ( !(ptr && SvIOK(ptr) && SvIV(ptr))) return &PL_core_reg_engine; return INT2PTR(regexp_engine*,SvIV(ptr)); } } REGEXP * Perl_pregcomp(pTHX_ SV * const pattern, const U32 flags) { regexp_engine const *eng = current_re_engine(); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_PREGCOMP; /* Dispatch a request to compile a regexp to correct regexp engine. */ DEBUG_COMPILE_r({ Perl_re_printf( aTHX_ "Using engine %" UVxf "\n", PTR2UV(eng)); }); return CALLREGCOMP_ENG(eng, pattern, flags); } #endif /* public(ish) entry point for the perl core's own regex compiling code. 
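 * (Editorial usage sketch, assumed rather than taken from the original:
 *     REGEXP *rx = re_compile(newSVpvs("^ab+c"), RXf_PMf_MULTILINE);
 * would compile the pattern text with /m semantics via the core engine.)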
* It's actually a wrapper for Perl_re_op_compile that only takes an SV * pattern rather than a list of OPs, and uses the internal engine rather * than the current one */ REGEXP * Perl_re_compile(pTHX_ SV * const pattern, U32 rx_flags) { SV *pat = pattern; /* defeat constness! */ PERL_ARGS_ASSERT_RE_COMPILE; return Perl_re_op_compile(aTHX_ &pat, 1, NULL, #ifdef PERL_IN_XSUB_RE &my_reg_engine, #else &PL_core_reg_engine, #endif NULL, NULL, rx_flags, 0); } static void S_free_codeblocks(pTHX_ struct reg_code_blocks *cbs) { int n; if (--cbs->refcnt > 0) return; for (n = 0; n < cbs->count; n++) { REGEXP *rx = cbs->cb[n].src_regex; cbs->cb[n].src_regex = NULL; SvREFCNT_dec(rx); } Safefree(cbs->cb); Safefree(cbs); } static struct reg_code_blocks * S_alloc_code_blocks(pTHX_ int ncode) { struct reg_code_blocks *cbs; Newx(cbs, 1, struct reg_code_blocks); cbs->count = ncode; cbs->refcnt = 1; SAVEDESTRUCTOR_X(S_free_codeblocks, cbs); if (ncode) Newx(cbs->cb, ncode, struct reg_code_block); else cbs->cb = NULL; return cbs; } /* upgrade pattern pat_p of length plen_p to UTF8, and if there are code * blocks, recalculate the indices. Update pat_p and plen_p in-place to * point to the realloced string and length. * * This is essentially a copy of Perl_bytes_to_utf8() with the code index * stuff added */ static void S_pat_upgrade_to_utf8(pTHX_ RExC_state_t * const pRExC_state, char **pat_p, STRLEN *plen_p, int num_code_blocks) { U8 *const src = (U8*)*pat_p; U8 *dst, *d; int n=0; STRLEN s = 0; bool do_end = 0; GET_RE_DEBUG_FLAGS_DECL; DEBUG_PARSE_r(Perl_re_printf( aTHX_ "UTF8 mismatch! Converting to utf8 for resizing and compile\n")); Newx(dst, *plen_p * 2 + 1, U8); d = dst; while (s < *plen_p) { append_utf8_from_native_byte(src[s], &d); if (n < num_code_blocks) { assert(pRExC_state->code_blocks); if (!do_end && pRExC_state->code_blocks->cb[n].start == s) { pRExC_state->code_blocks->cb[n].start = d - dst - 1; assert(*(d - 1) == '('); do_end = 1; } else if (do_end && pRExC_state->code_blocks->cb[n].end == s) { pRExC_state->code_blocks->cb[n].end = d - dst - 1; assert(*(d - 1) == ')'); do_end = 0; n++; } } s++; } *d = '\0'; *plen_p = d - dst; *pat_p = (char*) dst; SAVEFREEPV(*pat_p); RExC_orig_utf8 = RExC_utf8 = 1; } /* S_concat_pat(): concatenate a list of args to the pattern string pat, * while recording any code block indices, and handling overloading, * nested qr// objects etc. If pat is null, it will allocate a new * string, or just return the first arg, if there's only one. * * Returns the malloced/updated pat. * patternp and pat_count is the array of SVs to be concatted; * oplist is the optional list of ops that generated the SVs; * recompile_p is a pointer to a boolean that will be set if * the regex will need to be recompiled. * delim, if non-null is an SV that will be inserted between each element */ static SV* S_concat_pat(pTHX_ RExC_state_t * const pRExC_state, SV *pat, SV ** const patternp, int pat_count, OP *oplist, bool *recompile_p, SV *delim) { SV **svp; int n = 0; bool use_delim = FALSE; bool alloced = FALSE; /* if we know we have at least two args, create an empty string, * then concatenate args to that. For no args, return an empty string */ if (!pat && pat_count != 1) { pat = newSVpvs(""); SAVEFREESV(pat); alloced = TRUE; } for (svp = patternp; svp < patternp + pat_count; svp++) { SV *sv; SV *rx = NULL; STRLEN orig_patlen = 0; bool code = 0; SV *msv = use_delim ? 
delim : *svp;

        if (!msv)
            msv = &PL_sv_undef;

        /* if we've got a delimiter, we go round the loop twice for each
         * svp slot (except the last), using the delimiter the second
         * time round */
        if (use_delim) {
            svp--;
            use_delim = FALSE;
        }
        else if (delim)
            use_delim = TRUE;

        if (SvTYPE(msv) == SVt_PVAV) {
            /* we've encountered an interpolated array within
             * the pattern, e.g. /...@a..../. Expand the list of elements,
             * then recursively append elements.
             * The code in this block is based on S_pushav() */

            AV *const av = (AV*)msv;
            const SSize_t maxarg = AvFILL(av) + 1;
            SV **array;

            if (oplist) {
                assert(oplist->op_type == OP_PADAV
                    || oplist->op_type == OP_RV2AV);
                oplist = OpSIBLING(oplist);
            }

            if (SvRMAGICAL(av)) {
                SSize_t i;

                Newx(array, maxarg, SV*);
                SAVEFREEPV(array);
                for (i=0; i < maxarg; i++) {
                    SV ** const svp = av_fetch(av, i, FALSE);
                    array[i] = svp ? *svp : &PL_sv_undef;
                }
            }
            else
                array = AvARRAY(av);

            pat = S_concat_pat(aTHX_ pRExC_state, pat,
                                array, maxarg, NULL, recompile_p,
                                /* $" */
                                GvSV((gv_fetchpvs("\"", GV_ADDMULTI, SVt_PV))));

            continue;
        }


        /* we make the assumption here that each op in the list of
         * op_siblings maps to one SV pushed onto the stack,
         * except for code blocks, which have both an OP_NULL and
         * an OP_CONST.
         * This allows us to match up the list of SVs against the
         * list of OPs to find the next code block.
         *
         * Note that       PUSHMARK PADSV PADSV ..
         * is optimised to
         *                 PADRANGE PADSV  PADSV  ..
         * so the alignment still works. */

        if (oplist) {
            if (oplist->op_type == OP_NULL
                && (oplist->op_flags & OPf_SPECIAL))
            {
                assert(n < pRExC_state->code_blocks->count);
                pRExC_state->code_blocks->cb[n].start = pat ? SvCUR(pat) : 0;
                pRExC_state->code_blocks->cb[n].block = oplist;
                pRExC_state->code_blocks->cb[n].src_regex = NULL;
                n++;
                code = 1;
                oplist = OpSIBLING(oplist); /* skip CONST */
                assert(oplist);
            }

            oplist = OpSIBLING(oplist);
        }

        /* apply magic and QR overloading to arg */

        SvGETMAGIC(msv);
        if (SvROK(msv) && SvAMAGIC(msv)) {
            SV *sv = AMG_CALLunary(msv, regexp_amg);
            if (sv) {
                if (SvROK(sv))
                    sv = SvRV(sv);
                if (SvTYPE(sv) != SVt_REGEXP)
                    Perl_croak(aTHX_ "Overloaded qr did not return a REGEXP");
                msv = sv;
            }
        }

        /* try concatenation overload ... */
        if (pat && (SvAMAGIC(pat) || SvAMAGIC(msv)) &&
                (sv = amagic_call(pat, msv, concat_amg, AMGf_assign)))
        {
            sv_setsv(pat, sv);
            /* overloading involved: all bets are off over literal
             * code. Pretend we haven't seen it */
            if (n)
                pRExC_state->code_blocks->count -= n;
            n = 0;
        }
        else {
            /* ... or failing that, try "" overload */
            while (SvAMAGIC(msv)
                    && (sv = AMG_CALLunary(msv, string_amg))
                    && sv != msv
                    &&  !(   SvROK(msv)
                          && SvROK(sv)
                          && SvRV(msv) == SvRV(sv))
            ) {
                msv = sv;
                SvGETMAGIC(msv);
            }
            if (SvROK(msv) && SvTYPE(SvRV(msv)) == SVt_REGEXP)
                msv = SvRV(msv);

            if (pat) {
                /* this is a partially unrolled
                 *     sv_catsv_nomg(pat, msv);
                 * that allows us to adjust code block indices if
                 * needed */
                STRLEN dlen;
                char *dst = SvPV_force_nomg(pat, dlen);
                orig_patlen = dlen;
                if (SvUTF8(msv) && !SvUTF8(pat)) {
                    S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &dst, &dlen, n);
                    sv_setpvn(pat, dst, dlen);
                    SvUTF8_on(pat);
                }
                sv_catsv_nomg(pat, msv);
                rx = msv;
            }
            else {
                /* We have only one SV to process, but we need to verify
                 * it is properly null terminated or we will fail asserts
                 * later. In theory we probably shouldn't get such SV's,
                 * but if we do we should handle it gracefully.
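                 * (Editorial note: the test just below keeps msv unchanged
                 * when it is not a plain PV, or when its buffer has spare
                 * room with a NUL byte at SvEND; otherwise newSVsv() is
                 * used to make a NUL-terminated copy.)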
*/ if ( SvTYPE(msv) != SVt_PV || (SvLEN(msv) > SvCUR(msv) && *(SvEND(msv)) == 0) ) { /* not a string, or a string with a trailing null */ pat = msv; } else { /* a string with no trailing null, we need to copy it * so it we have a trailing null */ pat = newSVsv(msv); } } if (code) pRExC_state->code_blocks->cb[n-1].end = SvCUR(pat)-1; } /* extract any code blocks within any embedded qr//'s */ if (rx && SvTYPE(rx) == SVt_REGEXP && RX_ENGINE((REGEXP*)rx)->op_comp) { RXi_GET_DECL(ReANY((REGEXP *)rx), ri); if (ri->code_blocks && ri->code_blocks->count) { int i; /* the presence of an embedded qr// with code means * we should always recompile: the text of the * qr// may not have changed, but it may be a * different closure than last time */ *recompile_p = 1; if (pRExC_state->code_blocks) { int new_count = pRExC_state->code_blocks->count + ri->code_blocks->count; Renew(pRExC_state->code_blocks->cb, new_count, struct reg_code_block); pRExC_state->code_blocks->count = new_count; } else pRExC_state->code_blocks = S_alloc_code_blocks(aTHX_ ri->code_blocks->count); for (i=0; i < ri->code_blocks->count; i++) { struct reg_code_block *src, *dst; STRLEN offset = orig_patlen + ReANY((REGEXP *)rx)->pre_prefix; assert(n < pRExC_state->code_blocks->count); src = &ri->code_blocks->cb[i]; dst = &pRExC_state->code_blocks->cb[n]; dst->start = src->start + offset; dst->end = src->end + offset; dst->block = src->block; dst->src_regex = (REGEXP*) SvREFCNT_inc( (SV*) src->src_regex ? src->src_regex : (REGEXP*)rx); n++; } } } } /* avoid calling magic multiple times on a single element e.g. =~ $qr */ if (alloced) SvSETMAGIC(pat); return pat; } /* see if there are any run-time code blocks in the pattern. * False positives are allowed */ static bool S_has_runtime_code(pTHX_ RExC_state_t * const pRExC_state, char *pat, STRLEN plen) { int n = 0; STRLEN s; PERL_UNUSED_CONTEXT; for (s = 0; s < plen; s++) { if ( pRExC_state->code_blocks && n < pRExC_state->code_blocks->count && s == pRExC_state->code_blocks->cb[n].start) { s = pRExC_state->code_blocks->cb[n].end; n++; continue; } /* TODO ideally should handle [..], (#..), /#.../x to reduce false * positives here */ if (pat[s] == '(' && s+2 <= plen && pat[s+1] == '?' && (pat[s+2] == '{' || (s + 2 <= plen && pat[s+2] == '?' && pat[s+3] == '{')) ) return 1; } return 0; } /* Handle run-time code blocks. We will already have compiled any direct * or indirect literal code blocks. Now, take the pattern 'pat' and make a * copy of it, but with any literal code blocks blanked out and * appropriate chars escaped; then feed it into * * eval "qr'modified_pattern'" * * For example, * * a\bc(?{"this was literal"})def'ghi\\jkl(?{"this is runtime"})mno * * becomes * * qr'a\\bc_______________________def\'ghi\\\\jkl(?{"this is runtime"})mno' * * After eval_sv()-ing that, grab any new code blocks from the returned qr * and merge them with any code blocks of the original regexp. * * If the pat is non-UTF8, while the evalled qr is UTF8, don't merge; * instead, just save the qr and return FALSE; this tells our caller that * the original pattern needs upgrading to utf8. 
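 * (Editorial recap, assumed from the code below: inside the generated
 * qr''-string only the single-quote and backslash characters need
 * escaping, which is why the allocation reserves just plen plus one extra
 * byte per ' or \ plus a small fixed overhead for the surrounding qr''xx
 * wrapper and trailing NUL.)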
*/ static bool S_compile_runtime_code(pTHX_ RExC_state_t * const pRExC_state, char *pat, STRLEN plen) { SV *qr; GET_RE_DEBUG_FLAGS_DECL; if (pRExC_state->runtime_code_qr) { /* this is the second time we've been called; this should * only happen if the main pattern got upgraded to utf8 * during compilation; re-use the qr we compiled first time * round (which should be utf8 too) */ qr = pRExC_state->runtime_code_qr; pRExC_state->runtime_code_qr = NULL; assert(RExC_utf8 && SvUTF8(qr)); } else { int n = 0; STRLEN s; char *p, *newpat; int newlen = plen + 7; /* allow for "qr''xx\0" extra chars */ SV *sv, *qr_ref; dSP; /* determine how many extra chars we need for ' and \ escaping */ for (s = 0; s < plen; s++) { if (pat[s] == '\'' || pat[s] == '\\') newlen++; } Newx(newpat, newlen, char); p = newpat; *p++ = 'q'; *p++ = 'r'; *p++ = '\''; for (s = 0; s < plen; s++) { if ( pRExC_state->code_blocks && n < pRExC_state->code_blocks->count && s == pRExC_state->code_blocks->cb[n].start) { /* blank out literal code block */ assert(pat[s] == '('); while (s <= pRExC_state->code_blocks->cb[n].end) { *p++ = '_'; s++; } s--; n++; continue; } if (pat[s] == '\'' || pat[s] == '\\') *p++ = '\\'; *p++ = pat[s]; } *p++ = '\''; if (pRExC_state->pm_flags & RXf_PMf_EXTENDED) { *p++ = 'x'; if (pRExC_state->pm_flags & RXf_PMf_EXTENDED_MORE) { *p++ = 'x'; } } *p++ = '\0'; DEBUG_COMPILE_r({ Perl_re_printf( aTHX_ "%sre-parsing pattern for runtime code:%s %s\n", PL_colors[4],PL_colors[5],newpat); }); sv = newSVpvn_flags(newpat, p-newpat-1, RExC_utf8 ? SVf_UTF8 : 0); Safefree(newpat); ENTER; SAVETMPS; save_re_context(); PUSHSTACKi(PERLSI_REQUIRE); /* G_RE_REPARSING causes the toker to collapse \\ into \ when * parsing qr''; normally only q'' does this. It also alters * hints handling */ eval_sv(sv, G_SCALAR|G_RE_REPARSING); SvREFCNT_dec_NN(sv); SPAGAIN; qr_ref = POPs; PUTBACK; { SV * const errsv = ERRSV; if (SvTRUE_NN(errsv)) /* use croak_sv ? */ Perl_croak_nocontext("%" SVf, SVfARG(errsv)); } assert(SvROK(qr_ref)); qr = SvRV(qr_ref); assert(SvTYPE(qr) == SVt_REGEXP && RX_ENGINE((REGEXP*)qr)->op_comp); /* the leaving below frees the tmp qr_ref. 
 * Give qr a life of its own */
        SvREFCNT_inc(qr);
        POPSTACK;
        FREETMPS;
        LEAVE;

    }

    if (!RExC_utf8 && SvUTF8(qr)) {
        /* first time through; the pattern got upgraded; save the
         * qr for the next time through */
        assert(!pRExC_state->runtime_code_qr);
        pRExC_state->runtime_code_qr = qr;
        return 0;
    }


    /* extract any code blocks within the returned qr//  */


    /* merge the main (r1) and run-time (r2) code blocks into one */
    {
        RXi_GET_DECL(ReANY((REGEXP *)qr), r2);
        struct reg_code_block *new_block, *dst;
        RExC_state_t * const r1 = pRExC_state; /* convenient alias */
        int i1 = 0, i2 = 0;
        int r1c, r2c;

        if (!r2->code_blocks || !r2->code_blocks->count) /* we guessed wrong */
        {
            SvREFCNT_dec_NN(qr);
            return 1;
        }

        if (!r1->code_blocks)
            r1->code_blocks = S_alloc_code_blocks(aTHX_ 0);

        r1c = r1->code_blocks->count;
        r2c = r2->code_blocks->count;

        Newx(new_block, r1c + r2c, struct reg_code_block);

        dst = new_block;

        while (i1 < r1c || i2 < r2c) {
            struct reg_code_block *src;
            bool is_qr = 0;

            if (i1 == r1c) {
                src = &r2->code_blocks->cb[i2++];
                is_qr = 1;
            }
            else if (i2 == r2c)
                src = &r1->code_blocks->cb[i1++];
            else if (  r1->code_blocks->cb[i1].start
                     < r2->code_blocks->cb[i2].start)
            {
                src = &r1->code_blocks->cb[i1++];
                assert(src->end < r2->code_blocks->cb[i2].start);
            }
            else {
                assert(  r1->code_blocks->cb[i1].start
                       > r2->code_blocks->cb[i2].start);
                src = &r2->code_blocks->cb[i2++];
                is_qr = 1;
                assert(src->end < r1->code_blocks->cb[i1].start);
            }

            assert(pat[src->start] == '(');
            assert(pat[src->end]   == ')');
            dst->start      = src->start;
            dst->end        = src->end;
            dst->block      = src->block;
            dst->src_regex  = is_qr ? (REGEXP*) SvREFCNT_inc( (SV*) qr)
                                    : src->src_regex;
            dst++;
        }
        r1->code_blocks->count += r2c;
        Safefree(r1->code_blocks->cb);
        r1->code_blocks->cb = new_block;
    }

    SvREFCNT_dec_NN(qr);
    return 1;
}


STATIC bool
S_setup_longest(pTHX_ RExC_state_t *pRExC_state,
                      struct reg_substr_datum  *rsd,
                      struct scan_data_substrs *sub,
                      STRLEN longest_length)
{
    /* This is the common code for setting up the floating and fixed length
     * string data extracted from Perl_re_op_compile() below.  Returns a
     * boolean as to whether it succeeded or not */

    I32 t;
    SSize_t ml;
    bool eol  = cBOOL(sub->flags & SF_BEFORE_EOL);
    bool meol = cBOOL(sub->flags & SF_BEFORE_MEOL);

    if (! (longest_length
           || (eol /* Can't have SEOL and MULTI */
               && (! meol || (RExC_flags & RXf_PMf_MULTILINE)))
          )
            /* See comments for join_exact for why REG_UNFOLDED_MULTI_SEEN */
        || (RExC_seen & REG_UNFOLDED_MULTI_SEEN))
    {
        return FALSE;
    }

    /* copy the information about the longest from the reg_scan_data
       over to the program. */
    if (SvUTF8(sub->str)) {
        rsd->substr      = NULL;
        rsd->utf8_substr = sub->str;
    } else {
        rsd->substr      = sub->str;
        rsd->utf8_substr = NULL;
    }
    /* end_shift is how many chars must still be matched following this
       item.  We calculate it ahead of time as once the lookbehind offset
       is added in we lose the ability to correctly calculate it. */
    ml = sub->minlenp ? *(sub->minlenp) : (SSize_t)longest_length;
    rsd->end_shift = ml - sub->min_offset
        - longest_length
            /* XXX SvTAIL is always false here - did you mean FBMcf_TAIL
             * instead? - DAPM
            + (SvTAIL(sub->str) != 0)
            */
        + sub->lookbehind;

    t = (eol /* Can't have SEOL and MULTI */
         && (! meol || (RExC_flags & RXf_PMf_MULTILINE)));
    fbm_compile(sub->str, t ? FBMcf_TAIL : 0);

    return TRUE;
}

/*
 * Perl_re_op_compile - the perl internal RE engine's function to compile a
 * regular expression into internal code.
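 *
 * (Rough illustration: for a compile-time pattern such as
 *      /a(?{ f() })b/
 * we are handed the SV list ("a", <code-block SV>, "b") together with an
 * OP list whose OP_NULL/OPf_SPECIAL entries mark which elements were the
 * pre-compiled DO blocks; see the counting loop further below.)
 *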
* The pattern may be passed either as: * a list of SVs (patternp plus pat_count) * a list of OPs (expr) * If both are passed, the SV list is used, but the OP list indicates * which SVs are actually pre-compiled code blocks * * The SVs in the list have magic and qr overloading applied to them (and * the list may be modified in-place with replacement SVs in the latter * case). * * If the pattern hasn't changed from old_re, then old_re will be * returned. * * eng is the current engine. If that engine has an op_comp method, then * handle directly (i.e. we assume that op_comp was us); otherwise, just * do the initial concatenation of arguments and pass on to the external * engine. * * If is_bare_re is not null, set it to a boolean indicating whether the * arg list reduced (after overloading) to a single bare regex which has * been returned (i.e. /$qr/). * * orig_rx_flags contains RXf_* flags. See perlreapi.pod for more details. * * pm_flags contains the PMf_* flags, typically based on those from the * pm_flags field of the related PMOP. Currently we're only interested in * PMf_HAS_CV, PMf_IS_QR, PMf_USE_RE_EVAL. * * We can't allocate space until we know how big the compiled form will be, * but we can't compile it (and thus know how big it is) until we've got a * place to put the code. So we cheat: we compile it twice, once with code * generation turned off and size counting turned on, and once "for real". * This also means that we don't allocate space until we are sure that the * thing really will compile successfully, and we never have to move the * code and thus invalidate pointers into it. (Note that it has to be in * one piece because free() must be able to free it all.) [NB: not true in perl] * * Beware that the optimization-preparation code in here knows about some * of the structure of the compiled regexp. [I'll say.] */ REGEXP * Perl_re_op_compile(pTHX_ SV ** const patternp, int pat_count, OP *expr, const regexp_engine* eng, REGEXP *old_re, bool *is_bare_re, U32 orig_rx_flags, U32 pm_flags) { REGEXP *rx; struct regexp *r; regexp_internal *ri; STRLEN plen; char *exp; regnode *scan; I32 flags; SSize_t minlen = 0; U32 rx_flags; SV *pat; SV** new_patternp = patternp; /* these are all flags - maybe they should be turned * into a single int with different bit masks */ I32 sawlookahead = 0; I32 sawplus = 0; I32 sawopen = 0; I32 sawminmod = 0; regex_charset initial_charset = get_regex_charset(orig_rx_flags); bool recompile = 0; bool runtime_code = 0; scan_data_t data; RExC_state_t RExC_state; RExC_state_t * const pRExC_state = &RExC_state; #ifdef TRIE_STUDY_OPT int restudied = 0; RExC_state_t copyRExC_state; #endif GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_RE_OP_COMPILE; DEBUG_r(if (!PL_colorset) reginitcolors()); /* Initialize these here instead of as-needed, as is quick and avoids * having to test them each time otherwise */ if (! 
PL_AboveLatin1) { #ifdef DEBUGGING char * dump_len_string; #endif PL_AboveLatin1 = _new_invlist_C_array(AboveLatin1_invlist); PL_Latin1 = _new_invlist_C_array(Latin1_invlist); PL_UpperLatin1 = _new_invlist_C_array(UpperLatin1_invlist); PL_utf8_foldable = _new_invlist_C_array(_Perl_Any_Folds_invlist); PL_HasMultiCharFold = _new_invlist_C_array(_Perl_Folds_To_Multi_Char_invlist); /* This is calculated here, because the Perl program that generates the * static global ones doesn't currently have access to * NUM_ANYOF_CODE_POINTS */ PL_InBitmap = _new_invlist(2); PL_InBitmap = _add_range_to_invlist(PL_InBitmap, 0, NUM_ANYOF_CODE_POINTS - 1); #ifdef DEBUGGING dump_len_string = PerlEnv_getenv("PERL_DUMP_RE_MAX_LEN"); if ( ! dump_len_string || ! grok_atoUV(dump_len_string, (UV *)&PL_dump_re_max_len, NULL)) { PL_dump_re_max_len = 60; /* A reasonable default */ } #endif } pRExC_state->warn_text = NULL; pRExC_state->code_blocks = NULL; if (is_bare_re) *is_bare_re = FALSE; if (expr && (expr->op_type == OP_LIST || (expr->op_type == OP_NULL && expr->op_targ == OP_LIST))) { /* allocate code_blocks if needed */ OP *o; int ncode = 0; for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) if (o->op_type == OP_NULL && (o->op_flags & OPf_SPECIAL)) ncode++; /* count of DO blocks */ if (ncode) pRExC_state->code_blocks = S_alloc_code_blocks(aTHX_ ncode); } if (!pat_count) { /* compile-time pattern with just OP_CONSTs and DO blocks */ int n; OP *o; /* find how many CONSTs there are */ assert(expr); n = 0; if (expr->op_type == OP_CONST) n = 1; else for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) { if (o->op_type == OP_CONST) n++; } /* fake up an SV array */ assert(!new_patternp); Newx(new_patternp, n, SV*); SAVEFREEPV(new_patternp); pat_count = n; n = 0; if (expr->op_type == OP_CONST) new_patternp[n] = cSVOPx_sv(expr); else for (o = cLISTOPx(expr)->op_first; o; o = OpSIBLING(o)) { if (o->op_type == OP_CONST) new_patternp[n++] = cSVOPo_sv; } } DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Assembling pattern from %d elements%s\n", pat_count, orig_rx_flags & RXf_SPLIT ? " for split" : "")); /* set expr to the first arg op */ if (pRExC_state->code_blocks && pRExC_state->code_blocks->count && expr->op_type != OP_CONST) { expr = cLISTOPx(expr)->op_first; assert( expr->op_type == OP_PUSHMARK || (expr->op_type == OP_NULL && expr->op_targ == OP_PUSHMARK) || expr->op_type == OP_PADRANGE); expr = OpSIBLING(expr); } pat = S_concat_pat(aTHX_ pRExC_state, NULL, new_patternp, pat_count, expr, &recompile, NULL); /* handle bare (possibly after overloading) regex: foo =~ $re */ { SV *re = pat; if (SvROK(re)) re = SvRV(re); if (SvTYPE(re) == SVt_REGEXP) { if (is_bare_re) *is_bare_re = TRUE; SvREFCNT_inc(re); DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Precompiled pattern%s\n", orig_rx_flags & RXf_SPLIT ? " for split" : "")); return (REGEXP*)re; } } exp = SvPV_nomg(pat, plen); if (!eng->op_comp) { if ((SvUTF8(pat) && IN_BYTES) || SvGMAGICAL(pat) || SvAMAGIC(pat)) { /* make a temporary copy; either to convert to bytes, * or to avoid repeating get-magic / overloaded stringify */ pat = newSVpvn_flags(exp, plen, SVs_TEMP | (IN_BYTES ? 0 : SvUTF8(pat))); } return CALLREGCOMP_ENG(eng, pat, orig_rx_flags); } /* ignore the utf8ness if the pattern is 0 length */ RExC_utf8 = RExC_orig_utf8 = (plen == 0 || IN_BYTES) ? 
0 : SvUTF8(pat); RExC_uni_semantics = 0; RExC_seen_unfolded_sharp_s = 0; RExC_contains_locale = 0; RExC_strict = cBOOL(pm_flags & RXf_PMf_STRICT); RExC_study_started = 0; pRExC_state->runtime_code_qr = NULL; RExC_frame_head= NULL; RExC_frame_last= NULL; RExC_frame_count= 0; DEBUG_r({ RExC_mysv1= sv_newmortal(); RExC_mysv2= sv_newmortal(); }); DEBUG_COMPILE_r({ SV *dsv= sv_newmortal(); RE_PV_QUOTED_DECL(s, RExC_utf8, dsv, exp, plen, PL_dump_re_max_len); Perl_re_printf( aTHX_ "%sCompiling REx%s %s\n", PL_colors[4],PL_colors[5],s); }); redo_first_pass: /* we jump here if we have to recompile, e.g., from upgrading the pattern * to utf8 */ if ((pm_flags & PMf_USE_RE_EVAL) /* this second condition covers the non-regex literal case, * i.e. $foo =~ '(?{})'. */ || (IN_PERL_COMPILETIME && (PL_hints & HINT_RE_EVAL)) ) runtime_code = S_has_runtime_code(aTHX_ pRExC_state, exp, plen); /* return old regex if pattern hasn't changed */ /* XXX: note in the below we have to check the flags as well as the * pattern. * * Things get a touch tricky as we have to compare the utf8 flag * independently from the compile flags. */ if ( old_re && !recompile && !!RX_UTF8(old_re) == !!RExC_utf8 && ( RX_COMPFLAGS(old_re) == ( orig_rx_flags & RXf_PMf_FLAGCOPYMASK ) ) && RX_PRECOMP(old_re) && RX_PRELEN(old_re) == plen && memEQ(RX_PRECOMP(old_re), exp, plen) && !runtime_code /* with runtime code, always recompile */ ) { return old_re; } rx_flags = orig_rx_flags; if ( initial_charset == REGEX_DEPENDS_CHARSET && (RExC_utf8 ||RExC_uni_semantics)) { /* Set to use unicode semantics if the pattern is in utf8 and has the * 'depends' charset specified, as it means unicode when utf8 */ set_regex_charset(&rx_flags, REGEX_UNICODE_CHARSET); } RExC_precomp = exp; RExC_precomp_adj = 0; RExC_flags = rx_flags; RExC_pm_flags = pm_flags; if (runtime_code) { assert(TAINTING_get || !TAINT_get); if (TAINT_get) Perl_croak(aTHX_ "Eval-group in insecure regular expression"); if (!S_compile_runtime_code(aTHX_ pRExC_state, exp, plen)) { /* whoops, we have a non-utf8 pattern, whilst run-time code * got compiled as utf8. Try again with a utf8 pattern */ S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &exp, &plen, pRExC_state->code_blocks ? pRExC_state->code_blocks->count : 0); goto redo_first_pass; } } assert(!pRExC_state->runtime_code_qr); RExC_sawback = 0; RExC_seen = 0; RExC_maxlen = 0; RExC_in_lookbehind = 0; RExC_seen_zerolen = *exp == '^' ? -1 : 0; RExC_extralen = 0; #ifdef EBCDIC RExC_recode_x_to_native = 0; #endif RExC_in_multi_char_class = 0; /* First pass: determine size, legality. */ RExC_parse = exp; RExC_start = RExC_adjusted_start = exp; RExC_end = exp + plen; RExC_precomp_end = RExC_end; RExC_naughty = 0; RExC_npar = 1; RExC_nestroot = 0; RExC_size = 0L; RExC_emit = (regnode *) &RExC_emit_dummy; RExC_whilem_seen = 0; RExC_open_parens = NULL; RExC_close_parens = NULL; RExC_end_op = NULL; RExC_paren_names = NULL; #ifdef DEBUGGING RExC_paren_name_list = NULL; #endif RExC_recurse = NULL; RExC_study_chunk_recursed = NULL; RExC_study_chunk_recursed_bytes= 0; RExC_recurse_count = 0; pRExC_state->code_index = 0; /* This NUL is guaranteed because the pattern comes from an SV*, and the sv * code makes sure the final byte is an uncounted NUL. But should this * ever not be the case, lots of things could read beyond the end of the * buffer: loops like * while(isFOO(*RExC_parse)) RExC_parse++; * strchr(RExC_parse, "foo"); * etc. So it is worth noting. 
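     *
     * In short: code here may rely on RExC_end[0] being a readable '\0'
     * sentinel (hence the assert just below), but must never look past it.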
*/ assert(*RExC_end == '\0'); DEBUG_PARSE_r( Perl_re_printf( aTHX_ "Starting first pass (sizing)\n"); RExC_lastnum=0; RExC_lastparse=NULL; ); if (reg(pRExC_state, 0, &flags,1) == NULL) { /* It's possible to write a regexp in ascii that represents Unicode codepoints outside of the byte range, such as via \x{100}. If we detect such a sequence we have to convert the entire pattern to utf8 and then recompile, as our sizing calculation will have been based on 1 byte == 1 character, but we will need to use utf8 to encode at least some part of the pattern, and therefore must convert the whole thing. -- dmq */ if (flags & RESTART_PASS1) { if (flags & NEED_UTF8) { S_pat_upgrade_to_utf8(aTHX_ pRExC_state, &exp, &plen, pRExC_state->code_blocks ? pRExC_state->code_blocks->count : 0); } else { DEBUG_PARSE_r(Perl_re_printf( aTHX_ "Need to redo pass 1\n")); } goto redo_first_pass; } Perl_croak(aTHX_ "panic: reg returned NULL to re_op_compile for sizing pass, flags=%#" UVxf, (UV) flags); } DEBUG_PARSE_r({ Perl_re_printf( aTHX_ "Required size %" IVdf " nodes\n" "Starting second pass (creation)\n", (IV)RExC_size); RExC_lastnum=0; RExC_lastparse=NULL; }); /* The first pass could have found things that force Unicode semantics */ if ((RExC_utf8 || RExC_uni_semantics) && get_regex_charset(rx_flags) == REGEX_DEPENDS_CHARSET) { set_regex_charset(&rx_flags, REGEX_UNICODE_CHARSET); } /* Small enough for pointer-storage convention? If extralen==0, this means that we will not need long jumps. */ if (RExC_size >= 0x10000L && RExC_extralen) RExC_size += RExC_extralen; else RExC_extralen = 0; if (RExC_whilem_seen > 15) RExC_whilem_seen = 15; /* Allocate space and zero-initialize. Note, the two step process of zeroing when in debug mode, thus anything assigned has to happen after that */ rx = (REGEXP*) newSV_type(SVt_REGEXP); r = ReANY(rx); Newxc(ri, sizeof(regexp_internal) + (unsigned)RExC_size * sizeof(regnode), char, regexp_internal); if ( r == NULL || ri == NULL ) FAIL("Regexp out of space"); #ifdef DEBUGGING /* avoid reading uninitialized memory in DEBUGGING code in study_chunk() */ Zero(ri, sizeof(regexp_internal) + (unsigned)RExC_size * sizeof(regnode), char); #else /* bulk initialize base fields with 0. */ Zero(ri, sizeof(regexp_internal), char); #endif /* non-zero initialization begins here */ RXi_SET( r, ri ); r->engine= eng; r->extflags = rx_flags; RXp_COMPFLAGS(r) = orig_rx_flags & RXf_PMf_FLAGCOPYMASK; if (pm_flags & PMf_IS_QR) { ri->code_blocks = pRExC_state->code_blocks; if (ri->code_blocks) ri->code_blocks->refcnt++; } { bool has_p = ((r->extflags & RXf_PMf_KEEPCOPY) == RXf_PMf_KEEPCOPY); bool has_charset = (get_regex_charset(r->extflags) != REGEX_DEPENDS_CHARSET); /* The caret is output if there are any defaults: if not all the STD * flags are set, or if no character set specifier is needed */ bool has_default = (((r->extflags & RXf_PMf_STD_PMMOD) != RXf_PMf_STD_PMMOD) || ! has_charset); bool has_runon = ((RExC_seen & REG_RUN_ON_COMMENT_SEEN) == REG_RUN_ON_COMMENT_SEEN); U8 reganch = (U8)((r->extflags & RXf_PMf_STD_PMMOD) >> RXf_PMf_STD_PMMOD_SHIFT); const char *fptr = STD_PAT_MODS; /*"msixxn"*/ char *p; /* We output all the necessary flags; we never output a minus, as all * those are defaults, so are * covered by the caret */ const STRLEN wraplen = plen + has_p + has_runon + has_default /* If needs a caret */ + PL_bitcount[reganch] /* 1 char for each set standard flag */ /* If needs a character set specifier */ + ((has_charset) ? 
MAX_CHARSET_NAME_LENGTH : 0)
            + (sizeof("(?:)") - 1);

        /* make sure PL_bitcount bounds not exceeded */
        assert(sizeof(STD_PAT_MODS) <= 8);

        p = sv_grow(MUTABLE_SV(rx), wraplen + 1); /* +1 for the ending NUL */
        SvPOK_on(rx);
        if (RExC_utf8)
            SvFLAGS(rx) |= SVf_UTF8;
        *p++='('; *p++='?';

        /* If a default, cover it using the caret */
        if (has_default) {
            *p++= DEFAULT_PAT_MOD;
        }
        if (has_charset) {
            STRLEN len;
            const char* const name = get_regex_charset_name(r->extflags, &len);
            Copy(name, p, len, char);
            p += len;
        }
        if (has_p)
            *p++ = KEEPCOPY_PAT_MOD; /*'p'*/
        {
            char ch;
            while((ch = *fptr++)) {
                if(reganch & 1)
                    *p++ = ch;
                reganch >>= 1;
            }
        }

        *p++ = ':';
        Copy(RExC_precomp, p, plen, char);
        assert ((RX_WRAPPED(rx) - p) < 16);
        r->pre_prefix = p - RX_WRAPPED(rx);
        p += plen;
        if (has_runon)
            *p++ = '\n';
        *p++ = ')';
        *p = 0;
        SvCUR_set(rx, p - RX_WRAPPED(rx));
    }

    r->intflags = 0;
    r->nparens = RExC_npar - 1;	/* set early to validate backrefs */

    /* Useful during FAIL. */
#ifdef RE_TRACK_PATTERN_OFFSETS
    Newxz(ri->u.offsets, 2*RExC_size+1, U32); /* MJD 20001228 */
    DEBUG_OFFSETS_r(Perl_re_printf( aTHX_
                          "%s %" UVuf " bytes for offset annotations.\n",
                          ri->u.offsets ? "Got" : "Couldn't get",
                          (UV)((2*RExC_size+1) * sizeof(U32))));
#endif
    SetProgLen(ri,RExC_size);
    RExC_rx_sv = rx;
    RExC_rx = r;
    RExC_rxi = ri;

    /* Second pass: emit code. */
    RExC_flags = rx_flags;	/* don't let top level (?i) bleed */
    RExC_pm_flags = pm_flags;
    RExC_parse = exp;
    RExC_end = exp + plen;
    RExC_naughty = 0;
    RExC_emit_start = ri->program;
    RExC_emit = ri->program;
    RExC_emit_bound = ri->program + RExC_size + 1;
    pRExC_state->code_index = 0;

    *((char*) RExC_emit++) = (char) REG_MAGIC;

    /* setup various meta data about recursion, this all requires
     * RExC_npar to be correctly set, and a bit later on we clear it */
    if (RExC_seen & REG_RECURSE_SEEN) {
        DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_
            "%*s%*s Setting up open/close parens\n",
                  22, "| |", (int)(0 * 2 + 1), ""));

        /* setup RExC_open_parens, which holds the address of each
         * OPEN tag, and to make things simpler for the 0 index
         * the start of the program - this is used later for offsets */
        Newxz(RExC_open_parens, RExC_npar,regnode *);
        SAVEFREEPV(RExC_open_parens);
        RExC_open_parens[0] = RExC_emit;

        /* setup RExC_close_parens, which holds the address of each
         * CLOSE tag, and to make things simpler for the 0 index
         * the end of the program - this is used later for offsets */
        Newxz(RExC_close_parens, RExC_npar,regnode *);
        SAVEFREEPV(RExC_close_parens);
        /* we don't know where end op starts yet, so we don't
         * need to set RExC_close_parens[0] like we do RExC_open_parens[0]
         * above */

        /* Note, RExC_npar is 1 + the number of parens in a pattern.
         * So it's 1 if there are no parens. */
        RExC_study_chunk_recursed_bytes= (RExC_npar >> 3) +
                                         ((RExC_npar & 0x07) != 0);
        Newx(RExC_study_chunk_recursed,
             RExC_study_chunk_recursed_bytes * RExC_npar, U8);
        SAVEFREEPV(RExC_study_chunk_recursed);
    }

    RExC_npar = 1;
    if (reg(pRExC_state, 0, &flags,1) == NULL) {
        ReREFCNT_dec(rx);
        Perl_croak(aTHX_ "panic: reg returned NULL to re_op_compile for generation pass, flags=%#" UVxf, (UV) flags);
    }
    DEBUG_OPTIMISE_r(
        Perl_re_printf( aTHX_  "Starting post parse optimization\n");
    );

    /* XXXX To minimize changes to RE engine we always allocate
       3-units-long substrs field.
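       (Of these, data[0] is used as the 'anchored' fixed-offset substring
       slot and data[1] as the 'floating' one; check_ix, set further below,
       records which of the two the matcher consults for its quick-reject
       check.)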
 */
    Newx(r->substrs, 1, struct reg_substr_data);

    if (RExC_recurse_count) {
        Newxz(RExC_recurse,RExC_recurse_count,regnode *);
        SAVEFREEPV(RExC_recurse);
    }

  reStudy:
    r->minlen = minlen = sawlookahead = sawplus = sawopen = sawminmod = 0;
    DEBUG_r(
        RExC_study_chunk_recursed_count= 0;
    );
    Zero(r->substrs, 1, struct reg_substr_data);
    if (RExC_study_chunk_recursed) {
        Zero(RExC_study_chunk_recursed,
             RExC_study_chunk_recursed_bytes * RExC_npar, U8);
    }

#ifdef TRIE_STUDY_OPT
    if (!restudied) {
        StructCopy(&zero_scan_data, &data, scan_data_t);
        copyRExC_state = RExC_state;
    } else {
        U32 seen=RExC_seen;
        DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ "Restudying\n"));

        RExC_state = copyRExC_state;
        if (seen & REG_TOP_LEVEL_BRANCHES_SEEN)
            RExC_seen |= REG_TOP_LEVEL_BRANCHES_SEEN;
        else
            RExC_seen &= ~REG_TOP_LEVEL_BRANCHES_SEEN;
        StructCopy(&zero_scan_data, &data, scan_data_t);
    }
#else
    StructCopy(&zero_scan_data, &data, scan_data_t);
#endif

    /* Dig out information for optimizations. */
    r->extflags = RExC_flags; /* was pm_op */
    /*dmq: removed as part of de-PMOP: pm->op_pmflags = RExC_flags; */

    if (UTF)
        SvUTF8_on(rx);	/* Unicode in it? */
    ri->regstclass = NULL;
    if (RExC_naughty >= TOO_NAUGHTY)	/* Probably an expensive pattern. */
        r->intflags |= PREGf_NAUGHTY;
    scan = ri->program + 1;		/* First BRANCH. */

    /* testing for BRANCH here tells us whether there is "must appear"
       data in the pattern. If there is then we can use it for
       optimisations */
    if (!(RExC_seen & REG_TOP_LEVEL_BRANCHES_SEEN)) { /* Only one top-level
                                                         choice. */
        SSize_t fake;
        STRLEN longest_length[2];
        regnode_ssc ch_class; /* pointed to by data */
        int stclass_flag;
        SSize_t last_close = 0; /* pointed to by data */
        regnode *first= scan;
        regnode *first_next= regnext(first);
        int i;

        /*
         * Skip introductions and multiplicators >= 1
         * so that we can extract the 'meat' of the pattern that must
         * match in the large if() sequence following.
         * NOTE that EXACT is NOT covered here, as it is normally
         * picked up by the optimiser separately.
         *
         * This is unfortunate as the optimiser isn't handling lookahead
         * properly currently.
         *
         */
        while ((OP(first) == OPEN && (sawopen = 1))
               /* An OR of *one* alternative - should not happen now. */
               || (OP(first) == BRANCH && OP(first_next) != BRANCH)
               /* for now we can't handle lookbehind IFMATCH*/
               || (OP(first) == IFMATCH && !first->flags && (sawlookahead = 1))
               || (OP(first) == PLUS)
               || (OP(first) == MINMOD)
               /* An {n,m} with n>0 */
               || (PL_regkind[OP(first)] == CURLY && ARG1(first) > 0)
               || (OP(first) == NOTHING && PL_regkind[OP(first_next)] != END ))
        {
                /*
                 * the only op that could be a regnode is PLUS, all the rest
                 * will be regnode_1 or regnode_2.
                 *
                 * (yves doesn't think this is true)
                 */
                if (OP(first) == PLUS)
                    sawplus = 1;
                else {
                    if (OP(first) == MINMOD)
                        sawminmod = 1;
                    first += regarglen[OP(first)];
                }
                first = NEXTOPER(first);
                first_next= regnext(first);
        }

        /* Starting-point info. */
      again:
        DEBUG_PEEP("first:", first, 0, 0);
        /* Ignore EXACT as we deal with it later. */
        if (PL_regkind[OP(first)] == EXACT) {
            if (OP(first) == EXACT || OP(first) == EXACTL)
                NOOP;	/* Empty, get anchored substr later.
                         */
            else
                ri->regstclass = first;
        }
#ifdef TRIE_STCLASS
        else if (PL_regkind[OP(first)] == TRIE
                 && ((reg_trie_data *)ri->data->data[ ARG(first) ])->minlen>0)
        {
            /* this can happen only on restudy */
            ri->regstclass = construct_ahocorasick_from_trie(pRExC_state,
                                                    (regnode *)first, 0);
        }
#endif
        else if (REGNODE_SIMPLE(OP(first)))
            ri->regstclass = first;
        else if (PL_regkind[OP(first)] == BOUND
                 || PL_regkind[OP(first)] == NBOUND)
            ri->regstclass = first;
        else if (PL_regkind[OP(first)] == BOL) {
            r->intflags |= (OP(first) == MBOL
                            ? PREGf_ANCH_MBOL
                            : PREGf_ANCH_SBOL);
            first = NEXTOPER(first);
            goto again;
        }
        else if (OP(first) == GPOS) {
            r->intflags |= PREGf_ANCH_GPOS;
            first = NEXTOPER(first);
            goto again;
        }
        else if ((!sawopen || !RExC_sawback) &&
            !sawlookahead &&
            (OP(first) == STAR &&
             PL_regkind[OP(NEXTOPER(first))] == REG_ANY) &&
            !(r->intflags & PREGf_ANCH) && !pRExC_state->code_blocks)
        {
            /* turn .* into ^.* with an implied $*=1 */
            const int type =
                (OP(NEXTOPER(first)) == REG_ANY)
                    ? PREGf_ANCH_MBOL
                    : PREGf_ANCH_SBOL;
            r->intflags |= (type | PREGf_IMPLICIT);
            first = NEXTOPER(first);
            goto again;
        }
        if (sawplus && !sawminmod && !sawlookahead
            && (!sawopen || !RExC_sawback)
            && !pRExC_state->code_blocks) /* May examine pos and $& */
            /* x+ must match at the 1st pos of run of x's */
            r->intflags |= PREGf_SKIP;

        /* Scan is after the zeroth branch, first is atomic matcher. */
#ifdef TRIE_STUDY_OPT
        DEBUG_PARSE_r(
            if (!restudied)
                Perl_re_printf( aTHX_  "first at %" IVdf "\n",
                              (IV)(first - scan + 1))
        );
#else
        DEBUG_PARSE_r(
            Perl_re_printf( aTHX_  "first at %" IVdf "\n",
                          (IV)(first - scan + 1))
        );
#endif

        /*
         * If there's something expensive in the r.e., find the
         * longest literal string that must appear and make it the
         * regmust.  Resolve ties in favor of later strings, since
         * the regstart check works with the beginning of the r.e.
         * and avoiding duplication strengthens checking.  Not a
         * strong reason, but sufficient in the absence of others.
         * [Now we resolve ties in favor of the earlier string if
         * it happens that c_offset_min has been invalidated, since the
         * earlier string may buy us something the later one won't.]
         */

        data.substrs[0].str = newSVpvs("");
        data.substrs[1].str = newSVpvs("");
        data.last_found = newSVpvs("");
        data.cur_is_floating = 0; /* initially any found substring is fixed */
        ENTER_with_name("study_chunk");
        SAVEFREESV(data.substrs[0].str);
        SAVEFREESV(data.substrs[1].str);
        SAVEFREESV(data.last_found);
        first = scan;
        if (!ri->regstclass) {
            ssc_init(pRExC_state, &ch_class);
            data.start_class = &ch_class;
            stclass_flag = SCF_DO_STCLASS_AND;
        } else				/* XXXX Check for BOUND? */
            stclass_flag = 0;
        data.last_closep = &last_close;

        DEBUG_RExC_seen();
        minlen = study_chunk(pRExC_state, &first, &minlen, &fake,
                             scan + RExC_size,   /* Up to end */
            &data, -1, 0, NULL,
            SCF_DO_SUBSTR | SCF_WHILEM_VISITED_POS | stclass_flag
                          | (restudied ? SCF_TRIE_DOING_RESTUDY : 0),
            0);

        CHECK_RESTUDY_GOTO_butfirst(LEAVE_with_name("study_chunk"));

        if ( RExC_npar == 1 && !data.cur_is_floating
             && data.last_start_min == 0 && data.last_end > 0
             && !RExC_seen_zerolen
             && !(RExC_seen & REG_VERBARG_SEEN)
             && !(RExC_seen & REG_GPOS_SEEN)
        ){
            r->extflags |= RXf_CHECK_ALL;
        }
        scan_commit(pRExC_state, &data,&minlen,0);

        /* XXX this is done in reverse order because that's the way the
         * code was before it was parameterised. Don't know whether it
         * actually needs doing in reverse order.
           DAPM */
        for (i = 1; i >= 0; i--) {
            longest_length[i] = CHR_SVLEN(data.substrs[i].str);

            if (  !(   i
                    && SvCUR(data.substrs[0].str)  /* ok to leave SvCUR */
                    &&  data.substrs[0].min_offset
                            == data.substrs[1].min_offset
                    &&    SvCUR(data.substrs[0].str)
                       == SvCUR(data.substrs[1].str)
                   )
                && S_setup_longest (aTHX_ pRExC_state,
                                        &(r->substrs->data[i]),
                                        &(data.substrs[i]),
                                        longest_length[i]))
            {
                r->substrs->data[i].min_offset =
                        data.substrs[i].min_offset - data.substrs[i].lookbehind;

                r->substrs->data[i].max_offset = data.substrs[i].max_offset;
                /* Don't offset infinity */
                if (data.substrs[i].max_offset < SSize_t_MAX)
                    r->substrs->data[i].max_offset -=
                                                data.substrs[i].lookbehind;
                SvREFCNT_inc_simple_void_NN(data.substrs[i].str);
            }
            else {
                r->substrs->data[i].substr      = NULL;
                r->substrs->data[i].utf8_substr = NULL;
                longest_length[i] = 0;
            }
        }

        LEAVE_with_name("study_chunk");

        if (ri->regstclass
            && (OP(ri->regstclass) == REG_ANY || OP(ri->regstclass) == SANY))
            ri->regstclass = NULL;

        if ((!(r->substrs->data[0].substr || r->substrs->data[0].utf8_substr)
              || r->substrs->data[0].min_offset)
            && stclass_flag
            && ! (ANYOF_FLAGS(data.start_class) & SSC_MATCHES_EMPTY_STRING)
            && is_ssc_worth_it(pRExC_state, data.start_class))
        {
            const U32 n = add_data(pRExC_state, STR_WITH_LEN("f"));

            ssc_finalize(pRExC_state, data.start_class);

            Newx(RExC_rxi->data->data[n], 1, regnode_ssc);
            StructCopy(data.start_class,
                       (regnode_ssc*)RExC_rxi->data->data[n],
                       regnode_ssc);
            ri->regstclass = (regnode*)RExC_rxi->data->data[n];
            r->intflags &= ~PREGf_SKIP;	/* Used in find_byclass(). */
            DEBUG_COMPILE_r({ SV *sv = sv_newmortal();
                      regprop(r, sv, (regnode*)data.start_class, NULL,
                              pRExC_state);
                      Perl_re_printf( aTHX_
                                    "synthetic stclass \"%s\".\n",
                                    SvPVX_const(sv));});
            data.start_class = NULL;
        }

        /* A temporary algorithm prefers floated substr to fixed one of
         * same length to dig more info. */
        i = (longest_length[0] <= longest_length[1]);
        r->substrs->check_ix = i;
        r->check_end_shift  = r->substrs->data[i].end_shift;
        r->check_substr     = r->substrs->data[i].substr;
        r->check_utf8       = r->substrs->data[i].utf8_substr;
        r->check_offset_min = r->substrs->data[i].min_offset;
        r->check_offset_max = r->substrs->data[i].max_offset;
        if (!i && (r->intflags & (PREGf_ANCH_SBOL|PREGf_ANCH_GPOS)))
            r->intflags |= PREGf_NOSCAN;

        if ((r->check_substr || r->check_utf8) ) {
            r->extflags |= RXf_USE_INTUIT;
            if (SvTAIL(r->check_substr ? r->check_substr : r->check_utf8))
                r->extflags |= RXf_INTUIT_TAIL;
        }

        /* XXX Unneeded? dmq (shouldn't as this is handled elsewhere)
        if ( (STRLEN)minlen < longest_length[1] )
            minlen= longest_length[1];
        if ( (STRLEN)minlen < longest_length[0] )
            minlen= longest_length[0];
        */
    }
    else {
        /* Several toplevels. Best we can do is to set minlen. */
        SSize_t fake;
        regnode_ssc ch_class;
        SSize_t last_close = 0;

        DEBUG_PARSE_r(Perl_re_printf( aTHX_ "\nMulti Top Level\n"));

        scan = ri->program + 1;
        ssc_init(pRExC_state, &ch_class);
        data.start_class = &ch_class;
        data.last_closep = &last_close;

        DEBUG_RExC_seen();
        minlen = study_chunk(pRExC_state,
            &scan, &minlen, &fake, scan + RExC_size, &data, -1, 0, NULL,
            SCF_DO_STCLASS_AND|SCF_WHILEM_VISITED_POS|(restudied
                                                      ? SCF_TRIE_DOING_RESTUDY
                                                      : 0),
            0);

        CHECK_RESTUDY_GOTO_butfirst(NOOP);

        r->check_substr = NULL;
        r->check_utf8 = NULL;
        r->substrs->data[0].substr      = NULL;
        r->substrs->data[0].utf8_substr = NULL;
        r->substrs->data[1].substr      = NULL;
        r->substrs->data[1].utf8_substr = NULL;

        if (!
            (ANYOF_FLAGS(data.start_class) & SSC_MATCHES_EMPTY_STRING)
            && is_ssc_worth_it(pRExC_state, data.start_class))
        {
            const U32 n = add_data(pRExC_state, STR_WITH_LEN("f"));

            ssc_finalize(pRExC_state, data.start_class);

            Newx(RExC_rxi->data->data[n], 1, regnode_ssc);
            StructCopy(data.start_class,
                       (regnode_ssc*)RExC_rxi->data->data[n],
                       regnode_ssc);
            ri->regstclass = (regnode*)RExC_rxi->data->data[n];
            r->intflags &= ~PREGf_SKIP;	/* Used in find_byclass(). */
            DEBUG_COMPILE_r({ SV* sv = sv_newmortal();
                      regprop(r, sv, (regnode*)data.start_class, NULL,
                              pRExC_state);
                      Perl_re_printf( aTHX_
                                    "synthetic stclass \"%s\".\n",
                                    SvPVX_const(sv));});
            data.start_class = NULL;
        }
    }

    if (RExC_seen & REG_UNBOUNDED_QUANTIFIER_SEEN) {
        r->extflags |= RXf_UNBOUNDED_QUANTIFIER_SEEN;
        r->maxlen = REG_INFTY;
    }
    else {
        r->maxlen = RExC_maxlen;
    }

    /* Guard against an embedded (?=) or (?<=) with a longer minlen than
       the "real" pattern. */
    DEBUG_OPTIMISE_r({
        Perl_re_printf( aTHX_
                  "minlen: %" IVdf " r->minlen:%" IVdf " maxlen:%" IVdf "\n",
                      (IV)minlen, (IV)r->minlen, (IV)RExC_maxlen);
    });
    r->minlenret = minlen;
    if (r->minlen < minlen)
        r->minlen = minlen;

    if (RExC_seen & REG_RECURSE_SEEN ) {
        r->intflags |= PREGf_RECURSE_SEEN;
        Newxz(r->recurse_locinput, r->nparens + 1, char *);
    }
    if (RExC_seen & REG_GPOS_SEEN)
        r->intflags |= PREGf_GPOS_SEEN;
    if (RExC_seen & REG_LOOKBEHIND_SEEN)
        r->extflags |= RXf_NO_INPLACE_SUBST; /* inplace might break the
                                                lookbehind */
    if (pRExC_state->code_blocks)
        r->extflags |= RXf_EVAL_SEEN;
    if (RExC_seen & REG_VERBARG_SEEN) {
        r->intflags |= PREGf_VERBARG_SEEN;
        r->extflags |= RXf_NO_INPLACE_SUBST; /* don't understand this! Yves */
    }
    if (RExC_seen & REG_CUTGROUP_SEEN)
        r->intflags |= PREGf_CUTGROUP_SEEN;
    if (pm_flags & PMf_USE_RE_EVAL)
        r->intflags |= PREGf_USE_RE_EVAL;
    if (RExC_paren_names)
        RXp_PAREN_NAMES(r) = MUTABLE_HV(SvREFCNT_inc(RExC_paren_names));
    else
        RXp_PAREN_NAMES(r) = NULL;

    /* If we have seen an anchor in our pattern then we set the extflag
     * RXf_IS_ANCHORED so it can be used in pp.c */
    if (r->intflags & PREGf_ANCH)
        r->extflags |= RXf_IS_ANCHORED;

    {
        /* this is used to identify "special" patterns that might result
         * in Perl NOT calling the regex engine and instead doing the match
         * "itself", particularly special cases in split//. By having the
         * regex compiler do this pattern matching at a regop level (instead
         * of by inspecting the pattern) we avoid weird issues with
         * equivalent patterns resulting in different behavior, AND we allow
         * non-Perl engines to get the same optimizations by setting the
         * flags appropriately - Yves */
        regnode *first = ri->program + 1;
        U8 fop = OP(first);
        regnode *next = regnext(first);
        U8 nop = OP(next);

        if (PL_regkind[fop] == NOTHING && nop == END)
            r->extflags |= RXf_NULL;
        else if ((fop == MBOL || (fop == SBOL && !first->flags)) && nop == END)
            /* when fop is SBOL first->flags will be true only when it was
             * produced by parsing /\A/, and not when parsing /^/. This is
             * very important for the split code as there we want to
             * treat /^/ as /^/m, but we do not want to treat /\A/ as /^/m.
             * See rt #122761 for more details.
-- Yves */ r->extflags |= RXf_START_ONLY; else if (fop == PLUS && PL_regkind[nop] == POSIXD && FLAGS(next) == _CC_SPACE && nop == END) r->extflags |= RXf_WHITE; else if ( r->extflags & RXf_SPLIT && (fop == EXACT || fop == EXACTL) && STR_LEN(first) == 1 && *(STRING(first)) == ' ' && nop == END ) r->extflags |= (RXf_SKIPWHITE|RXf_WHITE); } if (RExC_contains_locale) { RXp_EXTFLAGS(r) |= RXf_TAINTED; } #ifdef DEBUGGING if (RExC_paren_names) { ri->name_list_idx = add_data( pRExC_state, STR_WITH_LEN("a")); ri->data->data[ri->name_list_idx] = (void*)SvREFCNT_inc(RExC_paren_name_list); } else #endif ri->name_list_idx = 0; while ( RExC_recurse_count > 0 ) { const regnode *scan = RExC_recurse[ --RExC_recurse_count ]; /* * This data structure is set up in study_chunk() and is used * to calculate the distance between a GOSUB regopcode and * the OPEN/CURLYM (CURLYM's are special and can act like OPEN's) * it refers to. * * If for some reason someone writes code that optimises * away a GOSUB opcode then the assert should be changed to * an if(scan) to guard the ARG2L_SET() - Yves * */ assert(scan && OP(scan) == GOSUB); ARG2L_SET( scan, RExC_open_parens[ARG(scan)] - scan ); } Newxz(r->offs, RExC_npar, regexp_paren_pair); /* assume we don't need to swap parens around before we match */ DEBUG_TEST_r({ Perl_re_printf( aTHX_ "study_chunk_recursed_count: %lu\n", (unsigned long)RExC_study_chunk_recursed_count); }); DEBUG_DUMP_r({ DEBUG_RExC_seen(); Perl_re_printf( aTHX_ "Final program:\n"); regdump(r); }); #ifdef RE_TRACK_PATTERN_OFFSETS DEBUG_OFFSETS_r(if (ri->u.offsets) { const STRLEN len = ri->u.offsets[0]; STRLEN i; GET_RE_DEBUG_FLAGS_DECL; Perl_re_printf( aTHX_ "Offsets: [%" UVuf "]\n\t", (UV)ri->u.offsets[0]); for (i = 1; i <= len; i++) { if (ri->u.offsets[i*2-1] || ri->u.offsets[i*2]) Perl_re_printf( aTHX_ "%" UVuf ":%" UVuf "[%" UVuf "] ", (UV)i, (UV)ri->u.offsets[i*2-1], (UV)ri->u.offsets[i*2]); } Perl_re_printf( aTHX_ "\n"); }); #endif #ifdef USE_ITHREADS /* under ithreads the ?pat? PMf_USED flag on the pmop is simulated * by setting the regexp SV to readonly-only instead. If the * pattern's been recompiled, the USEDness should remain. */ if (old_re && SvREADONLY(old_re)) SvREADONLY_on(rx); #endif return rx; } SV* Perl_reg_named_buff(pTHX_ REGEXP * const rx, SV * const key, SV * const value, const U32 flags) { PERL_ARGS_ASSERT_REG_NAMED_BUFF; PERL_UNUSED_ARG(value); if (flags & RXapif_FETCH) { return reg_named_buff_fetch(rx, key, flags); } else if (flags & (RXapif_STORE | RXapif_DELETE | RXapif_CLEAR)) { Perl_croak_no_modify(); return NULL; } else if (flags & RXapif_EXISTS) { return reg_named_buff_exists(rx, key, flags) ? 
&PL_sv_yes : &PL_sv_no; } else if (flags & RXapif_REGNAMES) { return reg_named_buff_all(rx, flags); } else if (flags & (RXapif_SCALAR | RXapif_REGNAMES_COUNT)) { return reg_named_buff_scalar(rx, flags); } else { Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff", (int)flags); return NULL; } } SV* Perl_reg_named_buff_iter(pTHX_ REGEXP * const rx, const SV * const lastkey, const U32 flags) { PERL_ARGS_ASSERT_REG_NAMED_BUFF_ITER; PERL_UNUSED_ARG(lastkey); if (flags & RXapif_FIRSTKEY) return reg_named_buff_firstkey(rx, flags); else if (flags & RXapif_NEXTKEY) return reg_named_buff_nextkey(rx, flags); else { Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff_iter", (int)flags); return NULL; } } SV* Perl_reg_named_buff_fetch(pTHX_ REGEXP * const r, SV * const namesv, const U32 flags) { SV *ret; struct regexp *const rx = ReANY(r); PERL_ARGS_ASSERT_REG_NAMED_BUFF_FETCH; if (rx && RXp_PAREN_NAMES(rx)) { HE *he_str = hv_fetch_ent( RXp_PAREN_NAMES(rx), namesv, 0, 0 ); if (he_str) { IV i; SV* sv_dat=HeVAL(he_str); I32 *nums=(I32*)SvPVX(sv_dat); AV * const retarray = (flags & RXapif_ALL) ? newAV() : NULL; for ( i=0; i<SvIVX(sv_dat); i++ ) { if ((I32)(rx->nparens) >= nums[i] && rx->offs[nums[i]].start != -1 && rx->offs[nums[i]].end != -1) { ret = newSVpvs(""); CALLREG_NUMBUF_FETCH(r,nums[i],ret); if (!retarray) return ret; } else { if (retarray) ret = newSVsv(&PL_sv_undef); } if (retarray) av_push(retarray, ret); } if (retarray) return newRV_noinc(MUTABLE_SV(retarray)); } } return NULL; } bool Perl_reg_named_buff_exists(pTHX_ REGEXP * const r, SV * const key, const U32 flags) { struct regexp *const rx = ReANY(r); PERL_ARGS_ASSERT_REG_NAMED_BUFF_EXISTS; if (rx && RXp_PAREN_NAMES(rx)) { if (flags & RXapif_ALL) { return hv_exists_ent(RXp_PAREN_NAMES(rx), key, 0); } else { SV *sv = CALLREG_NAMED_BUFF_FETCH(r, key, flags); if (sv) { SvREFCNT_dec_NN(sv); return TRUE; } else { return FALSE; } } } else { return FALSE; } } SV* Perl_reg_named_buff_firstkey(pTHX_ REGEXP * const r, const U32 flags) { struct regexp *const rx = ReANY(r); PERL_ARGS_ASSERT_REG_NAMED_BUFF_FIRSTKEY; if ( rx && RXp_PAREN_NAMES(rx) ) { (void)hv_iterinit(RXp_PAREN_NAMES(rx)); return CALLREG_NAMED_BUFF_NEXTKEY(r, NULL, flags & ~RXapif_FIRSTKEY); } else { return FALSE; } } SV* Perl_reg_named_buff_nextkey(pTHX_ REGEXP * const r, const U32 flags) { struct regexp *const rx = ReANY(r); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REG_NAMED_BUFF_NEXTKEY; if (rx && RXp_PAREN_NAMES(rx)) { HV *hv = RXp_PAREN_NAMES(rx); HE *temphe; while ( (temphe = hv_iternext_flags(hv,0)) ) { IV i; IV parno = 0; SV* sv_dat = HeVAL(temphe); I32 *nums = (I32*)SvPVX(sv_dat); for ( i = 0; i < SvIVX(sv_dat); i++ ) { if ((I32)(rx->lastparen) >= nums[i] && rx->offs[nums[i]].start != -1 && rx->offs[nums[i]].end != -1) { parno = nums[i]; break; } } if (parno || flags & RXapif_ALL) { return newSVhek(HeKEY_hek(temphe)); } } } return NULL; } SV* Perl_reg_named_buff_scalar(pTHX_ REGEXP * const r, const U32 flags) { SV *ret; AV *av; SSize_t length; struct regexp *const rx = ReANY(r); PERL_ARGS_ASSERT_REG_NAMED_BUFF_SCALAR; if (rx && RXp_PAREN_NAMES(rx)) { if (flags & (RXapif_ALL | RXapif_REGNAMES_COUNT)) { return newSViv(HvTOTALKEYS(RXp_PAREN_NAMES(rx))); } else if (flags & RXapif_ONE) { ret = CALLREG_NAMED_BUFF_ALL(r, (flags | RXapif_REGNAMES)); av = MUTABLE_AV(SvRV(ret)); length = av_tindex(av); SvREFCNT_dec_NN(ret); return newSViv(length + 1); } else { Perl_croak(aTHX_ "panic: Unknown flags %d in named_buff_scalar", (int)flags); return NULL; } } return &PL_sv_undef; } 
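
/* For orientation: the reg_named_buff* functions here are what service the
 * Perl-visible %+ and %- hashes (via Tie::Hash::NamedCapture), roughly as in
 * this hypothetical session:
 *
 *     "foo" =~ /(?<word>\w+)/;
 *     exists $+{word};    # reg_named_buff() with RXapif_EXISTS
 *     my @k = keys %+;    # ..._firstkey(), then ..._nextkey() iteration
 *     my $n = scalar %+;  # ..._scalar() with RXapif_ONE
 *
 * %+ is tied with the RXapif_ONE flavour (only buffers that matched), while
 * %- uses RXapif_ALL (every group, each value an array ref of buffers).
 */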
SV* Perl_reg_named_buff_all(pTHX_ REGEXP * const r, const U32 flags) { struct regexp *const rx = ReANY(r); AV *av = newAV(); PERL_ARGS_ASSERT_REG_NAMED_BUFF_ALL; if (rx && RXp_PAREN_NAMES(rx)) { HV *hv= RXp_PAREN_NAMES(rx); HE *temphe; (void)hv_iterinit(hv); while ( (temphe = hv_iternext_flags(hv,0)) ) { IV i; IV parno = 0; SV* sv_dat = HeVAL(temphe); I32 *nums = (I32*)SvPVX(sv_dat); for ( i = 0; i < SvIVX(sv_dat); i++ ) { if ((I32)(rx->lastparen) >= nums[i] && rx->offs[nums[i]].start != -1 && rx->offs[nums[i]].end != -1) { parno = nums[i]; break; } } if (parno || flags & RXapif_ALL) { av_push(av, newSVhek(HeKEY_hek(temphe))); } } } return newRV_noinc(MUTABLE_SV(av)); } void Perl_reg_numbered_buff_fetch(pTHX_ REGEXP * const r, const I32 paren, SV * const sv) { struct regexp *const rx = ReANY(r); char *s = NULL; SSize_t i = 0; SSize_t s1, t1; I32 n = paren; PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_FETCH; if ( n == RX_BUFF_IDX_CARET_PREMATCH || n == RX_BUFF_IDX_CARET_FULLMATCH || n == RX_BUFF_IDX_CARET_POSTMATCH ) { bool keepcopy = cBOOL(rx->extflags & RXf_PMf_KEEPCOPY); if (!keepcopy) { /* on something like * $r = qr/.../; * /$qr/p; * the KEEPCOPY is set on the PMOP rather than the regex */ if (PL_curpm && r == PM_GETRE(PL_curpm)) keepcopy = cBOOL(PL_curpm->op_pmflags & PMf_KEEPCOPY); } if (!keepcopy) goto ret_undef; } if (!rx->subbeg) goto ret_undef; if (n == RX_BUFF_IDX_CARET_FULLMATCH) /* no need to distinguish between them any more */ n = RX_BUFF_IDX_FULLMATCH; if ((n == RX_BUFF_IDX_PREMATCH || n == RX_BUFF_IDX_CARET_PREMATCH) && rx->offs[0].start != -1) { /* $`, ${^PREMATCH} */ i = rx->offs[0].start; s = rx->subbeg; } else if ((n == RX_BUFF_IDX_POSTMATCH || n == RX_BUFF_IDX_CARET_POSTMATCH) && rx->offs[0].end != -1) { /* $', ${^POSTMATCH} */ s = rx->subbeg - rx->suboffset + rx->offs[0].end; i = rx->sublen + rx->suboffset - rx->offs[0].end; } else if ( 0 <= n && n <= (I32)rx->nparens && (s1 = rx->offs[n].start) != -1 && (t1 = rx->offs[n].end) != -1) { /* $&, ${^MATCH}, $1 ... 
 */
        i = t1 - s1;
        s = rx->subbeg + s1 - rx->suboffset;
    }
    else {
        goto ret_undef;
    }

    assert(s >= rx->subbeg);
    assert((STRLEN)rx->sublen >= (STRLEN)((s - rx->subbeg) + i) );
    if (i >= 0) {
#ifdef NO_TAINT_SUPPORT
        sv_setpvn(sv, s, i);
#else
        const int oldtainted = TAINT_get;
        TAINT_NOT;
        sv_setpvn(sv, s, i);
        TAINT_set(oldtainted);
#endif
        if (RXp_MATCH_UTF8(rx))
            SvUTF8_on(sv);
        else
            SvUTF8_off(sv);
        if (TAINTING_get) {
            if (RXp_MATCH_TAINTED(rx)) {
                if (SvTYPE(sv) >= SVt_PVMG) {
                    MAGIC* const mg = SvMAGIC(sv);
                    MAGIC* mgt;
                    TAINT;
                    SvMAGIC_set(sv, mg->mg_moremagic);
                    SvTAINT(sv);
                    if ((mgt = SvMAGIC(sv))) {
                        mg->mg_moremagic = mgt;
                        SvMAGIC_set(sv, mg);
                    }
                } else {
                    TAINT;
                    SvTAINT(sv);
                }
            } else
                SvTAINTED_off(sv);
        }
    } else {
      ret_undef:
        sv_set_undef(sv);
        return;
    }
}

void
Perl_reg_numbered_buff_store(pTHX_ REGEXP * const rx, const I32 paren,
                             SV const * const value)
{
    PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_STORE;

    PERL_UNUSED_ARG(rx);
    PERL_UNUSED_ARG(paren);
    PERL_UNUSED_ARG(value);

    if (!PL_localizing)
        Perl_croak_no_modify();
}

I32
Perl_reg_numbered_buff_length(pTHX_ REGEXP * const r, const SV * const sv,
                              const I32 paren)
{
    struct regexp *const rx = ReANY(r);
    I32 i;
    I32 s1, t1;

    PERL_ARGS_ASSERT_REG_NUMBERED_BUFF_LENGTH;

    if (   paren == RX_BUFF_IDX_CARET_PREMATCH
        || paren == RX_BUFF_IDX_CARET_FULLMATCH
        || paren == RX_BUFF_IDX_CARET_POSTMATCH
    )
    {
        bool keepcopy = cBOOL(rx->extflags & RXf_PMf_KEEPCOPY);
        if (!keepcopy) {
            /* on something like
             *    $r = qr/.../;
             *    /$r/p;
             * the KEEPCOPY is set on the PMOP rather than the regex */
            if (PL_curpm && r == PM_GETRE(PL_curpm))
                 keepcopy = cBOOL(PL_curpm->op_pmflags & PMf_KEEPCOPY);
        }
        if (!keepcopy)
            goto warn_undef;
    }

    /* Some of this code was originally in C<Perl_magic_len> in F<mg.c> */
    switch (paren) {
      case RX_BUFF_IDX_CARET_PREMATCH: /* ${^PREMATCH} */
      case RX_BUFF_IDX_PREMATCH:       /* $` */
        if (rx->offs[0].start != -1) {
            i = rx->offs[0].start;
            if (i > 0) {
                s1 = 0;
                t1 = i;
                goto getlen;
            }
        }
        return 0;

      case RX_BUFF_IDX_CARET_POSTMATCH: /* ${^POSTMATCH} */
      case RX_BUFF_IDX_POSTMATCH:       /* $' */
        if (rx->offs[0].end != -1) {
            i = rx->sublen - rx->offs[0].end;
            if (i > 0) {
                s1 = rx->offs[0].end;
                t1 = rx->sublen;
                goto getlen;
            }
        }
        return 0;

      default: /* $& / ${^MATCH}, $1, $2, ... */
        if (paren <= (I32)rx->nparens &&
            (s1 = rx->offs[paren].start) != -1 &&
            (t1 = rx->offs[paren].end) != -1)
        {
            i = t1 - s1;
            goto getlen;
        } else {
          warn_undef:
            if (ckWARN(WARN_UNINITIALIZED))
                report_uninit((const SV *)sv);
            return 0;
        }
    }
  getlen:
    if (i > 0 && RXp_MATCH_UTF8(rx)) {
        const char * const s = rx->subbeg - rx->suboffset + s1;
        const U8 *ep;
        STRLEN el;

        i = t1 - s1;
        if (is_utf8_string_loclen((U8*)s, i, &ep, &el))
            i = el;
    }
    return i;
}

SV*
Perl_reg_qr_package(pTHX_ REGEXP * const rx)
{
    PERL_ARGS_ASSERT_REG_QR_PACKAGE;
    PERL_UNUSED_ARG(rx);
    if (0)
        return NULL;
    else
        return newSVpvs("Regexp");
}

/* Scans the name of a named buffer from the pattern.
 * If flags is REG_RSN_RETURN_NULL returns null.
 * If flags is REG_RSN_RETURN_NAME returns an SV* containing the name
 * If flags is REG_RSN_RETURN_DATA returns the data SV* corresponding
 * to the parsed name as looked up in the RExC_paren_names hash.
 * If there is an error, a vFAIL()-style exception is thrown.
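 *
 * (For illustration: this scans the 'foo' of constructs such as
 * (?<foo>...), (?'foo'...) and \k<foo>; a name must start with a
 * non-digit word character, as enforced below.)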
*/ #define REG_RSN_RETURN_NULL 0 #define REG_RSN_RETURN_NAME 1 #define REG_RSN_RETURN_DATA 2 STATIC SV* S_reg_scan_name(pTHX_ RExC_state_t *pRExC_state, U32 flags) { char *name_start = RExC_parse; PERL_ARGS_ASSERT_REG_SCAN_NAME; assert (RExC_parse <= RExC_end); if (RExC_parse == RExC_end) NOOP; else if (isIDFIRST_lazy_if_safe(RExC_parse, RExC_end, UTF)) { /* Note that the code here assumes well-formed UTF-8. Skip IDFIRST by * using do...while */ if (UTF) do { RExC_parse += UTF8SKIP(RExC_parse); } while ( RExC_parse < RExC_end && isWORDCHAR_utf8_safe((U8*)RExC_parse, (U8*) RExC_end)); else do { RExC_parse++; } while (RExC_parse < RExC_end && isWORDCHAR(*RExC_parse)); } else { RExC_parse++; /* so the <- from the vFAIL is after the offending character */ vFAIL("Group name must start with a non-digit word character"); } if ( flags ) { SV* sv_name = newSVpvn_flags(name_start, (int)(RExC_parse - name_start), SVs_TEMP | (UTF ? SVf_UTF8 : 0)); if ( flags == REG_RSN_RETURN_NAME) return sv_name; else if (flags==REG_RSN_RETURN_DATA) { HE *he_str = NULL; SV *sv_dat = NULL; if ( ! sv_name ) /* should not happen*/ Perl_croak(aTHX_ "panic: no svname in reg_scan_name"); if (RExC_paren_names) he_str = hv_fetch_ent( RExC_paren_names, sv_name, 0, 0 ); if ( he_str ) sv_dat = HeVAL(he_str); if ( ! sv_dat ) vFAIL("Reference to nonexistent named group"); return sv_dat; } else { Perl_croak(aTHX_ "panic: bad flag %lx in reg_scan_name", (unsigned long) flags); } NOT_REACHED; /* NOTREACHED */ } return NULL; } #define DEBUG_PARSE_MSG(funcname) DEBUG_PARSE_r({ \ int num; \ if (RExC_lastparse!=RExC_parse) { \ Perl_re_printf( aTHX_ "%s", \ Perl_pv_pretty(aTHX_ RExC_mysv1, RExC_parse, \ RExC_end - RExC_parse, 16, \ "", "", \ PERL_PV_ESCAPE_UNI_DETECT | \ PERL_PV_PRETTY_ELLIPSES | \ PERL_PV_PRETTY_LTGT | \ PERL_PV_ESCAPE_RE | \ PERL_PV_PRETTY_EXACTSIZE \ ) \ ); \ } else \ Perl_re_printf( aTHX_ "%16s",""); \ \ if (SIZE_ONLY) \ num = RExC_size + 1; \ else \ num=REG_NODE_NUM(RExC_emit); \ if (RExC_lastnum!=num) \ Perl_re_printf( aTHX_ "|%4d",num); \ else \ Perl_re_printf( aTHX_ "|%4s",""); \ Perl_re_printf( aTHX_ "|%*s%-4s", \ (int)((depth*2)), "", \ (funcname) \ ); \ RExC_lastnum=num; \ RExC_lastparse=RExC_parse; \ }) #define DEBUG_PARSE(funcname) DEBUG_PARSE_r({ \ DEBUG_PARSE_MSG((funcname)); \ Perl_re_printf( aTHX_ "%4s","\n"); \ }) #define DEBUG_PARSE_FMT(funcname,fmt,args) DEBUG_PARSE_r({\ DEBUG_PARSE_MSG((funcname)); \ Perl_re_printf( aTHX_ fmt "\n",args); \ }) /* This section of code defines the inversion list object and its methods. The * interfaces are highly subject to change, so as much as possible is static to * this file. An inversion list is here implemented as a malloc'd C UV array * as an SVt_INVLIST scalar. * * An inversion list for Unicode is an array of code points, sorted by ordinal * number. Each element gives the code point that begins a range that extends * up-to but not including the code point given by the next element. The final * element gives the first code point of a range that extends to the platform's * infinity. The even-numbered elements (invlist[0], invlist[2], invlist[4], * ...) give ranges whose code points are all in the inversion list. We say * that those ranges are in the set. The odd-numbered elements give ranges * whose code points are not in the inversion list, and hence not in the set. * Thus, element [0] is the first code point in the list. Element [1] * is the first code point beyond that not in the list; and element [2] is the * first code point beyond that that is in the list. 
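 * As a concrete example, the array [3, 6, 7, 8] represents the set
 * {3, 4, 5, 7}: element [0] opens the in-the-set range 3..5, element [1]
 * opens the excluded range 6..6, element [2] opens the in-the-set range
 * 7..7, and element [3] opens the final excluded range 8..infinity.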
In other words, the first * range is invlist[0]..(invlist[1]-1), and all code points in that range are * in the inversion list. The second range is invlist[1]..(invlist[2]-1), and * all code points in that range are not in the inversion list. The third * range invlist[2]..(invlist[3]-1) gives code points that are in the inversion * list, and so forth. Thus every element whose index is divisible by two * gives the beginning of a range that is in the list, and every element whose * index is not divisible by two gives the beginning of a range not in the * list. If the final element's index is divisible by two, the inversion list * extends to the platform's infinity; otherwise the highest code point in the * inversion list is the contents of that element minus 1. * * A range that contains just a single code point N will look like * invlist[i] == N * invlist[i+1] == N+1 * * If N is UV_MAX (the highest representable code point on the machine), N+1 is * impossible to represent, so element [i+1] is omitted. The single element * inversion list * invlist[0] == UV_MAX * contains just UV_MAX, but is interpreted as matching to infinity. * * Taking the complement (inverting) an inversion list is quite simple, if the * first element is 0, remove it; otherwise add a 0 element at the beginning. * This implementation reserves an element at the beginning of each inversion * list to always contain 0; there is an additional flag in the header which * indicates if the list begins at the 0, or is offset to begin at the next * element. This means that the inversion list can be inverted without any * copying; just flip the flag. * * More about inversion lists can be found in "Unicode Demystified" * Chapter 13 by Richard Gillam, published by Addison-Wesley. * * The inversion list data structure is currently implemented as an SV pointing * to an array of UVs that the SV thinks are bytes. This allows us to have an * array of UV whose memory management is automatically handled by the existing * facilities for SV's. * * Some of the methods should always be private to the implementation, and some * should eventually be made public */ /* The header definitions are in F<invlist_inline.h> */ #ifndef PERL_IN_XSUB_RE PERL_STATIC_INLINE UV* S__invlist_array_init(SV* const invlist, const bool will_have_0) { /* Returns a pointer to the first element in the inversion list's array. * This is called upon initialization of an inversion list. Where the * array begins depends on whether the list has the code point U+0000 in it * or not. The other parameter tells it whether the code that follows this * call is about to put a 0 in the inversion list or not. The first * element is either the element reserved for 0, if TRUE, or the element * after it, if FALSE */ bool* offset = get_invlist_offset_addr(invlist); UV* zero_addr = (UV *) SvPVX(invlist); PERL_ARGS_ASSERT__INVLIST_ARRAY_INIT; /* Must be empty */ assert(! _invlist_len(invlist)); *zero_addr = 0; /* 1^1 = 0; 1^0 = 1 */ *offset = 1 ^ will_have_0; return zero_addr + *offset; } #endif PERL_STATIC_INLINE void S_invlist_set_len(pTHX_ SV* const invlist, const UV len, const bool offset) { /* Sets the current number of elements stored in the inversion list. * Updates SvCUR correspondingly */ PERL_UNUSED_CONTEXT; PERL_ARGS_ASSERT_INVLIST_SET_LEN; assert(SvTYPE(invlist) == SVt_INVLIST); SvCUR_set(invlist, (len == 0) ? 
0 : TO_INTERNAL_SIZE(len + offset)); assert(SvLEN(invlist) == 0 || SvCUR(invlist) <= SvLEN(invlist)); } #ifndef PERL_IN_XSUB_RE STATIC void S_invlist_replace_list_destroys_src(pTHX_ SV * dest, SV * src) { /* Replaces the inversion list in 'dest' with the one from 'src'. It * steals the list from 'src', so 'src' is made to have a NULL list. This * is similar to what SvSetMagicSV() would do, if it were implemented on * inversion lists, though this routine avoids a copy */ const UV src_len = _invlist_len(src); const bool src_offset = *get_invlist_offset_addr(src); const STRLEN src_byte_len = SvLEN(src); char * array = SvPVX(src); const int oldtainted = TAINT_get; PERL_ARGS_ASSERT_INVLIST_REPLACE_LIST_DESTROYS_SRC; assert(SvTYPE(src) == SVt_INVLIST); assert(SvTYPE(dest) == SVt_INVLIST); assert(! invlist_is_iterating(src)); assert(SvCUR(src) == 0 || SvCUR(src) < SvLEN(src)); /* Make sure it ends in the right place with a NUL, as our inversion list * manipulations aren't careful to keep this true, but sv_usepvn_flags() * asserts it */ array[src_byte_len - 1] = '\0'; TAINT_NOT; /* Otherwise it breaks */ sv_usepvn_flags(dest, (char *) array, src_byte_len - 1, /* This flag is documented to cause a copy to be avoided */ SV_HAS_TRAILING_NUL); TAINT_set(oldtainted); SvPV_set(src, 0); SvLEN_set(src, 0); SvCUR_set(src, 0); /* Finish up copying over the other fields in an inversion list */ *get_invlist_offset_addr(dest) = src_offset; invlist_set_len(dest, src_len, src_offset); *get_invlist_previous_index_addr(dest) = 0; invlist_iterfinish(dest); } PERL_STATIC_INLINE IV* S_get_invlist_previous_index_addr(SV* invlist) { /* Return the address of the IV that is reserved to hold the cached index * */ PERL_ARGS_ASSERT_GET_INVLIST_PREVIOUS_INDEX_ADDR; assert(SvTYPE(invlist) == SVt_INVLIST); return &(((XINVLIST*) SvANY(invlist))->prev_index); } PERL_STATIC_INLINE IV S_invlist_previous_index(SV* const invlist) { /* Returns cached index of previous search */ PERL_ARGS_ASSERT_INVLIST_PREVIOUS_INDEX; return *get_invlist_previous_index_addr(invlist); } PERL_STATIC_INLINE void S_invlist_set_previous_index(SV* const invlist, const IV index) { /* Caches <index> for later retrieval */ PERL_ARGS_ASSERT_INVLIST_SET_PREVIOUS_INDEX; assert(index == 0 || index < (int) _invlist_len(invlist)); *get_invlist_previous_index_addr(invlist) = index; } PERL_STATIC_INLINE void S_invlist_trim(SV* invlist) { /* Free the not currently-being-used space in an inversion list */ /* But don't free up the space needed for the 0 UV that is always at the * beginning of the list, nor the trailing NUL */ const UV min_size = TO_INTERNAL_SIZE(1) + 1; PERL_ARGS_ASSERT_INVLIST_TRIM; assert(SvTYPE(invlist) == SVt_INVLIST); SvPV_renew(invlist, MAX(min_size, SvCUR(invlist) + 1)); } PERL_STATIC_INLINE void S_invlist_clear(pTHX_ SV* invlist) /* Empty the inversion list */ { PERL_ARGS_ASSERT_INVLIST_CLEAR; assert(SvTYPE(invlist) == SVt_INVLIST); invlist_set_len(invlist, 0, 0); invlist_trim(invlist); } #endif /* ifndef PERL_IN_XSUB_RE */ PERL_STATIC_INLINE bool S_invlist_is_iterating(SV* const invlist) { PERL_ARGS_ASSERT_INVLIST_IS_ITERATING; return *(get_invlist_iter_addr(invlist)) < (STRLEN) UV_MAX; } #ifndef PERL_IN_XSUB_RE PERL_STATIC_INLINE UV S_invlist_max(SV* const invlist) { /* Returns the maximum number of elements storable in the inversion list's * array, without having to realloc() */ PERL_ARGS_ASSERT_INVLIST_MAX; assert(SvTYPE(invlist) == SVt_INVLIST); /* Assumes worst case, in which the 0 element is not counted in the * inversion list, so 
subtracts 1 for that */ return SvLEN(invlist) == 0 /* This happens under _new_invlist_C_array */ ? FROM_INTERNAL_SIZE(SvCUR(invlist)) - 1 : FROM_INTERNAL_SIZE(SvLEN(invlist)) - 1; } SV* Perl__new_invlist(pTHX_ IV initial_size) { /* Return a pointer to a newly constructed inversion list, with enough * space to store 'initial_size' elements. If that number is negative, a * system default is used instead */ SV* new_list; if (initial_size < 0) { initial_size = 10; } /* Allocate the initial space */ new_list = newSV_type(SVt_INVLIST); /* First 1 is in case the zero element isn't in the list; second 1 is for * trailing NUL */ SvGROW(new_list, TO_INTERNAL_SIZE(initial_size + 1) + 1); invlist_set_len(new_list, 0, 0); /* Force iterinit() to be used to get iteration to work */ *get_invlist_iter_addr(new_list) = (STRLEN) UV_MAX; *get_invlist_previous_index_addr(new_list) = 0; return new_list; } SV* Perl__new_invlist_C_array(pTHX_ const UV* const list) { /* Return a pointer to a newly constructed inversion list, initialized to * point to <list>, which has to be in the exact correct inversion list * form, including internal fields. Thus this is a dangerous routine that * should not be used in the wrong hands. The passed in 'list' contains * several header fields at the beginning that are not part of the * inversion list body proper */ const STRLEN length = (STRLEN) list[0]; const UV version_id = list[1]; const bool offset = cBOOL(list[2]); #define HEADER_LENGTH 3 /* If any of the above changes in any way, you must change HEADER_LENGTH * (if appropriate) and regenerate INVLIST_VERSION_ID by running * perl -E 'say int(rand 2**31-1)' */ #define INVLIST_VERSION_ID 148565664 /* This is a combination of a version and data structure type, so that one being passed in can be validated to be an inversion list of the correct vintage. */ SV* invlist = newSV_type(SVt_INVLIST); PERL_ARGS_ASSERT__NEW_INVLIST_C_ARRAY; if (version_id != INVLIST_VERSION_ID) { Perl_croak(aTHX_ "panic: Incorrect version for previously generated inversion list"); } /* The generated array passed in includes header elements that aren't part * of the list proper, so start it just after them */ SvPV_set(invlist, (char *) (list + HEADER_LENGTH)); SvLEN_set(invlist, 0); /* Means we own the contents, and the system shouldn't touch it */ *(get_invlist_offset_addr(invlist)) = offset; /* The 'length' passed to us is the physical number of elements in the * inversion list. But if there is an offset the logical number is one * less than that */ invlist_set_len(invlist, length - offset, offset); invlist_set_previous_index(invlist, 0); /* Initialize the iteration pointer. */ invlist_iterfinish(invlist); SvREADONLY_on(invlist); return invlist; } STATIC void S_invlist_extend(pTHX_ SV* const invlist, const UV new_max) { /* Grow the maximum size of an inversion list */ PERL_ARGS_ASSERT_INVLIST_EXTEND; assert(SvTYPE(invlist) == SVt_INVLIST); /* Add one to account for the zero element at the beginning which may not * be counted by the calling parameters */ SvGROW((SV *)invlist, TO_INTERNAL_SIZE(new_max + 1)); } STATIC void S__append_range_to_invlist(pTHX_ SV* const invlist, const UV start, const UV end) { /* Subject to change or removal. Append the range from 'start' to 'end' at * the end of the inversion list. The range must be above any existing * ones. 
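     * For example, appending 10..12 to the list [3, 6] (the set {3,4,5})
     * yields [3, 6, 10, 13] (the set {3,4,5,10,11,12}), while appending
     * 6..8 to that same list merely extends its final range, giving [3, 9]
     * (the set {3..8}).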
*/ UV* array; UV max = invlist_max(invlist); UV len = _invlist_len(invlist); bool offset; PERL_ARGS_ASSERT__APPEND_RANGE_TO_INVLIST; if (len == 0) { /* Empty lists must be initialized */ offset = start != 0; array = _invlist_array_init(invlist, ! offset); } else { /* Here, the existing list is non-empty. The current max entry in the * list is generally the first value not in the set, except when the * set extends to the end of permissible values, in which case it is * the first entry in that final set, and so this call is an attempt to * append out-of-order */ UV final_element = len - 1; array = invlist_array(invlist); if ( array[final_element] > start || ELEMENT_RANGE_MATCHES_INVLIST(final_element)) { Perl_croak(aTHX_ "panic: attempting to append to an inversion list, but wasn't at the end of the list, final=%" UVuf ", start=%" UVuf ", match=%c", array[final_element], start, ELEMENT_RANGE_MATCHES_INVLIST(final_element) ? 't' : 'f'); } /* Here, it is a legal append. If the new range begins 1 above the end * of the range below it, it is extending the range below it, so the * new first value not in the set is one greater than the newly * extended range. */ offset = *get_invlist_offset_addr(invlist); if (array[final_element] == start) { if (end != UV_MAX) { array[final_element] = end + 1; } else { /* But if the end is the maximum representable on the machine, * assume that infinity was actually what was meant. Just let * the range that this would extend to have no end */ invlist_set_len(invlist, len - 1, offset); } return; } } /* Here the new range doesn't extend any existing set. Add it */ len += 2; /* Includes an element each for the start and end of range */ /* If this will overflow the existing space, extend, which may cause the array to * be moved */ if (max < len) { invlist_extend(invlist, len); /* Have to set len here to avoid assert failure in invlist_array() */ invlist_set_len(invlist, len, offset); array = invlist_array(invlist); } else { invlist_set_len(invlist, len, offset); } /* The next item on the list starts the range, the one after that is * one past the new range. */ array[len - 2] = start; if (end != UV_MAX) { array[len - 1] = end + 1; } else { /* But if the end is the maximum representable on the machine, just let * the range have no end */ invlist_set_len(invlist, len - 1, offset); } } SSize_t Perl__invlist_search(SV* const invlist, const UV cp) { /* Searches the inversion list for the entry that contains the input code * point <cp>. If <cp> is not in the list, -1 is returned. Otherwise, the * return value is the index into the list's array of the range that * contains <cp>, that is, 'i' such that * array[i] <= cp < array[i+1] */ IV low = 0; IV mid; IV high = _invlist_len(invlist); const IV highest_element = high - 1; const UV* array; PERL_ARGS_ASSERT__INVLIST_SEARCH; /* If list is empty, return failure. */ if (high == 0) { return -1; } /* (We can't get the array unless we know the list is non-empty) */ array = invlist_array(invlist); mid = invlist_previous_index(invlist); assert(mid >=0); if (mid > highest_element) { mid = highest_element; } /* <mid> contains the cache of the result of the previous call to this * function (0 the first time). See if this call is for the same result, * or if it is for mid-1. This is under the theory that calls to this * function will often be for related code points that are near each other. * And benchmarks show that caching gives better results. We also test * here if the code point is within the bounds of the list. These tests * replace others that would have had to be made anyway to make sure that * the array bounds were not exceeded, and these give us extra information * at the same time */
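/* An illustrative worked case (not part of the original comments): with the * array { 0, 3, 5, 10 }, searching for 4 returns index 1, since array[1] == 3 * <= 4 < 5 == array[2]; searching for 12 returns index 3, the final range, * which extends to infinity; and searching for a code point below array[0] * (impossible here, as array[0] is 0) would return -1. */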
if (cp >= array[mid]) { if (cp >= array[highest_element]) { return highest_element; } /* Here, array[mid] <= cp < array[highest_element]. This means that * the final element is not the answer, so can exclude it; it also * means that <mid> is not the final element, so can refer to 'mid + 1' * safely */ if (cp < array[mid + 1]) { return mid; } high--; low = mid + 1; } else { /* cp < array[mid] */ if (cp < array[0]) { /* Fail if outside the array */ return -1; } high = mid; if (cp >= array[mid - 1]) { goto found_entry; } } /* Binary search. What we are looking for is <i> such that * array[i] <= cp < array[i+1] * The loop below converges on the i+1. Note that there may not be an * (i+1)th element in the array, and things work nonetheless */ while (low < high) { mid = (low + high) / 2; assert(mid <= highest_element); if (array[mid] <= cp) { /* cp >= array[mid] */ low = mid + 1; /* We could do this extra test to exit the loop early. if (cp < array[low]) { return mid; } */ } else { /* cp < array[mid] */ high = mid; } } found_entry: high--; invlist_set_previous_index(invlist, high); return high; } void Perl__invlist_populate_swatch(SV* const invlist, const UV start, const UV end, U8* swatch) { /* populates a swatch of a swash the same way swatch_get() does in utf8.c, * but is used when the swash has an inversion list. This makes this much * faster, as it uses a binary search instead of a linear one. This is * intimately tied to that function, and perhaps should be in utf8.c, * except it is intimately tied to inversion lists as well. It assumes * that <swatch> is all 0's on input */ UV current = start; const IV len = _invlist_len(invlist); IV i; const UV * array; PERL_ARGS_ASSERT__INVLIST_POPULATE_SWATCH; if (len == 0) { /* Empty inversion list */ return; } array = invlist_array(invlist); /* Find which element it is */ i = _invlist_search(invlist, start); /* We populate from <start> to <end> */ while (current < end) { UV upper; /* The inversion list gives the results for every possible code point * after the first one in the list. Only those ranges whose index is * even are ones that the inversion list matches. For the odd ones, * and if the initial code point is not in the list, we have to skip * forward to the next element */ if (i == -1 || ! ELEMENT_RANGE_MATCHES_INVLIST(i)) { i++; if (i >= len) { /* Finished if beyond the end of the array */ return; } current = array[i]; if (current >= end) { /* Finished if beyond the end of what we are populating */ if (LIKELY(end < UV_MAX)) { return; } /* We get here when the upper bound is the maximum * representable on the machine, and we are looking for just * that code point. Have to special case it */ i = len; goto join_end_of_list; } } assert(current >= start); /* The current range ends one below the next one, except don't go past * <end> */ i++; upper = (i < len && array[i] < end) ? array[i] : end; /* Here we are in a range that matches. Populate a bit in the 8-bit U8 * for each code point in it */ for (; current < upper; current++) { const STRLEN offset = (STRLEN)(current - start); swatch[offset >> 3] |= 1 << (offset & 7); } join_end_of_list: /* Quit if at the end of the list */ if (i >= len) { /* But first, have to deal with the highest possible code point on * the platform. 
The previous code assumes that <end> is one * beyond where we want to populate, but that is impossible at the * platform's infinity, so have to handle it specially */ if (UNLIKELY(end == UV_MAX && ELEMENT_RANGE_MATCHES_INVLIST(len-1))) { const STRLEN offset = (STRLEN)(end - start); swatch[offset >> 3] |= 1 << (offset & 7); } return; } /* Advance to the next range, which will be for code points not in the * inversion list */ current = array[i]; } return; } void Perl__invlist_union_maybe_complement_2nd(pTHX_ SV* const a, SV* const b, const bool complement_b, SV** output) { /* Take the union of two inversion lists and point '*output' to it. On * input, '*output' MUST POINT TO NULL OR TO AN SV* INVERSION LIST (possibly * even 'a' or 'b'). If to an inversion list, the contents of the original * list will be replaced by the union. The first list, 'a', may be * NULL, in which case a copy of the second list is placed in '*output'. * If 'complement_b' is TRUE, the union is taken of the complement * (inversion) of 'b' instead of b itself. * * The basis for this comes from "Unicode Demystified" Chapter 13 by * Richard Gillam, published by Addison-Wesley, and explained at some * length there. The preface says to incorporate its examples into your * code at your own risk. * * The algorithm is like a merge sort. */ const UV* array_a; /* a's array */ const UV* array_b; UV len_a; /* length of a's array */ UV len_b; SV* u; /* the resulting union */ UV* array_u; UV len_u = 0; UV i_a = 0; /* current index into a's array */ UV i_b = 0; UV i_u = 0; /* running count, as explained in the algorithm source book; items are * stopped accumulating and are output when the count changes to/from 0. * The count is incremented when we start a range that's in an input's set, * and decremented when we start a range that's not in a set. So this * variable can be 0, 1, or 2. When it is 0 neither input is in their set, * and hence nothing goes into the union; 1, just one of the inputs is in * its set (and its current range gets added to the union); and 2 when both * inputs are in their sets. */ UV count = 0; PERL_ARGS_ASSERT__INVLIST_UNION_MAYBE_COMPLEMENT_2ND; assert(a != b); assert(*output == NULL || SvTYPE(*output) == SVt_INVLIST); len_b = _invlist_len(b); if (len_b == 0) { /* Here, 'b' is empty, hence its complement is all possible code * points. So if the union includes the complement of 'b', it includes * everything, and we need not even look at 'a'. It's easiest to * create a new inversion list that matches everything. */ if (complement_b) { SV* everything = _add_range_to_invlist(NULL, 0, UV_MAX); if (*output == NULL) { /* If the output didn't exist, just point it at the new list */ *output = everything; } else { /* Otherwise, replace its contents with the new list */ invlist_replace_list_destroys_src(*output, everything); SvREFCNT_dec_NN(everything); } return; } /* Here, we don't want the complement of 'b', and since 'b' is empty, * the union will come entirely from 'a'. If 'a' is NULL or empty, the * output will be empty */ if (a == NULL || _invlist_len(a) == 0) { if (*output == NULL) { *output = _new_invlist(0); } else { invlist_clear(*output); } return; } /* Here, 'a' is not empty, but 'b' is, so 'a' entirely determines the * union. 
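* (For instance, the union of a list matching 5..9 with an empty 'b', complement not taken, is just the 5..9 list again.)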
We can just return a copy of 'a' if '*output' doesn't point * to an existing list */ if (*output == NULL) { *output = invlist_clone(a); return; } /* If the output is to overwrite 'a', we have a no-op, as it's * already in 'a' */ if (*output == a) { return; } /* Here, '*output' is to be overwritten by 'a' */ u = invlist_clone(a); invlist_replace_list_destroys_src(*output, u); SvREFCNT_dec_NN(u); return; } /* Here 'b' is not empty. See about 'a' */ if (a == NULL || ((len_a = _invlist_len(a)) == 0)) { /* Here, 'a' is empty (and b is not). That means the union will come * entirely from 'b'. If '*output' is NULL, we can directly return a * clone of 'b'. Otherwise, we replace the contents of '*output' with * the clone */ SV ** dest = (*output == NULL) ? output : &u; *dest = invlist_clone(b); if (complement_b) { _invlist_invert(*dest); } if (dest == &u) { invlist_replace_list_destroys_src(*output, u); SvREFCNT_dec_NN(u); } return; } /* Here both lists exist and are non-empty */ array_a = invlist_array(a); array_b = invlist_array(b); /* If we are to take the union of 'a' with the complement of 'b', set it * up so we are looking at b's complement. */ if (complement_b) { /* To complement, we invert: if the first element is 0, remove it. To * do this, we just pretend the array starts one later */ if (array_b[0] == 0) { array_b++; len_b--; } else { /* But if the first element is not zero, we pretend the list starts * at the 0 that is always stored immediately before the array. */ array_b--; len_b++; } } /* Size the union for the worst case: that the sets are completely * disjoint */ u = _new_invlist(len_a + len_b); /* Will contain U+0000 if either component does */ array_u = _invlist_array_init(u, ( len_a > 0 && array_a[0] == 0) || (len_b > 0 && array_b[0] == 0)); /* Go through each input list item by item, stopping when have exhausted * one of them */ while (i_a < len_a && i_b < len_b) { UV cp; /* The element to potentially add to the union's array */ bool cp_in_set; /* is it in the input list's set or not */ /* We need to take one or the other of the two inputs for the union. * Since we are merging two sorted lists, we take the smaller of the * next items. In case of a tie, we take first the one that is in its * set. If we first took the one not in its set, it would decrement * the count, possibly to 0 which would cause it to be output as ending * the range, and the next time through we would take the same number, * and output it again as beginning the next range. By doing it the * opposite way, there is no possibility that the count will be * momentarily decremented to 0, and thus the two adjoining ranges will * be seamlessly merged. (In a tie and both are in the set or both not * in the set, it doesn't matter which we take first.) */ if ( array_a[i_a] < array_b[i_b] || ( array_a[i_a] == array_b[i_b] && ELEMENT_RANGE_MATCHES_INVLIST(i_a))) { cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_a); cp = array_a[i_a++]; } else { cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_b); cp = array_b[i_b++]; } /* Here, have chosen which of the two inputs to look at. Only output * if the running count changes to/from 0, which marks the * beginning/end of a range that's in the set */ if (cp_in_set) { if (count == 0) { array_u[i_u++] = cp; } count++; } else { count--; if (count == 0) { array_u[i_u++] = cp; } } }
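/* An illustrative trace (not part of the original comments): unioning * { 0, 3 } (matching 0..2) with { 2, 6 } (matching 2..5), the loop outputs 0 * when the count first rises from 0; the interior boundaries 2 and 3 move the * count between 1 and 2 without output; and the leftover 6 from the second * list is copied over after the loop, giving { 0, 6 }, which matches 0..5. */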
/* The loop above increments the index into exactly one of the input lists * each iteration, and ends when either index gets to its list end. That * means the other index is lower than its end, and so something is * remaining in that one. We decrement 'count', as explained below, if * that list is in its set. (i_a and i_b each currently index the element * beyond the one we care about.) */ if ( (i_a != len_a && PREV_RANGE_MATCHES_INVLIST(i_a)) || (i_b != len_b && PREV_RANGE_MATCHES_INVLIST(i_b))) { count--; } /* Above we decremented 'count' if the list that had unexamined elements in * it was in its set. This has made it so that 'count' being non-zero * means there isn't anything left to output; and 'count' equal to 0 means * that what is left to output is precisely that which is left in the * non-exhausted input list. * * To see why, note first that the exhausted input obviously has nothing * left to add to the union. If it was in its set at its end, that means * the set extends from here to the platform's infinity, and hence so does * the union and the non-exhausted set is irrelevant. The exhausted set * also contributed 1 to 'count'. If 'count' was 2, it got decremented to * 1, but if it was 1, the non-exhausted set wasn't in its set, and so * 'count' remains at 1. This is consistent with the decremented 'count' * != 0 meaning there's nothing left to add to the union. * * But if the exhausted input wasn't in its set, it contributed 0 to * 'count', and the rest of the union will be whatever the other input is. * If 'count' was 0, neither list was in its set, and 'count' remains 0; * otherwise it gets decremented to 0. This is consistent with 'count' * == 0 meaning the remainder of the union is whatever is left in the * non-exhausted list. */ if (count != 0) { len_u = i_u; } else { IV copy_count = len_a - i_a; if (copy_count > 0) { /* The non-exhausted input is 'a' */ Copy(array_a + i_a, array_u + i_u, copy_count, UV); } else { /* The non-exhausted input is b */ copy_count = len_b - i_b; Copy(array_b + i_b, array_u + i_u, copy_count, UV); } len_u = i_u + copy_count; } /* Set the result to the final length, which can change the pointer to * array_u, so re-find it. (Note that it is unlikely that this will * change, as we are shrinking the space, not enlarging it) */ if (len_u != _invlist_len(u)) { invlist_set_len(u, len_u, *get_invlist_offset_addr(u)); invlist_trim(u); array_u = invlist_array(u); } if (*output == NULL) { /* Simply return the new inversion list */ *output = u; } else { /* Otherwise, overwrite the inversion list that was in '*output'. We * could instead free '*output', and then set it to 'u', but experience * has shown [perl #127392] that if the input is a mortal, we can get a * huge build-up of these during regex compilation before they get * freed. */ invlist_replace_list_destroys_src(*output, u); SvREFCNT_dec_NN(u); } return; } void Perl__invlist_intersection_maybe_complement_2nd(pTHX_ SV* const a, SV* const b, const bool complement_b, SV** i) { /* Take the intersection of two inversion lists and point '*i' to it. On * input, '*i' MUST POINT TO NULL OR TO AN SV* INVERSION LIST (possibly * even 'a' or 'b'). If to an inversion list, the contents of the original * list will be replaced by the intersection. The first list, 'a', may be * NULL, in which case '*i' will be an empty list. If 'complement_b' is * TRUE, the result will be the intersection of 'a' and the complement (or * inversion) of 'b' instead of 'b' directly. * * The basis for this comes from "Unicode Demystified" Chapter 13 by * Richard Gillam, published by Addison-Wesley, and explained at some * length there. 
The preface says to incorporate its examples into your * code at your own risk. In fact, it had bugs. * * The algorithm is like a merge sort, and is essentially the same as the * union above */ const UV* array_a; /* a's array */ const UV* array_b; UV len_a; /* length of a's array */ UV len_b; SV* r; /* the resulting intersection */ UV* array_r; UV len_r = 0; UV i_a = 0; /* current index into a's array */ UV i_b = 0; UV i_r = 0; /* running count of how many of the two inputs are positioned at ranges * that are in their sets. As explained in the algorithm source book, * items are stopped accumulating and are output when the count changes * to/from 2. The count is incremented when we start a range that's in an * input's set, and decremented when we start a range that's not in a set. * Only when it is 2 are we in the intersection. */ UV count = 0; PERL_ARGS_ASSERT__INVLIST_INTERSECTION_MAYBE_COMPLEMENT_2ND; assert(a != b); assert(*i == NULL || SvTYPE(*i) == SVt_INVLIST); /* Special case if either one is empty */ len_a = (a == NULL) ? 0 : _invlist_len(a); if ((len_a == 0) || ((len_b = _invlist_len(b)) == 0)) { if (len_a != 0 && complement_b) { /* Here, 'a' is not empty, therefore from the enclosing 'if', 'b' * must be empty. Here, also we are using 'b's complement, which * hence must be every possible code point. Thus the intersection * is simply 'a'. */ if (*i == a) { /* No-op */ return; } if (*i == NULL) { *i = invlist_clone(a); return; } r = invlist_clone(a); invlist_replace_list_destroys_src(*i, r); SvREFCNT_dec_NN(r); return; } /* Here, 'a' or 'b' is empty and not using the complement of 'b'. The * intersection must be empty */ if (*i == NULL) { *i = _new_invlist(0); return; } invlist_clear(*i); return; } /* Here both lists exist and are non-empty */ array_a = invlist_array(a); array_b = invlist_array(b); /* If we are to take the intersection of 'a' with the complement of 'b', set it * up so we are looking at b's complement. */ if (complement_b) { /* To complement, we invert: if the first element is 0, remove it. To * do this, we just pretend the array starts one later */ if (array_b[0] == 0) { array_b++; len_b--; } else { /* But if the first element is not zero, we pretend the list starts * at the 0 that is always stored immediately before the array. */ array_b--; len_b++; } } /* Size the intersection for the worst case: that the intersection ends up * fragmenting everything to be completely disjoint */ r = _new_invlist(len_a + len_b); /* Will contain U+0000 iff both components do */ array_r = _invlist_array_init(r, len_a > 0 && array_a[0] == 0 && len_b > 0 && array_b[0] == 0); /* Go through each list item by item, stopping when have exhausted one of * them */ while (i_a < len_a && i_b < len_b) { UV cp; /* The element to potentially add to the intersection's array */ bool cp_in_set; /* Is it in the input list's set or not */ /* We need to take one or the other of the two inputs for the * intersection. Since we are merging two sorted lists, we take the * smaller of the next items. In case of a tie, we take first the one * that is not in its set (a difference from the union algorithm). If * we first took the one in its set, it would increment the count, * possibly to 2 which would cause it to be output as starting a range * in the intersection, and the next time through we would take that * same number, and output it again as ending the set. By doing the * opposite of this, there is no possibility that the count will be * momentarily incremented to 2. (In a tie and both are in the set or * both not in the set, it doesn't matter which we take first.) */
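/* An illustrative trace (not part of the original comments): intersecting * { 0, 3 } (matching 0..2) with { 2, 6 } (matching 2..5), the count reaches 2 * only at the boundary 2 (output) and drops back below 2 at 3 (also output), * so the result is { 2, 3 }, which matches just the single code point 2. */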
if ( array_a[i_a] < array_b[i_b] || ( array_a[i_a] == array_b[i_b] && ! ELEMENT_RANGE_MATCHES_INVLIST(i_a))) { cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_a); cp = array_a[i_a++]; } else { cp_in_set = ELEMENT_RANGE_MATCHES_INVLIST(i_b); cp = array_b[i_b++]; } /* Here, have chosen which of the two inputs to look at. Only output * if the running count changes to/from 2, which marks the * beginning/end of a range that's in the intersection */ if (cp_in_set) { count++; if (count == 2) { array_r[i_r++] = cp; } } else { if (count == 2) { array_r[i_r++] = cp; } count--; } } /* The loop above increments the index into exactly one of the input lists * each iteration, and ends when either index gets to its list end. That * means the other index is lower than its end, and so something is * remaining in that one. We increment 'count', as explained below, if the * exhausted list was in its set. (i_a and i_b each currently index the * element beyond the one we care about.) */ if ( (i_a == len_a && PREV_RANGE_MATCHES_INVLIST(i_a)) || (i_b == len_b && PREV_RANGE_MATCHES_INVLIST(i_b))) { count++; } /* Above we incremented 'count' if the exhausted list was in its set. This * has made it so that 'count' being below 2 means there is nothing left to * output; otherwise what's left to add to the intersection is precisely * that which is left in the non-exhausted input list. * * To see why, note first that the exhausted input obviously has nothing * left to affect the intersection. If it was in its set at its end, that * means the set extends from here to the platform's infinity, and hence * anything in the non-exhausted's list will be in the intersection, and * anything not in it won't be. Hence, the rest of the intersection is * precisely what's in the non-exhausted list. The exhausted set also * contributed 1 to 'count', meaning 'count' was at least 1. Incrementing * it means 'count' is now at least 2. This is consistent with the * incremented 'count' being >= 2 meaning we add the non-exhausted list to * the intersection. * * But if the exhausted input wasn't in its set, it contributed 0 to * 'count', and the intersection can't include anything further; the * non-exhausted set is irrelevant. 'count' was at most 1, and doesn't get * incremented. This is consistent with 'count' being < 2 meaning nothing * further to add to the intersection. */ if (count < 2) { /* Nothing left to put in the intersection. */ len_r = i_r; } else { /* copy the non-exhausted list, unchanged. */ IV copy_count = len_a - i_a; if (copy_count > 0) { /* a is the one with stuff left */ Copy(array_a + i_a, array_r + i_r, copy_count, UV); } else { /* b is the one with stuff left */ copy_count = len_b - i_b; Copy(array_b + i_b, array_r + i_r, copy_count, UV); } len_r = i_r + copy_count; } /* Set the result to the final length, which can change the pointer to * array_r, so re-find it. (Note that it is unlikely that this will * change, as we are shrinking the space, not enlarging it) */ if (len_r != _invlist_len(r)) { invlist_set_len(r, len_r, *get_invlist_offset_addr(r)); invlist_trim(r); array_r = invlist_array(r); } if (*i == NULL) { /* Simply return the calculated intersection */ *i = r; } else { /* Otherwise, replace the existing inversion list in '*i'. 
We could instead free '*i', and then set it to 'r', but experience has shown [perl #127392] that if the input is a mortal, we can get a huge build-up of these during regex compilation before they get freed. */ if (len_r) { invlist_replace_list_destroys_src(*i, r); } else { invlist_clear(*i); } SvREFCNT_dec_NN(r); } return; } SV* Perl__add_range_to_invlist(pTHX_ SV* invlist, UV start, UV end) { /* Add the range from 'start' to 'end' inclusive to the inversion list's * set. A pointer to the inversion list is returned. This may actually be * a new list, in which case the passed in one has been destroyed. The * passed-in inversion list can be NULL, in which case a new one is created * with just the one range in it. The new list is not necessarily * NUL-terminated. Space is not freed if the inversion list shrinks as a * result of this function. The gain would not be large, and in many * cases, this is called multiple times on a single inversion list, so * anything freed may almost immediately be needed again. * * This used to mostly call the 'union' routine, but that is much more * heavyweight than really needed for a single range addition */ UV* array; /* The array implementing the inversion list */ UV len; /* How many elements in 'array' */ SSize_t i_s; /* index into the invlist array where 'start' should go */ SSize_t i_e = 0; /* And the index where 'end' should go */ UV cur_highest; /* The highest code point in the inversion list upon entry to this function */ /* This range becomes the whole inversion list if none already existed */ if (invlist == NULL) { invlist = _new_invlist(2); _append_range_to_invlist(invlist, start, end); return invlist; } /* Likewise, if the inversion list is currently empty */ len = _invlist_len(invlist); if (len == 0) { _append_range_to_invlist(invlist, start, end); return invlist; } /* Starting here, we have to know the internals of the list */ array = invlist_array(invlist); /* If the new range ends higher than the current highest ... */ cur_highest = invlist_highest(invlist); if (end > cur_highest) { /* If the whole range is higher, we can just append it */ if (start > cur_highest) { _append_range_to_invlist(invlist, start, end); return invlist; } /* Otherwise, add the portion that is higher ... */ _append_range_to_invlist(invlist, cur_highest + 1, end); /* ... and continue on below to handle the rest. As a result of the * above append, we know that the index of the end of the range is the * final even numbered one of the array. Recall that the final element * always starts a range that extends to infinity. If that range is in * the set (meaning the set goes from here to infinity), it will be an * even index, but if it isn't in the set, it's odd, and the final * range in the set is one less, which is even. */ if (end == UV_MAX) { i_e = len; } else { i_e = len - 2; } } /* We have dealt with appending, now see about prepending. If the new * range starts lower than the current lowest ... */ if (start < array[0]) { /* Adding something which has 0 in it is somewhat tricky, and uncommon. * Let the union code handle it, rather than having to know the * trickiness in two code places. 
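* (For example, adding the range 0..2 to a list whose array is { 5, 10 } must * yield { 0, 3, 5, 10 }, whose first element is the 0 that may be stored only * implicitly via the offset mechanism described earlier; the union code * already knows how to handle that.)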
*/ if (UNLIKELY(start == 0)) { SV* range_invlist; range_invlist = _new_invlist(2); _append_range_to_invlist(range_invlist, start, end); _invlist_union(invlist, range_invlist, &invlist); SvREFCNT_dec_NN(range_invlist); return invlist; } /* If the whole new range comes before the first entry, and doesn't * extend it, we have to insert it as an additional range */ if (end < array[0] - 1) { i_s = i_e = -1; goto splice_in_new_range; } /* Here the new range adjoins the existing first range, extending it * downwards. */ array[0] = start; /* And continue on below to handle the rest. We know that the index of * the beginning of the range is the first one of the array */ i_s = 0; } else { /* Not prepending any part of the new range to the existing list. * Find where in the list it should go. This finds i_s, such that: * invlist[i_s] <= start < array[i_s+1] */ i_s = _invlist_search(invlist, start); } /* At this point, any extending before the beginning of the inversion list * and/or after the end has been done. This has made it so that, in the * code below, each endpoint of the new range is either in a range that is * in the set, or is in a gap between two ranges that are. This means we * don't have to worry about exceeding the array bounds. * * Find where in the list the new range ends (but we can skip this if we * have already determined what it is, or if it will be the same as i_s, * which we already have computed) */ if (i_e == 0) { i_e = (start == end) ? i_s : _invlist_search(invlist, end); } /* Here generally invlist[i_e] <= end < array[i_e+1]. But if invlist[i_e] * is a range that goes to infinity there is no element at invlist[i_e+1], * so only the first relation holds. */ if ( ! ELEMENT_RANGE_MATCHES_INVLIST(i_s)) { /* Here, the ranges on either side of the beginning of the new range * are in the set, and this range starts in the gap between them. * * The new range extends the range above it downwards if the new range * ends at or above that range's start */ const bool extends_the_range_above = ( end == UV_MAX || end + 1 >= array[i_s+1]); /* The new range extends the range below it upwards if it begins just * after where that range ends */ if (start == array[i_s]) { /* If the new range fills the entire gap between the other ranges, * they will get merged together. Other ranges may also get * merged, depending on how many of them the new range spans. In * the general case, we do the merge later, just once, after we * figure out how many to merge. But in the case where the new * range exactly spans just this one gap (possibly extending into * the one above), we do the merge here, and an early exit. This * is done here to avoid having to special case later. */ if (i_e - i_s <= 1) { /* If i_e - i_s == 1, it means that the new range terminates * within the range above, and hence 'extends_the_range_above' * must be true. (If the range above it extends to infinity, * 'i_s+2' will be above the array's limit, but 'len-i_s-2' * will be 0, so no harm done.) */ if (extends_the_range_above) { Move(array + i_s + 2, array + i_s, len - i_s - 2, UV); invlist_set_len(invlist, len - 2, *(get_invlist_offset_addr(invlist))); return invlist; } /* Here, i_e must == i_s. We keep them in sync, as they apply * to the same range, and below we are about to decrement i_s * */ i_e--; } /* Here, the new range is adjacent to the one below. (It may also * span beyond the range above, but that will get resolved later.) * Extend the range below to include this one. */ array[i_s] = (end == UV_MAX) ? 
UV_MAX : end + 1; i_s--; start = array[i_s]; } else if (extends_the_range_above) { /* Here the new range only extends the range above it, but not the * one below. It merges with the one above. Again, we keep i_e * and i_s in sync if they point to the same range */ if (i_e == i_s) { i_e++; } i_s++; array[i_s] = start; } } /* Here, we've dealt with the new range start extending any adjoining * existing ranges. * * If the new range extends to infinity, it is now the final one, * regardless of what was there before */ if (UNLIKELY(end == UV_MAX)) { invlist_set_len(invlist, i_s + 1, *(get_invlist_offset_addr(invlist))); return invlist; } /* If i_e started as == i_s, it has also been dealt with, * and been updated to the new i_s, which will fail the following if */ if (! ELEMENT_RANGE_MATCHES_INVLIST(i_e)) { /* Here, the ranges on either side of the end of the new range are in * the set, and this range ends in the gap between them. * * If this range is adjacent to (hence extends) the range above it, it * becomes part of that range; likewise if it extends the range below, * it becomes part of that range */ if (end + 1 == array[i_e+1]) { i_e++; array[i_e] = start; } else if (start <= array[i_e]) { array[i_e] = end + 1; i_e--; } } if (i_s == i_e) { /* If the range fits entirely in an existing range (as possibly already * extended above), it doesn't add anything new */ if (ELEMENT_RANGE_MATCHES_INVLIST(i_s)) { return invlist; } /* Here, no part of the range is in the list. Must add it. It will * occupy 2 more slots */ splice_in_new_range: invlist_extend(invlist, len + 2); array = invlist_array(invlist); /* Move the rest of the array down two slots. Don't include any * trailing NUL */ Move(array + i_e + 1, array + i_e + 3, len - i_e - 1, UV); /* Do the actual splice */ array[i_e+1] = start; array[i_e+2] = end + 1; invlist_set_len(invlist, len + 2, *(get_invlist_offset_addr(invlist))); return invlist; } /* Here the new range crossed the boundaries of a pre-existing range. The * code above has adjusted things so that both ends are in ranges that are * in the set. This means everything in between must also be in the set. * Just squash things together */ Move(array + i_e + 1, array + i_s + 1, len - i_e - 1, UV); invlist_set_len(invlist, len - i_e + i_s, *(get_invlist_offset_addr(invlist))); return invlist; } SV* Perl__setup_canned_invlist(pTHX_ const STRLEN size, const UV element0, UV** other_elements_ptr) { /* Create and return an inversion list whose contents are to be populated * by the caller. The caller gives the number of elements (in 'size') and * the very first element ('element0'). This function will set * '*other_elements_ptr' to an array of UVs, where the remaining elements * are to be placed. * * Obviously there is some trust involved that the caller will properly * fill in the other elements of the array. * * (The first element needs to be passed in, as the underlying code does * things differently depending on whether it is zero or non-zero) */ SV* invlist = _new_invlist(size); bool offset; PERL_ARGS_ASSERT__SETUP_CANNED_INVLIST; invlist = add_cp_to_invlist(invlist, element0); offset = *get_invlist_offset_addr(invlist); invlist_set_len(invlist, size, offset); *other_elements_ptr = invlist_array(invlist) + 1; return invlist; } #endif PERL_STATIC_INLINE SV* S_add_cp_to_invlist(pTHX_ SV* invlist, const UV cp) { return _add_range_to_invlist(invlist, cp, cp); } #ifndef PERL_IN_XSUB_RE void Perl__invlist_invert(pTHX_ SV* const invlist) { /* Complement the input inversion list. 
This adds a 0 if the list didn't * have a zero; removes it otherwise. As described above, the data * structure is set up so that this is very efficient. (For example, an * illustrative case: complementing a list whose array is { 0, 3, 5, 10 }, * matching 0..2 and 5..9, conceptually just drops the leading 0, yielding * { 3, 5, 10 }, which matches 3..4 and 10..infinity.) */ PERL_ARGS_ASSERT__INVLIST_INVERT; assert(! invlist_is_iterating(invlist)); /* The inverse of matching nothing is matching everything */ if (_invlist_len(invlist) == 0) { _append_range_to_invlist(invlist, 0, UV_MAX); return; } *get_invlist_offset_addr(invlist) = ! *get_invlist_offset_addr(invlist); } #endif PERL_STATIC_INLINE SV* S_invlist_clone(pTHX_ SV* const invlist) { /* Return a new inversion list that is a copy of the input one, which is * unchanged. The new list will not be mortal even if the old one was. */ /* Need to allocate extra space to accommodate Perl's addition of a * trailing NUL to SvPV's, since it thinks they are always strings */ SV* new_invlist = _new_invlist(_invlist_len(invlist) + 1); STRLEN physical_length = SvCUR(invlist); bool offset = *(get_invlist_offset_addr(invlist)); PERL_ARGS_ASSERT_INVLIST_CLONE; *(get_invlist_offset_addr(new_invlist)) = offset; invlist_set_len(new_invlist, _invlist_len(invlist), offset); Copy(SvPVX(invlist), SvPVX(new_invlist), physical_length, char); return new_invlist; } PERL_STATIC_INLINE STRLEN* S_get_invlist_iter_addr(SV* invlist) { /* Return the address of the UV that contains the current iteration * position */ PERL_ARGS_ASSERT_GET_INVLIST_ITER_ADDR; assert(SvTYPE(invlist) == SVt_INVLIST); return &(((XINVLIST*) SvANY(invlist))->iterator); } PERL_STATIC_INLINE void S_invlist_iterinit(SV* invlist) /* Initialize iterator for invlist */ { PERL_ARGS_ASSERT_INVLIST_ITERINIT; *get_invlist_iter_addr(invlist) = 0; } PERL_STATIC_INLINE void S_invlist_iterfinish(SV* invlist) { /* Terminate iterator for invlist. This is to catch development errors. * Any iteration that is interrupted before completing should call this * function. Functions that add code points anywhere else but to the end * of an inversion list assert that they are not in the middle of an * iteration. If they were, the addition would make the iteration * problematical: if the iteration hadn't reached the place where things * were being added, it would be ok */ PERL_ARGS_ASSERT_INVLIST_ITERFINISH; *get_invlist_iter_addr(invlist) = (STRLEN) UV_MAX; } STATIC bool S_invlist_iternext(SV* invlist, UV* start, UV* end) { /* An C<invlist_iterinit> call on <invlist> must be used to set this up. * This call sets in <*start> and <*end>, the next range in <invlist>. * Returns <TRUE> if successful and the next call will return the next * range; <FALSE> if was already at the end of the list. If the latter, * <*start> and <*end> are unchanged, and the next call to this function * will start over at the beginning of the list */ STRLEN* pos = get_invlist_iter_addr(invlist); UV len = _invlist_len(invlist); UV *array; PERL_ARGS_ASSERT_INVLIST_ITERNEXT; if (*pos >= len) { *pos = (STRLEN) UV_MAX; /* Force iterinit() to be required next time */ return FALSE; } array = invlist_array(invlist); *start = array[(*pos)++]; if (*pos >= len) { *end = UV_MAX; } else { *end = array[(*pos)++] - 1; } return TRUE; } PERL_STATIC_INLINE UV S_invlist_highest(SV* const invlist) { /* Returns the highest code point that matches an inversion list. This API * has an ambiguity, as it returns 0 both when the highest matching code * point is actually 0 and when the list is empty. 
If this distinction matters to you, check * for emptiness before calling this function */ UV len = _invlist_len(invlist); UV *array; PERL_ARGS_ASSERT_INVLIST_HIGHEST; if (len == 0) { return 0; } array = invlist_array(invlist); /* The last element in the array in the inversion list always starts a * range that goes to infinity. That range may be for code points that are * matched in the inversion list, or it may be for ones that aren't * matched. In the latter case, the highest code point in the set is one * less than the beginning of this range; otherwise it is the final element * of this range: infinity */ return (ELEMENT_RANGE_MATCHES_INVLIST(len - 1)) ? UV_MAX : array[len - 1] - 1; } STATIC SV * S_invlist_contents(pTHX_ SV* const invlist, const bool traditional_style) { /* Get the contents of an inversion list into a string SV so that they can * be printed out. If 'traditional_style' is TRUE, it uses the format * traditionally done for debug tracing; otherwise it uses a format * suitable for just copying to the output, with blanks between ranges and * a dash between range components */ UV start, end; SV* output; const char intra_range_delimiter = (traditional_style ? '\t' : '-'); const char inter_range_delimiter = (traditional_style ? '\n' : ' '); if (traditional_style) { output = newSVpvs("\n"); } else { output = newSVpvs(""); } PERL_ARGS_ASSERT_INVLIST_CONTENTS; assert(! invlist_is_iterating(invlist)); invlist_iterinit(invlist); while (invlist_iternext(invlist, &start, &end)) { if (end == UV_MAX) { Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%cINFINITY%c", start, intra_range_delimiter, inter_range_delimiter); } else if (end != start) { Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%c%04" UVXf "%c", start, intra_range_delimiter, end, inter_range_delimiter); } else { Perl_sv_catpvf(aTHX_ output, "%04" UVXf "%c", start, inter_range_delimiter); } } if (SvCUR(output) && ! traditional_style) {/* Get rid of trailing blank */ SvCUR_set(output, SvCUR(output) - 1); } return output; } #ifndef PERL_IN_XSUB_RE void Perl__invlist_dump(pTHX_ PerlIO *file, I32 level, const char * const indent, SV* const invlist) { /* Designed to be called only by do_sv_dump(). Dumps out the ranges of the * inversion list 'invlist' to 'file' at 'level'. Each line is prefixed by * the string 'indent'. The output looks like this: [0] 0x000A .. 0x000D [2] 0x0085 [4] 0x2028 .. 0x2029 [6] 0x3104 .. INFINITY * This means that the first range of code points matched by the list is * 0xA through 0xD; the second range contains only the single code point * 0x85, etc. An inversion list is an array of UVs. Two array elements * are used to define each range (except if the final range extends to * infinity, only a single element is needed). The array index of the * first element for the corresponding range is given in brackets. */ UV start, end; STRLEN count = 0; PERL_ARGS_ASSERT__INVLIST_DUMP; if (invlist_is_iterating(invlist)) { Perl_dump_indent(aTHX_ level, file, "%sCan't dump inversion list because is in middle of iterating\n", indent); return; } invlist_iterinit(invlist); while (invlist_iternext(invlist, &start, &end)) { if (end == UV_MAX) { Perl_dump_indent(aTHX_ level, file, "%s[%" UVuf "] 0x%04" UVXf " .. INFINITY\n", indent, (UV)count, start); } else if (end != start) { Perl_dump_indent(aTHX_ level, file, "%s[%" UVuf "] 0x%04" UVXf " .. 
0x%04" UVXf "\n", indent, (UV)count, start, end); } else { Perl_dump_indent(aTHX_ level, file, "%s[%" UVuf "] 0x%04" UVXf "\n", indent, (UV)count, start); } count += 2; } } void Perl__load_PL_utf8_foldclosures (pTHX) { assert(! PL_utf8_foldclosures); /* If the folds haven't been read in, call a fold function * to force that */ if (! PL_utf8_tofold) { U8 dummy[UTF8_MAXBYTES_CASE+1]; const U8 hyphen[] = HYPHEN_UTF8; /* This string is just a short named one above \xff */ toFOLD_utf8_safe(hyphen, hyphen + sizeof(hyphen) - 1, dummy, NULL); assert(PL_utf8_tofold); /* Verify that worked */ } PL_utf8_foldclosures = _swash_inversion_hash(PL_utf8_tofold); } #endif #if defined(PERL_ARGS_ASSERT__INVLISTEQ) && !defined(PERL_IN_XSUB_RE) bool Perl__invlistEQ(pTHX_ SV* const a, SV* const b, const bool complement_b) { /* Return a boolean as to if the two passed in inversion lists are * identical. The final argument, if TRUE, says to take the complement of * the second inversion list before doing the comparison */ const UV* array_a = invlist_array(a); const UV* array_b = invlist_array(b); UV len_a = _invlist_len(a); UV len_b = _invlist_len(b); PERL_ARGS_ASSERT__INVLISTEQ; /* If are to compare 'a' with the complement of b, set it * up so are looking at b's complement. */ if (complement_b) { /* The complement of nothing is everything, so <a> would have to have * just one element, starting at zero (ending at infinity) */ if (len_b == 0) { return (len_a == 1 && array_a[0] == 0); } else if (array_b[0] == 0) { /* Otherwise, to complement, we invert. Here, the first element is * 0, just remove it. To do this, we just pretend the array starts * one later */ array_b++; len_b--; } else { /* But if the first element is not zero, we pretend the list starts * at the 0 that is always stored immediately before the array. */ array_b--; len_b++; } } return len_a == len_b && memEQ(array_a, array_b, len_a * sizeof(array_a[0])); } #endif /* * As best we can, determine the characters that can match the start of * the given EXACTF-ish node. * * Returns the invlist as a new SV*; it is the caller's responsibility to * call SvREFCNT_dec() when done with it. */ STATIC SV* S__make_exactf_invlist(pTHX_ RExC_state_t *pRExC_state, regnode *node) { const U8 * s = (U8*)STRING(node); SSize_t bytelen = STR_LEN(node); UV uc; /* Start out big enough for 2 separate code points */ SV* invlist = _new_invlist(4); PERL_ARGS_ASSERT__MAKE_EXACTF_INVLIST; if (! UTF) { uc = *s; /* We punt and assume can match anything if the node begins * with a multi-character fold. Things are complicated. For * example, /ffi/i could match any of: * "\N{LATIN SMALL LIGATURE FFI}" * "\N{LATIN SMALL LIGATURE FF}I" * "F\N{LATIN SMALL LIGATURE FI}" * plus several other things; and making sure we have all the * possibilities is hard. */ if (is_MULTI_CHAR_FOLD_latin1_safe(s, s + bytelen)) { invlist = _add_range_to_invlist(invlist, 0, UV_MAX); } else { /* Any Latin1 range character can potentially match any * other depending on the locale */ if (OP(node) == EXACTFL) { _invlist_union(invlist, PL_Latin1, &invlist); } else { /* But otherwise, it matches at least itself. We can * quickly tell if it has a distinct fold, and if so, * it matches that as well */ invlist = add_cp_to_invlist(invlist, uc); if (IS_IN_SOME_FOLD_L1(uc)) invlist = add_cp_to_invlist(invlist, PL_fold_latin1[uc]); } /* Some characters match above-Latin1 ones under /i. This * is true of EXACTFL ones when the locale is UTF-8 */ if (HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(uc) && (! 
isASCII(uc) || (OP(node) != EXACTFA && OP(node) != EXACTFA_NO_TRIE))) { add_above_Latin1_folds(pRExC_state, (U8) uc, &invlist); } } } else { /* Pattern is UTF-8 */ U8 folded[UTF8_MAX_FOLD_CHAR_EXPAND * UTF8_MAXBYTES_CASE + 1] = { '\0' }; STRLEN foldlen = UTF8SKIP(s); const U8* e = s + bytelen; SV** listp; uc = utf8_to_uvchr_buf(s, s + bytelen, NULL); /* The only code points that aren't folded in a UTF EXACTFish * node are the problematic ones in EXACTFL nodes */ if (OP(node) == EXACTFL && is_PROBLEMATIC_LOCALE_FOLDEDS_START_cp(uc)) { /* We need to check for the possibility that this EXACTFL * node begins with a multi-char fold. Therefore we fold * the first few characters of it so that we can make that * check */ U8 *d = folded; int i; for (i = 0; i < UTF8_MAX_FOLD_CHAR_EXPAND && s < e; i++) { if (isASCII(*s)) { *(d++) = (U8) toFOLD(*s); s++; } else { STRLEN len; toFOLD_utf8_safe(s, e, d, &len); d += len; s += UTF8SKIP(s); } } /* And set up so that the code below looks in this folded * buffer instead of the node's string */ e = d; foldlen = UTF8SKIP(folded); s = folded; } /* When we reach here 's' points to the fold of the first * character(s) of the node; and 'e' points to far enough along * the folded string to be just past any possible multi-char * fold. 'foldlen' is the length in bytes of the first * character in 's' * * Unlike the non-UTF-8 case, the macro for determining if a * string is a multi-char fold requires all the characters to * already be folded. This is because of all the complications * if not. Note that they are folded anyway, except in EXACTFL * nodes. Like the non-UTF case above, we punt if the node * begins with a multi-char fold */ if (is_MULTI_CHAR_FOLD_utf8_safe(s, e)) { invlist = _add_range_to_invlist(invlist, 0, UV_MAX); } else { /* Single char fold */ /* It matches all the things that fold to it, which are * found in PL_utf8_foldclosures (including itself) */ invlist = add_cp_to_invlist(invlist, uc); if (! PL_utf8_foldclosures) _load_PL_utf8_foldclosures(); if ((listp = hv_fetch(PL_utf8_foldclosures, (char *) s, foldlen, FALSE))) { AV* list = (AV*) *listp; IV k; for (k = 0; k <= av_tindex_skip_len_mg(list); k++) { SV** c_p = av_fetch(list, k, FALSE); UV c; assert(c_p); c = SvUV(*c_p); /* /aa doesn't allow folds between ASCII and non- */ if ((OP(node) == EXACTFA || OP(node) == EXACTFA_NO_TRIE) && isASCII(c) != isASCII(uc)) { continue; } invlist = add_cp_to_invlist(invlist, c); } } } } return invlist; } #undef HEADER_LENGTH #undef TO_INTERNAL_SIZE #undef FROM_INTERNAL_SIZE #undef INVLIST_VERSION_ID /* End of inversion list object */ STATIC void S_parse_lparen_question_flags(pTHX_ RExC_state_t *pRExC_state) { /* This parses the flags that are in either the '(?foo)' or '(?foo:bar)' * constructs, and updates RExC_flags with them. On input, RExC_parse * should point to the first flag; it is updated on output to point to the * final ')' or ':'. There needs to be at least one flag, or this will * abort */ /* for (?g), (?gc), and (?o) warnings; warning about (?c) will warn about (?g) -- japhy */ #define WASTED_O 0x01 #define WASTED_G 0x02 #define WASTED_C 0x04 #define WASTED_GC (WASTED_G|WASTED_C) I32 wastedflags = 0x00; U32 posflags = 0, negflags = 0; U32 *flagsp = &posflags; char has_charset_modifier = '\0'; regex_charset cs; bool has_use_defaults = FALSE; const char* const seqstart = RExC_parse - 1; /* Point to the '?' 
*/ int x_mod_count = 0; PERL_ARGS_ASSERT_PARSE_LPAREN_QUESTION_FLAGS; /* '^' as an initial flag sets certain defaults */ if (UCHARAT(RExC_parse) == '^') { RExC_parse++; has_use_defaults = TRUE; STD_PMMOD_FLAGS_CLEAR(&RExC_flags); set_regex_charset(&RExC_flags, (RExC_utf8 || RExC_uni_semantics) ? REGEX_UNICODE_CHARSET : REGEX_DEPENDS_CHARSET); } cs = get_regex_charset(RExC_flags); if (cs == REGEX_DEPENDS_CHARSET && (RExC_utf8 || RExC_uni_semantics)) { cs = REGEX_UNICODE_CHARSET; } while (RExC_parse < RExC_end) { /* && strchr("iogcmsx", *RExC_parse) */ /* (?g), (?gc) and (?o) are useless here and must be globally applied -- japhy */ switch (*RExC_parse) { /* Code for the imsxn flags */ CASE_STD_PMMOD_FLAGS_PARSE_SET(flagsp, x_mod_count); case LOCALE_PAT_MOD: if (has_charset_modifier) { goto excess_modifier; } else if (flagsp == &negflags) { goto neg_modifier; } cs = REGEX_LOCALE_CHARSET; has_charset_modifier = LOCALE_PAT_MOD; break; case UNICODE_PAT_MOD: if (has_charset_modifier) { goto excess_modifier; } else if (flagsp == &negflags) { goto neg_modifier; } cs = REGEX_UNICODE_CHARSET; has_charset_modifier = UNICODE_PAT_MOD; break; case ASCII_RESTRICT_PAT_MOD: if (flagsp == &negflags) { goto neg_modifier; } if (has_charset_modifier) { if (cs != REGEX_ASCII_RESTRICTED_CHARSET) { goto excess_modifier; } /* Doubled modifier implies more restricted */ cs = REGEX_ASCII_MORE_RESTRICTED_CHARSET; } else { cs = REGEX_ASCII_RESTRICTED_CHARSET; } has_charset_modifier = ASCII_RESTRICT_PAT_MOD; break; case DEPENDS_PAT_MOD: if (has_use_defaults) { goto fail_modifiers; } else if (flagsp == &negflags) { goto neg_modifier; } else if (has_charset_modifier) { goto excess_modifier; } /* The dual charset means unicode semantics if the * pattern (or target, not known until runtime) are * utf8, or something in the pattern indicates unicode * semantics */ cs = (RExC_utf8 || RExC_uni_semantics) ? REGEX_UNICODE_CHARSET : REGEX_DEPENDS_CHARSET; has_charset_modifier = DEPENDS_PAT_MOD; break; excess_modifier: RExC_parse++; if (has_charset_modifier == ASCII_RESTRICT_PAT_MOD) { vFAIL2("Regexp modifier \"%c\" may appear a maximum of twice", ASCII_RESTRICT_PAT_MOD); } else if (has_charset_modifier == *(RExC_parse - 1)) { vFAIL2("Regexp modifier \"%c\" may not appear twice", *(RExC_parse - 1)); } else { vFAIL3("Regexp modifiers \"%c\" and \"%c\" are mutually exclusive", has_charset_modifier, *(RExC_parse - 1)); } NOT_REACHED; /*NOTREACHED*/ neg_modifier: RExC_parse++; vFAIL2("Regexp modifier \"%c\" may not appear after the \"-\"", *(RExC_parse - 1)); NOT_REACHED; /*NOTREACHED*/ case ONCE_PAT_MOD: /* 'o' */ case GLOBAL_PAT_MOD: /* 'g' */ if (PASS2 && ckWARN(WARN_REGEXP)) { const I32 wflagbit = *RExC_parse == 'o' ? WASTED_O : WASTED_G; if (! (wastedflags & wflagbit) ) { wastedflags |= wflagbit; /* diag_listed_as: Useless (?-%s) - don't use /%s modifier in regex; marked by <-- HERE in m/%s/ */ vWARN5( RExC_parse + 1, "Useless (%s%c) - %suse /%c modifier", flagsp == &negflags ? "?-" : "?", *RExC_parse, flagsp == &negflags ? "don't " : "", *RExC_parse ); } } break; case CONTINUE_PAT_MOD: /* 'c' */ if (PASS2 && ckWARN(WARN_REGEXP)) { if (! (wastedflags & WASTED_C) ) { wastedflags |= WASTED_GC; /* diag_listed_as: Useless (?-%s) - don't use /%s modifier in regex; marked by <-- HERE in m/%s/ */ vWARN3( RExC_parse + 1, "Useless (%sc) - %suse /gc modifier", flagsp == &negflags ? "?-" : "?", flagsp == &negflags ? 
"don't " : "" ); } } break; case KEEPCOPY_PAT_MOD: /* 'p' */ if (flagsp == &negflags) { if (PASS2) ckWARNreg(RExC_parse + 1,"Useless use of (?-p)"); } else { *flagsp |= RXf_PMf_KEEPCOPY; } break; case '-': /* A flag is a default iff it is following a minus, so * if there is a minus, it means will be trying to * re-specify a default which is an error */ if (has_use_defaults || flagsp == &negflags) { goto fail_modifiers; } flagsp = &negflags; wastedflags = 0; /* reset so (?g-c) warns twice */ x_mod_count = 0; break; case ':': case ')': if ((posflags & (RXf_PMf_EXTENDED|RXf_PMf_EXTENDED_MORE)) == RXf_PMf_EXTENDED) { negflags |= RXf_PMf_EXTENDED_MORE; } RExC_flags |= posflags; if (negflags & RXf_PMf_EXTENDED) { negflags |= RXf_PMf_EXTENDED_MORE; } RExC_flags &= ~negflags; set_regex_charset(&RExC_flags, cs); return; default: fail_modifiers: RExC_parse += SKIP_IF_CHAR(RExC_parse); /* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */ vFAIL2utf8f("Sequence (%" UTF8f "...) not recognized", UTF8fARG(UTF, RExC_parse-seqstart, seqstart)); NOT_REACHED; /*NOTREACHED*/ } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; } vFAIL("Sequence (?... not terminated"); } /* - reg - regular expression, i.e. main body or parenthesized thing * * Caller must absorb opening parenthesis. * * Combining parenthesis handling with the base level of regular expression * is a trifle forced, but the need to tie the tails of the branches to what * follows makes it hard to avoid. */ #define REGTAIL(x,y,z) regtail((x),(y),(z),depth+1) #ifdef DEBUGGING #define REGTAIL_STUDY(x,y,z) regtail_study((x),(y),(z),depth+1) #else #define REGTAIL_STUDY(x,y,z) regtail((x),(y),(z),depth+1) #endif PERL_STATIC_INLINE regnode * S_handle_named_backref(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, char * parse_start, char ch ) { regnode *ret; char* name_start = RExC_parse; U32 num = 0; SV *sv_dat = reg_scan_name(pRExC_state, SIZE_ONLY ? REG_RSN_RETURN_NULL : REG_RSN_RETURN_DATA); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_HANDLE_NAMED_BACKREF; if (RExC_parse == name_start || *RExC_parse != ch) { /* diag_listed_as: Sequence \%s... not terminated in regex; marked by <-- HERE in m/%s/ */ vFAIL2("Sequence %.3s... not terminated",parse_start); } if (!SIZE_ONLY) { num = add_data( pRExC_state, STR_WITH_LEN("S")); RExC_rxi->data->data[num]=(void*)sv_dat; SvREFCNT_inc_simple_void(sv_dat); } RExC_sawback = 1; ret = reganode(pRExC_state, ((! FOLD) ? NREF : (ASCII_FOLD_RESTRICTED) ? NREFFA : (AT_LEAST_UNI_SEMANTICS) ? NREFFU : (LOC) ? NREFFL : NREFF), num); *flagp |= HASWIDTH; Set_Node_Offset(ret, parse_start+1); Set_Node_Cur_Length(ret, parse_start); nextchar(pRExC_state); return ret; } /* Returns NULL, setting *flagp to TRYAGAIN at the end of (?) that only sets flags. Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8. Otherwise would only return NULL if regbranch() returns NULL, which cannot happen. */ STATIC regnode * S_reg(pTHX_ RExC_state_t *pRExC_state, I32 paren, I32 *flagp,U32 depth) /* paren: Parenthesized? 0=top; 1,2=inside '(': changed to letter. * 2 is like 1, but indicates that nextchar() has been called to advance * RExC_parse beyond the '('. Things like '(?' are indivisible tokens, and * this flag alerts us to the need to check for that */ { regnode *ret; /* Will be the head of the group. 
*/ regnode *br; regnode *lastbr; regnode *ender = NULL; I32 parno = 0; I32 flags; U32 oregflags = RExC_flags; bool have_branch = 0; bool is_open = 0; I32 freeze_paren = 0; I32 after_freeze = 0; I32 num; /* numeric backreferences */ char * parse_start = RExC_parse; /* MJD */ char * const oregcomp_parse = RExC_parse; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REG; DEBUG_PARSE("reg "); *flagp = 0; /* Tentatively. */ /* Having this true makes it feasible to have a lot fewer tests for the * parse pointer being in scope. For example, we can write * while(isFOO(*RExC_parse)) RExC_parse++; * instead of * while(RExC_parse < RExC_end && isFOO(*RExC_parse)) RExC_parse++; */ assert(*RExC_end == '\0'); /* Make an OPEN node, if parenthesized. */ if (paren) { /* Under /x, space and comments can be gobbled up between the '(' and * here (if paren ==2). The forms '(*VERB' and '(?...' disallow such * intervening space, as the sequence is a token, and a token should be * indivisible */ bool has_intervening_patws = paren == 2 && *(RExC_parse - 1) != '('; if (RExC_parse >= RExC_end) { vFAIL("Unmatched ("); } if ( *RExC_parse == '*') { /* (*VERB:ARG) */ char *start_verb = RExC_parse + 1; STRLEN verb_len; char *start_arg = NULL; unsigned char op = 0; int arg_required = 0; int internal_argval = -1; /* if >-1 we are not allowed an argument*/ if (has_intervening_patws) { RExC_parse++; /* past the '*' */ vFAIL("In '(*VERB...)', the '(' and '*' must be adjacent"); } while (RExC_parse < RExC_end && *RExC_parse != ')' ) { if ( *RExC_parse == ':' ) { start_arg = RExC_parse + 1; break; } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; } verb_len = RExC_parse - start_verb; if ( start_arg ) { if (RExC_parse >= RExC_end) { goto unterminated_verb_pattern; } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; while ( RExC_parse < RExC_end && *RExC_parse != ')' ) RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; if ( RExC_parse >= RExC_end || *RExC_parse != ')' ) unterminated_verb_pattern: vFAIL("Unterminated verb pattern argument"); if ( RExC_parse == start_arg ) start_arg = NULL; } else { if ( RExC_parse >= RExC_end || *RExC_parse != ')' ) vFAIL("Unterminated verb pattern"); } /* Here, we know that RExC_parse < RExC_end */ switch ( *start_verb ) { case 'A': /* (*ACCEPT) */ if ( memEQs(start_verb,verb_len,"ACCEPT") ) { op = ACCEPT; internal_argval = RExC_nestroot; } break; case 'C': /* (*COMMIT) */ if ( memEQs(start_verb,verb_len,"COMMIT") ) op = COMMIT; break; case 'F': /* (*FAIL) */ if ( verb_len==1 || memEQs(start_verb,verb_len,"FAIL") ) { op = OPFAIL; } break; case ':': /* (*:NAME) */ case 'M': /* (*MARK:NAME) */ if ( verb_len==0 || memEQs(start_verb,verb_len,"MARK") ) { op = MARKPOINT; arg_required = 1; } break; case 'P': /* (*PRUNE) */ if ( memEQs(start_verb,verb_len,"PRUNE") ) op = PRUNE; break; case 'S': /* (*SKIP) */ if ( memEQs(start_verb,verb_len,"SKIP") ) op = SKIP; break; case 'T': /* (*THEN) */ /* [19:06] <TimToady> :: is then */ if ( memEQs(start_verb,verb_len,"THEN") ) { op = CUTGROUP; RExC_seen |= REG_CUTGROUP_SEEN; } break; } if ( ! op ) { RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; vFAIL2utf8f( "Unknown verb pattern '%" UTF8f "'", UTF8fARG(UTF, verb_len, start_verb)); } if ( arg_required && !start_arg ) { vFAIL3("Verb pattern '%.*s' has a mandatory argument", verb_len, start_verb); } if (internal_argval == -1) { ret = reganode(pRExC_state, op, 0); } else { ret = reg2Lanode(pRExC_state, op, 0, internal_argval); } RExC_seen |= REG_VERBARG_SEEN; if ( ! 
SIZE_ONLY ) { if (start_arg) { SV *sv = newSVpvn( start_arg, RExC_parse - start_arg); ARG(ret) = add_data( pRExC_state, STR_WITH_LEN("S")); RExC_rxi->data->data[ARG(ret)]=(void*)sv; ret->flags = 1; } else { ret->flags = 0; } if ( internal_argval != -1 ) ARG2L_SET(ret, internal_argval); } nextchar(pRExC_state); return ret; } else if (*RExC_parse == '?') { /* (?...) */ bool is_logical = 0; const char * const seqstart = RExC_parse; const char * endptr; if (has_intervening_patws) { RExC_parse++; vFAIL("In '(?...)', the '(' and '?' must be adjacent"); } RExC_parse++; /* past the '?' */ paren = *RExC_parse; /* might be a trailing NUL, if not well-formed */ RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; if (RExC_parse > RExC_end) { paren = '\0'; } ret = NULL; /* For look-ahead/behind. */ switch (paren) { case 'P': /* (?P...) variants for those used to PCRE/Python */ paren = *RExC_parse; if ( paren == '<') { /* (?P<...>) named capture */ RExC_parse++; if (RExC_parse >= RExC_end) { vFAIL("Sequence (?P<... not terminated"); } goto named_capture; } else if (paren == '>') { /* (?P>name) named recursion */ RExC_parse++; if (RExC_parse >= RExC_end) { vFAIL("Sequence (?P>... not terminated"); } goto named_recursion; } else if (paren == '=') { /* (?P=...) named backref */ RExC_parse++; return handle_named_backref(pRExC_state, flagp, parse_start, ')'); } RExC_parse += SKIP_IF_CHAR(RExC_parse); /* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */ vFAIL3("Sequence (%.*s...) not recognized", RExC_parse-seqstart, seqstart); NOT_REACHED; /*NOTREACHED*/ case '<': /* (?<...) */ if (*RExC_parse == '!') paren = ','; else if (*RExC_parse != '=') named_capture: { /* (?<...>) */ char *name_start; SV *svname; paren= '>'; /* FALLTHROUGH */ case '\'': /* (?'...') */ name_start = RExC_parse; svname = reg_scan_name(pRExC_state, SIZE_ONLY /* reverse test from the others */ ? REG_RSN_RETURN_NAME : REG_RSN_RETURN_NULL); if ( RExC_parse == name_start || RExC_parse >= RExC_end || *RExC_parse != paren) { vFAIL2("Sequence (?%c... not terminated", paren=='>' ? '<' : paren); } if (SIZE_ONLY) { HE *he_str; SV *sv_dat = NULL; if (!svname) /* shouldn't happen */ Perl_croak(aTHX_ "panic: reg_scan_name returned NULL"); if (!RExC_paren_names) { RExC_paren_names= newHV(); sv_2mortal(MUTABLE_SV(RExC_paren_names)); #ifdef DEBUGGING RExC_paren_name_list= newAV(); sv_2mortal(MUTABLE_SV(RExC_paren_name_list)); #endif } he_str = hv_fetch_ent( RExC_paren_names, svname, 1, 0 ); if ( he_str ) sv_dat = HeVAL(he_str); if ( ! sv_dat ) { /* croak baby croak */ Perl_croak(aTHX_ "panic: paren_name hash element allocation failed"); } else if ( SvPOK(sv_dat) ) { /* (?|...) can mean we have dupes so scan to check its already been stored. 
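                           Under branch reset, e.g.
                           m{(?|(?<n>a)|(?<n>b))}, both alternatives
                           reuse the same group number, so one name may
                           legitimately map to the same buffer more
                           than once.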
Maybe a flag indicating we are inside such a construct would be useful, but the arrays are likely to be quite small, so for now we punt -- dmq */ IV count = SvIV(sv_dat); I32 *pv = (I32*)SvPVX(sv_dat); IV i; for ( i = 0 ; i < count ; i++ ) { if ( pv[i] == RExC_npar ) { count = 0; break; } } if ( count ) { pv = (I32*)SvGROW(sv_dat, SvCUR(sv_dat) + sizeof(I32)+1); SvCUR_set(sv_dat, SvCUR(sv_dat) + sizeof(I32)); pv[count] = RExC_npar; SvIV_set(sv_dat, SvIVX(sv_dat) + 1); } } else { (void)SvUPGRADE(sv_dat,SVt_PVNV); sv_setpvn(sv_dat, (char *)&(RExC_npar), sizeof(I32)); SvIOK_on(sv_dat); SvIV_set(sv_dat, 1); } #ifdef DEBUGGING /* Yes this does cause a memory leak in debugging Perls * */ if (!av_store(RExC_paren_name_list, RExC_npar, SvREFCNT_inc(svname))) SvREFCNT_dec_NN(svname); #endif /*sv_dump(sv_dat);*/ } nextchar(pRExC_state); paren = 1; goto capturing_parens; } RExC_seen |= REG_LOOKBEHIND_SEEN; RExC_in_lookbehind++; RExC_parse++; if (RExC_parse >= RExC_end) { vFAIL("Sequence (?... not terminated"); } /* FALLTHROUGH */ case '=': /* (?=...) */ RExC_seen_zerolen++; break; case '!': /* (?!...) */ RExC_seen_zerolen++; /* check if we're really just a "FAIL" assertion */ skip_to_be_ignored_text(pRExC_state, &RExC_parse, FALSE /* Don't force to /x */ ); if (*RExC_parse == ')') { ret=reganode(pRExC_state, OPFAIL, 0); nextchar(pRExC_state); return ret; } break; case '|': /* (?|...) */ /* branch reset, behave like a (?:...) except that buffers in alternations share the same numbers */ paren = ':'; after_freeze = freeze_paren = RExC_npar; break; case ':': /* (?:...) */ case '>': /* (?>...) */ break; case '$': /* (?$...) */ case '@': /* (?@...) */ vFAIL2("Sequence (?%c...) not implemented", (int)paren); break; case '0' : /* (?0) */ case 'R' : /* (?R) */ if (RExC_parse == RExC_end || *RExC_parse != ')') FAIL("Sequence (?R) not terminated"); num = 0; RExC_seen |= REG_RECURSE_SEEN; *flagp |= POSTPONED; goto gen_recurse_regop; /*notreached*/ /* named and numeric backreferences */ case '&': /* (?&NAME) */ parse_start = RExC_parse - 1; named_recursion: { SV *sv_dat = reg_scan_name(pRExC_state, SIZE_ONLY ? REG_RSN_RETURN_NULL : REG_RSN_RETURN_DATA); num = sv_dat ? *((I32 *)SvPVX(sv_dat)) : 0; } if (RExC_parse >= RExC_end || *RExC_parse != ')') vFAIL("Sequence (?&... not terminated"); goto gen_recurse_regop; /* NOTREACHED */ case '+': if (!(RExC_parse[0] >= '1' && RExC_parse[0] <= '9')) { RExC_parse++; vFAIL("Illegal pattern"); } goto parse_recursion; /* NOTREACHED*/ case '-': /* (?-1) */ if (!(RExC_parse[0] >= '1' && RExC_parse[0] <= '9')) { RExC_parse--; /* rewind to let it be handled later */ goto parse_flags; } /* FALLTHROUGH */ case '1': case '2': case '3': case '4': /* (?1) */ case '5': case '6': case '7': case '8': case '9': RExC_parse = (char *) seqstart + 1; /* Point to the digit */ parse_recursion: { bool is_neg = FALSE; UV unum; parse_start = RExC_parse - 1; /* MJD */ if (*RExC_parse == '-') { RExC_parse++; is_neg = TRUE; } if (grok_atoUV(RExC_parse, &unum, &endptr) && unum <= I32_MAX ) { num = (I32)unum; RExC_parse = (char*)endptr; } else num = I32_MAX; if (is_neg) { /* Some limit for num? */ num = -num; } } if (*RExC_parse!=')') vFAIL("Expecting close bracket"); gen_recurse_regop: if ( paren == '-' ) { /* Diagram of capture buffer numbering. 
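                   A relative (?-N) counts N opening parens back from
                   its own position (and (?+N) counts forward), so it
                   is translated below into an absolute group number
                   via RExC_npar before the GOSUB is emitted.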
Top line is the normal capture buffer numbers Bottom line is the negative indexing as from the X (the (?-2)) + 1 2 3 4 5 X 6 7 /(a(x)y)(a(b(c(?-2)d)e)f)(g(h))/ - 5 4 3 2 1 X x x */ num = RExC_npar + num; if (num < 1) { RExC_parse++; vFAIL("Reference to nonexistent group"); } } else if ( paren == '+' ) { num = RExC_npar + num - 1; } /* We keep track how many GOSUB items we have produced. To start off the ARG2L() of the GOSUB holds its "id", which is used later in conjunction with RExC_recurse to calculate the offset we need to jump for the GOSUB, which it will store in the final representation. We have to defer the actual calculation until much later as the regop may move. */ ret = reg2Lanode(pRExC_state, GOSUB, num, RExC_recurse_count); if (!SIZE_ONLY) { if (num > (I32)RExC_rx->nparens) { RExC_parse++; vFAIL("Reference to nonexistent group"); } RExC_recurse_count++; DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_ "%*s%*s Recurse #%" UVuf " to %" IVdf "\n", 22, "| |", (int)(depth * 2 + 1), "", (UV)ARG(ret), (IV)ARG2L(ret))); } RExC_seen |= REG_RECURSE_SEEN; Set_Node_Length(ret, 1 + regarglen[OP(ret)]); /* MJD */ Set_Node_Offset(ret, parse_start); /* MJD */ *flagp |= POSTPONED; assert(*RExC_parse == ')'); nextchar(pRExC_state); return ret; /* NOTREACHED */ case '?': /* (??...) */ is_logical = 1; if (*RExC_parse != '{') { RExC_parse += SKIP_IF_CHAR(RExC_parse); /* diag_listed_as: Sequence (?%s...) not recognized in regex; marked by <-- HERE in m/%s/ */ vFAIL2utf8f( "Sequence (%" UTF8f "...) not recognized", UTF8fARG(UTF, RExC_parse-seqstart, seqstart)); NOT_REACHED; /*NOTREACHED*/ } *flagp |= POSTPONED; paren = '{'; RExC_parse++; /* FALLTHROUGH */ case '{': /* (?{...}) */ { U32 n = 0; struct reg_code_block *cb; RExC_seen_zerolen++; if ( !pRExC_state->code_blocks || pRExC_state->code_index >= pRExC_state->code_blocks->count || pRExC_state->code_blocks->cb[pRExC_state->code_index].start != (STRLEN)((RExC_parse -3 - (is_logical ? 1 : 0)) - RExC_start) ) { if (RExC_pm_flags & PMf_USE_RE_EVAL) FAIL("panic: Sequence (?{...}): no code block found\n"); FAIL("Eval-group not allowed at runtime, use re 'eval'"); } /* this is a pre-compiled code block (?{...}) */ cb = &pRExC_state->code_blocks->cb[pRExC_state->code_index]; RExC_parse = RExC_start + cb->end; if (!SIZE_ONLY) { OP *o = cb->block; if (cb->src_regex) { n = add_data(pRExC_state, STR_WITH_LEN("rl")); RExC_rxi->data->data[n] = (void*)SvREFCNT_inc((SV*)cb->src_regex); RExC_rxi->data->data[n+1] = (void*)o; } else { n = add_data(pRExC_state, (RExC_pm_flags & PMf_HAS_CV) ? "L" : "l", 1); RExC_rxi->data->data[n] = (void*)o; } } pRExC_state->code_index++; nextchar(pRExC_state); if (is_logical) { regnode *eval; ret = reg_node(pRExC_state, LOGICAL); eval = reg2Lanode(pRExC_state, EVAL, n, /* for later propagation into (??{}) * return value */ RExC_flags & RXf_PMf_COMPILETIME ); if (!SIZE_ONLY) { ret->flags = 2; } REGTAIL(pRExC_state, ret, eval); /* deal with the length of this later - MJD */ return ret; } ret = reg2Lanode(pRExC_state, EVAL, n, 0); Set_Node_Length(ret, RExC_parse - parse_start + 1); Set_Node_Offset(ret, parse_start); return ret; } case '(': /* (?(?{...})...) and (?(?=...)...) */ { int is_define= 0; const int DEFINE_len = sizeof("DEFINE") - 1; if (RExC_parse[0] == '?') { /* (?(?...)) */ if ( RExC_parse < RExC_end - 1 && ( RExC_parse[1] == '=' || RExC_parse[1] == '!' || RExC_parse[1] == '<' || RExC_parse[1] == '{') ) { /* Lookahead or eval. 
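                           The condition is compiled as a LOGICAL node
                           tied to the recursively parsed assertion or
                           code block, then joins the common IFTHEN
                           handling at insert_if.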
*/ I32 flag; regnode *tail; ret = reg_node(pRExC_state, LOGICAL); if (!SIZE_ONLY) ret->flags = 1; tail = reg(pRExC_state, 1, &flag, depth+1); if (flag & (RESTART_PASS1|NEED_UTF8)) { *flagp = flag & (RESTART_PASS1|NEED_UTF8); return NULL; } REGTAIL(pRExC_state, ret, tail); goto insert_if; } /* Fall through to ‘Unknown switch condition’ at the end of the if/else chain. */ } else if ( RExC_parse[0] == '<' /* (?(<NAME>)...) */ || RExC_parse[0] == '\'' ) /* (?('NAME')...) */ { char ch = RExC_parse[0] == '<' ? '>' : '\''; char *name_start= RExC_parse++; U32 num = 0; SV *sv_dat=reg_scan_name(pRExC_state, SIZE_ONLY ? REG_RSN_RETURN_NULL : REG_RSN_RETURN_DATA); if ( RExC_parse == name_start || RExC_parse >= RExC_end || *RExC_parse != ch) { vFAIL2("Sequence (?(%c... not terminated", (ch == '>' ? '<' : ch)); } RExC_parse++; if (!SIZE_ONLY) { num = add_data( pRExC_state, STR_WITH_LEN("S")); RExC_rxi->data->data[num]=(void*)sv_dat; SvREFCNT_inc_simple_void(sv_dat); } ret = reganode(pRExC_state,NGROUPP,num); goto insert_if_check_paren; } else if (memBEGINs(RExC_parse, (STRLEN) (RExC_end - RExC_parse), "DEFINE")) { ret = reganode(pRExC_state,DEFINEP,0); RExC_parse += DEFINE_len; is_define = 1; goto insert_if_check_paren; } else if (RExC_parse[0] == 'R') { RExC_parse++; /* parno == 0 => /(?(R)YES|NO)/ "in any form of recursion OR eval" * parno == 1 => /(?(R0)YES|NO)/ "in GOSUB (?0) / (?R)" * parno == 2 => /(?(R1)YES|NO)/ "in GOSUB (?1) (parno-1)" */ parno = 0; if (RExC_parse[0] == '0') { parno = 1; RExC_parse++; } else if (RExC_parse[0] >= '1' && RExC_parse[0] <= '9' ) { UV uv; if (grok_atoUV(RExC_parse, &uv, &endptr) && uv <= I32_MAX ) { parno = (I32)uv + 1; RExC_parse = (char*)endptr; } /* else "Switch condition not recognized" below */ } else if (RExC_parse[0] == '&') { SV *sv_dat; RExC_parse++; sv_dat = reg_scan_name(pRExC_state, SIZE_ONLY ? REG_RSN_RETURN_NULL : REG_RSN_RETURN_DATA); /* we should only have a false sv_dat when * SIZE_ONLY is true, and we always have false * sv_dat when SIZE_ONLY is true. * reg_scan_name() will VFAIL() if the name is * unknown when SIZE_ONLY is false, and otherwise * will return something, and when SIZE_ONLY is * true, reg_scan_name() just parses the string, * and doesnt return anything. (in theory) */ assert(SIZE_ONLY ? !sv_dat : !!sv_dat); if (sv_dat) parno = 1 + *((I32 *)SvPVX(sv_dat)); } ret = reganode(pRExC_state,INSUBP,parno); goto insert_if_check_paren; } else if (RExC_parse[0] >= '1' && RExC_parse[0] <= '9' ) { /* (?(1)...) */ char c; UV uv; if (grok_atoUV(RExC_parse, &uv, &endptr) && uv <= I32_MAX ) { parno = (I32)uv; RExC_parse = (char*)endptr; } else { vFAIL("panic: grok_atoUV returned FALSE"); } ret = reganode(pRExC_state, GROUPP, parno); insert_if_check_paren: if (UCHARAT(RExC_parse) != ')') { RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; vFAIL("Switch condition not recognized"); } nextchar(pRExC_state); insert_if: REGTAIL(pRExC_state, ret, reganode(pRExC_state, IFTHEN, 0)); br = regbranch(pRExC_state, &flags, 1,depth+1); if (br == NULL) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regbranch returned NULL, flags=%#" UVxf, (UV) flags); } else REGTAIL(pRExC_state, br, reganode(pRExC_state, LONGJMP, 0)); c = UCHARAT(RExC_parse); nextchar(pRExC_state); if (flags&HASWIDTH) *flagp |= HASWIDTH; if (c == '|') { if (is_define) vFAIL("(?(DEFINE)....) does not allow branches"); /* Fake one for optimizer. 
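                            The no-branch gets its own IFTHEN so the
                            optimizer walks a uniform branch chain; a
                            conditional allows at most two branches.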
*/ lastbr = reganode(pRExC_state, IFTHEN, 0); if (!regbranch(pRExC_state, &flags, 1,depth+1)) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regbranch returned NULL, flags=%#" UVxf, (UV) flags); } REGTAIL(pRExC_state, ret, lastbr); if (flags&HASWIDTH) *flagp |= HASWIDTH; c = UCHARAT(RExC_parse); nextchar(pRExC_state); } else lastbr = NULL; if (c != ')') { if (RExC_parse >= RExC_end) vFAIL("Switch (?(condition)... not terminated"); else vFAIL("Switch (?(condition)... contains too many branches"); } ender = reg_node(pRExC_state, TAIL); REGTAIL(pRExC_state, br, ender); if (lastbr) { REGTAIL(pRExC_state, lastbr, ender); REGTAIL(pRExC_state, NEXTOPER(NEXTOPER(lastbr)), ender); } else REGTAIL(pRExC_state, ret, ender); RExC_size++; /* XXX WHY do we need this?!! For large programs it seems to be required but I can't figure out why. -- dmq*/ return ret; } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; vFAIL("Unknown switch condition (?(...))"); } case '[': /* (?[ ... ]) */ return handle_regex_sets(pRExC_state, NULL, flagp, depth, oregcomp_parse); case 0: /* A NUL */ RExC_parse--; /* for vFAIL to print correctly */ vFAIL("Sequence (? incomplete"); break; default: /* e.g., (?i) */ RExC_parse = (char *) seqstart + 1; parse_flags: parse_lparen_question_flags(pRExC_state); if (UCHARAT(RExC_parse) != ':') { if (RExC_parse < RExC_end) nextchar(pRExC_state); *flagp = TRYAGAIN; return NULL; } paren = ':'; nextchar(pRExC_state); ret = NULL; goto parse_rest; } /* end switch */ } else if (!(RExC_flags & RXf_PMf_NOCAPTURE)) { /* (...) */ capturing_parens: parno = RExC_npar; RExC_npar++; ret = reganode(pRExC_state, OPEN, parno); if (!SIZE_ONLY ){ if (!RExC_nestroot) RExC_nestroot = parno; if (RExC_open_parens && !RExC_open_parens[parno]) { DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_ "%*s%*s Setting open paren #%" IVdf " to %d\n", 22, "| |", (int)(depth * 2 + 1), "", (IV)parno, REG_NODE_NUM(ret))); RExC_open_parens[parno]= ret; } } Set_Node_Length(ret, 1); /* MJD */ Set_Node_Offset(ret, RExC_parse); /* MJD */ is_open = 1; } else { /* with RXf_PMf_NOCAPTURE treat (...) as (?:...) */ paren = ':'; ret = NULL; } } else /* ! paren */ ret = NULL; parse_rest: /* Pick up the branches, linking them together. */ parse_start = RExC_parse; /* MJD */ br = regbranch(pRExC_state, &flags, 1,depth+1); /* branch_len = (paren != 0); */ if (br == NULL) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regbranch returned NULL, flags=%#" UVxf, (UV) flags); } if (*RExC_parse == '|') { if (!SIZE_ONLY && RExC_extralen) { reginsert(pRExC_state, BRANCHJ, br, depth+1); } else { /* MJD */ reginsert(pRExC_state, BRANCH, br, depth+1); Set_Node_Length(br, paren != 0); Set_Node_Offset_To_R(br-RExC_emit_start, parse_start-RExC_start); } have_branch = 1; if (SIZE_ONLY) RExC_extralen += 1; /* For BRANCHJ-BRANCH. */ } else if (paren == ':') { *flagp |= flags&SIMPLE; } if (is_open) { /* Starts with OPEN. */ REGTAIL(pRExC_state, ret, br); /* OPEN -> first. */ } else if (paren != '?') /* Not Conditional */ ret = br; *flagp |= flags & (SPSTART | HASWIDTH | POSTPONED); lastbr = br; while (*RExC_parse == '|') { if (!SIZE_ONLY && RExC_extralen) { ender = reganode(pRExC_state, LONGJMP,0); /* Append to the previous. */ REGTAIL(pRExC_state, NEXTOPER(NEXTOPER(lastbr)), ender); } if (SIZE_ONLY) RExC_extralen += 2; /* Account for LONGJMP. 
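                                       A LONGJMP occupies two regnodes,
                                       which the sizing pass reserves
                                       here for each additional '|'.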
*/ nextchar(pRExC_state); if (freeze_paren) { if (RExC_npar > after_freeze) after_freeze = RExC_npar; RExC_npar = freeze_paren; } br = regbranch(pRExC_state, &flags, 0, depth+1); if (br == NULL) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regbranch returned NULL, flags=%#" UVxf, (UV) flags); } REGTAIL(pRExC_state, lastbr, br); /* BRANCH -> BRANCH. */ lastbr = br; *flagp |= flags & (SPSTART | HASWIDTH | POSTPONED); } if (have_branch || paren != ':') { /* Make a closing node, and hook it on the end. */ switch (paren) { case ':': ender = reg_node(pRExC_state, TAIL); break; case 1: case 2: ender = reganode(pRExC_state, CLOSE, parno); if ( RExC_close_parens ) { DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_ "%*s%*s Setting close paren #%" IVdf " to %d\n", 22, "| |", (int)(depth * 2 + 1), "", (IV)parno, REG_NODE_NUM(ender))); RExC_close_parens[parno]= ender; if (RExC_nestroot == parno) RExC_nestroot = 0; } Set_Node_Offset(ender,RExC_parse+1); /* MJD */ Set_Node_Length(ender,1); /* MJD */ break; case '<': case ',': case '=': case '!': *flagp &= ~HASWIDTH; /* FALLTHROUGH */ case '>': ender = reg_node(pRExC_state, SUCCEED); break; case 0: ender = reg_node(pRExC_state, END); if (!SIZE_ONLY) { assert(!RExC_end_op); /* there can only be one! */ RExC_end_op = ender; if (RExC_close_parens) { DEBUG_OPTIMISE_MORE_r(Perl_re_printf( aTHX_ "%*s%*s Setting close paren #0 (END) to %d\n", 22, "| |", (int)(depth * 2 + 1), "", REG_NODE_NUM(ender))); RExC_close_parens[0]= ender; } } break; } DEBUG_PARSE_r(if (!SIZE_ONLY) { DEBUG_PARSE_MSG("lsbr"); regprop(RExC_rx, RExC_mysv1, lastbr, NULL, pRExC_state); regprop(RExC_rx, RExC_mysv2, ender, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ tying lastbr %s (%" IVdf ") to ender %s (%" IVdf ") offset %" IVdf "\n", SvPV_nolen_const(RExC_mysv1), (IV)REG_NODE_NUM(lastbr), SvPV_nolen_const(RExC_mysv2), (IV)REG_NODE_NUM(ender), (IV)(ender - lastbr) ); }); REGTAIL(pRExC_state, lastbr, ender); if (have_branch && !SIZE_ONLY) { char is_nothing= 1; if (depth==1) RExC_seen |= REG_TOP_LEVEL_BRANCHES_SEEN; /* Hook the tails of the branches to the closing node. */ for (br = ret; br; br = regnext(br)) { const U8 op = PL_regkind[OP(br)]; if (op == BRANCH) { REGTAIL_STUDY(pRExC_state, NEXTOPER(br), ender); if ( OP(NEXTOPER(br)) != NOTHING || regnext(NEXTOPER(br)) != ender) is_nothing= 0; } else if (op == BRANCHJ) { REGTAIL_STUDY(pRExC_state, NEXTOPER(NEXTOPER(br)), ender); /* for now we always disable this optimisation * / if ( OP(NEXTOPER(NEXTOPER(br))) != NOTHING || regnext(NEXTOPER(NEXTOPER(br))) != ender) */ is_nothing= 0; } } if (is_nothing) { br= PL_regkind[OP(ret)] != BRANCH ? regnext(ret) : ret; DEBUG_PARSE_r(if (!SIZE_ONLY) { DEBUG_PARSE_MSG("NADA"); regprop(RExC_rx, RExC_mysv1, ret, NULL, pRExC_state); regprop(RExC_rx, RExC_mysv2, ender, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ converting ret %s (%" IVdf ") to ender %s (%" IVdf ") offset %" IVdf "\n", SvPV_nolen_const(RExC_mysv1), (IV)REG_NODE_NUM(ret), SvPV_nolen_const(RExC_mysv2), (IV)REG_NODE_NUM(ender), (IV)(ender - ret) ); }); OP(br)= NOTHING; if (OP(ender) == TAIL) { NEXT_OFF(br)= 0; RExC_emit= br + 1; } else { regnode *opt; for ( opt= br + 1; opt < ender ; opt++ ) OP(opt)= OPTIMIZED; NEXT_OFF(br)= ender - br; } } } } { const char *p; static const char parens[] = "=!<,>"; if (paren && (p = strchr(parens, paren))) { U8 node = ((p - parens) % 2) ? 
UNLESSM : IFMATCH; int flag = (p - parens) > 1; if (paren == '>') node = SUSPEND, flag = 0; reginsert(pRExC_state, node,ret, depth+1); Set_Node_Cur_Length(ret, parse_start); Set_Node_Offset(ret, parse_start + 1); ret->flags = flag; REGTAIL_STUDY(pRExC_state, ret, reg_node(pRExC_state, TAIL)); } } /* Check for proper termination. */ if (paren) { /* restore original flags, but keep (?p) and, if we've changed from /d * rules to /u, keep the /u */ RExC_flags = oregflags | (RExC_flags & RXf_PMf_KEEPCOPY); if (DEPENDS_SEMANTICS && RExC_uni_semantics) { set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET); } if (RExC_parse >= RExC_end || UCHARAT(RExC_parse) != ')') { RExC_parse = oregcomp_parse; vFAIL("Unmatched ("); } nextchar(pRExC_state); } else if (!paren && RExC_parse < RExC_end) { if (*RExC_parse == ')') { RExC_parse++; vFAIL("Unmatched )"); } else FAIL("Junk on end of regexp"); /* "Can't happen". */ NOT_REACHED; /* NOTREACHED */ } if (RExC_in_lookbehind) { RExC_in_lookbehind--; } if (after_freeze > RExC_npar) RExC_npar = after_freeze; return(ret); } /* - regbranch - one alternative of an | operator * * Implements the concatenation operator. * * Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs to be * restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8 */ STATIC regnode * S_regbranch(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, I32 first, U32 depth) { regnode *ret; regnode *chain = NULL; regnode *latest; I32 flags = 0, c = 0; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGBRANCH; DEBUG_PARSE("brnc"); if (first) ret = NULL; else { if (!SIZE_ONLY && RExC_extralen) ret = reganode(pRExC_state, BRANCHJ,0); else { ret = reg_node(pRExC_state, BRANCH); Set_Node_Length(ret, 1); } } if (!first && SIZE_ONLY) RExC_extralen += 1; /* BRANCHJ */ *flagp = WORST; /* Tentatively. */ skip_to_be_ignored_text(pRExC_state, &RExC_parse, FALSE /* Don't force to /x */ ); while (RExC_parse < RExC_end && *RExC_parse != '|' && *RExC_parse != ')') { flags &= ~TRYAGAIN; latest = regpiece(pRExC_state, &flags,depth+1); if (latest == NULL) { if (flags & TRYAGAIN) continue; if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: regpiece returned NULL, flags=%#" UVxf, (UV) flags); } else if (ret == NULL) ret = latest; *flagp |= flags&(HASWIDTH|POSTPONED); if (chain == NULL) /* First piece. */ *flagp |= flags&SPSTART; else { /* FIXME adding one for every branch after the first is probably * excessive now we have TRIE support. (hv) */ MARK_NAUGHTY(1); REGTAIL(pRExC_state, chain, latest); } chain = latest; c++; } if (chain == NULL) { /* Loop ran zero times. */ chain = reg_node(pRExC_state, NOTHING); if (ret == NULL) ret = chain; } if (c == 1) { *flagp |= flags&SIMPLE; } return ret; } /* - regpiece - something followed by possible quantifier * + ? {n,m} * * Note that the branching code sequences used for ? and the general cases * of * and + are somewhat optimized: they use the same NOTHING node as * both the endmarker for their branch list and the body of the last branch. * It might seem that this node could be dispensed with entirely, but the * endmarker role is not redundant. * * Returns NULL, setting *flagp to TRYAGAIN if regatom() returns NULL with * TRYAGAIN. 
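 *
 * For example, m{a{2,4}} wraps the 'a' atom in a CURLY node with
 * ARG1 = 2 and ARG2 = 4; bare '*', '+' and '?' re-enter the same
 * do_curly path as {0,}, {1,} and {0,1}, with a simple {0,} or {1,}
 * collapsing further into a STAR or PLUS node.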
* Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs to be * restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8 */ STATIC regnode * S_regpiece(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth) { regnode *ret; char op; char *next; I32 flags; const char * const origparse = RExC_parse; I32 min; I32 max = REG_INFTY; #ifdef RE_TRACK_PATTERN_OFFSETS char *parse_start; #endif const char *maxpos = NULL; UV uv; /* Save the original in case we change the emitted regop to a FAIL. */ regnode * const orig_emit = RExC_emit; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGPIECE; DEBUG_PARSE("piec"); ret = regatom(pRExC_state, &flags,depth+1); if (ret == NULL) { if (flags & (TRYAGAIN|RESTART_PASS1|NEED_UTF8)) *flagp |= flags & (TRYAGAIN|RESTART_PASS1|NEED_UTF8); else FAIL2("panic: regatom returned NULL, flags=%#" UVxf, (UV) flags); return(NULL); } op = *RExC_parse; if (op == '{' && regcurly(RExC_parse)) { maxpos = NULL; #ifdef RE_TRACK_PATTERN_OFFSETS parse_start = RExC_parse; /* MJD */ #endif next = RExC_parse + 1; while (isDIGIT(*next) || *next == ',') { if (*next == ',') { if (maxpos) break; else maxpos = next; } next++; } if (*next == '}') { /* got one */ const char* endptr; if (!maxpos) maxpos = next; RExC_parse++; if (isDIGIT(*RExC_parse)) { if (!grok_atoUV(RExC_parse, &uv, &endptr)) vFAIL("Invalid quantifier in {,}"); if (uv >= REG_INFTY) vFAIL2("Quantifier in {,} bigger than %d", REG_INFTY - 1); min = (I32)uv; } else { min = 0; } if (*maxpos == ',') maxpos++; else maxpos = RExC_parse; if (isDIGIT(*maxpos)) { if (!grok_atoUV(maxpos, &uv, &endptr)) vFAIL("Invalid quantifier in {,}"); if (uv >= REG_INFTY) vFAIL2("Quantifier in {,} bigger than %d", REG_INFTY - 1); max = (I32)uv; } else { max = REG_INFTY; /* meaning "infinity" */ } RExC_parse = next; nextchar(pRExC_state); if (max < min) { /* If can't match, warn and optimize to fail unconditionally */ reginsert(pRExC_state, OPFAIL, orig_emit, depth+1); if (PASS2) { ckWARNreg(RExC_parse, "Quantifier {n,m} with n > m can't match"); NEXT_OFF(orig_emit)= regarglen[OPFAIL] + NODE_STEP_REGNODE; } return ret; } else if (min == max && *RExC_parse == '?') { if (PASS2) { ckWARN2reg(RExC_parse + 1, "Useless use of greediness modifier '%c'", *RExC_parse); } } do_curly: if ((flags&SIMPLE)) { if (min == 0 && max == REG_INFTY) { reginsert(pRExC_state, STAR, ret, depth+1); MARK_NAUGHTY(4); RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN; goto nest_check; } if (min == 1 && max == REG_INFTY) { reginsert(pRExC_state, PLUS, ret, depth+1); MARK_NAUGHTY(3); RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN; goto nest_check; } MARK_NAUGHTY_EXP(2, 2); reginsert(pRExC_state, CURLY, ret, depth+1); Set_Node_Offset(ret, parse_start+1); /* MJD */ Set_Node_Cur_Length(ret, parse_start); } else { regnode * const w = reg_node(pRExC_state, WHILEM); w->flags = 0; REGTAIL(pRExC_state, ret, w); if (!SIZE_ONLY && RExC_extralen) { reginsert(pRExC_state, LONGJMP,ret, depth+1); reginsert(pRExC_state, NOTHING,ret, depth+1); NEXT_OFF(ret) = 3; /* Go over LONGJMP. */ } reginsert(pRExC_state, CURLYX,ret, depth+1); /* MJD hk */ Set_Node_Offset(ret, parse_start+1); Set_Node_Length(ret, op == '{' ? (RExC_parse - parse_start) : 1); if (!SIZE_ONLY && RExC_extralen) NEXT_OFF(ret) = 3; /* Go over NOTHING to LONGJMP. 
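                                          Both inserts and this offset
                                          are guarded by the same
                                          RExC_extralen test, so the
                                          skip exists only when the
                                          pair was actually emitted.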
*/ REGTAIL(pRExC_state, ret, reg_node(pRExC_state, NOTHING)); if (SIZE_ONLY) RExC_whilem_seen++, RExC_extralen += 3; MARK_NAUGHTY_EXP(1, 4); /* compound interest */ } ret->flags = 0; if (min > 0) *flagp = WORST; if (max > 0) *flagp |= HASWIDTH; if (!SIZE_ONLY) { ARG1_SET(ret, (U16)min); ARG2_SET(ret, (U16)max); } if (max == REG_INFTY) RExC_seen |= REG_UNBOUNDED_QUANTIFIER_SEEN; goto nest_check; } } if (!ISMULT1(op)) { *flagp = flags; return(ret); } #if 0 /* Now runtime fix should be reliable. */ /* if this is reinstated, don't forget to put this back into perldiag: =item Regexp *+ operand could be empty at {#} in regex m/%s/ (F) The part of the regexp subject to either the * or + quantifier could match an empty string. The {#} shows in the regular expression about where the problem was discovered. */ if (!(flags&HASWIDTH) && op != '?') vFAIL("Regexp *+ operand could be empty"); #endif #ifdef RE_TRACK_PATTERN_OFFSETS parse_start = RExC_parse; #endif nextchar(pRExC_state); *flagp = (op != '+') ? (WORST|SPSTART|HASWIDTH) : (WORST|HASWIDTH); if (op == '*') { min = 0; goto do_curly; } else if (op == '+') { min = 1; goto do_curly; } else if (op == '?') { min = 0; max = 1; goto do_curly; } nest_check: if (!SIZE_ONLY && !(flags&(HASWIDTH|POSTPONED)) && max > REG_INFTY/3) { SAVEFREESV(RExC_rx_sv); /* in case of fatal warnings */ ckWARN2reg(RExC_parse, "%" UTF8f " matches null string many times", UTF8fARG(UTF, (RExC_parse >= origparse ? RExC_parse - origparse : 0), origparse)); (void)ReREFCNT_inc(RExC_rx_sv); } if (*RExC_parse == '?') { nextchar(pRExC_state); reginsert(pRExC_state, MINMOD, ret, depth+1); REGTAIL(pRExC_state, ret, ret + NODE_STEP_REGNODE); } else if (*RExC_parse == '+') { regnode *ender; nextchar(pRExC_state); ender = reg_node(pRExC_state, SUCCEED); REGTAIL(pRExC_state, ret, ender); reginsert(pRExC_state, SUSPEND, ret, depth+1); ender = reg_node(pRExC_state, TAIL); REGTAIL(pRExC_state, ret, ender); } if (ISMULT2(RExC_parse)) { RExC_parse++; vFAIL("Nested quantifiers"); } return(ret); } STATIC bool S_grok_bslash_N(pTHX_ RExC_state_t *pRExC_state, regnode ** node_p, UV * code_point_p, int * cp_count, I32 * flagp, const bool strict, const U32 depth ) { /* This routine teases apart the various meanings of \N and returns * accordingly. The input parameters constrain which meaning(s) is/are valid * in the current context. * * Exactly one of <node_p> and <code_point_p> must be non-NULL. * * If <code_point_p> is not NULL, the context is expecting the result to be a * single code point. If this \N instance turns out to a single code point, * the function returns TRUE and sets *code_point_p to that code point. * * If <node_p> is not NULL, the context is expecting the result to be one of * the things representable by a regnode. If this \N instance turns out to be * one such, the function generates the regnode, returns TRUE and sets *node_p * to point to that regnode. * * If this instance of \N isn't legal in any context, this function will * generate a fatal error and not return. * * On input, RExC_parse should point to the first char following the \N at the * time of the call. On successful return, RExC_parse will have been updated * to point to just after the sequence identified by this routine. Also * *flagp has been updated as needed. * * When there is some problem with the current context and this \N instance, * the function returns FALSE, without advancing RExC_parse, nor setting * *node_p, nor *code_point_p, nor *flagp. 
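 *
 * For example, \N{U+41} reports the single code point for 'A' through
 * <code_point_p>, while \N{U+41.42} is rewritten as the sub-pattern
 * (?:\x{41}\x{42}) and returned through <node_p>.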
* * If <cp_count> is not NULL, the caller wants to know the length (in code * points) that this \N sequence matches. This is set even if the function * returns FALSE, as detailed below. * * There are 5 possibilities here, as detailed in the next 5 paragraphs. * * Probably the most common case is for the \N to specify a single code point. * *cp_count will be set to 1, and *code_point_p will be set to that code * point. * * Another possibility is for the input to be an empty \N{}, which for * backwards compatibility we accept. *cp_count will be set to 0. *node_p * will be set to a generated NOTHING node. * * Still another possibility is for the \N to mean [^\n]. *cp_count will be * set to 0. *node_p will be set to a generated REG_ANY node. * * The fourth possibility is that \N resolves to a sequence of more than one * code points. *cp_count will be set to the number of code points in the * sequence. *node_p * will be set to a generated node returned by this * function calling S_reg(). * * The final possibility is that it is premature to be calling this function; * that pass1 needs to be restarted. This can happen when this changes from * /d to /u rules, or when the pattern needs to be upgraded to UTF-8. The * latter occurs only when the fourth possibility would otherwise be in * effect, and is because one of those code points requires the pattern to be * recompiled as UTF-8. The function returns FALSE, and sets the * RESTART_PASS1 and NEED_UTF8 flags in *flagp, as appropriate. When this * happens, the caller needs to desist from continuing parsing, and return * this information to its caller. This is not set for when there is only one * code point, as this can be called as part of an ANYOF node, and they can * store above-Latin1 code points without the pattern having to be in UTF-8. * * For non-single-quoted regexes, the tokenizer has resolved character and * sequence names inside \N{...} into their Unicode values, normalizing the * result into what we should see here: '\N{U+c1.c2...}', where c1... are the * hex-represented code points in the sequence. This is done there because * the names can vary based on what charnames pragma is in scope at the time, * so we need a way to take a snapshot of what they resolve to at the time of * the original parse. [perl #56444]. * * That parsing is skipped for single-quoted regexes, so we may here get * '\N{NAME}'. This is a fatal error. These names have to be resolved by the * parser. But if the single-quoted regex is something like '\N{U+41}', that * is legal and handled here. The code point is Unicode, and has to be * translated into the native character set for non-ASCII platforms. */ char * endbrace; /* points to '}' following the name */ char *endchar; /* Points to '.' or '}' ending cur char in the input stream */ char* p = RExC_parse; /* Temporary */ GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_GROK_BSLASH_N; GET_RE_DEBUG_FLAGS; assert(cBOOL(node_p) ^ cBOOL(code_point_p)); /* Exactly one should be set */ assert(! (node_p && cp_count)); /* At most 1 should be set */ if (cp_count) { /* Initialize return for the most common case */ *cp_count = 1; } /* The [^\n] meaning of \N ignores spaces and comments under the /x * modifier. The other meanings do not, so use a temporary until we find * out which we are being called with */ skip_to_be_ignored_text(pRExC_state, &p, FALSE /* Don't force to /x */ ); /* Disambiguate between \N meaning a named character versus \N meaning * [^\n]. 
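 * For instance, /\N{U+263A}/ names one code point, whereas in /\N{3}/
 * the braces form a legal quantifier and each \N matches [^\n].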
The latter is assumed when the {...} following the \N is a legal * quantifier, or there is no '{' at all */ if (*p != '{' || regcurly(p)) { RExC_parse = p; if (cp_count) { *cp_count = -1; } if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state, REG_ANY); *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); Set_Node_Length(*node_p, 1); /* MJD */ return TRUE; } /* Here, we have decided it should be a named character or sequence */ /* The test above made sure that the next real character is a '{', but * under the /x modifier, it could be separated by space (or a comment and * \n) and this is not allowed (for consistency with \x{...} and the * tokenizer handling of \N{NAME}). */ if (*RExC_parse != '{') { vFAIL("Missing braces on \\N{}"); } RExC_parse++; /* Skip past the '{' */ endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse); if (! endbrace) { /* no trailing brace */ vFAIL2("Missing right brace on \\%c{}", 'N'); } else if (!( endbrace == RExC_parse /* nothing between the {} */ || memBEGINs(RExC_parse, /* U+ (bad hex is checked below for a better error msg) */ (STRLEN) (RExC_end - RExC_parse), "U+"))) { RExC_parse = endbrace; /* position msg's '<--HERE' */ vFAIL("\\N{NAME} must be resolved by the lexer"); } REQUIRE_UNI_RULES(flagp, FALSE); /* Unicode named chars imply Unicode semantics */ if (endbrace == RExC_parse) { /* empty: \N{} */ if (strict) { RExC_parse++; /* Position after the "}" */ vFAIL("Zero length \\N{}"); } if (cp_count) { *cp_count = 0; } nextchar(pRExC_state); if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state,NOTHING); return TRUE; } RExC_parse += 2; /* Skip past the 'U+' */ /* Because toke.c has generated a special construct for us guaranteed not * to have NULs, we can use a str function */ endchar = RExC_parse + strcspn(RExC_parse, ".}"); /* Code points are separated by dots. If none, there is only one code * point, and is terminated by the brace */ if (endchar >= endbrace) { STRLEN length_of_hex; I32 grok_hex_flags; /* Here, exactly one code point. If that isn't what is wanted, fail */ if (! code_point_p) { RExC_parse = p; return FALSE; } /* Convert code point from hex */ length_of_hex = (STRLEN)(endchar - RExC_parse); grok_hex_flags = PERL_SCAN_ALLOW_UNDERSCORES | PERL_SCAN_DISALLOW_PREFIX /* No errors in the first pass (See [perl * #122671].) We let the code below find the * errors when there are multiple chars. */ | ((SIZE_ONLY) ? PERL_SCAN_SILENT_ILLDIGIT : 0); /* This routine is the one place where both single- and double-quotish * \N{U+xxxx} are evaluated. The value is a Unicode code point which * must be converted to native. */ *code_point_p = UNI_TO_NATIVE(grok_hex(RExC_parse, &length_of_hex, &grok_hex_flags, NULL)); /* The tokenizer should have guaranteed validity, but it's possible to * bypass it by using single quoting, so check. Don't do the check * here when there are multiple chars; we do it below anyway. */ if (length_of_hex == 0 || length_of_hex != (STRLEN)(endchar - RExC_parse) ) { RExC_parse += length_of_hex; /* Includes all the valid */ RExC_parse += (RExC_orig_utf8) /* point to after 1st invalid */ ? 
UTF8SKIP(RExC_parse) : 1; /* Guard against malformed utf8 */ if (RExC_parse >= endchar) { RExC_parse = endchar; } vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = endbrace + 1; return TRUE; } else { /* Is a multiple character sequence */ SV * substitute_parse; STRLEN len; char *orig_end = RExC_end; char *save_start = RExC_start; I32 flags; /* Count the code points, if desired, in the sequence */ if (cp_count) { *cp_count = 0; while (RExC_parse < endbrace) { /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); (*cp_count)++; } } /* Fail if caller doesn't want to handle a multi-code-point sequence. * But don't backup up the pointer if the caller wants to know how many * code points there are (they can then handle things) */ if (! node_p) { if (! cp_count) { RExC_parse = p; } return FALSE; } /* What is done here is to convert this to a sub-pattern of the form * \x{char1}\x{char2}... and then call reg recursively to parse it * (enclosing in "(?: ... )" ). That way, it retains its atomicness, * while not having to worry about special handling that some code * points may have. */ substitute_parse = newSVpvs("?:"); while (RExC_parse < endbrace) { /* Convert to notation the rest of the code understands */ sv_catpv(substitute_parse, "\\x{"); sv_catpvn(substitute_parse, RExC_parse, endchar - RExC_parse); sv_catpv(substitute_parse, "}"); /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); } sv_catpv(substitute_parse, ")"); len = SvCUR(substitute_parse); /* Don't allow empty number */ if (len < (STRLEN) 8) { RExC_parse = endbrace; vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = RExC_start = RExC_adjusted_start = SvPV_nolen(substitute_parse); RExC_end = RExC_parse + len; /* The values are Unicode, and therefore not subject to recoding, but * have to be converted to native on a non-Unicode (meaning non-ASCII) * platform. */ #ifdef EBCDIC RExC_recode_x_to_native = 1; #endif *node_p = reg(pRExC_state, 1, &flags, depth+1); /* Restore the saved values */ RExC_start = RExC_adjusted_start = save_start; RExC_parse = endbrace; RExC_end = orig_end; #ifdef EBCDIC RExC_recode_x_to_native = 0; #endif SvREFCNT_dec_NN(substitute_parse); if (! *node_p) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return FALSE; } FAIL2("panic: reg returned NULL to grok_bslash_N, flags=%#" UVxf, (UV) flags); } *flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED); nextchar(pRExC_state); return TRUE; } } PERL_STATIC_INLINE U8 S_compute_EXACTish(RExC_state_t *pRExC_state) { U8 op; PERL_ARGS_ASSERT_COMPUTE_EXACTISH; if (! FOLD) { return (LOC) ? EXACTL : EXACT; } op = get_regex_charset(RExC_flags); if (op >= REGEX_ASCII_RESTRICTED_CHARSET) { op--; /* /a is same as /u, and map /aa's offset to what /a's would have been, so there is no hole */ } return op + EXACTF; } PERL_STATIC_INLINE void S_alloc_maybe_populate_EXACT(pTHX_ RExC_state_t *pRExC_state, regnode *node, I32* flagp, STRLEN len, UV code_point, bool downgradable) { /* This knows the details about sizing an EXACTish node, setting flags for * it (by setting <*flagp>, and potentially populating it with a single * character. * * If <len> (the length in bytes) is non-zero, this function assumes that * the node has already been populated, and just does the sizing. 
In this * case <code_point> should be the final code point that has already been * placed into the node. This value will be ignored except that under some * circumstances <*flagp> is set based on it. * * If <len> is zero, the function assumes that the node is to contain only * the single character given by <code_point> and calculates what <len> * should be. In pass 1, it sizes the node appropriately. In pass 2, it * additionally will populate the node's STRING with <code_point> or its * fold if folding. * * In both cases <*flagp> is appropriately set * * It knows that under FOLD, the Latin Sharp S and UTF characters above * 255, must be folded (the former only when the rules indicate it can * match 'ss') * * When it does the populating, it looks at the flag 'downgradable'. If * true with a node that folds, it checks if the single code point * participates in a fold, and if not downgrades the node to an EXACT. * This helps the optimizer */ bool len_passed_in = cBOOL(len != 0); U8 character[UTF8_MAXBYTES_CASE+1]; PERL_ARGS_ASSERT_ALLOC_MAYBE_POPULATE_EXACT; /* Don't bother to check for downgrading in PASS1, as it doesn't make any * sizing difference, and is extra work that is thrown away */ if (downgradable && ! PASS2) { downgradable = FALSE; } if (! len_passed_in) { if (UTF) { if (UVCHR_IS_INVARIANT(code_point)) { if (LOC || ! FOLD) { /* /l defers folding until runtime */ *character = (U8) code_point; } else { /* Here is /i and not /l. (toFOLD() is defined on just ASCII, which isn't the same thing as INVARIANT on EBCDIC, but it works there, as the extra invariants fold to themselves) */ *character = toFOLD((U8) code_point); /* We can downgrade to an EXACT node if this character * isn't a folding one. Note that this assumes that * nothing above Latin1 folds to some other invariant than * one of these alphabetics; otherwise we would also have * to check: * && (! HAS_NONLATIN1_FOLD_CLOSURE(code_point) * || ASCII_FOLD_RESTRICTED)) */ if (downgradable && PL_fold[code_point] == code_point) { OP(node) = EXACT; } } len = 1; } else if (FOLD && (! LOC || ! is_PROBLEMATIC_LOCALE_FOLD_cp(code_point))) { /* Folding, and ok to do so now */ UV folded = _to_uni_fold_flags( code_point, character, &len, FOLD_FLAGS_FULL | ((ASCII_FOLD_RESTRICTED) ? FOLD_FLAGS_NOMIX_ASCII : 0)); if (downgradable && folded == code_point /* This quickly rules out many cases, avoiding the _invlist_contains_cp() overhead for those. */ && ! _invlist_contains_cp(PL_utf8_foldable, code_point)) { OP(node) = (LOC) ? EXACTL : EXACT; } } else if (code_point <= MAX_UTF8_TWO_BYTE) { /* Not folding this cp, and can output it directly */ *character = UTF8_TWO_BYTE_HI(code_point); *(character + 1) = UTF8_TWO_BYTE_LO(code_point); len = 2; } else { uvchr_to_utf8( character, code_point); len = UTF8SKIP(character); } } /* Else pattern isn't UTF8. */ else if (! FOLD) { *character = (U8) code_point; len = 1; } /* Else is folded non-UTF8 */ #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) else if (LIKELY(code_point != LATIN_SMALL_LETTER_SHARP_S)) { #else else if (1) { #endif /* We don't fold any non-UTF8 except possibly the Sharp s (see * comments at join_exact()); */ *character = (U8) code_point; len = 1; /* Can turn into an EXACT node if we know the fold at compile time, * and it folds to itself and doesn't particpate in other folds */ if (downgradable && ! LOC && PL_fold_latin1[code_point] == code_point && (! 
HAS_NONLATIN1_FOLD_CLOSURE(code_point) || (isASCII(code_point) && ASCII_FOLD_RESTRICTED))) { OP(node) = EXACT; } } /* else is Sharp s. May need to fold it */ else if (AT_LEAST_UNI_SEMANTICS && ! ASCII_FOLD_RESTRICTED) { *character = 's'; *(character + 1) = 's'; len = 2; } else { *character = LATIN_SMALL_LETTER_SHARP_S; len = 1; } } if (SIZE_ONLY) { RExC_size += STR_SZ(len); } else { RExC_emit += STR_SZ(len); STR_LEN(node) = len; if (! len_passed_in) { Copy((char *) character, STRING(node), len, char); } } *flagp |= HASWIDTH; /* A single character node is SIMPLE, except for the special-cased SHARP S * under /di. */ if ((len == 1 || (UTF && len == UVCHR_SKIP(code_point))) #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) && ( code_point != LATIN_SMALL_LETTER_SHARP_S || ! FOLD || ! DEPENDS_SEMANTICS) #endif ) { *flagp |= SIMPLE; } /* The OP may not be well defined in PASS1 */ if (PASS2 && OP(node) == EXACTFL) { RExC_contains_locale = 1; } } STATIC bool S_new_regcurly(const char *s, const char *e) { /* This is a temporary function designed to match the most lenient form of * a {m,n} quantifier we ever envision, with either number omitted, and * spaces anywhere between/before/after them. * * If this function fails, then the string it matches is very unlikely to * ever be considered a valid quantifier, so we can allow the '{' that * begins it to be considered as a literal */ bool has_min = FALSE; bool has_max = FALSE; PERL_ARGS_ASSERT_NEW_REGCURLY; if (s >= e || *s++ != '{') return FALSE; while (s < e && isSPACE(*s)) { s++; } while (s < e && isDIGIT(*s)) { has_min = TRUE; s++; } while (s < e && isSPACE(*s)) { s++; } if (*s == ',') { s++; while (s < e && isSPACE(*s)) { s++; } while (s < e && isDIGIT(*s)) { has_max = TRUE; s++; } while (s < e && isSPACE(*s)) { s++; } } return s < e && *s == '}' && (has_min || has_max); } /* Parse backref decimal value, unless it's too big to sensibly be a backref, * in which case return I32_MAX (rather than possibly 32-bit wrapping) */ static I32 S_backref_value(char *p) { const char* endptr; UV val; if (grok_atoUV(p, &val, &endptr) && val <= I32_MAX) return (I32)val; return I32_MAX; } /* - regatom - the lowest level Try to identify anything special at the start of the current parse position. If there is, then handle it as required. This may involve generating a single regop, such as for an assertion; or it may involve recursing, such as to handle a () structure. If the string doesn't start with something special then we gobble up as much literal text as we can. If we encounter a quantifier, we have to back off the final literal character, as that quantifier applies to just it and not to the whole string of literals. Once we have been able to handle whatever type of thing started the sequence, we return. Note: we have to be careful with escapes, as they can be both literal and special, and in the case of \10 and friends, context determines which. 
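   (\1 through \9 are always backreferences; a longer escape such as
   \10 is a backreference only if that many groups have been opened so
   far, and an octal character escape otherwise.)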
A summary of the code structure is: switch (first_byte) { cases for each special: handle this special; break; case '\\': switch (2nd byte) { cases for each unambiguous special: handle this special; break; cases for each ambigous special/literal: disambiguate; if (special) handle here else goto defchar; default: // unambiguously literal: goto defchar; } default: // is a literal char // FALL THROUGH defchar: create EXACTish node for literal; while (more input and node isn't full) { switch (input_byte) { cases for each special; make sure parse pointer is set so that the next call to regatom will see this special first goto loopdone; // EXACTish node terminated by prev. char default: append char to EXACTISH node; } get next input byte; } loopdone: } return the generated node; Specifically there are two separate switches for handling escape sequences, with the one for handling literal escapes requiring a dummy entry for all of the special escapes that are actually handled by the other. Returns NULL, setting *flagp to TRYAGAIN if reg() returns NULL with TRYAGAIN. Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded to UTF-8 Otherwise does not return NULL. */ STATIC regnode * S_regatom(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth) { regnode *ret = NULL; I32 flags = 0; char *parse_start; U8 op; int invert = 0; U8 arg; GET_RE_DEBUG_FLAGS_DECL; *flagp = WORST; /* Tentatively. */ DEBUG_PARSE("atom"); PERL_ARGS_ASSERT_REGATOM; tryagain: parse_start = RExC_parse; assert(RExC_parse < RExC_end); switch ((U8)*RExC_parse) { case '^': RExC_seen_zerolen++; nextchar(pRExC_state); if (RExC_flags & RXf_PMf_MULTILINE) ret = reg_node(pRExC_state, MBOL); else ret = reg_node(pRExC_state, SBOL); Set_Node_Length(ret, 1); /* MJD */ break; case '$': nextchar(pRExC_state); if (*RExC_parse) RExC_seen_zerolen++; if (RExC_flags & RXf_PMf_MULTILINE) ret = reg_node(pRExC_state, MEOL); else ret = reg_node(pRExC_state, SEOL); Set_Node_Length(ret, 1); /* MJD */ break; case '.': nextchar(pRExC_state); if (RExC_flags & RXf_PMf_SINGLELINE) ret = reg_node(pRExC_state, SANY); else ret = reg_node(pRExC_state, REG_ANY); *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); Set_Node_Length(ret, 1); /* MJD */ break; case '[': { char * const oregcomp_parse = ++RExC_parse; ret = regclass(pRExC_state, flagp,depth+1, FALSE, /* means parse the whole char class */ TRUE, /* allow multi-char folds */ FALSE, /* don't silence non-portable warnings. */ (bool) RExC_strict, TRUE, /* Allow an optimized regnode result */ NULL, NULL); if (ret == NULL) { if (*flagp & (RESTART_PASS1|NEED_UTF8)) return NULL; FAIL2("panic: regclass returned NULL to regatom, flags=%#" UVxf, (UV) *flagp); } if (*RExC_parse != ']') { RExC_parse = oregcomp_parse; vFAIL("Unmatched ["); } nextchar(pRExC_state); Set_Node_Length(ret, RExC_parse - oregcomp_parse + 1); /* MJD */ break; } case '(': nextchar(pRExC_state); ret = reg(pRExC_state, 2, &flags,depth+1); if (ret == NULL) { if (flags & TRYAGAIN) { if (RExC_parse >= RExC_end) { /* Make parent create an empty node if needed. 
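                           A group such as (?i) that only sets flags
                           produces no atom of its own, so TRYAGAIN is
                           propagated for the caller to cope with.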
*/ *flagp |= TRYAGAIN; return(NULL); } goto tryagain; } if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return NULL; } FAIL2("panic: reg returned NULL to regatom, flags=%#" UVxf, (UV) flags); } *flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED); break; case '|': case ')': if (flags & TRYAGAIN) { *flagp |= TRYAGAIN; return NULL; } vFAIL("Internal urp"); /* Supposed to be caught earlier. */ break; case '?': case '+': case '*': RExC_parse++; vFAIL("Quantifier follows nothing"); break; case '\\': /* Special Escapes This switch handles escape sequences that resolve to some kind of special regop and not to literal text. Escape sequnces that resolve to literal text are handled below in the switch marked "Literal Escapes". Every entry in this switch *must* have a corresponding entry in the literal escape switch. However, the opposite is not required, as the default for this switch is to jump to the literal text handling code. */ RExC_parse++; switch ((U8)*RExC_parse) { /* Special Escapes */ case 'A': RExC_seen_zerolen++; ret = reg_node(pRExC_state, SBOL); /* SBOL is shared with /^/ so we set the flags so we can tell * /\A/ from /^/ in split. We check ret because first pass we * have no regop struct to set the flags on. */ if (PASS2) ret->flags = 1; *flagp |= SIMPLE; goto finish_meta_pat; case 'G': ret = reg_node(pRExC_state, GPOS); RExC_seen |= REG_GPOS_SEEN; *flagp |= SIMPLE; goto finish_meta_pat; case 'K': RExC_seen_zerolen++; ret = reg_node(pRExC_state, KEEPS); *flagp |= SIMPLE; /* XXX:dmq : disabling in-place substitution seems to * be necessary here to avoid cases of memory corruption, as * with: C<$_="x" x 80; s/x\K/y/> -- rgs */ RExC_seen |= REG_LOOKBEHIND_SEEN; goto finish_meta_pat; case 'Z': ret = reg_node(pRExC_state, SEOL); *flagp |= SIMPLE; RExC_seen_zerolen++; /* Do not optimize RE away */ goto finish_meta_pat; case 'z': ret = reg_node(pRExC_state, EOS); *flagp |= SIMPLE; RExC_seen_zerolen++; /* Do not optimize RE away */ goto finish_meta_pat; case 'C': vFAIL("\\C no longer supported"); case 'X': ret = reg_node(pRExC_state, CLUMP); *flagp |= HASWIDTH; goto finish_meta_pat; case 'W': invert = 1; /* FALLTHROUGH */ case 'w': arg = ANYOF_WORDCHAR; goto join_posix; case 'B': invert = 1; /* FALLTHROUGH */ case 'b': { regex_charset charset = get_regex_charset(RExC_flags); RExC_seen_zerolen++; RExC_seen |= REG_LOOKBEHIND_SEEN; op = BOUND + charset; if (op == BOUNDL) { RExC_contains_locale = 1; } ret = reg_node(pRExC_state, op); *flagp |= SIMPLE; if (RExC_parse >= RExC_end || *(RExC_parse + 1) != '{') { FLAGS(ret) = TRADITIONAL_BOUND; if (PASS2 && op > BOUNDA) { /* /aa is same as /a */ OP(ret) = BOUNDA; } } else { STRLEN length; char name = *RExC_parse; char * endbrace = NULL; RExC_parse += 2; if (RExC_parse < RExC_end) { endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse); } if (! endbrace) { vFAIL2("Missing right brace on \\%c{}", name); } /* XXX Need to decide whether to take spaces or not. 
Should be * consistent with \p{}, but that currently is SPACE, which * means vertical too, which seems wrong * while (isBLANK(*RExC_parse)) { RExC_parse++; }*/ if (endbrace == RExC_parse) { RExC_parse++; /* After the '}' */ vFAIL2("Empty \\%c{}", name); } length = endbrace - RExC_parse; /*while (isBLANK(*(RExC_parse + length - 1))) { length--; }*/ switch (*RExC_parse) { case 'g': if ( length != 1 && (memNEs(RExC_parse + 1, length - 1, "cb"))) { goto bad_bound_type; } FLAGS(ret) = GCB_BOUND; break; case 'l': if (length != 2 || *(RExC_parse + 1) != 'b') { goto bad_bound_type; } FLAGS(ret) = LB_BOUND; break; case 's': if (length != 2 || *(RExC_parse + 1) != 'b') { goto bad_bound_type; } FLAGS(ret) = SB_BOUND; break; case 'w': if (length != 2 || *(RExC_parse + 1) != 'b') { goto bad_bound_type; } FLAGS(ret) = WB_BOUND; break; default: bad_bound_type: RExC_parse = endbrace; vFAIL2utf8f( "'%" UTF8f "' is an unknown bound type", UTF8fARG(UTF, length, endbrace - length)); NOT_REACHED; /*NOTREACHED*/ } RExC_parse = endbrace; REQUIRE_UNI_RULES(flagp, NULL); if (PASS2 && op >= BOUNDA) { /* /aa is same as /a */ OP(ret) = BOUNDU; length += 4; /* Don't have to worry about UTF-8, in this message because * to get here the contents of the \b must be ASCII */ ckWARN4reg(RExC_parse + 1, /* Include the '}' in msg */ "Using /u for '%.*s' instead of /%s", (unsigned) length, endbrace - length + 1, (charset == REGEX_ASCII_RESTRICTED_CHARSET) ? ASCII_RESTRICT_PAT_MODS : ASCII_MORE_RESTRICT_PAT_MODS); } } if (PASS2 && invert) { OP(ret) += NBOUND - BOUND; } goto finish_meta_pat; } case 'D': invert = 1; /* FALLTHROUGH */ case 'd': arg = ANYOF_DIGIT; if (! DEPENDS_SEMANTICS) { goto join_posix; } /* \d doesn't have any matches in the upper Latin1 range, hence /d * is equivalent to /u. Changing to /u saves some branches at * runtime */ op = POSIXU; goto join_posix_op_known; case 'R': ret = reg_node(pRExC_state, LNBREAK); *flagp |= HASWIDTH|SIMPLE; goto finish_meta_pat; case 'H': invert = 1; /* FALLTHROUGH */ case 'h': arg = ANYOF_BLANK; op = POSIXU; goto join_posix_op_known; case 'V': invert = 1; /* FALLTHROUGH */ case 'v': arg = ANYOF_VERTWS; op = POSIXU; goto join_posix_op_known; case 'S': invert = 1; /* FALLTHROUGH */ case 's': arg = ANYOF_SPACE; join_posix: op = POSIXD + get_regex_charset(RExC_flags); if (op > POSIXA) { /* /aa is same as /a */ op = POSIXA; } else if (op == POSIXL) { RExC_contains_locale = 1; } join_posix_op_known: if (invert) { op += NPOSIXD - POSIXD; } ret = reg_node(pRExC_state, op); if (! SIZE_ONLY) { FLAGS(ret) = namedclass_to_classnum(arg); } *flagp |= HASWIDTH|SIMPLE; /* FALLTHROUGH */ finish_meta_pat: if ( UCHARAT(RExC_parse + 1) == '{' && UNLIKELY(! new_regcurly(RExC_parse + 1, RExC_end))) { RExC_parse += 2; vFAIL("Unescaped left brace in regex is illegal here"); } nextchar(pRExC_state); Set_Node_Length(ret, 2); /* MJD */ break; case 'p': case 'P': RExC_parse--; ret = regclass(pRExC_state, flagp,depth+1, TRUE, /* means just parse this element */ FALSE, /* don't allow multi-char folds */ FALSE, /* don't silence non-portable warnings. It would be a bug if these returned non-portables */ (bool) RExC_strict, TRUE, /* Allow an optimized regnode result */ NULL, NULL); if (*flagp & RESTART_PASS1) return NULL; /* regclass() can only return RESTART_PASS1 and NEED_UTF8 if * multi-char folds are allowed. 
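             Multi-char folds were disallowed in this call, so a NULL
             return that survives the RESTART_PASS1 check above can
             only be a panic.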
*/ if (!ret) FAIL2("panic: regclass returned NULL to regatom, flags=%#" UVxf, (UV) *flagp); RExC_parse--; Set_Node_Offset(ret, parse_start); Set_Node_Cur_Length(ret, parse_start - 2); nextchar(pRExC_state); break; case 'N': /* Handle \N, \N{} and \N{NAMED SEQUENCE} (the latter meaning the * \N{...} evaluates to a sequence of more than one code points). * The function call below returns a regnode, which is our result. * The parameters cause it to fail if the \N{} evaluates to a * single code point; we handle those like any other literal. The * reason that the multicharacter case is handled here and not as * part of the EXACtish code is because of quantifiers. In * /\N{BLAH}+/, the '+' applies to the whole thing, and doing it * this way makes that Just Happen. dmq. * join_exact() will join this up with adjacent EXACTish nodes * later on, if appropriate. */ ++RExC_parse; if (grok_bslash_N(pRExC_state, &ret, /* Want a regnode returned */ NULL, /* Fail if evaluates to a single code point */ NULL, /* Don't need a count of how many code points */ flagp, RExC_strict, depth) ) { break; } if (*flagp & RESTART_PASS1) return NULL; /* Here, evaluates to a single code point. Go get that */ RExC_parse = parse_start; goto defchar; case 'k': /* Handle \k<NAME> and \k'NAME' */ parse_named_seq: { char ch; if ( RExC_parse >= RExC_end - 1 || (( ch = RExC_parse[1]) != '<' && ch != '\'' && ch != '{')) { RExC_parse++; /* diag_listed_as: Sequence \%s... not terminated in regex; marked by <-- HERE in m/%s/ */ vFAIL2("Sequence %.2s... not terminated",parse_start); } else { RExC_parse += 2; ret = handle_named_backref(pRExC_state, flagp, parse_start, (ch == '<') ? '>' : (ch == '{') ? '}' : '\''); } break; } case 'g': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': { I32 num; bool hasbrace = 0; if (*RExC_parse == 'g') { bool isrel = 0; RExC_parse++; if (*RExC_parse == '{') { RExC_parse++; hasbrace = 1; } if (*RExC_parse == '-') { RExC_parse++; isrel = 1; } if (hasbrace && !isDIGIT(*RExC_parse)) { if (isrel) RExC_parse--; RExC_parse -= 2; goto parse_named_seq; } if (RExC_parse >= RExC_end) { goto unterminated_g; } num = S_backref_value(RExC_parse); if (num == 0) vFAIL("Reference to invalid group 0"); else if (num == I32_MAX) { if (isDIGIT(*RExC_parse)) vFAIL("Reference to nonexistent group"); else unterminated_g: vFAIL("Unterminated \\g... pattern"); } if (isrel) { num = RExC_npar - num; if (num < 1) vFAIL("Reference to nonexistent or unclosed group"); } } else { num = S_backref_value(RExC_parse); /* bare \NNN might be backref or octal - if it is larger * than or equal RExC_npar then it is assumed to be an * octal escape. Note RExC_npar is +1 from the actual * number of parens. */ /* Note we do NOT check if num == I32_MAX here, as that is * handled by the RExC_npar check */ if ( /* any numeric escape < 10 is always a backref */ num > 9 /* any numeric escape < RExC_npar is a backref */ && num >= RExC_npar /* cannot be an octal escape if it starts with 8 */ && *RExC_parse != '8' /* cannot be an octal escape it it starts with 9 */ && *RExC_parse != '9' ) { /* Probably not a backref, instead likely to be an * octal character escape, e.g. \35 or \777. * The above logic should make it obvious why using * octal escapes in patterns is problematic. - Yves */ RExC_parse = parse_start; goto defchar; } } /* At this point RExC_parse points at a numeric escape like * \12 or \88 or something similar, which we should NOT treat * as an octal escape. It may or may not be a valid backref * escape. 
		 * For instance \88888888 is unlikely to be a valid backref.
		 */
		while (isDIGIT(*RExC_parse))
		    RExC_parse++;
		if (hasbrace) {
		    if (*RExC_parse != '}')
			vFAIL("Unterminated \\g{...} pattern");
		    RExC_parse++;
		}
		if (!SIZE_ONLY) {
		    if (num > (I32)RExC_rx->nparens)
			vFAIL("Reference to nonexistent group");
		}
		RExC_sawback = 1;
		ret = reganode(pRExC_state,
			       ((! FOLD)
				 ? REF
				 : (ASCII_FOLD_RESTRICTED)
				   ? REFFA
                                   : (AT_LEAST_UNI_SEMANTICS)
                                     ? REFFU
                                     : (LOC)
                                       ? REFFL
                                       : REFF),
			       num);
		*flagp |= HASWIDTH;

		/* override incorrect value set in reganode MJD */
		Set_Node_Offset(ret, parse_start);
		Set_Node_Cur_Length(ret, parse_start - 1);
                skip_to_be_ignored_text(pRExC_state, &RExC_parse,
                                        FALSE /* Don't force to /x */ );
	    }
	    break;
	case '\0':
	    if (RExC_parse >= RExC_end)
		FAIL("Trailing \\");
	    /* FALLTHROUGH */
	default:
	    /* Do not generate "unrecognized" warnings here, we fall
	       back into the quick-grab loop below */
	    RExC_parse = parse_start;
	    goto defchar;
	} /* end of switch on a \foo sequence */
	break;

    case '#':
        /* '#' comments should have been spaced over before this function was
         * called */
        assert((RExC_flags & RXf_PMf_EXTENDED) == 0);
	/*
        if (RExC_flags & RXf_PMf_EXTENDED) {
	    RExC_parse = reg_skipcomment( pRExC_state, RExC_parse );
	    if (RExC_parse < RExC_end)
		goto tryagain;
	}
        */

	/* FALLTHROUGH */

    default:
	  defchar: {

	    /* Here, we have determined that the next thing is probably a
	     * literal character.  RExC_parse points to the first byte of its
	     * definition.  (It still may be an escape sequence that evaluates
	     * to a single character) */

	    STRLEN len = 0;
	    UV ender = 0;
	    char *p;
	    char *s;
#define MAX_NODE_STRING_SIZE 127
	    char foldbuf[MAX_NODE_STRING_SIZE+UTF8_MAXBYTES_CASE];
	    char *s0;
	    U8 upper_parse = MAX_NODE_STRING_SIZE;
            U8 node_type = compute_EXACTish(pRExC_state);
            bool next_is_quantifier;
            char * oldp = NULL;

            /* We can convert EXACTF nodes to EXACTFU if they contain only
             * characters that match identically regardless of the target
             * string's UTF8ness.  The reason to do this is that EXACTF is not
             * trie-able, EXACTFU is.
             *
             * Similarly, we can convert EXACTFL nodes to EXACTFLU8 if they
             * contain only above-Latin1 characters (hence must be in UTF8),
             * which don't participate in folds with Latin1-range characters,
             * as the latter's folds aren't known until runtime.  (We don't
             * need to figure this out until pass 2) */
            bool maybe_exactfu = PASS2
                              && (node_type == EXACTF || node_type == EXACTFL);

            /* If a folding node contains only code points that don't
             * participate in folds, it can be changed into an EXACT node,
             * which allows the optimizer more things to look for */
            bool maybe_exact;

	    ret = reg_node(pRExC_state, node_type);

            /* In pass1, folded, we use a temporary buffer instead of the
             * actual node, as the node doesn't exist yet */
	    s = (SIZE_ONLY && FOLD) ? foldbuf : STRING(ret);

            s0 = s;

	  reparse:

            /* We look for the EXACTFish to EXACT node optimization only if
             * folding.  (And we don't need to figure this out until pass 2).
             * XXX It might actually make sense to split the node into
             * portions that are exact and ones that aren't, so that we could
             * later use the exact ones to find the longest fixed and floating
             * strings.  One would want to join them back into a larger node.
             * One could use a pseudo regnode like 'EXACT_ORIG_FOLD' */
            maybe_exact = FOLD && PASS2;

            /* XXX The node can hold up to 255 bytes, yet this only goes to
             * 127.  I (khw) do not know why.  Keeping it somewhat less than
             * 255 allows us to not have to worry about overflow due to
             * converting to utf8 and fold expansion, but that value is
             * 255-UTF8_MAXBYTES_CASE.
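             * (Aside, not in the upstream source: the canonical problem case
             * is LATIN SMALL LETTER SHARP S, whose full fold is the
             * two-character 'ss'; if a node boundary were to land between
             * those two characters, /\xDF/i could fail to match "ss" unless
             * the join described next actually happens.)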
             * join_exact() may join adjacent nodes split up by this limit
             * into a single one using the real max of 255.  Even at 127, this
             * breaks under rare circumstances.  If folding, we do not want to
             * split a node at a character that is a non-final in a multi-char
             * fold, as an input string could just happen to want to match
             * across the node boundary.  The join would solve that problem if
             * the join actually happens.  But a series of more than two nodes
             * in a row each of 127 would cause the first join to succeed to
             * get to 254, but then there wouldn't be room for the next one,
             * which could be one of those split multi-char folds.  I don't
             * know of any fool-proof solution.  One could back off to end
             * with only a code point that isn't such a non-final, but it is
             * possible for there not to be any in the entire node. */

            assert(   ! UTF     /* Is at the beginning of a character */
                   ||   UTF8_IS_INVARIANT(UCHARAT(RExC_parse))
                   ||   UTF8_IS_START(UCHARAT(RExC_parse)));

            /* Here, we have a literal character.  Find the maximal string of
             * them in the input that we can fit into a single EXACTish node.
             * We quit at the first non-literal or when the node gets full */
	    for (p = RExC_parse; len < upper_parse && p < RExC_end; len++) {

                oldp = p;

                /* White space has already been ignored */
                assert(   (RExC_flags & RXf_PMf_EXTENDED) == 0
                       || ! is_PATWS_safe((p), RExC_end, UTF));

		switch ((U8)*p) {
		case '^':
		case '$':
		case '.':
		case '[':
		case '(':
		case ')':
		case '|':
		    goto loopdone;
		case '\\':
		     /* Literal Escapes Switch

		       This switch is meant to handle escape sequences that
		       resolve to a literal character.

		       Every escape sequence that represents something
		       else, like an assertion or a char class, is handled
		       in the switch marked 'Special Escapes' above in this
		       routine, but also has an entry here as anything that
		       isn't explicitly mentioned here will be treated as
		       an unescaped equivalent literal.
		     */

		    switch ((U8)*++p) {
		    /* These are all the special escapes. */
		    case 'A':             /* Start assertion */
		    case 'b': case 'B':   /* Word-boundary assertion */
		    case 'C':             /* Single char !DANGEROUS! */
		    case 'd': case 'D':   /* digit class */
		    case 'g': case 'G':   /* generic-backref, pos assertion */
		    case 'h': case 'H':   /* HORIZWS */
		    case 'k': case 'K':   /* named backref, keep marker */
		    case 'p': case 'P':   /* Unicode property */
		              case 'R':   /* LNBREAK */
		    case 's': case 'S':   /* space class */
		    case 'v': case 'V':   /* VERTWS */
		    case 'w': case 'W':   /* word class */
                    case 'X':             /* eXtended Unicode "combining
                                             character sequence" */
		    case 'z': case 'Z':   /* End of line/string assertion */
			--p;
			goto loopdone;

	            /* Anything after here is an escape that resolves to a
	               literal. (Except digits, which may or may not)
	             */
		    case 'n':
			ender = '\n';
			p++;
			break;
		    case 'N': /* Handle a single-code point named character. */
                        RExC_parse = p + 1;
                        if (! grok_bslash_N(pRExC_state,
                                            NULL,   /* Fail if evaluates to
                                                       anything other than a
                                                       single code point */
                                            &ender, /* The returned single code
                                                       point */
                                            NULL,   /* Don't need a count of
                                                       how many code points */
                                            flagp,
                                            RExC_strict,
                                            depth)
                        ) {
                            if (*flagp & NEED_UTF8)
                                FAIL("panic: grok_bslash_N set NEED_UTF8");
                            if (*flagp & RESTART_PASS1)
                                return NULL;

                            /* Here, it wasn't a single code point.  Go close
                             * up this EXACTish node.
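                             * (Illustration, not in the upstream source: a
                             * \N{...} naming a sequence of two or more code
                             * points lands here; the literal string
                             * accumulated so far is closed off, and the
                             * sequence itself is then handled by regatom's
                             * case 'N' above on the next call.)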
The switch() prior to * this switch handles the other cases */ RExC_parse = p = oldp; goto loopdone; } p = RExC_parse; RExC_parse = parse_start; if (ender > 0xff) { REQUIRE_UTF8(flagp); } break; case 'r': ender = '\r'; p++; break; case 't': ender = '\t'; p++; break; case 'f': ender = '\f'; p++; break; case 'e': ender = ESC_NATIVE; p++; break; case 'a': ender = '\a'; p++; break; case 'o': { UV result; const char* error_msg; bool valid = grok_bslash_o(&p, RExC_end, &result, &error_msg, PASS2, /* out warnings */ (bool) RExC_strict, TRUE, /* Output warnings for non- portables */ UTF); if (! valid) { RExC_parse = p; /* going to die anyway; point to exact spot of failure */ vFAIL(error_msg); } ender = result; if (ender > 0xff) { REQUIRE_UTF8(flagp); } break; } case 'x': { UV result = UV_MAX; /* initialize to erroneous value */ const char* error_msg; bool valid = grok_bslash_x(&p, RExC_end, &result, &error_msg, PASS2, /* out warnings */ (bool) RExC_strict, TRUE, /* Silence warnings for non- portables */ UTF); if (! valid) { RExC_parse = p; /* going to die anyway; point to exact spot of failure */ vFAIL(error_msg); } ender = result; if (ender < 0x100) { #ifdef EBCDIC if (RExC_recode_x_to_native) { ender = LATIN1_TO_NATIVE(ender); } #endif } else { REQUIRE_UTF8(flagp); } break; } case 'c': p++; ender = grok_bslash_c(*p++, PASS2); break; case '8': case '9': /* must be a backreference */ --p; /* we have an escape like \8 which cannot be an octal escape * so we exit the loop, and let the outer loop handle this * escape which may or may not be a legitimate backref. */ goto loopdone; case '1': case '2': case '3':case '4': case '5': case '6': case '7': /* When we parse backslash escapes there is ambiguity * between backreferences and octal escapes. Any escape * from \1 - \9 is a backreference, any multi-digit * escape which does not start with 0 and which when * evaluated as decimal could refer to an already * parsed capture buffer is a back reference. Anything * else is octal. * * Note this implies that \118 could be interpreted as * 118 OR as "\11" . "8" depending on whether there * were 118 capture buffers defined already in the * pattern. */ /* NOTE, RExC_npar is 1 more than the actual number of * parens we have seen so far, hence the < RExC_npar below. */ if ( !isDIGIT(p[1]) || S_backref_value(p) < RExC_npar) { /* Not to be treated as an octal constant, go find backref */ --p; goto loopdone; } /* FALLTHROUGH */ case '0': { I32 flags = PERL_SCAN_SILENT_ILLDIGIT; STRLEN numlen = 3; ender = grok_oct(p, &numlen, &flags, NULL); if (ender > 0xff) { REQUIRE_UTF8(flagp); } p += numlen; if (PASS2 /* like \08, \178 */ && numlen < 3 && isDIGIT(*p) && ckWARN(WARN_REGEXP)) { reg_warn_non_literal_string( p + 1, form_short_octal_warning(p, numlen)); } } break; case '\0': if (p >= RExC_end) FAIL("Trailing \\"); /* FALLTHROUGH */ default: if (!SIZE_ONLY&& isALPHANUMERIC(*p)) { /* Include any left brace following the alpha to emphasize * that it could be part of an escape at some point * in the future */ int len = (isALPHA(*p) && *(p + 1) == '{') ? 2 : 1; ckWARN3reg(p + len, "Unrecognized escape \\%.*s passed through", len, p); } goto normal_default; } /* End of switch on '\' */ break; case '{': /* Currently we allow an lbrace at the start of a construct * without raising a warning. This is because we think we * will never want such a brace to be meant to be other * than taken literally. 
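                 * (Aside, not in the upstream source: so /{/ at the very
                 * start of a pattern stays a literal, /a{/ draws the PASS2
                 * deprecation warning below, and a brace following a
                 * passed-through alphabetic escape, e.g. /\q{/, is fatal
                 * under the checks that follow.)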
*/ if (len || (p > RExC_start && isALPHA_A(*(p - 1)))) { /* But, we raise a fatal warning otherwise, as the * deprecation cycle has come and gone. Except that it * turns out that some heavily-relied on upstream * software, notably GNU Autoconf, have failed to fix * their uses. For these, don't make it fatal unless * we anticipate using the '{' for something else. * This happens after any alpha, and for a looser {m,n} * quantifier specification */ if ( RExC_strict || ( p > parse_start + 1 && isALPHA_A(*(p - 1)) && *(p - 2) == '\\') || new_regcurly(p, RExC_end)) { RExC_parse = p + 1; vFAIL("Unescaped left brace in regex is " "illegal here"); } if (PASS2) { ckWARNregdep(p + 1, "Unescaped left brace in regex is " "deprecated here (and will be fatal " "in Perl 5.30), passed through"); } } goto normal_default; case '}': case ']': if (PASS2 && p > RExC_parse && RExC_strict) { ckWARN2reg(p + 1, "Unescaped literal '%c'", *p); } /*FALLTHROUGH*/ default: /* A literal character */ normal_default: if (! UTF8_IS_INVARIANT(*p) && UTF) { STRLEN numlen; ender = utf8n_to_uvchr((U8*)p, RExC_end - p, &numlen, UTF8_ALLOW_DEFAULT); p += numlen; } else ender = (U8) *p++; break; } /* End of switch on the literal */ /* Here, have looked at the literal character and <ender> * contains its ordinal, <p> points to the character after it. * We need to check if the next non-ignored thing is a * quantifier. Move <p> to after anything that should be * ignored, which, as a side effect, positions <p> for the next * loop iteration */ skip_to_be_ignored_text(pRExC_state, &p, FALSE /* Don't force to /x */ ); /* If the next thing is a quantifier, it applies to this * character only, which means that this character has to be in * its own node and can't just be appended to the string in an * existing node, so if there are already other characters in * the node, close the node with just them, and set up to do * this character again next time through, when it will be the * only thing in its new node */ next_is_quantifier = LIKELY(p < RExC_end) && UNLIKELY(ISMULT2(p)); if (next_is_quantifier && LIKELY(len)) { p = oldp; goto loopdone; } /* Ready to add 'ender' to the node */ if (! FOLD) { /* The simple case, just append the literal */ /* In the sizing pass, we need only the size of the * character we are appending, hence we can delay getting * its representation until PASS2. */ if (SIZE_ONLY) { if (UTF && ! UVCHR_IS_INVARIANT(ender)) { const STRLEN unilen = UVCHR_SKIP(ender); s += unilen; /* We have to subtract 1 just below (and again in * the corresponding PASS2 code) because the loop * increments <len> each time, as all but this path * (and one other) through it add a single byte to * the EXACTish node. But these paths would change * len to be the correct final value, so cancel out * the increment that follows */ len += unilen - 1; } else { s++; } } else { /* PASS2 */ not_fold_common: if (UTF && ! UVCHR_IS_INVARIANT(ender)) { U8 * new_s = uvchr_to_utf8((U8*)s, ender); len += (char *) new_s - s - 1; s = (char *) new_s; } else { *(s++) = (char) ender; } } } else if (LOC && is_PROBLEMATIC_LOCALE_FOLD_cp(ender)) { /* Here are folding under /l, and the code point is * problematic. First, we know we can't simplify things */ maybe_exact = FALSE; maybe_exactfu = FALSE; /* A problematic code point in this context means that its * fold isn't known until runtime, so we can't fold it now. * (The non-problematic code points are the above-Latin1 * ones that fold to also all above-Latin1. Their folds * don't vary no matter what the locale is.) 
But here we * have characters whose fold depends on the locale. * Unlike the non-folding case above, we have to keep track * of these in the sizing pass, so that we can make sure we * don't split too-long nodes in the middle of a potential * multi-char fold. And unlike the regular fold case * handled in the else clauses below, we don't actually * fold and don't have special cases to consider. What we * do for both passes is the PASS2 code for non-folding */ goto not_fold_common; } else /* A regular FOLD code point */ if (! ( UTF #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) /* See comments for join_exact() as to why we fold * this non-UTF at compile time */ || ( node_type == EXACTFU && ender == LATIN_SMALL_LETTER_SHARP_S) #endif )) { /* Here, are folding and are not UTF-8 encoded; therefore * the character must be in the range 0-255, and is not /l * (Not /l because we already handled these under /l in * is_PROBLEMATIC_LOCALE_FOLD_cp) */ if (IS_IN_SOME_FOLD_L1(ender)) { maybe_exact = FALSE; /* See if the character's fold differs between /d and * /u. This includes the multi-char fold SHARP S to * 'ss' */ if (UNLIKELY(ender == LATIN_SMALL_LETTER_SHARP_S)) { RExC_seen_unfolded_sharp_s = 1; maybe_exactfu = FALSE; } else if (maybe_exactfu && (PL_fold[ender] != PL_fold_latin1[ender] #if UNICODE_MAJOR_VERSION > 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && ( UNICODE_DOT_VERSION > 0) \ || UNICODE_DOT_DOT_VERSION > 0) || ( len > 0 && isALPHA_FOLD_EQ(ender, 's') && isALPHA_FOLD_EQ(*(s-1), 's')) #endif )) { maybe_exactfu = FALSE; } } /* Even when folding, we store just the input character, as * we have an array that finds its fold quickly */ *(s++) = (char) ender; } else { /* FOLD, and UTF (or sharp s) */ /* Unlike the non-fold case, we do actually have to * calculate the results here in pass 1. This is for two * reasons, the folded length may be longer than the * unfolded, and we have to calculate how many EXACTish * nodes it will take; and we may run out of room in a node * in the middle of a potential multi-char fold, and have * to back off accordingly. */ UV folded; if (isASCII_uni(ender)) { folded = toFOLD(ender); *(s)++ = (U8) folded; } else { STRLEN foldlen; folded = _to_uni_fold_flags( ender, (U8 *) s, &foldlen, FOLD_FLAGS_FULL | ((ASCII_FOLD_RESTRICTED) ? FOLD_FLAGS_NOMIX_ASCII : 0)); s += foldlen; /* The loop increments <len> each time, as all but this * path (and one other) through it add a single byte to * the EXACTish node. But this one has changed len to * be the correct final value, so subtract one to * cancel out the increment that follows */ len += foldlen - 1; } /* If this node only contains non-folding code points so * far, see if this new one is also non-folding */ if (maybe_exact) { if (folded != ender) { maybe_exact = FALSE; } else { /* Here the fold is the original; we have to check * further to see if anything folds to it */ if (_invlist_contains_cp(PL_utf8_foldable, ender)) { maybe_exact = FALSE; } } } ender = folded; } if (next_is_quantifier) { /* Here, the next input is a quantifier, and to get here, * the current character is the only one in the node. * Also, here <len> doesn't include the final byte for this * character */ len++; goto loopdone; } } /* End of loop through literal characters */ /* Here we have either exhausted the input or ran out of room in * the node. 
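             * (Aside, not in the upstream source: 'exhausted the input' means
             * p reached RExC_end; 'ran out of room' means len hit
             * upper_parse, the bounds of the loop above.)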
(If we encountered a character that can't be in the * node, transfer is made directly to <loopdone>, and so we * wouldn't have fallen off the end of the loop.) In the latter * case, we artificially have to split the node into two, because * we just don't have enough space to hold everything. This * creates a problem if the final character participates in a * multi-character fold in the non-final position, as a match that * should have occurred won't, due to the way nodes are matched, * and our artificial boundary. So back off until we find a non- * problematic character -- one that isn't at the beginning or * middle of such a fold. (Either it doesn't participate in any * folds, or appears only in the final position of all the folds it * does participate in.) A better solution with far fewer false * positives, and that would fill the nodes more completely, would * be to actually have available all the multi-character folds to * test against, and to back-off only far enough to be sure that * this node isn't ending with a partial one. <upper_parse> is set * further below (if we need to reparse the node) to include just * up through that final non-problematic character that this code * identifies, so when it is set to less than the full node, we can * skip the rest of this */ if (FOLD && p < RExC_end && upper_parse == MAX_NODE_STRING_SIZE) { const STRLEN full_len = len; assert(len >= MAX_NODE_STRING_SIZE); /* Here, <s> points to the final byte of the final character. * Look backwards through the string until find a non- * problematic character */ if (! UTF) { /* This has no multi-char folds to non-UTF characters */ if (ASCII_FOLD_RESTRICTED) { goto loopdone; } while (--s >= s0 && IS_NON_FINAL_FOLD(*s)) { } len = s - s0 + 1; } else { if (! PL_NonL1NonFinalFold) { PL_NonL1NonFinalFold = _new_invlist_C_array( NonL1_Perl_Non_Final_Folds_invlist); } /* Point to the first byte of the final character */ s = (char *) utf8_hop((U8 *) s, -1); while (s >= s0) { /* Search backwards until find non-problematic char */ if (UTF8_IS_INVARIANT(*s)) { /* There are no ascii characters that participate * in multi-char folds under /aa. In EBCDIC, the * non-ascii invariants are all control characters, * so don't ever participate in any folds. */ if (ASCII_FOLD_RESTRICTED || ! IS_NON_FINAL_FOLD(*s)) { break; } } else if (UTF8_IS_DOWNGRADEABLE_START(*s)) { if (! IS_NON_FINAL_FOLD(EIGHT_BIT_UTF8_TO_NATIVE( *s, *(s+1)))) { break; } } else if (! _invlist_contains_cp( PL_NonL1NonFinalFold, valid_utf8_to_uvchr((U8 *) s, NULL))) { break; } /* Here, the current character is problematic in that * it does occur in the non-final position of some * fold, so try the character before it, but have to * special case the very first byte in the string, so * we don't read outside the string */ s = (s == s0) ? s -1 : (char *) utf8_hop((U8 *) s, -1); } /* End of loop backwards through the string */ /* If there were only problematic characters in the string, * <s> will point to before s0, in which case the length * should be 0, otherwise include the length of the * non-problematic character just found */ len = (s < s0) ? 0 : s - s0 + UTF8SKIP(s); } /* Here, have found the final character, if any, that is * non-problematic as far as ending the node without splitting * it across a potential multi-char fold. <len> contains the * number of bytes in the node up-to and including that * character, or is 0 if there is no such character, meaning * the whole node contains only problematic characters. 
In * this case, give up and just take the node as-is. We can't * do any better */ if (len == 0) { len = full_len; /* If the node ends in an 's' we make sure it stays EXACTF, * as if it turns into an EXACTFU, it could later get * joined with another 's' that would then wrongly match * the sharp s */ if (maybe_exactfu && isALPHA_FOLD_EQ(ender, 's')) { maybe_exactfu = FALSE; } } else { /* Here, the node does contain some characters that aren't * problematic. If one such is the final character in the * node, we are done */ if (len == full_len) { goto loopdone; } else if (len + ((UTF) ? UTF8SKIP(s) : 1) == full_len) { /* If the final character is problematic, but the * penultimate is not, back-off that last character to * later start a new node with it */ p = oldp; goto loopdone; } /* Here, the final non-problematic character is earlier * in the input than the penultimate character. What we do * is reparse from the beginning, going up only as far as * this final ok one, thus guaranteeing that the node ends * in an acceptable character. The reason we reparse is * that we know how far in the character is, but we don't * know how to correlate its position with the input parse. * An alternate implementation would be to build that * correlation as we go along during the original parse, * but that would entail extra work for every node, whereas * this code gets executed only when the string is too * large for the node, and the final two characters are * problematic, an infrequent occurrence. Yet another * possible strategy would be to save the tail of the * string, and the next time regatom is called, initialize * with that. The problem with this is that unless you * back off one more character, you won't be guaranteed * regatom will get called again, unless regbranch, * regpiece ... are also changed. If you do back off that * extra character, so that there is input guaranteed to * force calling regatom, you can't handle the case where * just the first character in the node is acceptable. I * (khw) decided to try this method which doesn't have that * pitfall; if performance issues are found, we can do a * combination of the current approach plus that one */ upper_parse = len; len = 0; s = s0; goto reparse; } } /* End of verifying node ends with an appropriate char */ loopdone: /* Jumped to when encounters something that shouldn't be in the node */ /* I (khw) don't know if you can get here with zero length, but the * old code handled this situation by creating a zero-length EXACT * node. Might as well be NOTHING instead */ if (len == 0) { OP(ret) = NOTHING; } else { if (FOLD) { /* If 'maybe_exact' is still set here, means there are no * code points in the node that participate in folds; * similarly for 'maybe_exactfu' and code points that match * differently depending on UTF8ness of the target string * (for /u), or depending on locale for /l */ if (maybe_exact) { OP(ret) = (LOC) ? EXACTL : EXACT; } else if (maybe_exactfu) { OP(ret) = (LOC) ? 
EXACTFLU8 : EXACTFU; } } alloc_maybe_populate_EXACT(pRExC_state, ret, flagp, len, ender, FALSE /* Don't look to see if could be turned into an EXACT node, as we have already computed that */ ); } RExC_parse = p - 1; Set_Node_Cur_Length(ret, parse_start); RExC_parse = p; { /* len is STRLEN which is unsigned, need to copy to signed */ IV iv = len; if (iv < 0) vFAIL("Internal disaster"); } } /* End of label 'defchar:' */ break; } /* End of giant switch on input character */ /* Position parse to next real character */ skip_to_be_ignored_text(pRExC_state, &RExC_parse, FALSE /* Don't force to /x */ ); if (PASS2 && *RExC_parse == '{' && OP(ret) != SBOL && ! regcurly(RExC_parse)) { ckWARNregdep(RExC_parse + 1, "Unescaped left brace in regex is deprecated here (and will be fatal in Perl 5.30), passed through"); } return(ret); } STATIC void S_populate_ANYOF_from_invlist(pTHX_ regnode *node, SV** invlist_ptr) { /* Uses the inversion list '*invlist_ptr' to populate the ANYOF 'node'. It * sets up the bitmap and any flags, removing those code points from the * inversion list, setting it to NULL should it become completely empty */ PERL_ARGS_ASSERT_POPULATE_ANYOF_FROM_INVLIST; assert(PL_regkind[OP(node)] == ANYOF); ANYOF_BITMAP_ZERO(node); if (*invlist_ptr) { /* This gets set if we actually need to modify things */ bool change_invlist = FALSE; UV start, end; /* Start looking through *invlist_ptr */ invlist_iterinit(*invlist_ptr); while (invlist_iternext(*invlist_ptr, &start, &end)) { UV high; int i; if (end == UV_MAX && start <= NUM_ANYOF_CODE_POINTS) { ANYOF_FLAGS(node) |= ANYOF_MATCHES_ALL_ABOVE_BITMAP; } /* Quit if are above what we should change */ if (start >= NUM_ANYOF_CODE_POINTS) { break; } change_invlist = TRUE; /* Set all the bits in the range, up to the max that we are doing */ high = (end < NUM_ANYOF_CODE_POINTS - 1) ? end : NUM_ANYOF_CODE_POINTS - 1; for (i = start; i <= (int) high; i++) { if (! ANYOF_BITMAP_TEST(node, i)) { ANYOF_BITMAP_SET(node, i); } } } invlist_iterfinish(*invlist_ptr); /* Done with loop; remove any code points that are in the bitmap from * *invlist_ptr; similarly for code points above the bitmap if we have * a flag to match all of them anyways */ if (change_invlist) { _invlist_subtract(*invlist_ptr, PL_InBitmap, invlist_ptr); } if (ANYOF_FLAGS(node) & ANYOF_MATCHES_ALL_ABOVE_BITMAP) { _invlist_intersection(*invlist_ptr, PL_InBitmap, invlist_ptr); } /* If have completely emptied it, remove it completely */ if (_invlist_len(*invlist_ptr) == 0) { SvREFCNT_dec_NN(*invlist_ptr); *invlist_ptr = NULL; } } } /* Parse POSIX character classes: [[:foo:]], [[=foo=]], [[.foo.]]. Character classes ([:foo:]) can also be negated ([:^foo:]). Returns a named class id (ANYOF_XXX) if successful, -1 otherwise. Equivalence classes ([=foo=]) and composites ([.foo.]) are parsed, but trigger failures because they are currently unimplemented. */ #define POSIXCC_DONE(c) ((c) == ':') #define POSIXCC_NOTYET(c) ((c) == '=' || (c) == '.') #define POSIXCC(c) (POSIXCC_DONE(c) || POSIXCC_NOTYET(c)) #define MAYBE_POSIXCC(c) (POSIXCC(c) || (c) == '^' || (c) == ';') #define WARNING_PREFIX "Assuming NOT a POSIX class since " #define NO_BLANKS_POSIX_WARNING "no blanks are allowed in one" #define SEMI_COLON_POSIX_WARNING "a semi-colon was found instead of a colon" #define NOT_MEANT_TO_BE_A_POSIX_CLASS (OOB_NAMEDCLASS - 1) /* 'posix_warnings' and 'warn_text' are names of variables in the following * routine. q.v. */ #define ADD_POSIX_WARNING(p, text) STMT_START { \ if (posix_warnings) { \ if (! 
RExC_warn_text ) RExC_warn_text = (AV *) sv_2mortal((SV *) newAV()); \ av_push(RExC_warn_text, Perl_newSVpvf(aTHX_ \ WARNING_PREFIX \ text \ REPORT_LOCATION, \ REPORT_LOCATION_ARGS(p))); \ } \ } STMT_END #define CLEAR_POSIX_WARNINGS() \ STMT_START { \ if (posix_warnings && RExC_warn_text) \ av_clear(RExC_warn_text); \ } STMT_END #define CLEAR_POSIX_WARNINGS_AND_RETURN(ret) \ STMT_START { \ CLEAR_POSIX_WARNINGS(); \ return ret; \ } STMT_END STATIC int S_handle_possible_posix(pTHX_ RExC_state_t *pRExC_state, const char * const s, /* Where the putative posix class begins. Normally, this is one past the '['. This parameter exists so it can be somewhere besides RExC_parse. */ char ** updated_parse_ptr, /* Where to set the updated parse pointer, or NULL */ AV ** posix_warnings, /* Where to place any generated warnings, or NULL */ const bool check_only /* Don't die if error */ ) { /* This parses what the caller thinks may be one of the three POSIX * constructs: * 1) a character class, like [:blank:] * 2) a collating symbol, like [. .] * 3) an equivalence class, like [= =] * In the latter two cases, it croaks if it finds a syntactically legal * one, as these are not handled by Perl. * * The main purpose is to look for a POSIX character class. It returns: * a) the class number * if it is a completely syntactically and semantically legal class. * 'updated_parse_ptr', if not NULL, is set to point to just after the * closing ']' of the class * b) OOB_NAMEDCLASS * if it appears that one of the three POSIX constructs was meant, but * its specification was somehow defective. 'updated_parse_ptr', if * not NULL, is set to point to the character just after the end * character of the class. See below for handling of warnings. * c) NOT_MEANT_TO_BE_A_POSIX_CLASS * if it doesn't appear that a POSIX construct was intended. * 'updated_parse_ptr' is not changed. No warnings nor errors are * raised. * * In b) there may be errors or warnings generated. If 'check_only' is * TRUE, then any errors are discarded. Warnings are returned to the * caller via an AV* created into '*posix_warnings' if it is not NULL. If * instead it is NULL, warnings are suppressed. This is done in all * passes. The reason for this is that the rest of the parsing is heavily * dependent on whether this routine found a valid posix class or not. If * it did, the closing ']' is absorbed as part of the class. If no class, * or an invalid one is found, any ']' will be considered the terminator of * the outer bracketed character class, leading to very different results. * In particular, a '(?[ ])' construct will likely have a syntax error if * the class is parsed other than intended, and this will happen in pass1, * before the warnings would normally be output. This mechanism allows the * caller to output those warnings in pass1 just before dieing, giving a * much better clue as to what is wrong. * * The reason for this function, and its complexity is that a bracketed * character class can contain just about anything. But it's easy to * mistype the very specific posix class syntax but yielding a valid * regular bracketed class, so it silently gets compiled into something * quite unintended. * * The solution adopted here maintains backward compatibility except that * it adds a warning if it looks like a posix class was intended but * improperly specified. The warning is not raised unless what is input * very closely resembles one of the 14 legal posix classes. To do this, * it uses fuzzy parsing. 
It calculates how many single-character edits it * would take to transform what was input into a legal posix class. Only * if that number is quite small does it think that the intention was a * posix class. Obviously these are heuristics, and there will be cases * where it errs on one side or another, and they can be tweaked as * experience informs. * * The syntax for a legal posix class is: * * qr/(?xa: \[ : \^? [[:lower:]]{4,6} : \] )/ * * What this routine considers syntactically to be an intended posix class * is this (the comments indicate some restrictions that the pattern * doesn't show): * * qr/(?x: \[? # The left bracket, possibly * # omitted * \h* # possibly followed by blanks * (?: \^ \h* )? # possibly a misplaced caret * [:;]? # The opening class character, * # possibly omitted. A typo * # semi-colon can also be used. * \h* * \^? # possibly a correctly placed * # caret, but not if there was also * # a misplaced one * \h* * .{3,15} # The class name. If there are * # deviations from the legal syntax, * # its edit distance must be close * # to a real class name in order * # for it to be considered to be * # an intended posix class. * \h* * [[:punct:]]? # The closing class character, * # possibly omitted. If not a colon * # nor semi colon, the class name * # must be even closer to a valid * # one * \h* * \]? # The right bracket, possibly * # omitted. * )/ * * In the above, \h must be ASCII-only. * * These are heuristics, and can be tweaked as field experience dictates. * There will be cases when someone didn't intend to specify a posix class * that this warns as being so. The goal is to minimize these, while * maximizing the catching of things intended to be a posix class that * aren't parsed as such. */ const char* p = s; const char * const e = RExC_end; unsigned complement = 0; /* If to complement the class */ bool found_problem = FALSE; /* Assume OK until proven otherwise */ bool has_opening_bracket = FALSE; bool has_opening_colon = FALSE; int class_number = OOB_NAMEDCLASS; /* Out-of-bounds until find valid class */ const char * possible_end = NULL; /* used for a 2nd parse pass */ const char* name_start; /* ptr to class name first char */ /* If the number of single-character typos the input name is away from a * legal name is no more than this number, it is considered to have meant * the legal name */ int max_distance = 2; /* to store the name. The size determines the maximum length before we * decide that no posix class was intended. Should be at least * sizeof("alphanumeric") */ UV input_text[15]; STATIC_ASSERT_DECL(C_ARRAY_LENGTH(input_text) >= sizeof "alphanumeric"); PERL_ARGS_ASSERT_HANDLE_POSSIBLE_POSIX; CLEAR_POSIX_WARNINGS(); if (p >= e) { return NOT_MEANT_TO_BE_A_POSIX_CLASS; } if (*(p - 1) != '[') { ADD_POSIX_WARNING(p, "it doesn't start with a '['"); found_problem = TRUE; } else { has_opening_bracket = TRUE; } /* They could be confused and think you can put spaces between the * components */ if (isBLANK(*p)) { found_problem = TRUE; do { p++; } while (p < e && isBLANK(*p)); ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } /* For [. .] and [= =]. These are quite different internally from [: :], * so they are handled separately. */ if (POSIXCC_NOTYET(*p) && p < e - 3) /* 1 for the close, and 1 for the ']' and 1 for at least one char in it */ { const char open_char = *p; const char * temp_ptr = p + 1; /* These two constructs are not handled by perl, and if we find a * syntactically valid one, we croak. 
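         * (Aside, not in the upstream source: e.g. [[.hyphen.]] parses as a
         * syntactically valid collating symbol here and so dies with "POSIX
         * syntax [. .] is reserved for future extensions".)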
khw, who wrote this code, finds * this explanation of them very unclear: * http://pubs.opengroup.org/onlinepubs/009696899/basedefs/xbd_chap09.html * And searching the rest of the internet wasn't very helpful either. * It looks like just about any byte can be in these constructs, * depending on the locale. But unless the pattern is being compiled * under /l, which is very rare, Perl runs under the C or POSIX locale. * In that case, it looks like [= =] isn't allowed at all, and that * [. .] could be any single code point, but for longer strings the * constituent characters would have to be the ASCII alphabetics plus * the minus-hyphen. Any sensible locale definition would limit itself * to these. And any portable one definitely should. Trying to parse * the general case is a nightmare (see [perl #127604]). So, this code * looks only for interiors of these constructs that match: * qr/.|[-\w]{2,}/ * Using \w relaxes the apparent rules a little, without adding much * danger of mistaking something else for one of these constructs. * * [. .] in some implementations described on the internet is usable to * escape a character that otherwise is special in bracketed character * classes. For example [.].] means a literal right bracket instead of * the ending of the class * * [= =] can legitimately contain a [. .] construct, but we don't * handle this case, as that [. .] construct will later get parsed * itself and croak then. And [= =] is checked for even when not under * /l, as Perl has long done so. * * The code below relies on there being a trailing NUL, so it doesn't * have to keep checking if the parse ptr < e. */ if (temp_ptr[1] == open_char) { temp_ptr++; } else while ( temp_ptr < e && (isWORDCHAR(*temp_ptr) || *temp_ptr == '-')) { temp_ptr++; } if (*temp_ptr == open_char) { temp_ptr++; if (*temp_ptr == ']') { temp_ptr++; if (! found_problem && ! check_only) { RExC_parse = (char *) temp_ptr; vFAIL3("POSIX syntax [%c %c] is reserved for future " "extensions", open_char, open_char); } /* Here, the syntax wasn't completely valid, or else the call * is to check-only */ if (updated_parse_ptr) { *updated_parse_ptr = (char *) temp_ptr; } CLEAR_POSIX_WARNINGS_AND_RETURN(OOB_NAMEDCLASS); } } /* If we find something that started out to look like one of these * constructs, but isn't, we continue below so that it can be checked * for being a class name with a typo of '.' or '=' instead of a colon. * */ } /* Here, we think there is a possibility that a [: :] class was meant, and * we have the first real character. It could be they think the '^' comes * first */ if (*p == '^') { found_problem = TRUE; ADD_POSIX_WARNING(p + 1, "the '^' must come after the colon"); complement = 1; p++; if (isBLANK(*p)) { found_problem = TRUE; do { p++; } while (p < e && isBLANK(*p)); ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } } /* But the first character should be a colon, which they could have easily * mistyped on a qwerty keyboard as a semi-colon (and which may be hard to * distinguish from a colon, so treat that as a colon). 
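     * (Aside, not in the upstream source: so [[;alpha:]] is treated as an
     * intended-but-defective POSIX class, returning OOB_NAMEDCLASS with an
     * "a semi-colon was found instead of a colon" warning queued.)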
*/ if (*p == ':') { p++; has_opening_colon = TRUE; } else if (*p == ';') { found_problem = TRUE; p++; ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING); has_opening_colon = TRUE; } else { found_problem = TRUE; ADD_POSIX_WARNING(p, "there must be a starting ':'"); /* Consider an initial punctuation (not one of the recognized ones) to * be a left terminator */ if (*p != '^' && *p != ']' && isPUNCT(*p)) { p++; } } /* They may think that you can put spaces between the components */ if (isBLANK(*p)) { found_problem = TRUE; do { p++; } while (p < e && isBLANK(*p)); ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } if (*p == '^') { /* We consider something like [^:^alnum:]] to not have been intended to * be a posix class, but XXX maybe we should */ if (complement) { CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } complement = 1; p++; } /* Again, they may think that you can put spaces between the components */ if (isBLANK(*p)) { found_problem = TRUE; do { p++; } while (p < e && isBLANK(*p)); ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } if (*p == ']') { /* XXX This ']' may be a typo, and something else was meant. But * treating it as such creates enough complications, that that * possibility isn't currently considered here. So we assume that the * ']' is what is intended, and if we've already found an initial '[', * this leaves this construct looking like [:] or [:^], which almost * certainly weren't intended to be posix classes */ if (has_opening_bracket) { CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } /* But this function can be called when we parse the colon for * something like qr/[alpha:]]/, so we back up to look for the * beginning */ p--; if (*p == ';') { found_problem = TRUE; ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING); } else if (*p != ':') { /* XXX We are currently very restrictive here, so this code doesn't * consider the possibility that, say, /[alpha.]]/ was intended to * be a posix class. */ CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } /* Here we have something like 'foo:]'. There was no initial colon, * and we back up over 'foo. XXX Unlike the going forward case, we * don't handle typos of non-word chars in the middle */ has_opening_colon = FALSE; p--; while (p > RExC_start && isWORDCHAR(*p)) { p--; } p++; /* Here, we have positioned ourselves to where we think the first * character in the potential class is */ } /* Now the interior really starts. There are certain key characters that * can end the interior, or these could just be typos. To catch both * cases, we may have to do two passes. In the first pass, we keep on * going unless we come to a sequence that matches * qr/ [[:punct:]] [[:blank:]]* \] /xa * This means it takes a sequence to end the pass, so two typos in a row if * that wasn't what was intended. If the class is perfectly formed, just * this one pass is needed. We also stop if there are too many characters * being accumulated, but this number is deliberately set higher than any * real class. It is set high enough so that someone who thinks that * 'alphanumeric' is a correct name would get warned that it wasn't. * While doing the pass, we keep track of where the key characters were in * it. If we don't find an end to the class, and one of the key characters * was found, we redo the pass, but stop when we get to that character. * Thus the key character was considered a typo in the first pass, but a * terminator in the second. If two key characters are found, we stop at * the second one in the first pass. 
Again this can miss two typos, but * catches a single one * * In the first pass, 'possible_end' starts as NULL, and then gets set to * point to the first key character. For the second pass, it starts as -1. * */ name_start = p; parse_name: { bool has_blank = FALSE; bool has_upper = FALSE; bool has_terminating_colon = FALSE; bool has_terminating_bracket = FALSE; bool has_semi_colon = FALSE; unsigned int name_len = 0; int punct_count = 0; while (p < e) { /* Squeeze out blanks when looking up the class name below */ if (isBLANK(*p) ) { has_blank = TRUE; found_problem = TRUE; p++; continue; } /* The name will end with a punctuation */ if (isPUNCT(*p)) { const char * peek = p + 1; /* Treat any non-']' punctuation followed by a ']' (possibly * with intervening blanks) as trying to terminate the class. * ']]' is very likely to mean a class was intended (but * missing the colon), but the warning message that gets * generated shows the error position better if we exit the * loop at the bottom (eventually), so skip it here. */ if (*p != ']') { if (peek < e && isBLANK(*peek)) { has_blank = TRUE; found_problem = TRUE; do { peek++; } while (peek < e && isBLANK(*peek)); } if (peek < e && *peek == ']') { has_terminating_bracket = TRUE; if (*p == ':') { has_terminating_colon = TRUE; } else if (*p == ';') { has_semi_colon = TRUE; has_terminating_colon = TRUE; } else { found_problem = TRUE; } p = peek + 1; goto try_posix; } } /* Here we have punctuation we thought didn't end the class. * Keep track of the position of the key characters that are * more likely to have been class-enders */ if (*p == ']' || *p == '[' || *p == ':' || *p == ';') { /* Allow just one such possible class-ender not actually * ending the class. */ if (possible_end) { break; } possible_end = p; } /* If we have too many punctuation characters, no use in * keeping going */ if (++punct_count > max_distance) { break; } /* Treat the punctuation as a typo. */ input_text[name_len++] = *p; p++; } else if (isUPPER(*p)) { /* Use lowercase for lookup */ input_text[name_len++] = toLOWER(*p); has_upper = TRUE; found_problem = TRUE; p++; } else if (! UTF || UTF8_IS_INVARIANT(*p)) { input_text[name_len++] = *p; p++; } else { input_text[name_len++] = utf8_to_uvchr_buf((U8 *) p, e, NULL); p+= UTF8SKIP(p); } /* The declaration of 'input_text' is how long we allow a potential * class name to be, before saying they didn't mean a class name at * all */ if (name_len >= C_ARRAY_LENGTH(input_text)) { break; } } /* We get to here when the possible class name hasn't been properly * terminated before: * 1) we ran off the end of the pattern; or * 2) found two characters, each of which might have been intended to * be the name's terminator * 3) found so many punctuation characters in the purported name, * that the edit distance to a valid one is exceeded * 4) we decided it was more characters than anyone could have * intended to be one. */ found_problem = TRUE; /* In the final two cases, we know that looking up what we've * accumulated won't lead to a match, even a fuzzy one. 
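         * (Aside, not in the upstream source: C_ARRAY_LENGTH(input_text)
         * bounds how long a would-be name can get and max_distance bounds the
         * typo budget, so exceeding either lets us bail out without running
         * the edit-distance scan at all.)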
*/ if ( name_len >= C_ARRAY_LENGTH(input_text) || punct_count > max_distance) { /* If there was an intermediate key character that could have been * an intended end, redo the parse, but stop there */ if (possible_end && possible_end != (char *) -1) { possible_end = (char *) -1; /* Special signal value to say we've done a first pass */ p = name_start; goto parse_name; } /* Otherwise, it can't have meant to have been a class */ CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } /* If we ran off the end, and the final character was a punctuation * one, back up one, to look at that final one just below. Later, we * will restore the parse pointer if appropriate */ if (name_len && p == e && isPUNCT(*(p-1))) { p--; name_len--; } if (p < e && isPUNCT(*p)) { if (*p == ']') { has_terminating_bracket = TRUE; /* If this is a 2nd ']', and the first one is just below this * one, consider that to be the real terminator. This gives a * uniform and better positioning for the warning message */ if ( possible_end && possible_end != (char *) -1 && *possible_end == ']' && name_len && input_text[name_len - 1] == ']') { name_len--; p = possible_end; /* And this is actually equivalent to having done the 2nd * pass now, so set it to not try again */ possible_end = (char *) -1; } } else { if (*p == ':') { has_terminating_colon = TRUE; } else if (*p == ';') { has_semi_colon = TRUE; has_terminating_colon = TRUE; } p++; } } try_posix: /* Here, we have a class name to look up. We can short circuit the * stuff below for short names that can't possibly be meant to be a * class name. (We can do this on the first pass, as any second pass * will yield an even shorter name) */ if (name_len < 3) { CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } /* Find which class it is. Initially switch on the length of the name. * */ switch (name_len) { case 4: if (memEQs(name_start, 4, "word")) { /* this is not POSIX, this is the Perl \w */ class_number = ANYOF_WORDCHAR; } break; case 5: /* Names all of length 5: alnum alpha ascii blank cntrl digit * graph lower print punct space upper * Offset 4 gives the best switch position. */ switch (name_start[4]) { case 'a': if (memBEGINs(name_start, 5, "alph")) /* alpha */ class_number = ANYOF_ALPHA; break; case 'e': if (memBEGINs(name_start, 5, "spac")) /* space */ class_number = ANYOF_SPACE; break; case 'h': if (memBEGINs(name_start, 5, "grap")) /* graph */ class_number = ANYOF_GRAPH; break; case 'i': if (memBEGINs(name_start, 5, "asci")) /* ascii */ class_number = ANYOF_ASCII; break; case 'k': if (memBEGINs(name_start, 5, "blan")) /* blank */ class_number = ANYOF_BLANK; break; case 'l': if (memBEGINs(name_start, 5, "cntr")) /* cntrl */ class_number = ANYOF_CNTRL; break; case 'm': if (memBEGINs(name_start, 5, "alnu")) /* alnum */ class_number = ANYOF_ALPHANUMERIC; break; case 'r': if (memBEGINs(name_start, 5, "lowe")) /* lower */ class_number = (FOLD) ? ANYOF_CASED : ANYOF_LOWER; else if (memBEGINs(name_start, 5, "uppe")) /* upper */ class_number = (FOLD) ? 
ANYOF_CASED : ANYOF_UPPER; break; case 't': if (memBEGINs(name_start, 5, "digi")) /* digit */ class_number = ANYOF_DIGIT; else if (memBEGINs(name_start, 5, "prin")) /* print */ class_number = ANYOF_PRINT; else if (memBEGINs(name_start, 5, "punc")) /* punct */ class_number = ANYOF_PUNCT; break; } break; case 6: if (memEQs(name_start, 6, "xdigit")) class_number = ANYOF_XDIGIT; break; } /* If the name exactly matches a posix class name the class number will * here be set to it, and the input almost certainly was meant to be a * posix class, so we can skip further checking. If instead the syntax * is exactly correct, but the name isn't one of the legal ones, we * will return that as an error below. But if neither of these apply, * it could be that no posix class was intended at all, or that one * was, but there was a typo. We tease these apart by doing fuzzy * matching on the name */ if (class_number == OOB_NAMEDCLASS && found_problem) { const UV posix_names[][6] = { { 'a', 'l', 'n', 'u', 'm' }, { 'a', 'l', 'p', 'h', 'a' }, { 'a', 's', 'c', 'i', 'i' }, { 'b', 'l', 'a', 'n', 'k' }, { 'c', 'n', 't', 'r', 'l' }, { 'd', 'i', 'g', 'i', 't' }, { 'g', 'r', 'a', 'p', 'h' }, { 'l', 'o', 'w', 'e', 'r' }, { 'p', 'r', 'i', 'n', 't' }, { 'p', 'u', 'n', 'c', 't' }, { 's', 'p', 'a', 'c', 'e' }, { 'u', 'p', 'p', 'e', 'r' }, { 'w', 'o', 'r', 'd' }, { 'x', 'd', 'i', 'g', 'i', 't' } }; /* The names of the above all have added NULs to make them the same * size, so we need to also have the real lengths */ const UV posix_name_lengths[] = { sizeof("alnum") - 1, sizeof("alpha") - 1, sizeof("ascii") - 1, sizeof("blank") - 1, sizeof("cntrl") - 1, sizeof("digit") - 1, sizeof("graph") - 1, sizeof("lower") - 1, sizeof("print") - 1, sizeof("punct") - 1, sizeof("space") - 1, sizeof("upper") - 1, sizeof("word") - 1, sizeof("xdigit")- 1 }; unsigned int i; int temp_max = max_distance; /* Use a temporary, so if we reparse, we haven't changed the outer one */ /* Use a smaller max edit distance if we are missing one of the * delimiters */ if ( has_opening_bracket + has_opening_colon < 2 || has_terminating_bracket + has_terminating_colon < 2) { temp_max--; } /* See if the input name is close to a legal one */ for (i = 0; i < C_ARRAY_LENGTH(posix_names); i++) { /* Short circuit call if the lengths are too far apart to be * able to match */ if (abs( (int) (name_len - posix_name_lengths[i])) > temp_max) { continue; } if (edit_distance(input_text, posix_names[i], name_len, posix_name_lengths[i], temp_max ) > -1) { /* If it is close, it probably was intended to be a class */ goto probably_meant_to_be; } } /* Here the input name is not close enough to a valid class name * for us to consider it to be intended to be a posix class. If * we haven't already done so, and the parse found a character that * could have been terminators for the name, but which we absorbed * as typos during the first pass, repeat the parse, signalling it * to stop at that character */ if (possible_end && possible_end != (char *) -1) { possible_end = (char *) -1; p = name_start; goto parse_name; } /* Here neither pass found a close-enough class name */ CLEAR_POSIX_WARNINGS_AND_RETURN(NOT_MEANT_TO_BE_A_POSIX_CLASS); } probably_meant_to_be: /* Here we think that a posix specification was intended. 
Update any * parse pointer */ if (updated_parse_ptr) { *updated_parse_ptr = (char *) p; } /* If a posix class name was intended but incorrectly specified, we * output or return the warnings */ if (found_problem) { /* We set flags for these issues in the parse loop above instead of * adding them to the list of warnings, because we can parse it * twice, and we only want one warning instance */ if (has_upper) { ADD_POSIX_WARNING(p, "the name must be all lowercase letters"); } if (has_blank) { ADD_POSIX_WARNING(p, NO_BLANKS_POSIX_WARNING); } if (has_semi_colon) { ADD_POSIX_WARNING(p, SEMI_COLON_POSIX_WARNING); } else if (! has_terminating_colon) { ADD_POSIX_WARNING(p, "there is no terminating ':'"); } if (! has_terminating_bracket) { ADD_POSIX_WARNING(p, "there is no terminating ']'"); } if (posix_warnings && RExC_warn_text && av_top_index(RExC_warn_text) > -1) { *posix_warnings = RExC_warn_text; } } else if (class_number != OOB_NAMEDCLASS) { /* If it is a known class, return the class. The class number * #defines are structured so each complement is +1 to the normal * one */ CLEAR_POSIX_WARNINGS_AND_RETURN(class_number + complement); } else if (! check_only) { /* Here, it is an unrecognized class. This is an error (unless the * call is to check only, which we've already handled above) */ const char * const complement_string = (complement) ? "^" : ""; RExC_parse = (char *) p; vFAIL3utf8f("POSIX class [:%s%" UTF8f ":] unknown", complement_string, UTF8fARG(UTF, RExC_parse - name_start - 2, name_start)); } } return OOB_NAMEDCLASS; } #undef ADD_POSIX_WARNING STATIC unsigned int S_regex_set_precedence(const U8 my_operator) { /* Returns the precedence in the (?[...]) construct of the input operator, * specified by its character representation. The precedence follows * general Perl rules, but it extends this so that ')' and ']' have (low) * precedence even though they aren't really operators */ switch (my_operator) { case '!': return 5; case '&': return 4; case '^': case '|': case '+': case '-': return 3; case ')': return 2; case ']': return 1; } NOT_REACHED; /* NOTREACHED */ return 0; /* Silence compiler warning */ } STATIC regnode * S_handle_regex_sets(pTHX_ RExC_state_t *pRExC_state, SV** return_invlist, I32 *flagp, U32 depth, char * const oregcomp_parse) { /* Handle the (?[...]) construct to do set operations */ U8 curchar; /* Current character being parsed */ UV start, end; /* End points of code point ranges */ SV* final = NULL; /* The end result inversion list */ SV* result_string; /* 'final' stringified */ AV* stack; /* stack of operators and operands not yet resolved */ AV* fence_stack = NULL; /* A stack containing the positions in 'stack' of where the undealt-with left parens would be if they were actually put there */ /* The 'volatile' is a workaround for an optimiser bug * in Solaris Studio 12.3. See RT #127455 */ volatile IV fence = 0; /* Position of where most recent undealt- with left paren in stack is; -1 if none. */ STRLEN len; /* Temporary */ regnode* node; /* Temporary, and final regnode returned by this function */ const bool save_fold = FOLD; /* Temporary */ char *save_end, *save_parse; /* Temporaries */ const bool in_locale = LOC; /* we turn off /l during processing */ AV* posix_warnings = NULL; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_HANDLE_REGEX_SETS; if (in_locale) { set_regex_charset(&RExC_flags, REGEX_UNICODE_CHARSET); } REQUIRE_UNI_RULES(flagp, NULL); /* The use of this operator implies /u. 
This is required so that the compile time values are valid in all runtime cases */ /* This will return only an ANYOF regnode, or (unlikely) something smaller * (such as EXACT). Thus we can skip most everything if just sizing. We * call regclass to handle '[]' so as to not have to reinvent its parsing * rules here (throwing away the size it computes each time). And, we exit * upon an unescaped ']' that isn't one ending a regclass. To do both * these things, we need to realize that something preceded by a backslash * is escaped, so we have to keep track of backslashes */ if (SIZE_ONLY) { UV depth = 0; /* how many nested (?[...]) constructs */ while (RExC_parse < RExC_end) { SV* current = NULL; skip_to_be_ignored_text(pRExC_state, &RExC_parse, TRUE /* Force /x */ ); switch (*RExC_parse) { case '?': if (RExC_parse[1] == '[') depth++, RExC_parse++; /* FALLTHROUGH */ default: break; case '\\': /* Skip past this, so the next character gets skipped, after * the switch */ RExC_parse++; if (*RExC_parse == 'c') { /* Skip the \cX notation for control characters */ RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; } break; case '[': { /* See if this is a [:posix:] class. */ bool is_posix_class = (OOB_NAMEDCLASS < handle_possible_posix(pRExC_state, RExC_parse + 1, NULL, NULL, TRUE /* checking only */)); /* If it is a posix class, leave the parse pointer at the * '[' to fool regclass() into thinking it is part of a * '[[:posix:]]'. */ if (! is_posix_class) { RExC_parse++; } /* regclass() can only return RESTART_PASS1 and NEED_UTF8 * if multi-char folds are allowed. */ if (!regclass(pRExC_state, flagp,depth+1, is_posix_class, /* parse the whole char class only if not a posix class */ FALSE, /* don't allow multi-char folds */ TRUE, /* silence non-portable warnings. */ TRUE, /* strict */ FALSE, /* Require return to be an ANYOF */ &current, &posix_warnings )) FAIL2("panic: regclass returned NULL to handle_sets, " "flags=%#" UVxf, (UV) *flagp); /* function call leaves parse pointing to the ']', except * if we faked it */ if (is_posix_class) { RExC_parse--; } SvREFCNT_dec(current); /* In case it returned something */ break; } case ']': if (depth--) break; RExC_parse++; if (*RExC_parse == ')') { node = reganode(pRExC_state, ANYOF, 0); RExC_size += ANYOF_SKIP; nextchar(pRExC_state); Set_Node_Length(node, RExC_parse - oregcomp_parse + 1); /* MJD */ if (in_locale) { set_regex_charset(&RExC_flags, REGEX_LOCALE_CHARSET); } return node; } goto no_close; } RExC_parse += UTF ? UTF8SKIP(RExC_parse) : 1; } no_close: /* We output the messages even if warnings are off, because we'll fail * the very next thing, and these give a likely diagnosis for that */ if (posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0) { output_or_return_posix_warnings(pRExC_state, posix_warnings, NULL); } FAIL("Syntax error in (?[...])"); } /* Pass 2 only after this. */ Perl_ck_warner_d(aTHX_ packWARN(WARN_EXPERIMENTAL__REGEX_SETS), "The regex_sets feature is experimental" REPORT_LOCATION, REPORT_LOCATION_ARGS(RExC_parse)); /* Everything in this construct is a metacharacter. Operands begin with * either a '\' (for an escape sequence), or a '[' for a bracketed * character class. Any other character should be an operator, or * parenthesis for grouping. Both types of operands are handled by calling * regclass() to parse them. It is called with a parameter to indicate to * return the computed inversion list. The parsing here is implemented via * a stack. 
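     * (Aside, not in the upstream source: operators are pushed as integer SVs
     * holding the operator character, and operands as inversion-list SVs,
     * which is why IS_OPERATOR() below can simply test SvIOK().)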
Each entry on the stack is a single character representing one * of the operators; or else a pointer to an operand inversion list. */ #define IS_OPERATOR(a) SvIOK(a) #define IS_OPERAND(a) (! IS_OPERATOR(a)) /* The stack is kept in Łukasiewicz order. (That's pronounced similar * to luke-a-shave-itch (or -itz), but people who didn't want to bother * with pronouncing it called it Reverse Polish instead, but now that YOU * know how to pronounce it you can use the correct term, thus giving due * credit to the person who invented it, and impressing your geek friends. * Wikipedia says that the pronunciation of "Ł" has been changing so that * it is now more like an English initial W (as in wonk) than an L.) * * This means that, for example, 'a | b & c' is stored on the stack as * * c [4] * b [3] * & [2] * a [1] * | [0] * * where the numbers in brackets give the stack [array] element number. * In this implementation, parentheses are not stored on the stack. * Instead a '(' creates a "fence" so that the part of the stack below the * fence is invisible except to the corresponding ')' (this allows us to * replace testing for parens, by using instead subtraction of the fence * position). As new operands are processed they are pushed onto the stack * (except as noted in the next paragraph). New operators of higher * precedence than the current final one are inserted on the stack before * the lhs operand (so that when the rhs is pushed next, everything will be * in the correct positions shown above). When an operator of equal or * lower precedence is encountered in parsing, all the stacked operations * of equal or higher precedence are evaluated, leaving the result as the * top entry on the stack. This makes higher precedence operations * evaluate before lower precedence ones, and causes operations of equal * precedence to left associate. * * The only unary operator '!' is immediately pushed onto the stack when * encountered. When an operand is encountered, if the top of the stack is * a '!', the complement is immediately performed, and the '!' popped. The * resulting value is treated as a new operand, and the logic in the * previous paragraph is executed. Thus in the expression * [a] + ! [b] * the stack looks like * * ! * a * + * * as 'b' gets parsed, the latter gets evaluated to '!b', and the stack * becomes * * !b * a * + * * A ')' is treated as an operator with lower precedence than all the * aforementioned ones, which causes all operations on the stack above the * corresponding '(' to be evaluated down to a single resultant operand. * Then the fence for the '(' is removed, and the operand goes through the * algorithm above, without the fence. * * A separate stack is kept of the fence positions, so that the position of * the latest so-far unbalanced '(' is at the top of it. 
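* * (As an illustrative walk-through of these rules, not something the * code itself consults: consider parsing 'a & b | c'. After 'a', '&', and * 'b' have been processed, the stack is * * b [2] * a [1] * & [0] * * When the '|' is seen, the stacked '&' has higher precedence, so 'a & b' * is evaluated immediately and its result becomes the only entry; the '|' * is then inserted below that result and 'c' is pushed on top, giving * * c [2] * a&b [1] * | [0] * * awaiting the ']' that ends the construct.) 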
* * The ']' ending the construct is treated as the lowest operator of all, * so that everything gets evaluated down to a single operand, which is the * result */ sv_2mortal((SV *)(stack = newAV())); sv_2mortal((SV *)(fence_stack = newAV())); while (RExC_parse < RExC_end) { I32 top_index; /* Index of top-most element in 'stack' */ SV** top_ptr; /* Pointer to top 'stack' element */ SV* current = NULL; /* To contain the current inversion list operand */ SV* only_to_avoid_leaks; skip_to_be_ignored_text(pRExC_state, &RExC_parse, TRUE /* Force /x */ ); if (RExC_parse >= RExC_end) { Perl_croak(aTHX_ "panic: Read past end of '(?[ ])'"); } curchar = UCHARAT(RExC_parse); redo_curchar: #ifdef ENABLE_REGEX_SETS_DEBUGGING /* Enable with -Accflags=-DENABLE_REGEX_SETS_DEBUGGING */ DEBUG_U(dump_regex_sets_structures(pRExC_state, stack, fence, fence_stack)); #endif top_index = av_tindex_skip_len_mg(stack); switch (curchar) { SV** stacked_ptr; /* Ptr to something already on 'stack' */ char stacked_operator; /* The topmost operator on the 'stack'. */ SV* lhs; /* Operand to the left of the operator */ SV* rhs; /* Operand to the right of the operator */ SV* fence_ptr; /* Pointer to top element of the fence stack */ case '(': if ( RExC_parse < RExC_end - 1 && (UCHARAT(RExC_parse + 1) == '?')) { /* If this is a '(?', it could be an embedded '(?flags:(?[...])'. * This happens when we have something like * * my $thai_or_lao = qr/(?[ \p{Thai} + \p{Lao} ])/; * ... * qr/(?[ \p{Digit} & $thai_or_lao ])/; * * Here we would be handling the interpolated * '$thai_or_lao'. We handle this by a recursive call to * ourselves which returns the inversion list the * interpolated expression evaluates to. We use the flags * from the interpolated pattern. */ U32 save_flags = RExC_flags; const char * save_parse; RExC_parse += 2; /* Skip past the '(?' */ save_parse = RExC_parse; /* Parse any flags for the '(?' */ parse_lparen_question_flags(pRExC_state); if (RExC_parse == save_parse /* Makes sure there was at least one flag (or else this embedding wasn't compiled) */ || RExC_parse >= RExC_end - 4 || UCHARAT(RExC_parse) != ':' || UCHARAT(++RExC_parse) != '(' || UCHARAT(++RExC_parse) != '?' || UCHARAT(++RExC_parse) != '[') { /* In combination with the above, this moves the * pointer to the point just after the first erroneous * character (or if there are no flags, to where they * should have been) */ if (RExC_parse >= RExC_end - 4) { RExC_parse = RExC_end; } else if (RExC_parse != save_parse) { RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; } vFAIL("Expecting '(?flags:(?[...'"); } /* Recurse, with the meat of the embedded expression */ RExC_parse++; (void) handle_regex_sets(pRExC_state, &current, flagp, depth+1, oregcomp_parse); /* Here, 'current' contains the embedded expression's * inversion list, and RExC_parse points to the trailing * ']'; the next character should be the ')' */ RExC_parse++; assert(UCHARAT(RExC_parse) == ')'); /* Then the ')' matching the original '(' handled by this * case: statement */ RExC_parse++; assert(UCHARAT(RExC_parse) == ')'); RExC_parse++; RExC_flags = save_flags; goto handle_operand; } /* A regular '('. Look behind for illegal syntax */ if (top_index - fence >= 0) { /* If the top entry on the stack is an operator, it had * better be a '!', otherwise the entry below the top * operand should be an operator */ if ( ! (top_ptr = av_fetch(stack, top_index, FALSE)) || (IS_OPERATOR(*top_ptr) && SvUV(*top_ptr) != '!') || ( IS_OPERAND(*top_ptr) && ( top_index - fence < 1 || ! 
(stacked_ptr = av_fetch(stack, top_index - 1, FALSE)) || ! IS_OPERATOR(*stacked_ptr)))) { RExC_parse++; vFAIL("Unexpected '(' with no preceding operator"); } } /* Stack the position of this undealt-with left paren */ av_push(fence_stack, newSViv(fence)); fence = top_index + 1; break; case '\\': /* regclass() can only return RESTART_PASS1 and NEED_UTF8 if * multi-char folds are allowed. */ if (!regclass(pRExC_state, flagp,depth+1, TRUE, /* means parse just the next thing */ FALSE, /* don't allow multi-char folds */ FALSE, /* don't silence non-portable warnings. */ TRUE, /* strict */ FALSE, /* Require return to be an ANYOF */ &current, NULL)) { FAIL2("panic: regclass returned NULL to handle_sets, " "flags=%#" UVxf, (UV) *flagp); } /* regclass() will return with parsing just the \ sequence, * leaving the parse pointer at the next thing to parse */ RExC_parse--; goto handle_operand; case '[': /* Is a bracketed character class */ { /* See if this is a [:posix:] class. */ bool is_posix_class = (OOB_NAMEDCLASS < handle_possible_posix(pRExC_state, RExC_parse + 1, NULL, NULL, TRUE /* checking only */)); /* If it is a posix class, leave the parse pointer at the '[' * to fool regclass() into thinking it is part of a * '[[:posix:]]'. */ if (! is_posix_class) { RExC_parse++; } /* regclass() can only return RESTART_PASS1 and NEED_UTF8 if * multi-char folds are allowed. */ if (!regclass(pRExC_state, flagp,depth+1, is_posix_class, /* parse the whole char class only if not a posix class */ FALSE, /* don't allow multi-char folds */ TRUE, /* silence non-portable warnings. */ TRUE, /* strict */ FALSE, /* Require return to be an ANYOF */ &current, NULL )) { FAIL2("panic: regclass returned NULL to handle_sets, " "flags=%#" UVxf, (UV) *flagp); } /* function call leaves parse pointing to the ']', except if we * faked it */ if (is_posix_class) { RExC_parse--; } goto handle_operand; } case ']': if (top_index >= 1) { goto join_operators; } /* Only a single operand on the stack: are done */ goto done; case ')': if (av_tindex_skip_len_mg(fence_stack) < 0) { RExC_parse++; vFAIL("Unexpected ')'"); } /* If nothing after the fence, is missing an operand */ if (top_index - fence < 0) { RExC_parse++; goto bad_syntax; } /* If at least two things on the stack, treat this as an * operator */ if (top_index - fence >= 1) { goto join_operators; } /* Here only a single thing on the fenced stack, and there is a * fence. Get rid of it */ fence_ptr = av_pop(fence_stack); assert(fence_ptr); fence = SvIV(fence_ptr) - 1; SvREFCNT_dec_NN(fence_ptr); fence_ptr = NULL; if (fence < 0) { fence = 0; } /* Having gotten rid of the fence, we pop the operand at the * stack top and process it as a newly encountered operand */ current = av_pop(stack); if (IS_OPERAND(current)) { goto handle_operand; } RExC_parse++; goto bad_syntax; case '&': case '|': case '+': case '-': case '^': /* These binary operators should have a left operand already * parsed */ if ( top_index - fence < 0 || top_index - fence == 1 || ( ! (top_ptr = av_fetch(stack, top_index, FALSE))) || ! 
IS_OPERAND(*top_ptr)) { goto unexpected_binary; } /* If only the one operand is on the part of the stack visible * to us, we just place this operator in the proper position */ if (top_index - fence < 2) { /* Place the operator before the operand */ SV* lhs = av_pop(stack); av_push(stack, newSVuv(curchar)); av_push(stack, lhs); break; } /* But if there is something else on the stack, we need to * process it before this new operator if and only if the * stacked operation has equal or higher precedence than the * new one */ join_operators: /* The operator on the stack is supposed to be below both its * operands */ if ( ! (stacked_ptr = av_fetch(stack, top_index - 2, FALSE)) || IS_OPERAND(*stacked_ptr)) { /* But if not, it's legal and indicates we are completely * done if and only if we're currently processing a ']', * which should be the final thing in the expression */ if (curchar == ']') { goto done; } unexpected_binary: RExC_parse++; vFAIL2("Unexpected binary operator '%c' with no " "preceding operand", curchar); } stacked_operator = (char) SvUV(*stacked_ptr); if (regex_set_precedence(curchar) > regex_set_precedence(stacked_operator)) { /* Here, the new operator has higher precedence than the * stacked one. This means we need to add the new one to * the stack to await its rhs operand (and maybe more * stuff). We put it before the lhs operand, leaving * untouched the stacked operator and everything below it * */ lhs = av_pop(stack); assert(IS_OPERAND(lhs)); av_push(stack, newSVuv(curchar)); av_push(stack, lhs); break; } /* Here, the new operator has equal or lower precedence than * what's already there. This means the operation already * there should be performed now, before the new one. */ rhs = av_pop(stack); if (! IS_OPERAND(rhs)) { /* This can happen when a ! is not followed by an operand, * like in /(?[\t &!])/ */ goto bad_syntax; } lhs = av_pop(stack); if (! IS_OPERAND(lhs)) { /* This can happen when there is an empty (), like in * /(?[[0]+()+])/ */ goto bad_syntax; } switch (stacked_operator) { case '&': _invlist_intersection(lhs, rhs, &rhs); break; case '|': case '+': _invlist_union(lhs, rhs, &rhs); break; case '-': _invlist_subtract(lhs, rhs, &rhs); break; case '^': /* The union minus the intersection */ { SV* i = NULL; SV* u = NULL; _invlist_union(lhs, rhs, &u); _invlist_intersection(lhs, rhs, &i); _invlist_subtract(u, i, &rhs); SvREFCNT_dec_NN(i); SvREFCNT_dec_NN(u); break; } } SvREFCNT_dec(lhs); /* Here, the higher precedence operation has been done, and the * result is in 'rhs'. We overwrite the stacked operator with * the result. Then we redo this code to either push the new * operator onto the stack or perform any higher precedence * stacked operation */ only_to_avoid_leaks = av_pop(stack); SvREFCNT_dec(only_to_avoid_leaks); av_push(stack, rhs); goto redo_curchar; case '!': /* Highest priority, right associative */ /* If what's already at the top of the stack is another '!', * they just cancel each other out */ if ( (top_ptr = av_fetch(stack, top_index, FALSE)) && (IS_OPERATOR(*top_ptr) && SvUV(*top_ptr) == '!')) { only_to_avoid_leaks = av_pop(stack); SvREFCNT_dec(only_to_avoid_leaks); } else { /* Otherwise, since it's right associative, just push onto the stack */ av_push(stack, newSVuv(curchar)); } break; default: RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; vFAIL("Unexpected character"); handle_operand: /* Here 'current' is the operand. If something is already on the * stack, we have to check if it is a !. 
But first, the code above * may have altered the stack in the time since we earlier set * 'top_index'. */ top_index = av_tindex_skip_len_mg(stack); if (top_index - fence >= 0) { /* If the top entry on the stack is an operator, it had better * be a '!', otherwise the entry below the top operand should * be an operator */ top_ptr = av_fetch(stack, top_index, FALSE); assert(top_ptr); if (IS_OPERATOR(*top_ptr)) { /* The only permissible operator at the top of the stack is * '!', which is applied immediately to this operand. */ curchar = (char) SvUV(*top_ptr); if (curchar != '!') { SvREFCNT_dec(current); vFAIL2("Unexpected binary operator '%c' with no " "preceding operand", curchar); } _invlist_invert(current); only_to_avoid_leaks = av_pop(stack); SvREFCNT_dec(only_to_avoid_leaks); /* And we redo with the inverted operand. This allows * handling multiple ! in a row */ goto handle_operand; } /* Single operand is ok only for the non-binary ')' * operator */ else if ((top_index - fence == 0 && curchar != ')') || (top_index - fence > 0 && (! (stacked_ptr = av_fetch(stack, top_index - 1, FALSE)) || IS_OPERAND(*stacked_ptr)))) { SvREFCNT_dec(current); vFAIL("Operand with no preceding operator"); } } /* Here there was nothing on the stack or the top element was * another operand. Just add this new one */ av_push(stack, current); } /* End of switch on next parse token */ RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; } /* End of loop parsing through the construct */ done: if (av_tindex_skip_len_mg(fence_stack) >= 0) { vFAIL("Unmatched ("); } if (av_tindex_skip_len_mg(stack) < 0 /* Was empty */ || ((final = av_pop(stack)) == NULL) || ! IS_OPERAND(final) || SvTYPE(final) != SVt_INVLIST || av_tindex_skip_len_mg(stack) >= 0) /* More left on stack */ { bad_syntax: SvREFCNT_dec(final); vFAIL("Incomplete expression within '(?[ ])'"); } /* Here, 'final' is the resultant inversion list from evaluating the * expression. Return it if so requested */ if (return_invlist) { *return_invlist = final; return END; } /* Otherwise generate a resultant node, based on 'final'. regclass() is * expecting a string of ranges and individual code points */ invlist_iterinit(final); result_string = newSVpvs(""); while (invlist_iternext(final, &start, &end)) { if (start == end) { Perl_sv_catpvf(aTHX_ result_string, "\\x{%" UVXf "}", start); } else { Perl_sv_catpvf(aTHX_ result_string, "\\x{%" UVXf "}-\\x{%" UVXf "}", start, end); } } /* About to generate an ANYOF (or similar) node from the inversion list we * have calculated */ save_parse = RExC_parse; RExC_parse = SvPV(result_string, len); save_end = RExC_end; RExC_end = RExC_parse + len; /* We turn off folding around the call, as the class we have constructed * already has all folding taken into consideration, and we don't want * regclass() to add to that */ RExC_flags &= ~RXf_PMf_FOLD; /* regclass() can only return RESTART_PASS1 and NEED_UTF8 if multi-char * folds are allowed. */ node = regclass(pRExC_state, flagp,depth+1, FALSE, /* means parse the whole char class */ FALSE, /* don't allow multi-char folds */ TRUE, /* silence non-portable warnings. The above may very well have generated non-portable code points, but they're valid on this machine */ FALSE, /* similarly, no need for strict */ FALSE, /* Require return to be an ANYOF */ NULL, NULL ); if (!node) FAIL2("panic: regclass returned NULL to handle_sets, flags=%#" UVxf, PTR2UV(flagp)); /* Fix up the node type if we are in locale. 
(We have pretended we are * under /u for the purposes of regclass(), as this construct will only * work under UTF-8 locales. But now we change the opcode to be ANYOFL (so * as to cause any warnings about bad locales to be output in regexec.c), * and add the flag that indicates to check if not in a UTF-8 locale. The * reason we above forbid optimization into something other than an ANYOF * node is simply to minimize the number of code changes in regexec.c. * Otherwise we would have to create new EXACTish node types and deal with * them. This decision could be revisited should this construct become * popular. * * (One might think we could look at the resulting ANYOF node and suppress * the flag if everything is above 255, as those would be UTF-8 only, * but this isn't true, as the components that led to that result could * have been locale-affected, and just happen to cancel each other out * under UTF-8 locales.) */ if (in_locale) { set_regex_charset(&RExC_flags, REGEX_LOCALE_CHARSET); assert(OP(node) == ANYOF); OP(node) = ANYOFL; ANYOF_FLAGS(node) |= ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD; } if (save_fold) { RExC_flags |= RXf_PMf_FOLD; } RExC_parse = save_parse + 1; RExC_end = save_end; SvREFCNT_dec_NN(final); SvREFCNT_dec_NN(result_string); nextchar(pRExC_state); Set_Node_Length(node, RExC_parse - oregcomp_parse + 1); /* MJD */ return node; } #ifdef ENABLE_REGEX_SETS_DEBUGGING STATIC void S_dump_regex_sets_structures(pTHX_ RExC_state_t *pRExC_state, AV * stack, const IV fence, AV * fence_stack) { /* Dumps the stacks in handle_regex_sets() */ const SSize_t stack_top = av_tindex_skip_len_mg(stack); const SSize_t fence_stack_top = av_tindex_skip_len_mg(fence_stack); SSize_t i; PERL_ARGS_ASSERT_DUMP_REGEX_SETS_STRUCTURES; PerlIO_printf(Perl_debug_log, "\nParse position is:%s\n", RExC_parse); if (stack_top < 0) { PerlIO_printf(Perl_debug_log, "Nothing on stack\n"); } else { PerlIO_printf(Perl_debug_log, "Stack: (fence=%d)\n", (int) fence); for (i = stack_top; i >= 0; i--) { SV ** element_ptr = av_fetch(stack, i, FALSE); if (! element_ptr) { continue; /* Skip rather than dereference a NULL entry below */ } if (IS_OPERATOR(*element_ptr)) { PerlIO_printf(Perl_debug_log, "[%d]: %c\n", (int) i, (int) SvIV(*element_ptr)); } else { PerlIO_printf(Perl_debug_log, "[%d] ", (int) i); sv_dump(*element_ptr); } } } if (fence_stack_top < 0) { PerlIO_printf(Perl_debug_log, "Nothing on fence_stack\n"); } else { PerlIO_printf(Perl_debug_log, "Fence_stack: \n"); for (i = fence_stack_top; i >= 0; i--) { SV ** element_ptr = av_fetch(fence_stack, i, FALSE); if (! element_ptr) { continue; /* As above, guard against a NULL entry */ } PerlIO_printf(Perl_debug_log, "[%d]: %d\n", (int) i, (int) SvIV(*element_ptr)); } } } #endif #undef IS_OPERATOR #undef IS_OPERAND STATIC void S_add_above_Latin1_folds(pTHX_ RExC_state_t *pRExC_state, const U8 cp, SV** invlist) { /* This hard-codes the Latin1/above-Latin1 folding rules, so that an * innocent-looking character class, like /[ks]/i won't have to go out to * disk to find the possible matches. * * This should be called only for a Latin1-range code point, cp, which is * known to be involved in a simple fold with other code points above * Latin1. It would give false results if /aa has been specified. * Multi-char folds are outside the scope of this, and must be handled * specially. * * XXX It would be better to generate these via regen, in case a new * version of the Unicode standard adds new mappings, though that is not * really likely, and may be caught by the default: case of the switch * below. 
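* * (As a concrete illustration: /[ks]/i must also match U+212A KELVIN SIGN * and U+017F LATIN SMALL LETTER LONG S, since those code points fold to * 'k' and 's' respectively; the 'k'/'K' and 's'/'S' cases in the switch * below add exactly those code points.) 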
*/ PERL_ARGS_ASSERT_ADD_ABOVE_LATIN1_FOLDS; assert(HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(cp)); switch (cp) { case 'k': case 'K': *invlist = add_cp_to_invlist(*invlist, KELVIN_SIGN); break; case 's': case 'S': *invlist = add_cp_to_invlist(*invlist, LATIN_SMALL_LETTER_LONG_S); break; case MICRO_SIGN: *invlist = add_cp_to_invlist(*invlist, GREEK_CAPITAL_LETTER_MU); *invlist = add_cp_to_invlist(*invlist, GREEK_SMALL_LETTER_MU); break; case LATIN_CAPITAL_LETTER_A_WITH_RING_ABOVE: case LATIN_SMALL_LETTER_A_WITH_RING_ABOVE: *invlist = add_cp_to_invlist(*invlist, ANGSTROM_SIGN); break; case LATIN_SMALL_LETTER_Y_WITH_DIAERESIS: *invlist = add_cp_to_invlist(*invlist, LATIN_CAPITAL_LETTER_Y_WITH_DIAERESIS); break; #ifdef LATIN_CAPITAL_LETTER_SHARP_S /* not defined in early Unicode releases */ case LATIN_SMALL_LETTER_SHARP_S: *invlist = add_cp_to_invlist(*invlist, LATIN_CAPITAL_LETTER_SHARP_S); break; #endif #if UNICODE_MAJOR_VERSION < 3 \ || (UNICODE_MAJOR_VERSION == 3 && UNICODE_DOT_VERSION == 0) /* In 3.0 and earlier, U+0130 folded simply to 'i'; and in 3.0.1 so did * U+0131. */ case 'i': case 'I': *invlist = add_cp_to_invlist(*invlist, LATIN_CAPITAL_LETTER_I_WITH_DOT_ABOVE); # if UNICODE_DOT_DOT_VERSION == 1 *invlist = add_cp_to_invlist(*invlist, LATIN_SMALL_LETTER_DOTLESS_I); # endif break; #endif default: /* Use deprecated warning to increase the chances of this being * output */ if (PASS2) { ckWARN2reg_d(RExC_parse, "Perl folding rules are not up-to-date for 0x%02X; please use the perlbug utility to report;", cp); } break; } } STATIC void S_output_or_return_posix_warnings(pTHX_ RExC_state_t *pRExC_state, AV* posix_warnings, AV** return_posix_warnings) { /* If the final parameter is NULL, output the elements of the array given * by '*posix_warnings' as REGEXP warnings. Otherwise, the elements are * pushed onto it, (creating if necessary) */ SV * msg; const bool first_is_fatal = ! return_posix_warnings && ckDEAD(packWARN(WARN_REGEXP)); PERL_ARGS_ASSERT_OUTPUT_OR_RETURN_POSIX_WARNINGS; while ((msg = av_shift(posix_warnings)) != &PL_sv_undef) { if (return_posix_warnings) { if (! *return_posix_warnings) { /* mortalize to not leak if warnings are fatal */ *return_posix_warnings = (AV *) sv_2mortal((SV *) newAV()); } av_push(*return_posix_warnings, msg); } else { if (first_is_fatal) { /* Avoid leaking this */ av_undef(posix_warnings); /* This isn't necessary if the array is mortal, but is a fail-safe */ (void) sv_2mortal(msg); if (PASS2) { SAVEFREESV(RExC_rx_sv); } } Perl_warner(aTHX_ packWARN(WARN_REGEXP), "%s", SvPVX(msg)); SvREFCNT_dec_NN(msg); } } } STATIC AV * S_add_multi_match(pTHX_ AV* multi_char_matches, SV* multi_string, const STRLEN cp_count) { /* This adds the string scalar <multi_string> to the array * <multi_char_matches>. <multi_string> is known to have exactly * <cp_count> code points in it. This is used when constructing a * bracketed character class and we find something that needs to match more * than a single character. * * <multi_char_matches> is actually an array of arrays. Each top-level * element is an array that contains all the strings known so far that are * the same length. And that length (in number of code points) is the same * as the index of the top-level array. Hence, the [2] element is an * array, each element thereof is a string containing TWO code points; * while element [3] is for strings of THREE characters, and so on. Since * this is for multi-char strings there can never be a [0] nor [1] element. 
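* * (A sketch of the layout, using folds mentioned elsewhere in this file: * if the class has so far yielded the two code point string "ss" and the * three code point string "ffi", then * * multi_char_matches[2] = [ "ss" ] * multi_char_matches[3] = [ "ffi" ] * * with each string filed under its code point count.) 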
* * When we rewrite the character class below, we will do so such that the * longest strings are written first, so that it prefers the longest * matching strings first. This is done even if it turns out that any * quantifier is non-greedy, out of this programmer's (khw) laziness. Tom * Christiansen has agreed that this is ok. This makes the test for the * ligature 'ffi' come before the test for 'ff', for example */ AV* this_array; AV** this_array_ptr; PERL_ARGS_ASSERT_ADD_MULTI_MATCH; if (! multi_char_matches) { multi_char_matches = newAV(); } if (av_exists(multi_char_matches, cp_count)) { this_array_ptr = (AV**) av_fetch(multi_char_matches, cp_count, FALSE); this_array = *this_array_ptr; } else { this_array = newAV(); av_store(multi_char_matches, cp_count, (SV*) this_array); } av_push(this_array, multi_string); return multi_char_matches; } /* The names of properties whose definitions are not known at compile time are * stored in this SV, after a constant heading. So if the length has been * changed since initialization, then there is a run-time definition. */ #define HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION \ (SvCUR(listsv) != initial_listsv_len) /* There is a restricted set of white space characters that are legal when * ignoring white space in a bracketed character class. This generates the * code to skip them. * * There is a line below that uses the same white space criteria but is outside * this macro. Both here and there must use the same definition */ #define SKIP_BRACKETED_WHITE_SPACE(do_skip, p) \ STMT_START { \ if (do_skip) { \ while (isBLANK_A(UCHARAT(p))) \ { \ p++; \ } \ } \ } STMT_END STATIC regnode * S_regclass(pTHX_ RExC_state_t *pRExC_state, I32 *flagp, U32 depth, const bool stop_at_1, /* Just parse the next thing, don't look for a full character class */ bool allow_multi_folds, const bool silence_non_portable, /* Don't output warnings about too large characters */ const bool strict, bool optimizable, /* ? Allow a non-ANYOF return node */ SV** ret_invlist, /* Return an inversion list, not a node */ AV** return_posix_warnings ) { /* parse a bracketed class specification. Most of these will produce an * ANYOF node; but something like [a] will produce an EXACT node; [aA], an * EXACTFish node; [[:ascii:]], a POSIXA node; etc. It is more complex * under /i with multi-character folds: it will be rewritten following the * paradigm of this example, where the <multi-fold>s are characters which * fold to multiple character sequences: * /[abc\x{multi-fold1}def\x{multi-fold2}ghi]/i * gets effectively rewritten as: * /(?:\x{multi-fold1}|\x{multi-fold2}|[abcdefghi]/i * reg() gets called (recursively) on the rewritten version, and this * function will return what it constructs. (Actually the <multi-fold>s * aren't physically removed from the [abcdefghi], it's just that they are * ignored in the recursion by means of a flag: * <RExC_in_multi_char_class>.) * * ANYOF nodes contain a bit map for the first NUM_ANYOF_CODE_POINTS * characters, with the corresponding bit set if that character is in the * list. For characters above this, a range list or swash is used. There * are extra bits for \w, etc. in locale ANYOFs, as what these match is not * determinable at compile time * * Returns NULL, setting *flagp to RESTART_PASS1 if the sizing scan needs * to be restarted, or'd with NEED_UTF8 if the pattern needs to be upgraded * to UTF-8. This can only happen if ret_invlist is non-NULL. 
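* * (A sketch of what this means for callers, mirroring the calls made * earlier in this file: one that disallows multi-char folds treats a NULL * return as a panic, * * if (!regclass(pRExC_state, flagp, depth+1, ... )) * FAIL2("panic: regclass returned NULL to handle_sets, " * "flags=%#" UVxf, (UV) *flagp); * * while a caller that allows them must first check *flagp for * RESTART_PASS1 and NEED_UTF8 before treating NULL as a real failure; the * '...' here elides the remaining arguments shown at those call sites.) 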
*/ UV prevvalue = OOB_UNICODE, save_prevvalue = OOB_UNICODE; IV range = 0; UV value = OOB_UNICODE, save_value = OOB_UNICODE; regnode *ret; STRLEN numlen; int namedclass = OOB_NAMEDCLASS; char *rangebegin = NULL; bool need_class = 0; SV *listsv = NULL; STRLEN initial_listsv_len = 0; /* Kind of a kludge to see if it is more than just initialized. */ SV* properties = NULL; /* Code points that match \p{} \P{} */ SV* posixes = NULL; /* Code points that match classes like [:word:], extended beyond the Latin1 range. These have to be kept separate from other code points for much of this function because their handling is different under /i, and for most classes under /d as well */ SV* nposixes = NULL; /* Similarly for [:^word:]. These are kept separate for a while from the non-complemented versions because of complications with /d matching */ SV* simple_posixes = NULL; /* But under some conditions, the classes can be treated more simply than the general case, leading to less compilation and execution work */ UV element_count = 0; /* Number of distinct elements in the class. Optimizations may be possible if this is tiny */ AV * multi_char_matches = NULL; /* Code points that fold to more than one character; used under /i */ UV n; char * stop_ptr = RExC_end; /* where to stop parsing */ /* ignore unescaped whitespace? */ const bool skip_white = cBOOL( ret_invlist || (RExC_flags & RXf_PMf_EXTENDED_MORE)); /* Unicode properties are stored in a swash; this holds the current one * being parsed. If this swash is the only above-latin1 component of the * character class, an optimization is to pass it directly on to the * execution engine. Otherwise, it is set to NULL to indicate that there * are other things in the class that have to be dealt with at execution * time */ SV* swash = NULL; /* Code points that match \p{} \P{} */ /* Set if a component of this character class is user-defined; just passed * on to the engine */ bool has_user_defined_property = FALSE; /* inversion list of code points this node matches only when the target * string is in UTF-8. These are all non-ASCII, < 256. (Because is under * /d) */ SV* has_upper_latin1_only_utf8_matches = NULL; /* Inversion list of code points this node matches regardless of things * like locale, folding, utf8ness of the target string */ SV* cp_list = NULL; /* Like cp_list, but code points on this list need to be checked for things * that fold to/from them under /i */ SV* cp_foldable_list = NULL; /* Like cp_list, but code points on this list are valid only when the * runtime locale is UTF-8 */ SV* only_utf8_locale_list = NULL; /* In a range, if one of the endpoints is non-character-set portable, * meaning that it hard-codes a code point that may mean a different * character in ASCII vs. EBCDIC, as opposed to, say, a literal 'A' or a * mnemonic '\t' which each mean the same character no matter which * character set the platform is on. */ unsigned int non_portable_endpoint = 0; /* Is the range unicode? which means on a platform that isn't 1-1 native * to Unicode (i.e. non-ASCII), each code point in it should be considered * to be a Unicode value. */ bool unicode_range = FALSE; bool invert = FALSE; /* Is this class to be complemented */ bool warn_super = ALWAYS_WARN_SUPER; regnode * const orig_emit = RExC_emit; /* Save the original RExC_emit in case we need to change the emitted regop to an EXACT. */ const char * orig_parse = RExC_parse; const SSize_t orig_size = RExC_size; bool posixl_matches_all = FALSE; /* Does /l class have both e.g. \W,\w ? 
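* (If it does, as in qr/[\w\W]/l, the class * matches every possible code point, and the * optimization code at the end of this * function can then turn it into a SANY * node.) 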
*/ /* This variable is used to mark where the end in the input is of something * that looks like a POSIX construct but isn't. During the parse, when * something looks like it could be such a construct is encountered, it is * checked for being one, but not if we've already checked this area of the * input. Only after this position is reached do we check again */ char *not_posix_region_end = RExC_parse - 1; AV* posix_warnings = NULL; const bool do_posix_warnings = return_posix_warnings || (PASS2 && ckWARN(WARN_REGEXP)); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGCLASS; #ifndef DEBUGGING PERL_UNUSED_ARG(depth); #endif DEBUG_PARSE("clas"); #if UNICODE_MAJOR_VERSION < 3 /* no multifolds in early Unicode */ \ || (UNICODE_MAJOR_VERSION == 3 && UNICODE_DOT_VERSION == 0 \ && UNICODE_DOT_DOT_VERSION == 0) allow_multi_folds = FALSE; #endif /* Assume we are going to generate an ANYOF node. */ ret = reganode(pRExC_state, (LOC) ? ANYOFL : ANYOF, 0); if (SIZE_ONLY) { RExC_size += ANYOF_SKIP; listsv = &PL_sv_undef; /* For code scanners: listsv always non-NULL. */ } else { ANYOF_FLAGS(ret) = 0; RExC_emit += ANYOF_SKIP; listsv = newSVpvs_flags("# comment\n", SVs_TEMP); initial_listsv_len = SvCUR(listsv); SvTEMP_off(listsv); /* Grr, TEMPs and mortals are conflated. */ } SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse); assert(RExC_parse <= RExC_end); if (UCHARAT(RExC_parse) == '^') { /* Complement the class */ RExC_parse++; invert = TRUE; allow_multi_folds = FALSE; MARK_NAUGHTY(1); SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse); } /* Check that they didn't say [:posix:] instead of [[:posix:]] */ if (! ret_invlist && MAYBE_POSIXCC(UCHARAT(RExC_parse))) { int maybe_class = handle_possible_posix(pRExC_state, RExC_parse, &not_posix_region_end, NULL, TRUE /* checking only */); if (PASS2 && maybe_class >= OOB_NAMEDCLASS && do_posix_warnings) { SAVEFREESV(RExC_rx_sv); ckWARN4reg(not_posix_region_end, "POSIX syntax [%c %c] belongs inside character classes%s", *RExC_parse, *RExC_parse, (maybe_class == OOB_NAMEDCLASS) ? ((POSIXCC_NOTYET(*RExC_parse)) ? " (but this one isn't implemented)" : " (but this one isn't fully valid)") : "" ); (void)ReREFCNT_inc(RExC_rx_sv); } } /* If the caller wants us to just parse a single element, accomplish this * by faking the loop ending condition */ if (stop_at_1 && RExC_end > RExC_parse) { stop_ptr = RExC_parse + 1; } /* allow 1st char to be ']' (allowing it to be '-' is dealt with later) */ if (UCHARAT(RExC_parse) == ']') goto charclassloop; while (1) { if ( posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0 && RExC_parse > not_posix_region_end) { /* Warnings about posix class issues are considered tentative until * we are far enough along in the parse that we can no longer * change our mind, at which point we either output them or add * them, if it has so specified, to what gets returned to the * caller. This is done each time through the loop so that a later * class won't zap them before they have been dealt with. */ output_or_return_posix_warnings(pRExC_state, posix_warnings, return_posix_warnings); } if (RExC_parse >= stop_ptr) { break; } SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse); if (UCHARAT(RExC_parse) == ']') { break; } charclassloop: namedclass = OOB_NAMEDCLASS; /* initialize as illegal */ save_value = value; save_prevvalue = prevvalue; if (!range) { rangebegin = RExC_parse; element_count++; non_portable_endpoint = 0; } if (UTF && ! 
UTF8_IS_INVARIANT(* RExC_parse)) { value = utf8n_to_uvchr((U8*)RExC_parse, RExC_end - RExC_parse, &numlen, UTF8_ALLOW_DEFAULT); RExC_parse += numlen; } else value = UCHARAT(RExC_parse++); if (value == '[') { char * posix_class_end; namedclass = handle_possible_posix(pRExC_state, RExC_parse, &posix_class_end, do_posix_warnings ? &posix_warnings : NULL, FALSE /* die if error */); if (namedclass > OOB_NAMEDCLASS) { /* If there was an earlier attempt to parse this particular * posix class, and it failed, it was a false alarm, as this * successful one proves */ if ( posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0 && not_posix_region_end >= RExC_parse && not_posix_region_end <= posix_class_end) { av_undef(posix_warnings); } RExC_parse = posix_class_end; } else if (namedclass == OOB_NAMEDCLASS) { not_posix_region_end = posix_class_end; } else { namedclass = OOB_NAMEDCLASS; } } else if ( RExC_parse - 1 > not_posix_region_end && MAYBE_POSIXCC(value)) { (void) handle_possible_posix( pRExC_state, RExC_parse - 1, /* -1 because parse has already been advanced */ &not_posix_region_end, do_posix_warnings ? &posix_warnings : NULL, TRUE /* checking only */); } else if (value == '\\') { /* Is a backslash; get the code point of the char after it */ if (RExC_parse >= RExC_end) { vFAIL("Unmatched ["); } if (UTF && ! UTF8_IS_INVARIANT(UCHARAT(RExC_parse))) { value = utf8n_to_uvchr((U8*)RExC_parse, RExC_end - RExC_parse, &numlen, UTF8_ALLOW_DEFAULT); RExC_parse += numlen; } else value = UCHARAT(RExC_parse++); /* Some compilers cannot handle switching on 64-bit integer * values, therefore value cannot be an UV. Yes, this will * be a problem later if we want switch on Unicode. * A similar issue a little bit later when switching on * namedclass. --jhi */ /* If the \ is escaping white space when white space is being * skipped, it means that that white space is wanted literally, and * is already in 'value'. Otherwise, need to translate the escape * into what it signifies. */ if (! skip_white || ! isBLANK_A(value)) switch ((I32)value) { case 'w': namedclass = ANYOF_WORDCHAR; break; case 'W': namedclass = ANYOF_NWORDCHAR; break; case 's': namedclass = ANYOF_SPACE; break; case 'S': namedclass = ANYOF_NSPACE; break; case 'd': namedclass = ANYOF_DIGIT; break; case 'D': namedclass = ANYOF_NDIGIT; break; case 'v': namedclass = ANYOF_VERTWS; break; case 'V': namedclass = ANYOF_NVERTWS; break; case 'h': namedclass = ANYOF_HORIZWS; break; case 'H': namedclass = ANYOF_NHORIZWS; break; case 'N': /* Handle \N{NAME} in class */ { const char * const backslash_N_beg = RExC_parse - 2; int cp_count; if (! grok_bslash_N(pRExC_state, NULL, /* No regnode */ &value, /* Yes single value */ &cp_count, /* Multiple code pt count */ flagp, strict, depth) ) { if (*flagp & NEED_UTF8) FAIL("panic: grok_bslash_N set NEED_UTF8"); if (*flagp & RESTART_PASS1) return NULL; if (cp_count < 0) { vFAIL("\\N in a character class must be a named character: \\N{...}"); } else if (cp_count == 0) { if (PASS2) { ckWARNreg(RExC_parse, "Ignoring zero length \\N{} in character class"); } } else { /* cp_count > 1 */ if (! RExC_in_multi_char_class) { if (invert || range || *RExC_parse == '-') { if (strict) { RExC_parse--; vFAIL("\\N{} in inverted character class or as a range end-point is restricted to one character"); } else if (PASS2) { ckWARNreg(RExC_parse, "Using just the first character returned by \\N{} in character class"); } break; /* <value> contains the first code point. 
Drop out of the switch to process it */ } else { SV * multi_char_N = newSVpvn(backslash_N_beg, RExC_parse - backslash_N_beg); multi_char_matches = add_multi_match(multi_char_matches, multi_char_N, cp_count); } } } /* End of cp_count != 1 */ /* This element should not be processed further in this * class */ element_count--; value = save_value; prevvalue = save_prevvalue; continue; /* Back to top of loop to get next char */ } /* Here, is a single code point, and <value> contains it */ unicode_range = TRUE; /* \N{} are Unicode */ } break; case 'p': case 'P': { char *e; /* We will handle any undefined properties ourselves */ U8 swash_init_flags = _CORE_SWASH_INIT_RETURN_IF_UNDEF /* And we actually would prefer to get * the straight inversion list of the * swash, since we will be accessing it * anyway, to save a little time */ |_CORE_SWASH_INIT_ACCEPT_INVLIST; if (RExC_parse >= RExC_end) vFAIL2("Empty \\%c", (U8)value); if (*RExC_parse == '{') { const U8 c = (U8)value; e = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse); if (!e) { RExC_parse++; vFAIL2("Missing right brace on \\%c{}", c); } RExC_parse++; while (isSPACE(*RExC_parse)) { RExC_parse++; } if (UCHARAT(RExC_parse) == '^') { /* toggle. (The rhs xor gets the single bit that * differs between P and p; the other xor inverts just * that bit) */ value ^= 'P' ^ 'p'; RExC_parse++; while (isSPACE(*RExC_parse)) { RExC_parse++; } } if (e == RExC_parse) vFAIL2("Empty \\%c{}", c); n = e - RExC_parse; while (isSPACE(*(RExC_parse + n - 1))) n--; } /* The \p isn't immediately followed by a '{' */ else if (! isALPHA(*RExC_parse)) { RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; vFAIL2("Character following \\%c must be '{' or a " "single-character Unicode property name", (U8) value); } else { e = RExC_parse; n = 1; } if (!SIZE_ONLY) { SV* invlist; char* name; char* base_name; /* name after any packages are stripped */ char* lookup_name = NULL; const char * const colon_colon = "::"; /* Try to get the definition of the property into * <invlist>. If /i is in effect, the effective property * will have its name be <__NAME_i>. The design is * discussed in commit * 2f833f5208e26b208886e51e09e2c072b5eabb46 */ name = savepv(Perl_form(aTHX_ "%.*s", (int)n, RExC_parse)); SAVEFREEPV(name); if (FOLD) { lookup_name = savepv(Perl_form(aTHX_ "__%s_i", name)); /* The function call just below that uses this can fail * to return, leaking memory if we don't do this */ SAVEFREEPV(lookup_name); } /* Look up the property name, and get its swash and * inversion list, if the property is found */ SvREFCNT_dec(swash); /* Free any left-overs */ swash = _core_swash_init("utf8", (lookup_name) ? lookup_name : name, &PL_sv_undef, 1, /* binary */ 0, /* not tr/// */ NULL, /* No inversion list */ &swash_init_flags ); if (! swash || ! (invlist = _get_swash_invlist(swash))) { HV* curpkg = (IN_PERL_COMPILETIME) ? PL_curstash : CopSTASH(PL_curcop); UV final_n = n; bool has_pkg; if (swash) { /* Got a swash but no inversion list. Something is likely wrong that will be sorted-out later */ SvREFCNT_dec_NN(swash); swash = NULL; } /* Here didn't find it. It could be an error (like a * typo) in specifying a Unicode property, or it could * be a user-defined property that will be available at * run-time. The names of these must begin with 'In' * or 'Is' (after any packages are stripped off). So * if not one of those, or if we accept only * compile-time properties, is an error; otherwise add * it to the list for run-time look up. 
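* (Thus something like \p{IsMyVowels}, or \p{MyPkg::InDigits} with a * package, would be deferred to run time, while \p{Frobnitz} fails here * as a likely typo; the 'In'/'Is' tests just below implement this rule. * The property names in this illustration are made up, not ones that * exist.) 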
*/ if ((base_name = rninstr(name, name + n, colon_colon, colon_colon + 2))) { /* Has ::. We know this must be a user-defined property */ base_name += 2; final_n -= base_name - name; has_pkg = TRUE; } else { base_name = name; has_pkg = FALSE; } if ( final_n < 3 || base_name[0] != 'I' || (base_name[1] != 's' && base_name[1] != 'n') || ret_invlist) { const char * const msg = (has_pkg) ? "Illegal user-defined property name" : "Can't find Unicode property definition"; RExC_parse = e + 1; /* diag_listed_as: Can't find Unicode property definition "%s" */ vFAIL3utf8f("%s \"%" UTF8f "\"", msg, UTF8fARG(UTF, n, name)); } /* If the property name doesn't already have a package * name, add the current one to it so that it can be * referred to outside it. [perl #121777] */ if (! has_pkg && curpkg) { char* pkgname = HvNAME(curpkg); if (memNEs(pkgname, HvNAMELEN(curpkg), "main")) { char* full_name = Perl_form(aTHX_ "%s::%s", pkgname, name); n = strlen(full_name); name = savepvn(full_name, n); SAVEFREEPV(name); } } Perl_sv_catpvf(aTHX_ listsv, "%cutf8::%s%" UTF8f "%s\n", (value == 'p' ? '+' : '!'), (FOLD) ? "__" : "", UTF8fARG(UTF, n, name), (FOLD) ? "_i" : ""); has_user_defined_property = TRUE; optimizable = FALSE; /* Will have to leave this an ANYOF node */ /* We don't know yet what this matches, so have to flag * it */ ANYOF_FLAGS(ret) |= ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP; } else { /* Here, did get the swash and its inversion list. If * the swash is from a user-defined property, then this * whole character class should be regarded as such */ if (swash_init_flags & _CORE_SWASH_INIT_USER_DEFINED_PROPERTY) { has_user_defined_property = TRUE; } else if /* We warn on matching an above-Unicode code point * if the match would return true, except don't * warn for \p{All}, which has exactly one element * = 0 */ (_invlist_contains_cp(invlist, 0x110000) && (! (_invlist_len(invlist) == 1 && *invlist_array(invlist) == 0))) { warn_super = TRUE; } /* Invert if asking for the complement */ if (value == 'P') { _invlist_union_complement_2nd(properties, invlist, &properties); /* The swash can't be used as-is, because we've * inverted things; delay removing it to here after * have copied its invlist above */ SvREFCNT_dec_NN(swash); swash = NULL; } else { _invlist_union(properties, invlist, &properties); } } } RExC_parse = e + 1; namedclass = ANYOF_UNIPROP; /* no official name, but it's named */ /* \p means they want Unicode semantics */ REQUIRE_UNI_RULES(flagp, NULL); } break; case 'n': value = '\n'; break; case 'r': value = '\r'; break; case 't': value = '\t'; break; case 'f': value = '\f'; break; case 'b': value = '\b'; break; case 'e': value = ESC_NATIVE; break; case 'a': value = '\a'; break; case 'o': RExC_parse--; /* function expects to be pointed at the 'o' */ { const char* error_msg; bool valid = grok_bslash_o(&RExC_parse, RExC_end, &value, &error_msg, PASS2, /* warnings only in pass 2 */ strict, silence_non_portable, UTF); if (! valid) { vFAIL(error_msg); } } non_portable_endpoint++; break; case 'x': RExC_parse--; /* function expects to be pointed at the 'x' */ { const char* error_msg; bool valid = grok_bslash_x(&RExC_parse, RExC_end, &value, &error_msg, PASS2, /* Output warnings */ strict, silence_non_portable, UTF); if (! 
valid) { vFAIL(error_msg); } } non_portable_endpoint++; break; case 'c': value = grok_bslash_c(*RExC_parse++, PASS2); non_portable_endpoint++; break; case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': { /* Take 1-3 octal digits */ I32 flags = PERL_SCAN_SILENT_ILLDIGIT; numlen = (strict) ? 4 : 3; value = grok_oct(--RExC_parse, &numlen, &flags, NULL); RExC_parse += numlen; if (numlen != 3) { if (strict) { RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; vFAIL("Need exactly 3 octal digits"); } else if (! SIZE_ONLY /* like \08, \178 */ && numlen < 3 && RExC_parse < RExC_end && isDIGIT(*RExC_parse) && ckWARN(WARN_REGEXP)) { SAVEFREESV(RExC_rx_sv); reg_warn_non_literal_string( RExC_parse + 1, form_short_octal_warning(RExC_parse, numlen)); (void)ReREFCNT_inc(RExC_rx_sv); } } non_portable_endpoint++; break; } default: /* Allow \_ to not give an error */ if (!SIZE_ONLY && isWORDCHAR(value) && value != '_') { if (strict) { vFAIL2("Unrecognized escape \\%c in character class", (int)value); } else { SAVEFREESV(RExC_rx_sv); ckWARN2reg(RExC_parse, "Unrecognized escape \\%c in character class passed through", (int)value); (void)ReREFCNT_inc(RExC_rx_sv); } } break; } /* End of switch on char following backslash */ } /* end of handling backslash escape sequences */ /* Here, we have the current token in 'value' */ if (namedclass > OOB_NAMEDCLASS) { /* this is a named class \blah */ U8 classnum; /* a bad range like a-\d, a-[:digit:]. The '-' is taken as a * literal, as is the character that began the false range, i.e. * the 'a' in the examples */ if (range) { if (!SIZE_ONLY) { const int w = (RExC_parse >= rangebegin) ? RExC_parse - rangebegin : 0; if (strict) { vFAIL2utf8f( "False [] range \"%" UTF8f "\"", UTF8fARG(UTF, w, rangebegin)); } else { SAVEFREESV(RExC_rx_sv); /* in case of fatal warnings */ ckWARN2reg(RExC_parse, "False [] range \"%" UTF8f "\"", UTF8fARG(UTF, w, rangebegin)); (void)ReREFCNT_inc(RExC_rx_sv); cp_list = add_cp_to_invlist(cp_list, '-'); cp_foldable_list = add_cp_to_invlist(cp_foldable_list, prevvalue); } } range = 0; /* this was not a true range */ element_count += 2; /* So counts for three values */ } classnum = namedclass_to_classnum(namedclass); if (LOC && namedclass < ANYOF_POSIXL_MAX #ifndef HAS_ISASCII && classnum != _CC_ASCII #endif ) { /* What the Posix classes (like \w, [:space:]) match in locale * isn't knowable under locale until actual match time. Room * must be reserved (one time per outer bracketed class) to * store such classes. The space will contain a bit for each * named class that is to be matched against. This isn't * needed for \p{} and pseudo-classes, as they are not affected * by locale, and hence are dealt with separately */ if (! need_class) { need_class = 1; if (SIZE_ONLY) { RExC_size += ANYOF_POSIXL_SKIP - ANYOF_SKIP; } else { RExC_emit += ANYOF_POSIXL_SKIP - ANYOF_SKIP; } ANYOF_FLAGS(ret) |= ANYOF_MATCHES_POSIXL; ANYOF_POSIXL_ZERO(ret); /* We can't change this into some other type of node * (unless this is the only element, in which case there * are nodes that mean exactly this) as has runtime * dependencies */ optimizable = FALSE; } /* Coverity thinks it is possible for this to be negative; both * jhi and khw think it's not, but be safer */ assert(! (ANYOF_FLAGS(ret) & ANYOF_MATCHES_POSIXL) || (namedclass + ((namedclass % 2) ? -1 : 1)) >= 0); /* See if it already matches the complement of this POSIX * class */ if ((ANYOF_FLAGS(ret) & ANYOF_MATCHES_POSIXL) && ANYOF_POSIXL_TEST(ret, namedclass + ((namedclass % 2) ? 
-1 : 1))) { posixl_matches_all = TRUE; break; /* No need to continue. Since it matches both e.g., \w and \W, it matches everything, and the bracketed class can be optimized into qr/./s */ } /* Add this class to those that should be checked at runtime */ ANYOF_POSIXL_SET(ret, namedclass); /* The above-Latin1 characters are not subject to locale rules. * Just add them, in the second pass, to the * unconditionally-matched list */ if (! SIZE_ONLY) { SV* scratch_list = NULL; /* Get the list of the above-Latin1 code points this * matches */ _invlist_intersection_maybe_complement_2nd(PL_AboveLatin1, PL_XPosix_ptrs[classnum], /* Odd numbers are complements, like * NDIGIT, NASCII, ... */ namedclass % 2 != 0, &scratch_list); /* Checking if 'cp_list' is NULL first saves an extra * clone. Its reference count will be decremented at the * next union, etc, or if this is the only instance, at the * end of the routine */ if (! cp_list) { cp_list = scratch_list; } else { _invlist_union(cp_list, scratch_list, &cp_list); SvREFCNT_dec_NN(scratch_list); } continue; /* Go get next character */ } } else if (! SIZE_ONLY) { /* Here, not in pass1 (in that pass we skip calculating the * contents of this class), and is not /l, or is a POSIX class * for which /l doesn't matter (or is a Unicode property, which * is skipped here). */ if (namedclass >= ANYOF_POSIXL_MAX) { /* If a special class */ if (namedclass != ANYOF_UNIPROP) { /* UNIPROP = \p and \P */ /* Here, should be \h, \H, \v, or \V. None of /d, /i * nor /l make a difference in what these match, * therefore we just add what they match to cp_list. */ if (classnum != _CC_VERTSPACE) { assert( namedclass == ANYOF_HORIZWS || namedclass == ANYOF_NHORIZWS); /* It turns out that \h is just a synonym for * XPosixBlank */ classnum = _CC_BLANK; } _invlist_union_maybe_complement_2nd( cp_list, PL_XPosix_ptrs[classnum], namedclass % 2 != 0, /* Complement if odd (NHORIZWS, NVERTWS) */ &cp_list); } } else if ( UNI_SEMANTICS || classnum == _CC_ASCII || (DEPENDS_SEMANTICS && ( classnum == _CC_DIGIT || classnum == _CC_XDIGIT))) { /* We usually have to worry about /d and /a affecting what * POSIX classes match, with special code needed for /d * because we won't know until runtime what all matches. * But there is no extra work needed under /u, and * [:ascii:] is unaffected by /a and /d; and :digit: and * :xdigit: don't have runtime differences under /d. So we * can special case these, and avoid some extra work below, * and at runtime. */ _invlist_union_maybe_complement_2nd( simple_posixes, PL_XPosix_ptrs[classnum], namedclass % 2 != 0, &simple_posixes); } else { /* Garden variety class. If is NUPPER, NALPHA, ... complement and use nposixes */ SV** posixes_ptr = namedclass % 2 == 0 ? &posixes : &nposixes; _invlist_union_maybe_complement_2nd( *posixes_ptr, PL_XPosix_ptrs[classnum], namedclass % 2 != 0, posixes_ptr); } } } /* end of namedclass \blah */ SKIP_BRACKETED_WHITE_SPACE(skip_white, RExC_parse); /* If 'range' is set, 'value' is the ending of a range--check its * validity. (If value isn't a single code point in the case of a * range, we should have figured that out above in the code that * catches false ranges). Later, we will handle each individual code * point in the range. 
If 'range' isn't set, this could be the * beginning of a range, so check for that by looking ahead to see if * the next real character to be processed is the range indicator--the * minus sign */ if (range) { #ifdef EBCDIC /* For unicode ranges, we have to test that the Unicode as opposed * to the native values are not decreasing. (Above 255, there is * no difference between native and Unicode) */ if (unicode_range && prevvalue < 255 && value < 255) { if (NATIVE_TO_LATIN1(prevvalue) > NATIVE_TO_LATIN1(value)) { goto backwards_range; } } else #endif if (prevvalue > value) /* b-a */ { int w; #ifdef EBCDIC backwards_range: #endif w = RExC_parse - rangebegin; vFAIL2utf8f( "Invalid [] range \"%" UTF8f "\"", UTF8fARG(UTF, w, rangebegin)); NOT_REACHED; /* NOTREACHED */ } } else { prevvalue = value; /* save the beginning of the potential range */ if (! stop_at_1 /* Can't be a range if parsing just one thing */ && *RExC_parse == '-') { char* next_char_ptr = RExC_parse + 1; /* Get the next real char after the '-' */ SKIP_BRACKETED_WHITE_SPACE(skip_white, next_char_ptr); /* If the '-' is at the end of the class (just before the ']'), * it is a literal minus; otherwise it is a range */ if (next_char_ptr < RExC_end && *next_char_ptr != ']') { RExC_parse = next_char_ptr; /* a bad range like \w-, [:word:]- ? */ if (namedclass > OOB_NAMEDCLASS) { if (strict || (PASS2 && ckWARN(WARN_REGEXP))) { const int w = RExC_parse >= rangebegin ? RExC_parse - rangebegin : 0; if (strict) { vFAIL4("False [] range \"%*.*s\"", w, w, rangebegin); } else if (PASS2) { vWARN4(RExC_parse, "False [] range \"%*.*s\"", w, w, rangebegin); } } if (!SIZE_ONLY) { cp_list = add_cp_to_invlist(cp_list, '-'); } element_count++; } else range = 1; /* yeah, it's a range! */ continue; /* but do it the next time */ } } } if (namedclass > OOB_NAMEDCLASS) { continue; } /* Here, we have a single value this time through the loop, and * <prevvalue> is the beginning of the range, if any; or <value> if * not. */ /* non-Latin1 code point implies unicode semantics. Must be set in * pass1 so is there for the whole of pass 2 */ if (value > 255) { REQUIRE_UNI_RULES(flagp, NULL); } /* Ready to process either the single value, or the completed range. * For single-valued non-inverted ranges, we consider the possibility * of multi-char folds. (We made a conscious decision to not do this * for the other cases because it can often lead to non-intuitive * results. For example, you have the peculiar case that: * "s s" =~ /^[^\xDF]+$/i => Y * "ss" =~ /^[^\xDF]+$/i => N * * See [perl #89750] */ if (FOLD && allow_multi_folds && value == prevvalue) { if (value == LATIN_SMALL_LETTER_SHARP_S || (value > 255 && _invlist_contains_cp(PL_HasMultiCharFold, value))) { /* Here <value> is indeed a multi-char fold. Get what it is */ U8 foldbuf[UTF8_MAXBYTES_CASE]; STRLEN foldlen; UV folded = _to_uni_fold_flags( value, foldbuf, &foldlen, FOLD_FLAGS_FULL | (ASCII_FOLD_RESTRICTED ? FOLD_FLAGS_NOMIX_ASCII : 0) ); /* Here, <folded> should be the first character of the * multi-char fold of <value>, with <foldbuf> containing the * whole thing. But, if this fold is not allowed (because of * the flags), <folded> will be the same as <value>, and should * be processed like any other character, so skip the special * handling */ if (! 
RExC_in_multi_char_class) { STRLEN cp_count = utf8_length(foldbuf, foldbuf + foldlen); SV* multi_fold = sv_2mortal(newSVpvs("")); Perl_sv_catpvf(aTHX_ multi_fold, "\\x{%" UVXf "}", value); multi_char_matches = add_multi_match(multi_char_matches, multi_fold, cp_count); } /* This element should not be processed further in this * class */ element_count--; value = save_value; prevvalue = save_prevvalue; continue; } } } if (strict && PASS2 && ckWARN(WARN_REGEXP)) { if (range) { /* If the range starts above 255, everything is portable and * likely to be so for any foreseeable character set, so don't * warn. */ if (unicode_range && non_portable_endpoint && prevvalue < 256) { vWARN(RExC_parse, "Both or neither range ends should be Unicode"); } else if (prevvalue != value) { /* Under strict, ranges that start and/or end in an ASCII * printable should have each end point be a portable value * for it (preferably like 'A', but we don't warn if it is * a (portable) Unicode name or code point), and the range * must be all digits or all letters of the same case. * Otherwise, the range is non-portable and unclear as to * what it contains */ if ( (isPRINT_A(prevvalue) || isPRINT_A(value)) && ( non_portable_endpoint || ! ( (isDIGIT_A(prevvalue) && isDIGIT_A(value)) || (isLOWER_A(prevvalue) && isLOWER_A(value)) || (isUPPER_A(prevvalue) && isUPPER_A(value)) ))) { vWARN(RExC_parse, "Ranges of ASCII printables should" " be some subset of \"0-9\"," " \"A-Z\", or \"a-z\""); } else if (prevvalue >= 0x660) { /* ARABIC_INDIC_DIGIT_ZERO */ SSize_t index_start; SSize_t index_final; /* But the nature of Unicode and languages mean we * can't do the same checks for above-ASCII ranges, * except in the case of digit ones. These should * contain only digits from the same group of 10. The * ASCII case is handled just above. 0x660 is the * first digit character beyond ASCII. Hence here, the * range could be a range of digits. First some * unlikely special cases. Grandfather in that a range * ending in 19DA (NEW TAI LUE THAM DIGIT ONE) is bad * if its starting value is one of the 10 digits prior * to it. This is because it is an alternate way of * writing 19D1, and some people may expect it to be in * that group. But it is bad, because it won't give * the expected results. In Unicode 5.2 it was * considered to be in that group (of 11, hence), but * this was fixed in the next version */ if (UNLIKELY(value == 0x19DA && prevvalue >= 0x19D0)) { goto warn_bad_digit_range; } else if (UNLIKELY( prevvalue >= 0x1D7CE && value <= 0x1D7FF)) { /* This is the only other case currently in Unicode * where the algorithm below fails. The code * points just above are the end points of a single * range containing only decimal digits. It is 5 * different series of 0-9. All other ranges of * digits currently in Unicode are just a single * series. (And mktables will notify us if a later * Unicode version breaks this.) * * If the range being checked is at most 9 long, * and the digit values represented are in * numerical order, they are from the same series. * */ if ( value - prevvalue > 9 || ((( value - 0x1D7CE) % 10) <= (prevvalue - 0x1D7CE) % 10)) { goto warn_bad_digit_range; } } else { /* For all other ranges of digits in Unicode, the * algorithm is just to check if both end points * are in the same series, which is the same range. * */ index_start = _invlist_search( PL_XPosix_ptrs[_CC_DIGIT], prevvalue); /* Warn if the range starts and ends with a digit, * and they are not in the same group of 10. 
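* (For example, the range [\x{0660}-\x{06F9}] runs from ARABIC-INDIC * DIGIT ZERO to EXTENDED ARABIC-INDIC DIGIT NINE, two different groups * of 10, and so draws this warning; [\x{0660}-\x{0669}], which stays * within the ARABIC-INDIC group, does not.) 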
*/ if ( index_start >= 0 && ELEMENT_RANGE_MATCHES_INVLIST(index_start) && (index_final = _invlist_search(PL_XPosix_ptrs[_CC_DIGIT], value)) != index_start && index_final >= 0 && ELEMENT_RANGE_MATCHES_INVLIST(index_final)) { warn_bad_digit_range: vWARN(RExC_parse, "Ranges of digits should be" " from the same group of" " 10"); } } } } if ((! range || prevvalue == value) && non_portable_endpoint) { if (isPRINT_A(value)) { char literal[3]; unsigned d = 0; if (isBACKSLASHED_PUNCT(value)) { literal[d++] = '\\'; } literal[d++] = (char) value; literal[d++] = '\0'; vWARN4(RExC_parse, "\"%.*s\" is more clearly written simply as \"%s\"", (int) (RExC_parse - rangebegin), rangebegin, literal ); } else if (isMNEMONIC_CNTRL(value)) { vWARN4(RExC_parse, "\"%.*s\" is more clearly written simply as \"%s\"", (int) (RExC_parse - rangebegin), rangebegin, cntrl_to_mnemonic((U8) value) ); } } } /* Deal with this element of the class */ if (! SIZE_ONLY) { #ifndef EBCDIC cp_foldable_list = _add_range_to_invlist(cp_foldable_list, prevvalue, value); #else /* On non-ASCII platforms, for ranges that span all of 0..255, and * ones that don't require special handling, we can just add the * range like we do for ASCII platforms */ if ((UNLIKELY(prevvalue == 0) && value >= 255) || ! (prevvalue < 256 && (unicode_range || (! non_portable_endpoint && ((isLOWER_A(prevvalue) && isLOWER_A(value)) || (isUPPER_A(prevvalue) && isUPPER_A(value))))))) { cp_foldable_list = _add_range_to_invlist(cp_foldable_list, prevvalue, value); } else { /* Here, requires special handling. This can be because it is * a range whose code points are considered to be Unicode, and * so must be individually translated into native, or because * it's a subrange of 'A-Z' or 'a-z' which each aren't * contiguous in EBCDIC, but we have defined them to include * only the "expected" upper or lower case ASCII alphabetics. * Subranges above 255 are the same in native and Unicode, so * can be added as a range */ U8 start = NATIVE_TO_LATIN1(prevvalue); unsigned j; U8 end = (value < 256) ? NATIVE_TO_LATIN1(value) : 255; for (j = start; j <= end; j++) { cp_foldable_list = add_cp_to_invlist(cp_foldable_list, LATIN1_TO_NATIVE(j)); } if (value > 255) { cp_foldable_list = _add_range_to_invlist(cp_foldable_list, 256, value); } } #endif } range = 0; /* this range (if it was one) is done now */ } /* End of loop through all the text within the brackets */ if ( posix_warnings && av_tindex_skip_len_mg(posix_warnings) >= 0) { output_or_return_posix_warnings(pRExC_state, posix_warnings, return_posix_warnings); } /* If anything in the class expands to more than one character, we have to * deal with it by building up a substitute parse string, and recursively * calling reg() on it, instead of proceeding */ if (multi_char_matches) { SV * substitute_parse = newSVpvn_flags("?:", 2, SVs_TEMP); I32 cp_count; STRLEN len; char *save_end = RExC_end; char *save_parse = RExC_parse; char *save_start = RExC_start; STRLEN prefix_end = 0; /* We copy the character class after a prefix supplied here. This is the size + 1 of that prefix */ bool first_time = TRUE; /* First multi-char occurrence doesn't get a "|" */ I32 reg_flags; assert(!
invert); assert(RExC_precomp_adj == 0); /* Only one level of recursion allowed */ #if 0 /* Have decided not to deal with multi-char folds in inverted classes, because too confusing */ if (invert) { sv_catpv(substitute_parse, "(?:"); } #endif /* Look at the longest folds first */ for (cp_count = av_tindex_skip_len_mg(multi_char_matches); cp_count > 0; cp_count--) { if (av_exists(multi_char_matches, cp_count)) { AV** this_array_ptr; SV* this_sequence; this_array_ptr = (AV**) av_fetch(multi_char_matches, cp_count, FALSE); while ((this_sequence = av_pop(*this_array_ptr)) != &PL_sv_undef) { if (! first_time) { sv_catpv(substitute_parse, "|"); } first_time = FALSE; sv_catpv(substitute_parse, SvPVX(this_sequence)); } } } /* If the character class contains anything else besides these * multi-character folds, have to include it in recursive parsing */ if (element_count) { sv_catpv(substitute_parse, "|["); prefix_end = SvCUR(substitute_parse); sv_catpvn(substitute_parse, orig_parse, RExC_parse - orig_parse); /* Put in a closing ']' only if not going off the end, as otherwise * we are adding something that really isn't there */ if (RExC_parse < RExC_end) { sv_catpv(substitute_parse, "]"); } } sv_catpv(substitute_parse, ")"); #if 0 if (invert) { /* This is a way to get the parse to skip forward a whole named * sequence instead of matching the 2nd character when it fails the * first */ sv_catpv(substitute_parse, "(*THEN)(*SKIP)(*FAIL)|.)"); } #endif /* Set up the data structure so that any errors will be properly * reported. See the comments at the definition of * REPORT_LOCATION_ARGS for details */ RExC_precomp_adj = orig_parse - RExC_precomp; RExC_start = RExC_parse = SvPV(substitute_parse, len); RExC_adjusted_start = RExC_start + prefix_end; RExC_end = RExC_parse + len; RExC_in_multi_char_class = 1; RExC_emit = (regnode *)orig_emit; ret = reg(pRExC_state, 1, &reg_flags, depth+1); *flagp |= reg_flags&(HASWIDTH|SIMPLE|SPSTART|POSTPONED|RESTART_PASS1|NEED_UTF8); /* And restore so we can parse the rest of the pattern */ RExC_parse = save_parse; RExC_start = RExC_adjusted_start = save_start; RExC_precomp_adj = 0; RExC_end = save_end; RExC_in_multi_char_class = 0; SvREFCNT_dec_NN(multi_char_matches); return ret; } /* Here, we've gone through the entire class and dealt with multi-char * folds. We are now in a position where we can do some checks to see if we * can optimize this ANYOF node into a simpler one, even in Pass 1. * Currently we only do two checks: * 1) in the unlikely event that the user has specified both, e.g. \w and * \W under /l, the class matches everything. (This optimization * is done only to make the optimizer code that runs later work.) * 2) if the character class contains only a single element (including a * single range), we see if there is an equivalent node for it. * Other checks are possible */ if ( optimizable && ! ret_invlist /* Can't optimize if returning the constructed inversion list */ && (UNLIKELY(posixl_matches_all) || element_count == 1)) { U8 op = END; U8 arg = 0; if (UNLIKELY(posixl_matches_all)) { op = SANY; } else if (namedclass > OOB_NAMEDCLASS) { /* this is a single named class, like \w or [:digit:] or \p{foo} */ /* All named classes are mapped into POSIXish nodes, with the FLAG * argument giving which class it is */ switch ((I32)namedclass) { case ANYOF_UNIPROP: break; /* These don't depend on the charset modifiers.
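* (Namely \h, \H, \v, and \V.)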
They always * match under /u rules */ case ANYOF_NHORIZWS: case ANYOF_HORIZWS: namedclass = ANYOF_BLANK + namedclass - ANYOF_HORIZWS; /* FALLTHROUGH */ case ANYOF_NVERTWS: case ANYOF_VERTWS: op = POSIXU; goto join_posix; /* The actual POSIXish node for all the rest depends on the * charset modifier. The ones in the first set depend only on * ASCII or, if available on this platform, also locale */ case ANYOF_ASCII: case ANYOF_NASCII: #ifdef HAS_ISASCII op = (LOC) ? POSIXL : POSIXA; #else op = POSIXA; #endif goto join_posix; /* The following don't have any matches in the upper Latin1 * range, hence /d is equivalent to /u for them. Making it /u * saves some branches at runtime */ case ANYOF_DIGIT: case ANYOF_NDIGIT: case ANYOF_XDIGIT: case ANYOF_NXDIGIT: if (! DEPENDS_SEMANTICS) { goto treat_as_default; } op = POSIXU; goto join_posix; /* The following change to CASED under /i */ case ANYOF_LOWER: case ANYOF_NLOWER: case ANYOF_UPPER: case ANYOF_NUPPER: if (FOLD) { namedclass = ANYOF_CASED + (namedclass % 2); } /* FALLTHROUGH */ /* The rest have more possibilities depending on the charset. * We take advantage of the enum ordering of the charset * modifiers to get the exact node type, */ default: treat_as_default: op = POSIXD + get_regex_charset(RExC_flags); if (op > POSIXA) { /* /aa is same as /a */ op = POSIXA; } join_posix: /* The odd numbered ones are the complements of the * next-lower even number one */ if (namedclass % 2 == 1) { invert = ! invert; namedclass--; } arg = namedclass_to_classnum(namedclass); break; } } else if (value == prevvalue) { /* Here, the class consists of just a single code point */ if (invert) { if (! LOC && value == '\n') { op = REG_ANY; /* Optimize [^\n] */ *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); } } else if (value < 256 || UTF) { /* Optimize a single value into an EXACTish node, but not if it * would require converting the pattern to UTF-8. */ op = compute_EXACTish(pRExC_state); } } /* Otherwise is a range */ else if (! LOC) { /* locale could vary these */ if (prevvalue == '0') { if (value == '9') { arg = _CC_DIGIT; op = POSIXA; } } else if (! FOLD || ASCII_FOLD_RESTRICTED) { /* We can optimize A-Z or a-z, but not if they could match * something like the KELVIN SIGN under /i. */ if (prevvalue == 'A') { if (value == 'Z' #ifdef EBCDIC && ! non_portable_endpoint #endif ) { arg = (FOLD) ? _CC_ALPHA : _CC_UPPER; op = POSIXA; } } else if (prevvalue == 'a') { if (value == 'z' #ifdef EBCDIC && ! non_portable_endpoint #endif ) { arg = (FOLD) ? _CC_ALPHA : _CC_LOWER; op = POSIXA; } } } } /* Here, we have changed <op> away from its initial value iff we found * an optimization */ if (op != END) { /* Throw away this ANYOF regnode, and emit the calculated one, * which should correspond to the beginning, not current, state of * the parse */ const char * cur_parse = RExC_parse; RExC_parse = (char *)orig_parse; if ( SIZE_ONLY) { if (! LOC) { /* To get locale nodes to not use the full ANYOF size would * require moving the code above that writes the portions * of it that aren't in other nodes to after this point. * e.g. ANYOF_POSIXL_SET */ RExC_size = orig_size; } } else { RExC_emit = (regnode *)orig_emit; if (PL_regkind[op] == POSIXD) { if (op == POSIXL) { RExC_contains_locale = 1; } if (invert) { op += NPOSIXD - POSIXD; } } } ret = reg_node(pRExC_state, op); if (PL_regkind[op] == POSIXD || PL_regkind[op] == NPOSIXD) { if (! 
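/* in pass 2, record which POSIX class this node matches */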
SIZE_ONLY) { FLAGS(ret) = arg; } *flagp |= HASWIDTH|SIMPLE; } else if (PL_regkind[op] == EXACT) { alloc_maybe_populate_EXACT(pRExC_state, ret, flagp, 0, value, TRUE /* downgradable to EXACT */ ); } RExC_parse = (char *) cur_parse; SvREFCNT_dec(posixes); SvREFCNT_dec(nposixes); SvREFCNT_dec(simple_posixes); SvREFCNT_dec(cp_list); SvREFCNT_dec(cp_foldable_list); return ret; } } if (SIZE_ONLY) return ret; /****** !SIZE_ONLY (Pass 2) AFTER HERE *********/ /* If folding, we calculate all characters that could fold to or from the * ones already on the list */ if (cp_foldable_list) { if (FOLD) { UV start, end; /* End points of code point ranges */ SV* fold_intersection = NULL; SV** use_list; /* Our calculated list will be for Unicode rules. For locale * matching, we have to keep a separate list that is consulted at * runtime only when the locale indicates Unicode rules. For * non-locale, we just use the general list */ if (LOC) { use_list = &only_utf8_locale_list; } else { use_list = &cp_list; } /* Only the characters in this class that participate in folds need * be checked. Get the intersection of this class and all the * possible characters that are foldable. This can quickly narrow * down a large class */ _invlist_intersection(PL_utf8_foldable, cp_foldable_list, &fold_intersection); /* The folds for all the Latin1 characters are hard-coded into this * program, but we have to go out to disk to get the others. */ if (invlist_highest(cp_foldable_list) >= 256) { /* This is a hash that for a particular fold gives all * characters that are involved in it */ if (! PL_utf8_foldclosures) { _load_PL_utf8_foldclosures(); } } /* Now look at the foldable characters in this class individually */ invlist_iterinit(fold_intersection); while (invlist_iternext(fold_intersection, &start, &end)) { UV j; /* Look at every character in the range */ for (j = start; j <= end; j++) { U8 foldbuf[UTF8_MAXBYTES_CASE+1]; STRLEN foldlen; SV** listp; if (j < 256) { if (IS_IN_SOME_FOLD_L1(j)) { /* ASCII is always matched; non-ASCII is matched * only under Unicode rules (which could happen * under /l if the locale is a UTF-8 one */ if (isASCII(j) || ! DEPENDS_SEMANTICS) { *use_list = add_cp_to_invlist(*use_list, PL_fold_latin1[j]); } else { has_upper_latin1_only_utf8_matches = add_cp_to_invlist( has_upper_latin1_only_utf8_matches, PL_fold_latin1[j]); } } if (HAS_NONLATIN1_SIMPLE_FOLD_CLOSURE(j) && (! isASCII(j) || ! ASCII_FOLD_RESTRICTED)) { add_above_Latin1_folds(pRExC_state, (U8) j, use_list); } continue; } /* Here is an above Latin1 character. We don't have the * rules hard-coded for it. First, get its fold. This is * the simple fold, as the multi-character folds have been * handled earlier and separated out */ _to_uni_fold_flags(j, foldbuf, &foldlen, (ASCII_FOLD_RESTRICTED) ? FOLD_FLAGS_NOMIX_ASCII : 0); /* Single character fold of above Latin1. Add everything in * its fold closure to the list that this node should match. * The fold closures data structure is a hash with the keys * being the UTF-8 of every character that is folded to, like * 'k', and the values each an array of all code points that * fold to its key. e.g. [ 'k', 'K', KELVIN_SIGN ]. 
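* (KELVIN SIGN is U+212A, which simple-folds to 'k'.)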
* Multi-character folds are not included */ if ((listp = hv_fetch(PL_utf8_foldclosures, (char *) foldbuf, foldlen, FALSE))) { AV* list = (AV*) *listp; IV k; for (k = 0; k <= av_tindex_skip_len_mg(list); k++) { SV** c_p = av_fetch(list, k, FALSE); UV c; assert(c_p); c = SvUV(*c_p); /* /aa doesn't allow folds between ASCII and non- */ if ((ASCII_FOLD_RESTRICTED && (isASCII(c) != isASCII(j)))) { continue; } /* Folds under /l which cross the 255/256 boundary * are added to a separate list. (These are valid * only when the locale is UTF-8.) */ if (c < 256 && LOC) { *use_list = add_cp_to_invlist(*use_list, c); continue; } if (isASCII(c) || c > 255 || AT_LEAST_UNI_SEMANTICS) { cp_list = add_cp_to_invlist(cp_list, c); } else { /* Similarly folds involving non-ascii Latin1 * characters under /d are added to their list */ has_upper_latin1_only_utf8_matches = add_cp_to_invlist( has_upper_latin1_only_utf8_matches, c); } } } } } SvREFCNT_dec_NN(fold_intersection); } /* Now that we have finished adding all the folds, there is no reason * to keep the foldable list separate */ _invlist_union(cp_list, cp_foldable_list, &cp_list); SvREFCNT_dec_NN(cp_foldable_list); } /* And combine the result (if any) with any inversion lists from posix * classes. The lists are kept separate up to now because we don't want to * fold the classes (folding of those is automatically handled by the swash * fetching code) */ if (simple_posixes) { /* These are the classes known to be unaffected by /a, /aa, and /d */ if (cp_list) { _invlist_union(cp_list, simple_posixes, &cp_list); SvREFCNT_dec_NN(simple_posixes); } else { cp_list = simple_posixes; } } if (posixes || nposixes) { /* We have to adjust /a and /aa */ if (AT_LEAST_ASCII_RESTRICTED) { /* Under /a and /aa, nothing above ASCII matches these */ if (posixes) { _invlist_intersection(posixes, PL_XPosix_ptrs[_CC_ASCII], &posixes); } /* Under /a and /aa, everything above ASCII matches these * complements */ if (nposixes) { _invlist_union_complement_2nd(nposixes, PL_XPosix_ptrs[_CC_ASCII], &nposixes); } } if (! DEPENDS_SEMANTICS) { /* For everything but /d, we can just add the current 'posixes' and * 'nposixes' to the main list */ if (posixes) { if (cp_list) { _invlist_union(cp_list, posixes, &cp_list); SvREFCNT_dec_NN(posixes); } else { cp_list = posixes; } } if (nposixes) { if (cp_list) { _invlist_union(cp_list, nposixes, &cp_list); SvREFCNT_dec_NN(nposixes); } else { cp_list = nposixes; } } } else { /* Under /d, things like \w match upper Latin1 characters only if * the target string is in UTF-8. But things like \W match all the * upper Latin1 characters if the target string is not in UTF-8. * * Handle the case where there is something like \W separately */ if (nposixes) { SV* only_non_utf8_list = invlist_clone(PL_UpperLatin1); /* A complemented posix class matches all upper Latin1 * characters if not in UTF-8. And it matches just certain * ones when in UTF-8.
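* (\W, for instance, matches every upper Latin1 character when the target string is not UTF-8, but only the non-word subset of them when it is.)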
That means those certain ones are * matched regardless, so can just be added to the * unconditional list */ if (cp_list) { _invlist_union(cp_list, nposixes, &cp_list); SvREFCNT_dec_NN(nposixes); nposixes = NULL; } else { cp_list = nposixes; } /* Likewise for 'posixes' */ _invlist_union(posixes, cp_list, &cp_list); /* Likewise for anything else in the range that matched only * under UTF-8 */ if (has_upper_latin1_only_utf8_matches) { _invlist_union(cp_list, has_upper_latin1_only_utf8_matches, &cp_list); SvREFCNT_dec_NN(has_upper_latin1_only_utf8_matches); has_upper_latin1_only_utf8_matches = NULL; } /* If we don't match all the upper Latin1 characters regardless * of UTF-8ness, we have to set a flag to match the rest when * not in UTF-8 */ _invlist_subtract(only_non_utf8_list, cp_list, &only_non_utf8_list); if (_invlist_len(only_non_utf8_list) != 0) { ANYOF_FLAGS(ret) |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER; } } else { /* Here there were no complemented posix classes. That means * the upper Latin1 characters in 'posixes' match only when the * target string is in UTF-8. So we have to add them to the * list of those types of code points, while adding the * remainder to the unconditional list. * * First calculate what they are */ SV* nonascii_but_latin1_properties = NULL; _invlist_intersection(posixes, PL_UpperLatin1, &nonascii_but_latin1_properties); /* And add them to the final list of such characters. */ _invlist_union(has_upper_latin1_only_utf8_matches, nonascii_but_latin1_properties, &has_upper_latin1_only_utf8_matches); /* Remove them from what now becomes the unconditional list */ _invlist_subtract(posixes, nonascii_but_latin1_properties, &posixes); /* And add those unconditional ones to the final list */ if (cp_list) { _invlist_union(cp_list, posixes, &cp_list); SvREFCNT_dec_NN(posixes); posixes = NULL; } else { cp_list = posixes; } SvREFCNT_dec(nonascii_but_latin1_properties); /* Get rid of any characters that we now know are matched * unconditionally from the conditional list, which may make * that list empty */ _invlist_subtract(has_upper_latin1_only_utf8_matches, cp_list, &has_upper_latin1_only_utf8_matches); if (_invlist_len(has_upper_latin1_only_utf8_matches) == 0) { SvREFCNT_dec_NN(has_upper_latin1_only_utf8_matches); has_upper_latin1_only_utf8_matches = NULL; } } } } /* And combine the result (if any) with any inversion list from properties. * The lists are kept separate up to now so that we can distinguish the two * in regards to matching above-Unicode. A run-time warning is generated * if a Unicode property is matched against a non-Unicode code point. But, * we allow user-defined properties to match anything, without any warning, * and we also suppress the warning if there is a portion of the character * class that isn't a Unicode property, and which matches above Unicode, \W * or [\x{110000}] for example. * (Note that in this case, unlike the Posix one above, there is no * <has_upper_latin1_only_utf8_matches>, because having a Unicode property * forces Unicode semantics */ if (properties) { if (cp_list) { /* If it matters to the final outcome, see if a non-property * component of the class matches above Unicode. If so, the * warning gets suppressed. This is true even if just a single * such code point is specified, as, though not strictly correct if * another such code point is matched against, the fact that they * are using above-Unicode code points indicates they should know * the issues involved */ if (warn_super) { warn_super = ! 
(invert ^ (invlist_highest(cp_list) > PERL_UNICODE_MAX)); } _invlist_union(properties, cp_list, &cp_list); SvREFCNT_dec_NN(properties); } else { cp_list = properties; } if (warn_super) { ANYOF_FLAGS(ret) |= ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER; /* Because an ANYOF node is the only one that warns, this node * can't be optimized into something else */ optimizable = FALSE; } } /* Here, we have calculated what code points should be in the character * class. * * Now we can see about various optimizations. Fold calculation (which we * did above) needs to take place before inversion. Otherwise /[^k]/i * would invert to include K, which under /i would match k, which it * shouldn't. Therefore we can't invert folded locale now, as it won't be * folded until runtime */ /* If we didn't do folding, it's because some information isn't available * until runtime; set the run-time fold flag for these. (We don't have to * worry about properties folding, as that is taken care of by the swash * fetching). We know to set the flag if we have a non-NULL list for UTF-8 * locales, or the class matches at least one 0-255 range code point */ if (LOC && FOLD) { /* Some things on the list might be unconditionally included because of * other components. Remove them, and clean up the list if it goes to * 0 elements */ if (only_utf8_locale_list && cp_list) { _invlist_subtract(only_utf8_locale_list, cp_list, &only_utf8_locale_list); if (_invlist_len(only_utf8_locale_list) == 0) { SvREFCNT_dec_NN(only_utf8_locale_list); only_utf8_locale_list = NULL; } } if (only_utf8_locale_list) { ANYOF_FLAGS(ret) |= ANYOFL_FOLD |ANYOFL_SHARED_UTF8_LOCALE_fold_HAS_MATCHES_nonfold_REQD; } else if (cp_list) { /* Look to see if a 0-255 code point is in list */ UV start, end; invlist_iterinit(cp_list); if (invlist_iternext(cp_list, &start, &end) && start < 256) { ANYOF_FLAGS(ret) |= ANYOFL_FOLD; } invlist_iterfinish(cp_list); } } else if ( DEPENDS_SEMANTICS && ( has_upper_latin1_only_utf8_matches || (ANYOF_FLAGS(ret) & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER))) { OP(ret) = ANYOFD; optimizable = FALSE; } /* Optimize inverted simple patterns (e.g. [^a-z]) when everything is known * at compile time. Besides not inverting folded locale now, we can't * invert if there are things such as \w, which aren't known until runtime * */ if (cp_list && invert && OP(ret) != ANYOFD && ! (ANYOF_FLAGS(ret) & (ANYOF_LOCALE_FLAGS)) && ! HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION) { _invlist_invert(cp_list); /* Any swash can't be used as-is, because we've inverted things */ if (swash) { SvREFCNT_dec_NN(swash); swash = NULL; } /* Clear the invert flag since have just done it here */ invert = FALSE; } if (ret_invlist) { assert(cp_list); *ret_invlist = cp_list; SvREFCNT_dec(swash); /* Discard the generated node */ if (SIZE_ONLY) { RExC_size = orig_size; } else { RExC_emit = orig_emit; } return orig_emit; } /* Some character classes are equivalent to other nodes. Such nodes take * up less room and generally fewer operations to execute than ANYOF nodes. * Above, we checked for and optimized into some such equivalents for * certain common classes that are easy to test. Getting to this point in * the code means that the class didn't get optimized there. Since this * code is only executed in Pass 2, it is too late to save space--it has * been allocated in Pass 1, and currently isn't given back. But turning * things into an EXACTish node can allow the optimizer to join it to any * adjacent such nodes. 
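* (For instance, /a[b]c/ compiles [b] to an EXACT node, which the joining code can then merge with its neighbors into a single EXACT "abc".)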
And if the class is equivalent to things like /./, * expensive run-time swashes can be avoided. Now that we have more * complete information, we can find things necessarily missed by the * earlier code. Another possible "optimization" that isn't done is that * something like [Ee] could be changed into an EXACTFU. khw tried this * and found that the ANYOF is faster, including for code points not in the * bitmap. This still might make sense to do, provided it got joined with * adjacent node(s) to create a longer EXACTFU one. This could be * accomplished by creating a pseudo ANYOF_EXACTFU node type that the join * routine would know is joinable. If that didn't happen, the node type * could then be made a straight ANYOF */ if (optimizable && cp_list && ! invert) { UV start, end; U8 op = END; /* The optimization node-type */ int posix_class = -1; /* Illegal value */ const char * cur_parse= RExC_parse; invlist_iterinit(cp_list); if (! invlist_iternext(cp_list, &start, &end)) { /* Here, the list is empty. This happens, for example, when a * Unicode property that doesn't match anything is the only element * in the character class (perluniprops.pod notes such properties). * */ op = OPFAIL; *flagp |= HASWIDTH|SIMPLE; } else if (start == end) { /* The range is a single code point */ if (! invlist_iternext(cp_list, &start, &end) /* Don't do this optimization if it would require changing * the pattern to UTF-8 */ && (start < 256 || UTF)) { /* Here, the list contains a single code point. Can optimize * into an EXACTish node */ value = start; if (! FOLD) { op = (LOC) ? EXACTL : EXACT; } else if (LOC) { /* A locale node under folding with one code point can be * an EXACTFL, as its fold won't be calculated until * runtime */ op = EXACTFL; } else { /* Here, we are generally folding, but there is only one * code point to match. If we have to, we use an EXACT * node, but it would be better for joining with adjacent * nodes in the optimization pass if we used the same * EXACTFish node that any such are likely to be. We can * do this iff the code point doesn't participate in any * folds. For example, an EXACTF of a colon is the same as * an EXACT one, since nothing folds to or from a colon. */ if (value < 256) { if (IS_IN_SOME_FOLD_L1(value)) { op = EXACT; } } else { if (_invlist_contains_cp(PL_utf8_foldable, value)) { op = EXACT; } } /* If we haven't found the node type, above, it means we * can use the prevailing one */ if (op == END) { op = compute_EXACTish(pRExC_state); } } } } /* End of first range contains just a single code point */ else if (start == 0) { if (end == UV_MAX) { op = SANY; *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); } else if (end == '\n' - 1 && invlist_iternext(cp_list, &start, &end) && start == '\n' + 1 && end == UV_MAX) { op = REG_ANY; *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); } } invlist_iterfinish(cp_list); if (op == END) { const UV cp_list_len = _invlist_len(cp_list); const UV* cp_list_array = invlist_array(cp_list); /* Here, didn't find an optimization. See if this matches any of * the POSIX classes. These run slightly faster for above-Unicode * code points, so don't bother with POSIXA ones nor the 2 that * have no above-Unicode matches. We can avoid these checks unless * the ANYOF matches at least as high as the lowest POSIX one * (which was manually found to be \v. The actual code point may * increase in later Unicode releases, if a higher code point is * assigned to be \v, but this code will never break.
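* (0x2029 is PARAGRAPH SEPARATOR, currently the highest code point matched by \v.)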
It would * just mean we could execute the checks for posix optimizations * unnecessarily) */ if (cp_list_array[cp_list_len-1] > 0x2029) { for (posix_class = 0; posix_class <= _HIGHEST_REGCOMP_DOT_H_SYNC; posix_class++) { int try_inverted; if (posix_class == _CC_ASCII || posix_class == _CC_CNTRL) { continue; } for (try_inverted = 0; try_inverted < 2; try_inverted++) { /* Check if matches normal or inverted */ if (_invlistEQ(cp_list, PL_XPosix_ptrs[posix_class], try_inverted)) { op = (try_inverted) ? NPOSIXU : POSIXU; *flagp |= HASWIDTH|SIMPLE; goto found_posix; } } } found_posix: ; } } if (op != END) { RExC_parse = (char *)orig_parse; RExC_emit = (regnode *)orig_emit; if (regarglen[op]) { ret = reganode(pRExC_state, op, 0); } else { ret = reg_node(pRExC_state, op); } RExC_parse = (char *)cur_parse; if (PL_regkind[op] == EXACT) { alloc_maybe_populate_EXACT(pRExC_state, ret, flagp, 0, value, TRUE /* downgradable to EXACT */ ); } else if (PL_regkind[op] == POSIXD || PL_regkind[op] == NPOSIXD) { FLAGS(ret) = posix_class; } SvREFCNT_dec_NN(cp_list); return ret; } } /* Here, <cp_list> contains all the code points we can determine at * compile time that match under all conditions. Go through it, and * for things that belong in the bitmap, put them there, and delete from * <cp_list>. While we are at it, see if everything above 255 is in the * list, and if so, set a flag to speed up execution */ populate_ANYOF_from_invlist(ret, &cp_list); if (invert) { ANYOF_FLAGS(ret) |= ANYOF_INVERT; } /* Here, the bitmap has been populated with all the Latin1 code points that * always match. Can now add to the overall list those that match only * when the target string is UTF-8 (<has_upper_latin1_only_utf8_matches>). * */ if (has_upper_latin1_only_utf8_matches) { if (cp_list) { _invlist_union(cp_list, has_upper_latin1_only_utf8_matches, &cp_list); SvREFCNT_dec_NN(has_upper_latin1_only_utf8_matches); } else { cp_list = has_upper_latin1_only_utf8_matches; } ANYOF_FLAGS(ret) |= ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP; } /* If there is a swash and more than one element, we can't use the swash in * the optimization below. */ if (swash && element_count > 1) { SvREFCNT_dec_NN(swash); swash = NULL; } /* Note that the optimization of using 'swash' if it is the only thing in * the class doesn't have us change swash at all, so it can include things * that are also in the bitmap; otherwise we have purposely deleted that * duplicate information */ set_ANYOF_arg(pRExC_state, ret, cp_list, (HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION) ? listsv : NULL, only_utf8_locale_list, swash, has_user_defined_property); *flagp |= HASWIDTH|SIMPLE; if (ANYOF_FLAGS(ret) & ANYOF_LOCALE_FLAGS) { RExC_contains_locale = 1; } return ret; } #undef HAS_NONLOCALE_RUNTIME_PROPERTY_DEFINITION STATIC void S_set_ANYOF_arg(pTHX_ RExC_state_t* const pRExC_state, regnode* const node, SV* const cp_list, SV* const runtime_defns, SV* const only_utf8_locale_list, SV* const swash, const bool has_user_defined_property) { /* Sets the arg field of an ANYOF-type node 'node', using information about * the node passed-in. If there is nothing outside the node's bitmap, the * arg is set to ANYOF_ONLY_HAS_BITMAP. Otherwise, it sets the argument to * the count returned by add_data(), having allocated and stored an array, * av, that that count references, as follows: * av[0] stores the character class description in its textual form. 
* This is used later (regexec.c:Perl_regclass_swash()) to * initialize the appropriate swash, and is also useful for dumping * the regnode. This is set to &PL_sv_undef if the textual * description is not needed at run-time (as happens if the other * elements completely define the class) * av[1] if &PL_sv_undef, is a placeholder to later contain the swash * computed from av[0]. But if no further computation need be done, * the swash is stored here now (and av[0] is &PL_sv_undef). * av[2] stores the inversion list of code points that match only if the * current locale is UTF-8 * av[3] stores the cp_list inversion list for use in addition or instead * of av[0]; used only if cp_list exists and av[1] is &PL_sv_undef. * (Otherwise everything needed is already in av[0] and av[1]) * av[4] is set if any component of the class is from a user-defined * property; used only if av[3] exists */ UV n; PERL_ARGS_ASSERT_SET_ANYOF_ARG; if (! cp_list && ! runtime_defns && ! only_utf8_locale_list) { assert(! (ANYOF_FLAGS(node) & ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP)); ARG_SET(node, ANYOF_ONLY_HAS_BITMAP); } else { AV * const av = newAV(); SV *rv; av_store(av, 0, (runtime_defns) ? SvREFCNT_inc(runtime_defns) : &PL_sv_undef); if (swash) { assert(cp_list); av_store(av, 1, swash); SvREFCNT_dec_NN(cp_list); } else { av_store(av, 1, &PL_sv_undef); if (cp_list) { av_store(av, 3, cp_list); av_store(av, 4, newSVuv(has_user_defined_property)); } } if (only_utf8_locale_list) { av_store(av, 2, only_utf8_locale_list); } else { av_store(av, 2, &PL_sv_undef); } rv = newRV_noinc(MUTABLE_SV(av)); n = add_data(pRExC_state, STR_WITH_LEN("s")); RExC_rxi->data->data[n] = (void*)rv; ARG_SET(node, n); } } #if !defined(PERL_IN_XSUB_RE) || defined(PLUGGABLE_RE_EXTENSION) SV * Perl__get_regclass_nonbitmap_data(pTHX_ const regexp *prog, const regnode* node, bool doinit, SV** listsvp, SV** only_utf8_locale_ptr, SV** output_invlist) { /* For internal core use only. * Returns the swash for the input 'node' in the regex 'prog'. * If <doinit> is 'true', will attempt to create the swash if not already * done. * If <listsvp> is non-null, will return the printable contents of the * swash. This can be used to get debugging information even before the * swash exists, by calling this function with 'doinit' set to false, in * which case the components that will be used to eventually create the * swash are returned (in a printable form). * If <only_utf8_locale_ptr> is not NULL, it is where this routine is to * store an inversion list of code points that should match only if the * execution-time locale is a UTF-8 one. * If <output_invlist> is not NULL, it is where this routine is to store an * inversion list of the code points that would be instead returned in * <listsvp> if this were NULL. Thus, what gets output in <listsvp> * when this parameter is used, is just the non-code point data that * will go into creating the swash. This currently should be just * user-defined properties whose definitions were not known at compile * time. Using this parameter allows for easier manipulation of the * swash's data by the caller. It is illegal to call this function with * this parameter set, but not <listsvp> * * Tied intimately to how S_set_ANYOF_arg sets up the data structure. 
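* (See the av[0] through av[4] layout documented in that function.)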
Note * that, in spite of this function's name, the swash it returns may include * the bitmap data as well */ SV *sw = NULL; SV *si = NULL; /* Input swash initialization string */ SV* invlist = NULL; RXi_GET_DECL(prog,progi); const struct reg_data * const data = prog ? progi->data : NULL; PERL_ARGS_ASSERT__GET_REGCLASS_NONBITMAP_DATA; assert(! output_invlist || listsvp); if (data && data->count) { const U32 n = ARG(node); if (data->what[n] == 's') { SV * const rv = MUTABLE_SV(data->data[n]); AV * const av = MUTABLE_AV(SvRV(rv)); SV **const ary = AvARRAY(av); U8 swash_init_flags = _CORE_SWASH_INIT_ACCEPT_INVLIST; si = *ary; /* ary[0] = the string to initialize the swash with */ if (av_tindex_skip_len_mg(av) >= 2) { if (only_utf8_locale_ptr && ary[2] && ary[2] != &PL_sv_undef) { *only_utf8_locale_ptr = ary[2]; } else { assert(only_utf8_locale_ptr); *only_utf8_locale_ptr = NULL; } /* Elements 3 and 4 are either both present or both absent. [3] * is any inversion list generated at compile time; [4] * indicates if that inversion list has any user-defined * properties in it. */ if (av_tindex_skip_len_mg(av) >= 3) { invlist = ary[3]; if (SvUV(ary[4])) { swash_init_flags |= _CORE_SWASH_INIT_USER_DEFINED_PROPERTY; } } else { invlist = NULL; } } /* Element [1] is reserved for the set-up swash. If already there, * return it; if not, create it and store it there */ if (ary[1] && SvROK(ary[1])) { sw = ary[1]; } else if (doinit && ((si && si != &PL_sv_undef) || (invlist && invlist != &PL_sv_undef))) { assert(si); sw = _core_swash_init("utf8", /* the utf8 package */ "", /* nameless */ si, 1, /* binary */ 0, /* not from tr/// */ invlist, &swash_init_flags); (void)av_store(av, 1, sw); } } } /* If requested, return a printable version of what this swash matches */ if (listsvp) { SV* matches_string = NULL; /* The swash should be used, if possible, to get the data, as it * contains the resolved data. But this function can be called at * compile-time, before everything gets resolved, in which case we * return the currently best available information, which is the string * that will eventually be used to do that resolving, 'si' */ if ((! sw || (invlist = _get_swash_invlist(sw)) == NULL) && (si && si != &PL_sv_undef)) { /* Here, we only have 'si' (and possibly some passed-in data in * 'invlist', which is handled below) If the caller only wants * 'si', use that. */ if (! output_invlist) { matches_string = newSVsv(si); } else { /* But if the caller wants an inversion list of the node, we * need to parse 'si' and place as much as possible in the * desired output inversion list, making 'matches_string' only * contain the currently unresolvable things */ const char *si_string = SvPVX(si); STRLEN remaining = SvCUR(si); UV prev_cp = 0; U8 count = 0; /* Ignore everything before the first new-line */ while (*si_string != '\n' && remaining > 0) { si_string++; remaining--; } assert(remaining > 0); si_string++; remaining--; while (remaining > 0) { /* The data consists of just strings defining user-defined * property names, but in prior incarnations, and perhaps * somehow from pluggable regex engines, it could still * hold hex code point definitions. Each component of a * range would be separated by a tab, and each range by a * new-line. 
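* (So a line like "0041\t005A" would denote the range U+0041..U+005A, and a lone "0061" the single code point U+0061.)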
If these are found, instead add them to the * inversion list */ I32 grok_flags = PERL_SCAN_SILENT_ILLDIGIT |PERL_SCAN_SILENT_NON_PORTABLE; STRLEN len = remaining; UV cp = grok_hex(si_string, &len, &grok_flags, NULL); /* If the hex decode routine found something, it should go * up to the next \n */ if ( *(si_string + len) == '\n') { if (count) { /* 2nd code point on line */ *output_invlist = _add_range_to_invlist(*output_invlist, prev_cp, cp); } else { *output_invlist = add_cp_to_invlist(*output_invlist, cp); } count = 0; goto prepare_for_next_iteration; } /* If the hex decode was instead for the lower range limit, * save it, and go parse the upper range limit */ if (*(si_string + len) == '\t') { assert(count == 0); prev_cp = cp; count = 1; prepare_for_next_iteration: si_string += len + 1; remaining -= len + 1; continue; } /* Here, didn't find a legal hex number. Just add it from * here to the next \n */ remaining -= len; while (*(si_string + len) != '\n' && remaining > 0) { remaining--; len++; } if (*(si_string + len) == '\n') { len++; remaining--; } if (matches_string) { sv_catpvn(matches_string, si_string, len - 1); } else { matches_string = newSVpvn(si_string, len - 1); } si_string += len; sv_catpvs(matches_string, " "); } /* end of loop through the text */ assert(matches_string); if (SvCUR(matches_string)) { /* Get rid of trailing blank */ SvCUR_set(matches_string, SvCUR(matches_string) - 1); } } /* end of has an 'si' but no swash */ } /* If we have a swash in place, its equivalent inversion list was above * placed into 'invlist'. If not, this variable may contain a stored * inversion list which is information beyond what is in 'si' */ if (invlist) { /* Again, if the caller doesn't want the output inversion list, put * everything in 'matches-string' */ if (! output_invlist) { if ( ! matches_string) { matches_string = newSVpvs("\n"); } sv_catsv(matches_string, invlist_contents(invlist, TRUE /* traditional style */ )); } else if (! *output_invlist) { *output_invlist = invlist_clone(invlist); } else { _invlist_union(*output_invlist, invlist, output_invlist); } } *listsvp = matches_string; } return sw; } #endif /* !defined(PERL_IN_XSUB_RE) || defined(PLUGGABLE_RE_EXTENSION) */ /* reg_skipcomment() Absorbs an /x style # comment from the input stream, returning a pointer to the first character beyond the comment, or if the comment terminates the pattern without anything following it, this returns one past the final character of the pattern (in other words, RExC_end) and sets the REG_RUN_ON_COMMENT_SEEN flag. Note it's the callers responsibility to ensure that we are actually in /x mode */ PERL_STATIC_INLINE char* S_reg_skipcomment(RExC_state_t *pRExC_state, char* p) { PERL_ARGS_ASSERT_REG_SKIPCOMMENT; assert(*p == '#'); while (p < RExC_end) { if (*(++p) == '\n') { return p+1; } } /* we ran off the end of the pattern without ending the comment, so we have * to add an \n when wrapping */ RExC_seen |= REG_RUN_ON_COMMENT_SEEN; return p; } STATIC void S_skip_to_be_ignored_text(pTHX_ RExC_state_t *pRExC_state, char ** p, const bool force_to_xmod ) { /* If the text at the current parse position '*p' is a '(?#...)' comment, * or if we are under /x or 'force_to_xmod' is TRUE, and the text at '*p' * is /x whitespace, advance '*p' so that on exit it points to the first * byte past all such white space and comments */ const bool use_xmod = force_to_xmod || (RExC_flags & RXf_PMf_EXTENDED); PERL_ARGS_ASSERT_SKIP_TO_BE_IGNORED_TEXT; assert( ! 
UTF || UTF8_IS_INVARIANT(**p) || UTF8_IS_START(**p)); for (;;) { if (RExC_end - (*p) >= 3 && *(*p) == '(' && *(*p + 1) == '?' && *(*p + 2) == '#') { while (*(*p) != ')') { if ((*p) == RExC_end) FAIL("Sequence (?#... not terminated"); (*p)++; } (*p)++; continue; } if (use_xmod) { const char * save_p = *p; while ((*p) < RExC_end) { STRLEN len; if ((len = is_PATWS_safe((*p), RExC_end, UTF))) { (*p) += len; } else if (*(*p) == '#') { (*p) = reg_skipcomment(pRExC_state, (*p)); } else { break; } } if (*p != save_p) { continue; } } break; } return; } /* nextchar() Advances the parse position by one byte, unless that byte is the beginning of a '(?#...)' style comment, or is /x whitespace and /x is in effect. In those two cases, the parse position is advanced beyond all such comments and white space. This is the UTF, (?#...), and /x friendly way of saying RExC_parse++. */ STATIC void S_nextchar(pTHX_ RExC_state_t *pRExC_state) { PERL_ARGS_ASSERT_NEXTCHAR; if (RExC_parse < RExC_end) { assert( ! UTF || UTF8_IS_INVARIANT(*RExC_parse) || UTF8_IS_START(*RExC_parse)); RExC_parse += (UTF) ? UTF8SKIP(RExC_parse) : 1; skip_to_be_ignored_text(pRExC_state, &RExC_parse, FALSE /* Don't force /x */ ); } } STATIC regnode * S_regnode_guts(pTHX_ RExC_state_t *pRExC_state, const U8 op, const STRLEN extra_size, const char* const name) { /* Allocate a regnode for 'op' and returns it, with 'extra_size' extra * space. In pass1, it aligns and increments RExC_size; in pass2, * RExC_emit */ regnode * const ret = RExC_emit; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGNODE_GUTS; assert(extra_size >= regarglen[op]); if (SIZE_ONLY) { SIZE_ALIGN(RExC_size); RExC_size += 1 + extra_size; return(ret); } if (RExC_emit >= RExC_emit_bound) Perl_croak(aTHX_ "panic: reg_node overrun trying to emit %d, %p>=%p", op, (void*)RExC_emit, (void*)RExC_emit_bound); NODE_ALIGN_FILL(ret); #ifndef RE_TRACK_PATTERN_OFFSETS PERL_UNUSED_ARG(name); #else if (RExC_offsets) { /* MJD */ MJD_OFFSET_DEBUG( ("%s:%d: (op %s) %s %" UVuf " (len %" UVuf ") (max %" UVuf ").\n", name, __LINE__, PL_reg_name[op], (UV)(RExC_emit - RExC_emit_start) > RExC_offsets[0] ? "Overwriting end of array!\n" : "OK", (UV)(RExC_emit - RExC_emit_start), (UV)(RExC_parse - RExC_start), (UV)RExC_offsets[0])); Set_Node_Offset(RExC_emit, RExC_parse + (op == END)); } #endif return(ret); } /* - reg_node - emit a node */ STATIC regnode * /* Location. */ S_reg_node(pTHX_ RExC_state_t *pRExC_state, U8 op) { regnode * const ret = regnode_guts(pRExC_state, op, regarglen[op], "reg_node"); PERL_ARGS_ASSERT_REG_NODE; assert(regarglen[op] == 0); if (PASS2) { regnode *ptr = ret; FILL_ADVANCE_NODE(ptr, op); RExC_emit = ptr; } return(ret); } /* - reganode - emit a node with an argument */ STATIC regnode * /* Location. 
*/ S_reganode(pTHX_ RExC_state_t *pRExC_state, U8 op, U32 arg) { regnode * const ret = regnode_guts(pRExC_state, op, regarglen[op], "reganode"); PERL_ARGS_ASSERT_REGANODE; assert(regarglen[op] == 1); if (PASS2) { regnode *ptr = ret; FILL_ADVANCE_NODE_ARG(ptr, op, arg); RExC_emit = ptr; } return(ret); } STATIC regnode * S_reg2Lanode(pTHX_ RExC_state_t *pRExC_state, const U8 op, const U32 arg1, const I32 arg2) { /* emit a node with U32 and I32 arguments */ regnode * const ret = regnode_guts(pRExC_state, op, regarglen[op], "reg2Lanode"); PERL_ARGS_ASSERT_REG2LANODE; assert(regarglen[op] == 2); if (PASS2) { regnode *ptr = ret; FILL_ADVANCE_NODE_2L_ARG(ptr, op, arg1, arg2); RExC_emit = ptr; } return(ret); } /* - reginsert - insert an operator in front of already-emitted operand * * Means relocating the operand. * * IMPORTANT NOTE - it is the *callers* responsibility to correctly * set up NEXT_OFF() of the inserted node if needed. Something like this: * * reginsert(pRExC, OPFAIL, orig_emit, depth+1); * if (PASS2) * NEXT_OFF(orig_emit) = regarglen[OPFAIL] + NODE_STEP_REGNODE; * * ALSO NOTE - operand->flags will be set to 0 as well. */ STATIC void S_reginsert(pTHX_ RExC_state_t *pRExC_state, U8 op, regnode *operand, U32 depth) { regnode *src; regnode *dst; regnode *place; const int offset = regarglen[(U8)op]; const int size = NODE_STEP_REGNODE + offset; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGINSERT; PERL_UNUSED_CONTEXT; PERL_UNUSED_ARG(depth); /* (PL_regkind[(U8)op] == CURLY ? EXTRA_STEP_2ARGS : 0); */ DEBUG_PARSE_FMT("inst"," - %s",PL_reg_name[op]); if (SIZE_ONLY) { RExC_size += size; return; } assert(!RExC_study_started); /* I believe we should never use reginsert once we have started studying. If this is wrong then we need to adjust RExC_recurse below like we do with RExC_open_parens/RExC_close_parens. */ src = RExC_emit; RExC_emit += size; dst = RExC_emit; if (RExC_open_parens) { int paren; /*DEBUG_PARSE_FMT("inst"," - %" IVdf, (IV)RExC_npar);*/ /* remember that RExC_npar is rex->nparens + 1, * iow it is 1 more than the number of parens seen in * the pattern so far. */ for ( paren=0 ; paren < RExC_npar ; paren++ ) { /* note, RExC_open_parens[0] is the start of the * regex, it can't move. RExC_close_parens[0] is the end * of the regex, it *can* move. */ if ( paren && RExC_open_parens[paren] >= operand ) { /*DEBUG_PARSE_FMT("open"," - %d",size);*/ RExC_open_parens[paren] += size; } else { /*DEBUG_PARSE_FMT("open"," - %s","ok");*/ } if ( RExC_close_parens[paren] >= operand ) { /*DEBUG_PARSE_FMT("close"," - %d",size);*/ RExC_close_parens[paren] += size; } else { /*DEBUG_PARSE_FMT("close"," - %s","ok");*/ } } } if (RExC_end_op) RExC_end_op += size; while (src > operand) { StructCopy(--src, --dst, regnode); #ifdef RE_TRACK_PATTERN_OFFSETS if (RExC_offsets) { /* MJD 20010112 */ MJD_OFFSET_DEBUG( ("%s(%d): (op %s) %s copy %" UVuf " -> %" UVuf " (max %" UVuf ").\n", "reg_insert", __LINE__, PL_reg_name[op], (UV)(dst - RExC_emit_start) > RExC_offsets[0] ? "Overwriting end of array!\n" : "OK", (UV)(src - RExC_emit_start), (UV)(dst - RExC_emit_start), (UV)RExC_offsets[0])); Set_Node_Offset_To_R(dst-RExC_emit_start, Node_Offset(src)); Set_Node_Length_To_R(dst-RExC_emit_start, Node_Length(src)); } #endif } place = operand; /* Op node, where operand used to be. */ #ifdef RE_TRACK_PATTERN_OFFSETS if (RExC_offsets) { /* MJD */ MJD_OFFSET_DEBUG( ("%s(%d): (op %s) %s %" UVuf " <- %" UVuf " (max %" UVuf ").\n", "reginsert", __LINE__, PL_reg_name[op], (UV)(place - RExC_emit_start) > RExC_offsets[0] ? 
"Overwriting end of array!\n" : "OK", (UV)(place - RExC_emit_start), (UV)(RExC_parse - RExC_start), (UV)RExC_offsets[0])); Set_Node_Offset(place, RExC_parse); Set_Node_Length(place, 1); } #endif src = NEXTOPER(place); place->flags = 0; FILL_ADVANCE_NODE(place, op); Zero(src, offset, regnode); } /* - regtail - set the next-pointer at the end of a node chain of p to val. - SEE ALSO: regtail_study */ STATIC void S_regtail(pTHX_ RExC_state_t * pRExC_state, const regnode * const p, const regnode * const val, const U32 depth) { regnode *scan; GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGTAIL; #ifndef DEBUGGING PERL_UNUSED_ARG(depth); #endif if (SIZE_ONLY) return; /* Find last node. */ scan = (regnode *) p; for (;;) { regnode * const temp = regnext(scan); DEBUG_PARSE_r({ DEBUG_PARSE_MSG((scan==p ? "tail" : "")); regprop(RExC_rx, RExC_mysv, scan, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ %s (%d) %s %s\n", SvPV_nolen_const(RExC_mysv), REG_NODE_NUM(scan), (temp == NULL ? "->" : ""), (temp == NULL ? PL_reg_name[OP(val)] : "") ); }); if (temp == NULL) break; scan = temp; } if (reg_off_by_arg[OP(scan)]) { ARG_SET(scan, val - scan); } else { NEXT_OFF(scan) = val - scan; } } #ifdef DEBUGGING /* - regtail_study - set the next-pointer at the end of a node chain of p to val. - Look for optimizable sequences at the same time. - currently only looks for EXACT chains. This is experimental code. The idea is to use this routine to perform in place optimizations on branches and groups as they are constructed, with the long term intention of removing optimization from study_chunk so that it is purely analytical. Currently only used when in DEBUG mode. The macro REGTAIL_STUDY() is used to control which is which. */ /* TODO: All four parms should be const */ STATIC U8 S_regtail_study(pTHX_ RExC_state_t *pRExC_state, regnode *p, const regnode *val,U32 depth) { regnode *scan; U8 exact = PSEUDO; #ifdef EXPERIMENTAL_INPLACESCAN I32 min = 0; #endif GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGTAIL_STUDY; if (SIZE_ONLY) return exact; /* Find last node. */ scan = p; for (;;) { regnode * const temp = regnext(scan); #ifdef EXPERIMENTAL_INPLACESCAN if (PL_regkind[OP(scan)] == EXACT) { bool unfolded_multi_char; /* Unexamined in this routine */ if (join_exact(pRExC_state, scan, &min, &unfolded_multi_char, 1, val, depth+1)) return EXACT; } #endif if ( exact ) { switch (OP(scan)) { case EXACT: case EXACTL: case EXACTF: case EXACTFA_NO_TRIE: case EXACTFA: case EXACTFU: case EXACTFLU8: case EXACTFU_SS: case EXACTFL: if( exact == PSEUDO ) exact= OP(scan); else if ( exact != OP(scan) ) exact= 0; case NOTHING: break; default: exact= 0; } } DEBUG_PARSE_r({ DEBUG_PARSE_MSG((scan==p ? 
"tsdy" : "")); regprop(RExC_rx, RExC_mysv, scan, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ %s (%d) -> %s\n", SvPV_nolen_const(RExC_mysv), REG_NODE_NUM(scan), PL_reg_name[exact]); }); if (temp == NULL) break; scan = temp; } DEBUG_PARSE_r({ DEBUG_PARSE_MSG(""); regprop(RExC_rx, RExC_mysv, val, NULL, pRExC_state); Perl_re_printf( aTHX_ "~ attach to %s (%" IVdf ") offset to %" IVdf "\n", SvPV_nolen_const(RExC_mysv), (IV)REG_NODE_NUM(val), (IV)(val - scan) ); }); if (reg_off_by_arg[OP(scan)]) { ARG_SET(scan, val - scan); } else { NEXT_OFF(scan) = val - scan; } return exact; } #endif /* - regdump - dump a regexp onto Perl_debug_log in vaguely comprehensible form */ #ifdef DEBUGGING static void S_regdump_intflags(pTHX_ const char *lead, const U32 flags) { int bit; int set=0; ASSUME(REG_INTFLAGS_NAME_SIZE <= sizeof(flags)*8); for (bit=0; bit<REG_INTFLAGS_NAME_SIZE; bit++) { if (flags & (1<<bit)) { if (!set++ && lead) Perl_re_printf( aTHX_ "%s",lead); Perl_re_printf( aTHX_ "%s ",PL_reg_intflags_name[bit]); } } if (lead) { if (set) Perl_re_printf( aTHX_ "\n"); else Perl_re_printf( aTHX_ "%s[none-set]\n",lead); } } static void S_regdump_extflags(pTHX_ const char *lead, const U32 flags) { int bit; int set=0; regex_charset cs; ASSUME(REG_EXTFLAGS_NAME_SIZE <= sizeof(flags)*8); for (bit=0; bit<REG_EXTFLAGS_NAME_SIZE; bit++) { if (flags & (1<<bit)) { if ((1<<bit) & RXf_PMf_CHARSET) { /* Output separately, below */ continue; } if (!set++ && lead) Perl_re_printf( aTHX_ "%s",lead); Perl_re_printf( aTHX_ "%s ",PL_reg_extflags_name[bit]); } } if ((cs = get_regex_charset(flags)) != REGEX_DEPENDS_CHARSET) { if (!set++ && lead) { Perl_re_printf( aTHX_ "%s",lead); } switch (cs) { case REGEX_UNICODE_CHARSET: Perl_re_printf( aTHX_ "UNICODE"); break; case REGEX_LOCALE_CHARSET: Perl_re_printf( aTHX_ "LOCALE"); break; case REGEX_ASCII_RESTRICTED_CHARSET: Perl_re_printf( aTHX_ "ASCII-RESTRICTED"); break; case REGEX_ASCII_MORE_RESTRICTED_CHARSET: Perl_re_printf( aTHX_ "ASCII-MORE_RESTRICTED"); break; default: Perl_re_printf( aTHX_ "UNKNOWN CHARACTER SET"); break; } } if (lead) { if (set) Perl_re_printf( aTHX_ "\n"); else Perl_re_printf( aTHX_ "%s[none-set]\n",lead); } } #endif void Perl_regdump(pTHX_ const regexp *r) { #ifdef DEBUGGING int i; SV * const sv = sv_newmortal(); SV *dsv= sv_newmortal(); RXi_GET_DECL(r,ri); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGDUMP; (void)dumpuntil(r, ri->program, ri->program + 1, NULL, NULL, sv, 0, 0); /* Header fields of interest. */ for (i = 0; i < 2; i++) { if (r->substrs->data[i].substr) { RE_PV_QUOTED_DECL(s, 0, dsv, SvPVX_const(r->substrs->data[i].substr), RE_SV_DUMPLEN(r->substrs->data[i].substr), PL_dump_re_max_len); Perl_re_printf( aTHX_ "%s %s%s at %" IVdf "..%" UVuf " ", i ? "floating" : "anchored", s, RE_SV_TAIL(r->substrs->data[i].substr), (IV)r->substrs->data[i].min_offset, (UV)r->substrs->data[i].max_offset); } else if (r->substrs->data[i].utf8_substr) { RE_PV_QUOTED_DECL(s, 1, dsv, SvPVX_const(r->substrs->data[i].utf8_substr), RE_SV_DUMPLEN(r->substrs->data[i].utf8_substr), 30); Perl_re_printf( aTHX_ "%s utf8 %s%s at %" IVdf "..%" UVuf " ", i ? "floating" : "anchored", s, RE_SV_TAIL(r->substrs->data[i].utf8_substr), (IV)r->substrs->data[i].min_offset, (UV)r->substrs->data[i].max_offset); } } if (r->check_substr || r->check_utf8) Perl_re_printf( aTHX_ (const char *) ( r->check_substr == r->substrs->data[1].substr && r->check_utf8 == r->substrs->data[1].utf8_substr ? 
"(checking floating" : "(checking anchored")); if (r->intflags & PREGf_NOSCAN) Perl_re_printf( aTHX_ " noscan"); if (r->extflags & RXf_CHECK_ALL) Perl_re_printf( aTHX_ " isall"); if (r->check_substr || r->check_utf8) Perl_re_printf( aTHX_ ") "); if (ri->regstclass) { regprop(r, sv, ri->regstclass, NULL, NULL); Perl_re_printf( aTHX_ "stclass %s ", SvPVX_const(sv)); } if (r->intflags & PREGf_ANCH) { Perl_re_printf( aTHX_ "anchored"); if (r->intflags & PREGf_ANCH_MBOL) Perl_re_printf( aTHX_ "(MBOL)"); if (r->intflags & PREGf_ANCH_SBOL) Perl_re_printf( aTHX_ "(SBOL)"); if (r->intflags & PREGf_ANCH_GPOS) Perl_re_printf( aTHX_ "(GPOS)"); Perl_re_printf( aTHX_ " "); } if (r->intflags & PREGf_GPOS_SEEN) Perl_re_printf( aTHX_ "GPOS:%" UVuf " ", (UV)r->gofs); if (r->intflags & PREGf_SKIP) Perl_re_printf( aTHX_ "plus "); if (r->intflags & PREGf_IMPLICIT) Perl_re_printf( aTHX_ "implicit "); Perl_re_printf( aTHX_ "minlen %" IVdf " ", (IV)r->minlen); if (r->extflags & RXf_EVAL_SEEN) Perl_re_printf( aTHX_ "with eval "); Perl_re_printf( aTHX_ "\n"); DEBUG_FLAGS_r({ regdump_extflags("r->extflags: ",r->extflags); regdump_intflags("r->intflags: ",r->intflags); }); #else PERL_ARGS_ASSERT_REGDUMP; PERL_UNUSED_CONTEXT; PERL_UNUSED_ARG(r); #endif /* DEBUGGING */ } /* Should be synchronized with ANYOF_ #defines in regcomp.h */ #ifdef DEBUGGING # if _CC_WORDCHAR != 0 || _CC_DIGIT != 1 || _CC_ALPHA != 2 \ || _CC_LOWER != 3 || _CC_UPPER != 4 || _CC_PUNCT != 5 \ || _CC_PRINT != 6 || _CC_ALPHANUMERIC != 7 || _CC_GRAPH != 8 \ || _CC_CASED != 9 || _CC_SPACE != 10 || _CC_BLANK != 11 \ || _CC_XDIGIT != 12 || _CC_CNTRL != 13 || _CC_ASCII != 14 \ || _CC_VERTSPACE != 15 # error Need to adjust order of anyofs[] # endif static const char * const anyofs[] = { "\\w", "\\W", "\\d", "\\D", "[:alpha:]", "[:^alpha:]", "[:lower:]", "[:^lower:]", "[:upper:]", "[:^upper:]", "[:punct:]", "[:^punct:]", "[:print:]", "[:^print:]", "[:alnum:]", "[:^alnum:]", "[:graph:]", "[:^graph:]", "[:cased:]", "[:^cased:]", "\\s", "\\S", "[:blank:]", "[:^blank:]", "[:xdigit:]", "[:^xdigit:]", "[:cntrl:]", "[:^cntrl:]", "[:ascii:]", "[:^ascii:]", "\\v", "\\V" }; #endif /* - regprop - printable representation of opcode, with run time support */ void Perl_regprop(pTHX_ const regexp *prog, SV *sv, const regnode *o, const regmatch_info *reginfo, const RExC_state_t *pRExC_state) { #ifdef DEBUGGING int k; RXi_GET_DECL(prog,progi); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGPROP; SvPVCLEAR(sv); if (OP(o) > REGNODE_MAX) /* regnode.type is unsigned */ /* It would be nice to FAIL() here, but this may be called from regexec.c, and it would be hard to supply pRExC_state. */ Perl_croak(aTHX_ "Corrupted regexp opcode %d > %d", (int)OP(o), (int)REGNODE_MAX); sv_catpv(sv, PL_reg_name[OP(o)]); /* Take off const! */ k = PL_regkind[OP(o)]; if (k == EXACT) { sv_catpvs(sv, " "); /* Using is_utf8_string() (via PERL_PV_UNI_DETECT) * is a crude hack but it may be the best for now since * we have no flag "this EXACTish node was UTF-8" * --jhi */ pv_pretty(sv, STRING(o), STR_LEN(o), PL_dump_re_max_len, PL_colors[0], PL_colors[1], PERL_PV_ESCAPE_UNI_DETECT | PERL_PV_ESCAPE_NONASCII | PERL_PV_PRETTY_ELLIPSES | PERL_PV_PRETTY_LTGT | PERL_PV_PRETTY_NOCLEAR ); } else if (k == TRIE) { /* print the details of the trie in dumpuntil instead, as * progi->data isn't available here */ const char op = OP(o); const U32 n = ARG(o); const reg_ac_data * const ac = IS_TRIE_AC(op) ? 
(reg_ac_data *)progi->data->data[n] : NULL; const reg_trie_data * const trie = (reg_trie_data*)progi->data->data[!IS_TRIE_AC(op) ? n : ac->trie]; Perl_sv_catpvf(aTHX_ sv, "-%s",PL_reg_name[o->flags]); DEBUG_TRIE_COMPILE_r({ if (trie->jump) sv_catpvs(sv, "(JUMP)"); Perl_sv_catpvf(aTHX_ sv, "<S:%" UVuf "/%" IVdf " W:%" UVuf " L:%" UVuf "/%" UVuf " C:%" UVuf "/%" UVuf ">", (UV)trie->startstate, (IV)trie->statecount-1, /* -1 because of the unused 0 element */ (UV)trie->wordcount, (UV)trie->minlen, (UV)trie->maxlen, (UV)TRIE_CHARCOUNT(trie), (UV)trie->uniquecharcount ); }); if ( IS_ANYOF_TRIE(op) || trie->bitmap ) { sv_catpvs(sv, "["); (void) put_charclass_bitmap_innards(sv, ((IS_ANYOF_TRIE(op)) ? ANYOF_BITMAP(o) : TRIE_BITMAP(trie)), NULL, NULL, NULL, FALSE ); sv_catpvs(sv, "]"); } } else if (k == CURLY) { U32 lo = ARG1(o), hi = ARG2(o); if (OP(o) == CURLYM || OP(o) == CURLYN || OP(o) == CURLYX) Perl_sv_catpvf(aTHX_ sv, "[%d]", o->flags); /* Parenth number */ Perl_sv_catpvf(aTHX_ sv, "{%u,", (unsigned) lo); if (hi == REG_INFTY) sv_catpvs(sv, "INFTY"); else Perl_sv_catpvf(aTHX_ sv, "%u", (unsigned) hi); sv_catpvs(sv, "}"); } else if (k == WHILEM && o->flags) /* Ordinal/of */ Perl_sv_catpvf(aTHX_ sv, "[%d/%d]", o->flags & 0xf, o->flags>>4); else if (k == REF || k == OPEN || k == CLOSE || k == GROUPP || OP(o)==ACCEPT) { AV *name_list= NULL; U32 parno= OP(o) == ACCEPT ? (U32)ARG2L(o) : ARG(o); Perl_sv_catpvf(aTHX_ sv, "%" UVuf, (UV)parno); /* Parenth number */ if ( RXp_PAREN_NAMES(prog) ) { name_list= MUTABLE_AV(progi->data->data[progi->name_list_idx]); } else if ( pRExC_state ) { name_list= RExC_paren_name_list; } if (name_list) { if ( k != REF || (OP(o) < NREF)) { SV **name= av_fetch(name_list, parno, 0 ); if (name) Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name)); } else { SV *sv_dat= MUTABLE_SV(progi->data->data[ parno ]); I32 *nums=(I32*)SvPVX(sv_dat); SV **name= av_fetch(name_list, nums[0], 0 ); I32 n; if (name) { for ( n=0; n<SvIVX(sv_dat); n++ ) { Perl_sv_catpvf(aTHX_ sv, "%s%" IVdf, (n ? "," : ""), (IV)nums[n]); } Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name)); } } } if ( k == REF && reginfo) { U32 n = ARG(o); /* which paren pair */ I32 ln = prog->offs[n].start; if (prog->lastparen < n || ln == -1) Perl_sv_catpvf(aTHX_ sv, ": FAIL"); else if (ln == prog->offs[n].end) Perl_sv_catpvf(aTHX_ sv, ": ACCEPT - EMPTY STRING"); else { const char *s = reginfo->strbeg + ln; Perl_sv_catpvf(aTHX_ sv, ": "); Perl_pv_pretty( aTHX_ sv, s, prog->offs[n].end - prog->offs[n].start, 32, 0, 0, PERL_PV_ESCAPE_UNI_DETECT|PERL_PV_PRETTY_NOCLEAR|PERL_PV_PRETTY_ELLIPSES|PERL_PV_PRETTY_QUOTE ); } } } else if (k == GOSUB) { AV *name_list= NULL; if ( RXp_PAREN_NAMES(prog) ) { name_list= MUTABLE_AV(progi->data->data[progi->name_list_idx]); } else if ( pRExC_state ) { name_list= RExC_paren_name_list; } /* Paren and offset */ Perl_sv_catpvf(aTHX_ sv, "%d[%+d:%d]", (int)ARG(o),(int)ARG2L(o), (int)((o + (int)ARG2L(o)) - progi->program) ); if (name_list) { SV **name= av_fetch(name_list, ARG(o), 0 ); if (name) Perl_sv_catpvf(aTHX_ sv, " '%" SVf "'", SVfARG(*name)); } } else if (k == LOGICAL) /* 2: embedded, otherwise 1 */ Perl_sv_catpvf(aTHX_ sv, "[%d]", o->flags); else if (k == ANYOF) { const U8 flags = ANYOF_FLAGS(o); bool do_sep = FALSE; /* Do we need to separate various components of the output? 
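* (i.e. emit "][" between the bitmap portion and what follows it)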
*/ /* Set if there is still an unresolved user-defined property */ SV *unresolved = NULL; /* Things that are ignored except when the runtime locale is UTF-8 */ SV *only_utf8_locale_invlist = NULL; /* Code points that don't fit in the bitmap */ SV *nonbitmap_invlist = NULL; /* And things that aren't in the bitmap, but are small enough to be */ SV* bitmap_range_not_in_bitmap = NULL; const bool inverted = flags & ANYOF_INVERT; if (OP(o) == ANYOFL) { if (ANYOFL_UTF8_LOCALE_REQD(flags)) { sv_catpvs(sv, "{utf8-locale-reqd}"); } if (flags & ANYOFL_FOLD) { sv_catpvs(sv, "{i}"); } } /* If there is stuff outside the bitmap, get it */ if (ARG(o) != ANYOF_ONLY_HAS_BITMAP) { (void) _get_regclass_nonbitmap_data(prog, o, FALSE, &unresolved, &only_utf8_locale_invlist, &nonbitmap_invlist); /* The non-bitmap data may contain stuff that could fit in the * bitmap. This could come from a user-defined property being * finally resolved when this call was done; or much more likely * because there are matches that require UTF-8 to be valid, and so * aren't in the bitmap. This is teased apart later */ _invlist_intersection(nonbitmap_invlist, PL_InBitmap, &bitmap_range_not_in_bitmap); /* Leave just the things that don't fit into the bitmap */ _invlist_subtract(nonbitmap_invlist, PL_InBitmap, &nonbitmap_invlist); } /* Obey this flag to add all above-the-bitmap code points */ if (flags & ANYOF_MATCHES_ALL_ABOVE_BITMAP) { nonbitmap_invlist = _add_range_to_invlist(nonbitmap_invlist, NUM_ANYOF_CODE_POINTS, UV_MAX); } /* Ready to start outputting. First, the initial left bracket */ Perl_sv_catpvf(aTHX_ sv, "[%s", PL_colors[0]); /* Then all the things that could fit in the bitmap */ do_sep = put_charclass_bitmap_innards(sv, ANYOF_BITMAP(o), bitmap_range_not_in_bitmap, only_utf8_locale_invlist, o, /* Can't try inverting for a * better display if there are * things that haven't been * resolved */ unresolved != NULL); SvREFCNT_dec(bitmap_range_not_in_bitmap); /* If there are user-defined properties which haven't been defined yet, * output them. If the result is not to be inverted, it is clearest to * output them in a separate [] from the bitmap range stuff. If the * result is to be complemented, we have to show everything in one [], * as the inversion applies to the whole thing. Use {braces} to * separate them from anything in the bitmap and anything above the * bitmap. */ if (unresolved) { if (inverted) { if (! do_sep) { /* If didn't output anything in the bitmap */ sv_catpvs(sv, "^"); } sv_catpvs(sv, "{"); } else if (do_sep) { Perl_sv_catpvf(aTHX_ sv,"%s][%s",PL_colors[1],PL_colors[0]); } sv_catsv(sv, unresolved); if (inverted) { sv_catpvs(sv, "}"); } do_sep = ! inverted; } /* And, finally, add the above-the-bitmap stuff */ if (nonbitmap_invlist && _invlist_len(nonbitmap_invlist)) { SV* contents; /* See if truncation size is overridden */ const STRLEN dump_len = (PL_dump_re_max_len > 256) ? PL_dump_re_max_len : 256; /* This is output in a separate [] */ if (do_sep) { Perl_sv_catpvf(aTHX_ sv,"%s][%s",PL_colors[1],PL_colors[0]); } /* And, for easy of understanding, it is shown in the * uncomplemented form if possible. The one exception being if * there are unresolved items, where the inversion has to be * delayed until runtime */ if (inverted && ! unresolved) { _invlist_invert(nonbitmap_invlist); _invlist_subtract(nonbitmap_invlist, PL_InBitmap, &nonbitmap_invlist); } contents = invlist_contents(nonbitmap_invlist, FALSE /* output suitable for catsv */ ); /* If the output is shorter than the permissible maximum, just do it. 
*/ if (SvCUR(contents) <= dump_len) { sv_catsv(sv, contents); } else { const char * contents_string = SvPVX(contents); STRLEN i = dump_len; /* Otherwise, start at the permissible max and work back to the * first break possibility */ while (i > 0 && contents_string[i] != ' ') { i--; } if (i == 0) { /* Fail-safe. Use the max if we couldn't find a legal break */ i = dump_len; } sv_catpvn(sv, contents_string, i); sv_catpvs(sv, "..."); } SvREFCNT_dec_NN(contents); SvREFCNT_dec_NN(nonbitmap_invlist); } /* And finally the matching, closing ']' */ Perl_sv_catpvf(aTHX_ sv, "%s]", PL_colors[1]); SvREFCNT_dec(unresolved); } else if (k == POSIXD || k == NPOSIXD) { U8 index = FLAGS(o) * 2; if (index < C_ARRAY_LENGTH(anyofs)) { if (*anyofs[index] != '[') { sv_catpv(sv, "["); } sv_catpv(sv, anyofs[index]); if (*anyofs[index] != '[') { sv_catpv(sv, "]"); } } else { Perl_sv_catpvf(aTHX_ sv, "[illegal type=%d])", index); } } else if (k == BOUND || k == NBOUND) { /* Must be synced with order of 'bound_type' in regcomp.h */ const char * const bounds[] = { "", /* Traditional */ "{gcb}", "{lb}", "{sb}", "{wb}" }; assert(FLAGS(o) < C_ARRAY_LENGTH(bounds)); sv_catpv(sv, bounds[FLAGS(o)]); } else if (k == BRANCHJ && (OP(o) == UNLESSM || OP(o) == IFMATCH)) Perl_sv_catpvf(aTHX_ sv, "[%d]", -(o->flags)); else if (OP(o) == SBOL) Perl_sv_catpvf(aTHX_ sv, " /%s/", o->flags ? "\\A" : "^"); /* add on the verb argument if there is one */ if ( ( k == VERB || OP(o) == ACCEPT || OP(o) == OPFAIL ) && o->flags) { if ( ARG(o) ) Perl_sv_catpvf(aTHX_ sv, ":%" SVf, SVfARG((MUTABLE_SV(progi->data->data[ ARG( o ) ])))); else sv_catpvs(sv, ":NULL"); } #else PERL_UNUSED_CONTEXT; PERL_UNUSED_ARG(sv); PERL_UNUSED_ARG(o); PERL_UNUSED_ARG(prog); PERL_UNUSED_ARG(reginfo); PERL_UNUSED_ARG(pRExC_state); #endif /* DEBUGGING */ } SV * Perl_re_intuit_string(pTHX_ REGEXP * const r) { /* Assume that RE_INTUIT is set */ struct regexp *const prog = ReANY(r); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_RE_INTUIT_STRING; PERL_UNUSED_CONTEXT; DEBUG_COMPILE_r( { const char * const s = SvPV_nolen_const(RX_UTF8(r) ? prog->check_utf8 : prog->check_substr); if (!PL_colorset) reginitcolors(); Perl_re_printf( aTHX_ "%sUsing REx %ssubstr:%s \"%s%.60s%s%s\"\n", PL_colors[4], RX_UTF8(r) ? "utf8 " : "", PL_colors[5],PL_colors[0], s, PL_colors[1], (strlen(s) > PL_dump_re_max_len ? "..." : "")); } ); /* use UTF8 check substring if regexp pattern itself is in UTF8 */ return RX_UTF8(r) ? prog->check_utf8 : prog->check_substr; } /* pregfree() handles refcounting and freeing the perl core regexp structure. When it is necessary to actually free the structure the first thing it does is call the 'free' method of the regexp_engine associated to the regexp, allowing the handling of the void *pprivate; member first. (This routine is not overridable by extensions, which is why the extensions free is called first.) See regdupe and regdupe_internal if you change anything here. 
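   In practice the teardown is two-staged, as the code just below shows:
   Perl_pregfree() only drops one reference on the REGEXP SV, and the real
   cleanup runs in Perl_pregfree2() once the last reference is gone.  A
   lightweight copy (one with mother_re set; see reg_temp_copy() below)
   skips the engine 'free' call entirely and just releases its mother_re
   reference instead.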
*/ #ifndef PERL_IN_XSUB_RE void Perl_pregfree(pTHX_ REGEXP *r) { SvREFCNT_dec(r); } void Perl_pregfree2(pTHX_ REGEXP *rx) { struct regexp *const r = ReANY(rx); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_PREGFREE2; if (r->mother_re) { ReREFCNT_dec(r->mother_re); } else { CALLREGFREE_PVT(rx); /* free the private data */ SvREFCNT_dec(RXp_PAREN_NAMES(r)); } if (r->substrs) { int i; for (i = 0; i < 2; i++) { SvREFCNT_dec(r->substrs->data[i].substr); SvREFCNT_dec(r->substrs->data[i].utf8_substr); } Safefree(r->substrs); } RX_MATCH_COPY_FREE(rx); #ifdef PERL_ANY_COW SvREFCNT_dec(r->saved_copy); #endif Safefree(r->offs); SvREFCNT_dec(r->qr_anoncv); if (r->recurse_locinput) Safefree(r->recurse_locinput); } /* reg_temp_copy() Copy ssv to dsv, both of which should of type SVt_REGEXP or SVt_PVLV, except that dsv will be created if NULL. This function is used in two main ways. First to implement $r = qr/....; $s = $$r; Secondly, it is used as a hacky workaround to the structural issue of match results being stored in the regexp structure which is in turn stored in PL_curpm/PL_reg_curpm. The problem is that due to qr// the pattern could be PL_curpm in multiple contexts, and could require multiple result sets being associated with the pattern simultaneously, such as when doing a recursive match with (??{$qr}) The solution is to make a lightweight copy of the regexp structure when a qr// is returned from the code executed by (??{$qr}) this lightweight copy doesn't actually own any of its data except for the starp/end and the actual regexp structure itself. */ REGEXP * Perl_reg_temp_copy(pTHX_ REGEXP *dsv, REGEXP *ssv) { struct regexp *drx; struct regexp *const srx = ReANY(ssv); const bool islv = dsv && SvTYPE(dsv) == SVt_PVLV; PERL_ARGS_ASSERT_REG_TEMP_COPY; if (!dsv) dsv = (REGEXP*) newSV_type(SVt_REGEXP); else { SvOK_off((SV *)dsv); if (islv) { /* For PVLVs, the head (sv_any) points to an XPVLV, while * the LV's xpvlenu_rx will point to a regexp body, which * we allocate here */ REGEXP *temp = (REGEXP *)newSV_type(SVt_REGEXP); assert(!SvPVX(dsv)); ((XPV*)SvANY(dsv))->xpv_len_u.xpvlenu_rx = temp->sv_any; temp->sv_any = NULL; SvFLAGS(temp) = (SvFLAGS(temp) & ~SVTYPEMASK) | SVt_NULL; SvREFCNT_dec_NN(temp); /* SvCUR still resides in the xpvlv struct, so the regexp copy- ing below will not set it. */ SvCUR_set(dsv, SvCUR(ssv)); } } /* This ensures that SvTHINKFIRST(sv) is true, and hence that sv_force_normal(sv) is called. */ SvFAKE_on(dsv); drx = ReANY(dsv); SvFLAGS(dsv) |= SvFLAGS(ssv) & (SVf_POK|SVp_POK|SVf_UTF8); SvPV_set(dsv, RX_WRAPPED(ssv)); /* We share the same string buffer as the original regexp, on which we hold a reference count, incremented when mother_re is set below. The string pointer is copied here, being part of the regexp struct. */ memcpy(&(drx->xpv_cur), &(srx->xpv_cur), sizeof(regexp) - STRUCT_OFFSET(regexp, xpv_cur)); if (!islv) SvLEN_set(dsv, 0); if (srx->offs) { const I32 npar = srx->nparens+1; Newx(drx->offs, npar, regexp_paren_pair); Copy(srx->offs, drx->offs, npar, regexp_paren_pair); } if (srx->substrs) { int i; Newx(drx->substrs, 1, struct reg_substr_data); StructCopy(srx->substrs, drx->substrs, struct reg_substr_data); for (i = 0; i < 2; i++) { SvREFCNT_inc_void(drx->substrs->data[i].substr); SvREFCNT_inc_void(drx->substrs->data[i].utf8_substr); } /* check_substr and check_utf8, if non-NULL, point to either their anchored or float namesakes, and don't hold a second reference. 
*/ } RX_MATCH_COPIED_off(dsv); #ifdef PERL_ANY_COW drx->saved_copy = NULL; #endif drx->mother_re = ReREFCNT_inc(srx->mother_re ? srx->mother_re : ssv); SvREFCNT_inc_void(drx->qr_anoncv); if (srx->recurse_locinput) Newxz(drx->recurse_locinput,srx->nparens + 1,char *); return dsv; } #endif /* regfree_internal() Free the private data in a regexp. This is overloadable by extensions. Perl takes care of the regexp structure in pregfree(), this covers the *pprivate pointer which technically perl doesn't know about, however of course we have to handle the regexp_internal structure when no extension is in use. Note this is called before freeing anything in the regexp structure. */ void Perl_regfree_internal(pTHX_ REGEXP * const rx) { struct regexp *const r = ReANY(rx); RXi_GET_DECL(r,ri); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_REGFREE_INTERNAL; DEBUG_COMPILE_r({ if (!PL_colorset) reginitcolors(); { SV *dsv= sv_newmortal(); RE_PV_QUOTED_DECL(s, RX_UTF8(rx), dsv, RX_PRECOMP(rx), RX_PRELEN(rx), PL_dump_re_max_len); Perl_re_printf( aTHX_ "%sFreeing REx:%s %s\n", PL_colors[4],PL_colors[5],s); } }); #ifdef RE_TRACK_PATTERN_OFFSETS if (ri->u.offsets) Safefree(ri->u.offsets); /* 20010421 MJD */ #endif if (ri->code_blocks) S_free_codeblocks(aTHX_ ri->code_blocks); if (ri->data) { int n = ri->data->count; while (--n >= 0) { /* If you add a ->what type here, update the comment in regcomp.h */ switch (ri->data->what[n]) { case 'a': case 'r': case 's': case 'S': case 'u': SvREFCNT_dec(MUTABLE_SV(ri->data->data[n])); break; case 'f': Safefree(ri->data->data[n]); break; case 'l': case 'L': break; case 'T': { /* Aho Corasick add-on structure for a trie node. Used in stclass optimization only */ U32 refcount; reg_ac_data *aho=(reg_ac_data*)ri->data->data[n]; #ifdef USE_ITHREADS dVAR; #endif OP_REFCNT_LOCK; refcount = --aho->refcount; OP_REFCNT_UNLOCK; if ( !refcount ) { PerlMemShared_free(aho->states); PerlMemShared_free(aho->fail); /* do this last!!!! */ PerlMemShared_free(ri->data->data[n]); /* we should only ever get called once, so * assert as much, and also guard the free * which /might/ happen twice. At the least * it will make code anlyzers happy and it * doesn't cost much. - Yves */ assert(ri->regstclass); if (ri->regstclass) { PerlMemShared_free(ri->regstclass); ri->regstclass = 0; } } } break; case 't': { /* trie structure. */ U32 refcount; reg_trie_data *trie=(reg_trie_data*)ri->data->data[n]; #ifdef USE_ITHREADS dVAR; #endif OP_REFCNT_LOCK; refcount = --trie->refcount; OP_REFCNT_UNLOCK; if ( !refcount ) { PerlMemShared_free(trie->charmap); PerlMemShared_free(trie->states); PerlMemShared_free(trie->trans); if (trie->bitmap) PerlMemShared_free(trie->bitmap); if (trie->jump) PerlMemShared_free(trie->jump); PerlMemShared_free(trie->wordinfo); /* do this last!!!! */ PerlMemShared_free(ri->data->data[n]); } } break; default: Perl_croak(aTHX_ "panic: regfree data code '%c'", ri->data->what[n]); } } Safefree(ri->data->what); Safefree(ri->data); } Safefree(ri); } #define av_dup_inc(s,t) MUTABLE_AV(sv_dup_inc((const SV *)s,t)) #define hv_dup_inc(s,t) MUTABLE_HV(sv_dup_inc((const SV *)s,t)) #define SAVEPVN(p,n) ((p) ? savepvn(p,n) : NULL) /* re_dup_guts - duplicate a regexp. This routine is expected to clone a given regexp structure. It is only compiled under USE_ITHREADS. After all of the core data stored in struct regexp is duplicated the regexp_engine.dupe method is used to copy any private data stored in the *pprivate pointer. This allows extensions to handle any duplication it needs to do. 
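   Concretely, in Perl_re_dup_guts() below the hook is reached through the
   CALLREGDUPE_PVT() macro, guarded by 'if (ret->pprivate)', so an engine
   whose pprivate pointer is NULL is never asked to duplicate anything.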
See pregfree() and regfree_internal() if you change anything here. */ #if defined(USE_ITHREADS) #ifndef PERL_IN_XSUB_RE void Perl_re_dup_guts(pTHX_ const REGEXP *sstr, REGEXP *dstr, CLONE_PARAMS *param) { dVAR; I32 npar; const struct regexp *r = ReANY(sstr); struct regexp *ret = ReANY(dstr); PERL_ARGS_ASSERT_RE_DUP_GUTS; npar = r->nparens+1; Newx(ret->offs, npar, regexp_paren_pair); Copy(r->offs, ret->offs, npar, regexp_paren_pair); if (ret->substrs) { /* Do it this way to avoid reading from *r after the StructCopy(). That way, if any of the sv_dup_inc()s dislodge *r from the L1 cache, it doesn't matter. */ int i; const bool anchored = r->check_substr ? r->check_substr == r->substrs->data[0].substr : r->check_utf8 == r->substrs->data[0].utf8_substr; Newx(ret->substrs, 1, struct reg_substr_data); StructCopy(r->substrs, ret->substrs, struct reg_substr_data); for (i = 0; i < 2; i++) { ret->substrs->data[i].substr = sv_dup_inc(ret->substrs->data[i].substr, param); ret->substrs->data[i].utf8_substr = sv_dup_inc(ret->substrs->data[i].utf8_substr, param); } /* check_substr and check_utf8, if non-NULL, point to either their anchored or float namesakes, and don't hold a second reference. */ if (ret->check_substr) { if (anchored) { assert(r->check_utf8 == r->substrs->data[0].utf8_substr); ret->check_substr = ret->substrs->data[0].substr; ret->check_utf8 = ret->substrs->data[0].utf8_substr; } else { assert(r->check_substr == r->substrs->data[1].substr); assert(r->check_utf8 == r->substrs->data[1].utf8_substr); ret->check_substr = ret->substrs->data[1].substr; ret->check_utf8 = ret->substrs->data[1].utf8_substr; } } else if (ret->check_utf8) { if (anchored) { ret->check_utf8 = ret->substrs->data[0].utf8_substr; } else { ret->check_utf8 = ret->substrs->data[1].utf8_substr; } } } RXp_PAREN_NAMES(ret) = hv_dup_inc(RXp_PAREN_NAMES(ret), param); ret->qr_anoncv = MUTABLE_CV(sv_dup_inc((const SV *)ret->qr_anoncv, param)); if (r->recurse_locinput) Newxz(ret->recurse_locinput,r->nparens + 1,char *); if (ret->pprivate) RXi_SET(ret,CALLREGDUPE_PVT(dstr,param)); if (RX_MATCH_COPIED(dstr)) ret->subbeg = SAVEPVN(ret->subbeg, ret->sublen); else ret->subbeg = NULL; #ifdef PERL_ANY_COW ret->saved_copy = NULL; #endif /* Whether mother_re be set or no, we need to copy the string. We cannot refrain from copying it when the storage points directly to our mother regexp, because that's 1: a buffer in a different thread 2: something we no longer hold a reference on so we need to copy it locally. */ RX_WRAPPED(dstr) = SAVEPVN(RX_WRAPPED_const(sstr), SvCUR(sstr)+1); ret->mother_re = NULL; } #endif /* PERL_IN_XSUB_RE */ /* regdupe_internal() This is the internal complement to regdupe() which is used to copy the structure pointed to by the *pprivate pointer in the regexp. This is the core version of the extension overridable cloning hook. The regexp structure being duplicated will be copied by perl prior to this and will be provided as the regexp *r argument, however with the /old/ structures pprivate pointer value. Thus this routine may override any copying normally done by perl. It returns a pointer to the new regexp_internal structure. 
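   A custom engine's dupe hook must have the same shape as the core version
   defined just below; as an illustrative sketch only (the name is made up):

       void * my_engine_dupe(pTHX_ REGEXP * const rx, CLONE_PARAMS *param);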
*/ void * Perl_regdupe_internal(pTHX_ REGEXP * const rx, CLONE_PARAMS *param) { dVAR; struct regexp *const r = ReANY(rx); regexp_internal *reti; int len; RXi_GET_DECL(r,ri); PERL_ARGS_ASSERT_REGDUPE_INTERNAL; len = ProgLen(ri); Newxc(reti, sizeof(regexp_internal) + len*sizeof(regnode), char, regexp_internal); Copy(ri->program, reti->program, len+1, regnode); if (ri->code_blocks) { int n; Newx(reti->code_blocks, 1, struct reg_code_blocks); Newx(reti->code_blocks->cb, ri->code_blocks->count, struct reg_code_block); Copy(ri->code_blocks->cb, reti->code_blocks->cb, ri->code_blocks->count, struct reg_code_block); for (n = 0; n < ri->code_blocks->count; n++) reti->code_blocks->cb[n].src_regex = (REGEXP*) sv_dup_inc((SV*)(ri->code_blocks->cb[n].src_regex), param); reti->code_blocks->count = ri->code_blocks->count; reti->code_blocks->refcnt = 1; } else reti->code_blocks = NULL; reti->regstclass = NULL; if (ri->data) { struct reg_data *d; const int count = ri->data->count; int i; Newxc(d, sizeof(struct reg_data) + count*sizeof(void *), char, struct reg_data); Newx(d->what, count, U8); d->count = count; for (i = 0; i < count; i++) { d->what[i] = ri->data->what[i]; switch (d->what[i]) { /* see also regcomp.h and regfree_internal() */ case 'a': /* actually an AV, but the dup function is identical. values seem to be "plain sv's" generally. */ case 'r': /* a compiled regex (but still just another SV) */ case 's': /* an RV (currently only used for an RV to an AV by the ANYOF code) this use case should go away, the code could have used 'a' instead - see S_set_ANYOF_arg() for array contents. */ case 'S': /* actually an SV, but the dup function is identical. */ case 'u': /* actually an HV, but the dup function is identical. values are "plain sv's" */ d->data[i] = sv_dup_inc((const SV *)ri->data->data[i], param); break; case 'f': /* Synthetic Start Class - "Fake" charclass we generate to optimize * patterns which could start with several different things. Pre-TRIE * this was more important than it is now, however this still helps * in some places, for instance /x?a+/ might produce a SSC equivalent * to [xa]. This is used by Perl_re_intuit_start() and S_find_byclass() * in regexec.c */ /* This is cheating. */ Newx(d->data[i], 1, regnode_ssc); StructCopy(ri->data->data[i], d->data[i], regnode_ssc); reti->regstclass = (regnode*)d->data[i]; break; case 'T': /* AHO-CORASICK fail table */ /* Trie stclasses are readonly and can thus be shared * without duplication. We free the stclass in pregfree * when the corresponding reg_ac_data struct is freed. */ reti->regstclass= ri->regstclass; /* FALLTHROUGH */ case 't': /* TRIE transition table */ OP_REFCNT_LOCK; ((reg_trie_data*)ri->data->data[i])->refcount++; OP_REFCNT_UNLOCK; /* FALLTHROUGH */ case 'l': /* (?{...}) or (??{ ... 
}) code (cb->block) */ case 'L': /* same when RExC_pm_flags & PMf_HAS_CV and code is not from another regexp */ d->data[i] = ri->data->data[i]; break; default: Perl_croak(aTHX_ "panic: re_dup_guts unknown data code '%c'", ri->data->what[i]); } } reti->data = d; } else reti->data = NULL; reti->name_list_idx = ri->name_list_idx; #ifdef RE_TRACK_PATTERN_OFFSETS if (ri->u.offsets) { Newx(reti->u.offsets, 2*len+1, U32); Copy(ri->u.offsets, reti->u.offsets, 2*len+1, U32); } #else SetProgLen(reti,len); #endif return (void*)reti; } #endif /* USE_ITHREADS */ #ifndef PERL_IN_XSUB_RE /* - regnext - dig the "next" pointer out of a node */ regnode * Perl_regnext(pTHX_ regnode *p) { I32 offset; if (!p) return(NULL); if (OP(p) > REGNODE_MAX) { /* regnode.type is unsigned */ Perl_croak(aTHX_ "Corrupted regexp opcode %d > %d", (int)OP(p), (int)REGNODE_MAX); } offset = (reg_off_by_arg[OP(p)] ? ARG(p) : NEXT_OFF(p)); if (offset == 0) return(NULL); return(p+offset); } #endif STATIC void S_re_croak2(pTHX_ bool utf8, const char* pat1,const char* pat2,...) { va_list args; STRLEN l1 = strlen(pat1); STRLEN l2 = strlen(pat2); char buf[512]; SV *msv; const char *message; PERL_ARGS_ASSERT_RE_CROAK2; if (l1 > 510) l1 = 510; if (l1 + l2 > 510) l2 = 510 - l1; Copy(pat1, buf, l1 , char); Copy(pat2, buf + l1, l2 , char); buf[l1 + l2] = '\n'; buf[l1 + l2 + 1] = '\0'; va_start(args, pat2); msv = vmess(buf, &args); va_end(args); message = SvPV_const(msv,l1); if (l1 > 512) l1 = 512; Copy(message, buf, l1 , char); /* l1-1 to avoid \n */ Perl_croak(aTHX_ "%" UTF8f, UTF8fARG(utf8, l1-1, buf)); } /* XXX Here's a total kludge. But we need to re-enter for swash routines. */ #ifndef PERL_IN_XSUB_RE void Perl_save_re_context(pTHX) { I32 nparens = -1; I32 i; /* Save $1..$n (#18107: UTF-8 s/(\w+)/uc($1)/e); AMS 20021106. */ if (PL_curpm) { const REGEXP * const rx = PM_GETRE(PL_curpm); if (rx) nparens = RX_NPARENS(rx); } /* RT #124109. This is a complete hack; in the SWASHNEW case we know * that PL_curpm will be null, but that utf8.pm and the modules it * loads will only use $1..$3. * The t/porting/re_context.t test file checks this assumption. */ if (nparens == -1) nparens = 3; for (i = 1; i <= nparens; i++) { char digits[TYPE_CHARS(long)]; const STRLEN len = my_snprintf(digits, sizeof(digits), "%lu", (long)i); GV *const *const gvp = (GV**)hv_fetch(PL_defstash, digits, len, 0); if (gvp) { GV * const gv = *gvp; if (SvTYPE(gv) == SVt_PVGV && GvSV(gv)) save_scalar(gv); } } } #endif #ifdef DEBUGGING STATIC void S_put_code_point(pTHX_ SV *sv, UV c) { PERL_ARGS_ASSERT_PUT_CODE_POINT; if (c > 255) { Perl_sv_catpvf(aTHX_ sv, "\\x{%04" UVXf "}", c); } else if (isPRINT(c)) { const char string = (char) c; /* We use {phrase} as metanotation in the class, so also escape literal * braces */ if (isBACKSLASHED_PUNCT(c) || c == '{' || c == '}') sv_catpvs(sv, "\\"); sv_catpvn(sv, &string, 1); } else if (isMNEMONIC_CNTRL(c)) { Perl_sv_catpvf(aTHX_ sv, "%s", cntrl_to_mnemonic((U8) c)); } else { Perl_sv_catpvf(aTHX_ sv, "\\x%02X", (U8) c); } } #define MAX_PRINT_A MAX_PRINT_A_FOR_USE_ONLY_BY_REGCOMP_DOT_C STATIC void S_put_range(pTHX_ SV *sv, UV start, const UV end, const bool allow_literals) { /* Appends to 'sv' a displayable version of the range of code points from * 'start' to 'end'. Mnemonics (like '\r') are used for the few controls * that have them, when they occur at the beginning or end of the range. 
* It uses hex to output the remaining code points, unless 'allow_literals' * is true, in which case the printable ASCII ones are output as-is (though * some of these will be escaped by put_code_point()). * * NOTE: This is designed only for printing ranges of code points that fit * inside an ANYOF bitmap. Higher code points are simply suppressed */ const unsigned int min_range_count = 3; assert(start <= end); PERL_ARGS_ASSERT_PUT_RANGE; while (start <= end) { UV this_end; const char * format; if (end - start < min_range_count) { /* Output chars individually when they occur in short ranges */ for (; start <= end; start++) { put_code_point(sv, start); } break; } /* If permitted by the input options, and there is a possibility that * this range contains a printable literal, look to see if there is * one. */ if (allow_literals && start <= MAX_PRINT_A) { /* If the character at the beginning of the range isn't an ASCII * printable, effectively split the range into two parts: * 1) the portion before the first such printable, * 2) the rest * and output them separately. */ if (! isPRINT_A(start)) { UV temp_end = start + 1; /* There is no point looking beyond the final possible * printable, in MAX_PRINT_A */ UV max = MIN(end, MAX_PRINT_A); while (temp_end <= max && ! isPRINT_A(temp_end)) { temp_end++; } /* Here, temp_end points to one beyond the first printable if * found, or to one beyond 'max' if not. If none found, make * sure that we use the entire range */ if (temp_end > MAX_PRINT_A) { temp_end = end + 1; } /* Output the first part of the split range: the part that * doesn't have printables, with the parameter set to not look * for literals (otherwise we would infinitely recurse) */ put_range(sv, start, temp_end - 1, FALSE); /* The 2nd part of the range (if any) starts here. */ start = temp_end; /* We do a continue, instead of dropping down, because even if * the 2nd part is non-empty, it could be so short that we want * to output it as individual characters, as tested for at the * top of this loop. */ continue; } /* Here, 'start' is a printable ASCII. If it is an alphanumeric, * output a sub-range of just the digits or letters, then process * the remaining portion as usual. */ if (isALPHANUMERIC_A(start)) { UV mask = (isDIGIT_A(start)) ? _CC_DIGIT : isUPPER_A(start) ? _CC_UPPER : _CC_LOWER; UV temp_end = start + 1; /* Find the end of the sub-range that includes just the * characters in the same class as the first character in it */ while (temp_end <= end && _generic_isCC_A(temp_end, mask)) { temp_end++; } temp_end--; /* For short ranges, don't duplicate the code above to output * them; just call recursively */ if (temp_end - start < min_range_count) { put_range(sv, start, temp_end, FALSE); } else { /* Output as a range */ put_code_point(sv, start); sv_catpvs(sv, "-"); put_code_point(sv, temp_end); } start = temp_end + 1; continue; } /* We output any other printables as individual characters */ if (isPUNCT_A(start) || isSPACE_A(start)) { while (start <= end && (isPUNCT_A(start) || isSPACE_A(start))) { put_code_point(sv, start); start++; } continue; } } /* End of looking for literals */ /* Here is not to output as a literal. Some control characters have * mnemonic names. Split off any of those at the beginning and end of * the range to print mnemonically. 
It isn't possible for many of * these to be in a row, so this won't overwhelm with output */ if ( start <= end && (isMNEMONIC_CNTRL(start) || isMNEMONIC_CNTRL(end))) { while (isMNEMONIC_CNTRL(start) && start <= end) { put_code_point(sv, start); start++; } /* If this didn't take care of the whole range ... */ if (start <= end) { /* Look backwards from the end to find the final non-mnemonic * */ UV temp_end = end; while (isMNEMONIC_CNTRL(temp_end)) { temp_end--; } /* And separately output the interior range that doesn't start * or end with mnemonics */ put_range(sv, start, temp_end, FALSE); /* Then output the mnemonic trailing controls */ start = temp_end + 1; while (start <= end) { put_code_point(sv, start); start++; } break; } } /* As a final resort, output the range or subrange as hex. */ this_end = (end < NUM_ANYOF_CODE_POINTS) ? end : NUM_ANYOF_CODE_POINTS - 1; #if NUM_ANYOF_CODE_POINTS > 256 format = (this_end < 256) ? "\\x%02" UVXf "-\\x%02" UVXf : "\\x{%04" UVXf "}-\\x{%04" UVXf "}"; #else format = "\\x%02" UVXf "-\\x%02" UVXf; #endif GCC_DIAG_IGNORE(-Wformat-nonliteral); Perl_sv_catpvf(aTHX_ sv, format, start, this_end); GCC_DIAG_RESTORE; break; } } STATIC void S_put_charclass_bitmap_innards_invlist(pTHX_ SV *sv, SV* invlist) { /* Concatenate onto the PV in 'sv' a displayable form of the inversion list * 'invlist' */ UV start, end; bool allow_literals = TRUE; PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS_INVLIST; /* Generally, it is more readable if printable characters are output as * literals, but if a range (nearly) spans all of them, it's best to output * it as a single range. This code will use a single range if all but 2 * ASCII printables are in it */ invlist_iterinit(invlist); while (invlist_iternext(invlist, &start, &end)) { /* If the range starts beyond the final printable, it doesn't have any * in it */ if (start > MAX_PRINT_A) { break; } /* In both ASCII and EBCDIC, a SPACE is the lowest printable. To span * all but two, the range must start and end no later than 2 from * either end */ if (start < ' ' + 2 && end > MAX_PRINT_A - 2) { if (end > MAX_PRINT_A) { end = MAX_PRINT_A; } if (start < ' ') { start = ' '; } if (end - start >= MAX_PRINT_A - ' ' - 2) { allow_literals = FALSE; } break; } } invlist_iterfinish(invlist); /* Here we have figured things out. Output each range */ invlist_iterinit(invlist); while (invlist_iternext(invlist, &start, &end)) { if (start >= NUM_ANYOF_CODE_POINTS) { break; } put_range(sv, start, end, allow_literals); } invlist_iterfinish(invlist); return; } STATIC SV* S_put_charclass_bitmap_innards_common(pTHX_ SV* invlist, /* The bitmap */ SV* posixes, /* Under /l, things like [:word:], \S */ SV* only_utf8, /* Under /d, matches iff the target is UTF-8 */ SV* not_utf8, /* /d, matches iff the target isn't UTF-8 */ SV* only_utf8_locale, /* Under /l, matches if the locale is UTF-8 */ const bool invert /* Is the result to be inverted? */ ) { /* Create and return an SV containing a displayable version of the bitmap * and associated information determined by the input parameters. If the * output would have been only the inversion indicator '^', NULL is instead * returned. 
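 * As a rough illustration (the real output also embeds PL_colors escape
 * sequences around the markers), an inverted /d class might come back
 * looking something like
 *      ^A-Z{utf8}\xE0-\xFF
 * where everything after the {utf8} marker matches only when the target
 * string is in UTF-8.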
*/ SV * output; PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS_COMMON; if (invert) { output = newSVpvs("^"); } else { output = newSVpvs(""); } /* First, the code points in the bitmap that are unconditionally there */ put_charclass_bitmap_innards_invlist(output, invlist); /* Traditionally, these have been placed after the main code points */ if (posixes) { sv_catsv(output, posixes); } if (only_utf8 && _invlist_len(only_utf8)) { Perl_sv_catpvf(aTHX_ output, "%s{utf8}%s", PL_colors[1], PL_colors[0]); put_charclass_bitmap_innards_invlist(output, only_utf8); } if (not_utf8 && _invlist_len(not_utf8)) { Perl_sv_catpvf(aTHX_ output, "%s{not utf8}%s", PL_colors[1], PL_colors[0]); put_charclass_bitmap_innards_invlist(output, not_utf8); } if (only_utf8_locale && _invlist_len(only_utf8_locale)) { Perl_sv_catpvf(aTHX_ output, "%s{utf8 locale}%s", PL_colors[1], PL_colors[0]); put_charclass_bitmap_innards_invlist(output, only_utf8_locale); /* This is the only list in this routine that can legally contain code * points outside the bitmap range. The call just above to * 'put_charclass_bitmap_innards_invlist' will simply suppress them, so * output them here. There's about a half-dozen possible, and none in * contiguous ranges longer than 2 */ if (invlist_highest(only_utf8_locale) >= NUM_ANYOF_CODE_POINTS) { UV start, end; SV* above_bitmap = NULL; _invlist_subtract(only_utf8_locale, PL_InBitmap, &above_bitmap); invlist_iterinit(above_bitmap); while (invlist_iternext(above_bitmap, &start, &end)) { UV i; for (i = start; i <= end; i++) { put_code_point(output, i); } } invlist_iterfinish(above_bitmap); SvREFCNT_dec_NN(above_bitmap); } } if (invert && SvCUR(output) == 1) { return NULL; } return output; } STATIC bool S_put_charclass_bitmap_innards(pTHX_ SV *sv, char *bitmap, SV *nonbitmap_invlist, SV *only_utf8_locale_invlist, const regnode * const node, const bool force_as_is_display) { /* Appends to 'sv' a displayable version of the innards of the bracketed * character class defined by the other arguments: * 'bitmap' points to the bitmap. * 'nonbitmap_invlist' is an inversion list of the code points that are in * the bitmap range, but for some reason aren't in the bitmap; NULL if * none. The reasons for this could be that they require some * condition such as the target string being or not being in UTF-8 * (under /d), or because they came from a user-defined property that * was not resolved at the time of the regex compilation (under /u) * 'only_utf8_locale_invlist' is an inversion list of the code points that * are valid only if the runtime locale is a UTF-8 one; NULL if none * 'node' is the regex pattern node. It is needed only when the above two * parameters are not null, and is passed so that this routine can * tease apart the various reasons for them. * 'force_as_is_display' is TRUE if this routine should definitely NOT try * to invert things to see if that leads to a cleaner display. If * FALSE, this routine is free to use its judgment about doing this. * * It returns TRUE if there was actually something output. (It may be that * the bitmap, etc is empty.) * * When called for outputting the bitmap of a non-ANYOF node, just pass the * bitmap, with the succeeding parameters set to NULL, and the final one to * FALSE. */ /* In general, it tries to display the 'cleanest' representation of the * innards, choosing whether to display them inverted or not, regardless of * whether the class itself is to be inverted. 
However, there are some * cases where it can't try inverting, as what actually matches isn't known * until runtime, and hence the inversion isn't either. */ bool inverting_allowed = ! force_as_is_display; int i; STRLEN orig_sv_cur = SvCUR(sv); SV* invlist; /* Inversion list we accumulate of code points that are unconditionally matched */ SV* only_utf8 = NULL; /* Under /d, list of matches iff the target is UTF-8 */ SV* not_utf8 = NULL; /* /d, list of matches iff the target isn't UTF-8 */ SV* posixes = NULL; /* Under /l, string of things like [:word:], \D */ SV* only_utf8_locale = NULL; /* Under /l, list of matches if the locale is UTF-8 */ SV* as_is_display; /* The output string when we take the inputs literally */ SV* inverted_display; /* The output string when we invert the inputs */ U8 flags = (node) ? ANYOF_FLAGS(node) : 0; bool invert = cBOOL(flags & ANYOF_INVERT); /* Is the input to be inverted to match? */ /* We are biased in favor of displaying things without them being inverted, * as that is generally easier to understand */ const int bias = 5; PERL_ARGS_ASSERT_PUT_CHARCLASS_BITMAP_INNARDS; /* Start off with whatever code points are passed in. (We clone, so we * don't change the caller's list) */ if (nonbitmap_invlist) { assert(invlist_highest(nonbitmap_invlist) < NUM_ANYOF_CODE_POINTS); invlist = invlist_clone(nonbitmap_invlist); } else { /* Worst case size is every other code point is matched */ invlist = _new_invlist(NUM_ANYOF_CODE_POINTS / 2); } if (flags) { if (OP(node) == ANYOFD) { /* This flag indicates that the code points below 0x100 in the * nonbitmap list are precisely the ones that match only when the * target is UTF-8 (they should all be non-ASCII). */ if (flags & ANYOF_SHARED_d_UPPER_LATIN1_UTF8_STRING_MATCHES_non_d_RUNTIME_USER_PROP) { _invlist_intersection(invlist, PL_UpperLatin1, &only_utf8); _invlist_subtract(invlist, only_utf8, &invlist); } /* And this flag for matching all non-ASCII 0xFF and below */ if (flags & ANYOF_SHARED_d_MATCHES_ALL_NON_UTF8_NON_ASCII_non_d_WARN_SUPER) { not_utf8 = invlist_clone(PL_UpperLatin1); } } else if (OP(node) == ANYOFL) { /* If either of these flags are set, what matches isn't * determinable except during execution, so don't know enough here * to invert */ if (flags & (ANYOFL_FOLD|ANYOF_MATCHES_POSIXL)) { inverting_allowed = FALSE; } /* What the posix classes match also varies at runtime, so these * will be output symbolically. */ if (ANYOF_POSIXL_TEST_ANY_SET(node)) { int i; posixes = newSVpvs(""); for (i = 0; i < ANYOF_POSIXL_MAX; i++) { if (ANYOF_POSIXL_TEST(node,i)) { sv_catpv(posixes, anyofs[i]); } } } } } /* Accumulate the bit map into the unconditional match list */ for (i = 0; i < NUM_ANYOF_CODE_POINTS; i++) { if (BITMAP_TEST(bitmap, i)) { int start = i++; for (; i < NUM_ANYOF_CODE_POINTS && BITMAP_TEST(bitmap, i); i++) { /* empty */ } invlist = _add_range_to_invlist(invlist, start, i-1); } } /* Make sure that the conditional match lists don't have anything in them * that match unconditionally; otherwise the output is quite confusing. * This could happen if the code that populates these misses some * duplication. 
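 * The _invlist_subtract() calls just below enforce that invariant by
 * removing from each conditional list everything that the unconditional
 * list already matches.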
*/ if (only_utf8) { _invlist_subtract(only_utf8, invlist, &only_utf8); } if (not_utf8) { _invlist_subtract(not_utf8, invlist, &not_utf8); } if (only_utf8_locale_invlist) { /* Since this list is passed in, we have to make a copy before * modifying it */ only_utf8_locale = invlist_clone(only_utf8_locale_invlist); _invlist_subtract(only_utf8_locale, invlist, &only_utf8_locale); /* And, it can get really weird for us to try outputting an inverted * form of this list when it has things above the bitmap, so don't even * try */ if (invlist_highest(only_utf8_locale) >= NUM_ANYOF_CODE_POINTS) { inverting_allowed = FALSE; } } /* Calculate what the output would be if we take the input as-is */ as_is_display = put_charclass_bitmap_innards_common(invlist, posixes, only_utf8, not_utf8, only_utf8_locale, invert); /* If have to take the output as-is, just do that */ if (! inverting_allowed) { if (as_is_display) { sv_catsv(sv, as_is_display); SvREFCNT_dec_NN(as_is_display); } } else { /* But otherwise, create the output again on the inverted input, and use whichever version is shorter */ int inverted_bias, as_is_bias; /* We will apply our bias to whichever of the the results doesn't have * the '^' */ if (invert) { invert = FALSE; as_is_bias = bias; inverted_bias = 0; } else { invert = TRUE; as_is_bias = 0; inverted_bias = bias; } /* Now invert each of the lists that contribute to the output, * excluding from the result things outside the possible range */ /* For the unconditional inversion list, we have to add in all the * conditional code points, so that when inverted, they will be gone * from it */ _invlist_union(only_utf8, invlist, &invlist); _invlist_union(not_utf8, invlist, &invlist); _invlist_union(only_utf8_locale, invlist, &invlist); _invlist_invert(invlist); _invlist_intersection(invlist, PL_InBitmap, &invlist); if (only_utf8) { _invlist_invert(only_utf8); _invlist_intersection(only_utf8, PL_UpperLatin1, &only_utf8); } else if (not_utf8) { /* If a code point matches iff the target string is not in UTF-8, * then complementing the result has it not match iff not in UTF-8, * which is the same thing as matching iff it is UTF-8. */ only_utf8 = not_utf8; not_utf8 = NULL; } if (only_utf8_locale) { _invlist_invert(only_utf8_locale); _invlist_intersection(only_utf8_locale, PL_InBitmap, &only_utf8_locale); } inverted_display = put_charclass_bitmap_innards_common( invlist, posixes, only_utf8, not_utf8, only_utf8_locale, invert); /* Use the shortest representation, taking into account our bias * against showing it inverted */ if ( inverted_display && ( ! as_is_display || ( SvCUR(inverted_display) + inverted_bias < SvCUR(as_is_display) + as_is_bias))) { sv_catsv(sv, inverted_display); } else if (as_is_display) { sv_catsv(sv, as_is_display); } SvREFCNT_dec(as_is_display); SvREFCNT_dec(inverted_display); } SvREFCNT_dec_NN(invlist); SvREFCNT_dec(only_utf8); SvREFCNT_dec(not_utf8); SvREFCNT_dec(posixes); SvREFCNT_dec(only_utf8_locale); return SvCUR(sv) > orig_sv_cur; } #define CLEAR_OPTSTART \ if (optstart) STMT_START { \ DEBUG_OPTIMISE_r(Perl_re_printf( aTHX_ \ " (%" IVdf " nodes)\n", (IV)(node - optstart))); \ optstart=NULL; \ } STMT_END #define DUMPUNTIL(b,e) \ CLEAR_OPTSTART; \ node=dumpuntil(r,start,(b),(e),last,sv,indent+1,depth+1); STATIC const regnode * S_dumpuntil(pTHX_ const regexp *r, const regnode *start, const regnode *node, const regnode *last, const regnode *plast, SV* sv, I32 indent, U32 depth) { U8 op = PSEUDO; /* Arbitrary non-END op. 
*/ const regnode *next; const regnode *optstart= NULL; RXi_GET_DECL(r,ri); GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_DUMPUNTIL; #ifdef DEBUG_DUMPUNTIL Perl_re_printf( aTHX_ "--- %d : %d - %d - %d\n",indent,node-start, last ? last-start : 0,plast ? plast-start : 0); #endif if (plast && plast < last) last= plast; while (PL_regkind[op] != END && (!last || node < last)) { assert(node); /* While that wasn't END last time... */ NODE_ALIGN(node); op = OP(node); if (op == CLOSE || op == WHILEM) indent--; next = regnext((regnode *)node); /* Where, what. */ if (OP(node) == OPTIMIZED) { if (!optstart && RE_DEBUG_FLAG(RE_DEBUG_COMPILE_OPTIMISE)) optstart = node; else goto after_print; } else CLEAR_OPTSTART; regprop(r, sv, node, NULL, NULL); Perl_re_printf( aTHX_ "%4" IVdf ":%*s%s", (IV)(node - start), (int)(2*indent + 1), "", SvPVX_const(sv)); if (OP(node) != OPTIMIZED) { if (next == NULL) /* Next ptr. */ Perl_re_printf( aTHX_ " (0)"); else if (PL_regkind[(U8)op] == BRANCH && PL_regkind[OP(next)] != BRANCH ) Perl_re_printf( aTHX_ " (FAIL)"); else Perl_re_printf( aTHX_ " (%" IVdf ")", (IV)(next - start)); Perl_re_printf( aTHX_ "\n"); } after_print: if (PL_regkind[(U8)op] == BRANCHJ) { assert(next); { const regnode *nnode = (OP(next) == LONGJMP ? regnext((regnode *)next) : next); if (last && nnode > last) nnode = last; DUMPUNTIL(NEXTOPER(NEXTOPER(node)), nnode); } } else if (PL_regkind[(U8)op] == BRANCH) { assert(next); DUMPUNTIL(NEXTOPER(node), next); } else if ( PL_regkind[(U8)op] == TRIE ) { const regnode *this_trie = node; const char op = OP(node); const U32 n = ARG(node); const reg_ac_data * const ac = op>=AHOCORASICK ? (reg_ac_data *)ri->data->data[n] : NULL; const reg_trie_data * const trie = (reg_trie_data*)ri->data->data[op<AHOCORASICK ? n : ac->trie]; #ifdef DEBUGGING AV *const trie_words = MUTABLE_AV(ri->data->data[n + TRIE_WORDS_OFFSET]); #endif const regnode *nextbranch= NULL; I32 word_idx; SvPVCLEAR(sv); for (word_idx= 0; word_idx < (I32)trie->wordcount; word_idx++) { SV ** const elem_ptr = av_fetch(trie_words,word_idx,0); Perl_re_indentf( aTHX_ "%s ", indent+3, elem_ptr ? pv_pretty(sv, SvPV_nolen_const(*elem_ptr), SvCUR(*elem_ptr), PL_dump_re_max_len, PL_colors[0], PL_colors[1], (SvUTF8(*elem_ptr) ? PERL_PV_ESCAPE_UNI : 0) | PERL_PV_PRETTY_ELLIPSES | PERL_PV_PRETTY_LTGT ) : "???" ); if (trie->jump) { U16 dist= trie->jump[word_idx+1]; Perl_re_printf( aTHX_ "(%" UVuf ")\n", (UV)((dist ? this_trie + dist : next) - start)); if (dist) { if (!nextbranch) nextbranch= this_trie + trie->jump[0]; DUMPUNTIL(this_trie + dist, nextbranch); } if (nextbranch && PL_regkind[OP(nextbranch)]==BRANCH) nextbranch= regnext((regnode *)nextbranch); } else { Perl_re_printf( aTHX_ "\n"); } } if (last && next > last) node= last; else node= next; } else if ( op == CURLY ) { /* "next" might be very big: optimizer */ DUMPUNTIL(NEXTOPER(node) + EXTRA_STEP_2ARGS, NEXTOPER(node) + EXTRA_STEP_2ARGS + 1); } else if (PL_regkind[(U8)op] == CURLY && op != CURLYX) { assert(next); DUMPUNTIL(NEXTOPER(node) + EXTRA_STEP_2ARGS, next); } else if ( op == PLUS || op == STAR) { DUMPUNTIL(NEXTOPER(node), NEXTOPER(node) + 1); } else if (PL_regkind[(U8)op] == ANYOF) { /* arglen 1 + class block */ node += 1 + ((ANYOF_FLAGS(node) & ANYOF_MATCHES_POSIXL) ? ANYOF_POSIXL_SKIP : ANYOF_SKIP); node = NEXTOPER(node); } else if (PL_regkind[(U8)op] == EXACT) { /* Literal string, where present. 
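The string bytes are stored in-line in extra regnodes, so NODE_SZ_STR() just below is used to step over them before advancing.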
*/ node += NODE_SZ_STR(node) - 1; node = NEXTOPER(node); } else { node = NEXTOPER(node); node += regarglen[(U8)op]; } if (op == CURLYX || op == OPEN) indent++; } CLEAR_OPTSTART; #ifdef DEBUG_DUMPUNTIL Perl_re_printf( aTHX_ "--- %d\n", (int)indent); #endif return node; } #endif /* DEBUGGING */ /* * ex: set ts=8 sts=4 sw=4 et: */
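
/* The routine that follows parses the \N escape.  Some illustrative forms it
 * has to distinguish (examples only, not an exhaustive list):
 *
 *      \N           matches [^\n]; generates a REG_ANY node
 *      \N{3}        also [^\n]: a {...} that is a legal quantifier is a
 *                   quantifier, not a name
 *      \N{U+41}     the single code point U+0041
 *      \N{U+41.42}  the sequence U+0041 U+0042 (multiple code points)
 *      \N{}         empty; accepted for backwards compatibility
 */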
STATIC bool
S_grok_bslash_N(pTHX_ RExC_state_t *pRExC_state,
                regnode ** node_p,
                UV * code_point_p,
                int * cp_count,
                I32 * flagp,
                const bool strict,
                const U32 depth
    )
{
 /* This routine teases apart the various meanings of \N and returns
  * accordingly.  The input parameters constrain which meaning(s) is/are valid
  * in the current context.
  *
  * Exactly one of <node_p> and <code_point_p> must be non-NULL.
  *
  * If <code_point_p> is not NULL, the context is expecting the result to be a
  * single code point.  If this \N instance turns out to be a single code
  * point, the function returns TRUE and sets *code_point_p to that code
  * point.
  *
  * If <node_p> is not NULL, the context is expecting the result to be one of
  * the things representable by a regnode.  If this \N instance turns out to
  * be one such, the function generates the regnode, returns TRUE and sets
  * *node_p to point to that regnode.
  *
  * If this instance of \N isn't legal in any context, this function will
  * generate a fatal error and not return.
  *
  * On input, RExC_parse should point to the first char following the \N at
  * the time of the call.  On successful return, RExC_parse will have been
  * updated to point to just after the sequence identified by this routine.
  * Also *flagp has been updated as needed.
  *
  * When there is some problem with the current context and this \N instance,
  * the function returns FALSE, without advancing RExC_parse and without
  * setting *node_p, *code_point_p, or *flagp.
  *
  * If <cp_count> is not NULL, the caller wants to know the length (in code
  * points) that this \N sequence matches.  This is set even if the function
  * returns FALSE, as detailed below.
  *
  * There are 5 possibilities here, as detailed in the next 5 paragraphs.
  *
  * Probably the most common case is for the \N to specify a single code
  * point.  *cp_count will be set to 1, and *code_point_p will be set to that
  * code point.
  *
  * Another possibility is for the input to be an empty \N{}, which for
  * backwards compatibility we accept.  *cp_count will be set to 0.  *node_p
  * will be set to a generated NOTHING node.
  *
  * Still another possibility is for the \N to mean [^\n].  *cp_count will be
  * set to 0.  *node_p will be set to a generated REG_ANY node.
  *
  * The fourth possibility is that \N resolves to a sequence of more than one
  * code point.  *cp_count will be set to the number of code points in the
  * sequence.  *node_p will be set to a generated node returned by this
  * function calling S_reg().
  *
  * The final possibility is that it is premature to be calling this function;
  * pass1 needs to be restarted.  This can happen when parsing changes from
  * /d to /u rules, or when the pattern needs to be upgraded to UTF-8.  The
  * latter occurs only when the fourth possibility would otherwise be in
  * effect, and is because one of those code points requires the pattern to be
  * recompiled as UTF-8.  The function returns FALSE, and sets the
  * RESTART_PASS1 and NEED_UTF8 flags in *flagp, as appropriate.  When this
  * happens, the caller needs to desist from continuing parsing, and return
  * this information to its caller.  This is not set when there is only one
  * code point, as this can be called as part of an ANYOF node, and they can
  * store above-Latin1 code points without the pattern having to be in UTF-8.
  *
  * For non-single-quoted regexes, the tokenizer has resolved character and
  * sequence names inside \N{...} into their Unicode values, normalizing the
  * result into what we should see here: '\N{U+c1.c2...}', where c1... are the
  * hex-represented code points in the sequence.
This is done there because * the names can vary based on what charnames pragma is in scope at the time, * so we need a way to take a snapshot of what they resolve to at the time of * the original parse. [perl #56444]. * * That parsing is skipped for single-quoted regexes, so we may here get * '\N{NAME}'. This is a fatal error. These names have to be resolved by the * parser. But if the single-quoted regex is something like '\N{U+41}', that * is legal and handled here. The code point is Unicode, and has to be * translated into the native character set for non-ASCII platforms. */ char * endbrace; /* points to '}' following the name */ char *endchar; /* Points to '.' or '}' ending cur char in the input stream */ char* p = RExC_parse; /* Temporary */ GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_GROK_BSLASH_N; GET_RE_DEBUG_FLAGS; assert(cBOOL(node_p) ^ cBOOL(code_point_p)); /* Exactly one should be set */ assert(! (node_p && cp_count)); /* At most 1 should be set */ if (cp_count) { /* Initialize return for the most common case */ *cp_count = 1; } /* The [^\n] meaning of \N ignores spaces and comments under the /x * modifier. The other meanings do not, so use a temporary until we find * out which we are being called with */ skip_to_be_ignored_text(pRExC_state, &p, FALSE /* Don't force to /x */ ); /* Disambiguate between \N meaning a named character versus \N meaning * [^\n]. The latter is assumed when the {...} following the \N is a legal * quantifier, or there is no '{' at all */ if (*p != '{' || regcurly(p)) { RExC_parse = p; if (cp_count) { *cp_count = -1; } if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state, REG_ANY); *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); Set_Node_Length(*node_p, 1); /* MJD */ return TRUE; } /* Here, we have decided it should be a named character or sequence */ /* The test above made sure that the next real character is a '{', but * under the /x modifier, it could be separated by space (or a comment and * \n) and this is not allowed (for consistency with \x{...} and the * tokenizer handling of \N{NAME}). */ if (*RExC_parse != '{') { vFAIL("Missing braces on \\N{}"); } RExC_parse++; /* Skip past the '{' */ endbrace = strchr(RExC_parse, '}'); if (! endbrace) { /* no trailing brace */ vFAIL2("Missing right brace on \\%c{}", 'N'); } else if (!( endbrace == RExC_parse /* nothing between the {} */ || memBEGINs(RExC_parse, /* U+ (bad hex is checked below for a better error msg) */ (STRLEN) (RExC_end - RExC_parse), "U+"))) { RExC_parse = endbrace; /* position msg's '<--HERE' */ vFAIL("\\N{NAME} must be resolved by the lexer"); } REQUIRE_UNI_RULES(flagp, FALSE); /* Unicode named chars imply Unicode semantics */ if (endbrace == RExC_parse) { /* empty: \N{} */ if (strict) { RExC_parse++; /* Position after the "}" */ vFAIL("Zero length \\N{}"); } if (cp_count) { *cp_count = 0; } nextchar(pRExC_state); if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state,NOTHING); return TRUE; } RExC_parse += 2; /* Skip past the 'U+' */ /* Because toke.c has generated a special construct for us guaranteed not * to have NULs, we can use a str function */ endchar = RExC_parse + strcspn(RExC_parse, ".}"); /* Code points are separated by dots. If none, there is only one code * point, and is terminated by the brace */ if (endchar >= endbrace) { STRLEN length_of_hex; I32 grok_hex_flags; /* Here, exactly one code point. If that isn't what is wanted, fail */ if (! 
code_point_p) { RExC_parse = p; return FALSE; } /* Convert code point from hex */ length_of_hex = (STRLEN)(endchar - RExC_parse); grok_hex_flags = PERL_SCAN_ALLOW_UNDERSCORES | PERL_SCAN_DISALLOW_PREFIX /* No errors in the first pass (See [perl * #122671].) We let the code below find the * errors when there are multiple chars. */ | ((SIZE_ONLY) ? PERL_SCAN_SILENT_ILLDIGIT : 0); /* This routine is the one place where both single- and double-quotish * \N{U+xxxx} are evaluated. The value is a Unicode code point which * must be converted to native. */ *code_point_p = UNI_TO_NATIVE(grok_hex(RExC_parse, &length_of_hex, &grok_hex_flags, NULL)); /* The tokenizer should have guaranteed validity, but it's possible to * bypass it by using single quoting, so check. Don't do the check * here when there are multiple chars; we do it below anyway. */ if (length_of_hex == 0 || length_of_hex != (STRLEN)(endchar - RExC_parse) ) { RExC_parse += length_of_hex; /* Includes all the valid */ RExC_parse += (RExC_orig_utf8) /* point to after 1st invalid */ ? UTF8SKIP(RExC_parse) : 1; /* Guard against malformed utf8 */ if (RExC_parse >= endchar) { RExC_parse = endchar; } vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = endbrace + 1; return TRUE; } else { /* Is a multiple character sequence */ SV * substitute_parse; STRLEN len; char *orig_end = RExC_end; char *save_start = RExC_start; I32 flags; /* Count the code points, if desired, in the sequence */ if (cp_count) { *cp_count = 0; while (RExC_parse < endbrace) { /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); (*cp_count)++; } } /* Fail if caller doesn't want to handle a multi-code-point sequence. * But don't backup up the pointer if the caller wants to know how many * code points there are (they can then handle things) */ if (! node_p) { if (! cp_count) { RExC_parse = p; } return FALSE; } /* What is done here is to convert this to a sub-pattern of the form * \x{char1}\x{char2}... and then call reg recursively to parse it * (enclosing in "(?: ... )" ). That way, it retains its atomicness, * while not having to worry about special handling that some code * points may have. */ substitute_parse = newSVpvs("?:"); while (RExC_parse < endbrace) { /* Convert to notation the rest of the code understands */ sv_catpv(substitute_parse, "\\x{"); sv_catpvn(substitute_parse, RExC_parse, endchar - RExC_parse); sv_catpv(substitute_parse, "}"); /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); } sv_catpv(substitute_parse, ")"); len = SvCUR(substitute_parse); /* Don't allow empty number */ if (len < (STRLEN) 8) { RExC_parse = endbrace; vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = RExC_start = RExC_adjusted_start = SvPV_nolen(substitute_parse); RExC_end = RExC_parse + len; /* The values are Unicode, and therefore not subject to recoding, but * have to be converted to native on a non-Unicode (meaning non-ASCII) * platform. */ #ifdef EBCDIC RExC_recode_x_to_native = 1; #endif *node_p = reg(pRExC_state, 1, &flags, depth+1); /* Restore the saved values */ RExC_start = RExC_adjusted_start = save_start; RExC_parse = endbrace; RExC_end = orig_end; #ifdef EBCDIC RExC_recode_x_to_native = 0; #endif SvREFCNT_dec_NN(substitute_parse); if (! 
*node_p) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return FALSE; } FAIL2("panic: reg returned NULL to grok_bslash_N, flags=%#" UVxf, (UV) flags); } *flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED); nextchar(pRExC_state); return TRUE; } }
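
/* The variant of S_grok_bslash_N that follows differs from the one above in
 * how it locates the '}' closing a \N{...}.  The version above uses
 *
 *      endbrace = strchr(RExC_parse, '}');
 *
 * which keeps scanning until it finds a '}' or a NUL, and so is not bounded
 * by RExC_end.  The version below instead uses the length-limited
 *
 *      endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);
 *
 * so that a pattern lacking the closing brace can never cause the search to
 * look past the end of the parse buffer. */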
STATIC bool
S_grok_bslash_N(pTHX_ RExC_state_t *pRExC_state,
                regnode ** node_p,
                UV * code_point_p,
                int * cp_count,
                I32 * flagp,
                const bool strict,
                const U32 depth
    )
{
 /* This routine teases apart the various meanings of \N and returns
  * accordingly.  The input parameters constrain which meaning(s) is/are valid
  * in the current context.
  *
  * Exactly one of <node_p> and <code_point_p> must be non-NULL.
  *
  * If <code_point_p> is not NULL, the context is expecting the result to be a
  * single code point.  If this \N instance turns out to be a single code
  * point, the function returns TRUE and sets *code_point_p to that code
  * point.
  *
  * If <node_p> is not NULL, the context is expecting the result to be one of
  * the things representable by a regnode.  If this \N instance turns out to
  * be one such, the function generates the regnode, returns TRUE and sets
  * *node_p to point to that regnode.
  *
  * If this instance of \N isn't legal in any context, this function will
  * generate a fatal error and not return.
  *
  * On input, RExC_parse should point to the first char following the \N at
  * the time of the call.  On successful return, RExC_parse will have been
  * updated to point to just after the sequence identified by this routine.
  * Also *flagp has been updated as needed.
  *
  * When there is some problem with the current context and this \N instance,
  * the function returns FALSE, without advancing RExC_parse and without
  * setting *node_p, *code_point_p, or *flagp.
  *
  * If <cp_count> is not NULL, the caller wants to know the length (in code
  * points) that this \N sequence matches.  This is set even if the function
  * returns FALSE, as detailed below.
  *
  * There are 5 possibilities here, as detailed in the next 5 paragraphs.
  *
  * Probably the most common case is for the \N to specify a single code
  * point.  *cp_count will be set to 1, and *code_point_p will be set to that
  * code point.
  *
  * Another possibility is for the input to be an empty \N{}, which for
  * backwards compatibility we accept.  *cp_count will be set to 0.  *node_p
  * will be set to a generated NOTHING node.
  *
  * Still another possibility is for the \N to mean [^\n].  *cp_count will be
  * set to 0.  *node_p will be set to a generated REG_ANY node.
  *
  * The fourth possibility is that \N resolves to a sequence of more than one
  * code point.  *cp_count will be set to the number of code points in the
  * sequence.  *node_p will be set to a generated node returned by this
  * function calling S_reg().
  *
  * The final possibility is that it is premature to be calling this function;
  * pass1 needs to be restarted.  This can happen when parsing changes from
  * /d to /u rules, or when the pattern needs to be upgraded to UTF-8.  The
  * latter occurs only when the fourth possibility would otherwise be in
  * effect, and is because one of those code points requires the pattern to be
  * recompiled as UTF-8.  The function returns FALSE, and sets the
  * RESTART_PASS1 and NEED_UTF8 flags in *flagp, as appropriate.  When this
  * happens, the caller needs to desist from continuing parsing, and return
  * this information to its caller.  This is not set when there is only one
  * code point, as this can be called as part of an ANYOF node, and they can
  * store above-Latin1 code points without the pattern having to be in UTF-8.
  *
  * For non-single-quoted regexes, the tokenizer has resolved character and
  * sequence names inside \N{...} into their Unicode values, normalizing the
  * result into what we should see here: '\N{U+c1.c2...}', where c1... are the
  * hex-represented code points in the sequence.
This is done there because * the names can vary based on what charnames pragma is in scope at the time, * so we need a way to take a snapshot of what they resolve to at the time of * the original parse. [perl #56444]. * * That parsing is skipped for single-quoted regexes, so we may here get * '\N{NAME}'. This is a fatal error. These names have to be resolved by the * parser. But if the single-quoted regex is something like '\N{U+41}', that * is legal and handled here. The code point is Unicode, and has to be * translated into the native character set for non-ASCII platforms. */ char * endbrace; /* points to '}' following the name */ char *endchar; /* Points to '.' or '}' ending cur char in the input stream */ char* p = RExC_parse; /* Temporary */ GET_RE_DEBUG_FLAGS_DECL; PERL_ARGS_ASSERT_GROK_BSLASH_N; GET_RE_DEBUG_FLAGS; assert(cBOOL(node_p) ^ cBOOL(code_point_p)); /* Exactly one should be set */ assert(! (node_p && cp_count)); /* At most 1 should be set */ if (cp_count) { /* Initialize return for the most common case */ *cp_count = 1; } /* The [^\n] meaning of \N ignores spaces and comments under the /x * modifier. The other meanings do not, so use a temporary until we find * out which we are being called with */ skip_to_be_ignored_text(pRExC_state, &p, FALSE /* Don't force to /x */ ); /* Disambiguate between \N meaning a named character versus \N meaning * [^\n]. The latter is assumed when the {...} following the \N is a legal * quantifier, or there is no '{' at all */ if (*p != '{' || regcurly(p)) { RExC_parse = p; if (cp_count) { *cp_count = -1; } if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state, REG_ANY); *flagp |= HASWIDTH|SIMPLE; MARK_NAUGHTY(1); Set_Node_Length(*node_p, 1); /* MJD */ return TRUE; } /* Here, we have decided it should be a named character or sequence */ /* The test above made sure that the next real character is a '{', but * under the /x modifier, it could be separated by space (or a comment and * \n) and this is not allowed (for consistency with \x{...} and the * tokenizer handling of \N{NAME}). */ if (*RExC_parse != '{') { vFAIL("Missing braces on \\N{}"); } RExC_parse++; /* Skip past the '{' */ endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse); if (! endbrace) { /* no trailing brace */ vFAIL2("Missing right brace on \\%c{}", 'N'); } else if (!( endbrace == RExC_parse /* nothing between the {} */ || memBEGINs(RExC_parse, /* U+ (bad hex is checked below for a better error msg) */ (STRLEN) (RExC_end - RExC_parse), "U+"))) { RExC_parse = endbrace; /* position msg's '<--HERE' */ vFAIL("\\N{NAME} must be resolved by the lexer"); } REQUIRE_UNI_RULES(flagp, FALSE); /* Unicode named chars imply Unicode semantics */ if (endbrace == RExC_parse) { /* empty: \N{} */ if (strict) { RExC_parse++; /* Position after the "}" */ vFAIL("Zero length \\N{}"); } if (cp_count) { *cp_count = 0; } nextchar(pRExC_state); if (! node_p) { return FALSE; } *node_p = reg_node(pRExC_state,NOTHING); return TRUE; } RExC_parse += 2; /* Skip past the 'U+' */ /* Because toke.c has generated a special construct for us guaranteed not * to have NULs, we can use a str function */ endchar = RExC_parse + strcspn(RExC_parse, ".}"); /* Code points are separated by dots. If none, there is only one code * point, and is terminated by the brace */ if (endchar >= endbrace) { STRLEN length_of_hex; I32 grok_hex_flags; /* Here, exactly one code point. If that isn't what is wanted, fail */ if (! 
code_point_p) { RExC_parse = p; return FALSE; } /* Convert code point from hex */ length_of_hex = (STRLEN)(endchar - RExC_parse); grok_hex_flags = PERL_SCAN_ALLOW_UNDERSCORES | PERL_SCAN_DISALLOW_PREFIX /* No errors in the first pass (See [perl * #122671].) We let the code below find the * errors when there are multiple chars. */ | ((SIZE_ONLY) ? PERL_SCAN_SILENT_ILLDIGIT : 0); /* This routine is the one place where both single- and double-quotish * \N{U+xxxx} are evaluated. The value is a Unicode code point which * must be converted to native. */ *code_point_p = UNI_TO_NATIVE(grok_hex(RExC_parse, &length_of_hex, &grok_hex_flags, NULL)); /* The tokenizer should have guaranteed validity, but it's possible to * bypass it by using single quoting, so check. Don't do the check * here when there are multiple chars; we do it below anyway. */ if (length_of_hex == 0 || length_of_hex != (STRLEN)(endchar - RExC_parse) ) { RExC_parse += length_of_hex; /* Includes all the valid */ RExC_parse += (RExC_orig_utf8) /* point to after 1st invalid */ ? UTF8SKIP(RExC_parse) : 1; /* Guard against malformed utf8 */ if (RExC_parse >= endchar) { RExC_parse = endchar; } vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = endbrace + 1; return TRUE; } else { /* Is a multiple character sequence */ SV * substitute_parse; STRLEN len; char *orig_end = RExC_end; char *save_start = RExC_start; I32 flags; /* Count the code points, if desired, in the sequence */ if (cp_count) { *cp_count = 0; while (RExC_parse < endbrace) { /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); (*cp_count)++; } } /* Fail if caller doesn't want to handle a multi-code-point sequence. * But don't backup up the pointer if the caller wants to know how many * code points there are (they can then handle things) */ if (! node_p) { if (! cp_count) { RExC_parse = p; } return FALSE; } /* What is done here is to convert this to a sub-pattern of the form * \x{char1}\x{char2}... and then call reg recursively to parse it * (enclosing in "(?: ... )" ). That way, it retains its atomicness, * while not having to worry about special handling that some code * points may have. */ substitute_parse = newSVpvs("?:"); while (RExC_parse < endbrace) { /* Convert to notation the rest of the code understands */ sv_catpv(substitute_parse, "\\x{"); sv_catpvn(substitute_parse, RExC_parse, endchar - RExC_parse); sv_catpv(substitute_parse, "}"); /* Point to the beginning of the next character in the sequence. */ RExC_parse = endchar + 1; endchar = RExC_parse + strcspn(RExC_parse, ".}"); } sv_catpv(substitute_parse, ")"); len = SvCUR(substitute_parse); /* Don't allow empty number */ if (len < (STRLEN) 8) { RExC_parse = endbrace; vFAIL("Invalid hexadecimal number in \\N{U+...}"); } RExC_parse = RExC_start = RExC_adjusted_start = SvPV_nolen(substitute_parse); RExC_end = RExC_parse + len; /* The values are Unicode, and therefore not subject to recoding, but * have to be converted to native on a non-Unicode (meaning non-ASCII) * platform. */ #ifdef EBCDIC RExC_recode_x_to_native = 1; #endif *node_p = reg(pRExC_state, 1, &flags, depth+1); /* Restore the saved values */ RExC_start = RExC_adjusted_start = save_start; RExC_parse = endbrace; RExC_end = orig_end; #ifdef EBCDIC RExC_recode_x_to_native = 0; #endif SvREFCNT_dec_NN(substitute_parse); if (! 
*node_p) { if (flags & (RESTART_PASS1|NEED_UTF8)) { *flagp = flags & (RESTART_PASS1|NEED_UTF8); return FALSE; } FAIL2("panic: reg returned NULL to grok_bslash_N, flags=%#" UVxf, (UV) flags); } *flagp |= flags&(HASWIDTH|SPSTART|SIMPLE|POSTPONED); nextchar(pRExC_state); return TRUE; } }
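The doc comment in the function above describes how a normalized "\N{U+c1.c2...}" payload is walked: strcspn(RExC_parse, ".}") finds the end of each dot-separated hex run until the closing brace. A minimal standalone sketch of that loop follows; all names here are invented for illustration, not taken from regcomp.c.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy walker for a normalized "\N{U+c1.c2...}" payload: each hex run
 * ends at '.' or at the closing '}', exactly the stop set the routine
 * above hands to strcspn().  All names are invented for this sketch. */
static void walk_uplus_payload(const char *p, const char *endbrace)
{
    while (p < endbrace) {
        size_t run = strcspn(p, ".}");          /* end of this hex run */
        unsigned long cp = strtoul(p, NULL, 16);
        printf("code point: U+%04lX\n", cp);
        p += run + 1;                           /* step past '.' / '}' */
    }
}

int main(void)
{
    const char *payload = "41.1F600.7A}";
    walk_uplus_payload(payload, strchr(payload, '}'));
    return 0;   /* prints U+0041, U+1F600, U+007A */
}
```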
{'added': [(12059, " endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);"), (12776, ' char * endbrace = NULL;'), (12778, ' if (RExC_parse < RExC_end) {'), (12779, " endbrace = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);"), (12780, ' }'), (16317, "\t\t e = (char *) memchr(RExC_parse, '}', RExC_end - RExC_parse);")], 'deleted': [(12059, " endbrace = strchr(RExC_parse, '}');"), (12776, ' char * endbrace;'), (12778, " endbrace = strchr(RExC_parse, '}');"), (16315, "\t\t e = strchr(RExC_parse, '}');")]}
num_lines_added: 6
num_lines_deleted: 4
num_lines_in_file: 12,668
num_tokens_in_file: 72,494
num_lines_in_method: 159
num_tokens_in_method: 774
method_complexity: 32
repo: https://github.com/Perl/perl5
cve_id: CVE-2018-18313
cwe_id: CWE-125
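The diff recorded for this row is essentially the whole CVE-2018-18313 fix: the regex pattern buffer is delimited by RExC_end rather than NUL-terminated, so strchr could scan past its end (the CWE-125 out-of-bounds read), whereas memchr takes an explicit byte count and stops at the boundary. A minimal sketch of the pattern, with a hypothetical buffer standing in for the engine's:

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical length-delimited parse buffer; note it is NOT
 * NUL-terminated, just like the regex engine's pattern buffer. */
static const char buf[] = { '\\', 'N', '{', 'U', '+', '4', '1' };

int main(void)
{
    const char *parse = buf + 3;            /* just past the '{'    */
    const char *end   = buf + sizeof buf;   /* one past the buffer  */

    /* Unsafe here: strchr assumes NUL termination and would keep
     * reading beyond `end` until it happens to hit '}' or a NUL:
     *     char *brace = strchr(parse, '}');    // CWE-125 hazard
     * Safe: memchr is bounded by an explicit byte count and stops
     * at `end` even when no '}' is present. */
    const char *brace = memchr(parse, '}', (size_t)(end - parse));
    if (brace == NULL)
        fprintf(stderr, "Missing right brace on \\N{}\n");
    return 0;
}
```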
id: 1,203
file_name: jpgfile.c
programming_language: C++
method_name: ReadJpegSections
//-------------------------------------------------------------------------- // Program to pull the information out of various types of EXIF digital // camera files and show it in a reasonably consistent way // // This module handles basic Jpeg file handling // // Matthias Wandel //-------------------------------------------------------------------------- #include "jhead.h" // Storage for simplified info extracted from file. ImageInfo_t ImageInfo; static Section_t * Sections = NULL; static int SectionsAllocated; static int SectionsRead; static int HaveAll; #define PSEUDO_IMAGE_MARKER 0x123; // Extra value. //-------------------------------------------------------------------------- // Get 16 bits motorola order (always) for jpeg header stuff. //-------------------------------------------------------------------------- static int Get16m(const void * Short) { return (((uchar *)Short)[0] << 8) | ((uchar *)Short)[1]; } //-------------------------------------------------------------------------- // Process a COM marker. // We want to print out the marker contents as legible text; // we must guard against random junk and varying newline representations. //-------------------------------------------------------------------------- static void process_COM (const uchar * Data, int length) { int ch; char Comment[MAX_COMMENT_SIZE+1]; int nch; int a; nch = 0; if (length > MAX_COMMENT_SIZE) length = MAX_COMMENT_SIZE; // Truncate if it won't fit in our structure. for (a=2;a<length;a++){ ch = Data[a]; if (ch == '\r' && a < length-1 && Data[a+1] == '\n') continue; // Remove cr followed by lf. if (ch >= 32 || ch == '\n' || ch == '\t'){ Comment[nch++] = (char)ch; }else{ Comment[nch++] = '?'; } } Comment[nch] = '\0'; // Null terminate if (ShowTags){ printf("COM marker comment: %s\n",Comment); } strcpy(ImageInfo.Comments,Comment); } //-------------------------------------------------------------------------- // Process a SOFn marker. This is useful for the image dimensions //-------------------------------------------------------------------------- static void process_SOFn (const uchar * Data, int marker) { int data_precision, num_components; data_precision = Data[2]; ImageInfo.Height = Get16m(Data+3); ImageInfo.Width = Get16m(Data+5); num_components = Data[7]; if (num_components == 3){ ImageInfo.IsColor = 1; }else{ ImageInfo.IsColor = 0; } ImageInfo.Process = marker; if (ShowTags){ printf("JPEG image is %uw * %uh, %d color components, %d bits per sample\n", ImageInfo.Width, ImageInfo.Height, num_components, data_precision); } } //-------------------------------------------------------------------------- // Check sections array to see if it needs to be increased in size. 
//-------------------------------------------------------------------------- static void CheckSectionsAllocated(void) { if (SectionsRead > SectionsAllocated){ ErrFatal("allocation screwup"); } if (SectionsRead >= SectionsAllocated){ SectionsAllocated += SectionsAllocated/2; Sections = (Section_t *)realloc(Sections, sizeof(Section_t)*SectionsAllocated); if (Sections == NULL){ ErrFatal("could not allocate data for entire image"); } } } //-------------------------------------------------------------------------- // Parse the marker stream until SOS or EOI is seen; //-------------------------------------------------------------------------- int ReadJpegSections (FILE * infile, ReadMode_t ReadMode) { int a; int HaveCom = FALSE; a = fgetc(infile); if (a != 0xff || fgetc(infile) != M_SOI){ return FALSE; } ImageInfo.JfifHeader.XDensity = ImageInfo.JfifHeader.YDensity = 300; ImageInfo.JfifHeader.ResolutionUnits = 1; for(;;){ int itemlen; int prev; int marker = 0; int ll,lh, got; uchar * Data; CheckSectionsAllocated(); prev = 0; for (a=0;;a++){ marker = fgetc(infile); if (marker != 0xff && prev == 0xff) break; if (marker == EOF){ ErrFatal("Unexpected end of file"); } prev = marker; } if (a > 10){ ErrNonfatal("Extraneous %d padding bytes before section %02X",a-1,marker); } Sections[SectionsRead].Type = marker; // Read the length of the section. lh = fgetc(infile); ll = fgetc(infile); if (lh == EOF || ll == EOF){ ErrFatal("Unexpected end of file"); } itemlen = (lh << 8) | ll; if (itemlen < 2){ ErrFatal("invalid marker"); } Sections[SectionsRead].Size = itemlen; Data = (uchar *)malloc(itemlen); if (Data == NULL){ ErrFatal("Could not allocate memory"); } Sections[SectionsRead].Data = Data; // Store first two pre-read bytes. Data[0] = (uchar)lh; Data[1] = (uchar)ll; got = fread(Data+2, 1, itemlen-2, infile); // Read the whole section. if (got != itemlen-2){ ErrFatal("Premature end of file?"); } SectionsRead += 1; switch(marker){ case M_SOS: // stop before hitting compressed data // If reading entire image is requested, read the rest of the data. if (ReadMode & READ_IMAGE){ int cp, ep, size; // Determine how much file is left. cp = ftell(infile); fseek(infile, 0, SEEK_END); ep = ftell(infile); fseek(infile, cp, SEEK_SET); size = ep-cp; Data = (uchar *)malloc(size); if (Data == NULL){ ErrFatal("could not allocate data for entire image"); } got = fread(Data, 1, size, infile); if (got != size){ ErrFatal("could not read the rest of the image"); } CheckSectionsAllocated(); Sections[SectionsRead].Data = Data; Sections[SectionsRead].Size = size; Sections[SectionsRead].Type = PSEUDO_IMAGE_MARKER; SectionsRead ++; HaveAll = 1; } return TRUE; case M_DQT: // Use for jpeg quality guessing process_DQT(Data, itemlen); break; case M_DHT: // Use for jpeg quality guessing process_DHT(Data, itemlen); break; case M_EOI: // in case it's a tables-only JPEG stream fprintf(stderr,"No image in jpeg!\n"); return FALSE; case M_COM: // Comment section if (HaveCom || ((ReadMode & READ_METADATA) == 0)){ // Discard this section. free(Sections[--SectionsRead].Data); }else{ process_COM(Data, itemlen); HaveCom = TRUE; } break; case M_JFIF: // Regular jpegs always have this tag, exif images have the exif // marker instead, althogh ACDsee will write images with both markers. // this program will re-create this marker on absence of exif marker. // hence no need to keep the copy from the file. 
if (itemlen < 16){ fprintf(stderr,"Jfif header too short\n"); goto ignore; } if (memcmp(Data+2, "JFIF\0",5)){ fprintf(stderr,"Header missing JFIF marker\n"); } ImageInfo.JfifHeader.Present = TRUE; ImageInfo.JfifHeader.ResolutionUnits = Data[9]; ImageInfo.JfifHeader.XDensity = (Data[10]<<8) | Data[11]; ImageInfo.JfifHeader.YDensity = (Data[12]<<8) | Data[13]; if (ShowTags){ printf("JFIF SOI marker: Units: %d ",ImageInfo.JfifHeader.ResolutionUnits); switch(ImageInfo.JfifHeader.ResolutionUnits){ case 0: printf("(aspect ratio)"); break; case 1: printf("(dots per inch)"); break; case 2: printf("(dots per cm)"); break; default: printf("(unknown)"); break; } printf(" X-density=%d Y-density=%d\n",ImageInfo.JfifHeader.XDensity, ImageInfo.JfifHeader.YDensity); if (Data[14] || Data[15]){ fprintf(stderr,"Ignoring jfif header thumbnail\n"); } } ignore: free(Sections[--SectionsRead].Data); break; case M_EXIF: // There can be different section using the same marker. if (ReadMode & READ_METADATA){ if (memcmp(Data+2, "Exif", 4) == 0){ process_EXIF(Data, itemlen); break; }else if (memcmp(Data+2, "http:", 5) == 0){ Sections[SectionsRead-1].Type = M_XMP; // Change tag for internal purposes. if (ShowTags){ printf("Image contains XMP section, %d bytes long\n", itemlen); if (ShowTags){ ShowXmp(Sections[SectionsRead-1]); } } break; } } // Oterwise, discard this section. free(Sections[--SectionsRead].Data); break; case M_IPTC: if (ReadMode & READ_METADATA){ if (ShowTags){ printf("Image contains IPTC section, %d bytes long\n", itemlen); } // Note: We just store the IPTC section. Its relatively straightforward // and we don't act on any part of it, so just display it at parse time. }else{ free(Sections[--SectionsRead].Data); } break; case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: if (itemlen < 8){ fprintf(stderr,"Section too short\n"); break; } process_SOFn(Data, marker); break; default: // Skip any other sections. if (ShowTags){ printf("Jpeg section marker 0x%02x size %d\n",marker, itemlen); } break; } } return TRUE; } //-------------------------------------------------------------------------- // Discard read data. //-------------------------------------------------------------------------- void DiscardData(void) { int a; for (a=0;a<SectionsRead;a++){ free(Sections[a].Data); } memset(&ImageInfo, 0, sizeof(ImageInfo)); SectionsRead = 0; HaveAll = 0; } //-------------------------------------------------------------------------- // Read image data. //-------------------------------------------------------------------------- int ReadJpegFile(const char * FileName, ReadMode_t ReadMode) { FILE * infile; int ret; infile = fopen(FileName, "rb"); // Unix ignores 'b', windows needs it. if (infile == NULL) { fprintf(stderr, "can't open '%s'\n", FileName); return FALSE; } // Scan the JPEG headers. ret = ReadJpegSections(infile, ReadMode); if (!ret){ if (ReadMode == READ_ANY){ // Process any files mode. Ignore the fact that it's not // a jpeg file. 
ret = TRUE; }else{ fprintf(stderr,"Not JPEG: %s\n",FileName); } } fclose(infile); if (ret == FALSE){ DiscardData(); } return ret; } //-------------------------------------------------------------------------- // Replace or remove exif thumbnail //-------------------------------------------------------------------------- int SaveThumbnail(char * ThumbFileName) { FILE * ThumbnailFile; if (ImageInfo.ThumbnailOffset == 0 || ImageInfo.ThumbnailSize == 0){ fprintf(stderr,"Image contains no thumbnail\n"); return FALSE; } if (strcmp(ThumbFileName, "-") == 0){ // A filename of '-' indicates thumbnail goes to stdout. // This doesn't make much sense under Windows, so this feature is unix only. ThumbnailFile = stdout; }else{ ThumbnailFile = fopen(ThumbFileName,"wb"); } if (ThumbnailFile){ uchar * ThumbnailPointer; Section_t * ExifSection; ExifSection = FindSection(M_EXIF); ThumbnailPointer = ExifSection->Data+ImageInfo.ThumbnailOffset+8; fwrite(ThumbnailPointer, ImageInfo.ThumbnailSize ,1, ThumbnailFile); fclose(ThumbnailFile); return TRUE; }else{ ErrFatal("Could not write thumbnail file"); return FALSE; } } //-------------------------------------------------------------------------- // Replace or remove exif thumbnail //-------------------------------------------------------------------------- int ReplaceThumbnail(const char * ThumbFileName) { FILE * ThumbnailFile; int ThumbLen, NewExifSize; Section_t * ExifSection; uchar * ThumbnailPointer; if (ImageInfo.ThumbnailOffset == 0 || ImageInfo.ThumbnailAtEnd == FALSE){ if (ThumbFileName == NULL){ // Delete of nonexistent thumbnail (not even pointers present) // No action, no error. return FALSE; } // Adding or removing of thumbnail is not possible - that would require rearranging // of the exif header, which is risky, and jhad doesn't know how to do. fprintf(stderr,"Image contains no thumbnail to replace - add is not possible\n"); return FALSE; } if (ThumbFileName){ ThumbnailFile = fopen(ThumbFileName,"rb"); if (ThumbnailFile == NULL){ noread: ErrFatal("Could not read thumbnail file"); return FALSE; } // get length fseek(ThumbnailFile, 0, SEEK_END); ThumbLen = ftell(ThumbnailFile); fseek(ThumbnailFile, 0, SEEK_SET); if (ThumbLen + ImageInfo.ThumbnailOffset > 0x10000-20){ ErrFatal("Thumbnail is too large to insert into exif header"); } }else{ if (ImageInfo.ThumbnailSize == 0){ return FALSE; } ThumbLen = 0; ThumbnailFile = NULL; } ExifSection = FindSection(M_EXIF); NewExifSize = ImageInfo.ThumbnailOffset+8+ThumbLen; ExifSection->Data = (uchar *)realloc(ExifSection->Data, NewExifSize); ThumbnailPointer = ExifSection->Data+ImageInfo.ThumbnailOffset+8; if (ThumbnailFile){ if (fread(ThumbnailPointer, 1, ThumbLen, ThumbnailFile) != ThumbLen){ goto noread; } fclose(ThumbnailFile); } ImageInfo.ThumbnailSize = ThumbLen; Put32u(ExifSection->Data+ImageInfo.ThumbnailSizeOffset+8, ThumbLen); ExifSection->Data[0] = (uchar)(NewExifSize >> 8); ExifSection->Data[1] = (uchar)NewExifSize; ExifSection->Size = NewExifSize; return TRUE; } //-------------------------------------------------------------------------- // Discard everything but the exif and comment sections. 
//-------------------------------------------------------------------------- void DiscardAllButExif(void) { Section_t ExifKeeper; Section_t CommentKeeper; Section_t IptcKeeper; Section_t XmpKeeper; int a; memset(&ExifKeeper, 0, sizeof(ExifKeeper)); memset(&CommentKeeper, 0, sizeof(CommentKeeper)); memset(&IptcKeeper, 0, sizeof(IptcKeeper)); memset(&XmpKeeper, 0, sizeof(IptcKeeper)); for (a=0;a<SectionsRead;a++){ if (Sections[a].Type == M_EXIF && ExifKeeper.Type == 0){ ExifKeeper = Sections[a]; }else if (Sections[a].Type == M_XMP && XmpKeeper.Type == 0){ XmpKeeper = Sections[a]; }else if (Sections[a].Type == M_COM && CommentKeeper.Type == 0){ CommentKeeper = Sections[a]; }else if (Sections[a].Type == M_IPTC && IptcKeeper.Type == 0){ IptcKeeper = Sections[a]; }else{ free(Sections[a].Data); } } SectionsRead = 0; if (ExifKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = ExifKeeper; } if (CommentKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = CommentKeeper; } if (IptcKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = IptcKeeper; } if (XmpKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = XmpKeeper; } } //-------------------------------------------------------------------------- // Write image data back to disk. //-------------------------------------------------------------------------- void WriteJpegFile(const char * FileName) { FILE * outfile; int a; if (!HaveAll){ ErrFatal("Can't write back - didn't read all"); } outfile = fopen(FileName,"wb"); if (outfile == NULL){ ErrFatal("Could not open file for write"); } // Initial static jpeg marker. fputc(0xff,outfile); fputc(0xd8,outfile); if (Sections[0].Type != M_EXIF && Sections[0].Type != M_JFIF){ // The image must start with an exif or jfif marker. If we threw those away, create one. static uchar JfifHead[18] = { 0xff, M_JFIF, 0x00, 0x10, 'J' , 'F' , 'I' , 'F' , 0x00, 0x01, 0x01, 0x01, 0x01, 0x2C, 0x01, 0x2C, 0x00, 0x00 }; if (ImageInfo.ResolutionUnit == 2 || ImageInfo.ResolutionUnit == 3){ // Use the exif resolution info to fill out the jfif header. // Usually, for exif images, there's no jfif header, so if wediscard // the exif header, use info from the exif header for the jfif header. 
ImageInfo.JfifHeader.ResolutionUnits = (char)(ImageInfo.ResolutionUnit-1); // Jfif is 1 and 2, Exif is 2 and 3 for In and cm respecively ImageInfo.JfifHeader.XDensity = (int)ImageInfo.xResolution; ImageInfo.JfifHeader.YDensity = (int)ImageInfo.yResolution; } JfifHead[11] = ImageInfo.JfifHeader.ResolutionUnits; JfifHead[12] = (uchar)(ImageInfo.JfifHeader.XDensity >> 8); JfifHead[13] = (uchar)ImageInfo.JfifHeader.XDensity; JfifHead[14] = (uchar)(ImageInfo.JfifHeader.YDensity >> 8); JfifHead[15] = (uchar)ImageInfo.JfifHeader.YDensity; fwrite(JfifHead, 18, 1, outfile); // use the values from the exif data for the jfif header, if we have found values if (ImageInfo.ResolutionUnit != 0) { // JFIF.ResolutionUnit is {1,2}, EXIF.ResolutionUnit is {2,3} JfifHead[11] = (uchar)ImageInfo.ResolutionUnit - 1; } if (ImageInfo.xResolution > 0.0 && ImageInfo.yResolution > 0.0) { JfifHead[12] = (uchar)((int)ImageInfo.xResolution>>8); JfifHead[13] = (uchar)((int)ImageInfo.xResolution); JfifHead[14] = (uchar)((int)ImageInfo.yResolution>>8); JfifHead[15] = (uchar)((int)ImageInfo.yResolution); } } // Write all the misc sections for (a=0;a<SectionsRead-1;a++){ fputc(0xff,outfile); fputc((unsigned char)Sections[a].Type, outfile); fwrite(Sections[a].Data, Sections[a].Size, 1, outfile); } // Write the remaining image data. fwrite(Sections[a].Data, Sections[a].Size, 1, outfile); fclose(outfile); } //-------------------------------------------------------------------------- // Check if image has exif header. //-------------------------------------------------------------------------- Section_t * FindSection(int SectionType) { int a; for (a=0;a<SectionsRead;a++){ if (Sections[a].Type == SectionType){ return &Sections[a]; } } // Could not be found. return NULL; } //-------------------------------------------------------------------------- // Remove a certain type of section. //-------------------------------------------------------------------------- int RemoveSectionType(int SectionType) { int a; int retval = FALSE; for (a=0;a<SectionsRead-1;a++){ if (Sections[a].Type == SectionType){ // Free up this section free (Sections[a].Data); // Move succeding sections back by one to close space in array. memmove(Sections+a, Sections+a+1, sizeof(Section_t) * (SectionsRead-a)); SectionsRead -= 1; a -= 1; retval = TRUE; } } return retval; } //-------------------------------------------------------------------------- // Remove sectons not part of image and not exif or comment sections. //-------------------------------------------------------------------------- int RemoveUnknownSections(void) { int a; int Modified = FALSE; for (a=0;a<SectionsRead-1;){ switch(Sections[a].Type){ case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: case M_SOI: case M_EOI: case M_SOS: case M_JFIF: case M_EXIF: case M_XMP: case M_COM: case M_DQT: case M_DHT: case M_DRI: case M_IPTC: // keep. a++; break; default: // Unknown. Delete. free (Sections[a].Data); // Move succeding sections back by one to close space in array. 
memmove(Sections+a, Sections+a+1, sizeof(Section_t) * (SectionsRead-a)); SectionsRead -= 1; Modified = TRUE; } } return Modified; } //-------------------------------------------------------------------------- // Add a section (assume it doesn't already exist) - used for // adding comment sections and exif sections //-------------------------------------------------------------------------- Section_t * CreateSection(int SectionType, unsigned char * Data, int Size) { Section_t * NewSection; int a; int NewIndex; NewIndex = 0; // Figure out where to put the comment section. if (SectionType == M_EXIF){ // Exif alwas goes first! }else{ for (;NewIndex < 3;NewIndex++){ // Maximum fourth position (just for the heck of it) if (Sections[NewIndex].Type == M_JFIF) continue; // Put it after Jfif if (Sections[NewIndex].Type == M_EXIF) continue; // Put it after Exif break; } } if (SectionsRead < NewIndex){ ErrFatal("Too few sections!"); } CheckSectionsAllocated(); for (a=SectionsRead;a>NewIndex;a--){ Sections[a] = Sections[a-1]; } SectionsRead += 1; NewSection = Sections+NewIndex; NewSection->Type = SectionType; NewSection->Size = Size; NewSection->Data = Data; return NewSection; } //-------------------------------------------------------------------------- // Initialisation. //-------------------------------------------------------------------------- void ResetJpgfile(void) { if (Sections == NULL){ Sections = (Section_t *)malloc(sizeof(Section_t)*5); SectionsAllocated = 5; } SectionsRead = 0; HaveAll = 0; }
//-------------------------------------------------------------------------- // Program to pull the information out of various types of EXIF digital // camera files and show it in a reasonably consistent way // // This module handles basic Jpeg file handling // // Matthias Wandel //-------------------------------------------------------------------------- #include "jhead.h" // Storage for simplified info extracted from file. ImageInfo_t ImageInfo; static Section_t * Sections = NULL; static int SectionsAllocated; static int SectionsRead; static int HaveAll; #define PSEUDO_IMAGE_MARKER 0x123; // Extra value. //-------------------------------------------------------------------------- // Get 16 bits motorola order (always) for jpeg header stuff. //-------------------------------------------------------------------------- static int Get16m(const void * Short) { return (((uchar *)Short)[0] << 8) | ((uchar *)Short)[1]; } //-------------------------------------------------------------------------- // Process a COM marker. // We want to print out the marker contents as legible text; // we must guard against random junk and varying newline representations. //-------------------------------------------------------------------------- static void process_COM (const uchar * Data, int length) { int ch; char Comment[MAX_COMMENT_SIZE+1]; int nch; int a; nch = 0; if (length > MAX_COMMENT_SIZE) length = MAX_COMMENT_SIZE; // Truncate if it won't fit in our structure. for (a=2;a<length;a++){ ch = Data[a]; if (ch == '\r' && a < length-1 && Data[a+1] == '\n') continue; // Remove cr followed by lf. if (ch >= 32 || ch == '\n' || ch == '\t'){ Comment[nch++] = (char)ch; }else{ Comment[nch++] = '?'; } } Comment[nch] = '\0'; // Null terminate if (ShowTags){ printf("COM marker comment: %s\n",Comment); } strcpy(ImageInfo.Comments,Comment); } //-------------------------------------------------------------------------- // Process a SOFn marker. This is useful for the image dimensions //-------------------------------------------------------------------------- static void process_SOFn (const uchar * Data, int marker) { int data_precision, num_components; data_precision = Data[2]; ImageInfo.Height = Get16m(Data+3); ImageInfo.Width = Get16m(Data+5); num_components = Data[7]; if (num_components == 3){ ImageInfo.IsColor = 1; }else{ ImageInfo.IsColor = 0; } ImageInfo.Process = marker; if (ShowTags){ printf("JPEG image is %uw * %uh, %d color components, %d bits per sample\n", ImageInfo.Width, ImageInfo.Height, num_components, data_precision); } } //-------------------------------------------------------------------------- // Check sections array to see if it needs to be increased in size. 
//-------------------------------------------------------------------------- static void CheckSectionsAllocated(void) { if (SectionsRead > SectionsAllocated){ ErrFatal("allocation screwup"); } if (SectionsRead >= SectionsAllocated){ SectionsAllocated += SectionsAllocated/2; Sections = (Section_t *)realloc(Sections, sizeof(Section_t)*SectionsAllocated); if (Sections == NULL){ ErrFatal("could not allocate data for entire image"); } } } //-------------------------------------------------------------------------- // Parse the marker stream until SOS or EOI is seen; //-------------------------------------------------------------------------- int ReadJpegSections (FILE * infile, ReadMode_t ReadMode) { int a; int HaveCom = FALSE; a = fgetc(infile); if (a != 0xff || fgetc(infile) != M_SOI){ return FALSE; } ImageInfo.JfifHeader.XDensity = ImageInfo.JfifHeader.YDensity = 300; ImageInfo.JfifHeader.ResolutionUnits = 1; for(;;){ int itemlen; int prev; int marker = 0; int ll,lh, got; uchar * Data; CheckSectionsAllocated(); prev = 0; for (a=0;;a++){ marker = fgetc(infile); if (marker != 0xff && prev == 0xff) break; if (marker == EOF){ ErrFatal("Unexpected end of file"); } prev = marker; } if (a > 10){ ErrNonfatal("Extraneous %d padding bytes before section %02X",a-1,marker); } Sections[SectionsRead].Type = marker; // Read the length of the section. lh = fgetc(infile); ll = fgetc(infile); if (lh == EOF || ll == EOF){ ErrFatal("Unexpected end of file"); } itemlen = (lh << 8) | ll; if (itemlen < 2){ ErrFatal("invalid marker"); } Sections[SectionsRead].Size = itemlen; // Allocate an extra 20 bytes more than needed, because sometimes when reading structures, // if the section erroneously ends before short structures that should be there, that can trip // memory checkers in combination with fuzzers. Data = (uchar *)malloc(itemlen+20); if (Data == NULL){ ErrFatal("Could not allocate memory"); } Sections[SectionsRead].Data = Data; // Store first two pre-read bytes. Data[0] = (uchar)lh; Data[1] = (uchar)ll; got = fread(Data+2, 1, itemlen-2, infile); // Read the whole section. if (got != itemlen-2){ ErrFatal("Premature end of file?"); } SectionsRead += 1; switch(marker){ case M_SOS: // stop before hitting compressed data // If reading entire image is requested, read the rest of the data. if (ReadMode & READ_IMAGE){ int cp, ep, size; // Determine how much file is left. cp = ftell(infile); fseek(infile, 0, SEEK_END); ep = ftell(infile); fseek(infile, cp, SEEK_SET); size = ep-cp; Data = (uchar *)malloc(size); if (Data == NULL){ ErrFatal("could not allocate data for entire image"); } got = fread(Data, 1, size, infile); if (got != size){ ErrFatal("could not read the rest of the image"); } CheckSectionsAllocated(); Sections[SectionsRead].Data = Data; Sections[SectionsRead].Size = size; Sections[SectionsRead].Type = PSEUDO_IMAGE_MARKER; SectionsRead ++; HaveAll = 1; } return TRUE; case M_DQT: // Use for jpeg quality guessing process_DQT(Data, itemlen); break; case M_DHT: // Use for jpeg quality guessing process_DHT(Data, itemlen); break; case M_EOI: // in case it's a tables-only JPEG stream fprintf(stderr,"No image in jpeg!\n"); return FALSE; case M_COM: // Comment section if (HaveCom || ((ReadMode & READ_METADATA) == 0)){ // Discard this section. free(Sections[--SectionsRead].Data); }else{ process_COM(Data, itemlen); HaveCom = TRUE; } break; case M_JFIF: // Regular jpegs always have this tag, exif images have the exif // marker instead, althogh ACDsee will write images with both markers. 
// this program will re-create this marker on absence of exif marker. // hence no need to keep the copy from the file. if (itemlen < 16){ fprintf(stderr,"Jfif header too short\n"); goto ignore; } if (memcmp(Data+2, "JFIF\0",5)){ fprintf(stderr,"Header missing JFIF marker\n"); } ImageInfo.JfifHeader.Present = TRUE; ImageInfo.JfifHeader.ResolutionUnits = Data[9]; ImageInfo.JfifHeader.XDensity = (Data[10]<<8) | Data[11]; ImageInfo.JfifHeader.YDensity = (Data[12]<<8) | Data[13]; if (ShowTags){ printf("JFIF SOI marker: Units: %d ",ImageInfo.JfifHeader.ResolutionUnits); switch(ImageInfo.JfifHeader.ResolutionUnits){ case 0: printf("(aspect ratio)"); break; case 1: printf("(dots per inch)"); break; case 2: printf("(dots per cm)"); break; default: printf("(unknown)"); break; } printf(" X-density=%d Y-density=%d\n",ImageInfo.JfifHeader.XDensity, ImageInfo.JfifHeader.YDensity); if (Data[14] || Data[15]){ fprintf(stderr,"Ignoring jfif header thumbnail\n"); } } ignore: free(Sections[--SectionsRead].Data); break; case M_EXIF: // There can be different section using the same marker. if (ReadMode & READ_METADATA){ if (memcmp(Data+2, "Exif", 4) == 0){ process_EXIF(Data, itemlen); break; }else if (memcmp(Data+2, "http:", 5) == 0){ Sections[SectionsRead-1].Type = M_XMP; // Change tag for internal purposes. if (ShowTags){ printf("Image contains XMP section, %d bytes long\n", itemlen); if (ShowTags){ ShowXmp(Sections[SectionsRead-1]); } } break; } } // Oterwise, discard this section. free(Sections[--SectionsRead].Data); break; case M_IPTC: if (ReadMode & READ_METADATA){ if (ShowTags){ printf("Image contains IPTC section, %d bytes long\n", itemlen); } // Note: We just store the IPTC section. Its relatively straightforward // and we don't act on any part of it, so just display it at parse time. }else{ free(Sections[--SectionsRead].Data); } break; case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: if (itemlen < 8){ fprintf(stderr,"Section too short\n"); break; } process_SOFn(Data, marker); break; default: // Skip any other sections. if (ShowTags){ printf("Jpeg section marker 0x%02x size %d\n",marker, itemlen); } break; } } return TRUE; } //-------------------------------------------------------------------------- // Discard read data. //-------------------------------------------------------------------------- void DiscardData(void) { int a; for (a=0;a<SectionsRead;a++){ free(Sections[a].Data); } memset(&ImageInfo, 0, sizeof(ImageInfo)); SectionsRead = 0; HaveAll = 0; } //-------------------------------------------------------------------------- // Read image data. //-------------------------------------------------------------------------- int ReadJpegFile(const char * FileName, ReadMode_t ReadMode) { FILE * infile; int ret; infile = fopen(FileName, "rb"); // Unix ignores 'b', windows needs it. if (infile == NULL) { fprintf(stderr, "can't open '%s'\n", FileName); return FALSE; } // Scan the JPEG headers. ret = ReadJpegSections(infile, ReadMode); if (!ret){ if (ReadMode == READ_ANY){ // Process any files mode. Ignore the fact that it's not // a jpeg file. 
ret = TRUE; }else{ fprintf(stderr,"Not JPEG: %s\n",FileName); } } fclose(infile); if (ret == FALSE){ DiscardData(); } return ret; } //-------------------------------------------------------------------------- // Replace or remove exif thumbnail //-------------------------------------------------------------------------- int SaveThumbnail(char * ThumbFileName) { FILE * ThumbnailFile; if (ImageInfo.ThumbnailOffset == 0 || ImageInfo.ThumbnailSize == 0){ fprintf(stderr,"Image contains no thumbnail\n"); return FALSE; } if (strcmp(ThumbFileName, "-") == 0){ // A filename of '-' indicates thumbnail goes to stdout. // This doesn't make much sense under Windows, so this feature is unix only. ThumbnailFile = stdout; }else{ ThumbnailFile = fopen(ThumbFileName,"wb"); } if (ThumbnailFile){ uchar * ThumbnailPointer; Section_t * ExifSection; ExifSection = FindSection(M_EXIF); ThumbnailPointer = ExifSection->Data+ImageInfo.ThumbnailOffset+8; fwrite(ThumbnailPointer, ImageInfo.ThumbnailSize ,1, ThumbnailFile); fclose(ThumbnailFile); return TRUE; }else{ ErrFatal("Could not write thumbnail file"); return FALSE; } } //-------------------------------------------------------------------------- // Replace or remove exif thumbnail //-------------------------------------------------------------------------- int ReplaceThumbnail(const char * ThumbFileName) { FILE * ThumbnailFile; int ThumbLen, NewExifSize; Section_t * ExifSection; uchar * ThumbnailPointer; if (ImageInfo.ThumbnailOffset == 0 || ImageInfo.ThumbnailAtEnd == FALSE){ if (ThumbFileName == NULL){ // Delete of nonexistent thumbnail (not even pointers present) // No action, no error. return FALSE; } // Adding or removing of thumbnail is not possible - that would require rearranging // of the exif header, which is risky, and jhad doesn't know how to do. fprintf(stderr,"Image contains no thumbnail to replace - add is not possible\n"); return FALSE; } if (ThumbFileName){ ThumbnailFile = fopen(ThumbFileName,"rb"); if (ThumbnailFile == NULL){ noread: ErrFatal("Could not read thumbnail file"); return FALSE; } // get length fseek(ThumbnailFile, 0, SEEK_END); ThumbLen = ftell(ThumbnailFile); fseek(ThumbnailFile, 0, SEEK_SET); if (ThumbLen + ImageInfo.ThumbnailOffset > 0x10000-20){ ErrFatal("Thumbnail is too large to insert into exif header"); } }else{ if (ImageInfo.ThumbnailSize == 0){ return FALSE; } ThumbLen = 0; ThumbnailFile = NULL; } ExifSection = FindSection(M_EXIF); NewExifSize = ImageInfo.ThumbnailOffset+8+ThumbLen; ExifSection->Data = (uchar *)realloc(ExifSection->Data, NewExifSize); ThumbnailPointer = ExifSection->Data+ImageInfo.ThumbnailOffset+8; if (ThumbnailFile){ if (fread(ThumbnailPointer, 1, ThumbLen, ThumbnailFile) != ThumbLen){ goto noread; } fclose(ThumbnailFile); } ImageInfo.ThumbnailSize = ThumbLen; Put32u(ExifSection->Data+ImageInfo.ThumbnailSizeOffset+8, ThumbLen); ExifSection->Data[0] = (uchar)(NewExifSize >> 8); ExifSection->Data[1] = (uchar)NewExifSize; ExifSection->Size = NewExifSize; return TRUE; } //-------------------------------------------------------------------------- // Discard everything but the exif and comment sections. 
//-------------------------------------------------------------------------- void DiscardAllButExif(void) { Section_t ExifKeeper; Section_t CommentKeeper; Section_t IptcKeeper; Section_t XmpKeeper; int a; memset(&ExifKeeper, 0, sizeof(ExifKeeper)); memset(&CommentKeeper, 0, sizeof(CommentKeeper)); memset(&IptcKeeper, 0, sizeof(IptcKeeper)); memset(&XmpKeeper, 0, sizeof(IptcKeeper)); for (a=0;a<SectionsRead;a++){ if (Sections[a].Type == M_EXIF && ExifKeeper.Type == 0){ ExifKeeper = Sections[a]; }else if (Sections[a].Type == M_XMP && XmpKeeper.Type == 0){ XmpKeeper = Sections[a]; }else if (Sections[a].Type == M_COM && CommentKeeper.Type == 0){ CommentKeeper = Sections[a]; }else if (Sections[a].Type == M_IPTC && IptcKeeper.Type == 0){ IptcKeeper = Sections[a]; }else{ free(Sections[a].Data); } } SectionsRead = 0; if (ExifKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = ExifKeeper; } if (CommentKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = CommentKeeper; } if (IptcKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = IptcKeeper; } if (XmpKeeper.Type){ CheckSectionsAllocated(); Sections[SectionsRead++] = XmpKeeper; } } //-------------------------------------------------------------------------- // Write image data back to disk. //-------------------------------------------------------------------------- void WriteJpegFile(const char * FileName) { FILE * outfile; int a; if (!HaveAll){ ErrFatal("Can't write back - didn't read all"); } outfile = fopen(FileName,"wb"); if (outfile == NULL){ ErrFatal("Could not open file for write"); } // Initial static jpeg marker. fputc(0xff,outfile); fputc(0xd8,outfile); if (Sections[0].Type != M_EXIF && Sections[0].Type != M_JFIF){ // The image must start with an exif or jfif marker. If we threw those away, create one. static uchar JfifHead[18] = { 0xff, M_JFIF, 0x00, 0x10, 'J' , 'F' , 'I' , 'F' , 0x00, 0x01, 0x01, 0x01, 0x01, 0x2C, 0x01, 0x2C, 0x00, 0x00 }; if (ImageInfo.ResolutionUnit == 2 || ImageInfo.ResolutionUnit == 3){ // Use the exif resolution info to fill out the jfif header. // Usually, for exif images, there's no jfif header, so if wediscard // the exif header, use info from the exif header for the jfif header. 
ImageInfo.JfifHeader.ResolutionUnits = (char)(ImageInfo.ResolutionUnit-1); // Jfif is 1 and 2, Exif is 2 and 3 for In and cm respecively ImageInfo.JfifHeader.XDensity = (int)ImageInfo.xResolution; ImageInfo.JfifHeader.YDensity = (int)ImageInfo.yResolution; } JfifHead[11] = ImageInfo.JfifHeader.ResolutionUnits; JfifHead[12] = (uchar)(ImageInfo.JfifHeader.XDensity >> 8); JfifHead[13] = (uchar)ImageInfo.JfifHeader.XDensity; JfifHead[14] = (uchar)(ImageInfo.JfifHeader.YDensity >> 8); JfifHead[15] = (uchar)ImageInfo.JfifHeader.YDensity; fwrite(JfifHead, 18, 1, outfile); // use the values from the exif data for the jfif header, if we have found values if (ImageInfo.ResolutionUnit != 0) { // JFIF.ResolutionUnit is {1,2}, EXIF.ResolutionUnit is {2,3} JfifHead[11] = (uchar)ImageInfo.ResolutionUnit - 1; } if (ImageInfo.xResolution > 0.0 && ImageInfo.yResolution > 0.0) { JfifHead[12] = (uchar)((int)ImageInfo.xResolution>>8); JfifHead[13] = (uchar)((int)ImageInfo.xResolution); JfifHead[14] = (uchar)((int)ImageInfo.yResolution>>8); JfifHead[15] = (uchar)((int)ImageInfo.yResolution); } } // Write all the misc sections for (a=0;a<SectionsRead-1;a++){ fputc(0xff,outfile); fputc((unsigned char)Sections[a].Type, outfile); fwrite(Sections[a].Data, Sections[a].Size, 1, outfile); } // Write the remaining image data. fwrite(Sections[a].Data, Sections[a].Size, 1, outfile); fclose(outfile); } //-------------------------------------------------------------------------- // Check if image has exif header. //-------------------------------------------------------------------------- Section_t * FindSection(int SectionType) { int a; for (a=0;a<SectionsRead;a++){ if (Sections[a].Type == SectionType){ return &Sections[a]; } } // Could not be found. return NULL; } //-------------------------------------------------------------------------- // Remove a certain type of section. //-------------------------------------------------------------------------- int RemoveSectionType(int SectionType) { int a; int retval = FALSE; for (a=0;a<SectionsRead-1;a++){ if (Sections[a].Type == SectionType){ // Free up this section free (Sections[a].Data); // Move succeding sections back by one to close space in array. memmove(Sections+a, Sections+a+1, sizeof(Section_t) * (SectionsRead-a)); SectionsRead -= 1; a -= 1; retval = TRUE; } } return retval; } //-------------------------------------------------------------------------- // Remove sectons not part of image and not exif or comment sections. //-------------------------------------------------------------------------- int RemoveUnknownSections(void) { int a; int Modified = FALSE; for (a=0;a<SectionsRead-1;){ switch(Sections[a].Type){ case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: case M_SOI: case M_EOI: case M_SOS: case M_JFIF: case M_EXIF: case M_XMP: case M_COM: case M_DQT: case M_DHT: case M_DRI: case M_IPTC: // keep. a++; break; default: // Unknown. Delete. free (Sections[a].Data); // Move succeding sections back by one to close space in array. 
memmove(Sections+a, Sections+a+1, sizeof(Section_t) * (SectionsRead-a)); SectionsRead -= 1; Modified = TRUE; } } return Modified; } //-------------------------------------------------------------------------- // Add a section (assume it doesn't already exist) - used for // adding comment sections and exif sections //-------------------------------------------------------------------------- Section_t * CreateSection(int SectionType, unsigned char * Data, int Size) { Section_t * NewSection; int a; int NewIndex; NewIndex = 0; // Figure out where to put the comment section. if (SectionType == M_EXIF){ // Exif alwas goes first! }else{ for (;NewIndex < 3;NewIndex++){ // Maximum fourth position (just for the heck of it) if (Sections[NewIndex].Type == M_JFIF) continue; // Put it after Jfif if (Sections[NewIndex].Type == M_EXIF) continue; // Put it after Exif break; } } if (SectionsRead < NewIndex){ ErrFatal("Too few sections!"); } CheckSectionsAllocated(); for (a=SectionsRead;a>NewIndex;a--){ Sections[a] = Sections[a-1]; } SectionsRead += 1; NewSection = Sections+NewIndex; NewSection->Type = SectionType; NewSection->Size = Size; NewSection->Data = Data; return NewSection; } //-------------------------------------------------------------------------- // Initialisation. //-------------------------------------------------------------------------- void ResetJpgfile(void) { if (Sections == NULL){ Sections = (Section_t *)malloc(sizeof(Section_t)*5); SectionsAllocated = 5; } SectionsRead = 0; HaveAll = 0; }
int ReadJpegSections (FILE * infile, ReadMode_t ReadMode) { int a; int HaveCom = FALSE; a = fgetc(infile); if (a != 0xff || fgetc(infile) != M_SOI){ return FALSE; } ImageInfo.JfifHeader.XDensity = ImageInfo.JfifHeader.YDensity = 300; ImageInfo.JfifHeader.ResolutionUnits = 1; for(;;){ int itemlen; int prev; int marker = 0; int ll,lh, got; uchar * Data; CheckSectionsAllocated(); prev = 0; for (a=0;;a++){ marker = fgetc(infile); if (marker != 0xff && prev == 0xff) break; if (marker == EOF){ ErrFatal("Unexpected end of file"); } prev = marker; } if (a > 10){ ErrNonfatal("Extraneous %d padding bytes before section %02X",a-1,marker); } Sections[SectionsRead].Type = marker; // Read the length of the section. lh = fgetc(infile); ll = fgetc(infile); if (lh == EOF || ll == EOF){ ErrFatal("Unexpected end of file"); } itemlen = (lh << 8) | ll; if (itemlen < 2){ ErrFatal("invalid marker"); } Sections[SectionsRead].Size = itemlen; Data = (uchar *)malloc(itemlen); if (Data == NULL){ ErrFatal("Could not allocate memory"); } Sections[SectionsRead].Data = Data; // Store first two pre-read bytes. Data[0] = (uchar)lh; Data[1] = (uchar)ll; got = fread(Data+2, 1, itemlen-2, infile); // Read the whole section. if (got != itemlen-2){ ErrFatal("Premature end of file?"); } SectionsRead += 1; switch(marker){ case M_SOS: // stop before hitting compressed data // If reading entire image is requested, read the rest of the data. if (ReadMode & READ_IMAGE){ int cp, ep, size; // Determine how much file is left. cp = ftell(infile); fseek(infile, 0, SEEK_END); ep = ftell(infile); fseek(infile, cp, SEEK_SET); size = ep-cp; Data = (uchar *)malloc(size); if (Data == NULL){ ErrFatal("could not allocate data for entire image"); } got = fread(Data, 1, size, infile); if (got != size){ ErrFatal("could not read the rest of the image"); } CheckSectionsAllocated(); Sections[SectionsRead].Data = Data; Sections[SectionsRead].Size = size; Sections[SectionsRead].Type = PSEUDO_IMAGE_MARKER; SectionsRead ++; HaveAll = 1; } return TRUE; case M_DQT: // Use for jpeg quality guessing process_DQT(Data, itemlen); break; case M_DHT: // Use for jpeg quality guessing process_DHT(Data, itemlen); break; case M_EOI: // in case it's a tables-only JPEG stream fprintf(stderr,"No image in jpeg!\n"); return FALSE; case M_COM: // Comment section if (HaveCom || ((ReadMode & READ_METADATA) == 0)){ // Discard this section. free(Sections[--SectionsRead].Data); }else{ process_COM(Data, itemlen); HaveCom = TRUE; } break; case M_JFIF: // Regular jpegs always have this tag, exif images have the exif // marker instead, althogh ACDsee will write images with both markers. // this program will re-create this marker on absence of exif marker. // hence no need to keep the copy from the file. 
if (itemlen < 16){ fprintf(stderr,"Jfif header too short\n"); goto ignore; } if (memcmp(Data+2, "JFIF\0",5)){ fprintf(stderr,"Header missing JFIF marker\n"); } ImageInfo.JfifHeader.Present = TRUE; ImageInfo.JfifHeader.ResolutionUnits = Data[9]; ImageInfo.JfifHeader.XDensity = (Data[10]<<8) | Data[11]; ImageInfo.JfifHeader.YDensity = (Data[12]<<8) | Data[13]; if (ShowTags){ printf("JFIF SOI marker: Units: %d ",ImageInfo.JfifHeader.ResolutionUnits); switch(ImageInfo.JfifHeader.ResolutionUnits){ case 0: printf("(aspect ratio)"); break; case 1: printf("(dots per inch)"); break; case 2: printf("(dots per cm)"); break; default: printf("(unknown)"); break; } printf(" X-density=%d Y-density=%d\n",ImageInfo.JfifHeader.XDensity, ImageInfo.JfifHeader.YDensity); if (Data[14] || Data[15]){ fprintf(stderr,"Ignoring jfif header thumbnail\n"); } } ignore: free(Sections[--SectionsRead].Data); break; case M_EXIF: // There can be different section using the same marker. if (ReadMode & READ_METADATA){ if (memcmp(Data+2, "Exif", 4) == 0){ process_EXIF(Data, itemlen); break; }else if (memcmp(Data+2, "http:", 5) == 0){ Sections[SectionsRead-1].Type = M_XMP; // Change tag for internal purposes. if (ShowTags){ printf("Image contains XMP section, %d bytes long\n", itemlen); if (ShowTags){ ShowXmp(Sections[SectionsRead-1]); } } break; } } // Oterwise, discard this section. free(Sections[--SectionsRead].Data); break; case M_IPTC: if (ReadMode & READ_METADATA){ if (ShowTags){ printf("Image contains IPTC section, %d bytes long\n", itemlen); } // Note: We just store the IPTC section. Its relatively straightforward // and we don't act on any part of it, so just display it at parse time. }else{ free(Sections[--SectionsRead].Data); } break; case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: if (itemlen < 8){ fprintf(stderr,"Section too short\n"); break; } process_SOFn(Data, marker); break; default: // Skip any other sections. if (ShowTags){ printf("Jpeg section marker 0x%02x size %d\n",marker, itemlen); } break; } } return TRUE; }
int ReadJpegSections (FILE * infile, ReadMode_t ReadMode) { int a; int HaveCom = FALSE; a = fgetc(infile); if (a != 0xff || fgetc(infile) != M_SOI){ return FALSE; } ImageInfo.JfifHeader.XDensity = ImageInfo.JfifHeader.YDensity = 300; ImageInfo.JfifHeader.ResolutionUnits = 1; for(;;){ int itemlen; int prev; int marker = 0; int ll,lh, got; uchar * Data; CheckSectionsAllocated(); prev = 0; for (a=0;;a++){ marker = fgetc(infile); if (marker != 0xff && prev == 0xff) break; if (marker == EOF){ ErrFatal("Unexpected end of file"); } prev = marker; } if (a > 10){ ErrNonfatal("Extraneous %d padding bytes before section %02X",a-1,marker); } Sections[SectionsRead].Type = marker; // Read the length of the section. lh = fgetc(infile); ll = fgetc(infile); if (lh == EOF || ll == EOF){ ErrFatal("Unexpected end of file"); } itemlen = (lh << 8) | ll; if (itemlen < 2){ ErrFatal("invalid marker"); } Sections[SectionsRead].Size = itemlen; // Allocate an extra 20 bytes more than needed, because sometimes when reading structures, // if the section erroneously ends before short structures that should be there, that can trip // memory checkers in combination with fuzzers. Data = (uchar *)malloc(itemlen+20); if (Data == NULL){ ErrFatal("Could not allocate memory"); } Sections[SectionsRead].Data = Data; // Store first two pre-read bytes. Data[0] = (uchar)lh; Data[1] = (uchar)ll; got = fread(Data+2, 1, itemlen-2, infile); // Read the whole section. if (got != itemlen-2){ ErrFatal("Premature end of file?"); } SectionsRead += 1; switch(marker){ case M_SOS: // stop before hitting compressed data // If reading entire image is requested, read the rest of the data. if (ReadMode & READ_IMAGE){ int cp, ep, size; // Determine how much file is left. cp = ftell(infile); fseek(infile, 0, SEEK_END); ep = ftell(infile); fseek(infile, cp, SEEK_SET); size = ep-cp; Data = (uchar *)malloc(size); if (Data == NULL){ ErrFatal("could not allocate data for entire image"); } got = fread(Data, 1, size, infile); if (got != size){ ErrFatal("could not read the rest of the image"); } CheckSectionsAllocated(); Sections[SectionsRead].Data = Data; Sections[SectionsRead].Size = size; Sections[SectionsRead].Type = PSEUDO_IMAGE_MARKER; SectionsRead ++; HaveAll = 1; } return TRUE; case M_DQT: // Use for jpeg quality guessing process_DQT(Data, itemlen); break; case M_DHT: // Use for jpeg quality guessing process_DHT(Data, itemlen); break; case M_EOI: // in case it's a tables-only JPEG stream fprintf(stderr,"No image in jpeg!\n"); return FALSE; case M_COM: // Comment section if (HaveCom || ((ReadMode & READ_METADATA) == 0)){ // Discard this section. free(Sections[--SectionsRead].Data); }else{ process_COM(Data, itemlen); HaveCom = TRUE; } break; case M_JFIF: // Regular jpegs always have this tag, exif images have the exif // marker instead, althogh ACDsee will write images with both markers. // this program will re-create this marker on absence of exif marker. // hence no need to keep the copy from the file. 
if (itemlen < 16){ fprintf(stderr,"Jfif header too short\n"); goto ignore; } if (memcmp(Data+2, "JFIF\0",5)){ fprintf(stderr,"Header missing JFIF marker\n"); } ImageInfo.JfifHeader.Present = TRUE; ImageInfo.JfifHeader.ResolutionUnits = Data[9]; ImageInfo.JfifHeader.XDensity = (Data[10]<<8) | Data[11]; ImageInfo.JfifHeader.YDensity = (Data[12]<<8) | Data[13]; if (ShowTags){ printf("JFIF SOI marker: Units: %d ",ImageInfo.JfifHeader.ResolutionUnits); switch(ImageInfo.JfifHeader.ResolutionUnits){ case 0: printf("(aspect ratio)"); break; case 1: printf("(dots per inch)"); break; case 2: printf("(dots per cm)"); break; default: printf("(unknown)"); break; } printf(" X-density=%d Y-density=%d\n",ImageInfo.JfifHeader.XDensity, ImageInfo.JfifHeader.YDensity); if (Data[14] || Data[15]){ fprintf(stderr,"Ignoring jfif header thumbnail\n"); } } ignore: free(Sections[--SectionsRead].Data); break; case M_EXIF: // There can be different section using the same marker. if (ReadMode & READ_METADATA){ if (memcmp(Data+2, "Exif", 4) == 0){ process_EXIF(Data, itemlen); break; }else if (memcmp(Data+2, "http:", 5) == 0){ Sections[SectionsRead-1].Type = M_XMP; // Change tag for internal purposes. if (ShowTags){ printf("Image contains XMP section, %d bytes long\n", itemlen); if (ShowTags){ ShowXmp(Sections[SectionsRead-1]); } } break; } } // Oterwise, discard this section. free(Sections[--SectionsRead].Data); break; case M_IPTC: if (ReadMode & READ_METADATA){ if (ShowTags){ printf("Image contains IPTC section, %d bytes long\n", itemlen); } // Note: We just store the IPTC section. Its relatively straightforward // and we don't act on any part of it, so just display it at parse time. }else{ free(Sections[--SectionsRead].Data); } break; case M_SOF0: case M_SOF1: case M_SOF2: case M_SOF3: case M_SOF5: case M_SOF6: case M_SOF7: case M_SOF9: case M_SOF10: case M_SOF11: case M_SOF13: case M_SOF14: case M_SOF15: if (itemlen < 8){ fprintf(stderr,"Section too short\n"); break; } process_SOFn(Data, marker); break; default: // Skip any other sections. if (ShowTags){ printf("Jpeg section marker 0x%02x size %d\n",marker, itemlen); } break; } } return TRUE; }
{'added': [(172, ' // Allocate an extra 20 bytes more than needed, because sometimes when reading structures,'), (173, ' // if the section erroneously ends before short structures that should be there, that can trip'), (174, ' // memory checkers in combination with fuzzers.'), (175, ' Data = (uchar *)malloc(itemlen+20);'), (482, ' ThumbLen = 0;')], 'deleted': [(172, ' Data = (uchar *)malloc(itemlen);'), (479, ' ThumbLen = 0;')]}
5
2
542
3,310
177
1,028
57
https://github.com/F-ZhaoYang/jhead
CVE-2020-26208
CWE-787
2,407
pack-bitmap.c
C
show_object
#include "cache.h" #include "commit.h" #include "tag.h" #include "diff.h" #include "revision.h" #include "progress.h" #include "list-objects.h" #include "pack.h" #include "pack-bitmap.h" #include "pack-revindex.h" #include "pack-objects.h" /* * An entry on the bitmap index, representing the bitmap for a given * commit. */ struct stored_bitmap { unsigned char sha1[20]; struct ewah_bitmap *root; struct stored_bitmap *xor; int flags; }; /* * The currently active bitmap index. By design, repositories only have * a single bitmap index available (the index for the biggest packfile in * the repository), since bitmap indexes need full closure. * * If there is more than one bitmap index available (e.g. because of alternates), * the active bitmap index is the largest one. */ static struct bitmap_index { /* Packfile to which this bitmap index belongs to */ struct packed_git *pack; /* * Mark the first `reuse_objects` in the packfile as reused: * they will be sent as-is without using them for repacking * calculations */ uint32_t reuse_objects; /* mmapped buffer of the whole bitmap index */ unsigned char *map; size_t map_size; /* size of the mmaped buffer */ size_t map_pos; /* current position when loading the index */ /* * Type indexes. * * Each bitmap marks which objects in the packfile are of the given * type. This provides type information when yielding the objects from * the packfile during a walk, which allows for better delta bases. */ struct ewah_bitmap *commits; struct ewah_bitmap *trees; struct ewah_bitmap *blobs; struct ewah_bitmap *tags; /* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */ khash_sha1 *bitmaps; /* Number of bitmapped commits */ uint32_t entry_count; /* Name-hash cache (or NULL if not present). */ uint32_t *hashes; /* * Extended index. 
* * When trying to perform bitmap operations with objects that are not * packed in `pack`, these objects are added to this "fake index" and * are assumed to appear at the end of the packfile for all operations */ struct eindex { struct object **objects; uint32_t *hashes; uint32_t count, alloc; khash_sha1_pos *positions; } ext_index; /* Bitmap result of the last performed walk */ struct bitmap *result; /* Version of the bitmap index */ unsigned int version; unsigned loaded : 1; } bitmap_git; static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st) { struct ewah_bitmap *parent; struct ewah_bitmap *composed; if (st->xor == NULL) return st->root; composed = ewah_pool_new(); parent = lookup_stored_bitmap(st->xor); ewah_xor(st->root, parent, composed); ewah_pool_free(st->root); st->root = composed; st->xor = NULL; return composed; } /* * Read a bitmap from the current read position on the mmaped * index, and increase the read position accordingly */ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index) { struct ewah_bitmap *b = ewah_pool_new(); int bitmap_size = ewah_read_mmap(b, index->map + index->map_pos, index->map_size - index->map_pos); if (bitmap_size < 0) { error("Failed to load bitmap index (corrupted?)"); ewah_pool_free(b); return NULL; } index->map_pos += bitmap_size; return b; } static int load_bitmap_header(struct bitmap_index *index) { struct bitmap_disk_header *header = (void *)index->map; if (index->map_size < sizeof(*header) + 20) return error("Corrupted bitmap index (missing header data)"); if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0) return error("Corrupted bitmap index file (wrong header)"); index->version = ntohs(header->version); if (index->version != 1) return error("Unsupported version for bitmap index file (%d)", index->version); /* Parse known bitmap format options */ { uint32_t flags = ntohs(header->options); if ((flags & BITMAP_OPT_FULL_DAG) == 0) return error("Unsupported options for bitmap index file " "(Git requires BITMAP_OPT_FULL_DAG)"); if (flags & BITMAP_OPT_HASH_CACHE) { unsigned char *end = index->map + index->map_size - 20; index->hashes = ((uint32_t *)end) - index->pack->num_objects; } } index->entry_count = ntohl(header->entry_count); index->map_pos += sizeof(*header); return 0; } static struct stored_bitmap *store_bitmap(struct bitmap_index *index, struct ewah_bitmap *root, const unsigned char *sha1, struct stored_bitmap *xor_with, int flags) { struct stored_bitmap *stored; khiter_t hash_pos; int ret; stored = xmalloc(sizeof(struct stored_bitmap)); stored->root = root; stored->xor = xor_with; stored->flags = flags; hashcpy(stored->sha1, sha1); hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret); /* a 0 return code means the insertion succeeded with no changes, * because the SHA1 already existed on the map. 
this is bad, there * shouldn't be duplicated commits in the index */ if (ret == 0) { error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1)); return NULL; } kh_value(index->bitmaps, hash_pos) = stored; return stored; } static inline uint32_t read_be32(const unsigned char *buffer, size_t *pos) { uint32_t result = get_be32(buffer + *pos); (*pos) += sizeof(result); return result; } static inline uint8_t read_u8(const unsigned char *buffer, size_t *pos) { return buffer[(*pos)++]; } #define MAX_XOR_OFFSET 160 static int load_bitmap_entries_v1(struct bitmap_index *index) { uint32_t i; struct stored_bitmap *recent_bitmaps[MAX_XOR_OFFSET] = { NULL }; for (i = 0; i < index->entry_count; ++i) { int xor_offset, flags; struct ewah_bitmap *bitmap = NULL; struct stored_bitmap *xor_bitmap = NULL; uint32_t commit_idx_pos; const unsigned char *sha1; commit_idx_pos = read_be32(index->map, &index->map_pos); xor_offset = read_u8(index->map, &index->map_pos); flags = read_u8(index->map, &index->map_pos); sha1 = nth_packed_object_sha1(index->pack, commit_idx_pos); bitmap = read_bitmap_1(index); if (!bitmap) return -1; if (xor_offset > MAX_XOR_OFFSET || xor_offset > i) return error("Corrupted bitmap pack index"); if (xor_offset > 0) { xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET]; if (xor_bitmap == NULL) return error("Invalid XOR offset in bitmap pack index"); } recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap( index, bitmap, sha1, xor_bitmap, flags); } return 0; } static char *pack_bitmap_filename(struct packed_git *p) { size_t len; if (!strip_suffix(p->pack_name, ".pack", &len)) die("BUG: pack_name does not end in .pack"); return xstrfmt("%.*s.bitmap", (int)len, p->pack_name); } static int open_pack_bitmap_1(struct packed_git *packfile) { int fd; struct stat st; char *idx_name; if (open_pack_index(packfile)) return -1; idx_name = pack_bitmap_filename(packfile); fd = git_open_noatime(idx_name); free(idx_name); if (fd < 0) return -1; if (fstat(fd, &st)) { close(fd); return -1; } if (bitmap_git.pack) { warning("ignoring extra bitmap file: %s", packfile->pack_name); close(fd); return -1; } bitmap_git.pack = packfile; bitmap_git.map_size = xsize_t(st.st_size); bitmap_git.map = xmmap(NULL, bitmap_git.map_size, PROT_READ, MAP_PRIVATE, fd, 0); bitmap_git.map_pos = 0; close(fd); if (load_bitmap_header(&bitmap_git) < 0) { munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } return 0; } static int load_pack_bitmap(void) { assert(bitmap_git.map && !bitmap_git.loaded); bitmap_git.bitmaps = kh_init_sha1(); bitmap_git.ext_index.positions = kh_init_sha1_pos(); load_pack_revindex(bitmap_git.pack); if (!(bitmap_git.commits = read_bitmap_1(&bitmap_git)) || !(bitmap_git.trees = read_bitmap_1(&bitmap_git)) || !(bitmap_git.blobs = read_bitmap_1(&bitmap_git)) || !(bitmap_git.tags = read_bitmap_1(&bitmap_git))) goto failed; if (load_bitmap_entries_v1(&bitmap_git) < 0) goto failed; bitmap_git.loaded = 1; return 0; failed: munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } static int open_pack_bitmap(void) { struct packed_git *p; int ret = -1; assert(!bitmap_git.map && !bitmap_git.loaded); prepare_packed_git(); for (p = packed_git; p; p = p->next) { if (open_pack_bitmap_1(p) == 0) ret = 0; } return ret; } int prepare_bitmap_git(void) { if (bitmap_git.loaded) return 0; if (!open_pack_bitmap()) return load_pack_bitmap(); return -1; } struct include_data { struct bitmap *base; struct bitmap *seen; }; static 
inline int bitmap_position_extended(const unsigned char *sha1) { khash_sha1_pos *positions = bitmap_git.ext_index.positions; khiter_t pos = kh_get_sha1_pos(positions, sha1); if (pos < kh_end(positions)) { int bitmap_pos = kh_value(positions, pos); return bitmap_pos + bitmap_git.pack->num_objects; } return -1; } static inline int bitmap_position_packfile(const unsigned char *sha1) { off_t offset = find_pack_entry_one(sha1, bitmap_git.pack); if (!offset) return -1; return find_revindex_position(bitmap_git.pack, offset); } static int bitmap_position(const unsigned char *sha1) { int pos = bitmap_position_packfile(sha1); return (pos >= 0) ? pos : bitmap_position_extended(sha1); } static int ext_index_add_object(struct object *object, const char *name) { struct eindex *eindex = &bitmap_git.ext_index; khiter_t hash_pos; int hash_ret; int bitmap_pos; hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret); if (hash_ret > 0) { if (eindex->count >= eindex->alloc) { eindex->alloc = (eindex->alloc + 16) * 3 / 2; REALLOC_ARRAY(eindex->objects, eindex->alloc); REALLOC_ARRAY(eindex->hashes, eindex->alloc); } bitmap_pos = eindex->count; eindex->objects[eindex->count] = object; eindex->hashes[eindex->count] = pack_name_hash(name); kh_value(eindex->positions, hash_pos) = bitmap_pos; eindex->count++; } else { bitmap_pos = kh_value(eindex->positions, hash_pos); } return bitmap_pos + bitmap_git.pack->num_objects; } static void show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) { char *name = path_name(path, last); bitmap_pos = ext_index_add_object(object, name); free(name); } bitmap_set(base, bitmap_pos); } static void show_commit(struct commit *commit, void *data) { } static int add_to_include_set(struct include_data *data, const unsigned char *sha1, int bitmap_pos) { khiter_t hash_pos; if (data->seen && bitmap_get(data->seen, bitmap_pos)) return 0; if (bitmap_get(data->base, bitmap_pos)) return 0; hash_pos = kh_get_sha1(bitmap_git.bitmaps, sha1); if (hash_pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, hash_pos); bitmap_or_ewah(data->base, lookup_stored_bitmap(st)); return 0; } bitmap_set(data->base, bitmap_pos); return 1; } static int should_include(struct commit *commit, void *_data) { struct include_data *data = _data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object((struct object *)commit, NULL); if (!add_to_include_set(data, commit->object.oid.hash, bitmap_pos)) { struct commit_list *parent = commit->parents; while (parent) { parent->item->object.flags |= SEEN; parent = parent->next; } return 0; } return 1; } static struct bitmap *find_objects(struct rev_info *revs, struct object_list *roots, struct bitmap *seen) { struct bitmap *base = NULL; int needs_walk = 0; struct object_list *not_mapped = NULL; /* * Go through all the roots for the walk. The ones that have bitmaps * on the bitmap index will be `or`ed together to form an initial * global reachability analysis. * * The ones without bitmaps in the index will be stored in the * `not_mapped_list` for further processing. 
*/ while (roots) { struct object *object = roots->item; roots = roots->next; if (object->type == OBJ_COMMIT) { khiter_t pos = kh_get_sha1(bitmap_git.bitmaps, object->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *or_with = lookup_stored_bitmap(st); if (base == NULL) base = ewah_to_bitmap(or_with); else bitmap_or_ewah(base, or_with); object->flags |= SEEN; continue; } } object_list_insert(object, &not_mapped); } /* * Best case scenario: We found bitmaps for all the roots, * so the resulting `or` bitmap has the full reachability analysis */ if (not_mapped == NULL) return base; roots = not_mapped; /* * Let's iterate through all the roots that don't have bitmaps to * check if we can determine them to be reachable from the existing * global bitmap. * * If we cannot find them in the existing global bitmap, we'll need * to push them to an actual walk and run it until we can confirm * they are reachable */ while (roots) { struct object *object = roots->item; int pos; roots = roots->next; pos = bitmap_position(object->oid.hash); if (pos < 0 || base == NULL || !bitmap_get(base, pos)) { object->flags &= ~UNINTERESTING; add_pending_object(revs, object, ""); needs_walk = 1; } else { object->flags |= SEEN; } } if (needs_walk) { struct include_data incdata; if (base == NULL) base = bitmap_new(); incdata.base = base; incdata.seen = seen; revs->include_check = should_include; revs->include_check_data = &incdata; if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, show_commit, show_object, base); } return base; } static void show_extended_objects(struct bitmap *objects, show_reachable_fn show_reach) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i; for (i = 0; i < eindex->count; ++i) { struct object *obj; if (!bitmap_get(objects, bitmap_git.pack->num_objects + i)) continue; obj = eindex->objects[i]; show_reach(obj->oid.hash, obj->type, 0, eindex->hashes[i], NULL, 0); } } static void show_objects_for_type( struct bitmap *objects, struct ewah_bitmap *type_filter, enum object_type object_type, show_reachable_fn show_reach) { size_t pos = 0, i = 0; uint32_t offset; struct ewah_iterator it; eword_t filter; if (bitmap_git.reuse_objects == bitmap_git.pack->num_objects) return; ewah_iterator_init(&it, type_filter); while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i] & filter; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { const unsigned char *sha1; struct revindex_entry *entry; uint32_t hash = 0; if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); if (pos + offset < bitmap_git.reuse_objects) continue; entry = &bitmap_git.pack->revindex[pos + offset]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); if (bitmap_git.hashes) hash = ntohl(bitmap_git.hashes[entry->nr]); show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset); } pos += BITS_IN_EWORD; i++; } } static int in_bitmapped_pack(struct object_list *roots) { while (roots) { struct object *object = roots->item; roots = roots->next; if (find_pack_entry_one(object->oid.hash, bitmap_git.pack) > 0) return 1; } return 0; } int prepare_bitmap_walk(struct rev_info *revs) { unsigned int i; unsigned int pending_nr = revs->pending.nr; struct object_array_entry *pending_e = revs->pending.objects; struct object_list *wants = NULL; struct object_list *haves = NULL; struct bitmap *wants_bitmap = NULL; struct bitmap *haves_bitmap = NULL; 
if (!bitmap_git.loaded) { /* try to open a bitmapped pack, but don't parse it yet * because we may not need to use it */ if (open_pack_bitmap() < 0) return -1; } for (i = 0; i < pending_nr; ++i) { struct object *object = pending_e[i].item; if (object->type == OBJ_NONE) parse_object_or_die(object->oid.hash, NULL); while (object->type == OBJ_TAG) { struct tag *tag = (struct tag *) object; if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); if (!tag->tagged) die("bad tag"); object = parse_object_or_die(tag->tagged->oid.hash, NULL); } if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); } /* * if we have a HAVES list, but none of those haves is contained * in the packfile that has a bitmap, we don't have anything to * optimize here */ if (haves && !in_bitmapped_pack(haves)) return -1; /* if we don't want anything, we're done here */ if (!wants) return -1; /* * now we're going to use bitmaps, so load the actual bitmap entries * from disk. this is the point of no return; after this the rev_list * becomes invalidated and we must perform the revwalk through bitmaps */ if (!bitmap_git.loaded && load_pack_bitmap() < 0) return -1; revs->pending.nr = 0; revs->pending.alloc = 0; revs->pending.objects = NULL; if (haves) { revs->ignore_missing_links = 1; haves_bitmap = find_objects(revs, haves, NULL); reset_revision_walk(); revs->ignore_missing_links = 0; if (haves_bitmap == NULL) die("BUG: failed to perform bitmap walk"); } wants_bitmap = find_objects(revs, wants, haves_bitmap); if (!wants_bitmap) die("BUG: failed to perform bitmap walk"); if (haves_bitmap) bitmap_and_not(wants_bitmap, haves_bitmap); bitmap_git.result = wants_bitmap; bitmap_free(haves_bitmap); return 0; } int reuse_partial_packfile_from_bitmap(struct packed_git **packfile, uint32_t *entries, off_t *up_to) { /* * Reuse the packfile content if we need more than * 90% of its objects */ static const double REUSE_PERCENT = 0.9; struct bitmap *result = bitmap_git.result; uint32_t reuse_threshold; uint32_t i, reuse_objects = 0; assert(result); for (i = 0; i < result->word_alloc; ++i) { if (result->words[i] != (eword_t)~0) { reuse_objects += ewah_bit_ctz64(~result->words[i]); break; } reuse_objects += BITS_IN_EWORD; } #ifdef GIT_BITMAP_DEBUG { const unsigned char *sha1; struct revindex_entry *entry; entry = &bitmap_git.reverse_index->revindex[reuse_objects]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); fprintf(stderr, "Failed to reuse at %d (%016llx)\n", reuse_objects, result->words[i]); fprintf(stderr, " %s\n", sha1_to_hex(sha1)); } #endif if (!reuse_objects) return -1; if (reuse_objects >= bitmap_git.pack->num_objects) { bitmap_git.reuse_objects = *entries = bitmap_git.pack->num_objects; *up_to = -1; /* reuse the full pack */ *packfile = bitmap_git.pack; return 0; } reuse_threshold = bitmap_popcount(bitmap_git.result) * REUSE_PERCENT; if (reuse_objects < reuse_threshold) return -1; bitmap_git.reuse_objects = *entries = reuse_objects; *up_to = bitmap_git.pack->revindex[reuse_objects].offset; *packfile = bitmap_git.pack; return 0; } void traverse_bitmap_commit_list(show_reachable_fn show_reachable) { assert(bitmap_git.result); show_objects_for_type(bitmap_git.result, bitmap_git.commits, OBJ_COMMIT, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.trees, OBJ_TREE, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.blobs, OBJ_BLOB, show_reachable); 
show_objects_for_type(bitmap_git.result, bitmap_git.tags, OBJ_TAG, show_reachable); show_extended_objects(bitmap_git.result, show_reachable); bitmap_free(bitmap_git.result); bitmap_git.result = NULL; } static uint32_t count_object_type(struct bitmap *objects, enum object_type type) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i = 0, count = 0; struct ewah_iterator it; eword_t filter; switch (type) { case OBJ_COMMIT: ewah_iterator_init(&it, bitmap_git.commits); break; case OBJ_TREE: ewah_iterator_init(&it, bitmap_git.trees); break; case OBJ_BLOB: ewah_iterator_init(&it, bitmap_git.blobs); break; case OBJ_TAG: ewah_iterator_init(&it, bitmap_git.tags); break; default: return 0; } while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i++] & filter; count += ewah_bit_popcount64(word); } for (i = 0; i < eindex->count; ++i) { if (eindex->objects[i]->type == type && bitmap_get(objects, bitmap_git.pack->num_objects + i)) count++; } return count; } void count_bitmap_commit_list(uint32_t *commits, uint32_t *trees, uint32_t *blobs, uint32_t *tags) { assert(bitmap_git.result); if (commits) *commits = count_object_type(bitmap_git.result, OBJ_COMMIT); if (trees) *trees = count_object_type(bitmap_git.result, OBJ_TREE); if (blobs) *blobs = count_object_type(bitmap_git.result, OBJ_BLOB); if (tags) *tags = count_object_type(bitmap_git.result, OBJ_TAG); } struct bitmap_test_data { struct bitmap *base; struct progress *prg; size_t seen; }; static void test_show_object(struct object *object, struct strbuf *path, const char *last, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } static void test_show_commit(struct commit *commit, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } void test_bitmap_walk(struct rev_info *revs) { struct object *root; struct bitmap *result = NULL; khiter_t pos; size_t result_popcnt; struct bitmap_test_data tdata; if (prepare_bitmap_git()) die("failed to load bitmap indexes"); if (revs->pending.nr != 1) die("you must specify exactly one commit to test"); fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n", bitmap_git.version, bitmap_git.entry_count); root = revs->pending.objects[0].item; pos = kh_get_sha1(bitmap_git.bitmaps, root->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *bm = lookup_stored_bitmap(st); fprintf(stderr, "Found bitmap for %s. 
%d bits / %08x checksum\n", oid_to_hex(&root->oid), (int)bm->bit_size, ewah_checksum(bm)); result = ewah_to_bitmap(bm); } if (result == NULL) die("Commit %s doesn't have an indexed bitmap", oid_to_hex(&root->oid)); revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; result_popcnt = bitmap_popcount(result); if (prepare_revision_walk(revs)) die("revision walk setup failed"); tdata.base = bitmap_new(); tdata.prg = start_progress("Verifying bitmap entries", result_popcnt); tdata.seen = 0; traverse_commit_list(revs, &test_show_commit, &test_show_object, &tdata); stop_progress(&tdata.prg); if (bitmap_equals(result, tdata.base)) fprintf(stderr, "OK!\n"); else fprintf(stderr, "Mismatch!\n"); bitmap_free(result); } static int rebuild_bitmap(uint32_t *reposition, struct ewah_bitmap *source, struct bitmap *dest) { uint32_t pos = 0; struct ewah_iterator it; eword_t word; ewah_iterator_init(&it, source); while (ewah_iterator_next(&word, &it)) { uint32_t offset, bit_pos; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); bit_pos = reposition[pos + offset]; if (bit_pos > 0) bitmap_set(dest, bit_pos - 1); else /* can't reuse, we don't have the object */ return -1; } pos += BITS_IN_EWORD; } return 0; } int rebuild_existing_bitmaps(struct packing_data *mapping, khash_sha1 *reused_bitmaps, int show_progress) { uint32_t i, num_objects; uint32_t *reposition; struct bitmap *rebuild; struct stored_bitmap *stored; struct progress *progress = NULL; khiter_t hash_pos; int hash_ret; if (prepare_bitmap_git() < 0) return -1; num_objects = bitmap_git.pack->num_objects; reposition = xcalloc(num_objects, sizeof(uint32_t)); for (i = 0; i < num_objects; ++i) { const unsigned char *sha1; struct revindex_entry *entry; struct object_entry *oe; entry = &bitmap_git.pack->revindex[i]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); oe = packlist_find(mapping, sha1, NULL); if (oe) reposition[i] = oe->in_pack_pos + 1; } rebuild = bitmap_new(); i = 0; if (show_progress) progress = start_progress("Reusing bitmaps", 0); kh_foreach_value(bitmap_git.bitmaps, stored, { if (stored->flags & BITMAP_FLAG_REUSE) { if (!rebuild_bitmap(reposition, lookup_stored_bitmap(stored), rebuild)) { hash_pos = kh_put_sha1(reused_bitmaps, stored->sha1, &hash_ret); kh_value(reused_bitmaps, hash_pos) = bitmap_to_ewah(rebuild); } bitmap_reset(rebuild); display_progress(progress, ++i); } }); stop_progress(&progress); free(reposition); bitmap_free(rebuild); return 0; }
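One detail worth pulling out of load_bitmap_entries_v1() above is how it validates XOR-compressed entries: an entry may only reference one of the previous MAX_XOR_OFFSET bitmaps, and never an entry that has not been loaded yet, which is what keeps a corrupted .bitmap file from indexing outside the recent_bitmaps ring. Here is a compressed model of just that check, not git's actual code; xor_offset_valid and xor_base_slot are illustrative names.

#include <stdint.h>
#include <stdio.h>

#define MAX_XOR_OFFSET 160   /* ring-buffer window from the code above */

/* A corrupt index may claim a base further back than the window, or
 * before the first entry; both must be rejected before indexing the ring. */
static int xor_offset_valid(uint32_t i, uint32_t xor_offset)
{
    return xor_offset <= MAX_XOR_OFFSET && xor_offset <= i;
}

/* Ring-buffer slot of the base bitmap for entry i (xor_offset >= 1;
 * an offset of 0 means the entry is self-contained and has no base). */
static uint32_t xor_base_slot(uint32_t i, uint32_t xor_offset)
{
    return (i - xor_offset) % MAX_XOR_OFFSET;
}

int main(void)
{
    printf("%d\n", xor_offset_valid(2, 5));        /* 0: points before entry 0 */
    if (xor_offset_valid(10, 3))
        printf("base of entry 10 lives in slot %u\n",
               xor_base_slot(10, 3));              /* slot 7 */
    return 0;
}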
#include "cache.h" #include "commit.h" #include "tag.h" #include "diff.h" #include "revision.h" #include "progress.h" #include "list-objects.h" #include "pack.h" #include "pack-bitmap.h" #include "pack-revindex.h" #include "pack-objects.h" /* * An entry on the bitmap index, representing the bitmap for a given * commit. */ struct stored_bitmap { unsigned char sha1[20]; struct ewah_bitmap *root; struct stored_bitmap *xor; int flags; }; /* * The currently active bitmap index. By design, repositories only have * a single bitmap index available (the index for the biggest packfile in * the repository), since bitmap indexes need full closure. * * If there is more than one bitmap index available (e.g. because of alternates), * the active bitmap index is the largest one. */ static struct bitmap_index { /* Packfile to which this bitmap index belongs to */ struct packed_git *pack; /* * Mark the first `reuse_objects` in the packfile as reused: * they will be sent as-is without using them for repacking * calculations */ uint32_t reuse_objects; /* mmapped buffer of the whole bitmap index */ unsigned char *map; size_t map_size; /* size of the mmaped buffer */ size_t map_pos; /* current position when loading the index */ /* * Type indexes. * * Each bitmap marks which objects in the packfile are of the given * type. This provides type information when yielding the objects from * the packfile during a walk, which allows for better delta bases. */ struct ewah_bitmap *commits; struct ewah_bitmap *trees; struct ewah_bitmap *blobs; struct ewah_bitmap *tags; /* Map from SHA1 -> `stored_bitmap` for all the bitmapped commits */ khash_sha1 *bitmaps; /* Number of bitmapped commits */ uint32_t entry_count; /* Name-hash cache (or NULL if not present). */ uint32_t *hashes; /* * Extended index. 
* * When trying to perform bitmap operations with objects that are not * packed in `pack`, these objects are added to this "fake index" and * are assumed to appear at the end of the packfile for all operations */ struct eindex { struct object **objects; uint32_t *hashes; uint32_t count, alloc; khash_sha1_pos *positions; } ext_index; /* Bitmap result of the last performed walk */ struct bitmap *result; /* Version of the bitmap index */ unsigned int version; unsigned loaded : 1; } bitmap_git; static struct ewah_bitmap *lookup_stored_bitmap(struct stored_bitmap *st) { struct ewah_bitmap *parent; struct ewah_bitmap *composed; if (st->xor == NULL) return st->root; composed = ewah_pool_new(); parent = lookup_stored_bitmap(st->xor); ewah_xor(st->root, parent, composed); ewah_pool_free(st->root); st->root = composed; st->xor = NULL; return composed; } /* * Read a bitmap from the current read position on the mmaped * index, and increase the read position accordingly */ static struct ewah_bitmap *read_bitmap_1(struct bitmap_index *index) { struct ewah_bitmap *b = ewah_pool_new(); int bitmap_size = ewah_read_mmap(b, index->map + index->map_pos, index->map_size - index->map_pos); if (bitmap_size < 0) { error("Failed to load bitmap index (corrupted?)"); ewah_pool_free(b); return NULL; } index->map_pos += bitmap_size; return b; } static int load_bitmap_header(struct bitmap_index *index) { struct bitmap_disk_header *header = (void *)index->map; if (index->map_size < sizeof(*header) + 20) return error("Corrupted bitmap index (missing header data)"); if (memcmp(header->magic, BITMAP_IDX_SIGNATURE, sizeof(BITMAP_IDX_SIGNATURE)) != 0) return error("Corrupted bitmap index file (wrong header)"); index->version = ntohs(header->version); if (index->version != 1) return error("Unsupported version for bitmap index file (%d)", index->version); /* Parse known bitmap format options */ { uint32_t flags = ntohs(header->options); if ((flags & BITMAP_OPT_FULL_DAG) == 0) return error("Unsupported options for bitmap index file " "(Git requires BITMAP_OPT_FULL_DAG)"); if (flags & BITMAP_OPT_HASH_CACHE) { unsigned char *end = index->map + index->map_size - 20; index->hashes = ((uint32_t *)end) - index->pack->num_objects; } } index->entry_count = ntohl(header->entry_count); index->map_pos += sizeof(*header); return 0; } static struct stored_bitmap *store_bitmap(struct bitmap_index *index, struct ewah_bitmap *root, const unsigned char *sha1, struct stored_bitmap *xor_with, int flags) { struct stored_bitmap *stored; khiter_t hash_pos; int ret; stored = xmalloc(sizeof(struct stored_bitmap)); stored->root = root; stored->xor = xor_with; stored->flags = flags; hashcpy(stored->sha1, sha1); hash_pos = kh_put_sha1(index->bitmaps, stored->sha1, &ret); /* a 0 return code means the insertion succeeded with no changes, * because the SHA1 already existed on the map. 
this is bad, there * shouldn't be duplicated commits in the index */ if (ret == 0) { error("Duplicate entry in bitmap index: %s", sha1_to_hex(sha1)); return NULL; } kh_value(index->bitmaps, hash_pos) = stored; return stored; } static inline uint32_t read_be32(const unsigned char *buffer, size_t *pos) { uint32_t result = get_be32(buffer + *pos); (*pos) += sizeof(result); return result; } static inline uint8_t read_u8(const unsigned char *buffer, size_t *pos) { return buffer[(*pos)++]; } #define MAX_XOR_OFFSET 160 static int load_bitmap_entries_v1(struct bitmap_index *index) { uint32_t i; struct stored_bitmap *recent_bitmaps[MAX_XOR_OFFSET] = { NULL }; for (i = 0; i < index->entry_count; ++i) { int xor_offset, flags; struct ewah_bitmap *bitmap = NULL; struct stored_bitmap *xor_bitmap = NULL; uint32_t commit_idx_pos; const unsigned char *sha1; commit_idx_pos = read_be32(index->map, &index->map_pos); xor_offset = read_u8(index->map, &index->map_pos); flags = read_u8(index->map, &index->map_pos); sha1 = nth_packed_object_sha1(index->pack, commit_idx_pos); bitmap = read_bitmap_1(index); if (!bitmap) return -1; if (xor_offset > MAX_XOR_OFFSET || xor_offset > i) return error("Corrupted bitmap pack index"); if (xor_offset > 0) { xor_bitmap = recent_bitmaps[(i - xor_offset) % MAX_XOR_OFFSET]; if (xor_bitmap == NULL) return error("Invalid XOR offset in bitmap pack index"); } recent_bitmaps[i % MAX_XOR_OFFSET] = store_bitmap( index, bitmap, sha1, xor_bitmap, flags); } return 0; } static char *pack_bitmap_filename(struct packed_git *p) { size_t len; if (!strip_suffix(p->pack_name, ".pack", &len)) die("BUG: pack_name does not end in .pack"); return xstrfmt("%.*s.bitmap", (int)len, p->pack_name); } static int open_pack_bitmap_1(struct packed_git *packfile) { int fd; struct stat st; char *idx_name; if (open_pack_index(packfile)) return -1; idx_name = pack_bitmap_filename(packfile); fd = git_open_noatime(idx_name); free(idx_name); if (fd < 0) return -1; if (fstat(fd, &st)) { close(fd); return -1; } if (bitmap_git.pack) { warning("ignoring extra bitmap file: %s", packfile->pack_name); close(fd); return -1; } bitmap_git.pack = packfile; bitmap_git.map_size = xsize_t(st.st_size); bitmap_git.map = xmmap(NULL, bitmap_git.map_size, PROT_READ, MAP_PRIVATE, fd, 0); bitmap_git.map_pos = 0; close(fd); if (load_bitmap_header(&bitmap_git) < 0) { munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } return 0; } static int load_pack_bitmap(void) { assert(bitmap_git.map && !bitmap_git.loaded); bitmap_git.bitmaps = kh_init_sha1(); bitmap_git.ext_index.positions = kh_init_sha1_pos(); load_pack_revindex(bitmap_git.pack); if (!(bitmap_git.commits = read_bitmap_1(&bitmap_git)) || !(bitmap_git.trees = read_bitmap_1(&bitmap_git)) || !(bitmap_git.blobs = read_bitmap_1(&bitmap_git)) || !(bitmap_git.tags = read_bitmap_1(&bitmap_git))) goto failed; if (load_bitmap_entries_v1(&bitmap_git) < 0) goto failed; bitmap_git.loaded = 1; return 0; failed: munmap(bitmap_git.map, bitmap_git.map_size); bitmap_git.map = NULL; bitmap_git.map_size = 0; return -1; } static int open_pack_bitmap(void) { struct packed_git *p; int ret = -1; assert(!bitmap_git.map && !bitmap_git.loaded); prepare_packed_git(); for (p = packed_git; p; p = p->next) { if (open_pack_bitmap_1(p) == 0) ret = 0; } return ret; } int prepare_bitmap_git(void) { if (bitmap_git.loaded) return 0; if (!open_pack_bitmap()) return load_pack_bitmap(); return -1; } struct include_data { struct bitmap *base; struct bitmap *seen; }; static 
inline int bitmap_position_extended(const unsigned char *sha1) { khash_sha1_pos *positions = bitmap_git.ext_index.positions; khiter_t pos = kh_get_sha1_pos(positions, sha1); if (pos < kh_end(positions)) { int bitmap_pos = kh_value(positions, pos); return bitmap_pos + bitmap_git.pack->num_objects; } return -1; } static inline int bitmap_position_packfile(const unsigned char *sha1) { off_t offset = find_pack_entry_one(sha1, bitmap_git.pack); if (!offset) return -1; return find_revindex_position(bitmap_git.pack, offset); } static int bitmap_position(const unsigned char *sha1) { int pos = bitmap_position_packfile(sha1); return (pos >= 0) ? pos : bitmap_position_extended(sha1); } static int ext_index_add_object(struct object *object, const char *name) { struct eindex *eindex = &bitmap_git.ext_index; khiter_t hash_pos; int hash_ret; int bitmap_pos; hash_pos = kh_put_sha1_pos(eindex->positions, object->oid.hash, &hash_ret); if (hash_ret > 0) { if (eindex->count >= eindex->alloc) { eindex->alloc = (eindex->alloc + 16) * 3 / 2; REALLOC_ARRAY(eindex->objects, eindex->alloc); REALLOC_ARRAY(eindex->hashes, eindex->alloc); } bitmap_pos = eindex->count; eindex->objects[eindex->count] = object; eindex->hashes[eindex->count] = pack_name_hash(name); kh_value(eindex->positions, hash_pos) = bitmap_pos; eindex->count++; } else { bitmap_pos = kh_value(eindex->positions, hash_pos); } return bitmap_pos + bitmap_git.pack->num_objects; } static void show_object(struct object *object, const char *name, void *data) { struct bitmap *base = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object(object, name); bitmap_set(base, bitmap_pos); } static void show_commit(struct commit *commit, void *data) { } static int add_to_include_set(struct include_data *data, const unsigned char *sha1, int bitmap_pos) { khiter_t hash_pos; if (data->seen && bitmap_get(data->seen, bitmap_pos)) return 0; if (bitmap_get(data->base, bitmap_pos)) return 0; hash_pos = kh_get_sha1(bitmap_git.bitmaps, sha1); if (hash_pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, hash_pos); bitmap_or_ewah(data->base, lookup_stored_bitmap(st)); return 0; } bitmap_set(data->base, bitmap_pos); return 1; } static int should_include(struct commit *commit, void *_data) { struct include_data *data = _data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) bitmap_pos = ext_index_add_object((struct object *)commit, NULL); if (!add_to_include_set(data, commit->object.oid.hash, bitmap_pos)) { struct commit_list *parent = commit->parents; while (parent) { parent->item->object.flags |= SEEN; parent = parent->next; } return 0; } return 1; } static struct bitmap *find_objects(struct rev_info *revs, struct object_list *roots, struct bitmap *seen) { struct bitmap *base = NULL; int needs_walk = 0; struct object_list *not_mapped = NULL; /* * Go through all the roots for the walk. The ones that have bitmaps * on the bitmap index will be `or`ed together to form an initial * global reachability analysis. * * The ones without bitmaps in the index will be stored in the * `not_mapped_list` for further processing. 
*/ while (roots) { struct object *object = roots->item; roots = roots->next; if (object->type == OBJ_COMMIT) { khiter_t pos = kh_get_sha1(bitmap_git.bitmaps, object->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *or_with = lookup_stored_bitmap(st); if (base == NULL) base = ewah_to_bitmap(or_with); else bitmap_or_ewah(base, or_with); object->flags |= SEEN; continue; } } object_list_insert(object, &not_mapped); } /* * Best case scenario: We found bitmaps for all the roots, * so the resulting `or` bitmap has the full reachability analysis */ if (not_mapped == NULL) return base; roots = not_mapped; /* * Let's iterate through all the roots that don't have bitmaps to * check if we can determine them to be reachable from the existing * global bitmap. * * If we cannot find them in the existing global bitmap, we'll need * to push them to an actual walk and run it until we can confirm * they are reachable */ while (roots) { struct object *object = roots->item; int pos; roots = roots->next; pos = bitmap_position(object->oid.hash); if (pos < 0 || base == NULL || !bitmap_get(base, pos)) { object->flags &= ~UNINTERESTING; add_pending_object(revs, object, ""); needs_walk = 1; } else { object->flags |= SEEN; } } if (needs_walk) { struct include_data incdata; if (base == NULL) base = bitmap_new(); incdata.base = base; incdata.seen = seen; revs->include_check = should_include; revs->include_check_data = &incdata; if (prepare_revision_walk(revs)) die("revision walk setup failed"); traverse_commit_list(revs, show_commit, show_object, base); } return base; } static void show_extended_objects(struct bitmap *objects, show_reachable_fn show_reach) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i; for (i = 0; i < eindex->count; ++i) { struct object *obj; if (!bitmap_get(objects, bitmap_git.pack->num_objects + i)) continue; obj = eindex->objects[i]; show_reach(obj->oid.hash, obj->type, 0, eindex->hashes[i], NULL, 0); } } static void show_objects_for_type( struct bitmap *objects, struct ewah_bitmap *type_filter, enum object_type object_type, show_reachable_fn show_reach) { size_t pos = 0, i = 0; uint32_t offset; struct ewah_iterator it; eword_t filter; if (bitmap_git.reuse_objects == bitmap_git.pack->num_objects) return; ewah_iterator_init(&it, type_filter); while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i] & filter; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { const unsigned char *sha1; struct revindex_entry *entry; uint32_t hash = 0; if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); if (pos + offset < bitmap_git.reuse_objects) continue; entry = &bitmap_git.pack->revindex[pos + offset]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); if (bitmap_git.hashes) hash = ntohl(bitmap_git.hashes[entry->nr]); show_reach(sha1, object_type, 0, hash, bitmap_git.pack, entry->offset); } pos += BITS_IN_EWORD; i++; } } static int in_bitmapped_pack(struct object_list *roots) { while (roots) { struct object *object = roots->item; roots = roots->next; if (find_pack_entry_one(object->oid.hash, bitmap_git.pack) > 0) return 1; } return 0; } int prepare_bitmap_walk(struct rev_info *revs) { unsigned int i; unsigned int pending_nr = revs->pending.nr; struct object_array_entry *pending_e = revs->pending.objects; struct object_list *wants = NULL; struct object_list *haves = NULL; struct bitmap *wants_bitmap = NULL; struct bitmap *haves_bitmap = NULL; 
if (!bitmap_git.loaded) { /* try to open a bitmapped pack, but don't parse it yet * because we may not need to use it */ if (open_pack_bitmap() < 0) return -1; } for (i = 0; i < pending_nr; ++i) { struct object *object = pending_e[i].item; if (object->type == OBJ_NONE) parse_object_or_die(object->oid.hash, NULL); while (object->type == OBJ_TAG) { struct tag *tag = (struct tag *) object; if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); if (!tag->tagged) die("bad tag"); object = parse_object_or_die(tag->tagged->oid.hash, NULL); } if (object->flags & UNINTERESTING) object_list_insert(object, &haves); else object_list_insert(object, &wants); } /* * if we have a HAVES list, but none of those haves is contained * in the packfile that has a bitmap, we don't have anything to * optimize here */ if (haves && !in_bitmapped_pack(haves)) return -1; /* if we don't want anything, we're done here */ if (!wants) return -1; /* * now we're going to use bitmaps, so load the actual bitmap entries * from disk. this is the point of no return; after this the rev_list * becomes invalidated and we must perform the revwalk through bitmaps */ if (!bitmap_git.loaded && load_pack_bitmap() < 0) return -1; revs->pending.nr = 0; revs->pending.alloc = 0; revs->pending.objects = NULL; if (haves) { revs->ignore_missing_links = 1; haves_bitmap = find_objects(revs, haves, NULL); reset_revision_walk(); revs->ignore_missing_links = 0; if (haves_bitmap == NULL) die("BUG: failed to perform bitmap walk"); } wants_bitmap = find_objects(revs, wants, haves_bitmap); if (!wants_bitmap) die("BUG: failed to perform bitmap walk"); if (haves_bitmap) bitmap_and_not(wants_bitmap, haves_bitmap); bitmap_git.result = wants_bitmap; bitmap_free(haves_bitmap); return 0; } int reuse_partial_packfile_from_bitmap(struct packed_git **packfile, uint32_t *entries, off_t *up_to) { /* * Reuse the packfile content if we need more than * 90% of its objects */ static const double REUSE_PERCENT = 0.9; struct bitmap *result = bitmap_git.result; uint32_t reuse_threshold; uint32_t i, reuse_objects = 0; assert(result); for (i = 0; i < result->word_alloc; ++i) { if (result->words[i] != (eword_t)~0) { reuse_objects += ewah_bit_ctz64(~result->words[i]); break; } reuse_objects += BITS_IN_EWORD; } #ifdef GIT_BITMAP_DEBUG { const unsigned char *sha1; struct revindex_entry *entry; entry = &bitmap_git.reverse_index->revindex[reuse_objects]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); fprintf(stderr, "Failed to reuse at %d (%016llx)\n", reuse_objects, result->words[i]); fprintf(stderr, " %s\n", sha1_to_hex(sha1)); } #endif if (!reuse_objects) return -1; if (reuse_objects >= bitmap_git.pack->num_objects) { bitmap_git.reuse_objects = *entries = bitmap_git.pack->num_objects; *up_to = -1; /* reuse the full pack */ *packfile = bitmap_git.pack; return 0; } reuse_threshold = bitmap_popcount(bitmap_git.result) * REUSE_PERCENT; if (reuse_objects < reuse_threshold) return -1; bitmap_git.reuse_objects = *entries = reuse_objects; *up_to = bitmap_git.pack->revindex[reuse_objects].offset; *packfile = bitmap_git.pack; return 0; } void traverse_bitmap_commit_list(show_reachable_fn show_reachable) { assert(bitmap_git.result); show_objects_for_type(bitmap_git.result, bitmap_git.commits, OBJ_COMMIT, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.trees, OBJ_TREE, show_reachable); show_objects_for_type(bitmap_git.result, bitmap_git.blobs, OBJ_BLOB, show_reachable); 
show_objects_for_type(bitmap_git.result, bitmap_git.tags, OBJ_TAG, show_reachable); show_extended_objects(bitmap_git.result, show_reachable); bitmap_free(bitmap_git.result); bitmap_git.result = NULL; } static uint32_t count_object_type(struct bitmap *objects, enum object_type type) { struct eindex *eindex = &bitmap_git.ext_index; uint32_t i = 0, count = 0; struct ewah_iterator it; eword_t filter; switch (type) { case OBJ_COMMIT: ewah_iterator_init(&it, bitmap_git.commits); break; case OBJ_TREE: ewah_iterator_init(&it, bitmap_git.trees); break; case OBJ_BLOB: ewah_iterator_init(&it, bitmap_git.blobs); break; case OBJ_TAG: ewah_iterator_init(&it, bitmap_git.tags); break; default: return 0; } while (i < objects->word_alloc && ewah_iterator_next(&filter, &it)) { eword_t word = objects->words[i++] & filter; count += ewah_bit_popcount64(word); } for (i = 0; i < eindex->count; ++i) { if (eindex->objects[i]->type == type && bitmap_get(objects, bitmap_git.pack->num_objects + i)) count++; } return count; } void count_bitmap_commit_list(uint32_t *commits, uint32_t *trees, uint32_t *blobs, uint32_t *tags) { assert(bitmap_git.result); if (commits) *commits = count_object_type(bitmap_git.result, OBJ_COMMIT); if (trees) *trees = count_object_type(bitmap_git.result, OBJ_TREE); if (blobs) *blobs = count_object_type(bitmap_git.result, OBJ_BLOB); if (tags) *tags = count_object_type(bitmap_git.result, OBJ_TAG); } struct bitmap_test_data { struct bitmap *base; struct progress *prg; size_t seen; }; static void test_show_object(struct object *object, const char *name, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(object->oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&object->oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } static void test_show_commit(struct commit *commit, void *data) { struct bitmap_test_data *tdata = data; int bitmap_pos; bitmap_pos = bitmap_position(commit->object.oid.hash); if (bitmap_pos < 0) die("Object not in bitmap: %s\n", oid_to_hex(&commit->object.oid)); bitmap_set(tdata->base, bitmap_pos); display_progress(tdata->prg, ++tdata->seen); } void test_bitmap_walk(struct rev_info *revs) { struct object *root; struct bitmap *result = NULL; khiter_t pos; size_t result_popcnt; struct bitmap_test_data tdata; if (prepare_bitmap_git()) die("failed to load bitmap indexes"); if (revs->pending.nr != 1) die("you must specify exactly one commit to test"); fprintf(stderr, "Bitmap v%d test (%d entries loaded)\n", bitmap_git.version, bitmap_git.entry_count); root = revs->pending.objects[0].item; pos = kh_get_sha1(bitmap_git.bitmaps, root->oid.hash); if (pos < kh_end(bitmap_git.bitmaps)) { struct stored_bitmap *st = kh_value(bitmap_git.bitmaps, pos); struct ewah_bitmap *bm = lookup_stored_bitmap(st); fprintf(stderr, "Found bitmap for %s. 
%d bits / %08x checksum\n", oid_to_hex(&root->oid), (int)bm->bit_size, ewah_checksum(bm)); result = ewah_to_bitmap(bm); } if (result == NULL) die("Commit %s doesn't have an indexed bitmap", oid_to_hex(&root->oid)); revs->tag_objects = 1; revs->tree_objects = 1; revs->blob_objects = 1; result_popcnt = bitmap_popcount(result); if (prepare_revision_walk(revs)) die("revision walk setup failed"); tdata.base = bitmap_new(); tdata.prg = start_progress("Verifying bitmap entries", result_popcnt); tdata.seen = 0; traverse_commit_list(revs, &test_show_commit, &test_show_object, &tdata); stop_progress(&tdata.prg); if (bitmap_equals(result, tdata.base)) fprintf(stderr, "OK!\n"); else fprintf(stderr, "Mismatch!\n"); bitmap_free(result); } static int rebuild_bitmap(uint32_t *reposition, struct ewah_bitmap *source, struct bitmap *dest) { uint32_t pos = 0; struct ewah_iterator it; eword_t word; ewah_iterator_init(&it, source); while (ewah_iterator_next(&word, &it)) { uint32_t offset, bit_pos; for (offset = 0; offset < BITS_IN_EWORD; ++offset) { if ((word >> offset) == 0) break; offset += ewah_bit_ctz64(word >> offset); bit_pos = reposition[pos + offset]; if (bit_pos > 0) bitmap_set(dest, bit_pos - 1); else /* can't reuse, we don't have the object */ return -1; } pos += BITS_IN_EWORD; } return 0; } int rebuild_existing_bitmaps(struct packing_data *mapping, khash_sha1 *reused_bitmaps, int show_progress) { uint32_t i, num_objects; uint32_t *reposition; struct bitmap *rebuild; struct stored_bitmap *stored; struct progress *progress = NULL; khiter_t hash_pos; int hash_ret; if (prepare_bitmap_git() < 0) return -1; num_objects = bitmap_git.pack->num_objects; reposition = xcalloc(num_objects, sizeof(uint32_t)); for (i = 0; i < num_objects; ++i) { const unsigned char *sha1; struct revindex_entry *entry; struct object_entry *oe; entry = &bitmap_git.pack->revindex[i]; sha1 = nth_packed_object_sha1(bitmap_git.pack, entry->nr); oe = packlist_find(mapping, sha1, NULL); if (oe) reposition[i] = oe->in_pack_pos + 1; } rebuild = bitmap_new(); i = 0; if (show_progress) progress = start_progress("Reusing bitmaps", 0); kh_foreach_value(bitmap_git.bitmaps, stored, { if (stored->flags & BITMAP_FLAG_REUSE) { if (!rebuild_bitmap(reposition, lookup_stored_bitmap(stored), rebuild)) { hash_pos = kh_put_sha1(reused_bitmaps, stored->sha1, &hash_ret); kh_value(reused_bitmaps, hash_pos) = bitmap_to_ewah(rebuild); } bitmap_reset(rebuild); display_progress(progress, ++i); } }); stop_progress(&progress); free(reposition); bitmap_free(rebuild); return 0; }
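Both versions of show_objects_for_type() above rely on the same word-at-a-time iteration idiom: instead of testing all 64 bits, the inner loop shifts the word and jumps straight to the next set bit with a count-trailing-zeros step. A self-contained rendering of that idiom follows, using the GCC/Clang __builtin_ctzll intrinsic where the real code calls ewah_bit_ctz64; for_each_set_bit is an illustrative name.

#include <stdint.h>
#include <stdio.h>

#define BITS_IN_EWORD 64

/* Visit each set bit of `word` in ascending order, skipping runs of zero
 * bits in one count-trailing-zeros step, exactly as the inner loop of
 * show_objects_for_type() does. */
static void for_each_set_bit(uint64_t word, void (*fn)(unsigned offset))
{
    unsigned offset;

    for (offset = 0; offset < BITS_IN_EWORD; ++offset) {
        if ((word >> offset) == 0)
            break;                                   /* no set bits remain */
        offset += (unsigned)__builtin_ctzll(word >> offset);
        fn(offset);
    }
}

static void print_offset(unsigned offset)
{
    printf("bit %u set\n", offset);
}

int main(void)
{
    for_each_set_bit(0x8000000000000005ULL, print_offset); /* bits 0, 2, 63 */
    return 0;
}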
static void show_object(struct object *object, struct strbuf *path,
			const char *last, void *data)
{
	struct bitmap *base = data;
	int bitmap_pos;

	bitmap_pos = bitmap_position(object->oid.hash);

	if (bitmap_pos < 0) {
		char *name = path_name(path, last);
		bitmap_pos = ext_index_add_object(object, name);
		free(name);
	}

	bitmap_set(base, bitmap_pos);
}
static void show_object(struct object *object, const char *name, void *data)
{
	struct bitmap *base = data;
	int bitmap_pos;

	bitmap_pos = bitmap_position(object->oid.hash);

	if (bitmap_pos < 0)
		bitmap_pos = ext_index_add_object(object, name);

	bitmap_set(base, bitmap_pos);
}
{'added': [(417, 'static void show_object(struct object *object, const char *name, void *data)'), (424, '\tif (bitmap_pos < 0)'), (893, 'static void test_show_object(struct object *object, const char *name,'), (894, '\t\t\t void *data)')], 'deleted': [(417, 'static void show_object(struct object *object, struct strbuf *path,'), (418, '\t\t\tconst char *last, void *data)'), (425, '\tif (bitmap_pos < 0) {'), (426, '\t\tchar *name = path_name(path, last);'), (428, '\t\tfree(name);'), (429, '\t}'), (897, 'static void test_show_object(struct object *object,'), (898, '\t\t\t struct strbuf *path,'), (899, '\t\t\t const char *last, void *data)')]}
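The signature change recorded above is the substance of this entry: instead of every callback reconstructing the object's name with path_name(), the name-building step where this CVE's overflow lived, the traversal now computes the name once and hands callbacks a read-only const char *. A toy illustration of that callback shape follows; traverse, object_cb, and struct obj are stand-ins, not git's real types, and git's traversal builds the name in a growable strbuf rather than a fixed buffer.

#include <stdio.h>
#include <string.h>

struct obj { const char *id; };

/* After the change: the traversal owns the name buffer and callbacks only
 * borrow it, so no callback needs to allocate or rebuild paths itself. */
typedef void (*object_cb)(struct obj *o, const char *name, void *data);

static void traverse(struct obj *objs, size_t n, object_cb cb, void *data)
{
    char name[4096];
    size_t i;

    for (i = 0; i < n; i++) {
        /* One bounded formatting step replaces per-callback path_name(). */
        snprintf(name, sizeof(name), "dir/%s", objs[i].id);
        cb(&objs[i], name, data);
    }
}

static void count_cb(struct obj *o, const char *name, void *data)
{
    (void)o;
    printf("visited %s\n", name);
    ++*(unsigned *)data;
}

int main(void)
{
    struct obj objs[] = { { "a.c" }, { "b.c" } };
    unsigned visited = 0;

    traverse(objs, 2, count_cb, &visited);
    printf("%u objects\n", visited);
    return 0;
}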
4
9
744
4,729
13
84
2
https://github.com/git/git
CVE-2016-2315
CWE-119
3,247
color.c
C
sycc444_to_rgb
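All of the sycc*_to_rgb() variants in the code below funnel through one per-pixel inverse transform with clamping, using the matrix quoted in the file's header comment. As a sketch, here is that arithmetic written out for the common 8-bit case (chroma offset 128, upper bound 255); clamp and sycc_to_rgb8 are illustrative names, and the real helper is the file's sycc_to_rgb(), which derives the offset and bound from the component precision.

#include <stdio.h>

static int clamp(int v, int upb)
{
    return v < 0 ? 0 : (v > upb ? upb : v);
}

/* Per-pixel inverse sYCC -> RGB step for 8-bit samples. */
static void sycc_to_rgb8(int y, int cb, int cr, int *r, int *g, int *b)
{
    cb -= 128;                                   /* remove chroma offset */
    cr -= 128;
    *r = clamp(y + (int)(1.402 * cr), 255);
    *g = clamp(y - (int)(0.344 * cb + 0.714 * cr), 255);
    *b = clamp(y + (int)(1.772 * cb), 255);
}

int main(void)
{
    int r, g, b;

    sycc_to_rgb8(128, 128, 128, &r, &g, &b);     /* neutral gray */
    printf("%d %d %d\n", r, g, b);               /* prints 128 128 128 */
    return 0;
}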
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2001-2003, David Janssens * Copyright (c) 2002-2003, Yannick Verschueren * Copyright (c) 2003-2007, Francois-Olivier Devaux * Copyright (c) 2003-2014, Antonin Descampe * Copyright (c) 2005, Herve Drolon, FreeImage Team * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include "opj_apps_config.h" #include "openjpeg.h" #include "color.h" #ifdef OPJ_HAVE_LIBLCMS2 #include <lcms2.h> #endif #ifdef OPJ_HAVE_LIBLCMS1 #include <lcms.h> #endif #ifdef OPJ_USE_LEGACY #define OPJ_CLRSPC_GRAY CLRSPC_GRAY #define OPJ_CLRSPC_SRGB CLRSPC_SRGB #endif /*-------------------------------------------------------- Matrix for sYCC, Amendment 1 to IEC 61966-2-1 Y : 0.299 0.587 0.114 :R Cb: -0.1687 -0.3312 0.5 :G Cr: 0.5 -0.4187 -0.0812 :B Inverse: R: 1 -3.68213e-05 1.40199 :Y G: 1.00003 -0.344125 -0.714128 :Cb - 2^(prec - 1) B: 0.999823 1.77204 -8.04142e-06 :Cr - 2^(prec - 1) -----------------------------------------------------------*/ static void sycc_to_rgb(int offset, int upb, int y, int cb, int cr, int *out_r, int *out_g, int *out_b) { int r, g, b; cb -= offset; cr -= offset; r = y + (int)(1.402 * (float)cr); if(r < 0) r = 0; else if(r > upb) r = upb; *out_r = r; g = y - (int)(0.344 * (float)cb + 0.714 * (float)cr); if(g < 0) g = 0; else if(g > upb) g = upb; *out_g = g; b = y + (int)(1.772 * (float)cb); if(b < 0) b = 0; else if(b > upb) b = upb; *out_b = b; } static void sycc444_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; unsigned int maxw, maxh, max, i; int offset, upb; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * (size_t)max); d1 = g = (int*)malloc(sizeof(int) * (size_t)max); d2 = b = (int*)malloc(sizeof(int) * (size_t)max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i = 0U; i < max; ++i) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++cb; ++cr; ++r; ++g; ++b; } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; return; fails: if(r) free(r); if(g) free(g); if(b) free(b); }/* sycc444_to_rgb() */ static void sycc422_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; unsigned int maxw, maxh, max; int offset, upb; unsigned int i, j; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * (size_t)max); d1 = g = (int*)malloc(sizeof(int) * (size_t)max); d2 = b = (int*)malloc(sizeof(int) * (size_t)max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i=0U; i < maxh; ++i) { for(j=0U; j < (maxw & ~(unsigned int)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } if (j < maxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; #if defined(USE_JPWL) || defined(USE_MJ2) img->comps[1].w = maxw; img->comps[1].h = maxh; img->comps[2].w = maxw; img->comps[2].h = maxh; #else img->comps[1].w = (OPJ_UINT32)maxw; img->comps[1].h = (OPJ_UINT32)maxh; img->comps[2].w = (OPJ_UINT32)maxw; img->comps[2].h = (OPJ_UINT32)maxh; #endif img->comps[1].dx = 
img->comps[0].dx; img->comps[2].dx = img->comps[0].dx; img->comps[1].dy = img->comps[0].dy; img->comps[2].dy = img->comps[0].dy; return; fails: if(r) free(r); if(g) free(g); if(b) free(b); }/* sycc422_to_rgb() */ static void sycc420_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b, *nr, *ng, *nb; const int *y, *cb, *cr, *ny; unsigned int maxw, maxh, max; int offset, upb; unsigned int i, j; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * (size_t)max); d1 = g = (int*)malloc(sizeof(int) * (size_t)max); d2 = b = (int*)malloc(sizeof(int) * (size_t)max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i=0U; i < (maxh & ~(unsigned int)1U); i += 2U) { ny = y + maxw; nr = r + maxw; ng = g + maxw; nb = b + maxw; for(j=0; j < (maxw & ~(unsigned int)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } if(j < maxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } y += maxw; r += maxw; g += maxw; b += maxw; } if(i < maxh) { for(j=0U; j < (maxw & ~(unsigned int)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } if(j < maxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); } } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; #if defined(USE_JPWL) || defined(USE_MJ2) img->comps[1].w = maxw; img->comps[1].h = maxh; img->comps[2].w = maxw; img->comps[2].h = maxh; #else img->comps[1].w = (OPJ_UINT32)maxw; img->comps[1].h = (OPJ_UINT32)maxh; img->comps[2].w = (OPJ_UINT32)maxw; img->comps[2].h = (OPJ_UINT32)maxh; #endif img->comps[1].dx = img->comps[0].dx; img->comps[2].dx = img->comps[0].dx; img->comps[1].dy = img->comps[0].dy; img->comps[2].dy = img->comps[0].dy; return; fails: if(r) free(r); if(g) free(g); if(b) free(b); }/* sycc420_to_rgb() */ void color_sycc_to_rgb(opj_image_t *img) { if(img->numcomps < 3) { img->color_space = OPJ_CLRSPC_GRAY; return; } if((img->comps[0].dx == 1) && (img->comps[1].dx == 2) && (img->comps[2].dx == 2) && (img->comps[0].dy == 1) && (img->comps[1].dy == 2) && (img->comps[2].dy == 2))/* horizontal and vertical sub-sample */ { sycc420_to_rgb(img); } else if((img->comps[0].dx == 1) && (img->comps[1].dx == 2) && (img->comps[2].dx == 2) && (img->comps[0].dy == 1) && (img->comps[1].dy == 1) && (img->comps[2].dy == 1))/* horizontal sub-sample only */ { sycc422_to_rgb(img); } else if((img->comps[0].dx == 1) && (img->comps[1].dx == 1) && (img->comps[2].dx == 1) && (img->comps[0].dy == 1) && (img->comps[1].dy == 1) && (img->comps[2].dy == 1))/* no sub-sample */ { sycc444_to_rgb(img); } else { fprintf(stderr,"%s:%d:color_sycc_to_rgb\n\tCAN NOT CONVERT\n", __FILE__,__LINE__); return; } img->color_space = OPJ_CLRSPC_SRGB; }/* color_sycc_to_rgb() */ #if defined(OPJ_HAVE_LIBLCMS2) || defined(OPJ_HAVE_LIBLCMS1) #ifdef OPJ_HAVE_LIBLCMS1 /* Bob Friesenhahn 
proposed:*/ #define cmsSigXYZData icSigXYZData #define cmsSigLabData icSigLabData #define cmsSigCmykData icSigCmykData #define cmsSigYCbCrData icSigYCbCrData #define cmsSigLuvData icSigLuvData #define cmsSigGrayData icSigGrayData #define cmsSigRgbData icSigRgbData #define cmsUInt32Number DWORD #define cmsColorSpaceSignature icColorSpaceSignature #define cmsGetHeaderRenderingIntent cmsTakeRenderingIntent #endif /* OPJ_HAVE_LIBLCMS1 */ /*#define DEBUG_PROFILE*/ void color_apply_icc_profile(opj_image_t *image) { cmsHPROFILE in_prof, out_prof; cmsHTRANSFORM transform; cmsColorSpaceSignature in_space, out_space; cmsUInt32Number intent, in_type, out_type; int *r, *g, *b; size_t nr_samples; int prec, i, max, max_w, max_h, ok = 0; OPJ_COLOR_SPACE new_space; in_prof = cmsOpenProfileFromMem(image->icc_profile_buf, image->icc_profile_len); #ifdef DEBUG_PROFILE FILE *icm = fopen("debug.icm","wb"); fwrite( image->icc_profile_buf,1, image->icc_profile_len,icm); fclose(icm); #endif if(in_prof == NULL) return; in_space = cmsGetPCS(in_prof); out_space = cmsGetColorSpace(in_prof); intent = cmsGetHeaderRenderingIntent(in_prof); max_w = (int)image->comps[0].w; max_h = (int)image->comps[0].h; prec = (int)image->comps[0].prec; if(out_space == cmsSigRgbData) /* enumCS 16 */ { if( prec <= 8 ) { in_type = TYPE_RGB_8; out_type = TYPE_RGB_8; } else { in_type = TYPE_RGB_16; out_type = TYPE_RGB_16; } out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else if(out_space == cmsSigGrayData) /* enumCS 17 */ { in_type = TYPE_GRAY_8; out_type = TYPE_RGB_8; out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else if(out_space == cmsSigYCbCrData) /* enumCS 18 */ { in_type = TYPE_YCbCr_16; out_type = TYPE_RGB_16; out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else { #ifdef DEBUG_PROFILE fprintf(stderr,"%s:%d: color_apply_icc_profile\n\tICC Profile has unknown " "output colorspace(%#x)(%c%c%c%c)\n\tICC Profile ignored.\n", __FILE__,__LINE__,out_space, (out_space>>24) & 0xff,(out_space>>16) & 0xff, (out_space>>8) & 0xff, out_space & 0xff); #endif cmsCloseProfile(in_prof); return; } if(out_prof == NULL) { cmsCloseProfile(in_prof); return; } #ifdef DEBUG_PROFILE fprintf(stderr,"%s:%d:color_apply_icc_profile\n\tchannels(%d) prec(%d) w(%d) h(%d)" "\n\tprofile: in(%p) out(%p)\n",__FILE__,__LINE__,image->numcomps,prec, max_w,max_h, (void*)in_prof,(void*)out_prof); fprintf(stderr,"\trender_intent (%u)\n\t" "color_space: in(%#x)(%c%c%c%c) out:(%#x)(%c%c%c%c)\n\t" " type: in(%u) out:(%u)\n", intent, in_space, (in_space>>24) & 0xff,(in_space>>16) & 0xff, (in_space>>8) & 0xff, in_space & 0xff, out_space, (out_space>>24) & 0xff,(out_space>>16) & 0xff, (out_space>>8) & 0xff, out_space & 0xff, in_type,out_type ); #else (void)prec; (void)in_space; #endif /* DEBUG_PROFILE */ transform = cmsCreateTransform(in_prof, in_type, out_prof, out_type, intent, 0); #ifdef OPJ_HAVE_LIBLCMS2 /* Possible for: LCMS_VERSION >= 2000 :*/ cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif if(transform == NULL) { #ifdef DEBUG_PROFILE fprintf(stderr,"%s:%d:color_apply_icc_profile\n\tcmsCreateTransform failed. 
" "ICC Profile ignored.\n",__FILE__,__LINE__); #endif #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif return; } if(image->numcomps > 2)/* RGB, RGBA */ { if( prec <= 8 ) { unsigned char *inbuf, *outbuf, *in, *out; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned char)); in = inbuf = (unsigned char*)malloc(nr_samples); out = outbuf = (unsigned char*)malloc(nr_samples); if(inbuf == NULL || outbuf == NULL) goto fails0; r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *in++ = (unsigned char)*r++; *in++ = (unsigned char)*g++; *in++ = (unsigned char)*b++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } ok = 1; fails0: if(inbuf) free(inbuf); if(outbuf) free(outbuf); } else /* prec > 8 */ { unsigned short *inbuf, *outbuf, *in, *out; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned short)); in = inbuf = (unsigned short*)malloc(nr_samples); out = outbuf = (unsigned short*)malloc(nr_samples); if(inbuf == NULL || outbuf == NULL) goto fails1; r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *in++ = (unsigned short)*r++; *in++ = (unsigned short)*g++; *in++ = (unsigned short)*b++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } ok = 1; fails1: if(inbuf) free(inbuf); if(outbuf) free(outbuf); } } else /* image->numcomps <= 2 : GRAY, GRAYA */ { if(prec <= 8) { unsigned char *in, *inbuf, *out, *outbuf; opj_image_comp_t *new_comps; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned char)); in = inbuf = (unsigned char*)malloc(nr_samples); out = outbuf = (unsigned char*)malloc(nr_samples); g = (int*)calloc((size_t)max, sizeof(int)); b = (int*)calloc((size_t)max, sizeof(int)); if(inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) goto fails2; new_comps = (opj_image_comp_t*) realloc(image->comps, (image->numcomps+2)*sizeof(opj_image_comp_t)); if(new_comps == NULL) goto fails2; image->comps = new_comps; if(image->numcomps == 2) image->comps[3] = image->comps[1]; image->comps[1] = image->comps[0]; image->comps[2] = image->comps[0]; image->comps[1].data = g; image->comps[2].data = b; image->numcomps += 2; r = image->comps[0].data; for(i = 0; i < max; ++i) { *in++ = (unsigned char)*r++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } r = g = b = NULL; ok = 1; fails2: if(inbuf) free(inbuf); if(outbuf) free(outbuf); if(g) free(g); if(b) free(b); } else /* prec > 8 */ { unsigned short *in, *inbuf, *out, *outbuf; opj_image_comp_t *new_comps; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned short)); in = inbuf = (unsigned short*)malloc(nr_samples); out = outbuf = (unsigned short*)malloc(nr_samples); g = (int*)calloc((size_t)max, sizeof(int)); b = (int*)calloc((size_t)max, sizeof(int)); if(inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) goto fails3; new_comps = (opj_image_comp_t*) realloc(image->comps, 
(image->numcomps+2)*sizeof(opj_image_comp_t)); if(new_comps == NULL) goto fails3; image->comps = new_comps; if(image->numcomps == 2) image->comps[3] = image->comps[1]; image->comps[1] = image->comps[0]; image->comps[2] = image->comps[0]; image->comps[1].data = g; image->comps[2].data = b; image->numcomps += 2; r = image->comps[0].data; for(i = 0; i < max; ++i) { *in++ = (unsigned short)*r++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } r = g = b = NULL; ok = 1; fails3: if(inbuf) free(inbuf); if(outbuf) free(outbuf); if(g) free(g); if(b) free(b); } }/* if(image->numcomps > 2) */ cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif if(ok) { image->color_space = new_space; } }/* color_apply_icc_profile() */ void color_cielab_to_rgb(opj_image_t *image) { int *row; int enumcs, numcomps; OPJ_COLOR_SPACE new_space; numcomps = (int)image->numcomps; if(numcomps != 3) { fprintf(stderr,"%s:%d:\n\tnumcomps %d not handled. Quitting.\n", __FILE__,__LINE__,numcomps); return; } row = (int*)image->icc_profile_buf; enumcs = row[0]; if(enumcs == 14) /* CIELab */ { int *L, *a, *b, *red, *green, *blue; int *src0, *src1, *src2, *dst0, *dst1, *dst2; double rl, ol, ra, oa, rb, ob, prec0, prec1, prec2; double minL, maxL, mina, maxa, minb, maxb; unsigned int default_type; unsigned int i, max; cmsHPROFILE in, out; cmsHTRANSFORM transform; cmsUInt16Number RGB[3]; cmsCIELab Lab; in = cmsCreateLab4Profile(NULL); if(in == NULL){ return; } out = cmsCreate_sRGBProfile(); if(out == NULL){ cmsCloseProfile(in); return; } transform = cmsCreateTransform(in, TYPE_Lab_DBL, out, TYPE_RGB_16, INTENT_PERCEPTUAL, 0); #ifdef OPJ_HAVE_LIBLCMS2 cmsCloseProfile(in); cmsCloseProfile(out); #endif if(transform == NULL) { #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif return; } new_space = OPJ_CLRSPC_SRGB; prec0 = (double)image->comps[0].prec; prec1 = (double)image->comps[1].prec; prec2 = (double)image->comps[2].prec; default_type = (unsigned int)row[1]; if(default_type == 0x44454600)/* DEF : default */ { rl = 100; ra = 170; rb = 200; ol = 0; oa = pow(2, prec1 - 1); ob = pow(2, prec2 - 2) + pow(2, prec2 - 3); } else { rl = row[2]; ra = row[4]; rb = row[6]; ol = row[3]; oa = row[5]; ob = row[7]; } L = src0 = image->comps[0].data; a = src1 = image->comps[1].data; b = src2 = image->comps[2].data; max = image->comps[0].w * image->comps[0].h; red = dst0 = (int*)malloc(max * sizeof(int)); green = dst1 = (int*)malloc(max * sizeof(int)); blue = dst2 = (int*)malloc(max * sizeof(int)); if(red == NULL || green == NULL || blue == NULL) goto fails; minL = -(rl * ol)/(pow(2, prec0)-1); maxL = minL + rl; mina = -(ra * oa)/(pow(2, prec1)-1); maxa = mina + ra; minb = -(rb * ob)/(pow(2, prec2)-1); maxb = minb + rb; for(i = 0; i < max; ++i) { Lab.L = minL + (double)(*L) * (maxL - minL)/(pow(2, prec0)-1); ++L; Lab.a = mina + (double)(*a) * (maxa - mina)/(pow(2, prec1)-1); ++a; Lab.b = minb + (double)(*b) * (maxb - minb)/(pow(2, prec2)-1); ++b; cmsDoTransform(transform, &Lab, RGB, 1); *red++ = RGB[0]; *green++ = RGB[1]; *blue++ = RGB[2]; } cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif free(src0); image->comps[0].data = dst0; free(src1); image->comps[1].data = dst1; free(src2); image->comps[2].data = dst2; image->color_space = 
new_space; image->comps[0].prec = 16; image->comps[1].prec = 16; image->comps[2].prec = 16; return; fails: cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif if(red) free(red); if(green) free(green); if(blue) free(blue); return; } fprintf(stderr,"%s:%d:\n\tenumCS %d not handled. Ignoring.\n", __FILE__,__LINE__, enumcs); }/* color_cielab_to_rgb() */ #endif /* OPJ_HAVE_LIBLCMS2 || OPJ_HAVE_LIBLCMS1 */ void color_cmyk_to_rgb(opj_image_t *image) { float C, M, Y, K; float sC, sM, sY, sK; unsigned int w, h, max, i; w = image->comps[0].w; h = image->comps[0].h; if(image->numcomps < 4) return; max = w * h; sC = 1.0F / (float)((1 << image->comps[0].prec) - 1); sM = 1.0F / (float)((1 << image->comps[1].prec) - 1); sY = 1.0F / (float)((1 << image->comps[2].prec) - 1); sK = 1.0F / (float)((1 << image->comps[3].prec) - 1); for(i = 0; i < max; ++i) { /* CMYK values from 0 to 1 */ C = (float)(image->comps[0].data[i]) * sC; M = (float)(image->comps[1].data[i]) * sM; Y = (float)(image->comps[2].data[i]) * sY; K = (float)(image->comps[3].data[i]) * sK; /* Invert all CMYK values */ C = 1.0F - C; M = 1.0F - M; Y = 1.0F - Y; K = 1.0F - K; /* CMYK -> RGB : RGB results from 0 to 255 */ image->comps[0].data[i] = (int)(255.0F * C * K); /* R */ image->comps[1].data[i] = (int)(255.0F * M * K); /* G */ image->comps[2].data[i] = (int)(255.0F * Y * K); /* B */ } free(image->comps[3].data); image->comps[3].data = NULL; image->comps[0].prec = 8; image->comps[1].prec = 8; image->comps[2].prec = 8; image->numcomps -= 1; image->color_space = OPJ_CLRSPC_SRGB; for (i = 3; i < image->numcomps; ++i) { memcpy(&(image->comps[i]), &(image->comps[i+1]), sizeof(image->comps[i])); } }/* color_cmyk_to_rgb() */ /* * This code has been adopted from sjpx_openjpeg.c of ghostscript */ void color_esycc_to_rgb(opj_image_t *image) { int y, cb, cr, sign1, sign2, val; unsigned int w, h, max, i; int flip_value = (1 << (image->comps[0].prec-1)); int max_value = (1 << image->comps[0].prec) - 1; if ( (image->numcomps < 3) || (image->comps[0].dx != image->comps[1].dx) || (image->comps[0].dx != image->comps[2].dx) || (image->comps[0].dy != image->comps[1].dy) || (image->comps[0].dy != image->comps[2].dy) ) { fprintf(stderr,"%s:%d:color_esycc_to_rgb\n\tCAN NOT CONVERT\n", __FILE__,__LINE__); return; } w = image->comps[0].w; h = image->comps[0].h; sign1 = (int)image->comps[1].sgnd; sign2 = (int)image->comps[2].sgnd; max = w * h; for(i = 0; i < max; ++i) { y = image->comps[0].data[i]; cb = image->comps[1].data[i]; cr = image->comps[2].data[i]; if( !sign1) cb -= flip_value; if( !sign2) cr -= flip_value; val = (int) ((float)y - (float)0.0000368 * (float)cb + (float)1.40199 * (float)cr + (float)0.5); if(val > max_value) val = max_value; else if(val < 0) val = 0; image->comps[0].data[i] = val; val = (int) ((float)1.0003 * (float)y - (float)0.344125 * (float)cb - (float)0.7141128 * (float)cr + (float)0.5); if(val > max_value) val = max_value; else if(val < 0) val = 0; image->comps[1].data[i] = val; val = (int) ((float)0.999823 * (float)y + (float)1.77204 * (float)cb - (float)0.000008 *(float)cr + (float)0.5); if(val > max_value) val = max_value; else if(val < 0) val = 0; image->comps[2].data[i] = val; } image->color_space = OPJ_CLRSPC_SRGB; }/* color_esycc_to_rgb() */
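Note: the vulnerable revision above computes maxw * maxh in a 32-bit unsigned int and only casts to size_t after the multiply, so the product can wrap before it ever reaches malloc(); the patched revision in the next field does the arithmetic in size_t throughout. A minimal sketch of that sizing pattern, assuming a 64-bit size_t (alloc_plane is a hypothetical helper, not OpenJPEG API):

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical helper: promote to size_t *before* multiplying, as the
 * patched sycc*_to_rgb() allocations do. On a 64-bit target the product
 * of two 32-bit dimensions can no longer wrap; casting only after a
 * 32-bit multiply (the vulnerable pattern) truncates first. */
int *alloc_plane(uint32_t w, uint32_t h)
{
    size_t maxw = (size_t)w;
    size_t maxh = (size_t)h;
    return (int *)malloc(sizeof(int) * maxw * maxh);
}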
/* * The copyright in this software is being made available under the 2-clauses * BSD License, included below. This software may be subject to other third * party and contributor rights, including patent rights, and no such rights * are granted under this license. * * Copyright (c) 2002-2014, Universite catholique de Louvain (UCL), Belgium * Copyright (c) 2002-2014, Professor Benoit Macq * Copyright (c) 2001-2003, David Janssens * Copyright (c) 2002-2003, Yannick Verschueren * Copyright (c) 2003-2007, Francois-Olivier Devaux * Copyright (c) 2003-2014, Antonin Descampe * Copyright (c) 2005, Herve Drolon, FreeImage Team * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include "opj_apps_config.h" #include "openjpeg.h" #include "color.h" #ifdef OPJ_HAVE_LIBLCMS2 #include <lcms2.h> #endif #ifdef OPJ_HAVE_LIBLCMS1 #include <lcms.h> #endif #ifdef OPJ_USE_LEGACY #define OPJ_CLRSPC_GRAY CLRSPC_GRAY #define OPJ_CLRSPC_SRGB CLRSPC_SRGB #endif /*-------------------------------------------------------- Matrix for sYCC, Amendment 1 to IEC 61966-2-1 Y : 0.299 0.587 0.114 :R Cb: -0.1687 -0.3312 0.5 :G Cr: 0.5 -0.4187 -0.0812 :B Inverse: R: 1 -3.68213e-05 1.40199 :Y G: 1.00003 -0.344125 -0.714128 :Cb - 2^(prec - 1) B: 0.999823 1.77204 -8.04142e-06 :Cr - 2^(prec - 1) -----------------------------------------------------------*/ static void sycc_to_rgb(int offset, int upb, int y, int cb, int cr, int *out_r, int *out_g, int *out_b) { int r, g, b; cb -= offset; cr -= offset; r = y + (int)(1.402 * (float)cr); if(r < 0) r = 0; else if(r > upb) r = upb; *out_r = r; g = y - (int)(0.344 * (float)cb + 0.714 * (float)cr); if(g < 0) g = 0; else if(g > upb) g = upb; *out_g = g; b = y + (int)(1.772 * (float)cb); if(b < 0) b = 0; else if(b > upb) b = upb; *out_b = b; } static void sycc444_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; size_t maxw, maxh, max, i; int offset, upb; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * max); d1 = g = (int*)malloc(sizeof(int) * max); d2 = b = (int*)malloc(sizeof(int) * max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i = 0U; i < max; ++i) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++cb; ++cr; ++r; ++g; ++b; } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; img->color_space = OPJ_CLRSPC_SRGB; return; fails: free(r); free(g); free(b); }/* sycc444_to_rgb() */ static void sycc422_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; size_t maxw, maxh, max, offx, loopmaxw; int offset, upb; size_t i; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * max); d1 = g = (int*)malloc(sizeof(int) * max); d2 = b = (int*)malloc(sizeof(int) * max); if(r == NULL || g == NULL || b == NULL) goto fails; /* if img->x0 is odd, then first column shall use Cb/Cr = 0 */ offx = img->x0 & 1U; loopmaxw = maxw - offx; for(i=0U; i < maxh; ++i) { size_t j; if (offx > 0U) { sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b); ++y; ++r; ++g; ++b; } for(j=0U; j < (loopmaxw & ~(size_t)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } if (j < loopmaxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; img->comps[1].w = img->comps[2].w = img->comps[0].w; img->comps[1].h = img->comps[2].h = img->comps[0].h; img->comps[1].dx = img->comps[2].dx = img->comps[0].dx; 
img->comps[1].dy = img->comps[2].dy = img->comps[0].dy; img->color_space = OPJ_CLRSPC_SRGB; return; fails: free(r); free(g); free(b); }/* sycc422_to_rgb() */ static void sycc420_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b, *nr, *ng, *nb; const int *y, *cb, *cr, *ny; size_t maxw, maxh, max, offx, loopmaxw, offy, loopmaxh; int offset, upb; size_t i; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * max); d1 = g = (int*)malloc(sizeof(int) * max); d2 = b = (int*)malloc(sizeof(int) * max); if (r == NULL || g == NULL || b == NULL) goto fails; /* if img->x0 is odd, then first column shall use Cb/Cr = 0 */ offx = img->x0 & 1U; loopmaxw = maxw - offx; /* if img->y0 is odd, then first line shall use Cb/Cr = 0 */ offy = img->y0 & 1U; loopmaxh = maxh - offy; if (offy > 0U) { size_t j; for(j=0; j < maxw; ++j) { sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b); ++y; ++r; ++g; ++b; } } for(i=0U; i < (loopmaxh & ~(size_t)1U); i += 2U) { size_t j; ny = y + maxw; nr = r + maxw; ng = g + maxw; nb = b + maxw; if (offx > 0U) { sycc_to_rgb(offset, upb, *y, 0, 0, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; } for(j=0; j < (loopmaxw & ~(size_t)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } if(j < loopmaxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb); ++ny; ++nr; ++ng; ++nb; ++cb; ++cr; } y += maxw; r += maxw; g += maxw; b += maxw; } if(i < loopmaxh) { size_t j; for(j=0U; j < (maxw & ~(size_t)1U); j += 2U) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++r; ++g; ++b; ++cb; ++cr; } if(j < maxw) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); } } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; img->comps[1].w = img->comps[2].w = img->comps[0].w; img->comps[1].h = img->comps[2].h = img->comps[0].h; img->comps[1].dx = img->comps[2].dx = img->comps[0].dx; img->comps[1].dy = img->comps[2].dy = img->comps[0].dy; img->color_space = OPJ_CLRSPC_SRGB; return; fails: free(r); free(g); free(b); }/* sycc420_to_rgb() */ void color_sycc_to_rgb(opj_image_t *img) { if(img->numcomps < 3) { img->color_space = OPJ_CLRSPC_GRAY; return; } if((img->comps[0].dx == 1) && (img->comps[1].dx == 2) && (img->comps[2].dx == 2) && (img->comps[0].dy == 1) && (img->comps[1].dy == 2) && (img->comps[2].dy == 2))/* horizontal and vertical sub-sample */ { sycc420_to_rgb(img); } else if((img->comps[0].dx == 1) && (img->comps[1].dx == 2) && (img->comps[2].dx == 2) && (img->comps[0].dy == 1) && (img->comps[1].dy == 1) && (img->comps[2].dy == 1))/* horizontal sub-sample only */ { sycc422_to_rgb(img); } else if((img->comps[0].dx == 1) && (img->comps[1].dx == 1) && (img->comps[2].dx == 1) && (img->comps[0].dy == 1) && (img->comps[1].dy == 1) && (img->comps[2].dy == 1))/* no sub-sample */ { sycc444_to_rgb(img); } else { 
fprintf(stderr,"%s:%d:color_sycc_to_rgb\n\tCAN NOT CONVERT\n", __FILE__,__LINE__); return; } }/* color_sycc_to_rgb() */ #if defined(OPJ_HAVE_LIBLCMS2) || defined(OPJ_HAVE_LIBLCMS1) #ifdef OPJ_HAVE_LIBLCMS1 /* Bob Friesenhahn proposed:*/ #define cmsSigXYZData icSigXYZData #define cmsSigLabData icSigLabData #define cmsSigCmykData icSigCmykData #define cmsSigYCbCrData icSigYCbCrData #define cmsSigLuvData icSigLuvData #define cmsSigGrayData icSigGrayData #define cmsSigRgbData icSigRgbData #define cmsUInt32Number DWORD #define cmsColorSpaceSignature icColorSpaceSignature #define cmsGetHeaderRenderingIntent cmsTakeRenderingIntent #endif /* OPJ_HAVE_LIBLCMS1 */ /*#define DEBUG_PROFILE*/ void color_apply_icc_profile(opj_image_t *image) { cmsHPROFILE in_prof, out_prof; cmsHTRANSFORM transform; cmsColorSpaceSignature in_space, out_space; cmsUInt32Number intent, in_type, out_type; int *r, *g, *b; size_t nr_samples; int prec, i, max, max_w, max_h, ok = 0; OPJ_COLOR_SPACE new_space; in_prof = cmsOpenProfileFromMem(image->icc_profile_buf, image->icc_profile_len); #ifdef DEBUG_PROFILE FILE *icm = fopen("debug.icm","wb"); fwrite( image->icc_profile_buf,1, image->icc_profile_len,icm); fclose(icm); #endif if(in_prof == NULL) return; in_space = cmsGetPCS(in_prof); out_space = cmsGetColorSpace(in_prof); intent = cmsGetHeaderRenderingIntent(in_prof); max_w = (int)image->comps[0].w; max_h = (int)image->comps[0].h; prec = (int)image->comps[0].prec; if(out_space == cmsSigRgbData) /* enumCS 16 */ { if( prec <= 8 ) { in_type = TYPE_RGB_8; out_type = TYPE_RGB_8; } else { in_type = TYPE_RGB_16; out_type = TYPE_RGB_16; } out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else if(out_space == cmsSigGrayData) /* enumCS 17 */ { in_type = TYPE_GRAY_8; out_type = TYPE_RGB_8; out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else if(out_space == cmsSigYCbCrData) /* enumCS 18 */ { in_type = TYPE_YCbCr_16; out_type = TYPE_RGB_16; out_prof = cmsCreate_sRGBProfile(); new_space = OPJ_CLRSPC_SRGB; } else { #ifdef DEBUG_PROFILE fprintf(stderr,"%s:%d: color_apply_icc_profile\n\tICC Profile has unknown " "output colorspace(%#x)(%c%c%c%c)\n\tICC Profile ignored.\n", __FILE__,__LINE__,out_space, (out_space>>24) & 0xff,(out_space>>16) & 0xff, (out_space>>8) & 0xff, out_space & 0xff); #endif cmsCloseProfile(in_prof); return; } if(out_prof == NULL) { cmsCloseProfile(in_prof); return; } #ifdef DEBUG_PROFILE fprintf(stderr,"%s:%d:color_apply_icc_profile\n\tchannels(%d) prec(%d) w(%d) h(%d)" "\n\tprofile: in(%p) out(%p)\n",__FILE__,__LINE__,image->numcomps,prec, max_w,max_h, (void*)in_prof,(void*)out_prof); fprintf(stderr,"\trender_intent (%u)\n\t" "color_space: in(%#x)(%c%c%c%c) out:(%#x)(%c%c%c%c)\n\t" " type: in(%u) out:(%u)\n", intent, in_space, (in_space>>24) & 0xff,(in_space>>16) & 0xff, (in_space>>8) & 0xff, in_space & 0xff, out_space, (out_space>>24) & 0xff,(out_space>>16) & 0xff, (out_space>>8) & 0xff, out_space & 0xff, in_type,out_type ); #else (void)prec; (void)in_space; #endif /* DEBUG_PROFILE */ transform = cmsCreateTransform(in_prof, in_type, out_prof, out_type, intent, 0); #ifdef OPJ_HAVE_LIBLCMS2 /* Possible for: LCMS_VERSION >= 2000 :*/ cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif if(transform == NULL) { #ifdef DEBUG_PROFILE fprintf(stderr,"%s:%d:color_apply_icc_profile\n\tcmsCreateTransform failed. 
" "ICC Profile ignored.\n",__FILE__,__LINE__); #endif #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif return; } if(image->numcomps > 2)/* RGB, RGBA */ { if( prec <= 8 ) { unsigned char *inbuf, *outbuf, *in, *out; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned char)); in = inbuf = (unsigned char*)malloc(nr_samples); out = outbuf = (unsigned char*)malloc(nr_samples); if(inbuf == NULL || outbuf == NULL) goto fails0; r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *in++ = (unsigned char)*r++; *in++ = (unsigned char)*g++; *in++ = (unsigned char)*b++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } ok = 1; fails0: if(inbuf) free(inbuf); if(outbuf) free(outbuf); } else /* prec > 8 */ { unsigned short *inbuf, *outbuf, *in, *out; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned short)); in = inbuf = (unsigned short*)malloc(nr_samples); out = outbuf = (unsigned short*)malloc(nr_samples); if(inbuf == NULL || outbuf == NULL) goto fails1; r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *in++ = (unsigned short)*r++; *in++ = (unsigned short)*g++; *in++ = (unsigned short)*b++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } ok = 1; fails1: if(inbuf) free(inbuf); if(outbuf) free(outbuf); } } else /* image->numcomps <= 2 : GRAY, GRAYA */ { if(prec <= 8) { unsigned char *in, *inbuf, *out, *outbuf; opj_image_comp_t *new_comps; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned char)); in = inbuf = (unsigned char*)malloc(nr_samples); out = outbuf = (unsigned char*)malloc(nr_samples); g = (int*)calloc((size_t)max, sizeof(int)); b = (int*)calloc((size_t)max, sizeof(int)); if(inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) goto fails2; new_comps = (opj_image_comp_t*) realloc(image->comps, (image->numcomps+2)*sizeof(opj_image_comp_t)); if(new_comps == NULL) goto fails2; image->comps = new_comps; if(image->numcomps == 2) image->comps[3] = image->comps[1]; image->comps[1] = image->comps[0]; image->comps[2] = image->comps[0]; image->comps[1].data = g; image->comps[2].data = b; image->numcomps += 2; r = image->comps[0].data; for(i = 0; i < max; ++i) { *in++ = (unsigned char)*r++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } r = g = b = NULL; ok = 1; fails2: if(inbuf) free(inbuf); if(outbuf) free(outbuf); if(g) free(g); if(b) free(b); } else /* prec > 8 */ { unsigned short *in, *inbuf, *out, *outbuf; opj_image_comp_t *new_comps; max = max_w * max_h; nr_samples = (size_t)(max * 3 * sizeof(unsigned short)); in = inbuf = (unsigned short*)malloc(nr_samples); out = outbuf = (unsigned short*)malloc(nr_samples); g = (int*)calloc((size_t)max, sizeof(int)); b = (int*)calloc((size_t)max, sizeof(int)); if(inbuf == NULL || outbuf == NULL || g == NULL || b == NULL) goto fails3; new_comps = (opj_image_comp_t*) realloc(image->comps, 
(image->numcomps+2)*sizeof(opj_image_comp_t)); if(new_comps == NULL) goto fails3; image->comps = new_comps; if(image->numcomps == 2) image->comps[3] = image->comps[1]; image->comps[1] = image->comps[0]; image->comps[2] = image->comps[0]; image->comps[1].data = g; image->comps[2].data = b; image->numcomps += 2; r = image->comps[0].data; for(i = 0; i < max; ++i) { *in++ = (unsigned short)*r++; } cmsDoTransform(transform, inbuf, outbuf, (cmsUInt32Number)max); r = image->comps[0].data; g = image->comps[1].data; b = image->comps[2].data; for(i = 0; i < max; ++i) { *r++ = (int)*out++; *g++ = (int)*out++; *b++ = (int)*out++; } r = g = b = NULL; ok = 1; fails3: if(inbuf) free(inbuf); if(outbuf) free(outbuf); if(g) free(g); if(b) free(b); } }/* if(image->numcomps > 2) */ cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in_prof); cmsCloseProfile(out_prof); #endif if(ok) { image->color_space = new_space; } }/* color_apply_icc_profile() */ void color_cielab_to_rgb(opj_image_t *image) { int *row; int enumcs, numcomps; OPJ_COLOR_SPACE new_space; numcomps = (int)image->numcomps; if(numcomps != 3) { fprintf(stderr,"%s:%d:\n\tnumcomps %d not handled. Quitting.\n", __FILE__,__LINE__,numcomps); return; } row = (int*)image->icc_profile_buf; enumcs = row[0]; if(enumcs == 14) /* CIELab */ { int *L, *a, *b, *red, *green, *blue; int *src0, *src1, *src2, *dst0, *dst1, *dst2; double rl, ol, ra, oa, rb, ob, prec0, prec1, prec2; double minL, maxL, mina, maxa, minb, maxb; unsigned int default_type; unsigned int i, max; cmsHPROFILE in, out; cmsHTRANSFORM transform; cmsUInt16Number RGB[3]; cmsCIELab Lab; in = cmsCreateLab4Profile(NULL); if(in == NULL){ return; } out = cmsCreate_sRGBProfile(); if(out == NULL){ cmsCloseProfile(in); return; } transform = cmsCreateTransform(in, TYPE_Lab_DBL, out, TYPE_RGB_16, INTENT_PERCEPTUAL, 0); #ifdef OPJ_HAVE_LIBLCMS2 cmsCloseProfile(in); cmsCloseProfile(out); #endif if(transform == NULL) { #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif return; } new_space = OPJ_CLRSPC_SRGB; prec0 = (double)image->comps[0].prec; prec1 = (double)image->comps[1].prec; prec2 = (double)image->comps[2].prec; default_type = (unsigned int)row[1]; if(default_type == 0x44454600)/* DEF : default */ { rl = 100; ra = 170; rb = 200; ol = 0; oa = pow(2, prec1 - 1); ob = pow(2, prec2 - 2) + pow(2, prec2 - 3); } else { rl = row[2]; ra = row[4]; rb = row[6]; ol = row[3]; oa = row[5]; ob = row[7]; } L = src0 = image->comps[0].data; a = src1 = image->comps[1].data; b = src2 = image->comps[2].data; max = image->comps[0].w * image->comps[0].h; red = dst0 = (int*)malloc(max * sizeof(int)); green = dst1 = (int*)malloc(max * sizeof(int)); blue = dst2 = (int*)malloc(max * sizeof(int)); if(red == NULL || green == NULL || blue == NULL) goto fails; minL = -(rl * ol)/(pow(2, prec0)-1); maxL = minL + rl; mina = -(ra * oa)/(pow(2, prec1)-1); maxa = mina + ra; minb = -(rb * ob)/(pow(2, prec2)-1); maxb = minb + rb; for(i = 0; i < max; ++i) { Lab.L = minL + (double)(*L) * (maxL - minL)/(pow(2, prec0)-1); ++L; Lab.a = mina + (double)(*a) * (maxa - mina)/(pow(2, prec1)-1); ++a; Lab.b = minb + (double)(*b) * (maxb - minb)/(pow(2, prec2)-1); ++b; cmsDoTransform(transform, &Lab, RGB, 1); *red++ = RGB[0]; *green++ = RGB[1]; *blue++ = RGB[2]; } cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif free(src0); image->comps[0].data = dst0; free(src1); image->comps[1].data = dst1; free(src2); image->comps[2].data = dst2; image->color_space = 
new_space; image->comps[0].prec = 16; image->comps[1].prec = 16; image->comps[2].prec = 16; return; fails: cmsDeleteTransform(transform); #ifdef OPJ_HAVE_LIBLCMS1 cmsCloseProfile(in); cmsCloseProfile(out); #endif if(red) free(red); if(green) free(green); if(blue) free(blue); return; } fprintf(stderr,"%s:%d:\n\tenumCS %d not handled. Ignoring.\n", __FILE__,__LINE__, enumcs); }/* color_cielab_to_rgb() */ #endif /* OPJ_HAVE_LIBLCMS2 || OPJ_HAVE_LIBLCMS1 */ void color_cmyk_to_rgb(opj_image_t *image) { float C, M, Y, K; float sC, sM, sY, sK; unsigned int w, h, max, i; w = image->comps[0].w; h = image->comps[0].h; if(image->numcomps < 4) return; max = w * h; sC = 1.0F / (float)((1 << image->comps[0].prec) - 1); sM = 1.0F / (float)((1 << image->comps[1].prec) - 1); sY = 1.0F / (float)((1 << image->comps[2].prec) - 1); sK = 1.0F / (float)((1 << image->comps[3].prec) - 1); for(i = 0; i < max; ++i) { /* CMYK values from 0 to 1 */ C = (float)(image->comps[0].data[i]) * sC; M = (float)(image->comps[1].data[i]) * sM; Y = (float)(image->comps[2].data[i]) * sY; K = (float)(image->comps[3].data[i]) * sK; /* Invert all CMYK values */ C = 1.0F - C; M = 1.0F - M; Y = 1.0F - Y; K = 1.0F - K; /* CMYK -> RGB : RGB results from 0 to 255 */ image->comps[0].data[i] = (int)(255.0F * C * K); /* R */ image->comps[1].data[i] = (int)(255.0F * M * K); /* G */ image->comps[2].data[i] = (int)(255.0F * Y * K); /* B */ } free(image->comps[3].data); image->comps[3].data = NULL; image->comps[0].prec = 8; image->comps[1].prec = 8; image->comps[2].prec = 8; image->numcomps -= 1; image->color_space = OPJ_CLRSPC_SRGB; for (i = 3; i < image->numcomps; ++i) { memcpy(&(image->comps[i]), &(image->comps[i+1]), sizeof(image->comps[i])); } }/* color_cmyk_to_rgb() */ /* * This code has been adopted from sjpx_openjpeg.c of ghostscript */ void color_esycc_to_rgb(opj_image_t *image) { int y, cb, cr, sign1, sign2, val; unsigned int w, h, max, i; int flip_value = (1 << (image->comps[0].prec-1)); int max_value = (1 << image->comps[0].prec) - 1; if ( (image->numcomps < 3) || (image->comps[0].dx != image->comps[1].dx) || (image->comps[0].dx != image->comps[2].dx) || (image->comps[0].dy != image->comps[1].dy) || (image->comps[0].dy != image->comps[2].dy) ) { fprintf(stderr,"%s:%d:color_esycc_to_rgb\n\tCAN NOT CONVERT\n", __FILE__,__LINE__); return; } w = image->comps[0].w; h = image->comps[0].h; sign1 = (int)image->comps[1].sgnd; sign2 = (int)image->comps[2].sgnd; max = w * h; for(i = 0; i < max; ++i) { y = image->comps[0].data[i]; cb = image->comps[1].data[i]; cr = image->comps[2].data[i]; if( !sign1) cb -= flip_value; if( !sign2) cr -= flip_value; val = (int) ((float)y - (float)0.0000368 * (float)cb + (float)1.40199 * (float)cr + (float)0.5); if(val > max_value) val = max_value; else if(val < 0) val = 0; image->comps[0].data[i] = val; val = (int) ((float)1.0003 * (float)y - (float)0.344125 * (float)cb - (float)0.7141128 * (float)cr + (float)0.5); if(val > max_value) val = max_value; else if(val < 0) val = 0; image->comps[1].data[i] = val; val = (int) ((float)0.999823 * (float)y + (float)1.77204 * (float)cb - (float)0.000008 *(float)cr + (float)0.5); if(val > max_value) val = max_value; else if(val < 0) val = 0; image->comps[2].data[i] = val; } image->color_space = OPJ_CLRSPC_SRGB; }/* color_esycc_to_rgb() */
static void sycc444_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; unsigned int maxw, maxh, max, i; int offset, upb; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * (size_t)max); d1 = g = (int*)malloc(sizeof(int) * (size_t)max); d2 = b = (int*)malloc(sizeof(int) * (size_t)max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i = 0U; i < max; ++i) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++cb; ++cr; ++r; ++g; ++b; } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; return; fails: if(r) free(r); if(g) free(g); if(b) free(b); }/* sycc444_to_rgb() */
static void sycc444_to_rgb(opj_image_t *img) { int *d0, *d1, *d2, *r, *g, *b; const int *y, *cb, *cr; size_t maxw, maxh, max, i; int offset, upb; upb = (int)img->comps[0].prec; offset = 1<<(upb - 1); upb = (1<<upb)-1; maxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h; max = maxw * maxh; y = img->comps[0].data; cb = img->comps[1].data; cr = img->comps[2].data; d0 = r = (int*)malloc(sizeof(int) * max); d1 = g = (int*)malloc(sizeof(int) * max); d2 = b = (int*)malloc(sizeof(int) * max); if(r == NULL || g == NULL || b == NULL) goto fails; for(i = 0U; i < max; ++i) { sycc_to_rgb(offset, upb, *y, *cb, *cr, r, g, b); ++y; ++cb; ++cr; ++r; ++g; ++b; } free(img->comps[0].data); img->comps[0].data = d0; free(img->comps[1].data); img->comps[1].data = d1; free(img->comps[2].data); img->comps[2].data = d2; img->color_space = OPJ_CLRSPC_SRGB; return; fails: free(r); free(g); free(b); }/* sycc444_to_rgb() */
{'added': [(94, '\tsize_t maxw, maxh, max, i;'), (100, '\tmaxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;'), (107, '\td0 = r = (int*)malloc(sizeof(int) * max);'), (108, '\td1 = g = (int*)malloc(sizeof(int) * max);'), (109, '\td2 = b = (int*)malloc(sizeof(int) * max);'), (121, '\timg->color_space = OPJ_CLRSPC_SRGB;'), (125, '\tfree(r);'), (126, '\tfree(g);'), (127, '\tfree(b);'), (134, '\tsize_t maxw, maxh, max, offx, loopmaxw;'), (136, '\tsize_t i;'), (141, '\tmaxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;'), (148, '\td0 = r = (int*)malloc(sizeof(int) * max);'), (149, '\td1 = g = (int*)malloc(sizeof(int) * max);'), (150, '\td2 = b = (int*)malloc(sizeof(int) * max);'), (154, '\t/* if img->x0 is odd, then first column shall use Cb/Cr = 0 */'), (155, '\toffx = img->x0 & 1U;'), (156, '\tloopmaxw = maxw - offx;'), (157, ''), (160, '\t\tsize_t j;'), (161, ''), (162, '\t\tif (offx > 0U) {'), (163, '\t\t\tsycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);'), (164, '\t\t\t++y; ++r; ++g; ++b;'), (165, '\t\t}'), (166, ''), (167, '\t\tfor(j=0U; j < (loopmaxw & ~(size_t)1U); j += 2U)'), (174, '\t\tif (j < loopmaxw) {'), (179, ''), (184, '\timg->comps[1].w = img->comps[2].w = img->comps[0].w;'), (185, '\timg->comps[1].h = img->comps[2].h = img->comps[0].h;'), (186, '\timg->comps[1].dx = img->comps[2].dx = img->comps[0].dx;'), (187, '\timg->comps[1].dy = img->comps[2].dy = img->comps[0].dy;'), (188, '\timg->color_space = OPJ_CLRSPC_SRGB;'), (192, '\tfree(r);'), (193, '\tfree(g);'), (194, '\tfree(b);'), (201, '\tsize_t maxw, maxh, max, offx, loopmaxw, offy, loopmaxh;'), (203, '\tsize_t i;'), (208, '\tmaxw = (size_t)img->comps[0].w; maxh = (size_t)img->comps[0].h;'), (215, '\td0 = r = (int*)malloc(sizeof(int) * max);'), (216, '\td1 = g = (int*)malloc(sizeof(int) * max);'), (217, '\td2 = b = (int*)malloc(sizeof(int) * max);'), (218, ''), (219, '\tif (r == NULL || g == NULL || b == NULL) goto fails;'), (220, ''), (221, '\t/* if img->x0 is odd, then first column shall use Cb/Cr = 0 */'), (222, '\toffx = img->x0 & 1U;'), (223, '\tloopmaxw = maxw - offx;'), (224, '\t/* if img->y0 is odd, then first line shall use Cb/Cr = 0 */'), (225, '\toffy = img->y0 & 1U;'), (226, '\tloopmaxh = maxh - offy;'), (227, ''), (228, '\tif (offy > 0U) {'), (229, '\t\tsize_t j;'), (230, ''), (231, '\t\tfor(j=0; j < maxw; ++j)'), (232, '\t\t{'), (233, '\t\t\tsycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);'), (234, '\t\t\t++y; ++r; ++g; ++b;'), (235, '\t\t}'), (236, '\t}'), (238, '\tfor(i=0U; i < (loopmaxh & ~(size_t)1U); i += 2U)'), (240, '\t\tsize_t j;'), (241, ''), (244, ''), (245, '\t\tif (offx > 0U) {'), (246, '\t\t\tsycc_to_rgb(offset, upb, *y, 0, 0, r, g, b);'), (247, '\t\t\t++y; ++r; ++g; ++b;'), (248, '\t\t\tsycc_to_rgb(offset, upb, *ny, *cb, *cr, nr, ng, nb);'), (249, '\t\t\t++ny; ++nr; ++ng; ++nb;'), (250, '\t\t}'), (252, '\t\tfor(j=0; j < (loopmaxw & ~(size_t)1U); j += 2U)'), (264, '\t\tif(j < loopmaxw)'), (274, '\tif(i < loopmaxh)'), (276, '\t\tsize_t j;'), (277, ''), (278, '\t\tfor(j=0U; j < (maxw & ~(size_t)1U); j += 2U)'), (298, '\timg->comps[1].w = img->comps[2].w = img->comps[0].w;'), (299, '\timg->comps[1].h = img->comps[2].h = img->comps[0].h;'), (300, '\timg->comps[1].dx = img->comps[2].dx = img->comps[0].dx;'), (301, '\timg->comps[1].dy = img->comps[2].dy = img->comps[0].dy;'), (302, '\timg->color_space = OPJ_CLRSPC_SRGB;'), (306, '\tfree(r);'), (307, '\tfree(g);'), (308, '\tfree(b);')], 'deleted': [(94, '\tunsigned int maxw, maxh, max, i;'), (100, '\tmaxw = (unsigned int)img->comps[0].w; 
maxh = (unsigned int)img->comps[0].h;'), (107, '\td0 = r = (int*)malloc(sizeof(int) * (size_t)max);'), (108, '\td1 = g = (int*)malloc(sizeof(int) * (size_t)max);'), (109, '\td2 = b = (int*)malloc(sizeof(int) * (size_t)max);'), (124, '\tif(r) free(r);'), (125, '\tif(g) free(g);'), (126, '\tif(b) free(b);'), (127, ''), (134, '\tunsigned int maxw, maxh, max;'), (136, '\tunsigned int i, j;'), (141, '\tmaxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h;'), (148, '\td0 = r = (int*)malloc(sizeof(int) * (size_t)max);'), (149, '\td1 = g = (int*)malloc(sizeof(int) * (size_t)max);'), (150, '\td2 = b = (int*)malloc(sizeof(int) * (size_t)max);'), (156, '\t\tfor(j=0U; j < (maxw & ~(unsigned int)1U); j += 2U)'), (163, '\t\tif (j < maxw) {'), (172, '#if defined(USE_JPWL) || defined(USE_MJ2)'), (173, '\timg->comps[1].w = maxw; img->comps[1].h = maxh;'), (174, '\timg->comps[2].w = maxw; img->comps[2].h = maxh;'), (175, '#else'), (176, '\timg->comps[1].w = (OPJ_UINT32)maxw; img->comps[1].h = (OPJ_UINT32)maxh;'), (177, '\timg->comps[2].w = (OPJ_UINT32)maxw; img->comps[2].h = (OPJ_UINT32)maxh;'), (178, '#endif'), (179, '\timg->comps[1].dx = img->comps[0].dx;'), (180, '\timg->comps[2].dx = img->comps[0].dx;'), (181, '\timg->comps[1].dy = img->comps[0].dy;'), (182, '\timg->comps[2].dy = img->comps[0].dy;'), (186, '\tif(r) free(r);'), (187, '\tif(g) free(g);'), (188, '\tif(b) free(b);'), (189, ''), (196, '\tunsigned int maxw, maxh, max;'), (198, '\tunsigned int i, j;'), (203, '\tmaxw = (unsigned int)img->comps[0].w; maxh = (unsigned int)img->comps[0].h;'), (210, '\td0 = r = (int*)malloc(sizeof(int) * (size_t)max);'), (211, '\td1 = g = (int*)malloc(sizeof(int) * (size_t)max);'), (212, '\td2 = b = (int*)malloc(sizeof(int) * (size_t)max);'), (213, ''), (214, '\tif(r == NULL || g == NULL || b == NULL) goto fails;'), (216, '\tfor(i=0U; i < (maxh & ~(unsigned int)1U); i += 2U)'), (221, '\t\tfor(j=0; j < (maxw & ~(unsigned int)1U); j += 2U)'), (233, '\t\tif(j < maxw)'), (243, '\tif(i < maxh)'), (245, '\t\tfor(j=0U; j < (maxw & ~(unsigned int)1U); j += 2U)'), (265, '#if defined(USE_JPWL) || defined(USE_MJ2)'), (266, '\timg->comps[1].w = maxw; img->comps[1].h = maxh;'), (267, '\timg->comps[2].w = maxw; img->comps[2].h = maxh;'), (268, '#else'), (269, '\timg->comps[1].w = (OPJ_UINT32)maxw; img->comps[1].h = (OPJ_UINT32)maxh;'), (270, '\timg->comps[2].w = (OPJ_UINT32)maxw; img->comps[2].h = (OPJ_UINT32)maxh;'), (271, '#endif'), (272, '\timg->comps[1].dx = img->comps[0].dx;'), (273, '\timg->comps[2].dx = img->comps[0].dx;'), (274, '\timg->comps[1].dy = img->comps[0].dy;'), (275, '\timg->comps[2].dy = img->comps[0].dy;'), (279, '\tif(r) free(r);'), (280, '\tif(g) free(g);'), (281, '\tif(b) free(b);'), (282, ''), (327, '\timg->color_space = OPJ_CLRSPC_SRGB;'), (328, '')]}
num_lines_added: 86
num_lines_deleted: 62
num_lines_in_file: 686
num_tokens_in_file: 6,633
num_lines_in_method: 31
num_tokens_in_method: 393
method_complexity: 8
repo: https://github.com/uclouvain/openjpeg
cve_id: CVE-2016-3183
cwe_id: CWE-125
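Annotation (CVE-2016-3183, CWE-125): the out-of-bounds read fixed in this record comes from subsampled chroma planes being one sample narrower than the luma plane when the image origin x0 is odd; the old 4:2:2/4:2:0 loops consumed a Cb/Cr pair for the first column anyway and walked past the end of the chroma arrays. A reduced sketch of the patched row loop, with the offset-centering and clamping of sycc_to_rgb() omitted; convert_422_row and px are illustrative names, not the OpenJPEG functions:

#include <stddef.h>

/* reduced per-pixel conversion: centering and clamping omitted */
static void px(int y, int cb, int cr, int *out)
{
    out[0] = y + (int)(1.402f * (float)cr);
    out[1] = y - (int)(0.344f * (float)cb + 0.714f * (float)cr);
    out[2] = y + (int)(1.772f * (float)cb);
}

void convert_422_row(const int *y, const int *cb, const int *cr,
                     int *out, size_t maxw, int x0_is_odd)
{
    size_t j, loopmaxw = maxw - (size_t)(x0_is_odd != 0);

    if (x0_is_odd) {                /* first column has no chroma pair of
                                     * its own: decode it as gray instead
                                     * of reading past the chroma arrays */
        px(*y++, 0, 0, out);
        out += 3;
    }
    for (j = 0; j + 1 < loopmaxw; j += 2) {  /* each Cb/Cr covers two luma samples */
        px(*y++, *cb, *cr, out); out += 3;
        px(*y++, *cb, *cr, out); out += 3;
        ++cb; ++cr;
    }
    if (j < loopmaxw)               /* odd trailing column */
        px(*y, *cb, *cr, out);
}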
id: 3,205
file_name: vf_delogo.c
programming_language: C
method_name: filter_frame
/* * Copyright (c) 2002 Jindrich Makovicka <makovick@gmail.com> * Copyright (c) 2011 Stefano Sabatini * Copyright (c) 2013 Jean Delvare <khali@linux-fr.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /** * @file * A very simple tv station logo remover * Originally imported from MPlayer libmpcodecs/vf_delogo.c, * the algorithm was later improved. */ #include "libavutil/common.h" #include "libavutil/imgutils.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" #include "internal.h" #include "video.h" /** * Apply a simple delogo algorithm to the image in src and put the * result in dst. * * The algorithm is only applied to the region specified by the logo * parameters. * * @param w width of the input image * @param h height of the input image * @param logo_x x coordinate of the top left corner of the logo region * @param logo_y y coordinate of the top left corner of the logo region * @param logo_w width of the logo * @param logo_h height of the logo * @param band the size of the band around the processed area * @param show show a rectangle around the processed area, useful for * parameters tweaking * @param direct if non-zero perform in-place processing */ static void apply_delogo(uint8_t *dst, int dst_linesize, uint8_t *src, int src_linesize, int w, int h, AVRational sar, int logo_x, int logo_y, int logo_w, int logo_h, unsigned int band, int show, int direct) { int x, y; uint64_t interp, weightl, weightr, weightt, weightb; uint8_t *xdst, *xsrc; uint8_t *topleft, *botleft, *topright; unsigned int left_sample, right_sample; int xclipl, xclipr, yclipt, yclipb; int logo_x1, logo_x2, logo_y1, logo_y2; xclipl = FFMAX(-logo_x, 0); xclipr = FFMAX(logo_x+logo_w-w, 0); yclipt = FFMAX(-logo_y, 0); yclipb = FFMAX(logo_y+logo_h-h, 0); logo_x1 = logo_x + xclipl; logo_x2 = logo_x + logo_w - xclipr; logo_y1 = logo_y + yclipt; logo_y2 = logo_y + logo_h - yclipb; topleft = src+logo_y1 * src_linesize+logo_x1; topright = src+logo_y1 * src_linesize+logo_x2-1; botleft = src+(logo_y2-1) * src_linesize+logo_x1; if (!direct) av_image_copy_plane(dst, dst_linesize, src, src_linesize, w, h); dst += (logo_y1 + 1) * dst_linesize; src += (logo_y1 + 1) * src_linesize; for (y = logo_y1+1; y < logo_y2-1; y++) { left_sample = topleft[src_linesize*(y-logo_y1)] + topleft[src_linesize*(y-logo_y1-1)] + topleft[src_linesize*(y-logo_y1+1)]; right_sample = topright[src_linesize*(y-logo_y1)] + topright[src_linesize*(y-logo_y1-1)] + topright[src_linesize*(y-logo_y1+1)]; for (x = logo_x1+1, xdst = dst+logo_x1+1, xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) { /* Weighted interpolation based on relative distances, taking SAR into account */ weightl = (uint64_t) (logo_x2-1-x) * (y-logo_y1) * (logo_y2-1-y) * sar.den; weightr = (uint64_t)(x-logo_x1) * (y-logo_y1) * (logo_y2-1-y) * sar.den; weightt = 
(uint64_t)(x-logo_x1) * (logo_x2-1-x) * (logo_y2-1-y) * sar.num; weightb = (uint64_t)(x-logo_x1) * (logo_x2-1-x) * (y-logo_y1) * sar.num; interp = left_sample * weightl + right_sample * weightr + (topleft[x-logo_x1] + topleft[x-logo_x1-1] + topleft[x-logo_x1+1]) * weightt + (botleft[x-logo_x1] + botleft[x-logo_x1-1] + botleft[x-logo_x1+1]) * weightb; interp /= (weightl + weightr + weightt + weightb) * 3U; if (y >= logo_y+band && y < logo_y+logo_h-band && x >= logo_x+band && x < logo_x+logo_w-band) { *xdst = interp; } else { unsigned dist = 0; if (x < logo_x+band) dist = FFMAX(dist, logo_x-x+band); else if (x >= logo_x+logo_w-band) dist = FFMAX(dist, x-(logo_x+logo_w-1-band)); if (y < logo_y+band) dist = FFMAX(dist, logo_y-y+band); else if (y >= logo_y+logo_h-band) dist = FFMAX(dist, y-(logo_y+logo_h-1-band)); *xdst = (*xsrc*dist + interp*(band-dist))/band; if (show && (dist == band-1)) *xdst = 0; } } dst += dst_linesize; src += src_linesize; } } typedef struct { const AVClass *class; int x, y, w, h, band, show; } DelogoContext; #define OFFSET(x) offsetof(DelogoContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM static const AVOption delogo_options[]= { { "x", "set logo x position", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS }, { "y", "set logo y position", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS }, { "w", "set logo width", OFFSET(w), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS }, { "h", "set logo height", OFFSET(h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS }, { "band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS }, { "t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS }, { "show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS }, { NULL }, }; AVFILTER_DEFINE_CLASS(delogo); static int query_formats(AVFilterContext *ctx) { static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); return 0; } static av_cold int init(AVFilterContext *ctx) { DelogoContext *s = ctx->priv; #define CHECK_UNSET_OPT(opt) \ if (s->opt == -1) { \ av_log(s, AV_LOG_ERROR, "Option %s was not set.\n", #opt); \ return AVERROR(EINVAL); \ } CHECK_UNSET_OPT(x); CHECK_UNSET_OPT(y); CHECK_UNSET_OPT(w); CHECK_UNSET_OPT(h); av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d, w:%d h:%d band:%d show:%d\n", s->x, s->y, s->w, s->h, s->band, s->show); s->w += s->band*2; s->h += s->band*2; s->x -= s->band; s->y -= s->band; return 0; } static int filter_frame(AVFilterLink *inlink, AVFrame *in) { DelogoContext *s = inlink->dst->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); AVFrame *out; int hsub0 = desc->log2_chroma_w; int vsub0 = desc->log2_chroma_h; int direct = 0; int plane; AVRational sar; if (av_frame_is_writable(in)) { direct = 1; out = in; } else { out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); } sar = in->sample_aspect_ratio; /* Assume square pixels if SAR is unknown */ if (!sar.num) sar.num = sar.den = 1; for (plane = 0; plane < 4 && in->data[plane]; plane++) { int hsub = plane == 1 || plane == 2 ? 
hsub0 : 0; int vsub = plane == 1 || plane == 2 ? vsub0 : 0; apply_delogo(out->data[plane], out->linesize[plane], in ->data[plane], in ->linesize[plane], FF_CEIL_RSHIFT(inlink->w, hsub), FF_CEIL_RSHIFT(inlink->h, vsub), sar, s->x>>hsub, s->y>>vsub, /* Up and left borders were rounded down, inject lost bits * into width and height to avoid error accumulation */ FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub), FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub), s->band>>FFMIN(hsub, vsub), s->show, direct); } if (!direct) av_frame_free(&in); return ff_filter_frame(outlink, out); } static const AVFilterPad avfilter_vf_delogo_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .get_video_buffer = ff_null_get_video_buffer, .filter_frame = filter_frame, }, { NULL } }; static const AVFilterPad avfilter_vf_delogo_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, }, { NULL } }; AVFilter avfilter_vf_delogo = { .name = "delogo", .description = NULL_IF_CONFIG_SMALL("Remove logo from input video."), .priv_size = sizeof(DelogoContext), .priv_class = &delogo_class, .init = init, .query_formats = query_formats, .inputs = avfilter_vf_delogo_inputs, .outputs = avfilter_vf_delogo_outputs, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, };
/* * Copyright (c) 2002 Jindrich Makovicka <makovick@gmail.com> * Copyright (c) 2011 Stefano Sabatini * Copyright (c) 2013 Jean Delvare <khali@linux-fr.org> * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with FFmpeg; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /** * @file * A very simple tv station logo remover * Originally imported from MPlayer libmpcodecs/vf_delogo.c, * the algorithm was later improved. */ #include "libavutil/common.h" #include "libavutil/imgutils.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "avfilter.h" #include "formats.h" #include "internal.h" #include "video.h" /** * Apply a simple delogo algorithm to the image in src and put the * result in dst. * * The algorithm is only applied to the region specified by the logo * parameters. * * @param w width of the input image * @param h height of the input image * @param logo_x x coordinate of the top left corner of the logo region * @param logo_y y coordinate of the top left corner of the logo region * @param logo_w width of the logo * @param logo_h height of the logo * @param band the size of the band around the processed area * @param show show a rectangle around the processed area, useful for * parameters tweaking * @param direct if non-zero perform in-place processing */ static void apply_delogo(uint8_t *dst, int dst_linesize, uint8_t *src, int src_linesize, int w, int h, AVRational sar, int logo_x, int logo_y, int logo_w, int logo_h, unsigned int band, int show, int direct) { int x, y; uint64_t interp, weightl, weightr, weightt, weightb; uint8_t *xdst, *xsrc; uint8_t *topleft, *botleft, *topright; unsigned int left_sample, right_sample; int xclipl, xclipr, yclipt, yclipb; int logo_x1, logo_x2, logo_y1, logo_y2; xclipl = FFMAX(-logo_x, 0); xclipr = FFMAX(logo_x+logo_w-w, 0); yclipt = FFMAX(-logo_y, 0); yclipb = FFMAX(logo_y+logo_h-h, 0); logo_x1 = logo_x + xclipl; logo_x2 = logo_x + logo_w - xclipr; logo_y1 = logo_y + yclipt; logo_y2 = logo_y + logo_h - yclipb; topleft = src+logo_y1 * src_linesize+logo_x1; topright = src+logo_y1 * src_linesize+logo_x2-1; botleft = src+(logo_y2-1) * src_linesize+logo_x1; if (!direct) av_image_copy_plane(dst, dst_linesize, src, src_linesize, w, h); dst += (logo_y1 + 1) * dst_linesize; src += (logo_y1 + 1) * src_linesize; for (y = logo_y1+1; y < logo_y2-1; y++) { left_sample = topleft[src_linesize*(y-logo_y1)] + topleft[src_linesize*(y-logo_y1-1)] + topleft[src_linesize*(y-logo_y1+1)]; right_sample = topright[src_linesize*(y-logo_y1)] + topright[src_linesize*(y-logo_y1-1)] + topright[src_linesize*(y-logo_y1+1)]; for (x = logo_x1+1, xdst = dst+logo_x1+1, xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) { /* Weighted interpolation based on relative distances, taking SAR into account */ weightl = (uint64_t) (logo_x2-1-x) * (y-logo_y1) * (logo_y2-1-y) * sar.den; weightr = (uint64_t)(x-logo_x1) * (y-logo_y1) * (logo_y2-1-y) * sar.den; weightt = 
(uint64_t)(x-logo_x1) * (logo_x2-1-x) * (logo_y2-1-y) * sar.num; weightb = (uint64_t)(x-logo_x1) * (logo_x2-1-x) * (y-logo_y1) * sar.num; interp = left_sample * weightl + right_sample * weightr + (topleft[x-logo_x1] + topleft[x-logo_x1-1] + topleft[x-logo_x1+1]) * weightt + (botleft[x-logo_x1] + botleft[x-logo_x1-1] + botleft[x-logo_x1+1]) * weightb; interp /= (weightl + weightr + weightt + weightb) * 3U; if (y >= logo_y+band && y < logo_y+logo_h-band && x >= logo_x+band && x < logo_x+logo_w-band) { *xdst = interp; } else { unsigned dist = 0; if (x < logo_x+band) dist = FFMAX(dist, logo_x-x+band); else if (x >= logo_x+logo_w-band) dist = FFMAX(dist, x-(logo_x+logo_w-1-band)); if (y < logo_y+band) dist = FFMAX(dist, logo_y-y+band); else if (y >= logo_y+logo_h-band) dist = FFMAX(dist, y-(logo_y+logo_h-1-band)); *xdst = (*xsrc*dist + interp*(band-dist))/band; if (show && (dist == band-1)) *xdst = 0; } } dst += dst_linesize; src += src_linesize; } } typedef struct { const AVClass *class; int x, y, w, h, band, show; } DelogoContext; #define OFFSET(x) offsetof(DelogoContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM static const AVOption delogo_options[]= { { "x", "set logo x position", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS }, { "y", "set logo y position", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS }, { "w", "set logo width", OFFSET(w), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS }, { "h", "set logo height", OFFSET(h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS }, { "band", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS }, { "t", "set delogo area band size", OFFSET(band), AV_OPT_TYPE_INT, { .i64 = 4 }, 1, INT_MAX, FLAGS }, { "show", "show delogo area", OFFSET(show), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS }, { NULL }, }; AVFILTER_DEFINE_CLASS(delogo); static int query_formats(AVFilterContext *ctx) { static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE }; ff_set_common_formats(ctx, ff_make_format_list(pix_fmts)); return 0; } static av_cold int init(AVFilterContext *ctx) { DelogoContext *s = ctx->priv; #define CHECK_UNSET_OPT(opt) \ if (s->opt == -1) { \ av_log(s, AV_LOG_ERROR, "Option %s was not set.\n", #opt); \ return AVERROR(EINVAL); \ } CHECK_UNSET_OPT(x); CHECK_UNSET_OPT(y); CHECK_UNSET_OPT(w); CHECK_UNSET_OPT(h); av_log(ctx, AV_LOG_VERBOSE, "x:%d y:%d, w:%d h:%d band:%d show:%d\n", s->x, s->y, s->w, s->h, s->band, s->show); s->w += s->band*2; s->h += s->band*2; s->x -= s->band; s->y -= s->band; return 0; } static int filter_frame(AVFilterLink *inlink, AVFrame *in) { DelogoContext *s = inlink->dst->priv; AVFilterLink *outlink = inlink->dst->outputs[0]; const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); AVFrame *out; int hsub0 = desc->log2_chroma_w; int vsub0 = desc->log2_chroma_h; int direct = 0; int plane; AVRational sar; if (av_frame_is_writable(in)) { direct = 1; out = in; } else { out = ff_get_video_buffer(outlink, outlink->w, outlink->h); if (!out) { av_frame_free(&in); return AVERROR(ENOMEM); } av_frame_copy_props(out, in); } sar = in->sample_aspect_ratio; /* Assume square pixels if SAR is unknown */ if (!sar.num) sar.num = sar.den = 1; for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) { int hsub = plane == 1 || plane == 2 ? 
hsub0 : 0; int vsub = plane == 1 || plane == 2 ? vsub0 : 0; apply_delogo(out->data[plane], out->linesize[plane], in ->data[plane], in ->linesize[plane], FF_CEIL_RSHIFT(inlink->w, hsub), FF_CEIL_RSHIFT(inlink->h, vsub), sar, s->x>>hsub, s->y>>vsub, /* Up and left borders were rounded down, inject lost bits * into width and height to avoid error accumulation */ FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub), FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub), s->band>>FFMIN(hsub, vsub), s->show, direct); } if (!direct) av_frame_free(&in); return ff_filter_frame(outlink, out); } static const AVFilterPad avfilter_vf_delogo_inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, .get_video_buffer = ff_null_get_video_buffer, .filter_frame = filter_frame, }, { NULL } }; static const AVFilterPad avfilter_vf_delogo_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, }, { NULL } }; AVFilter avfilter_vf_delogo = { .name = "delogo", .description = NULL_IF_CONFIG_SMALL("Remove logo from input video."), .priv_size = sizeof(DelogoContext), .priv_class = &delogo_class, .init = init, .query_formats = query_formats, .inputs = avfilter_vf_delogo_inputs, .outputs = avfilter_vf_delogo_outputs, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, };
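The interpolation kernel in apply_delogo() above is worth reading in isolation: every pixel strictly inside the logo rectangle is rebuilt from three-tap sums of the border pixels on the left, right, top and bottom edges, each direction weighted by the product of the distances to the other edges and scaled by the sample aspect ratio so the weighting is isotropic in display space. A minimal standalone model of that per-pixel computation (function name and parameters are illustrative, not FFmpeg API; l, r, t, b stand for the three-tap border sums):

#include <stdint.h>

/* Interpolate pixel (x, y) strictly inside [x1, x2) x [y1, y2) from
 * three-tap border sums l, r, t, b, mirroring the weight products in
 * apply_delogo(). Horizontal weights carry sar_den and vertical weights
 * sar_num, exactly as in the filter; for interior pixels every factor
 * is >= 1, so the normalization never divides by zero. */
static uint8_t interp_pixel(int x, int y, int x1, int x2, int y1, int y2,
                            unsigned l, unsigned r, unsigned t, unsigned b,
                            int sar_num, int sar_den)
{
    uint64_t wl = (uint64_t)(x2 - 1 - x) * (y - y1)     * (y2 - 1 - y) * sar_den;
    uint64_t wr = (uint64_t)(x - x1)     * (y - y1)     * (y2 - 1 - y) * sar_den;
    uint64_t wt = (uint64_t)(x - x1)     * (x2 - 1 - x) * (y2 - 1 - y) * sar_num;
    uint64_t wb = (uint64_t)(x - x1)     * (x2 - 1 - x) * (y - y1)     * sar_num;

    /* The division by 3 cancels the three taps in each border sum. */
    return (uint8_t)((l * wl + r * wr + t * wt + b * wb) /
                     ((wl + wr + wt + wb) * 3));
}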
func_before:

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    DelogoContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFrame *out;
    int hsub0 = desc->log2_chroma_w;
    int vsub0 = desc->log2_chroma_h;
    int direct = 0;
    int plane;
    AVRational sar;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    sar = in->sample_aspect_ratio;
    /* Assume square pixels if SAR is unknown */
    if (!sar.num)
        sar.num = sar.den = 1;

    for (plane = 0; plane < 4 && in->data[plane]; plane++) {
        int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
        int vsub = plane == 1 || plane == 2 ? vsub0 : 0;

        apply_delogo(out->data[plane], out->linesize[plane],
                     in ->data[plane], in ->linesize[plane],
                     FF_CEIL_RSHIFT(inlink->w, hsub),
                     FF_CEIL_RSHIFT(inlink->h, vsub),
                     sar, s->x>>hsub, s->y>>vsub,
                     /* Up and left borders were rounded down, inject lost bits
                      * into width and height to avoid error accumulation */
                     FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
                     FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
                     s->band>>FFMIN(hsub, vsub),
                     s->show, direct);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
func_after:

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    DelogoContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    AVFrame *out;
    int hsub0 = desc->log2_chroma_w;
    int vsub0 = desc->log2_chroma_h;
    int direct = 0;
    int plane;
    AVRational sar;

    if (av_frame_is_writable(in)) {
        direct = 1;
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    sar = in->sample_aspect_ratio;
    /* Assume square pixels if SAR is unknown */
    if (!sar.num)
        sar.num = sar.den = 1;

    for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {
        int hsub = plane == 1 || plane == 2 ? hsub0 : 0;
        int vsub = plane == 1 || plane == 2 ? vsub0 : 0;

        apply_delogo(out->data[plane], out->linesize[plane],
                     in ->data[plane], in ->linesize[plane],
                     FF_CEIL_RSHIFT(inlink->w, hsub),
                     FF_CEIL_RSHIFT(inlink->h, vsub),
                     sar, s->x>>hsub, s->y>>vsub,
                     /* Up and left borders were rounded down, inject lost bits
                      * into width and height to avoid error accumulation */
                     FF_CEIL_RSHIFT(s->w + (s->x & ((1<<hsub)-1)), hsub),
                     FF_CEIL_RSHIFT(s->h + (s->y & ((1<<vsub)-1)), vsub),
                     s->band>>FFMIN(hsub, vsub),
                     s->show, direct);
    }

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
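func_before and func_after above differ only in the plane-loop condition, and the one-line diff recorded below is the entire fix for CVE-2013-4263 (CWE-119): the loop must not trust a non-NULL data[plane] pointer alone, because a frame can carry auxiliary buffers in data[] that are not image planes (a palette, for instance) and that have a zero linesize. Treating such a buffer as a plane of frame-sized pixel data reads and writes far past its end. A self-contained demonstration of what the extra linesize test changes (the palette scenario is an assumed illustration, not taken from the dataset row):

#include <stdio.h>

struct fake_frame {
    unsigned char *data[4];
    int linesize[4];
};

int main(void)
{
    unsigned char luma[16], palette[4];
    /* data[1] holds a small auxiliary buffer, not an image plane,
     * which is why its linesize is 0. */
    struct fake_frame f = {
        .data     = { luma, palette, 0, 0 },
        .linesize = { 4, 0, 0, 0 },
    };
    int plane;

    for (plane = 0; plane < 4 && f.data[plane]; plane++)
        printf("pre-fix  loop visits plane %d\n", plane);   /* 0 and 1 */

    for (plane = 0; plane < 4 && f.data[plane] && f.linesize[plane]; plane++)
        printf("post-fix loop visits plane %d\n", plane);   /* 0 only */

    return 0;
}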
diff: {'added': [(240, ' for (plane = 0; plane < 4 && in->data[plane] && in->linesize[plane]; plane++) {')], 'deleted': [(240, ' for (plane = 0; plane < 4 && in->data[plane]; plane++) {')]}
num_lines_added: 1
num_lines_deleted: 1
num_lines_in_file: 195
num_tokens_in_file: 1,697
num_lines_in_method: 42
num_tokens_in_method: 366
method_complexity: 11
repo: https://github.com/FFmpeg/FFmpeg
cve_id: CVE-2013-4263
cwe_id: CWE-119
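The filter_frame() fields of this row also show how the logo rectangle is mapped onto subsampled chroma planes: x and y are shifted right by the plane's chroma shift (rounding the corner down), and the bits lost to that rounding are folded back into the width and height before a ceiling shift, so the chroma rectangle always covers the full luma rectangle. A standalone check of that arithmetic, with the ceiling shift re-derived locally (the sample values are illustrative):

#include <stdio.h>

/* Ceiling right-shift, the operation FF_CEIL_RSHIFT performs. */
#define CEIL_RSHIFT(a, b) (-((-(a)) >> (b)))

int main(void)
{
    /* A logo rectangle on a YUV420 frame, so hsub = vsub = 1. */
    int x = 101, y = 43, w = 50, h = 25, hsub = 1, vsub = 1;

    int cx = x >> hsub, cy = y >> vsub;          /* corner rounded down */
    int cw = CEIL_RSHIFT(w + (x & ((1 << hsub) - 1)), hsub);
    int ch = CEIL_RSHIFT(h + (y & ((1 << vsub) - 1)), vsub);

    printf("luma rect:   %d,%d %dx%d\n", x, y, w, h);
    printf("chroma rect: %d,%d %dx%d\n", cx, cy, cw, ch);
    /* Prints 50,21 26x13: in luma terms it spans 100..151 x 42..67,
     * a superset of the requested 101..150 x 43..67. */
    return 0;
}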
id: 2,155
file_name: udp.c
programming_language: C
method_name: udp_recvmsg
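The code_before dump that follows is the full net/ipv4/udp.c of the affected tree; the method under study, udp_recvmsg(), appears roughly two thirds of the way through. One pattern to watch for when it does: the function stores the caller's address length (*addr_len = sizeof(*sin)) at the very top, before any datagram has been received or the sockaddr filled in. Later kernels defer that write to the point where the address is actually copied out, so error paths cannot advertise bytes that were never initialized. A reduced model of the deferred ordering (types and names are illustrative, not the kernel's actual patch):

#include <string.h>

struct addr { int family; unsigned short port; unsigned int ip; };

/* Model of a recvmsg-style call: *addr_len is written only on the
 * path that fills in 'out', never up front, so a failed receive
 * leaves the caller's length untouched and no uninitialized bytes
 * are promised to the caller. */
static int recv_model(struct addr *out, int *addr_len, int have_datagram)
{
    if (!have_datagram)
        return -1;                      /* *addr_len deliberately untouched */

    if (out) {
        memset(out, 0, sizeof(*out));
        out->family = 2;                /* AF_INET */
        if (addr_len)
            *addr_len = sizeof(*out);   /* set only once 'out' is valid */
    }
    return 0;
}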
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The User Datagram Protocol (UDP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Alan Cox, <alan@lxorguk.ukuu.org.uk> * Hirokazu Takahashi, <taka@valinux.co.jp> * * Fixes: * Alan Cox : verify_area() calls * Alan Cox : stopped close while in use off icmp * messages. Not a fix but a botch that * for udp at least is 'valid'. * Alan Cox : Fixed icmp handling properly * Alan Cox : Correct error for oversized datagrams * Alan Cox : Tidied select() semantics. * Alan Cox : udp_err() fixed properly, also now * select and read wake correctly on errors * Alan Cox : udp_send verify_area moved to avoid mem leak * Alan Cox : UDP can count its memory * Alan Cox : send to an unknown connection causes * an ECONNREFUSED off the icmp, but * does NOT close. * Alan Cox : Switched to new sk_buff handlers. No more backlog! * Alan Cox : Using generic datagram code. Even smaller and the PEEK * bug no longer crashes it. * Fred Van Kempen : Net2e support for sk->broadcast. * Alan Cox : Uses skb_free_datagram * Alan Cox : Added get/set sockopt support. * Alan Cox : Broadcasting without option set returns EACCES. * Alan Cox : No wakeup calls. Instead we now use the callbacks. * Alan Cox : Use ip_tos and ip_ttl * Alan Cox : SNMP Mibs * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support. * Matt Dillon : UDP length checks. * Alan Cox : Smarter af_inet used properly. * Alan Cox : Use new kernel side addressing. * Alan Cox : Incorrect return on truncated datagram receive. * Arnt Gulbrandsen : New udp_send and stuff * Alan Cox : Cache last socket * Alan Cox : Route cache * Jon Peatfield : Minor efficiency fix to sendto(). * Mike Shaver : RFC1122 checks. * Alan Cox : Nonblocking error fix. * Willy Konynenberg : Transparent proxying support. * Mike McLagan : Routing by source * David S. Miller : New socket lookup architecture. * Last socket cache retained as it * does have a high hit rate. * Olaf Kirch : Don't linearise iovec on sendmsg. * Andi Kleen : Some cleanups, cache destination entry * for connect. * Vitaly E. Lavrov : Transparent proxy revived after year coma. * Melvin Smith : Check msg_name not msg_namelen in sendto(), * return ENOTCONN for unconnected sockets (POSIX) * Janos Farkas : don't deliver multi/broadcasts to a different * bound-to-device socket * Hirokazu Takahashi : HW checksumming for outgoing UDP * datagrams. * Hirokazu Takahashi : sendfile() on UDP works now. * Arnaldo C. Melo : convert /proc/net/udp to seq_file * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind * a single port at the same time. * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support * James Chapman : Add L2TP encapsulation type. * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #define pr_fmt(fmt) "UDP: " fmt #include <asm/uaccess.h> #include <asm/ioctls.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/module.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/igmp.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <net/tcp_states.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/net_namespace.h> #include <net/icmp.h> #include <net/inet_hashtables.h> #include <net/route.h> #include <net/checksum.h> #include <net/xfrm.h> #include <trace/events/udp.h> #include <linux/static_key.h> #include <trace/events/skb.h> #include <net/busy_poll.h> #include "udp_impl.h" struct udp_table udp_table __read_mostly; EXPORT_SYMBOL(udp_table); long sysctl_udp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_udp_mem); int sysctl_udp_rmem_min __read_mostly; EXPORT_SYMBOL(sysctl_udp_rmem_min); int sysctl_udp_wmem_min __read_mostly; EXPORT_SYMBOL(sysctl_udp_wmem_min); atomic_long_t udp_memory_allocated; EXPORT_SYMBOL(udp_memory_allocated); #define MAX_UDP_PORTS 65536 #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN) static int udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, unsigned long *bitmap, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2), unsigned int log) { struct sock *sk2; struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); sk_nulls_for_each(sk2, node, &hslot->head) if (net_eq(sock_net(sk2), net) && sk2 != sk && (bitmap || udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || !uid_eq(uid, sock_i_uid(sk2))) && (*saddr_comp)(sk, sk2)) { if (bitmap) __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap); else return 1; } return 0; } /* * Note: we still hold spinlock of primary hash chain, so no other writer * can insert/delete a socket with local_port == num */ static int udp_lib_lport_inuse2(struct net *net, __u16 num, struct udp_hslot *hslot2, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2)) { struct sock *sk2; struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); int res = 0; spin_lock(&hslot2->lock); udp_portaddr_for_each_entry(sk2, node, &hslot2->head) if (net_eq(sock_net(sk2), net) && sk2 != sk && (udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || !uid_eq(uid, sock_i_uid(sk2))) && (*saddr_comp)(sk, sk2)) { res = 1; break; } spin_unlock(&hslot2->lock); return res; } /** * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 * * @sk: socket struct in question * @snum: port number to look up * @saddr_comp: AF-dependent comparison of bound local IP addresses * @hash2_nulladdr: AF-dependent hash value in secondary hash chains, * with NULL address */ int udp_lib_get_port(struct sock *sk, unsigned short snum, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2), unsigned int hash2_nulladdr) { struct udp_hslot *hslot, *hslot2; struct udp_table *udptable = sk->sk_prot->h.udp_table; int error = 1; struct net *net = sock_net(sk); if 
(!snum) { int low, high, remaining; unsigned int rand; unsigned short first, last; DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rand = net_random(); first = (((u64)rand * remaining) >> 32) + low; /* * force rand to be an odd multiple of UDP_HTABLE_SIZE */ rand = (rand | 1) * (udptable->mask + 1); last = first + udptable->mask + 1; do { hslot = udp_hashslot(udptable, net, first); bitmap_zero(bitmap, PORTS_PER_CHAIN); spin_lock_bh(&hslot->lock); udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, saddr_comp, udptable->log); snum = first; /* * Iterate on all possible values of snum for this hash. * Using steps of an odd multiple of UDP_HTABLE_SIZE * give us randomization and full range coverage. */ do { if (low <= snum && snum <= high && !test_bit(snum >> udptable->log, bitmap) && !inet_is_reserved_local_port(snum)) goto found; snum += rand; } while (snum != first); spin_unlock_bh(&hslot->lock); } while (++first != last); goto fail; } else { hslot = udp_hashslot(udptable, net, snum); spin_lock_bh(&hslot->lock); if (hslot->count > 10) { int exist; unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; slot2 &= udptable->mask; hash2_nulladdr &= udptable->mask; hslot2 = udp_hashslot2(udptable, slot2); if (hslot->count < hslot2->count) goto scan_primary_hash; exist = udp_lib_lport_inuse2(net, snum, hslot2, sk, saddr_comp); if (!exist && (hash2_nulladdr != slot2)) { hslot2 = udp_hashslot2(udptable, hash2_nulladdr); exist = udp_lib_lport_inuse2(net, snum, hslot2, sk, saddr_comp); } if (exist) goto fail_unlock; else goto found; } scan_primary_hash: if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp, 0)) goto fail_unlock; } found: inet_sk(sk)->inet_num = snum; udp_sk(sk)->udp_port_hash = snum; udp_sk(sk)->udp_portaddr_hash ^= snum; if (sk_unhashed(sk)) { sk_nulls_add_node_rcu(sk, &hslot->head); hslot->count++; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock(&hslot2->lock); hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &hslot2->head); hslot2->count++; spin_unlock(&hslot2->lock); } error = 0; fail_unlock: spin_unlock_bh(&hslot->lock); fail: return error; } EXPORT_SYMBOL(udp_lib_get_port); static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) { struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); return (!ipv6_only_sock(sk2) && (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr || inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)); } static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, unsigned int port) { return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; } int udp_v4_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); unsigned int hash2_partial = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); /* precompute partial secondary hash */ udp_sk(sk)->udp_portaddr_hash = hash2_partial; return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); } static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, unsigned short hnum, __be16 sport, __be32 daddr, __be16 dport, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && !ipv6_only_sock(sk)) { struct inet_sock *inet = inet_sk(sk); score = (sk->sk_family == PF_INET ? 
2 : 1); if (inet->inet_rcv_saddr) { if (inet->inet_rcv_saddr != daddr) return -1; score += 4; } if (inet->inet_daddr) { if (inet->inet_daddr != saddr) return -1; score += 4; } if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score += 4; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; } } return score; } /* * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num) */ static inline int compute_score2(struct sock *sk, struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned int hnum, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) { struct inet_sock *inet = inet_sk(sk); if (inet->inet_rcv_saddr != daddr) return -1; if (inet->inet_num != hnum) return -1; score = (sk->sk_family == PF_INET ? 2 : 1); if (inet->inet_daddr) { if (inet->inet_daddr != saddr) return -1; score += 4; } if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score += 4; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; } } return score; } static unsigned int udp_ehashfn(struct net *net, const __be32 laddr, const __u16 lport, const __be32 faddr, const __be16 fport) { static u32 udp_ehash_secret __read_mostly; net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret)); return __inet_ehashfn(laddr, lport, faddr, fport, udp_ehash_secret + net_hash_mix(net)); } /* called with read_rcu_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned int hnum, int dif, struct udp_hslot *hslot2, unsigned int slot2) { struct sock *sk, *result; struct hlist_nulls_node *node; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; begin: result = NULL; badness = 0; udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { score = compute_score2(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { result = sk; badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } } else if (score == badness && reuseport) { matches++; if (((u64)hash * matches) >> 32 == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot2) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score2(result, net, saddr, sport, daddr, hnum, dif) < badness)) { sock_put(result); goto begin; } } return result; } /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. 
-DaveM */ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, struct udp_table *udptable) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; rcu_read_lock(); if (hslot->count > 10) { hash2 = udp4_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, hslot2, slot2); if (!result) { hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, htonl(INADDR_ANY), hnum, dif, hslot2, slot2); } rcu_read_unlock(); return result; } begin: result = NULL; badness = 0; sk_nulls_for_each_rcu(sk, node, &hslot->head) { score = compute_score(sk, net, saddr, hnum, sport, daddr, dport, dif); if (score > badness) { result = sk; badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } } else if (score == badness && reuseport) { matches++; if (((u64)hash * matches) >> 32 == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score(result, net, saddr, hnum, sport, daddr, dport, dif) < badness)) { sock_put(result); goto begin; } } rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(__udp4_lib_lookup); static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { struct sock *sk; const struct iphdr *iph = ip_hdr(skb); if (unlikely(sk = skb_steal_sock(skb))) return sk; else return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, iph->daddr, dport, inet_iif(skb), udptable); } struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table); } EXPORT_SYMBOL_GPL(udp4_lib_lookup); static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif, unsigned short hnum) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net) || udp_sk(sk)->udp_port_hash != hnum || (inet->inet_daddr && inet->inet_daddr != rmt_addr) || (inet->inet_dport != rmt_port && inet->inet_dport) || (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) || ipv6_only_sock(sk) || (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) return false; if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) return false; return true; } static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif) { struct hlist_nulls_node *node; struct sock *s = sk; unsigned short hnum = ntohs(loc_port); sk_nulls_for_each_from(s, node) { if (__udp_is_mcast_sock(net, s, loc_port, loc_addr, 
rmt_port, rmt_addr, dif, hnum)) goto found; } s = NULL; found: return s; } /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. * Header points to the ip header of the error packet. We move * on past this. Then (as it used to claim before adjustment) * header points to the first 8 bytes of the udp header. We need * to find the appropriate port. */ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) { struct inet_sock *inet; const struct iphdr *iph = (const struct iphdr *)skb->data; struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; int harderr; int err; struct net *net = dev_net(skb->dev); sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex, udptable); if (sk == NULL) { ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); return; /* No socket for error */ } err = 0; harderr = 0; inet = inet_sk(sk); switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: goto out; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ ipv4_sk_update_pmtu(skb, sk, info); if (inet->pmtudisc != IP_PMTUDISC_DONT) { err = EMSGSIZE; harderr = 1; break; } goto out; } err = EHOSTUNREACH; if (code <= NR_ICMP_UNREACH) { harderr = icmp_err_convert[code].fatal; err = icmp_err_convert[code].errno; } break; case ICMP_REDIRECT: ipv4_sk_redirect(skb, sk); goto out; } /* * RFC1122: OK. Passes ICMP errors back to application, as per * 4.1.3.3. */ if (!inet->recverr) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; } else ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); } void udp_err(struct sk_buff *skb, u32 info) { __udp4_lib_err(skb, info, &udp_table); } /* * Throw away all pending data and cancel the corking. Socket is locked. */ void udp_flush_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); if (up->pending) { up->len = 0; up->pending = 0; ip_flush_pending_frames(sk); } } EXPORT_SYMBOL(udp_flush_pending_frames); /** * udp4_hwcsum - handle outgoing HW checksumming * @skb: sk_buff containing the filled-in UDP header * (checksum field must be zeroed out) * @src: source IP address * @dst: destination IP address */ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) { struct udphdr *uh = udp_hdr(skb); struct sk_buff *frags = skb_shinfo(skb)->frag_list; int offset = skb_transport_offset(skb); int len = skb->len - offset; int hlen = len; __wsum csum = 0; if (!frags) { /* * Only one fragment on the socket. 
*/ skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); } else { /* * HW-checksum won't work as there are two or more * fragments on the socket so that all csums of sk_buffs * should be together */ do { csum = csum_add(csum, frags->csum); hlen -= frags->len; } while ((frags = frags->next)); csum = skb_checksum(skb, offset, hlen, csum); skb->ip_summed = CHECKSUM_NONE; uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } } EXPORT_SYMBOL_GPL(udp4_hwcsum); static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) { struct sock *sk = skb->sk; struct inet_sock *inet = inet_sk(sk); struct udphdr *uh; int err = 0; int is_udplite = IS_UDPLITE(sk); int offset = skb_transport_offset(skb); int len = skb->len - offset; __wsum csum = 0; /* * Create a UDP header */ uh = udp_hdr(skb); uh->source = inet->inet_sport; uh->dest = fl4->fl4_dport; uh->len = htons(len); uh->check = 0; if (is_udplite) /* UDP-Lite */ csum = udplite_csum(skb); else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ skb->ip_summed = CHECKSUM_NONE; goto send; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ udp4_hwcsum(skb, fl4->saddr, fl4->daddr); goto send; } else csum = udp_csum(skb); /* add protocol-dependent pseudo-header */ uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len, sk->sk_protocol, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; send: err = ip_send_skb(sock_net(sk), skb); if (err) { if (err == -ENOBUFS && !inet->recverr) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); err = 0; } } else UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_OUTDATAGRAMS, is_udplite); return err; } /* * Push out all pending data as one UDP datagram. Socket is locked. */ int udp_push_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); struct flowi4 *fl4 = &inet->cork.fl.u.ip4; struct sk_buff *skb; int err = 0; skb = ip_finish_skb(sk, fl4); if (!skb) goto out; err = udp_send_skb(skb, fl4); out: up->len = 0; up->pending = 0; return err; } EXPORT_SYMBOL(udp_push_pending_frames); int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct udp_sock *up = udp_sk(sk); struct flowi4 fl4_stack; struct flowi4 *fl4; int ulen = len; struct ipcm_cookie ipc; struct rtable *rt = NULL; int free = 0; int connected = 0; __be32 daddr, faddr, saddr; __be16 dport; u8 tos; int err, is_udplite = IS_UDPLITE(sk); int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); struct sk_buff *skb; struct ip_options_data opt_copy; if (len > 0xFFFF) return -EMSGSIZE; /* * Check the flags. */ if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ return -EOPNOTSUPP; ipc.opt = NULL; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; fl4 = &inet->cork.fl.u.ip4; if (up->pending) { /* * There are pending frames. * The socket lock must be held while it's corked. */ lock_sock(sk); if (likely(up->pending)) { if (unlikely(up->pending != AF_INET)) { release_sock(sk); return -EINVAL; } goto do_append_data; } release_sock(sk); } ulen += sizeof(struct udphdr); /* * Get and verify the address. 
*/ if (msg->msg_name) { struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) { if (usin->sin_family != AF_UNSPEC) return -EAFNOSUPPORT; } daddr = usin->sin_addr.s_addr; dport = usin->sin_port; if (dport == 0) return -EINVAL; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = inet->inet_daddr; dport = inet->inet_dport; /* Open fast path for connected socket. Route will not be used, if at least one option is set. */ connected = 1; } ipc.addr = inet->inet_saddr; ipc.oif = sk->sk_bound_dev_if; sock_tx_timestamp(sk, &ipc.tx_flags); if (msg->msg_controllen) { err = ip_cmsg_send(sock_net(sk), msg, &ipc); if (err) return err; if (ipc.opt) free = 1; connected = 0; } if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } saddr = ipc.addr; ipc.addr = faddr = daddr; if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) return -EINVAL; faddr = ipc.opt->opt.faddr; connected = 0; } tos = get_rttos(&ipc, inet); if (sock_flag(sk, SOCK_LOCALROUTE) || (msg->msg_flags & MSG_DONTROUTE) || (ipc.opt && ipc.opt->opt.is_strictroute)) { tos |= RTO_ONLINK; connected = 0; } if (ipv4_is_multicast(daddr)) { if (!ipc.oif) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; connected = 0; } else if (!ipc.oif) ipc.oif = inet->uc_index; if (connected) rt = (struct rtable *)sk_dst_check(sk, 0); if (rt == NULL) { struct net *net = sock_net(sk); fl4 = &fl4_stack; flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP, faddr, saddr, dport, inet->inet_sport); security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; if (err == -ENETUNREACH) IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); goto out; } err = -EACCES; if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) goto out; if (connected) sk_dst_set(sk, dst_clone(&rt->dst)); } if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: saddr = fl4->saddr; if (!ipc.addr) daddr = ipc.addr = fl4->daddr; /* Lockless fast path for the non-corking case. */ if (!corkreq) { skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen, sizeof(struct udphdr), &ipc, &rt, msg->msg_flags); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_send_skb(skb, fl4); goto out; } lock_sock(sk); if (unlikely(up->pending)) { /* The socket is already corked while preparing it. */ /* ... which is an evident application bug. --ANK */ release_sock(sk); LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n")); err = -EINVAL; goto out; } /* * Now cork the socket to pend data. */ fl4 = &inet->cork.fl.u.ip4; fl4->daddr = daddr; fl4->saddr = saddr; fl4->fl4_dport = dport; fl4->fl4_sport = inet->inet_sport; up->pending = AF_INET; do_append_data: up->len += ulen; err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen, sizeof(struct udphdr), &ipc, &rt, corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); if (err) udp_flush_pending_frames(sk); else if (!corkreq) err = udp_push_pending_frames(sk); else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) up->pending = 0; release_sock(sk); out: ip_rt_put(rt); if (free) kfree(ipc.opt); if (!err) return len; /* * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. 
Reporting * ENOBUFS might not be good (it's not tunable per se), but otherwise * we don't have a good statistic (IpOutDiscards but it can be too many * things). We could add another new stat but at least for now that * seems like overkill. */ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); } return err; do_confirm: dst_confirm(&rt->dst); if (!(msg->msg_flags&MSG_PROBE) || len) goto back_from_confirm; err = 0; goto out; } EXPORT_SYMBOL(udp_sendmsg); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct inet_sock *inet = inet_sk(sk); struct udp_sock *up = udp_sk(sk); int ret; if (!up->pending) { struct msghdr msg = { .msg_flags = flags|MSG_MORE }; /* Call udp_sendmsg to specify destination address which * sendpage interface can't pass. * This will succeed only when the socket is connected. */ ret = udp_sendmsg(NULL, sk, &msg, 0); if (ret < 0) return ret; } lock_sock(sk); if (unlikely(!up->pending)) { release_sock(sk); LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n")); return -EINVAL; } ret = ip_append_page(sk, &inet->cork.fl.u.ip4, page, offset, size, flags); if (ret == -EOPNOTSUPP) { release_sock(sk); return sock_no_sendpage(sk->sk_socket, page, offset, size, flags); } if (ret < 0) { udp_flush_pending_frames(sk); goto out; } up->len += size; if (!(up->corkflag || (flags&MSG_MORE))) ret = udp_push_pending_frames(sk); if (!ret) ret = size; out: release_sock(sk); return ret; } /** * first_packet_length - return length of first packet in receive queue * @sk: socket * * Drops all bad checksum frames, until a valid one is found. * Returns the length of found skb, or 0 if none is found. */ static unsigned int first_packet_length(struct sock *sk) { struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; struct sk_buff *skb; unsigned int res; __skb_queue_head_init(&list_kill); spin_lock_bh(&rcvq->lock); while ((skb = skb_peek(rcvq)) != NULL && udp_lib_checksum_complete(skb)) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, IS_UDPLITE(sk)); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); atomic_inc(&sk->sk_drops); __skb_unlink(skb, rcvq); __skb_queue_tail(&list_kill, skb); } res = skb ? skb->len : 0; spin_unlock_bh(&rcvq->lock); if (!skb_queue_empty(&list_kill)) { bool slow = lock_sock_fast(sk); __skb_queue_purge(&list_kill); sk_mem_reclaim_partial(sk); unlock_sock_fast(sk, slow); } return res; } /* * IOCTL requests applicable to the UDP protocol */ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { unsigned int amount = first_packet_length(sk); if (amount) /* * We will only return the amount * of this packet since that is all * that will be read. */ amount -= sizeof(struct udphdr); return put_user(amount, (int __user *)arg); } default: return -ENOIOCTLCMD; } return 0; } EXPORT_SYMBOL(udp_ioctl); /* * This should be easy, if there is something there we * return it, otherwise we block. 
*/ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); bool slow; /* * Check any passed addresses */ if (addr_len) *addr_len = sizeof(*sin); if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; } int udp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); /* * 1003.1g - break association. 
*/ sk->sk_state = TCP_CLOSE; inet->inet_daddr = 0; inet->inet_dport = 0; sock_rps_reset_rxhash(sk); sk->sk_bound_dev_if = 0; if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { sk->sk_prot->unhash(sk); inet->inet_sport = 0; } sk_dst_reset(sk); return 0; } EXPORT_SYMBOL(udp_disconnect); void udp_lib_unhash(struct sock *sk) { if (sk_hashed(sk)) { struct udp_table *udptable = sk->sk_prot->h.udp_table; struct udp_hslot *hslot, *hslot2; hslot = udp_hashslot(udptable, sock_net(sk), udp_sk(sk)->udp_port_hash); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock_bh(&hslot->lock); if (sk_nulls_del_node_init_rcu(sk)) { hslot->count--; inet_sk(sk)->inet_num = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_lock(&hslot2->lock); hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); } spin_unlock_bh(&hslot->lock); } } EXPORT_SYMBOL(udp_lib_unhash); /* * inet_rcv_saddr was changed, we must rehash secondary hash */ void udp_lib_rehash(struct sock *sk, u16 newhash) { if (sk_hashed(sk)) { struct udp_table *udptable = sk->sk_prot->h.udp_table; struct udp_hslot *hslot, *hslot2, *nhslot2; hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); nhslot2 = udp_hashslot2(udptable, newhash); udp_sk(sk)->udp_portaddr_hash = newhash; if (hslot2 != nhslot2) { hslot = udp_hashslot(udptable, sock_net(sk), udp_sk(sk)->udp_port_hash); /* we must lock primary chain too */ spin_lock_bh(&hslot->lock); spin_lock(&hslot2->lock); hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); spin_lock(&nhslot2->lock); hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &nhslot2->head); nhslot2->count++; spin_unlock(&nhslot2->lock); spin_unlock_bh(&hslot->lock); } } } EXPORT_SYMBOL(udp_lib_rehash); static void udp_v4_rehash(struct sock *sk) { u16 new_hash = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_num); udp_lib_rehash(sk, new_hash); } static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; if (inet_sk(sk)->inet_daddr) { sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); } rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); trace_udp_fail_queue_rcv_skb(rc, sk); return -1; } return 0; } static struct static_key udp_encap_needed __read_mostly; void udp_encap_enable(void) { if (!static_key_enabled(&udp_encap_needed)) static_key_slow_inc(&udp_encap_needed); } EXPORT_SYMBOL(udp_encap_enable); /* returns: * -1: error * 0: success * >0: "udp encap" protocol resubmission * * Note that in the success and error cases, the skb is assumed to * have either been requeued or freed. */ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); int rc; int is_udplite = IS_UDPLITE(sk); /* * Charge it to the socket, dropping if the queue is full. */ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; nf_reset(skb); if (static_key_false(&udp_encap_needed) && up->encap_type) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* * This is an encapsulation socket so pass the skb to * the socket's udp_encap_rcv() hook. Otherwise, just * fall through and pass this up the UDP socket. 
* up->encap_rcv() returns the following value: * =0 if skb was successfully passed to the encap * handler or was discarded by it. * >0 if skb should be passed on to UDP. * <0 if skb should be resubmitted as proto -N */ /* if we're overly short, let UDP handle it */ encap_rcv = ACCESS_ONCE(up->encap_rcv); if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { int ret; ret = encap_rcv(sk, skb); if (ret <= 0) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); return -ret; } } /* FALLTHROUGH -- it's a UDP Packet */ } /* * UDP-Lite specific tests, ignored on UDP sockets */ if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { /* * MIB statistics other than incrementing the error count are * disabled for the following two types of errors: these depend * on the application settings, not on the functioning of the * protocol stack as such. * * RFC 3828 here recommends (sec 3.3): "There should also be a * way ... to ... at least let the receiving application block * delivery of packets with coverage values less than a value * provided by the application." */ if (up->pcrlen == 0) { /* full coverage was set */ LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n", UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } /* The next case involves violating the min. coverage requested * by the receiver. This is subtle: if receiver wants x and x is * greater than the buffersize/MTU then receiver will complain * that it wants x while sender emits packets of smaller size y. * Therefore the above ...()->partial_cov statement is essential. */ if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n", UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } if (rcu_access_pointer(sk->sk_filter) && udp_lib_checksum_complete(skb)) goto csum_error; if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) goto drop; rc = 0; ipv4_pktinfo_prepare(sk, skb); bh_lock_sock(sk); if (!sock_owned_by_user(sk)) rc = __udp_queue_rcv_skb(sk, skb); else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); goto drop; } bh_unlock_sock(sk); return rc; csum_error: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; } static void flush_stack(struct sock **stack, unsigned int count, struct sk_buff *skb, unsigned int final) { unsigned int i; struct sk_buff *skb1 = NULL; struct sock *sk; for (i = 0; i < count; i++) { sk = stack[i]; if (likely(skb1 == NULL)) skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); } if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) skb1 = NULL; } if (unlikely(skb1)) kfree_skb(skb1); } static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); dst_hold(dst); sk->sk_rx_dst = dst; } /* * Multicasts and broadcasts go to each listener. * * Note: called only from the BH handler context. 
*/ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, struct udphdr *uh, __be32 saddr, __be32 daddr, struct udp_table *udptable) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); int dif; unsigned int i, count = 0; spin_lock(&hslot->lock); sk = sk_nulls_head(&hslot->head); dif = skb->dev->ifindex; sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); while (sk) { stack[count++] = sk; sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, uh->source, saddr, dif); if (unlikely(count == ARRAY_SIZE(stack))) { if (!sk) break; flush_stack(stack, count, skb, ~0); count = 0; } } /* * before releasing chain lock, we must take a reference on sockets */ for (i = 0; i < count; i++) sock_hold(stack[i]); spin_unlock(&hslot->lock); /* * do the slow work with no lock held */ if (count) { flush_stack(stack, count, skb, count - 1); for (i = 0; i < count; i++) sock_put(stack[i]); } else { kfree_skb(skb); } return 0; } /* Initialize UDP checksum. If exited with zero value (success), * CHECKSUM_UNNECESSARY means, that no more checks are required. * Otherwise, csum completion requires chacksumming packet body, * including udp header and folding it to skb->csum. */ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) { const struct iphdr *iph; int err; UDP_SKB_CB(skb)->partial_cov = 0; UDP_SKB_CB(skb)->cscov = skb->len; if (proto == IPPROTO_UDPLITE) { err = udplite_checksum_init(skb, uh); if (err) return err; } iph = ip_hdr(skb); if (uh->check == 0) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } if (!skb_csum_unnecessary(skb)) skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, proto, 0); /* Probably, we should checksum udp header (it should be in cache * in any case) and data in tiny packets (< rx copybreak). */ return 0; } /* * All we need to do is get the socket, and then do a checksum. */ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { struct sock *sk; struct udphdr *uh; unsigned short ulen; struct rtable *rt = skb_rtable(skb); __be32 saddr, daddr; struct net *net = dev_net(skb->dev); /* * Validate the packet. */ if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto drop; /* No space for header. */ uh = udp_hdr(skb); ulen = ntohs(uh->len); saddr = ip_hdr(skb)->saddr; daddr = ip_hdr(skb)->daddr; if (ulen > skb->len) goto short_packet; if (proto == IPPROTO_UDP) { /* UDP validates ulen. 
*/ if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) goto short_packet; uh = udp_hdr(skb); } if (udp4_csum_init(skb, uh, proto)) goto csum_error; if (skb->sk) { int ret; sk = skb->sk; if (unlikely(sk->sk_rx_dst == NULL)) udp_sk_rx_dst_set(sk, skb); ret = udp_queue_rcv_skb(sk, skb); /* a return value > 0 means to resubmit the input, but * it wants the return to be -protocol, or 0 */ if (ret > 0) return -ret; return 0; } else { if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) return __udp4_lib_mcast_deliver(net, skb, uh, saddr, daddr, udptable); sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); } if (sk != NULL) { int ret; ret = udp_queue_rcv_skb(sk, skb); sock_put(sk); /* a return value > 0 means to resubmit the input, but * it wants the return to be -protocol, or 0 */ if (ret > 0) return -ret; return 0; } if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; nf_reset(skb); /* No socket. Drop packet silently, if checksum is wrong */ if (udp_lib_checksum_complete(skb)) goto csum_error; UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); /* * Hmm. We got an UDP packet to a port to which we * don't wanna listen. Ignore it. */ kfree_skb(skb); return 0; short_packet: LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", proto == IPPROTO_UDPLITE ? "Lite" : "", &saddr, ntohs(uh->source), ulen, skb->len, &daddr, ntohs(uh->dest)); goto drop; csum_error: /* * RFC1122: OK. Discards the bad packet silently (as far as * the network is concerned, anyway) as per 4.1.3.4 (MUST). */ LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", proto == IPPROTO_UDPLITE ? "Lite" : "", &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), ulen); UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); drop: UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; } /* We can only early demux multicast if there is a single matching socket. * If more than one socket found returns NULL */ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(loc_port); unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask); struct udp_hslot *hslot = &udp_table.hash[slot]; rcu_read_lock(); begin: count = 0; result = NULL; sk_nulls_for_each_rcu(sk, node, &hslot->head) { if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr, rmt_port, rmt_addr, dif, hnum)) { result = sk; ++count; } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; if (result) { if (count != 1 || unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(!__udp_is_mcast_sock(net, result, loc_port, loc_addr, rmt_port, rmt_addr, dif, hnum))) { sock_put(result); result = NULL; } } rcu_read_unlock(); return result; } /* For unicast we should only early demux connected sockets or we can * break forwarding setups. The chains here can be long so only check * if the first socket is an exact match and if not move on. 
*/ static struct sock *__udp4_lib_demux_lookup(struct net *net, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(loc_port); unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum); unsigned int slot2 = hash2 & udp_table.mask; struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr) const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); rcu_read_lock(); result = NULL; udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { if (INET_MATCH(sk, net, acookie, rmt_addr, loc_addr, ports, dif)) result = sk; /* Only check first socket in chain */ break; } if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(!INET_MATCH(sk, net, acookie, rmt_addr, loc_addr, ports, dif))) { sock_put(result); result = NULL; } } rcu_read_unlock(); return result; } void udp_v4_early_demux(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); const struct udphdr *uh = udp_hdr(skb); struct sock *sk; struct dst_entry *dst; struct net *net = dev_net(skb->dev); int dif = skb->dev->ifindex; /* validate the packet */ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) return; if (skb->pkt_type == PACKET_BROADCAST || skb->pkt_type == PACKET_MULTICAST) sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); else if (skb->pkt_type == PACKET_HOST) sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); else return; if (!sk) return; skb->sk = sk; skb->destructor = sock_edemux; dst = sk->sk_rx_dst; if (dst) dst = dst_check(dst, 0); if (dst) skb_dst_set_noref(skb, dst); } int udp_rcv(struct sk_buff *skb) { return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP); } void udp_destroy_sock(struct sock *sk) { struct udp_sock *up = udp_sk(sk); bool slow = lock_sock_fast(sk); udp_flush_pending_frames(sk); unlock_sock_fast(sk, slow); if (static_key_false(&udp_encap_needed) && up->encap_type) { void (*encap_destroy)(struct sock *sk); encap_destroy = ACCESS_ONCE(up->encap_destroy); if (encap_destroy) encap_destroy(sk); } } /* * Socket option code for UDP */ int udp_lib_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen, int (*push_pending_frames)(struct sock *)) { struct udp_sock *up = udp_sk(sk); int val; int err = 0; int is_udplite = IS_UDPLITE(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; switch (optname) { case UDP_CORK: if (val != 0) { up->corkflag = 1; } else { up->corkflag = 0; lock_sock(sk); (*push_pending_frames)(sk); release_sock(sk); } break; case UDP_ENCAP: switch (val) { case 0: case UDP_ENCAP_ESPINUDP: case UDP_ENCAP_ESPINUDP_NON_IKE: up->encap_rcv = xfrm4_udp_encap_rcv; /* FALLTHROUGH */ case UDP_ENCAP_L2TPINUDP: up->encap_type = val; udp_encap_enable(); break; default: err = -ENOPROTOOPT; break; } break; /* * UDP-Lite's partial checksum coverage (RFC 3828). */ /* The sender sets actual checksum coverage length via this option. * The case coverage > packet length is handled by send module. 
 */
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The User Datagram Protocol (UDP). * * Authors: Ross Biro * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> * Arnt Gulbrandsen, <agulbra@nvg.unit.no> * Alan Cox, <alan@lxorguk.ukuu.org.uk> * Hirokazu Takahashi, <taka@valinux.co.jp> * * Fixes: * Alan Cox : verify_area() calls * Alan Cox : stopped close while in use off icmp * messages. Not a fix but a botch that * for udp at least is 'valid'. * Alan Cox : Fixed icmp handling properly * Alan Cox : Correct error for oversized datagrams * Alan Cox : Tidied select() semantics. * Alan Cox : udp_err() fixed properly, also now * select and read wake correctly on errors * Alan Cox : udp_send verify_area moved to avoid mem leak * Alan Cox : UDP can count its memory * Alan Cox : send to an unknown connection causes * an ECONNREFUSED off the icmp, but * does NOT close. * Alan Cox : Switched to new sk_buff handlers. No more backlog! * Alan Cox : Using generic datagram code. Even smaller and the PEEK * bug no longer crashes it. * Fred Van Kempen : Net2e support for sk->broadcast. * Alan Cox : Uses skb_free_datagram * Alan Cox : Added get/set sockopt support. * Alan Cox : Broadcasting without option set returns EACCES. * Alan Cox : No wakeup calls. Instead we now use the callbacks. * Alan Cox : Use ip_tos and ip_ttl * Alan Cox : SNMP Mibs * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support. * Matt Dillon : UDP length checks. * Alan Cox : Smarter af_inet used properly. * Alan Cox : Use new kernel side addressing. * Alan Cox : Incorrect return on truncated datagram receive. * Arnt Gulbrandsen : New udp_send and stuff * Alan Cox : Cache last socket * Alan Cox : Route cache * Jon Peatfield : Minor efficiency fix to sendto(). * Mike Shaver : RFC1122 checks. * Alan Cox : Nonblocking error fix. * Willy Konynenberg : Transparent proxying support. * Mike McLagan : Routing by source * David S. Miller : New socket lookup architecture. * Last socket cache retained as it * does have a high hit rate. * Olaf Kirch : Don't linearise iovec on sendmsg. * Andi Kleen : Some cleanups, cache destination entry * for connect. * Vitaly E. Lavrov : Transparent proxy revived after year coma. * Melvin Smith : Check msg_name not msg_namelen in sendto(), * return ENOTCONN for unconnected sockets (POSIX) * Janos Farkas : don't deliver multi/broadcasts to a different * bound-to-device socket * Hirokazu Takahashi : HW checksumming for outgoing UDP * datagrams. * Hirokazu Takahashi : sendfile() on UDP works now. * Arnaldo C. Melo : convert /proc/net/udp to seq_file * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind * a single port at the same time. * Derek Atkins <derek@ihtfp.com>: Add Encapulation Support * James Chapman : Add L2TP encapsulation type. * * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ #define pr_fmt(fmt) "UDP: " fmt #include <asm/uaccess.h> #include <asm/ioctls.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/module.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/igmp.h> #include <linux/in.h> #include <linux/errno.h> #include <linux/timer.h> #include <linux/mm.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <net/tcp_states.h> #include <linux/skbuff.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <net/net_namespace.h> #include <net/icmp.h> #include <net/inet_hashtables.h> #include <net/route.h> #include <net/checksum.h> #include <net/xfrm.h> #include <trace/events/udp.h> #include <linux/static_key.h> #include <trace/events/skb.h> #include <net/busy_poll.h> #include "udp_impl.h" struct udp_table udp_table __read_mostly; EXPORT_SYMBOL(udp_table); long sysctl_udp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_udp_mem); int sysctl_udp_rmem_min __read_mostly; EXPORT_SYMBOL(sysctl_udp_rmem_min); int sysctl_udp_wmem_min __read_mostly; EXPORT_SYMBOL(sysctl_udp_wmem_min); atomic_long_t udp_memory_allocated; EXPORT_SYMBOL(udp_memory_allocated); #define MAX_UDP_PORTS 65536 #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN) static int udp_lib_lport_inuse(struct net *net, __u16 num, const struct udp_hslot *hslot, unsigned long *bitmap, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2), unsigned int log) { struct sock *sk2; struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); sk_nulls_for_each(sk2, node, &hslot->head) if (net_eq(sock_net(sk2), net) && sk2 != sk && (bitmap || udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || !uid_eq(uid, sock_i_uid(sk2))) && (*saddr_comp)(sk, sk2)) { if (bitmap) __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap); else return 1; } return 0; } /* * Note: we still hold spinlock of primary hash chain, so no other writer * can insert/delete a socket with local_port == num */ static int udp_lib_lport_inuse2(struct net *net, __u16 num, struct udp_hslot *hslot2, struct sock *sk, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2)) { struct sock *sk2; struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); int res = 0; spin_lock(&hslot2->lock); udp_portaddr_for_each_entry(sk2, node, &hslot2->head) if (net_eq(sock_net(sk2), net) && sk2 != sk && (udp_sk(sk2)->udp_port_hash == num) && (!sk2->sk_reuse || !sk->sk_reuse) && (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || !uid_eq(uid, sock_i_uid(sk2))) && (*saddr_comp)(sk, sk2)) { res = 1; break; } spin_unlock(&hslot2->lock); return res; } /** * udp_lib_get_port - UDP/-Lite port lookup for IPv4 and IPv6 * * @sk: socket struct in question * @snum: port number to look up * @saddr_comp: AF-dependent comparison of bound local IP addresses * @hash2_nulladdr: AF-dependent hash value in secondary hash chains, * with NULL address */ int udp_lib_get_port(struct sock *sk, unsigned short snum, int (*saddr_comp)(const struct sock *sk1, const struct sock *sk2), unsigned int hash2_nulladdr) { struct udp_hslot *hslot, *hslot2; struct udp_table *udptable = sk->sk_prot->h.udp_table; int error = 1; struct net *net = sock_net(sk); if 
(!snum) { int low, high, remaining; unsigned int rand; unsigned short first, last; DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); inet_get_local_port_range(net, &low, &high); remaining = (high - low) + 1; rand = net_random(); first = (((u64)rand * remaining) >> 32) + low; /* * force rand to be an odd multiple of UDP_HTABLE_SIZE */ rand = (rand | 1) * (udptable->mask + 1); last = first + udptable->mask + 1; do { hslot = udp_hashslot(udptable, net, first); bitmap_zero(bitmap, PORTS_PER_CHAIN); spin_lock_bh(&hslot->lock); udp_lib_lport_inuse(net, snum, hslot, bitmap, sk, saddr_comp, udptable->log); snum = first; /* * Iterate on all possible values of snum for this hash. * Using steps of an odd multiple of UDP_HTABLE_SIZE * give us randomization and full range coverage. */ do { if (low <= snum && snum <= high && !test_bit(snum >> udptable->log, bitmap) && !inet_is_reserved_local_port(snum)) goto found; snum += rand; } while (snum != first); spin_unlock_bh(&hslot->lock); } while (++first != last); goto fail; } else { hslot = udp_hashslot(udptable, net, snum); spin_lock_bh(&hslot->lock); if (hslot->count > 10) { int exist; unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum; slot2 &= udptable->mask; hash2_nulladdr &= udptable->mask; hslot2 = udp_hashslot2(udptable, slot2); if (hslot->count < hslot2->count) goto scan_primary_hash; exist = udp_lib_lport_inuse2(net, snum, hslot2, sk, saddr_comp); if (!exist && (hash2_nulladdr != slot2)) { hslot2 = udp_hashslot2(udptable, hash2_nulladdr); exist = udp_lib_lport_inuse2(net, snum, hslot2, sk, saddr_comp); } if (exist) goto fail_unlock; else goto found; } scan_primary_hash: if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, saddr_comp, 0)) goto fail_unlock; } found: inet_sk(sk)->inet_num = snum; udp_sk(sk)->udp_port_hash = snum; udp_sk(sk)->udp_portaddr_hash ^= snum; if (sk_unhashed(sk)) { sk_nulls_add_node_rcu(sk, &hslot->head); hslot->count++; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock(&hslot2->lock); hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &hslot2->head); hslot2->count++; spin_unlock(&hslot2->lock); } error = 0; fail_unlock: spin_unlock_bh(&hslot->lock); fail: return error; } EXPORT_SYMBOL(udp_lib_get_port); static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) { struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); return (!ipv6_only_sock(sk2) && (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr || inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)); } static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, unsigned int port) { return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port; } int udp_v4_get_port(struct sock *sk, unsigned short snum) { unsigned int hash2_nulladdr = udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum); unsigned int hash2_partial = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); /* precompute partial secondary hash */ udp_sk(sk)->udp_portaddr_hash = hash2_partial; return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr); } static inline int compute_score(struct sock *sk, struct net *net, __be32 saddr, unsigned short hnum, __be16 sport, __be32 daddr, __be16 dport, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && !ipv6_only_sock(sk)) { struct inet_sock *inet = inet_sk(sk); score = (sk->sk_family == PF_INET ? 
2 : 1); if (inet->inet_rcv_saddr) { if (inet->inet_rcv_saddr != daddr) return -1; score += 4; } if (inet->inet_daddr) { if (inet->inet_daddr != saddr) return -1; score += 4; } if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score += 4; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; } } return score; } /* * In this second variant, we check (daddr, dport) matches (inet_rcv_sadd, inet_num) */ static inline int compute_score2(struct sock *sk, struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned int hnum, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && !ipv6_only_sock(sk)) { struct inet_sock *inet = inet_sk(sk); if (inet->inet_rcv_saddr != daddr) return -1; if (inet->inet_num != hnum) return -1; score = (sk->sk_family == PF_INET ? 2 : 1); if (inet->inet_daddr) { if (inet->inet_daddr != saddr) return -1; score += 4; } if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score += 4; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score += 4; } } return score; } static unsigned int udp_ehashfn(struct net *net, const __be32 laddr, const __u16 lport, const __be32 faddr, const __be16 fport) { static u32 udp_ehash_secret __read_mostly; net_get_random_once(&udp_ehash_secret, sizeof(udp_ehash_secret)); return __inet_ehashfn(laddr, lport, faddr, fport, udp_ehash_secret + net_hash_mix(net)); } /* called with read_rcu_lock() */ static struct sock *udp4_lib_lookup2(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, unsigned int hnum, int dif, struct udp_hslot *hslot2, unsigned int slot2) { struct sock *sk, *result; struct hlist_nulls_node *node; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; begin: result = NULL; badness = 0; udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { score = compute_score2(sk, net, saddr, sport, daddr, hnum, dif); if (score > badness) { result = sk; badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } } else if (score == badness && reuseport) { matches++; if (((u64)hash * matches) >> 32 == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot2) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score2(result, net, saddr, sport, daddr, hnum, dif) < badness)) { sock_put(result); goto begin; } } return result; } /* UDP is nearly always wildcards out the wazoo, it makes no sense to try * harder than this. 
-DaveM */ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif, struct udp_table *udptable) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness, matches = 0, reuseport = 0; u32 hash = 0; rcu_read_lock(); if (hslot->count > 10) { hash2 = udp4_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, daddr, hnum, dif, hslot2, slot2); if (!result) { hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp4_lib_lookup2(net, saddr, sport, htonl(INADDR_ANY), hnum, dif, hslot2, slot2); } rcu_read_unlock(); return result; } begin: result = NULL; badness = 0; sk_nulls_for_each_rcu(sk, node, &hslot->head) { score = compute_score(sk, net, saddr, hnum, sport, daddr, dport, dif); if (score > badness) { result = sk; badness = score; reuseport = sk->sk_reuseport; if (reuseport) { hash = udp_ehashfn(net, daddr, hnum, saddr, sport); matches = 1; } } else if (score == badness && reuseport) { matches++; if (((u64)hash * matches) >> 32 == 0) result = sk; hash = next_pseudo_random32(hash); } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score(result, net, saddr, hnum, sport, daddr, dport, dif) < badness)) { sock_put(result); goto begin; } } rcu_read_unlock(); return result; } EXPORT_SYMBOL_GPL(__udp4_lib_lookup); static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { struct sock *sk; const struct iphdr *iph = ip_hdr(skb); if (unlikely(sk = skb_steal_sock(skb))) return sk; else return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, iph->daddr, dport, inet_iif(skb), udptable); } struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, __be32 daddr, __be16 dport, int dif) { return __udp4_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table); } EXPORT_SYMBOL_GPL(udp4_lib_lookup); static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif, unsigned short hnum) { struct inet_sock *inet = inet_sk(sk); if (!net_eq(sock_net(sk), net) || udp_sk(sk)->udp_port_hash != hnum || (inet->inet_daddr && inet->inet_daddr != rmt_addr) || (inet->inet_dport != rmt_port && inet->inet_dport) || (inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) || ipv6_only_sock(sk) || (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)) return false; if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif)) return false; return true; } static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif) { struct hlist_nulls_node *node; struct sock *s = sk; unsigned short hnum = ntohs(loc_port); sk_nulls_for_each_from(s, node) { if (__udp_is_mcast_sock(net, s, loc_port, loc_addr, 
rmt_port, rmt_addr, dif, hnum)) goto found; } s = NULL; found: return s; } /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. * Header points to the ip header of the error packet. We move * on past this. Then (as it used to claim before adjustment) * header points to the first 8 bytes of the udp header. We need * to find the appropriate port. */ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) { struct inet_sock *inet; const struct iphdr *iph = (const struct iphdr *)skb->data; struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); const int type = icmp_hdr(skb)->type; const int code = icmp_hdr(skb)->code; struct sock *sk; int harderr; int err; struct net *net = dev_net(skb->dev); sk = __udp4_lib_lookup(net, iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex, udptable); if (sk == NULL) { ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); return; /* No socket for error */ } err = 0; harderr = 0; inet = inet_sk(sk); switch (type) { default: case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; case ICMP_SOURCE_QUENCH: goto out; case ICMP_PARAMETERPROB: err = EPROTO; harderr = 1; break; case ICMP_DEST_UNREACH: if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ ipv4_sk_update_pmtu(skb, sk, info); if (inet->pmtudisc != IP_PMTUDISC_DONT) { err = EMSGSIZE; harderr = 1; break; } goto out; } err = EHOSTUNREACH; if (code <= NR_ICMP_UNREACH) { harderr = icmp_err_convert[code].fatal; err = icmp_err_convert[code].errno; } break; case ICMP_REDIRECT: ipv4_sk_redirect(skb, sk); goto out; } /* * RFC1122: OK. Passes ICMP errors back to application, as per * 4.1.3.3. */ if (!inet->recverr) { if (!harderr || sk->sk_state != TCP_ESTABLISHED) goto out; } else ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); } void udp_err(struct sk_buff *skb, u32 info) { __udp4_lib_err(skb, info, &udp_table); } /* * Throw away all pending data and cancel the corking. Socket is locked. */ void udp_flush_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); if (up->pending) { up->len = 0; up->pending = 0; ip_flush_pending_frames(sk); } } EXPORT_SYMBOL(udp_flush_pending_frames); /** * udp4_hwcsum - handle outgoing HW checksumming * @skb: sk_buff containing the filled-in UDP header * (checksum field must be zeroed out) * @src: source IP address * @dst: destination IP address */ void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst) { struct udphdr *uh = udp_hdr(skb); struct sk_buff *frags = skb_shinfo(skb)->frag_list; int offset = skb_transport_offset(skb); int len = skb->len - offset; int hlen = len; __wsum csum = 0; if (!frags) { /* * Only one fragment on the socket. 
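 *
 * The device is then asked to finish the job (CHECKSUM_PARTIAL):
 * csum_start/csum_offset below locate the UDP check field, which is
 * seeded with the inverted pseudo-header sum. Conceptually the hardware
 * computes (illustrative sketch only, not real driver code):
 *
 *	sum   = csum_partial(skb->head + skb->csum_start,
 *			     skb->len - skb->csum_start, 0);
 *	check = csum_fold(sum);		(written at csum_start + csum_offset)
 *
 * The pre-seeded pseudo-header value makes the folded result come out
 * as the correct UDP checksum.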
*/ skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct udphdr, check); uh->check = ~csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, 0); } else { /* * HW-checksum won't work as there are two or more * fragments on the socket so that all csums of sk_buffs * should be together */ do { csum = csum_add(csum, frags->csum); hlen -= frags->len; } while ((frags = frags->next)); csum = skb_checksum(skb, offset, hlen, csum); skb->ip_summed = CHECKSUM_NONE; uh->check = csum_tcpudp_magic(src, dst, len, IPPROTO_UDP, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } } EXPORT_SYMBOL_GPL(udp4_hwcsum); static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) { struct sock *sk = skb->sk; struct inet_sock *inet = inet_sk(sk); struct udphdr *uh; int err = 0; int is_udplite = IS_UDPLITE(sk); int offset = skb_transport_offset(skb); int len = skb->len - offset; __wsum csum = 0; /* * Create a UDP header */ uh = udp_hdr(skb); uh->source = inet->inet_sport; uh->dest = fl4->fl4_dport; uh->len = htons(len); uh->check = 0; if (is_udplite) /* UDP-Lite */ csum = udplite_csum(skb); else if (sk->sk_no_check == UDP_CSUM_NOXMIT) { /* UDP csum disabled */ skb->ip_summed = CHECKSUM_NONE; goto send; } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ udp4_hwcsum(skb, fl4->saddr, fl4->daddr); goto send; } else csum = udp_csum(skb); /* add protocol-dependent pseudo-header */ uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len, sk->sk_protocol, csum); if (uh->check == 0) uh->check = CSUM_MANGLED_0; send: err = ip_send_skb(sock_net(sk), skb); if (err) { if (err == -ENOBUFS && !inet->recverr) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); err = 0; } } else UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_OUTDATAGRAMS, is_udplite); return err; } /* * Push out all pending data as one UDP datagram. Socket is locked. */ int udp_push_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); struct flowi4 *fl4 = &inet->cork.fl.u.ip4; struct sk_buff *skb; int err = 0; skb = ip_finish_skb(sk, fl4); if (!skb) goto out; err = udp_send_skb(skb, fl4); out: up->len = 0; up->pending = 0; return err; } EXPORT_SYMBOL(udp_push_pending_frames); int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { struct inet_sock *inet = inet_sk(sk); struct udp_sock *up = udp_sk(sk); struct flowi4 fl4_stack; struct flowi4 *fl4; int ulen = len; struct ipcm_cookie ipc; struct rtable *rt = NULL; int free = 0; int connected = 0; __be32 daddr, faddr, saddr; __be16 dport; u8 tos; int err, is_udplite = IS_UDPLITE(sk); int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); struct sk_buff *skb; struct ip_options_data opt_copy; if (len > 0xFFFF) return -EMSGSIZE; /* * Check the flags. */ if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */ return -EOPNOTSUPP; ipc.opt = NULL; ipc.tx_flags = 0; ipc.ttl = 0; ipc.tos = -1; getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; fl4 = &inet->cork.fl.u.ip4; if (up->pending) { /* * There are pending frames. * The socket lock must be held while it's corked. */ lock_sock(sk); if (likely(up->pending)) { if (unlikely(up->pending != AF_INET)) { release_sock(sk); return -EINVAL; } goto do_append_data; } release_sock(sk); } ulen += sizeof(struct udphdr); /* * Get and verify the address. 
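 *
 * For reference, a userspace call that arrives here with an address
 * attached looks roughly like this (sketch; 192.0.2.7:9999 is a made-up
 * destination):
 *
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(9999),
 *	};
 *	inet_pton(AF_INET, "192.0.2.7", &dst.sin_addr);
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
 *
 * Without an address, send() on an unconnected socket fails with
 * EDESTADDRREQ, matching the branch below.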
*/ if (msg->msg_name) { struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name; if (msg->msg_namelen < sizeof(*usin)) return -EINVAL; if (usin->sin_family != AF_INET) { if (usin->sin_family != AF_UNSPEC) return -EAFNOSUPPORT; } daddr = usin->sin_addr.s_addr; dport = usin->sin_port; if (dport == 0) return -EINVAL; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = inet->inet_daddr; dport = inet->inet_dport; /* Open fast path for connected socket. Route will not be used, if at least one option is set. */ connected = 1; } ipc.addr = inet->inet_saddr; ipc.oif = sk->sk_bound_dev_if; sock_tx_timestamp(sk, &ipc.tx_flags); if (msg->msg_controllen) { err = ip_cmsg_send(sock_net(sk), msg, &ipc); if (err) return err; if (ipc.opt) free = 1; connected = 0; } if (!ipc.opt) { struct ip_options_rcu *inet_opt; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt) { memcpy(&opt_copy, inet_opt, sizeof(*inet_opt) + inet_opt->opt.optlen); ipc.opt = &opt_copy.opt; } rcu_read_unlock(); } saddr = ipc.addr; ipc.addr = faddr = daddr; if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) return -EINVAL; faddr = ipc.opt->opt.faddr; connected = 0; } tos = get_rttos(&ipc, inet); if (sock_flag(sk, SOCK_LOCALROUTE) || (msg->msg_flags & MSG_DONTROUTE) || (ipc.opt && ipc.opt->opt.is_strictroute)) { tos |= RTO_ONLINK; connected = 0; } if (ipv4_is_multicast(daddr)) { if (!ipc.oif) ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; connected = 0; } else if (!ipc.oif) ipc.oif = inet->uc_index; if (connected) rt = (struct rtable *)sk_dst_check(sk, 0); if (rt == NULL) { struct net *net = sock_net(sk); fl4 = &fl4_stack; flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP, faddr, saddr, dport, inet->inet_sport); security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); rt = ip_route_output_flow(net, fl4, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; if (err == -ENETUNREACH) IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); goto out; } err = -EACCES; if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) goto out; if (connected) sk_dst_set(sk, dst_clone(&rt->dst)); } if (msg->msg_flags&MSG_CONFIRM) goto do_confirm; back_from_confirm: saddr = fl4->saddr; if (!ipc.addr) daddr = ipc.addr = fl4->daddr; /* Lockless fast path for the non-corking case. */ if (!corkreq) { skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen, sizeof(struct udphdr), &ipc, &rt, msg->msg_flags); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_send_skb(skb, fl4); goto out; } lock_sock(sk); if (unlikely(up->pending)) { /* The socket is already corked while preparing it. */ /* ... which is an evident application bug. --ANK */ release_sock(sk); LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n")); err = -EINVAL; goto out; } /* * Now cork the socket to pend data. */ fl4 = &inet->cork.fl.u.ip4; fl4->daddr = daddr; fl4->saddr = saddr; fl4->fl4_dport = dport; fl4->fl4_sport = inet->inet_sport; up->pending = AF_INET; do_append_data: up->len += ulen; err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen, sizeof(struct udphdr), &ipc, &rt, corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); if (err) udp_flush_pending_frames(sk); else if (!corkreq) err = udp_push_pending_frames(sk); else if (unlikely(skb_queue_empty(&sk->sk_write_queue))) up->pending = 0; release_sock(sk); out: ip_rt_put(rt); if (free) kfree(ipc.opt); if (!err) return len; /* * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. 
Reporting * ENOBUFS might not be good (it's not tunable per se), but otherwise * we don't have a good statistic (IpOutDiscards but it can be too many * things). We could add another new stat but at least for now that * seems like overkill. */ if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_SNDBUFERRORS, is_udplite); } return err; do_confirm: dst_confirm(&rt->dst); if (!(msg->msg_flags&MSG_PROBE) || len) goto back_from_confirm; err = 0; goto out; } EXPORT_SYMBOL(udp_sendmsg); int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct inet_sock *inet = inet_sk(sk); struct udp_sock *up = udp_sk(sk); int ret; if (!up->pending) { struct msghdr msg = { .msg_flags = flags|MSG_MORE }; /* Call udp_sendmsg to specify destination address which * sendpage interface can't pass. * This will succeed only when the socket is connected. */ ret = udp_sendmsg(NULL, sk, &msg, 0); if (ret < 0) return ret; } lock_sock(sk); if (unlikely(!up->pending)) { release_sock(sk); LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n")); return -EINVAL; } ret = ip_append_page(sk, &inet->cork.fl.u.ip4, page, offset, size, flags); if (ret == -EOPNOTSUPP) { release_sock(sk); return sock_no_sendpage(sk->sk_socket, page, offset, size, flags); } if (ret < 0) { udp_flush_pending_frames(sk); goto out; } up->len += size; if (!(up->corkflag || (flags&MSG_MORE))) ret = udp_push_pending_frames(sk); if (!ret) ret = size; out: release_sock(sk); return ret; } /** * first_packet_length - return length of first packet in receive queue * @sk: socket * * Drops all bad checksum frames, until a valid one is found. * Returns the length of found skb, or 0 if none is found. */ static unsigned int first_packet_length(struct sock *sk) { struct sk_buff_head list_kill, *rcvq = &sk->sk_receive_queue; struct sk_buff *skb; unsigned int res; __skb_queue_head_init(&list_kill); spin_lock_bh(&rcvq->lock); while ((skb = skb_peek(rcvq)) != NULL && udp_lib_checksum_complete(skb)) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, IS_UDPLITE(sk)); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); atomic_inc(&sk->sk_drops); __skb_unlink(skb, rcvq); __skb_queue_tail(&list_kill, skb); } res = skb ? skb->len : 0; spin_unlock_bh(&rcvq->lock); if (!skb_queue_empty(&list_kill)) { bool slow = lock_sock_fast(sk); __skb_queue_purge(&list_kill); sk_mem_reclaim_partial(sk); unlock_sock_fast(sk, slow); } return res; } /* * IOCTL requests applicable to the UDP protocol */ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) { switch (cmd) { case SIOCOUTQ: { int amount = sk_wmem_alloc_get(sk); return put_user(amount, (int __user *)arg); } case SIOCINQ: { unsigned int amount = first_packet_length(sk); if (amount) /* * We will only return the amount * of this packet since that is all * that will be read. */ amount -= sizeof(struct udphdr); return put_user(amount, (int __user *)arg); } default: return -ENOIOCTLCMD; } return 0; } EXPORT_SYMBOL(udp_ioctl); /* * This should be easy, if there is something there we * return it, otherwise we block. 
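 *
 * A sketch of the semantics implemented below, from the userspace side
 * (names are illustrative): a short buffer truncates the datagram and
 * sets MSG_TRUNC in msg_flags, the remainder being discarded:
 *
 *	char buf[16];
 *	struct sockaddr_in src;
 *	socklen_t slen = sizeof(src);
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&src, &slen);
 *
 * Passing MSG_TRUNC in the flags instead makes the call return the full
 * datagram length (ulen), as handled at the end of udp_recvmsg().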
*/ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); bool slow; if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; } int udp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); /* * 1003.1g - break association. 
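 *
 * Userspace reaches this path by dissolving the association with an
 * AF_UNSPEC connect() (illustrative sketch):
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));
 *
 * The peer address is cleared below and, unless the local port was
 * explicitly locked by bind(), the socket is unhashed as well.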
*/ sk->sk_state = TCP_CLOSE; inet->inet_daddr = 0; inet->inet_dport = 0; sock_rps_reset_rxhash(sk); sk->sk_bound_dev_if = 0; if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { sk->sk_prot->unhash(sk); inet->inet_sport = 0; } sk_dst_reset(sk); return 0; } EXPORT_SYMBOL(udp_disconnect); void udp_lib_unhash(struct sock *sk) { if (sk_hashed(sk)) { struct udp_table *udptable = sk->sk_prot->h.udp_table; struct udp_hslot *hslot, *hslot2; hslot = udp_hashslot(udptable, sock_net(sk), udp_sk(sk)->udp_port_hash); hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); spin_lock_bh(&hslot->lock); if (sk_nulls_del_node_init_rcu(sk)) { hslot->count--; inet_sk(sk)->inet_num = 0; sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); spin_lock(&hslot2->lock); hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); } spin_unlock_bh(&hslot->lock); } } EXPORT_SYMBOL(udp_lib_unhash); /* * inet_rcv_saddr was changed, we must rehash secondary hash */ void udp_lib_rehash(struct sock *sk, u16 newhash) { if (sk_hashed(sk)) { struct udp_table *udptable = sk->sk_prot->h.udp_table; struct udp_hslot *hslot, *hslot2, *nhslot2; hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash); nhslot2 = udp_hashslot2(udptable, newhash); udp_sk(sk)->udp_portaddr_hash = newhash; if (hslot2 != nhslot2) { hslot = udp_hashslot(udptable, sock_net(sk), udp_sk(sk)->udp_port_hash); /* we must lock primary chain too */ spin_lock_bh(&hslot->lock); spin_lock(&hslot2->lock); hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node); hslot2->count--; spin_unlock(&hslot2->lock); spin_lock(&nhslot2->lock); hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node, &nhslot2->head); nhslot2->count++; spin_unlock(&nhslot2->lock); spin_unlock_bh(&hslot->lock); } } } EXPORT_SYMBOL(udp_lib_rehash); static void udp_v4_rehash(struct sock *sk) { u16 new_hash = udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, inet_sk(sk)->inet_num); udp_lib_rehash(sk, new_hash); } static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int rc; if (inet_sk(sk)->inet_daddr) { sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); } rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) { int is_udplite = IS_UDPLITE(sk); /* Note that an ENOMEM error is charged twice */ if (rc == -ENOMEM) UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, is_udplite); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); kfree_skb(skb); trace_udp_fail_queue_rcv_skb(rc, sk); return -1; } return 0; } static struct static_key udp_encap_needed __read_mostly; void udp_encap_enable(void) { if (!static_key_enabled(&udp_encap_needed)) static_key_slow_inc(&udp_encap_needed); } EXPORT_SYMBOL(udp_encap_enable); /* returns: * -1: error * 0: success * >0: "udp encap" protocol resubmission * * Note that in the success and error cases, the skb is assumed to * have either been requeued or freed. */ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct udp_sock *up = udp_sk(sk); int rc; int is_udplite = IS_UDPLITE(sk); /* * Charge it to the socket, dropping if the queue is full. */ if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto drop; nf_reset(skb); if (static_key_false(&udp_encap_needed) && up->encap_type) { int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); /* * This is an encapsulation socket so pass the skb to * the socket's udp_encap_rcv() hook. Otherwise, just * fall through and pass this up the UDP socket. 
* up->encap_rcv() returns the following value: * =0 if skb was successfully passed to the encap * handler or was discarded by it. * >0 if skb should be passed on to UDP. * <0 if skb should be resubmitted as proto -N */ /* if we're overly short, let UDP handle it */ encap_rcv = ACCESS_ONCE(up->encap_rcv); if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) { int ret; ret = encap_rcv(sk, skb); if (ret <= 0) { UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); return -ret; } } /* FALLTHROUGH -- it's a UDP Packet */ } /* * UDP-Lite specific tests, ignored on UDP sockets */ if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { /* * MIB statistics other than incrementing the error count are * disabled for the following two types of errors: these depend * on the application settings, not on the functioning of the * protocol stack as such. * * RFC 3828 here recommends (sec 3.3): "There should also be a * way ... to ... at least let the receiving application block * delivery of packets with coverage values less than a value * provided by the application." */ if (up->pcrlen == 0) { /* full coverage was set */ LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n", UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } /* The next case involves violating the min. coverage requested * by the receiver. This is subtle: if receiver wants x and x is * greater than the buffersize/MTU then receiver will complain * that it wants x while sender emits packets of smaller size y. * Therefore the above ...()->partial_cov statement is essential. */ if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n", UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } if (rcu_access_pointer(sk->sk_filter) && udp_lib_checksum_complete(skb)) goto csum_error; if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) goto drop; rc = 0; ipv4_pktinfo_prepare(sk, skb); bh_lock_sock(sk); if (!sock_owned_by_user(sk)) rc = __udp_queue_rcv_skb(sk, skb); else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) { bh_unlock_sock(sk); goto drop; } bh_unlock_sock(sk); return rc; csum_error: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); drop: UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); kfree_skb(skb); return -1; } static void flush_stack(struct sock **stack, unsigned int count, struct sk_buff *skb, unsigned int final) { unsigned int i; struct sk_buff *skb1 = NULL; struct sock *sk; for (i = 0; i < count; i++) { sk = stack[i]; if (likely(skb1 == NULL)) skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); if (!skb1) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); } if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0) skb1 = NULL; } if (unlikely(skb1)) kfree_skb(skb1); } static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); dst_hold(dst); sk->sk_rx_dst = dst; } /* * Multicasts and broadcasts go to each listener. * * Note: called only from the BH handler context. 
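 *
 * For context, each listener reached here typically joined the group
 * from userspace along these lines (239.1.2.3 is an arbitrary example
 * group):
 *
 *	struct ip_mreq mr = { 0 };
 *	inet_pton(AF_INET, "239.1.2.3", &mr.imr_multiaddr);
 *	mr.imr_interface.s_addr = htonl(INADDR_ANY);
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mr, sizeof(mr));
 *
 * __udp4_lib_mcast_deliver() below hands a clone of the skb to every
 * matching socket via flush_stack(), so each member gets its own copy.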
*/ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, struct udphdr *uh, __be32 saddr, __be32 daddr, struct udp_table *udptable) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); int dif; unsigned int i, count = 0; spin_lock(&hslot->lock); sk = sk_nulls_head(&hslot->head); dif = skb->dev->ifindex; sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); while (sk) { stack[count++] = sk; sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, uh->source, saddr, dif); if (unlikely(count == ARRAY_SIZE(stack))) { if (!sk) break; flush_stack(stack, count, skb, ~0); count = 0; } } /* * before releasing chain lock, we must take a reference on sockets */ for (i = 0; i < count; i++) sock_hold(stack[i]); spin_unlock(&hslot->lock); /* * do the slow work with no lock held */ if (count) { flush_stack(stack, count, skb, count - 1); for (i = 0; i < count; i++) sock_put(stack[i]); } else { kfree_skb(skb); } return 0; } /* Initialize UDP checksum. If exited with zero value (success), * CHECKSUM_UNNECESSARY means, that no more checks are required. * Otherwise, csum completion requires chacksumming packet body, * including udp header and folding it to skb->csum. */ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto) { const struct iphdr *iph; int err; UDP_SKB_CB(skb)->partial_cov = 0; UDP_SKB_CB(skb)->cscov = skb->len; if (proto == IPPROTO_UDPLITE) { err = udplite_checksum_init(skb, uh); if (err) return err; } iph = ip_hdr(skb); if (uh->check == 0) { skb->ip_summed = CHECKSUM_UNNECESSARY; } else if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len, proto, skb->csum)) skb->ip_summed = CHECKSUM_UNNECESSARY; } if (!skb_csum_unnecessary(skb)) skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, skb->len, proto, 0); /* Probably, we should checksum udp header (it should be in cache * in any case) and data in tiny packets (< rx copybreak). */ return 0; } /* * All we need to do is get the socket, and then do a checksum. */ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { struct sock *sk; struct udphdr *uh; unsigned short ulen; struct rtable *rt = skb_rtable(skb); __be32 saddr, daddr; struct net *net = dev_net(skb->dev); /* * Validate the packet. */ if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto drop; /* No space for header. */ uh = udp_hdr(skb); ulen = ntohs(uh->len); saddr = ip_hdr(skb)->saddr; daddr = ip_hdr(skb)->daddr; if (ulen > skb->len) goto short_packet; if (proto == IPPROTO_UDP) { /* UDP validates ulen. 
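 *
 * Worked example of the check below (hypothetical frame): an skb of 60
 * bytes whose UDP header says uh->len == 26 is trimmed to 26 bytes, so
 * link-layer padding is not mistaken for payload; uh->len == 5, being
 * smaller than the 8-byte UDP header, is dropped as a short packet.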
*/ if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) goto short_packet; uh = udp_hdr(skb); } if (udp4_csum_init(skb, uh, proto)) goto csum_error; if (skb->sk) { int ret; sk = skb->sk; if (unlikely(sk->sk_rx_dst == NULL)) udp_sk_rx_dst_set(sk, skb); ret = udp_queue_rcv_skb(sk, skb); /* a return value > 0 means to resubmit the input, but * it wants the return to be -protocol, or 0 */ if (ret > 0) return -ret; return 0; } else { if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) return __udp4_lib_mcast_deliver(net, skb, uh, saddr, daddr, udptable); sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); } if (sk != NULL) { int ret; ret = udp_queue_rcv_skb(sk, skb); sock_put(sk); /* a return value > 0 means to resubmit the input, but * it wants the return to be -protocol, or 0 */ if (ret > 0) return -ret; return 0; } if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; nf_reset(skb); /* No socket. Drop packet silently, if checksum is wrong */ if (udp_lib_checksum_complete(skb)) goto csum_error; UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); /* * Hmm. We got an UDP packet to a port to which we * don't wanna listen. Ignore it. */ kfree_skb(skb); return 0; short_packet: LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", proto == IPPROTO_UDPLITE ? "Lite" : "", &saddr, ntohs(uh->source), ulen, skb->len, &daddr, ntohs(uh->dest)); goto drop; csum_error: /* * RFC1122: OK. Discards the bad packet silently (as far as * the network is concerned, anyway) as per 4.1.3.4 (MUST). */ LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", proto == IPPROTO_UDPLITE ? "Lite" : "", &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), ulen); UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); drop: UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; } /* We can only early demux multicast if there is a single matching socket. * If more than one socket found returns NULL */ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(loc_port); unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask); struct udp_hslot *hslot = &udp_table.hash[slot]; rcu_read_lock(); begin: count = 0; result = NULL; sk_nulls_for_each_rcu(sk, node, &hslot->head) { if (__udp_is_mcast_sock(net, sk, loc_port, loc_addr, rmt_port, rmt_addr, dif, hnum)) { result = sk; ++count; } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; if (result) { if (count != 1 || unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(!__udp_is_mcast_sock(net, result, loc_port, loc_addr, rmt_port, rmt_addr, dif, hnum))) { sock_put(result); result = NULL; } } rcu_read_unlock(); return result; } /* For unicast we should only early demux connected sockets or we can * break forwarding setups. The chains here can be long so only check * if the first socket is an exact match and if not move on. 
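 *
 * "Connected" means the application issued a connect() on the UDP
 * socket, fixing the 4-tuple that INET_MATCH() tests below. Minimal
 * userspace sketch (192.0.2.1:53 is an illustrative peer):
 *
 *	struct sockaddr_in peer = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(53),
 *	};
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 * Unconnected sockets keep wildcard fields and are deliberately left to
 * the normal lookup path.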
*/ static struct sock *__udp4_lib_demux_lookup(struct net *net, __be16 loc_port, __be32 loc_addr, __be16 rmt_port, __be32 rmt_addr, int dif) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(loc_port); unsigned int hash2 = udp4_portaddr_hash(net, loc_addr, hnum); unsigned int slot2 = hash2 & udp_table.mask; struct udp_hslot *hslot2 = &udp_table.hash2[slot2]; INET_ADDR_COOKIE(acookie, rmt_addr, loc_addr) const __portpair ports = INET_COMBINED_PORTS(rmt_port, hnum); rcu_read_lock(); result = NULL; udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) { if (INET_MATCH(sk, net, acookie, rmt_addr, loc_addr, ports, dif)) result = sk; /* Only check first socket in chain */ break; } if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(!INET_MATCH(sk, net, acookie, rmt_addr, loc_addr, ports, dif))) { sock_put(result); result = NULL; } } rcu_read_unlock(); return result; } void udp_v4_early_demux(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); const struct udphdr *uh = udp_hdr(skb); struct sock *sk; struct dst_entry *dst; struct net *net = dev_net(skb->dev); int dif = skb->dev->ifindex; /* validate the packet */ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) return; if (skb->pkt_type == PACKET_BROADCAST || skb->pkt_type == PACKET_MULTICAST) sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); else if (skb->pkt_type == PACKET_HOST) sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr, uh->source, iph->saddr, dif); else return; if (!sk) return; skb->sk = sk; skb->destructor = sock_edemux; dst = sk->sk_rx_dst; if (dst) dst = dst_check(dst, 0); if (dst) skb_dst_set_noref(skb, dst); } int udp_rcv(struct sk_buff *skb) { return __udp4_lib_rcv(skb, &udp_table, IPPROTO_UDP); } void udp_destroy_sock(struct sock *sk) { struct udp_sock *up = udp_sk(sk); bool slow = lock_sock_fast(sk); udp_flush_pending_frames(sk); unlock_sock_fast(sk, slow); if (static_key_false(&udp_encap_needed) && up->encap_type) { void (*encap_destroy)(struct sock *sk); encap_destroy = ACCESS_ONCE(up->encap_destroy); if (encap_destroy) encap_destroy(sk); } } /* * Socket option code for UDP */ int udp_lib_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen, int (*push_pending_frames)(struct sock *)) { struct udp_sock *up = udp_sk(sk); int val; int err = 0; int is_udplite = IS_UDPLITE(sk); if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *)optval)) return -EFAULT; switch (optname) { case UDP_CORK: if (val != 0) { up->corkflag = 1; } else { up->corkflag = 0; lock_sock(sk); (*push_pending_frames)(sk); release_sock(sk); } break; case UDP_ENCAP: switch (val) { case 0: case UDP_ENCAP_ESPINUDP: case UDP_ENCAP_ESPINUDP_NON_IKE: up->encap_rcv = xfrm4_udp_encap_rcv; /* FALLTHROUGH */ case UDP_ENCAP_L2TPINUDP: up->encap_type = val; udp_encap_enable(); break; default: err = -ENOPROTOOPT; break; } break; /* * UDP-Lite's partial checksum coverage (RFC 3828). */ /* The sender sets actual checksum coverage length via this option. * The case coverage > packet length is handled by send module. 
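 *
 * Hedged userspace sketch of driving the two coverage options handled
 * below (constants are the ones exported via <netinet/udplite.h>; the
 * 20-byte coverage is an arbitrary example):
 *
 *	int s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDPLITE);
 *	int cov = 20;
 *	setsockopt(s, SOL_UDPLITE, UDPLITE_SEND_CSCOV, &cov, sizeof(cov));
 *	setsockopt(s, SOL_UDPLITE, UDPLITE_RECV_CSCOV, &cov, sizeof(cov));
 *
 * Values 1..7 are rounded up to 8, the minimum coverage (the UDP-Lite
 * header itself) required by RFC 3828.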
*/ case UDPLITE_SEND_CSCOV: if (!is_udplite) /* Disable the option on UDP sockets */ return -ENOPROTOOPT; if (val != 0 && val < 8) /* Illegal coverage: use default (8) */ val = 8; else if (val > USHRT_MAX) val = USHRT_MAX; up->pcslen = val; up->pcflag |= UDPLITE_SEND_CC; break; /* The receiver specifies a minimum checksum coverage value. To make * sense, this should be set to at least 8 (as done below). If zero is * used, this again means full checksum coverage. */ case UDPLITE_RECV_CSCOV: if (!is_udplite) /* Disable the option on UDP sockets */ return -ENOPROTOOPT; if (val != 0 && val < 8) /* Avoid silly minimal values. */ val = 8; else if (val > USHRT_MAX) val = USHRT_MAX; up->pcrlen = val; up->pcflag |= UDPLITE_RECV_CC; break; default: err = -ENOPROTOOPT; break; } return err; } EXPORT_SYMBOL(udp_lib_setsockopt); int udp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_push_pending_frames); return ip_setsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT int compat_udp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_push_pending_frames); return compat_ip_setsockopt(sk, level, optname, optval, optlen); } #endif int udp_lib_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { struct udp_sock *up = udp_sk(sk); int val, len; if (get_user(len, optlen)) return -EFAULT; len = min_t(unsigned int, len, sizeof(int)); if (len < 0) return -EINVAL; switch (optname) { case UDP_CORK: val = up->corkflag; break; case UDP_ENCAP: val = up->encap_type; break; /* The following two cannot be changed on UDP sockets, the return is * always 0 (which corresponds to the full checksum coverage of UDP). */ case UDPLITE_SEND_CSCOV: val = up->pcslen; break; case UDPLITE_RECV_CSCOV: val = up->pcrlen; break; default: return -ENOPROTOOPT; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; } EXPORT_SYMBOL(udp_lib_getsockopt); int udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return ip_getsockopt(sk, level, optname, optval, optlen); } #ifdef CONFIG_COMPAT int compat_udp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return compat_ip_getsockopt(sk, level, optname, optval, optlen); } #endif /** * udp_poll - wait for a UDP event. * @file - file struct * @sock - socket * @wait - poll table * * This is same as datagram poll, except for the special case of * blocking sockets. If application is using a blocking fd * and a packet with checksum error is in the queue; * then it could get return from select indicating data available * but then block when reading it. Add special case code * to work around these arguably broken applications. 
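 *
 * The pattern being protected looks like this in userspace (sketch): a
 * blocking reader that trusts poll() and would otherwise hang on a
 * datagram with a bad checksum:
 *
 *	struct pollfd pfd = { .fd = s, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		recv(s, buf, sizeof(buf), 0);
 *
 * udp_poll() therefore re-checks the receive queue with
 * first_packet_length(), which drops csum-broken skbs, before reporting
 * the socket readable.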
*/ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) { unsigned int mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; sock_rps_record_flow(sk); /* Check for false positives due to checksum errors */ if ((mask & POLLRDNORM) && !(file->f_flags & O_NONBLOCK) && !(sk->sk_shutdown & RCV_SHUTDOWN) && !first_packet_length(sk)) mask &= ~(POLLIN | POLLRDNORM); return mask; } EXPORT_SYMBOL(udp_poll); struct proto udp_prot = { .name = "UDP", .owner = THIS_MODULE, .close = udp_lib_close, .connect = ip4_datagram_connect, .disconnect = udp_disconnect, .ioctl = udp_ioctl, .destroy = udp_destroy_sock, .setsockopt = udp_setsockopt, .getsockopt = udp_getsockopt, .sendmsg = udp_sendmsg, .recvmsg = udp_recvmsg, .sendpage = udp_sendpage, .backlog_rcv = __udp_queue_rcv_skb, .release_cb = ip4_datagram_release_cb, .hash = udp_lib_hash, .unhash = udp_lib_unhash, .rehash = udp_v4_rehash, .get_port = udp_v4_get_port, .memory_allocated = &udp_memory_allocated, .sysctl_mem = sysctl_udp_mem, .sysctl_wmem = &sysctl_udp_wmem_min, .sysctl_rmem = &sysctl_udp_rmem_min, .obj_size = sizeof(struct udp_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .h.udp_table = &udp_table, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_udp_setsockopt, .compat_getsockopt = compat_udp_getsockopt, #endif .clear_sk = sk_prot_clear_portaddr_nulls, }; EXPORT_SYMBOL(udp_prot); /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PROC_FS static struct sock *udp_get_first(struct seq_file *seq, int start) { struct sock *sk; struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); for (state->bucket = start; state->bucket <= state->udp_table->mask; ++state->bucket) { struct hlist_nulls_node *node; struct udp_hslot *hslot = &state->udp_table->hash[state->bucket]; if (hlist_nulls_empty(&hslot->head)) continue; spin_lock_bh(&hslot->lock); sk_nulls_for_each(sk, node, &hslot->head) { if (!net_eq(sock_net(sk), net)) continue; if (sk->sk_family == state->family) goto found; } spin_unlock_bh(&hslot->lock); } sk = NULL; found: return sk; } static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) { struct udp_iter_state *state = seq->private; struct net *net = seq_file_net(seq); do { sk = sk_nulls_next(sk); } while (sk && (!net_eq(sock_net(sk), net) || sk->sk_family != state->family)); if (!sk) { if (state->bucket <= state->udp_table->mask) spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); return udp_get_first(seq, state->bucket + 1); } return sk; } static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) { struct sock *sk = udp_get_first(seq, 0); if (sk) while (pos && (sk = udp_get_next(seq, sk)) != NULL) --pos; return pos ? NULL : sk; } static void *udp_seq_start(struct seq_file *seq, loff_t *pos) { struct udp_iter_state *state = seq->private; state->bucket = MAX_UDP_PORTS; return *pos ? 
udp_get_idx(seq, *pos-1) : SEQ_START_TOKEN; } static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *sk; if (v == SEQ_START_TOKEN) sk = udp_get_idx(seq, 0); else sk = udp_get_next(seq, v); ++*pos; return sk; } static void udp_seq_stop(struct seq_file *seq, void *v) { struct udp_iter_state *state = seq->private; if (state->bucket <= state->udp_table->mask) spin_unlock_bh(&state->udp_table->hash[state->bucket].lock); } int udp_seq_open(struct inode *inode, struct file *file) { struct udp_seq_afinfo *afinfo = PDE_DATA(inode); struct udp_iter_state *s; int err; err = seq_open_net(inode, file, &afinfo->seq_ops, sizeof(struct udp_iter_state)); if (err < 0) return err; s = ((struct seq_file *)file->private_data)->private; s->family = afinfo->family; s->udp_table = afinfo->udp_table; return err; } EXPORT_SYMBOL(udp_seq_open); /* ------------------------------------------------------------------------ */ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo) { struct proc_dir_entry *p; int rc = 0; afinfo->seq_ops.start = udp_seq_start; afinfo->seq_ops.next = udp_seq_next; afinfo->seq_ops.stop = udp_seq_stop; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, afinfo->seq_fops, afinfo); if (!p) rc = -ENOMEM; return rc; } EXPORT_SYMBOL(udp_proc_register); void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo) { remove_proc_entry(afinfo->name, net->proc_net); } EXPORT_SYMBOL(udp_proc_unregister); /* ------------------------------------------------------------------------ */ static void udp4_format_sock(struct sock *sp, struct seq_file *f, int bucket, int *len) { struct inet_sock *inet = inet_sk(sp); __be32 dest = inet->inet_daddr; __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); seq_printf(f, "%5d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n", bucket, src, srcp, dest, destp, sp->sk_state, sk_wmem_alloc_get(sp), sk_rmem_alloc_get(sp), 0, 0L, 0, from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)), 0, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops), len); } int udp4_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-127s\n", " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode ref pointer drops"); else { struct udp_iter_state *state = seq->private; int len; udp4_format_sock(v, seq, state->bucket, &len); seq_printf(seq, "%*s\n", 127 - len, ""); } return 0; } static const struct file_operations udp_afinfo_seq_fops = { .owner = THIS_MODULE, .open = udp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; /* ------------------------------------------------------------------------ */ static struct udp_seq_afinfo udp4_seq_afinfo = { .name = "udp", .family = AF_INET, .udp_table = &udp_table, .seq_fops = &udp_afinfo_seq_fops, .seq_ops = { .show = udp4_seq_show, }, }; static int __net_init udp4_proc_init_net(struct net *net) { return udp_proc_register(net, &udp4_seq_afinfo); } static void __net_exit udp4_proc_exit_net(struct net *net) { udp_proc_unregister(net, &udp4_seq_afinfo); } static struct pernet_operations udp4_net_ops = { .init = udp4_proc_init_net, .exit = udp4_proc_exit_net, }; int __init udp4_proc_init(void) { return register_pernet_subsys(&udp4_net_ops); } void udp4_proc_exit(void) { unregister_pernet_subsys(&udp4_net_ops); } #endif /* CONFIG_PROC_FS */ static __initdata unsigned long uhash_entries; static 
int __init set_uhash_entries(char *str) { ssize_t ret; if (!str) return 0; ret = kstrtoul(str, 0, &uhash_entries); if (ret) return 0; if (uhash_entries && uhash_entries < UDP_HTABLE_SIZE_MIN) uhash_entries = UDP_HTABLE_SIZE_MIN; return 1; } __setup("uhash_entries=", set_uhash_entries); void __init udp_table_init(struct udp_table *table, const char *name) { unsigned int i; table->hash = alloc_large_system_hash(name, 2 * sizeof(struct udp_hslot), uhash_entries, 21, /* one slot per 2 MB */ 0, &table->log, &table->mask, UDP_HTABLE_SIZE_MIN, 64 * 1024); table->hash2 = table->hash + (table->mask + 1); for (i = 0; i <= table->mask; i++) { INIT_HLIST_NULLS_HEAD(&table->hash[i].head, i); table->hash[i].count = 0; spin_lock_init(&table->hash[i].lock); } for (i = 0; i <= table->mask; i++) { INIT_HLIST_NULLS_HEAD(&table->hash2[i].head, i); table->hash2[i].count = 0; spin_lock_init(&table->hash2[i].lock); } } void __init udp_init(void) { unsigned long limit; udp_table_init(&udp_table, "UDP"); limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); sysctl_udp_mem[0] = limit / 4 * 3; sysctl_udp_mem[1] = limit; sysctl_udp_mem[2] = sysctl_udp_mem[0] * 2; sysctl_udp_rmem_min = SK_MEM_QUANTUM; sysctl_udp_wmem_min = SK_MEM_QUANTUM; } struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); int mac_len = skb->mac_len; int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); __be16 protocol = skb->protocol; netdev_features_t enc_features; int outer_hlen; if (unlikely(!pskb_may_pull(skb, tnl_hlen))) goto out; skb->encapsulation = 0; __skb_pull(skb, tnl_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); skb->protocol = htons(ETH_P_TEB); /* segment inner packet. */ enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); segs = skb_mac_gso_segment(skb, enc_features); if (!segs || IS_ERR(segs)) goto out; outer_hlen = skb_tnl_header_len(skb); skb = segs; do { struct udphdr *uh; int udp_offset = outer_hlen - tnl_hlen; skb_reset_inner_headers(skb); skb->encapsulation = 1; skb->mac_len = mac_len; skb_push(skb, outer_hlen); skb_reset_mac_header(skb); skb_set_network_header(skb, mac_len); skb_set_transport_header(skb, udp_offset); uh = udp_hdr(skb); uh->len = htons(skb->len - udp_offset); /* csum segment if tunnel sets skb with csum. */ if (protocol == htons(ETH_P_IP) && unlikely(uh->check)) { struct iphdr *iph = ip_hdr(skb); uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - udp_offset, IPPROTO_UDP, 0); uh->check = csum_fold(skb_checksum(skb, udp_offset, skb->len - udp_offset, 0)); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } else if (protocol == htons(ETH_P_IPV6)) { struct ipv6hdr *ipv6h = ipv6_hdr(skb); u32 len = skb->len - udp_offset; uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, len, IPPROTO_UDP, 0); uh->check = csum_fold(skb_checksum(skb, udp_offset, len, 0)); if (uh->check == 0) uh->check = CSUM_MANGLED_0; skb->ip_summed = CHECKSUM_NONE; } skb->protocol = protocol; } while ((skb = skb->next)); out: return segs; }
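The checksum sequence at the end of skb_udp_tunnel_segment() above first seeds uh->check with the complemented pseudo-header sum (~csum_tcpudp_magic() / ~csum_ipv6_magic()), then checksums the UDP header plus payload so that seed is folded in, and finally substitutes CSUM_MANGLED_0 when the result is zero, since a transmitted UDP checksum of 0 means "no checksum" (RFC 768). A minimal standalone sketch of the equivalent arithmetic, assuming the checksum field has been zeroed before summing and IPv4 addresses in host byte order (hypothetical helper names, not kernel API):

#include <stddef.h>
#include <stdint.h>

static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)((p[0] << 8) | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, zero-padded */
		sum += (uint32_t)(p[0] << 8);
	return sum;
}

/* 'udp' points at the UDP header (check field = 0) plus payload, 'len' bytes */
uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
		       const uint8_t *udp, size_t len)
{
	uint32_t sum = 0;

	/* pseudo-header: source, destination, zero+protocol, UDP length */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 17;			/* IPPROTO_UDP */
	sum += (uint32_t)len;

	sum = sum16(udp, len, sum);
	while (sum >> 16)		/* end-around carry, like csum_fold() */
		sum = (sum & 0xffff) + (sum >> 16);

	sum = ~sum & 0xffff;
	return sum ? (uint16_t)sum : 0xffff;	/* the CSUM_MANGLED_0 rule */
}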
int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); bool slow; /* * Check any passed addresses */ if (addr_len) *addr_len = sizeof(*sin); if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; }
int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int noblock, int flags, int *addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; unsigned int ulen, copied; int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); bool slow; if (flags & MSG_ERRQUEUE) return ip_recv_error(sk, msg, len); try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), &peeked, &off, &err); if (!skb) goto out; ulen = skb->len - sizeof(struct udphdr); copied = len; if (copied > ulen) copied = ulen; else if (copied < ulen) msg->msg_flags |= MSG_TRUNC; /* * If checksum is needed at all, try to do it while copying the * data. If the data is truncated, or if we only want a partial * coverage checksum (UDP-Lite), do it before the copy. */ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) { if (udp_lib_checksum_complete(skb)) goto csum_copy_err; } if (skb_csum_unnecessary(skb)) err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) goto csum_copy_err; } if (unlikely(err)) { trace_kfree_skb(skb, udp_recvmsg); if (!peeked) { atomic_inc(&sk->sk_drops); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } goto out_free; } if (!peeked) UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INDATAGRAMS, is_udplite); sock_recv_ts_and_drops(msg, sk, skb); /* Copy the address. */ if (sin) { sin->sin_family = AF_INET; sin->sin_port = udp_hdr(skb)->source; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); *addr_len = sizeof(*sin); } if (inet->cmsg_flags) ip_cmsg_recv(msg, skb); err = copied; if (flags & MSG_TRUNC) err = ulen; out_free: skb_free_datagram_locked(sk, skb); out: return err; csum_copy_err: slow = lock_sock_fast(sk); if (!skb_kill_datagram(sk, skb, flags)) { UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite); } unlock_sock_fast(sk, slow); if (noblock) return -EAGAIN; /* starting over for a new packet */ msg->msg_flags &= ~MSG_TRUNC; goto try_again; }
{'added': [(1299, '\t\t*addr_len = sizeof(*sin);')], 'deleted': [(1238, '\t/*'), (1239, '\t *\tCheck any passed addresses'), (1240, '\t */'), (1241, '\tif (addr_len)'), (1242, '\t\t*addr_len = sizeof(*sin);'), (1243, '')]}
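The dict above is the record's diff, and it is the whole of the CVE-2013-7263 fix for this method: the first copy of udp_recvmsg() (func_before) writes *addr_len = sizeof(*sin) on entry, before it is known whether msg_name will ever be filled, while the second copy (func_after) writes it only inside the if (sin) branch, together with the address itself. Because the socket layer copies the reported number of sockaddr bytes back to user space, the early assignment lets paths that never fill msg_name disclose uninitialized kernel stack (hence CWE-20). A minimal sketch of the two shapes, with hypothetical names rather than the kernel's:

#include <string.h>

struct sketch_addr { unsigned short family, port; unsigned char pad[12]; };

/* Vulnerable shape: length is reported before any early return. */
int recv_before(struct sketch_addr *addr, int *addr_len, int have_packet)
{
	if (addr_len)
		*addr_len = sizeof(*addr);	/* set even if *addr stays uninitialized */
	if (!have_packet)
		return -1;			/* caller still sees a full length */
	/* ... fill *addr here ... */
	return 0;
}

/* Fixed shape: length is reported only when the address is written. */
int recv_after(struct sketch_addr *addr, int *addr_len, int have_packet)
{
	if (!have_packet)
		return -1;
	if (addr) {
		memset(addr, 0, sizeof(*addr));
		/* ... fill fields ... */
		*addr_len = sizeof(*addr);
	}
	return 0;
}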
num_lines_added: 1
num_lines_deleted: 6
num_lines_in_file: 1,857
num_tokens_in_file: 12,006
num_lines_in_method: 80
num_tokens_in_method: 509
method_complexity: 20
repo: https://github.com/torvalds/linux
cve_id: CVE-2013-7263
cwe_id: CWE-20
id: 1,887
file_name: gather.cc
programming_language: C++
method_name: tflite::ops::builtin::gather::Eval
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/string_util.h" namespace tflite { namespace ops { namespace builtin { namespace gather { constexpr int kInputTensor = 0; constexpr int kInputPositions = 1; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const auto* params = reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* positions; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputPositions, &positions)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); switch (positions->type) { case kTfLiteInt64: case kTfLiteInt32: break; default: context->ReportError( context, "Positions of type '%s' are not supported by gather.", TfLiteTypeGetName(positions->type)); return kTfLiteError; } // Assign to output the input type. output->type = input->type; // Check conditions for different types. switch (input->type) { case kTfLiteFloat32: case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt16: case kTfLiteInt64: case kTfLiteInt32: case kTfLiteBool: break; case kTfLiteString: { // Only 1D input is supported. TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); } break; default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } int axis = params->axis; if (axis < 0) { axis += NumDimensions(input); } TF_LITE_ENSURE(context, 0 <= axis && axis < NumDimensions(input)); int batch_dims = params->batch_dims; // batch_dims should be in range: [-rank(positions), rank(positions)]. // Negative batch_dims is added with rank of positions. 
if (batch_dims < 0) { batch_dims += NumDimensions(positions); } TF_LITE_ENSURE(context, batch_dims <= axis); TF_LITE_ENSURE(context, 0 <= batch_dims && batch_dims < NumDimensions(input)); TF_LITE_ENSURE(context, batch_dims <= NumDimensions(positions)); for (int i = 0; i < batch_dims; ++i) { TF_LITE_ENSURE_EQ(context, input->dims->data[i], positions->dims->data[i]); } const int num_dimensions = NumDimensions(input) + NumDimensions(positions) - 1 - batch_dims; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions); int output_index = 0; for (int i = 0; i < axis; ++i) { output_shape->data[output_index++] = input->dims->data[i]; } for (int i = batch_dims; i < positions->dims->size; ++i) { output_shape->data[output_index++] = positions->dims->data[i]; } for (int i = axis + 1; i < input->dims->size; ++i) { output_shape->data[output_index++] = input->dims->data[i]; } return context->ResizeTensor(context, output, output_shape); } template <typename InputT, typename PositionsT> TfLiteStatus Gather(const TfLiteGatherParams& params, const TfLiteTensor* input, const TfLiteTensor* positions, TfLiteTensor* output) { tflite::GatherParams op_params; op_params.axis = params.axis; op_params.batch_dims = params.batch_dims; optimized_ops::Gather(op_params, GetTensorShape(input), GetTensorData<InputT>(input), GetTensorShape(positions), GetTensorData<PositionsT>(positions), GetTensorShape(output), GetTensorData<InputT>(output)); return kTfLiteOk; } template <typename PositionT> TfLiteStatus GatherStrings(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* positions, TfLiteTensor* output) { DynamicBuffer buffer; const PositionT* indexes = GetTensorData<PositionT>(positions); const PositionT num_strings = GetStringCount(input); const int num_indexes = NumElements(positions); for (int i = 0; i < num_indexes; ++i) { const PositionT pos = indexes[i]; TF_LITE_ENSURE(context, pos < num_strings); const auto string_ref = GetString(input, pos); buffer.AddString(string_ref.str, string_ref.len); } buffer.WriteToTensor(output, /*new_shape=*/nullptr); return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* positions; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputPositions, &positions)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (positions->type == kTfLiteInt32) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int32_t>(*params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int32_t>(*params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int32_t>(*params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int32_t>(*params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int32_t>(*params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int32_t>(*params, input, positions, output); case kTfLiteBool: return Gather<bool, int32_t>(*params, input, positions, output); case kTfLiteString: return GatherStrings<int32_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } if (positions->type == kTfLiteInt64) { switch (input->type) { case kTfLiteFloat32: 
return Gather<float, int64_t>(*params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int64_t>(*params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int64_t>(*params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int64_t>(*params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int64_t>(*params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int64_t>(*params, input, positions, output); case kTfLiteBool: return Gather<bool, int64_t>(*params, input, positions, output); case kTfLiteString: return GatherStrings<int64_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } context->ReportError(context, "Positions of type '%s' are not supported by gather.", TfLiteTypeGetName(positions->type)); return kTfLiteError; } } // namespace gather TfLiteRegistration* Register_GATHER() { static TfLiteRegistration r = {nullptr, nullptr, gather::Prepare, gather::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/string_util.h" namespace tflite { namespace ops { namespace builtin { namespace gather { constexpr int kInputTensor = 0; constexpr int kInputPositions = 1; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const auto* params = reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* positions; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputPositions, &positions)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); switch (positions->type) { case kTfLiteInt64: case kTfLiteInt32: break; default: context->ReportError( context, "Positions of type '%s' are not supported by gather.", TfLiteTypeGetName(positions->type)); return kTfLiteError; } // Assign to output the input type. output->type = input->type; // Check conditions for different types. switch (input->type) { case kTfLiteFloat32: case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt16: case kTfLiteInt64: case kTfLiteInt32: case kTfLiteBool: break; case kTfLiteString: { // Only 1D input is supported. TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1); } break; default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } int axis = params->axis; if (axis < 0) { axis += NumDimensions(input); } TF_LITE_ENSURE(context, 0 <= axis && axis < NumDimensions(input)); int batch_dims = params->batch_dims; // batch_dims should be in range: [-rank(positions), rank(positions)]. // Negative batch_dims is added with rank of positions. 
if (batch_dims < 0) { batch_dims += NumDimensions(positions); } TF_LITE_ENSURE(context, batch_dims <= axis); TF_LITE_ENSURE(context, 0 <= batch_dims && batch_dims < NumDimensions(input)); TF_LITE_ENSURE(context, batch_dims <= NumDimensions(positions)); for (int i = 0; i < batch_dims; ++i) { TF_LITE_ENSURE_EQ(context, input->dims->data[i], positions->dims->data[i]); } const int num_dimensions = NumDimensions(input) + NumDimensions(positions) - 1 - batch_dims; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(num_dimensions); int output_index = 0; for (int i = 0; i < axis; ++i) { output_shape->data[output_index++] = input->dims->data[i]; } for (int i = batch_dims; i < positions->dims->size; ++i) { output_shape->data[output_index++] = positions->dims->data[i]; } for (int i = axis + 1; i < input->dims->size; ++i) { output_shape->data[output_index++] = input->dims->data[i]; } return context->ResizeTensor(context, output, output_shape); } template <typename InputT, typename PositionsT> TfLiteStatus Gather(TfLiteContext* context, const TfLiteGatherParams& params, const TfLiteTensor* input, const TfLiteTensor* positions, TfLiteTensor* output) { const PositionsT* indexes = GetTensorData<PositionsT>(positions); bool indices_has_only_positive_elements = true; const size_t num_indices = positions->bytes / sizeof(PositionsT); for (size_t i = 0; i < num_indices; i++) { if (indexes[i] < 0) { indices_has_only_positive_elements = false; break; } } TF_LITE_ENSURE(context, indices_has_only_positive_elements); tflite::GatherParams op_params; op_params.axis = params.axis; op_params.batch_dims = params.batch_dims; optimized_ops::Gather(op_params, GetTensorShape(input), GetTensorData<InputT>(input), GetTensorShape(positions), GetTensorData<PositionsT>(positions), GetTensorShape(output), GetTensorData<InputT>(output)); return kTfLiteOk; } template <typename PositionT> TfLiteStatus GatherStrings(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* positions, TfLiteTensor* output) { DynamicBuffer buffer; const PositionT* indexes = GetTensorData<PositionT>(positions); bool indices_has_only_positive_elements = true; const size_t num_indices = positions->bytes / sizeof(PositionT); for (size_t i = 0; i < num_indices; i++) { if (indexes[i] < 0) { indices_has_only_positive_elements = false; break; } } TF_LITE_ENSURE(context, indices_has_only_positive_elements); const PositionT num_strings = GetStringCount(input); const int num_indexes = NumElements(positions); for (int i = 0; i < num_indexes; ++i) { const PositionT pos = indexes[i]; TF_LITE_ENSURE(context, pos < num_strings); const auto string_ref = GetString(input, pos); buffer.AddString(string_ref.str, string_ref.len); } buffer.WriteToTensor(output, /*new_shape=*/nullptr); return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* positions; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputPositions, &positions)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (positions->type == kTfLiteInt32) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int32_t>(context, *params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, 
int32_t>(context, *params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int32_t>(context, *params, input, positions, output); case kTfLiteBool: return Gather<bool, int32_t>(context, *params, input, positions, output); case kTfLiteString: return GatherStrings<int32_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } if (positions->type == kTfLiteInt64) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int64_t>(context, *params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int64_t>(context, *params, input, positions, output); case kTfLiteBool: return Gather<bool, int64_t>(context, *params, input, positions, output); case kTfLiteString: return GatherStrings<int64_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } context->ReportError(context, "Positions of type '%s' are not supported by gather.", TfLiteTypeGetName(positions->type)); return kTfLiteError; } } // namespace gather TfLiteRegistration* Register_GATHER() { static TfLiteRegistration r = {nullptr, nullptr, gather::Prepare, gather::Eval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
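The patched file above (the record's code_after) adds the same guard to both Gather() and GatherStrings(): walk the positions tensor and reject any negative index with TF_LITE_ENSURE before touching the data tensor, closing the out-of-bounds read of CVE-2021-37687. A standalone C sketch of the idea (hypothetical names; the patch's Gather() validates only the sign and GatherStrings() separately checks pos < num_strings, while the upper-bound test below is included just to make the sketch self-contained):

#include <stddef.h>
#include <stdint.h>

int gather_i32(const int32_t *data, size_t data_len,
               const int32_t *idx, size_t idx_len, int32_t *out)
{
  size_t i;

  /* The added check: a negative position would make data[idx[i]]
     read before the start of the buffer (CWE-125). */
  for (i = 0; i < idx_len; i++)
    if (idx[i] < 0)
      return -1;                      /* where TF_LITE_ENSURE would fire */

  for (i = 0; i < idx_len; i++) {
    if ((size_t)idx[i] >= data_len)   /* upper bound, for completeness */
      return -1;
    out[i] = data[idx[i]];
  }
  return 0;
}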
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* positions; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputPositions, &positions)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (positions->type == kTfLiteInt32) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int32_t>(*params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int32_t>(*params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int32_t>(*params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int32_t>(*params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int32_t>(*params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int32_t>(*params, input, positions, output); case kTfLiteBool: return Gather<bool, int32_t>(*params, input, positions, output); case kTfLiteString: return GatherStrings<int32_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } if (positions->type == kTfLiteInt64) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int64_t>(*params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int64_t>(*params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int64_t>(*params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int64_t>(*params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int64_t>(*params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int64_t>(*params, input, positions, output); case kTfLiteBool: return Gather<bool, int64_t>(*params, input, positions, output); case kTfLiteString: return GatherStrings<int64_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } context->ReportError(context, "Positions of type '%s' are not supported by gather.", TfLiteTypeGetName(positions->type)); return kTfLiteError; }
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<const TfLiteGatherParams*>(node->builtin_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); const TfLiteTensor* positions; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputPositions, &positions)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); if (positions->type == kTfLiteInt32) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int32_t>(context, *params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int32_t>(context, *params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int32_t>(context, *params, input, positions, output); case kTfLiteBool: return Gather<bool, int32_t>(context, *params, input, positions, output); case kTfLiteString: return GatherStrings<int32_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } if (positions->type == kTfLiteInt64) { switch (input->type) { case kTfLiteFloat32: return Gather<float, int64_t>(context, *params, input, positions, output); case kTfLiteUInt8: return Gather<uint8_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt8: return Gather<int8_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt16: return Gather<int16_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt32: return Gather<int32_t, int64_t>(context, *params, input, positions, output); case kTfLiteInt64: return Gather<int64_t, int64_t>(context, *params, input, positions, output); case kTfLiteBool: return Gather<bool, int64_t>(context, *params, input, positions, output); case kTfLiteString: return GatherStrings<int64_t>(context, input, positions, output); default: context->ReportError(context, "Type '%s' is not supported by gather.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } context->ReportError(context, "Positions of type '%s' are not supported by gather.", TfLiteTypeGetName(positions->type)); return kTfLiteError; }
{'added': [(120, 'TfLiteStatus Gather(TfLiteContext* context, const TfLiteGatherParams& params,'), (121, ' const TfLiteTensor* input, const TfLiteTensor* positions,'), (122, ' TfLiteTensor* output) {'), (123, ' const PositionsT* indexes = GetTensorData<PositionsT>(positions);'), (124, ' bool indices_has_only_positive_elements = true;'), (125, ' const size_t num_indices = positions->bytes / sizeof(PositionsT);'), (126, ' for (size_t i = 0; i < num_indices; i++) {'), (127, ' if (indexes[i] < 0) {'), (128, ' indices_has_only_positive_elements = false;'), (129, ' break;'), (130, ' }'), (131, ' }'), (132, ' TF_LITE_ENSURE(context, indices_has_only_positive_elements);'), (133, ''), (149, ''), (151, ' bool indices_has_only_positive_elements = true;'), (152, ' const size_t num_indices = positions->bytes / sizeof(PositionT);'), (153, ' for (size_t i = 0; i < num_indices; i++) {'), (154, ' if (indexes[i] < 0) {'), (155, ' indices_has_only_positive_elements = false;'), (156, ' break;'), (157, ' }'), (158, ' }'), (159, ' TF_LITE_ENSURE(context, indices_has_only_positive_elements);'), (160, ''), (189, ' return Gather<float, int32_t>(context, *params, input, positions,'), (190, ' output);'), (192, ' return Gather<uint8_t, int32_t>(context, *params, input, positions,'), (193, ' output);'), (195, ' return Gather<int8_t, int32_t>(context, *params, input, positions,'), (196, ' output);'), (198, ' return Gather<int16_t, int32_t>(context, *params, input, positions,'), (199, ' output);'), (201, ' return Gather<int32_t, int32_t>(context, *params, input, positions,'), (202, ' output);'), (204, ' return Gather<int64_t, int32_t>(context, *params, input, positions,'), (205, ' output);'), (207, ' return Gather<bool, int32_t>(context, *params, input, positions,'), (208, ' output);'), (220, ' return Gather<float, int64_t>(context, *params, input, positions,'), (221, ' output);'), (223, ' return Gather<uint8_t, int64_t>(context, *params, input, positions,'), (224, ' output);'), (226, ' return Gather<int8_t, int64_t>(context, *params, input, positions,'), (227, ' output);'), (229, ' return Gather<int16_t, int64_t>(context, *params, input, positions,'), (230, ' output);'), (232, ' return Gather<int32_t, int64_t>(context, *params, input, positions,'), (233, ' output);'), (235, ' return Gather<int64_t, int64_t>(context, *params, input, positions,'), (236, ' output);'), (238, ' return Gather<bool, int64_t>(context, *params, input, positions,'), (239, ' output);')], 'deleted': [(120, 'TfLiteStatus Gather(const TfLiteGatherParams& params, const TfLiteTensor* input,'), (121, ' const TfLiteTensor* positions, TfLiteTensor* output) {'), (166, ' return Gather<float, int32_t>(*params, input, positions, output);'), (168, ' return Gather<uint8_t, int32_t>(*params, input, positions, output);'), (170, ' return Gather<int8_t, int32_t>(*params, input, positions, output);'), (172, ' return Gather<int16_t, int32_t>(*params, input, positions, output);'), (174, ' return Gather<int32_t, int32_t>(*params, input, positions, output);'), (176, ' return Gather<int64_t, int32_t>(*params, input, positions, output);'), (178, ' return Gather<bool, int32_t>(*params, input, positions, output);'), (190, ' return Gather<float, int64_t>(*params, input, positions, output);'), (192, ' return Gather<uint8_t, int64_t>(*params, input, positions, output);'), (194, ' return Gather<int8_t, int64_t>(*params, input, positions, output);'), (196, ' return Gather<int16_t, int64_t>(*params, input, positions, output);'), (198, ' return Gather<int32_t, int64_t>(*params, 
input, positions, output);'), (200, ' return Gather<int64_t, int64_t>(*params, input, positions, output);'), (202, ' return Gather<bool, int64_t>(*params, input, positions, output);')]}
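Beyond the validation loops, note what else this diff threads through every call site: Gather() now takes the TfLiteContext*. TF_LITE_ENSURE reports through the context and makes the enclosing function return kTfLiteError, so the new check could only live in a function that receives the context; that is the entire reason for the signature churn across the switch statements. A small sketch of that reporting shape in C, with hypothetical types standing in for the TensorFlow Lite ones:

#include <stdio.h>

typedef struct { void (*report)(const char *msg); } ctx_t;

/* Shaped like TF_LITE_ENSURE: report via the context, then bail out. */
#define ENSURE(ctx, cond)                          \
  do {                                             \
    if (!(cond)) {                                 \
      (ctx)->report("ensure failed: " #cond);      \
      return -1;                                   \
    }                                              \
  } while (0)

static void report_stderr(const char *msg) { fprintf(stderr, "%s\n", msg); }

int validate_indices(ctx_t *ctx, const int *idx, int n)
{
  for (int i = 0; i < n; i++)
    ENSURE(ctx, idx[i] >= 0);
  return 0;
}

Calling validate_indices() with a ctx_t holding report_stderr on an array that contains a negative entry prints the report and returns -1, the moral equivalent of kTfLiteError.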
num_lines_added: 53
num_lines_deleted: 16
num_lines_in_file: 224
num_tokens_in_file: 1,524
num_lines_in_method: 64
num_tokens_in_method: 521
method_complexity: 19
repo: https://github.com/tensorflow/tensorflow
cve_id: CVE-2021-37687
cwe_id: CWE-125
id: 3,240
file_name: image.cxx
programming_language: C
method_name: gif_read_lzw
/* * Image handling routines for HTMLDOC, a HTML document processing program. * * Copyright © 2011-2022 by Michael R Sweet. * Copyright © 1997-2010 by Easy Software Products. All rights reserved. * * This program is free software. Distribution and use rights are outlined in * the file "COPYING". */ /* * Include necessary headers. */ #include "htmldoc.h" #include <setjmp.h> #ifdef HAVE_LIBJPEG extern "C" { /* Workaround for JPEG header problems... */ # include <jpeglib.h> /* JPEG/JFIF image definitions */ } #endif // HAVE_JPEG #ifdef HAVE_LIBPNG # include <png.h> /* Portable Network Graphics (PNG) definitions */ #endif // HAVE_LIBPNG /* * GIF definitions... */ #define GIF_INTERLACE 0x40 #define GIF_COLORMAP 0x80 typedef uchar gif_cmap_t[256][3]; /* * BMP definitions... */ #ifndef BI_RGB # define BI_RGB 0 /* No compression - straight BGR data */ # define BI_RLE8 1 /* 8-bit run-length compression */ # define BI_RLE4 2 /* 4-bit run-length compression */ # define BI_BITFIELDS 3 /* RGB bitmap with RGB masks */ #endif /* !BI_RGB */ /* * Local globals... */ static size_t num_images = 0, /* Number of images in cache */ alloc_images = 0; /* Allocated images */ static image_t **images = NULL; /* Images in cache */ static int gif_eof = 0; /* Did we hit EOF? */ /* * Local functions... */ static int gif_read_cmap(FILE *fp, int ncolors, gif_cmap_t cmap, int *gray); static int gif_get_block(FILE *fp, uchar *buffer); static int gif_get_code (FILE *fp, int code_size, int first_time); static int gif_read_image(FILE *fp, image_t *img, gif_cmap_t cmap, int interlace, int transparent); static int gif_read_lzw(FILE *fp, int first_time, int input_code_size); static int image_compare(image_t **img1, image_t **img2); static int image_load_bmp(image_t *img, FILE *fp, int gray, int load_data); static int image_load_gif(image_t *img, FILE *fp, int gray, int load_data); #ifdef HAVE_LIBJPEG static int image_load_jpeg(image_t *img, FILE *fp, int gray, int load_data); static void jpeg_error_handler(j_common_ptr); #endif // HAVE_LIBJPEG #ifdef HAVE_LIBPNG static int image_load_png(image_t *img, FILE *fp, int gray, int load_data); #endif // HAVE_LIBPNG static void image_need_mask(image_t *img, int scaling = 1); static void image_set_mask(image_t *img, int x, int y, uchar alpha = 0); static int read_long(FILE *fp); static unsigned short read_word(FILE *fp); static unsigned int read_dword(FILE *fp); /* * 'gif_read_cmap()' - Read the colormap from a GIF file... */ static int /* O - 0 on success, -1 on error */ gif_read_cmap(FILE *fp, /* I - File to read from */ int ncolors, /* I - Number of colors */ gif_cmap_t cmap, /* IO - Colormap array */ int *gray) /* IO - 1 = grayscale */ { int i; /* Looping var */ /* * Read the colormap... */ if (fread(cmap, 3, (size_t)ncolors, fp) < (size_t)ncolors) { progress_error(HD_ERROR_READ_ERROR, "Unable to read GIF colormap: %s", strerror(errno)); return (-1); } /* * Check to see if the colormap is a grayscale ramp... */ for (i = 0; i < ncolors; i ++) if (cmap[i][0] != cmap[i][1] || cmap[i][1] != cmap[i][2]) break; if (i == ncolors) { *gray = 1; return (0); } /* * If this needs to be a grayscale image, convert the RGB values to * luminance values... */ if (*gray) for (i = 0; i < ncolors; i ++) cmap[i][0] = (cmap[i][0] * 31 + cmap[i][1] * 61 + cmap[i][2] * 8) / 100; return (0); } /* * 'gif_get_block()' - Read a GIF data block... 
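 * (A GIF data sub-block is a count byte, 0-255, followed by that many data
 * bytes; a zero count terminates the sequence of sub-blocks, which is why a
 * zero sets gif_eof below.)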
*/ static int /* O - Number characters read */ gif_get_block(FILE *fp, /* I - File to read from */ uchar *buf) /* I - Input buffer */ { int count; /* Number of character to read */ /* * Read the count byte followed by the data from the file... */ if ((count = getc(fp)) == EOF) { gif_eof = 1; return (-1); } else if (count == 0) gif_eof = 1; else if (fread(buf, 1, (size_t)count, fp) < (size_t)count) { progress_error(HD_ERROR_READ_ERROR, "Unable to read GIF block of %d bytes: %s", count, strerror(errno)); gif_eof = 1; return (-1); } else gif_eof = 0; return (count); } /* * 'gif_get_code()' - Get a LZW code from the file... */ static int /* O - LZW code */ gif_get_code(FILE *fp, /* I - File to read from */ int code_size, /* I - Size of code in bits */ int first_time) /* I - 1 = first time, 0 = not first time */ { unsigned i, j, /* Looping vars */ ret; /* Return value */ int count; /* Number of bytes read */ static uchar buf[280]; /* Input buffer */ static unsigned curbit, /* Current bit */ lastbit, /* Last bit in buffer */ done, /* Done with this buffer? */ last_byte; /* Last byte in buffer */ static unsigned bits[8] = /* Bit masks for codes */ { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; if (first_time) { /* * Just initialize the input buffer... */ curbit = 0; lastbit = 0; last_byte = 0; done = 0; return (0); } if ((curbit + (unsigned)code_size) >= lastbit) { /* * Don't have enough bits to hold the code... */ if (done) { progress_error(HD_ERROR_READ_ERROR, "Not enough data left to read GIF compression code."); return (-1); /* Sorry, no more... */ } /* * Move last two bytes to front of buffer... */ if (last_byte > 1) { buf[0] = buf[last_byte - 2]; buf[1] = buf[last_byte - 1]; last_byte = 2; } else if (last_byte == 1) { buf[0] = buf[last_byte - 1]; last_byte = 1; } /* * Read in another buffer... */ if ((count = gif_get_block(fp, buf + last_byte)) <= 0) { /* * Whoops, no more data! */ done = 1; return (-1); } /* * Update buffer state... */ curbit = curbit + 8 * last_byte - lastbit; last_byte += (unsigned)count; lastbit = last_byte * 8; } for (ret = 0, i = curbit + (unsigned)code_size - 1, j = (unsigned)code_size; j > 0; i --, j --) ret = (ret << 1) | ((buf[i / 8] & bits[i & 7]) != 0); curbit += (unsigned)code_size; return (int)ret; } /* * 'gif_read_image()' - Read a GIF image stream... 
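 * (Interlaced GIFs deliver rows in four passes: every 8th row starting at
 * row 0, every 8th starting at row 4, every 4th starting at row 2, then
 * every 2nd starting at row 1; the xpasses/ypasses tables below encode
 * exactly this.)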
*/ static int /* I - 0 = success, -1 = failure */ gif_read_image(FILE *fp, /* I - Input file */ image_t *img, /* I - Image pointer */ gif_cmap_t cmap, /* I - Colormap */ int interlace, /* I - Non-zero = interlaced image */ int transparent) /* I - Transparent color */ { uchar code_size, /* Code size */ *temp; /* Current pixel */ int xpos, /* Current X position */ ypos, /* Current Y position */ pass; /* Current pass */ int pixel; /* Current pixel */ static int xpasses[4] = { 8, 8, 4, 2 }, ypasses[5] = { 0, 4, 2, 1, 999999 }; xpos = 0; ypos = 0; pass = 0; code_size = (uchar)getc(fp); if (gif_read_lzw(fp, 1, code_size) < 0) return (-1); temp = img->pixels; while ((pixel = gif_read_lzw(fp, 0, code_size)) >= 0 && pixel < 256) { temp[0] = cmap[pixel][0]; if (img->depth > 1) { temp[1] = cmap[pixel][1]; temp[2] = cmap[pixel][2]; } if (pixel == transparent) image_set_mask(img, xpos, ypos); xpos ++; temp += img->depth; if (xpos == img->width) { xpos = 0; if (interlace) { ypos += xpasses[pass]; temp += (xpasses[pass] - 1) * img->width * img->depth; if (ypos >= img->height) { pass ++; ypos = ypasses[pass]; temp = img->pixels + ypos * img->width * img->depth; } } else ypos ++; } if (ypos >= img->height) break; } return (0); } /* * 'gif_read_lzw()' - Read a byte from the LZW stream... */ static int /* I - Byte from stream */ gif_read_lzw(FILE *fp, /* I - File to read from */ int first_time, /* I - 1 = first time, 0 = not first time */ int input_code_size) /* I - Code size in bits */ { int i, /* Looping var */ code, /* Current code */ incode; /* Input code */ static short fresh = 0, /* 1 = empty buffers */ code_size = 0, /* Current code size */ set_code_size = 0, /* Initial code size set */ max_code = 0, /* Maximum code used */ max_code_size = 0, /* Maximum code size */ firstcode = 0, /* First code read */ oldcode = 0, /* Last code read */ clear_code = 0, /* Clear code for LZW input */ end_code = 0, /* End code for LZW input */ table[2][4096], /* String table */ stack[8192], /* Output stack */ *sp = stack; /* Current stack pointer */ if (first_time) { /* * Setup LZW state... */ set_code_size = (short)input_code_size; code_size = set_code_size + 1; clear_code = (short)(1 << set_code_size); end_code = clear_code + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; /* * Initialize input buffers... */ gif_get_code(fp, 0, 1); /* * Wipe the decompressor table... 
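 * (Codes 0 .. clear_code-1 stand for literal bytes, clear_code resets the
 * table, end_code ends the stream, and newly built strings are assigned
 * codes from clear_code + 2 upward, widening code_size each time
 * max_code_size is reached.)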
*/ fresh = 1; for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][0] = 0; sp = stack; return (0); } else if (fresh) { fresh = 0; do firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); while (firstcode == clear_code); return (firstcode); } if (sp > stack) return (*--sp); while ((code = gif_get_code (fp, code_size, 0)) >= 0) { if (code == clear_code) { for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][i] = 0; code_size = set_code_size + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; sp = stack; firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); return (firstcode); } else if (code == end_code) { uchar buf[260]; if (!gif_eof) while (gif_get_block(fp, buf) > 0); return (-2); } incode = code; if (code >= max_code) { *sp++ = firstcode; code = oldcode; } while (code >= clear_code) { *sp++ = table[1][code]; if (code == table[0][code]) return (255); code = table[0][code]; } *sp++ = firstcode = table[1][code]; code = max_code; if (code < 4096) { table[0][code] = oldcode; table[1][code] = firstcode; max_code ++; if (max_code >= max_code_size && max_code_size < 4096) { max_code_size *= 2; code_size ++; } } oldcode = (short)incode; if (sp > stack) return (*--sp); } return (code); } /* * 'image_compare()' - Compare two image filenames... */ static int /* O - Result of comparison */ image_compare(image_t **img1, /* I - First image */ image_t **img2) /* I - Second image */ { #ifdef WIN32 return (strcasecmp((*img1)->filename, (*img2)->filename)); #else return (strcmp((*img1)->filename, (*img2)->filename)); #endif /* WIN32 */ } /* * 'image_copy()' - Copy image files to the destination directory... */ void image_copy(const char *src, /* I - Source file */ const char *realsrc, /* I - Real source file */ const char *destpath) /* I - Destination path */ { char dest[255]; /* Destination file */ FILE *in, *out; /* Input/output files */ uchar buffer[8192]; /* Data buffer */ int nbytes; /* Number of bytes in buffer */ if (!src || !realsrc || !destpath) return; /* * Figure out the destination filename... */ if (!strcmp(destpath, ".")) strlcpy(dest, file_basename(src), sizeof(dest)); else snprintf(dest, sizeof(dest), "%s/%s", destpath, file_basename(src)); if (!strcmp(dest, realsrc)) return; /* * Open files and copy... */ if ((in = fopen(realsrc, "rb")) == NULL) { progress_error(HD_ERROR_READ_ERROR, "Unable to open \"%s\" - %s", realsrc, strerror(errno)); return; } if ((out = fopen(dest, "wb")) == NULL) { progress_error(HD_ERROR_READ_ERROR, "Unable to create \"%s\" - %s", dest, strerror(errno)); fclose(in); return; } while ((nbytes = fread(buffer, 1, sizeof(buffer), in)) > 0) fwrite(buffer, 1, (size_t)nbytes, out); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(in); fclose(out); } /* * 'image_find()' - Find an image file in memory... */ image_t * /* O - Pointer to image */ image_find(const char *filename,/* I - Name of image file */ int load_data)/* I - 1 = load image data */ { image_t key, /* Search key... */ *keyptr, /* Pointer to search key... */ **match; /* Matching image */ /* * Range check... */ if (filename == NULL) return (NULL); if (filename[0] == '\0') /* Microsoft VC++ runtime bug workaround... */ return (NULL); /* * See if we've already loaded it... 
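 * (The images array is kept sorted by filename, image_load() re-qsort()s
 * it after each insertion, so bsearch() with image_compare() works here.)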
*/ if (num_images > 0) { strlcpy(key.filename, filename, sizeof(key.filename)); keyptr = &key; match = (image_t **)bsearch(&keyptr, images, (size_t)num_images, sizeof(image_t *), (int (*)(const void *, const void *))image_compare); if (match != NULL) { if (load_data && !(*match)->pixels) return (image_load((*match)->filename, (*match)->depth == 1, 1)); else return (*match); } } return (NULL); } /* * 'image_flush_cache()' - Flush the image cache... */ void image_flush_cache(void) { size_t i; /* Looping var */ /* * Free the memory used by each image... */ for (i = 0; i < num_images; i ++) { if (images[i]->mask) free(images[i]->mask); if (images[i]->pixels) free(images[i]->pixels); free(images[i]); } if (alloc_images) { free(images); alloc_images = 0; } num_images = 0; } /* * 'image_getlist()' - Get the list of images that are loaded. */ int /* O - Number of images in array */ image_getlist(image_t ***ptrs) /* O - Pointer to images array */ { *ptrs = images; return (num_images); } /* * 'image_load()' - Load an image file from disk... */ image_t * /* O - Pointer to image */ image_load(const char *filename,/* I - Name of image file */ int gray, /* I - 0 = color, 1 = grayscale */ int load_data)/* I - 1 = load image data, 0 = just info */ { #ifdef DEBUG int i; /* Looping var */ #endif // DEBUG FILE *fp; /* File pointer */ uchar header[16]; /* First 16 bytes of file */ image_t *img, /* New image buffer */ key, /* Search key... */ *keyptr, /* Pointer to search key... */ **match, /* Matching image */ **temp; /* Temporary array pointer */ int status; /* Status of load... */ const char *realname; /* Real filename */ /* * Range check... */ if (filename == NULL) return (NULL); if (filename[0] == '\0') /* Microsoft VC++ runtime bug workaround... */ return (NULL); DEBUG_printf(("image_load(filename=\"%s\", gray=%d, load_data=%d)\n", filename, gray, load_data)); DEBUG_printf(("Path = \"%s\"\n", Path)); /* * See if we've already loaded it... */ if (num_images > 0) { strlcpy(key.filename, filename, sizeof(key.filename)); keyptr = &key; match = (image_t **)bsearch(&keyptr, images, (size_t)num_images, sizeof(image_t *), (int (*)(const void *, const void *))image_compare); if (match != NULL && (!load_data || (*match)->pixels)) { (*match)->use ++; return (*match); } } else match = NULL; /* * Figure out the file type... */ if ((realname = file_find(Path, filename)) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to find image file \"%s\"!", filename); return (NULL); } if ((fp = fopen(realname, "rb")) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open image file \"%s\" (%s) for reading!", filename, realname); return (NULL); } if (fread(header, 1, sizeof(header), fp) == 0) { progress_error(HD_ERROR_READ_ERROR, "Unable to read image file \"%s\"!", filename); fclose(fp); return (NULL); } #ifdef DEBUG printf("Header for \"%s\" (%s): \"", filename, realname); for (i = 0; i < (int)sizeof(header); i ++) if (header[i] < ' ' || header[i] >= 127) printf("\\x%02X", header[i]); else putchar(header[i]); puts("\"\n"); printf("match = %p\n", (void *)match); #endif // DEBUG rewind(fp); // See if the images array needs to be resized... if (!match) { if (num_images >= alloc_images) { // Yes... 
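    // Grow the image cache in ALLOC_FILES-sized chunks so we don't
    // realloc() on every new image.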
alloc_images += ALLOC_FILES; if (num_images == 0) temp = (image_t **)malloc(sizeof(image_t *) * alloc_images); else temp = (image_t **)realloc(images, sizeof(image_t *) * alloc_images); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d images - %s", (int)alloc_images, strerror(errno)); fclose(fp); return (NULL); } images = temp; } // Allocate memory... img = (image_t *)calloc(sizeof(image_t), 1); if (img == NULL) { progress_error(HD_ERROR_READ_ERROR, "Unable to allocate memory for \"%s\"", filename); fclose(fp); return (NULL); } images[num_images] = img; strlcpy(img->filename, filename, sizeof(img->filename)); img->use = 1; } else img = *match; // Load the image as appropriate... if (memcmp(header, "GIF87a", 6) == 0 || memcmp(header, "GIF89a", 6) == 0) status = image_load_gif(img, fp, gray, load_data); else if (memcmp(header, "BM", 2) == 0) status = image_load_bmp(img, fp, gray, load_data); #ifdef HAVE_LIBPNG else if (memcmp(header, "\211PNG", 4) == 0) status = image_load_png(img, fp, gray, load_data); #endif // HAVE_LIBPNG #ifdef HAVE_LIBJPEG else if (memcmp(header, "\377\330\377", 3) == 0) status = image_load_jpeg(img, fp, gray, load_data); #endif // HAVE_LIBJPEG else { progress_error(HD_ERROR_BAD_FORMAT, "Unknown image file format for \"%s\".", file_rlookup(filename)); fclose(fp); free(img); return (NULL); } fclose(fp); if (status) { progress_error(HD_ERROR_READ_ERROR, "Unable to load image file \"%s\"!", file_rlookup(filename)); if (!match) free(img); return (NULL); } if (!match) { num_images ++; if (num_images > 1) qsort(images, num_images, sizeof(image_t *), (int (*)(const void *, const void *))image_compare); } return (img); } /* * 'image_load_bmp()' - Read a BMP image file. */ static int /* O - 0 = success, -1 = fail */ image_load_bmp(image_t *img, /* I - Image to load into */ FILE *fp, /* I - File to read from */ int gray, /* I - Grayscale image? */ int load_data)/* I - 1 = load image data, 0 = just info */ { int info_size, /* Size of info header */ depth, /* Depth of image (bits) */ compression, /* Type of compression */ colors_used, /* Number of colors used */ x, y, /* Looping vars */ color, /* Color of RLE pixel */ count, /* Number of times to repeat */ temp, /* Temporary color */ align; /* Alignment bytes */ uchar bit, /* Bit in image */ byte; /* Byte in image */ uchar *ptr; /* Pointer into pixels */ uchar colormap[256][4];/* Colormap */ // Get the header... getc(fp); /* Skip "BM" sync chars */ getc(fp); read_dword(fp); /* Skip size */ read_word(fp); /* Skip reserved stuff */ read_word(fp); read_dword(fp); // Then the bitmap information... info_size = (int)read_dword(fp); img->width = read_long(fp); img->height = read_long(fp); read_word(fp); depth = read_word(fp); compression = (int)read_dword(fp); read_dword(fp); read_long(fp); read_long(fp); colors_used = (int)read_dword(fp); read_dword(fp); if (img->width <= 0 || img->width > 8192 || img->height <= 0 || img->height > 8192 || info_size < 0) return (-1); if (info_size > 40) { for (info_size -= 40; info_size > 0; info_size --) getc(fp); } // Get colormap... if (colors_used == 0 && depth <= 8) colors_used = 1 << depth; else if (colors_used < 0 || colors_used > 256) return (-1); fread(colormap, (size_t)colors_used, 4, fp); // Setup image and buffers... img->depth = gray ? 
1 : 3; // If this image is indexed and we are writing an encrypted PDF file, bump the use count so // we create an image object (Acrobat 6 bug workaround) if (depth <= 8 && Encryption) img->use ++; // Return now if we only need the dimensions... if (!load_data) return (0); img->pixels = (uchar *)malloc((size_t)(img->width * img->height * img->depth)); if (img->pixels == NULL) return (-1); if (gray && depth <= 8) { // Convert colormap to grayscale... for (color = colors_used - 1; color >= 0; color --) colormap[color][0] = (colormap[color][2] * 31 + colormap[color][1] * 61 + colormap[color][0] * 8) / 100; } // Read the image data... color = 0; count = 0; align = 0; byte = 0; temp = 0; for (y = img->height - 1; y >= 0; y --) { ptr = img->pixels + y * img->width * img->depth; switch (depth) { case 1 : /* Bitmap */ for (x = img->width, bit = 128; x > 0; x --) { if (bit == 128) byte = (uchar)getc(fp); if (byte & bit) { if (!gray) { *ptr++ = colormap[1][2]; *ptr++ = colormap[1][1]; } *ptr++ = colormap[1][0]; } else { if (!gray) { *ptr++ = colormap[0][2]; *ptr++ = colormap[0][1]; } *ptr++ = colormap[0][0]; } if (bit > 1) bit >>= 1; else bit = 128; } /* * Read remaining bytes to align to 32 bits... */ for (temp = (img->width + 7) / 8; temp & 3; temp ++) getc(fp); break; case 4 : /* 16-color */ for (x = img->width, bit = 0xf0; x > 0; x --) { /* * Get a new count as needed... */ if (compression != BI_RLE4 && count == 0) { count = 2; color = -1; } if (count == 0) { while (align > 0) { align --; getc(fp); } if ((count = getc(fp)) == 0) { if ((count = getc(fp)) == 0) { /* * End of line... */ x ++; continue; } else if (count == 1) { /* * End of image... */ break; } else if (count == 2) { /* * Delta... */ count = getc(fp) * getc(fp) * img->width; color = 0; } else { /* * Absolute... */ color = -1; align = ((4 - (count & 3)) / 2) & 1; } } else color = getc(fp); } /* * Get a new color as needed... */ count --; if (bit == 0xf0) { if (color < 0) temp = getc(fp) & 255; else temp = color; /* * Copy the color value... */ if (!gray) { *ptr++ = colormap[temp >> 4][2]; *ptr++ = colormap[temp >> 4][1]; } *ptr++ = colormap[temp >> 4][0]; bit = 0x0f; } else { /* * Copy the color value... */ if (!gray) { *ptr++ = colormap[temp & 15][2]; *ptr++ = colormap[temp & 15][1]; } *ptr++ = colormap[temp & 15][0]; bit = 0xf0; } } break; case 8 : /* 256-color */ for (x = img->width; x > 0; x --) { /* * Get a new count as needed... */ if (compression != BI_RLE8) { count = 1; color = -1; } if (count == 0) { while (align > 0) { align --; getc(fp); } if ((count = getc(fp)) == 0) { if ((count = getc(fp)) == 0) { /* * End of line... */ x ++; continue; } else if (count == 1) { /* * End of image... */ break; } else if (count == 2) { /* * Delta... */ count = getc(fp) * getc(fp) * img->width; color = 0; } else { /* * Absolute... */ color = -1; align = (2 - (count & 1)) & 1; } } else color = getc(fp); } /* * Get a new color as needed... */ if (color < 0) temp = getc(fp); else temp = color; count --; /* * Copy the color value... */ if (!gray) { *ptr++ = colormap[temp][2]; *ptr++ = colormap[temp][1]; } *ptr++ = colormap[temp][0]; } break; case 24 : /* 24-bit RGB */ if (gray) { for (x = img->width; x > 0; x --) { temp = getc(fp) * 8; temp += getc(fp) * 61; temp += getc(fp) * 31; *ptr++ = (uchar)(temp / 100); } } else { for (x = img->width; x > 0; x --, ptr += 3) { ptr[2] = (uchar)getc(fp); ptr[1] = (uchar)getc(fp); ptr[0] = (uchar)getc(fp); } } /* * Read remaining bytes to align to 32 bits... 
*/ for (temp = img->width * 3; temp & 3; temp ++) getc(fp); break; } } return (0); } /* * 'image_load_gif()' - Load a GIF image file... */ static int /* O - 0 = success, -1 = fail */ image_load_gif(image_t *img, /* I - Image pointer */ FILE *fp, /* I - File to load from */ int gray, /* I - 0 = color, 1 = grayscale */ int load_data)/* I - 1 = load image data, 0 = just info */ { uchar buf[1024]; /* Input buffer */ gif_cmap_t cmap; /* Colormap */ int ncolors, /* Bits per pixel */ transparent; /* Transparent color index */ /* * Read the header; we already know it is a GIF file... */ fread(buf, 13, 1, fp); img->width = (buf[7] << 8) | buf[6]; img->height = (buf[9] << 8) | buf[8]; ncolors = 2 << (buf[10] & 0x07); if (img->width <= 0 || img->width > 32767 || img->height <= 0 || img->height > 32767) return (-1); // If we are writing an encrypted PDF file, bump the use count so we create // an image object (Acrobat 6 bug workaround) if (Encryption) img->use ++; if (buf[10] & GIF_COLORMAP) if (gif_read_cmap(fp, ncolors, cmap, &gray)) return (-1); transparent = -1; while (1) { switch (getc(fp)) { case ';' : /* End of image */ return (-1); /* Early end of file */ case '!' : /* Extension record */ buf[0] = (uchar)getc(fp); if (buf[0] == 0xf9) /* Graphic Control Extension */ { gif_get_block(fp, buf); if (buf[0] & 1) /* Get transparent color index */ transparent = buf[3]; } while (gif_get_block(fp, buf) != 0); break; case ',' : /* Image data */ fread(buf, 9, 1, fp); if (buf[8] & GIF_COLORMAP) { ncolors = 2 << (buf[8] & 0x07); if (gif_read_cmap(fp, ncolors, cmap, &gray)) return (-1); } img->width = (buf[5] << 8) | buf[4]; img->height = (buf[7] << 8) | buf[6]; img->depth = gray ? 1 : 3; if (img->width <= 0 || img->width > 32767 || img->height <= 0 || img->height > 32767) return (-1); if (transparent >= 0) { /* * Map transparent color to background color... */ if (BodyColor[0]) { float rgb[3]; /* RGB color */ get_color((uchar *)BodyColor, rgb); cmap[transparent][0] = (uchar)(rgb[0] * 255.0f + 0.5f); cmap[transparent][1] = (uchar)(rgb[1] * 255.0f + 0.5f); cmap[transparent][2] = (uchar)(rgb[2] * 255.0f + 0.5f); } else { cmap[transparent][0] = 255; cmap[transparent][1] = 255; cmap[transparent][2] = 255; } /* * Allocate a mask image... */ image_need_mask(img); } if (!load_data) return (0); img->pixels = (uchar *)malloc((size_t)(img->width * img->height * img->depth)); if (img->pixels == NULL) return (-1); return (gif_read_image(fp, img, cmap, buf[8] & GIF_INTERLACE, transparent)); } } } #ifdef HAVE_LIBJPEG typedef struct hd_jpeg_err_s // JPEG error manager extension { struct jpeg_error_mgr jerr; // JPEG error manager information jmp_buf retbuf; // setjmp() return buffer char message[JMSG_LENGTH_MAX]; // Last error message } hd_jpeg_err_t; /* * 'image_load_jpeg()' - Load a JPEG image file. 
*/ static int /* O - 0 = success, -1 = fail */ image_load_jpeg(image_t *img, /* I - Image pointer */ FILE *fp, /* I - File to load from */ int gray, /* I - 0 = color, 1 = grayscale */ int load_data)/* I - 1 = load image data, 0 = just info */ { struct jpeg_decompress_struct cinfo; /* Decompressor info */ hd_jpeg_err_t jerr; // JPEG error handler JSAMPROW row; /* Sample row pointer */ jpeg_std_error(&jerr.jerr); jerr.jerr.error_exit = jpeg_error_handler; if (setjmp(jerr.retbuf)) { progress_error(HD_ERROR_BAD_FORMAT, "%s (%s)", jerr.message, file_rlookup(img->filename)); jpeg_destroy_decompress(&cinfo); return (-1); } cinfo.err = (struct jpeg_error_mgr *)&jerr; jpeg_create_decompress(&cinfo); jpeg_stdio_src(&cinfo, fp); jpeg_read_header(&cinfo, (boolean)1); cinfo.quantize_colors = FALSE; if (gray || cinfo.num_components == 1) { cinfo.out_color_space = JCS_GRAYSCALE; cinfo.out_color_components = 1; cinfo.output_components = 1; } else if (cinfo.num_components != 3) { jpeg_destroy_decompress(&cinfo); progress_error(HD_ERROR_BAD_FORMAT, "CMYK JPEG files are not supported! (%s)", file_rlookup(img->filename)); return (-1); } else { cinfo.out_color_space = JCS_RGB; cinfo.out_color_components = 3; cinfo.output_components = 3; } jpeg_calc_output_dimensions(&cinfo); img->width = (int)cinfo.output_width; img->height = (int)cinfo.output_height; img->depth = (int)cinfo.output_components; if (!load_data) { jpeg_destroy_decompress(&cinfo); return (0); } img->pixels = (uchar *)malloc((size_t)(img->width * img->height * img->depth)); if (img->pixels == NULL) { jpeg_destroy_decompress(&cinfo); return (-1); } jpeg_start_decompress(&cinfo); while (cinfo.output_scanline < cinfo.output_height) { row = (JSAMPROW)(img->pixels + (size_t)cinfo.output_scanline * (size_t)cinfo.output_width * (size_t)cinfo.output_components); jpeg_read_scanlines(&cinfo, &row, (JDIMENSION)1); } jpeg_finish_decompress(&cinfo); jpeg_destroy_decompress(&cinfo); return (0); } #endif // HAVE_LIBJPEG #ifdef HAVE_LIBPNG /* * 'image_load_png()' - Load a PNG image file. */ static int /* O - 0 = success, -1 = fail */ image_load_png(image_t *img, /* I - Image pointer */ FILE *fp, /* I - File to read from */ int gray, /* I - 0 = color, 1 = grayscale */ int load_data)/* I - 1 = load image data, 0 = just info */ { int i, j; /* Looping vars */ png_structp pp; /* PNG read pointer */ png_infop info; /* PNG info pointers */ int depth; /* Input image depth */ png_bytep *rows = NULL; /* PNG row pointers */ uchar *inptr, /* Input pixels */ *outptr; /* Output pixels */ int color_type, /* PNG color mode */ bit_depth; /* PNG bit depth */ /* * Setup the PNG data structures... */ pp = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); if (!pp) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for PNG file: %s", strerror(errno)); return (-1); } info = png_create_info_struct(pp); if (!info) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for PNG info: %s", strerror(errno)); png_destroy_read_struct(&pp, NULL, NULL); return (-1); } if (setjmp(png_jmpbuf(pp))) { progress_error(HD_ERROR_BAD_FORMAT, "PNG file contains errors!"); png_destroy_read_struct(&pp, &info, NULL); if (img != NULL) { free(img->pixels); img->pixels = NULL; } free(rows); rows = NULL; return (-1); } /* * Initialize the PNG read "engine"... */ png_init_io(pp, fp); # if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED) // Don't throw errors with "invalid" sRGB profiles produced by Adobe apps. 
png_set_option(pp, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON); # endif // PNG_SKIP_sRGB_CHECK_PROFILE && PNG_SET_OPTION_SUPPORTED /* * Get the image dimensions and convert to grayscale or RGB... */ png_read_info(pp, info); bit_depth = png_get_bit_depth(pp, info); color_type = png_get_color_type(pp, info); if (png_get_valid(pp, info, PNG_INFO_tRNS)) { png_set_tRNS_to_alpha(pp); color_type |= PNG_COLOR_MASK_ALPHA; } if (color_type & PNG_COLOR_MASK_PALETTE) { png_set_palette_to_rgb(pp); // If we are writing an encrypted PDF file, bump the use count so we create // an image object (Acrobat 6 bug workaround) if (Encryption) img->use ++; } else if (!(color_type & PNG_COLOR_MASK_COLOR) && bit_depth < 8) { png_set_expand_gray_1_2_4_to_8(pp); } else if (bit_depth == 16) { # if PNG_LIBPNG_VER >= 10504 png_set_scale_16(pp); # else png_set_strip_16(pp); # endif // PNG_LIBPNG_VER >= 10504 } if (color_type & PNG_COLOR_MASK_COLOR) { depth = 3; img->depth = gray ? 1 : 3; } else { depth = 1; img->depth = 1; } img->width = (int)png_get_image_width(pp, info); img->height = (int)png_get_image_height(pp, info); if (color_type & PNG_COLOR_MASK_ALPHA) { if ((PSLevel == 0 && PDFVersion >= 14) || PSLevel == 3) image_need_mask(img, 8); else if (PSLevel == 0 && PDFVersion == 13) image_need_mask(img, 2); else image_need_mask(img); depth ++; } # ifdef DEBUG printf("bit_depth=%d, color_type=0x%04x, depth=%d, img->width=%d, img->height=%d, img->depth=%d\n", bit_depth, color_type, depth, img->width, img->height, img->depth); if (color_type & PNG_COLOR_MASK_COLOR) puts(" COLOR"); else puts(" GRAYSCALE"); if (color_type & PNG_COLOR_MASK_ALPHA) puts(" ALPHA"); if (color_type & PNG_COLOR_MASK_PALETTE) puts(" PALETTE"); # endif // DEBUG if (!load_data) { png_destroy_read_struct(&pp, &info, NULL); return (0); } img->pixels = (uchar *)calloc(1, (size_t)(img->width * img->height * depth)); /* * Allocate pointers... */ rows = (png_bytep *)calloc(png_get_image_height(pp, info), sizeof(png_bytep)); for (i = 0; i < (int)png_get_image_height(pp, info); i ++) rows[i] = img->pixels + i * img->width * depth; /* * Read the image, handling interlacing as needed... */ for (i = png_set_interlace_handling(pp); i > 0; i --) png_read_rows(pp, rows, NULL, (png_uint_32)img->height); /* * Generate the alpha mask as necessary... */ if (color_type & PNG_COLOR_MASK_ALPHA) { # ifdef DEBUG for (inptr = img->pixels, i = 0; i < img->height; i ++) { for (j = 0; j < img->width; j ++, inptr += depth) switch (depth) { case 2 : printf(" %02X%02X", inptr[0], inptr[1]); break; case 4 : printf(" %02X%02X%02X%02X", inptr[0], inptr[1], inptr[2], inptr[3]); break; } putchar('\n'); } # endif // DEBUG for (inptr = img->pixels + depth - 1, i = 0; i < img->height; i ++) for (j = 0; j < img->width; j ++, inptr += depth) image_set_mask(img, j, i, *inptr); } /* * Reformat the data as necessary for the reader... */ if (gray && (color_type & PNG_COLOR_MASK_COLOR)) { /* * Grayscale output needed... */ for (inptr = img->pixels, outptr = img->pixels, i = img->width * img->height; i > 0; inptr += depth, outptr ++, i --) *outptr = (31 * inptr[0] + 61 * inptr[1] + 8 * inptr[2]) / 100; } else if (img->depth != depth) { /* * Remove alpha from final array... 
*/ if (depth == 4) { for (inptr = img->pixels, outptr = img->pixels, i = img->width * img->height; i > 0; inptr ++, i --) { *outptr++ = *inptr++; *outptr++ = *inptr++; *outptr++ = *inptr++; } } else { for (inptr = img->pixels, outptr = img->pixels, i = img->width * img->height; i > 0; inptr ++, i --) *outptr++ = *inptr++; } } /* * Free memory and return... */ png_read_end(pp, info); png_destroy_read_struct(&pp, &info, NULL); free(rows); return (0); } #endif // HAVE_LIBPNG /* * 'image_need_mask()' - Allocate memory for the image mask... */ static void image_need_mask(image_t *img, /* I - Image to add mask to */ int scaling) /* I - Scaling for mask image */ { size_t size; /* Byte size of mask image */ if (img == NULL || img->mask != NULL) return; /* * Figure out the size of the mask image, and then allocate and set all the * bits needed... */ img->maskscale = scaling; if (scaling == 8) { // Alpha image img->maskwidth = img->width; size = (size_t)(img->width * img->height); } else { // Alpha mask img->maskwidth = (img->width * scaling + 7) / 8; size = (size_t)(img->maskwidth * img->height * scaling + 1); } img->mask = (uchar *)calloc(size, 1); } /* * 'image_set_mask()' - Set a bit in the image mask. */ static void image_set_mask(image_t *img, /* I - Image to operate on */ int x, /* I - X coordinate */ int y, /* I - Y coordinate */ uchar alpha) /* I - Alpha value */ { int i, j; /* Looping vars */ uchar *maskptr; /* Pointer into mask image */ static uchar masks[8] = /* Masks for each bit */ { 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 }; static uchar dither[4][4] = // Simple 4x4 clustered-dot dither { { 0, 2, 15, 6 }, { 4, 12, 9, 11 }, { 14, 7, 1, 3 }, { 8, 10, 5, 13 } }; if (img == NULL || img->mask == NULL || x < 0 || x >= img->width || y < 0 || y >= img->height) return; if (img->maskscale == 8) { // Store the alpha value directly... if (PSLevel) img->mask[y * img->maskwidth + x] = 255 - alpha; else img->mask[y * img->maskwidth + x] = alpha; } else { // Store an alpha mask... x *= img->maskscale; y *= img->maskscale; alpha >>= 4; for (i = 0; i < img->maskscale; i ++, y ++, x -= img->maskscale) for (j = 0; j < img->maskscale; j ++, x ++) { maskptr = img->mask + y * img->maskwidth + x / 8; if (alpha <= dither[x & 3][y & 3]) *maskptr |= masks[x & 7]; } } } /* * 'image_unload()' - Unload an image from memory. */ void image_unload(image_t *img) // I - Image { if (!img) return; if (!img->use || !img->pixels) return; if (img->obj) img->use = 0; else img->use --; if (img->use) return; free(img->pixels); img->pixels = NULL; } #ifdef HAVE_LIBJPEG /* * 'jpeg_error_handler()' - Handle JPEG errors by not exiting. */ static void jpeg_error_handler(j_common_ptr p) // Common JPEG data { hd_jpeg_err_t *jerr = (hd_jpeg_err_t *)p->err; // JPEG error handler // Save the error message in the string buffer... (jerr->jerr.format_message)(p, jerr->message); // Return to the point we called setjmp()... longjmp(jerr->retbuf, 1); } #endif // HAVE_LIBJPEG /* * 'read_word()' - Read a 16-bit unsigned integer. */ static unsigned short /* O - 16-bit unsigned integer */ read_word(FILE *fp) /* I - File to read from */ { unsigned char b0, b1; /* Bytes from file */ b0 = (uchar)getc(fp); b1 = (uchar)getc(fp); return (unsigned short)((b1 << 8) | b0); } /* * 'read_dword()' - Read a 32-bit unsigned integer. 
*/ static unsigned int /* O - 32-bit unsigned integer */ read_dword(FILE *fp) /* I - File to read from */ { unsigned char b0, b1, b2, b3; /* Bytes from file */ b0 = (uchar)getc(fp); b1 = (uchar)getc(fp); b2 = (uchar)getc(fp); b3 = (uchar)getc(fp); return (unsigned)((((((b3 << 8) | b2) << 8) | b1) << 8) | b0); } /* * 'read_long()' - Read a 32-bit signed integer. */ static int /* O - 32-bit signed integer */ read_long(FILE *fp) /* I - File to read from */ { unsigned char b0, b1, b2, b3; /* Bytes from file */ b0 = (uchar)getc(fp); b1 = (uchar)getc(fp); b2 = (uchar)getc(fp); b3 = (uchar)getc(fp); return ((int)(((((b3 << 8) | b2) << 8) | b1) << 8) | b0); }
/* * Image handling routines for HTMLDOC, a HTML document processing program. * * Copyright © 2011-2022 by Michael R Sweet. * Copyright © 1997-2010 by Easy Software Products. All rights reserved. * * This program is free software. Distribution and use rights are outlined in * the file "COPYING". */ /* * Include necessary headers. */ #include "htmldoc.h" #include <setjmp.h> #ifdef HAVE_LIBJPEG extern "C" { /* Workaround for JPEG header problems... */ # include <jpeglib.h> /* JPEG/JFIF image definitions */ } #endif // HAVE_JPEG #ifdef HAVE_LIBPNG # include <png.h> /* Portable Network Graphics (PNG) definitions */ #endif // HAVE_LIBPNG /* * GIF definitions... */ #define GIF_INTERLACE 0x40 #define GIF_COLORMAP 0x80 typedef uchar gif_cmap_t[256][3]; /* * BMP definitions... */ #ifndef BI_RGB # define BI_RGB 0 /* No compression - straight BGR data */ # define BI_RLE8 1 /* 8-bit run-length compression */ # define BI_RLE4 2 /* 4-bit run-length compression */ # define BI_BITFIELDS 3 /* RGB bitmap with RGB masks */ #endif /* !BI_RGB */ /* * Local globals... */ static size_t num_images = 0, /* Number of images in cache */ alloc_images = 0; /* Allocated images */ static image_t **images = NULL; /* Images in cache */ static int gif_eof = 0; /* Did we hit EOF? */ /* * Local functions... */ static int gif_read_cmap(FILE *fp, int ncolors, gif_cmap_t cmap, int *gray); static int gif_get_block(FILE *fp, uchar *buffer); static int gif_get_code (FILE *fp, int code_size, int first_time); static int gif_read_image(FILE *fp, image_t *img, gif_cmap_t cmap, int interlace, int transparent); static int gif_read_lzw(FILE *fp, int first_time, int input_code_size); static int image_compare(image_t **img1, image_t **img2); static int image_load_bmp(image_t *img, FILE *fp, int gray, int load_data); static int image_load_gif(image_t *img, FILE *fp, int gray, int load_data); #ifdef HAVE_LIBJPEG static int image_load_jpeg(image_t *img, FILE *fp, int gray, int load_data); static void jpeg_error_handler(j_common_ptr); #endif // HAVE_LIBJPEG #ifdef HAVE_LIBPNG static int image_load_png(image_t *img, FILE *fp, int gray, int load_data); #endif // HAVE_LIBPNG static void image_need_mask(image_t *img, int scaling = 1); static void image_set_mask(image_t *img, int x, int y, uchar alpha = 0); static int read_long(FILE *fp); static unsigned short read_word(FILE *fp); static unsigned int read_dword(FILE *fp); /* * 'gif_read_cmap()' - Read the colormap from a GIF file... */ static int /* O - 0 on success, -1 on error */ gif_read_cmap(FILE *fp, /* I - File to read from */ int ncolors, /* I - Number of colors */ gif_cmap_t cmap, /* IO - Colormap array */ int *gray) /* IO - 1 = grayscale */ { int i; /* Looping var */ /* * Read the colormap... */ if (fread(cmap, 3, (size_t)ncolors, fp) < (size_t)ncolors) { progress_error(HD_ERROR_READ_ERROR, "Unable to read GIF colormap: %s", strerror(errno)); return (-1); } /* * Check to see if the colormap is a grayscale ramp... */ for (i = 0; i < ncolors; i ++) if (cmap[i][0] != cmap[i][1] || cmap[i][1] != cmap[i][2]) break; if (i == ncolors) { *gray = 1; return (0); } /* * If this needs to be a grayscale image, convert the RGB values to * luminance values... */ if (*gray) for (i = 0; i < ncolors; i ++) cmap[i][0] = (cmap[i][0] * 31 + cmap[i][1] * 61 + cmap[i][2] * 8) / 100; return (0); } /* * 'gif_get_block()' - Read a GIF data block... 
*/ static int /* O - Number characters read */ gif_get_block(FILE *fp, /* I - File to read from */ uchar *buf) /* I - Input buffer */ { int count; /* Number of character to read */ /* * Read the count byte followed by the data from the file... */ if ((count = getc(fp)) == EOF) { gif_eof = 1; return (-1); } else if (count == 0) gif_eof = 1; else if (fread(buf, 1, (size_t)count, fp) < (size_t)count) { progress_error(HD_ERROR_READ_ERROR, "Unable to read GIF block of %d bytes: %s", count, strerror(errno)); gif_eof = 1; return (-1); } else gif_eof = 0; return (count); } /* * 'gif_get_code()' - Get a LZW code from the file... */ static int /* O - LZW code */ gif_get_code(FILE *fp, /* I - File to read from */ int code_size, /* I - Size of code in bits */ int first_time) /* I - 1 = first time, 0 = not first time */ { unsigned i, j, /* Looping vars */ ret; /* Return value */ int count; /* Number of bytes read */ static uchar buf[280]; /* Input buffer */ static unsigned curbit, /* Current bit */ lastbit, /* Last bit in buffer */ done, /* Done with this buffer? */ last_byte; /* Last byte in buffer */ static unsigned bits[8] = /* Bit masks for codes */ { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; if (first_time) { /* * Just initialize the input buffer... */ curbit = 0; lastbit = 0; last_byte = 0; done = 0; return (0); } if ((curbit + (unsigned)code_size) >= lastbit) { /* * Don't have enough bits to hold the code... */ if (done) { progress_error(HD_ERROR_READ_ERROR, "Not enough data left to read GIF compression code."); return (-1); /* Sorry, no more... */ } /* * Move last two bytes to front of buffer... */ if (last_byte > 1) { buf[0] = buf[last_byte - 2]; buf[1] = buf[last_byte - 1]; last_byte = 2; } else if (last_byte == 1) { buf[0] = buf[last_byte - 1]; last_byte = 1; } /* * Read in another buffer... */ if ((count = gif_get_block(fp, buf + last_byte)) <= 0) { /* * Whoops, no more data! */ done = 1; return (-1); } /* * Update buffer state... */ curbit = curbit + 8 * last_byte - lastbit; last_byte += (unsigned)count; lastbit = last_byte * 8; } for (ret = 0, i = curbit + (unsigned)code_size - 1, j = (unsigned)code_size; j > 0; i --, j --) ret = (ret << 1) | ((buf[i / 8] & bits[i & 7]) != 0); curbit += (unsigned)code_size; return (int)ret; } /* * 'gif_read_image()' - Read a GIF image stream... 
*/ static int /* I - 0 = success, -1 = failure */ gif_read_image(FILE *fp, /* I - Input file */ image_t *img, /* I - Image pointer */ gif_cmap_t cmap, /* I - Colormap */ int interlace, /* I - Non-zero = interlaced image */ int transparent) /* I - Transparent color */ { uchar code_size, /* Code size */ *temp; /* Current pixel */ int xpos, /* Current X position */ ypos, /* Current Y position */ pass; /* Current pass */ int pixel; /* Current pixel */ static int xpasses[4] = { 8, 8, 4, 2 }, ypasses[5] = { 0, 4, 2, 1, 999999 }; xpos = 0; ypos = 0; pass = 0; code_size = (uchar)getc(fp); if (code_size > 12) { progress_error(HD_ERROR_READ_ERROR, "Bad GIF file \"%s\" - invalid code size %d.", img->filename, code_size); return (-1); } if (gif_read_lzw(fp, 1, code_size) < 0) return (-1); temp = img->pixels; while ((pixel = gif_read_lzw(fp, 0, code_size)) >= 0 && pixel < 256) { temp[0] = cmap[pixel][0]; if (img->depth > 1) { temp[1] = cmap[pixel][1]; temp[2] = cmap[pixel][2]; } if (pixel == transparent) image_set_mask(img, xpos, ypos); xpos ++; temp += img->depth; if (xpos == img->width) { xpos = 0; if (interlace) { ypos += xpasses[pass]; temp += (xpasses[pass] - 1) * img->width * img->depth; if (ypos >= img->height) { pass ++; ypos = ypasses[pass]; temp = img->pixels + ypos * img->width * img->depth; } } else ypos ++; } if (ypos >= img->height) break; } return (0); } /* * 'gif_read_lzw()' - Read a byte from the LZW stream... */ static int /* I - Byte from stream */ gif_read_lzw(FILE *fp, /* I - File to read from */ int first_time, /* I - 1 = first time, 0 = not first time */ int input_code_size) /* I - Code size in bits */ { int i, /* Looping var */ code, /* Current code */ incode; /* Input code */ static short fresh = 0, /* 1 = empty buffers */ code_size = 0, /* Current code size */ set_code_size = 0, /* Initial code size set */ max_code = 0, /* Maximum code used */ max_code_size = 0, /* Maximum code size */ firstcode = 0, /* First code read */ oldcode = 0, /* Last code read */ clear_code = 0, /* Clear code for LZW input */ end_code = 0, /* End code for LZW input */ table[2][4096], /* String table */ stack[8192], /* Output stack */ *sp = stack; /* Current stack pointer */ if (first_time) { /* * Setup LZW state... */ set_code_size = (short)input_code_size; code_size = set_code_size + 1; clear_code = (short)(1 << set_code_size); end_code = clear_code + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; /* * Initialize input buffers... */ gif_get_code(fp, 0, 1); /* * Wipe the decompressor table... 
*/ fresh = 1; for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][0] = 0; sp = stack; return (0); } else if (fresh) { fresh = 0; do firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); while (firstcode == clear_code); return (firstcode); } if (sp > stack) return (*--sp); while ((code = gif_get_code(fp, code_size, 0)) >= 0) { if (code == clear_code) { for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][i] = 0; code_size = set_code_size + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; sp = stack; firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); return (firstcode); } else if (code == end_code) { uchar buf[260]; if (!gif_eof) while (gif_get_block(fp, buf) > 0); return (-2); } incode = code; if (code >= max_code) { *sp++ = firstcode; code = oldcode; } while (code >= clear_code) { *sp++ = table[1][code]; if (code == table[0][code]) return (255); code = table[0][code]; } *sp++ = firstcode = table[1][code]; code = max_code; if (code < 4096) { table[0][code] = oldcode; table[1][code] = firstcode; max_code ++; if (max_code >= max_code_size && max_code_size < 4096) { max_code_size *= 2; code_size ++; } } oldcode = (short)incode; if (sp > stack) return (*--sp); } return (code); } /* * 'image_compare()' - Compare two image filenames... */ static int /* O - Result of comparison */ image_compare(image_t **img1, /* I - First image */ image_t **img2) /* I - Second image */ { #ifdef WIN32 return (strcasecmp((*img1)->filename, (*img2)->filename)); #else return (strcmp((*img1)->filename, (*img2)->filename)); #endif /* WIN32 */ } /* * 'image_copy()' - Copy image files to the destination directory... */ void image_copy(const char *src, /* I - Source file */ const char *realsrc, /* I - Real source file */ const char *destpath) /* I - Destination path */ { char dest[255]; /* Destination file */ FILE *in, *out; /* Input/output files */ uchar buffer[8192]; /* Data buffer */ int nbytes; /* Number of bytes in buffer */ if (!src || !realsrc || !destpath) return; /* * Figure out the destination filename... */ if (!strcmp(destpath, ".")) strlcpy(dest, file_basename(src), sizeof(dest)); else snprintf(dest, sizeof(dest), "%s/%s", destpath, file_basename(src)); if (!strcmp(dest, realsrc)) return; /* * Open files and copy... */ if ((in = fopen(realsrc, "rb")) == NULL) { progress_error(HD_ERROR_READ_ERROR, "Unable to open \"%s\" - %s", realsrc, strerror(errno)); return; } if ((out = fopen(dest, "wb")) == NULL) { progress_error(HD_ERROR_READ_ERROR, "Unable to create \"%s\" - %s", dest, strerror(errno)); fclose(in); return; } while ((nbytes = fread(buffer, 1, sizeof(buffer), in)) > 0) fwrite(buffer, 1, (size_t)nbytes, out); progress_error(HD_ERROR_NONE, "BYTES: %ld", ftell(out)); fclose(in); fclose(out); } /* * 'image_find()' - Find an image file in memory... */ image_t * /* O - Pointer to image */ image_find(const char *filename,/* I - Name of image file */ int load_data)/* I - 1 = load image data */ { image_t key, /* Search key... */ *keyptr, /* Pointer to search key... */ **match; /* Matching image */ /* * Range check... */ if (filename == NULL) return (NULL); if (filename[0] == '\0') /* Microsoft VC++ runtime bug workaround... */ return (NULL); /* * See if we've already loaded it... 
*/ if (num_images > 0) { strlcpy(key.filename, filename, sizeof(key.filename)); keyptr = &key; match = (image_t **)bsearch(&keyptr, images, (size_t)num_images, sizeof(image_t *), (int (*)(const void *, const void *))image_compare); if (match != NULL) { if (load_data && !(*match)->pixels) return (image_load((*match)->filename, (*match)->depth == 1, 1)); else return (*match); } } return (NULL); } /* * 'image_flush_cache()' - Flush the image cache... */ void image_flush_cache(void) { size_t i; /* Looping var */ /* * Free the memory used by each image... */ for (i = 0; i < num_images; i ++) { if (images[i]->mask) free(images[i]->mask); if (images[i]->pixels) free(images[i]->pixels); free(images[i]); } if (alloc_images) { free(images); alloc_images = 0; } num_images = 0; } /* * 'image_getlist()' - Get the list of images that are loaded. */ int /* O - Number of images in array */ image_getlist(image_t ***ptrs) /* O - Pointer to images array */ { *ptrs = images; return (num_images); } /* * 'image_load()' - Load an image file from disk... */ image_t * /* O - Pointer to image */ image_load(const char *filename,/* I - Name of image file */ int gray, /* I - 0 = color, 1 = grayscale */ int load_data)/* I - 1 = load image data, 0 = just info */ { #ifdef DEBUG int i; /* Looping var */ #endif // DEBUG FILE *fp; /* File pointer */ uchar header[16]; /* First 16 bytes of file */ image_t *img, /* New image buffer */ key, /* Search key... */ *keyptr, /* Pointer to search key... */ **match, /* Matching image */ **temp; /* Temporary array pointer */ int status; /* Status of load... */ const char *realname; /* Real filename */ /* * Range check... */ if (filename == NULL) return (NULL); if (filename[0] == '\0') /* Microsoft VC++ runtime bug workaround... */ return (NULL); DEBUG_printf(("image_load(filename=\"%s\", gray=%d, load_data=%d)\n", filename, gray, load_data)); DEBUG_printf(("Path = \"%s\"\n", Path)); /* * See if we've already loaded it... */ if (num_images > 0) { strlcpy(key.filename, filename, sizeof(key.filename)); keyptr = &key; match = (image_t **)bsearch(&keyptr, images, (size_t)num_images, sizeof(image_t *), (int (*)(const void *, const void *))image_compare); if (match != NULL && (!load_data || (*match)->pixels)) { (*match)->use ++; return (*match); } } else match = NULL; /* * Figure out the file type... */ if ((realname = file_find(Path, filename)) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to find image file \"%s\"!", filename); return (NULL); } if ((fp = fopen(realname, "rb")) == NULL) { progress_error(HD_ERROR_FILE_NOT_FOUND, "Unable to open image file \"%s\" (%s) for reading!", filename, realname); return (NULL); } if (fread(header, 1, sizeof(header), fp) == 0) { progress_error(HD_ERROR_READ_ERROR, "Unable to read image file \"%s\"!", filename); fclose(fp); return (NULL); } #ifdef DEBUG printf("Header for \"%s\" (%s): \"", filename, realname); for (i = 0; i < (int)sizeof(header); i ++) if (header[i] < ' ' || header[i] >= 127) printf("\\x%02X", header[i]); else putchar(header[i]); puts("\"\n"); printf("match = %p\n", (void *)match); #endif // DEBUG rewind(fp); // See if the images array needs to be resized... if (!match) { if (num_images >= alloc_images) { // Yes... 
alloc_images += ALLOC_FILES; if (num_images == 0) temp = (image_t **)malloc(sizeof(image_t *) * alloc_images); else temp = (image_t **)realloc(images, sizeof(image_t *) * alloc_images); if (temp == NULL) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for %d images - %s", (int)alloc_images, strerror(errno)); fclose(fp); return (NULL); } images = temp; } // Allocate memory... img = (image_t *)calloc(sizeof(image_t), 1); if (img == NULL) { progress_error(HD_ERROR_READ_ERROR, "Unable to allocate memory for \"%s\"", filename); fclose(fp); return (NULL); } images[num_images] = img; strlcpy(img->filename, filename, sizeof(img->filename)); img->use = 1; } else img = *match; // Load the image as appropriate... if (memcmp(header, "GIF87a", 6) == 0 || memcmp(header, "GIF89a", 6) == 0) status = image_load_gif(img, fp, gray, load_data); else if (memcmp(header, "BM", 2) == 0) status = image_load_bmp(img, fp, gray, load_data); #ifdef HAVE_LIBPNG else if (memcmp(header, "\211PNG", 4) == 0) status = image_load_png(img, fp, gray, load_data); #endif // HAVE_LIBPNG #ifdef HAVE_LIBJPEG else if (memcmp(header, "\377\330\377", 3) == 0) status = image_load_jpeg(img, fp, gray, load_data); #endif // HAVE_LIBJPEG else { progress_error(HD_ERROR_BAD_FORMAT, "Unknown image file format for \"%s\".", file_rlookup(filename)); fclose(fp); free(img); return (NULL); } fclose(fp); if (status) { progress_error(HD_ERROR_READ_ERROR, "Unable to load image file \"%s\"!", file_rlookup(filename)); if (!match) free(img); return (NULL); } if (!match) { num_images ++; if (num_images > 1) qsort(images, num_images, sizeof(image_t *), (int (*)(const void *, const void *))image_compare); } return (img); } /* * 'image_load_bmp()' - Read a BMP image file. */ static int /* O - 0 = success, -1 = fail */ image_load_bmp(image_t *img, /* I - Image to load into */ FILE *fp, /* I - File to read from */ int gray, /* I - Grayscale image? */ int load_data)/* I - 1 = load image data, 0 = just info */ { int info_size, /* Size of info header */ depth, /* Depth of image (bits) */ compression, /* Type of compression */ colors_used, /* Number of colors used */ x, y, /* Looping vars */ color, /* Color of RLE pixel */ count, /* Number of times to repeat */ temp, /* Temporary color */ align; /* Alignment bytes */ uchar bit, /* Bit in image */ byte; /* Byte in image */ uchar *ptr; /* Pointer into pixels */ uchar colormap[256][4];/* Colormap */ // Get the header... getc(fp); /* Skip "BM" sync chars */ getc(fp); read_dword(fp); /* Skip size */ read_word(fp); /* Skip reserved stuff */ read_word(fp); read_dword(fp); // Then the bitmap information... info_size = (int)read_dword(fp); img->width = read_long(fp); img->height = read_long(fp); read_word(fp); depth = read_word(fp); compression = (int)read_dword(fp); read_dword(fp); read_long(fp); read_long(fp); colors_used = (int)read_dword(fp); read_dword(fp); if (img->width <= 0 || img->width > 8192 || img->height <= 0 || img->height > 8192 || info_size < 0) return (-1); if (info_size > 40) { for (info_size -= 40; info_size > 0; info_size --) getc(fp); } // Get colormap... if (colors_used == 0 && depth <= 8) colors_used = 1 << depth; else if (colors_used < 0 || colors_used > 256) return (-1); fread(colormap, (size_t)colors_used, 4, fp); // Setup image and buffers... img->depth = gray ? 
1 : 3; // If this image is indexed and we are writing an encrypted PDF file, bump the use count so // we create an image object (Acrobat 6 bug workaround) if (depth <= 8 && Encryption) img->use ++; // Return now if we only need the dimensions... if (!load_data) return (0); img->pixels = (uchar *)malloc((size_t)(img->width * img->height * img->depth)); if (img->pixels == NULL) return (-1); if (gray && depth <= 8) { // Convert colormap to grayscale... for (color = colors_used - 1; color >= 0; color --) colormap[color][0] = (colormap[color][2] * 31 + colormap[color][1] * 61 + colormap[color][0] * 8) / 100; } // Read the image data... color = 0; count = 0; align = 0; byte = 0; temp = 0; for (y = img->height - 1; y >= 0; y --) { ptr = img->pixels + y * img->width * img->depth; switch (depth) { case 1 : /* Bitmap */ for (x = img->width, bit = 128; x > 0; x --) { if (bit == 128) byte = (uchar)getc(fp); if (byte & bit) { if (!gray) { *ptr++ = colormap[1][2]; *ptr++ = colormap[1][1]; } *ptr++ = colormap[1][0]; } else { if (!gray) { *ptr++ = colormap[0][2]; *ptr++ = colormap[0][1]; } *ptr++ = colormap[0][0]; } if (bit > 1) bit >>= 1; else bit = 128; } /* * Read remaining bytes to align to 32 bits... */ for (temp = (img->width + 7) / 8; temp & 3; temp ++) getc(fp); break; case 4 : /* 16-color */ for (x = img->width, bit = 0xf0; x > 0; x --) { /* * Get a new count as needed... */ if (compression != BI_RLE4 && count == 0) { count = 2; color = -1; } if (count == 0) { while (align > 0) { align --; getc(fp); } if ((count = getc(fp)) == 0) { if ((count = getc(fp)) == 0) { /* * End of line... */ x ++; continue; } else if (count == 1) { /* * End of image... */ break; } else if (count == 2) { /* * Delta... */ count = getc(fp) * getc(fp) * img->width; color = 0; } else { /* * Absolute... */ color = -1; align = ((4 - (count & 3)) / 2) & 1; } } else color = getc(fp); } /* * Get a new color as needed... */ count --; if (bit == 0xf0) { if (color < 0) temp = getc(fp) & 255; else temp = color; /* * Copy the color value... */ if (!gray) { *ptr++ = colormap[temp >> 4][2]; *ptr++ = colormap[temp >> 4][1]; } *ptr++ = colormap[temp >> 4][0]; bit = 0x0f; } else { /* * Copy the color value... */ if (!gray) { *ptr++ = colormap[temp & 15][2]; *ptr++ = colormap[temp & 15][1]; } *ptr++ = colormap[temp & 15][0]; bit = 0xf0; } } break; case 8 : /* 256-color */ for (x = img->width; x > 0; x --) { /* * Get a new count as needed... */ if (compression != BI_RLE8) { count = 1; color = -1; } if (count == 0) { while (align > 0) { align --; getc(fp); } if ((count = getc(fp)) == 0) { if ((count = getc(fp)) == 0) { /* * End of line... */ x ++; continue; } else if (count == 1) { /* * End of image... */ break; } else if (count == 2) { /* * Delta... */ count = getc(fp) * getc(fp) * img->width; color = 0; } else { /* * Absolute... */ color = -1; align = (2 - (count & 1)) & 1; } } else color = getc(fp); } /* * Get a new color as needed... */ if (color < 0) temp = getc(fp); else temp = color; count --; /* * Copy the color value... */ if (!gray) { *ptr++ = colormap[temp][2]; *ptr++ = colormap[temp][1]; } *ptr++ = colormap[temp][0]; } break; case 24 : /* 24-bit RGB */ if (gray) { for (x = img->width; x > 0; x --) { temp = getc(fp) * 8; temp += getc(fp) * 61; temp += getc(fp) * 31; *ptr++ = (uchar)(temp / 100); } } else { for (x = img->width; x > 0; x --, ptr += 3) { ptr[2] = (uchar)getc(fp); ptr[1] = (uchar)getc(fp); ptr[0] = (uchar)getc(fp); } } /* * Read remaining bytes to align to 32 bits... 
*/ for (temp = img->width * 3; temp & 3; temp ++) getc(fp); break; } } return (0); } /* * 'image_load_gif()' - Load a GIF image file... */ static int /* O - 0 = success, -1 = fail */ image_load_gif(image_t *img, /* I - Image pointer */ FILE *fp, /* I - File to load from */ int gray, /* I - 0 = color, 1 = grayscale */ int load_data)/* I - 1 = load image data, 0 = just info */ { uchar buf[1024]; /* Input buffer */ gif_cmap_t cmap; /* Colormap */ int ncolors, /* Bits per pixel */ transparent; /* Transparent color index */ /* * Read the header; we already know it is a GIF file... */ fread(buf, 13, 1, fp); img->width = (buf[7] << 8) | buf[6]; img->height = (buf[9] << 8) | buf[8]; ncolors = 2 << (buf[10] & 0x07); if (img->width <= 0 || img->width > 32767 || img->height <= 0 || img->height > 32767) return (-1); // If we are writing an encrypted PDF file, bump the use count so we create // an image object (Acrobat 6 bug workaround) if (Encryption) img->use ++; if (buf[10] & GIF_COLORMAP) if (gif_read_cmap(fp, ncolors, cmap, &gray)) return (-1); transparent = -1; while (1) { switch (getc(fp)) { case ';' : /* End of image */ return (-1); /* Early end of file */ case '!' : /* Extension record */ buf[0] = (uchar)getc(fp); if (buf[0] == 0xf9) /* Graphic Control Extension */ { gif_get_block(fp, buf); if (buf[0] & 1) /* Get transparent color index */ transparent = buf[3]; } while (gif_get_block(fp, buf) != 0); break; case ',' : /* Image data */ fread(buf, 9, 1, fp); if (buf[8] & GIF_COLORMAP) { ncolors = 2 << (buf[8] & 0x07); if (gif_read_cmap(fp, ncolors, cmap, &gray)) return (-1); } img->width = (buf[5] << 8) | buf[4]; img->height = (buf[7] << 8) | buf[6]; img->depth = gray ? 1 : 3; if (img->width <= 0 || img->width > 32767 || img->height <= 0 || img->height > 32767) return (-1); if (transparent >= 0) { /* * Map transparent color to background color... */ if (BodyColor[0]) { float rgb[3]; /* RGB color */ get_color((uchar *)BodyColor, rgb); cmap[transparent][0] = (uchar)(rgb[0] * 255.0f + 0.5f); cmap[transparent][1] = (uchar)(rgb[1] * 255.0f + 0.5f); cmap[transparent][2] = (uchar)(rgb[2] * 255.0f + 0.5f); } else { cmap[transparent][0] = 255; cmap[transparent][1] = 255; cmap[transparent][2] = 255; } /* * Allocate a mask image... */ image_need_mask(img); } if (!load_data) return (0); img->pixels = (uchar *)malloc((size_t)(img->width * img->height * img->depth)); if (img->pixels == NULL) return (-1); return (gif_read_image(fp, img, cmap, buf[8] & GIF_INTERLACE, transparent)); } } } #ifdef HAVE_LIBJPEG typedef struct hd_jpeg_err_s // JPEG error manager extension { struct jpeg_error_mgr jerr; // JPEG error manager information jmp_buf retbuf; // setjmp() return buffer char message[JMSG_LENGTH_MAX]; // Last error message } hd_jpeg_err_t; /* * 'image_load_jpeg()' - Load a JPEG image file. 
*/ static int /* O - 0 = success, -1 = fail */ image_load_jpeg(image_t *img, /* I - Image pointer */ FILE *fp, /* I - File to load from */ int gray, /* I - 0 = color, 1 = grayscale */ int load_data)/* I - 1 = load image data, 0 = just info */ { struct jpeg_decompress_struct cinfo; /* Decompressor info */ hd_jpeg_err_t jerr; // JPEG error handler JSAMPROW row; /* Sample row pointer */ jpeg_std_error(&jerr.jerr); jerr.jerr.error_exit = jpeg_error_handler; if (setjmp(jerr.retbuf)) { progress_error(HD_ERROR_BAD_FORMAT, "%s (%s)", jerr.message, file_rlookup(img->filename)); jpeg_destroy_decompress(&cinfo); return (-1); } cinfo.err = (struct jpeg_error_mgr *)&jerr; jpeg_create_decompress(&cinfo); jpeg_stdio_src(&cinfo, fp); jpeg_read_header(&cinfo, (boolean)1); cinfo.quantize_colors = FALSE; if (gray || cinfo.num_components == 1) { cinfo.out_color_space = JCS_GRAYSCALE; cinfo.out_color_components = 1; cinfo.output_components = 1; } else if (cinfo.num_components != 3) { jpeg_destroy_decompress(&cinfo); progress_error(HD_ERROR_BAD_FORMAT, "CMYK JPEG files are not supported! (%s)", file_rlookup(img->filename)); return (-1); } else { cinfo.out_color_space = JCS_RGB; cinfo.out_color_components = 3; cinfo.output_components = 3; } jpeg_calc_output_dimensions(&cinfo); img->width = (int)cinfo.output_width; img->height = (int)cinfo.output_height; img->depth = (int)cinfo.output_components; if (!load_data) { jpeg_destroy_decompress(&cinfo); return (0); } img->pixels = (uchar *)malloc((size_t)(img->width * img->height * img->depth)); if (img->pixels == NULL) { jpeg_destroy_decompress(&cinfo); return (-1); } jpeg_start_decompress(&cinfo); while (cinfo.output_scanline < cinfo.output_height) { row = (JSAMPROW)(img->pixels + (size_t)cinfo.output_scanline * (size_t)cinfo.output_width * (size_t)cinfo.output_components); jpeg_read_scanlines(&cinfo, &row, (JDIMENSION)1); } jpeg_finish_decompress(&cinfo); jpeg_destroy_decompress(&cinfo); return (0); } #endif // HAVE_LIBJPEG #ifdef HAVE_LIBPNG /* * 'image_load_png()' - Load a PNG image file. */ static int /* O - 0 = success, -1 = fail */ image_load_png(image_t *img, /* I - Image pointer */ FILE *fp, /* I - File to read from */ int gray, /* I - 0 = color, 1 = grayscale */ int load_data)/* I - 1 = load image data, 0 = just info */ { int i, j; /* Looping vars */ png_structp pp; /* PNG read pointer */ png_infop info; /* PNG info pointers */ int depth; /* Input image depth */ png_bytep *rows = NULL; /* PNG row pointers */ uchar *inptr, /* Input pixels */ *outptr; /* Output pixels */ int color_type, /* PNG color mode */ bit_depth; /* PNG bit depth */ /* * Setup the PNG data structures... */ pp = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL); if (!pp) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for PNG file: %s", strerror(errno)); return (-1); } info = png_create_info_struct(pp); if (!info) { progress_error(HD_ERROR_OUT_OF_MEMORY, "Unable to allocate memory for PNG info: %s", strerror(errno)); png_destroy_read_struct(&pp, NULL, NULL); return (-1); } if (setjmp(png_jmpbuf(pp))) { progress_error(HD_ERROR_BAD_FORMAT, "PNG file contains errors!"); png_destroy_read_struct(&pp, &info, NULL); if (img != NULL) { free(img->pixels); img->pixels = NULL; } free(rows); rows = NULL; return (-1); } /* * Initialize the PNG read "engine"... */ png_init_io(pp, fp); # if defined(PNG_SKIP_sRGB_CHECK_PROFILE) && defined(PNG_SET_OPTION_SUPPORTED) // Don't throw errors with "invalid" sRGB profiles produced by Adobe apps. 
png_set_option(pp, PNG_SKIP_sRGB_CHECK_PROFILE, PNG_OPTION_ON); # endif // PNG_SKIP_sRGB_CHECK_PROFILE && PNG_SET_OPTION_SUPPORTED /* * Get the image dimensions and convert to grayscale or RGB... */ png_read_info(pp, info); bit_depth = png_get_bit_depth(pp, info); color_type = png_get_color_type(pp, info); if (png_get_valid(pp, info, PNG_INFO_tRNS)) { png_set_tRNS_to_alpha(pp); color_type |= PNG_COLOR_MASK_ALPHA; } if (color_type & PNG_COLOR_MASK_PALETTE) { png_set_palette_to_rgb(pp); // If we are writing an encrypted PDF file, bump the use count so we create // an image object (Acrobat 6 bug workaround) if (Encryption) img->use ++; } else if (!(color_type & PNG_COLOR_MASK_COLOR) && bit_depth < 8) { png_set_expand_gray_1_2_4_to_8(pp); } else if (bit_depth == 16) { # if PNG_LIBPNG_VER >= 10504 png_set_scale_16(pp); # else png_set_strip_16(pp); # endif // PNG_LIBPNG_VER >= 10504 } if (color_type & PNG_COLOR_MASK_COLOR) { depth = 3; img->depth = gray ? 1 : 3; } else { depth = 1; img->depth = 1; } img->width = (int)png_get_image_width(pp, info); img->height = (int)png_get_image_height(pp, info); if (color_type & PNG_COLOR_MASK_ALPHA) { if ((PSLevel == 0 && PDFVersion >= 14) || PSLevel == 3) image_need_mask(img, 8); else if (PSLevel == 0 && PDFVersion == 13) image_need_mask(img, 2); else image_need_mask(img); depth ++; } # ifdef DEBUG printf("bit_depth=%d, color_type=0x%04x, depth=%d, img->width=%d, img->height=%d, img->depth=%d\n", bit_depth, color_type, depth, img->width, img->height, img->depth); if (color_type & PNG_COLOR_MASK_COLOR) puts(" COLOR"); else puts(" GRAYSCALE"); if (color_type & PNG_COLOR_MASK_ALPHA) puts(" ALPHA"); if (color_type & PNG_COLOR_MASK_PALETTE) puts(" PALETTE"); # endif // DEBUG if (!load_data) { png_destroy_read_struct(&pp, &info, NULL); return (0); } img->pixels = (uchar *)calloc(1, (size_t)(img->width * img->height * depth)); /* * Allocate pointers... */ rows = (png_bytep *)calloc(png_get_image_height(pp, info), sizeof(png_bytep)); for (i = 0; i < (int)png_get_image_height(pp, info); i ++) rows[i] = img->pixels + i * img->width * depth; /* * Read the image, handling interlacing as needed... */ for (i = png_set_interlace_handling(pp); i > 0; i --) png_read_rows(pp, rows, NULL, (png_uint_32)img->height); /* * Generate the alpha mask as necessary... */ if (color_type & PNG_COLOR_MASK_ALPHA) { # ifdef DEBUG for (inptr = img->pixels, i = 0; i < img->height; i ++) { for (j = 0; j < img->width; j ++, inptr += depth) switch (depth) { case 2 : printf(" %02X%02X", inptr[0], inptr[1]); break; case 4 : printf(" %02X%02X%02X%02X", inptr[0], inptr[1], inptr[2], inptr[3]); break; } putchar('\n'); } # endif // DEBUG for (inptr = img->pixels + depth - 1, i = 0; i < img->height; i ++) for (j = 0; j < img->width; j ++, inptr += depth) image_set_mask(img, j, i, *inptr); } /* * Reformat the data as necessary for the reader... */ if (gray && (color_type & PNG_COLOR_MASK_COLOR)) { /* * Grayscale output needed... */ for (inptr = img->pixels, outptr = img->pixels, i = img->width * img->height; i > 0; inptr += depth, outptr ++, i --) *outptr = (31 * inptr[0] + 61 * inptr[1] + 8 * inptr[2]) / 100; } else if (img->depth != depth) { /* * Remove alpha from final array... 
*/ if (depth == 4) { for (inptr = img->pixels, outptr = img->pixels, i = img->width * img->height; i > 0; inptr ++, i --) { *outptr++ = *inptr++; *outptr++ = *inptr++; *outptr++ = *inptr++; } } else { for (inptr = img->pixels, outptr = img->pixels, i = img->width * img->height; i > 0; inptr ++, i --) *outptr++ = *inptr++; } } /* * Free memory and return... */ png_read_end(pp, info); png_destroy_read_struct(&pp, &info, NULL); free(rows); return (0); } #endif // HAVE_LIBPNG /* * 'image_need_mask()' - Allocate memory for the image mask... */ static void image_need_mask(image_t *img, /* I - Image to add mask to */ int scaling) /* I - Scaling for mask image */ { size_t size; /* Byte size of mask image */ if (img == NULL || img->mask != NULL) return; /* * Figure out the size of the mask image, and then allocate and set all the * bits needed... */ img->maskscale = scaling; if (scaling == 8) { // Alpha image img->maskwidth = img->width; size = (size_t)(img->width * img->height); } else { // Alpha mask img->maskwidth = (img->width * scaling + 7) / 8; size = (size_t)(img->maskwidth * img->height * scaling + 1); } img->mask = (uchar *)calloc(size, 1); } /* * 'image_set_mask()' - Set a bit in the image mask. */ static void image_set_mask(image_t *img, /* I - Image to operate on */ int x, /* I - X coordinate */ int y, /* I - Y coordinate */ uchar alpha) /* I - Alpha value */ { int i, j; /* Looping vars */ uchar *maskptr; /* Pointer into mask image */ static uchar masks[8] = /* Masks for each bit */ { 0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01 }; static uchar dither[4][4] = // Simple 4x4 clustered-dot dither { { 0, 2, 15, 6 }, { 4, 12, 9, 11 }, { 14, 7, 1, 3 }, { 8, 10, 5, 13 } }; if (img == NULL || img->mask == NULL || x < 0 || x >= img->width || y < 0 || y >= img->height) return; if (img->maskscale == 8) { // Store the alpha value directly... if (PSLevel) img->mask[y * img->maskwidth + x] = 255 - alpha; else img->mask[y * img->maskwidth + x] = alpha; } else { // Store an alpha mask... x *= img->maskscale; y *= img->maskscale; alpha >>= 4; for (i = 0; i < img->maskscale; i ++, y ++, x -= img->maskscale) for (j = 0; j < img->maskscale; j ++, x ++) { maskptr = img->mask + y * img->maskwidth + x / 8; if (alpha <= dither[x & 3][y & 3]) *maskptr |= masks[x & 7]; } } } /* * 'image_unload()' - Unload an image from memory. */ void image_unload(image_t *img) // I - Image { if (!img) return; if (!img->use || !img->pixels) return; if (img->obj) img->use = 0; else img->use --; if (img->use) return; free(img->pixels); img->pixels = NULL; } #ifdef HAVE_LIBJPEG /* * 'jpeg_error_handler()' - Handle JPEG errors by not exiting. */ static void jpeg_error_handler(j_common_ptr p) // Common JPEG data { hd_jpeg_err_t *jerr = (hd_jpeg_err_t *)p->err; // JPEG error handler // Save the error message in the string buffer... (jerr->jerr.format_message)(p, jerr->message); // Return to the point we called setjmp()... longjmp(jerr->retbuf, 1); } #endif // HAVE_LIBJPEG /* * 'read_word()' - Read a 16-bit unsigned integer. */ static unsigned short /* O - 16-bit unsigned integer */ read_word(FILE *fp) /* I - File to read from */ { unsigned char b0, b1; /* Bytes from file */ b0 = (uchar)getc(fp); b1 = (uchar)getc(fp); return (unsigned short)((b1 << 8) | b0); } /* * 'read_dword()' - Read a 32-bit unsigned integer. 
*/ static unsigned int /* O - 32-bit unsigned integer */ read_dword(FILE *fp) /* I - File to read from */ { unsigned char b0, b1, b2, b3; /* Bytes from file */ b0 = (uchar)getc(fp); b1 = (uchar)getc(fp); b2 = (uchar)getc(fp); b3 = (uchar)getc(fp); return (unsigned)((((((b3 << 8) | b2) << 8) | b1) << 8) | b0); } /* * 'read_long()' - Read a 32-bit signed integer. */ static int /* O - 32-bit signed integer */ read_long(FILE *fp) /* I - File to read from */ { unsigned char b0, b1, b2, b3; /* Bytes from file */ b0 = (uchar)getc(fp); b1 = (uchar)getc(fp); b2 = (uchar)getc(fp); b3 = (uchar)getc(fp); return ((int)(((((b3 << 8) | b2) << 8) | b1) << 8) | b0); }
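Before the method-level fields below, a brief orientation to the code_after field above: it defines a small caching API (image_find(), image_load(), image_unload(), image_flush_cache()) around the sorted images array. The sketch below is a minimal hypothetical caller inferred from those signatures; load_and_release() is not HTMLDOC code.

#include "htmldoc.h"	/* Declares image_t and the image_*() API above */

/*
 * 'load_and_release()' - Hypothetical caller of the image cache API; shows
 *                        only the calling convention implied by the
 *                        signatures in code_after.
 */

static int				/* O - 0 on success, -1 on error */
load_and_release(const char *filename)	/* I - Image file to load */
{
  image_t	*img;			/* Cached image */

  if ((img = image_load(filename, 0, 1)) == NULL)
    return (-1);			/* Color image, with pixel data */

  /* ... consume img->width, img->height, img->depth, img->pixels ... */

  image_unload(img);			/* Drop the use count; do not free() */

  return (0);
}

image_load() either bumps the use count of a cached entry or allocates a new one with use = 1, so each successful load is paired with image_unload(), and the cache itself is torn down once at program end with image_flush_cache().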
gif_read_lzw(FILE *fp, /* I - File to read from */ int first_time, /* I - 1 = first time, 0 = not first time */ int input_code_size) /* I - Code size in bits */ { int i, /* Looping var */ code, /* Current code */ incode; /* Input code */ static short fresh = 0, /* 1 = empty buffers */ code_size = 0, /* Current code size */ set_code_size = 0, /* Initial code size set */ max_code = 0, /* Maximum code used */ max_code_size = 0, /* Maximum code size */ firstcode = 0, /* First code read */ oldcode = 0, /* Last code read */ clear_code = 0, /* Clear code for LZW input */ end_code = 0, /* End code for LZW input */ table[2][4096], /* String table */ stack[8192], /* Output stack */ *sp = stack; /* Current stack pointer */ if (first_time) { /* * Setup LZW state... */ set_code_size = (short)input_code_size; code_size = set_code_size + 1; clear_code = (short)(1 << set_code_size); end_code = clear_code + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; /* * Initialize input buffers... */ gif_get_code(fp, 0, 1); /* * Wipe the decompressor table... */ fresh = 1; for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][0] = 0; sp = stack; return (0); } else if (fresh) { fresh = 0; do firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); while (firstcode == clear_code); return (firstcode); } if (sp > stack) return (*--sp); while ((code = gif_get_code (fp, code_size, 0)) >= 0) { if (code == clear_code) { for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][i] = 0; code_size = set_code_size + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; sp = stack; firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); return (firstcode); } else if (code == end_code) { uchar buf[260]; if (!gif_eof) while (gif_get_block(fp, buf) > 0); return (-2); } incode = code; if (code >= max_code) { *sp++ = firstcode; code = oldcode; } while (code >= clear_code) { *sp++ = table[1][code]; if (code == table[0][code]) return (255); code = table[0][code]; } *sp++ = firstcode = table[1][code]; code = max_code; if (code < 4096) { table[0][code] = oldcode; table[1][code] = firstcode; max_code ++; if (max_code >= max_code_size && max_code_size < 4096) { max_code_size *= 2; code_size ++; } } oldcode = (short)incode; if (sp > stack) return (*--sp); } return (code); }
gif_read_lzw(FILE *fp, /* I - File to read from */ int first_time, /* I - 1 = first time, 0 = not first time */ int input_code_size) /* I - Code size in bits */ { int i, /* Looping var */ code, /* Current code */ incode; /* Input code */ static short fresh = 0, /* 1 = empty buffers */ code_size = 0, /* Current code size */ set_code_size = 0, /* Initial code size set */ max_code = 0, /* Maximum code used */ max_code_size = 0, /* Maximum code size */ firstcode = 0, /* First code read */ oldcode = 0, /* Last code read */ clear_code = 0, /* Clear code for LZW input */ end_code = 0, /* End code for LZW input */ table[2][4096], /* String table */ stack[8192], /* Output stack */ *sp = stack; /* Current stack pointer */ if (first_time) { /* * Setup LZW state... */ set_code_size = (short)input_code_size; code_size = set_code_size + 1; clear_code = (short)(1 << set_code_size); end_code = clear_code + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; /* * Initialize input buffers... */ gif_get_code(fp, 0, 1); /* * Wipe the decompressor table... */ fresh = 1; for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][0] = 0; sp = stack; return (0); } else if (fresh) { fresh = 0; do firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); while (firstcode == clear_code); return (firstcode); } if (sp > stack) return (*--sp); while ((code = gif_get_code(fp, code_size, 0)) >= 0) { if (code == clear_code) { for (i = 0; i < clear_code; i ++) { table[0][i] = 0; table[1][i] = (short)i; } for (; i < 4096; i ++) table[0][i] = table[1][i] = 0; code_size = set_code_size + 1; max_code_size = 2 * clear_code; max_code = clear_code + 2; sp = stack; firstcode = oldcode = (short)gif_get_code(fp, code_size, 0); return (firstcode); } else if (code == end_code) { uchar buf[260]; if (!gif_eof) while (gif_get_block(fp, buf) > 0); return (-2); } incode = code; if (code >= max_code) { *sp++ = firstcode; code = oldcode; } while (code >= clear_code) { *sp++ = table[1][code]; if (code == table[0][code]) return (255); code = table[0][code]; } *sp++ = firstcode = table[1][code]; code = max_code; if (code < 4096) { table[0][code] = oldcode; table[1][code] = firstcode; max_code ++; if (max_code >= max_code_size && max_code_size < 4096) { max_code_size *= 2; code_size ++; } } oldcode = (short)incode; if (sp > stack) return (*--sp); } return (code); }
{'added': [(308, ' if (code_size > 12)'), (309, ' {'), (310, ' progress_error(HD_ERROR_READ_ERROR, "Bad GIF file \\"%s\\" - invalid code size %d.", img->filename, code_size);'), (311, ' return (-1);'), (312, ' }'), (313, ''), (441, ' while ((code = gif_get_code(fp, code_size, 0)) >= 0)')], 'deleted': [(435, ' while ((code = gif_get_code (fp, code_size, 0)) >= 0)')]}
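The diff above records two kinds of change. Within gif_read_lzw itself the only edit is cosmetic: the line deleted at 435 and re-added at 441 differ by one space in the gif_get_code call, and 441 - 435 = 6 matches the six guard lines inserted earlier in the file (308-313), outside the method shown. Below is a sketch of that guard, reassembled from the added entries; progress_error(), HD_ERROR_READ_ERROR, and img->filename are HTMLDOC internals copied from the diff, not defined here.

    /* Guard reassembled from the 'added' entries of the diff above:
     * reject any GIF that declares an LZW code size wider than 12 bits
     * before the decoder is ever run with it. */
    if (code_size > 12)
    {
      progress_error(HD_ERROR_READ_ERROR,
                     "Bad GIF file \"%s\" - invalid code size %d.",
                     img->filename, code_size);
      return (-1);
    }

Twelve bits is the natural ceiling here: gif_read_lzw declares its string table as table[2][4096], and (1 << 12) == 4096, so no 12-bit code can index past it.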
7
1
1,215
7,491
106
604
21
https://github.com/michaelrsweet/htmldoc
CVE-2022-0534
CWE-125
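The CWE-125 label (out-of-bounds read) follows from how the decoder above consumes codes: gif_get_code(fp, code_size, 0) can return any value up to (1 << code_size) - 1, and that value is used directly as an index into table[0][...] and table[1][...]. A minimal standalone illustration of the invariant the patch enforces; gif_code_size_ok() is a hypothetical helper written for this note, not HTMLDOC code.

    #include <stdio.h>

    /* table[2][4096] in gif_read_lzw holds 4096 entries per row, so it
     * is only safe to index with codes of at most 12 bits: a 13-bit
     * stream could deliver code == 8191, and reading table[1][8191]
     * would run past the end of the table. */
    static int gif_code_size_ok(int code_size)
    {
        return code_size <= 12;        /* (1 << 12) == 4096 */
    }

    int main(void)
    {
        printf("%d %d\n", gif_code_size_ok(8), gif_code_size_ok(13));
        return 0;                      /* prints "1 0" */
    }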
1,486
readelf.c
C
do_bid_note
/* * Copyright (c) Christos Zoulas 2003. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: readelf.c,v 1.137 2017/08/13 00:21:47 christos Exp $") #endif #ifdef BUILTIN_ELF #include <string.h> #include <ctype.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "readelf.h" #include "magic.h" #ifdef ELFCORE private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, uint16_t *); #endif private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int, int *, uint16_t *); private int doshn(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int, int, int *, uint16_t *); private size_t donote(struct magic_set *, void *, size_t, size_t, int, int, size_t, int *, uint16_t *, int, off_t, int, off_t); #define ELF_ALIGN(a) ((((a) + align - 1) / align) * align) #define isquote(c) (strchr("'\"`", (c)) != NULL) private uint16_t getu16(int, uint16_t); private uint32_t getu32(int, uint32_t); private uint64_t getu64(int, uint64_t); #define MAX_PHNUM 128 #define MAX_SHNUM 32768 #define SIZE_UNKNOWN ((off_t)-1) private int toomany(struct magic_set *ms, const char *name, uint16_t num) { if (file_printf(ms, ", too many %s (%u)", name, num ) == -1) return -1; return 0; } private uint16_t getu16(int swap, uint16_t value) { union { uint16_t ui; char c[2]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[1]; retval.c[1] = tmpval.c[0]; return retval.ui; } else return value; } private uint32_t getu32(int swap, uint32_t value) { union { uint32_t ui; char c[4]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[3]; retval.c[1] = tmpval.c[2]; retval.c[2] = tmpval.c[1]; retval.c[3] = tmpval.c[0]; return retval.ui; } else return value; } private uint64_t getu64(int swap, uint64_t value) { union { uint64_t ui; char c[8]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[7]; retval.c[1] = tmpval.c[6]; retval.c[2] = tmpval.c[5]; retval.c[3] = tmpval.c[4]; retval.c[4] = tmpval.c[3]; retval.c[5] = tmpval.c[2]; retval.c[6] = tmpval.c[1]; retval.c[7] = tmpval.c[0]; return retval.ui; } else return value; } #define elf_getu16(swap, value) 
getu16(swap, value) #define elf_getu32(swap, value) getu32(swap, value) #define elf_getu64(swap, value) getu64(swap, value) #define xsh_addr (clazz == ELFCLASS32 \ ? (void *)&sh32 \ : (void *)&sh64) #define xsh_sizeof (clazz == ELFCLASS32 \ ? sizeof(sh32) \ : sizeof(sh64)) #define xsh_size (size_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_size) \ : elf_getu64(swap, sh64.sh_size)) #define xsh_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_offset) \ : elf_getu64(swap, sh64.sh_offset)) #define xsh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_type) \ : elf_getu32(swap, sh64.sh_type)) #define xsh_name (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_name) \ : elf_getu32(swap, sh64.sh_name)) #define xph_addr (clazz == ELFCLASS32 \ ? (void *) &ph32 \ : (void *) &ph64) #define xph_sizeof (clazz == ELFCLASS32 \ ? sizeof(ph32) \ : sizeof(ph64)) #define xph_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_type) \ : elf_getu32(swap, ph64.p_type)) #define xph_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_offset) \ : elf_getu64(swap, ph64.p_offset)) #define xph_align (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_align ? \ elf_getu32(swap, ph32.p_align) : 4) \ : (off_t) (ph64.p_align ? \ elf_getu64(swap, ph64.p_align) : 4))) #define xph_vaddr (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_vaddr ? \ elf_getu32(swap, ph32.p_vaddr) : 4) \ : (off_t) (ph64.p_vaddr ? \ elf_getu64(swap, ph64.p_vaddr) : 4))) #define xph_filesz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_filesz) \ : elf_getu64(swap, ph64.p_filesz))) #define xnh_addr (clazz == ELFCLASS32 \ ? (void *)&nh32 \ : (void *)&nh64) #define xph_memsz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_memsz) \ : elf_getu64(swap, ph64.p_memsz))) #define xnh_sizeof (clazz == ELFCLASS32 \ ? sizeof(nh32) \ : sizeof(nh64)) #define xnh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_type) \ : elf_getu32(swap, nh64.n_type)) #define xnh_namesz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_namesz) \ : elf_getu32(swap, nh64.n_namesz)) #define xnh_descsz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_descsz) \ : elf_getu32(swap, nh64.n_descsz)) #define prpsoffsets(i) (clazz == ELFCLASS32 \ ? prpsoffsets32[i] \ : prpsoffsets64[i]) #define xcap_addr (clazz == ELFCLASS32 \ ? (void *)&cap32 \ : (void *)&cap64) #define xcap_sizeof (clazz == ELFCLASS32 \ ? sizeof cap32 \ : sizeof cap64) #define xcap_tag (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_tag) \ : elf_getu64(swap, cap64.c_tag)) #define xcap_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_un.c_val) \ : elf_getu64(swap, cap64.c_un.c_val)) #define xauxv_addr (clazz == ELFCLASS32 \ ? (void *)&auxv32 \ : (void *)&auxv64) #define xauxv_sizeof (clazz == ELFCLASS32 \ ? sizeof(auxv32) \ : sizeof(auxv64)) #define xauxv_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, auxv32.a_type) \ : elf_getu64(swap, auxv64.a_type)) #define xauxv_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, auxv32.a_v) \ : elf_getu64(swap, auxv64.a_v)) #ifdef ELFCORE /* * Try larger offsets first to avoid false matches * from earlier data that happen to look like strings. 
*/ static const size_t prpsoffsets32[] = { #ifdef USE_NT_PSINFO 104, /* SunOS 5.x (command line) */ 88, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 100, /* SunOS 5.x (command line) */ 84, /* SunOS 5.x (short name) */ 44, /* Linux (command line) */ 28, /* Linux 2.0.36 (short name) */ 8, /* FreeBSD */ }; static const size_t prpsoffsets64[] = { #ifdef USE_NT_PSINFO 152, /* SunOS 5.x (command line) */ 136, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 136, /* SunOS 5.x, 64-bit (command line) */ 120, /* SunOS 5.x, 64-bit (short name) */ 56, /* Linux (command line) */ 40, /* Linux (tested on core from 2.4.x, short name) */ 16, /* FreeBSD, 64-bit */ }; #define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0]) #define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0]) #define NOFFSETS (clazz == ELFCLASS32 ? NOFFSETS32 : NOFFSETS64) /* * Look through the program headers of an executable image, searching * for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or * "FreeBSD"; if one is found, try looking in various places in its * contents for a 16-character string containing only printable * characters - if found, that string should be the name of the program * that dropped core. Note: right after that 16-character string is, * at least in SunOS 5.x (and possibly other SVR4-flavored systems) and * Linux, a longer string (80 characters, in 5.x, probably other * SVR4-flavored systems, and Linux) containing the start of the * command line for that program. * * SunOS 5.x core files contain two PT_NOTE sections, with the types * NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the * same info about the command name and command line, so it probably * isn't worthwhile to look for NT_PSINFO, but the offsets are provided * above (see USE_NT_PSINFO), in case we ever decide to do so. The * NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent; * the SunOS 5.x file command relies on this (and prefers the latter). * * The signal number probably appears in a section of type NT_PRSTATUS, * but that's also rather OS-dependent, in ways that are harder to * dissect with heuristics, so I'm not bothering with the signal number. * (I suppose the signal number could be of interest in situations where * you don't have the binary of the program that dropped core; if you * *do* have that binary, the debugger will probably tell you what * signal it was.) */ #define OS_STYLE_SVR4 0 #define OS_STYLE_FREEBSD 1 #define OS_STYLE_NETBSD 2 private const char os_style_names[][8] = { "SVR4", "FreeBSD", "NetBSD", }; #define FLAGS_CORE_STYLE 0x003 #define FLAGS_DID_CORE 0x004 #define FLAGS_DID_OS_NOTE 0x008 #define FLAGS_DID_BUILD_ID 0x010 #define FLAGS_DID_CORE_STYLE 0x020 #define FLAGS_DID_NETBSD_PAX 0x040 #define FLAGS_DID_NETBSD_MARCH 0x080 #define FLAGS_DID_NETBSD_CMODEL 0x100 #define FLAGS_DID_NETBSD_UNKNOWN 0x200 #define FLAGS_IS_CORE 0x400 #define FLAGS_DID_AUXV 0x800 private int dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, uint16_t *notecount) { Elf32_Phdr ph32; Elf64_Phdr ph64; size_t offset, len; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; off_t ph_off = off; int ph_num = num; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } /* * Loop through all the program headers. 
*/ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += size; if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Perhaps warn here */ continue; } if (xph_type != PT_NOTE) continue; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, 4, flags, notecount, fd, ph_off, ph_num, fsize); if (offset == 0) break; } } return 0; } #endif static void do_note_netbsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for NetBSD") == -1) return; /* * The version number used to be stuck as 199905, and was thus * basically content-free. Newer versions of NetBSD have fixed * this and now use the encoding of __NetBSD_Version__: * * MMmmrrpp00 * * M = major version * m = minor version * r = release ["",A-Z,Z[A-Z] but numeric] * p = patchlevel */ if (desc > 100000000U) { uint32_t ver_patch = (desc / 100) % 100; uint32_t ver_rel = (desc / 10000) % 100; uint32_t ver_min = (desc / 1000000) % 100; uint32_t ver_maj = desc / 100000000; if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1) return; if (ver_rel == 0 && ver_patch != 0) { if (file_printf(ms, ".%u", ver_patch) == -1) return; } else if (ver_rel != 0) { while (ver_rel > 26) { if (file_printf(ms, "Z") == -1) return; ver_rel -= 26; } if (file_printf(ms, "%c", 'A' + ver_rel - 1) == -1) return; } } } static void do_note_freebsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for FreeBSD") == -1) return; /* * Contents is __FreeBSD_version, whose relation to OS * versions is defined by a huge table in the Porter's * Handbook. This is the general scheme: * * Releases: * Mmp000 (before 4.10) * Mmi0p0 (before 5.0) * Mmm0p0 * * Development branches: * Mmpxxx (before 4.6) * Mmp1xx (before 4.10) * Mmi1xx (before 5.0) * M000xx (pre-M.0) * Mmm1xx * * M = major version * m = minor version * i = minor version increment (491000 -> 4.10) * p = patchlevel * x = revision * * The first release of FreeBSD to use ELF by default * was version 3.0. 
*/ if (desc == 460002) { if (file_printf(ms, " 4.6.2") == -1) return; } else if (desc < 460100) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10) == -1) return; if (desc / 1000 % 10 > 0) if (file_printf(ms, ".%d", desc / 1000 % 10) == -1) return; if ((desc % 1000 > 0) || (desc % 100000 == 0)) if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc < 500000) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10 + desc / 1000 % 10) == -1) return; if (desc / 100 % 10 > 0) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } else { if (file_printf(ms, " %d.%d", desc / 100000, desc / 1000 % 100) == -1) return; if ((desc / 100 % 10 > 0) || (desc % 100000 / 100 == 0)) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } } private int /*ARGSUSED*/ do_bid_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap __attribute__((__unused__)), uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && type == NT_GNU_BUILD_ID && (descsz >= 4 || descsz <= 20)) { uint8_t desc[20]; const char *btype; uint32_t i; *flags |= FLAGS_DID_BUILD_ID; switch (descsz) { case 8: btype = "xxHash"; break; case 16: btype = "md5/uuid"; break; case 20: btype = "sha1"; break; default: btype = "unknown"; break; } if (file_printf(ms, ", BuildID[%s]=", btype) == -1) return 1; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return 1; return 1; } return 0; } private int do_os_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 && type == NT_GNU_VERSION && descsz == 2) { *flags |= FLAGS_DID_OS_NOTE; file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]); return 1; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && type == NT_GNU_VERSION && descsz == 16) { uint32_t desc[4]; (void)memcpy(desc, &nbuf[doff], sizeof(desc)); *flags |= FLAGS_DID_OS_NOTE; if (file_printf(ms, ", for GNU/") == -1) return 1; switch (elf_getu32(swap, desc[0])) { case GNU_OS_LINUX: if (file_printf(ms, "Linux") == -1) return 1; break; case GNU_OS_HURD: if (file_printf(ms, "Hurd") == -1) return 1; break; case GNU_OS_SOLARIS: if (file_printf(ms, "Solaris") == -1) return 1; break; case GNU_OS_KFREEBSD: if (file_printf(ms, "kFreeBSD") == -1) return 1; break; case GNU_OS_KNETBSD: if (file_printf(ms, "kNetBSD") == -1) return 1; break; default: if (file_printf(ms, "<unknown>") == -1) return 1; } if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]), elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1) return 1; return 1; } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { if (type == NT_NETBSD_VERSION && descsz == 4) { *flags |= FLAGS_DID_OS_NOTE; do_note_netbsd_version(ms, swap, &nbuf[doff]); return 1; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) { if (type == NT_FREEBSD_VERSION && descsz == 4) { *flags |= FLAGS_DID_OS_NOTE; do_note_freebsd_version(ms, swap, &nbuf[doff]); return 1; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 && type == NT_OPENBSD_VERSION && descsz == 4) { *flags |= FLAGS_DID_OS_NOTE; if (file_printf(ms, ", for OpenBSD") == -1) 
return 1; /* Content of note is always 0 */ return 1; } if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 && type == NT_DRAGONFLY_VERSION && descsz == 4) { uint32_t desc; *flags |= FLAGS_DID_OS_NOTE; if (file_printf(ms, ", for DragonFly") == -1) return 1; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, " %d.%d.%d", desc / 100000, desc / 10000 % 10, desc % 10000) == -1) return 1; return 1; } return 0; } private int do_pax_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 && type == NT_NETBSD_PAX && descsz == 4) { static const char *pax[] = { "+mprotect", "-mprotect", "+segvguard", "-segvguard", "+ASLR", "-ASLR", }; uint32_t desc; size_t i; int did = 0; *flags |= FLAGS_DID_NETBSD_PAX; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (desc && file_printf(ms, ", PaX: ") == -1) return 1; for (i = 0; i < __arraycount(pax); i++) { if (((1 << (int)i) & desc) == 0) continue; if (file_printf(ms, "%s%s", did++ ? "," : "", pax[i]) == -1) return 1; } return 1; } return 0; } private int do_core_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags, size_t size, int clazz) { #ifdef ELFCORE int os_style = -1; /* * Sigh. The 2.0.36 kernel in Debian 2.1, at * least, doesn't correctly implement name * sections, in core dumps, as specified by * the "Program Linking" section of "UNIX(R) System * V Release 4 Programmer's Guide: ANSI C and * Programming Support Tools", because my copy * clearly says "The first 'namesz' bytes in 'name' * contain a *null-terminated* [emphasis mine] * character representation of the entry's owner * or originator", but the 2.0.36 kernel code * doesn't include the terminating null in the * name.... */ if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) || (namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) { os_style = OS_STYLE_SVR4; } if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) { os_style = OS_STYLE_FREEBSD; } if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11) == 0)) { os_style = OS_STYLE_NETBSD; } if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) { if (file_printf(ms, ", %s-style", os_style_names[os_style]) == -1) return 1; *flags |= FLAGS_DID_CORE_STYLE; *flags |= os_style; } switch (os_style) { case OS_STYLE_NETBSD: if (type == NT_NETBSD_CORE_PROCINFO) { char sbuf[512]; struct NetBSD_elfcore_procinfo pi; memset(&pi, 0, sizeof(pi)); memcpy(&pi, nbuf + doff, descsz); if (file_printf(ms, ", from '%.31s', pid=%u, uid=%u, " "gid=%u, nlwps=%u, lwp=%u (signal %u/code %u)", file_printable(sbuf, sizeof(sbuf), CAST(char *, pi.cpi_name)), elf_getu32(swap, pi.cpi_pid), elf_getu32(swap, pi.cpi_euid), elf_getu32(swap, pi.cpi_egid), elf_getu32(swap, pi.cpi_nlwps), elf_getu32(swap, pi.cpi_siglwp), elf_getu32(swap, pi.cpi_signo), elf_getu32(swap, pi.cpi_sigcode)) == -1) return 1; *flags |= FLAGS_DID_CORE; return 1; } break; default: if (type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) { size_t i, j; unsigned char c; /* * Extract the program name. We assume * it to be 16 characters (that's what it * is in SunOS 5.x and Linux). * * Unfortunately, it's at a different offset * in various OSes, so try multiple offsets. * If the characters aren't all printable, * reject it. 
*/ for (i = 0; i < NOFFSETS; i++) { unsigned char *cname, *cp; size_t reloffset = prpsoffsets(i); size_t noffset = doff + reloffset; size_t k; for (j = 0; j < 16; j++, noffset++, reloffset++) { /* * Make sure we're not past * the end of the buffer; if * we are, just give up. */ if (noffset >= size) goto tryanother; /* * Make sure we're not past * the end of the contents; * if we are, this obviously * isn't the right offset. */ if (reloffset >= descsz) goto tryanother; c = nbuf[noffset]; if (c == '\0') { /* * A '\0' at the * beginning is * obviously wrong. * Any other '\0' * means we're done. */ if (j == 0) goto tryanother; else break; } else { /* * A nonprintable * character is also * wrong. */ if (!isprint(c) || isquote(c)) goto tryanother; } } /* * Well, that worked. */ /* * Try next offsets, in case this match is * in the middle of a string. */ for (k = i + 1 ; k < NOFFSETS; k++) { size_t no; int adjust = 1; if (prpsoffsets(k) >= prpsoffsets(i)) continue; for (no = doff + prpsoffsets(k); no < doff + prpsoffsets(i); no++) adjust = adjust && isprint(nbuf[no]); if (adjust) i = k; } cname = (unsigned char *) &nbuf[doff + prpsoffsets(i)]; for (cp = cname; *cp && isprint(*cp); cp++) continue; /* * Linux apparently appends a space at the end * of the command line: remove it. */ while (cp > cname && isspace(cp[-1])) cp--; if (file_printf(ms, ", from '%.*s'", (int)(cp - cname), cname) == -1) return 1; *flags |= FLAGS_DID_CORE; return 1; tryanother: ; } } break; } #endif return 0; } private off_t get_offset_from_virtaddr(struct magic_set *ms, int swap, int clazz, int fd, off_t off, int num, off_t fsize, uint64_t virtaddr) { Elf32_Phdr ph32; Elf64_Phdr ph64; /* * Loop through all the program headers and find the header with * virtual address in which the "virtaddr" belongs to. */ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += xph_sizeof; if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Perhaps warn here */ continue; } if (virtaddr >= xph_vaddr && virtaddr < xph_vaddr + xph_filesz) return xph_offset + (virtaddr - xph_vaddr); } return 0; } private size_t get_string_on_virtaddr(struct magic_set *ms, int swap, int clazz, int fd, off_t ph_off, int ph_num, off_t fsize, uint64_t virtaddr, char *buf, ssize_t buflen) { char *bptr; off_t offset; if (buflen == 0) return 0; offset = get_offset_from_virtaddr(ms, swap, clazz, fd, ph_off, ph_num, fsize, virtaddr); if ((buflen = pread(fd, buf, CAST(size_t, buflen), offset)) <= 0) { file_badread(ms); return 0; } buf[buflen - 1] = '\0'; /* We expect only printable characters, so return if buffer contains * non-printable character before the '\0' or just '\0'. 
*/ for (bptr = buf; *bptr && isprint((unsigned char)*bptr); bptr++) continue; if (*bptr != '\0') return 0; return bptr - buf; } private int do_auxv_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz __attribute__((__unused__)), uint32_t descsz __attribute__((__unused__)), size_t noff __attribute__((__unused__)), size_t doff, int *flags, size_t size __attribute__((__unused__)), int clazz, int fd, off_t ph_off, int ph_num, off_t fsize) { #ifdef ELFCORE Aux32Info auxv32; Aux64Info auxv64; size_t elsize = xauxv_sizeof; const char *tag; int is_string; size_t nval; if ((*flags & (FLAGS_IS_CORE|FLAGS_DID_CORE_STYLE)) != (FLAGS_IS_CORE|FLAGS_DID_CORE_STYLE)) return 0; switch (*flags & FLAGS_CORE_STYLE) { case OS_STYLE_SVR4: if (type != NT_AUXV) return 0; break; #ifdef notyet case OS_STYLE_NETBSD: if (type != NT_NETBSD_CORE_AUXV) return 0; break; case OS_STYLE_FREEBSD: if (type != NT_FREEBSD_PROCSTAT_AUXV) return 0; break; #endif default: return 0; } *flags |= FLAGS_DID_AUXV; nval = 0; for (size_t off = 0; off + elsize <= descsz; off += elsize) { (void)memcpy(xauxv_addr, &nbuf[doff + off], xauxv_sizeof); /* Limit processing to 50 vector entries to prevent DoS */ if (nval++ >= 50) { file_error(ms, 0, "Too many ELF Auxv elements"); return 1; } switch(xauxv_type) { case AT_LINUX_EXECFN: is_string = 1; tag = "execfn"; break; case AT_LINUX_PLATFORM: is_string = 1; tag = "platform"; break; case AT_LINUX_UID: is_string = 0; tag = "real uid"; break; case AT_LINUX_GID: is_string = 0; tag = "real gid"; break; case AT_LINUX_EUID: is_string = 0; tag = "effective uid"; break; case AT_LINUX_EGID: is_string = 0; tag = "effective gid"; break; default: is_string = 0; tag = NULL; break; } if (tag == NULL) continue; if (is_string) { char buf[256]; ssize_t buflen; buflen = get_string_on_virtaddr(ms, swap, clazz, fd, ph_off, ph_num, fsize, xauxv_val, buf, sizeof(buf)); if (buflen == 0) continue; if (file_printf(ms, ", %s: '%s'", tag, buf) == -1) return 0; } else { if (file_printf(ms, ", %s: %d", tag, (int) xauxv_val) == -1) return 0; } } return 1; #else return 0; #endif } private size_t donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size, int clazz, int swap, size_t align, int *flags, uint16_t *notecount, int fd, off_t ph_off, int ph_num, off_t fsize) { Elf32_Nhdr nh32; Elf64_Nhdr nh64; size_t noff, doff; uint32_t namesz, descsz; unsigned char *nbuf = CAST(unsigned char *, vbuf); if (*notecount == 0) return 0; --*notecount; if (xnh_sizeof + offset > size) { /* * We're out of note headers. */ return xnh_sizeof + offset; } (void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof); offset += xnh_sizeof; namesz = xnh_namesz; descsz = xnh_descsz; if ((namesz == 0) && (descsz == 0)) { /* * We're out of note headers. */ return (offset >= size) ? offset : size; } if (namesz & 0x80000000) { (void)file_printf(ms, ", bad note name size %#lx", (unsigned long)namesz); return 0; } if (descsz & 0x80000000) { (void)file_printf(ms, ", bad note description size %#lx", (unsigned long)descsz); return 0; } noff = offset; doff = ELF_ALIGN(offset + namesz); if (offset + namesz > size) { /* * We're past the end of the buffer. */ return doff; } offset = ELF_ALIGN(doff + descsz); if (doff + descsz > size) { /* * We're past the end of the buffer. */ return (offset >= size) ? 
offset : size; } if ((*flags & FLAGS_DID_OS_NOTE) == 0) { if (do_os_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags)) return offset; } if ((*flags & FLAGS_DID_BUILD_ID) == 0) { if (do_bid_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags)) return offset; } if ((*flags & FLAGS_DID_NETBSD_PAX) == 0) { if (do_pax_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags)) return offset; } if ((*flags & FLAGS_DID_CORE) == 0) { if (do_core_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags, size, clazz)) return offset; } if ((*flags & FLAGS_DID_AUXV) == 0) { if (do_auxv_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags, size, clazz, fd, ph_off, ph_num, fsize)) return offset; } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { if (descsz > 100) descsz = 100; switch (xnh_type) { case NT_NETBSD_VERSION: return offset; case NT_NETBSD_MARCH: if (*flags & FLAGS_DID_NETBSD_MARCH) return offset; *flags |= FLAGS_DID_NETBSD_MARCH; if (file_printf(ms, ", compiled for: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return offset; break; case NT_NETBSD_CMODEL: if (*flags & FLAGS_DID_NETBSD_CMODEL) return offset; *flags |= FLAGS_DID_NETBSD_CMODEL; if (file_printf(ms, ", compiler model: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return offset; break; default: if (*flags & FLAGS_DID_NETBSD_UNKNOWN) return offset; *flags |= FLAGS_DID_NETBSD_UNKNOWN; if (file_printf(ms, ", note=%u", xnh_type) == -1) return offset; break; } return offset; } return offset; } /* SunOS 5.x hardware capability descriptions */ typedef struct cap_desc { uint64_t cd_mask; const char *cd_name; } cap_desc_t; static const cap_desc_t cap_desc_sparc[] = { { AV_SPARC_MUL32, "MUL32" }, { AV_SPARC_DIV32, "DIV32" }, { AV_SPARC_FSMULD, "FSMULD" }, { AV_SPARC_V8PLUS, "V8PLUS" }, { AV_SPARC_POPC, "POPC" }, { AV_SPARC_VIS, "VIS" }, { AV_SPARC_VIS2, "VIS2" }, { AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" }, { AV_SPARC_FMAF, "FMAF" }, { AV_SPARC_FJFMAU, "FJFMAU" }, { AV_SPARC_IMA, "IMA" }, { 0, NULL } }; static const cap_desc_t cap_desc_386[] = { { AV_386_FPU, "FPU" }, { AV_386_TSC, "TSC" }, { AV_386_CX8, "CX8" }, { AV_386_SEP, "SEP" }, { AV_386_AMD_SYSC, "AMD_SYSC" }, { AV_386_CMOV, "CMOV" }, { AV_386_MMX, "MMX" }, { AV_386_AMD_MMX, "AMD_MMX" }, { AV_386_AMD_3DNow, "AMD_3DNow" }, { AV_386_AMD_3DNowx, "AMD_3DNowx" }, { AV_386_FXSR, "FXSR" }, { AV_386_SSE, "SSE" }, { AV_386_SSE2, "SSE2" }, { AV_386_PAUSE, "PAUSE" }, { AV_386_SSE3, "SSE3" }, { AV_386_MON, "MON" }, { AV_386_CX16, "CX16" }, { AV_386_AHF, "AHF" }, { AV_386_TSCP, "TSCP" }, { AV_386_AMD_SSE4A, "AMD_SSE4A" }, { AV_386_POPCNT, "POPCNT" }, { AV_386_AMD_LZCNT, "AMD_LZCNT" }, { AV_386_SSSE3, "SSSE3" }, { AV_386_SSE4_1, "SSE4.1" }, { AV_386_SSE4_2, "SSE4.2" }, { 0, NULL } }; private int doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int mach, int strtab, int *flags, uint16_t *notecount) { Elf32_Shdr sh32; Elf64_Shdr sh64; int stripped = 1, has_debug_info = 0; size_t nbadcap = 0; void *nbuf; off_t noff, coff, name_off; uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilities */ uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilities */ char name[50]; ssize_t namesize; if (size != xsh_sizeof) { if (file_printf(ms, ", corrupted section header size") == -1) return -1; return 0; } /* Read offset of name section to be able to read section names later */ if (pread(fd, xsh_addr, xsh_sizeof, CAST(off_t, (off + size * strtab))) < (ssize_t)xsh_sizeof) { if 
(file_printf(ms, ", missing section headers") == -1) return -1; return 0; } name_off = xsh_offset; for ( ; num; num--) { /* Read the name of this section. */ if ((namesize = pread(fd, name, sizeof(name) - 1, name_off + xsh_name)) == -1) { file_badread(ms); return -1; } name[namesize] = '\0'; if (strcmp(name, ".debug_info") == 0) { has_debug_info = 1; stripped = 0; } if (pread(fd, xsh_addr, xsh_sizeof, off) < (ssize_t)xsh_sizeof) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xsh_type) { case SHT_SYMTAB: #if 0 case SHT_DYNSYM: #endif stripped = 0; break; default: if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) { /* Perhaps warn here */ continue; } break; } /* Things we can determine when we seek */ switch (xsh_type) { case SHT_NOTE: if ((uintmax_t)(xsh_size + xsh_offset) > (uintmax_t)fsize) { if (file_printf(ms, ", note offset/size %#" INTMAX_T_FORMAT "x+%#" INTMAX_T_FORMAT "x exceeds" " file size %#" INTMAX_T_FORMAT "x", (uintmax_t)xsh_offset, (uintmax_t)xsh_size, (uintmax_t)fsize) == -1) return -1; return 0; } if ((nbuf = malloc(xsh_size)) == NULL) { file_error(ms, errno, "Cannot allocate memory" " for note"); return -1; } if (pread(fd, nbuf, xsh_size, xsh_offset) < (ssize_t)xsh_size) { file_badread(ms); free(nbuf); return -1; } noff = 0; for (;;) { if (noff >= (off_t)xsh_size) break; noff = donote(ms, nbuf, (size_t)noff, xsh_size, clazz, swap, 4, flags, notecount, fd, 0, 0, 0); if (noff == 0) break; } free(nbuf); break; case SHT_SUNW_cap: switch (mach) { case EM_SPARC: case EM_SPARCV9: case EM_IA_64: case EM_386: case EM_AMD64: break; default: goto skip; } if (nbadcap > 5) break; if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) { file_badseek(ms); return -1; } coff = 0; for (;;) { Elf32_Cap cap32; Elf64_Cap cap64; char cbuf[/*CONSTCOND*/ MAX(sizeof cap32, sizeof cap64)]; if ((coff += xcap_sizeof) > (off_t)xsh_size) break; if (read(fd, cbuf, (size_t)xcap_sizeof) != (ssize_t)xcap_sizeof) { file_badread(ms); return -1; } if (cbuf[0] == 'A') { #ifdef notyet char *p = cbuf + 1; uint32_t len, tag; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (memcmp("gnu", p, 3) != 0) { if (file_printf(ms, ", unknown capability %.3s", p) == -1) return -1; break; } p += strlen(p) + 1; tag = *p++; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (tag != 1) { if (file_printf(ms, ", unknown gnu" " capability tag %d", tag) == -1) return -1; break; } // gnu attributes #endif break; } (void)memcpy(xcap_addr, cbuf, xcap_sizeof); switch (xcap_tag) { case CA_SUNW_NULL: break; case CA_SUNW_HW_1: cap_hw1 |= xcap_val; break; case CA_SUNW_SF_1: cap_sf1 |= xcap_val; break; default: if (file_printf(ms, ", with unknown capability " "%#" INT64_T_FORMAT "x = %#" INT64_T_FORMAT "x", (unsigned long long)xcap_tag, (unsigned long long)xcap_val) == -1) return -1; if (nbadcap++ > 2) coff = xsh_size; break; } } /*FALLTHROUGH*/ skip: default: break; } } if (has_debug_info) { if (file_printf(ms, ", with debug_info") == -1) return -1; } if (file_printf(ms, ", %sstripped", stripped ? 
"" : "not ") == -1) return -1; if (cap_hw1) { const cap_desc_t *cdp; switch (mach) { case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: cdp = cap_desc_sparc; break; case EM_386: case EM_IA_64: case EM_AMD64: cdp = cap_desc_386; break; default: cdp = NULL; break; } if (file_printf(ms, ", uses") == -1) return -1; if (cdp) { while (cdp->cd_name) { if (cap_hw1 & cdp->cd_mask) { if (file_printf(ms, " %s", cdp->cd_name) == -1) return -1; cap_hw1 &= ~cdp->cd_mask; } ++cdp; } if (cap_hw1) if (file_printf(ms, " unknown hardware capability %#" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } else { if (file_printf(ms, " hardware capability %#" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } } if (cap_sf1) { if (cap_sf1 & SF1_SUNW_FPUSED) { if (file_printf(ms, (cap_sf1 & SF1_SUNW_FPKNWN) ? ", uses frame pointer" : ", not known to use frame pointer") == -1) return -1; } cap_sf1 &= ~SF1_SUNW_MASK; if (cap_sf1) if (file_printf(ms, ", with unknown software capability %#" INT64_T_FORMAT "x", (unsigned long long)cap_sf1) == -1) return -1; } return 0; } /* * Look through the program headers of an executable image, searching * for a PT_INTERP section; if one is found, it's dynamically linked, * otherwise it's statically linked. */ private int dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int sh_num, int *flags, uint16_t *notecount) { Elf32_Phdr ph32; Elf64_Phdr ph64; const char *linking_style = "statically"; const char *interp = ""; unsigned char nbuf[BUFSIZ]; char ibuf[BUFSIZ]; ssize_t bufsize; size_t offset, align, len; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += size; bufsize = 0; align = 4; /* Things we can determine before we seek */ switch (xph_type) { case PT_DYNAMIC: linking_style = "dynamically"; break; case PT_NOTE: if (sh_num) /* Did this through section headers */ continue; if (((align = xph_align) & 0x80000000UL) != 0 || align < 4) { if (file_printf(ms, ", invalid note alignment %#lx", (unsigned long)align) == -1) return -1; align = 4; } /*FALLTHROUGH*/ case PT_INTERP: len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); bufsize = pread(fd, nbuf, len, xph_offset); if (bufsize == -1) { file_badread(ms); return -1; } break; default: if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Maybe warn here? */ continue; } break; } /* Things we can determine when we seek */ switch (xph_type) { case PT_INTERP: if (bufsize && nbuf[0]) { nbuf[bufsize - 1] = '\0'; interp = (const char *)nbuf; } else interp = "*empty*"; break; case PT_NOTE: /* * This is a PT_NOTE section; loop through all the notes * in the section. 
*/ offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, align, flags, notecount, fd, 0, 0, 0); if (offset == 0) break; } break; default: break; } } if (file_printf(ms, ", %s linked", linking_style) == -1) return -1; if (interp[0]) if (file_printf(ms, ", interpreter %s", file_printable(ibuf, sizeof(ibuf), interp)) == -1) return -1; return 0; } protected int file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf, size_t nbytes) { union { int32_t l; char c[sizeof (int32_t)]; } u; int clazz; int swap; struct stat st; off_t fsize; int flags = 0; Elf32_Ehdr elf32hdr; Elf64_Ehdr elf64hdr; uint16_t type, phnum, shnum, notecount; if (ms->flags & (MAGIC_MIME|MAGIC_APPLE|MAGIC_EXTENSION)) return 0; /* * ELF executables have multiple section headers in arbitrary * file locations and thus file(1) cannot determine it from easily. * Instead we traverse thru all section headers until a symbol table * one is found or else the binary is stripped. * Return immediately if it's not ELF (so we avoid pipe2file unless needed). */ if (buf[EI_MAG0] != ELFMAG0 || (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1) || buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) return 0; /* * If we cannot seek, it must be a pipe, socket or fifo. */ if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE)) fd = file_pipe2file(ms, fd, buf, nbytes); if (fstat(fd, &st) == -1) { file_badread(ms); return -1; } if (S_ISREG(st.st_mode) || st.st_size != 0) fsize = st.st_size; else fsize = SIZE_UNKNOWN; clazz = buf[EI_CLASS]; switch (clazz) { case ELFCLASS32: #undef elf_getu #define elf_getu(a, b) elf_getu32(a, b) #undef elfhdr #define elfhdr elf32hdr #include "elfclass.h" case ELFCLASS64: #undef elf_getu #define elf_getu(a, b) elf_getu64(a, b) #undef elfhdr #define elfhdr elf64hdr #include "elfclass.h" default: if (file_printf(ms, ", unknown class %d", clazz) == -1) return -1; break; } return 0; } #endif
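Comparing this pre-patch listing with the patched one that follows, the visible differences in this excerpt are the RCS id (1.137 to 1.138) and a single operator in do_bid_note: (descsz >= 4 || descsz <= 20) becomes (descsz >= 4 && descsz <= 20). For an unsigned descsz the original predicate is a tautology (values below 4 satisfy the right arm, everything else the left), so it rejects nothing; and since donote only requires doff + descsz to fit inside the note buffer, any descsz from 21 up to the buffer size reaches memcpy(desc, &nbuf[doff], descsz) and overruns the 20-byte stack array desc. A condensed side-by-side of the two predicates; every identifier is taken from the listings themselves, with the surrounding function elided.

    uint8_t desc[20];

    /* Pre-patch: the (descsz >= 4 || descsz <= 20) test is true for every
     * uint32_t, so e.g. descsz == 100 passes and memcpy writes 100 bytes
     * into the 20-byte desc. */
    if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
        type == NT_GNU_BUILD_ID && (descsz >= 4 || descsz <= 20))
            (void)memcpy(desc, &nbuf[doff], descsz);

    /* Post-patch: && forces descsz into [4, 20], so the copy fits. */
    if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
        type == NT_GNU_BUILD_ID && (descsz >= 4 && descsz <= 20))
            (void)memcpy(desc, &nbuf[doff], descsz);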
/* * Copyright (c) Christos Zoulas 2003. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "file.h" #ifndef lint FILE_RCSID("@(#)$File: readelf.c,v 1.138 2017/08/27 07:55:02 christos Exp $") #endif #ifdef BUILTIN_ELF #include <string.h> #include <ctype.h> #include <stdlib.h> #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include "readelf.h" #include "magic.h" #ifdef ELFCORE private int dophn_core(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int *, uint16_t *); #endif private int dophn_exec(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int, int *, uint16_t *); private int doshn(struct magic_set *, int, int, int, off_t, int, size_t, off_t, int, int, int *, uint16_t *); private size_t donote(struct magic_set *, void *, size_t, size_t, int, int, size_t, int *, uint16_t *, int, off_t, int, off_t); #define ELF_ALIGN(a) ((((a) + align - 1) / align) * align) #define isquote(c) (strchr("'\"`", (c)) != NULL) private uint16_t getu16(int, uint16_t); private uint32_t getu32(int, uint32_t); private uint64_t getu64(int, uint64_t); #define MAX_PHNUM 128 #define MAX_SHNUM 32768 #define SIZE_UNKNOWN ((off_t)-1) private int toomany(struct magic_set *ms, const char *name, uint16_t num) { if (file_printf(ms, ", too many %s (%u)", name, num ) == -1) return -1; return 0; } private uint16_t getu16(int swap, uint16_t value) { union { uint16_t ui; char c[2]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[1]; retval.c[1] = tmpval.c[0]; return retval.ui; } else return value; } private uint32_t getu32(int swap, uint32_t value) { union { uint32_t ui; char c[4]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[3]; retval.c[1] = tmpval.c[2]; retval.c[2] = tmpval.c[1]; retval.c[3] = tmpval.c[0]; return retval.ui; } else return value; } private uint64_t getu64(int swap, uint64_t value) { union { uint64_t ui; char c[8]; } retval, tmpval; if (swap) { tmpval.ui = value; retval.c[0] = tmpval.c[7]; retval.c[1] = tmpval.c[6]; retval.c[2] = tmpval.c[5]; retval.c[3] = tmpval.c[4]; retval.c[4] = tmpval.c[3]; retval.c[5] = tmpval.c[2]; retval.c[6] = tmpval.c[1]; retval.c[7] = tmpval.c[0]; return retval.ui; } else return value; } #define elf_getu16(swap, value) 
getu16(swap, value) #define elf_getu32(swap, value) getu32(swap, value) #define elf_getu64(swap, value) getu64(swap, value) #define xsh_addr (clazz == ELFCLASS32 \ ? (void *)&sh32 \ : (void *)&sh64) #define xsh_sizeof (clazz == ELFCLASS32 \ ? sizeof(sh32) \ : sizeof(sh64)) #define xsh_size (size_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_size) \ : elf_getu64(swap, sh64.sh_size)) #define xsh_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_offset) \ : elf_getu64(swap, sh64.sh_offset)) #define xsh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_type) \ : elf_getu32(swap, sh64.sh_type)) #define xsh_name (clazz == ELFCLASS32 \ ? elf_getu32(swap, sh32.sh_name) \ : elf_getu32(swap, sh64.sh_name)) #define xph_addr (clazz == ELFCLASS32 \ ? (void *) &ph32 \ : (void *) &ph64) #define xph_sizeof (clazz == ELFCLASS32 \ ? sizeof(ph32) \ : sizeof(ph64)) #define xph_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_type) \ : elf_getu32(swap, ph64.p_type)) #define xph_offset (off_t)(clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_offset) \ : elf_getu64(swap, ph64.p_offset)) #define xph_align (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_align ? \ elf_getu32(swap, ph32.p_align) : 4) \ : (off_t) (ph64.p_align ? \ elf_getu64(swap, ph64.p_align) : 4))) #define xph_vaddr (size_t)((clazz == ELFCLASS32 \ ? (off_t) (ph32.p_vaddr ? \ elf_getu32(swap, ph32.p_vaddr) : 4) \ : (off_t) (ph64.p_vaddr ? \ elf_getu64(swap, ph64.p_vaddr) : 4))) #define xph_filesz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_filesz) \ : elf_getu64(swap, ph64.p_filesz))) #define xnh_addr (clazz == ELFCLASS32 \ ? (void *)&nh32 \ : (void *)&nh64) #define xph_memsz (size_t)((clazz == ELFCLASS32 \ ? elf_getu32(swap, ph32.p_memsz) \ : elf_getu64(swap, ph64.p_memsz))) #define xnh_sizeof (clazz == ELFCLASS32 \ ? sizeof(nh32) \ : sizeof(nh64)) #define xnh_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_type) \ : elf_getu32(swap, nh64.n_type)) #define xnh_namesz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_namesz) \ : elf_getu32(swap, nh64.n_namesz)) #define xnh_descsz (clazz == ELFCLASS32 \ ? elf_getu32(swap, nh32.n_descsz) \ : elf_getu32(swap, nh64.n_descsz)) #define prpsoffsets(i) (clazz == ELFCLASS32 \ ? prpsoffsets32[i] \ : prpsoffsets64[i]) #define xcap_addr (clazz == ELFCLASS32 \ ? (void *)&cap32 \ : (void *)&cap64) #define xcap_sizeof (clazz == ELFCLASS32 \ ? sizeof cap32 \ : sizeof cap64) #define xcap_tag (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_tag) \ : elf_getu64(swap, cap64.c_tag)) #define xcap_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, cap32.c_un.c_val) \ : elf_getu64(swap, cap64.c_un.c_val)) #define xauxv_addr (clazz == ELFCLASS32 \ ? (void *)&auxv32 \ : (void *)&auxv64) #define xauxv_sizeof (clazz == ELFCLASS32 \ ? sizeof(auxv32) \ : sizeof(auxv64)) #define xauxv_type (clazz == ELFCLASS32 \ ? elf_getu32(swap, auxv32.a_type) \ : elf_getu64(swap, auxv64.a_type)) #define xauxv_val (clazz == ELFCLASS32 \ ? elf_getu32(swap, auxv32.a_v) \ : elf_getu64(swap, auxv64.a_v)) #ifdef ELFCORE /* * Try larger offsets first to avoid false matches * from earlier data that happen to look like strings. 
*/ static const size_t prpsoffsets32[] = { #ifdef USE_NT_PSINFO 104, /* SunOS 5.x (command line) */ 88, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 100, /* SunOS 5.x (command line) */ 84, /* SunOS 5.x (short name) */ 44, /* Linux (command line) */ 28, /* Linux 2.0.36 (short name) */ 8, /* FreeBSD */ }; static const size_t prpsoffsets64[] = { #ifdef USE_NT_PSINFO 152, /* SunOS 5.x (command line) */ 136, /* SunOS 5.x (short name) */ #endif /* USE_NT_PSINFO */ 136, /* SunOS 5.x, 64-bit (command line) */ 120, /* SunOS 5.x, 64-bit (short name) */ 56, /* Linux (command line) */ 40, /* Linux (tested on core from 2.4.x, short name) */ 16, /* FreeBSD, 64-bit */ }; #define NOFFSETS32 (sizeof prpsoffsets32 / sizeof prpsoffsets32[0]) #define NOFFSETS64 (sizeof prpsoffsets64 / sizeof prpsoffsets64[0]) #define NOFFSETS (clazz == ELFCLASS32 ? NOFFSETS32 : NOFFSETS64) /* * Look through the program headers of an executable image, searching * for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or * "FreeBSD"; if one is found, try looking in various places in its * contents for a 16-character string containing only printable * characters - if found, that string should be the name of the program * that dropped core. Note: right after that 16-character string is, * at least in SunOS 5.x (and possibly other SVR4-flavored systems) and * Linux, a longer string (80 characters, in 5.x, probably other * SVR4-flavored systems, and Linux) containing the start of the * command line for that program. * * SunOS 5.x core files contain two PT_NOTE sections, with the types * NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the * same info about the command name and command line, so it probably * isn't worthwhile to look for NT_PSINFO, but the offsets are provided * above (see USE_NT_PSINFO), in case we ever decide to do so. The * NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent; * the SunOS 5.x file command relies on this (and prefers the latter). * * The signal number probably appears in a section of type NT_PRSTATUS, * but that's also rather OS-dependent, in ways that are harder to * dissect with heuristics, so I'm not bothering with the signal number. * (I suppose the signal number could be of interest in situations where * you don't have the binary of the program that dropped core; if you * *do* have that binary, the debugger will probably tell you what * signal it was.) */ #define OS_STYLE_SVR4 0 #define OS_STYLE_FREEBSD 1 #define OS_STYLE_NETBSD 2 private const char os_style_names[][8] = { "SVR4", "FreeBSD", "NetBSD", }; #define FLAGS_CORE_STYLE 0x003 #define FLAGS_DID_CORE 0x004 #define FLAGS_DID_OS_NOTE 0x008 #define FLAGS_DID_BUILD_ID 0x010 #define FLAGS_DID_CORE_STYLE 0x020 #define FLAGS_DID_NETBSD_PAX 0x040 #define FLAGS_DID_NETBSD_MARCH 0x080 #define FLAGS_DID_NETBSD_CMODEL 0x100 #define FLAGS_DID_NETBSD_UNKNOWN 0x200 #define FLAGS_IS_CORE 0x400 #define FLAGS_DID_AUXV 0x800 private int dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int *flags, uint16_t *notecount) { Elf32_Phdr ph32; Elf64_Phdr ph64; size_t offset, len; unsigned char nbuf[BUFSIZ]; ssize_t bufsize; off_t ph_off = off; int ph_num = num; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } /* * Loop through all the program headers. 
*/ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += size; if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Perhaps warn here */ continue; } if (xph_type != PT_NOTE) continue; /* * This is a PT_NOTE section; loop through all the notes * in the section. */ len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); if ((bufsize = pread(fd, nbuf, len, xph_offset)) == -1) { file_badread(ms); return -1; } offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, 4, flags, notecount, fd, ph_off, ph_num, fsize); if (offset == 0) break; } } return 0; } #endif static void do_note_netbsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for NetBSD") == -1) return; /* * The version number used to be stuck as 199905, and was thus * basically content-free. Newer versions of NetBSD have fixed * this and now use the encoding of __NetBSD_Version__: * * MMmmrrpp00 * * M = major version * m = minor version * r = release ["",A-Z,Z[A-Z] but numeric] * p = patchlevel */ if (desc > 100000000U) { uint32_t ver_patch = (desc / 100) % 100; uint32_t ver_rel = (desc / 10000) % 100; uint32_t ver_min = (desc / 1000000) % 100; uint32_t ver_maj = desc / 100000000; if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1) return; if (ver_rel == 0 && ver_patch != 0) { if (file_printf(ms, ".%u", ver_patch) == -1) return; } else if (ver_rel != 0) { while (ver_rel > 26) { if (file_printf(ms, "Z") == -1) return; ver_rel -= 26; } if (file_printf(ms, "%c", 'A' + ver_rel - 1) == -1) return; } } } static void do_note_freebsd_version(struct magic_set *ms, int swap, void *v) { uint32_t desc; (void)memcpy(&desc, v, sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, ", for FreeBSD") == -1) return; /* * Contents is __FreeBSD_version, whose relation to OS * versions is defined by a huge table in the Porter's * Handbook. This is the general scheme: * * Releases: * Mmp000 (before 4.10) * Mmi0p0 (before 5.0) * Mmm0p0 * * Development branches: * Mmpxxx (before 4.6) * Mmp1xx (before 4.10) * Mmi1xx (before 5.0) * M000xx (pre-M.0) * Mmm1xx * * M = major version * m = minor version * i = minor version increment (491000 -> 4.10) * p = patchlevel * x = revision * * The first release of FreeBSD to use ELF by default * was version 3.0. 
*/ if (desc == 460002) { if (file_printf(ms, " 4.6.2") == -1) return; } else if (desc < 460100) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10) == -1) return; if (desc / 1000 % 10 > 0) if (file_printf(ms, ".%d", desc / 1000 % 10) == -1) return; if ((desc % 1000 > 0) || (desc % 100000 == 0)) if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc < 500000) { if (file_printf(ms, " %d.%d", desc / 100000, desc / 10000 % 10 + desc / 1000 % 10) == -1) return; if (desc / 100 % 10 > 0) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } else { if (file_printf(ms, " %d.%d", desc / 100000, desc / 1000 % 100) == -1) return; if ((desc / 100 % 10 > 0) || (desc % 100000 / 100 == 0)) { if (file_printf(ms, " (%d)", desc) == -1) return; } else if (desc / 10 % 10 > 0) { if (file_printf(ms, ".%d", desc / 10 % 10) == -1) return; } } } private int /*ARGSUSED*/ do_bid_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap __attribute__((__unused__)), uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && type == NT_GNU_BUILD_ID && (descsz >= 4 && descsz <= 20)) { uint8_t desc[20]; const char *btype; uint32_t i; *flags |= FLAGS_DID_BUILD_ID; switch (descsz) { case 8: btype = "xxHash"; break; case 16: btype = "md5/uuid"; break; case 20: btype = "sha1"; break; default: btype = "unknown"; break; } if (file_printf(ms, ", BuildID[%s]=", btype) == -1) return 1; (void)memcpy(desc, &nbuf[doff], descsz); for (i = 0; i < descsz; i++) if (file_printf(ms, "%02x", desc[i]) == -1) return 1; return 1; } return 0; } private int do_os_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 5 && strcmp((char *)&nbuf[noff], "SuSE") == 0 && type == NT_GNU_VERSION && descsz == 2) { *flags |= FLAGS_DID_OS_NOTE; file_printf(ms, ", for SuSE %d.%d", nbuf[doff], nbuf[doff + 1]); return 1; } if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 && type == NT_GNU_VERSION && descsz == 16) { uint32_t desc[4]; (void)memcpy(desc, &nbuf[doff], sizeof(desc)); *flags |= FLAGS_DID_OS_NOTE; if (file_printf(ms, ", for GNU/") == -1) return 1; switch (elf_getu32(swap, desc[0])) { case GNU_OS_LINUX: if (file_printf(ms, "Linux") == -1) return 1; break; case GNU_OS_HURD: if (file_printf(ms, "Hurd") == -1) return 1; break; case GNU_OS_SOLARIS: if (file_printf(ms, "Solaris") == -1) return 1; break; case GNU_OS_KFREEBSD: if (file_printf(ms, "kFreeBSD") == -1) return 1; break; case GNU_OS_KNETBSD: if (file_printf(ms, "kNetBSD") == -1) return 1; break; default: if (file_printf(ms, "<unknown>") == -1) return 1; } if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]), elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1) return 1; return 1; } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { if (type == NT_NETBSD_VERSION && descsz == 4) { *flags |= FLAGS_DID_OS_NOTE; do_note_netbsd_version(ms, swap, &nbuf[doff]); return 1; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0) { if (type == NT_FREEBSD_VERSION && descsz == 4) { *flags |= FLAGS_DID_OS_NOTE; do_note_freebsd_version(ms, swap, &nbuf[doff]); return 1; } } if (namesz == 8 && strcmp((char *)&nbuf[noff], "OpenBSD") == 0 && type == NT_OPENBSD_VERSION && descsz == 4) { *flags |= FLAGS_DID_OS_NOTE; if (file_printf(ms, ", for OpenBSD") == -1) 
return 1; /* Content of note is always 0 */ return 1; } if (namesz == 10 && strcmp((char *)&nbuf[noff], "DragonFly") == 0 && type == NT_DRAGONFLY_VERSION && descsz == 4) { uint32_t desc; *flags |= FLAGS_DID_OS_NOTE; if (file_printf(ms, ", for DragonFly") == -1) return 1; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (file_printf(ms, " %d.%d.%d", desc / 100000, desc / 10000 % 10, desc % 10000) == -1) return 1; return 1; } return 0; } private int do_pax_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags) { if (namesz == 4 && strcmp((char *)&nbuf[noff], "PaX") == 0 && type == NT_NETBSD_PAX && descsz == 4) { static const char *pax[] = { "+mprotect", "-mprotect", "+segvguard", "-segvguard", "+ASLR", "-ASLR", }; uint32_t desc; size_t i; int did = 0; *flags |= FLAGS_DID_NETBSD_PAX; (void)memcpy(&desc, &nbuf[doff], sizeof(desc)); desc = elf_getu32(swap, desc); if (desc && file_printf(ms, ", PaX: ") == -1) return 1; for (i = 0; i < __arraycount(pax); i++) { if (((1 << (int)i) & desc) == 0) continue; if (file_printf(ms, "%s%s", did++ ? "," : "", pax[i]) == -1) return 1; } return 1; } return 0; } private int do_core_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz, uint32_t descsz, size_t noff, size_t doff, int *flags, size_t size, int clazz) { #ifdef ELFCORE int os_style = -1; /* * Sigh. The 2.0.36 kernel in Debian 2.1, at * least, doesn't correctly implement name * sections, in core dumps, as specified by * the "Program Linking" section of "UNIX(R) System * V Release 4 Programmer's Guide: ANSI C and * Programming Support Tools", because my copy * clearly says "The first 'namesz' bytes in 'name' * contain a *null-terminated* [emphasis mine] * character representation of the entry's owner * or originator", but the 2.0.36 kernel code * doesn't include the terminating null in the * name.... */ if ((namesz == 4 && strncmp((char *)&nbuf[noff], "CORE", 4) == 0) || (namesz == 5 && strcmp((char *)&nbuf[noff], "CORE") == 0)) { os_style = OS_STYLE_SVR4; } if ((namesz == 8 && strcmp((char *)&nbuf[noff], "FreeBSD") == 0)) { os_style = OS_STYLE_FREEBSD; } if ((namesz >= 11 && strncmp((char *)&nbuf[noff], "NetBSD-CORE", 11) == 0)) { os_style = OS_STYLE_NETBSD; } if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) { if (file_printf(ms, ", %s-style", os_style_names[os_style]) == -1) return 1; *flags |= FLAGS_DID_CORE_STYLE; *flags |= os_style; } switch (os_style) { case OS_STYLE_NETBSD: if (type == NT_NETBSD_CORE_PROCINFO) { char sbuf[512]; struct NetBSD_elfcore_procinfo pi; memset(&pi, 0, sizeof(pi)); memcpy(&pi, nbuf + doff, descsz); if (file_printf(ms, ", from '%.31s', pid=%u, uid=%u, " "gid=%u, nlwps=%u, lwp=%u (signal %u/code %u)", file_printable(sbuf, sizeof(sbuf), CAST(char *, pi.cpi_name)), elf_getu32(swap, pi.cpi_pid), elf_getu32(swap, pi.cpi_euid), elf_getu32(swap, pi.cpi_egid), elf_getu32(swap, pi.cpi_nlwps), elf_getu32(swap, pi.cpi_siglwp), elf_getu32(swap, pi.cpi_signo), elf_getu32(swap, pi.cpi_sigcode)) == -1) return 1; *flags |= FLAGS_DID_CORE; return 1; } break; default: if (type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) { size_t i, j; unsigned char c; /* * Extract the program name. We assume * it to be 16 characters (that's what it * is in SunOS 5.x and Linux). * * Unfortunately, it's at a different offset * in various OSes, so try multiple offsets. * If the characters aren't all printable, * reject it. 
*/ for (i = 0; i < NOFFSETS; i++) { unsigned char *cname, *cp; size_t reloffset = prpsoffsets(i); size_t noffset = doff + reloffset; size_t k; for (j = 0; j < 16; j++, noffset++, reloffset++) { /* * Make sure we're not past * the end of the buffer; if * we are, just give up. */ if (noffset >= size) goto tryanother; /* * Make sure we're not past * the end of the contents; * if we are, this obviously * isn't the right offset. */ if (reloffset >= descsz) goto tryanother; c = nbuf[noffset]; if (c == '\0') { /* * A '\0' at the * beginning is * obviously wrong. * Any other '\0' * means we're done. */ if (j == 0) goto tryanother; else break; } else { /* * A nonprintable * character is also * wrong. */ if (!isprint(c) || isquote(c)) goto tryanother; } } /* * Well, that worked. */ /* * Try next offsets, in case this match is * in the middle of a string. */ for (k = i + 1 ; k < NOFFSETS; k++) { size_t no; int adjust = 1; if (prpsoffsets(k) >= prpsoffsets(i)) continue; for (no = doff + prpsoffsets(k); no < doff + prpsoffsets(i); no++) adjust = adjust && isprint(nbuf[no]); if (adjust) i = k; } cname = (unsigned char *) &nbuf[doff + prpsoffsets(i)]; for (cp = cname; *cp && isprint(*cp); cp++) continue; /* * Linux apparently appends a space at the end * of the command line: remove it. */ while (cp > cname && isspace(cp[-1])) cp--; if (file_printf(ms, ", from '%.*s'", (int)(cp - cname), cname) == -1) return 1; *flags |= FLAGS_DID_CORE; return 1; tryanother: ; } } break; } #endif return 0; } private off_t get_offset_from_virtaddr(struct magic_set *ms, int swap, int clazz, int fd, off_t off, int num, off_t fsize, uint64_t virtaddr) { Elf32_Phdr ph32; Elf64_Phdr ph64; /* * Loop through all the program headers and find the header with * virtual address in which the "virtaddr" belongs to. */ for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += xph_sizeof; if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Perhaps warn here */ continue; } if (virtaddr >= xph_vaddr && virtaddr < xph_vaddr + xph_filesz) return xph_offset + (virtaddr - xph_vaddr); } return 0; } private size_t get_string_on_virtaddr(struct magic_set *ms, int swap, int clazz, int fd, off_t ph_off, int ph_num, off_t fsize, uint64_t virtaddr, char *buf, ssize_t buflen) { char *bptr; off_t offset; if (buflen == 0) return 0; offset = get_offset_from_virtaddr(ms, swap, clazz, fd, ph_off, ph_num, fsize, virtaddr); if ((buflen = pread(fd, buf, CAST(size_t, buflen), offset)) <= 0) { file_badread(ms); return 0; } buf[buflen - 1] = '\0'; /* We expect only printable characters, so return if buffer contains * non-printable character before the '\0' or just '\0'. 
*/ for (bptr = buf; *bptr && isprint((unsigned char)*bptr); bptr++) continue; if (*bptr != '\0') return 0; return bptr - buf; } private int do_auxv_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type, int swap, uint32_t namesz __attribute__((__unused__)), uint32_t descsz __attribute__((__unused__)), size_t noff __attribute__((__unused__)), size_t doff, int *flags, size_t size __attribute__((__unused__)), int clazz, int fd, off_t ph_off, int ph_num, off_t fsize) { #ifdef ELFCORE Aux32Info auxv32; Aux64Info auxv64; size_t elsize = xauxv_sizeof; const char *tag; int is_string; size_t nval; if ((*flags & (FLAGS_IS_CORE|FLAGS_DID_CORE_STYLE)) != (FLAGS_IS_CORE|FLAGS_DID_CORE_STYLE)) return 0; switch (*flags & FLAGS_CORE_STYLE) { case OS_STYLE_SVR4: if (type != NT_AUXV) return 0; break; #ifdef notyet case OS_STYLE_NETBSD: if (type != NT_NETBSD_CORE_AUXV) return 0; break; case OS_STYLE_FREEBSD: if (type != NT_FREEBSD_PROCSTAT_AUXV) return 0; break; #endif default: return 0; } *flags |= FLAGS_DID_AUXV; nval = 0; for (size_t off = 0; off + elsize <= descsz; off += elsize) { (void)memcpy(xauxv_addr, &nbuf[doff + off], xauxv_sizeof); /* Limit processing to 50 vector entries to prevent DoS */ if (nval++ >= 50) { file_error(ms, 0, "Too many ELF Auxv elements"); return 1; } switch(xauxv_type) { case AT_LINUX_EXECFN: is_string = 1; tag = "execfn"; break; case AT_LINUX_PLATFORM: is_string = 1; tag = "platform"; break; case AT_LINUX_UID: is_string = 0; tag = "real uid"; break; case AT_LINUX_GID: is_string = 0; tag = "real gid"; break; case AT_LINUX_EUID: is_string = 0; tag = "effective uid"; break; case AT_LINUX_EGID: is_string = 0; tag = "effective gid"; break; default: is_string = 0; tag = NULL; break; } if (tag == NULL) continue; if (is_string) { char buf[256]; ssize_t buflen; buflen = get_string_on_virtaddr(ms, swap, clazz, fd, ph_off, ph_num, fsize, xauxv_val, buf, sizeof(buf)); if (buflen == 0) continue; if (file_printf(ms, ", %s: '%s'", tag, buf) == -1) return 0; } else { if (file_printf(ms, ", %s: %d", tag, (int) xauxv_val) == -1) return 0; } } return 1; #else return 0; #endif } private size_t donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size, int clazz, int swap, size_t align, int *flags, uint16_t *notecount, int fd, off_t ph_off, int ph_num, off_t fsize) { Elf32_Nhdr nh32; Elf64_Nhdr nh64; size_t noff, doff; uint32_t namesz, descsz; unsigned char *nbuf = CAST(unsigned char *, vbuf); if (*notecount == 0) return 0; --*notecount; if (xnh_sizeof + offset > size) { /* * We're out of note headers. */ return xnh_sizeof + offset; } (void)memcpy(xnh_addr, &nbuf[offset], xnh_sizeof); offset += xnh_sizeof; namesz = xnh_namesz; descsz = xnh_descsz; if ((namesz == 0) && (descsz == 0)) { /* * We're out of note headers. */ return (offset >= size) ? offset : size; } if (namesz & 0x80000000) { (void)file_printf(ms, ", bad note name size %#lx", (unsigned long)namesz); return 0; } if (descsz & 0x80000000) { (void)file_printf(ms, ", bad note description size %#lx", (unsigned long)descsz); return 0; } noff = offset; doff = ELF_ALIGN(offset + namesz); if (offset + namesz > size) { /* * We're past the end of the buffer. */ return doff; } offset = ELF_ALIGN(doff + descsz); if (doff + descsz > size) { /* * We're past the end of the buffer. */ return (offset >= size) ? 
offset : size; } if ((*flags & FLAGS_DID_OS_NOTE) == 0) { if (do_os_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags)) return offset; } if ((*flags & FLAGS_DID_BUILD_ID) == 0) { if (do_bid_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags)) return offset; } if ((*flags & FLAGS_DID_NETBSD_PAX) == 0) { if (do_pax_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags)) return offset; } if ((*flags & FLAGS_DID_CORE) == 0) { if (do_core_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags, size, clazz)) return offset; } if ((*flags & FLAGS_DID_AUXV) == 0) { if (do_auxv_note(ms, nbuf, xnh_type, swap, namesz, descsz, noff, doff, flags, size, clazz, fd, ph_off, ph_num, fsize)) return offset; } if (namesz == 7 && strcmp((char *)&nbuf[noff], "NetBSD") == 0) { if (descsz > 100) descsz = 100; switch (xnh_type) { case NT_NETBSD_VERSION: return offset; case NT_NETBSD_MARCH: if (*flags & FLAGS_DID_NETBSD_MARCH) return offset; *flags |= FLAGS_DID_NETBSD_MARCH; if (file_printf(ms, ", compiled for: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return offset; break; case NT_NETBSD_CMODEL: if (*flags & FLAGS_DID_NETBSD_CMODEL) return offset; *flags |= FLAGS_DID_NETBSD_CMODEL; if (file_printf(ms, ", compiler model: %.*s", (int)descsz, (const char *)&nbuf[doff]) == -1) return offset; break; default: if (*flags & FLAGS_DID_NETBSD_UNKNOWN) return offset; *flags |= FLAGS_DID_NETBSD_UNKNOWN; if (file_printf(ms, ", note=%u", xnh_type) == -1) return offset; break; } return offset; } return offset; } /* SunOS 5.x hardware capability descriptions */ typedef struct cap_desc { uint64_t cd_mask; const char *cd_name; } cap_desc_t; static const cap_desc_t cap_desc_sparc[] = { { AV_SPARC_MUL32, "MUL32" }, { AV_SPARC_DIV32, "DIV32" }, { AV_SPARC_FSMULD, "FSMULD" }, { AV_SPARC_V8PLUS, "V8PLUS" }, { AV_SPARC_POPC, "POPC" }, { AV_SPARC_VIS, "VIS" }, { AV_SPARC_VIS2, "VIS2" }, { AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" }, { AV_SPARC_FMAF, "FMAF" }, { AV_SPARC_FJFMAU, "FJFMAU" }, { AV_SPARC_IMA, "IMA" }, { 0, NULL } }; static const cap_desc_t cap_desc_386[] = { { AV_386_FPU, "FPU" }, { AV_386_TSC, "TSC" }, { AV_386_CX8, "CX8" }, { AV_386_SEP, "SEP" }, { AV_386_AMD_SYSC, "AMD_SYSC" }, { AV_386_CMOV, "CMOV" }, { AV_386_MMX, "MMX" }, { AV_386_AMD_MMX, "AMD_MMX" }, { AV_386_AMD_3DNow, "AMD_3DNow" }, { AV_386_AMD_3DNowx, "AMD_3DNowx" }, { AV_386_FXSR, "FXSR" }, { AV_386_SSE, "SSE" }, { AV_386_SSE2, "SSE2" }, { AV_386_PAUSE, "PAUSE" }, { AV_386_SSE3, "SSE3" }, { AV_386_MON, "MON" }, { AV_386_CX16, "CX16" }, { AV_386_AHF, "AHF" }, { AV_386_TSCP, "TSCP" }, { AV_386_AMD_SSE4A, "AMD_SSE4A" }, { AV_386_POPCNT, "POPCNT" }, { AV_386_AMD_LZCNT, "AMD_LZCNT" }, { AV_386_SSSE3, "SSSE3" }, { AV_386_SSE4_1, "SSE4.1" }, { AV_386_SSE4_2, "SSE4.2" }, { 0, NULL } }; private int doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int mach, int strtab, int *flags, uint16_t *notecount) { Elf32_Shdr sh32; Elf64_Shdr sh64; int stripped = 1, has_debug_info = 0; size_t nbadcap = 0; void *nbuf; off_t noff, coff, name_off; uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilities */ uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilities */ char name[50]; ssize_t namesize; if (size != xsh_sizeof) { if (file_printf(ms, ", corrupted section header size") == -1) return -1; return 0; } /* Read offset of name section to be able to read section names later */ if (pread(fd, xsh_addr, xsh_sizeof, CAST(off_t, (off + size * strtab))) < (ssize_t)xsh_sizeof) { if 
(file_printf(ms, ", missing section headers") == -1) return -1; return 0; } name_off = xsh_offset; for ( ; num; num--) { /* Read the name of this section. */ if ((namesize = pread(fd, name, sizeof(name) - 1, name_off + xsh_name)) == -1) { file_badread(ms); return -1; } name[namesize] = '\0'; if (strcmp(name, ".debug_info") == 0) { has_debug_info = 1; stripped = 0; } if (pread(fd, xsh_addr, xsh_sizeof, off) < (ssize_t)xsh_sizeof) { file_badread(ms); return -1; } off += size; /* Things we can determine before we seek */ switch (xsh_type) { case SHT_SYMTAB: #if 0 case SHT_DYNSYM: #endif stripped = 0; break; default: if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) { /* Perhaps warn here */ continue; } break; } /* Things we can determine when we seek */ switch (xsh_type) { case SHT_NOTE: if ((uintmax_t)(xsh_size + xsh_offset) > (uintmax_t)fsize) { if (file_printf(ms, ", note offset/size %#" INTMAX_T_FORMAT "x+%#" INTMAX_T_FORMAT "x exceeds" " file size %#" INTMAX_T_FORMAT "x", (uintmax_t)xsh_offset, (uintmax_t)xsh_size, (uintmax_t)fsize) == -1) return -1; return 0; } if ((nbuf = malloc(xsh_size)) == NULL) { file_error(ms, errno, "Cannot allocate memory" " for note"); return -1; } if (pread(fd, nbuf, xsh_size, xsh_offset) < (ssize_t)xsh_size) { file_badread(ms); free(nbuf); return -1; } noff = 0; for (;;) { if (noff >= (off_t)xsh_size) break; noff = donote(ms, nbuf, (size_t)noff, xsh_size, clazz, swap, 4, flags, notecount, fd, 0, 0, 0); if (noff == 0) break; } free(nbuf); break; case SHT_SUNW_cap: switch (mach) { case EM_SPARC: case EM_SPARCV9: case EM_IA_64: case EM_386: case EM_AMD64: break; default: goto skip; } if (nbadcap > 5) break; if (lseek(fd, xsh_offset, SEEK_SET) == (off_t)-1) { file_badseek(ms); return -1; } coff = 0; for (;;) { Elf32_Cap cap32; Elf64_Cap cap64; char cbuf[/*CONSTCOND*/ MAX(sizeof cap32, sizeof cap64)]; if ((coff += xcap_sizeof) > (off_t)xsh_size) break; if (read(fd, cbuf, (size_t)xcap_sizeof) != (ssize_t)xcap_sizeof) { file_badread(ms); return -1; } if (cbuf[0] == 'A') { #ifdef notyet char *p = cbuf + 1; uint32_t len, tag; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (memcmp("gnu", p, 3) != 0) { if (file_printf(ms, ", unknown capability %.3s", p) == -1) return -1; break; } p += strlen(p) + 1; tag = *p++; memcpy(&len, p, sizeof(len)); p += 4; len = getu32(swap, len); if (tag != 1) { if (file_printf(ms, ", unknown gnu" " capability tag %d", tag) == -1) return -1; break; } // gnu attributes #endif break; } (void)memcpy(xcap_addr, cbuf, xcap_sizeof); switch (xcap_tag) { case CA_SUNW_NULL: break; case CA_SUNW_HW_1: cap_hw1 |= xcap_val; break; case CA_SUNW_SF_1: cap_sf1 |= xcap_val; break; default: if (file_printf(ms, ", with unknown capability " "%#" INT64_T_FORMAT "x = %#" INT64_T_FORMAT "x", (unsigned long long)xcap_tag, (unsigned long long)xcap_val) == -1) return -1; if (nbadcap++ > 2) coff = xsh_size; break; } } /*FALLTHROUGH*/ skip: default: break; } } if (has_debug_info) { if (file_printf(ms, ", with debug_info") == -1) return -1; } if (file_printf(ms, ", %sstripped", stripped ? 
"" : "not ") == -1) return -1; if (cap_hw1) { const cap_desc_t *cdp; switch (mach) { case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: cdp = cap_desc_sparc; break; case EM_386: case EM_IA_64: case EM_AMD64: cdp = cap_desc_386; break; default: cdp = NULL; break; } if (file_printf(ms, ", uses") == -1) return -1; if (cdp) { while (cdp->cd_name) { if (cap_hw1 & cdp->cd_mask) { if (file_printf(ms, " %s", cdp->cd_name) == -1) return -1; cap_hw1 &= ~cdp->cd_mask; } ++cdp; } if (cap_hw1) if (file_printf(ms, " unknown hardware capability %#" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } else { if (file_printf(ms, " hardware capability %#" INT64_T_FORMAT "x", (unsigned long long)cap_hw1) == -1) return -1; } } if (cap_sf1) { if (cap_sf1 & SF1_SUNW_FPUSED) { if (file_printf(ms, (cap_sf1 & SF1_SUNW_FPKNWN) ? ", uses frame pointer" : ", not known to use frame pointer") == -1) return -1; } cap_sf1 &= ~SF1_SUNW_MASK; if (cap_sf1) if (file_printf(ms, ", with unknown software capability %#" INT64_T_FORMAT "x", (unsigned long long)cap_sf1) == -1) return -1; } return 0; } /* * Look through the program headers of an executable image, searching * for a PT_INTERP section; if one is found, it's dynamically linked, * otherwise it's statically linked. */ private int dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num, size_t size, off_t fsize, int sh_num, int *flags, uint16_t *notecount) { Elf32_Phdr ph32; Elf64_Phdr ph64; const char *linking_style = "statically"; const char *interp = ""; unsigned char nbuf[BUFSIZ]; char ibuf[BUFSIZ]; ssize_t bufsize; size_t offset, align, len; if (size != xph_sizeof) { if (file_printf(ms, ", corrupted program header size") == -1) return -1; return 0; } for ( ; num; num--) { if (pread(fd, xph_addr, xph_sizeof, off) < (ssize_t)xph_sizeof) { file_badread(ms); return -1; } off += size; bufsize = 0; align = 4; /* Things we can determine before we seek */ switch (xph_type) { case PT_DYNAMIC: linking_style = "dynamically"; break; case PT_NOTE: if (sh_num) /* Did this through section headers */ continue; if (((align = xph_align) & 0x80000000UL) != 0 || align < 4) { if (file_printf(ms, ", invalid note alignment %#lx", (unsigned long)align) == -1) return -1; align = 4; } /*FALLTHROUGH*/ case PT_INTERP: len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf); bufsize = pread(fd, nbuf, len, xph_offset); if (bufsize == -1) { file_badread(ms); return -1; } break; default: if (fsize != SIZE_UNKNOWN && xph_offset > fsize) { /* Maybe warn here? */ continue; } break; } /* Things we can determine when we seek */ switch (xph_type) { case PT_INTERP: if (bufsize && nbuf[0]) { nbuf[bufsize - 1] = '\0'; interp = (const char *)nbuf; } else interp = "*empty*"; break; case PT_NOTE: /* * This is a PT_NOTE section; loop through all the notes * in the section. 
*/ offset = 0; for (;;) { if (offset >= (size_t)bufsize) break; offset = donote(ms, nbuf, offset, (size_t)bufsize, clazz, swap, align, flags, notecount, fd, 0, 0, 0); if (offset == 0) break; } break; default: break; } } if (file_printf(ms, ", %s linked", linking_style) == -1) return -1; if (interp[0]) if (file_printf(ms, ", interpreter %s", file_printable(ibuf, sizeof(ibuf), interp)) == -1) return -1; return 0; } protected int file_tryelf(struct magic_set *ms, int fd, const unsigned char *buf, size_t nbytes) { union { int32_t l; char c[sizeof (int32_t)]; } u; int clazz; int swap; struct stat st; off_t fsize; int flags = 0; Elf32_Ehdr elf32hdr; Elf64_Ehdr elf64hdr; uint16_t type, phnum, shnum, notecount; if (ms->flags & (MAGIC_MIME|MAGIC_APPLE|MAGIC_EXTENSION)) return 0; /* * ELF executables have multiple section headers in arbitrary * file locations and thus file(1) cannot determine it from easily. * Instead we traverse thru all section headers until a symbol table * one is found or else the binary is stripped. * Return immediately if it's not ELF (so we avoid pipe2file unless needed). */ if (buf[EI_MAG0] != ELFMAG0 || (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1) || buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3) return 0; /* * If we cannot seek, it must be a pipe, socket or fifo. */ if((lseek(fd, (off_t)0, SEEK_SET) == (off_t)-1) && (errno == ESPIPE)) fd = file_pipe2file(ms, fd, buf, nbytes); if (fstat(fd, &st) == -1) { file_badread(ms); return -1; } if (S_ISREG(st.st_mode) || st.st_size != 0) fsize = st.st_size; else fsize = SIZE_UNKNOWN; clazz = buf[EI_CLASS]; switch (clazz) { case ELFCLASS32: #undef elf_getu #define elf_getu(a, b) elf_getu32(a, b) #undef elfhdr #define elfhdr elf32hdr #include "elfclass.h" case ELFCLASS64: #undef elf_getu #define elf_getu(a, b) elf_getu64(a, b) #undef elfhdr #define elfhdr elf64hdr #include "elfclass.h" default: if (file_printf(ms, ", unknown class %d", clazz) == -1) return -1; break; } return 0; } #endif
func_before:
do_bid_note(struct magic_set *ms, unsigned char *nbuf,
    uint32_t type, int swap __attribute__((__unused__)),
    uint32_t namesz, uint32_t descsz, size_t noff, size_t doff,
    int *flags)
{
    if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
        type == NT_GNU_BUILD_ID && (descsz >= 4 || descsz <= 20)) {
        uint8_t desc[20];
        const char *btype;
        uint32_t i;

        *flags |= FLAGS_DID_BUILD_ID;
        switch (descsz) {
        case 8:
            btype = "xxHash";
            break;
        case 16:
            btype = "md5/uuid";
            break;
        case 20:
            btype = "sha1";
            break;
        default:
            btype = "unknown";
            break;
        }
        if (file_printf(ms, ", BuildID[%s]=", btype) == -1)
            return 1;
        (void)memcpy(desc, &nbuf[doff], descsz);
        for (i = 0; i < descsz; i++)
            if (file_printf(ms, "%02x", desc[i]) == -1)
                return 1;
        return 1;
    }
    return 0;
}
func_after:
do_bid_note(struct magic_set *ms, unsigned char *nbuf,
    uint32_t type, int swap __attribute__((__unused__)),
    uint32_t namesz, uint32_t descsz, size_t noff, size_t doff,
    int *flags)
{
    if (namesz == 4 && strcmp((char *)&nbuf[noff], "GNU") == 0 &&
        type == NT_GNU_BUILD_ID && (descsz >= 4 && descsz <= 20)) {
        uint8_t desc[20];
        const char *btype;
        uint32_t i;

        *flags |= FLAGS_DID_BUILD_ID;
        switch (descsz) {
        case 8:
            btype = "xxHash";
            break;
        case 16:
            btype = "md5/uuid";
            break;
        case 20:
            btype = "sha1";
            break;
        default:
            btype = "unknown";
            break;
        }
        if (file_printf(ms, ", BuildID[%s]=", btype) == -1)
            return 1;
        (void)memcpy(desc, &nbuf[doff], descsz);
        for (i = 0; i < descsz; i++)
            if (file_printf(ms, "%02x", desc[i]) == -1)
                return 1;
        return 1;
    }
    return 0;
}
diff:
@@ 30 @@
-FILE_RCSID("@(#)$File: readelf.c,v 1.137 2017/08/13 00:21:47 christos Exp $")
+FILE_RCSID("@(#)$File: readelf.c,v 1.138 2017/08/27 07:55:02 christos Exp $")
@@ 514 @@
-	    type == NT_GNU_BUILD_ID && (descsz >= 4 || descsz <= 20)) {
+	    type == NT_GNU_BUILD_ID && (descsz >= 4 && descsz <= 20)) {
num_lines_added: 2
num_lines_deleted: 2
num_lines_in_file: 1,136
num_tokens_in_file: 6,923
num_lines_in_method: 34
num_tokens_in_method: 213
method_complexity: 12
repo: https://github.com/file/file
cve_id: CVE-2017-1000249
cwe_id: CWE-119
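The entire security fix in this record is the one-character `||` → `&&` flip visible in the diff above: descsz is the attacker-controlled note size, desc is a fixed 20-byte stack buffer, and (descsz >= 4 || descsz <= 20) is true for every possible uint32_t value, so memcpy(desc, &nbuf[doff], descsz) could write far past the buffer — the CWE-119 overflow of CVE-2017-1000249. Below is a standalone C sketch of just the predicate, with illustrative helper names that are not part of file(1)'s code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal sketch (not file(1) code) of the CVE-2017-1000249 predicate.
 * Over uint32_t, (descsz >= 4 || descsz <= 20) is a tautology: values
 * below 4 satisfy the right operand and everything else satisfies the
 * left, so an oversized descsz still reaches memcpy into desc[20]. */
static int bounds_ok_buggy(uint32_t descsz)
{
    return descsz >= 4 || descsz <= 20;     /* accepts every value */
}

static int bounds_ok_fixed(uint32_t descsz)
{
    return descsz >= 4 && descsz <= 20;     /* accepts only 4..20 */
}

int main(void)
{
    const uint32_t sizes[] = { 0, 4, 20, 21, 4096 };
    for (size_t i = 0; i < sizeof sizes / sizeof sizes[0]; i++)
        printf("descsz=%" PRIu32 " buggy=%d fixed=%d\n", sizes[i],
            bounds_ok_buggy(sizes[i]), bounds_ok_fixed(sizes[i]));
    return 0;
}

The only other guard upstream is donote()'s rejection of sizes with the high bit set, so before the fix a crafted ELF note could still drive the memcpy with a size approaching 2 GB.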
id: 761
file_name: BlockCodec.cpp
programming_language: C++
method_name: BlockCodec::runPull
/* Audio File Library Copyright (C) 2013 Michael Pruett <michael@68k.org> This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #include "BlockCodec.h" #include "Track.h" #include <assert.h> BlockCodec::BlockCodec(Mode mode, Track *track, File *fh, bool canSeek) : FileModule(mode, track, fh, canSeek), m_bytesPerPacket(-1), m_framesPerPacket(-1), m_framesToIgnore(-1), m_savedPositionNextFrame(-1), m_savedNextFrame(-1) { m_framesPerPacket = track->f.framesPerPacket; m_bytesPerPacket = track->f.bytesPerPacket; } void BlockCodec::runPull() { AFframecount framesToRead = m_outChunk->frameCount; AFframecount framesRead = 0; assert(framesToRead % m_framesPerPacket == 0); int blockCount = framesToRead / m_framesPerPacket; // Read the compressed data. ssize_t bytesRead = read(m_inChunk->buffer, m_bytesPerPacket * blockCount); int blocksRead = bytesRead >= 0 ? bytesRead / m_bytesPerPacket : 0; // Decompress into m_outChunk. for (int i=0; i<blocksRead; i++) { decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket, static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount); framesRead += m_framesPerPacket; } m_track->nextfframe += framesRead; assert(tell() == m_track->fpos_next_frame); if (framesRead < framesToRead) reportReadError(framesRead, framesToRead); m_outChunk->frameCount = framesRead; } void BlockCodec::reset1() { AFframecount nextTrackFrame = m_track->nextfframe; m_track->nextfframe = (nextTrackFrame / m_framesPerPacket) * m_framesPerPacket; m_framesToIgnore = nextTrackFrame - m_track->nextfframe; } void BlockCodec::reset2() { m_track->fpos_next_frame = m_track->fpos_first_frame + m_bytesPerPacket * (m_track->nextfframe / m_framesPerPacket); m_track->frames2ignore += m_framesToIgnore; assert(m_track->nextfframe % m_framesPerPacket == 0); } void BlockCodec::runPush() { AFframecount framesToWrite = m_inChunk->frameCount; int channelCount = m_inChunk->f.channelCount; int blockCount = (framesToWrite + m_framesPerPacket - 1) / m_framesPerPacket; for (int i=0; i<blockCount; i++) { encodeBlock(static_cast<const int16_t *>(m_inChunk->buffer) + i * m_framesPerPacket * channelCount, static_cast<uint8_t *>(m_outChunk->buffer) + i * m_bytesPerPacket); } ssize_t bytesWritten = write(m_outChunk->buffer, m_bytesPerPacket * blockCount); ssize_t blocksWritten = bytesWritten >= 0 ? 
bytesWritten / m_bytesPerPacket : 0; AFframecount framesWritten = std::min((AFframecount) blocksWritten * m_framesPerPacket, framesToWrite); m_track->nextfframe += framesWritten; m_track->totalfframes = m_track->nextfframe; assert(tell() == m_track->fpos_next_frame); if (framesWritten < framesToWrite) reportWriteError(framesWritten, framesToWrite); } void BlockCodec::sync1() { m_savedPositionNextFrame = m_track->fpos_next_frame; m_savedNextFrame = m_track->nextfframe; } void BlockCodec::sync2() { assert(tell() == m_track->fpos_next_frame); m_track->fpos_after_data = tell(); m_track->fpos_next_frame = m_savedPositionNextFrame; m_track->nextfframe = m_savedNextFrame; }
/* Audio File Library Copyright (C) 2013 Michael Pruett <michael@68k.org> This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "config.h" #include "BlockCodec.h" #include "Track.h" #include <assert.h> BlockCodec::BlockCodec(Mode mode, Track *track, File *fh, bool canSeek) : FileModule(mode, track, fh, canSeek), m_bytesPerPacket(-1), m_framesPerPacket(-1), m_framesToIgnore(-1), m_savedPositionNextFrame(-1), m_savedNextFrame(-1) { m_framesPerPacket = track->f.framesPerPacket; m_bytesPerPacket = track->f.bytesPerPacket; } void BlockCodec::runPull() { AFframecount framesToRead = m_outChunk->frameCount; AFframecount framesRead = 0; assert(framesToRead % m_framesPerPacket == 0); int blockCount = framesToRead / m_framesPerPacket; // Read the compressed data. ssize_t bytesRead = read(m_inChunk->buffer, m_bytesPerPacket * blockCount); int blocksRead = bytesRead >= 0 ? bytesRead / m_bytesPerPacket : 0; // Decompress into m_outChunk. for (int i=0; i<blocksRead; i++) { if (decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket, static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount)==0) break; framesRead += m_framesPerPacket; } m_track->nextfframe += framesRead; assert(tell() == m_track->fpos_next_frame); if (framesRead < framesToRead) reportReadError(framesRead, framesToRead); m_outChunk->frameCount = framesRead; } void BlockCodec::reset1() { AFframecount nextTrackFrame = m_track->nextfframe; m_track->nextfframe = (nextTrackFrame / m_framesPerPacket) * m_framesPerPacket; m_framesToIgnore = nextTrackFrame - m_track->nextfframe; } void BlockCodec::reset2() { m_track->fpos_next_frame = m_track->fpos_first_frame + m_bytesPerPacket * (m_track->nextfframe / m_framesPerPacket); m_track->frames2ignore += m_framesToIgnore; assert(m_track->nextfframe % m_framesPerPacket == 0); } void BlockCodec::runPush() { AFframecount framesToWrite = m_inChunk->frameCount; int channelCount = m_inChunk->f.channelCount; int blockCount = (framesToWrite + m_framesPerPacket - 1) / m_framesPerPacket; for (int i=0; i<blockCount; i++) { encodeBlock(static_cast<const int16_t *>(m_inChunk->buffer) + i * m_framesPerPacket * channelCount, static_cast<uint8_t *>(m_outChunk->buffer) + i * m_bytesPerPacket); } ssize_t bytesWritten = write(m_outChunk->buffer, m_bytesPerPacket * blockCount); ssize_t blocksWritten = bytesWritten >= 0 ? 
bytesWritten / m_bytesPerPacket : 0; AFframecount framesWritten = std::min((AFframecount) blocksWritten * m_framesPerPacket, framesToWrite); m_track->nextfframe += framesWritten; m_track->totalfframes = m_track->nextfframe; assert(tell() == m_track->fpos_next_frame); if (framesWritten < framesToWrite) reportWriteError(framesWritten, framesToWrite); } void BlockCodec::sync1() { m_savedPositionNextFrame = m_track->fpos_next_frame; m_savedNextFrame = m_track->nextfframe; } void BlockCodec::sync2() { assert(tell() == m_track->fpos_next_frame); m_track->fpos_after_data = tell(); m_track->fpos_next_frame = m_savedPositionNextFrame; m_track->nextfframe = m_savedNextFrame; }
func_before:
void BlockCodec::runPull()
{
    AFframecount framesToRead = m_outChunk->frameCount;
    AFframecount framesRead = 0;

    assert(framesToRead % m_framesPerPacket == 0);
    int blockCount = framesToRead / m_framesPerPacket;

    // Read the compressed data.
    ssize_t bytesRead = read(m_inChunk->buffer, m_bytesPerPacket * blockCount);
    int blocksRead = bytesRead >= 0 ? bytesRead / m_bytesPerPacket : 0;

    // Decompress into m_outChunk.
    for (int i=0; i<blocksRead; i++)
    {
        decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket,
            static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount);
        framesRead += m_framesPerPacket;
    }

    m_track->nextfframe += framesRead;
    assert(tell() == m_track->fpos_next_frame);

    if (framesRead < framesToRead)
        reportReadError(framesRead, framesToRead);

    m_outChunk->frameCount = framesRead;
}
func_after:
void BlockCodec::runPull()
{
    AFframecount framesToRead = m_outChunk->frameCount;
    AFframecount framesRead = 0;

    assert(framesToRead % m_framesPerPacket == 0);
    int blockCount = framesToRead / m_framesPerPacket;

    // Read the compressed data.
    ssize_t bytesRead = read(m_inChunk->buffer, m_bytesPerPacket * blockCount);
    int blocksRead = bytesRead >= 0 ? bytesRead / m_bytesPerPacket : 0;

    // Decompress into m_outChunk.
    for (int i=0; i<blocksRead; i++)
    {
        if (decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket,
            static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount)==0)
            break;
        framesRead += m_framesPerPacket;
    }

    m_track->nextfframe += framesRead;
    assert(tell() == m_track->fpos_next_frame);

    if (framesRead < framesToRead)
        reportReadError(framesRead, framesToRead);

    m_outChunk->frameCount = framesRead;
}
diff:
@@ 55 @@
-		decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket,
-			static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount);
+		if (decodeBlock(static_cast<const uint8_t *>(m_inChunk->buffer) + i * m_bytesPerPacket,
+			static_cast<int16_t *>(m_outChunk->buffer) + i * m_framesPerPacket * m_track->f.channelCount)==0)
+			break;
num_lines_added: 3
num_lines_deleted: 2
num_lines_in_file: 81
num_tokens_in_file: 558
num_lines_in_method: 20
num_tokens_in_method: 158
method_complexity: 4
repo: https://github.com/antlarr/audiofile
cve_id: CVE-2017-6839
cwe_id: CWE-190
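The audiofile patch is similarly compact: the fixed runPull tests decodeBlock's return value and leaves the loop on 0, so a malformed packet is no longer counted into framesRead and the shortfall is surfaced through reportReadError. Below is a hedged C sketch of that stop-on-first-failure decode loop; the types and names are illustrative, not audiofile's API:

#include <stddef.h>
#include <stdint.h>

/* Illustrative decoder callback: returns the number of frames produced,
 * or 0 on a malformed block (mirroring how the patched runPull treats a
 * zero return from decodeBlock). */
typedef int (*decode_block_fn)(const uint8_t *in, int16_t *out);

/* Decode up to block_count blocks; stop at the first failure so the
 * caller never advances its frame counters past valid output. */
static size_t decode_blocks(decode_block_fn decode,
    const uint8_t *in, int16_t *out,
    size_t block_count, size_t bytes_per_block,
    size_t frames_per_block, size_t channels)
{
    size_t frames = 0;
    for (size_t i = 0; i < block_count; i++) {
        if (decode(in + i * bytes_per_block,
            out + i * frames_per_block * channels) == 0)
            break;              /* bad block: report a short read */
        frames += frames_per_block;
    }
    return frames;
}

The design point is that the frame counter only advances for blocks the decoder actually accepted, so downstream bookkeeping (m_track->nextfframe in the real code) can never run ahead of valid output.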
id: 2,735
file_name: assign_variable.cc
programming_language: C++
method_name: tflite::ops::custom::assign_variable::Prepare
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/core/subgraph.h" #include "tensorflow/lite/experimental/resource/resource_variable.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace custom { namespace assign_variable { constexpr int kInputVariableId = 0; constexpr int kInputValue = 1; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); // TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't // fully support 0-output ops yet. Currently it works if we manually crfat // a TFLite graph that contains variable ops. Note: // * The TFLite Converter need to be changed to be able to produce an op // with 0 output. // * The delegation code need to be changed to handle 0 output ops. However // everything still works fine when variable ops aren't used. TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); const TfLiteTensor* input_value_tensor = GetInput(context, node, kInputValue); int resource_id = input_resource_id_tensor->data.i32[0]; auto& resources = subgraph->resources(); resource::CreateResourceVariableIfNotAvailable(&resources, resource_id); auto* variable = resource::GetResourceVariable(&resources, resource_id); TF_LITE_ENSURE(context, variable != nullptr); variable->AssignFrom(input_value_tensor); return kTfLiteOk; } } // namespace assign_variable TfLiteRegistration* Register_ASSIGN_VARIABLE() { static TfLiteRegistration r = {nullptr, nullptr, assign_variable::Prepare, assign_variable::Eval}; return &r; } } // namespace custom } // namespace ops } // namespace tflite
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/core/subgraph.h" #include "tensorflow/lite/experimental/resource/resource_variable.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace ops { namespace custom { namespace assign_variable { constexpr int kInputVariableId = 0; constexpr int kInputValue = 1; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); // TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't // fully support 0-output ops yet. Currently it works if we manually crfat // a TFLite graph that contains variable ops. Note: // * The TFLite Converter need to be changed to be able to produce an op // with 0 output. // * The delegation code need to be changed to handle 0 output ops. However // everything still works fine when variable ops aren't used. TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); const TfLiteTensor* input_resource_id_tensor; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputVariableId, &input_resource_id_tensor)); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); const TfLiteTensor* input_resource_id_tensor; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputVariableId, &input_resource_id_tensor)); const TfLiteTensor* input_value_tensor; TF_LITE_ENSURE_OK( context, GetInputSafe(context, node, kInputValue, &input_value_tensor)); int resource_id = input_resource_id_tensor->data.i32[0]; auto& resources = subgraph->resources(); resource::CreateResourceVariableIfNotAvailable(&resources, resource_id); auto* variable = resource::GetResourceVariable(&resources, resource_id); TF_LITE_ENSURE(context, variable != nullptr); variable->AssignFrom(input_value_tensor); return kTfLiteOk; } } // namespace assign_variable TfLiteRegistration* Register_ASSIGN_VARIABLE() { static TfLiteRegistration r = {nullptr, nullptr, assign_variable::Prepare, assign_variable::Eval}; return &r; } } // namespace custom } // namespace ops } // namespace tflite
func_before:
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);

  // TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't
  // fully support 0-output ops yet. Currently it works if we manually crfat
  // a TFLite graph that contains variable ops. Note:
  // * The TFLite Converter need to be changed to be able to produce an op
  //   with 0 output.
  // * The delegation code need to be changed to handle 0 output ops. However
  //   everything still works fine when variable ops aren't used.
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);

  const TfLiteTensor* input_resource_id_tensor =
      GetInput(context, node, kInputVariableId);
  TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
  TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1);

  return kTfLiteOk;
}
func_after:
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);

  // TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't
  // fully support 0-output ops yet. Currently it works if we manually crfat
  // a TFLite graph that contains variable ops. Note:
  // * The TFLite Converter need to be changed to be able to produce an op
  //   with 0 output.
  // * The delegation code need to be changed to handle 0 output ops. However
  //   everything still works fine when variable ops aren't used.
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);

  const TfLiteTensor* input_resource_id_tensor;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputVariableId,
                                          &input_resource_id_tensor));
  TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
  TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1);

  return kTfLiteOk;
}
diff:
@@ 43 @@
-  const TfLiteTensor* input_resource_id_tensor =
-      GetInput(context, node, kInputVariableId);
+  const TfLiteTensor* input_resource_id_tensor;
+  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputVariableId,
+                                          &input_resource_id_tensor));
@@ 54 @@
-  const TfLiteTensor* input_resource_id_tensor =
-      GetInput(context, node, kInputVariableId);
-  const TfLiteTensor* input_value_tensor = GetInput(context, node, kInputValue);
+  const TfLiteTensor* input_resource_id_tensor;
+  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputVariableId,
+                                          &input_resource_id_tensor));
+  const TfLiteTensor* input_value_tensor;
+  TF_LITE_ENSURE_OK(
+      context, GetInputSafe(context, node, kInputValue, &input_value_tensor));
num_lines_added: 9
num_lines_deleted: 5
num_lines_in_file: 47
num_tokens_in_file: 291
num_lines_in_method: 9
num_tokens_in_method: 76
method_complexity: 1
repo: https://github.com/tensorflow/tensorflow
cve_id: CVE-2020-15211
cwe_id: CWE-125
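This TensorFlow Lite record shows the project-wide hardening pattern behind CVE-2020-15211: GetInput returns a tensor pointer computed from an unvalidated index, while GetInputSafe validates the index and reports failure through a status checked by TF_LITE_ENSURE_OK before the pointer can be dereferenced, closing the CWE-125 out-of-bounds read. Below is a C sketch of the same safe-accessor shape, using hypothetical types rather than TFLite's:

#include <stddef.h>

typedef struct { int type; } Tensor;
typedef enum { STATUS_OK, STATUS_ERROR } Status;

typedef struct {
    Tensor *tensors;
    size_t num_tensors;
} Context;

/* Unsafe accessor: an attacker-controlled index silently yields an
 * out-of-bounds pointer, which the caller then dereferences. */
static Tensor *get_input(Context *ctx, size_t index)
{
    return &ctx->tensors[index];
}

/* Safe accessor: validate the index first and surface failure through
 * a status code, as GetInputSafe does in the patched kernel. */
static Status get_input_safe(Context *ctx, size_t index, Tensor **out)
{
    if (index >= ctx->num_tensors)
        return STATUS_ERROR;
    *out = &ctx->tensors[index];
    return STATUS_OK;
}

Callers must branch on the returned status before touching *out, which is exactly what the TF_LITE_ENSURE_OK wrapper enforces in the patched Prepare and Eval.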
id: 794
file_name: jsparse.c
programming_language: C++
method_name: jspeFactorDelete
/* * This file is part of Espruino, a JavaScript interpreter for Microcontrollers * * Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk> * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * * ---------------------------------------------------------------------------- * Recursive descent parser for code execution * ---------------------------------------------------------------------------- */ #include "jsparse.h" #include "jsinteractive.h" #include "jswrapper.h" #include "jsnative.h" #include "jswrap_object.h" // for function_replacewith #include "jswrap_functions.h" // insane check for eval in jspeFunctionCall #include "jswrap_json.h" // for jsfPrintJSON #include "jswrap_espruino.h" // for jswrap_espruino_memoryArea #ifndef SAVE_ON_FLASH #include "jswrap_regexp.h" // for jswrap_regexp_constructor #endif /* Info about execution when Parsing - this saves passing it on the stack * for each call */ JsExecInfo execInfo; // ----------------------------------------------- Forward decls JsVar *jspeAssignmentExpression(); JsVar *jspeExpression(); JsVar *jspeUnaryExpression(); void jspeBlock(); void jspeBlockNoBrackets(); JsVar *jspeStatement(); JsVar *jspeFactor(); void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName); #ifndef SAVE_ON_FLASH JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a); #endif // ----------------------------------------------- Utils #define JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, CLEANUP_CODE, RETURN_VAL) { if (!jslMatch((TOKEN))) { CLEANUP_CODE; return RETURN_VAL; } } #define JSP_MATCH_WITH_RETURN(TOKEN, RETURN_VAL) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , RETURN_VAL) #define JSP_MATCH(TOKEN) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , 0) // Match where the user could have given us the wrong token #define JSP_ASSERT_MATCH(TOKEN) { assert(lex->tk==(TOKEN));jslGetNextToken(); } // Match where if we have the wrong token, it's an internal error #define JSP_SHOULD_EXECUTE (((execInfo.execute)&EXEC_RUN_MASK)==EXEC_YES) #define JSP_SAVE_EXECUTE() JsExecFlags oldExecute = execInfo.execute #define JSP_RESTORE_EXECUTE() execInfo.execute = (execInfo.execute&(JsExecFlags)(~EXEC_SAVE_RESTORE_MASK)) | (oldExecute&EXEC_SAVE_RESTORE_MASK); #define JSP_HAS_ERROR (((execInfo.execute)&EXEC_ERROR_MASK)!=0) #define JSP_SHOULDNT_PARSE (((execInfo.execute)&EXEC_NO_PARSE_MASK)!=0) ALWAYS_INLINE void jspDebuggerLoopIfCtrlC() { #ifdef USE_DEBUGGER if (execInfo.execute & EXEC_CTRL_C_WAIT && JSP_SHOULD_EXECUTE) jsiDebuggerLoop(); #endif } /// if interrupting execution, this is set bool jspIsInterrupted() { return (execInfo.execute & EXEC_INTERRUPTED)!=0; } /// if interrupting execution, this is set void jspSetInterrupted(bool interrupt) { if (interrupt) execInfo.execute = execInfo.execute | EXEC_INTERRUPTED; else execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_INTERRUPTED; } /// Set the error flag - set lineReported if we've already output the line number void jspSetError(bool lineReported) { execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_YES) | EXEC_ERROR; if (lineReported) execInfo.execute |= EXEC_ERROR_LINE_REPORTED; } bool jspHasError() { return JSP_HAS_ERROR; } void jspeiClearScopes() { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; } bool jspeiAddScope(JsVar *scope) { if (!execInfo.scopesVar) execInfo.scopesVar = jsvNewEmptyArray(); if (!execInfo.scopesVar) return false; jsvArrayPush(execInfo.scopesVar, 
scope); return true; } void jspeiRemoveScope() { if (!execInfo.scopesVar || !jsvGetArrayLength(execInfo.scopesVar)) { jsExceptionHere(JSET_INTERNALERROR, "Too many scopes removed"); jspSetError(false); return; } jsvUnLock(jsvArrayPop(execInfo.scopesVar)); if (!jsvGetFirstChild(execInfo.scopesVar)) { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; } } JsVar *jspeiFindInScopes(const char *name) { if (execInfo.scopesVar) { JsVar *it = jsvLockSafe(jsvGetLastChild(execInfo.scopesVar)); while (it) { JsVar *scope = jsvSkipName(it); JsVarRef next = jsvGetPrevSibling(it); JsVar *ref = jsvFindChildFromString(scope, name, false); jsvUnLock2(it, scope); if (ref) return ref; it = jsvLockSafe(next); } } return jsvFindChildFromString(execInfo.root, name, false); } /// Return the topmost scope (and lock it) JsVar *jspeiGetTopScope() { if (execInfo.scopesVar) { JsVar *scope = jsvGetLastArrayItem(execInfo.scopesVar); if (scope) return scope; } return jsvLockAgain(execInfo.root); } JsVar *jspeiFindOnTop(const char *name, bool createIfNotFound) { JsVar *scope = jspeiGetTopScope(); JsVar *result = jsvFindChildFromString(scope, name, createIfNotFound); jsvUnLock(scope); return result; } JsVar *jspeiFindNameOnTop(JsVar *childName, bool createIfNotFound) { JsVar *scope = jspeiGetTopScope(); JsVar *result = jsvFindChildFromVar(scope, childName, createIfNotFound); jsvUnLock(scope); return result; } JsVar *jspFindPrototypeFor(const char *className) { JsVar *obj = jsvObjectGetChild(execInfo.root, className, 0); if (!obj) return 0; JsVar *proto = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0); jsvUnLock(obj); return proto; } /** Here we assume that we have already looked in the parent itself - * and are now going down looking at the stuff it inherited */ JsVar *jspeiFindChildFromStringInParents(JsVar *parent, const char *name) { if (jsvIsObject(parent)) { // If an object, look for an 'inherits' var JsVar *inheritsFrom = jsvObjectGetChild(parent, JSPARSE_INHERITS_VAR, 0); // if there's no inheritsFrom, just default to 'Object.prototype' if (!inheritsFrom) inheritsFrom = jspFindPrototypeFor("Object"); if (inheritsFrom && inheritsFrom!=parent) { // we have what it inherits from (this is ACTUALLY the prototype var) // https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Global_Objects/Object/proto JsVar *child = jsvFindChildFromString(inheritsFrom, name, false); if (!child) child = jspeiFindChildFromStringInParents(inheritsFrom, name); jsvUnLock(inheritsFrom); if (child) return child; } else jsvUnLock(inheritsFrom); } else { // Not actually an object - but might be an array/string/etc const char *objectName = jswGetBasicObjectName(parent); while (objectName) { JsVar *objName = jsvFindChildFromString(execInfo.root, objectName, false); if (objName) { JsVar *result = 0; JsVar *obj = jsvSkipNameAndUnLock(objName); // could be something the user has made - eg. 'Array=1' if (jsvHasChildren(obj)) { // We have found an object with this name - search for the prototype var JsVar *proto = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0); if (proto) { result = jsvFindChildFromString(proto, name, false); jsvUnLock(proto); } } jsvUnLock(obj); if (result) return result; } /* We haven't found anything in the actual object, we should check the 'Object' itself eg, we tried 'String', so now we should try 'Object'. Built-in types don't have room for a prototype field, so we hard-code it */ objectName = jswGetBasicObjectPrototypeName(objectName); } } // no luck! 
return 0; } JsVar *jspeiGetScopesAsVar() { if (!execInfo.scopesVar) return 0; // no scopes! // If just one element, return it (no array) if (jsvGetArrayLength(execInfo.scopesVar)==1) { JsVar *v = jsvGetLastArrayItem(execInfo.scopesVar); // this is faster than getting by index return v; } // Copy this - because if we just returned it, the underlying array would get altered return jsvCopy(execInfo.scopesVar, true); } void jspeiLoadScopesFromVar(JsVar *arr) { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; if (arr) { if (jsvIsArray(arr)) { // TODO: copy on write? would make function calls faster execInfo.scopesVar = jsvCopy(arr, true); } else { // just a single item,but we must package it in an array execInfo.scopesVar = jsvNewArray(&arr, 1); } } } // ----------------------------------------------- /// Check that we have enough stack to recurse. Return true if all ok, error if not. bool jspCheckStackPosition() { if (jsuGetFreeStack() < 512) { // giving us 512 bytes leeway jsExceptionHere(JSET_ERROR, "Too much recursion - the stack is about to overflow"); jspSetInterrupted(true); return false; } return true; } // Set execFlags such that we are not executing void jspSetNoExecute() { execInfo.execute = (execInfo.execute & (JsExecFlags)(int)~EXEC_RUN_MASK) | EXEC_NO; } void jspAppendStackTrace(JsVar *stackTrace) { JsvStringIterator it; jsvStringIteratorNew(&it, stackTrace, 0); jsvStringIteratorGotoEnd(&it); jslPrintPosition((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart); jslPrintTokenLineMarker((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart, 0); jsvStringIteratorFree(&it); } /// We had an exception (argument is the exception's value) void jspSetException(JsVar *value) { // Add the exception itself to a variable in root scope JsVar *exception = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, true); if (exception) { jsvSetValueOfName(exception, value); jsvUnLock(exception); } // Set the exception flag execInfo.execute = execInfo.execute | EXEC_EXCEPTION; // Try and do a stack trace if (lex) { JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, " at "); jspAppendStackTrace(stackTrace); jsvUnLock(stackTrace); // stop us from printing the trace in the same block execInfo.execute = execInfo.execute | EXEC_ERROR_LINE_REPORTED; } } } /** Return the reported exception if there was one (and clear it) */ JsVar *jspGetException() { JsVar *exceptionName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, false); if (exceptionName) { JsVar *exception = jsvSkipName(exceptionName); jsvRemoveChild(execInfo.hiddenRoot, exceptionName); jsvUnLock(exceptionName); JsVar *stack = jspGetStackTrace(); if (stack && jsvHasChildren(exception)) { jsvObjectSetChild(exception, "stack", stack); } jsvUnLock(stack); return exception; } return 0; } /** Return a stack trace string if there was one (and clear it) */ JsVar *jspGetStackTrace() { JsVar *stackTraceName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, false); if (stackTraceName) { JsVar *stackTrace = jsvSkipName(stackTraceName); jsvRemoveChild(execInfo.hiddenRoot, stackTraceName); jsvUnLock(stackTraceName); return stackTrace; } return 0; } // ---------------------------------------------- // we return a value so that JSP_MATCH can return 0 if it fails (if we pass 0, we just parse all args) NO_INLINE bool jspeFunctionArguments(JsVar *funcVar) { 
JSP_MATCH('('); while (lex->tk!=')') { if (funcVar) { char buf[JSLEX_MAX_TOKEN_LENGTH+1]; buf[0] = '\xFF'; strcpy(&buf[1], jslGetTokenValueAsString()); JsVar *param = jsvAddNamedChild(funcVar, 0, buf); if (!param) { // out of memory jspSetError(false); return false; } jsvMakeFunctionParameter(param); // force this to be called a function parameter jsvUnLock(param); } JSP_MATCH(LEX_ID); if (lex->tk!=')') JSP_MATCH(','); } JSP_MATCH(')'); return true; } // Parse function, assuming we're on '{'. funcVar can be 0. returns 'true' is the function included the 'this' keyword NO_INLINE bool jspeFunctionDefinitionInternal(JsVar *funcVar, bool expressionOnly) { bool forcePretokenise = false; if (expressionOnly) { if (funcVar) funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN; } else { JSP_MATCH('{'); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_STR) { if (!strcmp(jslGetTokenValueAsString(), "compiled")) jsWarn("Function marked with \"compiled\" uploaded in source form"); if (lex->tk==LEX_STR && !strcmp(jslGetTokenValueAsString(), "ram")) { JSP_ASSERT_MATCH(LEX_STR); forcePretokenise = true; } } #endif /* If the function starts with return, treat it specially - * we don't want to store the 'return' part of it */ if (funcVar && lex->tk==LEX_R_RETURN) { funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN; JSP_ASSERT_MATCH(LEX_R_RETURN); } } #ifndef ESPR_NO_LINE_NUMBERS // Get the line number (if needed) JsVarInt lineNumber = 0; if (funcVar && lex->lineNumberOffset && !(forcePretokenise||jsfGetFlag(JSF_PRETOKENISE))) { // jslGetLineNumber is slow, so we only do it if we have debug info lineNumber = (JsVarInt)jslGetLineNumber() + (JsVarInt)lex->lineNumberOffset - 1; } #endif // Get the code - parse it and figure out where it stops JslCharPos funcBegin; jslSkipWhiteSpace(); jslCharPosNew(&funcBegin, lex->sourceVar, lex->tokenStart); int lastTokenEnd = -1; lex->hadThisKeyword = lex->tk == LEX_R_THIS; if (!expressionOnly) { int brackets = 0; while (lex->tk && (brackets || lex->tk != '}')) { if (lex->tk == '{') brackets++; if (lex->tk == '}') brackets--; lastTokenEnd = (int)jsvStringIteratorGetIndex(&lex->it)-1; JSP_ASSERT_MATCH(lex->tk); } // FIXME: we might be including whitespace after the last token } else { JsExecFlags oldExec = execInfo.execute; execInfo.execute = EXEC_NO; jsvUnLock(jspeAssignmentExpression()); execInfo.execute = oldExec; lastTokenEnd = (int)lex->tokenStart; } bool hadThisKeyword = lex->hadThisKeyword; // Then create var and set (if there was any code!) if (funcVar && lastTokenEnd>0) { // code var JsVar *funcCodeVar; if (!forcePretokenise && jsvIsNativeString(lex->sourceVar)) { /* If we're parsing from a Native String (eg. E.memoryArea, E.setBootCode) then use another Native String to load function code straight from flash */ int s = (int)jsvStringIteratorGetIndex(&funcBegin.it) - 1; funcCodeVar = jsvNewNativeString(lex->sourceVar->varData.nativeStr.ptr + s, (unsigned int)(lastTokenEnd - s)); #ifdef SPIFLASH_BASE } else if (!forcePretokenise && jsvIsFlashString(lex->sourceVar)) { /* If we're parsing from a Flash String (eg. 
loaded from Storage on Bangle.js) then use another Flash String to load function code straight from flash*/ int s = (int)jsvStringIteratorGetIndex(&funcBegin.it) - 1; funcCodeVar = jsvNewFlashString(lex->sourceVar->varData.nativeStr.ptr + s, (unsigned int)(lastTokenEnd - s)); #endif } else { if (jsfGetFlag(JSF_PRETOKENISE) || forcePretokenise) { funcCodeVar = jslNewTokenisedStringFromLexer(&funcBegin, (size_t)lastTokenEnd); } else { funcCodeVar = jslNewStringFromLexer(&funcBegin, (size_t)lastTokenEnd); } } jsvUnLock2(jsvAddNamedChild(funcVar, funcCodeVar, JSPARSE_FUNCTION_CODE_NAME), funcCodeVar); // scope var JsVar *funcScopeVar = jspeiGetScopesAsVar(); if (funcScopeVar) { jsvUnLock2(jsvAddNamedChild(funcVar, funcScopeVar, JSPARSE_FUNCTION_SCOPE_NAME), funcScopeVar); } #ifndef ESPR_NO_LINE_NUMBERS // If we've got a line number, add a var for it if (lineNumber) { JsVar *funcLineNumber = jsvNewFromInteger(lineNumber); if (funcLineNumber) { jsvUnLock2(jsvAddNamedChild(funcVar, funcLineNumber, JSPARSE_FUNCTION_LINENUMBER_NAME), funcLineNumber); } } #endif } jslCharPosFree(&funcBegin); if (!expressionOnly) JSP_MATCH('}'); return hadThisKeyword; } // Parse function (after 'function' has occurred NO_INLINE JsVar *jspeFunctionDefinition(bool parseNamedFunction) { // actually parse a function... We assume that the LEX_FUNCTION and name // have already been parsed JsVar *funcVar = 0; bool actuallyCreateFunction = JSP_SHOULD_EXECUTE; if (actuallyCreateFunction) funcVar = jsvNewWithFlags(JSV_FUNCTION); JsVar *functionInternalName = 0; if (parseNamedFunction && lex->tk==LEX_ID) { // you can do `var a = function foo() { foo(); };` - so cope with this if (funcVar) functionInternalName = jslGetTokenValueAsVar(); // note that we don't add it to the beginning, because it would mess up our function call code JSP_ASSERT_MATCH(LEX_ID); } // Get arguments save them to the structure if (!jspeFunctionArguments(funcVar)) { jsvUnLock2(functionInternalName, funcVar); // parse failed return 0; } // Parse the actual function block jspeFunctionDefinitionInternal(funcVar, false); // if we had a function name, add it to the end (if we don't it gets confused with arguments) if (funcVar && functionInternalName) jsvObjectSetChildAndUnLock(funcVar, JSPARSE_FUNCTION_NAME_NAME, functionInternalName); return funcVar; } /* Parse just the brackets of a function - and throw * everything away */ NO_INLINE bool jspeParseFunctionCallBrackets() { assert(!JSP_SHOULD_EXECUTE); JSP_MATCH('('); while (!JSP_SHOULDNT_PARSE && lex->tk != ')') { jsvUnLock(jspeAssignmentExpression()); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_ARROW_FUNCTION) { jsvUnLock(jspeArrowFunction(0, 0)); } #endif if (lex->tk!=')') JSP_MATCH(','); } if (!JSP_SHOULDNT_PARSE) JSP_MATCH(')'); return 0; } /** Handle a function call (assumes we've parsed the function name and we're * on the start bracket). 
'thisArg' is the value of the 'this' variable when the * function is executed (it's usually the parent object) * * * NOTE: this does not set the execInfo flags - so if execInfo==EXEC_NO, it won't execute * * If !isParsing and arg0!=0, argument 0 is set to what is supplied (same with arg1) * * functionName is used only for error reporting - and can be 0 */ NO_INLINE JsVar *jspeFunctionCall(JsVar *function, JsVar *functionName, JsVar *thisArg, bool isParsing, int argCount, JsVar **argPtr) { if (JSP_SHOULD_EXECUTE && !function) { if (functionName) jsExceptionHere(JSET_ERROR, "Function %q not found!", functionName); else jsExceptionHere(JSET_ERROR, "Function not found!", functionName); return 0; } if (JSP_SHOULD_EXECUTE) if (!jspCheckStackPosition()) return 0; // try and ensure that we won't overflow our stack if (JSP_SHOULD_EXECUTE && function) { JsVar *returnVar = 0; if (!jsvIsFunction(function)) { jsExceptionHere(JSET_ERROR, "Expecting a function to call, got %t", function); return 0; } JsVar *thisVar = jsvLockAgainSafe(thisArg); if (isParsing) JSP_MATCH('('); /* Ok, so we have 4 options here. * * 1: we're native. * a) args have been pre-parsed, which is awesome * b) we have to parse our own args into an array * 2: we're not native * a) args were pre-parsed and we have to populate the function * b) we parse our own args, which is possibly better */ if (jsvIsNativeFunction(function)) { // ------------------------------------- NATIVE unsigned int argPtrSize = 0; int boundArgs = 0; // Add 'bound' parameters if there were any JsvObjectIterator it; jsvObjectIteratorNew(&it, function); JsVar *param = jsvObjectIteratorGetKey(&it); while (jsvIsFunctionParameter(param)) { if ((unsigned)argCount>=argPtrSize) { // allocate more space on stack if needed unsigned int newArgPtrSize = (argPtrSize?argPtrSize:(unsigned int)argCount)*4; size_t newArgPtrByteSize = sizeof(JsVar*)*newArgPtrSize; if (jsuGetFreeStack() < 256+newArgPtrByteSize) { jsExceptionHere(JSET_ERROR, "Insufficient stack for this many arguments"); jsvUnLock(thisVar); return 0; } JsVar **newArgPtr = (JsVar**)alloca(newArgPtrByteSize); memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*)); argPtr = newArgPtr; argPtrSize = newArgPtrSize; } // if we already had arguments - shift them up... 
int i; for (i=argCount-1;i>=boundArgs;i--) argPtr[i+1] = argPtr[i]; // add bound argument argPtr[boundArgs] = jsvSkipName(param); argCount++; boundArgs++; jsvUnLock(param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); } // check if 'this' was defined while (param) { if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) { jsvUnLock(thisVar); thisVar = jsvSkipName(param); break; } jsvUnLock(param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); } jsvUnLock(param); jsvObjectIteratorFree(&it); // Now, if we're parsing add the rest of the arguments int allocatedArgCount = boundArgs; if (isParsing) { while (!JSP_HAS_ERROR && lex->tk!=')' && lex->tk!=LEX_EOF) { if ((unsigned)argCount>=argPtrSize) { // allocate more space on stack unsigned int newArgPtrSize = argPtrSize?argPtrSize*4:16; JsVar **newArgPtr = (JsVar**)alloca(sizeof(JsVar*)*newArgPtrSize); memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*)); argPtr = newArgPtr; argPtrSize = newArgPtrSize; } argPtr[argCount++] = jsvSkipNameAndUnLock(jspeAssignmentExpression()); if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',',jsvUnLockMany((unsigned)argCount, argPtr);jsvUnLock(thisVar);, 0); } JSP_MATCH(')'); allocatedArgCount = argCount; } void *nativePtr = jsvGetNativeFunctionPtr(function); JsVar *oldThisVar = execInfo.thisVar; if (thisVar) execInfo.thisVar = jsvRef(thisVar); else { if (nativePtr==jswrap_eval) { // eval gets to use the current scope /* Note: proper JS has some utterly insane code that depends on whether * eval is an lvalue or not: * * http://stackoverflow.com/questions/9107240/1-evalthis-vs-evalthis-in-javascript * * Doing this in Espruino is quite an upheaval for that one * slightly insane case - so it's not implemented. */ if (execInfo.thisVar) execInfo.thisVar = jsvRef(execInfo.thisVar); } else { execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root } } if (nativePtr && !JSP_HAS_ERROR) { returnVar = jsnCallFunction(nativePtr, function->varData.native.argTypes, thisVar, argPtr, argCount); assert(!jsvIsName(returnVar)); } else { returnVar = 0; } // unlock values if we locked them jsvUnLockMany((unsigned)allocatedArgCount, argPtr); /* Return to old 'this' var. No need to unlock as we never locked before */ if (execInfo.thisVar) jsvUnRef(execInfo.thisVar); execInfo.thisVar = oldThisVar; } else { // ----------------------------------------------------- NOT NATIVE // create a new symbol table entry for execution of this function // OPT: can we cache this function execution environment + param variables? // OPT: Probably when calling a function ONCE, use it, otherwise when recursing, make new? JsVar *functionRoot = jsvNewWithFlags(JSV_FUNCTION); if (!functionRoot) { // out of memory jspSetError(false); jsvUnLock(thisVar); return 0; } JsVar *functionScope = 0; JsVar *functionCode = 0; JsVar *functionInternalName = 0; #ifndef ESPR_NO_LINE_NUMBERS uint16_t functionLineNumber = 0; #endif /** NOTE: We expect that the function object will have: * * * Parameters * * Code/Scope/Name * * IN THAT ORDER. 
     */
      JsvObjectIterator it;
      jsvObjectIteratorNew(&it, function);
      JsVar *param = jsvObjectIteratorGetKey(&it);
      JsVar *value = jsvObjectIteratorGetValue(&it);
      while (jsvIsFunctionParameter(param) && value) {
        jsvAddFunctionParameter(functionRoot, jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH), value);
        jsvUnLock2(value, param);
        jsvObjectIteratorNext(&it);
        param = jsvObjectIteratorGetKey(&it);
        value = jsvObjectIteratorGetValue(&it);
      }
      jsvUnLock2(value, param);
      if (isParsing) {
        int hadParams = 0;
        // grab in all parameters. We go around this loop until we've run out
        // of named parameters AND we've parsed all the supplied arguments
        while (!JSP_SHOULDNT_PARSE && lex->tk!=')') {
          JsVar *param = jsvObjectIteratorGetKey(&it);
          bool paramDefined = jsvIsFunctionParameter(param);
          if (lex->tk!=')' || paramDefined) {
            hadParams++;
            JsVar *value = 0;
            // ONLY parse this if it was supplied, otherwise leave 0 (undefined)
            if (lex->tk!=')')
              value = jspeAssignmentExpression();
            // and if execute, copy it over
            value = jsvSkipNameAndUnLock(value);
            jsvAddFunctionParameter(functionRoot, paramDefined?jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH):0, value);
            jsvUnLock(value);
            if (lex->tk!=')') JSP_MATCH(',');
          }
          jsvUnLock(param);
          if (paramDefined) jsvObjectIteratorNext(&it);
        }
        JSP_MATCH(')');
      } else { // and NOT isParsing
        int args = 0;
        while (args<argCount) {
          JsVar *param = jsvObjectIteratorGetKey(&it);
          bool paramDefined = jsvIsFunctionParameter(param);
          jsvAddFunctionParameter(functionRoot, paramDefined?jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH):0, argPtr[args]);
          args++;
          jsvUnLock(param);
          if (paramDefined) jsvObjectIteratorNext(&it);
        }
      }
      // Now go through what's left
      while (jsvObjectIteratorHasValue(&it)) {
        JsVar *param = jsvObjectIteratorGetKey(&it);
        if (jsvIsString(param)) {
          if (jsvIsStringEqual(param, JSPARSE_FUNCTION_SCOPE_NAME))
            functionScope = jsvSkipName(param);
          else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_CODE_NAME))
            functionCode = jsvSkipName(param);
          else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_NAME_NAME))
            functionInternalName = jsvSkipName(param);
          else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) {
            jsvUnLock(thisVar);
            thisVar = jsvSkipName(param);
          }
#ifndef ESPR_NO_LINE_NUMBERS
          else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_LINENUMBER_NAME))
            functionLineNumber = (uint16_t)jsvGetIntegerAndUnLock(jsvSkipName(param));
#endif
          else if (jsvIsFunctionParameter(param)) {
            JsVar *defaultVal = jsvSkipName(param);
            jsvAddFunctionParameter(functionRoot, jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH), defaultVal);
            jsvUnLock(defaultVal);
          }
        }
        jsvUnLock(param);
        jsvObjectIteratorNext(&it);
      }
      jsvObjectIteratorFree(&it);
      // set up the function's name (if it's a named function)
      if (functionInternalName) {
        JsVar *name = jsvMakeIntoVariableName(jsvNewFromStringVar(functionInternalName,0,JSVAPPENDSTRINGVAR_MAXLENGTH), function);
        jsvAddName(functionRoot, name);
        jsvUnLock2(name, functionInternalName);
      }

      if (!JSP_HAS_ERROR) {
        // save old scopes and reset scope list
        JsVar *oldScopeVar = execInfo.scopesVar;
        execInfo.scopesVar = 0;
        // if we have a scope var, load it up. We may not have one if there were no scopes apart from root
        if (functionScope) {
          jspeiLoadScopesFromVar(functionScope);
          jsvUnLock(functionScope);
        }
        // add the function's execute space to the symbol table so we can recurse
        if (jspeiAddScope(functionRoot)) {
          /* Adding scope may have failed - we may have descended too deep - so be sure
           * not to pull somebody else's scope off */

          JsVar *oldThisVar = execInfo.thisVar;
          if (thisVar)
            execInfo.thisVar = jsvRef(thisVar);
          else
            execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root

          /* we just want to execute the block, but something could
           * have messed up and left us with the wrong Lexer, so
           * we want to be careful here... */
          if (functionCode) {
#ifdef USE_DEBUGGER
            bool hadDebuggerNextLineOnly = false;
            if (execInfo.execute&EXEC_DEBUGGER_STEP_INTO) {
              if (functionName)
                jsiConsolePrintf("Stepping into %v\n", functionName);
              else
                jsiConsolePrintf("Stepping into function\n");
            } else {
              hadDebuggerNextLineOnly = execInfo.execute&EXEC_DEBUGGER_NEXT_LINE;
              if (hadDebuggerNextLineOnly)
                execInfo.execute &= (JsExecFlags)~EXEC_DEBUGGER_NEXT_LINE;
            }
#endif
            JsLex newLex;
            JsLex *oldLex = jslSetLex(&newLex);
            jslInit(functionCode);
#ifndef ESPR_NO_LINE_NUMBERS
            newLex.lineNumberOffset = functionLineNumber;
#endif
            JSP_SAVE_EXECUTE();
            // force execute without any previous state
#ifdef USE_DEBUGGER
            execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK|EXEC_DEBUGGER_NEXT_LINE));
#else
            execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK));
#endif
            if (jsvIsFunctionReturn(function)) {
#ifdef USE_DEBUGGER
              // we didn't parse a statement so wouldn't trigger the debugger otherwise
              if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE && JSP_SHOULD_EXECUTE) {
                lex->tokenLastStart = lex->tokenStart;
                jsiDebuggerLoop();
              }
#endif
              // implicit return - we just need an expression (optional)
              if (lex->tk != ';' && lex->tk != '}')
                returnVar = jsvSkipNameAndUnLock(jspeExpression());
            } else {
              // setup a return variable
              JsVar *returnVarName = jsvAddNamedChild(functionRoot, 0, JSPARSE_RETURN_VAR);
              // parse the whole block
              jspeBlockNoBrackets();
              /* get the real return var before we remove it from our function.
               * We can unlock below because returnVarName is still part of
               * functionRoot, so won't get freed.
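               * (right after this we also call jsvSetValueOfName(returnVarName, 0)
               * so the function doesn't keep a circular reference to its own
               * return value)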
               */
              returnVar = jsvSkipNameAndUnLock(returnVarName);
              if (returnVarName) // could have failed with out of memory
                jsvSetValueOfName(returnVarName, 0); // remove return value (which helps stop circular references)
            }
            // Store a stack trace if we had an error
            JsExecFlags hasError = execInfo.execute&EXEC_ERROR_MASK;
            JSP_RESTORE_EXECUTE(); // because return will probably have set execute to false

#ifdef USE_DEBUGGER
            bool calledDebugger = false;
            if (execInfo.execute & EXEC_DEBUGGER_MASK) {
              jsiConsolePrint("Value returned is =");
              jsfPrintJSON(returnVar, JSON_LIMIT | JSON_SOME_NEWLINES | JSON_PRETTY | JSON_SHOW_DEVICES);
              jsiConsolePrintChar('\n');
              if (execInfo.execute & EXEC_DEBUGGER_FINISH_FUNCTION) {
                calledDebugger = true;
                jsiDebuggerLoop();
              }
            }
            if (hadDebuggerNextLineOnly && !calledDebugger)
              execInfo.execute |= EXEC_DEBUGGER_NEXT_LINE;
#endif

            jslKill();
            jslSetLex(oldLex);

            if (hasError) {
              execInfo.execute |= hasError; // propagate error
              JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0);
              if (stackTrace) {
                jsvAppendPrintf(stackTrace, jsvIsString(functionName)?"in function %q called from ":
                    "in function called from ", functionName);
                if (lex) {
                  jspAppendStackTrace(stackTrace);
                } else
                  jsvAppendPrintf(stackTrace, "system\n");
                jsvUnLock(stackTrace);
              }
            }
          }

          /* Return to old 'this' var. No need to unlock as we never locked before */
          if (execInfo.thisVar) jsvUnRef(execInfo.thisVar);
          execInfo.thisVar = oldThisVar;

          jspeiRemoveScope();
        }

        // Unlock scopes and restore old ones
        jsvUnLock(execInfo.scopesVar);
        execInfo.scopesVar = oldScopeVar;
      }
      jsvUnLock(functionCode);
      jsvUnLock(functionRoot);
    }

    jsvUnLock(thisVar);
    return returnVar;
  } else if (isParsing) { // ---------------------------------- function, but not executing - just parse args and be done
    jspeParseFunctionCallBrackets();
    /* Do not return function, as it will be unlocked! */
    return 0;
  } else return 0;
}

// Find a variable (or built-in function) based on the current scopes
JsVar *jspGetNamedVariable(const char *tokenName) {
  JsVar *a = JSP_SHOULD_EXECUTE ? jspeiFindInScopes(tokenName) : 0;
  if (JSP_SHOULD_EXECUTE && !a) {
    /* Special case! We haven't found the variable, so check out
     * and see if it's one of our builtins... */
    if (jswIsBuiltInObject(tokenName)) {
      // Check if we have a built-in function for it
      // OPT: Could we instead have jswIsBuiltInObjectWithoutConstructor?
      JsVar *obj = jswFindBuiltInFunction(0, tokenName);
      // If not, make one
      if (!obj)
        obj = jspNewBuiltin(tokenName);
      if (obj) { // not out of memory
        a = jsvAddNamedChild(execInfo.root, obj, tokenName);
        jsvUnLock(obj);
      }
    } else {
      a = jswFindBuiltInFunction(0, tokenName);
      if (!a) {
        /* Variable doesn't exist! JavaScript says we should create it
         * (we won't add it here.
This is done in the assignment operator)*/ a = jsvMakeIntoVariableName(jsvNewFromString(tokenName), 0); } } } return a; } /// Used by jspGetNamedField / jspGetVarNamedField static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) { // Now look in prototypes JsVar * child = jspeiFindChildFromStringInParents(object, name); /* Check for builtins via separate function * This way we save on RAM for built-ins because everything comes out of program code */ if (!child) { child = jswFindBuiltInFunction(object, name); } /* We didn't get here if we found a child in the object itself, so * if we're here then we probably have the wrong name - so for example * with `a.b = c;` could end up setting `a.prototype.b` (bug #360) * * Also we might have got a built-in, which wouldn't have a name on it * anyway - so in both cases, strip the name if it is there, and create * a new name that references the object we actually requested the * member from.. */ if (child && returnName) { // Get rid of existing name if (jsvIsName(child)) { JsVar *t = jsvGetValueOfName(child); jsvUnLock(child); child = t; } // create a new name JsVar *nameVar = jsvNewFromString(name); JsVar *newChild = jsvCreateNewChild(object, nameVar, child); jsvUnLock2(nameVar, child); child = newChild; } // If not found and is the prototype, create it if (!child) { if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { // prototype is supposed to be an object JsVar *proto = jsvNewObject(); // make sure it has a 'constructor' variable that points to the object it was part of jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object); child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR); jspEnsureIsPrototype(object, child); jsvUnLock(proto); } else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) { const char *objName = jswGetBasicObjectName(object); if (objName) { child = jspNewPrototype(objName); } } } return child; } /** Get the named function/variable on the object - whether it's built in, or predefined. * If !returnName, returns the function/variable itself or undefined, but * if returnName, return a name (could be fake) referencing the parent. * * NOTE: ArrayBuffer/Strings are not handled here. We assume that if we're * passing a char* rather than a JsVar it's because we're looking up via * a symbol rather than a variable. 
To handle these use jspGetVarNamedField */ JsVar *jspGetNamedField(JsVar *object, const char* name, bool returnName) { JsVar *child = 0; // if we're an object (or pretending to be one) if (jsvHasChildren(object)) child = jsvFindChildFromString(object, name, false); if (!child) { child = jspGetNamedFieldInParents(object, name, returnName); // If not found and is the prototype, create it if (!child && jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { JsVar *value = jsvNewObject(); // prototype is supposed to be an object child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR); jsvUnLock(value); } } if (returnName) return child; else return jsvSkipNameAndUnLock(child); } /// see jspGetNamedField - note that nameVar should have had jsvAsArrayIndex called on it first JsVar *jspGetVarNamedField(JsVar *object, JsVar *nameVar, bool returnName) { JsVar *child = 0; // if we're an object (or pretending to be one) if (jsvHasChildren(object)) child = jsvFindChildFromVar(object, nameVar, false); if (!child) { if (jsvIsArrayBuffer(object) && jsvIsInt(nameVar)) { // for array buffers, we actually create a NAME, and hand that back - then when we assign (or use SkipName) we pull out the correct data child = jsvMakeIntoVariableName(jsvNewFromInteger(jsvGetInteger(nameVar)), object); if (child) // turn into an 'array buffer name' child->flags = (child->flags & ~JSV_VARTYPEMASK) | JSV_ARRAYBUFFERNAME; } else if (jsvIsString(object) && jsvIsInt(nameVar)) { JsVarInt idx = jsvGetInteger(nameVar); if (idx>=0 && idx<(JsVarInt)jsvGetStringLength(object)) { char ch = jsvGetCharInString(object, (size_t)idx); child = jsvNewStringOfLength(1, &ch); } else if (returnName) child = jsvCreateNewChild(object, nameVar, 0); // just return *something* to show this is handled } else { // get the name as a string char name[JSLEX_MAX_TOKEN_LENGTH]; jsvGetString(nameVar, name, JSLEX_MAX_TOKEN_LENGTH); // try and find it in parents child = jspGetNamedFieldInParents(object, name, returnName); // If not found and is the prototype, create it if (!child && jsvIsFunction(object) && jsvIsStringEqual(nameVar, JSPARSE_PROTOTYPE_VAR)) { JsVar *value = jsvNewObject(); // prototype is supposed to be an object child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR); jsvUnLock(value); } } } if (returnName) return child; else return jsvSkipNameAndUnLock(child); } /// Call the named function on the object - whether it's built in, or predefined. Returns the return value of the function. JsVar *jspCallNamedFunction(JsVar *object, char* name, int argCount, JsVar **argPtr) { JsVar *child = jspGetNamedField(object, name, false); JsVar *r = 0; if (jsvIsFunction(child)) r = jspeFunctionCall(child, 0, object, false, argCount, argPtr); jsvUnLock(child); return r; } NO_INLINE JsVar *jspeFactorMember(JsVar *a, JsVar **parentResult) { /* The parent if we're executing a method call */ JsVar *parent = 0; while (lex->tk=='.' || lex->tk=='[') { if (lex->tk == '.') { // ------------------------------------- Record Access JSP_ASSERT_MATCH('.'); if (jslIsIDOrReservedWord()) { if (JSP_SHOULD_EXECUTE) { // Note: name will go away when we parse something else! 
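          // (the string lives in the lexer's token buffer, so anything that must
          //  outlive the next token gets copied out, e.g. via jslGetTokenValueAsVar below)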
const char *name = jslGetTokenValueAsString(); JsVar *aVar = jsvSkipNameWithParent(a,true,parent); JsVar *child = 0; if (aVar) child = jspGetNamedField(aVar, name, true); if (!child) { if (!jsvIsUndefined(aVar)) { // if no child found, create a pointer to where it could be // as we don't want to allocate it until it's written JsVar *nameVar = jslGetTokenValueAsVar(); child = jsvCreateNewChild(aVar, nameVar, 0); jsvUnLock(nameVar); } else { // could have been a string... jsExceptionHere(JSET_ERROR, "Cannot read property '%s' of undefined", name); } } jsvUnLock(parent); parent = aVar; jsvUnLock(a); a = child; } // skip over current token (we checked above that it was an ID or reserved word) jslGetNextToken(); } else { // incorrect token - force a match fail by asking for an ID JSP_MATCH_WITH_RETURN(LEX_ID, a); } } else if (lex->tk == '[') { // ------------------------------------- Array Access JsVar *index; JSP_ASSERT_MATCH('['); if (!jspCheckStackPosition()) return parent; index = jsvSkipNameAndUnLock(jspeAssignmentExpression()); JSP_MATCH_WITH_CLEANUP_AND_RETURN(']', jsvUnLock2(parent, index);, a); if (JSP_SHOULD_EXECUTE) { index = jsvAsArrayIndexAndUnLock(index); JsVar *aVar = jsvSkipNameWithParent(a,true,parent); JsVar *child = 0; if (aVar) child = jspGetVarNamedField(aVar, index, true); if (!child) { if (jsvHasChildren(aVar)) { // if no child found, create a pointer to where it could be // as we don't want to allocate it until it's written child = jsvCreateNewChild(aVar, index, 0); } else { jsExceptionHere(JSET_ERROR, "Field or method %q does not already exist, and can't create it on %t", index, aVar); } } jsvUnLock(parent); parent = jsvLockAgainSafe(aVar); jsvUnLock(a); a = child; jsvUnLock(aVar); } jsvUnLock(index); } else { assert(0); } } if (parentResult) *parentResult = parent; else jsvUnLock(parent); return a; } NO_INLINE JsVar *jspeConstruct(JsVar *func, JsVar *funcName, bool hasArgs) { assert(JSP_SHOULD_EXECUTE); if (!jsvIsFunction(func)) { jsExceptionHere(JSET_ERROR, "Constructor should be a function, but is %t", func); return 0; } JsVar *thisObj = jsvNewObject(); if (!thisObj) return 0; // out of memory // Make sure the function has a 'prototype' var JsVar *prototypeName = jsvFindChildFromString(func, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(func, prototypeName); // make sure it's an object JsVar *prototypeVar = jsvSkipName(prototypeName); jsvUnLock3(jsvAddNamedChild(thisObj, prototypeVar, JSPARSE_INHERITS_VAR), prototypeVar, prototypeName); JsVar *a = jspeFunctionCall(func, funcName, thisObj, hasArgs, 0, 0); /* FIXME: we should ignore return values that aren't objects (bug #848), but then we need * to be aware of `new String()` and `new Uint8Array()`. Ideally we'd let through * arrays/etc, and then String/etc should return 'boxed' values. * * But they don't return boxed values at the moment, so let's just * pass the return value through. If you try and return a string from * a function it's broken JS code anyway. 
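   *
   * So, currently: `function F(){return {ok:1}}; new F()` gives the returned
   * object, and `function G(){return 5}; new G()` gives 5 rather than the
   * freshly-created 'this' (that's the bug #848 case mentioned above).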
*/ if (a) { jsvUnLock(thisObj); thisObj = a; } else { jsvUnLock(a); } return thisObj; } NO_INLINE JsVar *jspeFactorFunctionCall() { /* The parent if we're executing a method call */ bool isConstructor = false; if (lex->tk==LEX_R_NEW) { JSP_ASSERT_MATCH(LEX_R_NEW); isConstructor = true; if (lex->tk==LEX_R_NEW) { jsExceptionHere(JSET_ERROR, "Nesting 'new' operators is unsupported"); jspSetError(false); return 0; } } JsVar *parent = 0; #ifndef SAVE_ON_FLASH bool wasSuper = lex->tk==LEX_R_SUPER; #endif JsVar *a = jspeFactorMember(jspeFactor(), &parent); #ifndef SAVE_ON_FLASH if (wasSuper) { /* if this was 'super.something' then we need * to overwrite the parent, because it'll be * set to the prototype otherwise. */ jsvUnLock(parent); parent = jsvLockAgainSafe(execInfo.thisVar); } #endif while ((lex->tk=='(' || (isConstructor && JSP_SHOULD_EXECUTE)) && !jspIsInterrupted()) { JsVar *funcName = a; JsVar *func = jsvSkipName(funcName); /* The constructor function doesn't change parsing, so if we're * not executing, just short-cut it. */ if (isConstructor && JSP_SHOULD_EXECUTE) { // If we have '(' parse an argument list, otherwise don't look for any args bool parseArgs = lex->tk=='('; a = jspeConstruct(func, funcName, parseArgs); isConstructor = false; // don't treat subsequent brackets as constructors } else a = jspeFunctionCall(func, funcName, parent, true, 0, 0); jsvUnLock3(funcName, func, parent); parent=0; a = jspeFactorMember(a, &parent); } #ifndef SAVE_ON_FLASH /* If we've got something that we care about the parent of (eg. a getter/setter) * then we repackage it into a 'NewChild' name that references the parent before * we leave. Note: You can't do this on everything because normally NewChild * forces a new child to be blindly created. It works on Getters/Setters because * we *always* run those rather than adding them. 
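   *
   * e.g. reading `o.x` where 'x' was defined with a getter: the getter itself
   * is the value we found, so we hand back a NewChild that still references
   * 'o' - that way a later read runs the getter (and `o.x = 5` the setter)
   * with the right parent.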
*/ if (parent && jsvIsBasicName(a) && !jsvIsNewChild(a)) { JsVar *value = jsvLockSafe(jsvGetFirstChild(a)); if (jsvIsGetterOrSetter(value)) { // no need to do this for functions since we've just executed whatever we needed to JsVar *nameVar = jsvCopyNameOnly(a,false,true); JsVar *newChild = jsvCreateNewChild(parent, nameVar, value); jsvUnLock2(nameVar, a); a = newChild; } jsvUnLock(value); } #endif jsvUnLock(parent); return a; } NO_INLINE JsVar *jspeFactorObject() { if (JSP_SHOULD_EXECUTE) { JsVar *contents = jsvNewObject(); if (!contents) { // out of memory jspSetError(false); return 0; } /* JSON-style object definition */ JSP_MATCH_WITH_RETURN('{', contents); while (!JSP_SHOULDNT_PARSE && lex->tk != '}') { JsVar *varName = 0; // we only allow strings or IDs on the left hand side of an initialisation if (jslIsIDOrReservedWord()) { if (JSP_SHOULD_EXECUTE) varName = jslGetTokenValueAsVar(); jslGetNextToken(); // skip over current token } else if ( lex->tk==LEX_STR || lex->tk==LEX_FLOAT || lex->tk==LEX_INT || lex->tk==LEX_R_TRUE || lex->tk==LEX_R_FALSE || lex->tk==LEX_R_NULL || lex->tk==LEX_R_UNDEFINED) { varName = jspeFactor(); } else { JSP_MATCH_WITH_RETURN(LEX_ID, contents); } #ifndef SAVE_ON_FLASH if (lex->tk==LEX_ID && jsvIsString(varName)) { bool isGetter = jsvIsStringEqual(varName, "get"); bool isSetter = jsvIsStringEqual(varName, "set"); if (isGetter || isSetter) { jsvUnLock(varName); varName = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_ID); JsVar *method = jspeFunctionDefinition(false); jsvAddGetterOrSetter(contents, varName, isGetter, method); jsvUnLock(method); } } else #endif { JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock(varName), contents); if (JSP_SHOULD_EXECUTE) { varName = jsvAsArrayIndexAndUnLock(varName); JsVar *contentsName = jsvFindChildFromVar(contents, varName, true); if (contentsName) { JsVar *value = jsvSkipNameAndUnLock(jspeAssignmentExpression()); // value can be 0 (could be undefined!) 
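            // e.g. in `{ a : undefined }` we still add the name 'a', just with no value attached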
jsvUnLock2(jsvSetValueOfName(contentsName, value), value); } } } jsvUnLock(varName); // no need to clean here, as it will definitely be used if (lex->tk != '}') JSP_MATCH_WITH_RETURN(',', contents); } JSP_MATCH_WITH_RETURN('}', contents); return contents; } else { // Not executing so do fast skip jspeBlock(); return 0; } } NO_INLINE JsVar *jspeFactorArray() { int idx = 0; JsVar *contents = 0; if (JSP_SHOULD_EXECUTE) { contents = jsvNewEmptyArray(); if (!contents) { // out of memory jspSetError(false); return 0; } } /* JSON-style array */ JSP_MATCH_WITH_RETURN('[', contents); while (!JSP_SHOULDNT_PARSE && lex->tk != ']') { if (JSP_SHOULD_EXECUTE) { JsVar *aVar = 0; JsVar *indexName = 0; if (lex->tk != ',') { // #287 - [,] and [1,2,,4] are allowed aVar = jsvSkipNameAndUnLock(jspeAssignmentExpression()); indexName = jsvMakeIntoVariableName(jsvNewFromInteger(idx), aVar); } if (indexName) { // could be out of memory jsvAddName(contents, indexName); jsvUnLock(indexName); } jsvUnLock(aVar); } else { jsvUnLock(jspeAssignmentExpression()); } // no need to clean here, as it will definitely be used if (lex->tk != ']') JSP_MATCH_WITH_RETURN(',', contents); idx++; } if (contents) jsvSetArrayLength(contents, idx, false); JSP_MATCH_WITH_RETURN(']', contents); return contents; } NO_INLINE void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName) { if (!prototypeName) return; JsVar *prototypeVar = jsvSkipName(prototypeName); if (!(jsvIsObject(prototypeVar) || jsvIsFunction(prototypeVar))) { if (!jsvIsUndefined(prototypeVar)) jsExceptionHere(JSET_TYPEERROR, "Prototype should be an object, got %t", prototypeVar); jsvUnLock(prototypeVar); prototypeVar = jsvNewObject(); // prototype is supposed to be an object JsVar *lastName = jsvSkipToLastName(prototypeName); jsvSetValueOfName(lastName, prototypeVar); jsvUnLock(lastName); } JsVar *constructor = jsvFindChildFromString(prototypeVar, JSPARSE_CONSTRUCTOR_VAR, true); if (constructor) jsvSetValueOfName(constructor, instanceOf); jsvUnLock2(constructor, prototypeVar); } NO_INLINE JsVar *jspeFactorTypeOf() { JSP_ASSERT_MATCH(LEX_R_TYPEOF); JsVar *a = jspeUnaryExpression(); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { if (!jsvIsVariableDefined(a)) { // so we don't get a ReferenceError when accessing an undefined var result=jsvNewFromString("undefined"); } else { a = jsvSkipNameAndUnLock(a); result=jsvNewFromString(jsvGetTypeOf(a)); } } jsvUnLock(a); return result; } NO_INLINE JsVar *jspeFactorDelete() { JSP_ASSERT_MATCH(LEX_R_DELETE); JsVar *parent = 0; JsVar *a = jspeFactorMember(jspeFactor(), &parent); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { bool ok = false; if (jsvIsName(a) && !jsvIsNewChild(a)) { // if no parent, check in root? if (!parent && jsvIsChild(execInfo.root, a)) parent = jsvLockAgain(execInfo.root); if (jsvHasChildren(parent)) { // else remove properly. 
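        // e.g. `delete o.x` - remove the child and return true below; if there
        // was nothing we could remove, 'ok' stays false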
        if (jsvIsArray(parent)) {
          // For arrays, we must make sure we don't change the length
          JsVarInt l = jsvGetArrayLength(parent);
          jsvRemoveChild(parent, a);
          jsvSetArrayLength(parent, l, false);
        } else {
          jsvRemoveChild(parent, a);
        }
        ok = true;
      }
    }
    result = jsvNewFromBool(ok);
  }
  jsvUnLock2(a, parent);
  return result;
}

#ifndef SAVE_ON_FLASH
JsVar *jspeTemplateLiteral() {
  JsVar *a = 0;
  if (JSP_SHOULD_EXECUTE) {
    JsVar *template = jslGetTokenValueAsVar();
    a = jsvNewFromEmptyString();
    if (a && template) {
      JsvStringIterator it, dit;
      jsvStringIteratorNew(&it, template, 0);
      jsvStringIteratorNew(&dit, a, 0);
      while (jsvStringIteratorHasChar(&it)) {
        char ch = jsvStringIteratorGetCharAndNext(&it);
        if (ch=='$') {
          ch = jsvStringIteratorGetChar(&it);
          if (ch=='{') {
            // Now parse out the expression
            jsvStringIteratorNext(&it);
            int brackets = 1;
            JsVar *expr = jsvNewFromEmptyString();
            if (!expr) break;
            JsvStringIterator eit;
            jsvStringIteratorNew(&eit, expr, 0);
            while (jsvStringIteratorHasChar(&it)) {
              ch = jsvStringIteratorGetCharAndNext(&it);
              if (ch=='{') brackets++;
              if (ch=='}') {
                brackets--;
                if (!brackets) break;
              }
              jsvStringIteratorAppend(&eit, ch);
            }
            jsvStringIteratorFree(&eit);
            JsVar *result = jspEvaluateExpressionVar(expr);
            jsvUnLock(expr);
            result = jsvAsStringAndUnLock(result);
            jsvStringIteratorAppendString(&dit, result, 0, JSVAPPENDSTRINGVAR_MAXLENGTH);
            jsvUnLock(result);
          } else {
            jsvStringIteratorAppend(&dit, '$');
          }
        } else {
          jsvStringIteratorAppend(&dit, ch);
        }
      }
      jsvStringIteratorFree(&it);
      jsvStringIteratorFree(&dit);
    }
    jsvUnLock(template);
  }
  JSP_ASSERT_MATCH(LEX_TEMPLATE_LITERAL);
  return a;
}
#endif

NO_INLINE JsVar *jspeAddNamedFunctionParameter(JsVar *funcVar, JsVar *name) {
  if (!funcVar) funcVar = jsvNewWithFlags(JSV_FUNCTION);
  char buf[JSLEX_MAX_TOKEN_LENGTH+1];
  buf[0] = '\xFF';
  size_t l = jsvGetString(name, &buf[1], JSLEX_MAX_TOKEN_LENGTH);
  buf[l+1] = 0; // zero terminate since jsvGetString doesn't add one
  JsVar *param = jsvAddNamedChild(funcVar, 0, buf);
  jsvMakeFunctionParameter(param);
  jsvUnLock(param);
  return funcVar;
}

#ifndef SAVE_ON_FLASH
// parse an arrow function
NO_INLINE JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a) {
  assert(!a || jsvIsName(a));
  JSP_ASSERT_MATCH(LEX_ARROW_FUNCTION);
  funcVar = jspeAddNamedFunctionParameter(funcVar, a);

  bool expressionOnly = lex->tk!='{';
  bool fnIncludesThis = jspeFunctionDefinitionInternal(funcVar, expressionOnly);

  /* Arrow functions store the value of 'this' when they were defined. In order
     to differentiate between normal functions we usually have to store 'this'
     even if 'this' was just the global object. Very few arrow functions actually
     use 'this' though - usually they are just used as a shorthand, and so we end
     up wasting a whole extra var for every single arrow function.

     So... while parsing the function's body we check if the 'this' keyword is
     used. If it isn't, we just don't include it.
  */
  if (fnIncludesThis)
    jsvObjectSetChild(funcVar, JSPARSE_FUNCTION_THIS_NAME, execInfo.thisVar);

  return funcVar;
}

// parse expressions with commas, maybe followed by an arrow function (bracket already matched)
NO_INLINE JsVar *jspeExpressionOrArrowFunction() {
  JsVar *a = 0;
  JsVar *funcVar = 0;
  bool allNames = true;
  while (lex->tk!=')' && !JSP_SHOULDNT_PARSE) {
    if (allNames && a) {
      // we never get here if this isn't a name and a string
      funcVar = jspeAddNamedFunctionParameter(funcVar, a);
    }
    jsvUnLock(a);
    a = jspeAssignmentExpression();
    /* if we're not executing, `a` will always be undefined so
       don't do the check for allNames - just assume all is good.
We'll properly check when we execute. */ if (JSP_SHOULD_EXECUTE && !(jsvIsName(a) && jsvIsString(a))) allNames = false; if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',', jsvUnLock2(a,funcVar), 0); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(a,funcVar), 0); // if all names inside brackets and an arrow is found, create a function if (allNames && lex->tk==LEX_ARROW_FUNCTION) { funcVar = jspeArrowFunction(funcVar, a); jsvUnLock(a); return funcVar; } else { jsvUnLock(funcVar); return a; } } /// Parse an ES6 class, expects LEX_R_CLASS already parsed NO_INLINE JsVar *jspeClassDefinition(bool parseNamedClass) { JsVar *classFunction = 0; JsVar *classPrototype = 0; JsVar *classInternalName = 0; bool actuallyCreateClass = JSP_SHOULD_EXECUTE; if (actuallyCreateClass) { classFunction = jsvNewWithFlags(JSV_FUNCTION); JsVar *scopeVar = jspeiGetScopesAsVar(); if (scopeVar) jsvUnLock2(jsvAddNamedChild(classFunction, scopeVar, JSPARSE_FUNCTION_SCOPE_NAME), scopeVar); } if (parseNamedClass && lex->tk==LEX_ID) { if (classFunction) classInternalName = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_ID); } if (classFunction) { JsVar *prototypeName = jsvFindChildFromString(classFunction, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(classFunction, prototypeName); // make sure it's an object classPrototype = jsvSkipName(prototypeName); jsvUnLock(prototypeName); } if (lex->tk==LEX_R_EXTENDS) { JSP_ASSERT_MATCH(LEX_R_EXTENDS); JsVar *extendsFrom = actuallyCreateClass ? jsvSkipNameAndUnLock(jspGetNamedVariable(jslGetTokenValueAsString())) : 0; JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock4(extendsFrom,classFunction,classInternalName,classPrototype),0); if (classPrototype) { if (jsvIsFunction(extendsFrom)) { JsVar *extendsFromProto = jsvObjectGetChild(extendsFrom, JSPARSE_PROTOTYPE_VAR, 0); if (extendsFromProto) { jsvObjectSetChild(classPrototype, JSPARSE_INHERITS_VAR, extendsFromProto); // link in default constructor if ours isn't supplied jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_CODE_NAME, jsvNewFromString("if(this.__proto__.__proto__.constructor)this.__proto__.__proto__.constructor.apply(this,arguments)")); jsvUnLock(extendsFromProto); } } else jsExceptionHere(JSET_SYNTAXERROR, "'extends' argument should be a function, got %t", extendsFrom); } jsvUnLock(extendsFrom); } JSP_MATCH_WITH_CLEANUP_AND_RETURN('{',jsvUnLock3(classFunction,classInternalName,classPrototype),0); while ((lex->tk==LEX_ID || lex->tk==LEX_R_STATIC) && !jspIsInterrupted()) { bool isStatic = lex->tk==LEX_R_STATIC; if (isStatic) JSP_ASSERT_MATCH(LEX_R_STATIC); JsVar *funcName = jslGetTokenValueAsVar(); JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock4(funcName,classFunction,classInternalName,classPrototype),0); #ifndef SAVE_ON_FLASH bool isGetter = false, isSetter = false; if (lex->tk==LEX_ID) { isGetter = jsvIsStringEqual(funcName, "get"); isSetter = jsvIsStringEqual(funcName, "set"); if (isGetter || isSetter) { jsvUnLock(funcName); funcName = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_ID); } } #endif JsVar *method = jspeFunctionDefinition(false); if (classFunction && classPrototype) { JsVar *obj = isStatic ? 
classFunction : classPrototype; if (jsvIsStringEqual(funcName, "constructor")) { jswrap_function_replaceWith(classFunction, method); #ifndef SAVE_ON_FLASH } else if (isGetter || isSetter) { jsvAddGetterOrSetter(obj, funcName, isGetter, method); #endif } else { funcName = jsvMakeIntoVariableName(funcName, 0); jsvSetValueOfName(funcName, method); jsvAddName(obj, funcName); } } jsvUnLock2(method,funcName); } jsvUnLock(classPrototype); // If we had a name, add it to the end (or it gets confused with the constructor arguments) if (classInternalName) jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_NAME_NAME, classInternalName); JSP_MATCH_WITH_CLEANUP_AND_RETURN('}',jsvUnLock(classFunction),0); return classFunction; } #endif NO_INLINE JsVar *jspeFactor() { if (lex->tk==LEX_ID) { JsVar *a = jspGetNamedVariable(jslGetTokenValueAsString()); JSP_ASSERT_MATCH(LEX_ID); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_TEMPLATE_LITERAL) jsExceptionHere(JSET_SYNTAXERROR, "Tagged template literals not supported"); else if (lex->tk==LEX_ARROW_FUNCTION && (jsvIsName(a) || (a==0 && !JSP_SHOULD_EXECUTE))) { // 'a' needs to be a name, *or* we're not executing so 0 gets returned anyway JsVar *funcVar = jspeArrowFunction(0,a); jsvUnLock(a); a=funcVar; } #endif return a; } else if (lex->tk==LEX_INT) { JsVar *v = 0; if (JSP_SHOULD_EXECUTE) { v = jsvNewFromLongInteger(stringToInt(jslGetTokenValueAsString())); } JSP_ASSERT_MATCH(LEX_INT); return v; } else if (lex->tk==LEX_FLOAT) { JsVar *v = 0; if (JSP_SHOULD_EXECUTE) { v = jsvNewFromFloat(stringToFloat(jslGetTokenValueAsString())); } JSP_ASSERT_MATCH(LEX_FLOAT); return v; } else if (lex->tk=='(') { JSP_ASSERT_MATCH('('); if (!jspCheckStackPosition()) return 0; #ifdef SAVE_ON_FLASH // Just parse a normal expression (which can include commas) JsVar *a = jspeExpression(); if (!JSP_SHOULDNT_PARSE) JSP_MATCH_WITH_RETURN(')',a); return a; #else return jspeExpressionOrArrowFunction(); #endif } else if (lex->tk==LEX_R_TRUE) { JSP_ASSERT_MATCH(LEX_R_TRUE); return JSP_SHOULD_EXECUTE ? jsvNewFromBool(true) : 0; } else if (lex->tk==LEX_R_FALSE) { JSP_ASSERT_MATCH(LEX_R_FALSE); return JSP_SHOULD_EXECUTE ? jsvNewFromBool(false) : 0; } else if (lex->tk==LEX_R_NULL) { JSP_ASSERT_MATCH(LEX_R_NULL); return JSP_SHOULD_EXECUTE ? 
jsvNewWithFlags(JSV_NULL) : 0; } else if (lex->tk==LEX_R_UNDEFINED) { JSP_ASSERT_MATCH(LEX_R_UNDEFINED); return 0; } else if (lex->tk==LEX_STR) { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) a = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_STR); return a; #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_TEMPLATE_LITERAL) { return jspeTemplateLiteral(); #endif } else if (lex->tk==LEX_REGEX) { JsVar *a = 0; #ifdef SAVE_ON_FLASH jsExceptionHere(JSET_SYNTAXERROR, "RegEx are not supported in this version of Espruino\n"); #else JsVar *regex = jslGetTokenValueAsVar(); size_t regexEnd = 0, regexLen = 0; JsvStringIterator it; jsvStringIteratorNew(&it, regex, 0); while (jsvStringIteratorHasChar(&it)) { regexLen++; if (jsvStringIteratorGetCharAndNext(&it)=='/') regexEnd = regexLen; } jsvStringIteratorFree(&it); JsVar *flags = 0; if (regexEnd < regexLen) flags = jsvNewFromStringVar(regex, regexEnd, JSVAPPENDSTRINGVAR_MAXLENGTH); JsVar *regexSource = jsvNewFromStringVar(regex, 1, regexEnd-2); a = jswrap_regexp_constructor(regexSource, flags); jsvUnLock3(regex, flags, regexSource); #endif JSP_ASSERT_MATCH(LEX_REGEX); return a; } else if (lex->tk=='{') { if (!jspCheckStackPosition()) return 0; return jspeFactorObject(); } else if (lex->tk=='[') { if (!jspCheckStackPosition()) return 0; return jspeFactorArray(); } else if (lex->tk==LEX_R_FUNCTION) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_FUNCTION); return jspeFunctionDefinition(true); #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_R_CLASS) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_CLASS); return jspeClassDefinition(true); } else if (lex->tk==LEX_R_SUPER) { JSP_ASSERT_MATCH(LEX_R_SUPER); /* This is kind of nasty, since super appears to do three different things. * In the constructor it references the extended class's constructor * in a method it references the constructor's prototype. * in a static method it references the extended class's constructor (but this is different) */ if (jsvIsObject(execInfo.thisVar)) { // 'this' is an object - must be calling a normal method JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_INHERITS_VAR, 0); // if we're in a method, get __proto__ first JsVar *proto2 = jsvIsObject(proto1) ? jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0; // still in method, get __proto__.__proto__ jsvUnLock(proto1); if (!proto2) { jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; } // If we're doing super() we want the constructor if (lex->tk=='(') { JsVar *constr = jsvObjectGetChild(proto2, JSPARSE_CONSTRUCTOR_VAR, 0); jsvUnLock(proto2); return constr; } // But if we're doing something else - eg 'super.' or 'super[' then it needs to reference the prototype return proto2; } else if (jsvIsFunction(execInfo.thisVar)) { // 'this' is a function - must be calling a static method JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_PROTOTYPE_VAR, 0); JsVar *proto2 = jsvIsObject(proto1) ? jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0; jsvUnLock(proto1); if (!proto2) { jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; } JsVar *constr = jsvObjectGetChild(proto2, JSPARSE_CONSTRUCTOR_VAR, 0); jsvUnLock(proto2); return constr; } jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; #endif } else if (lex->tk==LEX_R_THIS) { JSP_ASSERT_MATCH(LEX_R_THIS); return jsvLockAgain( execInfo.thisVar ? 
execInfo.thisVar : execInfo.root ); } else if (lex->tk==LEX_R_DELETE) { if (!jspCheckStackPosition()) return 0; return jspeFactorDelete(); } else if (lex->tk==LEX_R_TYPEOF) { if (!jspCheckStackPosition()) return 0; return jspeFactorTypeOf(); } else if (lex->tk==LEX_R_VOID) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_VOID); jsvUnLock(jspeUnaryExpression()); return 0; } JSP_MATCH(LEX_EOF); jsExceptionHere(JSET_SYNTAXERROR, "Unexpected end of Input\n"); return 0; } NO_INLINE JsVar *__jspePostfixExpression(JsVar *a) { while (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) { int op = lex->tk; JSP_ASSERT_MATCH(op); if (JSP_SHOULD_EXECUTE) { JsVar *one = jsvNewFromInteger(1); JsVar *oldValue = jsvAsNumberAndUnLock(jsvSkipName(a)); // keep the old value (but convert to number) JsVar *res = jsvMathsOpSkipNames(oldValue, one, op==LEX_PLUSPLUS ? '+' : '-'); jsvUnLock(one); // in-place add/subtract jsvReplaceWith(a, res); jsvUnLock(res); // but then use the old value jsvUnLock(a); a = oldValue; } } return a; } NO_INLINE JsVar *jspePostfixExpression() { JsVar *a; // TODO: should be in jspeUnaryExpression if (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) { int op = lex->tk; JSP_ASSERT_MATCH(op); a = jspePostfixExpression(); if (JSP_SHOULD_EXECUTE) { JsVar *one = jsvNewFromInteger(1); JsVar *res = jsvMathsOpSkipNames(a, one, op==LEX_PLUSPLUS ? '+' : '-'); jsvUnLock(one); // in-place add/subtract jsvReplaceWith(a, res); jsvUnLock(res); } } else a = jspeFactorFunctionCall(); return __jspePostfixExpression(a); } NO_INLINE JsVar *jspeUnaryExpression() { if (lex->tk=='!' || lex->tk=='~' || lex->tk=='-' || lex->tk=='+') { short tk = lex->tk; JSP_ASSERT_MATCH(tk); if (!JSP_SHOULD_EXECUTE) { return jspeUnaryExpression(); } if (tk=='!') { // logical not return jsvNewFromBool(!jsvGetBoolAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression()))); } else if (tk=='~') { // bitwise not return jsvNewFromInteger(~jsvGetIntegerAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression()))); } else if (tk=='-') { // unary minus return jsvNegateAndUnLock(jspeUnaryExpression()); // names already skipped } else if (tk=='+') { // unary plus (convert to number) JsVar *v = jsvSkipNameAndUnLock(jspeUnaryExpression()); JsVar *r = jsvAsNumber(v); // names already skipped jsvUnLock(v); return r; } assert(0); return 0; } else return jspePostfixExpression(); } // Get the precedence of a BinaryExpression - or return 0 if not one unsigned int jspeGetBinaryExpressionPrecedence(int op) { switch (op) { case LEX_OROR: return 1; break; case LEX_ANDAND: return 2; break; case '|' : return 3; break; case '^' : return 4; break; case '&' : return 5; break; case LEX_EQUAL: case LEX_NEQUAL: case LEX_TYPEEQUAL: case LEX_NTYPEEQUAL: return 6; case LEX_LEQUAL: case LEX_GEQUAL: case '<': case '>': case LEX_R_INSTANCEOF: return 7; case LEX_R_IN: return (execInfo.execute&EXEC_FOR_INIT)?0:7; case LEX_LSHIFT: case LEX_RSHIFT: case LEX_RSHIFTUNSIGNED: return 8; case '+': case '-': return 9; case '*': case '/': case '%': return 10; default: return 0; } } NO_INLINE JsVar *__jspeBinaryExpression(JsVar *a, unsigned int lastPrecedence) { /* This one's a bit strange. Basically all the ops have their own precedence, it's not * like & and | share the same precedence. We don't want to recurse for each one, * so instead we do this. * * We deal with an expression in recursion ONLY if it's of higher precedence * than the current one, otherwise we stick in the while loop. 
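 *
 * Worked example: in `1+2*3<10`, while handling '+' (precedence 9) we see
 * '*' (precedence 10) and recurse to get `2*3`, but '<' (precedence 7) is
 * lower, so it gets handled by a later turn of the while loop below.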
*/ unsigned int precedence = jspeGetBinaryExpressionPrecedence(lex->tk); while (precedence && precedence>lastPrecedence) { int op = lex->tk; JSP_ASSERT_MATCH(op); // if we have short-circuit ops, then if we know the outcome // we don't bother to execute the other op. Even if not // we need to tell mathsOp it's an & or | if (op==LEX_ANDAND || op==LEX_OROR) { bool aValue = jsvGetBoolAndUnLock(jsvSkipName(a)); if ((!aValue && op==LEX_ANDAND) || (aValue && op==LEX_OROR)) { // use first argument (A) JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(__jspeBinaryExpression(jspeUnaryExpression(),precedence)); JSP_RESTORE_EXECUTE(); } else { // use second argument (B) jsvUnLock(a); a = __jspeBinaryExpression(jspeUnaryExpression(),precedence); } } else { // else it's a more 'normal' logical expression - just use Maths JsVar *b = __jspeBinaryExpression(jspeUnaryExpression(),precedence); if (JSP_SHOULD_EXECUTE) { if (op==LEX_R_IN) { JsVar *av = jsvSkipName(a); // needle JsVar *bv = jsvSkipName(b); // haystack if (jsvHasChildren(bv)) { // search keys, NOT values av = jsvAsArrayIndexAndUnLock(av); JsVar *varFound = jspGetVarNamedField( bv, av, true); jsvUnLock2(a,varFound); a = jsvNewFromBool(varFound!=0); } else { // else maybe it's a fake object... const JswSymList *syms = jswGetSymbolListForObjectProto(bv); if (syms) { JsVar *varFound = 0; char nameBuf[JSLEX_MAX_TOKEN_LENGTH]; if (jsvGetString(av, nameBuf, sizeof(nameBuf)) < sizeof(nameBuf)) varFound = jswBinarySearch(syms, bv, nameBuf); bool found = varFound!=0; jsvUnLock2(a, varFound); if (!found && jsvIsArrayBuffer(bv)) { JsVarFloat f = jsvGetFloat(av); // if not a number this will be NaN, f==floor(f) fails if (f==floor(f) && f>=0 && f<jsvGetArrayBufferLength(bv)) found = true; } a = jsvNewFromBool(found); } else { // not built-in, just assume we can't do it jsExceptionHere(JSET_ERROR, "Cannot use 'in' operator to search a %t", bv); jsvUnLock(a); a = 0; } } jsvUnLock2(av, bv); } else if (op==LEX_R_INSTANCEOF) { bool inst = false; JsVar *av = jsvSkipName(a); JsVar *bv = jsvSkipName(b); if (!jsvIsFunction(bv)) { jsExceptionHere(JSET_ERROR, "Expecting a function on RHS in instanceof check, got %t", bv); } else { if (jsvIsObject(av) || jsvIsFunction(av)) { JsVar *bproto = jspGetNamedField(bv, JSPARSE_PROTOTYPE_VAR, false); JsVar *proto = jsvObjectGetChild(av, JSPARSE_INHERITS_VAR, 0); while (proto) { if (proto == bproto) inst=true; // search prototype chain JsVar *childProto = jsvObjectGetChild(proto, JSPARSE_INHERITS_VAR, 0); jsvUnLock(proto); proto = childProto; } if (jspIsConstructor(bv, "Object")) inst = true; jsvUnLock(bproto); } if (!inst) { const char *name = jswGetBasicObjectName(av); if (name) { inst = jspIsConstructor(bv, name); } // Hack for built-ins that should also be instances of Object if (!inst && (jsvIsArray(av) || jsvIsArrayBuffer(av)) && jspIsConstructor(bv, "Object")) inst = true; } } jsvUnLock3(av, bv, a); a = jsvNewFromBool(inst); } else { // --------------------------------------------- NORMAL JsVar *res = jsvMathsOpSkipNames(a, b, op); jsvUnLock(a); a = res; } } jsvUnLock(b); } precedence = jspeGetBinaryExpressionPrecedence(lex->tk); } return a; } JsVar *jspeBinaryExpression() { return __jspeBinaryExpression(jspeUnaryExpression(),0); } NO_INLINE JsVar *__jspeConditionalExpression(JsVar *lhs) { if (lex->tk=='?') { JSP_ASSERT_MATCH('?'); if (!JSP_SHOULD_EXECUTE) { // just let lhs pass through jsvUnLock(jspeAssignmentExpression()); JSP_MATCH(':'); jsvUnLock(jspeAssignmentExpression()); } else { bool first = 
jsvGetBoolAndUnLock(jsvSkipName(lhs)); jsvUnLock(lhs); if (first) { lhs = jspeAssignmentExpression(); JSP_MATCH(':'); JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeAssignmentExpression()); JSP_RESTORE_EXECUTE(); } else { JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeAssignmentExpression()); JSP_RESTORE_EXECUTE(); JSP_MATCH(':'); lhs = jspeAssignmentExpression(); } } } return lhs; } JsVar *jspeConditionalExpression() { return __jspeConditionalExpression(jspeBinaryExpression()); } NO_INLINE JsVar *__jspeAssignmentExpression(JsVar *lhs) { if (lex->tk=='=' || lex->tk==LEX_PLUSEQUAL || lex->tk==LEX_MINUSEQUAL || lex->tk==LEX_MULEQUAL || lex->tk==LEX_DIVEQUAL || lex->tk==LEX_MODEQUAL || lex->tk==LEX_ANDEQUAL || lex->tk==LEX_OREQUAL || lex->tk==LEX_XOREQUAL || lex->tk==LEX_RSHIFTEQUAL || lex->tk==LEX_LSHIFTEQUAL || lex->tk==LEX_RSHIFTUNSIGNEDEQUAL) { JsVar *rhs; int op = lex->tk; JSP_ASSERT_MATCH(op); rhs = jspeAssignmentExpression(); rhs = jsvSkipNameAndUnLock(rhs); // ensure we get rid of any references on the RHS if (JSP_SHOULD_EXECUTE && lhs) { if (op=='=') { jsvReplaceWithOrAddToRoot(lhs, rhs); } else { if (op==LEX_PLUSEQUAL) op='+'; else if (op==LEX_MINUSEQUAL) op='-'; else if (op==LEX_MULEQUAL) op='*'; else if (op==LEX_DIVEQUAL) op='/'; else if (op==LEX_MODEQUAL) op='%'; else if (op==LEX_ANDEQUAL) op='&'; else if (op==LEX_OREQUAL) op='|'; else if (op==LEX_XOREQUAL) op='^'; else if (op==LEX_RSHIFTEQUAL) op=LEX_RSHIFT; else if (op==LEX_LSHIFTEQUAL) op=LEX_LSHIFT; else if (op==LEX_RSHIFTUNSIGNEDEQUAL) op=LEX_RSHIFTUNSIGNED; if (op=='+' && jsvIsName(lhs)) { JsVar *currentValue = jsvSkipName(lhs); if (jsvIsBasicString(currentValue) && jsvGetRefs(currentValue)==1 && rhs!=currentValue) { /* A special case for string += where this is the only use of the string * and we're not appending to ourselves. In this case we can do a * simple append (rather than clone + append)*/ JsVar *str = jsvAsString(rhs); jsvAppendStringVarComplete(currentValue, str); jsvUnLock(str); op = 0; } jsvUnLock(currentValue); } if (op) { /* Fallback which does a proper add */ JsVar *res = jsvMathsOpSkipNames(lhs,rhs,op); jsvReplaceWith(lhs, res); jsvUnLock(res); } } } jsvUnLock(rhs); } return lhs; } JsVar *jspeAssignmentExpression() { return __jspeAssignmentExpression(jspeConditionalExpression()); } // ',' is allowed to add multiple expressions, this is not allowed in jspeAssignmentExpression NO_INLINE JsVar *jspeExpression() { while (!JSP_SHOULDNT_PARSE) { JsVar *a = jspeAssignmentExpression(); if (lex->tk!=',') return a; // if we get a comma, we just forget this data and parse the next bit... jsvCheckReferenceError(a); jsvUnLock(a); JSP_ASSERT_MATCH(','); } return 0; } /** Parse a block `{ ... }` */ NO_INLINE void jspeSkipBlock() { // fast skip of blocks int brackets = 1; while (lex->tk && brackets) { if (lex->tk == '{') brackets++; else if (lex->tk == '}') { brackets--; if (!brackets) return; } JSP_ASSERT_MATCH(lex->tk); } } /** Parse a block `{ ... 
}` but assume brackets are already parsed */ NO_INLINE void jspeBlockNoBrackets() { if (JSP_SHOULD_EXECUTE) { while (lex->tk && lex->tk!='}') { JsVar *a = jspeStatement(); jsvCheckReferenceError(a); jsvUnLock(a); if (JSP_HAS_ERROR) { if (lex && !(execInfo.execute&EXEC_ERROR_LINE_REPORTED)) { execInfo.execute = (JsExecFlags)(execInfo.execute | EXEC_ERROR_LINE_REPORTED); JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, "at "); jspAppendStackTrace(stackTrace); jsvUnLock(stackTrace); } } } if (JSP_SHOULDNT_PARSE) return; if (!JSP_SHOULD_EXECUTE) { jspeSkipBlock(); return; } } } else { jspeSkipBlock(); } return; } /** Parse a block `{ ... }` */ NO_INLINE void jspeBlock() { JSP_MATCH_WITH_RETURN('{',); jspeBlockNoBrackets(); if (!JSP_SHOULDNT_PARSE) JSP_MATCH_WITH_RETURN('}',); return; } NO_INLINE JsVar *jspeBlockOrStatement() { if (lex->tk=='{') { jspeBlock(); return 0; } else { JsVar *v = jspeStatement(); if (lex->tk==';') JSP_ASSERT_MATCH(';'); return v; } } /** Parse using current lexer until we hit the end of * input or there was some problem. */ NO_INLINE JsVar *jspParse() { JsVar *v = 0; while (!JSP_SHOULDNT_PARSE && lex->tk != LEX_EOF) { jsvUnLock(v); v = jspeBlockOrStatement(); jsvCheckReferenceError(v); } return v; } NO_INLINE JsVar *jspeStatementVar() { JsVar *lastDefined = 0; /* variable creation. TODO - we need a better way of parsing the left * hand side. Maybe just have a flag called can_create_var that we * set and then we parse as if we're doing a normal equals.*/ assert(lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST); jslGetNextToken(); ///TODO: Correctly implement CONST and LET - we just treat them like 'var' at the moment bool hasComma = true; // for first time in loop while (hasComma && lex->tk == LEX_ID && !jspIsInterrupted()) { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) { a = jspeiFindOnTop(jslGetTokenValueAsString(), true); if (!a) { // out of memory jspSetError(false); return lastDefined; } } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(a), lastDefined); // sort out initialiser if (lex->tk == '=') { JsVar *var; JSP_MATCH_WITH_CLEANUP_AND_RETURN('=', jsvUnLock(a), lastDefined); var = jsvSkipNameAndUnLock(jspeAssignmentExpression()); if (JSP_SHOULD_EXECUTE) jsvReplaceWith(a, var); jsvUnLock(var); } jsvUnLock(lastDefined); lastDefined = a; hasComma = lex->tk == ','; if (hasComma) JSP_MATCH_WITH_RETURN(',', lastDefined); } return lastDefined; } NO_INLINE JsVar *jspeStatementIf() { bool cond; JsVar *var, *result = 0; JSP_ASSERT_MATCH(LEX_R_IF); JSP_MATCH('('); var = jspeExpression(); if (JSP_SHOULDNT_PARSE) return var; JSP_MATCH(')'); cond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(var)); jsvUnLock(var); JSP_SAVE_EXECUTE(); if (!cond) jspSetNoExecute(); JsExecFlags hasError = 0; JsVar *a = jspeBlockOrStatement(); hasError |= execInfo.execute&EXEC_ERROR_MASK; if (!cond) { jsvUnLock(a); JSP_RESTORE_EXECUTE(); execInfo.execute |= hasError; } else { result = a; } if (lex->tk==LEX_R_ELSE) { JSP_ASSERT_MATCH(LEX_R_ELSE); JSP_SAVE_EXECUTE(); if (cond) jspSetNoExecute(); JsVar *a = jspeBlockOrStatement(); hasError |= execInfo.execute&EXEC_ERROR_MASK; if (cond) { jsvUnLock(a); JSP_RESTORE_EXECUTE(); execInfo.execute |= hasError; } else { result = a; } } return result; } NO_INLINE JsVar *jspeStatementSwitch() { JSP_ASSERT_MATCH(LEX_R_SWITCH); JSP_MATCH('('); JsVar *switchOn = jspeExpression(); JSP_SAVE_EXECUTE(); bool execute = JSP_SHOULD_EXECUTE; 
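  /* From here on, execInfo.execute is flipped between EXEC_NO and EXEC_YES
   * (always with EXEC_IN_SWITCH set) as case tests fail or match. Note the
   * usual fall-through behaviour: in `switch(1){case 1: a(); case 2: b(); break;}`
   * both a() and b() run, since only 'break' (or the end) stops execution again. */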
  JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock(switchOn), 0);
  // shortcut if not executing...
  if (!execute) {
    jsvUnLock(switchOn);
    jspeBlock();
    return 0;
  }
  JSP_MATCH_WITH_CLEANUP_AND_RETURN('{', jsvUnLock(switchOn), 0);

  bool executeDefault = true;
  if (execute) execInfo.execute=EXEC_NO|EXEC_IN_SWITCH;
  while (lex->tk==LEX_R_CASE) {
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_CASE, jsvUnLock(switchOn), 0);
    JsExecFlags oldFlags = execInfo.execute;
    if (execute) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
    JsVar *test = jspeAssignmentExpression();
    execInfo.execute = oldFlags|EXEC_IN_SWITCH;
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock2(switchOn, test), 0);
    bool cond = false;
    if (execute)
      cond = jsvGetBoolAndUnLock(jsvMathsOpSkipNames(switchOn, test, LEX_TYPEEQUAL));
    if (cond) executeDefault = false;
    jsvUnLock(test);
    if (cond && (execInfo.execute&EXEC_RUN_MASK)==EXEC_NO)
      execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
    while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!=LEX_R_CASE && lex->tk!=LEX_R_DEFAULT && lex->tk!='}')
      jsvUnLock(jspeBlockOrStatement());
    oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns
  }
  jsvUnLock(switchOn);
  if (execute && (execInfo.execute&EXEC_RUN_MASK)==EXEC_BREAK) {
    execInfo.execute=EXEC_YES|EXEC_IN_SWITCH;
  } else {
    executeDefault = true;
  }
  JSP_RESTORE_EXECUTE();

  if (lex->tk==LEX_R_DEFAULT) {
    JSP_ASSERT_MATCH(LEX_R_DEFAULT);
    JSP_MATCH(':');
    JSP_SAVE_EXECUTE();
    if (!executeDefault) jspSetNoExecute();
    else execInfo.execute |= EXEC_IN_SWITCH;
    while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!='}' && lex->tk!=LEX_R_CASE)
      jsvUnLock(jspeBlockOrStatement());
    oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns
    execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_BREAK;
    JSP_RESTORE_EXECUTE();
  }
  if (lex->tk==LEX_R_CASE) {
    jsExceptionHere(JSET_SYNTAXERROR, "Espruino doesn't support CASE after DEFAULT");
    return 0;
  }
  JSP_MATCH('}');
  return 0;
}

// Check whether we received a break/continue while parsing previously. Return true if we had a 'break'
static NO_INLINE bool jspeCheckBreakContinue() {
  if (execInfo.execute & EXEC_CONTINUE)
    execInfo.execute = (execInfo.execute & ~EXEC_RUN_MASK) | EXEC_YES;
  else if (execInfo.execute & EXEC_BREAK) {
    execInfo.execute = (execInfo.execute & ~EXEC_RUN_MASK) | EXEC_YES;
    return true;
  }
  return false;
}

NO_INLINE JsVar *jspeStatementDoOrWhile(bool isWhile) {
  JsVar *cond;
  bool loopCond = true; // true for do...while loops
  bool hasHadBreak = false;
  JslCharPos whileCondStart;
  // We do repetition by pulling out the string representing our statement
  // there's definitely some opportunity for optimisation here
  bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0;
  JslCharPos whileBodyStart;
  if (isWhile) { // while loop
    JSP_ASSERT_MATCH(LEX_R_WHILE);
    jslCharPosFromLex(&whileCondStart);
    JSP_MATCH_WITH_CLEANUP_AND_RETURN('(',jslCharPosFree(&whileCondStart);,0);
    cond = jspeExpression();
    loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond));
    jsvUnLock(cond);
    jslCharPosFromLex(&whileBodyStart);
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0);
  } else {
    jslCharPosFromLex(&whileBodyStart);
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_DO, jslCharPosFree(&whileBodyStart);,0);
  }
  JSP_SAVE_EXECUTE();
  // actually try and execute first bit of while loop (we'll do the rest in the actual loop later)
  if (!loopCond) jspSetNoExecute();
  execInfo.execute |= EXEC_IN_LOOP;
  jsvUnLock(jspeBlockOrStatement());
  if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;

  hasHadBreak |= jspeCheckBreakContinue();
  if (!loopCond) JSP_RESTORE_EXECUTE();

  if (!isWhile) { // do..while loop
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_WHILE,jslCharPosFree(&whileBodyStart);,0);
    jslCharPosFromLex(&whileCondStart);
    JSP_MATCH_WITH_CLEANUP_AND_RETURN('(',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0);
    cond = jspeExpression();
    loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond));
    jsvUnLock(cond);
    JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0);
  }

  JslCharPos whileBodyEnd;
  jslCharPosNew(&whileBodyEnd, lex->sourceVar, lex->tokenStart);

  int loopCount = 0;
  while (!hasHadBreak && loopCond
#ifdef JSPARSE_MAX_LOOP_ITERATIONS
         && loopCount<JSPARSE_MAX_LOOP_ITERATIONS
#endif
         ) {
    if (isWhile || loopCount) { // don't check the start condition a second time if we're in a do..while loop
      jslSeekToP(&whileCondStart);
      cond = jspeExpression();
      loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond));
      jsvUnLock(cond);
    }
    if (loopCond) {
      jslSeekToP(&whileBodyStart);
      execInfo.execute |= EXEC_IN_LOOP;
      jspDebuggerLoopIfCtrlC();
      jsvUnLock(jspeBlockOrStatement());
      if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP;
      hasHadBreak |= jspeCheckBreakContinue();
    }
    loopCount++;
  }
  jslSeekToP(&whileBodyEnd);
  jslCharPosFree(&whileCondStart);
  jslCharPosFree(&whileBodyStart);
  jslCharPosFree(&whileBodyEnd);

#ifdef JSPARSE_MAX_LOOP_ITERATIONS
  if (loopCount >= JSPARSE_MAX_LOOP_ITERATIONS) { // the loop above exits with loopCount==MAX, so '>' could never fire
    jsExceptionHere(JSET_ERROR, "WHILE Loop exceeded the maximum number of iterations (" STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS) ")");
  }
#endif
  return 0;
}

NO_INLINE JsVar *jspGetBuiltinPrototype(JsVar *obj) {
  if (jsvIsArray(obj)) {
    JsVar *v = jspFindPrototypeFor("Array");
    if (v) return v;
  }
  if (jsvIsObject(obj) || jsvIsArray(obj)) {
    JsVar *v = jspFindPrototypeFor("Object");
    if (v==obj) { // don't return ourselves
      jsvUnLock(v);
      v = 0;
    }
    return v;
  }
  return 0;
}

NO_INLINE JsVar
*jspeStatementFor() { JSP_ASSERT_MATCH(LEX_R_FOR); JSP_MATCH('('); bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0; execInfo.execute |= EXEC_FOR_INIT; // initialisation JsVar *forStatement = 0; // we could have 'for (;;)' - so don't munch up our semicolon if that's all we have if (lex->tk != ';') forStatement = jspeStatement(); if (jspIsInterrupted()) { jsvUnLock(forStatement); return 0; } execInfo.execute &= (JsExecFlags)~EXEC_FOR_INIT; #ifndef SAVE_ON_FLASH_EXTREME if (lex->tk == LEX_R_IN || lex->tk == LEX_R_OF) { bool isForOf = lex->tk == LEX_R_OF; // for (i in array) or for (i of array) // where i = forStatement if (JSP_SHOULD_EXECUTE && !jsvIsName(forStatement)) { jsvUnLock(forStatement); jsExceptionHere(JSET_ERROR, "for(a %s b) - 'a' must be a variable name, not %t", isForOf?"of":"in", forStatement); return 0; } JSP_ASSERT_MATCH(lex->tk); // skip over in/of JsVar *array = jsvSkipNameAndUnLock(jspeExpression()); JslCharPos forBodyStart; jslCharPosFromLex(&forBodyStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(forStatement, array);jslCharPosFree(&forBodyStart), 0); // Simply scan over the loop the first time without executing to figure out where it ends // OPT: we could skip the first parse and actually execute the first time JSP_SAVE_EXECUTE(); jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); JslCharPos forBodyEnd; jslCharPosNew(&forBodyEnd, lex->sourceVar, lex->tokenStart); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; JSP_RESTORE_EXECUTE(); // Now start executing properly if (JSP_SHOULD_EXECUTE) { if (jsvIsIterable(array)) { JsvIsInternalChecker checkerFunction = jsvGetInternalFunctionCheckerFor(array); JsVar *foundPrototype = 0; if (!isForOf) // for..in foundPrototype = jspGetBuiltinPrototype(array); JsvIterator it; jsvIteratorNew(&it, array, isForOf ? /* for of */ JSIF_EVERY_ARRAY_ELEMENT : /* for in */ JSIF_DEFINED_ARRAY_ElEMENTS); bool hasHadBreak = false; while (JSP_SHOULD_EXECUTE && jsvIteratorHasElement(&it) && !hasHadBreak) { JsVar *loopIndexVar = jsvIteratorGetKey(&it); bool ignore = false; if (checkerFunction && checkerFunction(loopIndexVar)) { ignore = true; if (jsvIsString(loopIndexVar) && jsvIsStringEqual(loopIndexVar, JSPARSE_INHERITS_VAR)) foundPrototype = jsvSkipName(loopIndexVar); } if (!ignore) { JsVar *iteratorValue; if (isForOf) { // for (... of ...) iteratorValue = jsvIteratorGetValue(&it); } else { // for (... in ...) iteratorValue = jsvIsName(loopIndexVar) ? 
jsvCopyNameOnly(loopIndexVar, false/*no copy children*/, false/*not a name*/) : loopIndexVar; assert(jsvGetRefs(iteratorValue)==0); } if (isForOf || iteratorValue) { // could be out of memory assert(!jsvIsName(iteratorValue)); jsvReplaceWithOrAddToRoot(forStatement, iteratorValue); if (iteratorValue!=loopIndexVar) jsvUnLock(iteratorValue); jslSeekToP(&forBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); } } jsvIteratorNext(&it); jsvUnLock(loopIndexVar); // if using for..in we'll skip down the prototype chain when we reach the end of the current one if (!jsvIteratorHasElement(&it) && !isForOf && foundPrototype) { jsvIteratorFree(&it); JsVar *iterable = foundPrototype; jsvIteratorNew(&it, iterable, JSIF_DEFINED_ARRAY_ElEMENTS); checkerFunction = jsvGetInternalFunctionCheckerFor(iterable); foundPrototype = jspGetBuiltinPrototype(iterable); jsvUnLock(iterable); } } assert(!foundPrototype); jsvIteratorFree(&it); } else if (!jsvIsUndefined(array)) { jsExceptionHere(JSET_ERROR, "FOR loop can only iterate over Arrays, Strings or Objects, not %t", array); } } jslSeekToP(&forBodyEnd); jslCharPosFree(&forBodyStart); jslCharPosFree(&forBodyEnd); jsvUnLock2(forStatement, array); #else // SAVE_ON_FLASH_EXTREME if (false) { #endif // SAVE_ON_FLASH_EXTREME } else { // ----------------------------------------------- NORMAL FOR LOOP #ifdef JSPARSE_MAX_LOOP_ITERATIONS int loopCount = JSPARSE_MAX_LOOP_ITERATIONS; #endif bool loopCond = true; bool hasHadBreak = false; jsvUnLock(forStatement); JslCharPos forCondStart; jslCharPosFromLex(&forCondStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);,0); if (lex->tk != ';') { JsVar *cond = jspeExpression(); // condition loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } JslCharPos forIterStart; jslCharPosFromLex(&forIterStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);,0); if (lex->tk != ')') { // we could have 'for (;;)' JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeExpression()); // iterator JSP_RESTORE_EXECUTE(); } JslCharPos forBodyStart; jslSkipWhiteSpace(); jslCharPosFromLex(&forBodyStart); // actual for body JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);jslCharPosFree(&forBodyStart);,0); JSP_SAVE_EXECUTE(); if (!loopCond) jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); JslCharPos forBodyEnd; jslSkipWhiteSpace(); jslCharPosNew(&forBodyEnd, lex->sourceVar, lex->tokenStart); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; if (loopCond || !JSP_SHOULD_EXECUTE) { hasHadBreak |= jspeCheckBreakContinue(); } if (!loopCond) JSP_RESTORE_EXECUTE(); if (loopCond) { jslSeekToP(&forIterStart); if (lex->tk != ')') jsvUnLock(jspeExpression()); } while (!hasHadBreak && JSP_SHOULD_EXECUTE && loopCond #ifdef JSPARSE_MAX_LOOP_ITERATIONS && loopCount-->0 #endif ) { jslSeekToP(&forCondStart); ; if (lex->tk == ';') { loopCond = true; } else { JsVar *cond = jspeExpression(); loopCond = jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } if (JSP_SHOULD_EXECUTE && loopCond) { jslSeekToP(&forBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= 
jspeCheckBreakContinue(); } if (JSP_SHOULD_EXECUTE && loopCond && !hasHadBreak) { jslSeekToP(&forIterStart); if (lex->tk != ')') jsvUnLock(jspeExpression()); } } jslSeekToP(&forBodyEnd); jslCharPosFree(&forCondStart); jslCharPosFree(&forIterStart); jslCharPosFree(&forBodyStart); jslCharPosFree(&forBodyEnd); #ifdef JSPARSE_MAX_LOOP_ITERATIONS if (loopCount<=0) { jsExceptionHere(JSET_ERROR, "FOR Loop exceeded the maximum number of iterations ("STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS)")"); } #endif } return 0; } NO_INLINE JsVar *jspeStatementTry() { // execute the try block JSP_ASSERT_MATCH(LEX_R_TRY); bool shouldExecuteBefore = JSP_SHOULD_EXECUTE; jspeBlock(); bool hadException = shouldExecuteBefore && ((execInfo.execute & EXEC_EXCEPTION)!=0); bool hadCatch = false; if (lex->tk == LEX_R_CATCH) { JSP_ASSERT_MATCH(LEX_R_CATCH); hadCatch = true; JSP_MATCH('('); JsVar *scope = 0; JsVar *exceptionVar = 0; if (hadException) { scope = jsvNewObject(); if (scope) exceptionVar = jsvFindChildFromString(scope, jslGetTokenValueAsString(), true); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock2(scope,exceptionVar),0); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jsvUnLock2(scope,exceptionVar),0); if (exceptionVar) { // set the exception var up properly JsVar *exception = jspGetException(); if (exception) { jsvSetValueOfName(exceptionVar, exception); jsvUnLock(exception); } // Now clear the exception flag (it's handled - we hope!) execInfo.execute = execInfo.execute & (JsExecFlags)~(EXEC_EXCEPTION|EXEC_ERROR_LINE_REPORTED); jsvUnLock(exceptionVar); } if (shouldExecuteBefore && !hadException) { JSP_SAVE_EXECUTE(); jspSetNoExecute(); jspeBlock(); JSP_RESTORE_EXECUTE(); } else { if (!scope || jspeiAddScope(scope)) { jspeBlock(); if (scope) jspeiRemoveScope(); } } jsvUnLock(scope); } if (lex->tk == LEX_R_FINALLY || (!hadCatch && ((execInfo.execute&(EXEC_ERROR|EXEC_INTERRUPTED))==0))) { JSP_MATCH(LEX_R_FINALLY); // clear the exception flag - but only momentarily! if (hadException) execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_EXCEPTION; jspeBlock(); // put the flag back! if (hadException && !hadCatch) execInfo.execute = execInfo.execute | EXEC_EXCEPTION; } return 0; } NO_INLINE JsVar *jspeStatementReturn() { JsVar *result = 0; JSP_ASSERT_MATCH(LEX_R_RETURN); if (lex->tk != ';' && lex->tk != '}') { // we only want the value, so skip the name if there was one result = jsvSkipNameAndUnLock(jspeExpression()); } if (JSP_SHOULD_EXECUTE) { JsVar *resultVar = jspeiFindInScopes(JSPARSE_RETURN_VAR); if (resultVar) { jsvReplaceWith(resultVar, result); jsvUnLock(resultVar); execInfo.execute |= EXEC_RETURN; // Stop anything else in this function executing } else { jsExceptionHere(JSET_SYNTAXERROR, "RETURN statement, but not in a function.\n"); } } jsvUnLock(result); return 0; } NO_INLINE JsVar *jspeStatementThrow() { JsVar *result = 0; JSP_ASSERT_MATCH(LEX_R_THROW); result = jsvSkipNameAndUnLock(jspeExpression()); if (JSP_SHOULD_EXECUTE) { jspSetException(result); // Stop anything else in this function executing } jsvUnLock(result); return 0; } NO_INLINE JsVar *jspeStatementFunctionDecl(bool isClass) { JsVar *funcName = 0; JsVar *funcVar; #ifndef SAVE_ON_FLASH JSP_ASSERT_MATCH(isClass ? 
LEX_R_CLASS : LEX_R_FUNCTION); #else JSP_ASSERT_MATCH(LEX_R_FUNCTION); #endif bool actuallyCreateFunction = JSP_SHOULD_EXECUTE; if (actuallyCreateFunction) { funcName = jsvMakeIntoVariableName(jslGetTokenValueAsVar(), 0); if (!funcName) { // out of memory return 0; } } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(funcName), 0); #ifndef SAVE_ON_FLASH funcVar = isClass ? jspeClassDefinition(false) : jspeFunctionDefinition(false); #else funcVar = jspeFunctionDefinition(false); #endif if (actuallyCreateFunction) { // find a function with the same name (or make one) // OPT: can Find* use just a JsVar that is a 'name'? JsVar *existingName = jspeiFindNameOnTop(funcName, true); JsVar *existingFunc = jsvSkipName(existingName); if (jsvIsFunction(existingFunc)) { // 'proper' replace, that keeps the original function var and swaps the children funcVar = jsvSkipNameAndUnLock(funcVar); jswrap_function_replaceWith(existingFunc, funcVar); } else { jsvReplaceWith(existingName, funcVar); } jsvUnLock(funcName); funcName = existingName; jsvUnLock(existingFunc); // existingName is used - don't UnLock } jsvUnLock(funcVar); return funcName; } NO_INLINE JsVar *jspeStatement() { #ifdef USE_DEBUGGER if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE && lex->tk!=';' && JSP_SHOULD_EXECUTE) { lex->tokenLastStart = lex->tokenStart; jsiDebuggerLoop(); } #endif if (lex->tk==LEX_ID || lex->tk==LEX_INT || lex->tk==LEX_FLOAT || lex->tk==LEX_STR || lex->tk==LEX_TEMPLATE_LITERAL || lex->tk==LEX_REGEX || lex->tk==LEX_R_NEW || lex->tk==LEX_R_NULL || lex->tk==LEX_R_UNDEFINED || lex->tk==LEX_R_TRUE || lex->tk==LEX_R_FALSE || lex->tk==LEX_R_THIS || lex->tk==LEX_R_DELETE || lex->tk==LEX_R_TYPEOF || lex->tk==LEX_R_VOID || lex->tk==LEX_R_SUPER || lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS || lex->tk=='!' || lex->tk=='-' || lex->tk=='+' || lex->tk=='~' || lex->tk=='[' || lex->tk=='(') { /* Execute a simple statement that only contains basic arithmetic... 
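     * e.g. (illustrative) `1+2;`, `foo(x);` or `a[0]=b` - anything opening
     * with a value, unary operator, bracket etc. parses as a plain expression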
*/ return jspeExpression(); } else if (lex->tk=='{') { /* A block of code */ if (!jspCheckStackPosition()) return 0; jspeBlock(); return 0; } else if (lex->tk==';') { /* Empty statement - to allow things like ;;; */ JSP_ASSERT_MATCH(';'); return 0; } else if (lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST) { return jspeStatementVar(); } else if (lex->tk==LEX_R_IF) { return jspeStatementIf(); } else if (lex->tk==LEX_R_DO) { return jspeStatementDoOrWhile(false); } else if (lex->tk==LEX_R_WHILE) { return jspeStatementDoOrWhile(true); } else if (lex->tk==LEX_R_FOR) { return jspeStatementFor(); } else if (lex->tk==LEX_R_TRY) { return jspeStatementTry(); } else if (lex->tk==LEX_R_RETURN) { return jspeStatementReturn(); } else if (lex->tk==LEX_R_THROW) { return jspeStatementThrow(); } else if (lex->tk==LEX_R_FUNCTION) { return jspeStatementFunctionDecl(false/* function */); #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_R_CLASS) { return jspeStatementFunctionDecl(true/* class */); #endif } else if (lex->tk==LEX_R_CONTINUE) { JSP_ASSERT_MATCH(LEX_R_CONTINUE); if (JSP_SHOULD_EXECUTE) { if (!(execInfo.execute & EXEC_IN_LOOP)) jsExceptionHere(JSET_SYNTAXERROR, "CONTINUE statement outside of FOR or WHILE loop"); else execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_CONTINUE; } } else if (lex->tk==LEX_R_BREAK) { JSP_ASSERT_MATCH(LEX_R_BREAK); if (JSP_SHOULD_EXECUTE) { if (!(execInfo.execute & (EXEC_IN_LOOP|EXEC_IN_SWITCH))) jsExceptionHere(JSET_SYNTAXERROR, "BREAK statement outside of SWITCH, FOR or WHILE loop"); else execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_BREAK; } } else if (lex->tk==LEX_R_SWITCH) { return jspeStatementSwitch(); } else if (lex->tk==LEX_R_DEBUGGER) { JSP_ASSERT_MATCH(LEX_R_DEBUGGER); #ifdef USE_DEBUGGER if (JSP_SHOULD_EXECUTE) jsiDebuggerLoop(); #endif } else JSP_MATCH(LEX_EOF); return 0; } // ----------------------------------------------------------------------------- /// Create a new built-in object that jswrapper can use to check for built-in functions JsVar *jspNewBuiltin(const char *instanceOf) { JsVar *objFunc = jswFindBuiltInFunction(0, instanceOf); if (!objFunc) return 0; // out of memory return objFunc; } /// Create a new Class of the given instance and return its prototype NO_INLINE JsVar *jspNewPrototype(const char *instanceOf) { JsVar *objFuncName = jsvFindChildFromString(execInfo.root, instanceOf, true); if (!objFuncName) // out of memory return 0; JsVar *objFunc = jsvSkipName(objFuncName); if (!objFunc) { objFunc = jspNewBuiltin(instanceOf); if (!objFunc) { // out of memory jsvUnLock(objFuncName); return 0; } // set up name jsvSetValueOfName(objFuncName, objFunc); } JsVar *prototypeName = jsvFindChildFromString(objFunc, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(objFunc, prototypeName); // make sure it's an object jsvUnLock2(objFunc, objFuncName); return prototypeName; } /** Create a new object of the given instance and add it to root with name 'name'. 
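 * e.g. (illustrative) jspNewObject("Serial1", "Serial") creates a Serial
 * instance, stores it in root as 'Serial1' and returns that name variable.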
 * If name!=0, added to root with name, and the name is returned
 * If name==0, not added to root and Object itself returned */
NO_INLINE JsVar *jspNewObject(const char *name, const char *instanceOf) {
  JsVar *prototypeName = jspNewPrototype(instanceOf);

  JsVar *obj = jsvNewObject();
  if (!obj) { // out of memory
    jsvUnLock(prototypeName);
    return 0;
  }
  if (name) {
    // If it's a device, set the device number up as the Object data
    // See jsiGetDeviceFromClass
    IOEventFlags device = jshFromDeviceString(name);
    if (device!=EV_NONE) {
      obj->varData.str[0] = 'D';
      obj->varData.str[1] = 'E';
      obj->varData.str[2] = 'V';
      obj->varData.str[3] = (char)device;
    }
  }
  // add __proto__
  JsVar *prototypeVar = jsvSkipName(prototypeName);
  jsvUnLock3(jsvAddNamedChild(obj, prototypeVar, JSPARSE_INHERITS_VAR), prototypeVar, prototypeName);
  prototypeName = 0;

  if (name) {
    JsVar *objName = jsvFindChildFromString(execInfo.root, name, true);
    if (objName) jsvSetValueOfName(objName, obj);
    jsvUnLock(obj);
    if (!objName) { // out of memory
      return 0;
    }
    return objName;
  } else
    return obj;
}

/** Returns true if the constructor function given is the same as that
 * of the object with the given name. */
bool jspIsConstructor(JsVar *constructor, const char *constructorName) {
  JsVar *objFunc = jsvObjectGetChild(execInfo.root, constructorName, 0);
  if (!objFunc) return false;
  bool isConstructor = objFunc == constructor;
  jsvUnLock(objFunc);
  return isConstructor;
}

/** Get the prototype of the given object, or return 0 if not found, or not an object */
JsVar *jspGetPrototype(JsVar *object) {
  if (!jsvIsObject(object)) return 0;
  JsVar *proto = jsvObjectGetChild(object, JSPARSE_INHERITS_VAR, 0);
  if (jsvIsObject(proto)) return proto;
  jsvUnLock(proto);
  return 0;
}

/** Get the constructor of the given object, or return 0 if not found, or not a function */
JsVar *jspGetConstructor(JsVar *object) {
  JsVar *proto = jspGetPrototype(object);
  if (proto) {
    JsVar *constr = jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0);
    if (jsvIsFunction(constr)) {
      jsvUnLock(proto);
      return constr;
    }
    jsvUnLock2(constr, proto);
  }
  return 0;
}

// -----------------------------------------------------------------------------

void jspSoftInit() {
  execInfo.root = jsvFindOrCreateRoot();
  // Root now has a lock and a ref
  execInfo.hiddenRoot = jsvObjectGetChild(execInfo.root, JS_HIDDEN_CHAR_STR, JSV_OBJECT);
  execInfo.execute = EXEC_YES;
}

void jspSoftKill() {
  jsvUnLock(execInfo.scopesVar);
  execInfo.scopesVar = 0;
  jsvUnLock(execInfo.hiddenRoot);
  execInfo.hiddenRoot = 0;
  jsvUnLock(execInfo.root);
  execInfo.root = 0;
  // Root is now left with just a ref
}

void jspInit() {
  jspSoftInit();
}

void jspKill() {
  jspSoftKill();
  // Unreffing this should completely kill everything attached to root
  JsVar *r = jsvFindOrCreateRoot();
  jsvUnRef(r);
  jsvUnLock(r);
}

/** Evaluate the given variable as an expression (in current scope) */
JsVar *jspEvaluateExpressionVar(JsVar *str) {
  JsLex lex;

  assert(jsvIsString(str));
  JsLex *oldLex = jslSetLex(&lex);
  jslInit(str);
#ifndef ESPR_NO_LINE_NUMBERS
  lex.lineNumberOffset = oldLex->lineNumberOffset;
#endif

  // actually do the parsing
  JsVar *v = jspeExpression();
  jslKill();
  jslSetLex(oldLex);
  return jsvSkipNameAndUnLock(v);
}

/** Execute code from a variable and return the result.
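 * e.g. (sketch) if 'str' holds the text "1+2", jspEvaluateVar(str, 0, 0)
 * hands back an integer JsVar of 3, which the caller must jsvUnLock().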
If lineNumberOffset * is nonzero it's added to the line numbers that get reported for errors/debug */ JsVar *jspEvaluateVar(JsVar *str, JsVar *scope, uint16_t lineNumberOffset) { JsLex lex; assert(jsvIsString(str)); JsLex *oldLex = jslSetLex(&lex); jslInit(str); #ifndef ESPR_NO_LINE_NUMBERS lex.lineNumberOffset = lineNumberOffset; #endif JsExecInfo oldExecInfo = execInfo; execInfo.execute = EXEC_YES; if (scope) { // if we're adding a scope, make sure it's the *only* scope execInfo.scopesVar = 0; if (scope!=execInfo.root) jspeiAddScope(scope); // it's searched by default anyway } // actually do the parsing JsVar *v = jspParse(); // clean up if (scope) jspeiClearScopes(); jslKill(); jslSetLex(oldLex); // restore state and execInfo (keep error flags & ctrl-c) oldExecInfo.execute |= execInfo.execute & EXEC_PERSIST; execInfo = oldExecInfo; // It may have returned a reference, but we just want the value... return jsvSkipNameAndUnLock(v); } JsVar *jspEvaluate(const char *str, bool stringIsStatic) { /* using a memory area is more efficient, but the interpreter * may use substrings from it for function code. This means that * if the string goes away, everything gets corrupted - hence * the option here. */ JsVar *evCode; if (stringIsStatic) evCode = jsvNewNativeString((char*)str, strlen(str)); else evCode = jsvNewFromString(str); if (!evCode) return 0; JsVar *v = 0; if (!jsvIsMemoryFull()) v = jspEvaluateVar(evCode, 0, 0); jsvUnLock(evCode); return v; } JsVar *jspExecuteJSFunction(const char *jsCode, JsVar *thisArg, int argCount, JsVar **argPtr) { JsVar *fn = jspEvaluate(jsCode,true); JsVar *result = jspExecuteFunction(fn,thisArg,argCount,argPtr); jsvUnLock(fn); return result; } JsVar *jspExecuteFunction(JsVar *func, JsVar *thisArg, int argCount, JsVar **argPtr) { JsExecInfo oldExecInfo = execInfo; execInfo.scopesVar = 0; execInfo.execute = EXEC_YES; execInfo.thisVar = 0; JsVar *result = jspeFunctionCall(func, 0, thisArg, false, argCount, argPtr); // clean up jspeiClearScopes(); // restore state and execInfo (keep error flags & ctrl-c) oldExecInfo.execute |= execInfo.execute&EXEC_PERSIST; jspeiClearScopes(); execInfo = oldExecInfo; return result; } /// Evaluate a JavaScript module and return its exports JsVar *jspEvaluateModule(JsVar *moduleContents) { assert(jsvIsString(moduleContents) || jsvIsFunction(moduleContents)); if (jsvIsFunction(moduleContents)) { moduleContents = jsvObjectGetChild(moduleContents,JSPARSE_FUNCTION_CODE_NAME,0); if (!jsvIsString(moduleContents)) { jsvUnLock(moduleContents); return 0; } } else jsvLockAgain(moduleContents); JsVar *scope = jsvNewObject(); JsVar *scopeExports = jsvNewObject(); if (!scope || !scopeExports) { // out of mem jsvUnLock3(scope, scopeExports, moduleContents); return 0; } JsVar *exportsName = jsvAddNamedChild(scope, scopeExports, "exports"); jsvUnLock2(scopeExports, jsvAddNamedChild(scope, scope, "module")); JsExecFlags oldExecute = execInfo.execute; JsVar *oldThisVar = execInfo.thisVar; execInfo.thisVar = scopeExports; // set 'this' variable to exports jsvUnLock(jspEvaluateVar(moduleContents, scope, 0)); execInfo.thisVar = oldThisVar; execInfo.execute = oldExecute; // make sure we fully restore state after parsing a module jsvUnLock2(moduleContents, scope); return jsvSkipNameAndUnLock(exportsName); } /** Get the owner of the current prototype. We assume that it's * the first item in the array, because that's what we will * have added when we created it. It's safe to call this on * non-prototypes and non-objects. 
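 * (in practice this just returns the prototype's 'constructor' property)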
 */
JsVar *jspGetPrototypeOwner(JsVar *proto) {
  if (jsvIsObject(proto) || jsvIsArray(proto)) {
    return jsvSkipNameAndUnLock(jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0));
  }
  return 0;
}
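
/* Example (sketch, not part of the original source): how an embedder might
 * drive the evaluation API above. Assumes the interpreter is already
 * initialised (jspInit() etc.); the guard macro and function name here are
 * made up for illustration. */
#ifdef JSPARSE_USAGE_EXAMPLE
static void exampleEvaluate(void) {
  // stringIsStatic=true: the interpreter may keep pointers into the string,
  // so it must outlive the returned value (fine for a literal)
  JsVar *result = jspEvaluate("var a=21; a*2", true); // -> integer JsVar 42
  jsvUnLock(result); // caller owns the returned lock
}
#endif // JSPARSE_USAGE_EXAMPLE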
/*
 * This file is part of Espruino, a JavaScript interpreter for Microcontrollers
 *
 * Copyright (C) 2013 Gordon Williams <gw@pur3.co.uk>
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * ----------------------------------------------------------------------------
 * Recursive descent parser for code execution
 * ----------------------------------------------------------------------------
 */
#include "jsparse.h"
#include "jsinteractive.h"
#include "jswrapper.h"
#include "jsnative.h"
#include "jswrap_object.h" // for function_replacewith
#include "jswrap_functions.h" // insane check for eval in jspeFunctionCall
#include "jswrap_json.h" // for jsfPrintJSON
#include "jswrap_espruino.h" // for jswrap_espruino_memoryArea
#ifndef SAVE_ON_FLASH
#include "jswrap_regexp.h" // for jswrap_regexp_constructor
#endif

/* Info about execution when Parsing - this saves passing it on the stack
 * for each call */
JsExecInfo execInfo;

// ----------------------------------------------- Forward decls
JsVar *jspeAssignmentExpression();
JsVar *jspeExpression();
JsVar *jspeUnaryExpression();
void jspeBlock();
void jspeBlockNoBrackets();
JsVar *jspeStatement();
JsVar *jspeFactor();
void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName);
#ifndef SAVE_ON_FLASH
JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a);
#endif

// ----------------------------------------------- Utils
#define JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, CLEANUP_CODE, RETURN_VAL) { if (!jslMatch((TOKEN))) { CLEANUP_CODE; return RETURN_VAL; } }
#define JSP_MATCH_WITH_RETURN(TOKEN, RETURN_VAL) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , RETURN_VAL)
#define JSP_MATCH(TOKEN) JSP_MATCH_WITH_CLEANUP_AND_RETURN(TOKEN, , 0) // Match where the user could have given us the wrong token
#define JSP_ASSERT_MATCH(TOKEN) { assert(lex->tk==(TOKEN));jslGetNextToken(); } // Match where if we have the wrong token, it's an internal error
#define JSP_SHOULD_EXECUTE (((execInfo.execute)&EXEC_RUN_MASK)==EXEC_YES)
#define JSP_SAVE_EXECUTE() JsExecFlags oldExecute = execInfo.execute
#define JSP_RESTORE_EXECUTE() execInfo.execute = (execInfo.execute&(JsExecFlags)(~EXEC_SAVE_RESTORE_MASK)) | (oldExecute&EXEC_SAVE_RESTORE_MASK);
#define JSP_HAS_ERROR (((execInfo.execute)&EXEC_ERROR_MASK)!=0)
#define JSP_SHOULDNT_PARSE (((execInfo.execute)&EXEC_NO_PARSE_MASK)!=0)

ALWAYS_INLINE void jspDebuggerLoopIfCtrlC() {
#ifdef USE_DEBUGGER
  if (execInfo.execute & EXEC_CTRL_C_WAIT && JSP_SHOULD_EXECUTE)
    jsiDebuggerLoop();
#endif
}

/// if interrupting execution, this is set
bool jspIsInterrupted() {
  return (execInfo.execute & EXEC_INTERRUPTED)!=0;
}

/// set whether execution should be interrupted
void jspSetInterrupted(bool interrupt) {
  if (interrupt)
    execInfo.execute = execInfo.execute | EXEC_INTERRUPTED;
  else
    execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_INTERRUPTED;
}

/// Set the error flag - set lineReported if we've already output the line number
void jspSetError(bool lineReported) {
  execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_YES) | EXEC_ERROR;
  if (lineReported)
    execInfo.execute |= EXEC_ERROR_LINE_REPORTED;
}

bool jspHasError() {
  return JSP_HAS_ERROR;
}

void jspeiClearScopes() {
  jsvUnLock(execInfo.scopesVar);
  execInfo.scopesVar = 0;
}

bool jspeiAddScope(JsVar *scope) {
  if (!execInfo.scopesVar)
    execInfo.scopesVar = jsvNewEmptyArray();
  if (!execInfo.scopesVar) return false;
  jsvArrayPush(execInfo.scopesVar,
scope); return true; } void jspeiRemoveScope() { if (!execInfo.scopesVar || !jsvGetArrayLength(execInfo.scopesVar)) { jsExceptionHere(JSET_INTERNALERROR, "Too many scopes removed"); jspSetError(false); return; } jsvUnLock(jsvArrayPop(execInfo.scopesVar)); if (!jsvGetFirstChild(execInfo.scopesVar)) { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; } } JsVar *jspeiFindInScopes(const char *name) { if (execInfo.scopesVar) { JsVar *it = jsvLockSafe(jsvGetLastChild(execInfo.scopesVar)); while (it) { JsVar *scope = jsvSkipName(it); JsVarRef next = jsvGetPrevSibling(it); JsVar *ref = jsvFindChildFromString(scope, name, false); jsvUnLock2(it, scope); if (ref) return ref; it = jsvLockSafe(next); } } return jsvFindChildFromString(execInfo.root, name, false); } /// Return the topmost scope (and lock it) JsVar *jspeiGetTopScope() { if (execInfo.scopesVar) { JsVar *scope = jsvGetLastArrayItem(execInfo.scopesVar); if (scope) return scope; } return jsvLockAgain(execInfo.root); } JsVar *jspeiFindOnTop(const char *name, bool createIfNotFound) { JsVar *scope = jspeiGetTopScope(); JsVar *result = jsvFindChildFromString(scope, name, createIfNotFound); jsvUnLock(scope); return result; } JsVar *jspeiFindNameOnTop(JsVar *childName, bool createIfNotFound) { JsVar *scope = jspeiGetTopScope(); JsVar *result = jsvFindChildFromVar(scope, childName, createIfNotFound); jsvUnLock(scope); return result; } JsVar *jspFindPrototypeFor(const char *className) { JsVar *obj = jsvObjectGetChild(execInfo.root, className, 0); if (!obj) return 0; JsVar *proto = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0); jsvUnLock(obj); return proto; } /** Here we assume that we have already looked in the parent itself - * and are now going down looking at the stuff it inherited */ JsVar *jspeiFindChildFromStringInParents(JsVar *parent, const char *name) { if (jsvIsObject(parent)) { // If an object, look for an 'inherits' var JsVar *inheritsFrom = jsvObjectGetChild(parent, JSPARSE_INHERITS_VAR, 0); // if there's no inheritsFrom, just default to 'Object.prototype' if (!inheritsFrom) inheritsFrom = jspFindPrototypeFor("Object"); if (inheritsFrom && inheritsFrom!=parent) { // we have what it inherits from (this is ACTUALLY the prototype var) // https://developer.mozilla.org/en-US/docs/JavaScript/Reference/Global_Objects/Object/proto JsVar *child = jsvFindChildFromString(inheritsFrom, name, false); if (!child) child = jspeiFindChildFromStringInParents(inheritsFrom, name); jsvUnLock(inheritsFrom); if (child) return child; } else jsvUnLock(inheritsFrom); } else { // Not actually an object - but might be an array/string/etc const char *objectName = jswGetBasicObjectName(parent); while (objectName) { JsVar *objName = jsvFindChildFromString(execInfo.root, objectName, false); if (objName) { JsVar *result = 0; JsVar *obj = jsvSkipNameAndUnLock(objName); // could be something the user has made - eg. 'Array=1' if (jsvHasChildren(obj)) { // We have found an object with this name - search for the prototype var JsVar *proto = jsvObjectGetChild(obj, JSPARSE_PROTOTYPE_VAR, 0); if (proto) { result = jsvFindChildFromString(proto, name, false); jsvUnLock(proto); } } jsvUnLock(obj); if (result) return result; } /* We haven't found anything in the actual object, we should check the 'Object' itself eg, we tried 'String', so now we should try 'Object'. Built-in types don't have room for a prototype field, so we hard-code it */ objectName = jswGetBasicObjectPrototypeName(objectName); } } // no luck! 
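  // (e.g. a 'charAt' lookup on a string value tries "String" then "Object"
  // here, following jswGetBasicObjectPrototypeName - illustrative order)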
return 0; } JsVar *jspeiGetScopesAsVar() { if (!execInfo.scopesVar) return 0; // no scopes! // If just one element, return it (no array) if (jsvGetArrayLength(execInfo.scopesVar)==1) { JsVar *v = jsvGetLastArrayItem(execInfo.scopesVar); // this is faster than getting by index return v; } // Copy this - because if we just returned it, the underlying array would get altered return jsvCopy(execInfo.scopesVar, true); } void jspeiLoadScopesFromVar(JsVar *arr) { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; if (arr) { if (jsvIsArray(arr)) { // TODO: copy on write? would make function calls faster execInfo.scopesVar = jsvCopy(arr, true); } else { // just a single item,but we must package it in an array execInfo.scopesVar = jsvNewArray(&arr, 1); } } } // ----------------------------------------------- /// Check that we have enough stack to recurse. Return true if all ok, error if not. bool jspCheckStackPosition() { if (jsuGetFreeStack() < 512) { // giving us 512 bytes leeway jsExceptionHere(JSET_ERROR, "Too much recursion - the stack is about to overflow"); jspSetInterrupted(true); return false; } return true; } // Set execFlags such that we are not executing void jspSetNoExecute() { execInfo.execute = (execInfo.execute & (JsExecFlags)(int)~EXEC_RUN_MASK) | EXEC_NO; } void jspAppendStackTrace(JsVar *stackTrace) { JsvStringIterator it; jsvStringIteratorNew(&it, stackTrace, 0); jsvStringIteratorGotoEnd(&it); jslPrintPosition((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart); jslPrintTokenLineMarker((vcbprintf_callback)jsvStringIteratorPrintfCallback, &it, lex->tokenLastStart, 0); jsvStringIteratorFree(&it); } /// We had an exception (argument is the exception's value) void jspSetException(JsVar *value) { // Add the exception itself to a variable in root scope JsVar *exception = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, true); if (exception) { jsvSetValueOfName(exception, value); jsvUnLock(exception); } // Set the exception flag execInfo.execute = execInfo.execute | EXEC_EXCEPTION; // Try and do a stack trace if (lex) { JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, " at "); jspAppendStackTrace(stackTrace); jsvUnLock(stackTrace); // stop us from printing the trace in the same block execInfo.execute = execInfo.execute | EXEC_ERROR_LINE_REPORTED; } } } /** Return the reported exception if there was one (and clear it) */ JsVar *jspGetException() { JsVar *exceptionName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_EXCEPTION_VAR, false); if (exceptionName) { JsVar *exception = jsvSkipName(exceptionName); jsvRemoveChild(execInfo.hiddenRoot, exceptionName); jsvUnLock(exceptionName); JsVar *stack = jspGetStackTrace(); if (stack && jsvHasChildren(exception)) { jsvObjectSetChild(exception, "stack", stack); } jsvUnLock(stack); return exception; } return 0; } /** Return a stack trace string if there was one (and clear it) */ JsVar *jspGetStackTrace() { JsVar *stackTraceName = jsvFindChildFromString(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, false); if (stackTraceName) { JsVar *stackTrace = jsvSkipName(stackTraceName); jsvRemoveChild(execInfo.hiddenRoot, stackTraceName); jsvUnLock(stackTraceName); return stackTrace; } return 0; } // ---------------------------------------------- // we return a value so that JSP_MATCH can return 0 if it fails (if we pass 0, we just parse all args) NO_INLINE bool jspeFunctionArguments(JsVar *funcVar) { 
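  // Each argument name is stored as a child of funcVar prefixed with '\xFF'
  // and flagged as a parameter - see jsvMakeFunctionParameter below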
JSP_MATCH('('); while (lex->tk!=')') { if (funcVar) { char buf[JSLEX_MAX_TOKEN_LENGTH+1]; buf[0] = '\xFF'; strcpy(&buf[1], jslGetTokenValueAsString()); JsVar *param = jsvAddNamedChild(funcVar, 0, buf); if (!param) { // out of memory jspSetError(false); return false; } jsvMakeFunctionParameter(param); // force this to be called a function parameter jsvUnLock(param); } JSP_MATCH(LEX_ID); if (lex->tk!=')') JSP_MATCH(','); } JSP_MATCH(')'); return true; } // Parse function, assuming we're on '{'. funcVar can be 0. returns 'true' is the function included the 'this' keyword NO_INLINE bool jspeFunctionDefinitionInternal(JsVar *funcVar, bool expressionOnly) { bool forcePretokenise = false; if (expressionOnly) { if (funcVar) funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN; } else { JSP_MATCH('{'); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_STR) { if (!strcmp(jslGetTokenValueAsString(), "compiled")) jsWarn("Function marked with \"compiled\" uploaded in source form"); if (lex->tk==LEX_STR && !strcmp(jslGetTokenValueAsString(), "ram")) { JSP_ASSERT_MATCH(LEX_STR); forcePretokenise = true; } } #endif /* If the function starts with return, treat it specially - * we don't want to store the 'return' part of it */ if (funcVar && lex->tk==LEX_R_RETURN) { funcVar->flags = (funcVar->flags & ~JSV_VARTYPEMASK) | JSV_FUNCTION_RETURN; JSP_ASSERT_MATCH(LEX_R_RETURN); } } #ifndef ESPR_NO_LINE_NUMBERS // Get the line number (if needed) JsVarInt lineNumber = 0; if (funcVar && lex->lineNumberOffset && !(forcePretokenise||jsfGetFlag(JSF_PRETOKENISE))) { // jslGetLineNumber is slow, so we only do it if we have debug info lineNumber = (JsVarInt)jslGetLineNumber() + (JsVarInt)lex->lineNumberOffset - 1; } #endif // Get the code - parse it and figure out where it stops JslCharPos funcBegin; jslSkipWhiteSpace(); jslCharPosNew(&funcBegin, lex->sourceVar, lex->tokenStart); int lastTokenEnd = -1; lex->hadThisKeyword = lex->tk == LEX_R_THIS; if (!expressionOnly) { int brackets = 0; while (lex->tk && (brackets || lex->tk != '}')) { if (lex->tk == '{') brackets++; if (lex->tk == '}') brackets--; lastTokenEnd = (int)jsvStringIteratorGetIndex(&lex->it)-1; JSP_ASSERT_MATCH(lex->tk); } // FIXME: we might be including whitespace after the last token } else { JsExecFlags oldExec = execInfo.execute; execInfo.execute = EXEC_NO; jsvUnLock(jspeAssignmentExpression()); execInfo.execute = oldExec; lastTokenEnd = (int)lex->tokenStart; } bool hadThisKeyword = lex->hadThisKeyword; // Then create var and set (if there was any code!) if (funcVar && lastTokenEnd>0) { // code var JsVar *funcCodeVar; if (!forcePretokenise && jsvIsNativeString(lex->sourceVar)) { /* If we're parsing from a Native String (eg. E.memoryArea, E.setBootCode) then use another Native String to load function code straight from flash */ int s = (int)jsvStringIteratorGetIndex(&funcBegin.it) - 1; funcCodeVar = jsvNewNativeString(lex->sourceVar->varData.nativeStr.ptr + s, (unsigned int)(lastTokenEnd - s)); #ifdef SPIFLASH_BASE } else if (!forcePretokenise && jsvIsFlashString(lex->sourceVar)) { /* If we're parsing from a Flash String (eg. 
loaded from Storage on Bangle.js) then use another Flash String to load function code straight from flash*/ int s = (int)jsvStringIteratorGetIndex(&funcBegin.it) - 1; funcCodeVar = jsvNewFlashString(lex->sourceVar->varData.nativeStr.ptr + s, (unsigned int)(lastTokenEnd - s)); #endif } else { if (jsfGetFlag(JSF_PRETOKENISE) || forcePretokenise) { funcCodeVar = jslNewTokenisedStringFromLexer(&funcBegin, (size_t)lastTokenEnd); } else { funcCodeVar = jslNewStringFromLexer(&funcBegin, (size_t)lastTokenEnd); } } jsvUnLock2(jsvAddNamedChild(funcVar, funcCodeVar, JSPARSE_FUNCTION_CODE_NAME), funcCodeVar); // scope var JsVar *funcScopeVar = jspeiGetScopesAsVar(); if (funcScopeVar) { jsvUnLock2(jsvAddNamedChild(funcVar, funcScopeVar, JSPARSE_FUNCTION_SCOPE_NAME), funcScopeVar); } #ifndef ESPR_NO_LINE_NUMBERS // If we've got a line number, add a var for it if (lineNumber) { JsVar *funcLineNumber = jsvNewFromInteger(lineNumber); if (funcLineNumber) { jsvUnLock2(jsvAddNamedChild(funcVar, funcLineNumber, JSPARSE_FUNCTION_LINENUMBER_NAME), funcLineNumber); } } #endif } jslCharPosFree(&funcBegin); if (!expressionOnly) JSP_MATCH('}'); return hadThisKeyword; } // Parse function (after 'function' has occurred NO_INLINE JsVar *jspeFunctionDefinition(bool parseNamedFunction) { // actually parse a function... We assume that the LEX_FUNCTION and name // have already been parsed JsVar *funcVar = 0; bool actuallyCreateFunction = JSP_SHOULD_EXECUTE; if (actuallyCreateFunction) funcVar = jsvNewWithFlags(JSV_FUNCTION); JsVar *functionInternalName = 0; if (parseNamedFunction && lex->tk==LEX_ID) { // you can do `var a = function foo() { foo(); };` - so cope with this if (funcVar) functionInternalName = jslGetTokenValueAsVar(); // note that we don't add it to the beginning, because it would mess up our function call code JSP_ASSERT_MATCH(LEX_ID); } // Get arguments save them to the structure if (!jspeFunctionArguments(funcVar)) { jsvUnLock2(functionInternalName, funcVar); // parse failed return 0; } // Parse the actual function block jspeFunctionDefinitionInternal(funcVar, false); // if we had a function name, add it to the end (if we don't it gets confused with arguments) if (funcVar && functionInternalName) jsvObjectSetChildAndUnLock(funcVar, JSPARSE_FUNCTION_NAME_NAME, functionInternalName); return funcVar; } /* Parse just the brackets of a function - and throw * everything away */ NO_INLINE bool jspeParseFunctionCallBrackets() { assert(!JSP_SHOULD_EXECUTE); JSP_MATCH('('); while (!JSP_SHOULDNT_PARSE && lex->tk != ')') { jsvUnLock(jspeAssignmentExpression()); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_ARROW_FUNCTION) { jsvUnLock(jspeArrowFunction(0, 0)); } #endif if (lex->tk!=')') JSP_MATCH(','); } if (!JSP_SHOULDNT_PARSE) JSP_MATCH(')'); return 0; } /** Handle a function call (assumes we've parsed the function name and we're * on the start bracket). 
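 * e.g. (sketch) a direct call from C with pre-parsed args, no parsing:
 *   JsVar *args[2] = { jsvNewFromInteger(1), jsvNewFromInteger(2) };
 *   JsVar *r = jspeFunctionCall(func, 0, 0, false, 2, args); // r = result
 *   jsvUnLockMany(2, args); jsvUnLock(r);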
'thisArg' is the value of the 'this' variable when the * function is executed (it's usually the parent object) * * * NOTE: this does not set the execInfo flags - so if execInfo==EXEC_NO, it won't execute * * If !isParsing and arg0!=0, argument 0 is set to what is supplied (same with arg1) * * functionName is used only for error reporting - and can be 0 */ NO_INLINE JsVar *jspeFunctionCall(JsVar *function, JsVar *functionName, JsVar *thisArg, bool isParsing, int argCount, JsVar **argPtr) { if (JSP_SHOULD_EXECUTE && !function) { if (functionName) jsExceptionHere(JSET_ERROR, "Function %q not found!", functionName); else jsExceptionHere(JSET_ERROR, "Function not found!", functionName); return 0; } if (JSP_SHOULD_EXECUTE) if (!jspCheckStackPosition()) return 0; // try and ensure that we won't overflow our stack if (JSP_SHOULD_EXECUTE && function) { JsVar *returnVar = 0; if (!jsvIsFunction(function)) { jsExceptionHere(JSET_ERROR, "Expecting a function to call, got %t", function); return 0; } JsVar *thisVar = jsvLockAgainSafe(thisArg); if (isParsing) JSP_MATCH('('); /* Ok, so we have 4 options here. * * 1: we're native. * a) args have been pre-parsed, which is awesome * b) we have to parse our own args into an array * 2: we're not native * a) args were pre-parsed and we have to populate the function * b) we parse our own args, which is possibly better */ if (jsvIsNativeFunction(function)) { // ------------------------------------- NATIVE unsigned int argPtrSize = 0; int boundArgs = 0; // Add 'bound' parameters if there were any JsvObjectIterator it; jsvObjectIteratorNew(&it, function); JsVar *param = jsvObjectIteratorGetKey(&it); while (jsvIsFunctionParameter(param)) { if ((unsigned)argCount>=argPtrSize) { // allocate more space on stack if needed unsigned int newArgPtrSize = (argPtrSize?argPtrSize:(unsigned int)argCount)*4; size_t newArgPtrByteSize = sizeof(JsVar*)*newArgPtrSize; if (jsuGetFreeStack() < 256+newArgPtrByteSize) { jsExceptionHere(JSET_ERROR, "Insufficient stack for this many arguments"); jsvUnLock(thisVar); return 0; } JsVar **newArgPtr = (JsVar**)alloca(newArgPtrByteSize); memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*)); argPtr = newArgPtr; argPtrSize = newArgPtrSize; } // if we already had arguments - shift them up... 
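        // (bound arguments - e.g. added by Function.bind - live on the
        // function as parameter children and must precede parsed arguments)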
int i; for (i=argCount-1;i>=boundArgs;i--) argPtr[i+1] = argPtr[i]; // add bound argument argPtr[boundArgs] = jsvSkipName(param); argCount++; boundArgs++; jsvUnLock(param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); } // check if 'this' was defined while (param) { if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) { jsvUnLock(thisVar); thisVar = jsvSkipName(param); break; } jsvUnLock(param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); } jsvUnLock(param); jsvObjectIteratorFree(&it); // Now, if we're parsing add the rest of the arguments int allocatedArgCount = boundArgs; if (isParsing) { while (!JSP_HAS_ERROR && lex->tk!=')' && lex->tk!=LEX_EOF) { if ((unsigned)argCount>=argPtrSize) { // allocate more space on stack unsigned int newArgPtrSize = argPtrSize?argPtrSize*4:16; JsVar **newArgPtr = (JsVar**)alloca(sizeof(JsVar*)*newArgPtrSize); memcpy(newArgPtr, argPtr, (unsigned)argCount*sizeof(JsVar*)); argPtr = newArgPtr; argPtrSize = newArgPtrSize; } argPtr[argCount++] = jsvSkipNameAndUnLock(jspeAssignmentExpression()); if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',',jsvUnLockMany((unsigned)argCount, argPtr);jsvUnLock(thisVar);, 0); } JSP_MATCH(')'); allocatedArgCount = argCount; } void *nativePtr = jsvGetNativeFunctionPtr(function); JsVar *oldThisVar = execInfo.thisVar; if (thisVar) execInfo.thisVar = jsvRef(thisVar); else { if (nativePtr==jswrap_eval) { // eval gets to use the current scope /* Note: proper JS has some utterly insane code that depends on whether * eval is an lvalue or not: * * http://stackoverflow.com/questions/9107240/1-evalthis-vs-evalthis-in-javascript * * Doing this in Espruino is quite an upheaval for that one * slightly insane case - so it's not implemented. */ if (execInfo.thisVar) execInfo.thisVar = jsvRef(execInfo.thisVar); } else { execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root } } if (nativePtr && !JSP_HAS_ERROR) { returnVar = jsnCallFunction(nativePtr, function->varData.native.argTypes, thisVar, argPtr, argCount); assert(!jsvIsName(returnVar)); } else { returnVar = 0; } // unlock values if we locked them jsvUnLockMany((unsigned)allocatedArgCount, argPtr); /* Return to old 'this' var. No need to unlock as we never locked before */ if (execInfo.thisVar) jsvUnRef(execInfo.thisVar); execInfo.thisVar = oldThisVar; } else { // ----------------------------------------------------- NOT NATIVE // create a new symbol table entry for execution of this function // OPT: can we cache this function execution environment + param variables? // OPT: Probably when calling a function ONCE, use it, otherwise when recursing, make new? JsVar *functionRoot = jsvNewWithFlags(JSV_FUNCTION); if (!functionRoot) { // out of memory jspSetError(false); jsvUnLock(thisVar); return 0; } JsVar *functionScope = 0; JsVar *functionCode = 0; JsVar *functionInternalName = 0; #ifndef ESPR_NO_LINE_NUMBERS uint16_t functionLineNumber = 0; #endif /** NOTE: We expect that the function object will have: * * * Parameters * * Code/Scope/Name * * IN THAT ORDER. 
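   * e.g. (illustrative) `function add(a,b) { return a+b; }` stores children
   * for 'a' and 'b' first, then its code var, scope and name vars.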
*/ JsvObjectIterator it; jsvObjectIteratorNew(&it, function); JsVar *param = jsvObjectIteratorGetKey(&it); JsVar *value = jsvObjectIteratorGetValue(&it); while (jsvIsFunctionParameter(param) && value) { jsvAddFunctionParameter(functionRoot, jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH), value); jsvUnLock2(value, param); jsvObjectIteratorNext(&it); param = jsvObjectIteratorGetKey(&it); value = jsvObjectIteratorGetValue(&it); } jsvUnLock2(value, param); if (isParsing) { int hadParams = 0; // grab in all parameters. We go around this loop until we've run out // of named parameters AND we've parsed all the supplied arguments while (!JSP_SHOULDNT_PARSE && lex->tk!=')') { JsVar *param = jsvObjectIteratorGetKey(&it); bool paramDefined = jsvIsFunctionParameter(param); if (lex->tk!=')' || paramDefined) { hadParams++; JsVar *value = 0; // ONLY parse this if it was supplied, otherwise leave 0 (undefined) if (lex->tk!=')') value = jspeAssignmentExpression(); // and if execute, copy it over value = jsvSkipNameAndUnLock(value); jsvAddFunctionParameter(functionRoot, paramDefined?jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH):0, value); jsvUnLock(value); if (lex->tk!=')') JSP_MATCH(','); } jsvUnLock(param); if (paramDefined) jsvObjectIteratorNext(&it); } JSP_MATCH(')'); } else { // and NOT isParsing int args = 0; while (args<argCount) { JsVar *param = jsvObjectIteratorGetKey(&it); bool paramDefined = jsvIsFunctionParameter(param); jsvAddFunctionParameter(functionRoot, paramDefined?jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH):0, argPtr[args]); args++; jsvUnLock(param); if (paramDefined) jsvObjectIteratorNext(&it); } } // Now go through what's left while (jsvObjectIteratorHasValue(&it)) { JsVar *param = jsvObjectIteratorGetKey(&it); if (jsvIsString(param)) { if (jsvIsStringEqual(param, JSPARSE_FUNCTION_SCOPE_NAME)) functionScope = jsvSkipName(param); else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_CODE_NAME)) functionCode = jsvSkipName(param); else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_NAME_NAME)) functionInternalName = jsvSkipName(param); else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_THIS_NAME)) { jsvUnLock(thisVar); thisVar = jsvSkipName(param); } #ifndef ESPR_NO_LINE_NUMBERS else if (jsvIsStringEqual(param, JSPARSE_FUNCTION_LINENUMBER_NAME)) functionLineNumber = (uint16_t)jsvGetIntegerAndUnLock(jsvSkipName(param)); #endif else if (jsvIsFunctionParameter(param)) { JsVar *defaultVal = jsvSkipName(param); jsvAddFunctionParameter(functionRoot, jsvNewFromStringVar(param,1,JSVAPPENDSTRINGVAR_MAXLENGTH), defaultVal); jsvUnLock(defaultVal); } } jsvUnLock(param); jsvObjectIteratorNext(&it); } jsvObjectIteratorFree(&it); // setup a the function's name (if a named function) if (functionInternalName) { JsVar *name = jsvMakeIntoVariableName(jsvNewFromStringVar(functionInternalName,0,JSVAPPENDSTRINGVAR_MAXLENGTH), function); jsvAddName(functionRoot, name); jsvUnLock2(name, functionInternalName); } if (!JSP_HAS_ERROR) { // save old scopes and reset scope list JsVar *oldScopeVar = execInfo.scopesVar; execInfo.scopesVar = 0; // if we have a scope var, load it up. 
We may not have one if there were no scopes apart from root if (functionScope) { jspeiLoadScopesFromVar(functionScope); jsvUnLock(functionScope); } // add the function's execute space to the symbol table so we can recurse if (jspeiAddScope(functionRoot)) { /* Adding scope may have failed - we may have descended too deep - so be sure * not to pull somebody else's scope off */ JsVar *oldThisVar = execInfo.thisVar; if (thisVar) execInfo.thisVar = jsvRef(thisVar); else execInfo.thisVar = jsvRef(execInfo.root); // 'this' should always default to root /* we just want to execute the block, but something could * have messed up and left us with the wrong Lexer, so * we want to be careful here... */ if (functionCode) { #ifdef USE_DEBUGGER bool hadDebuggerNextLineOnly = false; if (execInfo.execute&EXEC_DEBUGGER_STEP_INTO) { if (functionName) jsiConsolePrintf("Stepping into %v\n", functionName); else jsiConsolePrintf("Stepping into function\n", functionName); } else { hadDebuggerNextLineOnly = execInfo.execute&EXEC_DEBUGGER_NEXT_LINE; if (hadDebuggerNextLineOnly) execInfo.execute &= (JsExecFlags)~EXEC_DEBUGGER_NEXT_LINE; } #endif JsLex newLex; JsLex *oldLex = jslSetLex(&newLex); jslInit(functionCode); #ifndef ESPR_NO_LINE_NUMBERS newLex.lineNumberOffset = functionLineNumber; #endif JSP_SAVE_EXECUTE(); // force execute without any previous state #ifdef USE_DEBUGGER execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK|EXEC_DEBUGGER_NEXT_LINE)); #else execInfo.execute = EXEC_YES | (execInfo.execute&(EXEC_CTRL_C_MASK|EXEC_ERROR_MASK)); #endif if (jsvIsFunctionReturn(function)) { #ifdef USE_DEBUGGER // we didn't parse a statement so wouldn't trigger the debugger otherwise if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE && JSP_SHOULD_EXECUTE) { lex->tokenLastStart = lex->tokenStart; jsiDebuggerLoop(); } #endif // implicit return - we just need an expression (optional) if (lex->tk != ';' && lex->tk != '}') returnVar = jsvSkipNameAndUnLock(jspeExpression()); } else { // setup a return variable JsVar *returnVarName = jsvAddNamedChild(functionRoot, 0, JSPARSE_RETURN_VAR); // parse the whole block jspeBlockNoBrackets(); /* get the real return var before we remove it from our function. * We can unlock below because returnVarName is still part of * functionRoot, so won't get freed. 
*/ returnVar = jsvSkipNameAndUnLock(returnVarName); if (returnVarName) // could have failed with out of memory jsvSetValueOfName(returnVarName, 0); // remove return value (which helps stops circular references) } // Store a stack trace if we had an error JsExecFlags hasError = execInfo.execute&EXEC_ERROR_MASK; JSP_RESTORE_EXECUTE(); // because return will probably have set execute to false #ifdef USE_DEBUGGER bool calledDebugger = false; if (execInfo.execute & EXEC_DEBUGGER_MASK) { jsiConsolePrint("Value returned is ="); jsfPrintJSON(returnVar, JSON_LIMIT | JSON_SOME_NEWLINES | JSON_PRETTY | JSON_SHOW_DEVICES); jsiConsolePrintChar('\n'); if (execInfo.execute & EXEC_DEBUGGER_FINISH_FUNCTION) { calledDebugger = true; jsiDebuggerLoop(); } } if (hadDebuggerNextLineOnly && !calledDebugger) execInfo.execute |= EXEC_DEBUGGER_NEXT_LINE; #endif jslKill(); jslSetLex(oldLex); if (hasError) { execInfo.execute |= hasError; // propogate error JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, jsvIsString(functionName)?"in function %q called from ": "in function called from ", functionName); if (lex) { jspAppendStackTrace(stackTrace); } else jsvAppendPrintf(stackTrace, "system\n"); jsvUnLock(stackTrace); } } } /* Return to old 'this' var. No need to unlock as we never locked before */ if (execInfo.thisVar) jsvUnRef(execInfo.thisVar); execInfo.thisVar = oldThisVar; jspeiRemoveScope(); } // Unlock scopes and restore old ones jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = oldScopeVar; } jsvUnLock(functionCode); jsvUnLock(functionRoot); } jsvUnLock(thisVar); return returnVar; } else if (isParsing) { // ---------------------------------- function, but not executing - just parse args and be done jspeParseFunctionCallBrackets(); /* Do not return function, as it will be unlocked! */ return 0; } else return 0; } // Find a variable (or built-in function) based on the current scopes JsVar *jspGetNamedVariable(const char *tokenName) { JsVar *a = JSP_SHOULD_EXECUTE ? jspeiFindInScopes(tokenName) : 0; if (JSP_SHOULD_EXECUTE && !a) { /* Special case! We haven't found the variable, so check out * and see if it's one of our builtins... */ if (jswIsBuiltInObject(tokenName)) { // Check if we have a built-in function for it // OPT: Could we instead have jswIsBuiltInObjectWithoutConstructor? JsVar *obj = jswFindBuiltInFunction(0, tokenName); // If not, make one if (!obj) obj = jspNewBuiltin(tokenName); if (obj) { // not out of memory a = jsvAddNamedChild(execInfo.root, obj, tokenName); jsvUnLock(obj); } } else { a = jswFindBuiltInFunction(0, tokenName); if (!a) { /* Variable doesn't exist! JavaScript says we should create it * (we won't add it here. 
This is done in the assignment operator)*/ a = jsvMakeIntoVariableName(jsvNewFromString(tokenName), 0); } } } return a; } /// Used by jspGetNamedField / jspGetVarNamedField static NO_INLINE JsVar *jspGetNamedFieldInParents(JsVar *object, const char* name, bool returnName) { // Now look in prototypes JsVar * child = jspeiFindChildFromStringInParents(object, name); /* Check for builtins via separate function * This way we save on RAM for built-ins because everything comes out of program code */ if (!child) { child = jswFindBuiltInFunction(object, name); } /* We didn't get here if we found a child in the object itself, so * if we're here then we probably have the wrong name - so for example * with `a.b = c;` could end up setting `a.prototype.b` (bug #360) * * Also we might have got a built-in, which wouldn't have a name on it * anyway - so in both cases, strip the name if it is there, and create * a new name that references the object we actually requested the * member from.. */ if (child && returnName) { // Get rid of existing name if (jsvIsName(child)) { JsVar *t = jsvGetValueOfName(child); jsvUnLock(child); child = t; } // create a new name JsVar *nameVar = jsvNewFromString(name); JsVar *newChild = jsvCreateNewChild(object, nameVar, child); jsvUnLock2(nameVar, child); child = newChild; } // If not found and is the prototype, create it if (!child) { if (jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { // prototype is supposed to be an object JsVar *proto = jsvNewObject(); // make sure it has a 'constructor' variable that points to the object it was part of jsvObjectSetChild(proto, JSPARSE_CONSTRUCTOR_VAR, object); child = jsvAddNamedChild(object, proto, JSPARSE_PROTOTYPE_VAR); jspEnsureIsPrototype(object, child); jsvUnLock(proto); } else if (strcmp(name, JSPARSE_INHERITS_VAR)==0) { const char *objName = jswGetBasicObjectName(object); if (objName) { JsVar *p = jsvSkipNameAndUnLock(jspNewPrototype(objName)); // jspNewPrototype returns a 'prototype' name that's already a child of eg. an array // Create a new 'name' called __proto__ that links to it JsVar *i = jsvNewFromString(JSPARSE_INHERITS_VAR); if (p) child = jsvCreateNewChild(object, i, p); jsvUnLock(i); } } } return child; } /** Get the named function/variable on the object - whether it's built in, or predefined. * If !returnName, returns the function/variable itself or undefined, but * if returnName, return a name (could be fake) referencing the parent. * * NOTE: ArrayBuffer/Strings are not handled here. We assume that if we're * passing a char* rather than a JsVar it's because we're looking up via * a symbol rather than a variable. 
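 * (e.g. jspGetNamedField(obj, "toString", false) returns the built-in
 * toString unless 'obj' or its prototype chain overrides it - sketch)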
To handle these use jspGetVarNamedField */ JsVar *jspGetNamedField(JsVar *object, const char* name, bool returnName) { JsVar *child = 0; // if we're an object (or pretending to be one) if (jsvHasChildren(object)) child = jsvFindChildFromString(object, name, false); if (!child) { child = jspGetNamedFieldInParents(object, name, returnName); // If not found and is the prototype, create it if (!child && jsvIsFunction(object) && strcmp(name, JSPARSE_PROTOTYPE_VAR)==0) { JsVar *value = jsvNewObject(); // prototype is supposed to be an object child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR); jsvUnLock(value); } } if (returnName) return child; else return jsvSkipNameAndUnLock(child); } /// see jspGetNamedField - note that nameVar should have had jsvAsArrayIndex called on it first JsVar *jspGetVarNamedField(JsVar *object, JsVar *nameVar, bool returnName) { JsVar *child = 0; // if we're an object (or pretending to be one) if (jsvHasChildren(object)) child = jsvFindChildFromVar(object, nameVar, false); if (!child) { if (jsvIsArrayBuffer(object) && jsvIsInt(nameVar)) { // for array buffers, we actually create a NAME, and hand that back - then when we assign (or use SkipName) we pull out the correct data child = jsvMakeIntoVariableName(jsvNewFromInteger(jsvGetInteger(nameVar)), object); if (child) // turn into an 'array buffer name' child->flags = (child->flags & ~JSV_VARTYPEMASK) | JSV_ARRAYBUFFERNAME; } else if (jsvIsString(object) && jsvIsInt(nameVar)) { JsVarInt idx = jsvGetInteger(nameVar); if (idx>=0 && idx<(JsVarInt)jsvGetStringLength(object)) { char ch = jsvGetCharInString(object, (size_t)idx); child = jsvNewStringOfLength(1, &ch); } else if (returnName) child = jsvCreateNewChild(object, nameVar, 0); // just return *something* to show this is handled } else { // get the name as a string char name[JSLEX_MAX_TOKEN_LENGTH]; jsvGetString(nameVar, name, JSLEX_MAX_TOKEN_LENGTH); // try and find it in parents child = jspGetNamedFieldInParents(object, name, returnName); // If not found and is the prototype, create it if (!child && jsvIsFunction(object) && jsvIsStringEqual(nameVar, JSPARSE_PROTOTYPE_VAR)) { JsVar *value = jsvNewObject(); // prototype is supposed to be an object child = jsvAddNamedChild(object, value, JSPARSE_PROTOTYPE_VAR); jsvUnLock(value); } } } if (returnName) return child; else return jsvSkipNameAndUnLock(child); } /// Call the named function on the object - whether it's built in, or predefined. Returns the return value of the function. JsVar *jspCallNamedFunction(JsVar *object, char* name, int argCount, JsVar **argPtr) { JsVar *child = jspGetNamedField(object, name, false); JsVar *r = 0; if (jsvIsFunction(child)) r = jspeFunctionCall(child, 0, object, false, argCount, argPtr); jsvUnLock(child); return r; } NO_INLINE JsVar *jspeFactorMember(JsVar *a, JsVar **parentResult) { /* The parent if we're executing a method call */ JsVar *parent = 0; while (lex->tk=='.' || lex->tk=='[') { if (lex->tk == '.') { // ------------------------------------- Record Access JSP_ASSERT_MATCH('.'); if (jslIsIDOrReservedWord()) { if (JSP_SHOULD_EXECUTE) { // Note: name will go away when we parse something else! 
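          // (jslGetTokenValueAsString points into the lexer's token buffer,
          // so 'name' must be used before the lexer is advanced)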
const char *name = jslGetTokenValueAsString(); JsVar *aVar = jsvSkipNameWithParent(a,true,parent); JsVar *child = 0; if (aVar) child = jspGetNamedField(aVar, name, true); if (!child) { if (!jsvIsUndefined(aVar)) { // if no child found, create a pointer to where it could be // as we don't want to allocate it until it's written JsVar *nameVar = jslGetTokenValueAsVar(); child = jsvCreateNewChild(aVar, nameVar, 0); jsvUnLock(nameVar); } else { // could have been a string... jsExceptionHere(JSET_ERROR, "Cannot read property '%s' of undefined", name); } } jsvUnLock(parent); parent = aVar; jsvUnLock(a); a = child; } // skip over current token (we checked above that it was an ID or reserved word) jslGetNextToken(); } else { // incorrect token - force a match fail by asking for an ID JSP_MATCH_WITH_RETURN(LEX_ID, a); } } else if (lex->tk == '[') { // ------------------------------------- Array Access JsVar *index; JSP_ASSERT_MATCH('['); if (!jspCheckStackPosition()) return parent; index = jsvSkipNameAndUnLock(jspeAssignmentExpression()); JSP_MATCH_WITH_CLEANUP_AND_RETURN(']', jsvUnLock2(parent, index);, a); if (JSP_SHOULD_EXECUTE) { index = jsvAsArrayIndexAndUnLock(index); JsVar *aVar = jsvSkipNameWithParent(a,true,parent); JsVar *child = 0; if (aVar) child = jspGetVarNamedField(aVar, index, true); if (!child) { if (jsvHasChildren(aVar)) { // if no child found, create a pointer to where it could be // as we don't want to allocate it until it's written child = jsvCreateNewChild(aVar, index, 0); } else { jsExceptionHere(JSET_ERROR, "Field or method %q does not already exist, and can't create it on %t", index, aVar); } } jsvUnLock(parent); parent = jsvLockAgainSafe(aVar); jsvUnLock(a); a = child; jsvUnLock(aVar); } jsvUnLock(index); } else { assert(0); } } if (parentResult) *parentResult = parent; else jsvUnLock(parent); return a; } NO_INLINE JsVar *jspeConstruct(JsVar *func, JsVar *funcName, bool hasArgs) { assert(JSP_SHOULD_EXECUTE); if (!jsvIsFunction(func)) { jsExceptionHere(JSET_ERROR, "Constructor should be a function, but is %t", func); return 0; } JsVar *thisObj = jsvNewObject(); if (!thisObj) return 0; // out of memory // Make sure the function has a 'prototype' var JsVar *prototypeName = jsvFindChildFromString(func, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(func, prototypeName); // make sure it's an object JsVar *prototypeVar = jsvSkipName(prototypeName); jsvUnLock3(jsvAddNamedChild(thisObj, prototypeVar, JSPARSE_INHERITS_VAR), prototypeVar, prototypeName); JsVar *a = jspeFunctionCall(func, funcName, thisObj, hasArgs, 0, 0); /* FIXME: we should ignore return values that aren't objects (bug #848), but then we need * to be aware of `new String()` and `new Uint8Array()`. Ideally we'd let through * arrays/etc, and then String/etc should return 'boxed' values. * * But they don't return boxed values at the moment, so let's just * pass the return value through. If you try and return a string from * a function it's broken JS code anyway. 
*/ if (a) { jsvUnLock(thisObj); thisObj = a; } else { jsvUnLock(a); } return thisObj; } NO_INLINE JsVar *jspeFactorFunctionCall() { /* The parent if we're executing a method call */ bool isConstructor = false; if (lex->tk==LEX_R_NEW) { JSP_ASSERT_MATCH(LEX_R_NEW); isConstructor = true; if (lex->tk==LEX_R_NEW) { jsExceptionHere(JSET_ERROR, "Nesting 'new' operators is unsupported"); jspSetError(false); return 0; } } JsVar *parent = 0; #ifndef SAVE_ON_FLASH bool wasSuper = lex->tk==LEX_R_SUPER; #endif JsVar *a = jspeFactorMember(jspeFactor(), &parent); #ifndef SAVE_ON_FLASH if (wasSuper) { /* if this was 'super.something' then we need * to overwrite the parent, because it'll be * set to the prototype otherwise. */ jsvUnLock(parent); parent = jsvLockAgainSafe(execInfo.thisVar); } #endif while ((lex->tk=='(' || (isConstructor && JSP_SHOULD_EXECUTE)) && !jspIsInterrupted()) { JsVar *funcName = a; JsVar *func = jsvSkipName(funcName); /* The constructor function doesn't change parsing, so if we're * not executing, just short-cut it. */ if (isConstructor && JSP_SHOULD_EXECUTE) { // If we have '(' parse an argument list, otherwise don't look for any args bool parseArgs = lex->tk=='('; a = jspeConstruct(func, funcName, parseArgs); isConstructor = false; // don't treat subsequent brackets as constructors } else a = jspeFunctionCall(func, funcName, parent, true, 0, 0); jsvUnLock3(funcName, func, parent); parent=0; a = jspeFactorMember(a, &parent); } #ifndef SAVE_ON_FLASH /* If we've got something that we care about the parent of (eg. a getter/setter) * then we repackage it into a 'NewChild' name that references the parent before * we leave. Note: You can't do this on everything because normally NewChild * forces a new child to be blindly created. It works on Getters/Setters because * we *always* run those rather than adding them. 
*/ if (parent && jsvIsBasicName(a) && !jsvIsNewChild(a)) { JsVar *value = jsvLockSafe(jsvGetFirstChild(a)); if (jsvIsGetterOrSetter(value)) { // no need to do this for functions since we've just executed whatever we needed to JsVar *nameVar = jsvCopyNameOnly(a,false,true); JsVar *newChild = jsvCreateNewChild(parent, nameVar, value); jsvUnLock2(nameVar, a); a = newChild; } jsvUnLock(value); } #endif jsvUnLock(parent); return a; } NO_INLINE JsVar *jspeFactorObject() { if (JSP_SHOULD_EXECUTE) { JsVar *contents = jsvNewObject(); if (!contents) { // out of memory jspSetError(false); return 0; } /* JSON-style object definition */ JSP_MATCH_WITH_RETURN('{', contents); while (!JSP_SHOULDNT_PARSE && lex->tk != '}') { JsVar *varName = 0; // we only allow strings or IDs on the left hand side of an initialisation if (jslIsIDOrReservedWord()) { if (JSP_SHOULD_EXECUTE) varName = jslGetTokenValueAsVar(); jslGetNextToken(); // skip over current token } else if ( lex->tk==LEX_STR || lex->tk==LEX_FLOAT || lex->tk==LEX_INT || lex->tk==LEX_R_TRUE || lex->tk==LEX_R_FALSE || lex->tk==LEX_R_NULL || lex->tk==LEX_R_UNDEFINED) { varName = jspeFactor(); } else { JSP_MATCH_WITH_RETURN(LEX_ID, contents); } #ifndef SAVE_ON_FLASH if (lex->tk==LEX_ID && jsvIsString(varName)) { bool isGetter = jsvIsStringEqual(varName, "get"); bool isSetter = jsvIsStringEqual(varName, "set"); if (isGetter || isSetter) { jsvUnLock(varName); varName = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_ID); JsVar *method = jspeFunctionDefinition(false); jsvAddGetterOrSetter(contents, varName, isGetter, method); jsvUnLock(method); } } else #endif { JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock(varName), contents); if (JSP_SHOULD_EXECUTE) { varName = jsvAsArrayIndexAndUnLock(varName); JsVar *contentsName = jsvFindChildFromVar(contents, varName, true); if (contentsName) { JsVar *value = jsvSkipNameAndUnLock(jspeAssignmentExpression()); // value can be 0 (could be undefined!) 
jsvUnLock2(jsvSetValueOfName(contentsName, value), value); } } } jsvUnLock(varName); // no need to clean here, as it will definitely be used if (lex->tk != '}') JSP_MATCH_WITH_RETURN(',', contents); } JSP_MATCH_WITH_RETURN('}', contents); return contents; } else { // Not executing so do fast skip jspeBlock(); return 0; } } NO_INLINE JsVar *jspeFactorArray() { int idx = 0; JsVar *contents = 0; if (JSP_SHOULD_EXECUTE) { contents = jsvNewEmptyArray(); if (!contents) { // out of memory jspSetError(false); return 0; } } /* JSON-style array */ JSP_MATCH_WITH_RETURN('[', contents); while (!JSP_SHOULDNT_PARSE && lex->tk != ']') { if (JSP_SHOULD_EXECUTE) { JsVar *aVar = 0; JsVar *indexName = 0; if (lex->tk != ',') { // #287 - [,] and [1,2,,4] are allowed aVar = jsvSkipNameAndUnLock(jspeAssignmentExpression()); indexName = jsvMakeIntoVariableName(jsvNewFromInteger(idx), aVar); } if (indexName) { // could be out of memory jsvAddName(contents, indexName); jsvUnLock(indexName); } jsvUnLock(aVar); } else { jsvUnLock(jspeAssignmentExpression()); } // no need to clean here, as it will definitely be used if (lex->tk != ']') JSP_MATCH_WITH_RETURN(',', contents); idx++; } if (contents) jsvSetArrayLength(contents, idx, false); JSP_MATCH_WITH_RETURN(']', contents); return contents; } NO_INLINE void jspEnsureIsPrototype(JsVar *instanceOf, JsVar *prototypeName) { if (!prototypeName) return; JsVar *prototypeVar = jsvSkipName(prototypeName); if (!(jsvIsObject(prototypeVar) || jsvIsFunction(prototypeVar))) { if (!jsvIsUndefined(prototypeVar)) jsExceptionHere(JSET_TYPEERROR, "Prototype should be an object, got %t", prototypeVar); jsvUnLock(prototypeVar); prototypeVar = jsvNewObject(); // prototype is supposed to be an object JsVar *lastName = jsvSkipToLastName(prototypeName); jsvSetValueOfName(lastName, prototypeVar); jsvUnLock(lastName); } JsVar *constructor = jsvFindChildFromString(prototypeVar, JSPARSE_CONSTRUCTOR_VAR, true); if (constructor) jsvSetValueOfName(constructor, instanceOf); jsvUnLock2(constructor, prototypeVar); } NO_INLINE JsVar *jspeFactorTypeOf() { JSP_ASSERT_MATCH(LEX_R_TYPEOF); JsVar *a = jspeUnaryExpression(); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { if (!jsvIsVariableDefined(a)) { // so we don't get a ReferenceError when accessing an undefined var result=jsvNewFromString("undefined"); } else { a = jsvSkipNameAndUnLock(a); result=jsvNewFromString(jsvGetTypeOf(a)); } } jsvUnLock(a); return result; } NO_INLINE JsVar *jspeFactorDelete() { JSP_ASSERT_MATCH(LEX_R_DELETE); JsVar *parent = 0; JsVar *a = jspeFactorMember(jspeFactor(), &parent); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { bool ok = false; if (jsvIsName(a) && !jsvIsNewChild(a)) { // if no parent, check in root? if (!parent && jsvIsChild(execInfo.root, a)) parent = jsvLockAgain(execInfo.root); #ifdef DEBUG if (jsvHasChildren(parent)) assert(jsvIsChild(parent, a)); #endif if (jsvHasChildren(parent) && jsvIsChild(parent, a)) { // else remove properly. /* we use jsvIsChild here just in case. 
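Without it, jsvRemoveChild() could be asked to unlink a name from a parent that never owned it; this membership check is the hardening recorded in the metadata below as CVE-2022-25044 (CWE-787, out-of-bounds write).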
delete probably isn't called that often so it pays to be safe */ if (jsvIsArray(parent)) { // For arrays, we must make sure we don't change the length JsVarInt l = jsvGetArrayLength(parent); jsvRemoveChild(parent, a); jsvSetArrayLength(parent, l, false); } else { jsvRemoveChild(parent, a); } ok = true; } } result = jsvNewFromBool(ok); } jsvUnLock2(a, parent); return result; } #ifndef SAVE_ON_FLASH JsVar *jspeTemplateLiteral() { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) { JsVar *template = jslGetTokenValueAsVar(); a = jsvNewFromEmptyString(); if (a && template) { JsvStringIterator it, dit; jsvStringIteratorNew(&it, template, 0); jsvStringIteratorNew(&dit, a, 0); while (jsvStringIteratorHasChar(&it)) { char ch = jsvStringIteratorGetCharAndNext(&it); if (ch=='$') { ch = jsvStringIteratorGetChar(&it); if (ch=='{') { // Now parse out the expression jsvStringIteratorNext(&it); int brackets = 1; JsVar *expr = jsvNewFromEmptyString(); if (!expr) break; JsvStringIterator eit; jsvStringIteratorNew(&eit, expr, 0); while (jsvStringIteratorHasChar(&it)) { ch = jsvStringIteratorGetCharAndNext(&it); if (ch=='{') brackets++; if (ch=='}') { brackets--; if (!brackets) break; } jsvStringIteratorAppend(&eit, ch); } jsvStringIteratorFree(&eit); JsVar *result = jspEvaluateExpressionVar(expr); jsvUnLock(expr); result = jsvAsStringAndUnLock(result); jsvStringIteratorAppendString(&dit, result, 0, JSVAPPENDSTRINGVAR_MAXLENGTH); jsvUnLock(result); } else { jsvStringIteratorAppend(&dit, '$'); } } else { jsvStringIteratorAppend(&dit, ch); } } jsvStringIteratorFree(&it); jsvStringIteratorFree(&dit); } jsvUnLock(template); } JSP_ASSERT_MATCH(LEX_TEMPLATE_LITERAL); return a; } #endif NO_INLINE JsVar *jspeAddNamedFunctionParameter(JsVar *funcVar, JsVar *name) { if (!funcVar) funcVar = jsvNewWithFlags(JSV_FUNCTION); char buf[JSLEX_MAX_TOKEN_LENGTH+1]; buf[0] = '\xFF'; size_t l = jsvGetString(name, &buf[1], JSLEX_MAX_TOKEN_LENGTH); buf[l+1] = 0; // zero terminate since jsvGetString doesn't add one JsVar *param = jsvAddNamedChild(funcVar, 0, buf); jsvMakeFunctionParameter(param); jsvUnLock(param); return funcVar; } #ifndef SAVE_ON_FLASH // parse an arrow function NO_INLINE JsVar *jspeArrowFunction(JsVar *funcVar, JsVar *a) { assert(!a || jsvIsName(a)); JSP_ASSERT_MATCH(LEX_ARROW_FUNCTION); funcVar = jspeAddNamedFunctionParameter(funcVar, a); bool expressionOnly = lex->tk!='{'; bool fnIncludesThis = jspeFunctionDefinitionInternal(funcVar, expressionOnly); /* Arrow functions store the value of 'this' when they were defined. In order to differentiate between normal functions we usually have to store 'this' even if 'this' was just the global object. Very few arrow functions actually use 'this' though - usually they are just used as a shorthand, and so we end up wasting a whole extra var for every single arrow function. So... while parsing the function's body we check of the 'this' keyword is used. If it isn't, we just don't include it. 
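For example `x => x+1` never references `this`, so nothing extra is stored, while `() => this.count` makes fnIncludesThis true and the definition-time `this` gets saved onto the function just below.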
*/ if (fnIncludesThis) jsvObjectSetChild(funcVar, JSPARSE_FUNCTION_THIS_NAME, execInfo.thisVar); return funcVar; } // parse expressions with commas, maybe followed by an arrow function (bracket already matched) NO_INLINE JsVar *jspeExpressionOrArrowFunction() { JsVar *a = 0; JsVar *funcVar = 0; bool allNames = true; while (lex->tk!=')' && !JSP_SHOULDNT_PARSE) { if (allNames && a) { // we never get here if this isn't a name and a string funcVar = jspeAddNamedFunctionParameter(funcVar, a); } jsvUnLock(a); a = jspeAssignmentExpression(); /* if we're not executing, `a` will always be undefined so don't do the check for allNames - just assume all is good. We'll properly check when we execute. */ if (JSP_SHOULD_EXECUTE && !(jsvIsName(a) && jsvIsString(a))) allNames = false; if (lex->tk!=')') JSP_MATCH_WITH_CLEANUP_AND_RETURN(',', jsvUnLock2(a,funcVar), 0); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(a,funcVar), 0); // if all names inside brackets and an arrow is found, create a function if (allNames && lex->tk==LEX_ARROW_FUNCTION) { funcVar = jspeArrowFunction(funcVar, a); jsvUnLock(a); return funcVar; } else { jsvUnLock(funcVar); return a; } } /// Parse an ES6 class, expects LEX_R_CLASS already parsed NO_INLINE JsVar *jspeClassDefinition(bool parseNamedClass) { JsVar *classFunction = 0; JsVar *classPrototype = 0; JsVar *classInternalName = 0; bool actuallyCreateClass = JSP_SHOULD_EXECUTE; if (actuallyCreateClass) { classFunction = jsvNewWithFlags(JSV_FUNCTION); JsVar *scopeVar = jspeiGetScopesAsVar(); if (scopeVar) jsvUnLock2(jsvAddNamedChild(classFunction, scopeVar, JSPARSE_FUNCTION_SCOPE_NAME), scopeVar); } if (parseNamedClass && lex->tk==LEX_ID) { if (classFunction) classInternalName = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_ID); } if (classFunction) { JsVar *prototypeName = jsvFindChildFromString(classFunction, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(classFunction, prototypeName); // make sure it's an object classPrototype = jsvSkipName(prototypeName); jsvUnLock(prototypeName); } if (lex->tk==LEX_R_EXTENDS) { JSP_ASSERT_MATCH(LEX_R_EXTENDS); JsVar *extendsFrom = actuallyCreateClass ? 
jsvSkipNameAndUnLock(jspGetNamedVariable(jslGetTokenValueAsString())) : 0; JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock4(extendsFrom,classFunction,classInternalName,classPrototype),0); if (classPrototype) { if (jsvIsFunction(extendsFrom)) { JsVar *extendsFromProto = jsvObjectGetChild(extendsFrom, JSPARSE_PROTOTYPE_VAR, 0); if (extendsFromProto) { jsvObjectSetChild(classPrototype, JSPARSE_INHERITS_VAR, extendsFromProto); // link in default constructor if ours isn't supplied jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_CODE_NAME, jsvNewFromString("if(this.__proto__.__proto__.constructor)this.__proto__.__proto__.constructor.apply(this,arguments)")); jsvUnLock(extendsFromProto); } } else jsExceptionHere(JSET_SYNTAXERROR, "'extends' argument should be a function, got %t", extendsFrom); } jsvUnLock(extendsFrom); } JSP_MATCH_WITH_CLEANUP_AND_RETURN('{',jsvUnLock3(classFunction,classInternalName,classPrototype),0); while ((lex->tk==LEX_ID || lex->tk==LEX_R_STATIC) && !jspIsInterrupted()) { bool isStatic = lex->tk==LEX_R_STATIC; if (isStatic) JSP_ASSERT_MATCH(LEX_R_STATIC); JsVar *funcName = jslGetTokenValueAsVar(); JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock4(funcName,classFunction,classInternalName,classPrototype),0); #ifndef SAVE_ON_FLASH bool isGetter = false, isSetter = false; if (lex->tk==LEX_ID) { isGetter = jsvIsStringEqual(funcName, "get"); isSetter = jsvIsStringEqual(funcName, "set"); if (isGetter || isSetter) { jsvUnLock(funcName); funcName = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_ID); } } #endif JsVar *method = jspeFunctionDefinition(false); if (classFunction && classPrototype) { JsVar *obj = isStatic ? classFunction : classPrototype; if (jsvIsStringEqual(funcName, "constructor")) { jswrap_function_replaceWith(classFunction, method); #ifndef SAVE_ON_FLASH } else if (isGetter || isSetter) { jsvAddGetterOrSetter(obj, funcName, isGetter, method); #endif } else { funcName = jsvMakeIntoVariableName(funcName, 0); jsvSetValueOfName(funcName, method); jsvAddName(obj, funcName); } } jsvUnLock2(method,funcName); } jsvUnLock(classPrototype); // If we had a name, add it to the end (or it gets confused with the constructor arguments) if (classInternalName) jsvObjectSetChildAndUnLock(classFunction, JSPARSE_FUNCTION_NAME_NAME, classInternalName); JSP_MATCH_WITH_CLEANUP_AND_RETURN('}',jsvUnLock(classFunction),0); return classFunction; } #endif NO_INLINE JsVar *jspeFactor() { if (lex->tk==LEX_ID) { JsVar *a = jspGetNamedVariable(jslGetTokenValueAsString()); JSP_ASSERT_MATCH(LEX_ID); #ifndef SAVE_ON_FLASH if (lex->tk==LEX_TEMPLATE_LITERAL) jsExceptionHere(JSET_SYNTAXERROR, "Tagged template literals not supported"); else if (lex->tk==LEX_ARROW_FUNCTION && (jsvIsName(a) || (a==0 && !JSP_SHOULD_EXECUTE))) { // 'a' needs to be a name, *or* we're not executing so 0 gets returned anyway JsVar *funcVar = jspeArrowFunction(0,a); jsvUnLock(a); a=funcVar; } #endif return a; } else if (lex->tk==LEX_INT) { JsVar *v = 0; if (JSP_SHOULD_EXECUTE) { v = jsvNewFromLongInteger(stringToInt(jslGetTokenValueAsString())); } JSP_ASSERT_MATCH(LEX_INT); return v; } else if (lex->tk==LEX_FLOAT) { JsVar *v = 0; if (JSP_SHOULD_EXECUTE) { v = jsvNewFromFloat(stringToFloat(jslGetTokenValueAsString())); } JSP_ASSERT_MATCH(LEX_FLOAT); return v; } else if (lex->tk=='(') { JSP_ASSERT_MATCH('('); if (!jspCheckStackPosition()) return 0; #ifdef SAVE_ON_FLASH // Just parse a normal expression (which can include commas) JsVar *a = jspeExpression(); if (!JSP_SHOULDNT_PARSE) JSP_MATCH_WITH_RETURN(')',a); return 
a; #else return jspeExpressionOrArrowFunction(); #endif } else if (lex->tk==LEX_R_TRUE) { JSP_ASSERT_MATCH(LEX_R_TRUE); return JSP_SHOULD_EXECUTE ? jsvNewFromBool(true) : 0; } else if (lex->tk==LEX_R_FALSE) { JSP_ASSERT_MATCH(LEX_R_FALSE); return JSP_SHOULD_EXECUTE ? jsvNewFromBool(false) : 0; } else if (lex->tk==LEX_R_NULL) { JSP_ASSERT_MATCH(LEX_R_NULL); return JSP_SHOULD_EXECUTE ? jsvNewWithFlags(JSV_NULL) : 0; } else if (lex->tk==LEX_R_UNDEFINED) { JSP_ASSERT_MATCH(LEX_R_UNDEFINED); return 0; } else if (lex->tk==LEX_STR) { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) a = jslGetTokenValueAsVar(); JSP_ASSERT_MATCH(LEX_STR); return a; #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_TEMPLATE_LITERAL) { return jspeTemplateLiteral(); #endif } else if (lex->tk==LEX_REGEX) { JsVar *a = 0; #ifdef SAVE_ON_FLASH jsExceptionHere(JSET_SYNTAXERROR, "RegEx are not supported in this version of Espruino\n"); #else JsVar *regex = jslGetTokenValueAsVar(); size_t regexEnd = 0, regexLen = 0; JsvStringIterator it; jsvStringIteratorNew(&it, regex, 0); while (jsvStringIteratorHasChar(&it)) { regexLen++; if (jsvStringIteratorGetCharAndNext(&it)=='/') regexEnd = regexLen; } jsvStringIteratorFree(&it); JsVar *flags = 0; if (regexEnd < regexLen) flags = jsvNewFromStringVar(regex, regexEnd, JSVAPPENDSTRINGVAR_MAXLENGTH); JsVar *regexSource = jsvNewFromStringVar(regex, 1, regexEnd-2); a = jswrap_regexp_constructor(regexSource, flags); jsvUnLock3(regex, flags, regexSource); #endif JSP_ASSERT_MATCH(LEX_REGEX); return a; } else if (lex->tk=='{') { if (!jspCheckStackPosition()) return 0; return jspeFactorObject(); } else if (lex->tk=='[') { if (!jspCheckStackPosition()) return 0; return jspeFactorArray(); } else if (lex->tk==LEX_R_FUNCTION) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_FUNCTION); return jspeFunctionDefinition(true); #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_R_CLASS) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_CLASS); return jspeClassDefinition(true); } else if (lex->tk==LEX_R_SUPER) { JSP_ASSERT_MATCH(LEX_R_SUPER); /* This is kind of nasty, since super appears to do three different things. * In the constructor it references the extended class's constructor * in a method it references the constructor's prototype. * in a static method it references the extended class's constructor (but this is different) */ if (jsvIsObject(execInfo.thisVar)) { // 'this' is an object - must be calling a normal method JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_INHERITS_VAR, 0); // if we're in a method, get __proto__ first JsVar *proto2 = jsvIsObject(proto1) ? jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0; // still in method, get __proto__.__proto__ jsvUnLock(proto1); if (!proto2) { jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; } // If we're doing super() we want the constructor if (lex->tk=='(') { JsVar *constr = jsvObjectGetChild(proto2, JSPARSE_CONSTRUCTOR_VAR, 0); jsvUnLock(proto2); return constr; } // But if we're doing something else - eg 'super.' or 'super[' then it needs to reference the prototype return proto2; } else if (jsvIsFunction(execInfo.thisVar)) { // 'this' is a function - must be calling a static method JsVar *proto1 = jsvObjectGetChild(execInfo.thisVar, JSPARSE_PROTOTYPE_VAR, 0); JsVar *proto2 = jsvIsObject(proto1) ? 
jsvObjectGetChild(proto1, JSPARSE_INHERITS_VAR, 0) : 0; jsvUnLock(proto1); if (!proto2) { jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; } JsVar *constr = jsvObjectGetChild(proto2, JSPARSE_CONSTRUCTOR_VAR, 0); jsvUnLock(proto2); return constr; } jsExceptionHere(JSET_SYNTAXERROR, "Calling 'super' outside of class"); return 0; #endif } else if (lex->tk==LEX_R_THIS) { JSP_ASSERT_MATCH(LEX_R_THIS); return jsvLockAgain( execInfo.thisVar ? execInfo.thisVar : execInfo.root ); } else if (lex->tk==LEX_R_DELETE) { if (!jspCheckStackPosition()) return 0; return jspeFactorDelete(); } else if (lex->tk==LEX_R_TYPEOF) { if (!jspCheckStackPosition()) return 0; return jspeFactorTypeOf(); } else if (lex->tk==LEX_R_VOID) { if (!jspCheckStackPosition()) return 0; JSP_ASSERT_MATCH(LEX_R_VOID); jsvUnLock(jspeUnaryExpression()); return 0; } JSP_MATCH(LEX_EOF); jsExceptionHere(JSET_SYNTAXERROR, "Unexpected end of Input\n"); return 0; } NO_INLINE JsVar *__jspePostfixExpression(JsVar *a) { while (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) { int op = lex->tk; JSP_ASSERT_MATCH(op); if (JSP_SHOULD_EXECUTE) { JsVar *one = jsvNewFromInteger(1); JsVar *oldValue = jsvAsNumberAndUnLock(jsvSkipName(a)); // keep the old value (but convert to number) JsVar *res = jsvMathsOpSkipNames(oldValue, one, op==LEX_PLUSPLUS ? '+' : '-'); jsvUnLock(one); // in-place add/subtract jsvReplaceWith(a, res); jsvUnLock(res); // but then use the old value jsvUnLock(a); a = oldValue; } } return a; } NO_INLINE JsVar *jspePostfixExpression() { JsVar *a; // TODO: should be in jspeUnaryExpression if (lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS) { int op = lex->tk; JSP_ASSERT_MATCH(op); a = jspePostfixExpression(); if (JSP_SHOULD_EXECUTE) { JsVar *one = jsvNewFromInteger(1); JsVar *res = jsvMathsOpSkipNames(a, one, op==LEX_PLUSPLUS ? '+' : '-'); jsvUnLock(one); // in-place add/subtract jsvReplaceWith(a, res); jsvUnLock(res); } } else a = jspeFactorFunctionCall(); return __jspePostfixExpression(a); } NO_INLINE JsVar *jspeUnaryExpression() { if (lex->tk=='!' 
|| lex->tk=='~' || lex->tk=='-' || lex->tk=='+') { short tk = lex->tk; JSP_ASSERT_MATCH(tk); if (!JSP_SHOULD_EXECUTE) { return jspeUnaryExpression(); } if (tk=='!') { // logical not return jsvNewFromBool(!jsvGetBoolAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression()))); } else if (tk=='~') { // bitwise not return jsvNewFromInteger(~jsvGetIntegerAndUnLock(jsvSkipNameAndUnLock(jspeUnaryExpression()))); } else if (tk=='-') { // unary minus return jsvNegateAndUnLock(jspeUnaryExpression()); // names already skipped } else if (tk=='+') { // unary plus (convert to number) JsVar *v = jsvSkipNameAndUnLock(jspeUnaryExpression()); JsVar *r = jsvAsNumber(v); // names already skipped jsvUnLock(v); return r; } assert(0); return 0; } else return jspePostfixExpression(); } // Get the precedence of a BinaryExpression - or return 0 if not one unsigned int jspeGetBinaryExpressionPrecedence(int op) { switch (op) { case LEX_OROR: return 1; break; case LEX_ANDAND: return 2; break; case '|' : return 3; break; case '^' : return 4; break; case '&' : return 5; break; case LEX_EQUAL: case LEX_NEQUAL: case LEX_TYPEEQUAL: case LEX_NTYPEEQUAL: return 6; case LEX_LEQUAL: case LEX_GEQUAL: case '<': case '>': case LEX_R_INSTANCEOF: return 7; case LEX_R_IN: return (execInfo.execute&EXEC_FOR_INIT)?0:7; case LEX_LSHIFT: case LEX_RSHIFT: case LEX_RSHIFTUNSIGNED: return 8; case '+': case '-': return 9; case '*': case '/': case '%': return 10; default: return 0; } } NO_INLINE JsVar *__jspeBinaryExpression(JsVar *a, unsigned int lastPrecedence) { /* This one's a bit strange. Basically all the ops have their own precedence, it's not * like & and | share the same precedence. We don't want to recurse for each one, * so instead we do this. * * We deal with an expression in recursion ONLY if it's of higher precedence * than the current one, otherwise we stick in the while loop. */ unsigned int precedence = jspeGetBinaryExpressionPrecedence(lex->tk); while (precedence && precedence>lastPrecedence) { int op = lex->tk; JSP_ASSERT_MATCH(op); // if we have short-circuit ops, then if we know the outcome // we don't bother to execute the other op. Even if not // we need to tell mathsOp it's an & or | if (op==LEX_ANDAND || op==LEX_OROR) { bool aValue = jsvGetBoolAndUnLock(jsvSkipName(a)); if ((!aValue && op==LEX_ANDAND) || (aValue && op==LEX_OROR)) { // use first argument (A) JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(__jspeBinaryExpression(jspeUnaryExpression(),precedence)); JSP_RESTORE_EXECUTE(); } else { // use second argument (B) jsvUnLock(a); a = __jspeBinaryExpression(jspeUnaryExpression(),precedence); } } else { // else it's a more 'normal' logical expression - just use Maths JsVar *b = __jspeBinaryExpression(jspeUnaryExpression(),precedence); if (JSP_SHOULD_EXECUTE) { if (op==LEX_R_IN) { JsVar *av = jsvSkipName(a); // needle JsVar *bv = jsvSkipName(b); // haystack if (jsvHasChildren(bv)) { // search keys, NOT values av = jsvAsArrayIndexAndUnLock(av); JsVar *varFound = jspGetVarNamedField( bv, av, true); jsvUnLock2(a,varFound); a = jsvNewFromBool(varFound!=0); } else { // else maybe it's a fake object... 
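// (a "fake" object here is a built-in whose properties live in jswrapper's compiled-in symbol lists rather than as real child variables, hence the jswBinarySearch() lookup below)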
const JswSymList *syms = jswGetSymbolListForObjectProto(bv); if (syms) { JsVar *varFound = 0; char nameBuf[JSLEX_MAX_TOKEN_LENGTH]; if (jsvGetString(av, nameBuf, sizeof(nameBuf)) < sizeof(nameBuf)) varFound = jswBinarySearch(syms, bv, nameBuf); bool found = varFound!=0; jsvUnLock2(a, varFound); if (!found && jsvIsArrayBuffer(bv)) { JsVarFloat f = jsvGetFloat(av); // if not a number this will be NaN, f==floor(f) fails if (f==floor(f) && f>=0 && f<jsvGetArrayBufferLength(bv)) found = true; } a = jsvNewFromBool(found); } else { // not built-in, just assume we can't do it jsExceptionHere(JSET_ERROR, "Cannot use 'in' operator to search a %t", bv); jsvUnLock(a); a = 0; } } jsvUnLock2(av, bv); } else if (op==LEX_R_INSTANCEOF) { bool inst = false; JsVar *av = jsvSkipName(a); JsVar *bv = jsvSkipName(b); if (!jsvIsFunction(bv)) { jsExceptionHere(JSET_ERROR, "Expecting a function on RHS in instanceof check, got %t", bv); } else { if (jsvIsObject(av) || jsvIsFunction(av)) { JsVar *bproto = jspGetNamedField(bv, JSPARSE_PROTOTYPE_VAR, false); JsVar *proto = jsvObjectGetChild(av, JSPARSE_INHERITS_VAR, 0); while (proto) { if (proto == bproto) inst=true; // search prototype chain JsVar *childProto = jsvObjectGetChild(proto, JSPARSE_INHERITS_VAR, 0); jsvUnLock(proto); proto = childProto; } if (jspIsConstructor(bv, "Object")) inst = true; jsvUnLock(bproto); } if (!inst) { const char *name = jswGetBasicObjectName(av); if (name) { inst = jspIsConstructor(bv, name); } // Hack for built-ins that should also be instances of Object if (!inst && (jsvIsArray(av) || jsvIsArrayBuffer(av)) && jspIsConstructor(bv, "Object")) inst = true; } } jsvUnLock3(av, bv, a); a = jsvNewFromBool(inst); } else { // --------------------------------------------- NORMAL JsVar *res = jsvMathsOpSkipNames(a, b, op); jsvUnLock(a); a = res; } } jsvUnLock(b); } precedence = jspeGetBinaryExpressionPrecedence(lex->tk); } return a; } JsVar *jspeBinaryExpression() { return __jspeBinaryExpression(jspeUnaryExpression(),0); } NO_INLINE JsVar *__jspeConditionalExpression(JsVar *lhs) { if (lex->tk=='?') { JSP_ASSERT_MATCH('?'); if (!JSP_SHOULD_EXECUTE) { // just let lhs pass through jsvUnLock(jspeAssignmentExpression()); JSP_MATCH(':'); jsvUnLock(jspeAssignmentExpression()); } else { bool first = jsvGetBoolAndUnLock(jsvSkipName(lhs)); jsvUnLock(lhs); if (first) { lhs = jspeAssignmentExpression(); JSP_MATCH(':'); JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeAssignmentExpression()); JSP_RESTORE_EXECUTE(); } else { JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeAssignmentExpression()); JSP_RESTORE_EXECUTE(); JSP_MATCH(':'); lhs = jspeAssignmentExpression(); } } } return lhs; } JsVar *jspeConditionalExpression() { return __jspeConditionalExpression(jspeBinaryExpression()); } NO_INLINE JsVar *__jspeAssignmentExpression(JsVar *lhs) { if (lex->tk=='=' || lex->tk==LEX_PLUSEQUAL || lex->tk==LEX_MINUSEQUAL || lex->tk==LEX_MULEQUAL || lex->tk==LEX_DIVEQUAL || lex->tk==LEX_MODEQUAL || lex->tk==LEX_ANDEQUAL || lex->tk==LEX_OREQUAL || lex->tk==LEX_XOREQUAL || lex->tk==LEX_RSHIFTEQUAL || lex->tk==LEX_LSHIFTEQUAL || lex->tk==LEX_RSHIFTUNSIGNEDEQUAL) { JsVar *rhs; int op = lex->tk; JSP_ASSERT_MATCH(op); rhs = jspeAssignmentExpression(); rhs = jsvSkipNameAndUnLock(rhs); // ensure we get rid of any references on the RHS if (JSP_SHOULD_EXECUTE && lhs) { if (op=='=') { jsvReplaceWithOrAddToRoot(lhs, rhs); } else { if (op==LEX_PLUSEQUAL) op='+'; else if (op==LEX_MINUSEQUAL) op='-'; else if (op==LEX_MULEQUAL) op='*'; else if (op==LEX_DIVEQUAL) op='/'; else if 
(op==LEX_MODEQUAL) op='%'; else if (op==LEX_ANDEQUAL) op='&'; else if (op==LEX_OREQUAL) op='|'; else if (op==LEX_XOREQUAL) op='^'; else if (op==LEX_RSHIFTEQUAL) op=LEX_RSHIFT; else if (op==LEX_LSHIFTEQUAL) op=LEX_LSHIFT; else if (op==LEX_RSHIFTUNSIGNEDEQUAL) op=LEX_RSHIFTUNSIGNED; if (op=='+' && jsvIsName(lhs)) { JsVar *currentValue = jsvSkipName(lhs); if (jsvIsBasicString(currentValue) && jsvGetRefs(currentValue)==1 && rhs!=currentValue) { /* A special case for string += where this is the only use of the string * and we're not appending to ourselves. In this case we can do a * simple append (rather than clone + append)*/ JsVar *str = jsvAsString(rhs); jsvAppendStringVarComplete(currentValue, str); jsvUnLock(str); op = 0; } jsvUnLock(currentValue); } if (op) { /* Fallback which does a proper add */ JsVar *res = jsvMathsOpSkipNames(lhs,rhs,op); jsvReplaceWith(lhs, res); jsvUnLock(res); } } } jsvUnLock(rhs); } return lhs; } JsVar *jspeAssignmentExpression() { return __jspeAssignmentExpression(jspeConditionalExpression()); } // ',' is allowed to add multiple expressions, this is not allowed in jspeAssignmentExpression NO_INLINE JsVar *jspeExpression() { while (!JSP_SHOULDNT_PARSE) { JsVar *a = jspeAssignmentExpression(); if (lex->tk!=',') return a; // if we get a comma, we just forget this data and parse the next bit... jsvCheckReferenceError(a); jsvUnLock(a); JSP_ASSERT_MATCH(','); } return 0; } /** Parse a block `{ ... }` */ NO_INLINE void jspeSkipBlock() { // fast skip of blocks int brackets = 1; while (lex->tk && brackets) { if (lex->tk == '{') brackets++; else if (lex->tk == '}') { brackets--; if (!brackets) return; } JSP_ASSERT_MATCH(lex->tk); } } /** Parse a block `{ ... }` but assume brackets are already parsed */ NO_INLINE void jspeBlockNoBrackets() { if (JSP_SHOULD_EXECUTE) { while (lex->tk && lex->tk!='}') { JsVar *a = jspeStatement(); jsvCheckReferenceError(a); jsvUnLock(a); if (JSP_HAS_ERROR) { if (lex && !(execInfo.execute&EXEC_ERROR_LINE_REPORTED)) { execInfo.execute = (JsExecFlags)(execInfo.execute | EXEC_ERROR_LINE_REPORTED); JsVar *stackTrace = jsvObjectGetChild(execInfo.hiddenRoot, JSPARSE_STACKTRACE_VAR, JSV_STRING_0); if (stackTrace) { jsvAppendPrintf(stackTrace, "at "); jspAppendStackTrace(stackTrace); jsvUnLock(stackTrace); } } } if (JSP_SHOULDNT_PARSE) return; if (!JSP_SHOULD_EXECUTE) { jspeSkipBlock(); return; } } } else { jspeSkipBlock(); } return; } /** Parse a block `{ ... }` */ NO_INLINE void jspeBlock() { JSP_MATCH_WITH_RETURN('{',); jspeBlockNoBrackets(); if (!JSP_SHOULDNT_PARSE) JSP_MATCH_WITH_RETURN('}',); return; } NO_INLINE JsVar *jspeBlockOrStatement() { if (lex->tk=='{') { jspeBlock(); return 0; } else { JsVar *v = jspeStatement(); if (lex->tk==';') JSP_ASSERT_MATCH(';'); return v; } } /** Parse using current lexer until we hit the end of * input or there was some problem. */ NO_INLINE JsVar *jspParse() { JsVar *v = 0; while (!JSP_SHOULDNT_PARSE && lex->tk != LEX_EOF) { jsvUnLock(v); v = jspeBlockOrStatement(); jsvCheckReferenceError(v); } return v; } NO_INLINE JsVar *jspeStatementVar() { JsVar *lastDefined = 0; /* variable creation. TODO - we need a better way of parsing the left * hand side. 
Maybe just have a flag called can_create_var that we * set and then we parse as if we're doing a normal equals.*/ assert(lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST); jslGetNextToken(); ///TODO: Correctly implement CONST and LET - we just treat them like 'var' at the moment bool hasComma = true; // for first time in loop while (hasComma && lex->tk == LEX_ID && !jspIsInterrupted()) { JsVar *a = 0; if (JSP_SHOULD_EXECUTE) { a = jspeiFindOnTop(jslGetTokenValueAsString(), true); if (!a) { // out of memory jspSetError(false); return lastDefined; } } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(a), lastDefined); // sort out initialiser if (lex->tk == '=') { JsVar *var; JSP_MATCH_WITH_CLEANUP_AND_RETURN('=', jsvUnLock(a), lastDefined); var = jsvSkipNameAndUnLock(jspeAssignmentExpression()); if (JSP_SHOULD_EXECUTE) jsvReplaceWith(a, var); jsvUnLock(var); } jsvUnLock(lastDefined); lastDefined = a; hasComma = lex->tk == ','; if (hasComma) JSP_MATCH_WITH_RETURN(',', lastDefined); } return lastDefined; } NO_INLINE JsVar *jspeStatementIf() { bool cond; JsVar *var, *result = 0; JSP_ASSERT_MATCH(LEX_R_IF); JSP_MATCH('('); var = jspeExpression(); if (JSP_SHOULDNT_PARSE) return var; JSP_MATCH(')'); cond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(var)); jsvUnLock(var); JSP_SAVE_EXECUTE(); if (!cond) jspSetNoExecute(); JsExecFlags hasError = 0; JsVar *a = jspeBlockOrStatement(); hasError |= execInfo.execute&EXEC_ERROR_MASK; if (!cond) { jsvUnLock(a); JSP_RESTORE_EXECUTE(); execInfo.execute |= hasError; } else { result = a; } if (lex->tk==LEX_R_ELSE) { JSP_ASSERT_MATCH(LEX_R_ELSE); JSP_SAVE_EXECUTE(); if (cond) jspSetNoExecute(); JsVar *a = jspeBlockOrStatement(); hasError |= execInfo.execute&EXEC_ERROR_MASK; if (cond) { jsvUnLock(a); JSP_RESTORE_EXECUTE(); execInfo.execute |= hasError; } else { result = a; } } return result; } NO_INLINE JsVar *jspeStatementSwitch() { JSP_ASSERT_MATCH(LEX_R_SWITCH); JSP_MATCH('('); JsVar *switchOn = jspeExpression(); JSP_SAVE_EXECUTE(); bool execute = JSP_SHOULD_EXECUTE; JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock(switchOn), 0); // shortcut if not executing... 
if (!execute) { jsvUnLock(switchOn); jspeBlock(); return 0; } JSP_MATCH_WITH_CLEANUP_AND_RETURN('{', jsvUnLock(switchOn), 0); bool executeDefault = true; if (execute) execInfo.execute=EXEC_NO|EXEC_IN_SWITCH; while (lex->tk==LEX_R_CASE) { JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_CASE, jsvUnLock(switchOn), 0); JsExecFlags oldFlags = execInfo.execute; if (execute) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; JsVar *test = jspeAssignmentExpression(); execInfo.execute = oldFlags|EXEC_IN_SWITCH;; JSP_MATCH_WITH_CLEANUP_AND_RETURN(':', jsvUnLock2(switchOn, test), 0); bool cond = false; if (execute) cond = jsvGetBoolAndUnLock(jsvMathsOpSkipNames(switchOn, test, LEX_TYPEEQUAL)); if (cond) executeDefault = false; jsvUnLock(test); if (cond && (execInfo.execute&EXEC_RUN_MASK)==EXEC_NO) execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!=LEX_R_CASE && lex->tk!=LEX_R_DEFAULT && lex->tk!='}') jsvUnLock(jspeBlockOrStatement()); oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns } jsvUnLock(switchOn); if (execute && (execInfo.execute&EXEC_RUN_MASK)==EXEC_BREAK) { execInfo.execute=EXEC_YES|EXEC_IN_SWITCH; } else { executeDefault = true; } JSP_RESTORE_EXECUTE(); if (lex->tk==LEX_R_DEFAULT) { JSP_ASSERT_MATCH(LEX_R_DEFAULT); JSP_MATCH(':'); JSP_SAVE_EXECUTE(); if (!executeDefault) jspSetNoExecute(); else execInfo.execute |= EXEC_IN_SWITCH; while (!JSP_SHOULDNT_PARSE && lex->tk!=LEX_EOF && lex->tk!='}' && lex->tk!=LEX_R_CASE) jsvUnLock(jspeBlockOrStatement()); oldExecute |= execInfo.execute & (EXEC_ERROR_MASK|EXEC_RETURN); // copy across any errors/exceptions/returns execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_BREAK; JSP_RESTORE_EXECUTE(); } if (lex->tk==LEX_R_CASE) { jsExceptionHere(JSET_SYNTAXERROR, "Espruino doesn't support CASE after DEFAULT"); return 0; } JSP_MATCH('}'); return 0; } // Check whether we received a break/continue while parsing previously. 
Return true if we had a 'break; static NO_INLINE bool jspeCheckBreakContinue() { if (execInfo.execute & EXEC_CONTINUE) execInfo.execute = (execInfo.execute & ~EXEC_RUN_MASK) | EXEC_YES; else if (execInfo.execute & EXEC_BREAK) { execInfo.execute = (execInfo.execute & ~EXEC_RUN_MASK) | EXEC_YES; return true; } return false; } NO_INLINE JsVar *jspeStatementDoOrWhile(bool isWhile) { JsVar *cond; bool loopCond = true; // true for do...while loops bool hasHadBreak = false; JslCharPos whileCondStart; // We do repetition by pulling out the string representing our statement // there's definitely some opportunity for optimisation here bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0; JslCharPos whileBodyStart; if (isWhile) { // while loop JSP_ASSERT_MATCH(LEX_R_WHILE); jslCharPosFromLex(&whileCondStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN('(',jslCharPosFree(&whileCondStart);,0); cond = jspeExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); jslCharPosFromLex(&whileBodyStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); } else { jslCharPosFromLex(&whileBodyStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_DO, jslCharPosFree(&whileBodyStart);,0); } JSP_SAVE_EXECUTE(); // actually try and execute first bit of while loop (we'll do the rest in the actual loop later) if (!loopCond) jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); if (!loopCond) JSP_RESTORE_EXECUTE(); if (!isWhile) { // do..while loop JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_R_WHILE,jslCharPosFree(&whileBodyStart);,0); jslCharPosFromLex(&whileCondStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN('(',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); cond = jspeExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&whileBodyStart);jslCharPosFree(&whileCondStart);,0); } JslCharPos whileBodyEnd; jslCharPosNew(&whileBodyEnd, lex->sourceVar, lex->tokenStart); int loopCount = 0; while (!hasHadBreak && loopCond #ifdef JSPARSE_MAX_LOOP_ITERATIONS && loopCount<JSPARSE_MAX_LOOP_ITERATIONS #endif ) { if (isWhile || loopCount) { // don't check the start condition a second time if we're in a do..while loop jslSeekToP(&whileCondStart); cond = jspeExpression(); loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } if (loopCond) { jslSeekToP(&whileBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); } loopCount++; } jslSeekToP(&whileBodyEnd); jslCharPosFree(&whileCondStart); jslCharPosFree(&whileBodyStart); jslCharPosFree(&whileBodyEnd); #ifdef JSPARSE_MAX_LOOP_ITERATIONS if (loopCount > JSPARSE_MAX_LOOP_ITERATIONS) { jsExceptionHere(JSET_ERROR, "WHILE Loop exceeded the maximum number of iterations (" STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS) ")"); } #endif return 0; } NO_INLINE JsVar *jspGetBuiltinPrototype(JsVar *obj) { if (jsvIsArray(obj)) { JsVar *v = jspFindPrototypeFor("Array"); if (v) return v; } if (jsvIsObject(obj) || jsvIsArray(obj)) { JsVar *v = jspFindPrototypeFor("Object"); if (v==obj) { // don't return ourselves jsvUnLock(v); v = 0; } return v; } return 0; } NO_INLINE JsVar 
*jspeStatementFor() { JSP_ASSERT_MATCH(LEX_R_FOR); JSP_MATCH('('); bool wasInLoop = (execInfo.execute&EXEC_IN_LOOP)!=0; execInfo.execute |= EXEC_FOR_INIT; // initialisation JsVar *forStatement = 0; // we could have 'for (;;)' - so don't munch up our semicolon if that's all we have if (lex->tk != ';') forStatement = jspeStatement(); if (jspIsInterrupted()) { jsvUnLock(forStatement); return 0; } execInfo.execute &= (JsExecFlags)~EXEC_FOR_INIT; #ifndef SAVE_ON_FLASH_EXTREME if (lex->tk == LEX_R_IN || lex->tk == LEX_R_OF) { bool isForOf = lex->tk == LEX_R_OF; // for (i in array) or for (i of array) // where i = forStatement if (JSP_SHOULD_EXECUTE && !jsvIsName(forStatement)) { jsvUnLock(forStatement); jsExceptionHere(JSET_ERROR, "for(a %s b) - 'a' must be a variable name, not %t", isForOf?"of":"in", forStatement); return 0; } JSP_ASSERT_MATCH(lex->tk); // skip over in/of JsVar *array = jsvSkipNameAndUnLock(jspeExpression()); JslCharPos forBodyStart; jslCharPosFromLex(&forBodyStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')', jsvUnLock2(forStatement, array);jslCharPosFree(&forBodyStart), 0); // Simply scan over the loop the first time without executing to figure out where it ends // OPT: we could skip the first parse and actually execute the first time JSP_SAVE_EXECUTE(); jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); JslCharPos forBodyEnd; jslCharPosNew(&forBodyEnd, lex->sourceVar, lex->tokenStart); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; JSP_RESTORE_EXECUTE(); // Now start executing properly if (JSP_SHOULD_EXECUTE) { if (jsvIsIterable(array)) { JsvIsInternalChecker checkerFunction = jsvGetInternalFunctionCheckerFor(array); JsVar *foundPrototype = 0; if (!isForOf) // for..in foundPrototype = jspGetBuiltinPrototype(array); JsvIterator it; jsvIteratorNew(&it, array, isForOf ? /* for of */ JSIF_EVERY_ARRAY_ELEMENT : /* for in */ JSIF_DEFINED_ARRAY_ElEMENTS); bool hasHadBreak = false; while (JSP_SHOULD_EXECUTE && jsvIteratorHasElement(&it) && !hasHadBreak) { JsVar *loopIndexVar = jsvIteratorGetKey(&it); bool ignore = false; if (checkerFunction && checkerFunction(loopIndexVar)) { ignore = true; if (jsvIsString(loopIndexVar) && jsvIsStringEqual(loopIndexVar, JSPARSE_INHERITS_VAR)) foundPrototype = jsvSkipName(loopIndexVar); } if (!ignore) { JsVar *iteratorValue; if (isForOf) { // for (... of ...) iteratorValue = jsvIteratorGetValue(&it); } else { // for (... in ...) iteratorValue = jsvIsName(loopIndexVar) ? 
jsvCopyNameOnly(loopIndexVar, false/*no copy children*/, false/*not a name*/) : loopIndexVar; assert(jsvGetRefs(iteratorValue)==0); } if (isForOf || iteratorValue) { // could be out of memory assert(!jsvIsName(iteratorValue)); jsvReplaceWithOrAddToRoot(forStatement, iteratorValue); if (iteratorValue!=loopIndexVar) jsvUnLock(iteratorValue); jslSeekToP(&forBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= jspeCheckBreakContinue(); } } jsvIteratorNext(&it); jsvUnLock(loopIndexVar); // if using for..in we'll skip down the prototype chain when we reach the end of the current one if (!jsvIteratorHasElement(&it) && !isForOf && foundPrototype) { jsvIteratorFree(&it); JsVar *iterable = foundPrototype; jsvIteratorNew(&it, iterable, JSIF_DEFINED_ARRAY_ElEMENTS); checkerFunction = jsvGetInternalFunctionCheckerFor(iterable); foundPrototype = jspGetBuiltinPrototype(iterable); jsvUnLock(iterable); } } assert(!foundPrototype); jsvIteratorFree(&it); } else if (!jsvIsUndefined(array)) { jsExceptionHere(JSET_ERROR, "FOR loop can only iterate over Arrays, Strings or Objects, not %t", array); } } jslSeekToP(&forBodyEnd); jslCharPosFree(&forBodyStart); jslCharPosFree(&forBodyEnd); jsvUnLock2(forStatement, array); #else // SAVE_ON_FLASH_EXTREME if (false) { #endif // SAVE_ON_FLASH_EXTREME } else { // ----------------------------------------------- NORMAL FOR LOOP #ifdef JSPARSE_MAX_LOOP_ITERATIONS int loopCount = JSPARSE_MAX_LOOP_ITERATIONS; #endif bool loopCond = true; bool hasHadBreak = false; jsvUnLock(forStatement); JslCharPos forCondStart; jslCharPosFromLex(&forCondStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);,0); if (lex->tk != ';') { JsVar *cond = jspeExpression(); // condition loopCond = JSP_SHOULD_EXECUTE && jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } JslCharPos forIterStart; jslCharPosFromLex(&forIterStart); JSP_MATCH_WITH_CLEANUP_AND_RETURN(';',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);,0); if (lex->tk != ')') { // we could have 'for (;;)' JSP_SAVE_EXECUTE(); jspSetNoExecute(); jsvUnLock(jspeExpression()); // iterator JSP_RESTORE_EXECUTE(); } JslCharPos forBodyStart; jslSkipWhiteSpace(); jslCharPosFromLex(&forBodyStart); // actual for body JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jslCharPosFree(&forCondStart);jslCharPosFree(&forIterStart);jslCharPosFree(&forBodyStart);,0); JSP_SAVE_EXECUTE(); if (!loopCond) jspSetNoExecute(); execInfo.execute |= EXEC_IN_LOOP; jsvUnLock(jspeBlockOrStatement()); JslCharPos forBodyEnd; jslSkipWhiteSpace(); jslCharPosNew(&forBodyEnd, lex->sourceVar, lex->tokenStart); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; if (loopCond || !JSP_SHOULD_EXECUTE) { hasHadBreak |= jspeCheckBreakContinue(); } if (!loopCond) JSP_RESTORE_EXECUTE(); if (loopCond) { jslSeekToP(&forIterStart); if (lex->tk != ')') jsvUnLock(jspeExpression()); } while (!hasHadBreak && JSP_SHOULD_EXECUTE && loopCond #ifdef JSPARSE_MAX_LOOP_ITERATIONS && loopCount-->0 #endif ) { jslSeekToP(&forCondStart); ; if (lex->tk == ';') { loopCond = true; } else { JsVar *cond = jspeExpression(); loopCond = jsvGetBoolAndUnLock(jsvSkipName(cond)); jsvUnLock(cond); } if (JSP_SHOULD_EXECUTE && loopCond) { jslSeekToP(&forBodyStart); execInfo.execute |= EXEC_IN_LOOP; jspDebuggerLoopIfCtrlC(); jsvUnLock(jspeBlockOrStatement()); if (!wasInLoop) execInfo.execute &= (JsExecFlags)~EXEC_IN_LOOP; hasHadBreak |= 
jspeCheckBreakContinue(); } if (JSP_SHOULD_EXECUTE && loopCond && !hasHadBreak) { jslSeekToP(&forIterStart); if (lex->tk != ')') jsvUnLock(jspeExpression()); } } jslSeekToP(&forBodyEnd); jslCharPosFree(&forCondStart); jslCharPosFree(&forIterStart); jslCharPosFree(&forBodyStart); jslCharPosFree(&forBodyEnd); #ifdef JSPARSE_MAX_LOOP_ITERATIONS if (loopCount<=0) { jsExceptionHere(JSET_ERROR, "FOR Loop exceeded the maximum number of iterations ("STRINGIFY(JSPARSE_MAX_LOOP_ITERATIONS)")"); } #endif } return 0; } NO_INLINE JsVar *jspeStatementTry() { // execute the try block JSP_ASSERT_MATCH(LEX_R_TRY); bool shouldExecuteBefore = JSP_SHOULD_EXECUTE; jspeBlock(); bool hadException = shouldExecuteBefore && ((execInfo.execute & EXEC_EXCEPTION)!=0); bool hadCatch = false; if (lex->tk == LEX_R_CATCH) { JSP_ASSERT_MATCH(LEX_R_CATCH); hadCatch = true; JSP_MATCH('('); JsVar *scope = 0; JsVar *exceptionVar = 0; if (hadException) { scope = jsvNewObject(); if (scope) exceptionVar = jsvFindChildFromString(scope, jslGetTokenValueAsString(), true); } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID,jsvUnLock2(scope,exceptionVar),0); JSP_MATCH_WITH_CLEANUP_AND_RETURN(')',jsvUnLock2(scope,exceptionVar),0); if (exceptionVar) { // set the exception var up properly JsVar *exception = jspGetException(); if (exception) { jsvSetValueOfName(exceptionVar, exception); jsvUnLock(exception); } // Now clear the exception flag (it's handled - we hope!) execInfo.execute = execInfo.execute & (JsExecFlags)~(EXEC_EXCEPTION|EXEC_ERROR_LINE_REPORTED); jsvUnLock(exceptionVar); } if (shouldExecuteBefore && !hadException) { JSP_SAVE_EXECUTE(); jspSetNoExecute(); jspeBlock(); JSP_RESTORE_EXECUTE(); } else { if (!scope || jspeiAddScope(scope)) { jspeBlock(); if (scope) jspeiRemoveScope(); } } jsvUnLock(scope); } if (lex->tk == LEX_R_FINALLY || (!hadCatch && ((execInfo.execute&(EXEC_ERROR|EXEC_INTERRUPTED))==0))) { JSP_MATCH(LEX_R_FINALLY); // clear the exception flag - but only momentarily! if (hadException) execInfo.execute = execInfo.execute & (JsExecFlags)~EXEC_EXCEPTION; jspeBlock(); // put the flag back! if (hadException && !hadCatch) execInfo.execute = execInfo.execute | EXEC_EXCEPTION; } return 0; } NO_INLINE JsVar *jspeStatementReturn() { JsVar *result = 0; JSP_ASSERT_MATCH(LEX_R_RETURN); if (lex->tk != ';' && lex->tk != '}') { // we only want the value, so skip the name if there was one result = jsvSkipNameAndUnLock(jspeExpression()); } if (JSP_SHOULD_EXECUTE) { JsVar *resultVar = jspeiFindInScopes(JSPARSE_RETURN_VAR); if (resultVar) { jsvReplaceWith(resultVar, result); jsvUnLock(resultVar); execInfo.execute |= EXEC_RETURN; // Stop anything else in this function executing } else { jsExceptionHere(JSET_SYNTAXERROR, "RETURN statement, but not in a function.\n"); } } jsvUnLock(result); return 0; } NO_INLINE JsVar *jspeStatementThrow() { JsVar *result = 0; JSP_ASSERT_MATCH(LEX_R_THROW); result = jsvSkipNameAndUnLock(jspeExpression()); if (JSP_SHOULD_EXECUTE) { jspSetException(result); // Stop anything else in this function executing } jsvUnLock(result); return 0; } NO_INLINE JsVar *jspeStatementFunctionDecl(bool isClass) { JsVar *funcName = 0; JsVar *funcVar; #ifndef SAVE_ON_FLASH JSP_ASSERT_MATCH(isClass ? 
LEX_R_CLASS : LEX_R_FUNCTION); #else JSP_ASSERT_MATCH(LEX_R_FUNCTION); #endif bool actuallyCreateFunction = JSP_SHOULD_EXECUTE; if (actuallyCreateFunction) { funcName = jsvMakeIntoVariableName(jslGetTokenValueAsVar(), 0); if (!funcName) { // out of memory return 0; } } JSP_MATCH_WITH_CLEANUP_AND_RETURN(LEX_ID, jsvUnLock(funcName), 0); #ifndef SAVE_ON_FLASH funcVar = isClass ? jspeClassDefinition(false) : jspeFunctionDefinition(false); #else funcVar = jspeFunctionDefinition(false); #endif if (actuallyCreateFunction) { // find a function with the same name (or make one) // OPT: can Find* use just a JsVar that is a 'name'? JsVar *existingName = jspeiFindNameOnTop(funcName, true); JsVar *existingFunc = jsvSkipName(existingName); if (jsvIsFunction(existingFunc)) { // 'proper' replace, that keeps the original function var and swaps the children funcVar = jsvSkipNameAndUnLock(funcVar); jswrap_function_replaceWith(existingFunc, funcVar); } else { jsvReplaceWith(existingName, funcVar); } jsvUnLock(funcName); funcName = existingName; jsvUnLock(existingFunc); // existingName is used - don't UnLock } jsvUnLock(funcVar); return funcName; } NO_INLINE JsVar *jspeStatement() { #ifdef USE_DEBUGGER if (execInfo.execute&EXEC_DEBUGGER_NEXT_LINE && lex->tk!=';' && JSP_SHOULD_EXECUTE) { lex->tokenLastStart = lex->tokenStart; jsiDebuggerLoop(); } #endif if (lex->tk==LEX_ID || lex->tk==LEX_INT || lex->tk==LEX_FLOAT || lex->tk==LEX_STR || lex->tk==LEX_TEMPLATE_LITERAL || lex->tk==LEX_REGEX || lex->tk==LEX_R_NEW || lex->tk==LEX_R_NULL || lex->tk==LEX_R_UNDEFINED || lex->tk==LEX_R_TRUE || lex->tk==LEX_R_FALSE || lex->tk==LEX_R_THIS || lex->tk==LEX_R_DELETE || lex->tk==LEX_R_TYPEOF || lex->tk==LEX_R_VOID || lex->tk==LEX_R_SUPER || lex->tk==LEX_PLUSPLUS || lex->tk==LEX_MINUSMINUS || lex->tk=='!' || lex->tk=='-' || lex->tk=='+' || lex->tk=='~' || lex->tk=='[' || lex->tk=='(') { /* Execute a simple statement that only contains basic arithmetic... 
*/ return jspeExpression(); } else if (lex->tk=='{') { /* A block of code */ if (!jspCheckStackPosition()) return 0; jspeBlock(); return 0; } else if (lex->tk==';') { /* Empty statement - to allow things like ;;; */ JSP_ASSERT_MATCH(';'); return 0; } else if (lex->tk==LEX_R_VAR || lex->tk==LEX_R_LET || lex->tk==LEX_R_CONST) { return jspeStatementVar(); } else if (lex->tk==LEX_R_IF) { return jspeStatementIf(); } else if (lex->tk==LEX_R_DO) { return jspeStatementDoOrWhile(false); } else if (lex->tk==LEX_R_WHILE) { return jspeStatementDoOrWhile(true); } else if (lex->tk==LEX_R_FOR) { return jspeStatementFor(); } else if (lex->tk==LEX_R_TRY) { return jspeStatementTry(); } else if (lex->tk==LEX_R_RETURN) { return jspeStatementReturn(); } else if (lex->tk==LEX_R_THROW) { return jspeStatementThrow(); } else if (lex->tk==LEX_R_FUNCTION) { return jspeStatementFunctionDecl(false/* function */); #ifndef SAVE_ON_FLASH } else if (lex->tk==LEX_R_CLASS) { return jspeStatementFunctionDecl(true/* class */); #endif } else if (lex->tk==LEX_R_CONTINUE) { JSP_ASSERT_MATCH(LEX_R_CONTINUE); if (JSP_SHOULD_EXECUTE) { if (!(execInfo.execute & EXEC_IN_LOOP)) jsExceptionHere(JSET_SYNTAXERROR, "CONTINUE statement outside of FOR or WHILE loop"); else execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_CONTINUE; } } else if (lex->tk==LEX_R_BREAK) { JSP_ASSERT_MATCH(LEX_R_BREAK); if (JSP_SHOULD_EXECUTE) { if (!(execInfo.execute & (EXEC_IN_LOOP|EXEC_IN_SWITCH))) jsExceptionHere(JSET_SYNTAXERROR, "BREAK statement outside of SWITCH, FOR or WHILE loop"); else execInfo.execute = (execInfo.execute & (JsExecFlags)~EXEC_RUN_MASK) | EXEC_BREAK; } } else if (lex->tk==LEX_R_SWITCH) { return jspeStatementSwitch(); } else if (lex->tk==LEX_R_DEBUGGER) { JSP_ASSERT_MATCH(LEX_R_DEBUGGER); #ifdef USE_DEBUGGER if (JSP_SHOULD_EXECUTE) jsiDebuggerLoop(); #endif } else JSP_MATCH(LEX_EOF); return 0; } // ----------------------------------------------------------------------------- /// Create a new built-in object that jswrapper can use to check for built-in functions JsVar *jspNewBuiltin(const char *instanceOf) { JsVar *objFunc = jswFindBuiltInFunction(0, instanceOf); if (!objFunc) return 0; // out of memory return objFunc; } /// Create a new Class of the given instance and return its prototype (as a name 'prototype') NO_INLINE JsVar *jspNewPrototype(const char *instanceOf) { JsVar *objFuncName = jsvFindChildFromString(execInfo.root, instanceOf, true); if (!objFuncName) // out of memory return 0; JsVar *objFunc = jsvSkipName(objFuncName); if (!objFunc) { objFunc = jspNewBuiltin(instanceOf); if (!objFunc) { // out of memory jsvUnLock(objFuncName); return 0; } // set up name jsvSetValueOfName(objFuncName, objFunc); } JsVar *prototypeName = jsvFindChildFromString(objFunc, JSPARSE_PROTOTYPE_VAR, true); jspEnsureIsPrototype(objFunc, prototypeName); // make sure it's an object jsvUnLock2(objFunc, objFuncName); return prototypeName; } /** Create a new object of the given instance and add it to root with name 'name'. 
* If name!=0, added to root with name, and the name is returned * If name==0, not added to root and Object itself returned */ NO_INLINE JsVar *jspNewObject(const char *name, const char *instanceOf) { JsVar *prototypeName = jspNewPrototype(instanceOf); JsVar *obj = jsvNewObject(); if (!obj) { // out of memory jsvUnLock(prototypeName); return 0; } if (name) { // If it's a device, set the device number up as the Object data // See jsiGetDeviceFromClass IOEventFlags device = jshFromDeviceString(name); if (device!=EV_NONE) { obj->varData.str[0] = 'D'; obj->varData.str[1] = 'E'; obj->varData.str[2] = 'V'; obj->varData.str[3] = (char)device; } } // add __proto__ JsVar *prototypeVar = jsvSkipName(prototypeName); jsvUnLock3(jsvAddNamedChild(obj, prototypeVar, JSPARSE_INHERITS_VAR), prototypeVar, prototypeName);prototypeName=0; if (name) { JsVar *objName = jsvFindChildFromString(execInfo.root, name, true); if (objName) jsvSetValueOfName(objName, obj); jsvUnLock(obj); if (!objName) { // out of memory return 0; } return objName; } else return obj; } /** Returns true if the constructor function given is the same as that * of the object with the given name. */ bool jspIsConstructor(JsVar *constructor, const char *constructorName) { JsVar *objFunc = jsvObjectGetChild(execInfo.root, constructorName, 0); if (!objFunc) return false; bool isConstructor = objFunc == constructor; jsvUnLock(objFunc); return isConstructor; } /** Get the prototype of the given object, or return 0 if not found, or not an object */ JsVar *jspGetPrototype(JsVar *object) { if (!jsvIsObject(object)) return 0; JsVar *proto = jsvObjectGetChild(object, JSPARSE_INHERITS_VAR, 0); if (jsvIsObject(proto)) return proto; jsvUnLock(proto); return 0; } /** Get the constructor of the given object, or return 0 if not found, or not a function */ JsVar *jspGetConstructor(JsVar *object) { JsVar *proto = jspGetPrototype(object); if (proto) { JsVar *constr = jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0); if (jsvIsFunction(constr)) { jsvUnLock(proto); return constr; } jsvUnLock2(constr, proto); } return 0; } // ----------------------------------------------------------------------------- void jspSoftInit() { execInfo.root = jsvFindOrCreateRoot(); // Root now has a lock and a ref execInfo.hiddenRoot = jsvObjectGetChild(execInfo.root, JS_HIDDEN_CHAR_STR, JSV_OBJECT); execInfo.execute = EXEC_YES; } void jspSoftKill() { jsvUnLock(execInfo.scopesVar); execInfo.scopesVar = 0; jsvUnLock(execInfo.hiddenRoot); execInfo.hiddenRoot = 0; jsvUnLock(execInfo.root); execInfo.root = 0; // Root is now left with just a ref } void jspInit() { jspSoftInit(); } void jspKill() { jspSoftKill(); // Unreffing this should completely kill everything attached to root JsVar *r = jsvFindOrCreateRoot(); jsvUnRef(r); jsvUnLock(r); } /** Evaluate the given variable as an expression (in current scope) */ JsVar *jspEvaluateExpressionVar(JsVar *str) { JsLex lex; assert(jsvIsString(str)); JsLex *oldLex = jslSetLex(&lex); jslInit(str); #ifndef ESPR_NO_LINE_NUMBERS lex.lineNumberOffset = oldLex->lineNumberOffset; #endif // actually do the parsing JsVar *v = jspeExpression(); jslKill(); jslSetLex(oldLex); return jsvSkipNameAndUnLock(v); } /** Execute code form a variable and return the result. 
If lineNumberOffset * is nonzero it's added to the line numbers that get reported for errors/debug */ JsVar *jspEvaluateVar(JsVar *str, JsVar *scope, uint16_t lineNumberOffset) { JsLex lex; assert(jsvIsString(str)); JsLex *oldLex = jslSetLex(&lex); jslInit(str); #ifndef ESPR_NO_LINE_NUMBERS lex.lineNumberOffset = lineNumberOffset; #endif JsExecInfo oldExecInfo = execInfo; execInfo.execute = EXEC_YES; if (scope) { // if we're adding a scope, make sure it's the *only* scope execInfo.scopesVar = 0; if (scope!=execInfo.root) jspeiAddScope(scope); // it's searched by default anyway } // actually do the parsing JsVar *v = jspParse(); // clean up if (scope) jspeiClearScopes(); jslKill(); jslSetLex(oldLex); // restore state and execInfo (keep error flags & ctrl-c) oldExecInfo.execute |= execInfo.execute & EXEC_PERSIST; execInfo = oldExecInfo; // It may have returned a reference, but we just want the value... return jsvSkipNameAndUnLock(v); } JsVar *jspEvaluate(const char *str, bool stringIsStatic) { /* using a memory area is more efficient, but the interpreter * may use substrings from it for function code. This means that * if the string goes away, everything gets corrupted - hence * the option here. */ JsVar *evCode; if (stringIsStatic) evCode = jsvNewNativeString((char*)str, strlen(str)); else evCode = jsvNewFromString(str); if (!evCode) return 0; JsVar *v = 0; if (!jsvIsMemoryFull()) v = jspEvaluateVar(evCode, 0, 0); jsvUnLock(evCode); return v; } JsVar *jspExecuteJSFunction(const char *jsCode, JsVar *thisArg, int argCount, JsVar **argPtr) { JsVar *fn = jspEvaluate(jsCode,true); JsVar *result = jspExecuteFunction(fn,thisArg,argCount,argPtr); jsvUnLock(fn); return result; } JsVar *jspExecuteFunction(JsVar *func, JsVar *thisArg, int argCount, JsVar **argPtr) { JsExecInfo oldExecInfo = execInfo; execInfo.scopesVar = 0; execInfo.execute = EXEC_YES; execInfo.thisVar = 0; JsVar *result = jspeFunctionCall(func, 0, thisArg, false, argCount, argPtr); // clean up jspeiClearScopes(); // restore state and execInfo (keep error flags & ctrl-c) oldExecInfo.execute |= execInfo.execute&EXEC_PERSIST; jspeiClearScopes(); execInfo = oldExecInfo; return result; } /// Evaluate a JavaScript module and return its exports JsVar *jspEvaluateModule(JsVar *moduleContents) { assert(jsvIsString(moduleContents) || jsvIsFunction(moduleContents)); if (jsvIsFunction(moduleContents)) { moduleContents = jsvObjectGetChild(moduleContents,JSPARSE_FUNCTION_CODE_NAME,0); if (!jsvIsString(moduleContents)) { jsvUnLock(moduleContents); return 0; } } else jsvLockAgain(moduleContents); JsVar *scope = jsvNewObject(); JsVar *scopeExports = jsvNewObject(); if (!scope || !scopeExports) { // out of mem jsvUnLock3(scope, scopeExports, moduleContents); return 0; } JsVar *exportsName = jsvAddNamedChild(scope, scopeExports, "exports"); jsvUnLock2(scopeExports, jsvAddNamedChild(scope, scope, "module")); JsExecFlags oldExecute = execInfo.execute; JsVar *oldThisVar = execInfo.thisVar; execInfo.thisVar = scopeExports; // set 'this' variable to exports jsvUnLock(jspEvaluateVar(moduleContents, scope, 0)); execInfo.thisVar = oldThisVar; execInfo.execute = oldExecute; // make sure we fully restore state after parsing a module jsvUnLock2(moduleContents, scope); return jsvSkipNameAndUnLock(exportsName); } /** Get the owner of the current prototype. We assume that it's * the first item in the array, because that's what we will * have added when we created it. It's safe to call this on * non-prototypes and non-objects. 
*/ JsVar *jspGetPrototypeOwner(JsVar *proto) { if (jsvIsObject(proto) || jsvIsArray(proto)) { return jsvSkipNameAndUnLock(jsvObjectGetChild(proto, JSPARSE_CONSTRUCTOR_VAR, 0)); } return 0; }
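A minimal embedding sketch of the evaluation entry points shown above. Only jspInit, jspKill, jspEvaluate and jsvUnLock are taken from the code in this record; the header names and the assumption that the variable store behind jsvFindOrCreateRoot() is already initialised by the surrounding firmware are illustrative, not confirmed by this file.

extern "C" {
#include "jsparse.h"  // assumed header names for the functions shown above
#include "jsvar.h"
}

void runSnippet() {
  jspInit();  // jspSoftInit(): lock root, cache hiddenRoot, execute = EXEC_YES

  // stringIsStatic=true lets the parser wrap the literal in a native string
  // (jsvNewNativeString) instead of copying it, so the literal must outlive
  // any function code parsed out of it -- see the comment in jspEvaluate.
  JsVar *result = jspEvaluate("1 + 2 * 3", true);
  if (result)
    jsvUnLock(result);  // jspEvaluate returns 0 on out-of-memory

  jspKill();  // unrefs root; everything attached to it is freed
}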
NO_INLINE JsVar *jspeFactorDelete() { JSP_ASSERT_MATCH(LEX_R_DELETE); JsVar *parent = 0; JsVar *a = jspeFactorMember(jspeFactor(), &parent); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { bool ok = false; if (jsvIsName(a) && !jsvIsNewChild(a)) { // if no parent, check in root? if (!parent && jsvIsChild(execInfo.root, a)) parent = jsvLockAgain(execInfo.root); if (jsvHasChildren(parent)) { // else remove properly. if (jsvIsArray(parent)) { // For arrays, we must make sure we don't change the length JsVarInt l = jsvGetArrayLength(parent); jsvRemoveChild(parent, a); jsvSetArrayLength(parent, l, false); } else { jsvRemoveChild(parent, a); } ok = true; } } result = jsvNewFromBool(ok); } jsvUnLock2(a, parent); return result; }
NO_INLINE JsVar *jspeFactorDelete() { JSP_ASSERT_MATCH(LEX_R_DELETE); JsVar *parent = 0; JsVar *a = jspeFactorMember(jspeFactor(), &parent); JsVar *result = 0; if (JSP_SHOULD_EXECUTE) { bool ok = false; if (jsvIsName(a) && !jsvIsNewChild(a)) { // if no parent, check in root? if (!parent && jsvIsChild(execInfo.root, a)) parent = jsvLockAgain(execInfo.root); #ifdef DEBUG if (jsvHasChildren(parent)) assert(jsvIsChild(parent, a)); #endif if (jsvHasChildren(parent) && jsvIsChild(parent, a)) { // else remove properly. /* we use jsvIsChild here just in case. delete probably isn't called that often so it pays to be safe */ if (jsvIsArray(parent)) { // For arrays, we must make sure we don't change the length JsVarInt l = jsvGetArrayLength(parent); jsvRemoveChild(parent, a); jsvSetArrayLength(parent, l, false); } else { jsvRemoveChild(parent, a); } ok = true; } } result = jsvNewFromBool(ok); } jsvUnLock2(a, parent); return result; }
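Between the two jspeFactorDelete versions above, the functional change is the extra jsvIsChild(parent, a) guard (plus a DEBUG-only assert): before the fix, `delete` would unlink a name from a parent it was never attached to, the out-of-bounds write recorded below as CVE-2022-25044 / CWE-787. A self-contained C++ sketch of the same "verify membership before unlinking" pattern on a plain singly linked list; all names here are illustrative, not Espruino's.

struct Node { int value; Node *next; };
struct List { Node *first; };

// The analogue of jsvIsChild(parent, a) in the patched code above.
bool listContains(const List &l, const Node *n) {
  for (const Node *p = l.first; p; p = p->next)
    if (p == n) return true;
  return false;
}

bool listRemoveChecked(List &l, Node *n) {
  // Refuse to touch anything unless the node really is linked into this
  // list. The pre-fix shape unlinked unconditionally, rewriting pointers it
  // did not own; as the patch comment says, delete is rare enough that the
  // extra scan "pays to be safe".
  if (!listContains(l, n))
    return false;
  Node **link = &l.first;
  while (*link != n)
    link = &(*link)->next;
  *link = n->next;   // unlink only after membership is proven
  n->next = nullptr;
  return true;
}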
{'added': [(955, ' JsVar *p = jsvSkipNameAndUnLock(jspNewPrototype(objName));'), (956, " // jspNewPrototype returns a 'prototype' name that's already a child of eg. an array"), (957, " // Create a new 'name' called __proto__ that links to it"), (958, ' JsVar *i = jsvNewFromString(JSPARSE_INHERITS_VAR);'), (959, ' if (p) child = jsvCreateNewChild(object, i, p);'), (960, ' jsvUnLock(i);'), (1380, '#ifdef DEBUG'), (1381, ' if (jsvHasChildren(parent)) assert(jsvIsChild(parent, a));'), (1382, '#endif'), (1383, ' if (jsvHasChildren(parent) && jsvIsChild(parent, a)) {'), (1385, " /* we use jsvIsChild here just in case. delete probably isn't called"), (1386, ' that often so it pays to be safe */'), (2861, "/// Create a new Class of the given instance and return its prototype (as a name 'prototype')")], 'deleted': [(955, ' child = jspNewPrototype(objName);'), (1375, ' if (jsvHasChildren(parent)) {'), (2851, '/// Create a new Class of the given instance and return its prototype')]}
13
3
2,428
16,105
26
158
8
https://github.com/espruino/Espruino
CVE-2022-25044
CWE-787
3,055
comparisons.cc
C++
tflite::ops::builtin::comparisons::GreaterEqualEval
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/comparisons.h" #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/string_util.h" namespace tflite { namespace ops { namespace builtin { namespace comparisons { namespace { constexpr int kInputTensor1 = 0; constexpr int kInputTensor2 = 1; constexpr int kOutputTensor = 0; TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node, bool is_string_allowed) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Don't support string. if (!is_string_allowed) { TF_LITE_ENSURE(context, input1->type != kTfLiteString); } // Currently only support tensors have the same type. 
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = kTfLiteBool; bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); } TfLiteStatus ComparisonPrepare(TfLiteContext* context, TfLiteNode* node) { return ComparisonPrepareCommon(context, node, false); } TfLiteStatus ComparisonPrepareStringAllowed(TfLiteContext* context, TfLiteNode* node) { return ComparisonPrepareCommon(context, node, true); } template <typename input_dtype, reference_ops::ComparisonFn<int32> opname> void ComparisonQuantized(const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) { auto input1_offset = -input1->params.zero_point; auto input2_offset = -input2->params.zero_point; const int left_shift = 8; int32 input1_multiplier; int input1_shift; QuantizeMultiplierSmallerThanOneExp(input1->params.scale, &input1_multiplier, &input1_shift); int32 input2_multiplier; int input2_shift; QuantizeMultiplierSmallerThanOneExp(input2->params.scale, &input2_multiplier, &input2_shift); ComparisonParams op_params; op_params.left_shift = left_shift; op_params.input1_offset = input1_offset; op_params.input1_multiplier = input1_multiplier; op_params.input1_shift = input1_shift; op_params.input2_offset = input2_offset; op_params.input2_multiplier = input2_multiplier; op_params.input2_shift = input2_shift; if (requires_broadcast) { reference_ops::BroadcastComparison4DSlowWithScaling<input_dtype, opname>( op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1), GetTensorShape(input2), GetTensorData<input_dtype>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } else { reference_ops::ComparisonWithScaling<input_dtype, opname>( op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1), GetTensorShape(input2), GetTensorData<input_dtype>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } } } template <typename T, reference_ops::ComparisonFn<T> opname> void Comparison(const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { ComparisonParams op_params; requires_broadcast ? 
reference_ops::BroadcastComparison4DSlowImpl<T, opname>( op_params, GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), GetTensorData<T>(input2), GetTensorShape(output), GetTensorData<bool>(output)) : reference_ops::ComparisonImpl<T, opname>( op_params, GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), GetTensorData<T>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } void ComparisonString(bool (*opname)(const StringRef&, const StringRef&), const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { bool* output_data = GetTensorData<bool>(output); if (requires_broadcast) { reference_ops::BroadcastComparison4DSlowStringImpl( opname, GetTensorShape(input1), input1, GetTensorShape(input2), input2, GetTensorShape(output), output_data); } else { reference_ops::ComparisonStringImpl(opname, GetTensorShape(input1), input1, GetTensorShape(input2), input2, GetTensorShape(output), output_data); } } TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2, output, requires_broadcast); break; 
default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus LessEqualEval(TfLiteContext* context, 
TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } } // namespace } // namespace comparisons TfLiteRegistration* Register_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepareStringAllowed, comparisons::EqualEval}; return &r; } TfLiteRegistration* Register_NOT_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepareStringAllowed, comparisons::NotEqualEval}; return &r; } TfLiteRegistration* Register_GREATER() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::GreaterEval}; return &r; } TfLiteRegistration* Register_GREATER_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::GreaterEqualEval}; return &r; } TfLiteRegistration* Register_LESS() { static TfLiteRegistration r = { nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::LessEval}; return &r; } TfLiteRegistration* Register_LESS_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::LessEqualEval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
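The Register_* functions at the end of the file above all follow one shape: a kernel is a static bundle of init/free/prepare/eval function pointers, handed out by address. A hypothetical miniature of that table pattern; none of these names are TFLite's.

struct MiniRegistration {
  void *(*init)(void *);    // optional per-node state constructor
  void (*free_fn)(void *);  // matching destructor
  int (*prepare)(void *);   // shape/type checks, output resizing
  int (*eval)(void *);      // the actual computation
};

int PrepareCmp(void *) { return 0; }
int EvalCmp(void *) { return 0; }

MiniRegistration *RegisterCmp() {
  // Static storage, returned by pointer -- the same lifetime scheme as
  // Register_GREATER_EQUAL() and friends above.
  static MiniRegistration r = {nullptr, nullptr, PrepareCmp, EvalCmp};
  return &r;
}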
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include "tensorflow/lite/kernels/internal/reference/comparisons.h" #include <stdint.h> #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #include "tensorflow/lite/string_util.h" namespace tflite { namespace ops { namespace builtin { namespace comparisons { namespace { constexpr int kInputTensor1 = 0; constexpr int kInputTensor2 = 1; constexpr int kOutputTensor = 0; TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node, bool is_string_allowed) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); // Don't support string. if (!is_string_allowed) { TF_LITE_ENSURE(context, input1->type != kTfLiteString); } // Currently only support tensors have the same type. 
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = kTfLiteBool; bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); } TfLiteStatus ComparisonPrepare(TfLiteContext* context, TfLiteNode* node) { return ComparisonPrepareCommon(context, node, false); } TfLiteStatus ComparisonPrepareStringAllowed(TfLiteContext* context, TfLiteNode* node) { return ComparisonPrepareCommon(context, node, true); } template <typename input_dtype, reference_ops::ComparisonFn<int32> opname> void ComparisonQuantized(const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) { auto input1_offset = -input1->params.zero_point; auto input2_offset = -input2->params.zero_point; const int left_shift = 8; int32 input1_multiplier; int input1_shift; QuantizeMultiplierSmallerThanOneExp(input1->params.scale, &input1_multiplier, &input1_shift); int32 input2_multiplier; int input2_shift; QuantizeMultiplierSmallerThanOneExp(input2->params.scale, &input2_multiplier, &input2_shift); ComparisonParams op_params; op_params.left_shift = left_shift; op_params.input1_offset = input1_offset; op_params.input1_multiplier = input1_multiplier; op_params.input1_shift = input1_shift; op_params.input2_offset = input2_offset; op_params.input2_multiplier = input2_multiplier; op_params.input2_shift = input2_shift; if (requires_broadcast) { reference_ops::BroadcastComparison4DSlowWithScaling<input_dtype, opname>( op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1), GetTensorShape(input2), GetTensorData<input_dtype>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } else { reference_ops::ComparisonWithScaling<input_dtype, opname>( op_params, GetTensorShape(input1), GetTensorData<input_dtype>(input1), GetTensorShape(input2), GetTensorData<input_dtype>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } } } template <typename T, reference_ops::ComparisonFn<T> opname> void Comparison(const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { ComparisonParams op_params; requires_broadcast ? 
reference_ops::BroadcastComparison4DSlowImpl<T, opname>( op_params, GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), GetTensorData<T>(input2), GetTensorShape(output), GetTensorData<bool>(output)) : reference_ops::ComparisonImpl<T, opname>( op_params, GetTensorShape(input1), GetTensorData<T>(input1), GetTensorShape(input2), GetTensorData<T>(input2), GetTensorShape(output), GetTensorData<bool>(output)); } void ComparisonString(bool (*opname)(const StringRef&, const StringRef&), const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteTensor* output, bool requires_broadcast) { bool* output_data = GetTensorData<bool>(output); if (requires_broadcast) { reference_ops::BroadcastComparison4DSlowStringImpl( opname, GetTensorShape(input1), input1, GetTensorShape(input2), input2, GetTensorShape(output), output_data); } else { reference_ops::ComparisonStringImpl(opname, GetTensorShape(input1), input1, GetTensorShape(input2), input2, GetTensorShape(output), output_data); } } TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case 
kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessFn>(input1, 
input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; } } // namespace } // namespace comparisons TfLiteRegistration* Register_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepareStringAllowed, comparisons::EqualEval}; return &r; } TfLiteRegistration* Register_NOT_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepareStringAllowed, comparisons::NotEqualEval}; return &r; } TfLiteRegistration* Register_GREATER() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::GreaterEval}; return &r; } TfLiteRegistration* Register_GREATER_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::GreaterEqualEval}; return &r; } TfLiteRegistration* Register_LESS() { static TfLiteRegistration r = { nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::LessEval}; return &r; } TfLiteRegistration* Register_LESS_EQUAL() { static TfLiteRegistration r = {nullptr, nullptr, comparisons::ComparisonPrepare, comparisons::LessEqualEval}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
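The diff between the two file versions above is mechanical: every raw GetInput/GetOutput call becomes a GetInputSafe/GetOutputSafe call whose status is checked, closing the out-of-bounds read (CWE-125) recorded below. A self-contained sketch of that accessor change; the Mini* names are hypothetical stand-ins, not the TFLite API.

enum MiniStatus { kMiniOk, kMiniError };

struct MiniTensor { float *data; int size; };

struct MiniContext {
  MiniTensor *tensors;  // backing array
  int num_tensors;
};

// Pre-fix shape: hand back a raw pointer; a bad index reads out of bounds.
MiniTensor *GetInputUnsafe(MiniContext *ctx, int index) {
  return &ctx->tensors[index];  // no validation at all
}

// Post-fix shape: validate first, report failure through the return status,
// and deliver the tensor via an out-parameter -- mirroring the
// GetInputSafe/GetOutputSafe calls in the patched file above.
MiniStatus GetInputSafeSketch(MiniContext *ctx, int index, MiniTensor **out) {
  if (ctx == nullptr || out == nullptr) return kMiniError;
  if (index < 0 || index >= ctx->num_tensors) return kMiniError;
  *out = &ctx->tensors[index];
  return kMiniOk;
}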
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor1, &input1)); const TfLiteTensor* input2; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor2, &input2)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output)); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
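Each call site in the patched GreaterEqualEval wraps the safe accessor in TF_LITE_ENSURE_OK, which bails out of the enclosing function the moment anything fails, so the switch body only ever runs on validated tensors. A hypothetical re-implementation of that early-return idiom (not TFLite's actual macro text):

enum EnsureStatus { kEnsureOk, kEnsureError };

// Evaluate 'expr'; if it did not succeed, propagate the failure out of the
// enclosing function immediately. This keeps the patched kernels flat: each
// accessor call either yields a valid tensor or aborts the Eval.
#define MINI_ENSURE_OK(expr)                                 \
  do {                                                       \
    EnsureStatus ensure_status_ = (expr);                    \
    if (ensure_status_ != kEnsureOk) return ensure_status_;  \
  } while (0)

EnsureStatus FailingStep() { return kEnsureError; }

EnsureStatus Eval() {
  MINI_ENSURE_OK(FailingStep());  // propagates kEnsureError, skipping the rest
  return kEnsureOk;               // only reached when every step succeeded
}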
{'added': [(44, ' const TfLiteTensor* input1;'), (45, ' TF_LITE_ENSURE_OK(context,'), (46, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (47, ' const TfLiteTensor* input2;'), (48, ' TF_LITE_ENSURE_OK(context,'), (49, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (50, ' TfLiteTensor* output;'), (51, ' TF_LITE_ENSURE_OK(context,'), (52, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (154, ' const TfLiteTensor* input1;'), (155, ' TF_LITE_ENSURE_OK(context,'), (156, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (157, ' const TfLiteTensor* input2;'), (158, ' TF_LITE_ENSURE_OK(context,'), (159, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (160, ' TfLiteTensor* output;'), (161, ' TF_LITE_ENSURE_OK(context,'), (162, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (204, ' const TfLiteTensor* input1;'), (205, ' TF_LITE_ENSURE_OK(context,'), (206, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (207, ' const TfLiteTensor* input2;'), (208, ' TF_LITE_ENSURE_OK(context,'), (209, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (210, ' TfLiteTensor* output;'), (211, ' TF_LITE_ENSURE_OK(context,'), (212, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (254, ' const TfLiteTensor* input1;'), (255, ' TF_LITE_ENSURE_OK(context,'), (256, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (257, ' const TfLiteTensor* input2;'), (258, ' TF_LITE_ENSURE_OK(context,'), (259, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (260, ' TfLiteTensor* output;'), (261, ' TF_LITE_ENSURE_OK(context,'), (262, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (295, ' const TfLiteTensor* input1;'), (296, ' TF_LITE_ENSURE_OK(context,'), (297, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (298, ' const TfLiteTensor* input2;'), (299, ' TF_LITE_ENSURE_OK(context,'), (300, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (301, ' TfLiteTensor* output;'), (302, ' TF_LITE_ENSURE_OK(context,'), (303, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (336, ' const TfLiteTensor* input1;'), (337, ' TF_LITE_ENSURE_OK(context,'), (338, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (339, ' const TfLiteTensor* input2;'), (340, ' TF_LITE_ENSURE_OK(context,'), (341, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (342, ' TfLiteTensor* output;'), (343, ' TF_LITE_ENSURE_OK(context,'), (344, ' GetOutputSafe(context, node, kOutputTensor, &output));'), (377, ' const TfLiteTensor* input1;'), (378, ' TF_LITE_ENSURE_OK(context,'), (379, ' GetInputSafe(context, node, kInputTensor1, &input1));'), (380, ' const TfLiteTensor* input2;'), (381, ' TF_LITE_ENSURE_OK(context,'), (382, ' GetInputSafe(context, node, kInputTensor2, &input2));'), (383, ' TfLiteTensor* output;'), (384, ' TF_LITE_ENSURE_OK(context,'), (385, ' GetOutputSafe(context, node, kOutputTensor, &output));')], 'deleted': [(44, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (45, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (46, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (148, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (149, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (150, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (192, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (193, ' const TfLiteTensor* input2 = 
GetInput(context, node, kInputTensor2);'), (194, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (236, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (237, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (238, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (271, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (272, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (273, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (306, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (307, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (308, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);'), (341, ' const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);'), (342, ' const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);'), (343, ' TfLiteTensor* output = GetOutput(context, node, kOutputTensor);')]}
63
21
416
2,500
34
208
6
https://github.com/tensorflow/tensorflow
CVE-2020-15211
CWE-125
16
unpack50mt.cpp
C++
Unpack::ProcessDecoded
#define UNP_READ_SIZE_MT 0x400000 #define UNP_BLOCKS_PER_THREAD 2 struct UnpackThreadDataList { UnpackThreadData *D; uint BlockCount; }; THREAD_PROC(UnpackDecodeThread) { UnpackThreadDataList *DL=(UnpackThreadDataList *)Data; for (uint I=0;I<DL->BlockCount;I++) DL->D->UnpackPtr->UnpackDecode(DL->D[I]); } void Unpack::InitMT() { if (ReadBufMT==NULL) { // Even getbits32 can read up to 3 additional bytes after current // and our block header and table reading code can look much further. // Let's allocate the additional space here, so we do not need to check // bounds for every bit field access. const size_t Overflow=1024; ReadBufMT=new byte[UNP_READ_SIZE_MT+Overflow]; memset(ReadBufMT,0,UNP_READ_SIZE_MT+Overflow); } if (UnpThreadData==NULL) { uint MaxItems=MaxUserThreads*UNP_BLOCKS_PER_THREAD; UnpThreadData=new UnpackThreadData[MaxItems]; memset(UnpThreadData,0,sizeof(UnpackThreadData)*MaxItems); for (uint I=0;I<MaxItems;I++) { UnpackThreadData *CurData=UnpThreadData+I; if (CurData->Decoded==NULL) { // Typical number of items in RAR blocks does not exceed 0x4000. CurData->DecodedAllocated=0x4100; // It will be freed in the object destructor, not in this file. CurData->Decoded=(UnpackDecodedItem *)malloc(CurData->DecodedAllocated*sizeof(UnpackDecodedItem)); if (CurData->Decoded==NULL) ErrHandler.MemoryError(); } } } } void Unpack::Unpack5MT(bool Solid) { InitMT(); UnpInitData(Solid); for (uint I=0;I<MaxUserThreads*UNP_BLOCKS_PER_THREAD;I++) { UnpackThreadData *CurData=UnpThreadData+I; CurData->LargeBlock=false; CurData->Incomplete=false; } UnpThreadData[0].BlockHeader=BlockHeader; UnpThreadData[0].BlockTables=BlockTables; uint LastBlockNum=0; int DataSize=0; int BlockStart=0; // 'true' if we found a block too large for multithreaded extraction, // so we switched to single threaded mode until the end of file. // Large blocks could cause too high memory use in multithreaded mode. bool LargeBlock=false; bool Done=false; while (!Done) { // Data amount, which is guaranteed to fit block header and tables, // so we can safely read them without additional checks. const int TooSmallToProcess=1024; int ReadSize=UnpIO->UnpRead(ReadBufMT+DataSize,(UNP_READ_SIZE_MT-DataSize)&~0xf); if (ReadSize<0) break; DataSize+=ReadSize; if (DataSize==0) break; // First read chunk can be small if we are near the end of volume // and we want it to fit block header and tables. if (ReadSize>0 && DataSize<TooSmallToProcess) continue; while (BlockStart<DataSize && !Done) { uint BlockNumber=0,BlockNumberMT=0; while (BlockNumber<MaxUserThreads*UNP_BLOCKS_PER_THREAD) { UnpackThreadData *CurData=UnpThreadData+BlockNumber; LastBlockNum=BlockNumber; CurData->UnpackPtr=this; // 'Incomplete' thread is present. This is a thread processing block // in the end of buffer, split between two read operations. if (CurData->Incomplete) CurData->DataSize=DataSize; else { CurData->Inp.SetExternalBuffer(ReadBufMT+BlockStart); CurData->Inp.InitBitInput(); CurData->DataSize=DataSize-BlockStart; if (CurData->DataSize==0) break; CurData->DamagedData=false; CurData->HeaderRead=false; CurData->TableRead=false; } // We should not use 'last block in file' block flag here unless // we'll check the block size, because even if block is last in file, // it can exceed the current buffer and require more reading. 
CurData->NoDataLeft=(ReadSize==0); CurData->Incomplete=false; CurData->ThreadNumber=BlockNumber; if (!CurData->HeaderRead) { CurData->HeaderRead=true; if (!ReadBlockHeader(CurData->Inp,CurData->BlockHeader) || !CurData->BlockHeader.TablePresent && !TablesRead5) { Done=true; break; } TablesRead5=true; } // To prevent too high memory use we switch to single threaded mode // if block exceeds this size. Typically RAR blocks do not exceed // 64 KB, so this protection should not affect most of valid archives. const int LargeBlockSize=0x20000; if (LargeBlock || CurData->BlockHeader.BlockSize>LargeBlockSize) LargeBlock=CurData->LargeBlock=true; else BlockNumberMT++; // Number of normal blocks processed in MT mode. BlockStart+=CurData->BlockHeader.HeaderSize+CurData->BlockHeader.BlockSize; BlockNumber++; int DataLeft=DataSize-BlockStart; if (DataLeft>=0 && CurData->BlockHeader.LastBlockInFile) break; // For second and following threads we move smaller blocks to buffer // start to ensure that we have enough data to fit block header // and tables. if (DataLeft<TooSmallToProcess) break; } //#undef USE_THREADS UnpackThreadDataList UTDArray[MaxPoolThreads]; uint UTDArrayPos=0; uint MaxBlockPerThread=BlockNumberMT/MaxUserThreads; if (BlockNumberMT%MaxUserThreads!=0) MaxBlockPerThread++; // Decode all normal blocks until the first 'large' if any. for (uint CurBlock=0;CurBlock<BlockNumberMT;CurBlock+=MaxBlockPerThread) { UnpackThreadDataList *UTD=UTDArray+UTDArrayPos++; UTD->D=UnpThreadData+CurBlock; UTD->BlockCount=Min(MaxBlockPerThread,BlockNumberMT-CurBlock); #ifdef USE_THREADS if (BlockNumber==1) UnpackDecode(*UTD->D); else UnpThreadPool->AddTask(UnpackDecodeThread,(void*)UTD); #else for (uint I=0;I<UTD->BlockCount;I++) UnpackDecode(UTD->D[I]); #endif } if (BlockNumber==0) break; #ifdef USE_THREADS UnpThreadPool->WaitDone(); #endif bool IncompleteThread=false; for (uint Block=0;Block<BlockNumber;Block++) { UnpackThreadData *CurData=UnpThreadData+Block; if (!CurData->LargeBlock && !ProcessDecoded(*CurData) || CurData->LargeBlock && !UnpackLargeBlock(*CurData) || CurData->DamagedData) { Done=true; break; } if (CurData->Incomplete) { int BufPos=int(CurData->Inp.InBuf+CurData->Inp.InAddr-ReadBufMT); if (DataSize<=BufPos) // Thread exceeded input buffer boundary. { Done=true; break; } IncompleteThread=true; memmove(ReadBufMT,ReadBufMT+BufPos,DataSize-BufPos); CurData->BlockHeader.BlockSize-=CurData->Inp.InAddr-CurData->BlockHeader.BlockStart; CurData->BlockHeader.HeaderSize=0; CurData->BlockHeader.BlockStart=0; CurData->Inp.InBuf=ReadBufMT; CurData->Inp.InAddr=0; if (Block!=0) { // Move the incomplete thread entry to the first position, // so we'll start processing from it. Preserve the original // buffer for decoded data. UnpackDecodedItem *Decoded=UnpThreadData[0].Decoded; uint DecodedAllocated=UnpThreadData[0].DecodedAllocated; UnpThreadData[0]=*CurData; UnpThreadData[0].Decoded=Decoded; UnpThreadData[0].DecodedAllocated=DecodedAllocated; CurData->Incomplete=false; } BlockStart=0; DataSize-=BufPos; break; } else if (CurData->BlockHeader.LastBlockInFile) { Done=true; break; } } if (IncompleteThread || Done) break; // Current buffer is done, read more data or quit. else { int DataLeft=DataSize-BlockStart; if (DataLeft<TooSmallToProcess) { if (DataLeft<0) // Invalid data, must not happen in valid archive. { Done=true; break; } // If we do not have incomplete thread and have some data // in the end of buffer, too small for single thread, // let's move it to beginning of next buffer. 
if (DataLeft>0) memmove(ReadBufMT,ReadBufMT+BlockStart,DataLeft); DataSize=DataLeft; BlockStart=0; break; // Current buffer is done, try to read more data. } } } } UnpPtr&=MaxWinMask; // ProcessDecoded and maybe others can leave UnpPtr > MaxWinMask here. UnpWriteBuf(); BlockHeader=UnpThreadData[LastBlockNum].BlockHeader; BlockTables=UnpThreadData[LastBlockNum].BlockTables; } // Decode Huffman block and save decoded data to memory. void Unpack::UnpackDecode(UnpackThreadData &D) { if (!D.TableRead) { D.TableRead=true; if (!ReadTables(D.Inp,D.BlockHeader,D.BlockTables)) { D.DamagedData=true; return; } } if (D.Inp.InAddr>D.BlockHeader.HeaderSize+D.BlockHeader.BlockSize) { D.DamagedData=true; return; } D.DecodedSize=0; int BlockBorder=D.BlockHeader.BlockStart+D.BlockHeader.BlockSize-1; // Reserve enough space even for filter entry. int DataBorder=D.DataSize-16; int ReadBorder=Min(BlockBorder,DataBorder); while (true) { if (D.Inp.InAddr>=ReadBorder) { if (D.Inp.InAddr>BlockBorder || D.Inp.InAddr==BlockBorder && D.Inp.InBit>=D.BlockHeader.BlockBitSize) break; // If we do not have any more data in file to read, we must process // what we have until last byte. Otherwise we can return and append // more data to unprocessed few bytes. if ((D.Inp.InAddr>=DataBorder) && !D.NoDataLeft || D.Inp.InAddr>=D.DataSize) { D.Incomplete=true; break; } } if (D.DecodedSize>D.DecodedAllocated-8) // Filter can use several slots. { D.DecodedAllocated=D.DecodedAllocated*2; void *Decoded=realloc(D.Decoded,D.DecodedAllocated*sizeof(UnpackDecodedItem)); if (Decoded==NULL) ErrHandler.MemoryError(); // D.Decoded will be freed in the destructor. D.Decoded=(UnpackDecodedItem *)Decoded; } UnpackDecodedItem *CurItem=D.Decoded+D.DecodedSize++; uint MainSlot=DecodeNumber(D.Inp,&D.BlockTables.LD); if (MainSlot<256) { if (D.DecodedSize>1) { UnpackDecodedItem *PrevItem=CurItem-1; if (PrevItem->Type==UNPDT_LITERAL && PrevItem->Length<3) { PrevItem->Length++; PrevItem->Literal[PrevItem->Length]=(byte)MainSlot; D.DecodedSize--; continue; } } CurItem->Type=UNPDT_LITERAL; CurItem->Literal[0]=(byte)MainSlot; CurItem->Length=0; continue; } if (MainSlot>=262) { uint Length=SlotToLength(D.Inp,MainSlot-262); uint DBits,Distance=1,DistSlot=DecodeNumber(D.Inp,&D.BlockTables.DD); if (DistSlot<4) { DBits=0; Distance+=DistSlot; } else { DBits=DistSlot/2 - 1; Distance+=(2 | (DistSlot & 1)) << DBits; } if (DBits>0) { if (DBits>=4) { if (DBits>4) { Distance+=((D.Inp.getbits32()>>(36-DBits))<<4); D.Inp.addbits(DBits-4); } uint LowDist=DecodeNumber(D.Inp,&D.BlockTables.LDD); Distance+=LowDist; } else { Distance+=D.Inp.getbits32()>>(32-DBits); D.Inp.addbits(DBits); } } if (Distance>0x100) { Length++; if (Distance>0x2000) { Length++; if (Distance>0x40000) Length++; } } CurItem->Type=UNPDT_MATCH; CurItem->Length=(ushort)Length; CurItem->Distance=Distance; continue; } if (MainSlot==256) { UnpackFilter Filter; ReadFilter(D.Inp,Filter); CurItem->Type=UNPDT_FILTER; CurItem->Length=Filter.Type; CurItem->Distance=Filter.BlockStart; CurItem=D.Decoded+D.DecodedSize++; CurItem->Type=UNPDT_FILTER; CurItem->Length=Filter.Channels; CurItem->Distance=Filter.BlockLength; continue; } if (MainSlot==257) { CurItem->Type=UNPDT_FULLREP; continue; } if (MainSlot<262) { CurItem->Type=UNPDT_REP; CurItem->Distance=MainSlot-258; uint LengthSlot=DecodeNumber(D.Inp,&D.BlockTables.RD); uint Length=SlotToLength(D.Inp,LengthSlot); CurItem->Length=(ushort)Length; continue; } } } // Process decoded Huffman block data. 
bool Unpack::ProcessDecoded(UnpackThreadData &D) { UnpackDecodedItem *Item=D.Decoded,*Border=D.Decoded+D.DecodedSize; while (Item<Border) { UnpPtr&=MaxWinMask; if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_LZ_MATCH+3 && WriteBorder!=UnpPtr) { UnpWriteBuf(); if (WrittenFileSize>DestUnpSize) return false; } if (Item->Type==UNPDT_LITERAL) { #if defined(LITTLE_ENDIAN) && defined(ALLOW_MISALIGNED) if (Item->Length==3 && UnpPtr<MaxWinSize-4) { *(uint32 *)(Window+UnpPtr)=*(uint32 *)Item->Literal; UnpPtr+=4; } else #endif for (uint I=0;I<=Item->Length;I++) Window[UnpPtr++ & MaxWinMask]=Item->Literal[I]; } else if (Item->Type==UNPDT_MATCH) { InsertOldDist(Item->Distance); LastLength=Item->Length; CopyString(Item->Length,Item->Distance); } else if (Item->Type==UNPDT_REP) { uint Distance=OldDist[Item->Distance]; for (uint I=Item->Distance;I>0;I--) OldDist[I]=OldDist[I-1]; OldDist[0]=Distance; LastLength=Item->Length; CopyString(Item->Length,Distance); } else if (Item->Type==UNPDT_FULLREP) { if (LastLength!=0) CopyString(LastLength,OldDist[0]); } else if (Item->Type==UNPDT_FILTER) { UnpackFilter Filter; Filter.Type=(byte)Item->Length; Filter.BlockStart=Item->Distance; Item++; Filter.Channels=(byte)Item->Length; Filter.BlockLength=Item->Distance; AddFilter(Filter); } Item++; } return true; } // For large blocks we decode and process in same function in single threaded // mode, so we do not need to store intermediate data in memory. bool Unpack::UnpackLargeBlock(UnpackThreadData &D) { if (!D.TableRead) { D.TableRead=true; if (!ReadTables(D.Inp,D.BlockHeader,D.BlockTables)) { D.DamagedData=true; return false; } } if (D.Inp.InAddr>D.BlockHeader.HeaderSize+D.BlockHeader.BlockSize) { D.DamagedData=true; return false; } int BlockBorder=D.BlockHeader.BlockStart+D.BlockHeader.BlockSize-1; // Reserve enough space even for filter entry. int DataBorder=D.DataSize-16; int ReadBorder=Min(BlockBorder,DataBorder); while (true) { UnpPtr&=MaxWinMask; if (D.Inp.InAddr>=ReadBorder) { if (D.Inp.InAddr>BlockBorder || D.Inp.InAddr==BlockBorder && D.Inp.InBit>=D.BlockHeader.BlockBitSize) break; // If we do not have any more data in file to read, we must process // what we have until last byte. Otherwise we can return and append // more data to unprocessed few bytes. 
if ((D.Inp.InAddr>=DataBorder) && !D.NoDataLeft || D.Inp.InAddr>=D.DataSize) { D.Incomplete=true; break; } } if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_LZ_MATCH+3 && WriteBorder!=UnpPtr) { UnpWriteBuf(); if (WrittenFileSize>DestUnpSize) return false; } uint MainSlot=DecodeNumber(D.Inp,&D.BlockTables.LD); if (MainSlot<256) { Window[UnpPtr++]=(byte)MainSlot; continue; } if (MainSlot>=262) { uint Length=SlotToLength(D.Inp,MainSlot-262); uint DBits,Distance=1,DistSlot=DecodeNumber(D.Inp,&D.BlockTables.DD); if (DistSlot<4) { DBits=0; Distance+=DistSlot; } else { DBits=DistSlot/2 - 1; Distance+=(2 | (DistSlot & 1)) << DBits; } if (DBits>0) { if (DBits>=4) { if (DBits>4) { Distance+=((D.Inp.getbits32()>>(36-DBits))<<4); D.Inp.addbits(DBits-4); } uint LowDist=DecodeNumber(D.Inp,&D.BlockTables.LDD); Distance+=LowDist; } else { Distance+=D.Inp.getbits32()>>(32-DBits); D.Inp.addbits(DBits); } } if (Distance>0x100) { Length++; if (Distance>0x2000) { Length++; if (Distance>0x40000) Length++; } } InsertOldDist(Distance); LastLength=Length; CopyString(Length,Distance); continue; } if (MainSlot==256) { UnpackFilter Filter; if (!ReadFilter(D.Inp,Filter) || !AddFilter(Filter)) break; continue; } if (MainSlot==257) { if (LastLength!=0) CopyString(LastLength,OldDist[0]); continue; } if (MainSlot<262) { uint DistNum=MainSlot-258; uint Distance=OldDist[DistNum]; for (uint I=DistNum;I>0;I--) OldDist[I]=OldDist[I-1]; OldDist[0]=Distance; uint LengthSlot=DecodeNumber(D.Inp,&D.BlockTables.RD); uint Length=SlotToLength(D.Inp,LengthSlot); LastLength=Length; CopyString(Length,Distance); continue; } } return true; }
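InitMT above spells out why ReadBufMT is over-allocated: getbits32 can peek up to 3 bytes past the current position, and header/table parsing looks further still, so padding the buffer once is cheaper than bounds-checking every bit read. A self-contained sketch of that padded-buffer pattern; the names and the peek helper are illustrative.

#include <cstddef>
#include <cstdint>
#include <cstring>

struct PaddedBuf {
  static const size_t kDataSize = 0x400000;  // UNP_READ_SIZE_MT above
  static const size_t kOverflow = 1024;      // slack for look-ahead reads
  uint8_t *mem;

  PaddedBuf() : mem(new uint8_t[kDataSize + kOverflow]) {
    std::memset(mem, 0, kDataSize + kOverflow);  // look-ahead reads see zeros
  }
  ~PaddedBuf() { delete[] mem; }

  // Reading 4 bytes at 'pos' is safe for any pos <= kDataSize, even when
  // fewer than 4 real data bytes remain -- the padding absorbs the peek.
  uint32_t peek32(size_t pos) const {
    return (uint32_t(mem[pos]) << 24) | (uint32_t(mem[pos + 1]) << 16) |
           (uint32_t(mem[pos + 2]) << 8) | uint32_t(mem[pos + 3]);
  }
};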
#define UNP_READ_SIZE_MT 0x400000 #define UNP_BLOCKS_PER_THREAD 2 struct UnpackThreadDataList { UnpackThreadData *D; uint BlockCount; }; THREAD_PROC(UnpackDecodeThread) { UnpackThreadDataList *DL=(UnpackThreadDataList *)Data; for (uint I=0;I<DL->BlockCount;I++) DL->D->UnpackPtr->UnpackDecode(DL->D[I]); } void Unpack::InitMT() { if (ReadBufMT==NULL) { // Even getbits32 can read up to 3 additional bytes after current // and our block header and table reading code can look much further. // Let's allocate the additional space here, so we do not need to check // bounds for every bit field access. const size_t Overflow=1024; ReadBufMT=new byte[UNP_READ_SIZE_MT+Overflow]; memset(ReadBufMT,0,UNP_READ_SIZE_MT+Overflow); } if (UnpThreadData==NULL) { uint MaxItems=MaxUserThreads*UNP_BLOCKS_PER_THREAD; UnpThreadData=new UnpackThreadData[MaxItems]; memset(UnpThreadData,0,sizeof(UnpackThreadData)*MaxItems); for (uint I=0;I<MaxItems;I++) { UnpackThreadData *CurData=UnpThreadData+I; if (CurData->Decoded==NULL) { // Typical number of items in RAR blocks does not exceed 0x4000. CurData->DecodedAllocated=0x4100; // It will be freed in the object destructor, not in this file. CurData->Decoded=(UnpackDecodedItem *)malloc(CurData->DecodedAllocated*sizeof(UnpackDecodedItem)); if (CurData->Decoded==NULL) ErrHandler.MemoryError(); } } } } void Unpack::Unpack5MT(bool Solid) { InitMT(); UnpInitData(Solid); for (uint I=0;I<MaxUserThreads*UNP_BLOCKS_PER_THREAD;I++) { UnpackThreadData *CurData=UnpThreadData+I; CurData->LargeBlock=false; CurData->Incomplete=false; } UnpThreadData[0].BlockHeader=BlockHeader; UnpThreadData[0].BlockTables=BlockTables; uint LastBlockNum=0; int DataSize=0; int BlockStart=0; // 'true' if we found a block too large for multithreaded extraction, // so we switched to single threaded mode until the end of file. // Large blocks could cause too high memory use in multithreaded mode. bool LargeBlock=false; bool Done=false; while (!Done) { // Data amount, which is guaranteed to fit block header and tables, // so we can safely read them without additional checks. const int TooSmallToProcess=1024; int ReadSize=UnpIO->UnpRead(ReadBufMT+DataSize,(UNP_READ_SIZE_MT-DataSize)&~0xf); if (ReadSize<0) break; DataSize+=ReadSize; if (DataSize==0) break; // First read chunk can be small if we are near the end of volume // and we want it to fit block header and tables. if (ReadSize>0 && DataSize<TooSmallToProcess) continue; while (BlockStart<DataSize && !Done) { uint BlockNumber=0,BlockNumberMT=0; while (BlockNumber<MaxUserThreads*UNP_BLOCKS_PER_THREAD) { UnpackThreadData *CurData=UnpThreadData+BlockNumber; LastBlockNum=BlockNumber; CurData->UnpackPtr=this; // 'Incomplete' thread is present. This is a thread processing block // in the end of buffer, split between two read operations. if (CurData->Incomplete) CurData->DataSize=DataSize; else { CurData->Inp.SetExternalBuffer(ReadBufMT+BlockStart); CurData->Inp.InitBitInput(); CurData->DataSize=DataSize-BlockStart; if (CurData->DataSize==0) break; CurData->DamagedData=false; CurData->HeaderRead=false; CurData->TableRead=false; } // We should not use 'last block in file' block flag here unless // we'll check the block size, because even if block is last in file, // it can exceed the current buffer and require more reading. 
CurData->NoDataLeft=(ReadSize==0); CurData->Incomplete=false; CurData->ThreadNumber=BlockNumber; if (!CurData->HeaderRead) { CurData->HeaderRead=true; if (!ReadBlockHeader(CurData->Inp,CurData->BlockHeader) || !CurData->BlockHeader.TablePresent && !TablesRead5) { Done=true; break; } TablesRead5=true; } // To prevent too high memory use we switch to single threaded mode // if block exceeds this size. Typically RAR blocks do not exceed // 64 KB, so this protection should not affect most of valid archives. const int LargeBlockSize=0x20000; if (LargeBlock || CurData->BlockHeader.BlockSize>LargeBlockSize) LargeBlock=CurData->LargeBlock=true; else BlockNumberMT++; // Number of normal blocks processed in MT mode. BlockStart+=CurData->BlockHeader.HeaderSize+CurData->BlockHeader.BlockSize; BlockNumber++; int DataLeft=DataSize-BlockStart; if (DataLeft>=0 && CurData->BlockHeader.LastBlockInFile) break; // For second and following threads we move smaller blocks to buffer // start to ensure that we have enough data to fit block header // and tables. if (DataLeft<TooSmallToProcess) break; } //#undef USE_THREADS UnpackThreadDataList UTDArray[MaxPoolThreads]; uint UTDArrayPos=0; uint MaxBlockPerThread=BlockNumberMT/MaxUserThreads; if (BlockNumberMT%MaxUserThreads!=0) MaxBlockPerThread++; // Decode all normal blocks until the first 'large' if any. for (uint CurBlock=0;CurBlock<BlockNumberMT;CurBlock+=MaxBlockPerThread) { UnpackThreadDataList *UTD=UTDArray+UTDArrayPos++; UTD->D=UnpThreadData+CurBlock; UTD->BlockCount=Min(MaxBlockPerThread,BlockNumberMT-CurBlock); #ifdef USE_THREADS if (BlockNumber==1) UnpackDecode(*UTD->D); else UnpThreadPool->AddTask(UnpackDecodeThread,(void*)UTD); #else for (uint I=0;I<UTD->BlockCount;I++) UnpackDecode(UTD->D[I]); #endif } if (BlockNumber==0) break; #ifdef USE_THREADS UnpThreadPool->WaitDone(); #endif bool IncompleteThread=false; for (uint Block=0;Block<BlockNumber;Block++) { UnpackThreadData *CurData=UnpThreadData+Block; if (!CurData->LargeBlock && !ProcessDecoded(*CurData) || CurData->LargeBlock && !UnpackLargeBlock(*CurData) || CurData->DamagedData) { Done=true; break; } if (CurData->Incomplete) { int BufPos=int(CurData->Inp.InBuf+CurData->Inp.InAddr-ReadBufMT); if (DataSize<=BufPos) // Thread exceeded input buffer boundary. { Done=true; break; } IncompleteThread=true; memmove(ReadBufMT,ReadBufMT+BufPos,DataSize-BufPos); CurData->BlockHeader.BlockSize-=CurData->Inp.InAddr-CurData->BlockHeader.BlockStart; CurData->BlockHeader.HeaderSize=0; CurData->BlockHeader.BlockStart=0; CurData->Inp.InBuf=ReadBufMT; CurData->Inp.InAddr=0; if (Block!=0) { // Move the incomplete thread entry to the first position, // so we'll start processing from it. Preserve the original // buffer for decoded data. UnpackDecodedItem *Decoded=UnpThreadData[0].Decoded; uint DecodedAllocated=UnpThreadData[0].DecodedAllocated; UnpThreadData[0]=*CurData; UnpThreadData[0].Decoded=Decoded; UnpThreadData[0].DecodedAllocated=DecodedAllocated; CurData->Incomplete=false; } BlockStart=0; DataSize-=BufPos; break; } else if (CurData->BlockHeader.LastBlockInFile) { Done=true; break; } } if (IncompleteThread || Done) break; // Current buffer is done, read more data or quit. else { int DataLeft=DataSize-BlockStart; if (DataLeft<TooSmallToProcess) { if (DataLeft<0) // Invalid data, must not happen in valid archive. { Done=true; break; } // If we do not have incomplete thread and have some data // in the end of buffer, too small for single thread, // let's move it to beginning of next buffer. 
if (DataLeft>0) memmove(ReadBufMT,ReadBufMT+BlockStart,DataLeft); DataSize=DataLeft; BlockStart=0; break; // Current buffer is done, try to read more data. } } } } UnpPtr&=MaxWinMask; // ProcessDecoded and maybe others can leave UnpPtr > MaxWinMask here. UnpWriteBuf(); BlockHeader=UnpThreadData[LastBlockNum].BlockHeader; BlockTables=UnpThreadData[LastBlockNum].BlockTables; } // Decode Huffman block and save decoded data to memory. void Unpack::UnpackDecode(UnpackThreadData &D) { if (!D.TableRead) { D.TableRead=true; if (!ReadTables(D.Inp,D.BlockHeader,D.BlockTables)) { D.DamagedData=true; return; } } if (D.Inp.InAddr>D.BlockHeader.HeaderSize+D.BlockHeader.BlockSize) { D.DamagedData=true; return; } D.DecodedSize=0; int BlockBorder=D.BlockHeader.BlockStart+D.BlockHeader.BlockSize-1; // Reserve enough space even for filter entry. int DataBorder=D.DataSize-16; int ReadBorder=Min(BlockBorder,DataBorder); while (true) { if (D.Inp.InAddr>=ReadBorder) { if (D.Inp.InAddr>BlockBorder || D.Inp.InAddr==BlockBorder && D.Inp.InBit>=D.BlockHeader.BlockBitSize) break; // If we do not have any more data in file to read, we must process // what we have until last byte. Otherwise we can return and append // more data to unprocessed few bytes. if ((D.Inp.InAddr>=DataBorder) && !D.NoDataLeft || D.Inp.InAddr>=D.DataSize) { D.Incomplete=true; break; } } if (D.DecodedSize>D.DecodedAllocated-8) // Filter can use several slots. { D.DecodedAllocated=D.DecodedAllocated*2; void *Decoded=realloc(D.Decoded,D.DecodedAllocated*sizeof(UnpackDecodedItem)); if (Decoded==NULL) ErrHandler.MemoryError(); // D.Decoded will be freed in the destructor. D.Decoded=(UnpackDecodedItem *)Decoded; } UnpackDecodedItem *CurItem=D.Decoded+D.DecodedSize++; uint MainSlot=DecodeNumber(D.Inp,&D.BlockTables.LD); if (MainSlot<256) { if (D.DecodedSize>1) { UnpackDecodedItem *PrevItem=CurItem-1; if (PrevItem->Type==UNPDT_LITERAL && PrevItem->Length<3) { PrevItem->Length++; PrevItem->Literal[PrevItem->Length]=(byte)MainSlot; D.DecodedSize--; continue; } } CurItem->Type=UNPDT_LITERAL; CurItem->Literal[0]=(byte)MainSlot; CurItem->Length=0; continue; } if (MainSlot>=262) { uint Length=SlotToLength(D.Inp,MainSlot-262); uint DBits,Distance=1,DistSlot=DecodeNumber(D.Inp,&D.BlockTables.DD); if (DistSlot<4) { DBits=0; Distance+=DistSlot; } else { DBits=DistSlot/2 - 1; Distance+=(2 | (DistSlot & 1)) << DBits; } if (DBits>0) { if (DBits>=4) { if (DBits>4) { Distance+=((D.Inp.getbits32()>>(36-DBits))<<4); D.Inp.addbits(DBits-4); } uint LowDist=DecodeNumber(D.Inp,&D.BlockTables.LDD); Distance+=LowDist; } else { Distance+=D.Inp.getbits32()>>(32-DBits); D.Inp.addbits(DBits); } } if (Distance>0x100) { Length++; if (Distance>0x2000) { Length++; if (Distance>0x40000) Length++; } } CurItem->Type=UNPDT_MATCH; CurItem->Length=(ushort)Length; CurItem->Distance=Distance; continue; } if (MainSlot==256) { UnpackFilter Filter; ReadFilter(D.Inp,Filter); CurItem->Type=UNPDT_FILTER; CurItem->Length=Filter.Type; CurItem->Distance=Filter.BlockStart; CurItem=D.Decoded+D.DecodedSize++; CurItem->Type=UNPDT_FILTER; CurItem->Length=Filter.Channels; CurItem->Distance=Filter.BlockLength; continue; } if (MainSlot==257) { CurItem->Type=UNPDT_FULLREP; continue; } if (MainSlot<262) { CurItem->Type=UNPDT_REP; CurItem->Distance=MainSlot-258; uint LengthSlot=DecodeNumber(D.Inp,&D.BlockTables.RD); uint Length=SlotToLength(D.Inp,LengthSlot); CurItem->Length=(ushort)Length; continue; } } } // Process decoded Huffman block data. 
bool Unpack::ProcessDecoded(UnpackThreadData &D) { UnpackDecodedItem *Item=D.Decoded,*Border=D.Decoded+D.DecodedSize; while (Item<Border) { UnpPtr&=MaxWinMask; if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_INC_LZ_MATCH && WriteBorder!=UnpPtr) { UnpWriteBuf(); if (WrittenFileSize>DestUnpSize) return false; } if (Item->Type==UNPDT_LITERAL) { #if defined(LITTLE_ENDIAN) && defined(ALLOW_MISALIGNED) if (Item->Length==3 && UnpPtr<MaxWinSize-4) { *(uint32 *)(Window+UnpPtr)=*(uint32 *)Item->Literal; UnpPtr+=4; } else #endif for (uint I=0;I<=Item->Length;I++) Window[UnpPtr++ & MaxWinMask]=Item->Literal[I]; } else if (Item->Type==UNPDT_MATCH) { InsertOldDist(Item->Distance); LastLength=Item->Length; CopyString(Item->Length,Item->Distance); } else if (Item->Type==UNPDT_REP) { uint Distance=OldDist[Item->Distance]; for (uint I=Item->Distance;I>0;I--) OldDist[I]=OldDist[I-1]; OldDist[0]=Distance; LastLength=Item->Length; CopyString(Item->Length,Distance); } else if (Item->Type==UNPDT_FULLREP) { if (LastLength!=0) CopyString(LastLength,OldDist[0]); } else if (Item->Type==UNPDT_FILTER) { UnpackFilter Filter; Filter.Type=(byte)Item->Length; Filter.BlockStart=Item->Distance; Item++; Filter.Channels=(byte)Item->Length; Filter.BlockLength=Item->Distance; AddFilter(Filter); } Item++; } return true; } // For large blocks we decode and process in same function in single threaded // mode, so we do not need to store intermediate data in memory. bool Unpack::UnpackLargeBlock(UnpackThreadData &D) { if (!D.TableRead) { D.TableRead=true; if (!ReadTables(D.Inp,D.BlockHeader,D.BlockTables)) { D.DamagedData=true; return false; } } if (D.Inp.InAddr>D.BlockHeader.HeaderSize+D.BlockHeader.BlockSize) { D.DamagedData=true; return false; } int BlockBorder=D.BlockHeader.BlockStart+D.BlockHeader.BlockSize-1; // Reserve enough space even for filter entry. int DataBorder=D.DataSize-16; int ReadBorder=Min(BlockBorder,DataBorder); while (true) { UnpPtr&=MaxWinMask; if (D.Inp.InAddr>=ReadBorder) { if (D.Inp.InAddr>BlockBorder || D.Inp.InAddr==BlockBorder && D.Inp.InBit>=D.BlockHeader.BlockBitSize) break; // If we do not have any more data in file to read, we must process // what we have until last byte. Otherwise we can return and append // more data to unprocessed few bytes. 
if ((D.Inp.InAddr>=DataBorder) && !D.NoDataLeft || D.Inp.InAddr>=D.DataSize) { D.Incomplete=true; break; } } if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_INC_LZ_MATCH && WriteBorder!=UnpPtr) { UnpWriteBuf(); if (WrittenFileSize>DestUnpSize) return false; } uint MainSlot=DecodeNumber(D.Inp,&D.BlockTables.LD); if (MainSlot<256) { Window[UnpPtr++]=(byte)MainSlot; continue; } if (MainSlot>=262) { uint Length=SlotToLength(D.Inp,MainSlot-262); uint DBits,Distance=1,DistSlot=DecodeNumber(D.Inp,&D.BlockTables.DD); if (DistSlot<4) { DBits=0; Distance+=DistSlot; } else { DBits=DistSlot/2 - 1; Distance+=(2 | (DistSlot & 1)) << DBits; } if (DBits>0) { if (DBits>=4) { if (DBits>4) { Distance+=((D.Inp.getbits32()>>(36-DBits))<<4); D.Inp.addbits(DBits-4); } uint LowDist=DecodeNumber(D.Inp,&D.BlockTables.LDD); Distance+=LowDist; } else { Distance+=D.Inp.getbits32()>>(32-DBits); D.Inp.addbits(DBits); } } if (Distance>0x100) { Length++; if (Distance>0x2000) { Length++; if (Distance>0x40000) Length++; } } InsertOldDist(Distance); LastLength=Length; CopyString(Length,Distance); continue; } if (MainSlot==256) { UnpackFilter Filter; if (!ReadFilter(D.Inp,Filter) || !AddFilter(Filter)) break; continue; } if (MainSlot==257) { if (LastLength!=0) CopyString(LastLength,OldDist[0]); continue; } if (MainSlot<262) { uint DistNum=MainSlot-258; uint Distance=OldDist[DistNum]; for (uint I=DistNum;I>0;I--) OldDist[I]=OldDist[I-1]; OldDist[0]=Distance; uint LengthSlot=DecodeNumber(D.Inp,&D.BlockTables.RD); uint Length=SlotToLength(D.Inp,LengthSlot); LastLength=Length; CopyString(Length,Distance); continue; } } return true; }
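Both UnpackDecode and UnpackLargeBlock in the code_before blob above expand a decoded distance slot with the same arithmetic: slots 0-3 map directly to distances 1-4, and every higher slot contributes DistSlot/2-1 extra bits, with the lowest 4 bits of large distances coming from the separate LDD alignment table. A minimal standalone sketch of that mapping, with the raw extra bits and the LDD value passed in by the caller instead of read from a real bit stream (a sketch for illustration, not unrar's API):

#include <stdint.h>

/* Expand a RAR5 distance slot into a match distance, mirroring the logic
 * in UnpackDecode/UnpackLargeBlock above.  RawBits stands for the raw
 * extra bits pulled from the stream and LowDist for the value decoded
 * from the low-distance (LDD) table; supplying them as parameters keeps
 * the sketch self-contained. */
static uint32_t SlotToDistance(uint32_t DistSlot, uint32_t RawBits,
                               uint32_t LowDist)
{
  if (DistSlot < 4)
    return 1 + DistSlot;                       /* distances 1..4 */
  uint32_t DBits = DistSlot / 2 - 1;           /* extra bit count */
  uint32_t Distance = 1 + ((2 | (DistSlot & 1)) << DBits);
  if (DBits >= 4)
    Distance += (DBits > 4 ? (RawBits << 4) : 0) + LowDist;
  else
    Distance += RawBits;                       /* all bits are raw */
  return Distance;
}

For example, DistSlot==6 gives DBits==2 and a base distance of 1+(2<<2)=9, so RawBits 0..3 cover distances 9..12, directly adjacent to the 7..8 range produced by slot 5.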
bool Unpack::ProcessDecoded(UnpackThreadData &D)
{
  UnpackDecodedItem *Item=D.Decoded,*Border=D.Decoded+D.DecodedSize;
  while (Item<Border)
  {
    UnpPtr&=MaxWinMask;
    if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_LZ_MATCH+3 && WriteBorder!=UnpPtr)
    {
      UnpWriteBuf();
      if (WrittenFileSize>DestUnpSize)
        return false;
    }
    if (Item->Type==UNPDT_LITERAL)
    {
#if defined(LITTLE_ENDIAN) && defined(ALLOW_MISALIGNED)
      if (Item->Length==3 && UnpPtr<MaxWinSize-4)
      {
        *(uint32 *)(Window+UnpPtr)=*(uint32 *)Item->Literal;
        UnpPtr+=4;
      }
      else
#endif
        for (uint I=0;I<=Item->Length;I++)
          Window[UnpPtr++ & MaxWinMask]=Item->Literal[I];
    }
    else if (Item->Type==UNPDT_MATCH)
    {
      InsertOldDist(Item->Distance);
      LastLength=Item->Length;
      CopyString(Item->Length,Item->Distance);
    }
    else if (Item->Type==UNPDT_REP)
    {
      uint Distance=OldDist[Item->Distance];
      for (uint I=Item->Distance;I>0;I--)
        OldDist[I]=OldDist[I-1];
      OldDist[0]=Distance;
      LastLength=Item->Length;
      CopyString(Item->Length,Distance);
    }
    else if (Item->Type==UNPDT_FULLREP)
    {
      if (LastLength!=0)
        CopyString(LastLength,OldDist[0]);
    }
    else if (Item->Type==UNPDT_FILTER)
    {
      UnpackFilter Filter;
      Filter.Type=(byte)Item->Length;
      Filter.BlockStart=Item->Distance;
      Item++;
      Filter.Channels=(byte)Item->Length;
      Filter.BlockLength=Item->Distance;
      AddFilter(Filter);
    }
    Item++;
  }
  return true;
}
bool Unpack::ProcessDecoded(UnpackThreadData &D)
{
  UnpackDecodedItem *Item=D.Decoded,*Border=D.Decoded+D.DecodedSize;
  while (Item<Border)
  {
    UnpPtr&=MaxWinMask;
    if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_INC_LZ_MATCH && WriteBorder!=UnpPtr)
    {
      UnpWriteBuf();
      if (WrittenFileSize>DestUnpSize)
        return false;
    }
    if (Item->Type==UNPDT_LITERAL)
    {
#if defined(LITTLE_ENDIAN) && defined(ALLOW_MISALIGNED)
      if (Item->Length==3 && UnpPtr<MaxWinSize-4)
      {
        *(uint32 *)(Window+UnpPtr)=*(uint32 *)Item->Literal;
        UnpPtr+=4;
      }
      else
#endif
        for (uint I=0;I<=Item->Length;I++)
          Window[UnpPtr++ & MaxWinMask]=Item->Literal[I];
    }
    else if (Item->Type==UNPDT_MATCH)
    {
      InsertOldDist(Item->Distance);
      LastLength=Item->Length;
      CopyString(Item->Length,Item->Distance);
    }
    else if (Item->Type==UNPDT_REP)
    {
      uint Distance=OldDist[Item->Distance];
      for (uint I=Item->Distance;I>0;I--)
        OldDist[I]=OldDist[I-1];
      OldDist[0]=Distance;
      LastLength=Item->Length;
      CopyString(Item->Length,Distance);
    }
    else if (Item->Type==UNPDT_FULLREP)
    {
      if (LastLength!=0)
        CopyString(LastLength,OldDist[0]);
    }
    else if (Item->Type==UNPDT_FILTER)
    {
      UnpackFilter Filter;
      Filter.Type=(byte)Item->Length;
      Filter.BlockStart=Item->Distance;
      Item++;
      Filter.Channels=(byte)Item->Length;
      Filter.BlockLength=Item->Distance;
      AddFilter(Filter);
    }
    Item++;
  }
  return true;
}
{'added': [(454, ' if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_INC_LZ_MATCH && WriteBorder!=UnpPtr)'), (562, ' if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_INC_LZ_MATCH && WriteBorder!=UnpPtr)')], 'deleted': [(454, ' if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_LZ_MATCH+3 && WriteBorder!=UnpPtr)'), (562, ' if (((WriteBorder-UnpPtr) & MaxWinMask)<MAX_LZ_MATCH+3 && WriteBorder!=UnpPtr)')]}
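The two-entry diff above is the entire change recorded for this row: in both ProcessDecoded and UnpackLargeBlock, the flush guard's reserve changes from the hand-written MAX_LZ_MATCH+3 to the MAX_INC_LZ_MATCH constant, so the window is flushed before a single decoded item can write past the border; the row tags the bug as CWE-787 (out-of-bounds write). The guard pattern in isolation, on a power-of-two ring buffer (names here are illustrative, not unrar's):

#include <stddef.h>

/* Gap from the write position to the flush border, computed the same way
 * as (WriteBorder-UnpPtr) & MaxWinMask in the functions above. */
static size_t ring_gap(size_t border, size_t wr, size_t mask)
{
  return (border - wr) & mask;
}

/* Flush check: the reserve must be at least the longest string one
 * decoded item can append; an undersized reserve (the pre-fix
 * MAX_LZ_MATCH+3) lets a maximal copy run past the border. */
static int need_flush(size_t border, size_t wr, size_t mask, size_t reserve)
{
  return ring_gap(border, wr, mask) < reserve && border != wr;
}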
num_lines_added: 2
num_lines_deleted: 2
num_lines_in_file: 524
num_tokens_in_file: 2,999
num_lines_in_method: 61
num_tokens_in_method: 357
method_complexity: 16
repo: https://github.com/aawc/unrar
cve_id: CVE-2017-20006
cwe_id: CWE-787
id: 3,061
file_name: lookup.c
programming_language: C
method_name: label
/* lookup.c - implementation of IDNA2008 lookup functions Copyright (C) 2011-2017 Simon Josefsson Libidn2 is free software: you can redistribute it and/or modify it under the terms of either: * the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. or * the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. or both in parallel, as here. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received copies of the GNU General Public License and the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include "idn2.h" #include <errno.h> /* errno */ #include <stdlib.h> /* malloc, free */ #include "punycode.h" #include <unitypes.h> #include <uniconv.h> /* u8_strconv_from_locale */ #include <uninorm.h> /* u32_normalize */ #include <unistr.h> /* u8_to_u32 */ #include "idna.h" /* _idn2_label_test */ #include "tr46map.h" /* definition for tr46map.c */ static int set_default_flags(int *flags) { if (((*flags) & IDN2_TRANSITIONAL) && ((*flags) & IDN2_NONTRANSITIONAL)) return IDN2_INVALID_FLAGS; if (((*flags) & (IDN2_TRANSITIONAL|IDN2_NONTRANSITIONAL)) && ((*flags) & IDN2_NO_TR46)) return IDN2_INVALID_FLAGS; if (!((*flags) & (IDN2_NO_TR46|IDN2_TRANSITIONAL))) *flags |= IDN2_NONTRANSITIONAL; return IDN2_OK; } static int label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen, int flags) { size_t plen; uint32_t *p; int rc; size_t tmpl; if (_idn2_ascii_p (src, srclen)) { if (flags & IDN2_ALABEL_ROUNDTRIP) /* FIXME implement this MAY: If the input to this procedure appears to be an A-label (i.e., it starts in "xn--", interpreted case-insensitively), the lookup application MAY attempt to convert it to a U-label, first ensuring that the A-label is entirely in lowercase (converting it to lowercase if necessary), and apply the tests of Section 5.4 and the conversion of Section 5.5 to that form. */ return IDN2_INVALID_FLAGS; if (srclen > IDN2_LABEL_MAX_LENGTH) return IDN2_TOO_BIG_LABEL; if (srclen > *dstlen) return IDN2_TOO_BIG_DOMAIN; memcpy (dst, src, srclen); *dstlen = srclen; return IDN2_OK; } rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT); if (rc != IDN2_OK) return rc; if (!(flags & IDN2_TRANSITIONAL)) { rc = _idn2_label_test( TEST_NFC | TEST_2HYPHEN | TEST_LEADING_COMBINING | TEST_DISALLOWED | TEST_CONTEXTJ_RULE | TEST_CONTEXTO_WITH_RULE | TEST_UNASSIGNED | TEST_BIDI | ((flags & IDN2_NONTRANSITIONAL) ? TEST_NONTRANSITIONAL : 0) | ((flags & IDN2_USE_STD3_ASCII_RULES) ? 
0 : TEST_ALLOW_STD3_DISALLOWED), p, plen); if (rc != IDN2_OK) { free(p); return rc; } } dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-'; tmpl = *dstlen - 4; rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4); free (p); if (rc != IDN2_OK) return rc; *dstlen = 4 + tmpl; return IDN2_OK; } #define TR46_TRANSITIONAL_CHECK \ (TEST_NFC | TEST_2HYPHEN | TEST_HYPHEN_STARTEND | TEST_LEADING_COMBINING | TEST_TRANSITIONAL) #define TR46_NONTRANSITIONAL_CHECK \ (TEST_NFC | TEST_2HYPHEN | TEST_HYPHEN_STARTEND | TEST_LEADING_COMBINING | TEST_NONTRANSITIONAL) static int _tr46 (const uint8_t * domain_u8, uint8_t ** out, int flags) { size_t len, it; uint32_t *domain_u32; int err = IDN2_OK, rc; int transitional = 0; int test_flags; if (flags & IDN2_TRANSITIONAL) transitional = 1; /* convert UTF-8 to UTF-32 */ if (!(domain_u32 = u8_to_u32 (domain_u8, u8_strlen (domain_u8) + 1, NULL, &len))) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } size_t len2 = 0; for (it = 0; it < len - 1; it++) { IDNAMap map; get_idna_map (domain_u32[it], &map); if (map_is (&map, TR46_FLG_DISALLOWED)) { if (domain_u32[it]) { free (domain_u32); return IDN2_DISALLOWED; } len2++; } else if (map_is (&map, TR46_FLG_MAPPED)) { len2 += map.nmappings; } else if (map_is (&map, TR46_FLG_VALID)) { len2++; } else if (map_is (&map, TR46_FLG_IGNORED)) { continue; } else if (map_is (&map, TR46_FLG_DEVIATION)) { if (transitional) { len2 += map.nmappings; } else len2++; } else if (!(flags & IDN2_USE_STD3_ASCII_RULES)) { if (map_is (&map, TR46_FLG_DISALLOWED_STD3_VALID)) { /* valid because UseSTD3ASCIIRules=false, see #TR46 5 */ len2++; } else if (map_is (&map, TR46_FLG_DISALLOWED_STD3_MAPPED)) { /* mapped because UseSTD3ASCIIRules=false, see #TR46 5 */ len2 += map.nmappings; } } } /* Exit early if result is too long. * This avoids excessive CPU usage in punycode encoding, which is O(N^2). 
*/ if (len2 >= IDN2_DOMAIN_MAX_LENGTH) { free (domain_u32); return IDN2_TOO_BIG_DOMAIN; } uint32_t *tmp = (uint32_t *) malloc ((len2 + 1) * sizeof (uint32_t)); if (!tmp) { free (domain_u32); return IDN2_MALLOC; } len2 = 0; for (it = 0; it < len - 1; it++) { uint32_t c = domain_u32[it]; IDNAMap map; get_idna_map (c, &map); if (map_is (&map, TR46_FLG_DISALLOWED)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_MAPPED)) { len2 += get_map_data (tmp + len2, &map); } else if (map_is (&map, TR46_FLG_VALID)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_IGNORED)) { continue; } else if (map_is (&map, TR46_FLG_DEVIATION)) { if (transitional) { len2 += get_map_data (tmp + len2, &map); } else tmp[len2++] = c; } else if (!(flags & IDN2_USE_STD3_ASCII_RULES)) { if (map_is (&map, TR46_FLG_DISALLOWED_STD3_VALID)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_DISALLOWED_STD3_MAPPED)) { len2 += get_map_data (tmp + len2, &map); } } } free (domain_u32); /* Normalize to NFC */ tmp[len2] = 0; domain_u32 = u32_normalize (UNINORM_NFC, tmp, len2 + 1, NULL, &len); free (tmp); tmp = NULL; if (!domain_u32) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } /* split into labels and check */ uint32_t *e, *s; for (e = s = domain_u32; *e; s = e) { while (*e && *e != '.') e++; if (e - s >= 4 && s[0] == 'x' && s[1] == 'n' && s[2] == '-' && s[3] == '-') { /* decode punycode and check result non-transitional */ size_t ace_len; uint32_t name_u32[IDN2_LABEL_MAX_LENGTH]; size_t name_len = IDN2_LABEL_MAX_LENGTH; uint8_t *ace; ace = u32_to_u8 (s + 4, e - s - 4, NULL, &ace_len); if (!ace) { free (domain_u32); if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = _idn2_punycode_decode (ace_len, (char *) ace, &name_len, name_u32); free (ace); if (rc) { free (domain_u32); return rc; } test_flags = TR46_NONTRANSITIONAL_CHECK; if (!(flags & IDN2_USE_STD3_ASCII_RULES)) test_flags |= TEST_ALLOW_STD3_DISALLOWED; if ((rc = _idn2_label_test (test_flags, name_u32, name_len))) err = rc; } else { test_flags = transitional ? TR46_TRANSITIONAL_CHECK : TR46_NONTRANSITIONAL_CHECK; if (!(flags & IDN2_USE_STD3_ASCII_RULES)) test_flags |= TEST_ALLOW_STD3_DISALLOWED; if ((rc = _idn2_label_test (test_flags, s, e - s))) err = rc; } if (*e) e++; } if (err == IDN2_OK && out) { uint8_t *_out = u32_to_u8 (domain_u32, len, NULL, &len); free (domain_u32); if (!_out) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } *out = _out; } else free (domain_u32); return err; } /** * idn2_lookup_u8: * @src: input zero-terminated UTF-8 string in Unicode NFC normalized form. * @lookupname: newly allocated output variable with name to lookup in DNS. * @flags: optional #idn2_flags to modify behaviour. * * Perform IDNA2008 lookup string conversion on domain name @src, as * described in section 5 of RFC 5891. Note that the input string * must be encoded in UTF-8 and be in Unicode NFC form. * * Pass %IDN2_NFC_INPUT in @flags to convert input to NFC form before * further processing. %IDN2_TRANSITIONAL and %IDN2_NONTRANSITIONAL * do already imply %IDN2_NFC_INPUT. * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to * convert any input A-labels to U-labels and perform additional * testing (not implemented yet). * Pass %IDN2_TRANSITIONAL to enable Unicode TR46 * transitional processing, and %IDN2_NONTRANSITIONAL to enable * Unicode TR46 non-transitional processing. Multiple flags may be * specified by binary or:ing them together. * * After version 2.0.3: %IDN2_USE_STD3_ASCII_RULES disabled by default. 
* Previously we were eliminating non-STD3 characters from domain strings * such as _443._tcp.example.com, or IPs 1.2.3.4/24 provided to libidn2 * functions. That was an unexpected regression for applications switching * from libidn and thus it is no longer applied by default. * Use %IDN2_USE_STD3_ASCII_RULES to enable that behavior again. * * After version 0.11: @lookupname may be NULL to test lookup of @src * without allocating memory. * * Returns: On successful conversion %IDN2_OK is returned, if the * output domain or any label would have been too long * %IDN2_TOO_BIG_DOMAIN or %IDN2_TOO_BIG_LABEL is returned, or * another error code is returned. * * Since: 0.1 **/ int idn2_lookup_u8 (const uint8_t * src, uint8_t ** lookupname, int flags) { size_t lookupnamelen = 0; uint8_t _lookupname[IDN2_DOMAIN_MAX_LENGTH + 1]; uint8_t _mapped[IDN2_DOMAIN_MAX_LENGTH + 1]; int rc; if (src == NULL) { if (lookupname) *lookupname = NULL; return IDN2_OK; } rc = set_default_flags(&flags); if (rc != IDN2_OK) return rc; if (!(flags & IDN2_NO_TR46)) { uint8_t *out; size_t outlen; rc = _tr46 (src, &out, flags); if (rc != IDN2_OK) return rc; outlen = u8_strlen (out); if (outlen >= sizeof (_mapped)) { free (out); return IDN2_TOO_BIG_DOMAIN; } memcpy (_mapped, out, outlen + 1); src = _mapped; free (out); } do { const uint8_t *end = (uint8_t *) strchrnul ((const char *) src, '.'); /* XXX Do we care about non-U+002E dots such as U+3002, U+FF0E and U+FF61 here? Perhaps when IDN2_NFC_INPUT? */ size_t labellen = end - src; uint8_t tmp[IDN2_LABEL_MAX_LENGTH]; size_t tmplen = IDN2_LABEL_MAX_LENGTH; rc = label (src, labellen, tmp, &tmplen, flags); if (rc != IDN2_OK) return rc; if (lookupnamelen + tmplen > IDN2_DOMAIN_MAX_LENGTH - (tmplen == 0 && *end == '\0' ? 1 : 2)) return IDN2_TOO_BIG_DOMAIN; memcpy (_lookupname + lookupnamelen, tmp, tmplen); lookupnamelen += tmplen; if (*end == '.') { if (lookupnamelen + 1 > IDN2_DOMAIN_MAX_LENGTH) return IDN2_TOO_BIG_DOMAIN; _lookupname[lookupnamelen] = '.'; lookupnamelen++; } _lookupname[lookupnamelen] = '\0'; src = end; } while (*src++); if (lookupname) { uint8_t *tmp = (uint8_t *) malloc (lookupnamelen + 1); if (tmp == NULL) return IDN2_MALLOC; memcpy (tmp, _lookupname, lookupnamelen + 1); *lookupname = tmp; } return IDN2_OK; } /** * idn2_lookup_ul: * @src: input zero-terminated locale encoded string. * @lookupname: newly allocated output variable with name to lookup in DNS. * @flags: optional #idn2_flags to modify behaviour. * * Perform IDNA2008 lookup string conversion on domain name @src, as * described in section 5 of RFC 5891. Note that the input is assumed * to be encoded in the locale's default coding system, and will be * transcoded to UTF-8 and NFC normalized by this function. * * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to convert any input A-labels * to U-labels and perform additional testing. Pass * %IDN2_TRANSITIONAL to enable Unicode TR46 transitional processing, * and %IDN2_NONTRANSITIONAL to enable Unicode TR46 non-transitional * processing. Multiple flags may be specified by binary or:ing them * together, for example %IDN2_ALABEL_ROUNDTRIP | * %IDN2_NONTRANSITIONAL. The %IDN2_NFC_INPUT in @flags is always * enabled in this function. * * After version 0.11: @lookupname may be NULL to test lookup of @src * without allocating memory. 
* * Returns: On successful conversion %IDN2_OK is returned, if * conversion from locale to UTF-8 fails then %IDN2_ICONV_FAIL is * returned, if the output domain or any label would have been too * long %IDN2_TOO_BIG_DOMAIN or %IDN2_TOO_BIG_LABEL is returned, or * another error code is returned. * * Since: 0.1 **/ int idn2_lookup_ul (const char * src, char ** lookupname, int flags) { uint8_t *utf8src = NULL; int rc; if (src) { const char *encoding = locale_charset (); utf8src = u8_strconv_from_encoding (src, encoding, iconveh_error); if (!utf8src) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ICONV_FAIL; } } rc = idn2_lookup_u8 (utf8src, (uint8_t **) lookupname, flags | IDN2_NFC_INPUT); free (utf8src); return rc; } /** * idn2_to_ascii_4i: * @input: zero terminated input Unicode (UCS-4) string. * @inlen: number of elements in @input. * @output: output zero terminated string that must have room for at least 63 characters plus the terminating zero. * @flags: optional #idn2_flags to modify behaviour. * * THIS FUNCTION HAS BEEN DEPRECATED DUE TO A DESIGN FLAW. USE idn2_to_ascii_4i2() INSTEAD ! * * The ToASCII operation takes a sequence of Unicode code points that make * up one domain label and transforms it into a sequence of code points in * the ASCII range (0..7F). If ToASCII succeeds, the original sequence and * the resulting sequence are equivalent labels. * * It is important to note that the ToASCII operation can fail. * ToASCII fails if any step of it fails. If any step of the * ToASCII operation fails on any label in a domain name, that domain * name MUST NOT be used as an internationalized domain name. * The method for dealing with this failure is application-specific. * * The inputs to ToASCII are a sequence of code points. * * ToASCII never alters a sequence of code points that are all in the ASCII * range to begin with (although it could fail). Applying the ToASCII operation multiple * effect as applying it just once. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_4i (const uint32_t * input, size_t inlen, char * output, int flags) { char *out; int rc; if (!input) { if (output) *output = 0; return IDN2_OK; } rc = idn2_to_ascii_4i2 (input, inlen, &out, flags); if (rc == IDN2_OK) { size_t len = strlen(out); if (len > 63) rc = IDN2_TOO_BIG_DOMAIN; else if (output) memcpy (output, out, len); free (out); } return rc; } /** * idn2_to_ascii_4i: * @input: zero terminated input Unicode (UCS-4) string. * @inlen: number of elements in @input. * @output: pointer to newly allocated zero-terminated output string. * @flags: optional #idn2_flags to modify behaviour. * * The ToASCII operation takes a sequence of Unicode code points that make * up one domain label and transforms it into a sequence of code points in * the ASCII range (0..7F). If ToASCII succeeds, the original sequence and * the resulting sequence are equivalent labels. * * It is important to note that the ToASCII operation can fail. * ToASCII fails if any step of it fails. If any step of the * ToASCII operation fails on any label in a domain name, that domain * name MUST NOT be used as an internationalized domain name. 
* The method for dealing with this failure is application-specific. * * The inputs to ToASCII are a sequence of code points. * * ToASCII never alters a sequence of code points that are all in the ASCII * range to begin with (although it could fail). Applying the ToASCII operation multiple * effect as applying it just once. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.1.1 **/ int idn2_to_ascii_4i2 (const uint32_t * input, size_t inlen, char ** output, int flags) { uint32_t *input_u32; uint8_t *input_u8, *output_u8; size_t length; int rc; if (!input) { if (output) *output = NULL; return IDN2_OK; } input_u32 = (uint32_t *) malloc ((inlen + 1) * sizeof(uint32_t)); if (!input_u32) return IDN2_MALLOC; u32_cpy (input_u32, input, inlen); input_u32[inlen] = 0; input_u8 = u32_to_u8 (input_u32, inlen + 1, NULL, &length); free (input_u32); if (!input_u8) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = idn2_lookup_u8 (input_u8, &output_u8, flags); free (input_u8); if (rc == IDN2_OK) { if (output) *output = (char *) output_u8; else free (output_u8); } return rc; } /** * idn2_to_ascii_4z: * @input: zero terminated input Unicode (UCS-4) string. * @output: pointer to newly allocated zero-terminated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert UCS-4 domain name to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_4z (const uint32_t * input, char ** output, int flags) { uint8_t *input_u8; size_t length; int rc; if (!input) { if (output) *output = NULL; return IDN2_OK; } input_u8 = u32_to_u8 (input, u32_strlen(input) + 1, NULL, &length); if (!input_u8) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = idn2_lookup_u8 (input_u8, (uint8_t **) output, flags); free (input_u8); return rc; } /** * idn2_to_ascii_8z: * @input: zero terminated input UTF-8 string. * @output: pointer to newly allocated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert UTF-8 domain name to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. 
* * Since: 2.0.0 **/ int idn2_to_ascii_8z (const char * input, char ** output, int flags) { return idn2_lookup_u8 ((const uint8_t *) input, (uint8_t **) output, flags); } /** * idn2_to_ascii_lz: * @input: zero terminated input UTF-8 string. * @output: pointer to newly allocated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert a domain name in locale's encoding to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Returns: %IDN2_OK on success, or error code. * Same as described in idn2_lookup_ul() documentation. * * Since: 2.0.0 **/ int idn2_to_ascii_lz (const char * input, char ** output, int flags) { return idn2_lookup_ul (input, output, flags); }
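The code_before blob above ends with the public entry points; for orientation, a minimal caller of idn2_to_ascii_8z (which, per the code shown, simply forwards to idn2_lookup_u8) could look like the following. The input string is an arbitrary example; link with -lidn2:

#include <stdio.h>
#include <stdlib.h>
#include <idn2.h>

int main(void)
{
  char *ace = NULL;
  /* IDN2_NONTRANSITIONAL | IDN2_NFC_INPUT is the combination the doc
   * comments above recommend for compatibility with other software. */
  int rc = idn2_to_ascii_8z ("bücher.example", &ace,
                             IDN2_NONTRANSITIONAL | IDN2_NFC_INPUT);
  if (rc != IDN2_OK)
    {
      fprintf (stderr, "idn2: %s\n", idn2_strerror (rc));
      return EXIT_FAILURE;
    }
  printf ("%s\n", ace);   /* expected: xn--bcher-kva.example */
  free (ace);             /* the doc comments say the caller deallocates */
  return EXIT_SUCCESS;
}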
/* lookup.c - implementation of IDNA2008 lookup functions Copyright (C) 2011-2017 Simon Josefsson Libidn2 is free software: you can redistribute it and/or modify it under the terms of either: * the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. or * the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. or both in parallel, as here. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received copies of the GNU General Public License and the GNU Lesser General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <config.h> #include "idn2.h" #include <errno.h> /* errno */ #include <stdlib.h> /* malloc, free */ #include "punycode.h" #include <unitypes.h> #include <uniconv.h> /* u8_strconv_from_locale */ #include <uninorm.h> /* u32_normalize */ #include <unistr.h> /* u8_to_u32 */ #include "idna.h" /* _idn2_label_test */ #include "tr46map.h" /* definition for tr46map.c */ static int set_default_flags(int *flags) { if (((*flags) & IDN2_TRANSITIONAL) && ((*flags) & IDN2_NONTRANSITIONAL)) return IDN2_INVALID_FLAGS; if (((*flags) & (IDN2_TRANSITIONAL|IDN2_NONTRANSITIONAL)) && ((*flags) & IDN2_NO_TR46)) return IDN2_INVALID_FLAGS; if (((*flags) & IDN2_ALABEL_ROUNDTRIP) && ((*flags) & IDN2_NO_ALABEL_ROUNDTRIP)) return IDN2_INVALID_FLAGS; if (!((*flags) & (IDN2_NO_TR46|IDN2_TRANSITIONAL))) *flags |= IDN2_NONTRANSITIONAL; return IDN2_OK; } static int label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen, int flags) { size_t plen; uint32_t *p; const uint8_t *src_org = NULL; uint8_t *src_allocated = NULL; int rc, check_roundtrip = 0; size_t tmpl, srclen_org = 0; uint32_t label_u32[IDN2_LABEL_MAX_LENGTH]; size_t label32_len = IDN2_LABEL_MAX_LENGTH; if (_idn2_ascii_p (src, srclen)) { if (!(flags & IDN2_NO_ALABEL_ROUNDTRIP) && srclen >= 4 && memcmp (src, "xn--", 4) == 0) { /* If the input to this procedure appears to be an A-label (i.e., it starts in "xn--", interpreted case-insensitively), the lookup application MAY attempt to convert it to a U-label, first ensuring that the A-label is entirely in lowercase (converting it to lowercase if necessary), and apply the tests of Section 5.4 and the conversion of Section 5.5 to that form. */ rc = _idn2_punycode_decode (srclen - 4, (char *) src + 4, &label32_len, label_u32); if (rc) return rc; check_roundtrip = 1; src_org = src; srclen_org = srclen; srclen = IDN2_LABEL_MAX_LENGTH; src = src_allocated = u32_to_u8 (label_u32, label32_len, NULL, &srclen); if (!src) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } } else { if (srclen > IDN2_LABEL_MAX_LENGTH) return IDN2_TOO_BIG_LABEL; if (srclen > *dstlen) return IDN2_TOO_BIG_DOMAIN; memcpy (dst, src, srclen); *dstlen = srclen; return IDN2_OK; } } rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT); if (rc != IDN2_OK) goto out; if (!(flags & IDN2_TRANSITIONAL)) { rc = _idn2_label_test( TEST_NFC | TEST_2HYPHEN | TEST_LEADING_COMBINING | TEST_DISALLOWED | TEST_CONTEXTJ_RULE | TEST_CONTEXTO_WITH_RULE | TEST_UNASSIGNED | TEST_BIDI | ((flags & IDN2_NONTRANSITIONAL) ? 
TEST_NONTRANSITIONAL : 0) | ((flags & IDN2_USE_STD3_ASCII_RULES) ? 0 : TEST_ALLOW_STD3_DISALLOWED), p, plen); if (rc != IDN2_OK) { free (p); goto out; } } dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-'; tmpl = *dstlen - 4; rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4); free (p); if (rc != IDN2_OK) goto out; *dstlen = 4 + tmpl; if (check_roundtrip) { if (srclen_org != *dstlen || memcmp (src_org, dst, srclen_org)) { rc = IDN2_ALABEL_ROUNDTRIP_FAILED; goto out; } } rc = IDN2_OK; out: free (src_allocated); return rc; } #define TR46_TRANSITIONAL_CHECK \ (TEST_NFC | TEST_2HYPHEN | TEST_HYPHEN_STARTEND | TEST_LEADING_COMBINING | TEST_TRANSITIONAL) #define TR46_NONTRANSITIONAL_CHECK \ (TEST_NFC | TEST_2HYPHEN | TEST_HYPHEN_STARTEND | TEST_LEADING_COMBINING | TEST_NONTRANSITIONAL) static int _tr46 (const uint8_t * domain_u8, uint8_t ** out, int flags) { size_t len, it; uint32_t *domain_u32; int err = IDN2_OK, rc; int transitional = 0; int test_flags; if (flags & IDN2_TRANSITIONAL) transitional = 1; /* convert UTF-8 to UTF-32 */ if (!(domain_u32 = u8_to_u32 (domain_u8, u8_strlen (domain_u8) + 1, NULL, &len))) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } size_t len2 = 0; for (it = 0; it < len - 1; it++) { IDNAMap map; get_idna_map (domain_u32[it], &map); if (map_is (&map, TR46_FLG_DISALLOWED)) { if (domain_u32[it]) { free (domain_u32); return IDN2_DISALLOWED; } len2++; } else if (map_is (&map, TR46_FLG_MAPPED)) { len2 += map.nmappings; } else if (map_is (&map, TR46_FLG_VALID)) { len2++; } else if (map_is (&map, TR46_FLG_IGNORED)) { continue; } else if (map_is (&map, TR46_FLG_DEVIATION)) { if (transitional) { len2 += map.nmappings; } else len2++; } else if (!(flags & IDN2_USE_STD3_ASCII_RULES)) { if (map_is (&map, TR46_FLG_DISALLOWED_STD3_VALID)) { /* valid because UseSTD3ASCIIRules=false, see #TR46 5 */ len2++; } else if (map_is (&map, TR46_FLG_DISALLOWED_STD3_MAPPED)) { /* mapped because UseSTD3ASCIIRules=false, see #TR46 5 */ len2 += map.nmappings; } } } /* Exit early if result is too long. * This avoids excessive CPU usage in punycode encoding, which is O(N^2). 
*/ if (len2 >= IDN2_DOMAIN_MAX_LENGTH) { free (domain_u32); return IDN2_TOO_BIG_DOMAIN; } uint32_t *tmp = (uint32_t *) malloc ((len2 + 1) * sizeof (uint32_t)); if (!tmp) { free (domain_u32); return IDN2_MALLOC; } len2 = 0; for (it = 0; it < len - 1; it++) { uint32_t c = domain_u32[it]; IDNAMap map; get_idna_map (c, &map); if (map_is (&map, TR46_FLG_DISALLOWED)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_MAPPED)) { len2 += get_map_data (tmp + len2, &map); } else if (map_is (&map, TR46_FLG_VALID)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_IGNORED)) { continue; } else if (map_is (&map, TR46_FLG_DEVIATION)) { if (transitional) { len2 += get_map_data (tmp + len2, &map); } else tmp[len2++] = c; } else if (!(flags & IDN2_USE_STD3_ASCII_RULES)) { if (map_is (&map, TR46_FLG_DISALLOWED_STD3_VALID)) { tmp[len2++] = c; } else if (map_is (&map, TR46_FLG_DISALLOWED_STD3_MAPPED)) { len2 += get_map_data (tmp + len2, &map); } } } free (domain_u32); /* Normalize to NFC */ tmp[len2] = 0; domain_u32 = u32_normalize (UNINORM_NFC, tmp, len2 + 1, NULL, &len); free (tmp); tmp = NULL; if (!domain_u32) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } /* split into labels and check */ uint32_t *e, *s; for (e = s = domain_u32; *e; s = e) { while (*e && *e != '.') e++; if (e - s >= 4 && s[0] == 'x' && s[1] == 'n' && s[2] == '-' && s[3] == '-') { /* decode punycode and check result non-transitional */ size_t ace_len; uint32_t name_u32[IDN2_LABEL_MAX_LENGTH]; size_t name_len = IDN2_LABEL_MAX_LENGTH; uint8_t *ace; ace = u32_to_u8 (s + 4, e - s - 4, NULL, &ace_len); if (!ace) { free (domain_u32); if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = _idn2_punycode_decode (ace_len, (char *) ace, &name_len, name_u32); free (ace); if (rc) { free (domain_u32); return rc; } test_flags = TR46_NONTRANSITIONAL_CHECK; if (!(flags & IDN2_USE_STD3_ASCII_RULES)) test_flags |= TEST_ALLOW_STD3_DISALLOWED; if ((rc = _idn2_label_test (test_flags, name_u32, name_len))) err = rc; } else { test_flags = transitional ? TR46_TRANSITIONAL_CHECK : TR46_NONTRANSITIONAL_CHECK; if (!(flags & IDN2_USE_STD3_ASCII_RULES)) test_flags |= TEST_ALLOW_STD3_DISALLOWED; if ((rc = _idn2_label_test (test_flags, s, e - s))) err = rc; } if (*e) e++; } if (err == IDN2_OK && out) { uint8_t *_out = u32_to_u8 (domain_u32, len, NULL, &len); free (domain_u32); if (!_out) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } *out = _out; } else free (domain_u32); return err; } /** * idn2_lookup_u8: * @src: input zero-terminated UTF-8 string in Unicode NFC normalized form. * @lookupname: newly allocated output variable with name to lookup in DNS. * @flags: optional #idn2_flags to modify behaviour. * * Perform IDNA2008 lookup string conversion on domain name @src, as * described in section 5 of RFC 5891. Note that the input string * must be encoded in UTF-8 and be in Unicode NFC form. * * Pass %IDN2_NFC_INPUT in @flags to convert input to NFC form before * further processing. %IDN2_TRANSITIONAL and %IDN2_NONTRANSITIONAL * do already imply %IDN2_NFC_INPUT. * * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to * convert any input A-labels to U-labels and perform additional * testing. This is default since version 2.2. * To switch this behavior off, pass IDN2_NO_ALABEL_ROUNDTRIP * * Pass %IDN2_TRANSITIONAL to enable Unicode TR46 * transitional processing, and %IDN2_NONTRANSITIONAL to enable * Unicode TR46 non-transitional processing. 
* * Multiple flags may be specified by binary or:ing them together. * * After version 2.0.3: %IDN2_USE_STD3_ASCII_RULES disabled by default. * Previously we were eliminating non-STD3 characters from domain strings * such as _443._tcp.example.com, or IPs 1.2.3.4/24 provided to libidn2 * functions. That was an unexpected regression for applications switching * from libidn and thus it is no longer applied by default. * Use %IDN2_USE_STD3_ASCII_RULES to enable that behavior again. * * After version 0.11: @lookupname may be NULL to test lookup of @src * without allocating memory. * * Returns: On successful conversion %IDN2_OK is returned, if the * output domain or any label would have been too long * %IDN2_TOO_BIG_DOMAIN or %IDN2_TOO_BIG_LABEL is returned, or * another error code is returned. * * Since: 0.1 **/ int idn2_lookup_u8 (const uint8_t * src, uint8_t ** lookupname, int flags) { size_t lookupnamelen = 0; uint8_t _lookupname[IDN2_DOMAIN_MAX_LENGTH + 1]; uint8_t _mapped[IDN2_DOMAIN_MAX_LENGTH + 1]; int rc; if (src == NULL) { if (lookupname) *lookupname = NULL; return IDN2_OK; } rc = set_default_flags(&flags); if (rc != IDN2_OK) return rc; if (!(flags & IDN2_NO_TR46)) { uint8_t *out; size_t outlen; rc = _tr46 (src, &out, flags); if (rc != IDN2_OK) return rc; outlen = u8_strlen (out); if (outlen >= sizeof (_mapped)) { free (out); return IDN2_TOO_BIG_DOMAIN; } memcpy (_mapped, out, outlen + 1); src = _mapped; free (out); } do { const uint8_t *end = (uint8_t *) strchrnul ((const char *) src, '.'); /* XXX Do we care about non-U+002E dots such as U+3002, U+FF0E and U+FF61 here? Perhaps when IDN2_NFC_INPUT? */ size_t labellen = end - src; uint8_t tmp[IDN2_LABEL_MAX_LENGTH]; size_t tmplen = IDN2_LABEL_MAX_LENGTH; rc = label (src, labellen, tmp, &tmplen, flags); if (rc != IDN2_OK) return rc; if (lookupnamelen + tmplen > IDN2_DOMAIN_MAX_LENGTH - (tmplen == 0 && *end == '\0' ? 1 : 2)) return IDN2_TOO_BIG_DOMAIN; memcpy (_lookupname + lookupnamelen, tmp, tmplen); lookupnamelen += tmplen; if (*end == '.') { if (lookupnamelen + 1 > IDN2_DOMAIN_MAX_LENGTH) return IDN2_TOO_BIG_DOMAIN; _lookupname[lookupnamelen] = '.'; lookupnamelen++; } _lookupname[lookupnamelen] = '\0'; src = end; } while (*src++); if (lookupname) { uint8_t *tmp = (uint8_t *) malloc (lookupnamelen + 1); if (tmp == NULL) return IDN2_MALLOC; memcpy (tmp, _lookupname, lookupnamelen + 1); *lookupname = tmp; } return IDN2_OK; } /** * idn2_lookup_ul: * @src: input zero-terminated locale encoded string. * @lookupname: newly allocated output variable with name to lookup in DNS. * @flags: optional #idn2_flags to modify behaviour. * * Perform IDNA2008 lookup string conversion on domain name @src, as * described in section 5 of RFC 5891. Note that the input is assumed * to be encoded in the locale's default coding system, and will be * transcoded to UTF-8 and NFC normalized by this function. * * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to * convert any input A-labels to U-labels and perform additional * testing. This is default since version 2.2. * To switch this behavior off, pass IDN2_NO_ALABEL_ROUNDTRIP * * Pass %IDN2_TRANSITIONAL to enable Unicode TR46 transitional processing, * and %IDN2_NONTRANSITIONAL to enable Unicode TR46 non-transitional * processing. * * Multiple flags may be specified by binary or:ing them together, for * example %IDN2_ALABEL_ROUNDTRIP | %IDN2_NONTRANSITIONAL. * * The %IDN2_NFC_INPUT in @flags is always enabled in this function. 
* * After version 0.11: @lookupname may be NULL to test lookup of @src * without allocating memory. * * Returns: On successful conversion %IDN2_OK is returned, if * conversion from locale to UTF-8 fails then %IDN2_ICONV_FAIL is * returned, if the output domain or any label would have been too * long %IDN2_TOO_BIG_DOMAIN or %IDN2_TOO_BIG_LABEL is returned, or * another error code is returned. * * Since: 0.1 **/ int idn2_lookup_ul (const char * src, char ** lookupname, int flags) { uint8_t *utf8src = NULL; int rc; if (src) { const char *encoding = locale_charset (); utf8src = u8_strconv_from_encoding (src, encoding, iconveh_error); if (!utf8src) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ICONV_FAIL; } } rc = idn2_lookup_u8 (utf8src, (uint8_t **) lookupname, flags | IDN2_NFC_INPUT); free (utf8src); return rc; } /** * idn2_to_ascii_4i: * @input: zero terminated input Unicode (UCS-4) string. * @inlen: number of elements in @input. * @output: output zero terminated string that must have room for at least 63 characters plus the terminating zero. * @flags: optional #idn2_flags to modify behaviour. * * THIS FUNCTION HAS BEEN DEPRECATED DUE TO A DESIGN FLAW. USE idn2_to_ascii_4i2() INSTEAD ! * * The ToASCII operation takes a sequence of Unicode code points that make * up one domain label and transforms it into a sequence of code points in * the ASCII range (0..7F). If ToASCII succeeds, the original sequence and * the resulting sequence are equivalent labels. * * It is important to note that the ToASCII operation can fail. * ToASCII fails if any step of it fails. If any step of the * ToASCII operation fails on any label in a domain name, that domain * name MUST NOT be used as an internationalized domain name. * The method for dealing with this failure is application-specific. * * The inputs to ToASCII are a sequence of code points. * * ToASCII never alters a sequence of code points that are all in the ASCII * range to begin with (although it could fail). Applying the ToASCII operation multiple * effect as applying it just once. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_4i (const uint32_t * input, size_t inlen, char * output, int flags) { char *out; int rc; if (!input) { if (output) *output = 0; return IDN2_OK; } rc = idn2_to_ascii_4i2 (input, inlen, &out, flags); if (rc == IDN2_OK) { size_t len = strlen(out); if (len > 63) rc = IDN2_TOO_BIG_DOMAIN; else if (output) memcpy (output, out, len); free (out); } return rc; } /** * idn2_to_ascii_4i: * @input: zero terminated input Unicode (UCS-4) string. * @inlen: number of elements in @input. * @output: pointer to newly allocated zero-terminated output string. * @flags: optional #idn2_flags to modify behaviour. * * The ToASCII operation takes a sequence of Unicode code points that make * up one domain label and transforms it into a sequence of code points in * the ASCII range (0..7F). If ToASCII succeeds, the original sequence and * the resulting sequence are equivalent labels. * * It is important to note that the ToASCII operation can fail. * ToASCII fails if any step of it fails. 
If any step of the * ToASCII operation fails on any label in a domain name, that domain * name MUST NOT be used as an internationalized domain name. * The method for dealing with this failure is application-specific. * * The inputs to ToASCII are a sequence of code points. * * ToASCII never alters a sequence of code points that are all in the ASCII * range to begin with (although it could fail). Applying the ToASCII operation multiple * effect as applying it just once. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.1.1 **/ int idn2_to_ascii_4i2 (const uint32_t * input, size_t inlen, char ** output, int flags) { uint32_t *input_u32; uint8_t *input_u8, *output_u8; size_t length; int rc; if (!input) { if (output) *output = NULL; return IDN2_OK; } input_u32 = (uint32_t *) malloc ((inlen + 1) * sizeof(uint32_t)); if (!input_u32) return IDN2_MALLOC; u32_cpy (input_u32, input, inlen); input_u32[inlen] = 0; input_u8 = u32_to_u8 (input_u32, inlen + 1, NULL, &length); free (input_u32); if (!input_u8) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = idn2_lookup_u8 (input_u8, &output_u8, flags); free (input_u8); if (rc == IDN2_OK) { if (output) *output = (char *) output_u8; else free (output_u8); } return rc; } /** * idn2_to_ascii_4z: * @input: zero terminated input Unicode (UCS-4) string. * @output: pointer to newly allocated zero-terminated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert UCS-4 domain name to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_4z (const uint32_t * input, char ** output, int flags) { uint8_t *input_u8; size_t length; int rc; if (!input) { if (output) *output = NULL; return IDN2_OK; } input_u8 = u32_to_u8 (input, u32_strlen(input) + 1, NULL, &length); if (!input_u8) { if (errno == ENOMEM) return IDN2_MALLOC; return IDN2_ENCODING_ERROR; } rc = idn2_lookup_u8 (input_u8, (uint8_t **) output, flags); free (input_u8); return rc; } /** * idn2_to_ascii_8z: * @input: zero terminated input UTF-8 string. * @output: pointer to newly allocated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert UTF-8 domain name to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. 
As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Return value: Returns %IDN2_OK on success, or error code. * * Since: 2.0.0 **/ int idn2_to_ascii_8z (const char * input, char ** output, int flags) { return idn2_lookup_u8 ((const uint8_t *) input, (uint8_t **) output, flags); } /** * idn2_to_ascii_lz: * @input: zero terminated input UTF-8 string. * @output: pointer to newly allocated output string. * @flags: optional #idn2_flags to modify behaviour. * * Convert a domain name in locale's encoding to ASCII string using the IDNA2008 * rules. The domain name may contain several labels, separated by dots. * The output buffer must be deallocated by the caller. * * The default behavior of this function (when flags are zero) is to apply * the IDNA2008 rules without the TR46 amendments. As the TR46 * non-transitional processing is nowadays ubiquitous, when unsure, it is * recommended to call this function with the %IDN2_NONTRANSITIONAL * and the %IDN2_NFC_INPUT flags for compatibility with other software. * * Returns: %IDN2_OK on success, or error code. * Same as described in idn2_lookup_ul() documentation. * * Since: 2.0.0 **/ int idn2_to_ascii_lz (const char * input, char ** output, int flags) { return idn2_lookup_ul (input, output, flags); }
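The code_after blob ends here; its core addition is the A-label round trip in label(): an incoming "xn--" label is Punycode-decoded, re-encoded, and rejected with IDN2_ALABEL_ROUNDTRIP_FAILED unless re-encoding reproduces the input byte for byte, which closes the non-canonical-encoding hole of CVE-2019-12290. The generic shape of that check, with hypothetical codec stand-ins rather than libidn2 functions:

#include <stddef.h>
#include <string.h>

/* decode()/encode() are hypothetical function pointers standing in for
 * Punycode decode/encode; both return 0 on success. */
typedef int (*codec_fn)(const char *in, size_t inlen,
                        char *out, size_t *outlen);

static int roundtrip_ok(const char *label, size_t len,
                        codec_fn decode, codec_fn encode)
{
  char u[256], a[256];
  size_t ulen = sizeof u, alen = sizeof a;
  if (decode (label, len, u, &ulen) != 0)
    return 0;                      /* not decodable: reject */
  if (encode (u, ulen, a, &alen) != 0)
    return 0;
  /* A mismatch means the input was an alternate encoding of the same
   * label: exactly what the pre-fix code let through. */
  return alen == len && memcmp (a, label, len) == 0;
}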
label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen,
       int flags)
{
  size_t plen;
  uint32_t *p;
  int rc;
  size_t tmpl;

  if (_idn2_ascii_p (src, srclen))
    {
      if (flags & IDN2_ALABEL_ROUNDTRIP)
        /* FIXME implement this MAY:

           If the input to this procedure appears to be an A-label
           (i.e., it starts in "xn--", interpreted
           case-insensitively), the lookup application MAY attempt to
           convert it to a U-label, first ensuring that the A-label is
           entirely in lowercase (converting it to lowercase if
           necessary), and apply the tests of Section 5.4 and the
           conversion of Section 5.5 to that form. */
        return IDN2_INVALID_FLAGS;

      if (srclen > IDN2_LABEL_MAX_LENGTH)
        return IDN2_TOO_BIG_LABEL;

      if (srclen > *dstlen)
        return IDN2_TOO_BIG_DOMAIN;

      memcpy (dst, src, srclen);
      *dstlen = srclen;
      return IDN2_OK;
    }

  rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT);
  if (rc != IDN2_OK)
    return rc;

  if (!(flags & IDN2_TRANSITIONAL))
    {
      rc = _idn2_label_test(
        TEST_NFC |
        TEST_2HYPHEN |
        TEST_LEADING_COMBINING |
        TEST_DISALLOWED |
        TEST_CONTEXTJ_RULE |
        TEST_CONTEXTO_WITH_RULE |
        TEST_UNASSIGNED |
        TEST_BIDI |
        ((flags & IDN2_NONTRANSITIONAL) ? TEST_NONTRANSITIONAL : 0) |
        ((flags & IDN2_USE_STD3_ASCII_RULES) ? 0 : TEST_ALLOW_STD3_DISALLOWED),
        p, plen);

      if (rc != IDN2_OK)
        {
          free(p);
          return rc;
        }
    }

  dst[0] = 'x';
  dst[1] = 'n';
  dst[2] = '-';
  dst[3] = '-';

  tmpl = *dstlen - 4;
  rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4);
  free (p);
  if (rc != IDN2_OK)
    return rc;

  *dstlen = 4 + tmpl;

  return IDN2_OK;
}
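func_before above shows the pre-fix happy path: ASCII input is copied through untouched, everything else is NFC-normalized, label-tested, and emitted as the ACE form "xn--" plus the Punycode encoding. A toy illustration of that final assembly step, using a stub in place of _idn2_punycode_encode (the stub hardcodes the well-known vector bücher -> bcher-kva and is not a real encoder):

#include <stdio.h>
#include <string.h>

/* Placeholder for _idn2_punycode_encode(): real encoding is out of
 * scope for this sketch, so the stub just returns the canonical
 * example output. */
static int punycode_encode_stub(const char *ulabel, char *out, size_t *outlen)
{
  (void) ulabel;
  const char *ace = "bcher-kva";          /* Punycode of "bücher" */
  size_t n = strlen (ace);
  if (n > *outlen)
    return -1;
  memcpy (out, ace, n);
  *outlen = n;
  return 0;
}

int main(void)
{
  char dst[64];
  /* Same prefix bookkeeping as at the end of label() above. */
  dst[0] = 'x'; dst[1] = 'n'; dst[2] = '-'; dst[3] = '-';
  size_t tmpl = sizeof dst - 4;
  if (punycode_encode_stub ("bücher", dst + 4, &tmpl) == 0)
    printf ("%.*s\n", (int) (4 + tmpl), dst);   /* xn--bcher-kva */
  return 0;
}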
label (const uint8_t * src, size_t srclen, uint8_t * dst, size_t * dstlen,
       int flags)
{
  size_t plen;
  uint32_t *p;
  const uint8_t *src_org = NULL;
  uint8_t *src_allocated = NULL;
  int rc, check_roundtrip = 0;
  size_t tmpl, srclen_org = 0;
  uint32_t label_u32[IDN2_LABEL_MAX_LENGTH];
  size_t label32_len = IDN2_LABEL_MAX_LENGTH;

  if (_idn2_ascii_p (src, srclen)) {
    if (!(flags & IDN2_NO_ALABEL_ROUNDTRIP) && srclen >= 4 && memcmp (src, "xn--", 4) == 0) {
      /*
         If the input to this procedure appears to be an A-label
         (i.e., it starts in "xn--", interpreted
         case-insensitively), the lookup application MAY attempt to
         convert it to a U-label, first ensuring that the A-label is
         entirely in lowercase (converting it to lowercase if
         necessary), and apply the tests of Section 5.4 and the
         conversion of Section 5.5 to that form. */
      rc = _idn2_punycode_decode (srclen - 4, (char *) src + 4, &label32_len, label_u32);
      if (rc)
        return rc;

      check_roundtrip = 1;
      src_org = src;
      srclen_org = srclen;

      srclen = IDN2_LABEL_MAX_LENGTH;
      src = src_allocated = u32_to_u8 (label_u32, label32_len, NULL, &srclen);
      if (!src) {
        if (errno == ENOMEM)
          return IDN2_MALLOC;
        return IDN2_ENCODING_ERROR;
      }
    } else {
      if (srclen > IDN2_LABEL_MAX_LENGTH)
        return IDN2_TOO_BIG_LABEL;

      if (srclen > *dstlen)
        return IDN2_TOO_BIG_DOMAIN;

      memcpy (dst, src, srclen);
      *dstlen = srclen;
      return IDN2_OK;
    }
  }

  rc = _idn2_u8_to_u32_nfc (src, srclen, &p, &plen, flags & IDN2_NFC_INPUT);
  if (rc != IDN2_OK)
    goto out;

  if (!(flags & IDN2_TRANSITIONAL))
    {
      rc = _idn2_label_test(
        TEST_NFC |
        TEST_2HYPHEN |
        TEST_LEADING_COMBINING |
        TEST_DISALLOWED |
        TEST_CONTEXTJ_RULE |
        TEST_CONTEXTO_WITH_RULE |
        TEST_UNASSIGNED |
        TEST_BIDI |
        ((flags & IDN2_NONTRANSITIONAL) ? TEST_NONTRANSITIONAL : 0) |
        ((flags & IDN2_USE_STD3_ASCII_RULES) ? 0 : TEST_ALLOW_STD3_DISALLOWED),
        p, plen);

      if (rc != IDN2_OK)
        {
          free (p);
          goto out;
        }
    }

  dst[0] = 'x';
  dst[1] = 'n';
  dst[2] = '-';
  dst[3] = '-';

  tmpl = *dstlen - 4;
  rc = _idn2_punycode_encode (plen, p, &tmpl, (char *) dst + 4);
  free (p);
  if (rc != IDN2_OK)
    goto out;

  *dstlen = 4 + tmpl;

  if (check_roundtrip)
    {
      if (srclen_org != *dstlen || memcmp (src_org, dst, srclen_org))
        {
          rc = IDN2_ALABEL_ROUNDTRIP_FAILED;
          goto out;
        }
    }

  rc = IDN2_OK;

out:
  free (src_allocated);
  return rc;
}
{'added': [(54, ' if (((*flags) & IDN2_ALABEL_ROUNDTRIP) && ((*flags) & IDN2_NO_ALABEL_ROUNDTRIP))'), (55, ' return IDN2_INVALID_FLAGS;'), (56, ''), (69, ' const uint8_t *src_org = NULL;'), (70, ' uint8_t *src_allocated = NULL;'), (71, ' int rc, check_roundtrip = 0;'), (72, ' size_t tmpl, srclen_org = 0;'), (73, ' uint32_t label_u32[IDN2_LABEL_MAX_LENGTH];'), (74, ' size_t label32_len = IDN2_LABEL_MAX_LENGTH;'), (75, ''), (76, ' if (_idn2_ascii_p (src, srclen)) {'), (77, ' if (!(flags & IDN2_NO_ALABEL_ROUNDTRIP) && srclen >= 4 && memcmp (src, "xn--", 4) == 0) {'), (78, ' /*'), (79, '\t If the input to this procedure appears to be an A-label'), (80, '\t (i.e., it starts in "xn--", interpreted'), (81, '\t case-insensitively), the lookup application MAY attempt to'), (82, '\t convert it to a U-label, first ensuring that the A-label is'), (83, '\t entirely in lowercase (converting it to lowercase if'), (84, '\t necessary), and apply the tests of Section 5.4 and the'), (85, '\t conversion of Section 5.5 to that form. */'), (86, ' rc = _idn2_punycode_decode (srclen - 4, (char *) src + 4, &label32_len, label_u32);'), (87, ' if (rc)'), (88, '\treturn rc;'), (90, ' check_roundtrip = 1;'), (91, ' src_org = src;'), (92, ' srclen_org = srclen;'), (93, ''), (94, ' srclen = IDN2_LABEL_MAX_LENGTH;'), (95, ' src = src_allocated = u32_to_u8 (label_u32, label32_len, NULL, &srclen);'), (96, ' if (!src) {'), (97, '\tif (errno == ENOMEM)'), (98, '\t return IDN2_MALLOC;'), (99, '\treturn IDN2_ENCODING_ERROR;'), (100, ' }'), (101, ' } else {'), (111, ' }'), (115, ' goto out;'), (133, '\t free (p);'), (134, '\t goto out;'), (147, ' goto out;'), (148, ''), (152, ' if (check_roundtrip)'), (153, ' {'), (154, ' if (srclen_org != *dstlen || memcmp (src_org, dst, srclen_org))'), (155, ' {'), (156, ' rc = IDN2_ALABEL_ROUNDTRIP_FAILED;'), (157, '\tgoto out;'), (158, ' }'), (159, ' }'), (160, ''), (161, ' rc = IDN2_OK;'), (162, ''), (163, 'out:'), (164, ' free (src_allocated);'), (165, ' return rc;'), (416, ' *'), (419, ' * testing. This is default since version 2.2.'), (420, ' * To switch this behavior off, pass IDN2_NO_ALABEL_ROUNDTRIP'), (421, ' *'), (424, ' * Unicode TR46 non-transitional processing.'), (425, ' *'), (426, ' * Multiple flags may be specified by binary or:ing them together.'), (544, ' * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to'), (545, ' * convert any input A-labels to U-labels and perform additional'), (546, ' * testing. 
This is default since version 2.2.'), (547, ' * To switch this behavior off, pass IDN2_NO_ALABEL_ROUNDTRIP'), (548, ' *'), (549, ' * Pass %IDN2_TRANSITIONAL to enable Unicode TR46 transitional processing,'), (551, ' * processing.'), (552, ' *'), (553, ' * Multiple flags may be specified by binary or:ing them together, for'), (554, ' * example %IDN2_ALABEL_ROUNDTRIP | %IDN2_NONTRANSITIONAL.'), (555, ' *'), (556, ' * The %IDN2_NFC_INPUT in @flags is always enabled in this function.')], 'deleted': [(66, ' int rc;'), (67, ' size_t tmpl;'), (68, ''), (69, ' if (_idn2_ascii_p (src, srclen))'), (70, ' {'), (71, ' if (flags & IDN2_ALABEL_ROUNDTRIP)'), (72, '\t/* FIXME implement this MAY:'), (73, ''), (74, '\t If the input to this procedure appears to be an A-label'), (75, '\t (i.e., it starts in "xn--", interpreted'), (76, '\t case-insensitively), the lookup application MAY attempt to'), (77, '\t convert it to a U-label, first ensuring that the A-label is'), (78, '\t entirely in lowercase (converting it to lowercase if'), (79, '\t necessary), and apply the tests of Section 5.4 and the'), (80, '\t conversion of Section 5.5 to that form. */'), (81, '\treturn IDN2_INVALID_FLAGS;'), (95, ' return rc;'), (113, '\t free(p);'), (114, '\t return rc;'), (127, ' return rc;'), (131, ' return IDN2_OK;'), (384, ' * testing (not implemented yet).'), (387, ' * Unicode TR46 non-transitional processing. Multiple flags may be'), (388, ' * specified by binary or:ing them together.'), (506, ' * Pass %IDN2_ALABEL_ROUNDTRIP in @flags to convert any input A-labels'), (507, ' * to U-labels and perform additional testing. Pass'), (508, ' * %IDN2_TRANSITIONAL to enable Unicode TR46 transitional processing,'), (510, ' * processing. Multiple flags may be specified by binary or:ing them'), (511, ' * together, for example %IDN2_ALABEL_ROUNDTRIP |'), (512, ' * %IDN2_NONTRANSITIONAL. The %IDN2_NFC_INPUT in @flags is always'), (513, ' * enabled in this function.')]}
74
31
480
2,448
53
275
11
https://gitlab.com/libidn/libidn2
CVE-2019-12290
CWE-20
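The row above records libidn2's fix for CVE-2019-12290: when lookup receives an ASCII label starting with "xn--", the patched label() now decodes the A-label, re-encodes it, and rejects the input with IDN2_ALABEL_ROUNDTRIP_FAILED unless the result is byte-identical to the original. Below is a minimal sketch of that roundtrip idea written against libidn2's public 8z API rather than the internal _idn2_punycode_* helpers the patch uses; the helper name, flag choice, and test label are illustrative assumptions, not part of the patch.

```c
/* Sketch: A-label roundtrip check via libidn2's public API.
 * The patch above implements the same idea inside label() using the
 * internal punycode helpers; this version is an illustration only. */
#include <idn2.h>
#include <stdio.h>
#include <string.h>

/* Returns 1 if `alabel` survives decode -> re-encode unchanged,
 * 0 if it does not, -1 on a library error. */
static int alabel_roundtrips(const char *alabel)
{
	char *ulabel = NULL, *realabel = NULL;
	int ok = -1;

	/* Decode the A-label back to a Unicode U-label. */
	if (idn2_to_unicode_8z8z(alabel, &ulabel, 0) != IDN2_OK)
		return -1;

	/* Re-encode and require byte identity with the original input;
	 * a mismatch means the A-label was not canonical. */
	if (idn2_to_ascii_8z(ulabel, &realabel, IDN2_NONTRANSITIONAL) == IDN2_OK)
		ok = (strcmp(alabel, realabel) == 0);

	idn2_free(ulabel);
	idn2_free(realabel);
	return ok;
}

int main(void)
{
	/* "xn--bcher-kva" is the canonical A-label for "bücher". */
	printf("roundtrip: %d\n", alabel_roundtrips("xn--bcher-kva"));
	return 0;
}
```

This should compile with `cc roundtrip.c -lidn2`, assuming the libidn2 headers are installed. A non-canonical A-label (for example one whose punycode is not in lowercase form) fails the strcmp and is rejected, which is the property the patched label() now enforces by default instead of returning IDN2_INVALID_FLAGS for the unimplemented MAY.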
503
trusted.c
C
trusted_update
/* * Copyright (C) 2010 IBM Corporation * * Author: * David Safford <safford@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 of the License. * * See Documentation/security/keys/trusted-encrypted.rst */ #include <crypto/hash_info.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/parser.h> #include <linux/string.h> #include <linux/err.h> #include <keys/user-type.h> #include <keys/trusted-type.h> #include <linux/key-type.h> #include <linux/rcupdate.h> #include <linux/crypto.h> #include <crypto/hash.h> #include <crypto/sha.h> #include <linux/capability.h> #include <linux/tpm.h> #include <linux/tpm_command.h> #include "trusted.h" static const char hmac_alg[] = "hmac(sha1)"; static const char hash_alg[] = "sha1"; struct sdesc { struct shash_desc shash; char ctx[]; }; static struct crypto_shash *hashalg; static struct crypto_shash *hmacalg; static struct sdesc *init_sdesc(struct crypto_shash *alg) { struct sdesc *sdesc; int size; size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); sdesc = kmalloc(size, GFP_KERNEL); if (!sdesc) return ERR_PTR(-ENOMEM); sdesc->shash.tfm = alg; sdesc->shash.flags = 0x0; return sdesc; } static int TSS_sha1(const unsigned char *data, unsigned int datalen, unsigned char *digest) { struct sdesc *sdesc; int ret; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); kzfree(sdesc); return ret; } static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, ...) { struct sdesc *sdesc; va_list argp; unsigned int dlen; unsigned char *data; int ret; sdesc = init_sdesc(hmacalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hmac_alg); return PTR_ERR(sdesc); } ret = crypto_shash_setkey(hmacalg, key, keylen); if (ret < 0) goto out; ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; va_start(argp, keylen); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; data = va_arg(argp, unsigned char *); if (data == NULL) { ret = -EINVAL; break; } ret = crypto_shash_update(&sdesc->shash, data, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, digest); out: kzfree(sdesc); return ret; } /* * calculate authorization info fields to send to TPM */ static int TSS_authhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, unsigned char *h1, unsigned char *h2, unsigned char h3, ...) 
{ unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned char *data; unsigned char c; int ret; va_list argp; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } c = h3; ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; va_start(argp, h3); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; data = va_arg(argp, unsigned char *); if (!data) { ret = -EINVAL; break; } ret = crypto_shash_update(&sdesc->shash, data, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (!ret) ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, h1, TPM_NONCE_SIZE, h2, 1, &c, 0, 0); out: kzfree(sdesc); return ret; } /* * verify the AUTH1_COMMAND (Seal) result from TPM */ static int TSS_checkhmac1(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key, unsigned int keylen, ...) { uint32_t bufsize; uint16_t tag; uint32_t ordinal; uint32_t result; unsigned char *enonce; unsigned char *continueflag; unsigned char *authdata; unsigned char testhmac[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned int dpos; va_list argp; int ret; bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); tag = LOAD16(buffer, 0); ordinal = command; result = LOAD32N(buffer, TPM_RETURN_OFFSET); if (tag == TPM_TAG_RSP_COMMAND) return 0; if (tag != TPM_TAG_RSP_AUTH1_COMMAND) return -EINVAL; authdata = buffer + bufsize - SHA1_DIGEST_SIZE; continueflag = authdata - 1; enonce = continueflag - TPM_NONCE_SIZE; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, sizeof result); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, sizeof ordinal); if (ret < 0) goto out; va_start(argp, keylen); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; dpos = va_arg(argp, unsigned int); ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (ret < 0) goto out; ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, 1, continueflag, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kzfree(sdesc); return ret; } /* * verify the AUTH2_COMMAND (unseal) result from TPM */ static int TSS_checkhmac2(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key1, unsigned int keylen1, const unsigned char *key2, unsigned int keylen2, ...) 
{ uint32_t bufsize; uint16_t tag; uint32_t ordinal; uint32_t result; unsigned char *enonce1; unsigned char *continueflag1; unsigned char *authdata1; unsigned char *enonce2; unsigned char *continueflag2; unsigned char *authdata2; unsigned char testhmac1[SHA1_DIGEST_SIZE]; unsigned char testhmac2[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned int dpos; va_list argp; int ret; bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); tag = LOAD16(buffer, 0); ordinal = command; result = LOAD32N(buffer, TPM_RETURN_OFFSET); if (tag == TPM_TAG_RSP_COMMAND) return 0; if (tag != TPM_TAG_RSP_AUTH2_COMMAND) return -EINVAL; authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1 + SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE); authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE); continueflag1 = authdata1 - 1; continueflag2 = authdata2 - 1; enonce1 = continueflag1 - TPM_NONCE_SIZE; enonce2 = continueflag2 - TPM_NONCE_SIZE; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, sizeof result); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, sizeof ordinal); if (ret < 0) goto out; va_start(argp, keylen2); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; dpos = va_arg(argp, unsigned int); ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (ret < 0) goto out; ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce1, TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) { ret = -EINVAL; goto out; } ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce2, TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kzfree(sdesc); return ret; } /* * For key specific tpm requests, we will generate and send our * own TPM command packets using the drivers send function. */ static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd, size_t buflen) { int rc; dump_tpm_buf(cmd); rc = tpm_send(chip_num, cmd, buflen); dump_tpm_buf(cmd); if (rc > 0) /* Can't return positive return codes values to keyctl */ rc = -EPERM; return rc; } /* * Lock a trusted key, by extending a selected PCR. * * Prevents a trusted key that is sealed to PCRs from being accessed. * This uses the tpm driver's extend function. */ static int pcrlock(const int pcrnum) { unsigned char hash[SHA1_DIGEST_SIZE]; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE); if (ret != SHA1_DIGEST_SIZE) return ret; return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? 
-EINVAL : 0; } /* * Create an object specific authorisation protocol (OSAP) session */ static int osap(struct tpm_buf *tb, struct osapsess *s, const unsigned char *key, uint16_t type, uint32_t handle) { unsigned char enonce[TPM_NONCE_SIZE]; unsigned char ononce[TPM_NONCE_SIZE]; int ret; ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE); if (ret != TPM_NONCE_SIZE) return ret; INIT_BUF(tb); store16(tb, TPM_TAG_RQU_COMMAND); store32(tb, TPM_OSAP_SIZE); store32(tb, TPM_ORD_OSAP); store16(tb, type); store32(tb, handle); storebytes(tb, ononce, TPM_NONCE_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) return ret; s->handle = LOAD32(tb->data, TPM_DATA_OFFSET); memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]), TPM_NONCE_SIZE); memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) + TPM_NONCE_SIZE]), TPM_NONCE_SIZE); return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, 0, 0); } /* * Create an object independent authorisation protocol (oiap) session */ static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) { int ret; INIT_BUF(tb); store16(tb, TPM_TAG_RQU_COMMAND); store32(tb, TPM_OIAP_SIZE); store32(tb, TPM_ORD_OIAP); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) return ret; *handle = LOAD32(tb->data, TPM_DATA_OFFSET); memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)], TPM_NONCE_SIZE); return 0; } struct tpm_digests { unsigned char encauth[SHA1_DIGEST_SIZE]; unsigned char pubauth[SHA1_DIGEST_SIZE]; unsigned char xorwork[SHA1_DIGEST_SIZE * 2]; unsigned char xorhash[SHA1_DIGEST_SIZE]; unsigned char nonceodd[TPM_NONCE_SIZE]; }; /* * Have the TPM seal(encrypt) the trusted key, possibly based on * Platform Configuration Registers (PCRs). AUTH1 for sealing key. 
*/ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, uint32_t keyhandle, const unsigned char *keyauth, const unsigned char *data, uint32_t datalen, unsigned char *blob, uint32_t *bloblen, const unsigned char *blobauth, const unsigned char *pcrinfo, uint32_t pcrinfosize) { struct osapsess sess; struct tpm_digests *td; unsigned char cont; uint32_t ordinal; uint32_t pcrsize; uint32_t datsize; int sealinfosize; int encdatasize; int storedsize; int ret; int i; /* alloc some work space for all the hashes */ td = kmalloc(sizeof *td, GFP_KERNEL); if (!td) return -ENOMEM; /* get session for sealing key */ ret = osap(tb, &sess, keyauth, keytype, keyhandle); if (ret < 0) goto out; dump_sess(&sess); /* calculate encrypted authorization value */ memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE); memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE); ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash); if (ret < 0) goto out; ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE); if (ret != TPM_NONCE_SIZE) goto out; ordinal = htonl(TPM_ORD_SEAL); datsize = htonl(datalen); pcrsize = htonl(pcrinfosize); cont = 0; /* encrypt data authorization key */ for (i = 0; i < SHA1_DIGEST_SIZE; ++i) td->encauth[i] = td->xorhash[i] ^ blobauth[i]; /* calculate authorization HMAC value */ if (pcrinfosize == 0) { /* no pcr info specified */ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, sess.enonce, td->nonceodd, cont, sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, td->encauth, sizeof(uint32_t), &pcrsize, sizeof(uint32_t), &datsize, datalen, data, 0, 0); } else { /* pcr info specified */ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, sess.enonce, td->nonceodd, cont, sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, td->encauth, sizeof(uint32_t), &pcrsize, pcrinfosize, pcrinfo, sizeof(uint32_t), &datsize, datalen, data, 0, 0); } if (ret < 0) goto out; /* build and send the TPM request packet */ INIT_BUF(tb); store16(tb, TPM_TAG_RQU_AUTH1_COMMAND); store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen); store32(tb, TPM_ORD_SEAL); store32(tb, keyhandle); storebytes(tb, td->encauth, SHA1_DIGEST_SIZE); store32(tb, pcrinfosize); storebytes(tb, pcrinfo, pcrinfosize); store32(tb, datalen); storebytes(tb, data, datalen); store32(tb, sess.handle); storebytes(tb, td->nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) goto out; /* calculate the size of the returned Blob */ sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t)); encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize); storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize + sizeof(uint32_t) + encdatasize; /* check the HMAC in the response */ ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret, SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0, 0); /* copy the returned blob to caller */ if (!ret) { memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize); *bloblen = storedsize; } out: kzfree(td); return ret; } /* * use the AUTH2_COMMAND form of unseal, to authorize both key and blob */ static int tpm_unseal(struct tpm_buf *tb, uint32_t keyhandle, const unsigned char *keyauth, const unsigned char *blob, int bloblen, const unsigned char *blobauth, unsigned char *data, unsigned int *datalen) { unsigned char nonceodd[TPM_NONCE_SIZE]; unsigned char enonce1[TPM_NONCE_SIZE]; unsigned char enonce2[TPM_NONCE_SIZE]; 
unsigned char authdata1[SHA1_DIGEST_SIZE]; unsigned char authdata2[SHA1_DIGEST_SIZE]; uint32_t authhandle1 = 0; uint32_t authhandle2 = 0; unsigned char cont = 0; uint32_t ordinal; uint32_t keyhndl; int ret; /* sessions for unsealing key and data */ ret = oiap(tb, &authhandle1, enonce1); if (ret < 0) { pr_info("trusted_key: oiap failed (%d)\n", ret); return ret; } ret = oiap(tb, &authhandle2, enonce2); if (ret < 0) { pr_info("trusted_key: oiap failed (%d)\n", ret); return ret; } ordinal = htonl(TPM_ORD_UNSEAL); keyhndl = htonl(SRKHANDLE); ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE); if (ret != TPM_NONCE_SIZE) { pr_info("trusted_key: tpm_get_random failed (%d)\n", ret); return ret; } ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE, enonce1, nonceodd, cont, sizeof(uint32_t), &ordinal, bloblen, blob, 0, 0); if (ret < 0) return ret; ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE, enonce2, nonceodd, cont, sizeof(uint32_t), &ordinal, bloblen, blob, 0, 0); if (ret < 0) return ret; /* build and send TPM request packet */ INIT_BUF(tb); store16(tb, TPM_TAG_RQU_AUTH2_COMMAND); store32(tb, TPM_UNSEAL_SIZE + bloblen); store32(tb, TPM_ORD_UNSEAL); store32(tb, keyhandle); storebytes(tb, blob, bloblen); store32(tb, authhandle1); storebytes(tb, nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, authdata1, SHA1_DIGEST_SIZE); store32(tb, authhandle2); storebytes(tb, nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, authdata2, SHA1_DIGEST_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) { pr_info("trusted_key: authhmac failed (%d)\n", ret); return ret; } *datalen = LOAD32(tb->data, TPM_DATA_OFFSET); ret = TSS_checkhmac2(tb->data, ordinal, nonceodd, keyauth, SHA1_DIGEST_SIZE, blobauth, SHA1_DIGEST_SIZE, sizeof(uint32_t), TPM_DATA_OFFSET, *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0, 0); if (ret < 0) { pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret); return ret; } memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen); return 0; } /* * Have the TPM seal(encrypt) the symmetric key */ static int key_seal(struct trusted_key_payload *p, struct trusted_key_options *o) { struct tpm_buf *tb; int ret; tb = kzalloc(sizeof *tb, GFP_KERNEL); if (!tb) return -ENOMEM; /* include migratable flag at end of sealed key */ p->key[p->key_len] = p->migratable; ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth, p->key, p->key_len + 1, p->blob, &p->blob_len, o->blobauth, o->pcrinfo, o->pcrinfo_len); if (ret < 0) pr_info("trusted_key: srkseal failed (%d)\n", ret); kzfree(tb); return ret; } /* * Have the TPM unseal(decrypt) the symmetric key */ static int key_unseal(struct trusted_key_payload *p, struct trusted_key_options *o) { struct tpm_buf *tb; int ret; tb = kzalloc(sizeof *tb, GFP_KERNEL); if (!tb) return -ENOMEM; ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len, o->blobauth, p->key, &p->key_len); if (ret < 0) pr_info("trusted_key: srkunseal failed (%d)\n", ret); else /* pull migratable flag out of sealed key */ p->migratable = p->key[--p->key_len]; kzfree(tb); return ret; } enum { Opt_err = -1, Opt_new, Opt_load, Opt_update, Opt_keyhandle, Opt_keyauth, Opt_blobauth, Opt_pcrinfo, Opt_pcrlock, Opt_migratable, Opt_hash, Opt_policydigest, Opt_policyhandle, }; static const match_table_t key_tokens = { {Opt_new, "new"}, {Opt_load, "load"}, {Opt_update, "update"}, {Opt_keyhandle, "keyhandle=%s"}, {Opt_keyauth, "keyauth=%s"}, {Opt_blobauth, "blobauth=%s"}, {Opt_pcrinfo, "pcrinfo=%s"}, {Opt_pcrlock, 
"pcrlock=%s"}, {Opt_migratable, "migratable=%s"}, {Opt_hash, "hash=%s"}, {Opt_policydigest, "policydigest=%s"}, {Opt_policyhandle, "policyhandle=%s"}, {Opt_err, NULL} }; /* can have zero or more token= options */ static int getoptions(char *c, struct trusted_key_payload *pay, struct trusted_key_options *opt) { substring_t args[MAX_OPT_ARGS]; char *p = c; int token; int res; unsigned long handle; unsigned long lock; unsigned long token_mask = 0; unsigned int digest_len; int i; int tpm2; tpm2 = tpm_is_tpm2(TPM_ANY_NUM); if (tpm2 < 0) return tpm2; opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1; while ((p = strsep(&c, " \t"))) { if (*p == '\0' || *p == ' ' || *p == '\t') continue; token = match_token(p, key_tokens, args); if (test_and_set_bit(token, &token_mask)) return -EINVAL; switch (token) { case Opt_pcrinfo: opt->pcrinfo_len = strlen(args[0].from) / 2; if (opt->pcrinfo_len > MAX_PCRINFO_SIZE) return -EINVAL; res = hex2bin(opt->pcrinfo, args[0].from, opt->pcrinfo_len); if (res < 0) return -EINVAL; break; case Opt_keyhandle: res = kstrtoul(args[0].from, 16, &handle); if (res < 0) return -EINVAL; opt->keytype = SEAL_keytype; opt->keyhandle = handle; break; case Opt_keyauth: if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) return -EINVAL; res = hex2bin(opt->keyauth, args[0].from, SHA1_DIGEST_SIZE); if (res < 0) return -EINVAL; break; case Opt_blobauth: if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) return -EINVAL; res = hex2bin(opt->blobauth, args[0].from, SHA1_DIGEST_SIZE); if (res < 0) return -EINVAL; break; case Opt_migratable: if (*args[0].from == '0') pay->migratable = 0; else return -EINVAL; break; case Opt_pcrlock: res = kstrtoul(args[0].from, 10, &lock); if (res < 0) return -EINVAL; opt->pcrlock = lock; break; case Opt_hash: if (test_bit(Opt_policydigest, &token_mask)) return -EINVAL; for (i = 0; i < HASH_ALGO__LAST; i++) { if (!strcmp(args[0].from, hash_algo_name[i])) { opt->hash = i; break; } } if (i == HASH_ALGO__LAST) return -EINVAL; if (!tpm2 && i != HASH_ALGO_SHA1) { pr_info("trusted_key: TPM 1.x only supports SHA-1.\n"); return -EINVAL; } break; case Opt_policydigest: digest_len = hash_digest_size[opt->hash]; if (!tpm2 || strlen(args[0].from) != (2 * digest_len)) return -EINVAL; res = hex2bin(opt->policydigest, args[0].from, digest_len); if (res < 0) return -EINVAL; opt->policydigest_len = digest_len; break; case Opt_policyhandle: if (!tpm2) return -EINVAL; res = kstrtoul(args[0].from, 16, &handle); if (res < 0) return -EINVAL; opt->policyhandle = handle; break; default: return -EINVAL; } } return 0; } /* * datablob_parse - parse the keyctl data and fill in the * payload and options structures * * On success returns 0, otherwise -EINVAL. 
*/ static int datablob_parse(char *datablob, struct trusted_key_payload *p, struct trusted_key_options *o) { substring_t args[MAX_OPT_ARGS]; long keylen; int ret = -EINVAL; int key_cmd; char *c; /* main command */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; key_cmd = match_token(c, key_tokens, args); switch (key_cmd) { case Opt_new: /* first argument is key size */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; ret = kstrtol(c, 10, &keylen); if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE) return -EINVAL; p->key_len = keylen; ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_new; break; case Opt_load: /* first argument is sealed blob */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; p->blob_len = strlen(c) / 2; if (p->blob_len > MAX_BLOB_SIZE) return -EINVAL; ret = hex2bin(p->blob, c, p->blob_len); if (ret < 0) return -EINVAL; ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_load; break; case Opt_update: /* all arguments are options */ ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_update; break; case Opt_err: return -EINVAL; break; } return ret; } static struct trusted_key_options *trusted_options_alloc(void) { struct trusted_key_options *options; int tpm2; tpm2 = tpm_is_tpm2(TPM_ANY_NUM); if (tpm2 < 0) return NULL; options = kzalloc(sizeof *options, GFP_KERNEL); if (options) { /* set any non-zero defaults */ options->keytype = SRK_keytype; if (!tpm2) options->keyhandle = SRKHANDLE; } return options; } static struct trusted_key_payload *trusted_payload_alloc(struct key *key) { struct trusted_key_payload *p = NULL; int ret; ret = key_payload_reserve(key, sizeof *p); if (ret < 0) return p; p = kzalloc(sizeof *p, GFP_KERNEL); if (p) p->migratable = 1; /* migratable by default */ return p; } /* * trusted_instantiate - create a new trusted key * * Unseal an existing trusted blob or, for a new key, get a * random key, then seal and create a trusted key-type key, * adding it to the specified keyring. * * On success, return 0. Otherwise return errno. 
*/ static int trusted_instantiate(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *payload = NULL; struct trusted_key_options *options = NULL; size_t datalen = prep->datalen; char *datablob; int ret = 0; int key_cmd; size_t key_len; int tpm2; tpm2 = tpm_is_tpm2(TPM_ANY_NUM); if (tpm2 < 0) return tpm2; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; options = trusted_options_alloc(); if (!options) { ret = -ENOMEM; goto out; } payload = trusted_payload_alloc(key); if (!payload) { ret = -ENOMEM; goto out; } key_cmd = datablob_parse(datablob, payload, options); if (key_cmd < 0) { ret = key_cmd; goto out; } if (!options->keyhandle) { ret = -EINVAL; goto out; } dump_payload(payload); dump_options(options); switch (key_cmd) { case Opt_load: if (tpm2) ret = tpm_unseal_trusted(TPM_ANY_NUM, payload, options); else ret = key_unseal(payload, options); dump_payload(payload); dump_options(options); if (ret < 0) pr_info("trusted_key: key_unseal failed (%d)\n", ret); break; case Opt_new: key_len = payload->key_len; ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len); if (ret != key_len) { pr_info("trusted_key: key_create failed (%d)\n", ret); goto out; } if (tpm2) ret = tpm_seal_trusted(TPM_ANY_NUM, payload, options); else ret = key_seal(payload, options); if (ret < 0) pr_info("trusted_key: key_seal failed (%d)\n", ret); break; default: ret = -EINVAL; goto out; } if (!ret && options->pcrlock) ret = pcrlock(options->pcrlock); out: kzfree(datablob); kzfree(options); if (!ret) rcu_assign_keypointer(key, payload); else kzfree(payload); return ret; } static void trusted_rcu_free(struct rcu_head *rcu) { struct trusted_key_payload *p; p = container_of(rcu, struct trusted_key_payload, rcu); kzfree(p); } /* * trusted_update - reseal an existing key with new PCR values */ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *p; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) return -ENOKEY; p = key->payload.data[0]; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; new_o = trusted_options_alloc(); if (!new_o) { ret = -ENOMEM; goto out; } new_p = trusted_payload_alloc(key); if (!new_p) { ret = -ENOMEM; goto out; } memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; kzfree(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; kzfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ new_p->migratable = p->migratable; new_p->key_len = p->key_len; memcpy(new_p->key, p->key, p->key_len); dump_payload(p); dump_payload(new_p); ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); kzfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); kzfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: kzfree(datablob); kzfree(new_o); return ret; } /* * trusted_read - copy the sealed blob data to userspace in hex. 
* On success, return to userspace the trusted key datablob size. */ static long trusted_read(const struct key *key, char __user *buffer, size_t buflen) { const struct trusted_key_payload *p; char *ascii_buf; char *bufp; int i; p = dereference_key_locked(key); if (!p) return -EINVAL; if (!buffer || buflen <= 0) return 2 * p->blob_len; ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); if (!ascii_buf) return -ENOMEM; bufp = ascii_buf; for (i = 0; i < p->blob_len; i++) bufp = hex_byte_pack(bufp, p->blob[i]); if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { kzfree(ascii_buf); return -EFAULT; } kzfree(ascii_buf); return 2 * p->blob_len; } /* * trusted_destroy - clear and free the key's payload */ static void trusted_destroy(struct key *key) { kzfree(key->payload.data[0]); } struct key_type key_type_trusted = { .name = "trusted", .instantiate = trusted_instantiate, .update = trusted_update, .destroy = trusted_destroy, .describe = user_describe, .read = trusted_read, }; EXPORT_SYMBOL_GPL(key_type_trusted); static void trusted_shash_release(void) { if (hashalg) crypto_free_shash(hashalg); if (hmacalg) crypto_free_shash(hmacalg); } static int __init trusted_shash_alloc(void) { int ret; hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hmacalg)) { pr_info("trusted_key: could not allocate crypto %s\n", hmac_alg); return PTR_ERR(hmacalg); } hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hashalg)) { pr_info("trusted_key: could not allocate crypto %s\n", hash_alg); ret = PTR_ERR(hashalg); goto hashalg_fail; } return 0; hashalg_fail: crypto_free_shash(hmacalg); return ret; } static int __init init_trusted(void) { int ret; ret = trusted_shash_alloc(); if (ret < 0) return ret; ret = register_key_type(&key_type_trusted); if (ret < 0) trusted_shash_release(); return ret; } static void __exit cleanup_trusted(void) { trusted_shash_release(); unregister_key_type(&key_type_trusted); } late_initcall(init_trusted); module_exit(cleanup_trusted); MODULE_LICENSE("GPL");
/* * Copyright (C) 2010 IBM Corporation * * Author: * David Safford <safford@us.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 of the License. * * See Documentation/security/keys/trusted-encrypted.rst */ #include <crypto/hash_info.h> #include <linux/uaccess.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/parser.h> #include <linux/string.h> #include <linux/err.h> #include <keys/user-type.h> #include <keys/trusted-type.h> #include <linux/key-type.h> #include <linux/rcupdate.h> #include <linux/crypto.h> #include <crypto/hash.h> #include <crypto/sha.h> #include <linux/capability.h> #include <linux/tpm.h> #include <linux/tpm_command.h> #include "trusted.h" static const char hmac_alg[] = "hmac(sha1)"; static const char hash_alg[] = "sha1"; struct sdesc { struct shash_desc shash; char ctx[]; }; static struct crypto_shash *hashalg; static struct crypto_shash *hmacalg; static struct sdesc *init_sdesc(struct crypto_shash *alg) { struct sdesc *sdesc; int size; size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); sdesc = kmalloc(size, GFP_KERNEL); if (!sdesc) return ERR_PTR(-ENOMEM); sdesc->shash.tfm = alg; sdesc->shash.flags = 0x0; return sdesc; } static int TSS_sha1(const unsigned char *data, unsigned int datalen, unsigned char *digest) { struct sdesc *sdesc; int ret; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); kzfree(sdesc); return ret; } static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, ...) { struct sdesc *sdesc; va_list argp; unsigned int dlen; unsigned char *data; int ret; sdesc = init_sdesc(hmacalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hmac_alg); return PTR_ERR(sdesc); } ret = crypto_shash_setkey(hmacalg, key, keylen); if (ret < 0) goto out; ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; va_start(argp, keylen); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; data = va_arg(argp, unsigned char *); if (data == NULL) { ret = -EINVAL; break; } ret = crypto_shash_update(&sdesc->shash, data, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, digest); out: kzfree(sdesc); return ret; } /* * calculate authorization info fields to send to TPM */ static int TSS_authhmac(unsigned char *digest, const unsigned char *key, unsigned int keylen, unsigned char *h1, unsigned char *h2, unsigned char h3, ...) 
{ unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned char *data; unsigned char c; int ret; va_list argp; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } c = h3; ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; va_start(argp, h3); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; data = va_arg(argp, unsigned char *); if (!data) { ret = -EINVAL; break; } ret = crypto_shash_update(&sdesc->shash, data, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (!ret) ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, h1, TPM_NONCE_SIZE, h2, 1, &c, 0, 0); out: kzfree(sdesc); return ret; } /* * verify the AUTH1_COMMAND (Seal) result from TPM */ static int TSS_checkhmac1(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key, unsigned int keylen, ...) { uint32_t bufsize; uint16_t tag; uint32_t ordinal; uint32_t result; unsigned char *enonce; unsigned char *continueflag; unsigned char *authdata; unsigned char testhmac[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned int dpos; va_list argp; int ret; bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); tag = LOAD16(buffer, 0); ordinal = command; result = LOAD32N(buffer, TPM_RETURN_OFFSET); if (tag == TPM_TAG_RSP_COMMAND) return 0; if (tag != TPM_TAG_RSP_AUTH1_COMMAND) return -EINVAL; authdata = buffer + bufsize - SHA1_DIGEST_SIZE; continueflag = authdata - 1; enonce = continueflag - TPM_NONCE_SIZE; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, sizeof result); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, sizeof ordinal); if (ret < 0) goto out; va_start(argp, keylen); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; dpos = va_arg(argp, unsigned int); ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (ret < 0) goto out; ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, 1, continueflag, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kzfree(sdesc); return ret; } /* * verify the AUTH2_COMMAND (unseal) result from TPM */ static int TSS_checkhmac2(unsigned char *buffer, const uint32_t command, const unsigned char *ononce, const unsigned char *key1, unsigned int keylen1, const unsigned char *key2, unsigned int keylen2, ...) 
{ uint32_t bufsize; uint16_t tag; uint32_t ordinal; uint32_t result; unsigned char *enonce1; unsigned char *continueflag1; unsigned char *authdata1; unsigned char *enonce2; unsigned char *continueflag2; unsigned char *authdata2; unsigned char testhmac1[SHA1_DIGEST_SIZE]; unsigned char testhmac2[SHA1_DIGEST_SIZE]; unsigned char paramdigest[SHA1_DIGEST_SIZE]; struct sdesc *sdesc; unsigned int dlen; unsigned int dpos; va_list argp; int ret; bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); tag = LOAD16(buffer, 0); ordinal = command; result = LOAD32N(buffer, TPM_RETURN_OFFSET); if (tag == TPM_TAG_RSP_COMMAND) return 0; if (tag != TPM_TAG_RSP_AUTH2_COMMAND) return -EINVAL; authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1 + SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE); authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE); continueflag1 = authdata1 - 1; continueflag2 = authdata2 - 1; enonce1 = continueflag1 - TPM_NONCE_SIZE; enonce2 = continueflag2 - TPM_NONCE_SIZE; sdesc = init_sdesc(hashalg); if (IS_ERR(sdesc)) { pr_info("trusted_key: can't alloc %s\n", hash_alg); return PTR_ERR(sdesc); } ret = crypto_shash_init(&sdesc->shash); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, sizeof result); if (ret < 0) goto out; ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, sizeof ordinal); if (ret < 0) goto out; va_start(argp, keylen2); for (;;) { dlen = va_arg(argp, unsigned int); if (dlen == 0) break; dpos = va_arg(argp, unsigned int); ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); if (ret < 0) break; } va_end(argp); if (!ret) ret = crypto_shash_final(&sdesc->shash, paramdigest); if (ret < 0) goto out; ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce1, TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) { ret = -EINVAL; goto out; } ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE, paramdigest, TPM_NONCE_SIZE, enonce2, TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0); if (ret < 0) goto out; if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) ret = -EINVAL; out: kzfree(sdesc); return ret; } /* * For key specific tpm requests, we will generate and send our * own TPM command packets using the drivers send function. */ static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd, size_t buflen) { int rc; dump_tpm_buf(cmd); rc = tpm_send(chip_num, cmd, buflen); dump_tpm_buf(cmd); if (rc > 0) /* Can't return positive return codes values to keyctl */ rc = -EPERM; return rc; } /* * Lock a trusted key, by extending a selected PCR. * * Prevents a trusted key that is sealed to PCRs from being accessed. * This uses the tpm driver's extend function. */ static int pcrlock(const int pcrnum) { unsigned char hash[SHA1_DIGEST_SIZE]; int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE); if (ret != SHA1_DIGEST_SIZE) return ret; return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? 
-EINVAL : 0; } /* * Create an object specific authorisation protocol (OSAP) session */ static int osap(struct tpm_buf *tb, struct osapsess *s, const unsigned char *key, uint16_t type, uint32_t handle) { unsigned char enonce[TPM_NONCE_SIZE]; unsigned char ononce[TPM_NONCE_SIZE]; int ret; ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE); if (ret != TPM_NONCE_SIZE) return ret; INIT_BUF(tb); store16(tb, TPM_TAG_RQU_COMMAND); store32(tb, TPM_OSAP_SIZE); store32(tb, TPM_ORD_OSAP); store16(tb, type); store32(tb, handle); storebytes(tb, ononce, TPM_NONCE_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) return ret; s->handle = LOAD32(tb->data, TPM_DATA_OFFSET); memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]), TPM_NONCE_SIZE); memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) + TPM_NONCE_SIZE]), TPM_NONCE_SIZE); return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, 0, 0); } /* * Create an object independent authorisation protocol (oiap) session */ static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) { int ret; INIT_BUF(tb); store16(tb, TPM_TAG_RQU_COMMAND); store32(tb, TPM_OIAP_SIZE); store32(tb, TPM_ORD_OIAP); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) return ret; *handle = LOAD32(tb->data, TPM_DATA_OFFSET); memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)], TPM_NONCE_SIZE); return 0; } struct tpm_digests { unsigned char encauth[SHA1_DIGEST_SIZE]; unsigned char pubauth[SHA1_DIGEST_SIZE]; unsigned char xorwork[SHA1_DIGEST_SIZE * 2]; unsigned char xorhash[SHA1_DIGEST_SIZE]; unsigned char nonceodd[TPM_NONCE_SIZE]; }; /* * Have the TPM seal(encrypt) the trusted key, possibly based on * Platform Configuration Registers (PCRs). AUTH1 for sealing key. 
*/ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, uint32_t keyhandle, const unsigned char *keyauth, const unsigned char *data, uint32_t datalen, unsigned char *blob, uint32_t *bloblen, const unsigned char *blobauth, const unsigned char *pcrinfo, uint32_t pcrinfosize) { struct osapsess sess; struct tpm_digests *td; unsigned char cont; uint32_t ordinal; uint32_t pcrsize; uint32_t datsize; int sealinfosize; int encdatasize; int storedsize; int ret; int i; /* alloc some work space for all the hashes */ td = kmalloc(sizeof *td, GFP_KERNEL); if (!td) return -ENOMEM; /* get session for sealing key */ ret = osap(tb, &sess, keyauth, keytype, keyhandle); if (ret < 0) goto out; dump_sess(&sess); /* calculate encrypted authorization value */ memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE); memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE); ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash); if (ret < 0) goto out; ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE); if (ret != TPM_NONCE_SIZE) goto out; ordinal = htonl(TPM_ORD_SEAL); datsize = htonl(datalen); pcrsize = htonl(pcrinfosize); cont = 0; /* encrypt data authorization key */ for (i = 0; i < SHA1_DIGEST_SIZE; ++i) td->encauth[i] = td->xorhash[i] ^ blobauth[i]; /* calculate authorization HMAC value */ if (pcrinfosize == 0) { /* no pcr info specified */ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, sess.enonce, td->nonceodd, cont, sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, td->encauth, sizeof(uint32_t), &pcrsize, sizeof(uint32_t), &datsize, datalen, data, 0, 0); } else { /* pcr info specified */ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, sess.enonce, td->nonceodd, cont, sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, td->encauth, sizeof(uint32_t), &pcrsize, pcrinfosize, pcrinfo, sizeof(uint32_t), &datsize, datalen, data, 0, 0); } if (ret < 0) goto out; /* build and send the TPM request packet */ INIT_BUF(tb); store16(tb, TPM_TAG_RQU_AUTH1_COMMAND); store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen); store32(tb, TPM_ORD_SEAL); store32(tb, keyhandle); storebytes(tb, td->encauth, SHA1_DIGEST_SIZE); store32(tb, pcrinfosize); storebytes(tb, pcrinfo, pcrinfosize); store32(tb, datalen); storebytes(tb, data, datalen); store32(tb, sess.handle); storebytes(tb, td->nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) goto out; /* calculate the size of the returned Blob */ sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t)); encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize); storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize + sizeof(uint32_t) + encdatasize; /* check the HMAC in the response */ ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret, SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0, 0); /* copy the returned blob to caller */ if (!ret) { memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize); *bloblen = storedsize; } out: kzfree(td); return ret; } /* * use the AUTH2_COMMAND form of unseal, to authorize both key and blob */ static int tpm_unseal(struct tpm_buf *tb, uint32_t keyhandle, const unsigned char *keyauth, const unsigned char *blob, int bloblen, const unsigned char *blobauth, unsigned char *data, unsigned int *datalen) { unsigned char nonceodd[TPM_NONCE_SIZE]; unsigned char enonce1[TPM_NONCE_SIZE]; unsigned char enonce2[TPM_NONCE_SIZE]; 
unsigned char authdata1[SHA1_DIGEST_SIZE]; unsigned char authdata2[SHA1_DIGEST_SIZE]; uint32_t authhandle1 = 0; uint32_t authhandle2 = 0; unsigned char cont = 0; uint32_t ordinal; uint32_t keyhndl; int ret; /* sessions for unsealing key and data */ ret = oiap(tb, &authhandle1, enonce1); if (ret < 0) { pr_info("trusted_key: oiap failed (%d)\n", ret); return ret; } ret = oiap(tb, &authhandle2, enonce2); if (ret < 0) { pr_info("trusted_key: oiap failed (%d)\n", ret); return ret; } ordinal = htonl(TPM_ORD_UNSEAL); keyhndl = htonl(SRKHANDLE); ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE); if (ret != TPM_NONCE_SIZE) { pr_info("trusted_key: tpm_get_random failed (%d)\n", ret); return ret; } ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE, enonce1, nonceodd, cont, sizeof(uint32_t), &ordinal, bloblen, blob, 0, 0); if (ret < 0) return ret; ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE, enonce2, nonceodd, cont, sizeof(uint32_t), &ordinal, bloblen, blob, 0, 0); if (ret < 0) return ret; /* build and send TPM request packet */ INIT_BUF(tb); store16(tb, TPM_TAG_RQU_AUTH2_COMMAND); store32(tb, TPM_UNSEAL_SIZE + bloblen); store32(tb, TPM_ORD_UNSEAL); store32(tb, keyhandle); storebytes(tb, blob, bloblen); store32(tb, authhandle1); storebytes(tb, nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, authdata1, SHA1_DIGEST_SIZE); store32(tb, authhandle2); storebytes(tb, nonceodd, TPM_NONCE_SIZE); store8(tb, cont); storebytes(tb, authdata2, SHA1_DIGEST_SIZE); ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); if (ret < 0) { pr_info("trusted_key: authhmac failed (%d)\n", ret); return ret; } *datalen = LOAD32(tb->data, TPM_DATA_OFFSET); ret = TSS_checkhmac2(tb->data, ordinal, nonceodd, keyauth, SHA1_DIGEST_SIZE, blobauth, SHA1_DIGEST_SIZE, sizeof(uint32_t), TPM_DATA_OFFSET, *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0, 0); if (ret < 0) { pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret); return ret; } memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen); return 0; } /* * Have the TPM seal(encrypt) the symmetric key */ static int key_seal(struct trusted_key_payload *p, struct trusted_key_options *o) { struct tpm_buf *tb; int ret; tb = kzalloc(sizeof *tb, GFP_KERNEL); if (!tb) return -ENOMEM; /* include migratable flag at end of sealed key */ p->key[p->key_len] = p->migratable; ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth, p->key, p->key_len + 1, p->blob, &p->blob_len, o->blobauth, o->pcrinfo, o->pcrinfo_len); if (ret < 0) pr_info("trusted_key: srkseal failed (%d)\n", ret); kzfree(tb); return ret; } /* * Have the TPM unseal(decrypt) the symmetric key */ static int key_unseal(struct trusted_key_payload *p, struct trusted_key_options *o) { struct tpm_buf *tb; int ret; tb = kzalloc(sizeof *tb, GFP_KERNEL); if (!tb) return -ENOMEM; ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len, o->blobauth, p->key, &p->key_len); if (ret < 0) pr_info("trusted_key: srkunseal failed (%d)\n", ret); else /* pull migratable flag out of sealed key */ p->migratable = p->key[--p->key_len]; kzfree(tb); return ret; } enum { Opt_err = -1, Opt_new, Opt_load, Opt_update, Opt_keyhandle, Opt_keyauth, Opt_blobauth, Opt_pcrinfo, Opt_pcrlock, Opt_migratable, Opt_hash, Opt_policydigest, Opt_policyhandle, }; static const match_table_t key_tokens = { {Opt_new, "new"}, {Opt_load, "load"}, {Opt_update, "update"}, {Opt_keyhandle, "keyhandle=%s"}, {Opt_keyauth, "keyauth=%s"}, {Opt_blobauth, "blobauth=%s"}, {Opt_pcrinfo, "pcrinfo=%s"}, {Opt_pcrlock, 
"pcrlock=%s"}, {Opt_migratable, "migratable=%s"}, {Opt_hash, "hash=%s"}, {Opt_policydigest, "policydigest=%s"}, {Opt_policyhandle, "policyhandle=%s"}, {Opt_err, NULL} }; /* can have zero or more token= options */ static int getoptions(char *c, struct trusted_key_payload *pay, struct trusted_key_options *opt) { substring_t args[MAX_OPT_ARGS]; char *p = c; int token; int res; unsigned long handle; unsigned long lock; unsigned long token_mask = 0; unsigned int digest_len; int i; int tpm2; tpm2 = tpm_is_tpm2(TPM_ANY_NUM); if (tpm2 < 0) return tpm2; opt->hash = tpm2 ? HASH_ALGO_SHA256 : HASH_ALGO_SHA1; while ((p = strsep(&c, " \t"))) { if (*p == '\0' || *p == ' ' || *p == '\t') continue; token = match_token(p, key_tokens, args); if (test_and_set_bit(token, &token_mask)) return -EINVAL; switch (token) { case Opt_pcrinfo: opt->pcrinfo_len = strlen(args[0].from) / 2; if (opt->pcrinfo_len > MAX_PCRINFO_SIZE) return -EINVAL; res = hex2bin(opt->pcrinfo, args[0].from, opt->pcrinfo_len); if (res < 0) return -EINVAL; break; case Opt_keyhandle: res = kstrtoul(args[0].from, 16, &handle); if (res < 0) return -EINVAL; opt->keytype = SEAL_keytype; opt->keyhandle = handle; break; case Opt_keyauth: if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) return -EINVAL; res = hex2bin(opt->keyauth, args[0].from, SHA1_DIGEST_SIZE); if (res < 0) return -EINVAL; break; case Opt_blobauth: if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) return -EINVAL; res = hex2bin(opt->blobauth, args[0].from, SHA1_DIGEST_SIZE); if (res < 0) return -EINVAL; break; case Opt_migratable: if (*args[0].from == '0') pay->migratable = 0; else return -EINVAL; break; case Opt_pcrlock: res = kstrtoul(args[0].from, 10, &lock); if (res < 0) return -EINVAL; opt->pcrlock = lock; break; case Opt_hash: if (test_bit(Opt_policydigest, &token_mask)) return -EINVAL; for (i = 0; i < HASH_ALGO__LAST; i++) { if (!strcmp(args[0].from, hash_algo_name[i])) { opt->hash = i; break; } } if (i == HASH_ALGO__LAST) return -EINVAL; if (!tpm2 && i != HASH_ALGO_SHA1) { pr_info("trusted_key: TPM 1.x only supports SHA-1.\n"); return -EINVAL; } break; case Opt_policydigest: digest_len = hash_digest_size[opt->hash]; if (!tpm2 || strlen(args[0].from) != (2 * digest_len)) return -EINVAL; res = hex2bin(opt->policydigest, args[0].from, digest_len); if (res < 0) return -EINVAL; opt->policydigest_len = digest_len; break; case Opt_policyhandle: if (!tpm2) return -EINVAL; res = kstrtoul(args[0].from, 16, &handle); if (res < 0) return -EINVAL; opt->policyhandle = handle; break; default: return -EINVAL; } } return 0; } /* * datablob_parse - parse the keyctl data and fill in the * payload and options structures * * On success returns 0, otherwise -EINVAL. 
*/ static int datablob_parse(char *datablob, struct trusted_key_payload *p, struct trusted_key_options *o) { substring_t args[MAX_OPT_ARGS]; long keylen; int ret = -EINVAL; int key_cmd; char *c; /* main command */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; key_cmd = match_token(c, key_tokens, args); switch (key_cmd) { case Opt_new: /* first argument is key size */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; ret = kstrtol(c, 10, &keylen); if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE) return -EINVAL; p->key_len = keylen; ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_new; break; case Opt_load: /* first argument is sealed blob */ c = strsep(&datablob, " \t"); if (!c) return -EINVAL; p->blob_len = strlen(c) / 2; if (p->blob_len > MAX_BLOB_SIZE) return -EINVAL; ret = hex2bin(p->blob, c, p->blob_len); if (ret < 0) return -EINVAL; ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_load; break; case Opt_update: /* all arguments are options */ ret = getoptions(datablob, p, o); if (ret < 0) return ret; ret = Opt_update; break; case Opt_err: return -EINVAL; break; } return ret; } static struct trusted_key_options *trusted_options_alloc(void) { struct trusted_key_options *options; int tpm2; tpm2 = tpm_is_tpm2(TPM_ANY_NUM); if (tpm2 < 0) return NULL; options = kzalloc(sizeof *options, GFP_KERNEL); if (options) { /* set any non-zero defaults */ options->keytype = SRK_keytype; if (!tpm2) options->keyhandle = SRKHANDLE; } return options; } static struct trusted_key_payload *trusted_payload_alloc(struct key *key) { struct trusted_key_payload *p = NULL; int ret; ret = key_payload_reserve(key, sizeof *p); if (ret < 0) return p; p = kzalloc(sizeof *p, GFP_KERNEL); if (p) p->migratable = 1; /* migratable by default */ return p; } /* * trusted_instantiate - create a new trusted key * * Unseal an existing trusted blob or, for a new key, get a * random key, then seal and create a trusted key-type key, * adding it to the specified keyring. * * On success, return 0. Otherwise return errno. 
*/ static int trusted_instantiate(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *payload = NULL; struct trusted_key_options *options = NULL; size_t datalen = prep->datalen; char *datablob; int ret = 0; int key_cmd; size_t key_len; int tpm2; tpm2 = tpm_is_tpm2(TPM_ANY_NUM); if (tpm2 < 0) return tpm2; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; options = trusted_options_alloc(); if (!options) { ret = -ENOMEM; goto out; } payload = trusted_payload_alloc(key); if (!payload) { ret = -ENOMEM; goto out; } key_cmd = datablob_parse(datablob, payload, options); if (key_cmd < 0) { ret = key_cmd; goto out; } if (!options->keyhandle) { ret = -EINVAL; goto out; } dump_payload(payload); dump_options(options); switch (key_cmd) { case Opt_load: if (tpm2) ret = tpm_unseal_trusted(TPM_ANY_NUM, payload, options); else ret = key_unseal(payload, options); dump_payload(payload); dump_options(options); if (ret < 0) pr_info("trusted_key: key_unseal failed (%d)\n", ret); break; case Opt_new: key_len = payload->key_len; ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len); if (ret != key_len) { pr_info("trusted_key: key_create failed (%d)\n", ret); goto out; } if (tpm2) ret = tpm_seal_trusted(TPM_ANY_NUM, payload, options); else ret = key_seal(payload, options); if (ret < 0) pr_info("trusted_key: key_seal failed (%d)\n", ret); break; default: ret = -EINVAL; goto out; } if (!ret && options->pcrlock) ret = pcrlock(options->pcrlock); out: kzfree(datablob); kzfree(options); if (!ret) rcu_assign_keypointer(key, payload); else kzfree(payload); return ret; } static void trusted_rcu_free(struct rcu_head *rcu) { struct trusted_key_payload *p; p = container_of(rcu, struct trusted_key_payload, rcu); kzfree(p); } /* * trusted_update - reseal an existing key with new PCR values */ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *p; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; if (key_is_negative(key)) return -ENOKEY; p = key->payload.data[0]; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; new_o = trusted_options_alloc(); if (!new_o) { ret = -ENOMEM; goto out; } new_p = trusted_payload_alloc(key); if (!new_p) { ret = -ENOMEM; goto out; } memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; kzfree(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; kzfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ new_p->migratable = p->migratable; new_p->key_len = p->key_len; memcpy(new_p->key, p->key, p->key_len); dump_payload(p); dump_payload(new_p); ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); kzfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); kzfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: kzfree(datablob); kzfree(new_o); return ret; } /* * trusted_read - copy the sealed blob data to userspace in hex. 
* On success, return to userspace the trusted key datablob size. */ static long trusted_read(const struct key *key, char __user *buffer, size_t buflen) { const struct trusted_key_payload *p; char *ascii_buf; char *bufp; int i; p = dereference_key_locked(key); if (!p) return -EINVAL; if (!buffer || buflen <= 0) return 2 * p->blob_len; ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); if (!ascii_buf) return -ENOMEM; bufp = ascii_buf; for (i = 0; i < p->blob_len; i++) bufp = hex_byte_pack(bufp, p->blob[i]); if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { kzfree(ascii_buf); return -EFAULT; } kzfree(ascii_buf); return 2 * p->blob_len; } /* * trusted_destroy - clear and free the key's payload */ static void trusted_destroy(struct key *key) { kzfree(key->payload.data[0]); } struct key_type key_type_trusted = { .name = "trusted", .instantiate = trusted_instantiate, .update = trusted_update, .destroy = trusted_destroy, .describe = user_describe, .read = trusted_read, }; EXPORT_SYMBOL_GPL(key_type_trusted); static void trusted_shash_release(void) { if (hashalg) crypto_free_shash(hashalg); if (hmacalg) crypto_free_shash(hmacalg); } static int __init trusted_shash_alloc(void) { int ret; hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hmacalg)) { pr_info("trusted_key: could not allocate crypto %s\n", hmac_alg); return PTR_ERR(hmacalg); } hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(hashalg)) { pr_info("trusted_key: could not allocate crypto %s\n", hash_alg); ret = PTR_ERR(hashalg); goto hashalg_fail; } return 0; hashalg_fail: crypto_free_shash(hmacalg); return ret; } static int __init init_trusted(void) { int ret; ret = trusted_shash_alloc(); if (ret < 0) return ret; ret = register_key_type(&key_type_trusted); if (ret < 0) trusted_shash_release(); return ret; } static void __exit cleanup_trusted(void) { trusted_shash_release(); unregister_key_type(&key_type_trusted); } late_initcall(init_trusted); module_exit(cleanup_trusted); MODULE_LICENSE("GPL");
static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *p; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) return -ENOKEY; p = key->payload.data[0]; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; new_o = trusted_options_alloc(); if (!new_o) { ret = -ENOMEM; goto out; } new_p = trusted_payload_alloc(key); if (!new_p) { ret = -ENOMEM; goto out; } memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; kzfree(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; kzfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ new_p->migratable = p->migratable; new_p->key_len = p->key_len; memcpy(new_p->key, p->key, p->key_len); dump_payload(p); dump_payload(new_p); ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); kzfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); kzfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: kzfree(datablob); kzfree(new_o); return ret; }
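func_after: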
static int trusted_update(struct key *key, struct key_preparsed_payload *prep) { struct trusted_key_payload *p; struct trusted_key_payload *new_p; struct trusted_key_options *new_o; size_t datalen = prep->datalen; char *datablob; int ret = 0; if (key_is_negative(key)) return -ENOKEY; p = key->payload.data[0]; if (!p->migratable) return -EPERM; if (datalen <= 0 || datalen > 32767 || !prep->data) return -EINVAL; datablob = kmalloc(datalen + 1, GFP_KERNEL); if (!datablob) return -ENOMEM; new_o = trusted_options_alloc(); if (!new_o) { ret = -ENOMEM; goto out; } new_p = trusted_payload_alloc(key); if (!new_p) { ret = -ENOMEM; goto out; } memcpy(datablob, prep->data, datalen); datablob[datalen] = '\0'; ret = datablob_parse(datablob, new_p, new_o); if (ret != Opt_update) { ret = -EINVAL; kzfree(new_p); goto out; } if (!new_o->keyhandle) { ret = -EINVAL; kzfree(new_p); goto out; } /* copy old key values, and reseal with new pcrs */ new_p->migratable = p->migratable; new_p->key_len = p->key_len; memcpy(new_p->key, p->key, p->key_len); dump_payload(p); dump_payload(new_p); ret = key_seal(new_p, new_o); if (ret < 0) { pr_info("trusted_key: key_seal failed (%d)\n", ret); kzfree(new_p); goto out; } if (new_o->pcrlock) { ret = pcrlock(new_o->pcrlock); if (ret < 0) { pr_info("trusted_key: pcrlock failed (%d)\n", ret); kzfree(new_p); goto out; } } rcu_assign_keypointer(key, new_p); call_rcu(&p->rcu, trusted_rcu_free); out: kzfree(datablob); kzfree(new_o); return ret; }
diff: {'added': [(1069, '\tif (key_is_negative(key))')], 'deleted': [(1069, '\tif (test_bit(KEY_FLAG_NEGATIVE, &key->flags))')]}
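The one-line diff above is the local footprint of the upstream fix this row records (CVE-2017-15951, "KEYS: Fix race between updating and finding a negative key"). Instantiating or updating a key set KEY_FLAG_NEGATIVE and the associated error state in separate steps, so a concurrent lookup doing a plain test_bit() could observe them out of order and follow an error-encoded payload pointer. The replacement helper instead reads a single consolidated state word with acquire semantics. A simplified sketch of the helpers as merged upstream (trimmed from include/linux/key.h; treat the exact definitions as approximate):

/* Reader side: one word, one acquire load. Pairs with the
 * smp_store_release() the writer performs when it marks the key
 * instantiated or negative, so a reader that observes a negative
 * state also observes the error code stored with it. */
static inline short key_read_state(const struct key *key)
{
    return smp_load_acquire(&key->state);
}

static inline bool key_is_negative(const struct key *key)
{
    return key_read_state(key) < 0;  /* negative states carry -error */
}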
num_lines_added: 1
num_lines_deleted: 1
num_lines_in_file: 1,026
num_tokens_in_file: 6,151
num_lines_in_method: 67
num_tokens_in_method: 382
method_complexity: 14
repo: https://github.com/torvalds/linux
cve_id: CVE-2017-15951
cwe_id: CWE-20
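The CWE-20 (improper input validation) classification sits alongside the race fix; for reference, both payload entry points in this row gate the caller-supplied blob identically before parsing it. The sketch below isolates that idiom (dup_payload is an illustrative name, not a kernel symbol):

/* Validate-then-NUL-terminate, as done inline by trusted_instantiate()
 * and trusted_update() above. datablob_parse() expects a C string,
 * hence the +1 and the terminator. Since datalen is a size_t, the
 * <= 0 comparison only ever catches zero. */
static char *dup_payload(const void *data, size_t datalen)
{
    char *datablob;

    if (datalen <= 0 || datalen > 32767 || !data)
        return NULL;
    datablob = kmalloc(datalen + 1, GFP_KERNEL);
    if (!datablob)
        return NULL;
    memcpy(datablob, data, datalen);
    datablob[datalen] = '\0';
    return datablob;
}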
id: 2,477
file_name: orders.c
programming_language: C
method_name: update_write_brush
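update_write_brush serializes an rdpBrush whose optional fields are gated by bits of the order's fieldFlags; when the style marks a cached brush the hatch byte doubles as the cache index, and the 8x8 monochrome pattern crosses the wire as seven explicit bytes plus the hatch byte, as update_read_brush()/update_write_brush() show in the code below. A minimal sketch of just that pattern layout, assuming pattern[] is indexed like brush->data[] in the code (brush_pattern_to_wire and its parameters are illustrative):

#include <stdint.h>

/* Mirrors update_write_brush(): data[7]..data[1] go to the stream in
 * that order, while data[0] travels in the hatch field. */
static void brush_pattern_to_wire(const uint8_t pattern[8],
                                  uint8_t *hatch, uint8_t wire[7])
{
    int i;

    *hatch = pattern[0];
    for (i = 0; i < 7; i++)
        wire[i] = pattern[7 - i];
}

code_before: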
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 }; static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 }; static const BYTE BPP_BMF[] = { 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 }; static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, 
WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { brush->index = brush->hatch; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { brush->hatch = brush->index; brush->bpp = BMF_BPP[brush->style & 0x07]; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && !update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, 
"update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); } int 
update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { return 32; } BOOL 
update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); return 
update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId]; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL update_write_cache_bitmap_v2_order(wStream* s, 
CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp]; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId]; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length 
(4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp]; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static 
CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
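/* round the 1bpp glyph bitmap size, ((cx + 7) / 8) * cy bytes, up to the
   next 4-byte boundary - the same padding the glyph readers apply */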
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ if (iBitmapFormat >= ARRAYSIZE(BMF_BPP)) goto fail; cache_brush->bpp = BMF_BPP[iBitmapFormat]; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = BPP_BMF[cache_brush->bpp]; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, cache_brush->cy); /* cy (1 byte) */ 
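/* the remaining fields mirror update_read_cache_brush_order above: style
   and length bytes, then brush data encoded either as eight reversed rows
   (1bpp), a palette-compressed 8x8 pattern, or raw reversed scanlines */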
Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT:
			rc = update_read_patblt_order(s, orderInfo, &(primary->patblt));
			break;

		case ORDER_TYPE_SCRBLT:
			rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt));
			break;

		case ORDER_TYPE_OPAQUE_RECT:
			rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect));
			break;

		case ORDER_TYPE_DRAW_NINE_GRID:
			rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid));
			break;

		case ORDER_TYPE_MULTI_DSTBLT:
			rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt));
			break;

		case ORDER_TYPE_MULTI_PATBLT:
			rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt));
			break;

		case ORDER_TYPE_MULTI_SCRBLT:
			rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt));
			break;

		case ORDER_TYPE_MULTI_OPAQUE_RECT:
			rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect));
			break;

		case ORDER_TYPE_MULTI_DRAW_NINE_GRID:
			rc = update_read_multi_draw_nine_grid_order(s, orderInfo,
			                                            &(primary->multi_draw_nine_grid));
			break;

		case ORDER_TYPE_LINE_TO:
			rc = update_read_line_to_order(s, orderInfo, &(primary->line_to));
			break;

		case ORDER_TYPE_POLYLINE:
			rc = update_read_polyline_order(s, orderInfo, &(primary->polyline));
			break;

		case ORDER_TYPE_MEMBLT:
			rc = update_read_memblt_order(s, orderInfo, &(primary->memblt));
			break;

		case ORDER_TYPE_MEM3BLT:
			rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt));
			break;

		case ORDER_TYPE_SAVE_BITMAP:
			rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap));
			break;

		case ORDER_TYPE_GLYPH_INDEX:
			rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index));
			break;

		case ORDER_TYPE_FAST_INDEX:
			rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index));
			break;

		case ORDER_TYPE_FAST_GLYPH:
			rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph));
			break;

		case ORDER_TYPE_POLYGON_SC:
			rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc));
			break;

		case ORDER_TYPE_POLYGON_CB:
			rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb));
			break;

		case ORDER_TYPE_ELLIPSE_SC:
			rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc));
			break;

		case ORDER_TYPE_ELLIPSE_CB:
			rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb));
			break;

		default:
			WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName);
			rc = TRUE;
			break;
	}

	if (!rc)
	{
		WLog_Print(log, WLOG_ERROR, "%s - primary order parser failed", orderName);
		return FALSE;
	}

	return TRUE;
}

static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags)
{
	BYTE field;
	BOOL rc = FALSE;
	rdpContext* context = update->context;
	rdpPrimaryUpdate* primary = update->primary;
	ORDER_INFO* orderInfo = &(primary->order_info);
	rdpSettings* settings = context->settings;
	const char* orderName;

	if (flags & ORDER_TYPE_CHANGE)
	{
		if (Stream_GetRemainingLength(s) < 1)
		{
			WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1");
			return FALSE;
		}

		Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */
	}

	orderName = primary_order_string(orderInfo->orderType);

	if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName))
		return FALSE;

	field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc);
	if (!rc)
		return FALSE;

	if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field))
	{
		WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed");
		return FALSE;
	}

	if (flags & ORDER_BOUNDS)
	{
		if (!(flags &
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
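/* every secondary order follows the same pattern: parse the wire format
   into a freshly allocated order structure, hand it to the registered
   callback, then free it again - callbacks must copy anything they keep */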
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED);
			CACHE_BITMAP_ORDER* order =
			    update_read_cache_bitmap_order(update, s, compressed, extraFlags);

			if (order)
			{
				rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order);
				free_cache_bitmap_order(context, order);
			}
		}
		break;

		case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2:
		case ORDER_TYPE_BITMAP_COMPRESSED_V2:
		{
			const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2);
			CACHE_BITMAP_V2_ORDER* order =
			    update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags);

			if (order)
			{
				rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order);
				free_cache_bitmap_v2_order(context, order);
			}
		}
		break;

		case ORDER_TYPE_BITMAP_COMPRESSED_V3:
		{
			CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags);

			if (order)
			{
				rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order);
				free_cache_bitmap_v3_order(context, order);
			}
		}
		break;

		case ORDER_TYPE_CACHE_COLOR_TABLE:
		{
			CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags);

			if (order)
			{
				rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order);
				free_cache_color_table_order(context, order);
			}
		}
		break;

		case ORDER_TYPE_CACHE_GLYPH:
		{
			switch (settings->GlyphSupportLevel)
			{
				case GLYPH_SUPPORT_PARTIAL:
				case GLYPH_SUPPORT_FULL:
				{
					CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags);

					if (order)
					{
						rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order);
						free_cache_glyph_order(context, order);
					}
				}
				break;

				case GLYPH_SUPPORT_ENCODE:
				{
					CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags);

					if (order)
					{
						rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order);
						free_cache_glyph_v2_order(context, order);
					}
				}
				break;

				case GLYPH_SUPPORT_NONE:
				default:
					break;
			}
		}
		break;

		case ORDER_TYPE_CACHE_BRUSH:
			/* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */
			{
				CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags);

				if (order)
				{
					rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order);
					free_cache_brush_order(context, order);
				}
			}
			break;

		default:
			WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name);
			break;
	}

	if (!rc)
	{
		WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name);
	}

	/* The full order occupies orderLength + 13 bytes; 6 of those (including
	 * the controlFlags byte consumed by the caller) were already read before
	 * "start" was taken, so the parser should have advanced exactly
	 * orderLength + 7 bytes from "start". */
	start += orderLength + 7;
	end = Stream_GetPosition(s);

	if (start < end)
	{
		WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz " bytes too much", name,
		           end - start);
		return FALSE;
	}

	diff = start - end;

	if (diff > 0)
	{
		WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz " bytes short, skipping",
		           name, diff);
		Stream_Seek(s, diff);
	}

	return rc;
}

static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec)
{
	BOOL rc = FALSE;

	switch (orderType)
	{
		case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP:
			rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap));
			break;

		case ORDER_TYPE_SWITCH_SURFACE:
			rc = update_read_switch_surface_order(s, &(altsec->switch_surface));
			break;

		case ORDER_TYPE_CREATE_NINE_GRID_BITMAP:
			rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap));
			break;

		case ORDER_TYPE_FRAME_MARKER:
			rc = update_read_frame_marker_order(s, &(altsec->frame_marker));
			break;

		case ORDER_TYPE_STREAM_BITMAP_FIRST:
			rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first));
			break;

		case ORDER_TYPE_STREAM_BITMAP_NEXT:
			rc = update_read_stream_bitmap_next_order(s,
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
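/* the low bits of controlFlags select the order class: ORDER_STANDARD
   clear means alternate secondary, ORDER_STANDARD plus ORDER_SECONDARY
   means secondary, ORDER_STANDARD alone means primary */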
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
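/*
 * A minimal standalone sketch (not FreeRDP API) of the 2-byte variable
 * length unsigned encoding that update_read_2byte_unsigned() and
 * update_write_2byte_unsigned() below implement on top of wStream: values
 * up to 0x7F occupy one byte; values up to 0x7FFF set the high bit of the
 * first byte and append the low byte. The helper names and the plain
 * buffer interface are illustrative assumptions, not part of the library.
 */
#include <stddef.h>
#include <stdint.h>

/* Encode value; returns bytes written (1 or 2), or 0 if out of range. */
static size_t example_put_2byte_unsigned(uint8_t* out, uint32_t value)
{
	if (value > 0x7FFF)
		return 0; /* the encoding only covers 15 bits */

	if (value > 0x7F)
	{
		out[0] = 0x80 | (uint8_t)((value >> 8) & 0x7F); /* high bit marks the 2-byte form */
		out[1] = (uint8_t)(value & 0xFF);
		return 2;
	}

	out[0] = (uint8_t)value;
	return 1;
}

/* Decode from a bounded buffer; returns bytes consumed, or 0 on underrun. */
static size_t example_get_2byte_unsigned(const uint8_t* in, size_t len, uint32_t* value)
{
	if (len < 1)
		return 0;

	if (in[0] & 0x80)
	{
		if (len < 2)
			return 0; /* mirrors the Stream_GetRemainingLength() checks */

		*value = ((uint32_t)(in[0] & 0x7F) << 8) | in[1];
		return 2;
	}

	*value = in[0] & 0x7F;
	return 1;
}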
/** * FreeRDP: A Remote Desktop Protocol Implementation * Drawing Orders * * Copyright 2011 Marc-Andre Moreau <marcandre.moreau@gmail.com> * Copyright 2016 Armin Novak <armin.novak@thincast.com> * Copyright 2016 Thincast Technologies GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "window.h" #include <winpr/wtypes.h> #include <winpr/crt.h> #include <freerdp/api.h> #include <freerdp/log.h> #include <freerdp/graphics.h> #include <freerdp/codec/bitmap.h> #include <freerdp/gdi/gdi.h> #include "orders.h" #include "../cache/glyph.h" #include "../cache/bitmap.h" #include "../cache/brush.h" #include "../cache/cache.h" #define TAG FREERDP_TAG("core.orders") BYTE get_primary_drawing_order_field_bytes(UINT32 orderType, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (orderType) { case 0: return DSTBLT_ORDER_FIELD_BYTES; case 1: return PATBLT_ORDER_FIELD_BYTES; case 2: return SCRBLT_ORDER_FIELD_BYTES; case 3: return 0; case 4: return 0; case 5: return 0; case 6: return 0; case 7: return DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 8: return MULTI_DRAW_NINE_GRID_ORDER_FIELD_BYTES; case 9: return LINE_TO_ORDER_FIELD_BYTES; case 10: return OPAQUE_RECT_ORDER_FIELD_BYTES; case 11: return SAVE_BITMAP_ORDER_FIELD_BYTES; case 12: return 0; case 13: return MEMBLT_ORDER_FIELD_BYTES; case 14: return MEM3BLT_ORDER_FIELD_BYTES; case 15: return MULTI_DSTBLT_ORDER_FIELD_BYTES; case 16: return MULTI_PATBLT_ORDER_FIELD_BYTES; case 17: return MULTI_SCRBLT_ORDER_FIELD_BYTES; case 18: return MULTI_OPAQUE_RECT_ORDER_FIELD_BYTES; case 19: return FAST_INDEX_ORDER_FIELD_BYTES; case 20: return POLYGON_SC_ORDER_FIELD_BYTES; case 21: return POLYGON_CB_ORDER_FIELD_BYTES; case 22: return POLYLINE_ORDER_FIELD_BYTES; case 23: return 0; case 24: return FAST_GLYPH_ORDER_FIELD_BYTES; case 25: return ELLIPSE_SC_ORDER_FIELD_BYTES; case 26: return ELLIPSE_CB_ORDER_FIELD_BYTES; case 27: return GLYPH_INDEX_ORDER_FIELD_BYTES; default: if (pValid) *pValid = FALSE; WLog_WARN(TAG, "Invalid orderType 0x%08X received", orderType); return 0; } } static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bmf) { case 1: return 1; case 3: return 8; case 4: return 16; case 5: return 24; case 6: return 32; default: WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf); if (pValid) *pValid = FALSE; return 0; } } static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid) { if (pValid) *pValid = TRUE; switch (bpp) { case 1: return 1; case 8: return 3; case 16: return 4; case 24: return 5; case 32: return 6; default: WLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp); if (pValid) *pValid = FALSE; return 0; } } static BOOL check_order_activated(wLog* log, rdpSettings* settings, const char* orderName, BOOL condition) { if (!condition) { 
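/* an order type arrived that the client never announced support for during
   capability negotiation; AllowUnanouncedOrdersFromServer (the
   /relax-order-checks command line switch) decides whether to tolerate it */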
if (settings->AllowUnanouncedOrdersFromServer) { WLog_Print(log, WLOG_WARN, "%s - SERVER BUG: The support for this feature was not announced!", orderName); return TRUE; } else { WLog_Print(log, WLOG_ERROR, "%s - SERVER BUG: The support for this feature was not announced! Use " "/relax-order-checks to ignore", orderName); return FALSE; } } return TRUE; } static BOOL check_alt_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: case ORDER_TYPE_SWITCH_SURFACE: condition = settings->OffscreenSupportLevel != 0; break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: condition = settings->DrawNineGridEnabled; break; case ORDER_TYPE_FRAME_MARKER: condition = settings->FrameMarkerCommandEnabled; break; case ORDER_TYPE_GDIPLUS_FIRST: case ORDER_TYPE_GDIPLUS_NEXT: case ORDER_TYPE_GDIPLUS_END: case ORDER_TYPE_GDIPLUS_CACHE_FIRST: case ORDER_TYPE_GDIPLUS_CACHE_NEXT: case ORDER_TYPE_GDIPLUS_CACHE_END: condition = settings->DrawGdiPlusCacheEnabled; break; case ORDER_TYPE_WINDOW: condition = settings->RemoteWndSupportLevel != WINDOW_LEVEL_NOT_SUPPORTED; break; case ORDER_TYPE_STREAM_BITMAP_FIRST: case ORDER_TYPE_STREAM_BITMAP_NEXT: case ORDER_TYPE_COMPDESK_FIRST: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "%s - Alternate Secondary Drawing Order UNKNOWN", orderName); condition = FALSE; break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_secondary_order_supported(wLog* log, rdpSettings* settings, BYTE orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: condition = settings->BitmapCacheEnabled; break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: condition = settings->BitmapCacheV3Enabled; break; case ORDER_TYPE_CACHE_COLOR_TABLE: condition = (settings->OrderSupport[NEG_MEMBLT_INDEX] || settings->OrderSupport[NEG_MEM3BLT_INDEX]); break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: case GLYPH_SUPPORT_ENCODE: condition = TRUE; break; case GLYPH_SUPPORT_NONE: default: condition = FALSE; break; } } break; case ORDER_TYPE_CACHE_BRUSH: condition = TRUE; break; default: WLog_Print(log, WLOG_WARN, "SECONDARY ORDER %s not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static BOOL check_primary_order_supported(wLog* log, rdpSettings* settings, UINT32 orderType, const char* orderName) { BOOL condition = FALSE; switch (orderType) { case ORDER_TYPE_DSTBLT: condition = settings->OrderSupport[NEG_DSTBLT_INDEX]; break; case ORDER_TYPE_SCRBLT: condition = settings->OrderSupport[NEG_SCRBLT_INDEX]; break; case ORDER_TYPE_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: condition = settings->OrderSupport[NEG_MULTI_DRAWNINEGRID_INDEX]; break; case ORDER_TYPE_LINE_TO: condition = settings->OrderSupport[NEG_LINETO_INDEX]; break; /* [MS-RDPEGDI] 2.2.2.2.1.1.2.5 OpaqueRect (OPAQUERECT_ORDER) * suggests that PatBlt and OpaqueRect imply each other. 
*/ case ORDER_TYPE_PATBLT: case ORDER_TYPE_OPAQUE_RECT: condition = settings->OrderSupport[NEG_OPAQUE_RECT_INDEX] || settings->OrderSupport[NEG_PATBLT_INDEX]; break; case ORDER_TYPE_SAVE_BITMAP: condition = settings->OrderSupport[NEG_SAVEBITMAP_INDEX]; break; case ORDER_TYPE_MEMBLT: condition = settings->OrderSupport[NEG_MEMBLT_INDEX]; break; case ORDER_TYPE_MEM3BLT: condition = settings->OrderSupport[NEG_MEM3BLT_INDEX]; break; case ORDER_TYPE_MULTI_DSTBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_PATBLT: condition = settings->OrderSupport[NEG_MULTIPATBLT_INDEX]; break; case ORDER_TYPE_MULTI_SCRBLT: condition = settings->OrderSupport[NEG_MULTIDSTBLT_INDEX]; break; case ORDER_TYPE_MULTI_OPAQUE_RECT: condition = settings->OrderSupport[NEG_MULTIOPAQUERECT_INDEX]; break; case ORDER_TYPE_FAST_INDEX: condition = settings->OrderSupport[NEG_FAST_INDEX_INDEX]; break; case ORDER_TYPE_POLYGON_SC: condition = settings->OrderSupport[NEG_POLYGON_SC_INDEX]; break; case ORDER_TYPE_POLYGON_CB: condition = settings->OrderSupport[NEG_POLYGON_CB_INDEX]; break; case ORDER_TYPE_POLYLINE: condition = settings->OrderSupport[NEG_POLYLINE_INDEX]; break; case ORDER_TYPE_FAST_GLYPH: condition = settings->OrderSupport[NEG_FAST_GLYPH_INDEX]; break; case ORDER_TYPE_ELLIPSE_SC: condition = settings->OrderSupport[NEG_ELLIPSE_SC_INDEX]; break; case ORDER_TYPE_ELLIPSE_CB: condition = settings->OrderSupport[NEG_ELLIPSE_CB_INDEX]; break; case ORDER_TYPE_GLYPH_INDEX: condition = settings->OrderSupport[NEG_GLYPH_INDEX_INDEX]; break; default: WLog_Print(log, WLOG_WARN, "%s Primary Drawing Order not supported", orderName); break; } return check_order_activated(log, settings, orderName, condition); } static const char* primary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] DstBlt", "[0x%02" PRIx8 "] PatBlt", "[0x%02" PRIx8 "] ScrBlt", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] DrawNineGrid", "[0x%02" PRIx8 "] MultiDrawNineGrid", "[0x%02" PRIx8 "] LineTo", "[0x%02" PRIx8 "] OpaqueRect", "[0x%02" PRIx8 "] SaveBitmap", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] MemBlt", "[0x%02" PRIx8 "] Mem3Blt", "[0x%02" PRIx8 "] MultiDstBlt", "[0x%02" PRIx8 "] MultiPatBlt", "[0x%02" PRIx8 "] MultiScrBlt", "[0x%02" PRIx8 "] MultiOpaqueRect", "[0x%02" PRIx8 "] FastIndex", "[0x%02" PRIx8 "] PolygonSC", "[0x%02" PRIx8 "] PolygonCB", "[0x%02" PRIx8 "] Polyline", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] FastGlyph", "[0x%02" PRIx8 "] EllipseSC", "[0x%02" PRIx8 "] EllipseCB", "[0x%02" PRIx8 "] GlyphIndex" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* secondary_order_string(UINT32 orderType) { const char* orders[] = { "[0x%02" PRIx8 "] Cache Bitmap", "[0x%02" PRIx8 "] Cache Color Table", "[0x%02" PRIx8 "] Cache Bitmap (Compressed)", "[0x%02" PRIx8 "] Cache Glyph", "[0x%02" PRIx8 "] Cache Bitmap V2", "[0x%02" PRIx8 "] Cache Bitmap V2 (Compressed)", "[0x%02" PRIx8 "] UNUSED", "[0x%02" PRIx8 "] Cache Brush", "[0x%02" PRIx8 "] Cache Bitmap V3" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static const char* altsec_order_string(BYTE orderType) { const 
char* orders[] = { "[0x%02" PRIx8 "] Switch Surface", "[0x%02" PRIx8 "] Create Offscreen Bitmap", "[0x%02" PRIx8 "] Stream Bitmap First", "[0x%02" PRIx8 "] Stream Bitmap Next", "[0x%02" PRIx8 "] Create NineGrid Bitmap", "[0x%02" PRIx8 "] Draw GDI+ First", "[0x%02" PRIx8 "] Draw GDI+ Next", "[0x%02" PRIx8 "] Draw GDI+ End", "[0x%02" PRIx8 "] Draw GDI+ Cache First", "[0x%02" PRIx8 "] Draw GDI+ Cache Next", "[0x%02" PRIx8 "] Draw GDI+ Cache End", "[0x%02" PRIx8 "] Windowing", "[0x%02" PRIx8 "] Desktop Composition", "[0x%02" PRIx8 "] Frame Marker" }; const char* fmt = "[0x%02" PRIx8 "] UNKNOWN"; static char buffer[64] = { 0 }; if (orderType < ARRAYSIZE(orders)) fmt = orders[orderType]; sprintf_s(buffer, ARRAYSIZE(buffer), fmt, orderType); return buffer; } static INLINE BOOL update_read_coord(wStream* s, INT32* coord, BOOL delta) { INT8 lsi8; INT16 lsi16; if (delta) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_INT8(s, lsi8); *coord += lsi8; } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_INT16(s, lsi16); *coord = lsi16; } return TRUE; } static INLINE BOOL update_write_coord(wStream* s, INT32 coord) { Stream_Write_UINT16(s, coord); return TRUE; } static INLINE BOOL update_read_color(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 3) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = (UINT32)byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8) & 0xFF00; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16) & 0xFF0000; return TRUE; } static INLINE BOOL update_write_color(wStream* s, UINT32 color) { BYTE byte; byte = (color & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 8) & 0xFF); Stream_Write_UINT8(s, byte); byte = ((color >> 16) & 0xFF); Stream_Write_UINT8(s, byte); return TRUE; } static INLINE BOOL update_read_colorref(wStream* s, UINT32* color) { BYTE byte; if (Stream_GetRemainingLength(s) < 4) return FALSE; *color = 0; Stream_Read_UINT8(s, byte); *color = byte; Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 8); Stream_Read_UINT8(s, byte); *color |= ((UINT32)byte << 16); Stream_Seek_UINT8(s); return TRUE; } static INLINE BOOL update_read_color_quad(wStream* s, UINT32* color) { return update_read_colorref(s, color); } static INLINE void update_write_color_quad(wStream* s, UINT32 color) { BYTE byte; byte = (color >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (color >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = color & 0xFF; Stream_Write_UINT8(s, byte); } static INLINE BOOL update_read_2byte_unsigned(wStream* s, UINT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; *value = (byte & 0x7F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; } else { *value = (byte & 0x7F); } return TRUE; } static INLINE BOOL update_write_2byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value > 0x7FFF) return FALSE; if (value >= 0x7F) { byte = ((value & 0x7F00) >> 8); Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x7F); Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_2byte_signed(wStream* s, INT32* value) { BYTE byte; BOOL negative; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); negative = (byte & 0x40) ? 
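/* Worked example (illustrative, values chosen arbitrarily): the first byte of
 * a 2-byte signed field packs a continuation bit (0x80), a sign bit (0x40) and
 * the six high magnitude bits. Input bytes 0xC1 0x02 therefore decode as
 * negative = TRUE, magnitude = (0x01 << 8) | 0x02 = 258, result -258; a lone
 * byte 0x23 decodes as +35. */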
TRUE : FALSE; *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } if (negative) *value *= -1; return TRUE; } static INLINE BOOL update_write_2byte_signed(wStream* s, INT32 value) { BYTE byte; BOOL negative = FALSE; if (value < 0) { negative = TRUE; value *= -1; } if (value > 0x3FFF) return FALSE; if (value >= 0x3F) { byte = ((value & 0x3F00) >> 8); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte | 0x80); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else { byte = (value & 0x3F); if (negative) byte |= 0x40; Stream_Write_UINT8(s, byte); } return TRUE; } static INLINE BOOL update_read_4byte_unsigned(wStream* s, UINT32* value) { BYTE byte; BYTE count; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); count = (byte & 0xC0) >> 6; if (Stream_GetRemainingLength(s) < count) return FALSE; switch (count) { case 0: *value = (byte & 0x3F); break; case 1: *value = (byte & 0x3F) << 8; Stream_Read_UINT8(s, byte); *value |= byte; break; case 2: *value = (byte & 0x3F) << 16; Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; case 3: *value = (byte & 0x3F) << 24; Stream_Read_UINT8(s, byte); *value |= (byte << 16); Stream_Read_UINT8(s, byte); *value |= (byte << 8); Stream_Read_UINT8(s, byte); *value |= byte; break; default: break; } return TRUE; } static INLINE BOOL update_write_4byte_unsigned(wStream* s, UINT32 value) { BYTE byte; if (value <= 0x3F) { Stream_Write_UINT8(s, value); } else if (value <= 0x3FFF) { byte = (value >> 8) & 0x3F; Stream_Write_UINT8(s, byte | 0x40); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFF) { byte = (value >> 16) & 0x3F; Stream_Write_UINT8(s, byte | 0x80); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else if (value <= 0x3FFFFFFF) { byte = (value >> 24) & 0x3F; Stream_Write_UINT8(s, byte | 0xC0); byte = (value >> 16) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (value & 0xFF); Stream_Write_UINT8(s, byte); } else return FALSE; return TRUE; } static INLINE BOOL update_read_delta(wStream* s, INT32* value) { BYTE byte; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); if (byte & 0x40) *value = (byte | ~0x3F); else *value = (byte & 0x3F); if (byte & 0x80) { if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, byte); *value = (*value << 8) | byte; } return TRUE; } #if 0 static INLINE void update_read_glyph_delta(wStream* s, UINT16* value) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte == 0x80) Stream_Read_UINT16(s, *value); else *value = (byte & 0x3F); } static INLINE void update_seek_glyph_delta(wStream* s) { BYTE byte; Stream_Read_UINT8(s, byte); if (byte & 0x80) Stream_Seek_UINT8(s); } #endif static INLINE BOOL update_read_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->style); } if (fieldFlags & ORDER_FIELD_04) { if 
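/* Note on the variable-length integer helpers above (worked example with
 * arbitrary input): update_read_4byte_unsigned() stores the count of extra
 * bytes in the two top bits of the first byte. Bytes 0x47 0x21 give
 * count = 1, so the value is ((0x47 & 0x3F) << 8) | 0x21 = 0x0721 = 1825. */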
(Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, brush->hatch); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->index = brush->hatch; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 7) return FALSE; brush->data = (BYTE*)brush->p8x8; Stream_Read_UINT8(s, brush->data[7]); Stream_Read_UINT8(s, brush->data[6]); Stream_Read_UINT8(s, brush->data[5]); Stream_Read_UINT8(s, brush->data[4]); Stream_Read_UINT8(s, brush->data[3]); Stream_Read_UINT8(s, brush->data[2]); Stream_Read_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags) { if (fieldFlags & ORDER_FIELD_01) { Stream_Write_UINT8(s, brush->x); } if (fieldFlags & ORDER_FIELD_02) { Stream_Write_UINT8(s, brush->y); } if (fieldFlags & ORDER_FIELD_03) { Stream_Write_UINT8(s, brush->style); } if (brush->style & CACHED_BRUSH) { BOOL rc; brush->hatch = brush->index; brush->bpp = get_bmf_bpp(brush->style, &rc); if (!rc) return FALSE; if (brush->bpp == 0) brush->bpp = 1; } if (fieldFlags & ORDER_FIELD_04) { Stream_Write_UINT8(s, brush->hatch); } if (fieldFlags & ORDER_FIELD_05) { brush->data = (BYTE*)brush->p8x8; Stream_Write_UINT8(s, brush->data[7]); Stream_Write_UINT8(s, brush->data[6]); Stream_Write_UINT8(s, brush->data[5]); Stream_Write_UINT8(s, brush->data[4]); Stream_Write_UINT8(s, brush->data[3]); Stream_Write_UINT8(s, brush->data[2]); Stream_Write_UINT8(s, brush->data[1]); brush->data[0] = brush->hatch; } return TRUE; } static INLINE BOOL update_read_delta_rects(wStream* s, DELTA_RECT* rectangles, UINT32* nr) { UINT32 number = *nr; UINT32 i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; if (number > 45) { WLog_WARN(TAG, "Invalid number of delta rectangles %" PRIu32, number); return FALSE; } zeroBitsSize = ((number + 1) / 2); if (Stream_GetRemainingLength(s) < zeroBitsSize) return FALSE; Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(rectangles, sizeof(DELTA_RECT) * number); for (i = 0; i < number; i++) { if (i % 2 == 0) flags = zeroBits[i / 2]; if ((~flags & 0x80) && !update_read_delta(s, &rectangles[i].left)) return FALSE; if ((~flags & 0x40) && !update_read_delta(s, &rectangles[i].top)) return FALSE; if (~flags & 0x20) { if (!update_read_delta(s, &rectangles[i].width)) return FALSE; } else if (i > 0) rectangles[i].width = rectangles[i - 1].width; else rectangles[i].width = 0; if (~flags & 0x10) { if (!update_read_delta(s, &rectangles[i].height)) return FALSE; } else if (i > 0) rectangles[i].height = rectangles[i - 1].height; else rectangles[i].height = 0; if (i > 0) { rectangles[i].left += rectangles[i - 1].left; rectangles[i].top += rectangles[i - 1].top; } flags <<= 4; } return TRUE; } static INLINE BOOL update_read_delta_points(wStream* s, DELTA_POINT* points, int number, INT16 x, INT16 y) { int i; BYTE flags = 0; BYTE* zeroBits; UINT32 zeroBitsSize; zeroBitsSize = ((number + 3) / 4); if (Stream_GetRemainingLength(s) < zeroBitsSize) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < %" PRIu32 "", zeroBitsSize); return FALSE; } Stream_GetPointer(s, zeroBits); Stream_Seek(s, zeroBitsSize); ZeroMemory(points, sizeof(DELTA_POINT) * number); for (i = 0; i < number; i++) { if (i % 4 == 0) flags = zeroBits[i / 4]; if ((~flags & 0x80) && !update_read_delta(s, &points[i].x)) { WLog_ERR(TAG, "update_read_delta(x) failed"); return FALSE; } if ((~flags & 0x40) && 
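/* Illustrative decoding of the zero-flag byte (hypothetical input): each
 * point consumes two flag bits, MSB first, where a set bit means the delta is
 * omitted and stays zero. With zeroBits[0] = 0x90 (1001 0000b), point 0 skips
 * x and reads y; flags <<= 2 then leaves 0x40, so point 1 reads x and skips
 * y. */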
!update_read_delta(s, &points[i].y)) { WLog_ERR(TAG, "update_read_delta(y) failed"); return FALSE; } flags <<= 2; } return TRUE; } #define ORDER_FIELD_BYTE(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 1) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_2BYTE(NO, TARGET1, TARGET2) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s or %s", #TARGET1, #TARGET2); \ return FALSE; \ } \ Stream_Read_UINT8(s, TARGET1); \ Stream_Read_UINT8(s, TARGET2); \ } \ } while (0) #define ORDER_FIELD_UINT16(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 2) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT16(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_UINT32(NO, TARGET) \ do \ { \ if (orderInfo->fieldFlags & (1 << (NO - 1))) \ { \ if (Stream_GetRemainingLength(s) < 4) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ Stream_Read_UINT32(s, TARGET); \ } \ } while (0) #define ORDER_FIELD_COORD(NO, TARGET) \ do \ { \ if ((orderInfo->fieldFlags & (1 << (NO - 1))) && \ !update_read_coord(s, &TARGET, orderInfo->deltaCoordinates)) \ { \ WLog_ERR(TAG, "error reading %s", #TARGET); \ return FALSE; \ } \ } while (0) static INLINE BOOL ORDER_FIELD_COLOR(const ORDER_INFO* orderInfo, wStream* s, UINT32 NO, UINT32* TARGET) { if (!TARGET || !orderInfo) return FALSE; if ((orderInfo->fieldFlags & (1 << (NO - 1))) && !update_read_color(s, TARGET)) return FALSE; return TRUE; } static INLINE BOOL FIELD_SKIP_BUFFER16(wStream* s, UINT32 TARGET_LEN) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, TARGET_LEN); if (!Stream_SafeSeek(s, TARGET_LEN)) { WLog_ERR(TAG, "error skipping %" PRIu32 " bytes", TARGET_LEN); return FALSE; } return TRUE; } /* Primary Drawing Orders */ static BOOL update_read_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, DSTBLT_ORDER* dstblt) { ORDER_FIELD_COORD(1, dstblt->nLeftRect); ORDER_FIELD_COORD(2, dstblt->nTopRect); ORDER_FIELD_COORD(3, dstblt->nWidth); ORDER_FIELD_COORD(4, dstblt->nHeight); ORDER_FIELD_BYTE(5, dstblt->bRop); return TRUE; } int update_approximate_dstblt_order(ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { return 32; } BOOL update_write_dstblt_order(wStream* s, ORDER_INFO* orderInfo, const DSTBLT_ORDER* dstblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_dstblt_order(orderInfo, dstblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, dstblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, dstblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, dstblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, dstblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, dstblt->bRop); return TRUE; } static BOOL update_read_patblt_order(wStream* s, const ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { ORDER_FIELD_COORD(1, patblt->nLeftRect); ORDER_FIELD_COORD(2, patblt->nTopRect); ORDER_FIELD_COORD(3, patblt->nWidth); ORDER_FIELD_COORD(4, patblt->nHeight); ORDER_FIELD_BYTE(5, patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &patblt->foreColor); return update_read_brush(s, 
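/* The shift is deliberate (explanatory note): PatBlt carries its brush in
 * order fields 8..12, while update_read_brush() tests ORDER_FIELD_01..05, so
 * fieldFlags >> 7 renumbers field 8 to bit 1, field 9 to bit 2, and so on. */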
&patblt->brush, orderInfo->fieldFlags >> 7); } int update_approximate_patblt_order(ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { return 32; } BOOL update_write_patblt_order(wStream* s, ORDER_INFO* orderInfo, PATBLT_ORDER* patblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_patblt_order(orderInfo, patblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, patblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, patblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, patblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, patblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, patblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, patblt->backColor); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_color(s, patblt->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_08; orderInfo->fieldFlags |= ORDER_FIELD_09; orderInfo->fieldFlags |= ORDER_FIELD_10; orderInfo->fieldFlags |= ORDER_FIELD_11; orderInfo->fieldFlags |= ORDER_FIELD_12; update_write_brush(s, &patblt->brush, orderInfo->fieldFlags >> 7); return TRUE; } static BOOL update_read_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, SCRBLT_ORDER* scrblt) { ORDER_FIELD_COORD(1, scrblt->nLeftRect); ORDER_FIELD_COORD(2, scrblt->nTopRect); ORDER_FIELD_COORD(3, scrblt->nWidth); ORDER_FIELD_COORD(4, scrblt->nHeight); ORDER_FIELD_BYTE(5, scrblt->bRop); ORDER_FIELD_COORD(6, scrblt->nXSrc); ORDER_FIELD_COORD(7, scrblt->nYSrc); return TRUE; } int update_approximate_scrblt_order(ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { return 32; } BOOL update_write_scrblt_order(wStream* s, ORDER_INFO* orderInfo, const SCRBLT_ORDER* scrblt) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_scrblt_order(orderInfo, scrblt))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, scrblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, scrblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, scrblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, scrblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; Stream_Write_UINT8(s, scrblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_coord(s, scrblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, scrblt->nYSrc); return TRUE; } static BOOL update_read_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, opaque_rect->nTopRect); ORDER_FIELD_COORD(3, opaque_rect->nWidth); ORDER_FIELD_COORD(4, opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); opaque_rect->color = (opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } return TRUE; } int update_approximate_opaque_rect_order(ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { 
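/* The fixed value below is a conservative upper bound, not an exact size:
 * the update_approximate_* helpers only feed Stream_EnsureRemainingCapacity(),
 * so over-estimating is safe while under-estimating would risk a short
 * write. */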
return 32; } BOOL update_write_opaque_rect_order(wStream* s, ORDER_INFO* orderInfo, const OPAQUE_RECT_ORDER* opaque_rect) { BYTE byte; int inf = update_approximate_opaque_rect_order(orderInfo, opaque_rect); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; // TODO: Color format conversion orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; update_write_coord(s, opaque_rect->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, opaque_rect->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, opaque_rect->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, opaque_rect->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_05; byte = opaque_rect->color & 0x000000FF; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_06; byte = (opaque_rect->color & 0x0000FF00) >> 8; Stream_Write_UINT8(s, byte); orderInfo->fieldFlags |= ORDER_FIELD_07; byte = (opaque_rect->color & 0x00FF0000) >> 16; Stream_Write_UINT8(s, byte); return TRUE; } static BOOL update_read_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, DRAW_NINE_GRID_ORDER* draw_nine_grid) { ORDER_FIELD_COORD(1, draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, draw_nine_grid->bitmapId); return TRUE; } static BOOL update_read_multi_dstblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DSTBLT_ORDER* multi_dstblt) { ORDER_FIELD_COORD(1, multi_dstblt->nLeftRect); ORDER_FIELD_COORD(2, multi_dstblt->nTopRect); ORDER_FIELD_COORD(3, multi_dstblt->nWidth); ORDER_FIELD_COORD(4, multi_dstblt->nHeight); ORDER_FIELD_BYTE(5, multi_dstblt->bRop); ORDER_FIELD_BYTE(6, multi_dstblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_dstblt->cbData); return update_read_delta_rects(s, multi_dstblt->rectangles, &multi_dstblt->numRectangles); } return TRUE; } static BOOL update_read_multi_patblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_PATBLT_ORDER* multi_patblt) { ORDER_FIELD_COORD(1, multi_patblt->nLeftRect); ORDER_FIELD_COORD(2, multi_patblt->nTopRect); ORDER_FIELD_COORD(3, multi_patblt->nWidth); ORDER_FIELD_COORD(4, multi_patblt->nHeight); ORDER_FIELD_BYTE(5, multi_patblt->bRop); ORDER_FIELD_COLOR(orderInfo, s, 6, &multi_patblt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 7, &multi_patblt->foreColor); if (!update_read_brush(s, &multi_patblt->brush, orderInfo->fieldFlags >> 7)) return FALSE; ORDER_FIELD_BYTE(13, multi_patblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_14) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_patblt->cbData); if (!update_read_delta_rects(s, multi_patblt->rectangles, &multi_patblt->numRectangles)) return FALSE; } return TRUE; } static BOOL update_read_multi_scrblt_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_SCRBLT_ORDER* multi_scrblt) { ORDER_FIELD_COORD(1, multi_scrblt->nLeftRect); ORDER_FIELD_COORD(2, multi_scrblt->nTopRect); ORDER_FIELD_COORD(3, multi_scrblt->nWidth); ORDER_FIELD_COORD(4, multi_scrblt->nHeight); ORDER_FIELD_BYTE(5, multi_scrblt->bRop); ORDER_FIELD_COORD(6, multi_scrblt->nXSrc); ORDER_FIELD_COORD(7, multi_scrblt->nYSrc); ORDER_FIELD_BYTE(8, multi_scrblt->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_scrblt->cbData); 
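/* Decoding sketch (hypothetical values): delta rectangles accumulate, so
 * left/top are offsets from the previous rectangle while width/height repeat
 * the previous value when their zero-flag bit is set. E.g. rect 0 =
 * (10, 10, 5, 5) followed by deltas (+20, +0) with both size flags set yields
 * rect 1 = (30, 10, 5, 5). Note that cbData is read here, but the number of
 * bytes actually consumed is governed by the per-rectangle flags. */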
return update_read_delta_rects(s, multi_scrblt->rectangles, &multi_scrblt->numRectangles); } return TRUE; } static BOOL update_read_multi_opaque_rect_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_OPAQUE_RECT_ORDER* multi_opaque_rect) { BYTE byte; ORDER_FIELD_COORD(1, multi_opaque_rect->nLeftRect); ORDER_FIELD_COORD(2, multi_opaque_rect->nTopRect); ORDER_FIELD_COORD(3, multi_opaque_rect->nWidth); ORDER_FIELD_COORD(4, multi_opaque_rect->nHeight); if (orderInfo->fieldFlags & ORDER_FIELD_05) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FFFF00) | ((UINT32)byte); } if (orderInfo->fieldFlags & ORDER_FIELD_06) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x00FF00FF) | ((UINT32)byte << 8); } if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, byte); multi_opaque_rect->color = (multi_opaque_rect->color & 0x0000FFFF) | ((UINT32)byte << 16); } ORDER_FIELD_BYTE(8, multi_opaque_rect->numRectangles); if (orderInfo->fieldFlags & ORDER_FIELD_09) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_opaque_rect->cbData); return update_read_delta_rects(s, multi_opaque_rect->rectangles, &multi_opaque_rect->numRectangles); } return TRUE; } static BOOL update_read_multi_draw_nine_grid_order(wStream* s, const ORDER_INFO* orderInfo, MULTI_DRAW_NINE_GRID_ORDER* multi_draw_nine_grid) { ORDER_FIELD_COORD(1, multi_draw_nine_grid->srcLeft); ORDER_FIELD_COORD(2, multi_draw_nine_grid->srcTop); ORDER_FIELD_COORD(3, multi_draw_nine_grid->srcRight); ORDER_FIELD_COORD(4, multi_draw_nine_grid->srcBottom); ORDER_FIELD_UINT16(5, multi_draw_nine_grid->bitmapId); ORDER_FIELD_BYTE(6, multi_draw_nine_grid->nDeltaEntries); if (orderInfo->fieldFlags & ORDER_FIELD_07) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, multi_draw_nine_grid->cbData); return update_read_delta_rects(s, multi_draw_nine_grid->rectangles, &multi_draw_nine_grid->nDeltaEntries); } return TRUE; } static BOOL update_read_line_to_order(wStream* s, const ORDER_INFO* orderInfo, LINE_TO_ORDER* line_to) { ORDER_FIELD_UINT16(1, line_to->backMode); ORDER_FIELD_COORD(2, line_to->nXStart); ORDER_FIELD_COORD(3, line_to->nYStart); ORDER_FIELD_COORD(4, line_to->nXEnd); ORDER_FIELD_COORD(5, line_to->nYEnd); ORDER_FIELD_COLOR(orderInfo, s, 6, &line_to->backColor); ORDER_FIELD_BYTE(7, line_to->bRop2); ORDER_FIELD_BYTE(8, line_to->penStyle); ORDER_FIELD_BYTE(9, line_to->penWidth); ORDER_FIELD_COLOR(orderInfo, s, 10, &line_to->penColor); return TRUE; } int update_approximate_line_to_order(ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { return 32; } BOOL update_write_line_to_order(wStream* s, ORDER_INFO* orderInfo, const LINE_TO_ORDER* line_to) { if (!Stream_EnsureRemainingCapacity(s, update_approximate_line_to_order(orderInfo, line_to))) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, line_to->backMode); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, line_to->nXStart); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, line_to->nYStart); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, line_to->nXEnd); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, line_to->nYEnd); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, line_to->backColor); 
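/* Usage sketch (assumed caller-side code, not part of this file): a caller
 * fills a LINE_TO_ORDER and lets this function mark every field as present,
 * e.g.
 *   LINE_TO_ORDER lt = { 0 };
 *   lt.nXStart = 10; lt.nYStart = 10; lt.nXEnd = 90; lt.nYEnd = 40;
 *   lt.bRop2 = 0x0D;  (R2_COPYPEN)
 *   update_write_line_to_order(s, &orderInfo, &lt);
 * The coordinates and ROP2 value are arbitrary illustration values. */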
orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT8(s, line_to->bRop2); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT8(s, line_to->penStyle); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT8(s, line_to->penWidth); orderInfo->fieldFlags |= ORDER_FIELD_10; update_write_color(s, line_to->penColor); return TRUE; } static BOOL update_read_polyline_order(wStream* s, const ORDER_INFO* orderInfo, POLYLINE_ORDER* polyline) { UINT16 word; UINT32 new_num = polyline->numDeltaEntries; ORDER_FIELD_COORD(1, polyline->xStart); ORDER_FIELD_COORD(2, polyline->yStart); ORDER_FIELD_BYTE(3, polyline->bRop2); ORDER_FIELD_UINT16(4, word); ORDER_FIELD_COLOR(orderInfo, s, 5, &polyline->penColor); ORDER_FIELD_BYTE(6, new_num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* new_points; if (new_num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) { WLog_ERR(TAG, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, polyline->cbData); new_points = (DELTA_POINT*)realloc(polyline->points, sizeof(DELTA_POINT) * new_num); if (!new_points) { WLog_ERR(TAG, "realloc(%" PRIu32 ") failed", new_num); return FALSE; } polyline->points = new_points; polyline->numDeltaEntries = new_num; return update_read_delta_points(s, polyline->points, polyline->numDeltaEntries, polyline->xStart, polyline->yStart); } return TRUE; } static BOOL update_read_memblt_order(wStream* s, const ORDER_INFO* orderInfo, MEMBLT_ORDER* memblt) { if (!s || !orderInfo || !memblt) return FALSE; ORDER_FIELD_UINT16(1, memblt->cacheId); ORDER_FIELD_COORD(2, memblt->nLeftRect); ORDER_FIELD_COORD(3, memblt->nTopRect); ORDER_FIELD_COORD(4, memblt->nWidth); ORDER_FIELD_COORD(5, memblt->nHeight); ORDER_FIELD_BYTE(6, memblt->bRop); ORDER_FIELD_COORD(7, memblt->nXSrc); ORDER_FIELD_COORD(8, memblt->nYSrc); ORDER_FIELD_UINT16(9, memblt->cacheIndex); memblt->colorIndex = (memblt->cacheId >> 8); memblt->cacheId = (memblt->cacheId & 0xFF); memblt->bitmap = NULL; return TRUE; } int update_approximate_memblt_order(ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { return 64; } BOOL update_write_memblt_order(wStream* s, ORDER_INFO* orderInfo, const MEMBLT_ORDER* memblt) { UINT16 cacheId; if (!Stream_EnsureRemainingCapacity(s, update_approximate_memblt_order(orderInfo, memblt))) return FALSE; cacheId = (memblt->cacheId & 0xFF) | ((memblt->colorIndex & 0xFF) << 8); orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT16(s, cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; update_write_coord(s, memblt->nLeftRect); orderInfo->fieldFlags |= ORDER_FIELD_03; update_write_coord(s, memblt->nTopRect); orderInfo->fieldFlags |= ORDER_FIELD_04; update_write_coord(s, memblt->nWidth); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_coord(s, memblt->nHeight); orderInfo->fieldFlags |= ORDER_FIELD_06; Stream_Write_UINT8(s, memblt->bRop); orderInfo->fieldFlags |= ORDER_FIELD_07; update_write_coord(s, memblt->nXSrc); orderInfo->fieldFlags |= ORDER_FIELD_08; update_write_coord(s, memblt->nYSrc); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, memblt->cacheIndex); return TRUE; } static BOOL update_read_mem3blt_order(wStream* s, const ORDER_INFO* orderInfo, MEM3BLT_ORDER* mem3blt) { ORDER_FIELD_UINT16(1, mem3blt->cacheId); ORDER_FIELD_COORD(2, mem3blt->nLeftRect); ORDER_FIELD_COORD(3, mem3blt->nTopRect); ORDER_FIELD_COORD(4, mem3blt->nWidth); ORDER_FIELD_COORD(5, mem3blt->nHeight); ORDER_FIELD_BYTE(6, mem3blt->bRop); ORDER_FIELD_COORD(7, mem3blt->nXSrc); ORDER_FIELD_COORD(8, mem3blt->nYSrc); 
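/* Field packing note (worked example): like MemBlt above, the 16-bit cacheId
 * field carries the color table index in its high byte. A wire value of
 * 0x0203 therefore splits into colorIndex = 2 and cacheId = 3, which is done
 * once all fields have been read. */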
ORDER_FIELD_COLOR(orderInfo, s, 9, &mem3blt->backColor); ORDER_FIELD_COLOR(orderInfo, s, 10, &mem3blt->foreColor); if (!update_read_brush(s, &mem3blt->brush, orderInfo->fieldFlags >> 10)) return FALSE; ORDER_FIELD_UINT16(16, mem3blt->cacheIndex); mem3blt->colorIndex = (mem3blt->cacheId >> 8); mem3blt->cacheId = (mem3blt->cacheId & 0xFF); mem3blt->bitmap = NULL; return TRUE; } static BOOL update_read_save_bitmap_order(wStream* s, const ORDER_INFO* orderInfo, SAVE_BITMAP_ORDER* save_bitmap) { ORDER_FIELD_UINT32(1, save_bitmap->savedBitmapPosition); ORDER_FIELD_COORD(2, save_bitmap->nLeftRect); ORDER_FIELD_COORD(3, save_bitmap->nTopRect); ORDER_FIELD_COORD(4, save_bitmap->nRightRect); ORDER_FIELD_COORD(5, save_bitmap->nBottomRect); ORDER_FIELD_BYTE(6, save_bitmap->operation); return TRUE; } static BOOL update_read_glyph_index_order(wStream* s, const ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { ORDER_FIELD_BYTE(1, glyph_index->cacheId); ORDER_FIELD_BYTE(2, glyph_index->flAccel); ORDER_FIELD_BYTE(3, glyph_index->ulCharInc); ORDER_FIELD_BYTE(4, glyph_index->fOpRedundant); ORDER_FIELD_COLOR(orderInfo, s, 5, &glyph_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &glyph_index->foreColor); ORDER_FIELD_UINT16(7, glyph_index->bkLeft); ORDER_FIELD_UINT16(8, glyph_index->bkTop); ORDER_FIELD_UINT16(9, glyph_index->bkRight); ORDER_FIELD_UINT16(10, glyph_index->bkBottom); ORDER_FIELD_UINT16(11, glyph_index->opLeft); ORDER_FIELD_UINT16(12, glyph_index->opTop); ORDER_FIELD_UINT16(13, glyph_index->opRight); ORDER_FIELD_UINT16(14, glyph_index->opBottom); if (!update_read_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14)) return FALSE; ORDER_FIELD_UINT16(20, glyph_index->x); ORDER_FIELD_UINT16(21, glyph_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_22) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, glyph_index->cbData); if (Stream_GetRemainingLength(s) < glyph_index->cbData) return FALSE; CopyMemory(glyph_index->data, Stream_Pointer(s), glyph_index->cbData); Stream_Seek(s, glyph_index->cbData); } return TRUE; } int update_approximate_glyph_index_order(ORDER_INFO* orderInfo, const GLYPH_INDEX_ORDER* glyph_index) { return 64; } BOOL update_write_glyph_index_order(wStream* s, ORDER_INFO* orderInfo, GLYPH_INDEX_ORDER* glyph_index) { int inf = update_approximate_glyph_index_order(orderInfo, glyph_index); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; orderInfo->fieldFlags = 0; orderInfo->fieldFlags |= ORDER_FIELD_01; Stream_Write_UINT8(s, glyph_index->cacheId); orderInfo->fieldFlags |= ORDER_FIELD_02; Stream_Write_UINT8(s, glyph_index->flAccel); orderInfo->fieldFlags |= ORDER_FIELD_03; Stream_Write_UINT8(s, glyph_index->ulCharInc); orderInfo->fieldFlags |= ORDER_FIELD_04; Stream_Write_UINT8(s, glyph_index->fOpRedundant); orderInfo->fieldFlags |= ORDER_FIELD_05; update_write_color(s, glyph_index->backColor); orderInfo->fieldFlags |= ORDER_FIELD_06; update_write_color(s, glyph_index->foreColor); orderInfo->fieldFlags |= ORDER_FIELD_07; Stream_Write_UINT16(s, glyph_index->bkLeft); orderInfo->fieldFlags |= ORDER_FIELD_08; Stream_Write_UINT16(s, glyph_index->bkTop); orderInfo->fieldFlags |= ORDER_FIELD_09; Stream_Write_UINT16(s, glyph_index->bkRight); orderInfo->fieldFlags |= ORDER_FIELD_10; Stream_Write_UINT16(s, glyph_index->bkBottom); orderInfo->fieldFlags |= ORDER_FIELD_11; Stream_Write_UINT16(s, glyph_index->opLeft); orderInfo->fieldFlags |= ORDER_FIELD_12; Stream_Write_UINT16(s, glyph_index->opTop); orderInfo->fieldFlags |= ORDER_FIELD_13; 
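/* As on the read path, fields 15..19 of GlyphIndex describe the brush, so
 * the brush helper below is handed fieldFlags >> 14 to renumber them to
 * ORDER_FIELD_01..05. */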
Stream_Write_UINT16(s, glyph_index->opRight); orderInfo->fieldFlags |= ORDER_FIELD_14; Stream_Write_UINT16(s, glyph_index->opBottom); orderInfo->fieldFlags |= ORDER_FIELD_15; orderInfo->fieldFlags |= ORDER_FIELD_16; orderInfo->fieldFlags |= ORDER_FIELD_17; orderInfo->fieldFlags |= ORDER_FIELD_18; orderInfo->fieldFlags |= ORDER_FIELD_19; update_write_brush(s, &glyph_index->brush, orderInfo->fieldFlags >> 14); orderInfo->fieldFlags |= ORDER_FIELD_20; Stream_Write_UINT16(s, glyph_index->x); orderInfo->fieldFlags |= ORDER_FIELD_21; Stream_Write_UINT16(s, glyph_index->y); orderInfo->fieldFlags |= ORDER_FIELD_22; Stream_Write_UINT8(s, glyph_index->cbData); Stream_Write(s, glyph_index->data, glyph_index->cbData); return TRUE; } static BOOL update_read_fast_index_order(wStream* s, const ORDER_INFO* orderInfo, FAST_INDEX_ORDER* fast_index) { ORDER_FIELD_BYTE(1, fast_index->cacheId); ORDER_FIELD_2BYTE(2, fast_index->ulCharInc, fast_index->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fast_index->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fast_index->foreColor); ORDER_FIELD_COORD(5, fast_index->bkLeft); ORDER_FIELD_COORD(6, fast_index->bkTop); ORDER_FIELD_COORD(7, fast_index->bkRight); ORDER_FIELD_COORD(8, fast_index->bkBottom); ORDER_FIELD_COORD(9, fast_index->opLeft); ORDER_FIELD_COORD(10, fast_index->opTop); ORDER_FIELD_COORD(11, fast_index->opRight); ORDER_FIELD_COORD(12, fast_index->opBottom); ORDER_FIELD_COORD(13, fast_index->x); ORDER_FIELD_COORD(14, fast_index->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fast_index->cbData); if (Stream_GetRemainingLength(s) < fast_index->cbData) return FALSE; CopyMemory(fast_index->data, Stream_Pointer(s), fast_index->cbData); Stream_Seek(s, fast_index->cbData); } return TRUE; } static BOOL update_read_fast_glyph_order(wStream* s, const ORDER_INFO* orderInfo, FAST_GLYPH_ORDER* fastGlyph) { GLYPH_DATA_V2* glyph = &fastGlyph->glyphData; ORDER_FIELD_BYTE(1, fastGlyph->cacheId); ORDER_FIELD_2BYTE(2, fastGlyph->ulCharInc, fastGlyph->flAccel); ORDER_FIELD_COLOR(orderInfo, s, 3, &fastGlyph->backColor); ORDER_FIELD_COLOR(orderInfo, s, 4, &fastGlyph->foreColor); ORDER_FIELD_COORD(5, fastGlyph->bkLeft); ORDER_FIELD_COORD(6, fastGlyph->bkTop); ORDER_FIELD_COORD(7, fastGlyph->bkRight); ORDER_FIELD_COORD(8, fastGlyph->bkBottom); ORDER_FIELD_COORD(9, fastGlyph->opLeft); ORDER_FIELD_COORD(10, fastGlyph->opTop); ORDER_FIELD_COORD(11, fastGlyph->opRight); ORDER_FIELD_COORD(12, fastGlyph->opBottom); ORDER_FIELD_COORD(13, fastGlyph->x); ORDER_FIELD_COORD(14, fastGlyph->y); if (orderInfo->fieldFlags & ORDER_FIELD_15) { if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; CopyMemory(fastGlyph->data, Stream_Pointer(s), fastGlyph->cbData); if (Stream_GetRemainingLength(s) < fastGlyph->cbData) return FALSE; if (!Stream_SafeSeek(s, 1)) return FALSE; if (fastGlyph->cbData > 1) { UINT32 new_cb; /* parse optional glyph data */ glyph->cacheIndex = fastGlyph->data[0]; if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) return FALSE; glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; new_cb = ((glyph->cx + 7) / 8) * glyph->cy; new_cb += ((new_cb % 4) > 0) ? 
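/* Worked example for the glyph size math (arbitrary glyph dimensions): for
 * cx = 10, cy = 12 the bitmap is ceil(10 / 8) * 12 = 24 bytes, already a
 * multiple of 4; for cx = 9, cy = 3 it is 2 * 3 = 6 bytes, padded up to 8. */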
4 - (new_cb % 4) : 0; if (fastGlyph->cbData < new_cb) return FALSE; if (new_cb > 0) { BYTE* new_aj; new_aj = (BYTE*)realloc(glyph->aj, new_cb); if (!new_aj) return FALSE; glyph->aj = new_aj; glyph->cb = new_cb; Stream_Read(s, glyph->aj, glyph->cb); } Stream_Seek(s, fastGlyph->cbData - new_cb); } } return TRUE; } static BOOL update_read_polygon_sc_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_SC_ORDER* polygon_sc) { UINT32 num = polygon_sc->numPoints; ORDER_FIELD_COORD(1, polygon_sc->xStart); ORDER_FIELD_COORD(2, polygon_sc->yStart); ORDER_FIELD_BYTE(3, polygon_sc->bRop2); ORDER_FIELD_BYTE(4, polygon_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_sc->brushColor); ORDER_FIELD_BYTE(6, num); if (orderInfo->fieldFlags & ORDER_FIELD_07) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_sc->cbData); newpoints = (DELTA_POINT*)realloc(polygon_sc->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_sc->points = newpoints; polygon_sc->numPoints = num; return update_read_delta_points(s, polygon_sc->points, polygon_sc->numPoints, polygon_sc->xStart, polygon_sc->yStart); } return TRUE; } static BOOL update_read_polygon_cb_order(wStream* s, const ORDER_INFO* orderInfo, POLYGON_CB_ORDER* polygon_cb) { UINT32 num = polygon_cb->numPoints; ORDER_FIELD_COORD(1, polygon_cb->xStart); ORDER_FIELD_COORD(2, polygon_cb->yStart); ORDER_FIELD_BYTE(3, polygon_cb->bRop2); ORDER_FIELD_BYTE(4, polygon_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 5, &polygon_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 6, &polygon_cb->foreColor); if (!update_read_brush(s, &polygon_cb->brush, orderInfo->fieldFlags >> 6)) return FALSE; ORDER_FIELD_BYTE(12, num); if (orderInfo->fieldFlags & ORDER_FIELD_13) { DELTA_POINT* newpoints; if (num == 0) return FALSE; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, polygon_cb->cbData); newpoints = (DELTA_POINT*)realloc(polygon_cb->points, sizeof(DELTA_POINT) * num); if (!newpoints) return FALSE; polygon_cb->points = newpoints; polygon_cb->numPoints = num; if (!update_read_delta_points(s, polygon_cb->points, polygon_cb->numPoints, polygon_cb->xStart, polygon_cb->yStart)) return FALSE; } polygon_cb->backMode = (polygon_cb->bRop2 & 0x80) ? 
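/* Bit layout note (worked example): the wire bRop2 byte packs the background
 * mode into bit 7 and the ROP2 code into the low 5 bits, so 0x8D decodes to
 * BACKMODE_TRANSPARENT with bRop2 = 0x0D, while a plain 0x0D is opaque. */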
BACKMODE_TRANSPARENT : BACKMODE_OPAQUE; polygon_cb->bRop2 = (polygon_cb->bRop2 & 0x1F); return TRUE; } static BOOL update_read_ellipse_sc_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_SC_ORDER* ellipse_sc) { ORDER_FIELD_COORD(1, ellipse_sc->leftRect); ORDER_FIELD_COORD(2, ellipse_sc->topRect); ORDER_FIELD_COORD(3, ellipse_sc->rightRect); ORDER_FIELD_COORD(4, ellipse_sc->bottomRect); ORDER_FIELD_BYTE(5, ellipse_sc->bRop2); ORDER_FIELD_BYTE(6, ellipse_sc->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_sc->color); return TRUE; } static BOOL update_read_ellipse_cb_order(wStream* s, const ORDER_INFO* orderInfo, ELLIPSE_CB_ORDER* ellipse_cb) { ORDER_FIELD_COORD(1, ellipse_cb->leftRect); ORDER_FIELD_COORD(2, ellipse_cb->topRect); ORDER_FIELD_COORD(3, ellipse_cb->rightRect); ORDER_FIELD_COORD(4, ellipse_cb->bottomRect); ORDER_FIELD_BYTE(5, ellipse_cb->bRop2); ORDER_FIELD_BYTE(6, ellipse_cb->fillMode); ORDER_FIELD_COLOR(orderInfo, s, 7, &ellipse_cb->backColor); ORDER_FIELD_COLOR(orderInfo, s, 8, &ellipse_cb->foreColor); return update_read_brush(s, &ellipse_cb->brush, orderInfo->fieldFlags >> 8); } /* Secondary Drawing Orders */ static CACHE_BITMAP_ORDER* update_read_cache_bitmap_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { CACHE_BITMAP_ORDER* cache_bitmap; if (!update || !s) return NULL; cache_bitmap = calloc(1, sizeof(CACHE_BITMAP_ORDER)); if (!cache_bitmap) goto fail; if (Stream_GetRemainingLength(s) < 9) goto fail; Stream_Read_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Read_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((cache_bitmap->bitmapBpp < 1) || (cache_bitmap->bitmapBpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bitmap bpp %" PRIu32 "", cache_bitmap->bitmapBpp); goto fail; } Stream_Read_UINT16(s, cache_bitmap->bitmapLength); /* bitmapLength (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ cache_bitmap->bitmapLength -= 8; } } if (cache_bitmap->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap->bitmapLength) goto fail; cache_bitmap->bitmapDataStream = malloc(cache_bitmap->bitmapLength); if (!cache_bitmap->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap->bitmapDataStream, cache_bitmap->bitmapLength); cache_bitmap->compressed = compressed; return cache_bitmap; fail: free_cache_bitmap_order(update->context, cache_bitmap); return NULL; } int update_approximate_cache_bitmap_order(const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap->bitmapLength; } BOOL update_write_cache_bitmap_order(wStream* s, const CACHE_BITMAP_ORDER* cache_bitmap, BOOL compressed, UINT16* flags) { UINT32 bitmapLength = cache_bitmap->bitmapLength; int inf = update_approximate_cache_bitmap_order(cache_bitmap, compressed, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = NO_BITMAP_COMPRESSION_HDR; if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) bitmapLength += 8; Stream_Write_UINT8(s, cache_bitmap->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, 0); 
/* pad1Octet (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapWidth); /* bitmapWidth (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapHeight); /* bitmapHeight (1 byte) */ Stream_Write_UINT8(s, cache_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ Stream_Write_UINT16(s, bitmapLength); /* bitmapLength (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap->cacheIndex); /* cacheIndex (2 bytes) */ if (compressed) { if ((*flags & NO_BITMAP_COMPRESSION_HDR) == 0) { BYTE* bitmapComprHdr = (BYTE*)&(cache_bitmap->bitmapComprHdr); Stream_Write(s, bitmapComprHdr, 8); /* bitmapComprHdr (8 bytes) */ bitmapLength -= 8; } Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } else { Stream_Write(s, cache_bitmap->bitmapDataStream, bitmapLength); } return TRUE; } static CACHE_BITMAP_V2_ORDER* update_read_cache_bitmap_v2_order(rdpUpdate* update, wStream* s, BOOL compressed, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; CACHE_BITMAP_V2_ORDER* cache_bitmap_v2; if (!update || !s) return NULL; cache_bitmap_v2 = calloc(1, sizeof(CACHE_BITMAP_V2_ORDER)); if (!cache_bitmap_v2) goto fail; cache_bitmap_v2->cacheId = flags & 0x0003; cache_bitmap_v2->flags = (flags & 0xFF80) >> 7; bitsPerPixelId = (flags & 0x0078) >> 3; cache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ goto fail; cache_bitmap_v2->bitmapHeight = cache_bitmap_v2->bitmapWidth; } else { if (!update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ goto fail; } if (!update_read_4byte_unsigned(s, &cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_read_2byte_unsigned(s, &cache_bitmap_v2->cacheIndex)) /* cacheIndex */ goto fail; if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { if (Stream_GetRemainingLength(s) < 8) goto fail; Stream_Read_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Read_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Read_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } } if (cache_bitmap_v2->bitmapLength == 0) goto fail; if (Stream_GetRemainingLength(s) < cache_bitmap_v2->bitmapLength) goto fail; if (cache_bitmap_v2->bitmapLength == 0) goto fail; cache_bitmap_v2->bitmapDataStream = malloc(cache_bitmap_v2->bitmapLength); if (!cache_bitmap_v2->bitmapDataStream) goto fail; Stream_Read(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); cache_bitmap_v2->compressed = compressed; return cache_bitmap_v2; fail: free_cache_bitmap_v2_order(update->context, cache_bitmap_v2); return NULL; } int update_approximate_cache_bitmap_v2_order(CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { return 64 + cache_bitmap_v2->bitmapLength; } BOOL 
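/* Flags word layout for Cache Bitmap V2 (worked example, hypothetical value):
 * bits 0-1 carry the cacheId, bits 3-6 the bits-per-pixel table id and bits
 * 7-15 the CBR2_* flags. flags = 0x0499 thus gives cacheId = 1,
 * bitsPerPixelId = 3 and cache_bitmap_v2->flags = 0x09. */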
update_write_cache_bitmap_v2_order(wStream* s, CACHE_BITMAP_V2_ORDER* cache_bitmap_v2, BOOL compressed, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v2_order(cache_bitmap_v2, compressed, flags))) return FALSE; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v2->cacheId & 0x0003) | (bitsPerPixelId << 3) | ((cache_bitmap_v2->flags << 7) & 0xFF80); if (cache_bitmap_v2->flags & CBR2_PERSISTENT_KEY_PRESENT) { Stream_Write_UINT32(s, cache_bitmap_v2->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v2->key2); /* key2 (4 bytes) */ } if (cache_bitmap_v2->flags & CBR2_HEIGHT_SAME_AS_WIDTH) { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth)) /* bitmapWidth */ return FALSE; } else { if (!update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapWidth) || /* bitmapWidth */ !update_write_2byte_unsigned(s, cache_bitmap_v2->bitmapHeight)) /* bitmapHeight */ return FALSE; } if (cache_bitmap_v2->flags & CBR2_DO_NOT_CACHE) cache_bitmap_v2->cacheIndex = BITMAP_CACHE_WAITING_LIST_INDEX; if (!update_write_4byte_unsigned(s, cache_bitmap_v2->bitmapLength) || /* bitmapLength */ !update_write_2byte_unsigned(s, cache_bitmap_v2->cacheIndex)) /* cacheIndex */ return FALSE; if (compressed) { if (!(cache_bitmap_v2->flags & CBR2_NO_BITMAP_COMPRESSION_HDR)) { Stream_Write_UINT16( s, cache_bitmap_v2->cbCompFirstRowSize); /* cbCompFirstRowSize (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbCompMainBodySize); /* cbCompMainBodySize (2 bytes) */ Stream_Write_UINT16(s, cache_bitmap_v2->cbScanWidth); /* cbScanWidth (2 bytes) */ Stream_Write_UINT16( s, cache_bitmap_v2->cbUncompressedSize); /* cbUncompressedSize (2 bytes) */ cache_bitmap_v2->bitmapLength = cache_bitmap_v2->cbCompMainBodySize; } if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } else { if (!Stream_EnsureRemainingCapacity(s, cache_bitmap_v2->bitmapLength)) return FALSE; Stream_Write(s, cache_bitmap_v2->bitmapDataStream, cache_bitmap_v2->bitmapLength); } cache_bitmap_v2->compressed = compressed; return TRUE; } static CACHE_BITMAP_V3_ORDER* update_read_cache_bitmap_v3_order(rdpUpdate* update, wStream* s, UINT16 flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; UINT32 new_len; BYTE* new_data; CACHE_BITMAP_V3_ORDER* cache_bitmap_v3; if (!update || !s) return NULL; cache_bitmap_v3 = calloc(1, sizeof(CACHE_BITMAP_V3_ORDER)); if (!cache_bitmap_v3) goto fail; cache_bitmap_v3->cacheId = flags & 0x00000003; cache_bitmap_v3->flags = (flags & 0x0000FF80) >> 7; bitsPerPixelId = (flags & 0x00000078) >> 3; cache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc); if (!rc) goto fail; if (Stream_GetRemainingLength(s) < 21) goto fail; Stream_Read_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Read_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ bitmapData = &cache_bitmap_v3->bitmapData; Stream_Read_UINT8(s, bitmapData->bpp); if ((bitmapData->bpp < 1) || (bitmapData->bpp > 32)) { WLog_Print(update->log, WLOG_ERROR, "invalid bpp value %" PRIu32 "", bitmapData->bpp); goto fail; } Stream_Seek_UINT8(s); /* reserved1 (1 byte) */ Stream_Seek_UINT8(s); /* reserved2 (1 byte) */ Stream_Read_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Read_UINT16(s, bitmapData->width); /* 
width (2 bytes) */ Stream_Read_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Read_UINT32(s, new_len); /* length (4 bytes) */ if ((new_len == 0) || (Stream_GetRemainingLength(s) < new_len)) goto fail; new_data = (BYTE*)realloc(bitmapData->data, new_len); if (!new_data) goto fail; bitmapData->data = new_data; bitmapData->length = new_len; Stream_Read(s, bitmapData->data, bitmapData->length); return cache_bitmap_v3; fail: free_cache_bitmap_v3_order(update->context, cache_bitmap_v3); return NULL; } int update_approximate_cache_bitmap_v3_order(CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BITMAP_DATA_EX* bitmapData = &cache_bitmap_v3->bitmapData; return 64 + bitmapData->length; } BOOL update_write_cache_bitmap_v3_order(wStream* s, CACHE_BITMAP_V3_ORDER* cache_bitmap_v3, UINT16* flags) { BOOL rc; BYTE bitsPerPixelId; BITMAP_DATA_EX* bitmapData; if (!Stream_EnsureRemainingCapacity( s, update_approximate_cache_bitmap_v3_order(cache_bitmap_v3, flags))) return FALSE; bitmapData = &cache_bitmap_v3->bitmapData; bitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc); if (!rc) return FALSE; *flags = (cache_bitmap_v3->cacheId & 0x00000003) | ((cache_bitmap_v3->flags << 7) & 0x0000FF80) | ((bitsPerPixelId << 3) & 0x00000078); Stream_Write_UINT16(s, cache_bitmap_v3->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key1); /* key1 (4 bytes) */ Stream_Write_UINT32(s, cache_bitmap_v3->key2); /* key2 (4 bytes) */ Stream_Write_UINT8(s, bitmapData->bpp); Stream_Write_UINT8(s, 0); /* reserved1 (1 byte) */ Stream_Write_UINT8(s, 0); /* reserved2 (1 byte) */ Stream_Write_UINT8(s, bitmapData->codecID); /* codecID (1 byte) */ Stream_Write_UINT16(s, bitmapData->width); /* width (2 bytes) */ Stream_Write_UINT16(s, bitmapData->height); /* height (2 bytes) */ Stream_Write_UINT32(s, bitmapData->length); /* length (4 bytes) */ Stream_Write(s, bitmapData->data, bitmapData->length); return TRUE; } static CACHE_COLOR_TABLE_ORDER* update_read_cache_color_table_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; UINT32* colorTable; CACHE_COLOR_TABLE_ORDER* cache_color_table = calloc(1, sizeof(CACHE_COLOR_TABLE_ORDER)); if (!cache_color_table) goto fail; if (Stream_GetRemainingLength(s) < 3) goto fail; Stream_Read_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Read_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ if (cache_color_table->numberColors != 256) { /* This field MUST be set to 256 */ goto fail; } if (Stream_GetRemainingLength(s) < cache_color_table->numberColors * 4) goto fail; colorTable = (UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) update_read_color_quad(s, &colorTable[i]); return cache_color_table; fail: free_cache_color_table_order(update->context, cache_color_table); return NULL; } int update_approximate_cache_color_table_order(const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { return 16 + (256 * 4); } BOOL update_write_cache_color_table_order(wStream* s, const CACHE_COLOR_TABLE_ORDER* cache_color_table, UINT16* flags) { int i, inf; UINT32* colorTable; if (cache_color_table->numberColors != 256) return FALSE; inf = update_approximate_cache_color_table_order(cache_color_table, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_color_table->cacheIndex); /* cacheIndex (1 byte) */ Stream_Write_UINT16(s, cache_color_table->numberColors); /* numberColors (2 bytes) */ colorTable = 
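/* Layout note: a color table order is fixed-size by specification, exactly
 * 256 colorTable entries of 4 bytes each (hence the numberColors == 256
 * check on the read path and the 16 + 256 * 4 size approximation above). */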
(UINT32*)&cache_color_table->colorTable; for (i = 0; i < (int)cache_color_table->numberColors; i++) { update_write_color_quad(s, colorTable[i]); } return TRUE; } static CACHE_GLYPH_ORDER* update_read_cache_glyph_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_ORDER* cache_glyph_order = calloc(1, sizeof(CACHE_GLYPH_ORDER)); if (!cache_glyph_order || !update || !s) goto fail; if (Stream_GetRemainingLength(s) < 2) goto fail; Stream_Read_UINT8(s, cache_glyph_order->cacheId); /* cacheId (1 byte) */ Stream_Read_UINT8(s, cache_glyph_order->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < cache_glyph_order->cGlyphs; i++) { GLYPH_DATA* glyph = &cache_glyph_order->glyphData[i]; if (Stream_GetRemainingLength(s) < 10) goto fail; Stream_Read_UINT16(s, glyph->cacheIndex); Stream_Read_INT16(s, glyph->x); Stream_Read_INT16(s, glyph->y); Stream_Read_UINT16(s, glyph->cx); Stream_Read_UINT16(s, glyph->cy); glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_order->cGlyphs > 0)) { cache_glyph_order->unicodeCharacters = calloc(cache_glyph_order->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_order->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_order->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_order->unicodeCharacters, cache_glyph_order->cGlyphs); } return cache_glyph_order; fail: free_cache_glyph_order(update->context, cache_glyph_order); return NULL; } int update_approximate_cache_glyph_order(const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { return 2 + cache_glyph->cGlyphs * 32; } BOOL update_write_cache_glyph_order(wStream* s, const CACHE_GLYPH_ORDER* cache_glyph, UINT16* flags) { int i, inf; INT16 lsi16; const GLYPH_DATA* glyph; inf = update_approximate_cache_glyph_order(cache_glyph, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT8(s, cache_glyph->cacheId); /* cacheId (1 byte) */ Stream_Write_UINT8(s, cache_glyph->cGlyphs); /* cGlyphs (1 byte) */ for (i = 0; i < (int)cache_glyph->cGlyphs; i++) { UINT32 cb; glyph = &cache_glyph->glyphData[i]; Stream_Write_UINT16(s, glyph->cacheIndex); /* cacheIndex (2 bytes) */ lsi16 = glyph->x; Stream_Write_UINT16(s, lsi16); /* x (2 bytes) */ lsi16 = glyph->y; Stream_Write_UINT16(s, lsi16); /* y (2 bytes) */ Stream_Write_UINT16(s, glyph->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, glyph->cy); /* cy (2 bytes) */ cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph->cGlyphs * 2); } return TRUE; } static CACHE_GLYPH_V2_ORDER* update_read_cache_glyph_v2_order(rdpUpdate* update, wStream* s, UINT16 flags) { UINT32 i; CACHE_GLYPH_V2_ORDER* cache_glyph_v2 = calloc(1, sizeof(CACHE_GLYPH_V2_ORDER)); if (!cache_glyph_v2) goto fail; cache_glyph_v2->cacheId = (flags & 0x000F); cache_glyph_v2->flags = (flags & 0x00F0) >> 4; cache_glyph_v2->cGlyphs = (flags & 0xFF00) >> 8; for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; if (Stream_GetRemainingLength(s) < 1) goto fail; Stream_Read_UINT8(s, glyph->cacheIndex); if (!update_read_2byte_signed(s, &glyph->x) || !update_read_2byte_signed(s, &glyph->y) || !update_read_2byte_unsigned(s, &glyph->cx) || !update_read_2byte_unsigned(s, &glyph->cy)) { goto fail; } glyph->cb = ((glyph->cx + 7) / 8) * glyph->cy; glyph->cb += ((glyph->cb % 4) > 0) ? 4 - (glyph->cb % 4) : 0; if (Stream_GetRemainingLength(s) < glyph->cb) goto fail; glyph->aj = (BYTE*)malloc(glyph->cb); if (!glyph->aj) goto fail; Stream_Read(s, glyph->aj, glyph->cb); } if ((flags & CG_GLYPH_UNICODE_PRESENT) && (cache_glyph_v2->cGlyphs > 0)) { cache_glyph_v2->unicodeCharacters = calloc(cache_glyph_v2->cGlyphs, sizeof(WCHAR)); if (!cache_glyph_v2->unicodeCharacters) goto fail; if (Stream_GetRemainingLength(s) < sizeof(WCHAR) * cache_glyph_v2->cGlyphs) goto fail; Stream_Read_UTF16_String(s, cache_glyph_v2->unicodeCharacters, cache_glyph_v2->cGlyphs); } return cache_glyph_v2; fail: free_cache_glyph_v2_order(update->context, cache_glyph_v2); return NULL; } int update_approximate_cache_glyph_v2_order(const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { return 8 + cache_glyph_v2->cGlyphs * 32; } BOOL update_write_cache_glyph_v2_order(wStream* s, const CACHE_GLYPH_V2_ORDER* cache_glyph_v2, UINT16* flags) { UINT32 i, inf; inf = update_approximate_cache_glyph_v2_order(cache_glyph_v2, flags); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; *flags = (cache_glyph_v2->cacheId & 0x000F) | ((cache_glyph_v2->flags & 0x000F) << 4) | ((cache_glyph_v2->cGlyphs & 0x00FF) << 8); for (i = 0; i < cache_glyph_v2->cGlyphs; i++) { UINT32 cb; const GLYPH_DATA_V2* glyph = &cache_glyph_v2->glyphData[i]; Stream_Write_UINT8(s, glyph->cacheIndex); if (!update_write_2byte_signed(s, glyph->x) || !update_write_2byte_signed(s, glyph->y) || !update_write_2byte_unsigned(s, glyph->cx) || !update_write_2byte_unsigned(s, glyph->cy)) { return FALSE; } cb = ((glyph->cx + 7) / 8) * glyph->cy; cb += ((cb % 4) > 0) ? 
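/* V2 header packing (worked example, hypothetical value): the 16-bit flags
 * word encodes cacheId in bits 0-3, the glyph flags in bits 4-7 and cGlyphs
 * in bits 8-15, so flags = 0x0312 means cacheId = 2, flags = 1 and
 * cGlyphs = 3. */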
4 - (cb % 4) : 0; Stream_Write(s, glyph->aj, cb); } if (*flags & CG_GLYPH_UNICODE_PRESENT) { Stream_Zero(s, cache_glyph_v2->cGlyphs * 2); } return TRUE; } static BOOL update_decompress_brush(wStream* s, BYTE* output, size_t outSize, BYTE bpp) { INT32 x, y, k; BYTE byte = 0; const BYTE* palette = Stream_Pointer(s) + 16; const INT32 bytesPerPixel = ((bpp + 1) / 8); if (!Stream_SafeSeek(s, 16ULL + 7ULL * bytesPerPixel)) // 64 / 4 return FALSE; for (y = 7; y >= 0; y--) { for (x = 0; x < 8; x++) { UINT32 index; if ((x % 4) == 0) Stream_Read_UINT8(s, byte); index = ((byte >> ((3 - (x % 4)) * 2)) & 0x03); for (k = 0; k < bytesPerPixel; k++) { const size_t dstIndex = ((y * 8 + x) * bytesPerPixel) + k; const size_t srcIndex = (index * bytesPerPixel) + k; if (dstIndex >= outSize) return FALSE; output[dstIndex] = palette[srcIndex]; } } } return TRUE; } static BOOL update_compress_brush(wStream* s, const BYTE* input, BYTE bpp) { return FALSE; } static CACHE_BRUSH_ORDER* update_read_cache_brush_order(rdpUpdate* update, wStream* s, UINT16 flags) { int i; BOOL rc; BYTE iBitmapFormat; BOOL compressed = FALSE; CACHE_BRUSH_ORDER* cache_brush = calloc(1, sizeof(CACHE_BRUSH_ORDER)); if (!cache_brush) goto fail; if (Stream_GetRemainingLength(s) < 6) goto fail; Stream_Read_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Read_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ cache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc); if (!rc) goto fail; Stream_Read_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Read_UINT8(s, cache_brush->cy); /* cy (1 byte) */ Stream_Read_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Read_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_Print(update->log, WLOG_ERROR, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); goto fail; } /* rows are encoded in reverse order */ if (Stream_GetRemainingLength(s) < 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_decompress_brush(s, cache_brush->data, sizeof(cache_brush->data), cache_brush->bpp)) goto fail; } else { /* uncompressed brush */ UINT32 scanline = (cache_brush->bpp / 8) * 8; if (Stream_GetRemainingLength(s) < scanline * 8) goto fail; for (i = 7; i >= 0; i--) { Stream_Read(s, &cache_brush->data[i * scanline], scanline); } } } } return cache_brush; fail: free_cache_brush_order(update->context, cache_brush); return NULL; } int update_approximate_cache_brush_order(const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { return 64; } BOOL update_write_cache_brush_order(wStream* s, const CACHE_BRUSH_ORDER* cache_brush, UINT16* flags) { int i; BYTE iBitmapFormat; BOOL rc; BOOL compressed = FALSE; if (!Stream_EnsureRemainingCapacity(s, update_approximate_cache_brush_order(cache_brush, flags))) return FALSE; iBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc); if (!rc) return FALSE; Stream_Write_UINT8(s, cache_brush->index); /* cacheEntry (1 byte) */ Stream_Write_UINT8(s, iBitmapFormat); /* iBitmapFormat (1 byte) */ Stream_Write_UINT8(s, cache_brush->cx); /* cx (1 byte) */ Stream_Write_UINT8(s, 
cache_brush->cy); /* cy (1 byte) */ Stream_Write_UINT8(s, cache_brush->style); /* style (1 byte) */ Stream_Write_UINT8(s, cache_brush->length); /* iBytes (1 byte) */ if ((cache_brush->cx == 8) && (cache_brush->cy == 8)) { if (cache_brush->bpp == 1) { if (cache_brush->length != 8) { WLog_ERR(TAG, "incompatible 1bpp brush of length:%" PRIu32 "", cache_brush->length); return FALSE; } for (i = 7; i >= 0; i--) { Stream_Write_UINT8(s, cache_brush->data[i]); } } else { if ((iBitmapFormat == BMF_8BPP) && (cache_brush->length == 20)) compressed = TRUE; else if ((iBitmapFormat == BMF_16BPP) && (cache_brush->length == 24)) compressed = TRUE; else if ((iBitmapFormat == BMF_32BPP) && (cache_brush->length == 32)) compressed = TRUE; if (compressed != FALSE) { /* compressed brush */ if (!update_compress_brush(s, cache_brush->data, cache_brush->bpp)) return FALSE; } else { /* uncompressed brush */ int scanline = (cache_brush->bpp / 8) * 8; for (i = 7; i >= 0; i--) { Stream_Write(s, &cache_brush->data[i * scanline], scanline); } } } } return TRUE; } /* Alternate Secondary Drawing Orders */ static BOOL update_read_create_offscreen_bitmap_order(wStream* s, CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; OFFSCREEN_DELETE_LIST* deleteList; if (Stream_GetRemainingLength(s) < 6) return FALSE; Stream_Read_UINT16(s, flags); /* flags (2 bytes) */ create_offscreen_bitmap->id = flags & 0x7FFF; deleteListPresent = (flags & 0x8000) ? TRUE : FALSE; Stream_Read_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Read_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ deleteList = &(create_offscreen_bitmap->deleteList); if (deleteListPresent) { UINT32 i; if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, deleteList->cIndices); if (deleteList->cIndices > deleteList->sIndices) { UINT16* new_indices; new_indices = (UINT16*)realloc(deleteList->indices, deleteList->cIndices * 2); if (!new_indices) return FALSE; deleteList->sIndices = deleteList->cIndices; deleteList->indices = new_indices; } if (Stream_GetRemainingLength(s) < 2 * deleteList->cIndices) return FALSE; for (i = 0; i < deleteList->cIndices; i++) { Stream_Read_UINT16(s, deleteList->indices[i]); } } else { deleteList->cIndices = 0; } return TRUE; } int update_approximate_create_offscreen_bitmap_order( const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { const OFFSCREEN_DELETE_LIST* deleteList = &(create_offscreen_bitmap->deleteList); return 32 + deleteList->cIndices * 2; } BOOL update_write_create_offscreen_bitmap_order( wStream* s, const CREATE_OFFSCREEN_BITMAP_ORDER* create_offscreen_bitmap) { UINT16 flags; BOOL deleteListPresent; const OFFSCREEN_DELETE_LIST* deleteList; if (!Stream_EnsureRemainingCapacity( s, update_approximate_create_offscreen_bitmap_order(create_offscreen_bitmap))) return FALSE; deleteList = &(create_offscreen_bitmap->deleteList); flags = create_offscreen_bitmap->id & 0x7FFF; deleteListPresent = (deleteList->cIndices > 0) ? 
TRUE : FALSE; if (deleteListPresent) flags |= 0x8000; Stream_Write_UINT16(s, flags); /* flags (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cx); /* cx (2 bytes) */ Stream_Write_UINT16(s, create_offscreen_bitmap->cy); /* cy (2 bytes) */ if (deleteListPresent) { int i; Stream_Write_UINT16(s, deleteList->cIndices); for (i = 0; i < (int)deleteList->cIndices; i++) { Stream_Write_UINT16(s, deleteList->indices[i]); } } return TRUE; } static BOOL update_read_switch_surface_order(wStream* s, SWITCH_SURFACE_ORDER* switch_surface) { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } int update_approximate_switch_surface_order(const SWITCH_SURFACE_ORDER* switch_surface) { return 2; } BOOL update_write_switch_surface_order(wStream* s, const SWITCH_SURFACE_ORDER* switch_surface) { int inf = update_approximate_switch_surface_order(switch_surface); if (!Stream_EnsureRemainingCapacity(s, inf)) return FALSE; Stream_Write_UINT16(s, switch_surface->bitmapId); /* bitmapId (2 bytes) */ return TRUE; } static BOOL update_read_create_nine_grid_bitmap_order(wStream* s, CREATE_NINE_GRID_BITMAP_ORDER* create_nine_grid_bitmap) { NINE_GRID_BITMAP_INFO* nineGridInfo; if (Stream_GetRemainingLength(s) < 19) return FALSE; Stream_Read_UINT8(s, create_nine_grid_bitmap->bitmapBpp); /* bitmapBpp (1 byte) */ if ((create_nine_grid_bitmap->bitmapBpp < 1) || (create_nine_grid_bitmap->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", create_nine_grid_bitmap->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, create_nine_grid_bitmap->bitmapId); /* bitmapId (2 bytes) */ nineGridInfo = &(create_nine_grid_bitmap->nineGridInfo); Stream_Read_UINT32(s, nineGridInfo->flFlags); /* flFlags (4 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulLeftWidth); /* ulLeftWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulRightWidth); /* ulRightWidth (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulTopHeight); /* ulTopHeight (2 bytes) */ Stream_Read_UINT16(s, nineGridInfo->ulBottomHeight); /* ulBottomHeight (2 bytes) */ update_read_colorref(s, &nineGridInfo->crTransparent); /* crTransparent (4 bytes) */ return TRUE; } static BOOL update_read_frame_marker_order(wStream* s, FRAME_MARKER_ORDER* frame_marker) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, frame_marker->action); /* action (4 bytes) */ return TRUE; } static BOOL update_read_stream_bitmap_first_order(wStream* s, STREAM_BITMAP_FIRST_ORDER* stream_bitmap_first) { if (Stream_GetRemainingLength(s) < 10) // 8 + 2 at least return FALSE; Stream_Read_UINT8(s, stream_bitmap_first->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT8(s, stream_bitmap_first->bitmapBpp); /* bitmapBpp (1 byte) */ if ((stream_bitmap_first->bitmapBpp < 1) || (stream_bitmap_first->bitmapBpp > 32)) { WLog_ERR(TAG, "invalid bpp value %" PRIu32 "", stream_bitmap_first->bitmapBpp); return FALSE; } Stream_Read_UINT16(s, stream_bitmap_first->bitmapType); /* bitmapType (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapWidth); /* bitmapWidth (2 bytes) */ Stream_Read_UINT16(s, stream_bitmap_first->bitmapHeight); /* bitmapHeigth (2 bytes) */ if (stream_bitmap_first->bitmapFlags & STREAM_BITMAP_V2) { if (Stream_GetRemainingLength(s) < 4) return FALSE; Stream_Read_UINT32(s, stream_bitmap_first->bitmapSize); /* bitmapSize (4 bytes) */ } else { if (Stream_GetRemainingLength(s) < 2) return FALSE; Stream_Read_UINT16(s, stream_bitmap_first->bitmapSize); /* bitmapSize (2 
bytes) */ } FIELD_SKIP_BUFFER16( s, stream_bitmap_first->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_stream_bitmap_next_order(wStream* s, STREAM_BITMAP_NEXT_ORDER* stream_bitmap_next) { if (Stream_GetRemainingLength(s) < 5) return FALSE; Stream_Read_UINT8(s, stream_bitmap_next->bitmapFlags); /* bitmapFlags (1 byte) */ Stream_Read_UINT16(s, stream_bitmap_next->bitmapType); /* bitmapType (2 bytes) */ FIELD_SKIP_BUFFER16( s, stream_bitmap_next->bitmapBlockSize); /* bitmapBlockSize(2 bytes) + bitmapBlock */ return TRUE; } static BOOL update_read_draw_gdiplus_first_order(wStream* s, DRAW_GDIPLUS_FIRST_ORDER* draw_gdiplus_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_first->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_next_order(wStream* s, DRAW_GDIPLUS_NEXT_ORDER* draw_gdiplus_next) { if (Stream_GetRemainingLength(s) < 3) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_end_order(wStream* s, DRAW_GDIPLUS_END_ORDER* draw_gdiplus_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Seek_UINT8(s); /* pad1Octet (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalSize); /* cbTotalSize (4 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_end->cbTotalEmfSize); /* cbTotalEmfSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_end->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_first_order(wStream* s, DRAW_GDIPLUS_CACHE_FIRST_ORDER* draw_gdiplus_cache_first) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_first->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cacheIndex); /* cacheIndex (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_first->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_first->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_first->cbSize); /* emfRecords */ } static BOOL update_read_draw_gdiplus_cache_next_order(wStream* s, DRAW_GDIPLUS_CACHE_NEXT_ORDER* draw_gdiplus_cache_next) { if (Stream_GetRemainingLength(s) < 7) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_next->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_next->cacheIndex); /* cacheIndex (2 bytes) */ FIELD_SKIP_BUFFER16(s, draw_gdiplus_cache_next->cbSize); /* cbSize(2 bytes) + emfRecords */ return TRUE; } static BOOL update_read_draw_gdiplus_cache_end_order(wStream* s, DRAW_GDIPLUS_CACHE_END_ORDER* draw_gdiplus_cache_end) { if (Stream_GetRemainingLength(s) < 11) return FALSE; Stream_Read_UINT8(s, draw_gdiplus_cache_end->flags); /* flags (1 byte) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheType); /* cacheType (2 bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cacheIndex); /* cacheIndex (2 
bytes) */ Stream_Read_UINT16(s, draw_gdiplus_cache_end->cbSize); /* cbSize (2 bytes) */ Stream_Read_UINT32(s, draw_gdiplus_cache_end->cbTotalSize); /* cbTotalSize (4 bytes) */ return Stream_SafeSeek(s, draw_gdiplus_cache_end->cbSize); /* emfRecords */ } static BOOL update_read_field_flags(wStream* s, UINT32* fieldFlags, BYTE flags, BYTE fieldBytes) { int i; BYTE byte; if (flags & ORDER_ZERO_FIELD_BYTE_BIT0) fieldBytes--; if (flags & ORDER_ZERO_FIELD_BYTE_BIT1) { if (fieldBytes > 1) fieldBytes -= 2; else fieldBytes = 0; } if (Stream_GetRemainingLength(s) < fieldBytes) return FALSE; *fieldFlags = 0; for (i = 0; i < fieldBytes; i++) { Stream_Read_UINT8(s, byte); *fieldFlags |= byte << (i * 8); } return TRUE; } BOOL update_write_field_flags(wStream* s, UINT32 fieldFlags, BYTE flags, BYTE fieldBytes) { BYTE byte; if (fieldBytes == 1) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 2) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); } else if (fieldBytes == 3) { byte = fieldFlags & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 8) & 0xFF; Stream_Write_UINT8(s, byte); byte = (fieldFlags >> 16) & 0xFF; Stream_Write_UINT8(s, byte); } else { return FALSE; } return TRUE; } static BOOL update_read_bounds(wStream* s, rdpBounds* bounds) { BYTE flags; if (Stream_GetRemainingLength(s) < 1) return FALSE; Stream_Read_UINT8(s, flags); /* field flags */ if (flags & BOUND_LEFT) { if (!update_read_coord(s, &bounds->left, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_LEFT) { if (!update_read_coord(s, &bounds->left, TRUE)) return FALSE; } if (flags & BOUND_TOP) { if (!update_read_coord(s, &bounds->top, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_TOP) { if (!update_read_coord(s, &bounds->top, TRUE)) return FALSE; } if (flags & BOUND_RIGHT) { if (!update_read_coord(s, &bounds->right, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_RIGHT) { if (!update_read_coord(s, &bounds->right, TRUE)) return FALSE; } if (flags & BOUND_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, FALSE)) return FALSE; } else if (flags & BOUND_DELTA_BOTTOM) { if (!update_read_coord(s, &bounds->bottom, TRUE)) return FALSE; } return TRUE; } BOOL update_write_bounds(wStream* s, ORDER_INFO* orderInfo) { if (!(orderInfo->controlFlags & ORDER_BOUNDS)) return TRUE; if (orderInfo->controlFlags & ORDER_ZERO_BOUNDS_DELTAS) return TRUE; Stream_Write_UINT8(s, orderInfo->boundsFlags); /* field flags */ if (orderInfo->boundsFlags & BOUND_LEFT) { if (!update_write_coord(s, orderInfo->bounds.left)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_LEFT) { } if (orderInfo->boundsFlags & BOUND_TOP) { if (!update_write_coord(s, orderInfo->bounds.top)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_TOP) { } if (orderInfo->boundsFlags & BOUND_RIGHT) { if (!update_write_coord(s, orderInfo->bounds.right)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_RIGHT) { } if (orderInfo->boundsFlags & BOUND_BOTTOM) { if (!update_write_coord(s, orderInfo->bounds.bottom)) return FALSE; } else if (orderInfo->boundsFlags & BOUND_DELTA_BOTTOM) { } return TRUE; } static BOOL read_primary_order(wLog* log, const char* orderName, wStream* s, const ORDER_INFO* orderInfo, rdpPrimaryUpdate* primary) { BOOL rc = FALSE; if (!s || !orderInfo || !primary || !orderName) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: rc = update_read_dstblt_order(s, orderInfo, &(primary->dstblt)); break; case 
ORDER_TYPE_PATBLT: rc = update_read_patblt_order(s, orderInfo, &(primary->patblt)); break; case ORDER_TYPE_SCRBLT: rc = update_read_scrblt_order(s, orderInfo, &(primary->scrblt)); break; case ORDER_TYPE_OPAQUE_RECT: rc = update_read_opaque_rect_order(s, orderInfo, &(primary->opaque_rect)); break; case ORDER_TYPE_DRAW_NINE_GRID: rc = update_read_draw_nine_grid_order(s, orderInfo, &(primary->draw_nine_grid)); break; case ORDER_TYPE_MULTI_DSTBLT: rc = update_read_multi_dstblt_order(s, orderInfo, &(primary->multi_dstblt)); break; case ORDER_TYPE_MULTI_PATBLT: rc = update_read_multi_patblt_order(s, orderInfo, &(primary->multi_patblt)); break; case ORDER_TYPE_MULTI_SCRBLT: rc = update_read_multi_scrblt_order(s, orderInfo, &(primary->multi_scrblt)); break; case ORDER_TYPE_MULTI_OPAQUE_RECT: rc = update_read_multi_opaque_rect_order(s, orderInfo, &(primary->multi_opaque_rect)); break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: rc = update_read_multi_draw_nine_grid_order(s, orderInfo, &(primary->multi_draw_nine_grid)); break; case ORDER_TYPE_LINE_TO: rc = update_read_line_to_order(s, orderInfo, &(primary->line_to)); break; case ORDER_TYPE_POLYLINE: rc = update_read_polyline_order(s, orderInfo, &(primary->polyline)); break; case ORDER_TYPE_MEMBLT: rc = update_read_memblt_order(s, orderInfo, &(primary->memblt)); break; case ORDER_TYPE_MEM3BLT: rc = update_read_mem3blt_order(s, orderInfo, &(primary->mem3blt)); break; case ORDER_TYPE_SAVE_BITMAP: rc = update_read_save_bitmap_order(s, orderInfo, &(primary->save_bitmap)); break; case ORDER_TYPE_GLYPH_INDEX: rc = update_read_glyph_index_order(s, orderInfo, &(primary->glyph_index)); break; case ORDER_TYPE_FAST_INDEX: rc = update_read_fast_index_order(s, orderInfo, &(primary->fast_index)); break; case ORDER_TYPE_FAST_GLYPH: rc = update_read_fast_glyph_order(s, orderInfo, &(primary->fast_glyph)); break; case ORDER_TYPE_POLYGON_SC: rc = update_read_polygon_sc_order(s, orderInfo, &(primary->polygon_sc)); break; case ORDER_TYPE_POLYGON_CB: rc = update_read_polygon_cb_order(s, orderInfo, &(primary->polygon_cb)); break; case ORDER_TYPE_ELLIPSE_SC: rc = update_read_ellipse_sc_order(s, orderInfo, &(primary->ellipse_sc)); break; case ORDER_TYPE_ELLIPSE_CB: rc = update_read_ellipse_cb_order(s, orderInfo, &(primary->ellipse_cb)); break; default: WLog_Print(log, WLOG_WARN, "Primary Drawing Order %s not supported, ignoring", orderName); rc = TRUE; break; } if (!rc) { WLog_Print(log, WLOG_ERROR, "%s - update_read_dstblt_order() failed", orderName); return FALSE; } return TRUE; } static BOOL update_recv_primary_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE field; BOOL rc = FALSE; rdpContext* context = update->context; rdpPrimaryUpdate* primary = update->primary; ORDER_INFO* orderInfo = &(primary->order_info); rdpSettings* settings = context->settings; const char* orderName; if (flags & ORDER_TYPE_CHANGE) { if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, orderInfo->orderType); /* orderType (1 byte) */ } orderName = primary_order_string(orderInfo->orderType); if (!check_primary_order_supported(update->log, settings, orderInfo->orderType, orderName)) return FALSE; field = get_primary_drawing_order_field_bytes(orderInfo->orderType, &rc); if (!rc) return FALSE; if (!update_read_field_flags(s, &(orderInfo->fieldFlags), flags, field)) { WLog_Print(update->log, WLOG_ERROR, "update_read_field_flags() failed"); return FALSE; } if (flags & ORDER_BOUNDS) { if (!(flags & 
ORDER_ZERO_BOUNDS_DELTAS)) { if (!update_read_bounds(s, &orderInfo->bounds)) { WLog_Print(update->log, WLOG_ERROR, "update_read_bounds() failed"); return FALSE; } } rc = IFCALLRESULT(FALSE, update->SetBounds, context, &orderInfo->bounds); if (!rc) return FALSE; } orderInfo->deltaCoordinates = (flags & ORDER_DELTA_COORDINATES) ? TRUE : FALSE; if (!read_primary_order(update->log, orderName, s, orderInfo, primary)) return FALSE; switch (orderInfo->orderType) { case ORDER_TYPE_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->dstblt.bRop), gdi_rop3_code(primary->dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->DstBlt, context, &primary->dstblt); } break; case ORDER_TYPE_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->patblt.bRop), gdi_rop3_code(primary->patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->PatBlt, context, &primary->patblt); } break; case ORDER_TYPE_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->scrblt.bRop), gdi_rop3_code(primary->scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->ScrBlt, context, &primary->scrblt); } break; case ORDER_TYPE_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->OpaqueRect, context, &primary->opaque_rect); } break; case ORDER_TYPE_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->DrawNineGrid, context, &primary->draw_nine_grid); } break; case ORDER_TYPE_MULTI_DSTBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_dstblt.bRop), gdi_rop3_code(primary->multi_dstblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiDstBlt, context, &primary->multi_dstblt); } break; case ORDER_TYPE_MULTI_PATBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_patblt.bRop), gdi_rop3_code(primary->multi_patblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiPatBlt, context, &primary->multi_patblt); } break; case ORDER_TYPE_MULTI_SCRBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->multi_scrblt.bRop), gdi_rop3_code(primary->multi_scrblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MultiScrBlt, context, &primary->multi_scrblt); } break; case ORDER_TYPE_MULTI_OPAQUE_RECT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiOpaqueRect, context, &primary->multi_opaque_rect); } break; case ORDER_TYPE_MULTI_DRAW_NINE_GRID: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->MultiDrawNineGrid, context, &primary->multi_draw_nine_grid); } break; case ORDER_TYPE_LINE_TO: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->LineTo, context, &primary->line_to); } break; case ORDER_TYPE_POLYLINE: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->Polyline, context, &primary->polyline); } break; case ORDER_TYPE_MEMBLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing 
Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->memblt.bRop), gdi_rop3_code(primary->memblt.bRop)); rc = IFCALLRESULT(FALSE, primary->MemBlt, context, &primary->memblt); } break; case ORDER_TYPE_MEM3BLT: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s rop=%s [0x%08" PRIx32 "]", orderName, gdi_rop3_code_string(primary->mem3blt.bRop), gdi_rop3_code(primary->mem3blt.bRop)); rc = IFCALLRESULT(FALSE, primary->Mem3Blt, context, &primary->mem3blt); } break; case ORDER_TYPE_SAVE_BITMAP: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->SaveBitmap, context, &primary->save_bitmap); } break; case ORDER_TYPE_GLYPH_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->GlyphIndex, context, &primary->glyph_index); } break; case ORDER_TYPE_FAST_INDEX: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastIndex, context, &primary->fast_index); } break; case ORDER_TYPE_FAST_GLYPH: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->FastGlyph, context, &primary->fast_glyph); } break; case ORDER_TYPE_POLYGON_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonSC, context, &primary->polygon_sc); } break; case ORDER_TYPE_POLYGON_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->PolygonCB, context, &primary->polygon_cb); } break; case ORDER_TYPE_ELLIPSE_SC: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseSC, context, &primary->ellipse_sc); } break; case ORDER_TYPE_ELLIPSE_CB: { WLog_Print(update->log, WLOG_DEBUG, "Primary Drawing Order %s", orderName); rc = IFCALLRESULT(FALSE, primary->EllipseCB, context, &primary->ellipse_cb); } break; default: WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s not supported", orderName); break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Primary Drawing Order %s failed", orderName); return FALSE; } if (flags & ORDER_BOUNDS) { rc = IFCALLRESULT(FALSE, update->SetBounds, context, NULL); } return rc; } static BOOL update_recv_secondary_order(rdpUpdate* update, wStream* s, BYTE flags) { BOOL rc = FALSE; size_t start, end, diff; BYTE orderType; UINT16 extraFlags; UINT16 orderLength; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpSecondaryUpdate* secondary = update->secondary; const char* name; if (Stream_GetRemainingLength(s) < 5) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 5"); return FALSE; } Stream_Read_UINT16(s, orderLength); /* orderLength (2 bytes) */ Stream_Read_UINT16(s, extraFlags); /* extraFlags (2 bytes) */ Stream_Read_UINT8(s, orderType); /* orderType (1 byte) */ if (Stream_GetRemainingLength(s) < orderLength + 7U) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) %" PRIuz " < %" PRIu16, Stream_GetRemainingLength(s), orderLength + 7); return FALSE; } start = Stream_GetPosition(s); name = secondary_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Secondary Drawing Order %s", name); if (!check_secondary_order_supported(update->log, settings, orderType, name)) return FALSE; switch (orderType) { case ORDER_TYPE_BITMAP_UNCOMPRESSED: case ORDER_TYPE_CACHE_BITMAP_COMPRESSED: { 
const BOOL compressed = (orderType == ORDER_TYPE_CACHE_BITMAP_COMPRESSED); CACHE_BITMAP_ORDER* order = update_read_cache_bitmap_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmap, context, order); free_cache_bitmap_order(context, order); } } break; case ORDER_TYPE_BITMAP_UNCOMPRESSED_V2: case ORDER_TYPE_BITMAP_COMPRESSED_V2: { const BOOL compressed = (orderType == ORDER_TYPE_BITMAP_COMPRESSED_V2); CACHE_BITMAP_V2_ORDER* order = update_read_cache_bitmap_v2_order(update, s, compressed, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV2, context, order); free_cache_bitmap_v2_order(context, order); } } break; case ORDER_TYPE_BITMAP_COMPRESSED_V3: { CACHE_BITMAP_V3_ORDER* order = update_read_cache_bitmap_v3_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBitmapV3, context, order); free_cache_bitmap_v3_order(context, order); } } break; case ORDER_TYPE_CACHE_COLOR_TABLE: { CACHE_COLOR_TABLE_ORDER* order = update_read_cache_color_table_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheColorTable, context, order); free_cache_color_table_order(context, order); } } break; case ORDER_TYPE_CACHE_GLYPH: { switch (settings->GlyphSupportLevel) { case GLYPH_SUPPORT_PARTIAL: case GLYPH_SUPPORT_FULL: { CACHE_GLYPH_ORDER* order = update_read_cache_glyph_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyph, context, order); free_cache_glyph_order(context, order); } } break; case GLYPH_SUPPORT_ENCODE: { CACHE_GLYPH_V2_ORDER* order = update_read_cache_glyph_v2_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheGlyphV2, context, order); free_cache_glyph_v2_order(context, order); } } break; case GLYPH_SUPPORT_NONE: default: break; } } break; case ORDER_TYPE_CACHE_BRUSH: /* [MS-RDPEGDI] 2.2.2.2.1.2.7 Cache Brush (CACHE_BRUSH_ORDER) */ { CACHE_BRUSH_ORDER* order = update_read_cache_brush_order(update, s, extraFlags); if (order) { rc = IFCALLRESULT(FALSE, secondary->CacheBrush, context, order); free_cache_brush_order(context, order); } } break; default: WLog_Print(update->log, WLOG_WARN, "SECONDARY ORDER %s not supported", name); break; } if (!rc) { WLog_Print(update->log, WLOG_ERROR, "SECONDARY ORDER %s failed", name); } start += orderLength + 7; end = Stream_GetPosition(s); if (start > end) { WLog_Print(update->log, WLOG_WARN, "SECONDARY_ORDER %s: read %" PRIuz "bytes too much", name, end - start); return FALSE; } diff = start - end; if (diff > 0) { WLog_Print(update->log, WLOG_DEBUG, "SECONDARY_ORDER %s: read %" PRIuz "bytes short, skipping", name, diff); Stream_Seek(s, diff); } return rc; } static BOOL read_altsec_order(wStream* s, BYTE orderType, rdpAltSecUpdate* altsec) { BOOL rc = FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: rc = update_read_create_offscreen_bitmap_order(s, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: rc = update_read_switch_surface_order(s, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: rc = update_read_create_nine_grid_bitmap_order(s, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: rc = update_read_frame_marker_order(s, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: rc = update_read_stream_bitmap_first_order(s, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: rc = update_read_stream_bitmap_next_order(s, 
&(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: rc = update_read_draw_gdiplus_first_order(s, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: rc = update_read_draw_gdiplus_next_order(s, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: rc = update_read_draw_gdiplus_end_order(s, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: rc = update_read_draw_gdiplus_cache_first_order(s, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: rc = update_read_draw_gdiplus_cache_next_order(s, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: rc = update_read_draw_gdiplus_cache_end_order(s, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: /* This order is handled elsewhere. */ rc = TRUE; break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } return rc; } static BOOL update_recv_altsec_order(rdpUpdate* update, wStream* s, BYTE flags) { BYTE orderType = flags >>= 2; /* orderType is in higher 6 bits of flags field */ BOOL rc = FALSE; rdpContext* context = update->context; rdpSettings* settings = context->settings; rdpAltSecUpdate* altsec = update->altsec; const char* orderName = altsec_order_string(orderType); WLog_Print(update->log, WLOG_DEBUG, "Alternate Secondary Drawing Order %s", orderName); if (!check_alt_order_supported(update->log, settings, orderType, orderName)) return FALSE; if (!read_altsec_order(s, orderType, altsec)) return FALSE; switch (orderType) { case ORDER_TYPE_CREATE_OFFSCREEN_BITMAP: IFCALLRET(altsec->CreateOffscreenBitmap, rc, context, &(altsec->create_offscreen_bitmap)); break; case ORDER_TYPE_SWITCH_SURFACE: IFCALLRET(altsec->SwitchSurface, rc, context, &(altsec->switch_surface)); break; case ORDER_TYPE_CREATE_NINE_GRID_BITMAP: IFCALLRET(altsec->CreateNineGridBitmap, rc, context, &(altsec->create_nine_grid_bitmap)); break; case ORDER_TYPE_FRAME_MARKER: IFCALLRET(altsec->FrameMarker, rc, context, &(altsec->frame_marker)); break; case ORDER_TYPE_STREAM_BITMAP_FIRST: IFCALLRET(altsec->StreamBitmapFirst, rc, context, &(altsec->stream_bitmap_first)); break; case ORDER_TYPE_STREAM_BITMAP_NEXT: IFCALLRET(altsec->StreamBitmapNext, rc, context, &(altsec->stream_bitmap_next)); break; case ORDER_TYPE_GDIPLUS_FIRST: IFCALLRET(altsec->DrawGdiPlusFirst, rc, context, &(altsec->draw_gdiplus_first)); break; case ORDER_TYPE_GDIPLUS_NEXT: IFCALLRET(altsec->DrawGdiPlusNext, rc, context, &(altsec->draw_gdiplus_next)); break; case ORDER_TYPE_GDIPLUS_END: IFCALLRET(altsec->DrawGdiPlusEnd, rc, context, &(altsec->draw_gdiplus_end)); break; case ORDER_TYPE_GDIPLUS_CACHE_FIRST: IFCALLRET(altsec->DrawGdiPlusCacheFirst, rc, context, &(altsec->draw_gdiplus_cache_first)); break; case ORDER_TYPE_GDIPLUS_CACHE_NEXT: IFCALLRET(altsec->DrawGdiPlusCacheNext, rc, context, &(altsec->draw_gdiplus_cache_next)); break; case ORDER_TYPE_GDIPLUS_CACHE_END: IFCALLRET(altsec->DrawGdiPlusCacheEnd, rc, context, &(altsec->draw_gdiplus_cache_end)); break; case ORDER_TYPE_WINDOW: rc = update_recv_altsec_window_order(update, s); break; case ORDER_TYPE_COMPDESK_FIRST: rc = TRUE; break; default: break; } if (!rc) { WLog_Print(update->log, WLOG_WARN, "Alternate Secondary Drawing Order %s failed", orderName); } return rc; } BOOL update_recv_order(rdpUpdate* update, wStream* s) { BOOL rc; BYTE controlFlags; if (Stream_GetRemainingLength(s) < 1) { WLog_Print(update->log, WLOG_ERROR, "Stream_GetRemainingLength(s) < 1"); return FALSE; } Stream_Read_UINT8(s, 
controlFlags); /* controlFlags (1 byte) */ if (!(controlFlags & ORDER_STANDARD)) rc = update_recv_altsec_order(update, s, controlFlags); else if (controlFlags & ORDER_SECONDARY) rc = update_recv_secondary_order(update, s, controlFlags); else rc = update_recv_primary_order(update, s, controlFlags); if (!rc) WLog_Print(update->log, WLOG_ERROR, "order flags %02" PRIx8 " failed", controlFlags); return rc; }
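Every update_read_* parser above follows the same defensive shape: compare Stream_GetRemainingLength() against the exact number of bytes the next field needs before reading it, and bail out through a single fail path that frees any partially built order. A minimal, self-contained sketch of that guard-before-read pattern follows; bstream_t and its helpers are hypothetical stand-ins for the wStream API, not FreeRDP code.

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

/* Hypothetical byte-stream reader illustrating the pattern above. */
typedef struct
{
	const uint8_t* data;
	size_t pos;
	size_t len;
} bstream_t;

static size_t bs_remaining(const bstream_t* s)
{
	return s->len - s->pos;
}

static bool bs_read_u16(bstream_t* s, uint16_t* out)
{
	if (bs_remaining(s) < 2) /* guard before every fixed-size read */
		return false;

	*out = (uint16_t)(s->data[s->pos] | (s->data[s->pos + 1] << 8));
	s->pos += 2;
	return true;
}

static bool bs_read_blob(bstream_t* s, uint8_t* dst, size_t n)
{
	if (bs_remaining(s) < n) /* guard variable-length reads too */
		return false;

	memcpy(dst, &s->data[s->pos], n);
	s->pos += n;
	return true;
}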
static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags)
{
	if (fieldFlags & ORDER_FIELD_01)
	{
		Stream_Write_UINT8(s, brush->x);
	}

	if (fieldFlags & ORDER_FIELD_02)
	{
		Stream_Write_UINT8(s, brush->y);
	}

	if (fieldFlags & ORDER_FIELD_03)
	{
		Stream_Write_UINT8(s, brush->style);
	}

	if (brush->style & CACHED_BRUSH)
	{
		brush->hatch = brush->index;
		brush->bpp = BMF_BPP[brush->style & 0x07];

		if (brush->bpp == 0)
			brush->bpp = 1;
	}

	if (fieldFlags & ORDER_FIELD_04)
	{
		Stream_Write_UINT8(s, brush->hatch);
	}

	if (fieldFlags & ORDER_FIELD_05)
	{
		brush->data = (BYTE*)brush->p8x8;
		Stream_Write_UINT8(s, brush->data[7]);
		Stream_Write_UINT8(s, brush->data[6]);
		Stream_Write_UINT8(s, brush->data[5]);
		Stream_Write_UINT8(s, brush->data[4]);
		Stream_Write_UINT8(s, brush->data[3]);
		Stream_Write_UINT8(s, brush->data[2]);
		Stream_Write_UINT8(s, brush->data[1]);
		brush->data[0] = brush->hatch;
	}

	return TRUE;
}
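The two copies of update_write_brush here are the record's before and after images of the patched function. The version above still indexes the fixed BMF_BPP table with brush->style & 0x07, which masks the index into range but cannot distinguish valid from invalid bitmap formats. The patched version below swaps the table lookup for the validating helper get_bmf_bpp() and fails the write when the style does not name a known format.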
static INLINE BOOL update_write_brush(wStream* s, rdpBrush* brush, BYTE fieldFlags)
{
	if (fieldFlags & ORDER_FIELD_01)
	{
		Stream_Write_UINT8(s, brush->x);
	}

	if (fieldFlags & ORDER_FIELD_02)
	{
		Stream_Write_UINT8(s, brush->y);
	}

	if (fieldFlags & ORDER_FIELD_03)
	{
		Stream_Write_UINT8(s, brush->style);
	}

	if (brush->style & CACHED_BRUSH)
	{
		BOOL rc;
		brush->hatch = brush->index;
		brush->bpp = get_bmf_bpp(brush->style, &rc);

		if (!rc)
			return FALSE;

		if (brush->bpp == 0)
			brush->bpp = 1;
	}

	if (fieldFlags & ORDER_FIELD_04)
	{
		Stream_Write_UINT8(s, brush->hatch);
	}

	if (fieldFlags & ORDER_FIELD_05)
	{
		brush->data = (BYTE*)brush->p8x8;
		Stream_Write_UINT8(s, brush->data[7]);
		Stream_Write_UINT8(s, brush->data[6]);
		Stream_Write_UINT8(s, brush->data[5]);
		Stream_Write_UINT8(s, brush->data[4]);
		Stream_Write_UINT8(s, brush->data[3]);
		Stream_Write_UINT8(s, brush->data[2]);
		Stream_Write_UINT8(s, brush->data[1]);
		brush->data[0] = brush->hatch;
	}

	return TRUE;
}
{'added': [(116, 'static BYTE get_cbr2_bpp(UINT32 bpp, BOOL* pValid)'), (117, '{'), (118, '\tif (pValid)'), (119, '\t\t*pValid = TRUE;'), (120, '\tswitch (bpp)'), (121, '\t{'), (122, '\t\tcase 3:'), (123, '\t\t\treturn 8;'), (124, '\t\tcase 4:'), (125, '\t\t\treturn 16;'), (126, '\t\tcase 5:'), (127, '\t\t\treturn 24;'), (128, '\t\tcase 6:'), (129, '\t\t\treturn 32;'), (130, '\t\tdefault:'), (131, '\t\t\tWLog_WARN(TAG, "Invalid bpp %" PRIu32, bpp);'), (132, '\t\t\tif (pValid)'), (133, '\t\t\t\t*pValid = FALSE;'), (134, '\t\t\treturn 0;'), (135, '\t}'), (136, '}'), (138, 'static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)'), (139, '{'), (140, '\tif (pValid)'), (141, '\t\t*pValid = TRUE;'), (142, '\tswitch (bmf)'), (143, '\t{'), (144, '\t\tcase 1:'), (145, '\t\t\treturn 1;'), (146, '\t\tcase 3:'), (147, '\t\t\treturn 8;'), (148, '\t\tcase 4:'), (149, '\t\t\treturn 16;'), (150, '\t\tcase 5:'), (151, '\t\t\treturn 24;'), (152, '\t\tcase 6:'), (153, '\t\t\treturn 32;'), (154, '\t\tdefault:'), (155, '\t\t\tWLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);'), (156, '\t\t\tif (pValid)'), (157, '\t\t\t\t*pValid = FALSE;'), (158, '\t\t\treturn 0;'), (159, '\t}'), (160, '}'), (161, 'static BYTE get_bpp_bmf(UINT32 bpp, BOOL* pValid)'), (162, '{'), (163, '\tif (pValid)'), (164, '\t\t*pValid = TRUE;'), (165, '\tswitch (bpp)'), (166, '\t{'), (167, '\t\tcase 1:'), (168, '\t\t\treturn 1;'), (169, '\t\tcase 8:'), (170, '\t\t\treturn 3;'), (171, '\t\tcase 16:'), (172, '\t\t\treturn 4;'), (173, '\t\tcase 24:'), (174, '\t\t\treturn 5;'), (175, '\t\tcase 32:'), (176, '\t\t\treturn 6;'), (177, '\t\tdefault:'), (178, '\t\t\tWLog_WARN(TAG, "Invalid color depth %" PRIu32, bpp);'), (179, '\t\t\tif (pValid)'), (180, '\t\t\t\t*pValid = FALSE;'), (181, '\t\t\treturn 0;'), (182, '\t}'), (183, '}'), (871, '\t\tBOOL rc;'), (873, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (874, '\t\tif (!rc)'), (875, '\t\t\treturn FALSE;'), (917, '\t\tBOOL rc;'), (919, '\t\tbrush->bpp = get_bmf_bpp(brush->style, &rc);'), (920, '\t\tif (!rc)'), (921, '\t\t\treturn FALSE;'), (2077, '\tBOOL rc;'), (2092, '\tcache_bitmap_v2->bitmapBpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2093, '\tif (!rc)'), (2094, '\t\tgoto fail;'), (2173, '\tBOOL rc;'), (2180, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v2->bitmapBpp, &rc);'), (2181, '\tif (!rc)'), (2182, '\t\treturn FALSE;'), (2244, '\tBOOL rc;'), (2262, '\tcache_bitmap_v3->bpp = get_cbr2_bpp(bitsPerPixelId, &rc);'), (2263, '\tif (!rc)'), (2264, '\t\tgoto fail;'), (2312, '\tBOOL rc;'), (2321, '\tbitsPerPixelId = get_bpp_bmf(cache_bitmap_v3->bpp, &rc);'), (2322, '\tif (!rc)'), (2323, '\t\treturn FALSE;'), (2647, '\tBOOL rc;'), (2661, '\tcache_brush->bpp = get_bmf_bpp(iBitmapFormat, &rc);'), (2662, '\tif (!rc)'), (2735, '\tBOOL rc;'), (2742, '\tiBitmapFormat = get_bpp_bmf(cache_brush->bpp, &rc);'), (2743, '\tif (!rc)'), (2744, '\t\treturn FALSE;')], 'deleted': [(116, 'static const BYTE CBR2_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (117, ''), (118, 'static const BYTE BPP_CBR2[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (119, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (120, ''), (121, 'static const BYTE CBR23_BPP[] = { 0, 0, 0, 8, 16, 24, 32 };'), (122, ''), (123, 'static const BYTE BPP_CBR23[] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (124, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (125, ''), (126, 'static const BYTE BMF_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };'), (128, 'static const BYTE BPP_BMF[] = { 
0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0,'), (129, '\t 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0 };'), (818, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (819, ''), (862, '\t\tbrush->bpp = BMF_BPP[brush->style & 0x07];'), (863, ''), (2033, '\tcache_bitmap_v2->bitmapBpp = CBR2_BPP[bitsPerPixelId];'), (2118, '\tbitsPerPixelId = BPP_CBR2[cache_bitmap_v2->bitmapBpp];'), (2197, '\tcache_bitmap_v3->bpp = CBR23_BPP[bitsPerPixelId];'), (2253, '\tbitsPerPixelId = BPP_CBR23[cache_bitmap_v3->bpp];'), (2590, '\tif (iBitmapFormat >= ARRAYSIZE(BMF_BPP))'), (2593, '\tcache_brush->bpp = BMF_BPP[iBitmapFormat];'), (2670, '\tiBitmapFormat = BPP_BMF[cache_brush->bpp];')]}
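The diff record above stores the patch as raw lists of added and deleted lines. Reassembled from the added hunk, the central validating helper reads as follows; the companion helpers get_cbr2_bpp() and get_bpp_bmf() in the same hunk have the same shape, each mapping between bitmap-format ids and bit depths through an exhaustive switch instead of a lookup table.

static BYTE get_bmf_bpp(UINT32 bmf, BOOL* pValid)
{
	if (pValid)
		*pValid = TRUE;
	switch (bmf)
	{
		case 1:
			return 1;
		case 3:
			return 8;
		case 4:
			return 16;
		case 5:
			return 24;
		case 6:
			return 32;
		default:
			WLog_WARN(TAG, "Invalid bmf %" PRIu32, bmf);
			if (pValid)
				*pValid = FALSE;
			return 0;
	}
}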
lines added: 98
lines deleted: 24
lines in file: 3,271
tokens in file: 19,873
lines in method: 39
tokens in method: 246
method complexity: 8
repository: https://github.com/FreeRDP/FreeRDP
CVE: CVE-2020-11096
CWE: CWE-125 (out-of-bounds read)
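The CWE-125 tag marks this record as an out-of-bounds read, and the diff shows the classic fix for it: every fixed lookup table indexed by a wire-controlled value (BMF_BPP, CBR2_BPP, BPP_BMF and friends) is deleted in favor of a helper that rejects out-of-range input. A self-contained userspace illustration of the before/after pattern follows; the names are illustrative, not FreeRDP's.

#include <stdio.h>
#include <stdint.h>

/* Pre-fix pattern: fixed table indexed straight off the wire. */
static const uint8_t FMT_BPP[] = { 0, 1, 0, 8, 16, 24, 32, 0 };

/* Any format id >= 8 reads past the end of FMT_BPP (CWE-125). */
static uint8_t bpp_unchecked(uint8_t fmt)
{
	return FMT_BPP[fmt];
}

/* Post-fix pattern: validate first, report failure explicitly. */
static uint8_t bpp_checked(uint8_t fmt, int* valid)
{
	*valid = fmt < sizeof(FMT_BPP) / sizeof(FMT_BPP[0]);
	return *valid ? FMT_BPP[fmt] : 0;
}

int main(void)
{
	int ok;
	uint8_t bpp = bpp_checked(0xFF, &ok); /* attacker-style input */
	printf("valid=%d bpp=%u\n", ok, bpp); /* prints: valid=0 bpp=0 */
	(void)bpp_unchecked; /* not called: 0xFF would really read OOB */
	return 0;
}

The real patch goes a step further than a bounds check: its switch also rejects in-range ids that do not name a valid format, which the masking in the old brush path (brush->style & 0x07) silently accepted.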
id: 984
file: gup.c
language: C
method: gup_huge_pmd
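This record's source, from the kernel's mm/gup.c, follows. One step worth unpacking before reading it is the huge-page skip arithmetic in the __get_user_pages() loop below, page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask): it computes how many base pages remain between start and the end of the huge page currently mapped (follow_pmd_mask() sets ctx.page_mask to HPAGE_PMD_NR - 1 for a PMD-sized page). A userspace sketch of the same arithmetic, assuming 4 KiB base pages and 2 MiB PMD huge pages:

#include <stdio.h>

#define PAGE_SHIFT 12    /* 4 KiB base pages */
#define HPAGE_PMD_NR 512 /* base pages per 2 MiB huge page */

int main(void)
{
	/* A user address 5 base pages into the 4th huge page. */
	unsigned long start = 3UL * 0x200000 + 5 * 4096;
	/* ctx.page_mask as set by follow_pmd_mask() for a PMD page. */
	unsigned long page_mask = HPAGE_PMD_NR - 1;
	unsigned long page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);

	/* 512 - 5 = 507 base pages left before the next huge page. */
	printf("page_increm = %lu\n", page_increm);
	return 0;
}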
#include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/memremap.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/sched/signal.h> #include <linux/rwsem.h> #include <linux/hugetlb.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "internal.h" struct follow_page_context { struct dev_pagemap *pgmap; unsigned int page_mask; }; static struct page *no_page_table(struct vm_area_struct *vma, unsigned int flags) { /* * When core dumping an enormous anonymous area that nobody * has touched so far, we don't want to allocate unnecessary pages or * page tables. Return error instead of NULL to skip handle_mm_fault, * then get_dump_page() will return NULL to leave a hole in the dump. * But we can only make this optimization where a hole would surely * be zero-filled if handle_mm_fault() actually did handle it. */ if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) return ERR_PTR(-EFAULT); return NULL; } static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { /* No page to get reference */ if (flags & FOLL_GET) return -EFAULT; if (flags & FOLL_TOUCH) { pte_t entry = *pte; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); if (!pte_same(*pte, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); } } /* Proper page table entry exists, but no corresponding struct page */ return -EEXIST; } /* * FOLL_FORCE can write to even unwritable pte's, but only * after we've gone through a COW cycle and they are dirty. */ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) { return pte_write(pte) || ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); } static struct page *follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, struct dev_pagemap **pgmap) { struct mm_struct *mm = vma->vm_mm; struct page *page; spinlock_t *ptl; pte_t *ptep, pte; retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; if (!pte_present(pte)) { swp_entry_t entry; /* * KSM's break_ksm() relies upon recognizing a ksm page * even while it is being migrated, so for that case we * need migration_entry_wait(). */ if (likely(!(flags & FOLL_MIGRATION))) goto no_page; if (pte_none(pte)) goto no_page; entry = pte_to_swp_entry(pte); if (!is_migration_entry(entry)) goto no_page; pte_unmap_unlock(ptep, ptl); migration_entry_wait(mm, pmd, address); goto retry; } if ((flags & FOLL_NUMA) && pte_protnone(pte)) goto no_page; if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) { pte_unmap_unlock(ptep, ptl); return NULL; } page = vm_normal_page(vma, address, pte); if (!page && pte_devmap(pte) && (flags & FOLL_GET)) { /* * Only return device mapping pages in the FOLL_GET case since * they are only valid while holding the pgmap reference. 
*/ *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); if (*pgmap) page = pte_page(pte); else goto no_page; } else if (unlikely(!page)) { if (flags & FOLL_DUMP) { /* Avoid special (like zero) pages in core dumps */ page = ERR_PTR(-EFAULT); goto out; } if (is_zero_pfn(pte_pfn(pte))) { page = pte_page(pte); } else { int ret; ret = follow_pfn_pte(vma, address, ptep, flags); page = ERR_PTR(ret); goto out; } } if (flags & FOLL_SPLIT && PageTransCompound(page)) { int ret; get_page(page); pte_unmap_unlock(ptep, ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (ret) return ERR_PTR(ret); goto retry; } if (flags & FOLL_GET) get_page(page); if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). */ mark_page_accessed(page); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Do not mlock pte-mapped THP */ if (PageTransCompound(page)) goto out; /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE * which might bounce very badly if there is contention. * * If the page is already locked, we don't need to * handle it now - vmscan will handle it later if and * when it attempts to reclaim the page. */ if (page->mapping && trylock_page(page)) { lru_add_drain(); /* push cached pages to LRU */ /* * Because we lock page here, and migration is * blocked by the pte's page reference, and we * know the page is still mapped, we don't even * need to check for file-cache page truncation. */ mlock_vma_page(page); unlock_page(page); } } out: pte_unmap_unlock(ptep, ptl); return page; no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return NULL; return no_page_table(vma, flags); } static struct page *follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) { pmd_t *pmd, pmdval; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); /* * The READ_ONCE() will stabilize the pmdval in a register or * on the stack so that it will stop changing under the code. 
*/ pmdval = READ_ONCE(*pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pmd(mm, address, pmd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pmd_val(pmdval)))) { page = follow_huge_pd(vma, address, __hugepd(pmd_val(pmdval)), flags, PMD_SHIFT); if (page) return page; return no_page_table(vma, flags); } retry: if (!pmd_present(pmdval)) { if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(pmdval)); if (is_pmd_migration_entry(pmdval)) pmd_migration_entry_wait(mm, pmd); pmdval = READ_ONCE(*pmd); /* * MADV_DONTNEED may convert the pmd to null because * mmap_sem is held in read mode */ if (pmd_none(pmdval)) return no_page_table(vma, flags); goto retry; } if (pmd_devmap(pmdval)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (likely(!pmd_trans_huge(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); if ((flags & FOLL_NUMA) && pmd_protnone(pmdval)) return no_page_table(vma, flags); retry_locked: ptl = pmd_lock(mm, pmd); if (unlikely(pmd_none(*pmd))) { spin_unlock(ptl); return no_page_table(vma, flags); } if (unlikely(!pmd_present(*pmd))) { spin_unlock(ptl); if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); pmd_migration_entry_wait(mm, pmd); goto retry_locked; } if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (flags & FOLL_SPLIT) { int ret; page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); ret = 0; split_huge_pmd(vma, pmd, address); if (pmd_trans_unstable(pmd)) ret = -EBUSY; } else { get_page(page); spin_unlock(ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (pmd_none(*pmd)) return no_page_table(vma, flags); } return ret ? 
ERR_PTR(ret) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(ptl); ctx->page_mask = HPAGE_PMD_NR - 1; return page; } static struct page *follow_pud_mask(struct vm_area_struct *vma, unsigned long address, p4d_t *p4dp, unsigned int flags, struct follow_page_context *ctx) { pud_t *pud; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pud = pud_offset(p4dp, address); if (pud_none(*pud)) return no_page_table(vma, flags); if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pud(mm, address, pud, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pud_val(*pud)))) { page = follow_huge_pd(vma, address, __hugepd(pud_val(*pud)), flags, PUD_SHIFT); if (page) return page; return no_page_table(vma, flags); } if (pud_devmap(*pud)) { ptl = pud_lock(mm, pud); page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (unlikely(pud_bad(*pud))) return no_page_table(vma, flags); return follow_pmd_mask(vma, address, pud, flags, ctx); } static struct page *follow_p4d_mask(struct vm_area_struct *vma, unsigned long address, pgd_t *pgdp, unsigned int flags, struct follow_page_context *ctx) { p4d_t *p4d; struct page *page; p4d = p4d_offset(pgdp, address); if (p4d_none(*p4d)) return no_page_table(vma, flags); BUILD_BUG_ON(p4d_huge(*p4d)); if (unlikely(p4d_bad(*p4d))) return no_page_table(vma, flags); if (is_hugepd(__hugepd(p4d_val(*p4d)))) { page = follow_huge_pd(vma, address, __hugepd(p4d_val(*p4d)), flags, P4D_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_pud_mask(vma, address, p4d, flags, ctx); } /** * follow_page_mask - look up a page descriptor from a user-virtual address * @vma: vm_area_struct mapping @address * @address: virtual address to look up * @flags: flags modifying lookup behaviour * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a * pointer to output page_mask * * @flags can have FOLL_ flags set, defined in <linux/mm.h> * * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches * the device's dev_pagemap metadata to avoid repeating expensive lookups. * * On output, the @ctx->page_mask is set according to the size of the page. * * Return: the mapped (struct page *), %NULL if no mapping exists, or * an error pointer if there is a mapping to something not represented * by a page descriptor (see also vm_normal_page()). 
*/ struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct follow_page_context *ctx) { pgd_t *pgd; struct page *page; struct mm_struct *mm = vma->vm_mm; ctx->page_mask = 0; /* make this handle hugepd */ page = follow_huge_addr(mm, address, flags & FOLL_WRITE); if (!IS_ERR(page)) { BUG_ON(flags & FOLL_GET); return page; } pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) return no_page_table(vma, flags); if (pgd_huge(*pgd)) { page = follow_huge_pgd(mm, address, pgd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pgd_val(*pgd)))) { page = follow_huge_pd(vma, address, __hugepd(pgd_val(*pgd)), flags, PGDIR_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_p4d_mask(vma, address, pgd, flags, ctx); } struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags) { struct follow_page_context ctx = { NULL }; struct page *page; page = follow_page_mask(vma, address, foll_flags, &ctx); if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return page; } static int get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; int ret = -EFAULT; /* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return -EFAULT; if (address > TASK_SIZE) pgd = pgd_offset_k(address); else pgd = pgd_offset_gate(mm, address); BUG_ON(pgd_none(*pgd)); p4d = p4d_offset(pgd, address); BUG_ON(p4d_none(*p4d)); pud = pud_offset(p4d, address); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, address); if (pte_none(*pte)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; *page = vm_normal_page(*vma, address, *pte); if (!*page) { if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte))) goto unmap; *page = pte_page(*pte); /* * This should never happen (a device public page in the gate * area). */ if (is_device_public_page(*page)) goto unmap; } get_page(*page); out: ret = 0; unmap: pte_unmap(pte); return ret; } /* * mmap_sem must be held on entry. If @nonblocking != NULL and * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released. * If it is, *@nonblocking will be set to 0 and -EBUSY returned. 
*/ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking) { unsigned int fault_flags = 0; vm_fault_t ret; /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) fault_flags |= FAULT_FLAG_REMOTE; if (nonblocking) fault_flags |= FAULT_FLAG_ALLOW_RETRY; if (*flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (*flags & FOLL_TRIED) { VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); fault_flags |= FAULT_FLAG_TRIED; } ret = handle_mm_fault(vma, address, fault_flags); if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, *flags); if (err) return err; BUG(); } if (tsk) { if (ret & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; } if (ret & VM_FAULT_RETRY) { if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) *nonblocking = 0; return -EBUSY; } /* * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when * necessary, even if maybe_mkwrite decided not to set pte_write. We * can thus safely do subsequent page lookups as if they were reads. * But only do so when looping for pte_write is futile: in some cases * userspace may also be wanting to write to the gotten user page, * which a read fault here might prevent (a readonly page might get * reCOWed by userspace write). */ if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) *flags |= FOLL_COW; return 0; } static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) { vm_flags_t vm_flags = vma->vm_flags; int write = (gup_flags & FOLL_WRITE); int foreign = (gup_flags & FOLL_REMOTE); if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) return -EFAULT; if (write) { if (!(vm_flags & VM_WRITE)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * We used to let the write,force case do COW in a * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could * set a breakpoint in a read-only mapping of an * executable, without corrupting the file (yet only * when that file had been opened for writing!). * Anon pages in shared mappings are surprising: now * just reject it. */ if (!is_cow_mapping(vm_flags)) return -EFAULT; } } else if (!(vm_flags & VM_READ)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * Is there actually any vma we can reach here which does not * have VM_MAYREAD set? */ if (!(vm_flags & VM_MAYREAD)) return -EFAULT; } /* * gups are always data accesses, not instruction * fetches, so execute=false here */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return -EFAULT; return 0; } /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * @nonblocking: whether waiting for disk IO or mmap_sem contention * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. 
Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held. It may be released. See below. * * __get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * __get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If * the page is written to, set_page_dirty (or set_page_dirty_lock, as * appropriate) must be called after the page is finished with, and * before put_page is called. * * If @nonblocking != NULL, __get_user_pages will not wait for disk IO * or mmap_sem contention, and if waiting is needed to pin all pages, * *@nonblocking will be set to 0. Further, if @gup_flags does not * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in * this case. * * A caller using such a combination of @nonblocking and @gup_flags * must therefore hold the mmap_sem for reading only, and recognize * when it's been released. Otherwise, it must be held for either * reading or writing and will not be released. * * In most cases, get_user_pages or get_user_pages_fast should be used * instead of __get_user_pages. __get_user_pages should be used only if * you need some special @gup_flags. */ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) { long ret = 0, i = 0; struct vm_area_struct *vma = NULL; struct follow_page_context ctx = { NULL }; if (!nr_pages) return 0; VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); /* * If FOLL_FORCE is set then do not force a full fault as the hinting * fault information is unrelated to the reference behaviour of a task * using the address space */ if (!(gup_flags & FOLL_FORCE)) gup_flags |= FOLL_NUMA; do { struct page *page; unsigned int foll_flags = gup_flags; unsigned int page_increm; /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { vma = find_extend_vma(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, pages ? &pages[i] : NULL); if (ret) goto out; ctx.page_mask = 0; goto next_page; } if (!vma || check_vma_flags(vma, gup_flags)) { ret = -EFAULT; goto out; } if (is_vm_hugetlb_page(vma)) { i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, gup_flags, nonblocking); continue; } } retry: /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. 
*/ if (fatal_signal_pending(current)) { ret = -ERESTARTSYS; goto out; } cond_resched(); page = follow_page_mask(vma, start, foll_flags, &ctx); if (!page) { ret = faultin_page(tsk, vma, start, &foll_flags, nonblocking); switch (ret) { case 0: goto retry; case -EBUSY: ret = 0; /* FALLTHRU */ case -EFAULT: case -ENOMEM: case -EHWPOISON: goto out; case -ENOENT: goto next_page; } BUG(); } else if (PTR_ERR(page) == -EEXIST) { /* * Proper page table entry exists, but no corresponding * struct page. */ goto next_page; } else if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } if (pages) { pages[i] = page; flush_anon_page(vma, page, start); flush_dcache_page(page); ctx.page_mask = 0; } next_page: if (vmas) { vmas[i] = vma; ctx.page_mask = 0; } page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); if (page_increm > nr_pages) page_increm = nr_pages; i += page_increm; start += page_increm * PAGE_SIZE; nr_pages -= page_increm; } while (nr_pages); out: if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return i ? i : ret; } static bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) { bool write = !!(fault_flags & FAULT_FLAG_WRITE); bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; if (!(vm_flags & vma->vm_flags)) return false; /* * The architecture might have a hardware protection * mechanism other than read/write that can deny access. * * gup always represents data access, not instruction * fetches, so execute=false here: */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return false; return true; } /* * fixup_user_fault() - manually resolve a user page fault * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @address: user address * @fault_flags: flags to pass down to handle_mm_fault() * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller * does not allow retry * * This is meant to be called in the specific scenario where, for locking * reasons, we try to access user memory in atomic context (within a * pagefault_disable() section); that access returns -EFAULT, and we want to * resolve the user fault before trying again. * * Typically this is meant to be used by the futex code. * * The main difference with get_user_pages() is that this function will * unconditionally call handle_mm_fault() which will in turn perform all the * necessary SW fixup of the dirty and young bits in the PTE, while * get_user_pages() only guarantees to update these in the struct page. * * This is important for some architectures where those bits also gate the * access permission to the page because they are maintained in software. On * such architectures, gup() will not be enough to make a subsequent access * succeed. * * This function will not return with an unlocked mmap_sem. So it does not * have the same semantics wrt the @mm->mmap_sem as filemap_fault() does.
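 *
 * A hedged sketch of the futex-style pattern described above
 * (illustrative caller code only, not part of this file):
 *
 *	bool unlocked = false;
 *	int ret;
 *
 *	down_read(&mm->mmap_sem);
 *	ret = fixup_user_fault(current, mm, uaddr,
 *			       FAULT_FLAG_WRITE, &unlocked);
 *	up_read(&mm->mmap_sem);
 *
 * On success the faulting access can be retried under
 * pagefault_disable(); "unlocked" reports whether mmap_sem was
 * temporarily dropped and re-taken while retrying.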
*/ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { struct vm_area_struct *vma; vm_fault_t ret, major = 0; if (unlocked) fault_flags |= FAULT_FLAG_ALLOW_RETRY; retry: vma = find_extend_vma(mm, address); if (!vma || address < vma->vm_start) return -EFAULT; if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; ret = handle_mm_fault(vma, address, fault_flags); major |= ret & VM_FAULT_MAJOR; if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, 0); if (err) return err; BUG(); } if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); if (!(fault_flags & FAULT_FLAG_TRIED)) { *unlocked = true; fault_flags &= ~FAULT_FLAG_ALLOW_RETRY; fault_flags |= FAULT_FLAG_TRIED; goto retry; } } if (tsk) { if (major) tsk->maj_flt++; else tsk->min_flt++; } return 0; } EXPORT_SYMBOL_GPL(fixup_user_fault); static __always_inline long __get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int flags) { long ret, pages_done; bool lock_dropped; if (locked) { /* if VM_FAULT_RETRY can be returned, vmas become invalid */ BUG_ON(vmas); /* check caller initialized locked */ BUG_ON(*locked != 1); } if (pages) flags |= FOLL_GET; pages_done = 0; lock_dropped = false; for (;;) { ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, locked); if (!locked) /* VM_FAULT_RETRY couldn't trigger, bypass */ return ret; /* VM_FAULT_RETRY cannot return errors */ if (!*locked) { BUG_ON(ret < 0); BUG_ON(ret >= nr_pages); } if (!pages) /* If it's a prefault don't insist harder */ return ret; if (ret > 0) { nr_pages -= ret; pages_done += ret; if (!nr_pages) break; } if (*locked) { /* * VM_FAULT_RETRY didn't trigger or it was a * FOLL_NOWAIT. */ if (!pages_done) pages_done = ret; break; } /* VM_FAULT_RETRY triggered, so seek to the faulting offset */ pages += ret; start += ret << PAGE_SHIFT; /* * Repeat on the address that fired VM_FAULT_RETRY * without FAULT_FLAG_ALLOW_RETRY but with * FAULT_FLAG_TRIED. */ *locked = 1; lock_dropped = true; down_read(&mm->mmap_sem); ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, pages, NULL, NULL); if (ret != 1) { BUG_ON(ret > 1); if (!pages_done) pages_done = ret; break; } nr_pages--; pages_done++; if (!nr_pages) break; pages++; start += PAGE_SIZE; } if (lock_dropped && *locked) { /* * We must let the caller know we temporarily dropped the lock * and so the critical section protected by it was lost. */ up_read(&mm->mmap_sem); *locked = 0; } return pages_done; } /* * We can leverage the VM_FAULT_RETRY functionality in the page fault * paths better by using either get_user_pages_locked() or * get_user_pages_unlocked(). 
* * get_user_pages_locked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * do_something() * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * to: * * int locked = 1; * down_read(&mm->mmap_sem); * do_something() * get_user_pages_locked(tsk, mm, ..., pages, &locked); * if (locked) * up_read(&mm->mmap_sem); */ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { return __get_user_pages_locked(current, current->mm, start, nr_pages, pages, NULL, locked, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages_locked); /* * get_user_pages_unlocked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * with: * * get_user_pages_unlocked(tsk, mm, ..., pages); * * It is functionally equivalent to get_user_pages_fast so * get_user_pages_fast should be used instead if specific gup_flags * (e.g. FOLL_FORCE) are not required. */ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) { struct mm_struct *mm = current->mm; int locked = 1; long ret; down_read(&mm->mmap_sem); ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, &locked, gup_flags | FOLL_TOUCH); if (locked) up_read(&mm->mmap_sem); return ret; } EXPORT_SYMBOL(get_user_pages_unlocked); /* * get_user_pages_remote() - pin user pages in memory * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held for read or write. * * get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If gup_flags & FOLL_WRITE == 0, the page must not be written to. 
If the page * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must * be called after the page is finished with, and before put_page is called. * * get_user_pages is typically used for fewer-copy IO operations, to get a * handle on the memory by some means other than accesses via the user virtual * addresses. The pages may be submitted for DMA to devices or accessed via * their kernel linear mapping (via the kmap APIs). Care should be taken to * use the correct cache flushing APIs. * * See also get_user_pages_fast, for performance critical applications. * * get_user_pages should be phased out in favor of * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing * should use get_user_pages because it cannot pass * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. */ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) { return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, locked, gup_flags | FOLL_TOUCH | FOLL_REMOTE); } EXPORT_SYMBOL(get_user_pages_remote); /* * This is the same as get_user_pages_remote(), just with a * less-flexible calling convention where we assume that the task * and mm being operated on are the current task's and don't allow * passing of a locked parameter. We also obviously don't pass * FOLL_REMOTE in here. */ long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { return __get_user_pages_locked(current, current->mm, start, nr_pages, pages, vmas, NULL, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages); #ifdef CONFIG_FS_DAX /* * This is the same as get_user_pages() in that it assumes we are * operating on the current task's mm, but it goes further to validate * that the vmas associated with the address range are suitable for * longterm elevated page reference counts. For example, filesystem-dax * mappings are subject to the lifetime enforced by the filesystem and * we need guarantees that longterm users like RDMA and V4L2 only * establish mappings that have a kernel enforced revocation mechanism. * * "longterm" == userspace controlled elevated page count lifetime. * Contrast this to iov_iter_get_pages() usages which are transient. */ long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas_arg) { struct vm_area_struct **vmas = vmas_arg; struct vm_area_struct *vma_prev = NULL; long rc, i; if (!pages) return -EINVAL; if (!vmas) { vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *), GFP_KERNEL); if (!vmas) return -ENOMEM; } rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas); for (i = 0; i < rc; i++) { struct vm_area_struct *vma = vmas[i]; if (vma == vma_prev) continue; vma_prev = vma; if (vma_is_fsdax(vma)) break; } /* * Either get_user_pages() failed, or the vma validation * succeeded, in either case we don't need to put_page() before * returning. */ if (i >= rc) goto out; for (i = 0; i < rc; i++) put_page(pages[i]); rc = -EOPNOTSUPP; out: if (vmas != vmas_arg) kfree(vmas); return rc; } EXPORT_SYMBOL(get_user_pages_longterm); #endif /* CONFIG_FS_DAX */ /** * populate_vma_page_range() - populate a range of pages in the vma. * @vma: target vma * @start: start address * @end: end address * @nonblocking: * * This takes care of mlocking the pages too if VM_LOCKED is set. 
* * return 0 on success, negative error code on error. * * vma->vm_mm->mmap_sem must be held. * * If @nonblocking is NULL, it may be held for read or write and will * be unperturbed. * * If @nonblocking is non-NULL, it must be held for read only and may be * released. If it's released, *@nonblocking will be set to 0. */ long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking) { struct mm_struct *mm = vma->vm_mm; unsigned long nr_pages = (end - start) / PAGE_SIZE; int gup_flags; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK); VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma); VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; if (vma->vm_flags & VM_LOCKONFAULT) gup_flags &= ~FOLL_POPULATE; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW * and we would not want to dirty them for nothing. */ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; /* * We want mlock to succeed for regions that have any permissions * other than PROT_NONE. */ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) gup_flags |= FOLL_FORCE; /* * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ return __get_user_pages(current, mm, start, nr_pages, gup_flags, NULL, NULL, nonblocking); } /* * __mm_populate - populate and/or mlock pages within a range of address space. * * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap * flags. VMAs must already be marked with the desired vm_flags, and * mmap_sem must not be held. */ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; long ret = 0; end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. */ if (!locked) { locked = 1; down_read(&mm->mmap_sem); vma = find_vma(mm, nstart); } else if (nstart >= vma->vm_end) vma = vma->vm_next; if (!vma || vma->vm_start >= end) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. populate_vma_page_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. */ ret = populate_vma_page_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) up_read(&mm->mmap_sem); return ret; /* 0 or negative error code */ } /** * get_dump_page() - pin user page in memory while writing it to core dump * @addr: user address * * Returns struct page pointer of user page pinned for dump, * to be freed afterwards by put_page(). * * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - * allowing a hole to be left in the corefile to save disk space.
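 *
 * A coredump-style loop might use it as below (illustrative only;
 * "write_page_or_hole" is a hypothetical writer, not a kernel API):
 *
 *	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
 *		struct page *page = get_dump_page(addr);
 *
 *		if (page) {
 *			write_page_or_hole(cprm, page);
 *			put_page(page);
 *		} else {
 *			write_page_or_hole(cprm, NULL);	(emit a hole)
 *		}
 *	}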
* * Called without mmap_sem, but after all other threads have been killed. */ #ifdef CONFIG_ELF_CORE struct page *get_dump_page(unsigned long addr) { struct vm_area_struct *vma; struct page *page; if (__get_user_pages(current, current->mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, NULL) < 1) return NULL; flush_cache_page(vma, addr, page_to_pfn(page)); return page; } #endif /* CONFIG_ELF_CORE */ /* * Generic Fast GUP * * get_user_pages_fast attempts to pin user pages by walking the page * tables directly and avoids taking locks. Thus the walker needs to be * protected from page table pages being freed from under it, and should * block any THP splits. * * One way to achieve this is to have the walker disable interrupts, and * rely on IPIs from the TLB flushing code blocking before the page table * pages are freed. This is unsuitable for architectures that do not need * to broadcast an IPI when invalidating TLBs. * * Another way to achieve this is to batch up page table containing pages * belonging to more than one mm_user, then rcu_sched a callback to free those * pages. Disabling interrupts will allow the fast_gup walker to both block * the rcu_sched callback, and an IPI that we broadcast for splitting THPs * (which is a relatively rare event). The code below adopts this strategy. * * Before activating this code, please be aware that the following assumptions * are currently made: * * *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to * free pages containing page tables or TLB flushing requires IPI broadcast. * * *) ptes can be read atomically by the architecture. * * *) access_ok is sufficient to validate userspace address ranges. * * The last two assumptions can be relaxed by the addition of helper functions. * * This code is based heavily on the PowerPC implementation by Nick Piggin. */ #ifdef CONFIG_HAVE_GENERIC_GUP #ifndef gup_get_pte /* * We assume that the PTE can be read atomically. If this is not the case for * your architecture, please provide the helper. */ static inline pte_t gup_get_pte(pte_t *ptep) { return READ_ONCE(*ptep); } #endif static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) { while ((*nr) - nr_start) { struct page *page = pages[--(*nr)]; ClearPageReferenced(page); put_page(page); } } #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; int nr_start = *nr, ret = 0; pte_t *ptep, *ptem; ptem = ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); struct page *head, *page; /* * Similar to the PMD case below, NUMA hinting must take slow * path using the pte_protnone check. 
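 * A pte_protnone() entry may be a NUMA hinting fault in disguise: it
 * looks non-present to the hardware, and only the slow path can run
 * handle_mm_fault() to resolve it, so fast GUP must bail out here.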
*/ if (pte_protnone(pte)) goto pte_unmap; if (!pte_access_permitted(pte, write)) goto pte_unmap; if (pte_devmap(pte)) { pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); goto pte_unmap; } } else if (pte_special(pte)) goto pte_unmap; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); head = compound_head(page); if (!page_cache_get_speculative(head)) goto pte_unmap; if (unlikely(pte_val(pte) != pte_val(*ptep))) { put_page(head); goto pte_unmap; } VM_BUG_ON_PAGE(compound_head(page) != head, page); SetPageReferenced(page); pages[*nr] = page; (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); ret = 1; pte_unmap: if (pgmap) put_dev_pagemap(pgmap); pte_unmap(ptem); return ret; } #else /* * If we can't determine whether or not a pte is special, then fail immediately * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not * to be special. * * For a futex to be placed on a THP tail page, get_futex_key requires a * __get_user_pages_fast implementation that can pin pages. Thus it's still * useful to have gup_huge_pmd even if we can't operate on ptes. */ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { return 0; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) static int __gup_device_huge(unsigned long pfn, unsigned long addr, unsigned long end, struct page **pages, int *nr) { int nr_start = *nr; struct dev_pagemap *pgmap = NULL; do { struct page *page = pfn_to_page(pfn); pgmap = get_dev_pagemap(pfn, pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); return 0; } SetPageReferenced(page); pages[*nr] = page; get_page(page); (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); if (pgmap) put_dev_pagemap(pgmap); return 1; } static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } #else static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } #endif static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pmd_page(orig)); if 
(!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pud_access_permitted(orig, write)) return 0; if (pud_devmap(orig)) return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pud_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { int refs; struct page *head, *page; if (!pgd_access_permitted(orig, write)) return 0; BUILD_BUG_ON(pgd_devmap(orig)); refs = 0; page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pgd_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset(&pud, addr); do { pmd_t pmd = READ_ONCE(*pmdp); next = pmd_addr_end(addr, end); if (!pmd_present(pmd)) return 0; if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))) { /* * NUMA hinting faults need to be handled in the GUP * slowpath for accounting purposes and so that they * can be serialised against THP migration. 
*/ if (pmd_protnone(pmd)) return 0; if (!gup_huge_pmd(pmd, pmdp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { /* * architectures can use different formats for the * hugetlbfs pmd and the THP pmd */ if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); return 1; } static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset(&p4d, addr); do { pud_t pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; if (unlikely(pud_huge(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; p4dp = p4d_offset(&pgd, addr); do { p4d_t p4d = READ_ONCE(*p4dp); next = p4d_addr_end(addr, end); if (p4d_none(p4d)) return 0; BUILD_BUG_ON(p4d_huge(p4d)); if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, P4D_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) return 0; } while (p4dp++, addr = next, addr != end); return 1; } static void gup_pgd_range(unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pgd_t *pgdp; pgdp = pgd_offset(current->mm, addr); do { pgd_t pgd = READ_ONCE(*pgdp); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) return; if (unlikely(pgd_huge(pgd))) { if (!gup_huge_pgd(pgd, pgdp, addr, next, write, pages, nr)) return; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, write, pages, nr)) return; } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } #ifndef gup_fast_permitted /* * Check if it's allowed to use __get_user_pages_fast() for the range, or * we need to fall back to the slow version: */ bool gup_fast_permitted(unsigned long start, int nr_pages, int write) { unsigned long len, end; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; return end >= start; } #endif /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. * Note a difference with get_user_pages_fast: this always returns the * number of pages pinned, 0 if no pages were pinned. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long len, end; unsigned long flags; int nr = 0; start &= PAGE_MASK; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (unlikely(!access_ok((void __user *)start, len))) return 0; /* * Disable interrupts. We use the nested form as we can already have * interrupts disabled by get_futex_key. * * With interrupts disabled, we block page table pages from being * freed from under us. See struct mmu_table_batch comments in * include/asm-generic/tlb.h for more details. * * We do not adopt an rcu_read_lock()
here as we also want to * block IPIs that come from THPs splitting. */ if (gup_fast_permitted(start, nr_pages, write)) { local_irq_save(flags); gup_pgd_range(start, end, write, pages, &nr); local_irq_restore(flags); } return nr; } /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @write: whether pages will be written to * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Attempt to pin user pages in memory without taking mm->mmap_sem. * If not successful, it will fall back to taking the lock and * calling get_user_pages(). * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. */ int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long addr, len, end; int nr = 0, ret = 0; start &= PAGE_MASK; addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (nr_pages <= 0) return 0; if (unlikely(!access_ok((void __user *)start, len))) return -EFAULT; if (gup_fast_permitted(start, nr_pages, write)) { local_irq_disable(); gup_pgd_range(addr, end, write, pages, &nr); local_irq_enable(); ret = nr; } if (nr < nr_pages) { /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; ret = get_user_pages_unlocked(start, nr_pages - nr, pages, write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { if (ret < 0) ret = nr; else ret += nr; } } return ret; } #endif /* CONFIG_HAVE_GENERIC_GUP */
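/*
 * Illustrative only, not part of this file: a typical driver-style
 * caller of get_user_pages_fast() (buffer and size names are
 * hypothetical):
 *
 *	struct page *pages[NR];
 *	int i, npinned;
 *
 *	npinned = get_user_pages_fast(uaddr & PAGE_MASK, NR,
 *				      1, pages);	(1 == write)
 *	if (npinned < 0)
 *		return npinned;
 *	(... access the pages via DMA or kmap() ...)
 *	for (i = 0; i < npinned; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */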
#include <linux/kernel.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/memremap.h> #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/sched/signal.h> #include <linux/rwsem.h> #include <linux/hugetlb.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "internal.h" struct follow_page_context { struct dev_pagemap *pgmap; unsigned int page_mask; }; static struct page *no_page_table(struct vm_area_struct *vma, unsigned int flags) { /* * When core dumping an enormous anonymous area that nobody * has touched so far, we don't want to allocate unnecessary pages or * page tables. Return error instead of NULL to skip handle_mm_fault, * then get_dump_page() will return NULL to leave a hole in the dump. * But we can only make this optimization where a hole would surely * be zero-filled if handle_mm_fault() actually did handle it. */ if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault)) return ERR_PTR(-EFAULT); return NULL; } static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { /* No page to get reference */ if (flags & FOLL_GET) return -EFAULT; if (flags & FOLL_TOUCH) { pte_t entry = *pte; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); if (!pte_same(*pte, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); } } /* Proper page table entry exists, but no corresponding struct page */ return -EEXIST; } /* * FOLL_FORCE can write to even unwritable pte's, but only * after we've gone through a COW cycle and they are dirty. */ static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) { return pte_write(pte) || ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); } static struct page *follow_page_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd, unsigned int flags, struct dev_pagemap **pgmap) { struct mm_struct *mm = vma->vm_mm; struct page *page; spinlock_t *ptl; pte_t *ptep, pte; retry: if (unlikely(pmd_bad(*pmd))) return no_page_table(vma, flags); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; if (!pte_present(pte)) { swp_entry_t entry; /* * KSM's break_ksm() relies upon recognizing a ksm page * even while it is being migrated, so for that case we * need migration_entry_wait(). */ if (likely(!(flags & FOLL_MIGRATION))) goto no_page; if (pte_none(pte)) goto no_page; entry = pte_to_swp_entry(pte); if (!is_migration_entry(entry)) goto no_page; pte_unmap_unlock(ptep, ptl); migration_entry_wait(mm, pmd, address); goto retry; } if ((flags & FOLL_NUMA) && pte_protnone(pte)) goto no_page; if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) { pte_unmap_unlock(ptep, ptl); return NULL; } page = vm_normal_page(vma, address, pte); if (!page && pte_devmap(pte) && (flags & FOLL_GET)) { /* * Only return device mapping pages in the FOLL_GET case since * they are only valid while holding the pgmap reference. 
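 * The reference is cached in *pgmap so repeated lookups in the same
 * walk can reuse it; the caller (follow_page() or __get_user_pages())
 * drops it with put_dev_pagemap() once the walk is finished.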
*/ *pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap); if (*pgmap) page = pte_page(pte); else goto no_page; } else if (unlikely(!page)) { if (flags & FOLL_DUMP) { /* Avoid special (like zero) pages in core dumps */ page = ERR_PTR(-EFAULT); goto out; } if (is_zero_pfn(pte_pfn(pte))) { page = pte_page(pte); } else { int ret; ret = follow_pfn_pte(vma, address, ptep, flags); page = ERR_PTR(ret); goto out; } } if (flags & FOLL_SPLIT && PageTransCompound(page)) { int ret; get_page(page); pte_unmap_unlock(ptep, ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (ret) return ERR_PTR(ret); goto retry; } if (flags & FOLL_GET) { if (unlikely(!try_get_page(page))) { page = ERR_PTR(-ENOMEM); goto out; } } if (flags & FOLL_TOUCH) { if ((flags & FOLL_WRITE) && !pte_dirty(pte) && !PageDirty(page)) set_page_dirty(page); /* * pte_mkyoung() would be more correct here, but atomic care * is needed to avoid losing the dirty bit: it is easier to use * mark_page_accessed(). */ mark_page_accessed(page); } if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { /* Do not mlock pte-mapped THP */ if (PageTransCompound(page)) goto out; /* * The preliminary mapping check is mainly to avoid the * pointless overhead of lock_page on the ZERO_PAGE * which might bounce very badly if there is contention. * * If the page is already locked, we don't need to * handle it now - vmscan will handle it later if and * when it attempts to reclaim the page. */ if (page->mapping && trylock_page(page)) { lru_add_drain(); /* push cached pages to LRU */ /* * Because we lock page here, and migration is * blocked by the pte's page reference, and we * know the page is still mapped, we don't even * need to check for file-cache page truncation. */ mlock_vma_page(page); unlock_page(page); } } out: pte_unmap_unlock(ptep, ptl); return page; no_page: pte_unmap_unlock(ptep, ptl); if (!pte_none(pte)) return NULL; return no_page_table(vma, flags); } static struct page *follow_pmd_mask(struct vm_area_struct *vma, unsigned long address, pud_t *pudp, unsigned int flags, struct follow_page_context *ctx) { pmd_t *pmd, pmdval; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); /* * The READ_ONCE() will stabilize the pmdval in a register or * on the stack so that it will stop changing under the code. 
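 * Checks that must see the current value (e.g. the pmd_trans_huge()
 * re-test under the pmd lock below) deliberately re-read *pmd instead
 * of trusting this snapshot.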
*/ pmdval = READ_ONCE(*pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags); if (pmd_huge(pmdval) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pmd(mm, address, pmd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pmd_val(pmdval)))) { page = follow_huge_pd(vma, address, __hugepd(pmd_val(pmdval)), flags, PMD_SHIFT); if (page) return page; return no_page_table(vma, flags); } retry: if (!pmd_present(pmdval)) { if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(pmdval)); if (is_pmd_migration_entry(pmdval)) pmd_migration_entry_wait(mm, pmd); pmdval = READ_ONCE(*pmd); /* * MADV_DONTNEED may convert the pmd to null because * mmap_sem is held in read mode */ if (pmd_none(pmdval)) return no_page_table(vma, flags); goto retry; } if (pmd_devmap(pmdval)) { ptl = pmd_lock(mm, pmd); page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (likely(!pmd_trans_huge(pmdval))) return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); if ((flags & FOLL_NUMA) && pmd_protnone(pmdval)) return no_page_table(vma, flags); retry_locked: ptl = pmd_lock(mm, pmd); if (unlikely(pmd_none(*pmd))) { spin_unlock(ptl); return no_page_table(vma, flags); } if (unlikely(!pmd_present(*pmd))) { spin_unlock(ptl); if (likely(!(flags & FOLL_MIGRATION))) return no_page_table(vma, flags); pmd_migration_entry_wait(mm, pmd); goto retry_locked; } if (unlikely(!pmd_trans_huge(*pmd))) { spin_unlock(ptl); return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (flags & FOLL_SPLIT) { int ret; page = pmd_page(*pmd); if (is_huge_zero_page(page)) { spin_unlock(ptl); ret = 0; split_huge_pmd(vma, pmd, address); if (pmd_trans_unstable(pmd)) ret = -EBUSY; } else { if (unlikely(!try_get_page(page))) { spin_unlock(ptl); return ERR_PTR(-ENOMEM); } spin_unlock(ptl); lock_page(page); ret = split_huge_page(page); unlock_page(page); put_page(page); if (pmd_none(*pmd)) return no_page_table(vma, flags); } return ret ? 
ERR_PTR(ret) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); spin_unlock(ptl); ctx->page_mask = HPAGE_PMD_NR - 1; return page; } static struct page *follow_pud_mask(struct vm_area_struct *vma, unsigned long address, p4d_t *p4dp, unsigned int flags, struct follow_page_context *ctx) { pud_t *pud; spinlock_t *ptl; struct page *page; struct mm_struct *mm = vma->vm_mm; pud = pud_offset(p4dp, address); if (pud_none(*pud)) return no_page_table(vma, flags); if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) { page = follow_huge_pud(mm, address, pud, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pud_val(*pud)))) { page = follow_huge_pd(vma, address, __hugepd(pud_val(*pud)), flags, PUD_SHIFT); if (page) return page; return no_page_table(vma, flags); } if (pud_devmap(*pud)) { ptl = pud_lock(mm, pud); page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap); spin_unlock(ptl); if (page) return page; } if (unlikely(pud_bad(*pud))) return no_page_table(vma, flags); return follow_pmd_mask(vma, address, pud, flags, ctx); } static struct page *follow_p4d_mask(struct vm_area_struct *vma, unsigned long address, pgd_t *pgdp, unsigned int flags, struct follow_page_context *ctx) { p4d_t *p4d; struct page *page; p4d = p4d_offset(pgdp, address); if (p4d_none(*p4d)) return no_page_table(vma, flags); BUILD_BUG_ON(p4d_huge(*p4d)); if (unlikely(p4d_bad(*p4d))) return no_page_table(vma, flags); if (is_hugepd(__hugepd(p4d_val(*p4d)))) { page = follow_huge_pd(vma, address, __hugepd(p4d_val(*p4d)), flags, P4D_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_pud_mask(vma, address, p4d, flags, ctx); } /** * follow_page_mask - look up a page descriptor from a user-virtual address * @vma: vm_area_struct mapping @address * @address: virtual address to look up * @flags: flags modifying lookup behaviour * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a * pointer to output page_mask * * @flags can have FOLL_ flags set, defined in <linux/mm.h> * * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches * the device's dev_pagemap metadata to avoid repeating expensive lookups. * * On output, the @ctx->page_mask is set according to the size of the page. * * Return: the mapped (struct page *), %NULL if no mapping exists, or * an error pointer if there is a mapping to something not represented * by a page descriptor (see also vm_normal_page()). 
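 *
 * A hedged usage sketch for the follow_page() wrapper below
 * (illustrative only; mmap_sem must be held across the call):
 *
 *	down_read(&mm->mmap_sem);
 *	page = follow_page(vma, addr, FOLL_GET);
 *	if (!IS_ERR_OR_NULL(page)) {
 *		(... inspect the page ...)
 *		put_page(page);
 *	}
 *	up_read(&mm->mmap_sem);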
*/ struct page *follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags, struct follow_page_context *ctx) { pgd_t *pgd; struct page *page; struct mm_struct *mm = vma->vm_mm; ctx->page_mask = 0; /* make this handle hugepd */ page = follow_huge_addr(mm, address, flags & FOLL_WRITE); if (!IS_ERR(page)) { BUG_ON(flags & FOLL_GET); return page; } pgd = pgd_offset(mm, address); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) return no_page_table(vma, flags); if (pgd_huge(*pgd)) { page = follow_huge_pgd(mm, address, pgd, flags); if (page) return page; return no_page_table(vma, flags); } if (is_hugepd(__hugepd(pgd_val(*pgd)))) { page = follow_huge_pd(vma, address, __hugepd(pgd_val(*pgd)), flags, PGDIR_SHIFT); if (page) return page; return no_page_table(vma, flags); } return follow_p4d_mask(vma, address, pgd, flags, ctx); } struct page *follow_page(struct vm_area_struct *vma, unsigned long address, unsigned int foll_flags) { struct follow_page_context ctx = { NULL }; struct page *page; page = follow_page_mask(vma, address, foll_flags, &ctx); if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return page; } static int get_gate_page(struct mm_struct *mm, unsigned long address, unsigned int gup_flags, struct vm_area_struct **vma, struct page **page) { pgd_t *pgd; p4d_t *p4d; pud_t *pud; pmd_t *pmd; pte_t *pte; int ret = -EFAULT; /* user gate pages are read-only */ if (gup_flags & FOLL_WRITE) return -EFAULT; if (address > TASK_SIZE) pgd = pgd_offset_k(address); else pgd = pgd_offset_gate(mm, address); BUG_ON(pgd_none(*pgd)); p4d = p4d_offset(pgd, address); BUG_ON(p4d_none(*p4d)); pud = pud_offset(p4d, address); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, address); if (pte_none(*pte)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; *page = vm_normal_page(*vma, address, *pte); if (!*page) { if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte))) goto unmap; *page = pte_page(*pte); /* * This should never happen (a device public page in the gate * area). */ if (is_device_public_page(*page)) goto unmap; } if (unlikely(!try_get_page(*page))) { ret = -ENOMEM; goto unmap; } out: ret = 0; unmap: pte_unmap(pte); return ret; } /* * mmap_sem must be held on entry. If @nonblocking != NULL and * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released. * If it is, *@nonblocking will be set to 0 and -EBUSY returned. 
*/ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long address, unsigned int *flags, int *nonblocking) { unsigned int fault_flags = 0; vm_fault_t ret; /* mlock all present pages, but do not fault in new pages */ if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK) return -ENOENT; if (*flags & FOLL_WRITE) fault_flags |= FAULT_FLAG_WRITE; if (*flags & FOLL_REMOTE) fault_flags |= FAULT_FLAG_REMOTE; if (nonblocking) fault_flags |= FAULT_FLAG_ALLOW_RETRY; if (*flags & FOLL_NOWAIT) fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT; if (*flags & FOLL_TRIED) { VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY); fault_flags |= FAULT_FLAG_TRIED; } ret = handle_mm_fault(vma, address, fault_flags); if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, *flags); if (err) return err; BUG(); } if (tsk) { if (ret & VM_FAULT_MAJOR) tsk->maj_flt++; else tsk->min_flt++; } if (ret & VM_FAULT_RETRY) { if (nonblocking && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) *nonblocking = 0; return -EBUSY; } /* * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when * necessary, even if maybe_mkwrite decided not to set pte_write. We * can thus safely do subsequent page lookups as if they were reads. * But only do so when looping for pte_write is futile: in some cases * userspace may also be wanting to write to the gotten user page, * which a read fault here might prevent (a readonly page might get * reCOWed by userspace write). */ if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) *flags |= FOLL_COW; return 0; } static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) { vm_flags_t vm_flags = vma->vm_flags; int write = (gup_flags & FOLL_WRITE); int foreign = (gup_flags & FOLL_REMOTE); if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) return -EFAULT; if (write) { if (!(vm_flags & VM_WRITE)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * We used to let the write,force case do COW in a * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could * set a breakpoint in a read-only mapping of an * executable, without corrupting the file (yet only * when that file had been opened for writing!). * Anon pages in shared mappings are surprising: now * just reject it. */ if (!is_cow_mapping(vm_flags)) return -EFAULT; } } else if (!(vm_flags & VM_READ)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; /* * Is there actually any vma we can reach here which does not * have VM_MAYREAD set? */ if (!(vm_flags & VM_MAYREAD)) return -EFAULT; } /* * gups are always data accesses, not instruction * fetches, so execute=false here */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return -EFAULT; return 0; } /** * __get_user_pages() - pin user pages in memory * @tsk: task_struct of target task * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying pin behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * @nonblocking: whether waiting for disk IO or mmap_sem contention * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. 
Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held. It may be released. See below. * * __get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * __get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If * the page is written to, set_page_dirty (or set_page_dirty_lock, as * appropriate) must be called after the page is finished with, and * before put_page is called. * * If @nonblocking != NULL, __get_user_pages will not wait for disk IO * or mmap_sem contention, and if waiting is needed to pin all pages, * *@nonblocking will be set to 0. Further, if @gup_flags does not * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in * this case. * * A caller using such a combination of @nonblocking and @gup_flags * must therefore hold the mmap_sem for reading only, and recognize * when it's been released. Otherwise, it must be held for either * reading or writing and will not be released. * * In most cases, get_user_pages or get_user_pages_fast should be used * instead of __get_user_pages. __get_user_pages should be used only if * you need some special @gup_flags. */ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *nonblocking) { long ret = 0, i = 0; struct vm_area_struct *vma = NULL; struct follow_page_context ctx = { NULL }; if (!nr_pages) return 0; VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET)); /* * If FOLL_FORCE is set then do not force a full fault as the hinting * fault information is unrelated to the reference behaviour of a task * using the address space */ if (!(gup_flags & FOLL_FORCE)) gup_flags |= FOLL_NUMA; do { struct page *page; unsigned int foll_flags = gup_flags; unsigned int page_increm; /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { vma = find_extend_vma(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, pages ? &pages[i] : NULL); if (ret) goto out; ctx.page_mask = 0; goto next_page; } if (!vma || check_vma_flags(vma, gup_flags)) { ret = -EFAULT; goto out; } if (is_vm_hugetlb_page(vma)) { i = follow_hugetlb_page(mm, vma, pages, vmas, &start, &nr_pages, i, gup_flags, nonblocking); continue; } } retry: /* * If we have a pending SIGKILL, don't keep faulting pages and * potentially allocating memory. 
*/ if (fatal_signal_pending(current)) { ret = -ERESTARTSYS; goto out; } cond_resched(); page = follow_page_mask(vma, start, foll_flags, &ctx); if (!page) { ret = faultin_page(tsk, vma, start, &foll_flags, nonblocking); switch (ret) { case 0: goto retry; case -EBUSY: ret = 0; /* FALLTHRU */ case -EFAULT: case -ENOMEM: case -EHWPOISON: goto out; case -ENOENT: goto next_page; } BUG(); } else if (PTR_ERR(page) == -EEXIST) { /* * Proper page table entry exists, but no corresponding * struct page. */ goto next_page; } else if (IS_ERR(page)) { ret = PTR_ERR(page); goto out; } if (pages) { pages[i] = page; flush_anon_page(vma, page, start); flush_dcache_page(page); ctx.page_mask = 0; } next_page: if (vmas) { vmas[i] = vma; ctx.page_mask = 0; } page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); if (page_increm > nr_pages) page_increm = nr_pages; i += page_increm; start += page_increm * PAGE_SIZE; nr_pages -= page_increm; } while (nr_pages); out: if (ctx.pgmap) put_dev_pagemap(ctx.pgmap); return i ? i : ret; } static bool vma_permits_fault(struct vm_area_struct *vma, unsigned int fault_flags) { bool write = !!(fault_flags & FAULT_FLAG_WRITE); bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE); vm_flags_t vm_flags = write ? VM_WRITE : VM_READ; if (!(vm_flags & vma->vm_flags)) return false; /* * The architecture might have a hardware protection * mechanism other than read/write that can deny access. * * gup always represents data access, not instruction * fetches, so execute=false here: */ if (!arch_vma_access_permitted(vma, write, false, foreign)) return false; return true; } /* * fixup_user_fault() - manually resolve a user page fault * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @address: user address * @fault_flags: flags to pass down to handle_mm_fault() * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller * does not allow retry * * This is meant to be called in the specific scenario where, for locking * reasons, we try to access user memory in atomic context (within a * pagefault_disable() section); that access returns -EFAULT, and we want to * resolve the user fault before trying again. * * Typically this is meant to be used by the futex code. * * The main difference with get_user_pages() is that this function will * unconditionally call handle_mm_fault() which will in turn perform all the * necessary SW fixup of the dirty and young bits in the PTE, while * get_user_pages() only guarantees to update these in the struct page. * * This is important for some architectures where those bits also gate the * access permission to the page because they are maintained in software. On * such architectures, gup() will not be enough to make a subsequent access * succeed. * * This function will not return with an unlocked mmap_sem. So it does not * have the same semantics wrt the @mm->mmap_sem as filemap_fault() does.
*/ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, unsigned long address, unsigned int fault_flags, bool *unlocked) { struct vm_area_struct *vma; vm_fault_t ret, major = 0; if (unlocked) fault_flags |= FAULT_FLAG_ALLOW_RETRY; retry: vma = find_extend_vma(mm, address); if (!vma || address < vma->vm_start) return -EFAULT; if (!vma_permits_fault(vma, fault_flags)) return -EFAULT; ret = handle_mm_fault(vma, address, fault_flags); major |= ret & VM_FAULT_MAJOR; if (ret & VM_FAULT_ERROR) { int err = vm_fault_to_errno(ret, 0); if (err) return err; BUG(); } if (ret & VM_FAULT_RETRY) { down_read(&mm->mmap_sem); if (!(fault_flags & FAULT_FLAG_TRIED)) { *unlocked = true; fault_flags &= ~FAULT_FLAG_ALLOW_RETRY; fault_flags |= FAULT_FLAG_TRIED; goto retry; } } if (tsk) { if (major) tsk->maj_flt++; else tsk->min_flt++; } return 0; } EXPORT_SYMBOL_GPL(fixup_user_fault); static __always_inline long __get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int flags) { long ret, pages_done; bool lock_dropped; if (locked) { /* if VM_FAULT_RETRY can be returned, vmas become invalid */ BUG_ON(vmas); /* check caller initialized locked */ BUG_ON(*locked != 1); } if (pages) flags |= FOLL_GET; pages_done = 0; lock_dropped = false; for (;;) { ret = __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas, locked); if (!locked) /* VM_FAULT_RETRY couldn't trigger, bypass */ return ret; /* VM_FAULT_RETRY cannot return errors */ if (!*locked) { BUG_ON(ret < 0); BUG_ON(ret >= nr_pages); } if (!pages) /* If it's a prefault don't insist harder */ return ret; if (ret > 0) { nr_pages -= ret; pages_done += ret; if (!nr_pages) break; } if (*locked) { /* * VM_FAULT_RETRY didn't trigger or it was a * FOLL_NOWAIT. */ if (!pages_done) pages_done = ret; break; } /* VM_FAULT_RETRY triggered, so seek to the faulting offset */ pages += ret; start += ret << PAGE_SHIFT; /* * Repeat on the address that fired VM_FAULT_RETRY * without FAULT_FLAG_ALLOW_RETRY but with * FAULT_FLAG_TRIED. */ *locked = 1; lock_dropped = true; down_read(&mm->mmap_sem); ret = __get_user_pages(tsk, mm, start, 1, flags | FOLL_TRIED, pages, NULL, NULL); if (ret != 1) { BUG_ON(ret > 1); if (!pages_done) pages_done = ret; break; } nr_pages--; pages_done++; if (!nr_pages) break; pages++; start += PAGE_SIZE; } if (lock_dropped && *locked) { /* * We must let the caller know we temporarily dropped the lock * and so the critical section protected by it was lost. */ up_read(&mm->mmap_sem); *locked = 0; } return pages_done; } /* * We can leverage the VM_FAULT_RETRY functionality in the page fault * paths better by using either get_user_pages_locked() or * get_user_pages_unlocked(). 
* * get_user_pages_locked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * do_something() * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * to: * * int locked = 1; * down_read(&mm->mmap_sem); * do_something() * get_user_pages_locked(tsk, mm, ..., pages, &locked); * if (locked) * up_read(&mm->mmap_sem); */ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked) { return __get_user_pages_locked(current, current->mm, start, nr_pages, pages, NULL, locked, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages_locked); /* * get_user_pages_unlocked() is suitable to replace the form: * * down_read(&mm->mmap_sem); * get_user_pages(tsk, mm, ..., pages, NULL); * up_read(&mm->mmap_sem); * * with: * * get_user_pages_unlocked(tsk, mm, ..., pages); * * It is functionally equivalent to get_user_pages_fast so * get_user_pages_fast should be used instead if specific gup_flags * (e.g. FOLL_FORCE) are not required. */ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) { struct mm_struct *mm = current->mm; int locked = 1; long ret; down_read(&mm->mmap_sem); ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL, &locked, gup_flags | FOLL_TOUCH); if (locked) up_read(&mm->mmap_sem); return ret; } EXPORT_SYMBOL(get_user_pages_unlocked); /* * get_user_pages_remote() - pin user pages in memory * @tsk: the task_struct to use for page fault accounting, or * NULL if faults are not to be recorded. * @mm: mm_struct of target mm * @start: starting user address * @nr_pages: number of pages from start to pin * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. * @vmas: array of pointers to vmas corresponding to each page. * Or NULL if the caller does not require them. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. Each page returned must be released * with a put_page() call when it is finished with. vmas will only * remain valid while mmap_sem is held. * * Must be called with mmap_sem held for read or write. * * get_user_pages walks a process's page tables and takes a reference to * each struct page that each user address corresponds to at a given * instant. That is, it takes the page that would be accessed if a user * thread accesses the given user virtual address at that instant. * * This does not guarantee that the page exists in the user mappings when * get_user_pages returns, and there may even be a completely different * page there in some cases (eg. if mmapped pagecache has been invalidated * and subsequently re faulted). However it does guarantee that the page * won't be freed completely. And mostly callers simply care that the page * contains data that was valid *at some point in time*. Typically, an IO * or similar operation cannot guarantee anything stronger anyway because * locks can't be held over the syscall boundary. * * If gup_flags & FOLL_WRITE == 0, the page must not be written to. 
If the page * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must * be called after the page is finished with, and before put_page is called. * * get_user_pages is typically used for fewer-copy IO operations, to get a * handle on the memory by some means other than accesses via the user virtual * addresses. The pages may be submitted for DMA to devices or accessed via * their kernel linear mapping (via the kmap APIs). Care should be taken to * use the correct cache flushing APIs. * * See also get_user_pages_fast, for performance critical applications. * * get_user_pages should be phased out in favor of * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing * should use get_user_pages because it cannot pass * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault. */ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked) { return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas, locked, gup_flags | FOLL_TOUCH | FOLL_REMOTE); } EXPORT_SYMBOL(get_user_pages_remote); /* * This is the same as get_user_pages_remote(), just with a * less-flexible calling convention where we assume that the task * and mm being operated on are the current task's and don't allow * passing of a locked parameter. We also obviously don't pass * FOLL_REMOTE in here. */ long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas) { return __get_user_pages_locked(current, current->mm, start, nr_pages, pages, vmas, NULL, gup_flags | FOLL_TOUCH); } EXPORT_SYMBOL(get_user_pages); #ifdef CONFIG_FS_DAX /* * This is the same as get_user_pages() in that it assumes we are * operating on the current task's mm, but it goes further to validate * that the vmas associated with the address range are suitable for * longterm elevated page reference counts. For example, filesystem-dax * mappings are subject to the lifetime enforced by the filesystem and * we need guarantees that longterm users like RDMA and V4L2 only * establish mappings that have a kernel enforced revocation mechanism. * * "longterm" == userspace controlled elevated page count lifetime. * Contrast this to iov_iter_get_pages() usages which are transient. */ long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas_arg) { struct vm_area_struct **vmas = vmas_arg; struct vm_area_struct *vma_prev = NULL; long rc, i; if (!pages) return -EINVAL; if (!vmas) { vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *), GFP_KERNEL); if (!vmas) return -ENOMEM; } rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas); for (i = 0; i < rc; i++) { struct vm_area_struct *vma = vmas[i]; if (vma == vma_prev) continue; vma_prev = vma; if (vma_is_fsdax(vma)) break; } /* * Either get_user_pages() failed, or the vma validation * succeeded, in either case we don't need to put_page() before * returning. */ if (i >= rc) goto out; for (i = 0; i < rc; i++) put_page(pages[i]); rc = -EOPNOTSUPP; out: if (vmas != vmas_arg) kfree(vmas); return rc; } EXPORT_SYMBOL(get_user_pages_longterm); #endif /* CONFIG_FS_DAX */ /** * populate_vma_page_range() - populate a range of pages in the vma. * @vma: target vma * @start: start address * @end: end address * @nonblocking: * * This takes care of mlocking the pages too if VM_LOCKED is set. 
* * return 0 on success, negative error code on error. * * vma->vm_mm->mmap_sem must be held. * * If @nonblocking is NULL, it may be held for read or write and will * be unperturbed. * * If @nonblocking is non-NULL, it must held for read only and may be * released. If it's released, *@nonblocking will be set to 0. */ long populate_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, int *nonblocking) { struct mm_struct *mm = vma->vm_mm; unsigned long nr_pages = (end - start) / PAGE_SIZE; int gup_flags; VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK); VM_BUG_ON_VMA(start < vma->vm_start, vma); VM_BUG_ON_VMA(end > vma->vm_end, vma); VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm); gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK; if (vma->vm_flags & VM_LOCKONFAULT) gup_flags &= ~FOLL_POPULATE; /* * We want to touch writable mappings with a write fault in order * to break COW, except for shared mappings because these don't COW * and we would not want to dirty them for nothing. */ if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) gup_flags |= FOLL_WRITE; /* * We want mlock to succeed for regions that have any permissions * other than PROT_NONE. */ if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) gup_flags |= FOLL_FORCE; /* * We made sure addr is within a VMA, so the following will * not result in a stack expansion that recurses back here. */ return __get_user_pages(current, mm, start, nr_pages, gup_flags, NULL, NULL, nonblocking); } /* * __mm_populate - populate and/or mlock pages within a range of address space. * * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap * flags. VMAs must be already marked with the desired vm_flags, and * mmap_sem must not be held. */ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) { struct mm_struct *mm = current->mm; unsigned long end, nstart, nend; struct vm_area_struct *vma = NULL; int locked = 0; long ret = 0; end = start + len; for (nstart = start; nstart < end; nstart = nend) { /* * We want to fault in pages for [nstart; end) address range. * Find first corresponding VMA. */ if (!locked) { locked = 1; down_read(&mm->mmap_sem); vma = find_vma(mm, nstart); } else if (nstart >= vma->vm_end) vma = vma->vm_next; if (!vma || vma->vm_start >= end) break; /* * Set [nstart; nend) to intersection of desired address * range with the first VMA. Also, skip undesirable VMA types. */ nend = min(end, vma->vm_end); if (vma->vm_flags & (VM_IO | VM_PFNMAP)) continue; if (nstart < vma->vm_start) nstart = vma->vm_start; /* * Now fault in a range of pages. populate_vma_page_range() * double checks the vma flags, so that it won't mlock pages * if the vma was already munlocked. */ ret = populate_vma_page_range(vma, nstart, nend, &locked); if (ret < 0) { if (ignore_errors) { ret = 0; continue; /* continue at next VMA */ } break; } nend = nstart + ret * PAGE_SIZE; ret = 0; } if (locked) up_read(&mm->mmap_sem); return ret; /* 0 or negative error code */ } /** * get_dump_page() - pin user page in memory while writing it to core dump * @addr: user address * * Returns struct page pointer of user page pinned for dump, * to be freed afterwards by put_page(). * * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found - * allowing a hole to be left in the corefile to save diskspace. 
* * Called without mmap_sem, but after all other threads have been killed. */ #ifdef CONFIG_ELF_CORE struct page *get_dump_page(unsigned long addr) { struct vm_area_struct *vma; struct page *page; if (__get_user_pages(current, current->mm, addr, 1, FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma, NULL) < 1) return NULL; flush_cache_page(vma, addr, page_to_pfn(page)); return page; } #endif /* CONFIG_ELF_CORE */ /* * Generic Fast GUP * * get_user_pages_fast attempts to pin user pages by walking the page * tables directly and avoids taking locks. Thus the walker needs to be * protected from page table pages being freed from under it, and should * block any THP splits. * * One way to achieve this is to have the walker disable interrupts, and * rely on IPIs from the TLB flushing code blocking before the page table * pages are freed. This is unsuitable for architectures that do not need * to broadcast an IPI when invalidating TLBs. * * Another way to achieve this is to batch up page table containing pages * belonging to more than one mm_user, then rcu_sched a callback to free those * pages. Disabling interrupts will allow the fast_gup walker to both block * the rcu_sched callback, and an IPI that we broadcast for splitting THPs * (which is a relatively rare event). The code below adopts this strategy. * * Before activating this code, please be aware that the following assumptions * are currently made: * * *) Either HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to * free pages containing page tables or TLB flushing requires IPI broadcast. * * *) ptes can be read atomically by the architecture. * * *) access_ok is sufficient to validate userspace address ranges. * * The last two assumptions can be relaxed by the addition of helper functions. * * This code is based heavily on the PowerPC implementation by Nick Piggin. */ #ifdef CONFIG_HAVE_GENERIC_GUP #ifndef gup_get_pte /* * We assume that the PTE can be read atomically. If this is not the case for * your architecture, please provide the helper. */ static inline pte_t gup_get_pte(pte_t *ptep) { return READ_ONCE(*ptep); } #endif static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages) { while ((*nr) - nr_start) { struct page *page = pages[--(*nr)]; ClearPageReferenced(page); put_page(page); } } /* * Return the compund head page with ref appropriately incremented, * or NULL if that failed. */ static inline struct page *try_get_compound_head(struct page *page, int refs) { struct page *head = compound_head(page); if (WARN_ON_ONCE(page_ref_count(head) < 0)) return NULL; if (unlikely(!page_cache_add_speculative(head, refs))) return NULL; return head; } #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; int nr_start = *nr, ret = 0; pte_t *ptep, *ptem; ptem = ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); struct page *head, *page; /* * Similar to the PMD case below, NUMA hinting must take slow * path using the pte_protnone check. 
*/ if (pte_protnone(pte)) goto pte_unmap; if (!pte_access_permitted(pte, write)) goto pte_unmap; if (pte_devmap(pte)) { pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); goto pte_unmap; } } else if (pte_special(pte)) goto pte_unmap; VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); head = try_get_compound_head(page, 1); if (!head) goto pte_unmap; if (unlikely(pte_val(pte) != pte_val(*ptep))) { put_page(head); goto pte_unmap; } VM_BUG_ON_PAGE(compound_head(page) != head, page); SetPageReferenced(page); pages[*nr] = page; (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); ret = 1; pte_unmap: if (pgmap) put_dev_pagemap(pgmap); pte_unmap(ptem); return ret; } #else /* * If we can't determine whether or not a pte is special, then fail immediately * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not * to be special. * * For a futex to be placed on a THP tail page, get_futex_key requires a * __get_user_pages_fast implementation that can pin pages. Thus it's still * useful to have gup_huge_pmd even if we can't operate on ptes. */ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { return 0; } #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ #if defined(__HAVE_ARCH_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE) static int __gup_device_huge(unsigned long pfn, unsigned long addr, unsigned long end, struct page **pages, int *nr) { int nr_start = *nr; struct dev_pagemap *pgmap = NULL; do { struct page *page = pfn_to_page(pfn); pgmap = get_dev_pagemap(pfn, pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); return 0; } SetPageReferenced(page); pages[*nr] = page; get_page(page); (*nr)++; pfn++; } while (addr += PAGE_SIZE, addr != end); if (pgmap) put_dev_pagemap(pgmap); return 1; } static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { unsigned long fault_pfn; int nr_start = *nr; fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); if (!__gup_device_huge(fault_pfn, addr, end, pages, nr)) return 0; if (unlikely(pud_val(orig) != pud_val(*pudp))) { undo_dev_pagemap(nr, nr_start, pages); return 0; } return 1; } #else static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, unsigned long end, struct page **pages, int *nr) { BUILD_BUG(); return 0; } #endif static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pmd_page(orig), refs); if 
(!head) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pud_access_permitted(orig, write)) return 0; if (pud_devmap(orig)) return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr); refs = 0; page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pud_page(orig), refs); if (!head) { *nr -= refs; return 0; } if (unlikely(pud_val(orig) != pud_val(*pudp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { int refs; struct page *head, *page; if (!pgd_access_permitted(orig, write)) return 0; BUILD_BUG_ON(pgd_devmap(orig)); refs = 0; page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pgd_page(orig), refs); if (!head) { *nr -= refs; return 0; } if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; } static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; pmdp = pmd_offset(&pud, addr); do { pmd_t pmd = READ_ONCE(*pmdp); next = pmd_addr_end(addr, end); if (!pmd_present(pmd)) return 0; if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))) { /* * NUMA hinting faults need to be handled in the GUP * slowpath for accounting purposes and so that they * can be serialised against THP migration. 
*/ if (pmd_protnone(pmd)) return 0; if (!gup_huge_pmd(pmd, pmdp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) { /* * architecture have different format for hugetlbfs * pmd format and THP pmd format */ if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr, PMD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) return 0; } while (pmdp++, addr = next, addr != end); return 1; } static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; pudp = pud_offset(&p4d, addr); do { pud_t pud = READ_ONCE(*pudp); next = pud_addr_end(addr, end); if (pud_none(pud)) return 0; if (unlikely(pud_huge(pud))) { if (!gup_huge_pud(pud, pudp, addr, next, write, pages, nr)) return 0; } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) { if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; p4dp = p4d_offset(&pgd, addr); do { p4d_t p4d = READ_ONCE(*p4dp); next = p4d_addr_end(addr, end); if (p4d_none(p4d)) return 0; BUILD_BUG_ON(p4d_huge(p4d)); if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) { if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, P4D_SHIFT, next, write, pages, nr)) return 0; } else if (!gup_pud_range(p4d, addr, next, write, pages, nr)) return 0; } while (p4dp++, addr = next, addr != end); return 1; } static void gup_pgd_range(unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { unsigned long next; pgd_t *pgdp; pgdp = pgd_offset(current->mm, addr); do { pgd_t pgd = READ_ONCE(*pgdp); next = pgd_addr_end(addr, end); if (pgd_none(pgd)) return; if (unlikely(pgd_huge(pgd))) { if (!gup_huge_pgd(pgd, pgdp, addr, next, write, pages, nr)) return; } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, write, pages, nr)) return; } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } #ifndef gup_fast_permitted /* * Check if it's allowed to use __get_user_pages_fast() for the range, or * we need to fall back to the slow version: */ bool gup_fast_permitted(unsigned long start, int nr_pages, int write) { unsigned long len, end; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; return end >= start; } #endif /* * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to * the regular GUP. * Note a difference with get_user_pages_fast: this always returns the * number of pages pinned, 0 if no pages were pinned. */ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long len, end; unsigned long flags; int nr = 0; start &= PAGE_MASK; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (unlikely(!access_ok((void __user *)start, len))) return 0; /* * Disable interrupts. We use the nested form as we can already have * interrupts disabled by get_futex_key. * * With interrupts disabled, we block page table pages from being * freed from under us. See struct mmu_table_batch comments in * include/asm-generic/tlb.h for more details. * * We do not adopt an rcu_read_lock(.) 
here as we also want to * block IPIs that come from THPs splitting. */ if (gup_fast_permitted(start, nr_pages, write)) { local_irq_save(flags); gup_pgd_range(start, end, write, pages, &nr); local_irq_restore(flags); } return nr; } /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @write: whether pages will be written to * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Attempt to pin user pages in memory without taking mm->mmap_sem. * If not successful, it will fall back to taking the lock and * calling get_user_pages(). * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. */ int get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { unsigned long addr, len, end; int nr = 0, ret = 0; start &= PAGE_MASK; addr = start; len = (unsigned long) nr_pages << PAGE_SHIFT; end = start + len; if (nr_pages <= 0) return 0; if (unlikely(!access_ok((void __user *)start, len))) return -EFAULT; if (gup_fast_permitted(start, nr_pages, write)) { local_irq_disable(); gup_pgd_range(addr, end, write, pages, &nr); local_irq_enable(); ret = nr; } if (nr < nr_pages) { /* Try to get the remaining pages with get_user_pages */ start += nr << PAGE_SHIFT; pages += nr; ret = get_user_pages_unlocked(start, nr_pages - nr, pages, write ? FOLL_WRITE : 0); /* Have to be a bit careful with return values */ if (nr > 0) { if (ret < 0) ret = nr; else ret += nr; } } return ret; } #endif /* CONFIG_HAVE_GENERIC_GUP */
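The file-level before/after blobs of mm/gup.c end here. The two standalone copies of gup_huge_pmd() that follow are this record's function-level view of the same change — the first still takes its references with compound_head() plus page_cache_add_speculative(), the second routes them through try_get_compound_head() — and the diff dict after them enumerates every changed hunk in the file.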
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = compound_head(pmd_page(orig)); if (!page_cache_add_speculative(head, refs)) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; }
static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct page *head, *page; int refs; if (!pmd_access_permitted(orig, write)) return 0; if (pmd_devmap(orig)) return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr); refs = 0; page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); do { pages[*nr] = page; (*nr)++; page++; refs++; } while (addr += PAGE_SIZE, addr != end); head = try_get_compound_head(pmd_page(orig), refs); if (!head) { *nr -= refs; return 0; } if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) { *nr -= refs; while (refs--) put_page(head); return 0; } SetPageReferenced(head); return 1; }
{'added': [(160, '\tif (flags & FOLL_GET) {'), (161, '\t\tif (unlikely(!try_get_page(page))) {'), (162, '\t\t\tpage = ERR_PTR(-ENOMEM);'), (163, '\t\t\tgoto out;'), (164, '\t\t}'), (165, '\t}'), (302, '\t\t\tif (unlikely(!try_get_page(page))) {'), (303, '\t\t\t\tspin_unlock(ptl);'), (304, '\t\t\t\treturn ERR_PTR(-ENOMEM);'), (305, '\t\t\t}'), (507, '\tif (unlikely(!try_get_page(*page))) {'), (508, '\t\tret = -ENOMEM;'), (509, '\t\tgoto unmap;'), (510, '\t}'), (1406, '/*'), (1407, ' * Return the compund head page with ref appropriately incremented,'), (1408, ' * or NULL if that failed.'), (1409, ' */'), (1410, 'static inline struct page *try_get_compound_head(struct page *page, int refs)'), (1411, '{'), (1412, '\tstruct page *head = compound_head(page);'), (1413, '\tif (WARN_ON_ONCE(page_ref_count(head) < 0))'), (1414, '\t\treturn NULL;'), (1415, '\tif (unlikely(!page_cache_add_speculative(head, refs)))'), (1416, '\t\treturn NULL;'), (1417, '\treturn head;'), (1418, '}'), (1419, ''), (1455, '\t\thead = try_get_compound_head(page, 1);'), (1456, '\t\tif (!head)'), (1595, '\thead = try_get_compound_head(pmd_page(orig), refs);'), (1596, '\tif (!head) {'), (1633, '\thead = try_get_compound_head(pud_page(orig), refs);'), (1634, '\tif (!head) {'), (1670, '\thead = try_get_compound_head(pgd_page(orig), refs);'), (1671, '\tif (!head) {')], 'deleted': [(160, '\tif (flags & FOLL_GET)'), (161, '\t\tget_page(page);'), (298, '\t\t\tget_page(page);'), (500, '\tget_page(*page);'), (1430, '\t\thead = compound_head(page);'), (1432, '\t\tif (!page_cache_get_speculative(head))'), (1571, '\thead = compound_head(pmd_page(orig));'), (1572, '\tif (!page_cache_add_speculative(head, refs)) {'), (1609, '\thead = compound_head(pud_page(orig));'), (1610, '\tif (!page_cache_add_speculative(head, refs)) {'), (1646, '\thead = compound_head(pgd_page(orig));'), (1647, '\tif (!page_cache_add_speculative(head, refs)) {')]}
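The diff dict above captures the whole of the fix: every site in mm/gup.c that took a page reference with a bare get_page() or page_cache_{get,add}_speculative() now goes through try_get_page() or the new try_get_compound_head() helper, both of which refuse the reference once the counter has already wrapped negative. A minimal user-space sketch of that guard pattern, in standalone C++ (FakePage and try_get_page_ref are illustrative stand-ins, not kernel API):

#include <atomic>
#include <cstdio>

// Illustrative stand-in for struct page's _refcount: a 32-bit signed
// counter, so ~2^31 surplus references wrap it negative.
struct FakePage {
    std::atomic<int> refcount{1};
};

// Mirrors the shape of try_get_page()/try_get_compound_head(): refuse to
// take a reference once the counter has gone negative (i.e. overflowed),
// instead of blindly incrementing past the wrap point.
bool try_get_page_ref(FakePage* page, int refs) {
    if (page->refcount.load() < 0)   // the page_ref_count(head) < 0 check
        return false;
    page->refcount.fetch_add(refs);  // atomic add, defined to wrap modulo 2^32
    return true;
}

int main() {
    FakePage page;
    page.refcount = 0x7fffffff;      // simulate a counter driven to INT_MAX
    page.refcount.fetch_add(1);      // an unchecked get_page() would wrap here
    std::printf("after overflow: %d\n", page.refcount.load());   // negative
    // From here, put_page() calls could drive the counter back to 0 and free
    // the page while real references remain: the CWE-416 use-after-free.
    std::printf("guarded get succeeds: %s\n",
                try_get_page_ref(&page, 1) ? "yes" : "no");       // prints "no"
    return 0;
}

In the kernel the failed acquisition additionally fires WARN_ON_ONCE and makes the GUP path bail out (e.g. with -ENOMEM); the sketch keeps only the check-then-increment ordering.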
num_lines_added: 36
num_lines_deleted: 12
num_lines_in_file: 1,194
num_tokens_in_file: 7,593
num_lines_in_method: 31
num_tokens_in_method: 208
method_complexity: 7
repo: https://github.com/torvalds/linux
cve_id: CVE-2019-11487
cwe_id: CWE-416
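Those last two fields tie the record to its advisory: CVE-2019-11487 is the page-refcount overflow in get_user_pages(), and CWE-416 classifies its consequence. The arithmetic is the core of the bug: page->_refcount is a 32-bit signed counter, so about 2^31 surplus references flip it negative and 2^32 bring it back to where it started; once the wrapped counter hits zero, the page is freed while live mappings still point at it — the use-after-free. The page_ref_count(head) < 0 test in try_get_compound_head() is exactly the saturation check that stops the wrap.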
id: 2,975
file_name: sparse_fill_empty_rows_op.cc
programming_language: C++
method_name: tensorflow::SparseFillEmptyRowsOpImpl
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/sparse_fill_empty_rows_op.h" #include <algorithm> #include <numeric> #include <unordered_map> #include <utility> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { using CPUDevice = Eigen::ThreadPoolDevice; using GPUDevice = Eigen::GpuDevice; namespace functor { template <typename T, typename Tindex> struct SparseFillEmptyRows<CPUDevice, T, Tindex> { Status operator()(OpKernelContext* context, const Tensor& default_value_t, const Tensor& indices_t, const Tensor& values_t, const Tensor& dense_shape_t, typename AsyncOpKernel::DoneCallback done) { (void)done; // Unused (only used in GPU implementation) const int kOutputIndicesOutput = 0; const int kOutputValuesOutput = 1; const int kEmptyRowIndicatorOutput = 2; const int kReverseIndexMapOutput = 3; const T& default_value = default_value_t.scalar<T>()(); const auto indices = indices_t.matrix<Tindex>(); const auto values = values_t.vec<T>(); const auto dense_shape = dense_shape_t.vec<Tindex>(); const Tindex N = indices_t.shape().dim_size(0); const Tindex dense_rows = dense_shape(0); bool* empty_row_indicator = nullptr; if (context->output_required(kEmptyRowIndicatorOutput)) { Tensor* empty_row_indicator_t = nullptr; TF_RETURN_IF_ERROR(context->allocate_output(kEmptyRowIndicatorOutput, TensorShape({dense_rows}), &empty_row_indicator_t)); empty_row_indicator = empty_row_indicator_t->vec<bool>().data(); } Tindex* reverse_index_map = nullptr; if (context->output_required(kReverseIndexMapOutput)) { Tensor* reverse_index_map_t = nullptr; TF_RETURN_IF_ERROR(context->allocate_output( kReverseIndexMapOutput, TensorShape({N}), &reverse_index_map_t)); reverse_index_map = reverse_index_map_t->vec<Tindex>().data(); } int rank = indices_t.shape().dim_size(1); if (dense_rows == 0) { if (N != 0) { return errors::InvalidArgument( "Received SparseTensor with dense_shape[0] = 0 but " "indices.shape[0] = ", N); } Tensor* output_indices_t; TensorShape output_indices_shape({0, rank}); TF_RETURN_IF_ERROR(context->allocate_output( kOutputIndicesOutput, output_indices_shape, &output_indices_t)); Tensor* output_values_t; TF_RETURN_IF_ERROR(context->allocate_output( kOutputValuesOutput, TensorShape({0}), &output_values_t)); // Exit early, nothing more to do. 
return Status::OK(); } bool rows_are_ordered = true; Tindex last_indices_row = 0; std::vector<Tindex> csr_offset(dense_rows, 0); for (int i = 0; i < N; ++i) { const Tindex row = indices(i, 0); if (row < 0 || row >= dense_rows) { return errors::InvalidArgument("indices(", i, ", 0) is invalid: ", row, " >= ", dense_rows); } ++csr_offset[row]; rows_are_ordered = rows_are_ordered & (row >= last_indices_row); last_indices_row = row; } bool all_rows_full = true; for (int row = 0; row < dense_rows; ++row) { // csr_offset here describes the number of elements in this dense row bool row_empty = (csr_offset[row] == 0); if (empty_row_indicator) { empty_row_indicator[row] = row_empty; } all_rows_full = all_rows_full & !row_empty; // In filled version, each row has at least one element. csr_offset[row] = std::max(csr_offset[row], Tindex{1}); // Update csr_offset to represent the number of elements up to and // including dense_row + 1: // csr_offset(0) == #{elements of row 0} // csr_offset(1) == #{elements of row 1} + #{elements of row 0} // .. // csr_offset(i) == starting index for elements in row i + 1. if (row > 0) { csr_offset[row] += csr_offset[row - 1]; } } if (all_rows_full && rows_are_ordered) { context->set_output(kOutputIndicesOutput, indices_t); context->set_output(kOutputValuesOutput, values_t); if (reverse_index_map) { for (Tindex i = 0; i < N; ++i) { reverse_index_map[i] = i; } } } else { Tensor* output_indices_t; const Tindex N_full = csr_offset[dense_rows - 1]; TensorShape output_indices_shape({N_full, rank}); TF_RETURN_IF_ERROR(context->allocate_output( kOutputIndicesOutput, output_indices_shape, &output_indices_t)); auto output_indices = output_indices_t->matrix<Tindex>(); Tensor* output_values_t; TF_RETURN_IF_ERROR(context->allocate_output( kOutputValuesOutput, TensorShape({N_full}), &output_values_t)); auto output_values = output_values_t->vec<T>(); std::vector<Tindex> filled_count(dense_rows, 0); // Fill in values for rows that are not missing for (Tindex i = 0; i < N; ++i) { const Tindex row = indices(i, 0); Tindex& offset = filled_count[row]; const Tindex output_i = ((row == 0) ? 0 : csr_offset[row - 1]) + offset; offset++; // Increment the filled count for this row. std::copy_n(&indices(i, 0), rank, &output_indices(output_i, 0)); output_values(output_i) = values(i); // We'll need this reverse index map to backprop correctly. if (reverse_index_map) { reverse_index_map[i] = output_i; } } // Fill in values for rows that are missing for (Tindex row = 0; row < dense_rows; ++row) { const Tindex row_count = filled_count[row]; if (row_count == 0) { // We haven't filled this row const Tindex starting_index = (row == 0) ? 0 : csr_offset[row - 1]; // Remaining index values were set to zero already. // Just need to set the row index in the right location. output_indices(starting_index, 0) = row; for (Tindex col = 1; col < rank; ++col) { output_indices(starting_index, col) = 0; } output_values(starting_index) = default_value; } } } return Status::OK(); } }; } // namespace functor namespace { template <typename Device, typename T, typename Tindex> void SparseFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { // Note that setting this empty lambda as the default parameter value directly // can cause strange compiler/linker errors, so we do it like this instead. 
if (!done) { done = [] {}; } const int kIndicesInput = 0; const int kValuesInput = 1; const int kDenseShapeInput = 2; const int kDefaultValueInput = 3; const Tensor& indices_t = context->input(kIndicesInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& dense_shape_t = context->input(kDenseShapeInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(dense_shape_t.shape()), errors::InvalidArgument("dense_shape must be a vector, saw: ", dense_shape_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()), errors::InvalidArgument("indices must be a matrix, saw: ", indices_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); // TODO(ebrevdo): add shape checks between values, indices, // dense_shape. Also add check that dense rank > 0. using FunctorType = functor::SparseFillEmptyRows<Device, T, Tindex>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, indices_t, values_t, dense_shape_t, done), done); } } // namespace template <typename Device, typename T, typename Tindex> class SparseFillEmptyRowsOp : public OpKernel { public: explicit SparseFillEmptyRowsOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { SparseFillEmptyRowsOpImpl<Device, T, Tindex>(context); } }; #define REGISTER_KERNELS(D, T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRows") \ .Device(DEVICE_##D) \ .HostMemory("dense_shape") \ .TypeConstraint<T>("T"), \ SparseFillEmptyRowsOp<D##Device, T, Tindex>) #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64) TF_CALL_ALL_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS #if 0 && (GOOGLE_CUDA || TENSORFLOW_USE_ROCM) // The GPU implementation is async because it requires waiting for a // host->device memcpy before the output is allocated (similar to // SegmentSumGPUOp). template <typename T, typename Tindex> class SparseFillEmptyRowsGPUOp : public AsyncOpKernel { public: explicit SparseFillEmptyRowsGPUOp(OpKernelConstruction* context) : AsyncOpKernel(context) {} void ComputeAsync(OpKernelContext* context, DoneCallback done) override { SparseFillEmptyRowsOpImpl<GPUDevice, T, Tindex>(context, done); } }; #define REGISTER_KERNELS(T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRows") \ .Device(DEVICE_GPU) \ .HostMemory("dense_shape") \ .TypeConstraint<T>("T"), \ SparseFillEmptyRowsGPUOp<T, Tindex>) // Forward declarations of the functor specializations for GPU. 
namespace functor { #define DECLARE_GPU_SPEC(T, Tindex) \ template <> \ Status SparseFillEmptyRows<GPUDevice, T, Tindex>::operator()( \ OpKernelContext* context, const Tensor& default_value_t, \ const Tensor& indices_t, const Tensor& values_t, \ const Tensor& dense_shape_t, typename AsyncOpKernel::DoneCallback done); \ extern template struct SparseFillEmptyRows<GPUDevice, T, Tindex>; #define DECLARE_GPU_SPEC_INT64(T) DECLARE_GPU_SPEC(T, int64) TF_CALL_POD_TYPES(DECLARE_GPU_SPEC_INT64) #undef DECLARE_GPU_SPEC_INT64 #undef DECLARE_GPU_SPEC } // namespace functor #define REGISTER_KERNELS_TINDEX(T) REGISTER_KERNELS(T, int64) TF_CALL_POD_TYPES(REGISTER_KERNELS_TINDEX) #undef REGISTER_KERNELS_TINDEX #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM namespace functor { template <typename T, typename Tindex> struct SparseFillEmptyRowsGrad<CPUDevice, T, Tindex> { Status operator()(OpKernelContext* context, typename TTypes<Tindex>::ConstVec reverse_index_map, typename TTypes<T>::ConstVec grad_values, typename TTypes<T>::Vec d_values, typename TTypes<T>::Scalar d_default_value) { const CPUDevice& device = context->eigen_device<CPUDevice>(); const Tindex N = reverse_index_map.dimension(0); const Tindex N_full = grad_values.dimension(0); T& d_default_value_scalar = d_default_value(); d_default_value_scalar = T(); Tensor visited_t; TF_RETURN_IF_ERROR( context->allocate_temp(DT_BOOL, TensorShape({N_full}), &visited_t)); auto visited = visited_t.vec<bool>(); visited.device(device) = visited.constant(false); for (int i = 0; i < N; ++i) { // Locate the index of the output of the forward prop associated // with this location in the input of the forward prop. Copy // the gradient into it. Mark it as visited. int64 reverse_index = reverse_index_map(i); if (reverse_index < 0 || reverse_index >= N_full) { return errors::InvalidArgument( "Elements in reverse index must be in [0, ", N_full, ") but got ", reverse_index); } d_values(i) = grad_values(reverse_index); visited(reverse_index) = true; } for (int j = 0; j < N_full; ++j) { // The default value gradient gets the accumulated remainder of // the backprop values (since the default value was used to fill // in these slots in the forward calculation). 
if (!visited(j)) { d_default_value_scalar += grad_values(j); } } return Status::OK(); } }; } // namespace functor template <typename Device, typename T, typename Tindex> class SparseFillEmptyRowsGradOp : public OpKernel { public: explicit SparseFillEmptyRowsGradOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor* reverse_index_map_t; const Tensor* grad_values_t; OP_REQUIRES_OK(context, context->input("reverse_index_map", &reverse_index_map_t)); OP_REQUIRES_OK(context, context->input("grad_values", &grad_values_t)); OP_REQUIRES( context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()), errors::InvalidArgument("reverse_index_map must be a vector, saw: ", reverse_index_map_t->shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(grad_values_t->shape()), errors::InvalidArgument("grad_values must be a vector, saw: ", grad_values_t->shape().DebugString())); const auto reverse_index_map = reverse_index_map_t->vec<Tindex>(); const auto grad_values = grad_values_t->vec<T>(); const Tindex N = reverse_index_map_t->shape().dim_size(0); Tensor* d_values_t; OP_REQUIRES_OK(context, context->allocate_output( "d_values", TensorShape({N}), &d_values_t)); auto d_values = d_values_t->vec<T>(); Tensor* d_default_value_t; OP_REQUIRES_OK(context, context->allocate_output("d_default_value", TensorShape({}), &d_default_value_t)); auto d_default_value = d_default_value_t->scalar<T>(); OP_REQUIRES_OK(context, functor::SparseFillEmptyRowsGrad<Device, T, Tindex>()( context, reverse_index_map, grad_values, d_values, d_default_value)); } }; #define REGISTER_KERNELS(D, T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRowsGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T"), \ SparseFillEmptyRowsGradOp<D##Device, T, Tindex>) #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64) TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #if 0 && (GOOGLE_CUDA || TENSORFLOW_USE_ROCM) // Forward declarations of the functor specializations for GPU. namespace functor { #define DECLARE_GPU_SPEC(T, Tindex) \ template <> \ Status SparseFillEmptyRowsGrad<GPUDevice, T, Tindex>::operator()( \ OpKernelContext* context, \ typename TTypes<Tindex>::ConstVec reverse_index_map, \ typename TTypes<T>::ConstVec grad_values, \ typename TTypes<T>::Vec d_values, \ typename TTypes<T>::Scalar d_default_value); \ extern template struct SparseFillEmptyRowsGrad<GPUDevice, T, Tindex>; #define DECLARE_GPU_SPEC_INT64(T) DECLARE_GPU_SPEC(T, int64) TF_CALL_REAL_NUMBER_TYPES(DECLARE_GPU_SPEC_INT64); #undef DECLARE_GPU_SPEC_INT64 #undef DECLARE_GPU_SPEC } // namespace functor #define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T, int64) TF_CALL_REAL_NUMBER_TYPES(REGISTER_GPU_KERNELS); #undef REGISTER_GPU_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #undef REGISTER_KERNELS } // namespace tensorflow
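Note the gap this record documents: in the code_before above, SparseFillEmptyRowsOpImpl checks that dense_shape is a vector, indices a matrix, values a vector, and default_value a scalar, but never that dense_shape actually contains an element, so the functor's read of dense_shape(0) is an out-of-bounds access whenever the op is fed an empty shape tensor. The code_after that follows adds exactly that check.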
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #define EIGEN_USE_THREADS #include "tensorflow/core/kernels/sparse_fill_empty_rows_op.h" #include <algorithm> #include <numeric> #include <unordered_map> #include <utility> #include <vector> #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/register_types.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_util.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/gtl/inlined_vector.h" #include "tensorflow/core/util/sparse/sparse_tensor.h" namespace tensorflow { using CPUDevice = Eigen::ThreadPoolDevice; using GPUDevice = Eigen::GpuDevice; namespace functor { template <typename T, typename Tindex> struct SparseFillEmptyRows<CPUDevice, T, Tindex> { Status operator()(OpKernelContext* context, const Tensor& default_value_t, const Tensor& indices_t, const Tensor& values_t, const Tensor& dense_shape_t, typename AsyncOpKernel::DoneCallback done) { (void)done; // Unused (only used in GPU implementation) const int kOutputIndicesOutput = 0; const int kOutputValuesOutput = 1; const int kEmptyRowIndicatorOutput = 2; const int kReverseIndexMapOutput = 3; const T& default_value = default_value_t.scalar<T>()(); const auto indices = indices_t.matrix<Tindex>(); const auto values = values_t.vec<T>(); const auto dense_shape = dense_shape_t.vec<Tindex>(); const Tindex N = indices_t.shape().dim_size(0); const Tindex dense_rows = dense_shape(0); bool* empty_row_indicator = nullptr; if (context->output_required(kEmptyRowIndicatorOutput)) { Tensor* empty_row_indicator_t = nullptr; TF_RETURN_IF_ERROR(context->allocate_output(kEmptyRowIndicatorOutput, TensorShape({dense_rows}), &empty_row_indicator_t)); empty_row_indicator = empty_row_indicator_t->vec<bool>().data(); } Tindex* reverse_index_map = nullptr; if (context->output_required(kReverseIndexMapOutput)) { Tensor* reverse_index_map_t = nullptr; TF_RETURN_IF_ERROR(context->allocate_output( kReverseIndexMapOutput, TensorShape({N}), &reverse_index_map_t)); reverse_index_map = reverse_index_map_t->vec<Tindex>().data(); } int rank = indices_t.shape().dim_size(1); if (dense_rows == 0) { if (N != 0) { return errors::InvalidArgument( "Received SparseTensor with dense_shape[0] = 0 but " "indices.shape[0] = ", N); } Tensor* output_indices_t; TensorShape output_indices_shape({0, rank}); TF_RETURN_IF_ERROR(context->allocate_output( kOutputIndicesOutput, output_indices_shape, &output_indices_t)); Tensor* output_values_t; TF_RETURN_IF_ERROR(context->allocate_output( kOutputValuesOutput, TensorShape({0}), &output_values_t)); // Exit early, nothing more to do. 
return Status::OK(); } bool rows_are_ordered = true; Tindex last_indices_row = 0; std::vector<Tindex> csr_offset(dense_rows, 0); for (int i = 0; i < N; ++i) { const Tindex row = indices(i, 0); if (row < 0 || row >= dense_rows) { return errors::InvalidArgument("indices(", i, ", 0) is invalid: ", row, " >= ", dense_rows); } ++csr_offset[row]; rows_are_ordered = rows_are_ordered & (row >= last_indices_row); last_indices_row = row; } bool all_rows_full = true; for (int row = 0; row < dense_rows; ++row) { // csr_offset here describes the number of elements in this dense row bool row_empty = (csr_offset[row] == 0); if (empty_row_indicator) { empty_row_indicator[row] = row_empty; } all_rows_full = all_rows_full & !row_empty; // In filled version, each row has at least one element. csr_offset[row] = std::max(csr_offset[row], Tindex{1}); // Update csr_offset to represent the number of elements up to and // including dense_row + 1: // csr_offset(0) == #{elements of row 0} // csr_offset(1) == #{elements of row 1} + #{elements of row 0} // .. // csr_offset(i) == starting index for elements in row i + 1. if (row > 0) { csr_offset[row] += csr_offset[row - 1]; } } if (all_rows_full && rows_are_ordered) { context->set_output(kOutputIndicesOutput, indices_t); context->set_output(kOutputValuesOutput, values_t); if (reverse_index_map) { for (Tindex i = 0; i < N; ++i) { reverse_index_map[i] = i; } } } else { Tensor* output_indices_t; const Tindex N_full = csr_offset[dense_rows - 1]; TensorShape output_indices_shape({N_full, rank}); TF_RETURN_IF_ERROR(context->allocate_output( kOutputIndicesOutput, output_indices_shape, &output_indices_t)); auto output_indices = output_indices_t->matrix<Tindex>(); Tensor* output_values_t; TF_RETURN_IF_ERROR(context->allocate_output( kOutputValuesOutput, TensorShape({N_full}), &output_values_t)); auto output_values = output_values_t->vec<T>(); std::vector<Tindex> filled_count(dense_rows, 0); // Fill in values for rows that are not missing for (Tindex i = 0; i < N; ++i) { const Tindex row = indices(i, 0); Tindex& offset = filled_count[row]; const Tindex output_i = ((row == 0) ? 0 : csr_offset[row - 1]) + offset; offset++; // Increment the filled count for this row. std::copy_n(&indices(i, 0), rank, &output_indices(output_i, 0)); output_values(output_i) = values(i); // We'll need this reverse index map to backprop correctly. if (reverse_index_map) { reverse_index_map[i] = output_i; } } // Fill in values for rows that are missing for (Tindex row = 0; row < dense_rows; ++row) { const Tindex row_count = filled_count[row]; if (row_count == 0) { // We haven't filled this row const Tindex starting_index = (row == 0) ? 0 : csr_offset[row - 1]; // Remaining index values were set to zero already. // Just need to set the row index in the right location. output_indices(starting_index, 0) = row; for (Tindex col = 1; col < rank; ++col) { output_indices(starting_index, col) = 0; } output_values(starting_index) = default_value; } } } return Status::OK(); } }; } // namespace functor namespace { template <typename Device, typename T, typename Tindex> void SparseFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { // Note that setting this empty lambda as the default parameter value directly // can cause strange compiler/linker errors, so we do it like this instead. 
if (!done) { done = [] {}; } const int kIndicesInput = 0; const int kValuesInput = 1; const int kDenseShapeInput = 2; const int kDefaultValueInput = 3; const Tensor& indices_t = context->input(kIndicesInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& dense_shape_t = context->input(kDenseShapeInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(dense_shape_t.shape()), errors::InvalidArgument("dense_shape must be a vector, saw: ", dense_shape_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()), errors::InvalidArgument("indices must be a matrix, saw: ", indices_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); // TODO(ebrevdo): add shape checks between values, indices, // Also add check that dense rank > 0. OP_REQUIRES_ASYNC(context, dense_shape_t.NumElements() != 0, errors::InvalidArgument("Dense shape cannot be empty."), done); using FunctorType = functor::SparseFillEmptyRows<Device, T, Tindex>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, indices_t, values_t, dense_shape_t, done), done); } } // namespace template <typename Device, typename T, typename Tindex> class SparseFillEmptyRowsOp : public OpKernel { public: explicit SparseFillEmptyRowsOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { SparseFillEmptyRowsOpImpl<Device, T, Tindex>(context); } }; #define REGISTER_KERNELS(D, T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRows") \ .Device(DEVICE_##D) \ .HostMemory("dense_shape") \ .TypeConstraint<T>("T"), \ SparseFillEmptyRowsOp<D##Device, T, Tindex>) #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64) TF_CALL_ALL_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #undef REGISTER_KERNELS #if 0 && (GOOGLE_CUDA || TENSORFLOW_USE_ROCM) // The GPU implementation is async because it requires waiting for a // host->device memcpy before the output is allocated (similar to // SegmentSumGPUOp). template <typename T, typename Tindex> class SparseFillEmptyRowsGPUOp : public AsyncOpKernel { public: explicit SparseFillEmptyRowsGPUOp(OpKernelConstruction* context) : AsyncOpKernel(context) {} void ComputeAsync(OpKernelContext* context, DoneCallback done) override { SparseFillEmptyRowsOpImpl<GPUDevice, T, Tindex>(context, done); } }; #define REGISTER_KERNELS(T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRows") \ .Device(DEVICE_GPU) \ .HostMemory("dense_shape") \ .TypeConstraint<T>("T"), \ SparseFillEmptyRowsGPUOp<T, Tindex>) // Forward declarations of the functor specializations for GPU. 
namespace functor { #define DECLARE_GPU_SPEC(T, Tindex) \ template <> \ Status SparseFillEmptyRows<GPUDevice, T, Tindex>::operator()( \ OpKernelContext* context, const Tensor& default_value_t, \ const Tensor& indices_t, const Tensor& values_t, \ const Tensor& dense_shape_t, typename AsyncOpKernel::DoneCallback done); \ extern template struct SparseFillEmptyRows<GPUDevice, T, Tindex>; #define DECLARE_GPU_SPEC_INT64(T) DECLARE_GPU_SPEC(T, int64) TF_CALL_POD_TYPES(DECLARE_GPU_SPEC_INT64) #undef DECLARE_GPU_SPEC_INT64 #undef DECLARE_GPU_SPEC } // namespace functor #define REGISTER_KERNELS_TINDEX(T) REGISTER_KERNELS(T, int64) TF_CALL_POD_TYPES(REGISTER_KERNELS_TINDEX) #undef REGISTER_KERNELS_TINDEX #undef REGISTER_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM namespace functor { template <typename T, typename Tindex> struct SparseFillEmptyRowsGrad<CPUDevice, T, Tindex> { Status operator()(OpKernelContext* context, typename TTypes<Tindex>::ConstVec reverse_index_map, typename TTypes<T>::ConstVec grad_values, typename TTypes<T>::Vec d_values, typename TTypes<T>::Scalar d_default_value) { const CPUDevice& device = context->eigen_device<CPUDevice>(); const Tindex N = reverse_index_map.dimension(0); const Tindex N_full = grad_values.dimension(0); T& d_default_value_scalar = d_default_value(); d_default_value_scalar = T(); Tensor visited_t; TF_RETURN_IF_ERROR( context->allocate_temp(DT_BOOL, TensorShape({N_full}), &visited_t)); auto visited = visited_t.vec<bool>(); visited.device(device) = visited.constant(false); for (int i = 0; i < N; ++i) { // Locate the index of the output of the forward prop associated // with this location in the input of the forward prop. Copy // the gradient into it. Mark it as visited. int64 reverse_index = reverse_index_map(i); if (reverse_index < 0 || reverse_index >= N_full) { return errors::InvalidArgument( "Elements in reverse index must be in [0, ", N_full, ") but got ", reverse_index); } d_values(i) = grad_values(reverse_index); visited(reverse_index) = true; } for (int j = 0; j < N_full; ++j) { // The default value gradient gets the accumulated remainder of // the backprop values (since the default value was used to fill // in these slots in the forward calculation). 
if (!visited(j)) { d_default_value_scalar += grad_values(j); } } return Status::OK(); } }; } // namespace functor template <typename Device, typename T, typename Tindex> class SparseFillEmptyRowsGradOp : public OpKernel { public: explicit SparseFillEmptyRowsGradOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* context) override { const Tensor* reverse_index_map_t; const Tensor* grad_values_t; OP_REQUIRES_OK(context, context->input("reverse_index_map", &reverse_index_map_t)); OP_REQUIRES_OK(context, context->input("grad_values", &grad_values_t)); OP_REQUIRES( context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()), errors::InvalidArgument("reverse_index_map must be a vector, saw: ", reverse_index_map_t->shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(grad_values_t->shape()), errors::InvalidArgument("grad_values must be a vector, saw: ", grad_values_t->shape().DebugString())); const auto reverse_index_map = reverse_index_map_t->vec<Tindex>(); const auto grad_values = grad_values_t->vec<T>(); const Tindex N = reverse_index_map_t->shape().dim_size(0); Tensor* d_values_t; OP_REQUIRES_OK(context, context->allocate_output( "d_values", TensorShape({N}), &d_values_t)); auto d_values = d_values_t->vec<T>(); Tensor* d_default_value_t; OP_REQUIRES_OK(context, context->allocate_output("d_default_value", TensorShape({}), &d_default_value_t)); auto d_default_value = d_default_value_t->scalar<T>(); OP_REQUIRES_OK(context, functor::SparseFillEmptyRowsGrad<Device, T, Tindex>()( context, reverse_index_map, grad_values, d_values, d_default_value)); } }; #define REGISTER_KERNELS(D, T, Tindex) \ REGISTER_KERNEL_BUILDER(Name("SparseFillEmptyRowsGrad") \ .Device(DEVICE_##D) \ .TypeConstraint<T>("T"), \ SparseFillEmptyRowsGradOp<D##Device, T, Tindex>) #define REGISTER_CPU_KERNELS(T) REGISTER_KERNELS(CPU, T, int64) TF_CALL_NUMBER_TYPES(REGISTER_CPU_KERNELS); #undef REGISTER_CPU_KERNELS #if 0 && (GOOGLE_CUDA || TENSORFLOW_USE_ROCM) // Forward declarations of the functor specializations for GPU. namespace functor { #define DECLARE_GPU_SPEC(T, Tindex) \ template <> \ Status SparseFillEmptyRowsGrad<GPUDevice, T, Tindex>::operator()( \ OpKernelContext* context, \ typename TTypes<Tindex>::ConstVec reverse_index_map, \ typename TTypes<T>::ConstVec grad_values, \ typename TTypes<T>::Vec d_values, \ typename TTypes<T>::Scalar d_default_value); \ extern template struct SparseFillEmptyRowsGrad<GPUDevice, T, Tindex>; #define DECLARE_GPU_SPEC_INT64(T) DECLARE_GPU_SPEC(T, int64) TF_CALL_REAL_NUMBER_TYPES(DECLARE_GPU_SPEC_INT64); #undef DECLARE_GPU_SPEC_INT64 #undef DECLARE_GPU_SPEC } // namespace functor #define REGISTER_GPU_KERNELS(T) REGISTER_KERNELS(GPU, T, int64) TF_CALL_REAL_NUMBER_TYPES(REGISTER_GPU_KERNELS); #undef REGISTER_GPU_KERNELS #endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM #undef REGISTER_KERNELS } // namespace tensorflow
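As with the kernel record, the next two blocks repeat only the patched method, SparseFillEmptyRowsOpImpl, before and after the change, and the closing diff dict lists the per-line edit.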
void SparseFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { // Note that setting this empty lambda as the default parameter value directly // can cause strange compiler/linker errors, so we do it like this instead. if (!done) { done = [] {}; } const int kIndicesInput = 0; const int kValuesInput = 1; const int kDenseShapeInput = 2; const int kDefaultValueInput = 3; const Tensor& indices_t = context->input(kIndicesInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& dense_shape_t = context->input(kDenseShapeInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(dense_shape_t.shape()), errors::InvalidArgument("dense_shape must be a vector, saw: ", dense_shape_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()), errors::InvalidArgument("indices must be a matrix, saw: ", indices_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); // TODO(ebrevdo): add shape checks between values, indices, // dense_shape. Also add check that dense rank > 0. using FunctorType = functor::SparseFillEmptyRows<Device, T, Tindex>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, indices_t, values_t, dense_shape_t, done), done); }
void SparseFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { // Note that setting this empty lambda as the default parameter value directly // can cause strange compiler/linker errors, so we do it like this instead. if (!done) { done = [] {}; } const int kIndicesInput = 0; const int kValuesInput = 1; const int kDenseShapeInput = 2; const int kDefaultValueInput = 3; const Tensor& indices_t = context->input(kIndicesInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& dense_shape_t = context->input(kDenseShapeInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(dense_shape_t.shape()), errors::InvalidArgument("dense_shape must be a vector, saw: ", dense_shape_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()), errors::InvalidArgument("indices must be a matrix, saw: ", indices_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); // TODO(ebrevdo): add shape checks between values, indices, // Also add check that dense rank > 0. OP_REQUIRES_ASYNC(context, dense_shape_t.NumElements() != 0, errors::InvalidArgument("Dense shape cannot be empty."), done); using FunctorType = functor::SparseFillEmptyRows<Device, T, Tindex>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, indices_t, values_t, dense_shape_t, done), done); }
{'added': [(231, ' // Also add check that dense rank > 0.'), (232, ' OP_REQUIRES_ASYNC(context, dense_shape_t.NumElements() != 0,'), (233, ' errors::InvalidArgument("Dense shape cannot be empty."),'), (234, ' done);')], 'deleted': [(231, ' // dense_shape. Also add check that dense rank > 0.')]}
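Reading the diff together with func_before/func_after above: the patch for CVE-2021-29565 adds an OP_REQUIRES_ASYNC that rejects an empty dense_shape before the functor runs. Without that check, the functor reads the first element of dense_shape to obtain the row count, and on a zero-element tensor that read dereferences an invalid buffer, hence the CWE-476 (NULL pointer dereference) classification recorded below. As the deleted line in the diff shows, the patch also drops "dense_shape." from the TODO comment, apparently inadvertently. A minimal standalone sketch of the validate-before-read guard (plain C++, not TensorFlow code; names are illustrative):

#include <cstdio>
#include <vector>

// Illustrative validate-before-read guard: refuse an empty shape vector
// before reading shape[0], instead of dereferencing whatever an empty
// buffer happens to point at.
static bool first_dim(const std::vector<long long>& dense_shape,
                      long long* rows) {
  if (dense_shape.empty()) {
    std::fprintf(stderr, "Dense shape cannot be empty.\n");
    return false;  // analogous to the OP_REQUIRES_ASYNC added by the patch
  }
  *rows = dense_shape[0];
  return true;
}

int main() {
  long long rows = 0;
  if (first_dim({}, &rows)) std::printf("rows=%lld\n", rows);      // rejected
  if (first_dim({3, 4}, &rows)) std::printf("rows=%lld\n", rows);  // rows=3
  return 0;
}

Failing fast at the op boundary keeps the invariant local: every later read of the shape's first element can assume a non-empty tensor.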
4
1
283
2,031
37
279
2
https://github.com/tensorflow/tensorflow
CVE-2021-29565
CWE-476
2,194
interface.c
C
PHP_FUNCTION
/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Sterling Hughes <sterling@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #define ZEND_INCLUDE_FULL_WINDOWS_HEADERS #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php.h" #if HAVE_CURL #include <stdio.h> #include <string.h> #ifdef PHP_WIN32 #include <winsock2.h> #include <sys/types.h> #endif #include <curl/curl.h> #include <curl/easy.h> /* As of curl 7.11.1 this is no longer defined inside curl.h */ #ifndef HttpPost #define HttpPost curl_httppost #endif /* {{{ cruft for thread safe SSL crypto locks */ #if defined(ZTS) && defined(HAVE_CURL_SSL) # ifdef PHP_WIN32 # define PHP_CURL_NEED_OPENSSL_TSL # include <openssl/crypto.h> # else /* !PHP_WIN32 */ # if defined(HAVE_CURL_OPENSSL) # if defined(HAVE_OPENSSL_CRYPTO_H) # define PHP_CURL_NEED_OPENSSL_TSL # include <openssl/crypto.h> # else # warning \ "libcurl was compiled with OpenSSL support, but configure could not find " \ "openssl/crypto.h; thus no SSL crypto locking callbacks will be set, which may " \ "cause random crashes on SSL requests" # endif # elif defined(HAVE_CURL_GNUTLS) # if defined(HAVE_GCRYPT_H) # define PHP_CURL_NEED_GNUTLS_TSL # include <gcrypt.h> # else # warning \ "libcurl was compiled with GnuTLS support, but configure could not find " \ "gcrypt.h; thus no SSL crypto locking callbacks will be set, which may " \ "cause random crashes on SSL requests" # endif # else # warning \ "libcurl was compiled with SSL support, but configure could not determine which" \ "library was used; thus no SSL crypto locking callbacks will be set, which may " \ "cause random crashes on SSL requests" # endif /* HAVE_CURL_OPENSSL || HAVE_CURL_GNUTLS */ # endif /* PHP_WIN32 */ #endif /* ZTS && HAVE_CURL_SSL */ /* }}} */ #define SMART_STR_PREALLOC 4096 #include "zend_smart_str.h" #include "ext/standard/info.h" #include "ext/standard/file.h" #include "ext/standard/url.h" #include "php_curl.h" int le_curl; int le_curl_multi_handle; int le_curl_share_handle; #ifdef PHP_CURL_NEED_OPENSSL_TSL /* {{{ */ static MUTEX_T *php_curl_openssl_tsl = NULL; static void php_curl_ssl_lock(int mode, int n, const char * file, int line) { if (mode & CRYPTO_LOCK) { tsrm_mutex_lock(php_curl_openssl_tsl[n]); } else { tsrm_mutex_unlock(php_curl_openssl_tsl[n]); } } static unsigned long php_curl_ssl_id(void) { return (unsigned long) tsrm_thread_id(); } #endif /* }}} */ #ifdef PHP_CURL_NEED_GNUTLS_TSL /* {{{ */ static int php_curl_ssl_mutex_create(void **m) { if (*((MUTEX_T *) m) = tsrm_mutex_alloc()) { return SUCCESS; } else { return FAILURE; } } static int php_curl_ssl_mutex_destroy(void **m) { tsrm_mutex_free(*((MUTEX_T *) m)); return SUCCESS; } static int php_curl_ssl_mutex_lock(void **m) { return tsrm_mutex_lock(*((MUTEX_T *) m)); 
} static int php_curl_ssl_mutex_unlock(void **m) { return tsrm_mutex_unlock(*((MUTEX_T *) m)); } static struct gcry_thread_cbs php_curl_gnutls_tsl = { GCRY_THREAD_OPTION_USER, NULL, php_curl_ssl_mutex_create, php_curl_ssl_mutex_destroy, php_curl_ssl_mutex_lock, php_curl_ssl_mutex_unlock }; #endif /* }}} */ static void _php_curl_close_ex(php_curl *ch); static void _php_curl_close(zend_resource *rsrc); #define SAVE_CURL_ERROR(__handle, __err) (__handle)->err.no = (int) __err; #define CAAL(s, v) add_assoc_long_ex(return_value, s, sizeof(s) - 1, (zend_long) v); #define CAAD(s, v) add_assoc_double_ex(return_value, s, sizeof(s) - 1, (double) v); #define CAAS(s, v) add_assoc_string_ex(return_value, s, sizeof(s) - 1, (char *) (v ? v : "")); #define CAASTR(s, v) add_assoc_str_ex(return_value, s, sizeof(s) - 1, \ v ? zend_string_copy(v) : ZSTR_EMPTY_ALLOC()); #define CAAZ(s, v) add_assoc_zval_ex(return_value, s, sizeof(s) -1 , (zval *) v); #if defined(PHP_WIN32) || defined(__GNUC__) # define php_curl_ret(__ret) RETVAL_FALSE; return __ret; #else # define php_curl_ret(__ret) RETVAL_FALSE; return; #endif static int php_curl_option_str(php_curl *ch, zend_long option, const char *str, const int len, zend_bool make_copy) { CURLcode error = CURLE_OK; if (strlen(str) != len) { php_error_docref(NULL, E_WARNING, "Curl option contains invalid characters (\\0)"); return FAILURE; } #if LIBCURL_VERSION_NUM >= 0x071100 if (make_copy) { #endif char *copystr; /* Strings passed to libcurl as 'char *' arguments, are copied by the library since 7.17.0 */ copystr = estrndup(str, len); error = curl_easy_setopt(ch->cp, option, copystr); zend_llist_add_element(&ch->to_free->str, &copystr); #if LIBCURL_VERSION_NUM >= 0x071100 } else { error = curl_easy_setopt(ch->cp, option, str); } #endif SAVE_CURL_ERROR(ch, error) return error == CURLE_OK ? 
SUCCESS : FAILURE; } static int php_curl_option_url(php_curl *ch, const char *url, const int len) /* {{{ */ { /* Disable file:// if open_basedir are used */ if (PG(open_basedir) && *PG(open_basedir)) { #if LIBCURL_VERSION_NUM >= 0x071304 curl_easy_setopt(ch->cp, CURLOPT_PROTOCOLS, CURLPROTO_ALL & ~CURLPROTO_FILE); #else php_url *uri; if (!(uri = php_url_parse_ex(url, len))) { php_error_docref(NULL, E_WARNING, "Invalid URL '%s'", url); return FAILURE; } if (uri->scheme && !strncasecmp("file", uri->scheme, sizeof("file"))) { php_error_docref(NULL, E_WARNING, "Protocol 'file' disabled in cURL"); php_url_free(uri); return FAILURE; } php_url_free(uri); #endif } return php_curl_option_str(ch, CURLOPT_URL, url, len, 0); } /* }}} */ void _php_curl_verify_handlers(php_curl *ch, int reporterror) /* {{{ */ { php_stream *stream; ZEND_ASSERT(ch && ch->handlers); if (!Z_ISUNDEF(ch->handlers->std_err)) { stream = (php_stream *)zend_fetch_resource2_ex(&ch->handlers->std_err, NULL, php_file_le_stream(), php_file_le_pstream()); if (stream == NULL) { if (reporterror) { php_error_docref(NULL, E_WARNING, "CURLOPT_STDERR resource has gone away, resetting to stderr"); } zval_ptr_dtor(&ch->handlers->std_err); ZVAL_UNDEF(&ch->handlers->std_err); curl_easy_setopt(ch->cp, CURLOPT_STDERR, stderr); } } if (ch->handlers->read && !Z_ISUNDEF(ch->handlers->read->stream)) { stream = (php_stream *)zend_fetch_resource2_ex(&ch->handlers->read->stream, NULL, php_file_le_stream(), php_file_le_pstream()); if (stream == NULL) { if (reporterror) { php_error_docref(NULL, E_WARNING, "CURLOPT_INFILE resource has gone away, resetting to default"); } zval_ptr_dtor(&ch->handlers->read->stream); ZVAL_UNDEF(&ch->handlers->read->stream); ch->handlers->read->res = NULL; ch->handlers->read->fp = 0; curl_easy_setopt(ch->cp, CURLOPT_INFILE, (void *) ch); } } if (ch->handlers->write_header && !Z_ISUNDEF(ch->handlers->write_header->stream)) { stream = (php_stream *)zend_fetch_resource2_ex(&ch->handlers->write_header->stream, NULL, php_file_le_stream(), php_file_le_pstream()); if (stream == NULL) { if (reporterror) { php_error_docref(NULL, E_WARNING, "CURLOPT_WRITEHEADER resource has gone away, resetting to default"); } zval_ptr_dtor(&ch->handlers->write_header->stream); ZVAL_UNDEF(&ch->handlers->write_header->stream); ch->handlers->write_header->fp = 0; ch->handlers->write_header->method = PHP_CURL_IGNORE; curl_easy_setopt(ch->cp, CURLOPT_WRITEHEADER, (void *) ch); } } if (ch->handlers->write && !Z_ISUNDEF(ch->handlers->write->stream)) { stream = (php_stream *)zend_fetch_resource2_ex(&ch->handlers->write->stream, NULL, php_file_le_stream(), php_file_le_pstream()); if (stream == NULL) { if (reporterror) { php_error_docref(NULL, E_WARNING, "CURLOPT_FILE resource has gone away, resetting to default"); } zval_ptr_dtor(&ch->handlers->write->stream); ZVAL_UNDEF(&ch->handlers->write->stream); ch->handlers->write->fp = 0; ch->handlers->write->method = PHP_CURL_STDOUT; curl_easy_setopt(ch->cp, CURLOPT_FILE, (void *) ch); } } return; } /* }}} */ /* {{{ arginfo */ ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_version, 0, 0, 0) ZEND_ARG_INFO(0, version) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_init, 0, 0, 0) ZEND_ARG_INFO(0, url) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_copy_handle, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_setopt, 0) ZEND_ARG_INFO(0, ch) ZEND_ARG_INFO(0, option) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_setopt_array, 0) ZEND_ARG_INFO(0, ch) 
ZEND_ARG_ARRAY_INFO(0, options, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_exec, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_getinfo, 0, 0, 1) ZEND_ARG_INFO(0, ch) ZEND_ARG_INFO(0, option) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_error, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_errno, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_close, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() #if LIBCURL_VERSION_NUM >= 0x070c01 /* 7.12.1 */ ZEND_BEGIN_ARG_INFO(arginfo_curl_reset, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() #endif #if LIBCURL_VERSION_NUM > 0x070f03 /* 7.15.4 */ ZEND_BEGIN_ARG_INFO(arginfo_curl_escape, 0) ZEND_ARG_INFO(0, ch) ZEND_ARG_INFO(0, str) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_unescape, 0) ZEND_ARG_INFO(0, ch) ZEND_ARG_INFO(0, str) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_setopt, 0) ZEND_ARG_INFO(0, sh) ZEND_ARG_INFO(0, option) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_init, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_add_handle, 0) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_remove_handle, 0) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_multi_select, 0, 0, 1) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(0, timeout) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_multi_exec, 0, 0, 1) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(1, still_running) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_getcontent, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_multi_info_read, 0, 0, 1) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(1, msgs_in_queue) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_close, 0) ZEND_ARG_INFO(0, mh) ZEND_END_ARG_INFO() #if LIBCURL_VERSION_NUM >= 0x070c00 /* Available since 7.12.0 */ ZEND_BEGIN_ARG_INFO(arginfo_curl_strerror, 0) ZEND_ARG_INFO(0, errornum) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_strerror, 0) ZEND_ARG_INFO(0, errornum) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_curl_share_init, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_share_close, 0) ZEND_ARG_INFO(0, sh) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_share_setopt, 0) ZEND_ARG_INFO(0, sh) ZEND_ARG_INFO(0, option) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() #if LIBCURL_VERSION_NUM >= 0x071200 /* Available since 7.18.0 */ ZEND_BEGIN_ARG_INFO(arginfo_curl_pause, 0) ZEND_ARG_INFO(0, ch) ZEND_ARG_INFO(0, bitmask) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO_EX(arginfo_curlfile_create, 0, 0, 1) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, mimetype) ZEND_ARG_INFO(0, postname) ZEND_END_ARG_INFO() /* }}} */ /* {{{ curl_functions[] */ const zend_function_entry curl_functions[] = { PHP_FE(curl_init, arginfo_curl_init) PHP_FE(curl_copy_handle, arginfo_curl_copy_handle) PHP_FE(curl_version, arginfo_curl_version) PHP_FE(curl_setopt, arginfo_curl_setopt) PHP_FE(curl_setopt_array, arginfo_curl_setopt_array) PHP_FE(curl_exec, arginfo_curl_exec) PHP_FE(curl_getinfo, arginfo_curl_getinfo) PHP_FE(curl_error, arginfo_curl_error) PHP_FE(curl_errno, arginfo_curl_errno) PHP_FE(curl_close, arginfo_curl_close) #if LIBCURL_VERSION_NUM >= 0x070c00 /* 7.12.0 */ PHP_FE(curl_strerror, arginfo_curl_strerror) PHP_FE(curl_multi_strerror, arginfo_curl_multi_strerror) #endif #if LIBCURL_VERSION_NUM >= 0x070c01 /* 7.12.1 */ 
PHP_FE(curl_reset, arginfo_curl_reset) #endif #if LIBCURL_VERSION_NUM >= 0x070f04 /* 7.15.4 */ PHP_FE(curl_escape, arginfo_curl_escape) PHP_FE(curl_unescape, arginfo_curl_unescape) #endif #if LIBCURL_VERSION_NUM >= 0x071200 /* 7.18.0 */ PHP_FE(curl_pause, arginfo_curl_pause) #endif PHP_FE(curl_multi_init, arginfo_curl_multi_init) PHP_FE(curl_multi_add_handle, arginfo_curl_multi_add_handle) PHP_FE(curl_multi_remove_handle, arginfo_curl_multi_remove_handle) PHP_FE(curl_multi_select, arginfo_curl_multi_select) PHP_FE(curl_multi_exec, arginfo_curl_multi_exec) PHP_FE(curl_multi_getcontent, arginfo_curl_multi_getcontent) PHP_FE(curl_multi_info_read, arginfo_curl_multi_info_read) PHP_FE(curl_multi_close, arginfo_curl_multi_close) #if LIBCURL_VERSION_NUM >= 0x070f04 /* 7.15.4 */ PHP_FE(curl_multi_setopt, arginfo_curl_multi_setopt) #endif PHP_FE(curl_share_init, arginfo_curl_share_init) PHP_FE(curl_share_close, arginfo_curl_share_close) PHP_FE(curl_share_setopt, arginfo_curl_share_setopt) PHP_FE(curl_file_create, arginfo_curlfile_create) PHP_FE_END }; /* }}} */ /* {{{ curl_module_entry */ zend_module_entry curl_module_entry = { STANDARD_MODULE_HEADER, "curl", curl_functions, PHP_MINIT(curl), PHP_MSHUTDOWN(curl), NULL, NULL, PHP_MINFO(curl), PHP_CURL_VERSION, STANDARD_MODULE_PROPERTIES }; /* }}} */ #ifdef COMPILE_DL_CURL ZEND_GET_MODULE (curl) #endif /* {{{ PHP_INI_BEGIN */ PHP_INI_BEGIN() PHP_INI_ENTRY("curl.cainfo", "", PHP_INI_SYSTEM, NULL) PHP_INI_END() /* }}} */ /* {{{ PHP_MINFO_FUNCTION */ PHP_MINFO_FUNCTION(curl) { curl_version_info_data *d; char **p; char str[1024]; size_t n = 0; d = curl_version_info(CURLVERSION_NOW); php_info_print_table_start(); php_info_print_table_row(2, "cURL support", "enabled"); php_info_print_table_row(2, "cURL Information", d->version); sprintf(str, "%d", d->age); php_info_print_table_row(2, "Age", str); /* To update on each new cURL release using src/main.c in cURL sources */ if (d->features) { struct feat { const char *name; int bitmask; }; unsigned int i; static const struct feat feats[] = { #if LIBCURL_VERSION_NUM >= 0x070a07 /* 7.10.7 */ {"AsynchDNS", CURL_VERSION_ASYNCHDNS}, #endif #if LIBCURL_VERSION_NUM >= 0x070f04 /* 7.15.4 */ {"CharConv", CURL_VERSION_CONV}, #endif #if LIBCURL_VERSION_NUM >= 0x070a06 /* 7.10.6 */ {"Debug", CURL_VERSION_DEBUG}, {"GSS-Negotiate", CURL_VERSION_GSSNEGOTIATE}, #endif #if LIBCURL_VERSION_NUM >= 0x070c00 /* 7.12.0 */ {"IDN", CURL_VERSION_IDN}, #endif {"IPv6", CURL_VERSION_IPV6}, {"krb4", CURL_VERSION_KERBEROS4}, #if LIBCURL_VERSION_NUM >= 0x070b01 /* 7.11.1 */ {"Largefile", CURL_VERSION_LARGEFILE}, #endif {"libz", CURL_VERSION_LIBZ}, #if LIBCURL_VERSION_NUM >= 0x070a06 /* 7.10.6 */ {"NTLM", CURL_VERSION_NTLM}, #endif #if LIBCURL_VERSION_NUM >= 0x071600 /* 7.22.0 */ {"NTLMWB", CURL_VERSION_NTLM_WB}, #endif #if LIBCURL_VERSION_NUM >= 0x070a08 /* 7.10.8 */ {"SPNEGO", CURL_VERSION_SPNEGO}, #endif {"SSL", CURL_VERSION_SSL}, #if LIBCURL_VERSION_NUM >= 0x070d02 /* 7.13.2 */ {"SSPI", CURL_VERSION_SSPI}, #endif #if LIBCURL_VERSION_NUM >= 0x071504 /* 7.21.4 */ {"TLS-SRP", CURL_VERSION_TLSAUTH_SRP}, #endif #if LIBCURL_VERSION_NUM >= 0x072100 /* 7.33.0 */ {"HTTP2", CURL_VERSION_HTTP2}, #endif #if LIBCURL_VERSION_NUM >= 0x072600 /* 7.38.0 */ {"GSSAPI", CURL_VERSION_GSSAPI}, #endif #if LIBCURL_VERSION_NUM >= 0x072800 /* 7.40.0 */ {"KERBEROS5", CURL_VERSION_KERBEROS5}, {"UNIX_SOCKETS", CURL_VERSION_UNIX_SOCKETS}, #endif #if LIBCURL_VERSION_NUM >= 0x072f00 /* 7.47.0 */ {"PSL", CURL_VERSION_PSL}, #endif {NULL, 0} }; php_info_print_table_row(1, 
"Features"); for(i=0; i<sizeof(feats)/sizeof(feats[0]); i++) { if (feats[i].name) { php_info_print_table_row(2, feats[i].name, d->features & feats[i].bitmask ? "Yes" : "No"); } } } n = 0; p = (char **) d->protocols; while (*p != NULL) { n += sprintf(str + n, "%s%s", *p, *(p + 1) != NULL ? ", " : ""); p++; } php_info_print_table_row(2, "Protocols", str); php_info_print_table_row(2, "Host", d->host); if (d->ssl_version) { php_info_print_table_row(2, "SSL Version", d->ssl_version); } if (d->libz_version) { php_info_print_table_row(2, "ZLib Version", d->libz_version); } #if defined(CURLVERSION_SECOND) && CURLVERSION_NOW >= CURLVERSION_SECOND if (d->ares) { php_info_print_table_row(2, "ZLib Version", d->ares); } #endif #if defined(CURLVERSION_THIRD) && CURLVERSION_NOW >= CURLVERSION_THIRD if (d->libidn) { php_info_print_table_row(2, "libIDN Version", d->libidn); } #endif #if LIBCURL_VERSION_NUM >= 0x071300 if (d->iconv_ver_num) { php_info_print_table_row(2, "IconV Version", d->iconv_ver_num); } if (d->libssh_version) { php_info_print_table_row(2, "libSSH Version", d->libssh_version); } #endif php_info_print_table_end(); } /* }}} */ #define REGISTER_CURL_CONSTANT(__c) REGISTER_LONG_CONSTANT(#__c, __c, CONST_CS | CONST_PERSISTENT) /* {{{ PHP_MINIT_FUNCTION */ PHP_MINIT_FUNCTION(curl) { le_curl = zend_register_list_destructors_ex(_php_curl_close, NULL, "curl", module_number); le_curl_multi_handle = zend_register_list_destructors_ex(_php_curl_multi_close, NULL, "curl_multi", module_number); le_curl_share_handle = zend_register_list_destructors_ex(_php_curl_share_close, NULL, "curl_share", module_number); REGISTER_INI_ENTRIES(); /* See http://curl.haxx.se/lxr/source/docs/libcurl/symbols-in-versions or curl src/docs/libcurl/symbols-in-versions for a (almost) complete list of options and which version they were introduced */ /* Constants for curl_setopt() */ REGISTER_CURL_CONSTANT(CURLOPT_AUTOREFERER); REGISTER_CURL_CONSTANT(CURLOPT_BINARYTRANSFER); REGISTER_CURL_CONSTANT(CURLOPT_BUFFERSIZE); REGISTER_CURL_CONSTANT(CURLOPT_CAINFO); REGISTER_CURL_CONSTANT(CURLOPT_CAPATH); REGISTER_CURL_CONSTANT(CURLOPT_CONNECTTIMEOUT); REGISTER_CURL_CONSTANT(CURLOPT_COOKIE); REGISTER_CURL_CONSTANT(CURLOPT_COOKIEFILE); REGISTER_CURL_CONSTANT(CURLOPT_COOKIEJAR); REGISTER_CURL_CONSTANT(CURLOPT_COOKIESESSION); REGISTER_CURL_CONSTANT(CURLOPT_CRLF); REGISTER_CURL_CONSTANT(CURLOPT_CUSTOMREQUEST); REGISTER_CURL_CONSTANT(CURLOPT_DNS_CACHE_TIMEOUT); REGISTER_CURL_CONSTANT(CURLOPT_DNS_USE_GLOBAL_CACHE); REGISTER_CURL_CONSTANT(CURLOPT_EGDSOCKET); REGISTER_CURL_CONSTANT(CURLOPT_ENCODING); REGISTER_CURL_CONSTANT(CURLOPT_FAILONERROR); REGISTER_CURL_CONSTANT(CURLOPT_FILE); REGISTER_CURL_CONSTANT(CURLOPT_FILETIME); REGISTER_CURL_CONSTANT(CURLOPT_FOLLOWLOCATION); REGISTER_CURL_CONSTANT(CURLOPT_FORBID_REUSE); REGISTER_CURL_CONSTANT(CURLOPT_FRESH_CONNECT); REGISTER_CURL_CONSTANT(CURLOPT_FTPAPPEND); REGISTER_CURL_CONSTANT(CURLOPT_FTPLISTONLY); REGISTER_CURL_CONSTANT(CURLOPT_FTPPORT); REGISTER_CURL_CONSTANT(CURLOPT_FTP_USE_EPRT); REGISTER_CURL_CONSTANT(CURLOPT_FTP_USE_EPSV); REGISTER_CURL_CONSTANT(CURLOPT_HEADER); REGISTER_CURL_CONSTANT(CURLOPT_HEADERFUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_HTTP200ALIASES); REGISTER_CURL_CONSTANT(CURLOPT_HTTPGET); REGISTER_CURL_CONSTANT(CURLOPT_HTTPHEADER); REGISTER_CURL_CONSTANT(CURLOPT_HTTPPROXYTUNNEL); REGISTER_CURL_CONSTANT(CURLOPT_HTTP_VERSION); REGISTER_CURL_CONSTANT(CURLOPT_INFILE); REGISTER_CURL_CONSTANT(CURLOPT_INFILESIZE); REGISTER_CURL_CONSTANT(CURLOPT_INTERFACE); 
REGISTER_CURL_CONSTANT(CURLOPT_KRB4LEVEL); REGISTER_CURL_CONSTANT(CURLOPT_LOW_SPEED_LIMIT); REGISTER_CURL_CONSTANT(CURLOPT_LOW_SPEED_TIME); REGISTER_CURL_CONSTANT(CURLOPT_MAXCONNECTS); REGISTER_CURL_CONSTANT(CURLOPT_MAXREDIRS); REGISTER_CURL_CONSTANT(CURLOPT_NETRC); REGISTER_CURL_CONSTANT(CURLOPT_NOBODY); REGISTER_CURL_CONSTANT(CURLOPT_NOPROGRESS); REGISTER_CURL_CONSTANT(CURLOPT_NOSIGNAL); REGISTER_CURL_CONSTANT(CURLOPT_PORT); REGISTER_CURL_CONSTANT(CURLOPT_POST); REGISTER_CURL_CONSTANT(CURLOPT_POSTFIELDS); REGISTER_CURL_CONSTANT(CURLOPT_POSTQUOTE); REGISTER_CURL_CONSTANT(CURLOPT_PREQUOTE); REGISTER_CURL_CONSTANT(CURLOPT_PRIVATE); REGISTER_CURL_CONSTANT(CURLOPT_PROGRESSFUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_PROXY); REGISTER_CURL_CONSTANT(CURLOPT_PROXYPORT); REGISTER_CURL_CONSTANT(CURLOPT_PROXYTYPE); REGISTER_CURL_CONSTANT(CURLOPT_PROXYUSERPWD); REGISTER_CURL_CONSTANT(CURLOPT_PUT); REGISTER_CURL_CONSTANT(CURLOPT_QUOTE); REGISTER_CURL_CONSTANT(CURLOPT_RANDOM_FILE); REGISTER_CURL_CONSTANT(CURLOPT_RANGE); REGISTER_CURL_CONSTANT(CURLOPT_READDATA); REGISTER_CURL_CONSTANT(CURLOPT_READFUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_REFERER); REGISTER_CURL_CONSTANT(CURLOPT_RESUME_FROM); REGISTER_CURL_CONSTANT(CURLOPT_RETURNTRANSFER); REGISTER_CURL_CONSTANT(CURLOPT_SHARE); REGISTER_CURL_CONSTANT(CURLOPT_SSLCERT); REGISTER_CURL_CONSTANT(CURLOPT_SSLCERTPASSWD); REGISTER_CURL_CONSTANT(CURLOPT_SSLCERTTYPE); REGISTER_CURL_CONSTANT(CURLOPT_SSLENGINE); REGISTER_CURL_CONSTANT(CURLOPT_SSLENGINE_DEFAULT); REGISTER_CURL_CONSTANT(CURLOPT_SSLKEY); REGISTER_CURL_CONSTANT(CURLOPT_SSLKEYPASSWD); REGISTER_CURL_CONSTANT(CURLOPT_SSLKEYTYPE); REGISTER_CURL_CONSTANT(CURLOPT_SSLVERSION); REGISTER_CURL_CONSTANT(CURLOPT_SSL_CIPHER_LIST); REGISTER_CURL_CONSTANT(CURLOPT_SSL_VERIFYHOST); REGISTER_CURL_CONSTANT(CURLOPT_SSL_VERIFYPEER); REGISTER_CURL_CONSTANT(CURLOPT_STDERR); REGISTER_CURL_CONSTANT(CURLOPT_TELNETOPTIONS); REGISTER_CURL_CONSTANT(CURLOPT_TIMECONDITION); REGISTER_CURL_CONSTANT(CURLOPT_TIMEOUT); REGISTER_CURL_CONSTANT(CURLOPT_TIMEVALUE); REGISTER_CURL_CONSTANT(CURLOPT_TRANSFERTEXT); REGISTER_CURL_CONSTANT(CURLOPT_UNRESTRICTED_AUTH); REGISTER_CURL_CONSTANT(CURLOPT_UPLOAD); REGISTER_CURL_CONSTANT(CURLOPT_URL); REGISTER_CURL_CONSTANT(CURLOPT_USERAGENT); REGISTER_CURL_CONSTANT(CURLOPT_USERPWD); REGISTER_CURL_CONSTANT(CURLOPT_VERBOSE); REGISTER_CURL_CONSTANT(CURLOPT_WRITEFUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_WRITEHEADER); /* */ REGISTER_CURL_CONSTANT(CURLE_ABORTED_BY_CALLBACK); REGISTER_CURL_CONSTANT(CURLE_BAD_CALLING_ORDER); REGISTER_CURL_CONSTANT(CURLE_BAD_CONTENT_ENCODING); REGISTER_CURL_CONSTANT(CURLE_BAD_DOWNLOAD_RESUME); REGISTER_CURL_CONSTANT(CURLE_BAD_FUNCTION_ARGUMENT); REGISTER_CURL_CONSTANT(CURLE_BAD_PASSWORD_ENTERED); REGISTER_CURL_CONSTANT(CURLE_COULDNT_CONNECT); REGISTER_CURL_CONSTANT(CURLE_COULDNT_RESOLVE_HOST); REGISTER_CURL_CONSTANT(CURLE_COULDNT_RESOLVE_PROXY); REGISTER_CURL_CONSTANT(CURLE_FAILED_INIT); REGISTER_CURL_CONSTANT(CURLE_FILE_COULDNT_READ_FILE); REGISTER_CURL_CONSTANT(CURLE_FTP_ACCESS_DENIED); REGISTER_CURL_CONSTANT(CURLE_FTP_BAD_DOWNLOAD_RESUME); REGISTER_CURL_CONSTANT(CURLE_FTP_CANT_GET_HOST); REGISTER_CURL_CONSTANT(CURLE_FTP_CANT_RECONNECT); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_GET_SIZE); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_RETR_FILE); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_SET_ASCII); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_SET_BINARY); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_STOR_FILE); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_USE_REST); 
REGISTER_CURL_CONSTANT(CURLE_FTP_PARTIAL_FILE); REGISTER_CURL_CONSTANT(CURLE_FTP_PORT_FAILED); REGISTER_CURL_CONSTANT(CURLE_FTP_QUOTE_ERROR); REGISTER_CURL_CONSTANT(CURLE_FTP_USER_PASSWORD_INCORRECT); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_227_FORMAT); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_PASS_REPLY); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_PASV_REPLY); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_SERVER_REPLY); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_USER_REPLY); REGISTER_CURL_CONSTANT(CURLE_FTP_WRITE_ERROR); REGISTER_CURL_CONSTANT(CURLE_FUNCTION_NOT_FOUND); REGISTER_CURL_CONSTANT(CURLE_GOT_NOTHING); REGISTER_CURL_CONSTANT(CURLE_HTTP_NOT_FOUND); REGISTER_CURL_CONSTANT(CURLE_HTTP_PORT_FAILED); REGISTER_CURL_CONSTANT(CURLE_HTTP_POST_ERROR); REGISTER_CURL_CONSTANT(CURLE_HTTP_RANGE_ERROR); REGISTER_CURL_CONSTANT(CURLE_HTTP_RETURNED_ERROR); REGISTER_CURL_CONSTANT(CURLE_LDAP_CANNOT_BIND); REGISTER_CURL_CONSTANT(CURLE_LDAP_SEARCH_FAILED); REGISTER_CURL_CONSTANT(CURLE_LIBRARY_NOT_FOUND); REGISTER_CURL_CONSTANT(CURLE_MALFORMAT_USER); REGISTER_CURL_CONSTANT(CURLE_OBSOLETE); REGISTER_CURL_CONSTANT(CURLE_OK); REGISTER_CURL_CONSTANT(CURLE_OPERATION_TIMEDOUT); REGISTER_CURL_CONSTANT(CURLE_OPERATION_TIMEOUTED); REGISTER_CURL_CONSTANT(CURLE_OUT_OF_MEMORY); REGISTER_CURL_CONSTANT(CURLE_PARTIAL_FILE); REGISTER_CURL_CONSTANT(CURLE_READ_ERROR); REGISTER_CURL_CONSTANT(CURLE_RECV_ERROR); REGISTER_CURL_CONSTANT(CURLE_SEND_ERROR); REGISTER_CURL_CONSTANT(CURLE_SHARE_IN_USE); REGISTER_CURL_CONSTANT(CURLE_SSL_CACERT); REGISTER_CURL_CONSTANT(CURLE_SSL_CERTPROBLEM); REGISTER_CURL_CONSTANT(CURLE_SSL_CIPHER); REGISTER_CURL_CONSTANT(CURLE_SSL_CONNECT_ERROR); REGISTER_CURL_CONSTANT(CURLE_SSL_ENGINE_NOTFOUND); REGISTER_CURL_CONSTANT(CURLE_SSL_ENGINE_SETFAILED); REGISTER_CURL_CONSTANT(CURLE_SSL_PEER_CERTIFICATE); REGISTER_CURL_CONSTANT(CURLE_TELNET_OPTION_SYNTAX); REGISTER_CURL_CONSTANT(CURLE_TOO_MANY_REDIRECTS); REGISTER_CURL_CONSTANT(CURLE_UNKNOWN_TELNET_OPTION); REGISTER_CURL_CONSTANT(CURLE_UNSUPPORTED_PROTOCOL); REGISTER_CURL_CONSTANT(CURLE_URL_MALFORMAT); REGISTER_CURL_CONSTANT(CURLE_URL_MALFORMAT_USER); REGISTER_CURL_CONSTANT(CURLE_WRITE_ERROR); /* cURL info constants */ REGISTER_CURL_CONSTANT(CURLINFO_CONNECT_TIME); REGISTER_CURL_CONSTANT(CURLINFO_CONTENT_LENGTH_DOWNLOAD); REGISTER_CURL_CONSTANT(CURLINFO_CONTENT_LENGTH_UPLOAD); REGISTER_CURL_CONSTANT(CURLINFO_CONTENT_TYPE); REGISTER_CURL_CONSTANT(CURLINFO_EFFECTIVE_URL); REGISTER_CURL_CONSTANT(CURLINFO_FILETIME); REGISTER_CURL_CONSTANT(CURLINFO_HEADER_OUT); REGISTER_CURL_CONSTANT(CURLINFO_HEADER_SIZE); REGISTER_CURL_CONSTANT(CURLINFO_HTTP_CODE); REGISTER_CURL_CONSTANT(CURLINFO_LASTONE); REGISTER_CURL_CONSTANT(CURLINFO_NAMELOOKUP_TIME); REGISTER_CURL_CONSTANT(CURLINFO_PRETRANSFER_TIME); REGISTER_CURL_CONSTANT(CURLINFO_PRIVATE); REGISTER_CURL_CONSTANT(CURLINFO_REDIRECT_COUNT); REGISTER_CURL_CONSTANT(CURLINFO_REDIRECT_TIME); REGISTER_CURL_CONSTANT(CURLINFO_REQUEST_SIZE); REGISTER_CURL_CONSTANT(CURLINFO_SIZE_DOWNLOAD); REGISTER_CURL_CONSTANT(CURLINFO_SIZE_UPLOAD); REGISTER_CURL_CONSTANT(CURLINFO_SPEED_DOWNLOAD); REGISTER_CURL_CONSTANT(CURLINFO_SPEED_UPLOAD); REGISTER_CURL_CONSTANT(CURLINFO_SSL_VERIFYRESULT); REGISTER_CURL_CONSTANT(CURLINFO_STARTTRANSFER_TIME); REGISTER_CURL_CONSTANT(CURLINFO_TOTAL_TIME); /* Other */ REGISTER_CURL_CONSTANT(CURLMSG_DONE); REGISTER_CURL_CONSTANT(CURLVERSION_NOW); /* Curl Multi Constants */ REGISTER_CURL_CONSTANT(CURLM_BAD_EASY_HANDLE); REGISTER_CURL_CONSTANT(CURLM_BAD_HANDLE); REGISTER_CURL_CONSTANT(CURLM_CALL_MULTI_PERFORM); 
REGISTER_CURL_CONSTANT(CURLM_INTERNAL_ERROR); REGISTER_CURL_CONSTANT(CURLM_OK); REGISTER_CURL_CONSTANT(CURLM_OUT_OF_MEMORY); #if LIBCURL_VERSION_NUM >= 0x072001 /* Available since 7.32.1 */ REGISTER_CURL_CONSTANT(CURLM_ADDED_ALREADY); #endif /* Curl proxy constants */ REGISTER_CURL_CONSTANT(CURLPROXY_HTTP); REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS4); REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS5); /* Curl Share constants */ REGISTER_CURL_CONSTANT(CURLSHOPT_NONE); REGISTER_CURL_CONSTANT(CURLSHOPT_SHARE); REGISTER_CURL_CONSTANT(CURLSHOPT_UNSHARE); /* Curl Http Version constants (CURLOPT_HTTP_VERSION) */ REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_1_0); REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_1_1); REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_NONE); /* Curl Lock constants */ REGISTER_CURL_CONSTANT(CURL_LOCK_DATA_COOKIE); REGISTER_CURL_CONSTANT(CURL_LOCK_DATA_DNS); REGISTER_CURL_CONSTANT(CURL_LOCK_DATA_SSL_SESSION); /* Curl NETRC constants (CURLOPT_NETRC) */ REGISTER_CURL_CONSTANT(CURL_NETRC_IGNORED); REGISTER_CURL_CONSTANT(CURL_NETRC_OPTIONAL); REGISTER_CURL_CONSTANT(CURL_NETRC_REQUIRED); /* Curl SSL Version constants (CURLOPT_SSLVERSION) */ REGISTER_CURL_CONSTANT(CURL_SSLVERSION_DEFAULT); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_SSLv2); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_SSLv3); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1); /* Curl TIMECOND constants (CURLOPT_TIMECONDITION) */ REGISTER_CURL_CONSTANT(CURL_TIMECOND_IFMODSINCE); REGISTER_CURL_CONSTANT(CURL_TIMECOND_IFUNMODSINCE); REGISTER_CURL_CONSTANT(CURL_TIMECOND_LASTMOD); REGISTER_CURL_CONSTANT(CURL_TIMECOND_NONE); /* Curl version constants */ REGISTER_CURL_CONSTANT(CURL_VERSION_IPV6); REGISTER_CURL_CONSTANT(CURL_VERSION_KERBEROS4); REGISTER_CURL_CONSTANT(CURL_VERSION_LIBZ); REGISTER_CURL_CONSTANT(CURL_VERSION_SSL); #if LIBCURL_VERSION_NUM >= 0x070a06 /* Available since 7.10.6 */ REGISTER_CURL_CONSTANT(CURLOPT_HTTPAUTH); /* http authentication options */ REGISTER_CURL_CONSTANT(CURLAUTH_ANY); REGISTER_CURL_CONSTANT(CURLAUTH_ANYSAFE); REGISTER_CURL_CONSTANT(CURLAUTH_BASIC); REGISTER_CURL_CONSTANT(CURLAUTH_DIGEST); REGISTER_CURL_CONSTANT(CURLAUTH_GSSNEGOTIATE); REGISTER_CURL_CONSTANT(CURLAUTH_NONE); REGISTER_CURL_CONSTANT(CURLAUTH_NTLM); #endif #if LIBCURL_VERSION_NUM >= 0x070a07 /* Available since 7.10.7 */ REGISTER_CURL_CONSTANT(CURLINFO_HTTP_CONNECTCODE); REGISTER_CURL_CONSTANT(CURLOPT_FTP_CREATE_MISSING_DIRS); REGISTER_CURL_CONSTANT(CURLOPT_PROXYAUTH); #endif #if LIBCURL_VERSION_NUM >= 0x070a08 /* Available since 7.10.8 */ REGISTER_CURL_CONSTANT(CURLE_FILESIZE_EXCEEDED); REGISTER_CURL_CONSTANT(CURLE_LDAP_INVALID_URL); REGISTER_CURL_CONSTANT(CURLINFO_HTTPAUTH_AVAIL); REGISTER_CURL_CONSTANT(CURLINFO_RESPONSE_CODE); REGISTER_CURL_CONSTANT(CURLINFO_PROXYAUTH_AVAIL); REGISTER_CURL_CONSTANT(CURLOPT_FTP_RESPONSE_TIMEOUT); REGISTER_CURL_CONSTANT(CURLOPT_IPRESOLVE); REGISTER_CURL_CONSTANT(CURLOPT_MAXFILESIZE); REGISTER_CURL_CONSTANT(CURL_IPRESOLVE_V4); REGISTER_CURL_CONSTANT(CURL_IPRESOLVE_V6); REGISTER_CURL_CONSTANT(CURL_IPRESOLVE_WHATEVER); #endif #if LIBCURL_VERSION_NUM >= 0x070b00 /* Available since 7.11.0 */ REGISTER_CURL_CONSTANT(CURLE_FTP_SSL_FAILED); REGISTER_CURL_CONSTANT(CURLFTPSSL_ALL); REGISTER_CURL_CONSTANT(CURLFTPSSL_CONTROL); REGISTER_CURL_CONSTANT(CURLFTPSSL_NONE); REGISTER_CURL_CONSTANT(CURLFTPSSL_TRY); REGISTER_CURL_CONSTANT(CURLOPT_FTP_SSL); REGISTER_CURL_CONSTANT(CURLOPT_NETRC_FILE); #endif #if LIBCURL_VERSION_NUM >= 0x070c02 /* Available since 7.12.2 */ REGISTER_CURL_CONSTANT(CURLFTPAUTH_DEFAULT); REGISTER_CURL_CONSTANT(CURLFTPAUTH_SSL); 
REGISTER_CURL_CONSTANT(CURLFTPAUTH_TLS); REGISTER_CURL_CONSTANT(CURLOPT_FTPSSLAUTH); #endif #if LIBCURL_VERSION_NUM >= 0x070d00 /* Available since 7.13.0 */ REGISTER_CURL_CONSTANT(CURLOPT_FTP_ACCOUNT); #endif #if LIBCURL_VERSION_NUM >= 0x070b02 /* Available since 7.11.2 */ REGISTER_CURL_CONSTANT(CURLOPT_TCP_NODELAY); #endif #if LIBCURL_VERSION_NUM >= 0x070c02 /* Available since 7.12.2 */ REGISTER_CURL_CONSTANT(CURLINFO_OS_ERRNO); #endif #if LIBCURL_VERSION_NUM >= 0x070c03 /* Available since 7.12.3 */ REGISTER_CURL_CONSTANT(CURLINFO_NUM_CONNECTS); REGISTER_CURL_CONSTANT(CURLINFO_SSL_ENGINES); #endif #if LIBCURL_VERSION_NUM >= 0x070e01 /* Available since 7.14.1 */ REGISTER_CURL_CONSTANT(CURLINFO_COOKIELIST); REGISTER_CURL_CONSTANT(CURLOPT_COOKIELIST); REGISTER_CURL_CONSTANT(CURLOPT_IGNORE_CONTENT_LENGTH); #endif #if LIBCURL_VERSION_NUM >= 0x070f00 /* Available since 7.15.0 */ REGISTER_CURL_CONSTANT(CURLOPT_FTP_SKIP_PASV_IP); #endif #if LIBCURL_VERSION_NUM >= 0x070f01 /* Available since 7.15.1 */ REGISTER_CURL_CONSTANT(CURLOPT_FTP_FILEMETHOD); #endif #if LIBCURL_VERSION_NUM >= 0x070f02 /* Available since 7.15.2 */ REGISTER_CURL_CONSTANT(CURLOPT_CONNECT_ONLY); REGISTER_CURL_CONSTANT(CURLOPT_LOCALPORT); REGISTER_CURL_CONSTANT(CURLOPT_LOCALPORTRANGE); #endif #if LIBCURL_VERSION_NUM >= 0x070f03 /* Available since 7.15.3 */ REGISTER_CURL_CONSTANT(CURLFTPMETHOD_MULTICWD); REGISTER_CURL_CONSTANT(CURLFTPMETHOD_NOCWD); REGISTER_CURL_CONSTANT(CURLFTPMETHOD_SINGLECWD); #endif #if LIBCURL_VERSION_NUM >= 0x070f04 /* Available since 7.15.4 */ REGISTER_CURL_CONSTANT(CURLINFO_FTP_ENTRY_PATH); #endif #if LIBCURL_VERSION_NUM >= 0x070f05 /* Available since 7.15.5 */ REGISTER_CURL_CONSTANT(CURLOPT_FTP_ALTERNATIVE_TO_USER); REGISTER_CURL_CONSTANT(CURLOPT_MAX_RECV_SPEED_LARGE); REGISTER_CURL_CONSTANT(CURLOPT_MAX_SEND_SPEED_LARGE); #endif #if LIBCURL_VERSION_NUM >= 0x071000 /* Available since 7.16.0 */ REGISTER_CURL_CONSTANT(CURLE_SSL_CACERT_BADFILE); REGISTER_CURL_CONSTANT(CURLOPT_SSL_SESSIONID_CACHE); REGISTER_CURL_CONSTANT(CURLMOPT_PIPELINING); #endif #if LIBCURL_VERSION_NUM >= 0x071001 /* Available since 7.16.1 */ REGISTER_CURL_CONSTANT(CURLE_SSH); REGISTER_CURL_CONSTANT(CURLOPT_FTP_SSL_CCC); REGISTER_CURL_CONSTANT(CURLOPT_SSH_AUTH_TYPES); REGISTER_CURL_CONSTANT(CURLOPT_SSH_PRIVATE_KEYFILE); REGISTER_CURL_CONSTANT(CURLOPT_SSH_PUBLIC_KEYFILE); REGISTER_CURL_CONSTANT(CURLFTPSSL_CCC_ACTIVE); REGISTER_CURL_CONSTANT(CURLFTPSSL_CCC_NONE); REGISTER_CURL_CONSTANT(CURLFTPSSL_CCC_PASSIVE); #endif #if LIBCURL_VERSION_NUM >= 0x071002 /* Available since 7.16.2 */ REGISTER_CURL_CONSTANT(CURLOPT_CONNECTTIMEOUT_MS); REGISTER_CURL_CONSTANT(CURLOPT_HTTP_CONTENT_DECODING); REGISTER_CURL_CONSTANT(CURLOPT_HTTP_TRANSFER_DECODING); REGISTER_CURL_CONSTANT(CURLOPT_TIMEOUT_MS); #endif #if LIBCURL_VERSION_NUM >= 0x071003 /* Available since 7.16.3 */ REGISTER_CURL_CONSTANT(CURLMOPT_MAXCONNECTS); #endif #if LIBCURL_VERSION_NUM >= 0x071004 /* Available since 7.16.4 */ REGISTER_CURL_CONSTANT(CURLOPT_KRBLEVEL); REGISTER_CURL_CONSTANT(CURLOPT_NEW_DIRECTORY_PERMS); REGISTER_CURL_CONSTANT(CURLOPT_NEW_FILE_PERMS); #endif #if LIBCURL_VERSION_NUM >= 0x071100 /* Available since 7.17.0 */ REGISTER_CURL_CONSTANT(CURLOPT_APPEND); REGISTER_CURL_CONSTANT(CURLOPT_DIRLISTONLY); REGISTER_CURL_CONSTANT(CURLOPT_USE_SSL); /* Curl SSL Constants */ REGISTER_CURL_CONSTANT(CURLUSESSL_ALL); REGISTER_CURL_CONSTANT(CURLUSESSL_CONTROL); REGISTER_CURL_CONSTANT(CURLUSESSL_NONE); REGISTER_CURL_CONSTANT(CURLUSESSL_TRY); #endif #if LIBCURL_VERSION_NUM >= 0x071101 /* 
Available since 7.17.1 */ REGISTER_CURL_CONSTANT(CURLOPT_SSH_HOST_PUBLIC_KEY_MD5); #endif #if LIBCURL_VERSION_NUM >= 0x071200 /* Available since 7.18.0 */ REGISTER_CURL_CONSTANT(CURLOPT_PROXY_TRANSFER_MODE); REGISTER_CURL_CONSTANT(CURLPAUSE_ALL); REGISTER_CURL_CONSTANT(CURLPAUSE_CONT); REGISTER_CURL_CONSTANT(CURLPAUSE_RECV); REGISTER_CURL_CONSTANT(CURLPAUSE_RECV_CONT); REGISTER_CURL_CONSTANT(CURLPAUSE_SEND); REGISTER_CURL_CONSTANT(CURLPAUSE_SEND_CONT); REGISTER_CURL_CONSTANT(CURL_READFUNC_PAUSE); REGISTER_CURL_CONSTANT(CURL_WRITEFUNC_PAUSE); REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS4A); REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS5_HOSTNAME); #endif #if LIBCURL_VERSION_NUM >= 0x071202 /* Available since 7.18.2 */ REGISTER_CURL_CONSTANT(CURLINFO_REDIRECT_URL); #endif #if LIBCURL_VERSION_NUM >= 0x071300 /* Available since 7.19.0 */ REGISTER_CURL_CONSTANT(CURLINFO_APPCONNECT_TIME); REGISTER_CURL_CONSTANT(CURLINFO_PRIMARY_IP); REGISTER_CURL_CONSTANT(CURLOPT_ADDRESS_SCOPE); REGISTER_CURL_CONSTANT(CURLOPT_CRLFILE); REGISTER_CURL_CONSTANT(CURLOPT_ISSUERCERT); REGISTER_CURL_CONSTANT(CURLOPT_KEYPASSWD); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_ANY); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_DEFAULT); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_HOST); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_KEYBOARD); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_NONE); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_PASSWORD); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_PUBLICKEY); #endif #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ REGISTER_CURL_CONSTANT(CURLINFO_CERTINFO); REGISTER_CURL_CONSTANT(CURLOPT_CERTINFO); REGISTER_CURL_CONSTANT(CURLOPT_PASSWORD); REGISTER_CURL_CONSTANT(CURLOPT_POSTREDIR); REGISTER_CURL_CONSTANT(CURLOPT_PROXYPASSWORD); REGISTER_CURL_CONSTANT(CURLOPT_PROXYUSERNAME); REGISTER_CURL_CONSTANT(CURLOPT_USERNAME); REGISTER_CURL_CONSTANT(CURL_REDIR_POST_301); REGISTER_CURL_CONSTANT(CURL_REDIR_POST_302); REGISTER_CURL_CONSTANT(CURL_REDIR_POST_ALL); #endif #if LIBCURL_VERSION_NUM >= 0x071303 /* Available since 7.19.3 */ REGISTER_CURL_CONSTANT(CURLAUTH_DIGEST_IE); #endif #if LIBCURL_VERSION_NUM >= 0x071304 /* Available since 7.19.4 */ REGISTER_CURL_CONSTANT(CURLINFO_CONDITION_UNMET); REGISTER_CURL_CONSTANT(CURLOPT_NOPROXY); REGISTER_CURL_CONSTANT(CURLOPT_PROTOCOLS); REGISTER_CURL_CONSTANT(CURLOPT_REDIR_PROTOCOLS); REGISTER_CURL_CONSTANT(CURLOPT_SOCKS5_GSSAPI_NEC); REGISTER_CURL_CONSTANT(CURLOPT_SOCKS5_GSSAPI_SERVICE); REGISTER_CURL_CONSTANT(CURLOPT_TFTP_BLKSIZE); REGISTER_CURL_CONSTANT(CURLPROTO_ALL); REGISTER_CURL_CONSTANT(CURLPROTO_DICT); REGISTER_CURL_CONSTANT(CURLPROTO_FILE); REGISTER_CURL_CONSTANT(CURLPROTO_FTP); REGISTER_CURL_CONSTANT(CURLPROTO_FTPS); REGISTER_CURL_CONSTANT(CURLPROTO_HTTP); REGISTER_CURL_CONSTANT(CURLPROTO_HTTPS); REGISTER_CURL_CONSTANT(CURLPROTO_LDAP); REGISTER_CURL_CONSTANT(CURLPROTO_LDAPS); REGISTER_CURL_CONSTANT(CURLPROTO_SCP); REGISTER_CURL_CONSTANT(CURLPROTO_SFTP); REGISTER_CURL_CONSTANT(CURLPROTO_TELNET); REGISTER_CURL_CONSTANT(CURLPROTO_TFTP); REGISTER_CURL_CONSTANT(CURLPROXY_HTTP_1_0); REGISTER_CURL_CONSTANT(CURLFTP_CREATE_DIR); REGISTER_CURL_CONSTANT(CURLFTP_CREATE_DIR_NONE); REGISTER_CURL_CONSTANT(CURLFTP_CREATE_DIR_RETRY); #endif #if LIBCURL_VERSION_NUM >= 0x071306 /* Available since 7.19.6 */ REGISTER_CURL_CONSTANT(CURLOPT_SSH_KNOWNHOSTS); #endif #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ REGISTER_CURL_CONSTANT(CURLINFO_RTSP_CLIENT_CSEQ); REGISTER_CURL_CONSTANT(CURLINFO_RTSP_CSEQ_RECV); REGISTER_CURL_CONSTANT(CURLINFO_RTSP_SERVER_CSEQ); 
REGISTER_CURL_CONSTANT(CURLINFO_RTSP_SESSION_ID); REGISTER_CURL_CONSTANT(CURLOPT_FTP_USE_PRET); REGISTER_CURL_CONSTANT(CURLOPT_MAIL_FROM); REGISTER_CURL_CONSTANT(CURLOPT_MAIL_RCPT); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_CLIENT_CSEQ); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_REQUEST); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_SERVER_CSEQ); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_SESSION_ID); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_STREAM_URI); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_TRANSPORT); REGISTER_CURL_CONSTANT(CURLPROTO_IMAP); REGISTER_CURL_CONSTANT(CURLPROTO_IMAPS); REGISTER_CURL_CONSTANT(CURLPROTO_POP3); REGISTER_CURL_CONSTANT(CURLPROTO_POP3S); REGISTER_CURL_CONSTANT(CURLPROTO_RTSP); REGISTER_CURL_CONSTANT(CURLPROTO_SMTP); REGISTER_CURL_CONSTANT(CURLPROTO_SMTPS); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_ANNOUNCE); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_DESCRIBE); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_GET_PARAMETER); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_OPTIONS); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_PAUSE); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_PLAY); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_RECEIVE); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_RECORD); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_SET_PARAMETER); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_SETUP); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_TEARDOWN); #endif #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ REGISTER_CURL_CONSTANT(CURLINFO_LOCAL_IP); REGISTER_CURL_CONSTANT(CURLINFO_LOCAL_PORT); REGISTER_CURL_CONSTANT(CURLINFO_PRIMARY_PORT); REGISTER_CURL_CONSTANT(CURLOPT_FNMATCH_FUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_WILDCARDMATCH); REGISTER_CURL_CONSTANT(CURLPROTO_RTMP); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPE); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPS); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPT); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPTE); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPTS); REGISTER_CURL_CONSTANT(CURL_FNMATCHFUNC_FAIL); REGISTER_CURL_CONSTANT(CURL_FNMATCHFUNC_MATCH); REGISTER_CURL_CONSTANT(CURL_FNMATCHFUNC_NOMATCH); #endif #if LIBCURL_VERSION_NUM >= 0x071502 /* Available since 7.21.2 */ REGISTER_CURL_CONSTANT(CURLPROTO_GOPHER); #endif #if LIBCURL_VERSION_NUM >= 0x071503 /* Available since 7.21.3 */ REGISTER_CURL_CONSTANT(CURLAUTH_ONLY); REGISTER_CURL_CONSTANT(CURLOPT_RESOLVE); #endif #if LIBCURL_VERSION_NUM >= 0x071504 /* Available since 7.21.4 */ REGISTER_CURL_CONSTANT(CURLOPT_TLSAUTH_PASSWORD); REGISTER_CURL_CONSTANT(CURLOPT_TLSAUTH_TYPE); REGISTER_CURL_CONSTANT(CURLOPT_TLSAUTH_USERNAME); REGISTER_CURL_CONSTANT(CURL_TLSAUTH_SRP); #endif #if LIBCURL_VERSION_NUM >= 0x071506 /* Available since 7.21.6 */ REGISTER_CURL_CONSTANT(CURLOPT_ACCEPT_ENCODING); REGISTER_CURL_CONSTANT(CURLOPT_TRANSFER_ENCODING); #endif #if LIBCURL_VERSION_NUM >= 0x071600 /* Available since 7.22.0 */ REGISTER_CURL_CONSTANT(CURLAUTH_NTLM_WB); REGISTER_CURL_CONSTANT(CURLGSSAPI_DELEGATION_FLAG); REGISTER_CURL_CONSTANT(CURLGSSAPI_DELEGATION_POLICY_FLAG); REGISTER_CURL_CONSTANT(CURLOPT_GSSAPI_DELEGATION); #endif #if LIBCURL_VERSION_NUM >= 0x071800 /* Available since 7.24.0 */ REGISTER_CURL_CONSTANT(CURLOPT_ACCEPTTIMEOUT_MS); REGISTER_CURL_CONSTANT(CURLOPT_DNS_SERVERS); #endif #if LIBCURL_VERSION_NUM >= 0x071900 /* Available since 7.25.0 */ REGISTER_CURL_CONSTANT(CURLOPT_MAIL_AUTH); REGISTER_CURL_CONSTANT(CURLOPT_SSL_OPTIONS); REGISTER_CURL_CONSTANT(CURLOPT_TCP_KEEPALIVE); REGISTER_CURL_CONSTANT(CURLOPT_TCP_KEEPIDLE); REGISTER_CURL_CONSTANT(CURLOPT_TCP_KEEPINTVL); REGISTER_CURL_CONSTANT(CURLSSLOPT_ALLOW_BEAST); #endif #if LIBCURL_VERSION_NUM >= 0x071901 /* Available since 7.25.1 */ 
REGISTER_CURL_CONSTANT(CURL_REDIR_POST_303); #endif #if LIBCURL_VERSION_NUM >= 0x071c00 /* Available since 7.28.0 */ REGISTER_CURL_CONSTANT(CURLSSH_AUTH_AGENT); #endif #if LIBCURL_VERSION_NUM >= 0x071e00 /* Available since 7.30.0 */ REGISTER_CURL_CONSTANT(CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE); REGISTER_CURL_CONSTANT(CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE); REGISTER_CURL_CONSTANT(CURLMOPT_MAX_HOST_CONNECTIONS); REGISTER_CURL_CONSTANT(CURLMOPT_MAX_PIPELINE_LENGTH); REGISTER_CURL_CONSTANT(CURLMOPT_MAX_TOTAL_CONNECTIONS); #endif #if LIBCURL_VERSION_NUM >= 0x071f00 /* Available since 7.31.0 */ REGISTER_CURL_CONSTANT(CURLOPT_SASL_IR); #endif #if LIBCURL_VERSION_NUM >= 0x072100 /* Available since 7.33.0 */ REGISTER_CURL_CONSTANT(CURLOPT_DNS_INTERFACE); REGISTER_CURL_CONSTANT(CURLOPT_DNS_LOCAL_IP4); REGISTER_CURL_CONSTANT(CURLOPT_DNS_LOCAL_IP6); REGISTER_CURL_CONSTANT(CURLOPT_XOAUTH2_BEARER); REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2_0); REGISTER_CURL_CONSTANT(CURL_VERSION_HTTP2); #endif #if LIBCURL_VERSION_NUM >= 0x072200 /* Available since 7.34.0 */ REGISTER_CURL_CONSTANT(CURLOPT_LOGIN_OPTIONS); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1_0); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1_1); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1_2); #endif #if LIBCURL_VERSION_NUM >= 0x072400 /* Available since 7.36.0 */ REGISTER_CURL_CONSTANT(CURLOPT_EXPECT_100_TIMEOUT_MS); REGISTER_CURL_CONSTANT(CURLOPT_SSL_ENABLE_ALPN); REGISTER_CURL_CONSTANT(CURLOPT_SSL_ENABLE_NPN); #endif #if LIBCURL_VERSION_NUM >= 0x072500 /* Available since 7.37.0 */ REGISTER_CURL_CONSTANT(CURLHEADER_SEPARATE); REGISTER_CURL_CONSTANT(CURLHEADER_UNIFIED); REGISTER_CURL_CONSTANT(CURLOPT_HEADEROPT); REGISTER_CURL_CONSTANT(CURLOPT_PROXYHEADER); #endif #if LIBCURL_VERSION_NUM >= 0x072600 /* Available since 7.38.0 */ REGISTER_CURL_CONSTANT(CURLAUTH_NEGOTIATE); #endif #if LIBCURL_VERSION_NUM >= 0x072700 /* Available since 7.39.0 */ REGISTER_CURL_CONSTANT(CURLOPT_PINNEDPUBLICKEY); #endif #if LIBCURL_VERSION_NUM >= 0x072800 /* Available since 7.40.0 */ REGISTER_CURL_CONSTANT(CURLOPT_UNIX_SOCKET_PATH); REGISTER_CURL_CONSTANT(CURLPROTO_SMB); REGISTER_CURL_CONSTANT(CURLPROTO_SMBS); #endif #if LIBCURL_VERSION_NUM >= 0x072900 /* Available since 7.41.0 */ REGISTER_CURL_CONSTANT(CURLOPT_SSL_VERIFYSTATUS); #endif #if LIBCURL_VERSION_NUM >= 0x072a00 /* Available since 7.42.0 */ REGISTER_CURL_CONSTANT(CURLOPT_PATH_AS_IS); REGISTER_CURL_CONSTANT(CURLOPT_SSL_FALSESTART); #endif #if LIBCURL_VERSION_NUM >= 0x072b00 /* Available since 7.43.0 */ REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2); REGISTER_CURL_CONSTANT(CURLOPT_PIPEWAIT); REGISTER_CURL_CONSTANT(CURLOPT_PROXY_SERVICE_NAME); REGISTER_CURL_CONSTANT(CURLOPT_SERVICE_NAME); REGISTER_CURL_CONSTANT(CURLPIPE_NOTHING); REGISTER_CURL_CONSTANT(CURLPIPE_HTTP1); REGISTER_CURL_CONSTANT(CURLPIPE_MULTIPLEX); #endif #if LIBCURL_VERSION_NUM >= 0x072c00 /* Available since 7.44.0 */ REGISTER_CURL_CONSTANT(CURLSSLOPT_NO_REVOKE); #endif #if LIBCURL_VERSION_NUM >= 0x072d00 /* Available since 7.45.0 */ REGISTER_CURL_CONSTANT(CURLOPT_DEFAULT_PROTOCOL); #endif #if LIBCURL_VERSION_NUM >= 0x072e00 /* Available since 7.46.0 */ REGISTER_CURL_CONSTANT(CURLOPT_STREAM_WEIGHT); #endif #if LIBCURL_VERSION_NUM >= 0x072f00 /* Available since 7.47.0 */ REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2TLS); #endif #if LIBCURL_VERSION_NUM >= 0x073000 /* Available since 7.48.0 */ REGISTER_CURL_CONSTANT(CURLOPT_TFTP_NO_OPTIONS); #endif #if LIBCURL_VERSION_NUM >= 0x073100 /* Available since 7.49.0 */ 
REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE); REGISTER_CURL_CONSTANT(CURLOPT_CONNECT_TO); REGISTER_CURL_CONSTANT(CURLOPT_TCP_FASTOPEN); #endif #if CURLOPT_FTPASCII != 0 REGISTER_CURL_CONSTANT(CURLOPT_FTPASCII); #endif #if CURLOPT_MUTE != 0 REGISTER_CURL_CONSTANT(CURLOPT_MUTE); #endif #if CURLOPT_PASSWDFUNCTION != 0 REGISTER_CURL_CONSTANT(CURLOPT_PASSWDFUNCTION); #endif REGISTER_CURL_CONSTANT(CURLOPT_SAFE_UPLOAD); #ifdef PHP_CURL_NEED_OPENSSL_TSL if (!CRYPTO_get_id_callback()) { int i, c = CRYPTO_num_locks(); php_curl_openssl_tsl = malloc(c * sizeof(MUTEX_T)); if (!php_curl_openssl_tsl) { return FAILURE; } for (i = 0; i < c; ++i) { php_curl_openssl_tsl[i] = tsrm_mutex_alloc(); } CRYPTO_set_id_callback(php_curl_ssl_id); CRYPTO_set_locking_callback(php_curl_ssl_lock); } #endif #ifdef PHP_CURL_NEED_GNUTLS_TSL gcry_control(GCRYCTL_SET_THREAD_CBS, &php_curl_gnutls_tsl); #endif if (curl_global_init(CURL_GLOBAL_DEFAULT) != CURLE_OK) { return FAILURE; } curlfile_register_class(); return SUCCESS; } /* }}} */ /* {{{ PHP_MSHUTDOWN_FUNCTION */ PHP_MSHUTDOWN_FUNCTION(curl) { curl_global_cleanup(); #ifdef PHP_CURL_NEED_OPENSSL_TSL if (php_curl_openssl_tsl) { int i, c = CRYPTO_num_locks(); CRYPTO_set_id_callback(NULL); CRYPTO_set_locking_callback(NULL); for (i = 0; i < c; ++i) { tsrm_mutex_free(php_curl_openssl_tsl[i]); } free(php_curl_openssl_tsl); php_curl_openssl_tsl = NULL; } #endif UNREGISTER_INI_ENTRIES(); return SUCCESS; } /* }}} */ /* {{{ curl_write_nothing * Used as a work around. See _php_curl_close_ex */ static size_t curl_write_nothing(char *data, size_t size, size_t nmemb, void *ctx) { return size * nmemb; } /* }}} */ /* {{{ curl_write */ static size_t curl_write(char *data, size_t size, size_t nmemb, void *ctx) { php_curl *ch = (php_curl *) ctx; php_curl_write *t = ch->handlers->write; size_t length = size * nmemb; #if PHP_CURL_DEBUG fprintf(stderr, "curl_write() called\n"); fprintf(stderr, "data = %s, size = %d, nmemb = %d, ctx = %x\n", data, size, nmemb, ctx); #endif switch (t->method) { case PHP_CURL_STDOUT: PHPWRITE(data, length); break; case PHP_CURL_FILE: return fwrite(data, size, nmemb, t->fp); case PHP_CURL_RETURN: if (length > 0) { smart_str_appendl(&t->buf, data, (int) length); } break; case PHP_CURL_USER: { zval argv[2]; zval retval; int error; zend_fcall_info fci; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); ZVAL_STRINGL(&argv[1], data, length); fci.size = sizeof(fci); fci.function_table = EG(function_table); fci.object = NULL; ZVAL_COPY_VALUE(&fci.function_name, &t->func_name); fci.retval = &retval; fci.param_count = 2; fci.params = argv; fci.no_separation = 0; fci.symbol_table = NULL; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Could not call the CURLOPT_WRITEFUNCTION"); length = -1; } else if (!Z_ISUNDEF(retval)) { _php_curl_verify_handlers(ch, 1); length = zval_get_long(&retval); } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); break; } } return length; } /* }}} */ #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ /* {{{ curl_fnmatch */ static int curl_fnmatch(void *ctx, const char *pattern, const char *string) { php_curl *ch = (php_curl *) ctx; php_curl_fnmatch *t = ch->handlers->fnmatch; int rval = CURL_FNMATCHFUNC_FAIL; switch (t->method) { case PHP_CURL_USER: { zval argv[3]; zval retval; int error; zend_fcall_info fci; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); ZVAL_STRING(&argv[1], pattern); ZVAL_STRING(&argv[2], string); 
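			/* Build the zend_fcall_info by hand so the user-supplied
			   CURLOPT_FNMATCH_FUNCTION callback is invoked with three
			   arguments: the curl resource, the pattern and the string. */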
fci.size = sizeof(fci); fci.function_table = EG(function_table); ZVAL_COPY_VALUE(&fci.function_name, &t->func_name); fci.object = NULL; fci.retval = &retval; fci.param_count = 3; fci.params = argv; fci.no_separation = 0; fci.symbol_table = NULL; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Cannot call the CURLOPT_FNMATCH_FUNCTION"); } else if (!Z_ISUNDEF(retval)) { _php_curl_verify_handlers(ch, 1); rval = zval_get_long(&retval); } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); zval_ptr_dtor(&argv[2]); break; } } return rval; } /* }}} */ #endif /* {{{ curl_progress */ static size_t curl_progress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow) { php_curl *ch = (php_curl *)clientp; php_curl_progress *t = ch->handlers->progress; size_t rval = 0; #if PHP_CURL_DEBUG fprintf(stderr, "curl_progress() called\n"); fprintf(stderr, "clientp = %x, dltotal = %f, dlnow = %f, ultotal = %f, ulnow = %f\n", clientp, dltotal, dlnow, ultotal, ulnow); #endif switch (t->method) { case PHP_CURL_USER: { zval argv[5]; zval retval; int error; zend_fcall_info fci; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); ZVAL_LONG(&argv[1], (zend_long)dltotal); ZVAL_LONG(&argv[2], (zend_long)dlnow); ZVAL_LONG(&argv[3], (zend_long)ultotal); ZVAL_LONG(&argv[4], (zend_long)ulnow); fci.size = sizeof(fci); fci.function_table = EG(function_table); ZVAL_COPY_VALUE(&fci.function_name, &t->func_name); fci.object = NULL; fci.retval = &retval; fci.param_count = 5; fci.params = argv; fci.no_separation = 0; fci.symbol_table = NULL; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Cannot call the CURLOPT_PROGRESSFUNCTION"); } else if (!Z_ISUNDEF(retval)) { _php_curl_verify_handlers(ch, 1); if (0 != zval_get_long(&retval)) { rval = 1; } } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); zval_ptr_dtor(&argv[2]); zval_ptr_dtor(&argv[3]); zval_ptr_dtor(&argv[4]); break; } } return rval; } /* }}} */ /* {{{ curl_read */ static size_t curl_read(char *data, size_t size, size_t nmemb, void *ctx) { php_curl *ch = (php_curl *)ctx; php_curl_read *t = ch->handlers->read; int length = 0; switch (t->method) { case PHP_CURL_DIRECT: if (t->fp) { length = fread(data, size, nmemb, t->fp); } break; case PHP_CURL_USER: { zval argv[3]; zval retval; int error; zend_fcall_info fci; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); if (t->res) { ZVAL_RES(&argv[1], t->res); Z_ADDREF(argv[1]); } else { ZVAL_NULL(&argv[1]); } ZVAL_LONG(&argv[2], (int)size * nmemb); fci.size = sizeof(fci); fci.function_table = EG(function_table); ZVAL_COPY_VALUE(&fci.function_name, &t->func_name); fci.object = NULL; fci.retval = &retval; fci.param_count = 3; fci.params = argv; fci.no_separation = 0; fci.symbol_table = NULL; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Cannot call the CURLOPT_READFUNCTION"); #if LIBCURL_VERSION_NUM >= 0x070c01 /* 7.12.1 */ length = CURL_READFUNC_ABORT; #endif } else if (!Z_ISUNDEF(retval)) { _php_curl_verify_handlers(ch, 1); if (Z_TYPE(retval) == IS_STRING) { length = MIN((int) (size * nmemb), Z_STRLEN(retval)); memcpy(data, Z_STRVAL(retval), length); } zval_ptr_dtor(&retval); } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); zval_ptr_dtor(&argv[2]); break; } } return length; } /* }}} */ /* {{{ curl_write_header */ 
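/* Routes response header data to stdout, a FILE*, the RETURNTRANSFER
   buffer or a user callback, depending on the handler method in use. */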
static size_t curl_write_header(char *data, size_t size, size_t nmemb, void *ctx) { php_curl *ch = (php_curl *) ctx; php_curl_write *t = ch->handlers->write_header; size_t length = size * nmemb; switch (t->method) { case PHP_CURL_STDOUT: /* Handle special case write when we're returning the entire transfer */ if (ch->handlers->write->method == PHP_CURL_RETURN && length > 0) { smart_str_appendl(&ch->handlers->write->buf, data, (int) length); } else { PHPWRITE(data, length); } break; case PHP_CURL_FILE: return fwrite(data, size, nmemb, t->fp); case PHP_CURL_USER: { zval argv[2]; zval retval; int error; zend_fcall_info fci; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); ZVAL_STRINGL(&argv[1], data, length); fci.size = sizeof(fci); fci.function_table = EG(function_table); ZVAL_COPY_VALUE(&fci.function_name, &t->func_name); fci.symbol_table = NULL; fci.object = NULL; fci.retval = &retval; fci.param_count = 2; fci.params = argv; fci.no_separation = 0; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Could not call the CURLOPT_HEADERFUNCTION"); length = -1; } else if (!Z_ISUNDEF(retval)) { _php_curl_verify_handlers(ch, 1); length = zval_get_long(&retval); } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); break; } case PHP_CURL_IGNORE: return length; default: return -1; } return length; } /* }}} */ static int curl_debug(CURL *cp, curl_infotype type, char *buf, size_t buf_len, void *ctx) /* {{{ */ { php_curl *ch = (php_curl *)ctx; if (type == CURLINFO_HEADER_OUT) { if (ch->header.str) { zend_string_release(ch->header.str); } if (buf_len > 0) { ch->header.str = zend_string_init(buf, buf_len, 0); } } return 0; } /* }}} */ #if CURLOPT_PASSWDFUNCTION != 0 /* {{{ curl_passwd */ static size_t curl_passwd(void *ctx, char *prompt, char *buf, int buflen) { php_curl *ch = (php_curl *) ctx; zval *func = &ch->handlers->passwd; zval argv[3]; zval retval; int error; int ret = -1; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); ZVAL_STRING(&argv[1], prompt); ZVAL_LONG(&argv[2], buflen); error = call_user_function(EG(function_table), NULL, func, &retval, 2, argv); if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Could not call the CURLOPT_PASSWDFUNCTION"); } else if (Z_TYPE(retval) == IS_STRING) { if (Z_STRLEN(retval) > buflen) { php_error_docref(NULL, E_WARNING, "Returned password is too long for libcurl to handle"); } else { memcpy(buf, Z_STRVAL(retval), Z_STRLEN(retval) + 1); } } else { php_error_docref(NULL, E_WARNING, "User handler '%s' did not return a string", Z_STRVAL_P(func)); } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); zval_ptr_dtor(&argv[2]); zval_ptr_dtor(&retval); return ret; } /* }}} */ #endif /* {{{ curl_free_string */ static void curl_free_string(void **string) { efree((char *)*string); } /* }}} */ /* {{{ curl_free_post */ static void curl_free_post(void **post) { curl_formfree((struct HttpPost *)*post); } /* }}} */ /* {{{ curl_free_slist */ static void curl_free_slist(zval *el) { curl_slist_free_all(((struct curl_slist *)Z_PTR_P(el))); } /* }}} */ /* {{{ proto array curl_version([int version]) Return cURL version information. 
*/
PHP_FUNCTION(curl_version)
{
	curl_version_info_data *d;
	zend_long uversion = CURLVERSION_NOW;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "|l", &uversion) == FAILURE) {
		return;
	}

	d = curl_version_info(uversion);
	if (d == NULL) {
		RETURN_FALSE;
	}

	array_init(return_value);

	CAAL("version_number", d->version_num);
	CAAL("age", d->age);
	CAAL("features", d->features);
	CAAL("ssl_version_number", d->ssl_version_num);
	CAAS("version", d->version);
	CAAS("host", d->host);
	CAAS("ssl_version", d->ssl_version);
	CAAS("libz_version", d->libz_version);

	/* Add an array of protocols */
	{
		char **p = (char **) d->protocols;
		zval protocol_list;

		array_init(&protocol_list);

		while (*p != NULL) {
			add_next_index_string(&protocol_list, *p);
			p++;
		}
		CAAZ("protocols", &protocol_list);
	}
}
/* }}} */

/* {{{ alloc_curl_handle */
static php_curl *alloc_curl_handle()
{
	php_curl *ch = ecalloc(1, sizeof(php_curl));

	ch->to_free = ecalloc(1, sizeof(struct _php_curl_free));
	ch->handlers = ecalloc(1, sizeof(php_curl_handlers));
	ch->handlers->write = ecalloc(1, sizeof(php_curl_write));
	ch->handlers->write_header = ecalloc(1, sizeof(php_curl_write));
	ch->handlers->read = ecalloc(1, sizeof(php_curl_read));
	ch->handlers->progress = NULL;
#if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */
	ch->handlers->fnmatch = NULL;
#endif
	ch->clone = emalloc(sizeof(uint32_t));
	*ch->clone = 1;

	memset(&ch->err, 0, sizeof(struct _php_curl_error));

	zend_llist_init(&ch->to_free->str, sizeof(char *), (llist_dtor_func_t)curl_free_string, 0);
	zend_llist_init(&ch->to_free->post, sizeof(struct HttpPost *), (llist_dtor_func_t)curl_free_post, 0);

	ch->to_free->slist = emalloc(sizeof(HashTable));
	zend_hash_init(ch->to_free->slist, 4, NULL, curl_free_slist, 0);

	return ch;
}
/* }}} */

#if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */
/* {{{ create_certinfo */
static void create_certinfo(struct curl_certinfo *ci, zval *listcode)
{
	int i;

	if (ci) {
		zval certhash;

		for (i = 0; i < ci->num_of_certs; i++) {
			struct curl_slist *slist;

			array_init(&certhash);
			for (slist = ci->certinfo[i]; slist; slist = slist->next) {
				int len;
				char s[64];
				char *tmp;

				strncpy(s, slist->data, sizeof(s));
				/* strncpy() does not NUL-terminate when slist->data is 64
				 * bytes or longer; terminate explicitly so the strlen()
				 * done by add_assoc_string() below cannot read past the
				 * buffer. */
				s[sizeof(s) - 1] = '\0';
				tmp = memchr(s, ':', sizeof(s));
				if (tmp) {
					*tmp = '\0';
					len = strlen(s);
					add_assoc_string(&certhash, s, &slist->data[len + 1]);
				} else {
					php_error_docref(NULL, E_WARNING, "Could not extract hash key from certificate info");
				}
			}
			add_next_index_zval(listcode, &certhash);
		}
	}
}
/* }}} */
#endif

/* {{{ _php_curl_set_default_options()
   Set default options for a handle */
static void _php_curl_set_default_options(php_curl *ch)
{
	char *cainfo;

	curl_easy_setopt(ch->cp, CURLOPT_NOPROGRESS, 1);
	curl_easy_setopt(ch->cp, CURLOPT_VERBOSE, 0);
	curl_easy_setopt(ch->cp, CURLOPT_ERRORBUFFER, ch->err.str);
	curl_easy_setopt(ch->cp, CURLOPT_WRITEFUNCTION, curl_write);
	curl_easy_setopt(ch->cp, CURLOPT_FILE, (void *) ch);
	curl_easy_setopt(ch->cp, CURLOPT_READFUNCTION, curl_read);
	curl_easy_setopt(ch->cp, CURLOPT_INFILE, (void *) ch);
	curl_easy_setopt(ch->cp, CURLOPT_HEADERFUNCTION, curl_write_header);
	curl_easy_setopt(ch->cp, CURLOPT_WRITEHEADER, (void *) ch);
#if !defined(ZTS)
	curl_easy_setopt(ch->cp, CURLOPT_DNS_USE_GLOBAL_CACHE, 1);
#endif
	curl_easy_setopt(ch->cp, CURLOPT_DNS_CACHE_TIMEOUT, 120);
	curl_easy_setopt(ch->cp, CURLOPT_MAXREDIRS, 20); /* prevent infinite redirects */

	cainfo = INI_STR("openssl.cafile");
	if (!(cainfo && cainfo[0] != '\0')) {
		cainfo = INI_STR("curl.cainfo");
	}
	if (cainfo && cainfo[0] != '\0') {
		curl_easy_setopt(ch->cp, CURLOPT_CAINFO, cainfo);
	}

#if defined(ZTS)
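	/*
	 * Added note: in a thread-safe (ZTS) build libcurl must not rely on
	 * signals, because its default SIGALRM-based timeout handling for name
	 * resolution is not thread-safe; CURLOPT_NOSIGNAL below turns it off.
	 */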
curl_easy_setopt(ch->cp, CURLOPT_NOSIGNAL, 1); #endif } /* }}} */ /* {{{ proto resource curl_init([string url]) Initialize a cURL session */ PHP_FUNCTION(curl_init) { php_curl *ch; CURL *cp; char *url = NULL; size_t url_len = 0; if (zend_parse_parameters(ZEND_NUM_ARGS(), "|s", &url, &url_len) == FAILURE) { return; } cp = curl_easy_init(); if (!cp) { php_error_docref(NULL, E_WARNING, "Could not initialize a new cURL handle"); RETURN_FALSE; } ch = alloc_curl_handle(); ch->cp = cp; ch->handlers->write->method = PHP_CURL_STDOUT; ch->handlers->read->method = PHP_CURL_DIRECT; ch->handlers->write_header->method = PHP_CURL_IGNORE; _php_curl_set_default_options(ch); if (url) { if (php_curl_option_url(ch, url, url_len) == FAILURE) { _php_curl_close_ex(ch); RETURN_FALSE; } } ZVAL_RES(return_value, zend_register_resource(ch, le_curl)); ch->res = Z_RES_P(return_value); } /* }}} */ /* {{{ proto resource curl_copy_handle(resource ch) Copy a cURL handle along with all of it's preferences */ PHP_FUNCTION(curl_copy_handle) { CURL *cp; zval *zid; php_curl *ch, *dupch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } cp = curl_easy_duphandle(ch->cp); if (!cp) { php_error_docref(NULL, E_WARNING, "Cannot duplicate cURL handle"); RETURN_FALSE; } dupch = alloc_curl_handle(); dupch->cp = cp; Z_ADDREF_P(zid); if (!Z_ISUNDEF(ch->handlers->write->stream)) { Z_ADDREF(ch->handlers->write->stream); } dupch->handlers->write->stream = ch->handlers->write->stream; dupch->handlers->write->method = ch->handlers->write->method; if (!Z_ISUNDEF(ch->handlers->read->stream)) { Z_ADDREF(ch->handlers->read->stream); } dupch->handlers->read->stream = ch->handlers->read->stream; dupch->handlers->read->method = ch->handlers->read->method; dupch->handlers->write_header->method = ch->handlers->write_header->method; if (!Z_ISUNDEF(ch->handlers->write_header->stream)) { Z_ADDREF(ch->handlers->write_header->stream); } dupch->handlers->write_header->stream = ch->handlers->write_header->stream; dupch->handlers->write->fp = ch->handlers->write->fp; dupch->handlers->write_header->fp = ch->handlers->write_header->fp; dupch->handlers->read->fp = ch->handlers->read->fp; dupch->handlers->read->res = ch->handlers->read->res; #if CURLOPT_PASSWDDATA != 0 if (!Z_ISUNDEF(ch->handlers->passwd)) { ZVAL_COPY(&dupch->handlers->passwd, &ch->handlers->passwd); curl_easy_setopt(ch->cp, CURLOPT_PASSWDDATA, (void *) dupch); } #endif if (!Z_ISUNDEF(ch->handlers->write->func_name)) { ZVAL_COPY(&dupch->handlers->write->func_name, &ch->handlers->write->func_name); } if (!Z_ISUNDEF(ch->handlers->read->func_name)) { ZVAL_COPY(&dupch->handlers->read->func_name, &ch->handlers->read->func_name); } if (!Z_ISUNDEF(ch->handlers->write_header->func_name)) { ZVAL_COPY(&dupch->handlers->write_header->func_name, &ch->handlers->write_header->func_name); } curl_easy_setopt(dupch->cp, CURLOPT_ERRORBUFFER, dupch->err.str); curl_easy_setopt(dupch->cp, CURLOPT_FILE, (void *) dupch); curl_easy_setopt(dupch->cp, CURLOPT_INFILE, (void *) dupch); curl_easy_setopt(dupch->cp, CURLOPT_WRITEHEADER, (void *) dupch); if (ch->handlers->progress) { dupch->handlers->progress = ecalloc(1, sizeof(php_curl_progress)); if (!Z_ISUNDEF(ch->handlers->progress->func_name)) { ZVAL_COPY(&dupch->handlers->progress->func_name, &ch->handlers->progress->func_name); } dupch->handlers->progress->method = ch->handlers->progress->method; curl_easy_setopt(dupch->cp, 
CURLOPT_PROGRESSDATA, (void *) dupch); } /* Available since 7.21.0 */ #if LIBCURL_VERSION_NUM >= 0x071500 if (ch->handlers->fnmatch) { dupch->handlers->fnmatch = ecalloc(1, sizeof(php_curl_fnmatch)); if (!Z_ISUNDEF(ch->handlers->fnmatch->func_name)) { ZVAL_COPY(&dupch->handlers->fnmatch->func_name, &ch->handlers->fnmatch->func_name); } dupch->handlers->fnmatch->method = ch->handlers->fnmatch->method; curl_easy_setopt(dupch->cp, CURLOPT_FNMATCH_DATA, (void *) dupch); } #endif efree(dupch->to_free->slist); efree(dupch->to_free); dupch->to_free = ch->to_free; efree(dupch->clone); dupch->clone = ch->clone; /* Keep track of cloned copies to avoid invoking curl destructors for every clone */ (*ch->clone)++; ZVAL_RES(return_value, zend_register_resource(dupch, le_curl)); dupch->res = Z_RES_P(return_value); } /* }}} */ static int _php_curl_setopt(php_curl *ch, zend_long option, zval *zvalue) /* {{{ */ { CURLcode error = CURLE_OK; zend_long lval; ZVAL_DEREF(zvalue); switch (option) { /* Long options */ case CURLOPT_SSL_VERIFYHOST: lval = zval_get_long(zvalue); if (lval == 1) { #if LIBCURL_VERSION_NUM <= 0x071c00 /* 7.28.0 */ php_error_docref(NULL, E_NOTICE, "CURLOPT_SSL_VERIFYHOST with value 1 is deprecated and will be removed as of libcurl 7.28.1. It is recommended to use value 2 instead"); #else php_error_docref(NULL, E_NOTICE, "CURLOPT_SSL_VERIFYHOST no longer accepts the value 1, value 2 will be used instead"); error = curl_easy_setopt(ch->cp, option, 2); break; #endif } case CURLOPT_AUTOREFERER: case CURLOPT_BUFFERSIZE: case CURLOPT_CONNECTTIMEOUT: case CURLOPT_COOKIESESSION: case CURLOPT_CRLF: case CURLOPT_DNS_CACHE_TIMEOUT: case CURLOPT_DNS_USE_GLOBAL_CACHE: case CURLOPT_FAILONERROR: case CURLOPT_FILETIME: case CURLOPT_FORBID_REUSE: case CURLOPT_FRESH_CONNECT: case CURLOPT_FTP_USE_EPRT: case CURLOPT_FTP_USE_EPSV: case CURLOPT_HEADER: case CURLOPT_HTTPGET: case CURLOPT_HTTPPROXYTUNNEL: case CURLOPT_HTTP_VERSION: case CURLOPT_INFILESIZE: case CURLOPT_LOW_SPEED_LIMIT: case CURLOPT_LOW_SPEED_TIME: case CURLOPT_MAXCONNECTS: case CURLOPT_MAXREDIRS: case CURLOPT_NETRC: case CURLOPT_NOBODY: case CURLOPT_NOPROGRESS: case CURLOPT_NOSIGNAL: case CURLOPT_PORT: case CURLOPT_POST: case CURLOPT_PROXYPORT: case CURLOPT_PROXYTYPE: case CURLOPT_PUT: case CURLOPT_RESUME_FROM: case CURLOPT_SSLVERSION: case CURLOPT_SSL_VERIFYPEER: case CURLOPT_TIMECONDITION: case CURLOPT_TIMEOUT: case CURLOPT_TIMEVALUE: case CURLOPT_TRANSFERTEXT: case CURLOPT_UNRESTRICTED_AUTH: case CURLOPT_UPLOAD: case CURLOPT_VERBOSE: #if LIBCURL_VERSION_NUM >= 0x070a06 /* Available since 7.10.6 */ case CURLOPT_HTTPAUTH: #endif #if LIBCURL_VERSION_NUM >= 0x070a07 /* Available since 7.10.7 */ case CURLOPT_FTP_CREATE_MISSING_DIRS: case CURLOPT_PROXYAUTH: #endif #if LIBCURL_VERSION_NUM >= 0x070a08 /* Available since 7.10.8 */ case CURLOPT_FTP_RESPONSE_TIMEOUT: case CURLOPT_IPRESOLVE: case CURLOPT_MAXFILESIZE: #endif #if LIBCURL_VERSION_NUM >= 0x070b02 /* Available since 7.11.2 */ case CURLOPT_TCP_NODELAY: #endif #if LIBCURL_VERSION_NUM >= 0x070c02 /* Available since 7.12.2 */ case CURLOPT_FTPSSLAUTH: #endif #if LIBCURL_VERSION_NUM >= 0x070e01 /* Available since 7.14.1 */ case CURLOPT_IGNORE_CONTENT_LENGTH: #endif #if LIBCURL_VERSION_NUM >= 0x070f00 /* Available since 7.15.0 */ case CURLOPT_FTP_SKIP_PASV_IP: #endif #if LIBCURL_VERSION_NUM >= 0x070f01 /* Available since 7.15.1 */ case CURLOPT_FTP_FILEMETHOD: #endif #if LIBCURL_VERSION_NUM >= 0x070f02 /* Available since 7.15.2 */ case CURLOPT_CONNECT_ONLY: case CURLOPT_LOCALPORT: case 
CURLOPT_LOCALPORTRANGE: #endif #if LIBCURL_VERSION_NUM >= 0x071000 /* Available since 7.16.0 */ case CURLOPT_SSL_SESSIONID_CACHE: #endif #if LIBCURL_VERSION_NUM >= 0x071001 /* Available since 7.16.1 */ case CURLOPT_FTP_SSL_CCC: case CURLOPT_SSH_AUTH_TYPES: #endif #if LIBCURL_VERSION_NUM >= 0x071002 /* Available since 7.16.2 */ case CURLOPT_CONNECTTIMEOUT_MS: case CURLOPT_HTTP_CONTENT_DECODING: case CURLOPT_HTTP_TRANSFER_DECODING: case CURLOPT_TIMEOUT_MS: #endif #if LIBCURL_VERSION_NUM >= 0x071004 /* Available since 7.16.4 */ case CURLOPT_NEW_DIRECTORY_PERMS: case CURLOPT_NEW_FILE_PERMS: #endif #if LIBCURL_VERSION_NUM >= 0x071100 /* Available since 7.17.0 */ case CURLOPT_USE_SSL: #elif LIBCURL_VERSION_NUM >= 0x070b00 /* Available since 7.11.0 */ case CURLOPT_FTP_SSL: #endif #if LIBCURL_VERSION_NUM >= 0x071100 /* Available since 7.17.0 */ case CURLOPT_APPEND: case CURLOPT_DIRLISTONLY: #else case CURLOPT_FTPAPPEND: case CURLOPT_FTPLISTONLY: #endif #if LIBCURL_VERSION_NUM >= 0x071200 /* Available since 7.18.0 */ case CURLOPT_PROXY_TRANSFER_MODE: #endif #if LIBCURL_VERSION_NUM >= 0x071300 /* Available since 7.19.0 */ case CURLOPT_ADDRESS_SCOPE: #endif #if LIBCURL_VERSION_NUM > 0x071301 /* Available since 7.19.1 */ case CURLOPT_CERTINFO: #endif #if LIBCURL_VERSION_NUM >= 0x071304 /* Available since 7.19.4 */ case CURLOPT_PROTOCOLS: case CURLOPT_REDIR_PROTOCOLS: case CURLOPT_SOCKS5_GSSAPI_NEC: case CURLOPT_TFTP_BLKSIZE: #endif #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_FTP_USE_PRET: case CURLOPT_RTSP_CLIENT_CSEQ: case CURLOPT_RTSP_REQUEST: case CURLOPT_RTSP_SERVER_CSEQ: #endif #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ case CURLOPT_WILDCARDMATCH: #endif #if LIBCURL_VERSION_NUM >= 0x071504 /* Available since 7.21.4 */ case CURLOPT_TLSAUTH_TYPE: #endif #if LIBCURL_VERSION_NUM >= 0x071600 /* Available since 7.22.0 */ case CURLOPT_GSSAPI_DELEGATION: #endif #if LIBCURL_VERSION_NUM >= 0x071800 /* Available since 7.24.0 */ case CURLOPT_ACCEPTTIMEOUT_MS: #endif #if LIBCURL_VERSION_NUM >= 0x071900 /* Available since 7.25.0 */ case CURLOPT_SSL_OPTIONS: case CURLOPT_TCP_KEEPALIVE: case CURLOPT_TCP_KEEPIDLE: case CURLOPT_TCP_KEEPINTVL: #endif #if LIBCURL_VERSION_NUM >= 0x071f00 /* Available since 7.31.0 */ case CURLOPT_SASL_IR: #endif #if LIBCURL_VERSION_NUM >= 0x072400 /* Available since 7.36.0 */ case CURLOPT_EXPECT_100_TIMEOUT_MS: case CURLOPT_SSL_ENABLE_ALPN: case CURLOPT_SSL_ENABLE_NPN: #endif #if LIBCURL_VERSION_NUM >= 0x072500 /* Available since 7.37.0 */ case CURLOPT_HEADEROPT: #endif #if LIBCURL_VERSION_NUM >= 0x072900 /* Available since 7.41.0 */ case CURLOPT_SSL_VERIFYSTATUS: #endif #if LIBCURL_VERSION_NUM >= 0x072a00 /* Available since 7.42.0 */ case CURLOPT_PATH_AS_IS: case CURLOPT_SSL_FALSESTART: #endif #if LIBCURL_VERSION_NUM >= 0x072b00 /* Available since 7.43.0 */ case CURLOPT_PIPEWAIT: #endif #if LIBCURL_VERSION_NUM >= 0x072e00 /* Available since 7.46.0 */ case CURLOPT_STREAM_WEIGHT: #endif #if LIBCURL_VERSION_NUM >= 0x073000 /* Available since 7.48.0 */ case CURLOPT_TFTP_NO_OPTIONS: #endif #if LIBCURL_VERSION_NUM >= 0x073100 /* Available since 7.49.0 */ case CURLOPT_TCP_FASTOPEN: #endif #if CURLOPT_MUTE != 0 case CURLOPT_MUTE: #endif lval = zval_get_long(zvalue); #if LIBCURL_VERSION_NUM >= 0x71304 if ((option == CURLOPT_PROTOCOLS || option == CURLOPT_REDIR_PROTOCOLS) && (PG(open_basedir) && *PG(open_basedir)) && (lval & CURLPROTO_FILE)) { php_error_docref(NULL, E_WARNING, "CURLPROTO_FILE cannot be activated when an open_basedir is 
set"); return 1; } #endif # if defined(ZTS) if (option == CURLOPT_DNS_USE_GLOBAL_CACHE) { php_error_docref(NULL, E_WARNING, "CURLOPT_DNS_USE_GLOBAL_CACHE cannot be activated when thread safety is enabled"); return 1; } # endif error = curl_easy_setopt(ch->cp, option, lval); break; case CURLOPT_SAFE_UPLOAD: lval = zval_get_long(zvalue); if (lval == 0) { php_error_docref(NULL, E_WARNING, "Disabling safe uploads is no longer supported"); return FAILURE; } break; /* String options */ case CURLOPT_CAINFO: case CURLOPT_CAPATH: case CURLOPT_COOKIE: case CURLOPT_EGDSOCKET: case CURLOPT_INTERFACE: case CURLOPT_PROXY: case CURLOPT_PROXYUSERPWD: case CURLOPT_REFERER: case CURLOPT_SSLCERTTYPE: case CURLOPT_SSLENGINE: case CURLOPT_SSLENGINE_DEFAULT: case CURLOPT_SSLKEY: case CURLOPT_SSLKEYPASSWD: case CURLOPT_SSLKEYTYPE: case CURLOPT_SSL_CIPHER_LIST: case CURLOPT_USERAGENT: case CURLOPT_USERPWD: #if LIBCURL_VERSION_NUM >= 0x070e01 /* Available since 7.14.1 */ case CURLOPT_COOKIELIST: #endif #if LIBCURL_VERSION_NUM >= 0x070f05 /* Available since 7.15.5 */ case CURLOPT_FTP_ALTERNATIVE_TO_USER: #endif #if LIBCURL_VERSION_NUM >= 0x071101 /* Available since 7.17.1 */ case CURLOPT_SSH_HOST_PUBLIC_KEY_MD5: #endif #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ case CURLOPT_PASSWORD: case CURLOPT_PROXYPASSWORD: case CURLOPT_PROXYUSERNAME: case CURLOPT_USERNAME: #endif #if LIBCURL_VERSION_NUM >= 0x071304 /* Available since 7.19.4 */ case CURLOPT_NOPROXY: case CURLOPT_SOCKS5_GSSAPI_SERVICE: #endif #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_MAIL_FROM: case CURLOPT_RTSP_STREAM_URI: case CURLOPT_RTSP_TRANSPORT: #endif #if LIBCURL_VERSION_NUM >= 0x071504 /* Available since 7.21.4 */ case CURLOPT_TLSAUTH_PASSWORD: case CURLOPT_TLSAUTH_USERNAME: #endif #if LIBCURL_VERSION_NUM >= 0x071506 /* Available since 7.21.6 */ case CURLOPT_ACCEPT_ENCODING: case CURLOPT_TRANSFER_ENCODING: #else case CURLOPT_ENCODING: #endif #if LIBCURL_VERSION_NUM >= 0x071800 /* Available since 7.24.0 */ case CURLOPT_DNS_SERVERS: #endif #if LIBCURL_VERSION_NUM >= 0x071900 /* Available since 7.25.0 */ case CURLOPT_MAIL_AUTH: #endif #if LIBCURL_VERSION_NUM >= 0x072200 /* Available since 7.34.0 */ case CURLOPT_LOGIN_OPTIONS: #endif #if LIBCURL_VERSION_NUM >= 0x072700 /* Available since 7.39.0 */ case CURLOPT_PINNEDPUBLICKEY: #endif #if LIBCURL_VERSION_NUM >= 0x072b00 /* Available since 7.43.0 */ case CURLOPT_PROXY_SERVICE_NAME: case CURLOPT_SERVICE_NAME: #endif #if LIBCURL_VERSION_NUM >= 0x072d00 /* Available since 7.45.0 */ case CURLOPT_DEFAULT_PROTOCOL: #endif { zend_string *str = zval_get_string(zvalue); int ret = php_curl_option_str(ch, option, ZSTR_VAL(str), ZSTR_LEN(str), 0); zend_string_release(str); return ret; } /* Curl nullable string options */ case CURLOPT_CUSTOMREQUEST: case CURLOPT_FTPPORT: case CURLOPT_RANGE: #if LIBCURL_VERSION_NUM >= 0x070d00 /* Available since 7.13.0 */ case CURLOPT_FTP_ACCOUNT: #endif #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_RTSP_SESSION_ID: #endif #if LIBCURL_VERSION_NUM >= 0x072100 /* Available since 7.33.0 */ case CURLOPT_DNS_INTERFACE: case CURLOPT_DNS_LOCAL_IP4: case CURLOPT_DNS_LOCAL_IP6: case CURLOPT_XOAUTH2_BEARER: #endif #if LIBCURL_VERSION_NUM >= 0x072800 /* Available since 7.40.0 */ case CURLOPT_UNIX_SOCKET_PATH: #endif #if LIBCURL_VERSION_NUM >= 0x071004 /* Available since 7.16.4 */ case CURLOPT_KRBLEVEL: #else case CURLOPT_KRB4LEVEL: #endif { if (Z_ISNULL_P(zvalue)) { error = curl_easy_setopt(ch->cp, option, 
NULL); } else { zend_string *str = zval_get_string(zvalue); int ret = php_curl_option_str(ch, option, ZSTR_VAL(str), ZSTR_LEN(str), 0); zend_string_release(str); return ret; } break; } /* Curl private option */ case CURLOPT_PRIVATE: { zend_string *str = zval_get_string(zvalue); int ret = php_curl_option_str(ch, option, ZSTR_VAL(str), ZSTR_LEN(str), 1); zend_string_release(str); return ret; } /* Curl url option */ case CURLOPT_URL: { zend_string *str = zval_get_string(zvalue); int ret = php_curl_option_url(ch, ZSTR_VAL(str), ZSTR_LEN(str)); zend_string_release(str); return ret; } /* Curl file handle options */ case CURLOPT_FILE: case CURLOPT_INFILE: case CURLOPT_STDERR: case CURLOPT_WRITEHEADER: { FILE *fp = NULL; php_stream *what = NULL; if (Z_TYPE_P(zvalue) != IS_NULL) { what = (php_stream *)zend_fetch_resource2_ex(zvalue, "File-Handle", php_file_le_stream(), php_file_le_pstream()); if (!what) { return FAILURE; } if (FAILURE == php_stream_cast(what, PHP_STREAM_AS_STDIO, (void *) &fp, REPORT_ERRORS)) { return FAILURE; } if (!fp) { return FAILURE; } } error = CURLE_OK; switch (option) { case CURLOPT_FILE: if (!what) { if (!Z_ISUNDEF(ch->handlers->write->stream)) { zval_ptr_dtor(&ch->handlers->write->stream); ZVAL_UNDEF(&ch->handlers->write->stream); } ch->handlers->write->fp = NULL; ch->handlers->write->method = PHP_CURL_STDOUT; } else if (what->mode[0] != 'r' || what->mode[1] == '+') { zval_ptr_dtor(&ch->handlers->write->stream); ch->handlers->write->fp = fp; ch->handlers->write->method = PHP_CURL_FILE; ZVAL_COPY(&ch->handlers->write->stream, zvalue); } else { php_error_docref(NULL, E_WARNING, "the provided file handle is not writable"); return FAILURE; } break; case CURLOPT_WRITEHEADER: if (!what) { if (!Z_ISUNDEF(ch->handlers->write_header->stream)) { zval_ptr_dtor(&ch->handlers->write_header->stream); ZVAL_UNDEF(&ch->handlers->write_header->stream); } ch->handlers->write_header->fp = NULL; ch->handlers->write_header->method = PHP_CURL_IGNORE; } else if (what->mode[0] != 'r' || what->mode[1] == '+') { zval_ptr_dtor(&ch->handlers->write_header->stream); ch->handlers->write_header->fp = fp; ch->handlers->write_header->method = PHP_CURL_FILE; ZVAL_COPY(&ch->handlers->write_header->stream, zvalue);; } else { php_error_docref(NULL, E_WARNING, "the provided file handle is not writable"); return FAILURE; } break; case CURLOPT_INFILE: if (!what) { if (!Z_ISUNDEF(ch->handlers->read->stream)) { zval_ptr_dtor(&ch->handlers->read->stream); ZVAL_UNDEF(&ch->handlers->read->stream); } ch->handlers->read->fp = NULL; ch->handlers->read->res = NULL; } else { zval_ptr_dtor(&ch->handlers->read->stream); ch->handlers->read->fp = fp; ch->handlers->read->res = Z_RES_P(zvalue); ZVAL_COPY(&ch->handlers->read->stream, zvalue); } break; case CURLOPT_STDERR: if (!what) { if (!Z_ISUNDEF(ch->handlers->std_err)) { zval_ptr_dtor(&ch->handlers->std_err); ZVAL_UNDEF(&ch->handlers->std_err); } } else if (what->mode[0] != 'r' || what->mode[1] == '+') { zval_ptr_dtor(&ch->handlers->std_err); ZVAL_COPY(&ch->handlers->std_err, zvalue); } else { php_error_docref(NULL, E_WARNING, "the provided file handle is not writable"); return FAILURE; } /* break omitted intentionally */ default: error = curl_easy_setopt(ch->cp, option, fp); break; } break; } /* Curl linked list options */ case CURLOPT_HTTP200ALIASES: case CURLOPT_HTTPHEADER: case CURLOPT_POSTQUOTE: case CURLOPT_PREQUOTE: case CURLOPT_QUOTE: case CURLOPT_TELNETOPTIONS: #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_MAIL_RCPT: #endif #if 
LIBCURL_VERSION_NUM >= 0x071503 /* Available since 7.21.3 */ case CURLOPT_RESOLVE: #endif #if LIBCURL_VERSION_NUM >= 0x072500 /* Available since 7.37.0 */ case CURLOPT_PROXYHEADER: #endif #if LIBCURL_VERSION_NUM >= 0x073100 /* Available since 7.49.0 */ case CURLOPT_CONNECT_TO: #endif { zval *current; HashTable *ph; zend_string *val; struct curl_slist *slist = NULL; ph = HASH_OF(zvalue); if (!ph) { char *name = NULL; switch (option) { case CURLOPT_HTTPHEADER: name = "CURLOPT_HTTPHEADER"; break; case CURLOPT_QUOTE: name = "CURLOPT_QUOTE"; break; case CURLOPT_HTTP200ALIASES: name = "CURLOPT_HTTP200ALIASES"; break; case CURLOPT_POSTQUOTE: name = "CURLOPT_POSTQUOTE"; break; case CURLOPT_PREQUOTE: name = "CURLOPT_PREQUOTE"; break; case CURLOPT_TELNETOPTIONS: name = "CURLOPT_TELNETOPTIONS"; break; #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_MAIL_RCPT: name = "CURLOPT_MAIL_RCPT"; break; #endif #if LIBCURL_VERSION_NUM >= 0x071503 /* Available since 7.21.3 */ case CURLOPT_RESOLVE: name = "CURLOPT_RESOLVE"; break; #endif #if LIBCURL_VERSION_NUM >= 0x072500 /* Available since 7.37.0 */ case CURLOPT_PROXYHEADER: name = "CURLOPT_PROXYHEADER"; break; #endif #if LIBCURL_VERSION_NUM >= 0x073100 /* Available since 7.49.0 */ case CURLOPT_CONNECT_TO: name = "CURLOPT_CONNECT_TO"; break; #endif } php_error_docref(NULL, E_WARNING, "You must pass either an object or an array with the %s argument", name); return FAILURE; } ZEND_HASH_FOREACH_VAL(ph, current) { ZVAL_DEREF(current); val = zval_get_string(current); slist = curl_slist_append(slist, ZSTR_VAL(val)); zend_string_release(val); if (!slist) { php_error_docref(NULL, E_WARNING, "Could not build curl_slist"); return 1; } } ZEND_HASH_FOREACH_END(); if (slist) { if ((*ch->clone) == 1) { zend_hash_index_update_ptr(ch->to_free->slist, option, slist); } else { zend_hash_next_index_insert_ptr(ch->to_free->slist, slist); } } error = curl_easy_setopt(ch->cp, option, slist); break; } case CURLOPT_BINARYTRANSFER: /* Do nothing, just backward compatibility */ break; case CURLOPT_FOLLOWLOCATION: lval = zval_get_long(zvalue); #if LIBCURL_VERSION_NUM < 0x071304 if (PG(open_basedir) && *PG(open_basedir)) { if (lval != 0) { php_error_docref(NULL, E_WARNING, "CURLOPT_FOLLOWLOCATION cannot be activated when an open_basedir is set"); return FAILURE; } } #endif error = curl_easy_setopt(ch->cp, option, lval); break; case CURLOPT_HEADERFUNCTION: if (!Z_ISUNDEF(ch->handlers->write_header->func_name)) { zval_ptr_dtor(&ch->handlers->write_header->func_name); ch->handlers->write_header->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->write_header->func_name, zvalue); ch->handlers->write_header->method = PHP_CURL_USER; break; case CURLOPT_POSTFIELDS: if (Z_TYPE_P(zvalue) == IS_ARRAY || Z_TYPE_P(zvalue) == IS_OBJECT) { zval *current; HashTable *postfields; zend_string *string_key; zend_ulong num_key; struct HttpPost *first = NULL; struct HttpPost *last = NULL; CURLFORMcode form_error; postfields = HASH_OF(zvalue); if (!postfields) { php_error_docref(NULL, E_WARNING, "Couldn't get HashTable in CURLOPT_POSTFIELDS"); return FAILURE; } ZEND_HASH_FOREACH_KEY_VAL(postfields, num_key, string_key, current) { zend_string *postval; /* Pretend we have a string_key here */ if (!string_key) { string_key = zend_long_to_str(num_key); } else { zend_string_addref(string_key); } ZVAL_DEREF(current); if (Z_TYPE_P(current) == IS_OBJECT && instanceof_function(Z_OBJCE_P(current), curl_CURLFile_class)) { /* new-style file upload */ zval *prop, rv; char *type = 
NULL, *filename = NULL; prop = zend_read_property(curl_CURLFile_class, current, "name", sizeof("name")-1, 0, &rv); if (Z_TYPE_P(prop) != IS_STRING) { php_error_docref(NULL, E_WARNING, "Invalid filename for key %s", ZSTR_VAL(string_key)); } else { postval = Z_STR_P(prop); if (php_check_open_basedir(ZSTR_VAL(postval))) { return 1; } prop = zend_read_property(curl_CURLFile_class, current, "mime", sizeof("mime")-1, 0, &rv); if (Z_TYPE_P(prop) == IS_STRING && Z_STRLEN_P(prop) > 0) { type = Z_STRVAL_P(prop); } prop = zend_read_property(curl_CURLFile_class, current, "postname", sizeof("postname")-1, 0, &rv); if (Z_TYPE_P(prop) == IS_STRING && Z_STRLEN_P(prop) > 0) { filename = Z_STRVAL_P(prop); } form_error = curl_formadd(&first, &last, CURLFORM_COPYNAME, ZSTR_VAL(string_key), CURLFORM_NAMELENGTH, ZSTR_LEN(string_key), CURLFORM_FILENAME, filename ? filename : ZSTR_VAL(postval), CURLFORM_CONTENTTYPE, type ? type : "application/octet-stream", CURLFORM_FILE, ZSTR_VAL(postval), CURLFORM_END); if (form_error != CURL_FORMADD_OK) { /* Not nice to convert between enums but we only have place for one error type */ error = (CURLcode)form_error; } } zend_string_release(string_key); continue; } postval = zval_get_string(current); /* The arguments after _NAMELENGTH and _CONTENTSLENGTH * must be explicitly cast to long in curl_formadd * use since curl needs a long not an int. */ form_error = curl_formadd(&first, &last, CURLFORM_COPYNAME, ZSTR_VAL(string_key), CURLFORM_NAMELENGTH, ZSTR_LEN(string_key), CURLFORM_COPYCONTENTS, ZSTR_VAL(postval), CURLFORM_CONTENTSLENGTH, ZSTR_LEN(postval), CURLFORM_END); if (form_error != CURL_FORMADD_OK) { /* Not nice to convert between enums but we only have place for one error type */ error = (CURLcode)form_error; } zend_string_release(postval); zend_string_release(string_key); } ZEND_HASH_FOREACH_END(); SAVE_CURL_ERROR(ch, error); if (error != CURLE_OK) { return FAILURE; } if ((*ch->clone) == 1) { zend_llist_clean(&ch->to_free->post); } zend_llist_add_element(&ch->to_free->post, &first); error = curl_easy_setopt(ch->cp, CURLOPT_HTTPPOST, first); } else { #if LIBCURL_VERSION_NUM >= 0x071101 zend_string *str = zval_get_string(zvalue); /* with curl 7.17.0 and later, we can use COPYPOSTFIELDS, but we have to provide size before */ error = curl_easy_setopt(ch->cp, CURLOPT_POSTFIELDSIZE, ZSTR_LEN(str)); error = curl_easy_setopt(ch->cp, CURLOPT_COPYPOSTFIELDS, ZSTR_VAL(str)); zend_string_release(str); #else char *post = NULL; zend_string *str = zval_get_string(zvalue); post = estrndup(ZSTR_VAL(str), ZSTR_LEN(str)); zend_llist_add_element(&ch->to_free->str, &post); curl_easy_setopt(ch->cp, CURLOPT_POSTFIELDS, post); error = curl_easy_setopt(ch->cp, CURLOPT_POSTFIELDSIZE, ZSTR_LEN(str)); zend_string_release(str); #endif } break; case CURLOPT_PROGRESSFUNCTION: curl_easy_setopt(ch->cp, CURLOPT_PROGRESSFUNCTION, curl_progress); curl_easy_setopt(ch->cp, CURLOPT_PROGRESSDATA, ch); if (ch->handlers->progress == NULL) { ch->handlers->progress = ecalloc(1, sizeof(php_curl_progress)); } else if (!Z_ISUNDEF(ch->handlers->progress->func_name)) { zval_ptr_dtor(&ch->handlers->progress->func_name); ch->handlers->progress->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->progress->func_name, zvalue); ch->handlers->progress->method = PHP_CURL_USER; break; case CURLOPT_READFUNCTION: if (!Z_ISUNDEF(ch->handlers->read->func_name)) { zval_ptr_dtor(&ch->handlers->read->func_name); ch->handlers->read->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->read->func_name, zvalue); 
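			/*
			 * Added note: every callback option in this switch follows the
			 * same replace pattern - release any previously registered
			 * callable, reset the fcall cache so the function is resolved
			 * again on the next invocation, copy the new zval, then flip the
			 * handler method to PHP_CURL_USER (done on the next line). The
			 * C-level contract the reader must satisfy is the one curl_read()
			 * implements above: fill the buffer and return the byte count,
			 * e.g. (illustrative sketch only; "on_read" is hypothetical):
			 *
			 *     static size_t on_read(char *buf, size_t size, size_t nitems, void *ud)
			 *     {
			 *         return fread(buf, size, nitems, (FILE *) ud);
			 *     }
			 */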
ch->handlers->read->method = PHP_CURL_USER; break; case CURLOPT_RETURNTRANSFER: lval = zval_get_long(zvalue); if (lval) { ch->handlers->write->method = PHP_CURL_RETURN; } else { ch->handlers->write->method = PHP_CURL_STDOUT; } break; case CURLOPT_WRITEFUNCTION: if (!Z_ISUNDEF(ch->handlers->write->func_name)) { zval_ptr_dtor(&ch->handlers->write->func_name); ch->handlers->write->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->write->func_name, zvalue); ch->handlers->write->method = PHP_CURL_USER; break; #if LIBCURL_VERSION_NUM >= 0x070f05 /* Available since 7.15.5 */ case CURLOPT_MAX_RECV_SPEED_LARGE: case CURLOPT_MAX_SEND_SPEED_LARGE: lval = zval_get_long(zvalue); error = curl_easy_setopt(ch->cp, option, (curl_off_t)lval); break; #endif #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ case CURLOPT_POSTREDIR: lval = zval_get_long(zvalue); error = curl_easy_setopt(ch->cp, CURLOPT_POSTREDIR, lval & CURL_REDIR_POST_ALL); break; #endif #if CURLOPT_PASSWDFUNCTION != 0 case CURLOPT_PASSWDFUNCTION: zval_ptr_dtor(&ch->handlers->passwd); ZVAL_COPY(&ch->handlers->passwd, zvalue); error = curl_easy_setopt(ch->cp, CURLOPT_PASSWDFUNCTION, curl_passwd); error = curl_easy_setopt(ch->cp, CURLOPT_PASSWDDATA, (void *) ch); break; #endif /* the following options deal with files, therefore the open_basedir check * is required. */ case CURLOPT_COOKIEFILE: case CURLOPT_COOKIEJAR: case CURLOPT_RANDOM_FILE: case CURLOPT_SSLCERT: #if LIBCURL_VERSION_NUM >= 0x070b00 /* Available since 7.11.0 */ case CURLOPT_NETRC_FILE: #endif #if LIBCURL_VERSION_NUM >= 0x071001 /* Available since 7.16.1 */ case CURLOPT_SSH_PRIVATE_KEYFILE: case CURLOPT_SSH_PUBLIC_KEYFILE: #endif #if LIBCURL_VERSION_NUM >= 0x071300 /* Available since 7.19.0 */ case CURLOPT_CRLFILE: case CURLOPT_ISSUERCERT: #endif #if LIBCURL_VERSION_NUM >= 0x071306 /* Available since 7.19.6 */ case CURLOPT_SSH_KNOWNHOSTS: #endif { zend_string *str = zval_get_string(zvalue); int ret; if (ZSTR_LEN(str) && php_check_open_basedir(ZSTR_VAL(str))) { zend_string_release(str); return FAILURE; } ret = php_curl_option_str(ch, option, ZSTR_VAL(str), ZSTR_LEN(str), 0); zend_string_release(str); return ret; } case CURLINFO_HEADER_OUT: lval = zval_get_long(zvalue); if (lval == 1) { curl_easy_setopt(ch->cp, CURLOPT_DEBUGFUNCTION, curl_debug); curl_easy_setopt(ch->cp, CURLOPT_DEBUGDATA, (void *)ch); curl_easy_setopt(ch->cp, CURLOPT_VERBOSE, 1); } else { curl_easy_setopt(ch->cp, CURLOPT_DEBUGFUNCTION, NULL); curl_easy_setopt(ch->cp, CURLOPT_DEBUGDATA, NULL); curl_easy_setopt(ch->cp, CURLOPT_VERBOSE, 0); } break; case CURLOPT_SHARE: { php_curlsh *sh; if ((sh = (php_curlsh *)zend_fetch_resource_ex(zvalue, le_curl_share_handle_name, le_curl_share_handle))) { curl_easy_setopt(ch->cp, CURLOPT_SHARE, sh->share); } } break; #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ case CURLOPT_FNMATCH_FUNCTION: curl_easy_setopt(ch->cp, CURLOPT_FNMATCH_FUNCTION, curl_fnmatch); curl_easy_setopt(ch->cp, CURLOPT_FNMATCH_DATA, ch); if (ch->handlers->fnmatch == NULL) { ch->handlers->fnmatch = ecalloc(1, sizeof(php_curl_fnmatch)); } else if (!Z_ISUNDEF(ch->handlers->fnmatch->func_name)) { zval_ptr_dtor(&ch->handlers->fnmatch->func_name); ch->handlers->fnmatch->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->fnmatch->func_name, zvalue); ch->handlers->fnmatch->method = PHP_CURL_USER; break; #endif } SAVE_CURL_ERROR(ch, error); if (error != CURLE_OK) { return FAILURE; } else { return SUCCESS; } } /* }}} */ /* {{{ proto bool curl_setopt(resource ch, 
int option, mixed value) Set an option for a cURL transfer */ PHP_FUNCTION(curl_setopt) { zval *zid, *zvalue; zend_long options; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlz", &zid, &options, &zvalue) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (options <= 0 && options != CURLOPT_SAFE_UPLOAD) { php_error_docref(NULL, E_WARNING, "Invalid curl configuration option"); RETURN_FALSE; } if (_php_curl_setopt(ch, options, zvalue) == SUCCESS) { RETURN_TRUE; } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto bool curl_setopt_array(resource ch, array options) Set an array of option for a cURL transfer */ PHP_FUNCTION(curl_setopt_array) { zval *zid, *arr, *entry; php_curl *ch; zend_ulong option; zend_string *string_key; if (zend_parse_parameters(ZEND_NUM_ARGS(), "ra", &zid, &arr) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } ZEND_HASH_FOREACH_KEY_VAL(Z_ARRVAL_P(arr), option, string_key, entry) { if (string_key) { php_error_docref(NULL, E_WARNING, "Array keys must be CURLOPT constants or equivalent integer values"); RETURN_FALSE; } if (_php_curl_setopt(ch, (zend_long) option, entry) == FAILURE) { RETURN_FALSE; } } ZEND_HASH_FOREACH_END(); RETURN_TRUE; } /* }}} */ /* {{{ _php_curl_cleanup_handle(ch) Cleanup an execution phase */ void _php_curl_cleanup_handle(php_curl *ch) { smart_str_free(&ch->handlers->write->buf); if (ch->header.str) { zend_string_release(ch->header.str); ch->header.str = NULL; } memset(ch->err.str, 0, CURL_ERROR_SIZE + 1); ch->err.no = 0; } /* }}} */ /* {{{ proto bool curl_exec(resource ch) Perform a cURL session */ PHP_FUNCTION(curl_exec) { CURLcode error; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } _php_curl_verify_handlers(ch, 1); _php_curl_cleanup_handle(ch); error = curl_easy_perform(ch->cp); SAVE_CURL_ERROR(ch, error); /* CURLE_PARTIAL_FILE is returned by HEAD requests */ if (error != CURLE_OK && error != CURLE_PARTIAL_FILE) { smart_str_free(&ch->handlers->write->buf); RETURN_FALSE; } if (!Z_ISUNDEF(ch->handlers->std_err)) { php_stream *stream; stream = (php_stream*)zend_fetch_resource2_ex(&ch->handlers->std_err, NULL, php_file_le_stream(), php_file_le_pstream()); if (stream) { php_stream_flush(stream); } } if (ch->handlers->write->method == PHP_CURL_RETURN && ch->handlers->write->buf.s) { smart_str_0(&ch->handlers->write->buf); RETURN_STR_COPY(ch->handlers->write->buf.s); } /* flush the file handle, so any remaining data is synched to disk */ if (ch->handlers->write->method == PHP_CURL_FILE && ch->handlers->write->fp) { fflush(ch->handlers->write->fp); } if (ch->handlers->write_header->method == PHP_CURL_FILE && ch->handlers->write_header->fp) { fflush(ch->handlers->write_header->fp); } if (ch->handlers->write->method == PHP_CURL_RETURN) { RETURN_EMPTY_STRING(); } else { RETURN_TRUE; } } /* }}} */ /* {{{ proto mixed curl_getinfo(resource ch [, int option]) Get information regarding a specific transfer */ PHP_FUNCTION(curl_getinfo) { zval *zid; php_curl *ch; zend_long option = 0; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|l", &zid, &option) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ZEND_NUM_ARGS() < 2) { char *s_code; /* libcurl expects 
long datatype. So far no cases are known where it would be an issue. Using zend_long would truncate a 64-bit var on Win64, so the exact long datatype fits everywhere, as long as there's no 32-bit int overflow. */ long l_code; double d_code; #if LIBCURL_VERSION_NUM > 0x071301 struct curl_certinfo *ci = NULL; zval listcode; #endif array_init(return_value); if (curl_easy_getinfo(ch->cp, CURLINFO_EFFECTIVE_URL, &s_code) == CURLE_OK) { CAAS("url", s_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_CONTENT_TYPE, &s_code) == CURLE_OK) { if (s_code != NULL) { CAAS("content_type", s_code); } else { zval retnull; ZVAL_NULL(&retnull); CAAZ("content_type", &retnull); } } if (curl_easy_getinfo(ch->cp, CURLINFO_HTTP_CODE, &l_code) == CURLE_OK) { CAAL("http_code", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_HEADER_SIZE, &l_code) == CURLE_OK) { CAAL("header_size", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_REQUEST_SIZE, &l_code) == CURLE_OK) { CAAL("request_size", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_FILETIME, &l_code) == CURLE_OK) { CAAL("filetime", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SSL_VERIFYRESULT, &l_code) == CURLE_OK) { CAAL("ssl_verify_result", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_REDIRECT_COUNT, &l_code) == CURLE_OK) { CAAL("redirect_count", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_TOTAL_TIME, &d_code) == CURLE_OK) { CAAD("total_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_NAMELOOKUP_TIME, &d_code) == CURLE_OK) { CAAD("namelookup_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_CONNECT_TIME, &d_code) == CURLE_OK) { CAAD("connect_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_PRETRANSFER_TIME, &d_code) == CURLE_OK) { CAAD("pretransfer_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SIZE_UPLOAD, &d_code) == CURLE_OK) { CAAD("size_upload", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SIZE_DOWNLOAD, &d_code) == CURLE_OK) { CAAD("size_download", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SPEED_DOWNLOAD, &d_code) == CURLE_OK) { CAAD("speed_download", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SPEED_UPLOAD, &d_code) == CURLE_OK) { CAAD("speed_upload", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d_code) == CURLE_OK) { CAAD("download_content_length", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_CONTENT_LENGTH_UPLOAD, &d_code) == CURLE_OK) { CAAD("upload_content_length", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_STARTTRANSFER_TIME, &d_code) == CURLE_OK) { CAAD("starttransfer_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_REDIRECT_TIME, &d_code) == CURLE_OK) { CAAD("redirect_time", d_code); } #if LIBCURL_VERSION_NUM >= 0x071202 /* Available since 7.18.2 */ if (curl_easy_getinfo(ch->cp, CURLINFO_REDIRECT_URL, &s_code) == CURLE_OK) { CAAS("redirect_url", s_code); } #endif #if LIBCURL_VERSION_NUM >= 0x071300 /* Available since 7.19.0 */ if (curl_easy_getinfo(ch->cp, CURLINFO_PRIMARY_IP, &s_code) == CURLE_OK) { CAAS("primary_ip", s_code); } #endif #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ if (curl_easy_getinfo(ch->cp, CURLINFO_CERTINFO, &ci) == CURLE_OK) { array_init(&listcode); create_certinfo(ci, &listcode); CAAZ("certinfo", &listcode); } #endif #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ if (curl_easy_getinfo(ch->cp, CURLINFO_PRIMARY_PORT, &l_code) == CURLE_OK) { CAAL("primary_port", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_LOCAL_IP, &s_code) == CURLE_OK) { CAAS("local_ip", s_code); } 
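		/*
		 * Added note: each field above and below is probed individually with
		 * curl_easy_getinfo() and appended through the CAAL/CAAD/CAAS/CAAZ
		 * macros, so a probe the running libcurl cannot answer simply leaves
		 * that key out of the array instead of failing the whole
		 * curl_getinfo() call.
		 */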
if (curl_easy_getinfo(ch->cp, CURLINFO_LOCAL_PORT, &l_code) == CURLE_OK) { CAAL("local_port", l_code); } #endif if (ch->header.str) { CAASTR("request_header", ch->header.str); } } else { switch (option) { case CURLINFO_HEADER_OUT: if (ch->header.str) { RETURN_STR_COPY(ch->header.str); } else { RETURN_FALSE; } #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ case CURLINFO_CERTINFO: { struct curl_certinfo *ci = NULL; array_init(return_value); if (curl_easy_getinfo(ch->cp, CURLINFO_CERTINFO, &ci) == CURLE_OK) { create_certinfo(ci, return_value); } else { RETURN_FALSE; } break; } #endif default: { int type = CURLINFO_TYPEMASK & option; switch (type) { case CURLINFO_STRING: { char *s_code = NULL; if (curl_easy_getinfo(ch->cp, option, &s_code) == CURLE_OK && s_code) { RETURN_STRING(s_code); } else { RETURN_FALSE; } break; } case CURLINFO_LONG: { zend_long code = 0; if (curl_easy_getinfo(ch->cp, option, &code) == CURLE_OK) { RETURN_LONG(code); } else { RETURN_FALSE; } break; } case CURLINFO_DOUBLE: { double code = 0.0; if (curl_easy_getinfo(ch->cp, option, &code) == CURLE_OK) { RETURN_DOUBLE(code); } else { RETURN_FALSE; } break; } #if LIBCURL_VERSION_NUM >= 0x070c03 /* Available since 7.12.3 */ case CURLINFO_SLIST: { struct curl_slist *slist; array_init(return_value); if (curl_easy_getinfo(ch->cp, option, &slist) == CURLE_OK) { while (slist) { add_next_index_string(return_value, slist->data); slist = slist->next; } curl_slist_free_all(slist); } else { RETURN_FALSE; } break; } #endif default: RETURN_FALSE; } } } } } /* }}} */ /* {{{ proto string curl_error(resource ch) Return a string contain the last error for the current session */ PHP_FUNCTION(curl_error) { zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } ch->err.str[CURL_ERROR_SIZE] = 0; RETURN_STRING(ch->err.str); } /* }}} */ /* {{{ proto int curl_errno(resource ch) Return an integer containing the last error number */ PHP_FUNCTION(curl_errno) { zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } RETURN_LONG(ch->err.no); } /* }}} */ /* {{{ proto void curl_close(resource ch) Close a cURL session */ PHP_FUNCTION(curl_close) { zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ch->in_callback) { php_error_docref(NULL, E_WARNING, "Attempt to close cURL handle from a callback"); return; } if (Z_REFCOUNT_P(zid) <= 2) { zend_list_close(Z_RES_P(zid)); } } /* }}} */ /* {{{ _php_curl_close_ex() List destructor for curl handles */ static void _php_curl_close_ex(php_curl *ch) { #if PHP_CURL_DEBUG fprintf(stderr, "DTOR CALLED, ch = %x\n", ch); #endif _php_curl_verify_handlers(ch, 0); /* * Libcurl is doing connection caching. When easy handle is cleaned up, * if the handle was previously used by the curl_multi_api, the connection * remains open un the curl multi handle is cleaned up. Some protocols are * sending content like the FTP one, and libcurl try to use the * WRITEFUNCTION or the HEADERFUNCTION. Since structures used in those * callback are freed, we need to use an other callback to which avoid * segfaults. 
* * Libcurl commit d021f2e8a00 fix this issue and should be part of 7.28.2 */ curl_easy_setopt(ch->cp, CURLOPT_HEADERFUNCTION, curl_write_nothing); curl_easy_setopt(ch->cp, CURLOPT_WRITEFUNCTION, curl_write_nothing); curl_easy_cleanup(ch->cp); /* cURL destructors should be invoked only by last curl handle */ if (--(*ch->clone) == 0) { zend_llist_clean(&ch->to_free->str); zend_llist_clean(&ch->to_free->post); zend_hash_destroy(ch->to_free->slist); efree(ch->to_free->slist); efree(ch->to_free); efree(ch->clone); } smart_str_free(&ch->handlers->write->buf); zval_ptr_dtor(&ch->handlers->write->func_name); zval_ptr_dtor(&ch->handlers->read->func_name); zval_ptr_dtor(&ch->handlers->write_header->func_name); #if CURLOPT_PASSWDFUNCTION != 0 zval_ptr_dtor(&ch->handlers->passwd); #endif zval_ptr_dtor(&ch->handlers->std_err); if (ch->header.str) { zend_string_release(ch->header.str); } zval_ptr_dtor(&ch->handlers->write_header->stream); zval_ptr_dtor(&ch->handlers->write->stream); zval_ptr_dtor(&ch->handlers->read->stream); efree(ch->handlers->write); efree(ch->handlers->write_header); efree(ch->handlers->read); if (ch->handlers->progress) { zval_ptr_dtor(&ch->handlers->progress->func_name); efree(ch->handlers->progress); } #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ if (ch->handlers->fnmatch) { zval_ptr_dtor(&ch->handlers->fnmatch->func_name); efree(ch->handlers->fnmatch); } #endif efree(ch->handlers); efree(ch); } /* }}} */ /* {{{ _php_curl_close() List destructor for curl handles */ static void _php_curl_close(zend_resource *rsrc) { php_curl *ch = (php_curl *) rsrc->ptr; _php_curl_close_ex(ch); } /* }}} */ #if LIBCURL_VERSION_NUM >= 0x070c00 /* Available since 7.12.0 */ /* {{{ proto bool curl_strerror(int code) return string describing error code */ PHP_FUNCTION(curl_strerror) { zend_long code; const char *str; if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &code) == FAILURE) { return; } str = curl_easy_strerror(code); if (str) { RETURN_STRING(str); } else { RETURN_NULL(); } } /* }}} */ #endif #if LIBCURL_VERSION_NUM >= 0x070c01 /* 7.12.1 */ /* {{{ _php_curl_reset_handlers() Reset all handlers of a given php_curl */ static void _php_curl_reset_handlers(php_curl *ch) { if (!Z_ISUNDEF(ch->handlers->write->stream)) { zval_ptr_dtor(&ch->handlers->write->stream); ZVAL_UNDEF(&ch->handlers->write->stream); } ch->handlers->write->fp = NULL; ch->handlers->write->method = PHP_CURL_STDOUT; if (!Z_ISUNDEF(ch->handlers->write_header->stream)) { zval_ptr_dtor(&ch->handlers->write_header->stream); ZVAL_UNDEF(&ch->handlers->write_header->stream); } ch->handlers->write_header->fp = NULL; ch->handlers->write_header->method = PHP_CURL_IGNORE; if (!Z_ISUNDEF(ch->handlers->read->stream)) { zval_ptr_dtor(&ch->handlers->read->stream); ZVAL_UNDEF(&ch->handlers->read->stream); } ch->handlers->read->fp = NULL; ch->handlers->read->res = NULL; ch->handlers->read->method = PHP_CURL_DIRECT; if (!Z_ISUNDEF(ch->handlers->std_err)) { zval_ptr_dtor(&ch->handlers->std_err); ZVAL_UNDEF(&ch->handlers->std_err); } if (ch->handlers->progress) { zval_ptr_dtor(&ch->handlers->progress->func_name); efree(ch->handlers->progress); ch->handlers->progress = NULL; } #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ if (ch->handlers->fnmatch) { zval_ptr_dtor(&ch->handlers->fnmatch->func_name); efree(ch->handlers->fnmatch); ch->handlers->fnmatch = NULL; } #endif } /* }}} */ /* {{{ proto void curl_reset(resource ch) Reset all options of a libcurl session handle */ PHP_FUNCTION(curl_reset) { zval *zid; php_curl 
*ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ch->in_callback) { php_error_docref(NULL, E_WARNING, "Attempt to reset cURL handle from a callback"); return; } curl_easy_reset(ch->cp); _php_curl_reset_handlers(ch); _php_curl_set_default_options(ch); } /* }}} */ #endif #if LIBCURL_VERSION_NUM > 0x070f03 /* 7.15.4 */ /* {{{ proto void curl_escape(resource ch, string str) URL encodes the given string */ PHP_FUNCTION(curl_escape) { char *str = NULL, *res = NULL; size_t str_len = 0; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if ((res = curl_easy_escape(ch->cp, str, str_len))) { RETVAL_STRING(res); curl_free(res); } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto void curl_unescape(resource ch, string str) URL decodes the given string */ PHP_FUNCTION(curl_unescape) { char *str = NULL, *out = NULL; size_t str_len = 0; int out_len; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (str_len > INT_MAX) { RETURN_FALSE; } if ((out = curl_easy_unescape(ch->cp, str, str_len, &out_len))) { RETVAL_STRINGL(out, out_len); curl_free(out); } else { RETURN_FALSE; } } /* }}} */ #endif #if LIBCURL_VERSION_NUM >= 0x071200 /* 7.18.0 */ /* {{{ proto void curl_pause(resource ch, int bitmask) pause and unpause a connection */ PHP_FUNCTION(curl_pause) { zend_long bitmask; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl", &zid, &bitmask) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } RETURN_LONG(curl_easy_pause(ch->cp, bitmask)); } /* }}} */ #endif #endif /* HAVE_CURL */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: fdm=marker * vim: noet sw=4 ts=4 */
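/*
 * Added commentary on the handle lifetime model implemented above: every
 * php_curl carries a heap-allocated counter (ch->clone) shared by all copies
 * made through curl_copy_handle(). alloc_curl_handle() starts it at 1, each
 * copy increments it, and _php_curl_close_ex() tears down the shared to_free
 * lists only when the counter reaches 0, so curl_slist and HttpPost data stay
 * alive while any clone might still use them. A minimal sketch of the same
 * idea in isolation (illustrative only; handle_t is hypothetical):
 *
 *     #include <stdint.h>
 *     #include <stdlib.h>
 *
 *     typedef struct { uint32_t *clone; } handle_t;
 *
 *     handle_t *handle_copy(handle_t *src)
 *     {
 *         handle_t *dup = malloc(sizeof(*dup));
 *         dup->clone = src->clone;        // share one counter
 *         ++(*dup->clone);
 *         return dup;
 *     }
 *
 *     void handle_close(handle_t *h)
 *     {
 *         if (--(*h->clone) == 0) {       // last clone frees shared state
 *             free(h->clone);
 *         }
 *         free(h);
 *     }
 */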
/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2016 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Sterling Hughes <sterling@php.net> | +----------------------------------------------------------------------+ */ /* $Id$ */ #define ZEND_INCLUDE_FULL_WINDOWS_HEADERS #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php.h" #if HAVE_CURL #include <stdio.h> #include <string.h> #ifdef PHP_WIN32 #include <winsock2.h> #include <sys/types.h> #endif #include <curl/curl.h> #include <curl/easy.h> /* As of curl 7.11.1 this is no longer defined inside curl.h */ #ifndef HttpPost #define HttpPost curl_httppost #endif /* {{{ cruft for thread safe SSL crypto locks */ #if defined(ZTS) && defined(HAVE_CURL_SSL) # ifdef PHP_WIN32 # define PHP_CURL_NEED_OPENSSL_TSL # include <openssl/crypto.h> # else /* !PHP_WIN32 */ # if defined(HAVE_CURL_OPENSSL) # if defined(HAVE_OPENSSL_CRYPTO_H) # define PHP_CURL_NEED_OPENSSL_TSL # include <openssl/crypto.h> # else # warning \ "libcurl was compiled with OpenSSL support, but configure could not find " \ "openssl/crypto.h; thus no SSL crypto locking callbacks will be set, which may " \ "cause random crashes on SSL requests" # endif # elif defined(HAVE_CURL_GNUTLS) # if defined(HAVE_GCRYPT_H) # define PHP_CURL_NEED_GNUTLS_TSL # include <gcrypt.h> # else # warning \ "libcurl was compiled with GnuTLS support, but configure could not find " \ "gcrypt.h; thus no SSL crypto locking callbacks will be set, which may " \ "cause random crashes on SSL requests" # endif # else # warning \ "libcurl was compiled with SSL support, but configure could not determine which" \ "library was used; thus no SSL crypto locking callbacks will be set, which may " \ "cause random crashes on SSL requests" # endif /* HAVE_CURL_OPENSSL || HAVE_CURL_GNUTLS */ # endif /* PHP_WIN32 */ #endif /* ZTS && HAVE_CURL_SSL */ /* }}} */ #define SMART_STR_PREALLOC 4096 #include "zend_smart_str.h" #include "ext/standard/info.h" #include "ext/standard/file.h" #include "ext/standard/url.h" #include "php_curl.h" int le_curl; int le_curl_multi_handle; int le_curl_share_handle; #ifdef PHP_CURL_NEED_OPENSSL_TSL /* {{{ */ static MUTEX_T *php_curl_openssl_tsl = NULL; static void php_curl_ssl_lock(int mode, int n, const char * file, int line) { if (mode & CRYPTO_LOCK) { tsrm_mutex_lock(php_curl_openssl_tsl[n]); } else { tsrm_mutex_unlock(php_curl_openssl_tsl[n]); } } static unsigned long php_curl_ssl_id(void) { return (unsigned long) tsrm_thread_id(); } #endif /* }}} */ #ifdef PHP_CURL_NEED_GNUTLS_TSL /* {{{ */ static int php_curl_ssl_mutex_create(void **m) { if (*((MUTEX_T *) m) = tsrm_mutex_alloc()) { return SUCCESS; } else { return FAILURE; } } static int php_curl_ssl_mutex_destroy(void **m) { tsrm_mutex_free(*((MUTEX_T *) m)); return SUCCESS; } static int php_curl_ssl_mutex_lock(void **m) { return tsrm_mutex_lock(*((MUTEX_T *) m)); 
}

static int php_curl_ssl_mutex_unlock(void **m)
{
	return tsrm_mutex_unlock(*((MUTEX_T *) m));
}

static struct gcry_thread_cbs php_curl_gnutls_tsl = {
	GCRY_THREAD_OPTION_USER,
	NULL,
	php_curl_ssl_mutex_create,
	php_curl_ssl_mutex_destroy,
	php_curl_ssl_mutex_lock,
	php_curl_ssl_mutex_unlock
};
#endif
/* }}} */

static void _php_curl_close_ex(php_curl *ch);
static void _php_curl_close(zend_resource *rsrc);

#define SAVE_CURL_ERROR(__handle, __err) (__handle)->err.no = (int) __err;

#define CAAL(s, v) add_assoc_long_ex(return_value, s, sizeof(s) - 1, (zend_long) v);
#define CAAD(s, v) add_assoc_double_ex(return_value, s, sizeof(s) - 1, (double) v);
#define CAAS(s, v) add_assoc_string_ex(return_value, s, sizeof(s) - 1, (char *) (v ? v : ""));
#define CAASTR(s, v) add_assoc_str_ex(return_value, s, sizeof(s) - 1, \
		v ? zend_string_copy(v) : ZSTR_EMPTY_ALLOC());
#define CAAZ(s, v) add_assoc_zval_ex(return_value, s, sizeof(s) - 1, (zval *) v);

#if defined(PHP_WIN32) || defined(__GNUC__)
# define php_curl_ret(__ret) RETVAL_FALSE; return __ret;
#else
# define php_curl_ret(__ret) RETVAL_FALSE; return;
#endif

/* "len" must be a size_t: PHP string lengths are size_t and can exceed
 * INT_MAX on 64-bit builds, so funnelling them through an int would
 * truncate the length and defeat the embedded-NUL check below. */
static int php_curl_option_str(php_curl *ch, zend_long option, const char *str, const size_t len, zend_bool make_copy)
{
	CURLcode error = CURLE_OK;

	if (strlen(str) != len) {
		php_error_docref(NULL, E_WARNING, "Curl option contains invalid characters (\\0)");
		return FAILURE;
	}

#if LIBCURL_VERSION_NUM >= 0x071100
	if (make_copy) {
#endif
		char *copystr;

		/* Strings passed to libcurl as 'char *' arguments are copied by the library since 7.17.0 */
		copystr = estrndup(str, len);
		error = curl_easy_setopt(ch->cp, option, copystr);
		zend_llist_add_element(&ch->to_free->str, &copystr);
#if LIBCURL_VERSION_NUM >= 0x071100
	} else {
		error = curl_easy_setopt(ch->cp, option, str);
	}
#endif

	SAVE_CURL_ERROR(ch, error)

	return error == CURLE_OK ? SUCCESS : FAILURE;
}

static int php_curl_option_url(php_curl *ch, const char *url, const size_t len) /* {{{ */
{
	/* Disable file:// if open_basedir is used */
	if (PG(open_basedir) && *PG(open_basedir)) {
#if LIBCURL_VERSION_NUM >= 0x071304
		curl_easy_setopt(ch->cp, CURLOPT_PROTOCOLS, CURLPROTO_ALL & ~CURLPROTO_FILE);
#else
		php_url *uri;

		if (!(uri = php_url_parse_ex(url, len))) {
			php_error_docref(NULL, E_WARNING, "Invalid URL '%s'", url);
			return FAILURE;
		}

		if (uri->scheme && !strncasecmp("file", uri->scheme, sizeof("file"))) {
			php_error_docref(NULL, E_WARNING, "Protocol 'file' disabled in cURL");
			php_url_free(uri);
			return FAILURE;
		}
		php_url_free(uri);
#endif
	}

	return php_curl_option_str(ch, CURLOPT_URL, url, len, 0);
}
/* }}} */

void _php_curl_verify_handlers(php_curl *ch, int reporterror) /* {{{ */
{
	php_stream *stream;

	ZEND_ASSERT(ch && ch->handlers);

	if (!Z_ISUNDEF(ch->handlers->std_err)) {
		stream = (php_stream *)zend_fetch_resource2_ex(&ch->handlers->std_err, NULL, php_file_le_stream(), php_file_le_pstream());
		if (stream == NULL) {
			if (reporterror) {
				php_error_docref(NULL, E_WARNING, "CURLOPT_STDERR resource has gone away, resetting to stderr");
			}
			zval_ptr_dtor(&ch->handlers->std_err);
			ZVAL_UNDEF(&ch->handlers->std_err);

			curl_easy_setopt(ch->cp, CURLOPT_STDERR, stderr);
		}
	}
	if (ch->handlers->read && !Z_ISUNDEF(ch->handlers->read->stream)) {
		stream = (php_stream *)zend_fetch_resource2_ex(&ch->handlers->read->stream, NULL, php_file_le_stream(), php_file_le_pstream());
		if (stream == NULL) {
			if (reporterror) {
				php_error_docref(NULL, E_WARNING, "CURLOPT_INFILE resource has gone away, resetting to default");
			}
			zval_ptr_dtor(&ch->handlers->read->stream);
			ZVAL_UNDEF(&ch->handlers->read->stream);
			ch->handlers->read->res = NULL;
			ch->handlers->read->fp = 0;

			curl_easy_setopt(ch->cp, CURLOPT_INFILE, (void *) ch);
		}
	}
	if (ch->handlers->write_header && !Z_ISUNDEF(ch->handlers->write_header->stream)) {
		stream = (php_stream *)zend_fetch_resource2_ex(&ch->handlers->write_header->stream, NULL, php_file_le_stream(), php_file_le_pstream());
		if (stream == NULL) {
			if (reporterror) {
				php_error_docref(NULL, E_WARNING, "CURLOPT_WRITEHEADER resource has gone away, resetting to default");
			}
			zval_ptr_dtor(&ch->handlers->write_header->stream);
			ZVAL_UNDEF(&ch->handlers->write_header->stream);
			ch->handlers->write_header->fp = 0;

			ch->handlers->write_header->method = PHP_CURL_IGNORE;
			curl_easy_setopt(ch->cp, CURLOPT_WRITEHEADER, (void *) ch);
		}
	}
	if (ch->handlers->write && !Z_ISUNDEF(ch->handlers->write->stream)) {
		stream = (php_stream *)zend_fetch_resource2_ex(&ch->handlers->write->stream, NULL, php_file_le_stream(), php_file_le_pstream());
		if (stream == NULL) {
			if (reporterror) {
				php_error_docref(NULL, E_WARNING, "CURLOPT_FILE resource has gone away, resetting to default");
			}
			zval_ptr_dtor(&ch->handlers->write->stream);
			ZVAL_UNDEF(&ch->handlers->write->stream);
			ch->handlers->write->fp = 0;

			ch->handlers->write->method = PHP_CURL_STDOUT;
			curl_easy_setopt(ch->cp, CURLOPT_FILE, (void *) ch);
		}
	}
	return;
}
/* }}} */

/* {{{ arginfo */
ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_version, 0, 0, 0)
	ZEND_ARG_INFO(0, version)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_init, 0, 0, 0)
	ZEND_ARG_INFO(0, url)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_curl_copy_handle, 0)
	ZEND_ARG_INFO(0, ch)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_curl_setopt, 0)
	ZEND_ARG_INFO(0, ch)
	ZEND_ARG_INFO(0, option)
	ZEND_ARG_INFO(0, value)
ZEND_END_ARG_INFO()

ZEND_BEGIN_ARG_INFO(arginfo_curl_setopt_array, 0)
	ZEND_ARG_INFO(0, ch)
ZEND_ARG_ARRAY_INFO(0, options, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_exec, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_getinfo, 0, 0, 1) ZEND_ARG_INFO(0, ch) ZEND_ARG_INFO(0, option) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_error, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_errno, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_close, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() #if LIBCURL_VERSION_NUM >= 0x070c01 /* 7.12.1 */ ZEND_BEGIN_ARG_INFO(arginfo_curl_reset, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() #endif #if LIBCURL_VERSION_NUM > 0x070f03 /* 7.15.4 */ ZEND_BEGIN_ARG_INFO(arginfo_curl_escape, 0) ZEND_ARG_INFO(0, ch) ZEND_ARG_INFO(0, str) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_unescape, 0) ZEND_ARG_INFO(0, ch) ZEND_ARG_INFO(0, str) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_setopt, 0) ZEND_ARG_INFO(0, sh) ZEND_ARG_INFO(0, option) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_init, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_add_handle, 0) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_remove_handle, 0) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_multi_select, 0, 0, 1) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(0, timeout) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_multi_exec, 0, 0, 1) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(1, still_running) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_getcontent, 0) ZEND_ARG_INFO(0, ch) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_curl_multi_info_read, 0, 0, 1) ZEND_ARG_INFO(0, mh) ZEND_ARG_INFO(1, msgs_in_queue) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_close, 0) ZEND_ARG_INFO(0, mh) ZEND_END_ARG_INFO() #if LIBCURL_VERSION_NUM >= 0x070c00 /* Available since 7.12.0 */ ZEND_BEGIN_ARG_INFO(arginfo_curl_strerror, 0) ZEND_ARG_INFO(0, errornum) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_multi_strerror, 0) ZEND_ARG_INFO(0, errornum) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO(arginfo_curl_share_init, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_share_close, 0) ZEND_ARG_INFO(0, sh) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO(arginfo_curl_share_setopt, 0) ZEND_ARG_INFO(0, sh) ZEND_ARG_INFO(0, option) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() #if LIBCURL_VERSION_NUM >= 0x071200 /* Available since 7.18.0 */ ZEND_BEGIN_ARG_INFO(arginfo_curl_pause, 0) ZEND_ARG_INFO(0, ch) ZEND_ARG_INFO(0, bitmask) ZEND_END_ARG_INFO() #endif ZEND_BEGIN_ARG_INFO_EX(arginfo_curlfile_create, 0, 0, 1) ZEND_ARG_INFO(0, filename) ZEND_ARG_INFO(0, mimetype) ZEND_ARG_INFO(0, postname) ZEND_END_ARG_INFO() /* }}} */ /* {{{ curl_functions[] */ const zend_function_entry curl_functions[] = { PHP_FE(curl_init, arginfo_curl_init) PHP_FE(curl_copy_handle, arginfo_curl_copy_handle) PHP_FE(curl_version, arginfo_curl_version) PHP_FE(curl_setopt, arginfo_curl_setopt) PHP_FE(curl_setopt_array, arginfo_curl_setopt_array) PHP_FE(curl_exec, arginfo_curl_exec) PHP_FE(curl_getinfo, arginfo_curl_getinfo) PHP_FE(curl_error, arginfo_curl_error) PHP_FE(curl_errno, arginfo_curl_errno) PHP_FE(curl_close, arginfo_curl_close) #if LIBCURL_VERSION_NUM >= 0x070c00 /* 7.12.0 */ PHP_FE(curl_strerror, arginfo_curl_strerror) PHP_FE(curl_multi_strerror, arginfo_curl_multi_strerror) #endif #if LIBCURL_VERSION_NUM >= 0x070c01 /* 7.12.1 */ 
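	/* curl_reset() is a thin wrapper around curl_easy_reset(), which
	   libcurl introduced in 7.12.1, hence the version guard above */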
PHP_FE(curl_reset, arginfo_curl_reset) #endif #if LIBCURL_VERSION_NUM >= 0x070f04 /* 7.15.4 */ PHP_FE(curl_escape, arginfo_curl_escape) PHP_FE(curl_unescape, arginfo_curl_unescape) #endif #if LIBCURL_VERSION_NUM >= 0x071200 /* 7.18.0 */ PHP_FE(curl_pause, arginfo_curl_pause) #endif PHP_FE(curl_multi_init, arginfo_curl_multi_init) PHP_FE(curl_multi_add_handle, arginfo_curl_multi_add_handle) PHP_FE(curl_multi_remove_handle, arginfo_curl_multi_remove_handle) PHP_FE(curl_multi_select, arginfo_curl_multi_select) PHP_FE(curl_multi_exec, arginfo_curl_multi_exec) PHP_FE(curl_multi_getcontent, arginfo_curl_multi_getcontent) PHP_FE(curl_multi_info_read, arginfo_curl_multi_info_read) PHP_FE(curl_multi_close, arginfo_curl_multi_close) #if LIBCURL_VERSION_NUM >= 0x070f04 /* 7.15.4 */ PHP_FE(curl_multi_setopt, arginfo_curl_multi_setopt) #endif PHP_FE(curl_share_init, arginfo_curl_share_init) PHP_FE(curl_share_close, arginfo_curl_share_close) PHP_FE(curl_share_setopt, arginfo_curl_share_setopt) PHP_FE(curl_file_create, arginfo_curlfile_create) PHP_FE_END }; /* }}} */ /* {{{ curl_module_entry */ zend_module_entry curl_module_entry = { STANDARD_MODULE_HEADER, "curl", curl_functions, PHP_MINIT(curl), PHP_MSHUTDOWN(curl), NULL, NULL, PHP_MINFO(curl), PHP_CURL_VERSION, STANDARD_MODULE_PROPERTIES }; /* }}} */ #ifdef COMPILE_DL_CURL ZEND_GET_MODULE (curl) #endif /* {{{ PHP_INI_BEGIN */ PHP_INI_BEGIN() PHP_INI_ENTRY("curl.cainfo", "", PHP_INI_SYSTEM, NULL) PHP_INI_END() /* }}} */ /* {{{ PHP_MINFO_FUNCTION */ PHP_MINFO_FUNCTION(curl) { curl_version_info_data *d; char **p; char str[1024]; size_t n = 0; d = curl_version_info(CURLVERSION_NOW); php_info_print_table_start(); php_info_print_table_row(2, "cURL support", "enabled"); php_info_print_table_row(2, "cURL Information", d->version); sprintf(str, "%d", d->age); php_info_print_table_row(2, "Age", str); /* To update on each new cURL release using src/main.c in cURL sources */ if (d->features) { struct feat { const char *name; int bitmask; }; unsigned int i; static const struct feat feats[] = { #if LIBCURL_VERSION_NUM >= 0x070a07 /* 7.10.7 */ {"AsynchDNS", CURL_VERSION_ASYNCHDNS}, #endif #if LIBCURL_VERSION_NUM >= 0x070f04 /* 7.15.4 */ {"CharConv", CURL_VERSION_CONV}, #endif #if LIBCURL_VERSION_NUM >= 0x070a06 /* 7.10.6 */ {"Debug", CURL_VERSION_DEBUG}, {"GSS-Negotiate", CURL_VERSION_GSSNEGOTIATE}, #endif #if LIBCURL_VERSION_NUM >= 0x070c00 /* 7.12.0 */ {"IDN", CURL_VERSION_IDN}, #endif {"IPv6", CURL_VERSION_IPV6}, {"krb4", CURL_VERSION_KERBEROS4}, #if LIBCURL_VERSION_NUM >= 0x070b01 /* 7.11.1 */ {"Largefile", CURL_VERSION_LARGEFILE}, #endif {"libz", CURL_VERSION_LIBZ}, #if LIBCURL_VERSION_NUM >= 0x070a06 /* 7.10.6 */ {"NTLM", CURL_VERSION_NTLM}, #endif #if LIBCURL_VERSION_NUM >= 0x071600 /* 7.22.0 */ {"NTLMWB", CURL_VERSION_NTLM_WB}, #endif #if LIBCURL_VERSION_NUM >= 0x070a08 /* 7.10.8 */ {"SPNEGO", CURL_VERSION_SPNEGO}, #endif {"SSL", CURL_VERSION_SSL}, #if LIBCURL_VERSION_NUM >= 0x070d02 /* 7.13.2 */ {"SSPI", CURL_VERSION_SSPI}, #endif #if LIBCURL_VERSION_NUM >= 0x071504 /* 7.21.4 */ {"TLS-SRP", CURL_VERSION_TLSAUTH_SRP}, #endif #if LIBCURL_VERSION_NUM >= 0x072100 /* 7.33.0 */ {"HTTP2", CURL_VERSION_HTTP2}, #endif #if LIBCURL_VERSION_NUM >= 0x072600 /* 7.38.0 */ {"GSSAPI", CURL_VERSION_GSSAPI}, #endif #if LIBCURL_VERSION_NUM >= 0x072800 /* 7.40.0 */ {"KERBEROS5", CURL_VERSION_KERBEROS5}, {"UNIX_SOCKETS", CURL_VERSION_UNIX_SOCKETS}, #endif #if LIBCURL_VERSION_NUM >= 0x072f00 /* 7.47.0 */ {"PSL", CURL_VERSION_PSL}, #endif {NULL, 0} }; php_info_print_table_row(1, 
"Features"); for(i=0; i<sizeof(feats)/sizeof(feats[0]); i++) { if (feats[i].name) { php_info_print_table_row(2, feats[i].name, d->features & feats[i].bitmask ? "Yes" : "No"); } } } n = 0; p = (char **) d->protocols; while (*p != NULL) { n += sprintf(str + n, "%s%s", *p, *(p + 1) != NULL ? ", " : ""); p++; } php_info_print_table_row(2, "Protocols", str); php_info_print_table_row(2, "Host", d->host); if (d->ssl_version) { php_info_print_table_row(2, "SSL Version", d->ssl_version); } if (d->libz_version) { php_info_print_table_row(2, "ZLib Version", d->libz_version); } #if defined(CURLVERSION_SECOND) && CURLVERSION_NOW >= CURLVERSION_SECOND if (d->ares) { php_info_print_table_row(2, "ZLib Version", d->ares); } #endif #if defined(CURLVERSION_THIRD) && CURLVERSION_NOW >= CURLVERSION_THIRD if (d->libidn) { php_info_print_table_row(2, "libIDN Version", d->libidn); } #endif #if LIBCURL_VERSION_NUM >= 0x071300 if (d->iconv_ver_num) { php_info_print_table_row(2, "IconV Version", d->iconv_ver_num); } if (d->libssh_version) { php_info_print_table_row(2, "libSSH Version", d->libssh_version); } #endif php_info_print_table_end(); } /* }}} */ #define REGISTER_CURL_CONSTANT(__c) REGISTER_LONG_CONSTANT(#__c, __c, CONST_CS | CONST_PERSISTENT) /* {{{ PHP_MINIT_FUNCTION */ PHP_MINIT_FUNCTION(curl) { le_curl = zend_register_list_destructors_ex(_php_curl_close, NULL, "curl", module_number); le_curl_multi_handle = zend_register_list_destructors_ex(_php_curl_multi_close, NULL, "curl_multi", module_number); le_curl_share_handle = zend_register_list_destructors_ex(_php_curl_share_close, NULL, "curl_share", module_number); REGISTER_INI_ENTRIES(); /* See http://curl.haxx.se/lxr/source/docs/libcurl/symbols-in-versions or curl src/docs/libcurl/symbols-in-versions for a (almost) complete list of options and which version they were introduced */ /* Constants for curl_setopt() */ REGISTER_CURL_CONSTANT(CURLOPT_AUTOREFERER); REGISTER_CURL_CONSTANT(CURLOPT_BINARYTRANSFER); REGISTER_CURL_CONSTANT(CURLOPT_BUFFERSIZE); REGISTER_CURL_CONSTANT(CURLOPT_CAINFO); REGISTER_CURL_CONSTANT(CURLOPT_CAPATH); REGISTER_CURL_CONSTANT(CURLOPT_CONNECTTIMEOUT); REGISTER_CURL_CONSTANT(CURLOPT_COOKIE); REGISTER_CURL_CONSTANT(CURLOPT_COOKIEFILE); REGISTER_CURL_CONSTANT(CURLOPT_COOKIEJAR); REGISTER_CURL_CONSTANT(CURLOPT_COOKIESESSION); REGISTER_CURL_CONSTANT(CURLOPT_CRLF); REGISTER_CURL_CONSTANT(CURLOPT_CUSTOMREQUEST); REGISTER_CURL_CONSTANT(CURLOPT_DNS_CACHE_TIMEOUT); REGISTER_CURL_CONSTANT(CURLOPT_DNS_USE_GLOBAL_CACHE); REGISTER_CURL_CONSTANT(CURLOPT_EGDSOCKET); REGISTER_CURL_CONSTANT(CURLOPT_ENCODING); REGISTER_CURL_CONSTANT(CURLOPT_FAILONERROR); REGISTER_CURL_CONSTANT(CURLOPT_FILE); REGISTER_CURL_CONSTANT(CURLOPT_FILETIME); REGISTER_CURL_CONSTANT(CURLOPT_FOLLOWLOCATION); REGISTER_CURL_CONSTANT(CURLOPT_FORBID_REUSE); REGISTER_CURL_CONSTANT(CURLOPT_FRESH_CONNECT); REGISTER_CURL_CONSTANT(CURLOPT_FTPAPPEND); REGISTER_CURL_CONSTANT(CURLOPT_FTPLISTONLY); REGISTER_CURL_CONSTANT(CURLOPT_FTPPORT); REGISTER_CURL_CONSTANT(CURLOPT_FTP_USE_EPRT); REGISTER_CURL_CONSTANT(CURLOPT_FTP_USE_EPSV); REGISTER_CURL_CONSTANT(CURLOPT_HEADER); REGISTER_CURL_CONSTANT(CURLOPT_HEADERFUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_HTTP200ALIASES); REGISTER_CURL_CONSTANT(CURLOPT_HTTPGET); REGISTER_CURL_CONSTANT(CURLOPT_HTTPHEADER); REGISTER_CURL_CONSTANT(CURLOPT_HTTPPROXYTUNNEL); REGISTER_CURL_CONSTANT(CURLOPT_HTTP_VERSION); REGISTER_CURL_CONSTANT(CURLOPT_INFILE); REGISTER_CURL_CONSTANT(CURLOPT_INFILESIZE); REGISTER_CURL_CONSTANT(CURLOPT_INTERFACE); 
REGISTER_CURL_CONSTANT(CURLOPT_KRB4LEVEL); REGISTER_CURL_CONSTANT(CURLOPT_LOW_SPEED_LIMIT); REGISTER_CURL_CONSTANT(CURLOPT_LOW_SPEED_TIME); REGISTER_CURL_CONSTANT(CURLOPT_MAXCONNECTS); REGISTER_CURL_CONSTANT(CURLOPT_MAXREDIRS); REGISTER_CURL_CONSTANT(CURLOPT_NETRC); REGISTER_CURL_CONSTANT(CURLOPT_NOBODY); REGISTER_CURL_CONSTANT(CURLOPT_NOPROGRESS); REGISTER_CURL_CONSTANT(CURLOPT_NOSIGNAL); REGISTER_CURL_CONSTANT(CURLOPT_PORT); REGISTER_CURL_CONSTANT(CURLOPT_POST); REGISTER_CURL_CONSTANT(CURLOPT_POSTFIELDS); REGISTER_CURL_CONSTANT(CURLOPT_POSTQUOTE); REGISTER_CURL_CONSTANT(CURLOPT_PREQUOTE); REGISTER_CURL_CONSTANT(CURLOPT_PRIVATE); REGISTER_CURL_CONSTANT(CURLOPT_PROGRESSFUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_PROXY); REGISTER_CURL_CONSTANT(CURLOPT_PROXYPORT); REGISTER_CURL_CONSTANT(CURLOPT_PROXYTYPE); REGISTER_CURL_CONSTANT(CURLOPT_PROXYUSERPWD); REGISTER_CURL_CONSTANT(CURLOPT_PUT); REGISTER_CURL_CONSTANT(CURLOPT_QUOTE); REGISTER_CURL_CONSTANT(CURLOPT_RANDOM_FILE); REGISTER_CURL_CONSTANT(CURLOPT_RANGE); REGISTER_CURL_CONSTANT(CURLOPT_READDATA); REGISTER_CURL_CONSTANT(CURLOPT_READFUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_REFERER); REGISTER_CURL_CONSTANT(CURLOPT_RESUME_FROM); REGISTER_CURL_CONSTANT(CURLOPT_RETURNTRANSFER); REGISTER_CURL_CONSTANT(CURLOPT_SHARE); REGISTER_CURL_CONSTANT(CURLOPT_SSLCERT); REGISTER_CURL_CONSTANT(CURLOPT_SSLCERTPASSWD); REGISTER_CURL_CONSTANT(CURLOPT_SSLCERTTYPE); REGISTER_CURL_CONSTANT(CURLOPT_SSLENGINE); REGISTER_CURL_CONSTANT(CURLOPT_SSLENGINE_DEFAULT); REGISTER_CURL_CONSTANT(CURLOPT_SSLKEY); REGISTER_CURL_CONSTANT(CURLOPT_SSLKEYPASSWD); REGISTER_CURL_CONSTANT(CURLOPT_SSLKEYTYPE); REGISTER_CURL_CONSTANT(CURLOPT_SSLVERSION); REGISTER_CURL_CONSTANT(CURLOPT_SSL_CIPHER_LIST); REGISTER_CURL_CONSTANT(CURLOPT_SSL_VERIFYHOST); REGISTER_CURL_CONSTANT(CURLOPT_SSL_VERIFYPEER); REGISTER_CURL_CONSTANT(CURLOPT_STDERR); REGISTER_CURL_CONSTANT(CURLOPT_TELNETOPTIONS); REGISTER_CURL_CONSTANT(CURLOPT_TIMECONDITION); REGISTER_CURL_CONSTANT(CURLOPT_TIMEOUT); REGISTER_CURL_CONSTANT(CURLOPT_TIMEVALUE); REGISTER_CURL_CONSTANT(CURLOPT_TRANSFERTEXT); REGISTER_CURL_CONSTANT(CURLOPT_UNRESTRICTED_AUTH); REGISTER_CURL_CONSTANT(CURLOPT_UPLOAD); REGISTER_CURL_CONSTANT(CURLOPT_URL); REGISTER_CURL_CONSTANT(CURLOPT_USERAGENT); REGISTER_CURL_CONSTANT(CURLOPT_USERPWD); REGISTER_CURL_CONSTANT(CURLOPT_VERBOSE); REGISTER_CURL_CONSTANT(CURLOPT_WRITEFUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_WRITEHEADER); /* */ REGISTER_CURL_CONSTANT(CURLE_ABORTED_BY_CALLBACK); REGISTER_CURL_CONSTANT(CURLE_BAD_CALLING_ORDER); REGISTER_CURL_CONSTANT(CURLE_BAD_CONTENT_ENCODING); REGISTER_CURL_CONSTANT(CURLE_BAD_DOWNLOAD_RESUME); REGISTER_CURL_CONSTANT(CURLE_BAD_FUNCTION_ARGUMENT); REGISTER_CURL_CONSTANT(CURLE_BAD_PASSWORD_ENTERED); REGISTER_CURL_CONSTANT(CURLE_COULDNT_CONNECT); REGISTER_CURL_CONSTANT(CURLE_COULDNT_RESOLVE_HOST); REGISTER_CURL_CONSTANT(CURLE_COULDNT_RESOLVE_PROXY); REGISTER_CURL_CONSTANT(CURLE_FAILED_INIT); REGISTER_CURL_CONSTANT(CURLE_FILE_COULDNT_READ_FILE); REGISTER_CURL_CONSTANT(CURLE_FTP_ACCESS_DENIED); REGISTER_CURL_CONSTANT(CURLE_FTP_BAD_DOWNLOAD_RESUME); REGISTER_CURL_CONSTANT(CURLE_FTP_CANT_GET_HOST); REGISTER_CURL_CONSTANT(CURLE_FTP_CANT_RECONNECT); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_GET_SIZE); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_RETR_FILE); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_SET_ASCII); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_SET_BINARY); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_STOR_FILE); REGISTER_CURL_CONSTANT(CURLE_FTP_COULDNT_USE_REST); 
REGISTER_CURL_CONSTANT(CURLE_FTP_PARTIAL_FILE); REGISTER_CURL_CONSTANT(CURLE_FTP_PORT_FAILED); REGISTER_CURL_CONSTANT(CURLE_FTP_QUOTE_ERROR); REGISTER_CURL_CONSTANT(CURLE_FTP_USER_PASSWORD_INCORRECT); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_227_FORMAT); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_PASS_REPLY); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_PASV_REPLY); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_SERVER_REPLY); REGISTER_CURL_CONSTANT(CURLE_FTP_WEIRD_USER_REPLY); REGISTER_CURL_CONSTANT(CURLE_FTP_WRITE_ERROR); REGISTER_CURL_CONSTANT(CURLE_FUNCTION_NOT_FOUND); REGISTER_CURL_CONSTANT(CURLE_GOT_NOTHING); REGISTER_CURL_CONSTANT(CURLE_HTTP_NOT_FOUND); REGISTER_CURL_CONSTANT(CURLE_HTTP_PORT_FAILED); REGISTER_CURL_CONSTANT(CURLE_HTTP_POST_ERROR); REGISTER_CURL_CONSTANT(CURLE_HTTP_RANGE_ERROR); REGISTER_CURL_CONSTANT(CURLE_HTTP_RETURNED_ERROR); REGISTER_CURL_CONSTANT(CURLE_LDAP_CANNOT_BIND); REGISTER_CURL_CONSTANT(CURLE_LDAP_SEARCH_FAILED); REGISTER_CURL_CONSTANT(CURLE_LIBRARY_NOT_FOUND); REGISTER_CURL_CONSTANT(CURLE_MALFORMAT_USER); REGISTER_CURL_CONSTANT(CURLE_OBSOLETE); REGISTER_CURL_CONSTANT(CURLE_OK); REGISTER_CURL_CONSTANT(CURLE_OPERATION_TIMEDOUT); REGISTER_CURL_CONSTANT(CURLE_OPERATION_TIMEOUTED); REGISTER_CURL_CONSTANT(CURLE_OUT_OF_MEMORY); REGISTER_CURL_CONSTANT(CURLE_PARTIAL_FILE); REGISTER_CURL_CONSTANT(CURLE_READ_ERROR); REGISTER_CURL_CONSTANT(CURLE_RECV_ERROR); REGISTER_CURL_CONSTANT(CURLE_SEND_ERROR); REGISTER_CURL_CONSTANT(CURLE_SHARE_IN_USE); REGISTER_CURL_CONSTANT(CURLE_SSL_CACERT); REGISTER_CURL_CONSTANT(CURLE_SSL_CERTPROBLEM); REGISTER_CURL_CONSTANT(CURLE_SSL_CIPHER); REGISTER_CURL_CONSTANT(CURLE_SSL_CONNECT_ERROR); REGISTER_CURL_CONSTANT(CURLE_SSL_ENGINE_NOTFOUND); REGISTER_CURL_CONSTANT(CURLE_SSL_ENGINE_SETFAILED); REGISTER_CURL_CONSTANT(CURLE_SSL_PEER_CERTIFICATE); REGISTER_CURL_CONSTANT(CURLE_TELNET_OPTION_SYNTAX); REGISTER_CURL_CONSTANT(CURLE_TOO_MANY_REDIRECTS); REGISTER_CURL_CONSTANT(CURLE_UNKNOWN_TELNET_OPTION); REGISTER_CURL_CONSTANT(CURLE_UNSUPPORTED_PROTOCOL); REGISTER_CURL_CONSTANT(CURLE_URL_MALFORMAT); REGISTER_CURL_CONSTANT(CURLE_URL_MALFORMAT_USER); REGISTER_CURL_CONSTANT(CURLE_WRITE_ERROR); /* cURL info constants */ REGISTER_CURL_CONSTANT(CURLINFO_CONNECT_TIME); REGISTER_CURL_CONSTANT(CURLINFO_CONTENT_LENGTH_DOWNLOAD); REGISTER_CURL_CONSTANT(CURLINFO_CONTENT_LENGTH_UPLOAD); REGISTER_CURL_CONSTANT(CURLINFO_CONTENT_TYPE); REGISTER_CURL_CONSTANT(CURLINFO_EFFECTIVE_URL); REGISTER_CURL_CONSTANT(CURLINFO_FILETIME); REGISTER_CURL_CONSTANT(CURLINFO_HEADER_OUT); REGISTER_CURL_CONSTANT(CURLINFO_HEADER_SIZE); REGISTER_CURL_CONSTANT(CURLINFO_HTTP_CODE); REGISTER_CURL_CONSTANT(CURLINFO_LASTONE); REGISTER_CURL_CONSTANT(CURLINFO_NAMELOOKUP_TIME); REGISTER_CURL_CONSTANT(CURLINFO_PRETRANSFER_TIME); REGISTER_CURL_CONSTANT(CURLINFO_PRIVATE); REGISTER_CURL_CONSTANT(CURLINFO_REDIRECT_COUNT); REGISTER_CURL_CONSTANT(CURLINFO_REDIRECT_TIME); REGISTER_CURL_CONSTANT(CURLINFO_REQUEST_SIZE); REGISTER_CURL_CONSTANT(CURLINFO_SIZE_DOWNLOAD); REGISTER_CURL_CONSTANT(CURLINFO_SIZE_UPLOAD); REGISTER_CURL_CONSTANT(CURLINFO_SPEED_DOWNLOAD); REGISTER_CURL_CONSTANT(CURLINFO_SPEED_UPLOAD); REGISTER_CURL_CONSTANT(CURLINFO_SSL_VERIFYRESULT); REGISTER_CURL_CONSTANT(CURLINFO_STARTTRANSFER_TIME); REGISTER_CURL_CONSTANT(CURLINFO_TOTAL_TIME); /* Other */ REGISTER_CURL_CONSTANT(CURLMSG_DONE); REGISTER_CURL_CONSTANT(CURLVERSION_NOW); /* Curl Multi Constants */ REGISTER_CURL_CONSTANT(CURLM_BAD_EASY_HANDLE); REGISTER_CURL_CONSTANT(CURLM_BAD_HANDLE); REGISTER_CURL_CONSTANT(CURLM_CALL_MULTI_PERFORM); 
REGISTER_CURL_CONSTANT(CURLM_INTERNAL_ERROR); REGISTER_CURL_CONSTANT(CURLM_OK); REGISTER_CURL_CONSTANT(CURLM_OUT_OF_MEMORY); #if LIBCURL_VERSION_NUM >= 0x072001 /* Available since 7.32.1 */ REGISTER_CURL_CONSTANT(CURLM_ADDED_ALREADY); #endif /* Curl proxy constants */ REGISTER_CURL_CONSTANT(CURLPROXY_HTTP); REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS4); REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS5); /* Curl Share constants */ REGISTER_CURL_CONSTANT(CURLSHOPT_NONE); REGISTER_CURL_CONSTANT(CURLSHOPT_SHARE); REGISTER_CURL_CONSTANT(CURLSHOPT_UNSHARE); /* Curl Http Version constants (CURLOPT_HTTP_VERSION) */ REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_1_0); REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_1_1); REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_NONE); /* Curl Lock constants */ REGISTER_CURL_CONSTANT(CURL_LOCK_DATA_COOKIE); REGISTER_CURL_CONSTANT(CURL_LOCK_DATA_DNS); REGISTER_CURL_CONSTANT(CURL_LOCK_DATA_SSL_SESSION); /* Curl NETRC constants (CURLOPT_NETRC) */ REGISTER_CURL_CONSTANT(CURL_NETRC_IGNORED); REGISTER_CURL_CONSTANT(CURL_NETRC_OPTIONAL); REGISTER_CURL_CONSTANT(CURL_NETRC_REQUIRED); /* Curl SSL Version constants (CURLOPT_SSLVERSION) */ REGISTER_CURL_CONSTANT(CURL_SSLVERSION_DEFAULT); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_SSLv2); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_SSLv3); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1); /* Curl TIMECOND constants (CURLOPT_TIMECONDITION) */ REGISTER_CURL_CONSTANT(CURL_TIMECOND_IFMODSINCE); REGISTER_CURL_CONSTANT(CURL_TIMECOND_IFUNMODSINCE); REGISTER_CURL_CONSTANT(CURL_TIMECOND_LASTMOD); REGISTER_CURL_CONSTANT(CURL_TIMECOND_NONE); /* Curl version constants */ REGISTER_CURL_CONSTANT(CURL_VERSION_IPV6); REGISTER_CURL_CONSTANT(CURL_VERSION_KERBEROS4); REGISTER_CURL_CONSTANT(CURL_VERSION_LIBZ); REGISTER_CURL_CONSTANT(CURL_VERSION_SSL); #if LIBCURL_VERSION_NUM >= 0x070a06 /* Available since 7.10.6 */ REGISTER_CURL_CONSTANT(CURLOPT_HTTPAUTH); /* http authentication options */ REGISTER_CURL_CONSTANT(CURLAUTH_ANY); REGISTER_CURL_CONSTANT(CURLAUTH_ANYSAFE); REGISTER_CURL_CONSTANT(CURLAUTH_BASIC); REGISTER_CURL_CONSTANT(CURLAUTH_DIGEST); REGISTER_CURL_CONSTANT(CURLAUTH_GSSNEGOTIATE); REGISTER_CURL_CONSTANT(CURLAUTH_NONE); REGISTER_CURL_CONSTANT(CURLAUTH_NTLM); #endif #if LIBCURL_VERSION_NUM >= 0x070a07 /* Available since 7.10.7 */ REGISTER_CURL_CONSTANT(CURLINFO_HTTP_CONNECTCODE); REGISTER_CURL_CONSTANT(CURLOPT_FTP_CREATE_MISSING_DIRS); REGISTER_CURL_CONSTANT(CURLOPT_PROXYAUTH); #endif #if LIBCURL_VERSION_NUM >= 0x070a08 /* Available since 7.10.8 */ REGISTER_CURL_CONSTANT(CURLE_FILESIZE_EXCEEDED); REGISTER_CURL_CONSTANT(CURLE_LDAP_INVALID_URL); REGISTER_CURL_CONSTANT(CURLINFO_HTTPAUTH_AVAIL); REGISTER_CURL_CONSTANT(CURLINFO_RESPONSE_CODE); REGISTER_CURL_CONSTANT(CURLINFO_PROXYAUTH_AVAIL); REGISTER_CURL_CONSTANT(CURLOPT_FTP_RESPONSE_TIMEOUT); REGISTER_CURL_CONSTANT(CURLOPT_IPRESOLVE); REGISTER_CURL_CONSTANT(CURLOPT_MAXFILESIZE); REGISTER_CURL_CONSTANT(CURL_IPRESOLVE_V4); REGISTER_CURL_CONSTANT(CURL_IPRESOLVE_V6); REGISTER_CURL_CONSTANT(CURL_IPRESOLVE_WHATEVER); #endif #if LIBCURL_VERSION_NUM >= 0x070b00 /* Available since 7.11.0 */ REGISTER_CURL_CONSTANT(CURLE_FTP_SSL_FAILED); REGISTER_CURL_CONSTANT(CURLFTPSSL_ALL); REGISTER_CURL_CONSTANT(CURLFTPSSL_CONTROL); REGISTER_CURL_CONSTANT(CURLFTPSSL_NONE); REGISTER_CURL_CONSTANT(CURLFTPSSL_TRY); REGISTER_CURL_CONSTANT(CURLOPT_FTP_SSL); REGISTER_CURL_CONSTANT(CURLOPT_NETRC_FILE); #endif #if LIBCURL_VERSION_NUM >= 0x070c02 /* Available since 7.12.2 */ REGISTER_CURL_CONSTANT(CURLFTPAUTH_DEFAULT); REGISTER_CURL_CONSTANT(CURLFTPAUTH_SSL); 
REGISTER_CURL_CONSTANT(CURLFTPAUTH_TLS); REGISTER_CURL_CONSTANT(CURLOPT_FTPSSLAUTH); #endif #if LIBCURL_VERSION_NUM >= 0x070d00 /* Available since 7.13.0 */ REGISTER_CURL_CONSTANT(CURLOPT_FTP_ACCOUNT); #endif #if LIBCURL_VERSION_NUM >= 0x070b02 /* Available since 7.11.2 */ REGISTER_CURL_CONSTANT(CURLOPT_TCP_NODELAY); #endif #if LIBCURL_VERSION_NUM >= 0x070c02 /* Available since 7.12.2 */ REGISTER_CURL_CONSTANT(CURLINFO_OS_ERRNO); #endif #if LIBCURL_VERSION_NUM >= 0x070c03 /* Available since 7.12.3 */ REGISTER_CURL_CONSTANT(CURLINFO_NUM_CONNECTS); REGISTER_CURL_CONSTANT(CURLINFO_SSL_ENGINES); #endif #if LIBCURL_VERSION_NUM >= 0x070e01 /* Available since 7.14.1 */ REGISTER_CURL_CONSTANT(CURLINFO_COOKIELIST); REGISTER_CURL_CONSTANT(CURLOPT_COOKIELIST); REGISTER_CURL_CONSTANT(CURLOPT_IGNORE_CONTENT_LENGTH); #endif #if LIBCURL_VERSION_NUM >= 0x070f00 /* Available since 7.15.0 */ REGISTER_CURL_CONSTANT(CURLOPT_FTP_SKIP_PASV_IP); #endif #if LIBCURL_VERSION_NUM >= 0x070f01 /* Available since 7.15.1 */ REGISTER_CURL_CONSTANT(CURLOPT_FTP_FILEMETHOD); #endif #if LIBCURL_VERSION_NUM >= 0x070f02 /* Available since 7.15.2 */ REGISTER_CURL_CONSTANT(CURLOPT_CONNECT_ONLY); REGISTER_CURL_CONSTANT(CURLOPT_LOCALPORT); REGISTER_CURL_CONSTANT(CURLOPT_LOCALPORTRANGE); #endif #if LIBCURL_VERSION_NUM >= 0x070f03 /* Available since 7.15.3 */ REGISTER_CURL_CONSTANT(CURLFTPMETHOD_MULTICWD); REGISTER_CURL_CONSTANT(CURLFTPMETHOD_NOCWD); REGISTER_CURL_CONSTANT(CURLFTPMETHOD_SINGLECWD); #endif #if LIBCURL_VERSION_NUM >= 0x070f04 /* Available since 7.15.4 */ REGISTER_CURL_CONSTANT(CURLINFO_FTP_ENTRY_PATH); #endif #if LIBCURL_VERSION_NUM >= 0x070f05 /* Available since 7.15.5 */ REGISTER_CURL_CONSTANT(CURLOPT_FTP_ALTERNATIVE_TO_USER); REGISTER_CURL_CONSTANT(CURLOPT_MAX_RECV_SPEED_LARGE); REGISTER_CURL_CONSTANT(CURLOPT_MAX_SEND_SPEED_LARGE); #endif #if LIBCURL_VERSION_NUM >= 0x071000 /* Available since 7.16.0 */ REGISTER_CURL_CONSTANT(CURLE_SSL_CACERT_BADFILE); REGISTER_CURL_CONSTANT(CURLOPT_SSL_SESSIONID_CACHE); REGISTER_CURL_CONSTANT(CURLMOPT_PIPELINING); #endif #if LIBCURL_VERSION_NUM >= 0x071001 /* Available since 7.16.1 */ REGISTER_CURL_CONSTANT(CURLE_SSH); REGISTER_CURL_CONSTANT(CURLOPT_FTP_SSL_CCC); REGISTER_CURL_CONSTANT(CURLOPT_SSH_AUTH_TYPES); REGISTER_CURL_CONSTANT(CURLOPT_SSH_PRIVATE_KEYFILE); REGISTER_CURL_CONSTANT(CURLOPT_SSH_PUBLIC_KEYFILE); REGISTER_CURL_CONSTANT(CURLFTPSSL_CCC_ACTIVE); REGISTER_CURL_CONSTANT(CURLFTPSSL_CCC_NONE); REGISTER_CURL_CONSTANT(CURLFTPSSL_CCC_PASSIVE); #endif #if LIBCURL_VERSION_NUM >= 0x071002 /* Available since 7.16.2 */ REGISTER_CURL_CONSTANT(CURLOPT_CONNECTTIMEOUT_MS); REGISTER_CURL_CONSTANT(CURLOPT_HTTP_CONTENT_DECODING); REGISTER_CURL_CONSTANT(CURLOPT_HTTP_TRANSFER_DECODING); REGISTER_CURL_CONSTANT(CURLOPT_TIMEOUT_MS); #endif #if LIBCURL_VERSION_NUM >= 0x071003 /* Available since 7.16.3 */ REGISTER_CURL_CONSTANT(CURLMOPT_MAXCONNECTS); #endif #if LIBCURL_VERSION_NUM >= 0x071004 /* Available since 7.16.4 */ REGISTER_CURL_CONSTANT(CURLOPT_KRBLEVEL); REGISTER_CURL_CONSTANT(CURLOPT_NEW_DIRECTORY_PERMS); REGISTER_CURL_CONSTANT(CURLOPT_NEW_FILE_PERMS); #endif #if LIBCURL_VERSION_NUM >= 0x071100 /* Available since 7.17.0 */ REGISTER_CURL_CONSTANT(CURLOPT_APPEND); REGISTER_CURL_CONSTANT(CURLOPT_DIRLISTONLY); REGISTER_CURL_CONSTANT(CURLOPT_USE_SSL); /* Curl SSL Constants */ REGISTER_CURL_CONSTANT(CURLUSESSL_ALL); REGISTER_CURL_CONSTANT(CURLUSESSL_CONTROL); REGISTER_CURL_CONSTANT(CURLUSESSL_NONE); REGISTER_CURL_CONSTANT(CURLUSESSL_TRY); #endif #if LIBCURL_VERSION_NUM >= 0x071101 /* 
Available since 7.17.1 */ REGISTER_CURL_CONSTANT(CURLOPT_SSH_HOST_PUBLIC_KEY_MD5); #endif #if LIBCURL_VERSION_NUM >= 0x071200 /* Available since 7.18.0 */ REGISTER_CURL_CONSTANT(CURLOPT_PROXY_TRANSFER_MODE); REGISTER_CURL_CONSTANT(CURLPAUSE_ALL); REGISTER_CURL_CONSTANT(CURLPAUSE_CONT); REGISTER_CURL_CONSTANT(CURLPAUSE_RECV); REGISTER_CURL_CONSTANT(CURLPAUSE_RECV_CONT); REGISTER_CURL_CONSTANT(CURLPAUSE_SEND); REGISTER_CURL_CONSTANT(CURLPAUSE_SEND_CONT); REGISTER_CURL_CONSTANT(CURL_READFUNC_PAUSE); REGISTER_CURL_CONSTANT(CURL_WRITEFUNC_PAUSE); REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS4A); REGISTER_CURL_CONSTANT(CURLPROXY_SOCKS5_HOSTNAME); #endif #if LIBCURL_VERSION_NUM >= 0x071202 /* Available since 7.18.2 */ REGISTER_CURL_CONSTANT(CURLINFO_REDIRECT_URL); #endif #if LIBCURL_VERSION_NUM >= 0x071300 /* Available since 7.19.0 */ REGISTER_CURL_CONSTANT(CURLINFO_APPCONNECT_TIME); REGISTER_CURL_CONSTANT(CURLINFO_PRIMARY_IP); REGISTER_CURL_CONSTANT(CURLOPT_ADDRESS_SCOPE); REGISTER_CURL_CONSTANT(CURLOPT_CRLFILE); REGISTER_CURL_CONSTANT(CURLOPT_ISSUERCERT); REGISTER_CURL_CONSTANT(CURLOPT_KEYPASSWD); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_ANY); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_DEFAULT); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_HOST); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_KEYBOARD); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_NONE); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_PASSWORD); REGISTER_CURL_CONSTANT(CURLSSH_AUTH_PUBLICKEY); #endif #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ REGISTER_CURL_CONSTANT(CURLINFO_CERTINFO); REGISTER_CURL_CONSTANT(CURLOPT_CERTINFO); REGISTER_CURL_CONSTANT(CURLOPT_PASSWORD); REGISTER_CURL_CONSTANT(CURLOPT_POSTREDIR); REGISTER_CURL_CONSTANT(CURLOPT_PROXYPASSWORD); REGISTER_CURL_CONSTANT(CURLOPT_PROXYUSERNAME); REGISTER_CURL_CONSTANT(CURLOPT_USERNAME); REGISTER_CURL_CONSTANT(CURL_REDIR_POST_301); REGISTER_CURL_CONSTANT(CURL_REDIR_POST_302); REGISTER_CURL_CONSTANT(CURL_REDIR_POST_ALL); #endif #if LIBCURL_VERSION_NUM >= 0x071303 /* Available since 7.19.3 */ REGISTER_CURL_CONSTANT(CURLAUTH_DIGEST_IE); #endif #if LIBCURL_VERSION_NUM >= 0x071304 /* Available since 7.19.4 */ REGISTER_CURL_CONSTANT(CURLINFO_CONDITION_UNMET); REGISTER_CURL_CONSTANT(CURLOPT_NOPROXY); REGISTER_CURL_CONSTANT(CURLOPT_PROTOCOLS); REGISTER_CURL_CONSTANT(CURLOPT_REDIR_PROTOCOLS); REGISTER_CURL_CONSTANT(CURLOPT_SOCKS5_GSSAPI_NEC); REGISTER_CURL_CONSTANT(CURLOPT_SOCKS5_GSSAPI_SERVICE); REGISTER_CURL_CONSTANT(CURLOPT_TFTP_BLKSIZE); REGISTER_CURL_CONSTANT(CURLPROTO_ALL); REGISTER_CURL_CONSTANT(CURLPROTO_DICT); REGISTER_CURL_CONSTANT(CURLPROTO_FILE); REGISTER_CURL_CONSTANT(CURLPROTO_FTP); REGISTER_CURL_CONSTANT(CURLPROTO_FTPS); REGISTER_CURL_CONSTANT(CURLPROTO_HTTP); REGISTER_CURL_CONSTANT(CURLPROTO_HTTPS); REGISTER_CURL_CONSTANT(CURLPROTO_LDAP); REGISTER_CURL_CONSTANT(CURLPROTO_LDAPS); REGISTER_CURL_CONSTANT(CURLPROTO_SCP); REGISTER_CURL_CONSTANT(CURLPROTO_SFTP); REGISTER_CURL_CONSTANT(CURLPROTO_TELNET); REGISTER_CURL_CONSTANT(CURLPROTO_TFTP); REGISTER_CURL_CONSTANT(CURLPROXY_HTTP_1_0); REGISTER_CURL_CONSTANT(CURLFTP_CREATE_DIR); REGISTER_CURL_CONSTANT(CURLFTP_CREATE_DIR_NONE); REGISTER_CURL_CONSTANT(CURLFTP_CREATE_DIR_RETRY); #endif #if LIBCURL_VERSION_NUM >= 0x071306 /* Available since 7.19.6 */ REGISTER_CURL_CONSTANT(CURLOPT_SSH_KNOWNHOSTS); #endif #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ REGISTER_CURL_CONSTANT(CURLINFO_RTSP_CLIENT_CSEQ); REGISTER_CURL_CONSTANT(CURLINFO_RTSP_CSEQ_RECV); REGISTER_CURL_CONSTANT(CURLINFO_RTSP_SERVER_CSEQ); 
REGISTER_CURL_CONSTANT(CURLINFO_RTSP_SESSION_ID); REGISTER_CURL_CONSTANT(CURLOPT_FTP_USE_PRET); REGISTER_CURL_CONSTANT(CURLOPT_MAIL_FROM); REGISTER_CURL_CONSTANT(CURLOPT_MAIL_RCPT); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_CLIENT_CSEQ); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_REQUEST); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_SERVER_CSEQ); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_SESSION_ID); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_STREAM_URI); REGISTER_CURL_CONSTANT(CURLOPT_RTSP_TRANSPORT); REGISTER_CURL_CONSTANT(CURLPROTO_IMAP); REGISTER_CURL_CONSTANT(CURLPROTO_IMAPS); REGISTER_CURL_CONSTANT(CURLPROTO_POP3); REGISTER_CURL_CONSTANT(CURLPROTO_POP3S); REGISTER_CURL_CONSTANT(CURLPROTO_RTSP); REGISTER_CURL_CONSTANT(CURLPROTO_SMTP); REGISTER_CURL_CONSTANT(CURLPROTO_SMTPS); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_ANNOUNCE); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_DESCRIBE); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_GET_PARAMETER); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_OPTIONS); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_PAUSE); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_PLAY); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_RECEIVE); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_RECORD); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_SET_PARAMETER); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_SETUP); REGISTER_CURL_CONSTANT(CURL_RTSPREQ_TEARDOWN); #endif #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ REGISTER_CURL_CONSTANT(CURLINFO_LOCAL_IP); REGISTER_CURL_CONSTANT(CURLINFO_LOCAL_PORT); REGISTER_CURL_CONSTANT(CURLINFO_PRIMARY_PORT); REGISTER_CURL_CONSTANT(CURLOPT_FNMATCH_FUNCTION); REGISTER_CURL_CONSTANT(CURLOPT_WILDCARDMATCH); REGISTER_CURL_CONSTANT(CURLPROTO_RTMP); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPE); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPS); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPT); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPTE); REGISTER_CURL_CONSTANT(CURLPROTO_RTMPTS); REGISTER_CURL_CONSTANT(CURL_FNMATCHFUNC_FAIL); REGISTER_CURL_CONSTANT(CURL_FNMATCHFUNC_MATCH); REGISTER_CURL_CONSTANT(CURL_FNMATCHFUNC_NOMATCH); #endif #if LIBCURL_VERSION_NUM >= 0x071502 /* Available since 7.21.2 */ REGISTER_CURL_CONSTANT(CURLPROTO_GOPHER); #endif #if LIBCURL_VERSION_NUM >= 0x071503 /* Available since 7.21.3 */ REGISTER_CURL_CONSTANT(CURLAUTH_ONLY); REGISTER_CURL_CONSTANT(CURLOPT_RESOLVE); #endif #if LIBCURL_VERSION_NUM >= 0x071504 /* Available since 7.21.4 */ REGISTER_CURL_CONSTANT(CURLOPT_TLSAUTH_PASSWORD); REGISTER_CURL_CONSTANT(CURLOPT_TLSAUTH_TYPE); REGISTER_CURL_CONSTANT(CURLOPT_TLSAUTH_USERNAME); REGISTER_CURL_CONSTANT(CURL_TLSAUTH_SRP); #endif #if LIBCURL_VERSION_NUM >= 0x071506 /* Available since 7.21.6 */ REGISTER_CURL_CONSTANT(CURLOPT_ACCEPT_ENCODING); REGISTER_CURL_CONSTANT(CURLOPT_TRANSFER_ENCODING); #endif #if LIBCURL_VERSION_NUM >= 0x071600 /* Available since 7.22.0 */ REGISTER_CURL_CONSTANT(CURLAUTH_NTLM_WB); REGISTER_CURL_CONSTANT(CURLGSSAPI_DELEGATION_FLAG); REGISTER_CURL_CONSTANT(CURLGSSAPI_DELEGATION_POLICY_FLAG); REGISTER_CURL_CONSTANT(CURLOPT_GSSAPI_DELEGATION); #endif #if LIBCURL_VERSION_NUM >= 0x071800 /* Available since 7.24.0 */ REGISTER_CURL_CONSTANT(CURLOPT_ACCEPTTIMEOUT_MS); REGISTER_CURL_CONSTANT(CURLOPT_DNS_SERVERS); #endif #if LIBCURL_VERSION_NUM >= 0x071900 /* Available since 7.25.0 */ REGISTER_CURL_CONSTANT(CURLOPT_MAIL_AUTH); REGISTER_CURL_CONSTANT(CURLOPT_SSL_OPTIONS); REGISTER_CURL_CONSTANT(CURLOPT_TCP_KEEPALIVE); REGISTER_CURL_CONSTANT(CURLOPT_TCP_KEEPIDLE); REGISTER_CURL_CONSTANT(CURLOPT_TCP_KEEPINTVL); REGISTER_CURL_CONSTANT(CURLSSLOPT_ALLOW_BEAST); #endif #if LIBCURL_VERSION_NUM >= 0x071901 /* Available since 7.25.1 */ 
REGISTER_CURL_CONSTANT(CURL_REDIR_POST_303); #endif #if LIBCURL_VERSION_NUM >= 0x071c00 /* Available since 7.28.0 */ REGISTER_CURL_CONSTANT(CURLSSH_AUTH_AGENT); #endif #if LIBCURL_VERSION_NUM >= 0x071e00 /* Available since 7.30.0 */ REGISTER_CURL_CONSTANT(CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE); REGISTER_CURL_CONSTANT(CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE); REGISTER_CURL_CONSTANT(CURLMOPT_MAX_HOST_CONNECTIONS); REGISTER_CURL_CONSTANT(CURLMOPT_MAX_PIPELINE_LENGTH); REGISTER_CURL_CONSTANT(CURLMOPT_MAX_TOTAL_CONNECTIONS); #endif #if LIBCURL_VERSION_NUM >= 0x071f00 /* Available since 7.31.0 */ REGISTER_CURL_CONSTANT(CURLOPT_SASL_IR); #endif #if LIBCURL_VERSION_NUM >= 0x072100 /* Available since 7.33.0 */ REGISTER_CURL_CONSTANT(CURLOPT_DNS_INTERFACE); REGISTER_CURL_CONSTANT(CURLOPT_DNS_LOCAL_IP4); REGISTER_CURL_CONSTANT(CURLOPT_DNS_LOCAL_IP6); REGISTER_CURL_CONSTANT(CURLOPT_XOAUTH2_BEARER); REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2_0); REGISTER_CURL_CONSTANT(CURL_VERSION_HTTP2); #endif #if LIBCURL_VERSION_NUM >= 0x072200 /* Available since 7.34.0 */ REGISTER_CURL_CONSTANT(CURLOPT_LOGIN_OPTIONS); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1_0); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1_1); REGISTER_CURL_CONSTANT(CURL_SSLVERSION_TLSv1_2); #endif #if LIBCURL_VERSION_NUM >= 0x072400 /* Available since 7.36.0 */ REGISTER_CURL_CONSTANT(CURLOPT_EXPECT_100_TIMEOUT_MS); REGISTER_CURL_CONSTANT(CURLOPT_SSL_ENABLE_ALPN); REGISTER_CURL_CONSTANT(CURLOPT_SSL_ENABLE_NPN); #endif #if LIBCURL_VERSION_NUM >= 0x072500 /* Available since 7.37.0 */ REGISTER_CURL_CONSTANT(CURLHEADER_SEPARATE); REGISTER_CURL_CONSTANT(CURLHEADER_UNIFIED); REGISTER_CURL_CONSTANT(CURLOPT_HEADEROPT); REGISTER_CURL_CONSTANT(CURLOPT_PROXYHEADER); #endif #if LIBCURL_VERSION_NUM >= 0x072600 /* Available since 7.38.0 */ REGISTER_CURL_CONSTANT(CURLAUTH_NEGOTIATE); #endif #if LIBCURL_VERSION_NUM >= 0x072700 /* Available since 7.39.0 */ REGISTER_CURL_CONSTANT(CURLOPT_PINNEDPUBLICKEY); #endif #if LIBCURL_VERSION_NUM >= 0x072800 /* Available since 7.40.0 */ REGISTER_CURL_CONSTANT(CURLOPT_UNIX_SOCKET_PATH); REGISTER_CURL_CONSTANT(CURLPROTO_SMB); REGISTER_CURL_CONSTANT(CURLPROTO_SMBS); #endif #if LIBCURL_VERSION_NUM >= 0x072900 /* Available since 7.41.0 */ REGISTER_CURL_CONSTANT(CURLOPT_SSL_VERIFYSTATUS); #endif #if LIBCURL_VERSION_NUM >= 0x072a00 /* Available since 7.42.0 */ REGISTER_CURL_CONSTANT(CURLOPT_PATH_AS_IS); REGISTER_CURL_CONSTANT(CURLOPT_SSL_FALSESTART); #endif #if LIBCURL_VERSION_NUM >= 0x072b00 /* Available since 7.43.0 */ REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2); REGISTER_CURL_CONSTANT(CURLOPT_PIPEWAIT); REGISTER_CURL_CONSTANT(CURLOPT_PROXY_SERVICE_NAME); REGISTER_CURL_CONSTANT(CURLOPT_SERVICE_NAME); REGISTER_CURL_CONSTANT(CURLPIPE_NOTHING); REGISTER_CURL_CONSTANT(CURLPIPE_HTTP1); REGISTER_CURL_CONSTANT(CURLPIPE_MULTIPLEX); #endif #if LIBCURL_VERSION_NUM >= 0x072c00 /* Available since 7.44.0 */ REGISTER_CURL_CONSTANT(CURLSSLOPT_NO_REVOKE); #endif #if LIBCURL_VERSION_NUM >= 0x072d00 /* Available since 7.45.0 */ REGISTER_CURL_CONSTANT(CURLOPT_DEFAULT_PROTOCOL); #endif #if LIBCURL_VERSION_NUM >= 0x072e00 /* Available since 7.46.0 */ REGISTER_CURL_CONSTANT(CURLOPT_STREAM_WEIGHT); #endif #if LIBCURL_VERSION_NUM >= 0x072f00 /* Available since 7.47.0 */ REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2TLS); #endif #if LIBCURL_VERSION_NUM >= 0x073000 /* Available since 7.48.0 */ REGISTER_CURL_CONSTANT(CURLOPT_TFTP_NO_OPTIONS); #endif #if LIBCURL_VERSION_NUM >= 0x073100 /* Available since 7.49.0 */ 
REGISTER_CURL_CONSTANT(CURL_HTTP_VERSION_2_PRIOR_KNOWLEDGE); REGISTER_CURL_CONSTANT(CURLOPT_CONNECT_TO); REGISTER_CURL_CONSTANT(CURLOPT_TCP_FASTOPEN); #endif #if CURLOPT_FTPASCII != 0 REGISTER_CURL_CONSTANT(CURLOPT_FTPASCII); #endif #if CURLOPT_MUTE != 0 REGISTER_CURL_CONSTANT(CURLOPT_MUTE); #endif #if CURLOPT_PASSWDFUNCTION != 0 REGISTER_CURL_CONSTANT(CURLOPT_PASSWDFUNCTION); #endif REGISTER_CURL_CONSTANT(CURLOPT_SAFE_UPLOAD); #ifdef PHP_CURL_NEED_OPENSSL_TSL if (!CRYPTO_get_id_callback()) { int i, c = CRYPTO_num_locks(); php_curl_openssl_tsl = malloc(c * sizeof(MUTEX_T)); if (!php_curl_openssl_tsl) { return FAILURE; } for (i = 0; i < c; ++i) { php_curl_openssl_tsl[i] = tsrm_mutex_alloc(); } CRYPTO_set_id_callback(php_curl_ssl_id); CRYPTO_set_locking_callback(php_curl_ssl_lock); } #endif #ifdef PHP_CURL_NEED_GNUTLS_TSL gcry_control(GCRYCTL_SET_THREAD_CBS, &php_curl_gnutls_tsl); #endif if (curl_global_init(CURL_GLOBAL_DEFAULT) != CURLE_OK) { return FAILURE; } curlfile_register_class(); return SUCCESS; } /* }}} */ /* {{{ PHP_MSHUTDOWN_FUNCTION */ PHP_MSHUTDOWN_FUNCTION(curl) { curl_global_cleanup(); #ifdef PHP_CURL_NEED_OPENSSL_TSL if (php_curl_openssl_tsl) { int i, c = CRYPTO_num_locks(); CRYPTO_set_id_callback(NULL); CRYPTO_set_locking_callback(NULL); for (i = 0; i < c; ++i) { tsrm_mutex_free(php_curl_openssl_tsl[i]); } free(php_curl_openssl_tsl); php_curl_openssl_tsl = NULL; } #endif UNREGISTER_INI_ENTRIES(); return SUCCESS; } /* }}} */ /* {{{ curl_write_nothing * Used as a work around. See _php_curl_close_ex */ static size_t curl_write_nothing(char *data, size_t size, size_t nmemb, void *ctx) { return size * nmemb; } /* }}} */ /* {{{ curl_write */ static size_t curl_write(char *data, size_t size, size_t nmemb, void *ctx) { php_curl *ch = (php_curl *) ctx; php_curl_write *t = ch->handlers->write; size_t length = size * nmemb; #if PHP_CURL_DEBUG fprintf(stderr, "curl_write() called\n"); fprintf(stderr, "data = %s, size = %d, nmemb = %d, ctx = %x\n", data, size, nmemb, ctx); #endif switch (t->method) { case PHP_CURL_STDOUT: PHPWRITE(data, length); break; case PHP_CURL_FILE: return fwrite(data, size, nmemb, t->fp); case PHP_CURL_RETURN: if (length > 0) { smart_str_appendl(&t->buf, data, (int) length); } break; case PHP_CURL_USER: { zval argv[2]; zval retval; int error; zend_fcall_info fci; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); ZVAL_STRINGL(&argv[1], data, length); fci.size = sizeof(fci); fci.function_table = EG(function_table); fci.object = NULL; ZVAL_COPY_VALUE(&fci.function_name, &t->func_name); fci.retval = &retval; fci.param_count = 2; fci.params = argv; fci.no_separation = 0; fci.symbol_table = NULL; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Could not call the CURLOPT_WRITEFUNCTION"); length = -1; } else if (!Z_ISUNDEF(retval)) { _php_curl_verify_handlers(ch, 1); length = zval_get_long(&retval); } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); break; } } return length; } /* }}} */ #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ /* {{{ curl_fnmatch */ static int curl_fnmatch(void *ctx, const char *pattern, const char *string) { php_curl *ch = (php_curl *) ctx; php_curl_fnmatch *t = ch->handlers->fnmatch; int rval = CURL_FNMATCHFUNC_FAIL; switch (t->method) { case PHP_CURL_USER: { zval argv[3]; zval retval; int error; zend_fcall_info fci; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); ZVAL_STRING(&argv[1], pattern); ZVAL_STRING(&argv[2], string); 
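			/* Invoke the user callback with (resource, pattern, string);
			   its integer return value is handed back to libcurl as one of
			   the CURL_FNMATCHFUNC_* codes */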
fci.size = sizeof(fci); fci.function_table = EG(function_table); ZVAL_COPY_VALUE(&fci.function_name, &t->func_name); fci.object = NULL; fci.retval = &retval; fci.param_count = 3; fci.params = argv; fci.no_separation = 0; fci.symbol_table = NULL; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Cannot call the CURLOPT_FNMATCH_FUNCTION"); } else if (!Z_ISUNDEF(retval)) { _php_curl_verify_handlers(ch, 1); rval = zval_get_long(&retval); } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); zval_ptr_dtor(&argv[2]); break; } } return rval; } /* }}} */ #endif /* {{{ curl_progress */ static size_t curl_progress(void *clientp, double dltotal, double dlnow, double ultotal, double ulnow) { php_curl *ch = (php_curl *)clientp; php_curl_progress *t = ch->handlers->progress; size_t rval = 0; #if PHP_CURL_DEBUG fprintf(stderr, "curl_progress() called\n"); fprintf(stderr, "clientp = %x, dltotal = %f, dlnow = %f, ultotal = %f, ulnow = %f\n", clientp, dltotal, dlnow, ultotal, ulnow); #endif switch (t->method) { case PHP_CURL_USER: { zval argv[5]; zval retval; int error; zend_fcall_info fci; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); ZVAL_LONG(&argv[1], (zend_long)dltotal); ZVAL_LONG(&argv[2], (zend_long)dlnow); ZVAL_LONG(&argv[3], (zend_long)ultotal); ZVAL_LONG(&argv[4], (zend_long)ulnow); fci.size = sizeof(fci); fci.function_table = EG(function_table); ZVAL_COPY_VALUE(&fci.function_name, &t->func_name); fci.object = NULL; fci.retval = &retval; fci.param_count = 5; fci.params = argv; fci.no_separation = 0; fci.symbol_table = NULL; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Cannot call the CURLOPT_PROGRESSFUNCTION"); } else if (!Z_ISUNDEF(retval)) { _php_curl_verify_handlers(ch, 1); if (0 != zval_get_long(&retval)) { rval = 1; } } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); zval_ptr_dtor(&argv[2]); zval_ptr_dtor(&argv[3]); zval_ptr_dtor(&argv[4]); break; } } return rval; } /* }}} */ /* {{{ curl_read */ static size_t curl_read(char *data, size_t size, size_t nmemb, void *ctx) { php_curl *ch = (php_curl *)ctx; php_curl_read *t = ch->handlers->read; int length = 0; switch (t->method) { case PHP_CURL_DIRECT: if (t->fp) { length = fread(data, size, nmemb, t->fp); } break; case PHP_CURL_USER: { zval argv[3]; zval retval; int error; zend_fcall_info fci; ZVAL_RES(&argv[0], ch->res); Z_ADDREF(argv[0]); if (t->res) { ZVAL_RES(&argv[1], t->res); Z_ADDREF(argv[1]); } else { ZVAL_NULL(&argv[1]); } ZVAL_LONG(&argv[2], (int)size * nmemb); fci.size = sizeof(fci); fci.function_table = EG(function_table); ZVAL_COPY_VALUE(&fci.function_name, &t->func_name); fci.object = NULL; fci.retval = &retval; fci.param_count = 3; fci.params = argv; fci.no_separation = 0; fci.symbol_table = NULL; ch->in_callback = 1; error = zend_call_function(&fci, &t->fci_cache); ch->in_callback = 0; if (error == FAILURE) { php_error_docref(NULL, E_WARNING, "Cannot call the CURLOPT_READFUNCTION"); #if LIBCURL_VERSION_NUM >= 0x070c01 /* 7.12.1 */ length = CURL_READFUNC_ABORT; #endif } else if (!Z_ISUNDEF(retval)) { _php_curl_verify_handlers(ch, 1); if (Z_TYPE(retval) == IS_STRING) { length = MIN((int) (size * nmemb), Z_STRLEN(retval)); memcpy(data, Z_STRVAL(retval), length); } zval_ptr_dtor(&retval); } zval_ptr_dtor(&argv[0]); zval_ptr_dtor(&argv[1]); zval_ptr_dtor(&argv[2]); break; } } return length; } /* }}} */ /* {{{ curl_write_header */ 
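/* Called by libcurl once per header line of the response.  Returning a
   value other than size * nmemb makes libcurl abort the transfer with
   CURLE_WRITE_ERROR. */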
static size_t curl_write_header(char *data, size_t size, size_t nmemb, void *ctx)
{
	php_curl *ch = (php_curl *) ctx;
	php_curl_write *t = ch->handlers->write_header;
	size_t length = size * nmemb;

	switch (t->method) {
		case PHP_CURL_STDOUT:
			/* Handle special case write when we're returning the entire transfer
			 */
			if (ch->handlers->write->method == PHP_CURL_RETURN && length > 0) {
				smart_str_appendl(&ch->handlers->write->buf, data, (int) length);
			} else {
				PHPWRITE(data, length);
			}
			break;
		case PHP_CURL_FILE:
			return fwrite(data, size, nmemb, t->fp);
		case PHP_CURL_USER: {
			zval argv[2];
			zval retval;
			int error;
			zend_fcall_info fci;

			ZVAL_RES(&argv[0], ch->res);
			Z_ADDREF(argv[0]);
			ZVAL_STRINGL(&argv[1], data, length);

			fci.size = sizeof(fci);
			fci.function_table = EG(function_table);
			ZVAL_COPY_VALUE(&fci.function_name, &t->func_name);
			fci.symbol_table = NULL;
			fci.object = NULL;
			fci.retval = &retval;
			fci.param_count = 2;
			fci.params = argv;
			fci.no_separation = 0;

			ch->in_callback = 1;
			error = zend_call_function(&fci, &t->fci_cache);
			ch->in_callback = 0;
			if (error == FAILURE) {
				php_error_docref(NULL, E_WARNING, "Could not call the CURLOPT_HEADERFUNCTION");
				length = -1; /* (size_t) -1 never equals size * nmemb, so libcurl aborts */
			} else if (!Z_ISUNDEF(retval)) {
				_php_curl_verify_handlers(ch, 1);
				length = zval_get_long(&retval);
			}
			zval_ptr_dtor(&argv[0]);
			zval_ptr_dtor(&argv[1]);
			break;
		}

		case PHP_CURL_IGNORE:
			return length;

		default:
			return -1;
	}

	return length;
}
/* }}} */

static int curl_debug(CURL *cp, curl_infotype type, char *buf, size_t buf_len, void *ctx) /* {{{ */
{
	php_curl *ch = (php_curl *)ctx;

	if (type == CURLINFO_HEADER_OUT) {
		if (ch->header.str) {
			zend_string_release(ch->header.str);
		}
		if (buf_len > 0) {
			ch->header.str = zend_string_init(buf, buf_len, 0);
		}
	}

	return 0;
}
/* }}} */

#if CURLOPT_PASSWDFUNCTION != 0
/* {{{ curl_passwd
 */
static size_t curl_passwd(void *ctx, char *prompt, char *buf, int buflen)
{
	php_curl *ch = (php_curl *) ctx;
	zval *func = &ch->handlers->passwd;
	zval argv[3];
	zval retval;
	int error;
	int ret = -1;

	ZVAL_RES(&argv[0], ch->res);
	Z_ADDREF(argv[0]);
	ZVAL_STRING(&argv[1], prompt);
	ZVAL_LONG(&argv[2], buflen);

	/* pass all three prepared arguments (resource, prompt, buflen) */
	error = call_user_function(EG(function_table), NULL, func, &retval, 3, argv);
	if (error == FAILURE) {
		php_error_docref(NULL, E_WARNING, "Could not call the CURLOPT_PASSWDFUNCTION");
	} else if (Z_TYPE(retval) == IS_STRING) {
		/* the terminating NUL is copied along with the password, so both
		   must fit into the buflen-byte buffer */
		if (Z_STRLEN(retval) >= buflen) {
			php_error_docref(NULL, E_WARNING, "Returned password is too long for libcurl to handle");
		} else {
			memcpy(buf, Z_STRVAL(retval), Z_STRLEN(retval) + 1);
			ret = 0; /* tell libcurl the buffer has been filled */
		}
	} else {
		php_error_docref(NULL, E_WARNING, "User handler '%s' did not return a string", Z_STRVAL_P(func));
	}

	zval_ptr_dtor(&argv[0]);
	zval_ptr_dtor(&argv[1]);
	zval_ptr_dtor(&argv[2]);
	zval_ptr_dtor(&retval);

	return ret;
}
/* }}} */
#endif

/* {{{ curl_free_string
 */
static void curl_free_string(void **string)
{
	efree((char *)*string);
}
/* }}} */

/* {{{ curl_free_post
 */
static void curl_free_post(void **post)
{
	curl_formfree((struct HttpPost *)*post);
}
/* }}} */

/* {{{ curl_free_slist
 */
static void curl_free_slist(zval *el)
{
	curl_slist_free_all(((struct curl_slist *)Z_PTR_P(el)));
}
/* }}} */

/* {{{ proto array curl_version([int version])
   Return cURL version information.
*/
PHP_FUNCTION(curl_version)
{
	curl_version_info_data *d;
	zend_long uversion = CURLVERSION_NOW;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "|l", &uversion) == FAILURE) {
		return;
	}

	d = curl_version_info(uversion);
	if (d == NULL) {
		RETURN_FALSE;
	}

	array_init(return_value);

	CAAL("version_number", d->version_num);
	CAAL("age", d->age);
	CAAL("features", d->features);
	CAAL("ssl_version_number", d->ssl_version_num);
	CAAS("version", d->version);
	CAAS("host", d->host);
	CAAS("ssl_version", d->ssl_version);
	CAAS("libz_version", d->libz_version);

	/* Add an array of protocols */
	{
		char **p = (char **) d->protocols;
		zval protocol_list;

		array_init(&protocol_list);

		while (*p != NULL) {
			add_next_index_string(&protocol_list, *p);
			p++;
		}
		CAAZ("protocols", &protocol_list);
	}
}
/* }}} */

/* {{{ alloc_curl_handle
 */
static php_curl *alloc_curl_handle(void)
{
	php_curl *ch = ecalloc(1, sizeof(php_curl));
	ch->to_free = ecalloc(1, sizeof(struct _php_curl_free));
	ch->handlers = ecalloc(1, sizeof(php_curl_handlers));
	ch->handlers->write = ecalloc(1, sizeof(php_curl_write));
	ch->handlers->write_header = ecalloc(1, sizeof(php_curl_write));
	ch->handlers->read = ecalloc(1, sizeof(php_curl_read));
	ch->handlers->progress = NULL;
#if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */
	ch->handlers->fnmatch = NULL;
#endif
	ch->clone = emalloc(sizeof(uint32_t));
	*ch->clone = 1;

	memset(&ch->err, 0, sizeof(struct _php_curl_error));

	zend_llist_init(&ch->to_free->str, sizeof(char *), (llist_dtor_func_t)curl_free_string, 0);
	zend_llist_init(&ch->to_free->post, sizeof(struct HttpPost *), (llist_dtor_func_t)curl_free_post, 0);

	ch->to_free->slist = emalloc(sizeof(HashTable));
	zend_hash_init(ch->to_free->slist, 4, NULL, curl_free_slist, 0);

	return ch;
}
/* }}} */

#if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */
/* {{{ create_certinfo
 */
static void create_certinfo(struct curl_certinfo *ci, zval *listcode)
{
	int i;

	if (ci) {
		zval certhash;

		for (i = 0; i < ci->num_of_certs; i++) {
			struct curl_slist *slist;

			array_init(&certhash);
			for (slist = ci->certinfo[i]; slist; slist = slist->next) {
				int len;
				char s[64];
				char *tmp;

				/* copy at most sizeof(s) - 1 bytes and terminate explicitly;
				   strncpy() leaves the buffer unterminated when slist->data
				   is 64 bytes or longer, and the strlen() below would then
				   read past the end of "s" */
				strncpy(s, slist->data, sizeof(s) - 1);
				s[sizeof(s) - 1] = '\0';
				tmp = memchr(s, ':', sizeof(s));
				if (tmp) {
					*tmp = '\0';
					len = strlen(s);
					add_assoc_string(&certhash, s, &slist->data[len + 1]);
				} else {
					php_error_docref(NULL, E_WARNING, "Could not extract hash key from certificate info");
				}
			}
			add_next_index_zval(listcode, &certhash);
		}
	}
}
/* }}} */
#endif

/* {{{ _php_curl_set_default_options()
   Set default options for a handle */
static void _php_curl_set_default_options(php_curl *ch)
{
	char *cainfo;

	curl_easy_setopt(ch->cp, CURLOPT_NOPROGRESS, 1);
	curl_easy_setopt(ch->cp, CURLOPT_VERBOSE, 0);
	curl_easy_setopt(ch->cp, CURLOPT_ERRORBUFFER, ch->err.str);
	curl_easy_setopt(ch->cp, CURLOPT_WRITEFUNCTION, curl_write);
	curl_easy_setopt(ch->cp, CURLOPT_FILE, (void *) ch);
	curl_easy_setopt(ch->cp, CURLOPT_READFUNCTION, curl_read);
	curl_easy_setopt(ch->cp, CURLOPT_INFILE, (void *) ch);
	curl_easy_setopt(ch->cp, CURLOPT_HEADERFUNCTION, curl_write_header);
	curl_easy_setopt(ch->cp, CURLOPT_WRITEHEADER, (void *) ch);
#if !defined(ZTS)
	curl_easy_setopt(ch->cp, CURLOPT_DNS_USE_GLOBAL_CACHE, 1);
#endif
	curl_easy_setopt(ch->cp, CURLOPT_DNS_CACHE_TIMEOUT, 120);
	curl_easy_setopt(ch->cp, CURLOPT_MAXREDIRS, 20); /* prevent infinite redirects */

	cainfo = INI_STR("openssl.cafile");
	if (!(cainfo && cainfo[0] != '\0')) {
		cainfo = INI_STR("curl.cainfo");
	}
	if (cainfo && cainfo[0] != '\0') {
		curl_easy_setopt(ch->cp, CURLOPT_CAINFO, cainfo);
	}

#if defined(ZTS)
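	/* libcurl's standard resolver uses signals (SIGALRM) to implement DNS
	   timeouts; signals are not thread safe, so they are disabled when PHP
	   is built with thread safety (ZTS) */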
	curl_easy_setopt(ch->cp, CURLOPT_NOSIGNAL, 1);
#endif
}
/* }}} */

/* {{{ proto resource curl_init([string url])
   Initialize a cURL session */
PHP_FUNCTION(curl_init)
{
	php_curl *ch;
	CURL *cp;
	char *url = NULL;
	size_t url_len = 0;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "|s", &url, &url_len) == FAILURE) {
		return;
	}

	cp = curl_easy_init();
	if (!cp) {
		php_error_docref(NULL, E_WARNING, "Could not initialize a new cURL handle");
		RETURN_FALSE;
	}

	ch = alloc_curl_handle();

	ch->cp = cp;

	ch->handlers->write->method = PHP_CURL_STDOUT;
	ch->handlers->read->method = PHP_CURL_DIRECT;
	ch->handlers->write_header->method = PHP_CURL_IGNORE;

	_php_curl_set_default_options(ch);

	if (url) {
		if (php_curl_option_url(ch, url, url_len) == FAILURE) {
			_php_curl_close_ex(ch);
			RETURN_FALSE;
		}
	}

	ZVAL_RES(return_value, zend_register_resource(ch, le_curl));
	ch->res = Z_RES_P(return_value);
}
/* }}} */

/* {{{ proto resource curl_copy_handle(resource ch)
   Copy a cURL handle along with all of its preferences */
PHP_FUNCTION(curl_copy_handle)
{
	CURL *cp;
	zval *zid;
	php_curl *ch, *dupch;

	if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) {
		return;
	}

	if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) {
		RETURN_FALSE;
	}

	cp = curl_easy_duphandle(ch->cp);
	if (!cp) {
		php_error_docref(NULL, E_WARNING, "Cannot duplicate cURL handle");
		RETURN_FALSE;
	}

	dupch = alloc_curl_handle();
	dupch->cp = cp;

	Z_ADDREF_P(zid);
	if (!Z_ISUNDEF(ch->handlers->write->stream)) {
		Z_ADDREF(ch->handlers->write->stream);
	}
	dupch->handlers->write->stream = ch->handlers->write->stream;
	dupch->handlers->write->method = ch->handlers->write->method;
	if (!Z_ISUNDEF(ch->handlers->read->stream)) {
		Z_ADDREF(ch->handlers->read->stream);
	}
	dupch->handlers->read->stream = ch->handlers->read->stream;
	dupch->handlers->read->method = ch->handlers->read->method;
	dupch->handlers->write_header->method = ch->handlers->write_header->method;
	if (!Z_ISUNDEF(ch->handlers->write_header->stream)) {
		Z_ADDREF(ch->handlers->write_header->stream);
	}
	dupch->handlers->write_header->stream = ch->handlers->write_header->stream;

	dupch->handlers->write->fp = ch->handlers->write->fp;
	dupch->handlers->write_header->fp = ch->handlers->write_header->fp;
	dupch->handlers->read->fp = ch->handlers->read->fp;
	dupch->handlers->read->res = ch->handlers->read->res;
#if CURLOPT_PASSWDDATA != 0
	if (!Z_ISUNDEF(ch->handlers->passwd)) {
		ZVAL_COPY(&dupch->handlers->passwd, &ch->handlers->passwd);
		/* the callback data belongs on the duplicated handle */
		curl_easy_setopt(dupch->cp, CURLOPT_PASSWDDATA, (void *) dupch);
	}
#endif
	if (!Z_ISUNDEF(ch->handlers->write->func_name)) {
		ZVAL_COPY(&dupch->handlers->write->func_name, &ch->handlers->write->func_name);
	}
	if (!Z_ISUNDEF(ch->handlers->read->func_name)) {
		ZVAL_COPY(&dupch->handlers->read->func_name, &ch->handlers->read->func_name);
	}
	if (!Z_ISUNDEF(ch->handlers->write_header->func_name)) {
		ZVAL_COPY(&dupch->handlers->write_header->func_name, &ch->handlers->write_header->func_name);
	}

	curl_easy_setopt(dupch->cp, CURLOPT_ERRORBUFFER, dupch->err.str);
	curl_easy_setopt(dupch->cp, CURLOPT_FILE, (void *) dupch);
	curl_easy_setopt(dupch->cp, CURLOPT_INFILE, (void *) dupch);
	curl_easy_setopt(dupch->cp, CURLOPT_WRITEHEADER, (void *) dupch);

	if (ch->handlers->progress) {
		dupch->handlers->progress = ecalloc(1, sizeof(php_curl_progress));
		if (!Z_ISUNDEF(ch->handlers->progress->func_name)) {
			ZVAL_COPY(&dupch->handlers->progress->func_name, &ch->handlers->progress->func_name);
		}
		dupch->handlers->progress->method = ch->handlers->progress->method;
		curl_easy_setopt(dupch->cp,
CURLOPT_PROGRESSDATA, (void *) dupch); } /* Available since 7.21.0 */ #if LIBCURL_VERSION_NUM >= 0x071500 if (ch->handlers->fnmatch) { dupch->handlers->fnmatch = ecalloc(1, sizeof(php_curl_fnmatch)); if (!Z_ISUNDEF(ch->handlers->fnmatch->func_name)) { ZVAL_COPY(&dupch->handlers->fnmatch->func_name, &ch->handlers->fnmatch->func_name); } dupch->handlers->fnmatch->method = ch->handlers->fnmatch->method; curl_easy_setopt(dupch->cp, CURLOPT_FNMATCH_DATA, (void *) dupch); } #endif efree(dupch->to_free->slist); efree(dupch->to_free); dupch->to_free = ch->to_free; efree(dupch->clone); dupch->clone = ch->clone; /* Keep track of cloned copies to avoid invoking curl destructors for every clone */ (*ch->clone)++; ZVAL_RES(return_value, zend_register_resource(dupch, le_curl)); dupch->res = Z_RES_P(return_value); } /* }}} */ static int _php_curl_setopt(php_curl *ch, zend_long option, zval *zvalue) /* {{{ */ { CURLcode error = CURLE_OK; zend_long lval; ZVAL_DEREF(zvalue); switch (option) { /* Long options */ case CURLOPT_SSL_VERIFYHOST: lval = zval_get_long(zvalue); if (lval == 1) { #if LIBCURL_VERSION_NUM <= 0x071c00 /* 7.28.0 */ php_error_docref(NULL, E_NOTICE, "CURLOPT_SSL_VERIFYHOST with value 1 is deprecated and will be removed as of libcurl 7.28.1. It is recommended to use value 2 instead"); #else php_error_docref(NULL, E_NOTICE, "CURLOPT_SSL_VERIFYHOST no longer accepts the value 1, value 2 will be used instead"); error = curl_easy_setopt(ch->cp, option, 2); break; #endif } case CURLOPT_AUTOREFERER: case CURLOPT_BUFFERSIZE: case CURLOPT_CONNECTTIMEOUT: case CURLOPT_COOKIESESSION: case CURLOPT_CRLF: case CURLOPT_DNS_CACHE_TIMEOUT: case CURLOPT_DNS_USE_GLOBAL_CACHE: case CURLOPT_FAILONERROR: case CURLOPT_FILETIME: case CURLOPT_FORBID_REUSE: case CURLOPT_FRESH_CONNECT: case CURLOPT_FTP_USE_EPRT: case CURLOPT_FTP_USE_EPSV: case CURLOPT_HEADER: case CURLOPT_HTTPGET: case CURLOPT_HTTPPROXYTUNNEL: case CURLOPT_HTTP_VERSION: case CURLOPT_INFILESIZE: case CURLOPT_LOW_SPEED_LIMIT: case CURLOPT_LOW_SPEED_TIME: case CURLOPT_MAXCONNECTS: case CURLOPT_MAXREDIRS: case CURLOPT_NETRC: case CURLOPT_NOBODY: case CURLOPT_NOPROGRESS: case CURLOPT_NOSIGNAL: case CURLOPT_PORT: case CURLOPT_POST: case CURLOPT_PROXYPORT: case CURLOPT_PROXYTYPE: case CURLOPT_PUT: case CURLOPT_RESUME_FROM: case CURLOPT_SSLVERSION: case CURLOPT_SSL_VERIFYPEER: case CURLOPT_TIMECONDITION: case CURLOPT_TIMEOUT: case CURLOPT_TIMEVALUE: case CURLOPT_TRANSFERTEXT: case CURLOPT_UNRESTRICTED_AUTH: case CURLOPT_UPLOAD: case CURLOPT_VERBOSE: #if LIBCURL_VERSION_NUM >= 0x070a06 /* Available since 7.10.6 */ case CURLOPT_HTTPAUTH: #endif #if LIBCURL_VERSION_NUM >= 0x070a07 /* Available since 7.10.7 */ case CURLOPT_FTP_CREATE_MISSING_DIRS: case CURLOPT_PROXYAUTH: #endif #if LIBCURL_VERSION_NUM >= 0x070a08 /* Available since 7.10.8 */ case CURLOPT_FTP_RESPONSE_TIMEOUT: case CURLOPT_IPRESOLVE: case CURLOPT_MAXFILESIZE: #endif #if LIBCURL_VERSION_NUM >= 0x070b02 /* Available since 7.11.2 */ case CURLOPT_TCP_NODELAY: #endif #if LIBCURL_VERSION_NUM >= 0x070c02 /* Available since 7.12.2 */ case CURLOPT_FTPSSLAUTH: #endif #if LIBCURL_VERSION_NUM >= 0x070e01 /* Available since 7.14.1 */ case CURLOPT_IGNORE_CONTENT_LENGTH: #endif #if LIBCURL_VERSION_NUM >= 0x070f00 /* Available since 7.15.0 */ case CURLOPT_FTP_SKIP_PASV_IP: #endif #if LIBCURL_VERSION_NUM >= 0x070f01 /* Available since 7.15.1 */ case CURLOPT_FTP_FILEMETHOD: #endif #if LIBCURL_VERSION_NUM >= 0x070f02 /* Available since 7.15.2 */ case CURLOPT_CONNECT_ONLY: case CURLOPT_LOCALPORT: case 
CURLOPT_LOCALPORTRANGE: #endif #if LIBCURL_VERSION_NUM >= 0x071000 /* Available since 7.16.0 */ case CURLOPT_SSL_SESSIONID_CACHE: #endif #if LIBCURL_VERSION_NUM >= 0x071001 /* Available since 7.16.1 */ case CURLOPT_FTP_SSL_CCC: case CURLOPT_SSH_AUTH_TYPES: #endif #if LIBCURL_VERSION_NUM >= 0x071002 /* Available since 7.16.2 */ case CURLOPT_CONNECTTIMEOUT_MS: case CURLOPT_HTTP_CONTENT_DECODING: case CURLOPT_HTTP_TRANSFER_DECODING: case CURLOPT_TIMEOUT_MS: #endif #if LIBCURL_VERSION_NUM >= 0x071004 /* Available since 7.16.4 */ case CURLOPT_NEW_DIRECTORY_PERMS: case CURLOPT_NEW_FILE_PERMS: #endif #if LIBCURL_VERSION_NUM >= 0x071100 /* Available since 7.17.0 */ case CURLOPT_USE_SSL: #elif LIBCURL_VERSION_NUM >= 0x070b00 /* Available since 7.11.0 */ case CURLOPT_FTP_SSL: #endif #if LIBCURL_VERSION_NUM >= 0x071100 /* Available since 7.17.0 */ case CURLOPT_APPEND: case CURLOPT_DIRLISTONLY: #else case CURLOPT_FTPAPPEND: case CURLOPT_FTPLISTONLY: #endif #if LIBCURL_VERSION_NUM >= 0x071200 /* Available since 7.18.0 */ case CURLOPT_PROXY_TRANSFER_MODE: #endif #if LIBCURL_VERSION_NUM >= 0x071300 /* Available since 7.19.0 */ case CURLOPT_ADDRESS_SCOPE: #endif #if LIBCURL_VERSION_NUM > 0x071301 /* Available since 7.19.1 */ case CURLOPT_CERTINFO: #endif #if LIBCURL_VERSION_NUM >= 0x071304 /* Available since 7.19.4 */ case CURLOPT_PROTOCOLS: case CURLOPT_REDIR_PROTOCOLS: case CURLOPT_SOCKS5_GSSAPI_NEC: case CURLOPT_TFTP_BLKSIZE: #endif #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_FTP_USE_PRET: case CURLOPT_RTSP_CLIENT_CSEQ: case CURLOPT_RTSP_REQUEST: case CURLOPT_RTSP_SERVER_CSEQ: #endif #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ case CURLOPT_WILDCARDMATCH: #endif #if LIBCURL_VERSION_NUM >= 0x071504 /* Available since 7.21.4 */ case CURLOPT_TLSAUTH_TYPE: #endif #if LIBCURL_VERSION_NUM >= 0x071600 /* Available since 7.22.0 */ case CURLOPT_GSSAPI_DELEGATION: #endif #if LIBCURL_VERSION_NUM >= 0x071800 /* Available since 7.24.0 */ case CURLOPT_ACCEPTTIMEOUT_MS: #endif #if LIBCURL_VERSION_NUM >= 0x071900 /* Available since 7.25.0 */ case CURLOPT_SSL_OPTIONS: case CURLOPT_TCP_KEEPALIVE: case CURLOPT_TCP_KEEPIDLE: case CURLOPT_TCP_KEEPINTVL: #endif #if LIBCURL_VERSION_NUM >= 0x071f00 /* Available since 7.31.0 */ case CURLOPT_SASL_IR: #endif #if LIBCURL_VERSION_NUM >= 0x072400 /* Available since 7.36.0 */ case CURLOPT_EXPECT_100_TIMEOUT_MS: case CURLOPT_SSL_ENABLE_ALPN: case CURLOPT_SSL_ENABLE_NPN: #endif #if LIBCURL_VERSION_NUM >= 0x072500 /* Available since 7.37.0 */ case CURLOPT_HEADEROPT: #endif #if LIBCURL_VERSION_NUM >= 0x072900 /* Available since 7.41.0 */ case CURLOPT_SSL_VERIFYSTATUS: #endif #if LIBCURL_VERSION_NUM >= 0x072a00 /* Available since 7.42.0 */ case CURLOPT_PATH_AS_IS: case CURLOPT_SSL_FALSESTART: #endif #if LIBCURL_VERSION_NUM >= 0x072b00 /* Available since 7.43.0 */ case CURLOPT_PIPEWAIT: #endif #if LIBCURL_VERSION_NUM >= 0x072e00 /* Available since 7.46.0 */ case CURLOPT_STREAM_WEIGHT: #endif #if LIBCURL_VERSION_NUM >= 0x073000 /* Available since 7.48.0 */ case CURLOPT_TFTP_NO_OPTIONS: #endif #if LIBCURL_VERSION_NUM >= 0x073100 /* Available since 7.49.0 */ case CURLOPT_TCP_FASTOPEN: #endif #if CURLOPT_MUTE != 0 case CURLOPT_MUTE: #endif lval = zval_get_long(zvalue); #if LIBCURL_VERSION_NUM >= 0x71304 if ((option == CURLOPT_PROTOCOLS || option == CURLOPT_REDIR_PROTOCOLS) && (PG(open_basedir) && *PG(open_basedir)) && (lval & CURLPROTO_FILE)) { php_error_docref(NULL, E_WARNING, "CURLPROTO_FILE cannot be activated when an open_basedir is 
set"); return 1; } #endif # if defined(ZTS) if (option == CURLOPT_DNS_USE_GLOBAL_CACHE) { php_error_docref(NULL, E_WARNING, "CURLOPT_DNS_USE_GLOBAL_CACHE cannot be activated when thread safety is enabled"); return 1; } # endif error = curl_easy_setopt(ch->cp, option, lval); break; case CURLOPT_SAFE_UPLOAD: lval = zval_get_long(zvalue); if (lval == 0) { php_error_docref(NULL, E_WARNING, "Disabling safe uploads is no longer supported"); return FAILURE; } break; /* String options */ case CURLOPT_CAINFO: case CURLOPT_CAPATH: case CURLOPT_COOKIE: case CURLOPT_EGDSOCKET: case CURLOPT_INTERFACE: case CURLOPT_PROXY: case CURLOPT_PROXYUSERPWD: case CURLOPT_REFERER: case CURLOPT_SSLCERTTYPE: case CURLOPT_SSLENGINE: case CURLOPT_SSLENGINE_DEFAULT: case CURLOPT_SSLKEY: case CURLOPT_SSLKEYPASSWD: case CURLOPT_SSLKEYTYPE: case CURLOPT_SSL_CIPHER_LIST: case CURLOPT_USERAGENT: case CURLOPT_USERPWD: #if LIBCURL_VERSION_NUM >= 0x070e01 /* Available since 7.14.1 */ case CURLOPT_COOKIELIST: #endif #if LIBCURL_VERSION_NUM >= 0x070f05 /* Available since 7.15.5 */ case CURLOPT_FTP_ALTERNATIVE_TO_USER: #endif #if LIBCURL_VERSION_NUM >= 0x071101 /* Available since 7.17.1 */ case CURLOPT_SSH_HOST_PUBLIC_KEY_MD5: #endif #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ case CURLOPT_PASSWORD: case CURLOPT_PROXYPASSWORD: case CURLOPT_PROXYUSERNAME: case CURLOPT_USERNAME: #endif #if LIBCURL_VERSION_NUM >= 0x071304 /* Available since 7.19.4 */ case CURLOPT_NOPROXY: case CURLOPT_SOCKS5_GSSAPI_SERVICE: #endif #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_MAIL_FROM: case CURLOPT_RTSP_STREAM_URI: case CURLOPT_RTSP_TRANSPORT: #endif #if LIBCURL_VERSION_NUM >= 0x071504 /* Available since 7.21.4 */ case CURLOPT_TLSAUTH_PASSWORD: case CURLOPT_TLSAUTH_USERNAME: #endif #if LIBCURL_VERSION_NUM >= 0x071506 /* Available since 7.21.6 */ case CURLOPT_ACCEPT_ENCODING: case CURLOPT_TRANSFER_ENCODING: #else case CURLOPT_ENCODING: #endif #if LIBCURL_VERSION_NUM >= 0x071800 /* Available since 7.24.0 */ case CURLOPT_DNS_SERVERS: #endif #if LIBCURL_VERSION_NUM >= 0x071900 /* Available since 7.25.0 */ case CURLOPT_MAIL_AUTH: #endif #if LIBCURL_VERSION_NUM >= 0x072200 /* Available since 7.34.0 */ case CURLOPT_LOGIN_OPTIONS: #endif #if LIBCURL_VERSION_NUM >= 0x072700 /* Available since 7.39.0 */ case CURLOPT_PINNEDPUBLICKEY: #endif #if LIBCURL_VERSION_NUM >= 0x072b00 /* Available since 7.43.0 */ case CURLOPT_PROXY_SERVICE_NAME: case CURLOPT_SERVICE_NAME: #endif #if LIBCURL_VERSION_NUM >= 0x072d00 /* Available since 7.45.0 */ case CURLOPT_DEFAULT_PROTOCOL: #endif { zend_string *str = zval_get_string(zvalue); int ret = php_curl_option_str(ch, option, ZSTR_VAL(str), ZSTR_LEN(str), 0); zend_string_release(str); return ret; } /* Curl nullable string options */ case CURLOPT_CUSTOMREQUEST: case CURLOPT_FTPPORT: case CURLOPT_RANGE: #if LIBCURL_VERSION_NUM >= 0x070d00 /* Available since 7.13.0 */ case CURLOPT_FTP_ACCOUNT: #endif #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_RTSP_SESSION_ID: #endif #if LIBCURL_VERSION_NUM >= 0x072100 /* Available since 7.33.0 */ case CURLOPT_DNS_INTERFACE: case CURLOPT_DNS_LOCAL_IP4: case CURLOPT_DNS_LOCAL_IP6: case CURLOPT_XOAUTH2_BEARER: #endif #if LIBCURL_VERSION_NUM >= 0x072800 /* Available since 7.40.0 */ case CURLOPT_UNIX_SOCKET_PATH: #endif #if LIBCURL_VERSION_NUM >= 0x071004 /* Available since 7.16.4 */ case CURLOPT_KRBLEVEL: #else case CURLOPT_KRB4LEVEL: #endif { if (Z_ISNULL_P(zvalue)) { error = curl_easy_setopt(ch->cp, option, 
NULL); } else { zend_string *str = zval_get_string(zvalue); int ret = php_curl_option_str(ch, option, ZSTR_VAL(str), ZSTR_LEN(str), 0); zend_string_release(str); return ret; } break; } /* Curl private option */ case CURLOPT_PRIVATE: { zend_string *str = zval_get_string(zvalue); int ret = php_curl_option_str(ch, option, ZSTR_VAL(str), ZSTR_LEN(str), 1); zend_string_release(str); return ret; } /* Curl url option */ case CURLOPT_URL: { zend_string *str = zval_get_string(zvalue); int ret = php_curl_option_url(ch, ZSTR_VAL(str), ZSTR_LEN(str)); zend_string_release(str); return ret; } /* Curl file handle options */ case CURLOPT_FILE: case CURLOPT_INFILE: case CURLOPT_STDERR: case CURLOPT_WRITEHEADER: { FILE *fp = NULL; php_stream *what = NULL; if (Z_TYPE_P(zvalue) != IS_NULL) { what = (php_stream *)zend_fetch_resource2_ex(zvalue, "File-Handle", php_file_le_stream(), php_file_le_pstream()); if (!what) { return FAILURE; } if (FAILURE == php_stream_cast(what, PHP_STREAM_AS_STDIO, (void *) &fp, REPORT_ERRORS)) { return FAILURE; } if (!fp) { return FAILURE; } } error = CURLE_OK; switch (option) { case CURLOPT_FILE: if (!what) { if (!Z_ISUNDEF(ch->handlers->write->stream)) { zval_ptr_dtor(&ch->handlers->write->stream); ZVAL_UNDEF(&ch->handlers->write->stream); } ch->handlers->write->fp = NULL; ch->handlers->write->method = PHP_CURL_STDOUT; } else if (what->mode[0] != 'r' || what->mode[1] == '+') { zval_ptr_dtor(&ch->handlers->write->stream); ch->handlers->write->fp = fp; ch->handlers->write->method = PHP_CURL_FILE; ZVAL_COPY(&ch->handlers->write->stream, zvalue); } else { php_error_docref(NULL, E_WARNING, "the provided file handle is not writable"); return FAILURE; } break; case CURLOPT_WRITEHEADER: if (!what) { if (!Z_ISUNDEF(ch->handlers->write_header->stream)) { zval_ptr_dtor(&ch->handlers->write_header->stream); ZVAL_UNDEF(&ch->handlers->write_header->stream); } ch->handlers->write_header->fp = NULL; ch->handlers->write_header->method = PHP_CURL_IGNORE; } else if (what->mode[0] != 'r' || what->mode[1] == '+') { zval_ptr_dtor(&ch->handlers->write_header->stream); ch->handlers->write_header->fp = fp; ch->handlers->write_header->method = PHP_CURL_FILE; ZVAL_COPY(&ch->handlers->write_header->stream, zvalue);; } else { php_error_docref(NULL, E_WARNING, "the provided file handle is not writable"); return FAILURE; } break; case CURLOPT_INFILE: if (!what) { if (!Z_ISUNDEF(ch->handlers->read->stream)) { zval_ptr_dtor(&ch->handlers->read->stream); ZVAL_UNDEF(&ch->handlers->read->stream); } ch->handlers->read->fp = NULL; ch->handlers->read->res = NULL; } else { zval_ptr_dtor(&ch->handlers->read->stream); ch->handlers->read->fp = fp; ch->handlers->read->res = Z_RES_P(zvalue); ZVAL_COPY(&ch->handlers->read->stream, zvalue); } break; case CURLOPT_STDERR: if (!what) { if (!Z_ISUNDEF(ch->handlers->std_err)) { zval_ptr_dtor(&ch->handlers->std_err); ZVAL_UNDEF(&ch->handlers->std_err); } } else if (what->mode[0] != 'r' || what->mode[1] == '+') { zval_ptr_dtor(&ch->handlers->std_err); ZVAL_COPY(&ch->handlers->std_err, zvalue); } else { php_error_docref(NULL, E_WARNING, "the provided file handle is not writable"); return FAILURE; } /* break omitted intentionally */ default: error = curl_easy_setopt(ch->cp, option, fp); break; } break; } /* Curl linked list options */ case CURLOPT_HTTP200ALIASES: case CURLOPT_HTTPHEADER: case CURLOPT_POSTQUOTE: case CURLOPT_PREQUOTE: case CURLOPT_QUOTE: case CURLOPT_TELNETOPTIONS: #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_MAIL_RCPT: #endif #if 
LIBCURL_VERSION_NUM >= 0x071503 /* Available since 7.21.3 */ case CURLOPT_RESOLVE: #endif #if LIBCURL_VERSION_NUM >= 0x072500 /* Available since 7.37.0 */ case CURLOPT_PROXYHEADER: #endif #if LIBCURL_VERSION_NUM >= 0x073100 /* Available since 7.49.0 */ case CURLOPT_CONNECT_TO: #endif { zval *current; HashTable *ph; zend_string *val; struct curl_slist *slist = NULL; ph = HASH_OF(zvalue); if (!ph) { char *name = NULL; switch (option) { case CURLOPT_HTTPHEADER: name = "CURLOPT_HTTPHEADER"; break; case CURLOPT_QUOTE: name = "CURLOPT_QUOTE"; break; case CURLOPT_HTTP200ALIASES: name = "CURLOPT_HTTP200ALIASES"; break; case CURLOPT_POSTQUOTE: name = "CURLOPT_POSTQUOTE"; break; case CURLOPT_PREQUOTE: name = "CURLOPT_PREQUOTE"; break; case CURLOPT_TELNETOPTIONS: name = "CURLOPT_TELNETOPTIONS"; break; #if LIBCURL_VERSION_NUM >= 0x071400 /* Available since 7.20.0 */ case CURLOPT_MAIL_RCPT: name = "CURLOPT_MAIL_RCPT"; break; #endif #if LIBCURL_VERSION_NUM >= 0x071503 /* Available since 7.21.3 */ case CURLOPT_RESOLVE: name = "CURLOPT_RESOLVE"; break; #endif #if LIBCURL_VERSION_NUM >= 0x072500 /* Available since 7.37.0 */ case CURLOPT_PROXYHEADER: name = "CURLOPT_PROXYHEADER"; break; #endif #if LIBCURL_VERSION_NUM >= 0x073100 /* Available since 7.49.0 */ case CURLOPT_CONNECT_TO: name = "CURLOPT_CONNECT_TO"; break; #endif } php_error_docref(NULL, E_WARNING, "You must pass either an object or an array with the %s argument", name); return FAILURE; } ZEND_HASH_FOREACH_VAL(ph, current) { ZVAL_DEREF(current); val = zval_get_string(current); slist = curl_slist_append(slist, ZSTR_VAL(val)); zend_string_release(val); if (!slist) { php_error_docref(NULL, E_WARNING, "Could not build curl_slist"); return 1; } } ZEND_HASH_FOREACH_END(); if (slist) { if ((*ch->clone) == 1) { zend_hash_index_update_ptr(ch->to_free->slist, option, slist); } else { zend_hash_next_index_insert_ptr(ch->to_free->slist, slist); } } error = curl_easy_setopt(ch->cp, option, slist); break; } case CURLOPT_BINARYTRANSFER: /* Do nothing, just backward compatibility */ break; case CURLOPT_FOLLOWLOCATION: lval = zval_get_long(zvalue); #if LIBCURL_VERSION_NUM < 0x071304 if (PG(open_basedir) && *PG(open_basedir)) { if (lval != 0) { php_error_docref(NULL, E_WARNING, "CURLOPT_FOLLOWLOCATION cannot be activated when an open_basedir is set"); return FAILURE; } } #endif error = curl_easy_setopt(ch->cp, option, lval); break; case CURLOPT_HEADERFUNCTION: if (!Z_ISUNDEF(ch->handlers->write_header->func_name)) { zval_ptr_dtor(&ch->handlers->write_header->func_name); ch->handlers->write_header->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->write_header->func_name, zvalue); ch->handlers->write_header->method = PHP_CURL_USER; break; case CURLOPT_POSTFIELDS: if (Z_TYPE_P(zvalue) == IS_ARRAY || Z_TYPE_P(zvalue) == IS_OBJECT) { zval *current; HashTable *postfields; zend_string *string_key; zend_ulong num_key; struct HttpPost *first = NULL; struct HttpPost *last = NULL; CURLFORMcode form_error; postfields = HASH_OF(zvalue); if (!postfields) { php_error_docref(NULL, E_WARNING, "Couldn't get HashTable in CURLOPT_POSTFIELDS"); return FAILURE; } ZEND_HASH_FOREACH_KEY_VAL(postfields, num_key, string_key, current) { zend_string *postval; /* Pretend we have a string_key here */ if (!string_key) { string_key = zend_long_to_str(num_key); } else { zend_string_addref(string_key); } ZVAL_DEREF(current); if (Z_TYPE_P(current) == IS_OBJECT && instanceof_function(Z_OBJCE_P(current), curl_CURLFile_class)) { /* new-style file upload */ zval *prop, rv; char *type = 
NULL, *filename = NULL; prop = zend_read_property(curl_CURLFile_class, current, "name", sizeof("name")-1, 0, &rv); if (Z_TYPE_P(prop) != IS_STRING) { php_error_docref(NULL, E_WARNING, "Invalid filename for key %s", ZSTR_VAL(string_key)); } else { postval = Z_STR_P(prop); if (php_check_open_basedir(ZSTR_VAL(postval))) { return 1; } prop = zend_read_property(curl_CURLFile_class, current, "mime", sizeof("mime")-1, 0, &rv); if (Z_TYPE_P(prop) == IS_STRING && Z_STRLEN_P(prop) > 0) { type = Z_STRVAL_P(prop); } prop = zend_read_property(curl_CURLFile_class, current, "postname", sizeof("postname")-1, 0, &rv); if (Z_TYPE_P(prop) == IS_STRING && Z_STRLEN_P(prop) > 0) { filename = Z_STRVAL_P(prop); } form_error = curl_formadd(&first, &last, CURLFORM_COPYNAME, ZSTR_VAL(string_key), CURLFORM_NAMELENGTH, ZSTR_LEN(string_key), CURLFORM_FILENAME, filename ? filename : ZSTR_VAL(postval), CURLFORM_CONTENTTYPE, type ? type : "application/octet-stream", CURLFORM_FILE, ZSTR_VAL(postval), CURLFORM_END); if (form_error != CURL_FORMADD_OK) { /* Not nice to convert between enums but we only have place for one error type */ error = (CURLcode)form_error; } } zend_string_release(string_key); continue; } postval = zval_get_string(current); /* The arguments after _NAMELENGTH and _CONTENTSLENGTH * must be explicitly cast to long in curl_formadd * use since curl needs a long not an int. */ form_error = curl_formadd(&first, &last, CURLFORM_COPYNAME, ZSTR_VAL(string_key), CURLFORM_NAMELENGTH, ZSTR_LEN(string_key), CURLFORM_COPYCONTENTS, ZSTR_VAL(postval), CURLFORM_CONTENTSLENGTH, ZSTR_LEN(postval), CURLFORM_END); if (form_error != CURL_FORMADD_OK) { /* Not nice to convert between enums but we only have place for one error type */ error = (CURLcode)form_error; } zend_string_release(postval); zend_string_release(string_key); } ZEND_HASH_FOREACH_END(); SAVE_CURL_ERROR(ch, error); if (error != CURLE_OK) { return FAILURE; } if ((*ch->clone) == 1) { zend_llist_clean(&ch->to_free->post); } zend_llist_add_element(&ch->to_free->post, &first); error = curl_easy_setopt(ch->cp, CURLOPT_HTTPPOST, first); } else { #if LIBCURL_VERSION_NUM >= 0x071101 zend_string *str = zval_get_string(zvalue); /* with curl 7.17.0 and later, we can use COPYPOSTFIELDS, but we have to provide size before */ error = curl_easy_setopt(ch->cp, CURLOPT_POSTFIELDSIZE, ZSTR_LEN(str)); error = curl_easy_setopt(ch->cp, CURLOPT_COPYPOSTFIELDS, ZSTR_VAL(str)); zend_string_release(str); #else char *post = NULL; zend_string *str = zval_get_string(zvalue); post = estrndup(ZSTR_VAL(str), ZSTR_LEN(str)); zend_llist_add_element(&ch->to_free->str, &post); curl_easy_setopt(ch->cp, CURLOPT_POSTFIELDS, post); error = curl_easy_setopt(ch->cp, CURLOPT_POSTFIELDSIZE, ZSTR_LEN(str)); zend_string_release(str); #endif } break; case CURLOPT_PROGRESSFUNCTION: curl_easy_setopt(ch->cp, CURLOPT_PROGRESSFUNCTION, curl_progress); curl_easy_setopt(ch->cp, CURLOPT_PROGRESSDATA, ch); if (ch->handlers->progress == NULL) { ch->handlers->progress = ecalloc(1, sizeof(php_curl_progress)); } else if (!Z_ISUNDEF(ch->handlers->progress->func_name)) { zval_ptr_dtor(&ch->handlers->progress->func_name); ch->handlers->progress->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->progress->func_name, zvalue); ch->handlers->progress->method = PHP_CURL_USER; break; case CURLOPT_READFUNCTION: if (!Z_ISUNDEF(ch->handlers->read->func_name)) { zval_ptr_dtor(&ch->handlers->read->func_name); ch->handlers->read->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->read->func_name, zvalue); 
ch->handlers->read->method = PHP_CURL_USER; break; case CURLOPT_RETURNTRANSFER: lval = zval_get_long(zvalue); if (lval) { ch->handlers->write->method = PHP_CURL_RETURN; } else { ch->handlers->write->method = PHP_CURL_STDOUT; } break; case CURLOPT_WRITEFUNCTION: if (!Z_ISUNDEF(ch->handlers->write->func_name)) { zval_ptr_dtor(&ch->handlers->write->func_name); ch->handlers->write->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->write->func_name, zvalue); ch->handlers->write->method = PHP_CURL_USER; break; #if LIBCURL_VERSION_NUM >= 0x070f05 /* Available since 7.15.5 */ case CURLOPT_MAX_RECV_SPEED_LARGE: case CURLOPT_MAX_SEND_SPEED_LARGE: lval = zval_get_long(zvalue); error = curl_easy_setopt(ch->cp, option, (curl_off_t)lval); break; #endif #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ case CURLOPT_POSTREDIR: lval = zval_get_long(zvalue); error = curl_easy_setopt(ch->cp, CURLOPT_POSTREDIR, lval & CURL_REDIR_POST_ALL); break; #endif #if CURLOPT_PASSWDFUNCTION != 0 case CURLOPT_PASSWDFUNCTION: zval_ptr_dtor(&ch->handlers->passwd); ZVAL_COPY(&ch->handlers->passwd, zvalue); error = curl_easy_setopt(ch->cp, CURLOPT_PASSWDFUNCTION, curl_passwd); error = curl_easy_setopt(ch->cp, CURLOPT_PASSWDDATA, (void *) ch); break; #endif /* the following options deal with files, therefore the open_basedir check * is required. */ case CURLOPT_COOKIEFILE: case CURLOPT_COOKIEJAR: case CURLOPT_RANDOM_FILE: case CURLOPT_SSLCERT: #if LIBCURL_VERSION_NUM >= 0x070b00 /* Available since 7.11.0 */ case CURLOPT_NETRC_FILE: #endif #if LIBCURL_VERSION_NUM >= 0x071001 /* Available since 7.16.1 */ case CURLOPT_SSH_PRIVATE_KEYFILE: case CURLOPT_SSH_PUBLIC_KEYFILE: #endif #if LIBCURL_VERSION_NUM >= 0x071300 /* Available since 7.19.0 */ case CURLOPT_CRLFILE: case CURLOPT_ISSUERCERT: #endif #if LIBCURL_VERSION_NUM >= 0x071306 /* Available since 7.19.6 */ case CURLOPT_SSH_KNOWNHOSTS: #endif { zend_string *str = zval_get_string(zvalue); int ret; if (ZSTR_LEN(str) && php_check_open_basedir(ZSTR_VAL(str))) { zend_string_release(str); return FAILURE; } ret = php_curl_option_str(ch, option, ZSTR_VAL(str), ZSTR_LEN(str), 0); zend_string_release(str); return ret; } case CURLINFO_HEADER_OUT: lval = zval_get_long(zvalue); if (lval == 1) { curl_easy_setopt(ch->cp, CURLOPT_DEBUGFUNCTION, curl_debug); curl_easy_setopt(ch->cp, CURLOPT_DEBUGDATA, (void *)ch); curl_easy_setopt(ch->cp, CURLOPT_VERBOSE, 1); } else { curl_easy_setopt(ch->cp, CURLOPT_DEBUGFUNCTION, NULL); curl_easy_setopt(ch->cp, CURLOPT_DEBUGDATA, NULL); curl_easy_setopt(ch->cp, CURLOPT_VERBOSE, 0); } break; case CURLOPT_SHARE: { php_curlsh *sh; if ((sh = (php_curlsh *)zend_fetch_resource_ex(zvalue, le_curl_share_handle_name, le_curl_share_handle))) { curl_easy_setopt(ch->cp, CURLOPT_SHARE, sh->share); } } break; #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ case CURLOPT_FNMATCH_FUNCTION: curl_easy_setopt(ch->cp, CURLOPT_FNMATCH_FUNCTION, curl_fnmatch); curl_easy_setopt(ch->cp, CURLOPT_FNMATCH_DATA, ch); if (ch->handlers->fnmatch == NULL) { ch->handlers->fnmatch = ecalloc(1, sizeof(php_curl_fnmatch)); } else if (!Z_ISUNDEF(ch->handlers->fnmatch->func_name)) { zval_ptr_dtor(&ch->handlers->fnmatch->func_name); ch->handlers->fnmatch->fci_cache = empty_fcall_info_cache; } ZVAL_COPY(&ch->handlers->fnmatch->func_name, zvalue); ch->handlers->fnmatch->method = PHP_CURL_USER; break; #endif } SAVE_CURL_ERROR(ch, error); if (error != CURLE_OK) { return FAILURE; } else { return SUCCESS; } } /* }}} */ /* {{{ proto bool curl_setopt(resource ch, 
int option, mixed value) Set an option for a cURL transfer */ PHP_FUNCTION(curl_setopt) { zval *zid, *zvalue; zend_long options; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rlz", &zid, &options, &zvalue) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (options <= 0 && options != CURLOPT_SAFE_UPLOAD) { php_error_docref(NULL, E_WARNING, "Invalid curl configuration option"); RETURN_FALSE; } if (_php_curl_setopt(ch, options, zvalue) == SUCCESS) { RETURN_TRUE; } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto bool curl_setopt_array(resource ch, array options) Set an array of option for a cURL transfer */ PHP_FUNCTION(curl_setopt_array) { zval *zid, *arr, *entry; php_curl *ch; zend_ulong option; zend_string *string_key; if (zend_parse_parameters(ZEND_NUM_ARGS(), "ra", &zid, &arr) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } ZEND_HASH_FOREACH_KEY_VAL(Z_ARRVAL_P(arr), option, string_key, entry) { if (string_key) { php_error_docref(NULL, E_WARNING, "Array keys must be CURLOPT constants or equivalent integer values"); RETURN_FALSE; } if (_php_curl_setopt(ch, (zend_long) option, entry) == FAILURE) { RETURN_FALSE; } } ZEND_HASH_FOREACH_END(); RETURN_TRUE; } /* }}} */ /* {{{ _php_curl_cleanup_handle(ch) Cleanup an execution phase */ void _php_curl_cleanup_handle(php_curl *ch) { smart_str_free(&ch->handlers->write->buf); if (ch->header.str) { zend_string_release(ch->header.str); ch->header.str = NULL; } memset(ch->err.str, 0, CURL_ERROR_SIZE + 1); ch->err.no = 0; } /* }}} */ /* {{{ proto bool curl_exec(resource ch) Perform a cURL session */ PHP_FUNCTION(curl_exec) { CURLcode error; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } _php_curl_verify_handlers(ch, 1); _php_curl_cleanup_handle(ch); error = curl_easy_perform(ch->cp); SAVE_CURL_ERROR(ch, error); /* CURLE_PARTIAL_FILE is returned by HEAD requests */ if (error != CURLE_OK && error != CURLE_PARTIAL_FILE) { smart_str_free(&ch->handlers->write->buf); RETURN_FALSE; } if (!Z_ISUNDEF(ch->handlers->std_err)) { php_stream *stream; stream = (php_stream*)zend_fetch_resource2_ex(&ch->handlers->std_err, NULL, php_file_le_stream(), php_file_le_pstream()); if (stream) { php_stream_flush(stream); } } if (ch->handlers->write->method == PHP_CURL_RETURN && ch->handlers->write->buf.s) { smart_str_0(&ch->handlers->write->buf); RETURN_STR_COPY(ch->handlers->write->buf.s); } /* flush the file handle, so any remaining data is synched to disk */ if (ch->handlers->write->method == PHP_CURL_FILE && ch->handlers->write->fp) { fflush(ch->handlers->write->fp); } if (ch->handlers->write_header->method == PHP_CURL_FILE && ch->handlers->write_header->fp) { fflush(ch->handlers->write_header->fp); } if (ch->handlers->write->method == PHP_CURL_RETURN) { RETURN_EMPTY_STRING(); } else { RETURN_TRUE; } } /* }}} */ /* {{{ proto mixed curl_getinfo(resource ch [, int option]) Get information regarding a specific transfer */ PHP_FUNCTION(curl_getinfo) { zval *zid; php_curl *ch; zend_long option = 0; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r|l", &zid, &option) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ZEND_NUM_ARGS() < 2) { char *s_code; /* libcurl expects 
long datatype. So far no cases are known where it would be an issue. Using zend_long would truncate a 64-bit var on Win64, so the exact long datatype fits everywhere, as long as there's no 32-bit int overflow. */ long l_code; double d_code; #if LIBCURL_VERSION_NUM > 0x071301 struct curl_certinfo *ci = NULL; zval listcode; #endif array_init(return_value); if (curl_easy_getinfo(ch->cp, CURLINFO_EFFECTIVE_URL, &s_code) == CURLE_OK) { CAAS("url", s_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_CONTENT_TYPE, &s_code) == CURLE_OK) { if (s_code != NULL) { CAAS("content_type", s_code); } else { zval retnull; ZVAL_NULL(&retnull); CAAZ("content_type", &retnull); } } if (curl_easy_getinfo(ch->cp, CURLINFO_HTTP_CODE, &l_code) == CURLE_OK) { CAAL("http_code", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_HEADER_SIZE, &l_code) == CURLE_OK) { CAAL("header_size", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_REQUEST_SIZE, &l_code) == CURLE_OK) { CAAL("request_size", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_FILETIME, &l_code) == CURLE_OK) { CAAL("filetime", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SSL_VERIFYRESULT, &l_code) == CURLE_OK) { CAAL("ssl_verify_result", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_REDIRECT_COUNT, &l_code) == CURLE_OK) { CAAL("redirect_count", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_TOTAL_TIME, &d_code) == CURLE_OK) { CAAD("total_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_NAMELOOKUP_TIME, &d_code) == CURLE_OK) { CAAD("namelookup_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_CONNECT_TIME, &d_code) == CURLE_OK) { CAAD("connect_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_PRETRANSFER_TIME, &d_code) == CURLE_OK) { CAAD("pretransfer_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SIZE_UPLOAD, &d_code) == CURLE_OK) { CAAD("size_upload", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SIZE_DOWNLOAD, &d_code) == CURLE_OK) { CAAD("size_download", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SPEED_DOWNLOAD, &d_code) == CURLE_OK) { CAAD("speed_download", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_SPEED_UPLOAD, &d_code) == CURLE_OK) { CAAD("speed_upload", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &d_code) == CURLE_OK) { CAAD("download_content_length", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_CONTENT_LENGTH_UPLOAD, &d_code) == CURLE_OK) { CAAD("upload_content_length", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_STARTTRANSFER_TIME, &d_code) == CURLE_OK) { CAAD("starttransfer_time", d_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_REDIRECT_TIME, &d_code) == CURLE_OK) { CAAD("redirect_time", d_code); } #if LIBCURL_VERSION_NUM >= 0x071202 /* Available since 7.18.2 */ if (curl_easy_getinfo(ch->cp, CURLINFO_REDIRECT_URL, &s_code) == CURLE_OK) { CAAS("redirect_url", s_code); } #endif #if LIBCURL_VERSION_NUM >= 0x071300 /* Available since 7.19.0 */ if (curl_easy_getinfo(ch->cp, CURLINFO_PRIMARY_IP, &s_code) == CURLE_OK) { CAAS("primary_ip", s_code); } #endif #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ if (curl_easy_getinfo(ch->cp, CURLINFO_CERTINFO, &ci) == CURLE_OK) { array_init(&listcode); create_certinfo(ci, &listcode); CAAZ("certinfo", &listcode); } #endif #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ if (curl_easy_getinfo(ch->cp, CURLINFO_PRIMARY_PORT, &l_code) == CURLE_OK) { CAAL("primary_port", l_code); } if (curl_easy_getinfo(ch->cp, CURLINFO_LOCAL_IP, &s_code) == CURLE_OK) { CAAS("local_ip", s_code); } 
if (curl_easy_getinfo(ch->cp, CURLINFO_LOCAL_PORT, &l_code) == CURLE_OK) { CAAL("local_port", l_code); } #endif if (ch->header.str) { CAASTR("request_header", ch->header.str); } } else { switch (option) { case CURLINFO_HEADER_OUT: if (ch->header.str) { RETURN_STR_COPY(ch->header.str); } else { RETURN_FALSE; } #if LIBCURL_VERSION_NUM >= 0x071301 /* Available since 7.19.1 */ case CURLINFO_CERTINFO: { struct curl_certinfo *ci = NULL; array_init(return_value); if (curl_easy_getinfo(ch->cp, CURLINFO_CERTINFO, &ci) == CURLE_OK) { create_certinfo(ci, return_value); } else { RETURN_FALSE; } break; } #endif default: { int type = CURLINFO_TYPEMASK & option; switch (type) { case CURLINFO_STRING: { char *s_code = NULL; if (curl_easy_getinfo(ch->cp, option, &s_code) == CURLE_OK && s_code) { RETURN_STRING(s_code); } else { RETURN_FALSE; } break; } case CURLINFO_LONG: { zend_long code = 0; if (curl_easy_getinfo(ch->cp, option, &code) == CURLE_OK) { RETURN_LONG(code); } else { RETURN_FALSE; } break; } case CURLINFO_DOUBLE: { double code = 0.0; if (curl_easy_getinfo(ch->cp, option, &code) == CURLE_OK) { RETURN_DOUBLE(code); } else { RETURN_FALSE; } break; } #if LIBCURL_VERSION_NUM >= 0x070c03 /* Available since 7.12.3 */ case CURLINFO_SLIST: { struct curl_slist *slist; array_init(return_value); if (curl_easy_getinfo(ch->cp, option, &slist) == CURLE_OK) { while (slist) { add_next_index_string(return_value, slist->data); slist = slist->next; } curl_slist_free_all(slist); } else { RETURN_FALSE; } break; } #endif default: RETURN_FALSE; } } } } } /* }}} */ /* {{{ proto string curl_error(resource ch) Return a string contain the last error for the current session */ PHP_FUNCTION(curl_error) { zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } ch->err.str[CURL_ERROR_SIZE] = 0; RETURN_STRING(ch->err.str); } /* }}} */ /* {{{ proto int curl_errno(resource ch) Return an integer containing the last error number */ PHP_FUNCTION(curl_errno) { zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } RETURN_LONG(ch->err.no); } /* }}} */ /* {{{ proto void curl_close(resource ch) Close a cURL session */ PHP_FUNCTION(curl_close) { zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ch->in_callback) { php_error_docref(NULL, E_WARNING, "Attempt to close cURL handle from a callback"); return; } if (Z_REFCOUNT_P(zid) <= 2) { zend_list_close(Z_RES_P(zid)); } } /* }}} */ /* {{{ _php_curl_close_ex() List destructor for curl handles */ static void _php_curl_close_ex(php_curl *ch) { #if PHP_CURL_DEBUG fprintf(stderr, "DTOR CALLED, ch = %x\n", ch); #endif _php_curl_verify_handlers(ch, 0); /* * Libcurl is doing connection caching. When easy handle is cleaned up, * if the handle was previously used by the curl_multi_api, the connection * remains open un the curl multi handle is cleaned up. Some protocols are * sending content like the FTP one, and libcurl try to use the * WRITEFUNCTION or the HEADERFUNCTION. Since structures used in those * callback are freed, we need to use an other callback to which avoid * segfaults. 
* * Libcurl commit d021f2e8a00 fix this issue and should be part of 7.28.2 */ curl_easy_setopt(ch->cp, CURLOPT_HEADERFUNCTION, curl_write_nothing); curl_easy_setopt(ch->cp, CURLOPT_WRITEFUNCTION, curl_write_nothing); curl_easy_cleanup(ch->cp); /* cURL destructors should be invoked only by last curl handle */ if (--(*ch->clone) == 0) { zend_llist_clean(&ch->to_free->str); zend_llist_clean(&ch->to_free->post); zend_hash_destroy(ch->to_free->slist); efree(ch->to_free->slist); efree(ch->to_free); efree(ch->clone); } smart_str_free(&ch->handlers->write->buf); zval_ptr_dtor(&ch->handlers->write->func_name); zval_ptr_dtor(&ch->handlers->read->func_name); zval_ptr_dtor(&ch->handlers->write_header->func_name); #if CURLOPT_PASSWDFUNCTION != 0 zval_ptr_dtor(&ch->handlers->passwd); #endif zval_ptr_dtor(&ch->handlers->std_err); if (ch->header.str) { zend_string_release(ch->header.str); } zval_ptr_dtor(&ch->handlers->write_header->stream); zval_ptr_dtor(&ch->handlers->write->stream); zval_ptr_dtor(&ch->handlers->read->stream); efree(ch->handlers->write); efree(ch->handlers->write_header); efree(ch->handlers->read); if (ch->handlers->progress) { zval_ptr_dtor(&ch->handlers->progress->func_name); efree(ch->handlers->progress); } #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ if (ch->handlers->fnmatch) { zval_ptr_dtor(&ch->handlers->fnmatch->func_name); efree(ch->handlers->fnmatch); } #endif efree(ch->handlers); efree(ch); } /* }}} */ /* {{{ _php_curl_close() List destructor for curl handles */ static void _php_curl_close(zend_resource *rsrc) { php_curl *ch = (php_curl *) rsrc->ptr; _php_curl_close_ex(ch); } /* }}} */ #if LIBCURL_VERSION_NUM >= 0x070c00 /* Available since 7.12.0 */ /* {{{ proto bool curl_strerror(int code) return string describing error code */ PHP_FUNCTION(curl_strerror) { zend_long code; const char *str; if (zend_parse_parameters(ZEND_NUM_ARGS(), "l", &code) == FAILURE) { return; } str = curl_easy_strerror(code); if (str) { RETURN_STRING(str); } else { RETURN_NULL(); } } /* }}} */ #endif #if LIBCURL_VERSION_NUM >= 0x070c01 /* 7.12.1 */ /* {{{ _php_curl_reset_handlers() Reset all handlers of a given php_curl */ static void _php_curl_reset_handlers(php_curl *ch) { if (!Z_ISUNDEF(ch->handlers->write->stream)) { zval_ptr_dtor(&ch->handlers->write->stream); ZVAL_UNDEF(&ch->handlers->write->stream); } ch->handlers->write->fp = NULL; ch->handlers->write->method = PHP_CURL_STDOUT; if (!Z_ISUNDEF(ch->handlers->write_header->stream)) { zval_ptr_dtor(&ch->handlers->write_header->stream); ZVAL_UNDEF(&ch->handlers->write_header->stream); } ch->handlers->write_header->fp = NULL; ch->handlers->write_header->method = PHP_CURL_IGNORE; if (!Z_ISUNDEF(ch->handlers->read->stream)) { zval_ptr_dtor(&ch->handlers->read->stream); ZVAL_UNDEF(&ch->handlers->read->stream); } ch->handlers->read->fp = NULL; ch->handlers->read->res = NULL; ch->handlers->read->method = PHP_CURL_DIRECT; if (!Z_ISUNDEF(ch->handlers->std_err)) { zval_ptr_dtor(&ch->handlers->std_err); ZVAL_UNDEF(&ch->handlers->std_err); } if (ch->handlers->progress) { zval_ptr_dtor(&ch->handlers->progress->func_name); efree(ch->handlers->progress); ch->handlers->progress = NULL; } #if LIBCURL_VERSION_NUM >= 0x071500 /* Available since 7.21.0 */ if (ch->handlers->fnmatch) { zval_ptr_dtor(&ch->handlers->fnmatch->func_name); efree(ch->handlers->fnmatch); ch->handlers->fnmatch = NULL; } #endif } /* }}} */ /* {{{ proto void curl_reset(resource ch) Reset all options of a libcurl session handle */ PHP_FUNCTION(curl_reset) { zval *zid; php_curl 
*ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "r", &zid) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ch->in_callback) { php_error_docref(NULL, E_WARNING, "Attempt to reset cURL handle from a callback"); return; } curl_easy_reset(ch->cp); _php_curl_reset_handlers(ch); _php_curl_set_default_options(ch); } /* }}} */ #endif #if LIBCURL_VERSION_NUM > 0x070f03 /* 7.15.4 */ /* {{{ proto void curl_escape(resource ch, string str) URL encodes the given string */ PHP_FUNCTION(curl_escape) { char *str = NULL, *res = NULL; size_t str_len = 0; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ZEND_SIZE_T_INT_OVFL(str_len)) { RETURN_FALSE; } if ((res = curl_easy_escape(ch->cp, str, str_len))) { RETVAL_STRING(res); curl_free(res); } else { RETURN_FALSE; } } /* }}} */ /* {{{ proto void curl_unescape(resource ch, string str) URL decodes the given string */ PHP_FUNCTION(curl_unescape) { char *str = NULL, *out = NULL; size_t str_len = 0; int out_len; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ZEND_SIZE_T_INT_OVFL(str_len)) { RETURN_FALSE; } if ((out = curl_easy_unescape(ch->cp, str, str_len, &out_len))) { RETVAL_STRINGL(out, out_len); curl_free(out); } else { RETURN_FALSE; } } /* }}} */ #endif #if LIBCURL_VERSION_NUM >= 0x071200 /* 7.18.0 */ /* {{{ proto void curl_pause(resource ch, int bitmask) pause and unpause a connection */ PHP_FUNCTION(curl_pause) { zend_long bitmask; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rl", &zid, &bitmask) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } RETURN_LONG(curl_easy_pause(ch->cp, bitmask)); } /* }}} */ #endif #endif /* HAVE_CURL */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: fdm=marker * vim: noet sw=4 ts=4 */
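Annotation: the linked-list options in the code above (CURLOPT_HTTPHEADER, CURLOPT_QUOTE, CURLOPT_RESOLVE, ...) are built with curl_slist_append(). Below is a minimal standalone sketch of that construction using only documented libcurl calls; unlike the extension code, which warns and returns early when an append fails, this version also frees the partial list. The name build_headers is illustrative, not from php-src.

#include <stddef.h>
#include <curl/curl.h>

static struct curl_slist *build_headers(const char **lines, size_t n)
{
    struct curl_slist *slist = NULL;
    size_t i;

    for (i = 0; i < n; i++) {
        struct curl_slist *tmp = curl_slist_append(slist, lines[i]);
        if (tmp == NULL) {                  /* allocation failed */
            curl_slist_free_all(slist);     /* release the partial list */
            return NULL;
        }
        slist = tmp;
    }
    return slist;   /* pass to CURLOPT_HTTPHEADER; free with
                       curl_slist_free_all() after the transfer */
}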
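Annotation: CURLOPT_WRITEFUNCTION / CURLOPT_HEADERFUNCTION above bind C trampolines that later dispatch to the user's PHP callable. The native callback shape those trampolines conform to is fixed by libcurl; here is a hedged sketch with an illustrative fixed-size sink (struct sink and write_cb are assumptions for the example, not extension names).

#include <string.h>
#include <curl/curl.h>

struct sink { char buf[4096]; size_t len; };   /* illustrative buffer */

static size_t write_cb(char *ptr, size_t size, size_t nmemb, void *userdata)
{
    struct sink *s = (struct sink *)userdata;
    size_t n = size * nmemb;
    size_t room = sizeof(s->buf) - s->len;

    if (n > room)
        n = room;                   /* truncate; a real sink would grow */
    memcpy(s->buf + s->len, ptr, n);
    s->len += n;
    return size * nmemb;            /* claim every byte or curl aborts */
}

/* Registration, given some struct sink my_sink:
 *   curl_easy_setopt(cp, CURLOPT_WRITEFUNCTION, write_cb);
 *   curl_easy_setopt(cp, CURLOPT_WRITEDATA, &my_sink); */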
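Annotation: the default branch of curl_getinfo() above dispatches on CURLINFO_TYPEMASK & option, because each CURLINFO_* constant encodes its result type in those bits. A self-contained sketch of the same dispatch (print_info is an illustrative name):

#include <stdio.h>
#include <curl/curl.h>

static void print_info(CURL *cp, CURLINFO info)
{
    switch (info & CURLINFO_TYPEMASK) {    /* type lives in the high bits */
    case CURLINFO_STRING: {
        char *s = NULL;
        if (curl_easy_getinfo(cp, info, &s) == CURLE_OK && s)
            printf("%s\n", s);
        break;
    }
    case CURLINFO_LONG: {
        long l = 0;
        if (curl_easy_getinfo(cp, info, &l) == CURLE_OK)
            printf("%ld\n", l);
        break;
    }
    case CURLINFO_DOUBLE: {
        double d = 0.0;
        if (curl_easy_getinfo(cp, info, &d) == CURLE_OK)
            printf("%f\n", d);
        break;
    }
    }
}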
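Annotation: the comment in _php_curl_close_ex() above explains why the callbacks must be neutralized before curl_easy_cleanup(): a connection still cached by a multi handle can fire them after the wrapper's structures are freed. The defensive ordering, sketched generically:

#include <curl/curl.h>

static size_t write_nothing(char *ptr, size_t size, size_t nmemb, void *ud)
{
    (void)ptr; (void)ud;
    return size * nmemb;            /* swallow the data silently */
}

static void close_handle(CURL *cp)
{
    /* point the callbacks at a no-op first, then clean up */
    curl_easy_setopt(cp, CURLOPT_HEADERFUNCTION, write_nothing);
    curl_easy_setopt(cp, CURLOPT_WRITEFUNCTION, write_nothing);
    curl_easy_cleanup(cp);
}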
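Annotation: curl_escape()/curl_unescape() above reject oversized inputs because curl_easy_escape() and curl_easy_unescape() take the input length as a plain int. A sketch of the guarded call (escape_string is an illustrative name):

#include <limits.h>
#include <string.h>
#include <curl/curl.h>

static char *escape_string(CURL *cp, const char *s)
{
    size_t len = strlen(s);

    if (len > (size_t)INT_MAX)
        return NULL;                          /* would truncate in the cast */
    return curl_easy_escape(cp, s, (int)len); /* caller frees with curl_free() */
}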
PHP_FUNCTION(curl_unescape) { char *str = NULL, *out = NULL; size_t str_len = 0; int out_len; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (str_len > INT_MAX) { RETURN_FALSE; } if ((out = curl_easy_unescape(ch->cp, str, str_len, &out_len))) { RETVAL_STRINGL(out, out_len); curl_free(out); } else { RETURN_FALSE; } }
PHP_FUNCTION(curl_unescape) { char *str = NULL, *out = NULL; size_t str_len = 0; int out_len; zval *zid; php_curl *ch; if (zend_parse_parameters(ZEND_NUM_ARGS(), "rs", &zid, &str, &str_len) == FAILURE) { return; } if ((ch = (php_curl*)zend_fetch_resource(Z_RES_P(zid), le_curl_name, le_curl)) == NULL) { RETURN_FALSE; } if (ZEND_SIZE_T_INT_OVFL(str_len)) { RETURN_FALSE; } if ((out = curl_easy_unescape(ch->cp, str, str_len, &out_len))) { RETVAL_STRINGL(out, out_len); curl_free(out); } else { RETURN_FALSE; } }
{'added': [(3520, '\tsize_t str_len = 0;'), (3532, '\tif (ZEND_SIZE_T_INT_OVFL(str_len)) {'), (3533, '\t\tRETURN_FALSE;'), (3534, '\t}'), (3535, ''), (3563, '\tif (ZEND_SIZE_T_INT_OVFL(str_len)) {')], 'deleted': [(3520, '\tsize_t str_len = 0;'), (3559, '\tif (str_len > INT_MAX) {')]}
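Annotation: the diff above is the substance of this record's fix (CVE-2016-7134): it adds a ZEND_SIZE_T_INT_OVFL() guard to curl_escape, which previously had no length check at all, and replaces curl_unescape's hand-rolled "str_len > INT_MAX" test with the same macro. Both guards exist because a size_t narrowed to int wraps or goes negative past INT_MAX before libcurl sees it. A sketch of the hazard, assuming a typical two's-complement 64-bit build:

#include <limits.h>
#include <stddef.h>

static int fits_in_int(size_t n)
{
    return n <= (size_t)INT_MAX;   /* what ZEND_SIZE_T_INT_OVFL() rejects */
}

/* Without the guard, on a typical 64-bit build:
 *   size_t len = 3221225472u;      (3 GiB input)
 *   (int)len == -1073741824        -- the length libcurl would be handed */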
6
2
2,517
15,553
23
136
5
https://github.com/php/php-src
CVE-2016-7134
CWE-119
1,207
ldo.c
C
luaD_callnoyield
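Annotation: this record's method is luaD_callnoyield from Lua's ldo.c, shown in the code that follows. It brackets every non-yieldable call with incXCcalls/decXCcalls so the VM can detect runaway C recursion before the machine stack overflows. A generic sketch of that counter idiom, with an illustrative limit (the real VM derives its own):

enum { CCALL_LIMIT = 200 };        /* illustrative; not Lua's value */

static int ncalls = 0;             /* per-thread state in the real VM */

static int call_guarded(void (*f)(void *), void *ud)
{
    if (++ncalls > CCALL_LIMIT) {  /* too deep: refuse before crashing */
        --ncalls;
        return -1;                 /* "C stack overflow" */
    }
    f(ud);
    --ncalls;
    return 0;
}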
/* ** $Id: ldo.c $ ** Stack and Call structure of Lua ** See Copyright Notice in lua.h */ #define ldo_c #define LUA_CORE #include "lprefix.h" #include <setjmp.h> #include <stdlib.h> #include <string.h> #include "lua.h" #include "lapi.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lmem.h" #include "lobject.h" #include "lopcodes.h" #include "lparser.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" #include "lundump.h" #include "lvm.h" #include "lzio.h" #define errorstatus(s) ((s) > LUA_YIELD) /* ** {====================================================== ** Error-recovery functions ** ======================================================= */ /* ** LUAI_THROW/LUAI_TRY define how Lua does exception handling. By ** default, Lua handles errors with exceptions when compiling as ** C++ code, with _longjmp/_setjmp when asked to use them, and with ** longjmp/setjmp otherwise. */ #if !defined(LUAI_THROW) /* { */ #if defined(__cplusplus) && !defined(LUA_USE_LONGJMP) /* { */ /* C++ exceptions */ #define LUAI_THROW(L,c) throw(c) #define LUAI_TRY(L,c,a) \ try { a } catch(...) { if ((c)->status == 0) (c)->status = -1; } #define luai_jmpbuf int /* dummy variable */ #elif defined(LUA_USE_POSIX) /* }{ */ /* in POSIX, try _longjmp/_setjmp (more efficient) */ #define LUAI_THROW(L,c) _longjmp((c)->b, 1) #define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a } #define luai_jmpbuf jmp_buf #else /* }{ */ /* ISO C handling with long jumps */ #define LUAI_THROW(L,c) longjmp((c)->b, 1) #define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a } #define luai_jmpbuf jmp_buf #endif /* } */ #endif /* } */ /* chain list of long jump buffers */ struct lua_longjmp { struct lua_longjmp *previous; luai_jmpbuf b; volatile int status; /* error code */ }; void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) { switch (errcode) { case LUA_ERRMEM: { /* memory error? */ setsvalue2s(L, oldtop, G(L)->memerrmsg); /* reuse preregistered msg. */ break; } case LUA_ERRERR: { setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling")); break; } case CLOSEPROTECT: { setnilvalue(s2v(oldtop)); /* no error message */ break; } default: { setobjs2s(L, oldtop, L->top - 1); /* error message on current top */ break; } } L->top = oldtop + 1; } l_noret luaD_throw (lua_State *L, int errcode) { if (L->errorJmp) { /* thread has an error handler? */ L->errorJmp->status = errcode; /* set status */ LUAI_THROW(L, L->errorJmp); /* jump to it */ } else { /* thread has no error handler */ global_State *g = G(L); errcode = luaF_close(L, L->stack, errcode); /* close all upvalues */ L->status = cast_byte(errcode); /* mark it as dead */ if (g->mainthread->errorJmp) { /* main thread has a handler? */ setobjs2s(L, g->mainthread->top++, L->top - 1); /* copy error obj. */ luaD_throw(g->mainthread, errcode); /* re-throw in main thread */ } else { /* no handler at all; abort */ if (g->panic) { /* panic function? */ luaD_seterrorobj(L, errcode, L->top); /* assume EXTRA_STACK */ if (L->ci->top < L->top) L->ci->top = L->top; /* pushing msg. 
can break this invariant */ lua_unlock(L); g->panic(L); /* call panic function (last chance to jump out) */ } abort(); } } } int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud) { global_State *g = G(L); l_uint32 oldnCcalls = g->Cstacklimit - (L->nCcalls + L->nci); struct lua_longjmp lj; lj.status = LUA_OK; lj.previous = L->errorJmp; /* chain new error handler */ L->errorJmp = &lj; LUAI_TRY(L, &lj, (*f)(L, ud); ); L->errorJmp = lj.previous; /* restore old error handler */ L->nCcalls = g->Cstacklimit - oldnCcalls - L->nci; return lj.status; } /* }====================================================== */ /* ** {================================================================== ** Stack reallocation ** =================================================================== */ static void correctstack (lua_State *L, StkId oldstack, StkId newstack) { CallInfo *ci; UpVal *up; if (oldstack == newstack) return; /* stack address did not change */ L->top = (L->top - oldstack) + newstack; for (up = L->openupval; up != NULL; up = up->u.open.next) up->v = s2v((uplevel(up) - oldstack) + newstack); for (ci = L->ci; ci != NULL; ci = ci->previous) { ci->top = (ci->top - oldstack) + newstack; ci->func = (ci->func - oldstack) + newstack; if (isLua(ci)) ci->u.l.trap = 1; /* signal to update 'trap' in 'luaV_execute' */ } } /* some space for error handling */ #define ERRORSTACKSIZE (LUAI_MAXSTACK + 200) int luaD_reallocstack (lua_State *L, int newsize, int raiseerror) { int lim = L->stacksize; StkId newstack = luaM_reallocvector(L, L->stack, lim, newsize, StackValue); lua_assert(newsize <= LUAI_MAXSTACK || newsize == ERRORSTACKSIZE); lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK); if (unlikely(newstack == NULL)) { /* reallocation failed? */ if (raiseerror) luaM_error(L); else return 0; /* do not raise an error */ } for (; lim < newsize; lim++) setnilvalue(s2v(newstack + lim)); /* erase new segment */ correctstack(L, L->stack, newstack); L->stack = newstack; L->stacksize = newsize; L->stack_last = L->stack + newsize - EXTRA_STACK; return 1; } /* ** Try to grow the stack by at least 'n' elements. when 'raiseerror' ** is true, raises any error; otherwise, return 0 in case of errors. */ int luaD_growstack (lua_State *L, int n, int raiseerror) { int size = L->stacksize; int newsize = 2 * size; /* tentative new size */ if (unlikely(size > LUAI_MAXSTACK)) { /* need more space after extra size? */ if (raiseerror) luaD_throw(L, LUA_ERRERR); /* error inside message handler */ else return 0; } else { int needed = cast_int(L->top - L->stack) + n + EXTRA_STACK; if (newsize > LUAI_MAXSTACK) /* cannot cross the limit */ newsize = LUAI_MAXSTACK; if (newsize < needed) /* but must respect what was asked for */ newsize = needed; if (unlikely(newsize > LUAI_MAXSTACK)) { /* stack overflow? 
*/ /* add extra size to be able to handle the error message */ luaD_reallocstack(L, ERRORSTACKSIZE, raiseerror); if (raiseerror) luaG_runerror(L, "stack overflow"); else return 0; } } /* else no errors */ return luaD_reallocstack(L, newsize, raiseerror); } static int stackinuse (lua_State *L) { CallInfo *ci; StkId lim = L->top; for (ci = L->ci; ci != NULL; ci = ci->previous) { if (lim < ci->top) lim = ci->top; } lua_assert(lim <= L->stack_last); return cast_int(lim - L->stack) + 1; /* part of stack in use */ } void luaD_shrinkstack (lua_State *L) { int inuse = stackinuse(L); int goodsize = inuse + BASIC_STACK_SIZE; if (goodsize > LUAI_MAXSTACK) goodsize = LUAI_MAXSTACK; /* respect stack limit */ /* if thread is currently not handling a stack overflow and its good size is smaller than current size, shrink its stack */ if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) && goodsize < L->stacksize) luaD_reallocstack(L, goodsize, 0); /* ok if that fails */ else /* don't change stack */ condmovestack(L,{},{}); /* (change only for debugging) */ luaE_shrinkCI(L); /* shrink CI list */ } void luaD_inctop (lua_State *L) { luaD_checkstack(L, 1); L->top++; } /* }================================================================== */ /* ** Call a hook for the given event. Make sure there is a hook to be ** called. (Both 'L->hook' and 'L->hookmask', which trigger this ** function, can be changed asynchronously by signals.) */ void luaD_hook (lua_State *L, int event, int line, int ftransfer, int ntransfer) { lua_Hook hook = L->hook; if (hook && L->allowhook) { /* make sure there is a hook */ int mask = CIST_HOOKED; CallInfo *ci = L->ci; ptrdiff_t top = savestack(L, L->top); ptrdiff_t ci_top = savestack(L, ci->top); lua_Debug ar; ar.event = event; ar.currentline = line; ar.i_ci = ci; if (ntransfer != 0) { mask |= CIST_TRAN; /* 'ci' has transfer information */ ci->u2.transferinfo.ftransfer = ftransfer; ci->u2.transferinfo.ntransfer = ntransfer; } luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */ if (L->top + LUA_MINSTACK > ci->top) ci->top = L->top + LUA_MINSTACK; L->allowhook = 0; /* cannot call hooks inside a hook */ ci->callstatus |= mask; lua_unlock(L); (*hook)(L, &ar); lua_lock(L); lua_assert(!L->allowhook); L->allowhook = 1; ci->top = restorestack(L, ci_top); L->top = restorestack(L, top); ci->callstatus &= ~mask; } } /* ** Executes a call hook for Lua functions. This function is called ** whenever 'hookmask' is not zero, so it checks whether call hooks are ** active. */ void luaD_hookcall (lua_State *L, CallInfo *ci) { int hook = (ci->callstatus & CIST_TAIL) ? LUA_HOOKTAILCALL : LUA_HOOKCALL; Proto *p; if (!(L->hookmask & LUA_MASKCALL)) /* some other hook? */ return; /* don't call hook */ p = clLvalue(s2v(ci->func))->p; L->top = ci->top; /* prepare top */ ci->u.l.savedpc++; /* hooks assume 'pc' is already incremented */ luaD_hook(L, hook, -1, 1, p->numparams); ci->u.l.savedpc--; /* correct 'pc' */ } static StkId rethook (lua_State *L, CallInfo *ci, StkId firstres, int nres) { ptrdiff_t oldtop = savestack(L, L->top); /* hook may change top */ int delta = 0; if (isLuacode(ci)) { Proto *p = ci_func(ci)->p; if (p->is_vararg) delta = ci->u.l.nextraargs + p->numparams + 1; if (L->top < ci->top) L->top = ci->top; /* correct top to run hook */ } if (L->hookmask & LUA_MASKRET) { /* is return hook on? 
*/ int ftransfer; ci->func += delta; /* if vararg, back to virtual 'func' */ ftransfer = cast(unsigned short, firstres - ci->func); luaD_hook(L, LUA_HOOKRET, -1, ftransfer, nres); /* call it */ ci->func -= delta; } if (isLua(ci = ci->previous)) L->oldpc = pcRel(ci->u.l.savedpc, ci_func(ci)->p); /* update 'oldpc' */ return restorestack(L, oldtop); } /* ** Check whether 'func' has a '__call' metafield. If so, put it in the ** stack, below original 'func', so that 'luaD_call' can call it. Raise ** an error if there is no '__call' metafield. */ void luaD_tryfuncTM (lua_State *L, StkId func) { const TValue *tm = luaT_gettmbyobj(L, s2v(func), TM_CALL); StkId p; if (unlikely(ttisnil(tm))) luaG_typeerror(L, s2v(func), "call"); /* nothing to call */ for (p = L->top; p > func; p--) /* open space for metamethod */ setobjs2s(L, p, p-1); L->top++; /* stack space pre-allocated by the caller */ setobj2s(L, func, tm); /* metamethod is the new function to be called */ } /* ** Given 'nres' results at 'firstResult', move 'wanted' of them to 'res'. ** Handle most typical cases (zero results for commands, one result for ** expressions, multiple results for tail calls/single parameters) ** separated. */ static void moveresults (lua_State *L, StkId res, int nres, int wanted) { StkId firstresult; int i; switch (wanted) { /* handle typical cases separately */ case 0: /* no values needed */ L->top = res; return; case 1: /* one value needed */ if (nres == 0) /* no results? */ setnilvalue(s2v(res)); /* adjust with nil */ else setobjs2s(L, res, L->top - nres); /* move it to proper place */ L->top = res + 1; return; case LUA_MULTRET: wanted = nres; /* we want all results */ break; default: /* multiple results (or to-be-closed variables) */ if (hastocloseCfunc(wanted)) { /* to-be-closed variables? */ ptrdiff_t savedres = savestack(L, res); luaF_close(L, res, LUA_OK); /* may change the stack */ res = restorestack(L, savedres); wanted = codeNresults(wanted); /* correct value */ if (wanted == LUA_MULTRET) wanted = nres; } break; } firstresult = L->top - nres; /* index of first result */ /* move all results to correct place */ for (i = 0; i < nres && i < wanted; i++) setobjs2s(L, res + i, firstresult + i); for (; i < wanted; i++) /* complete wanted number of results */ setnilvalue(s2v(res + i)); L->top = res + wanted; /* top points after the last result */ } /* ** Finishes a function call: calls hook if necessary, removes CallInfo, ** moves current number of results to proper place. */ void luaD_poscall (lua_State *L, CallInfo *ci, int nres) { if (L->hookmask) L->top = rethook(L, ci, L->top - nres, nres); L->ci = ci->previous; /* back to caller */ /* move results to proper place */ moveresults(L, ci->func, nres, ci->nresults); } #define next_ci(L) (L->ci->next ? L->ci->next : luaE_extendCI(L)) /* ** Prepare a function for a tail call, building its call info on top ** of the current call info. 'narg1' is the number of arguments plus 1 ** (so that it includes the function itself). 
*/ void luaD_pretailcall (lua_State *L, CallInfo *ci, StkId func, int narg1) { Proto *p = clLvalue(s2v(func))->p; int fsize = p->maxstacksize; /* frame size */ int nfixparams = p->numparams; int i; for (i = 0; i < narg1; i++) /* move down function and arguments */ setobjs2s(L, ci->func + i, func + i); checkstackGC(L, fsize); func = ci->func; /* moved-down function */ for (; narg1 <= nfixparams; narg1++) setnilvalue(s2v(func + narg1)); /* complete missing arguments */ ci->top = func + 1 + fsize; /* top for new function */ lua_assert(ci->top <= L->stack_last); ci->u.l.savedpc = p->code; /* starting point */ ci->callstatus |= CIST_TAIL; L->top = func + narg1; /* set top */ } /* ** Call a function (C or Lua). The function to be called is at *func. ** The arguments are on the stack, right after the function. ** When returns, all the results are on the stack, starting at the original ** function position. */ void luaD_call (lua_State *L, StkId func, int nresults) { lua_CFunction f; retry: switch (ttypetag(s2v(func))) { case LUA_VCCL: /* C closure */ f = clCvalue(s2v(func))->f; goto Cfunc; case LUA_VLCF: /* light C function */ f = fvalue(s2v(func)); Cfunc: { int n; /* number of returns */ CallInfo *ci; checkstackGCp(L, LUA_MINSTACK, func); /* ensure minimum stack size */ L->ci = ci = next_ci(L); ci->nresults = nresults; ci->callstatus = CIST_C; ci->top = L->top + LUA_MINSTACK; ci->func = func; lua_assert(ci->top <= L->stack_last); if (L->hookmask & LUA_MASKCALL) { int narg = cast_int(L->top - func) - 1; luaD_hook(L, LUA_HOOKCALL, -1, 1, narg); } lua_unlock(L); n = (*f)(L); /* do the actual call */ lua_lock(L); api_checknelems(L, n); luaD_poscall(L, ci, n); break; } case LUA_VLCL: { /* Lua function */ CallInfo *ci; Proto *p = clLvalue(s2v(func))->p; int narg = cast_int(L->top - func) - 1; /* number of real arguments */ int nfixparams = p->numparams; int fsize = p->maxstacksize; /* frame size */ checkstackGCp(L, fsize, func); L->ci = ci = next_ci(L); ci->nresults = nresults; ci->u.l.savedpc = p->code; /* starting point */ ci->callstatus = 0; ci->top = func + 1 + fsize; ci->func = func; L->ci = ci; for (; narg < nfixparams; narg++) setnilvalue(s2v(L->top++)); /* complete missing arguments */ lua_assert(ci->top <= L->stack_last); luaV_execute(L, ci); /* run the function */ break; } default: { /* not a function */ checkstackGCp(L, 1, func); /* space for metamethod */ luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */ goto retry; /* try again with metamethod */ } } } /* ** Similar to 'luaD_call', but does not allow yields during the call. ** If there is a stack overflow, freeing all CI structures will ** force the subsequent call to invoke 'luaE_extendCI', which then ** will raise any errors. */ void luaD_callnoyield (lua_State *L, StkId func, int nResults) { incXCcalls(L); if (getCcalls(L) <= CSTACKERR) /* possible stack overflow? */ luaE_freeCI(L); luaD_call(L, func, nResults); decXCcalls(L); } /* ** Completes the execution of an interrupted C function, calling its ** continuation function. */ static void finishCcall (lua_State *L, int status) { CallInfo *ci = L->ci; int n; /* must have a continuation and must be able to call it */ lua_assert(ci->u.c.k != NULL && yieldable(L)); /* error status can only happen in a protected call */ lua_assert((ci->callstatus & CIST_YPCALL) || status == LUA_YIELD); if (ci->callstatus & CIST_YPCALL) { /* was inside a pcall? 
*/ ci->callstatus &= ~CIST_YPCALL; /* continuation is also inside it */ L->errfunc = ci->u.c.old_errfunc; /* with the same error function */ } /* finish 'lua_callk'/'lua_pcall'; CIST_YPCALL and 'errfunc' already handled */ adjustresults(L, ci->nresults); lua_unlock(L); n = (*ci->u.c.k)(L, status, ci->u.c.ctx); /* call continuation function */ lua_lock(L); api_checknelems(L, n); luaD_poscall(L, ci, n); /* finish 'luaD_call' */ } /* ** Executes "full continuation" (everything in the stack) of a ** previously interrupted coroutine until the stack is empty (or another ** interruption long-jumps out of the loop). If the coroutine is ** recovering from an error, 'ud' points to the error status, which must ** be passed to the first continuation function (otherwise the default ** status is LUA_YIELD). */ static void unroll (lua_State *L, void *ud) { CallInfo *ci; if (ud != NULL) /* error status? */ finishCcall(L, *(int *)ud); /* finish 'lua_pcallk' callee */ while ((ci = L->ci) != &L->base_ci) { /* something in the stack */ if (!isLua(ci)) /* C function? */ finishCcall(L, LUA_YIELD); /* complete its execution */ else { /* Lua function */ luaV_finishOp(L); /* finish interrupted instruction */ luaV_execute(L, ci); /* execute down to higher C 'boundary' */ } } } /* ** Try to find a suspended protected call (a "recover point") for the ** given thread. */ static CallInfo *findpcall (lua_State *L) { CallInfo *ci; for (ci = L->ci; ci != NULL; ci = ci->previous) { /* search for a pcall */ if (ci->callstatus & CIST_YPCALL) return ci; } return NULL; /* no pending pcall */ } /* ** Recovers from an error in a coroutine. Finds a recover point (if ** there is one) and completes the execution of the interrupted ** 'luaD_pcall'. If there is no recover point, returns zero. */ static int recover (lua_State *L, int status) { StkId oldtop; CallInfo *ci = findpcall(L); if (ci == NULL) return 0; /* no recovery point */ /* "finish" luaD_pcall */ oldtop = restorestack(L, ci->u2.funcidx); luaF_close(L, oldtop, status); /* may change the stack */ oldtop = restorestack(L, ci->u2.funcidx); luaD_seterrorobj(L, status, oldtop); L->ci = ci; L->allowhook = getoah(ci->callstatus); /* restore original 'allowhook' */ luaD_shrinkstack(L); L->errfunc = ci->u.c.old_errfunc; return 1; /* continue running the coroutine */ } /* ** Signal an error in the call to 'lua_resume', not in the execution ** of the coroutine itself. (Such errors should not be handled by any ** coroutine error handler and should not kill the coroutine.) */ static int resume_error (lua_State *L, const char *msg, int narg) { L->top -= narg; /* remove args from the stack */ setsvalue2s(L, L->top, luaS_new(L, msg)); /* push error message */ api_incr_top(L); lua_unlock(L); return LUA_ERRRUN; } /* ** Do the work for 'lua_resume' in protected mode. Most of the work ** depends on the status of the coroutine: initial state, suspended ** inside a hook, or regularly suspended (optionally with a continuation ** function), plus erroneous cases: non-suspended coroutine or dead ** coroutine. */ static void resume (lua_State *L, void *ud) { int n = *(cast(int*, ud)); /* number of arguments */ StkId firstArg = L->top - n; /* first argument */ CallInfo *ci = L->ci; if (L->status == LUA_OK) { /* starting a coroutine? */ luaD_call(L, firstArg - 1, LUA_MULTRET); } else { /* resuming from previous yield */ lua_assert(L->status == LUA_YIELD); L->status = LUA_OK; /* mark that it is running (again) */ if (isLua(ci)) /* yielded inside a hook? 
*/ luaV_execute(L, ci); /* just continue running Lua code */ else { /* 'common' yield */ if (ci->u.c.k != NULL) { /* does it have a continuation function? */ lua_unlock(L); n = (*ci->u.c.k)(L, LUA_YIELD, ci->u.c.ctx); /* call continuation */ lua_lock(L); api_checknelems(L, n); } luaD_poscall(L, ci, n); /* finish 'luaD_call' */ } unroll(L, NULL); /* run continuation */ } } LUA_API int lua_resume (lua_State *L, lua_State *from, int nargs, int *nresults) { int status; lua_lock(L); if (L->status == LUA_OK) { /* may be starting a coroutine */ if (L->ci != &L->base_ci) /* not in base level? */ return resume_error(L, "cannot resume non-suspended coroutine", nargs); else if (L->top - (L->ci->func + 1) == nargs) /* no function? */ return resume_error(L, "cannot resume dead coroutine", nargs); } else if (L->status != LUA_YIELD) /* ended with errors? */ return resume_error(L, "cannot resume dead coroutine", nargs); if (from == NULL) L->nCcalls = CSTACKTHREAD; else /* correct 'nCcalls' for this thread */ L->nCcalls = getCcalls(from) - L->nci - CSTACKCF; if (L->nCcalls <= CSTACKERR) return resume_error(L, "C stack overflow", nargs); luai_userstateresume(L, nargs); api_checknelems(L, (L->status == LUA_OK) ? nargs + 1 : nargs); status = luaD_rawrunprotected(L, resume, &nargs); /* continue running after recoverable errors */ while (errorstatus(status) && recover(L, status)) { /* unroll continuation */ status = luaD_rawrunprotected(L, unroll, &status); } if (likely(!errorstatus(status))) lua_assert(status == L->status); /* normal end or yield */ else { /* unrecoverable error */ L->status = cast_byte(status); /* mark thread as 'dead' */ luaD_seterrorobj(L, status, L->top); /* push error message */ L->ci->top = L->top; } *nresults = (status == LUA_YIELD) ? L->ci->u2.nyield : cast_int(L->top - (L->ci->func + 1)); lua_unlock(L); return status; } LUA_API int lua_isyieldable (lua_State *L) { return yieldable(L); } LUA_API int lua_yieldk (lua_State *L, int nresults, lua_KContext ctx, lua_KFunction k) { CallInfo *ci; luai_userstateyield(L, nresults); lua_lock(L); ci = L->ci; api_checknelems(L, nresults); if (unlikely(!yieldable(L))) { if (L != G(L)->mainthread) luaG_runerror(L, "attempt to yield across a C-call boundary"); else luaG_runerror(L, "attempt to yield from outside a coroutine"); } L->status = LUA_YIELD; if (isLua(ci)) { /* inside a hook? */ lua_assert(!isLuacode(ci)); api_check(L, k == NULL, "hooks cannot continue after yielding"); ci->u2.nyield = 0; /* no results */ } else { if ((ci->u.c.k = k) != NULL) /* is there a continuation? */ ci->u.c.ctx = ctx; /* save context */ ci->u2.nyield = nresults; /* save number of results */ luaD_throw(L, LUA_YIELD); } lua_assert(ci->callstatus & CIST_HOOKED); /* must be inside a hook */ lua_unlock(L); return 0; /* return to 'luaD_hook' */ } /* ** Call the C function 'func' in protected mode, restoring basic ** thread information ('allowhook', etc.) and in particular ** its stack level in case of errors. */ int luaD_pcall (lua_State *L, Pfunc func, void *u, ptrdiff_t old_top, ptrdiff_t ef) { int status; CallInfo *old_ci = L->ci; lu_byte old_allowhooks = L->allowhook; ptrdiff_t old_errfunc = L->errfunc; L->errfunc = ef; status = luaD_rawrunprotected(L, func, u); if (unlikely(status != LUA_OK)) { /* an error occurred? 
*/ StkId oldtop = restorestack(L, old_top); L->ci = old_ci; L->allowhook = old_allowhooks; status = luaF_close(L, oldtop, status); oldtop = restorestack(L, old_top); /* previous call may change stack */ luaD_seterrorobj(L, status, oldtop); luaD_shrinkstack(L); } L->errfunc = old_errfunc; return status; } /* ** Execute a protected parser. */ struct SParser { /* data to 'f_parser' */ ZIO *z; Mbuffer buff; /* dynamic structure used by the scanner */ Dyndata dyd; /* dynamic structures used by the parser */ const char *mode; const char *name; }; static void checkmode (lua_State *L, const char *mode, const char *x) { if (mode && strchr(mode, x[0]) == NULL) { luaO_pushfstring(L, "attempt to load a %s chunk (mode is '%s')", x, mode); luaD_throw(L, LUA_ERRSYNTAX); } } static void f_parser (lua_State *L, void *ud) { LClosure *cl; struct SParser *p = cast(struct SParser *, ud); int c = zgetc(p->z); /* read first character */ if (c == LUA_SIGNATURE[0]) { checkmode(L, p->mode, "binary"); cl = luaU_undump(L, p->z, p->name); } else { checkmode(L, p->mode, "text"); cl = luaY_parser(L, p->z, &p->buff, &p->dyd, p->name, c); } lua_assert(cl->nupvalues == cl->p->sizeupvalues); luaF_initupvals(L, cl); } int luaD_protectedparser (lua_State *L, ZIO *z, const char *name, const char *mode) { struct SParser p; int status; incnny(L); /* cannot yield during parsing */ p.z = z; p.name = name; p.mode = mode; p.dyd.actvar.arr = NULL; p.dyd.actvar.size = 0; p.dyd.gt.arr = NULL; p.dyd.gt.size = 0; p.dyd.label.arr = NULL; p.dyd.label.size = 0; luaZ_initbuffer(L, &p.buff); status = luaD_pcall(L, f_parser, &p, savestack(L, L->top), L->errfunc); luaZ_freebuffer(L, &p.buff); luaM_freearray(L, p.dyd.actvar.arr, p.dyd.actvar.size); luaM_freearray(L, p.dyd.gt.arr, p.dyd.gt.size); luaM_freearray(L, p.dyd.label.arr, p.dyd.label.size); decnny(L); return status; }
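The continuation machinery that 'finishCcall' above closes out is what the public 'lua_callk'/'lua_pcallk' entry points rely on. A minimal host-side sketch of that API, assuming a stock Lua 5.4 build; 'l_protected_call' and 'k_after_call' are illustrative names, not anything from this file:

#include "lua.h"

/* Continuation: runs either directly (callee never yielded) or after
 * the coroutine machinery finishes the interrupted protected call. */
static int k_after_call (lua_State *L, int status, lua_KContext ctx) {
  (void)ctx;
  if (status != LUA_OK && status != LUA_YIELD)
    return lua_error(L);   /* re-raise: error object is on the stack top */
  return lua_gettop(L);    /* everything left on the stack is results */
}

/* C function expecting a Lua function plus its arguments on its stack;
 * unlike plain lua_pcall, it survives a yield from inside the callee. */
static int l_protected_call (lua_State *L) {
  int status = lua_pcallk(L, lua_gettop(L) - 1, LUA_MULTRET,
                          0, 0, k_after_call);
  return k_after_call(L, status, 0);  /* also handles the no-yield path */
}

Calling the continuation directly after 'lua_pcallk' is the documented idiom: the same code path then handles both the synchronous return and the resumed-after-yield case.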
/* ** $Id: ldo.c $ ** Stack and Call structure of Lua ** See Copyright Notice in lua.h */ #define ldo_c #define LUA_CORE #include "lprefix.h" #include <setjmp.h> #include <stdlib.h> #include <string.h> #include "lua.h" #include "lapi.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lmem.h" #include "lobject.h" #include "lopcodes.h" #include "lparser.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" #include "lundump.h" #include "lvm.h" #include "lzio.h" #define errorstatus(s) ((s) > LUA_YIELD) /* ** {====================================================== ** Error-recovery functions ** ======================================================= */ /* ** LUAI_THROW/LUAI_TRY define how Lua does exception handling. By ** default, Lua handles errors with exceptions when compiling as ** C++ code, with _longjmp/_setjmp when asked to use them, and with ** longjmp/setjmp otherwise. */ #if !defined(LUAI_THROW) /* { */ #if defined(__cplusplus) && !defined(LUA_USE_LONGJMP) /* { */ /* C++ exceptions */ #define LUAI_THROW(L,c) throw(c) #define LUAI_TRY(L,c,a) \ try { a } catch(...) { if ((c)->status == 0) (c)->status = -1; } #define luai_jmpbuf int /* dummy variable */ #elif defined(LUA_USE_POSIX) /* }{ */ /* in POSIX, try _longjmp/_setjmp (more efficient) */ #define LUAI_THROW(L,c) _longjmp((c)->b, 1) #define LUAI_TRY(L,c,a) if (_setjmp((c)->b) == 0) { a } #define luai_jmpbuf jmp_buf #else /* }{ */ /* ISO C handling with long jumps */ #define LUAI_THROW(L,c) longjmp((c)->b, 1) #define LUAI_TRY(L,c,a) if (setjmp((c)->b) == 0) { a } #define luai_jmpbuf jmp_buf #endif /* } */ #endif /* } */ /* chain list of long jump buffers */ struct lua_longjmp { struct lua_longjmp *previous; luai_jmpbuf b; volatile int status; /* error code */ }; void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) { switch (errcode) { case LUA_ERRMEM: { /* memory error? */ setsvalue2s(L, oldtop, G(L)->memerrmsg); /* reuse preregistered msg. */ break; } case LUA_ERRERR: { setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling")); break; } case CLOSEPROTECT: { setnilvalue(s2v(oldtop)); /* no error message */ break; } default: { setobjs2s(L, oldtop, L->top - 1); /* error message on current top */ break; } } L->top = oldtop + 1; } l_noret luaD_throw (lua_State *L, int errcode) { if (L->errorJmp) { /* thread has an error handler? */ L->errorJmp->status = errcode; /* set status */ LUAI_THROW(L, L->errorJmp); /* jump to it */ } else { /* thread has no error handler */ global_State *g = G(L); errcode = luaF_close(L, L->stack, errcode); /* close all upvalues */ L->status = cast_byte(errcode); /* mark it as dead */ if (g->mainthread->errorJmp) { /* main thread has a handler? */ setobjs2s(L, g->mainthread->top++, L->top - 1); /* copy error obj. */ luaD_throw(g->mainthread, errcode); /* re-throw in main thread */ } else { /* no handler at all; abort */ if (g->panic) { /* panic function? */ luaD_seterrorobj(L, errcode, L->top); /* assume EXTRA_STACK */ if (L->ci->top < L->top) L->ci->top = L->top; /* pushing msg. 
can break this invariant */ lua_unlock(L); g->panic(L); /* call panic function (last chance to jump out) */ } abort(); } } } int luaD_rawrunprotected (lua_State *L, Pfunc f, void *ud) { global_State *g = G(L); l_uint32 oldnCcalls = g->Cstacklimit - (L->nCcalls + L->nci); struct lua_longjmp lj; lj.status = LUA_OK; lj.previous = L->errorJmp; /* chain new error handler */ L->errorJmp = &lj; LUAI_TRY(L, &lj, (*f)(L, ud); ); L->errorJmp = lj.previous; /* restore old error handler */ L->nCcalls = g->Cstacklimit - oldnCcalls - L->nci; return lj.status; } /* }====================================================== */ /* ** {================================================================== ** Stack reallocation ** =================================================================== */ static void correctstack (lua_State *L, StkId oldstack, StkId newstack) { CallInfo *ci; UpVal *up; if (oldstack == newstack) return; /* stack address did not change */ L->top = (L->top - oldstack) + newstack; for (up = L->openupval; up != NULL; up = up->u.open.next) up->v = s2v((uplevel(up) - oldstack) + newstack); for (ci = L->ci; ci != NULL; ci = ci->previous) { ci->top = (ci->top - oldstack) + newstack; ci->func = (ci->func - oldstack) + newstack; if (isLua(ci)) ci->u.l.trap = 1; /* signal to update 'trap' in 'luaV_execute' */ } } /* some space for error handling */ #define ERRORSTACKSIZE (LUAI_MAXSTACK + 200) int luaD_reallocstack (lua_State *L, int newsize, int raiseerror) { int lim = L->stacksize; StkId newstack = luaM_reallocvector(L, L->stack, lim, newsize, StackValue); lua_assert(newsize <= LUAI_MAXSTACK || newsize == ERRORSTACKSIZE); lua_assert(L->stack_last - L->stack == L->stacksize - EXTRA_STACK); if (unlikely(newstack == NULL)) { /* reallocation failed? */ if (raiseerror) luaM_error(L); else return 0; /* do not raise an error */ } for (; lim < newsize; lim++) setnilvalue(s2v(newstack + lim)); /* erase new segment */ correctstack(L, L->stack, newstack); L->stack = newstack; L->stacksize = newsize; L->stack_last = L->stack + newsize - EXTRA_STACK; return 1; } /* ** Try to grow the stack by at least 'n' elements. when 'raiseerror' ** is true, raises any error; otherwise, return 0 in case of errors. */ int luaD_growstack (lua_State *L, int n, int raiseerror) { int size = L->stacksize; int newsize = 2 * size; /* tentative new size */ if (unlikely(size > LUAI_MAXSTACK)) { /* need more space after extra size? */ if (raiseerror) luaD_throw(L, LUA_ERRERR); /* error inside message handler */ else return 0; } else { int needed = cast_int(L->top - L->stack) + n + EXTRA_STACK; if (newsize > LUAI_MAXSTACK) /* cannot cross the limit */ newsize = LUAI_MAXSTACK; if (newsize < needed) /* but must respect what was asked for */ newsize = needed; if (unlikely(newsize > LUAI_MAXSTACK)) { /* stack overflow? 
*/ /* add extra size to be able to handle the error message */ luaD_reallocstack(L, ERRORSTACKSIZE, raiseerror); if (raiseerror) luaG_runerror(L, "stack overflow"); else return 0; } } /* else no errors */ return luaD_reallocstack(L, newsize, raiseerror); } static int stackinuse (lua_State *L) { CallInfo *ci; StkId lim = L->top; for (ci = L->ci; ci != NULL; ci = ci->previous) { if (lim < ci->top) lim = ci->top; } lua_assert(lim <= L->stack_last); return cast_int(lim - L->stack) + 1; /* part of stack in use */ } void luaD_shrinkstack (lua_State *L) { int inuse = stackinuse(L); int goodsize = inuse + BASIC_STACK_SIZE; if (goodsize > LUAI_MAXSTACK) goodsize = LUAI_MAXSTACK; /* respect stack limit */ /* if thread is currently not handling a stack overflow and its good size is smaller than current size, shrink its stack */ if (inuse <= (LUAI_MAXSTACK - EXTRA_STACK) && goodsize < L->stacksize) luaD_reallocstack(L, goodsize, 0); /* ok if that fails */ else /* don't change stack */ condmovestack(L,{},{}); /* (change only for debugging) */ luaE_shrinkCI(L); /* shrink CI list */ } void luaD_inctop (lua_State *L) { luaD_checkstack(L, 1); L->top++; } /* }================================================================== */ /* ** Call a hook for the given event. Make sure there is a hook to be ** called. (Both 'L->hook' and 'L->hookmask', which trigger this ** function, can be changed asynchronously by signals.) */ void luaD_hook (lua_State *L, int event, int line, int ftransfer, int ntransfer) { lua_Hook hook = L->hook; if (hook && L->allowhook) { /* make sure there is a hook */ int mask = CIST_HOOKED; CallInfo *ci = L->ci; ptrdiff_t top = savestack(L, L->top); ptrdiff_t ci_top = savestack(L, ci->top); lua_Debug ar; ar.event = event; ar.currentline = line; ar.i_ci = ci; if (ntransfer != 0) { mask |= CIST_TRAN; /* 'ci' has transfer information */ ci->u2.transferinfo.ftransfer = ftransfer; ci->u2.transferinfo.ntransfer = ntransfer; } luaD_checkstack(L, LUA_MINSTACK); /* ensure minimum stack size */ if (L->top + LUA_MINSTACK > ci->top) ci->top = L->top + LUA_MINSTACK; L->allowhook = 0; /* cannot call hooks inside a hook */ ci->callstatus |= mask; lua_unlock(L); (*hook)(L, &ar); lua_lock(L); lua_assert(!L->allowhook); L->allowhook = 1; ci->top = restorestack(L, ci_top); L->top = restorestack(L, top); ci->callstatus &= ~mask; } } /* ** Executes a call hook for Lua functions. This function is called ** whenever 'hookmask' is not zero, so it checks whether call hooks are ** active. */ void luaD_hookcall (lua_State *L, CallInfo *ci) { int hook = (ci->callstatus & CIST_TAIL) ? LUA_HOOKTAILCALL : LUA_HOOKCALL; Proto *p; if (!(L->hookmask & LUA_MASKCALL)) /* some other hook? */ return; /* don't call hook */ p = clLvalue(s2v(ci->func))->p; L->top = ci->top; /* prepare top */ ci->u.l.savedpc++; /* hooks assume 'pc' is already incremented */ luaD_hook(L, hook, -1, 1, p->numparams); ci->u.l.savedpc--; /* correct 'pc' */ } static StkId rethook (lua_State *L, CallInfo *ci, StkId firstres, int nres) { ptrdiff_t oldtop = savestack(L, L->top); /* hook may change top */ int delta = 0; if (isLuacode(ci)) { Proto *p = ci_func(ci)->p; if (p->is_vararg) delta = ci->u.l.nextraargs + p->numparams + 1; if (L->top < ci->top) L->top = ci->top; /* correct top to run hook */ } if (L->hookmask & LUA_MASKRET) { /* is return hook on? 
*/ int ftransfer; ci->func += delta; /* if vararg, back to virtual 'func' */ ftransfer = cast(unsigned short, firstres - ci->func); luaD_hook(L, LUA_HOOKRET, -1, ftransfer, nres); /* call it */ ci->func -= delta; } if (isLua(ci = ci->previous)) L->oldpc = pcRel(ci->u.l.savedpc, ci_func(ci)->p); /* update 'oldpc' */ return restorestack(L, oldtop); } /* ** Check whether 'func' has a '__call' metafield. If so, put it in the ** stack, below original 'func', so that 'luaD_call' can call it. Raise ** an error if there is no '__call' metafield. */ void luaD_tryfuncTM (lua_State *L, StkId func) { const TValue *tm = luaT_gettmbyobj(L, s2v(func), TM_CALL); StkId p; if (unlikely(ttisnil(tm))) luaG_typeerror(L, s2v(func), "call"); /* nothing to call */ for (p = L->top; p > func; p--) /* open space for metamethod */ setobjs2s(L, p, p-1); L->top++; /* stack space pre-allocated by the caller */ setobj2s(L, func, tm); /* metamethod is the new function to be called */ } /* ** Given 'nres' results at 'firstResult', move 'wanted' of them to 'res'. ** Handle most typical cases (zero results for commands, one result for ** expressions, multiple results for tail calls/single parameters) ** separated. */ static void moveresults (lua_State *L, StkId res, int nres, int wanted) { StkId firstresult; int i; switch (wanted) { /* handle typical cases separately */ case 0: /* no values needed */ L->top = res; return; case 1: /* one value needed */ if (nres == 0) /* no results? */ setnilvalue(s2v(res)); /* adjust with nil */ else setobjs2s(L, res, L->top - nres); /* move it to proper place */ L->top = res + 1; return; case LUA_MULTRET: wanted = nres; /* we want all results */ break; default: /* multiple results (or to-be-closed variables) */ if (hastocloseCfunc(wanted)) { /* to-be-closed variables? */ ptrdiff_t savedres = savestack(L, res); luaF_close(L, res, LUA_OK); /* may change the stack */ res = restorestack(L, savedres); wanted = codeNresults(wanted); /* correct value */ if (wanted == LUA_MULTRET) wanted = nres; } break; } firstresult = L->top - nres; /* index of first result */ /* move all results to correct place */ for (i = 0; i < nres && i < wanted; i++) setobjs2s(L, res + i, firstresult + i); for (; i < wanted; i++) /* complete wanted number of results */ setnilvalue(s2v(res + i)); L->top = res + wanted; /* top points after the last result */ } /* ** Finishes a function call: calls hook if necessary, removes CallInfo, ** moves current number of results to proper place. */ void luaD_poscall (lua_State *L, CallInfo *ci, int nres) { if (L->hookmask) L->top = rethook(L, ci, L->top - nres, nres); L->ci = ci->previous; /* back to caller */ /* move results to proper place */ moveresults(L, ci->func, nres, ci->nresults); } #define next_ci(L) (L->ci->next ? L->ci->next : luaE_extendCI(L)) /* ** Prepare a function for a tail call, building its call info on top ** of the current call info. 'narg1' is the number of arguments plus 1 ** (so that it includes the function itself). 
*/ void luaD_pretailcall (lua_State *L, CallInfo *ci, StkId func, int narg1) { Proto *p = clLvalue(s2v(func))->p; int fsize = p->maxstacksize; /* frame size */ int nfixparams = p->numparams; int i; for (i = 0; i < narg1; i++) /* move down function and arguments */ setobjs2s(L, ci->func + i, func + i); checkstackGC(L, fsize); func = ci->func; /* moved-down function */ for (; narg1 <= nfixparams; narg1++) setnilvalue(s2v(func + narg1)); /* complete missing arguments */ ci->top = func + 1 + fsize; /* top for new function */ lua_assert(ci->top <= L->stack_last); ci->u.l.savedpc = p->code; /* starting point */ ci->callstatus |= CIST_TAIL; L->top = func + narg1; /* set top */ } /* ** Call a function (C or Lua). The function to be called is at *func. ** The arguments are on the stack, right after the function. ** When returns, all the results are on the stack, starting at the original ** function position. */ void luaD_call (lua_State *L, StkId func, int nresults) { lua_CFunction f; retry: switch (ttypetag(s2v(func))) { case LUA_VCCL: /* C closure */ f = clCvalue(s2v(func))->f; goto Cfunc; case LUA_VLCF: /* light C function */ f = fvalue(s2v(func)); Cfunc: { int n; /* number of returns */ CallInfo *ci; checkstackGCp(L, LUA_MINSTACK, func); /* ensure minimum stack size */ L->ci = ci = next_ci(L); ci->nresults = nresults; ci->callstatus = CIST_C; ci->top = L->top + LUA_MINSTACK; ci->func = func; lua_assert(ci->top <= L->stack_last); if (L->hookmask & LUA_MASKCALL) { int narg = cast_int(L->top - func) - 1; luaD_hook(L, LUA_HOOKCALL, -1, 1, narg); } lua_unlock(L); n = (*f)(L); /* do the actual call */ lua_lock(L); api_checknelems(L, n); luaD_poscall(L, ci, n); break; } case LUA_VLCL: { /* Lua function */ CallInfo *ci; Proto *p = clLvalue(s2v(func))->p; int narg = cast_int(L->top - func) - 1; /* number of real arguments */ int nfixparams = p->numparams; int fsize = p->maxstacksize; /* frame size */ checkstackGCp(L, fsize, func); L->ci = ci = next_ci(L); ci->nresults = nresults; ci->u.l.savedpc = p->code; /* starting point */ ci->callstatus = 0; ci->top = func + 1 + fsize; ci->func = func; L->ci = ci; for (; narg < nfixparams; narg++) setnilvalue(s2v(L->top++)); /* complete missing arguments */ lua_assert(ci->top <= L->stack_last); luaV_execute(L, ci); /* run the function */ break; } default: { /* not a function */ checkstackGCp(L, 1, func); /* space for metamethod */ luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */ goto retry; /* try again with metamethod */ } } } /* ** Similar to 'luaD_call', but does not allow yields during the call. */ void luaD_callnoyield (lua_State *L, StkId func, int nResults) { incXCcalls(L); if (getCcalls(L) <= CSTACKERR) { /* possible C stack overflow? */ luaE_exitCcall(L); /* to compensate decrement in next call */ luaE_enterCcall(L); /* check properly */ } luaD_call(L, func, nResults); decXCcalls(L); } /* ** Completes the execution of an interrupted C function, calling its ** continuation function. */ static void finishCcall (lua_State *L, int status) { CallInfo *ci = L->ci; int n; /* must have a continuation and must be able to call it */ lua_assert(ci->u.c.k != NULL && yieldable(L)); /* error status can only happen in a protected call */ lua_assert((ci->callstatus & CIST_YPCALL) || status == LUA_YIELD); if (ci->callstatus & CIST_YPCALL) { /* was inside a pcall? 
*/ ci->callstatus &= ~CIST_YPCALL; /* continuation is also inside it */ L->errfunc = ci->u.c.old_errfunc; /* with the same error function */ } /* finish 'lua_callk'/'lua_pcall'; CIST_YPCALL and 'errfunc' already handled */ adjustresults(L, ci->nresults); lua_unlock(L); n = (*ci->u.c.k)(L, status, ci->u.c.ctx); /* call continuation function */ lua_lock(L); api_checknelems(L, n); luaD_poscall(L, ci, n); /* finish 'luaD_call' */ } /* ** Executes "full continuation" (everything in the stack) of a ** previously interrupted coroutine until the stack is empty (or another ** interruption long-jumps out of the loop). If the coroutine is ** recovering from an error, 'ud' points to the error status, which must ** be passed to the first continuation function (otherwise the default ** status is LUA_YIELD). */ static void unroll (lua_State *L, void *ud) { CallInfo *ci; if (ud != NULL) /* error status? */ finishCcall(L, *(int *)ud); /* finish 'lua_pcallk' callee */ while ((ci = L->ci) != &L->base_ci) { /* something in the stack */ if (!isLua(ci)) /* C function? */ finishCcall(L, LUA_YIELD); /* complete its execution */ else { /* Lua function */ luaV_finishOp(L); /* finish interrupted instruction */ luaV_execute(L, ci); /* execute down to higher C 'boundary' */ } } } /* ** Try to find a suspended protected call (a "recover point") for the ** given thread. */ static CallInfo *findpcall (lua_State *L) { CallInfo *ci; for (ci = L->ci; ci != NULL; ci = ci->previous) { /* search for a pcall */ if (ci->callstatus & CIST_YPCALL) return ci; } return NULL; /* no pending pcall */ } /* ** Recovers from an error in a coroutine. Finds a recover point (if ** there is one) and completes the execution of the interrupted ** 'luaD_pcall'. If there is no recover point, returns zero. */ static int recover (lua_State *L, int status) { StkId oldtop; CallInfo *ci = findpcall(L); if (ci == NULL) return 0; /* no recovery point */ /* "finish" luaD_pcall */ oldtop = restorestack(L, ci->u2.funcidx); luaF_close(L, oldtop, status); /* may change the stack */ oldtop = restorestack(L, ci->u2.funcidx); luaD_seterrorobj(L, status, oldtop); L->ci = ci; L->allowhook = getoah(ci->callstatus); /* restore original 'allowhook' */ luaD_shrinkstack(L); L->errfunc = ci->u.c.old_errfunc; return 1; /* continue running the coroutine */ } /* ** Signal an error in the call to 'lua_resume', not in the execution ** of the coroutine itself. (Such errors should not be handled by any ** coroutine error handler and should not kill the coroutine.) */ static int resume_error (lua_State *L, const char *msg, int narg) { L->top -= narg; /* remove args from the stack */ setsvalue2s(L, L->top, luaS_new(L, msg)); /* push error message */ api_incr_top(L); lua_unlock(L); return LUA_ERRRUN; } /* ** Do the work for 'lua_resume' in protected mode. Most of the work ** depends on the status of the coroutine: initial state, suspended ** inside a hook, or regularly suspended (optionally with a continuation ** function), plus erroneous cases: non-suspended coroutine or dead ** coroutine. */ static void resume (lua_State *L, void *ud) { int n = *(cast(int*, ud)); /* number of arguments */ StkId firstArg = L->top - n; /* first argument */ CallInfo *ci = L->ci; if (L->status == LUA_OK) { /* starting a coroutine? */ luaD_call(L, firstArg - 1, LUA_MULTRET); } else { /* resuming from previous yield */ lua_assert(L->status == LUA_YIELD); L->status = LUA_OK; /* mark that it is running (again) */ if (isLua(ci)) /* yielded inside a hook? 
*/ luaV_execute(L, ci); /* just continue running Lua code */ else { /* 'common' yield */ if (ci->u.c.k != NULL) { /* does it have a continuation function? */ lua_unlock(L); n = (*ci->u.c.k)(L, LUA_YIELD, ci->u.c.ctx); /* call continuation */ lua_lock(L); api_checknelems(L, n); } luaD_poscall(L, ci, n); /* finish 'luaD_call' */ } unroll(L, NULL); /* run continuation */ } } LUA_API int lua_resume (lua_State *L, lua_State *from, int nargs, int *nresults) { int status; lua_lock(L); if (L->status == LUA_OK) { /* may be starting a coroutine */ if (L->ci != &L->base_ci) /* not in base level? */ return resume_error(L, "cannot resume non-suspended coroutine", nargs); else if (L->top - (L->ci->func + 1) == nargs) /* no function? */ return resume_error(L, "cannot resume dead coroutine", nargs); } else if (L->status != LUA_YIELD) /* ended with errors? */ return resume_error(L, "cannot resume dead coroutine", nargs); if (from == NULL) L->nCcalls = CSTACKTHREAD; else /* correct 'nCcalls' for this thread */ L->nCcalls = getCcalls(from) - L->nci - CSTACKCF; if (L->nCcalls <= CSTACKERR) return resume_error(L, "C stack overflow", nargs); luai_userstateresume(L, nargs); api_checknelems(L, (L->status == LUA_OK) ? nargs + 1 : nargs); status = luaD_rawrunprotected(L, resume, &nargs); /* continue running after recoverable errors */ while (errorstatus(status) && recover(L, status)) { /* unroll continuation */ status = luaD_rawrunprotected(L, unroll, &status); } if (likely(!errorstatus(status))) lua_assert(status == L->status); /* normal end or yield */ else { /* unrecoverable error */ L->status = cast_byte(status); /* mark thread as 'dead' */ luaD_seterrorobj(L, status, L->top); /* push error message */ L->ci->top = L->top; } *nresults = (status == LUA_YIELD) ? L->ci->u2.nyield : cast_int(L->top - (L->ci->func + 1)); lua_unlock(L); return status; } LUA_API int lua_isyieldable (lua_State *L) { return yieldable(L); } LUA_API int lua_yieldk (lua_State *L, int nresults, lua_KContext ctx, lua_KFunction k) { CallInfo *ci; luai_userstateyield(L, nresults); lua_lock(L); ci = L->ci; api_checknelems(L, nresults); if (unlikely(!yieldable(L))) { if (L != G(L)->mainthread) luaG_runerror(L, "attempt to yield across a C-call boundary"); else luaG_runerror(L, "attempt to yield from outside a coroutine"); } L->status = LUA_YIELD; if (isLua(ci)) { /* inside a hook? */ lua_assert(!isLuacode(ci)); api_check(L, k == NULL, "hooks cannot continue after yielding"); ci->u2.nyield = 0; /* no results */ } else { if ((ci->u.c.k = k) != NULL) /* is there a continuation? */ ci->u.c.ctx = ctx; /* save context */ ci->u2.nyield = nresults; /* save number of results */ luaD_throw(L, LUA_YIELD); } lua_assert(ci->callstatus & CIST_HOOKED); /* must be inside a hook */ lua_unlock(L); return 0; /* return to 'luaD_hook' */ } /* ** Call the C function 'func' in protected mode, restoring basic ** thread information ('allowhook', etc.) and in particular ** its stack level in case of errors. */ int luaD_pcall (lua_State *L, Pfunc func, void *u, ptrdiff_t old_top, ptrdiff_t ef) { int status; CallInfo *old_ci = L->ci; lu_byte old_allowhooks = L->allowhook; ptrdiff_t old_errfunc = L->errfunc; L->errfunc = ef; status = luaD_rawrunprotected(L, func, u); if (unlikely(status != LUA_OK)) { /* an error occurred? 
*/ StkId oldtop = restorestack(L, old_top); L->ci = old_ci; L->allowhook = old_allowhooks; status = luaF_close(L, oldtop, status); oldtop = restorestack(L, old_top); /* previous call may change stack */ luaD_seterrorobj(L, status, oldtop); luaD_shrinkstack(L); } L->errfunc = old_errfunc; return status; } /* ** Execute a protected parser. */ struct SParser { /* data to 'f_parser' */ ZIO *z; Mbuffer buff; /* dynamic structure used by the scanner */ Dyndata dyd; /* dynamic structures used by the parser */ const char *mode; const char *name; }; static void checkmode (lua_State *L, const char *mode, const char *x) { if (mode && strchr(mode, x[0]) == NULL) { luaO_pushfstring(L, "attempt to load a %s chunk (mode is '%s')", x, mode); luaD_throw(L, LUA_ERRSYNTAX); } } static void f_parser (lua_State *L, void *ud) { LClosure *cl; struct SParser *p = cast(struct SParser *, ud); int c = zgetc(p->z); /* read first character */ if (c == LUA_SIGNATURE[0]) { checkmode(L, p->mode, "binary"); cl = luaU_undump(L, p->z, p->name); } else { checkmode(L, p->mode, "text"); cl = luaY_parser(L, p->z, &p->buff, &p->dyd, p->name, c); } lua_assert(cl->nupvalues == cl->p->sizeupvalues); luaF_initupvals(L, cl); } int luaD_protectedparser (lua_State *L, ZIO *z, const char *name, const char *mode) { struct SParser p; int status; incnny(L); /* cannot yield during parsing */ p.z = z; p.name = name; p.mode = mode; p.dyd.actvar.arr = NULL; p.dyd.actvar.size = 0; p.dyd.gt.arr = NULL; p.dyd.gt.size = 0; p.dyd.label.arr = NULL; p.dyd.label.size = 0; luaZ_initbuffer(L, &p.buff); status = luaD_pcall(L, f_parser, &p, savestack(L, L->top), L->errfunc); luaZ_freebuffer(L, &p.buff); luaM_freearray(L, p.dyd.actvar.arr, p.dyd.actvar.size); luaM_freearray(L, p.dyd.gt.arr, p.dyd.gt.size); luaM_freearray(L, p.dyd.label.arr, p.dyd.label.size); decnny(L); return status; }
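For orientation, the 'lua_resume' defined above (note the 5.4 signature with an explicit 'nresults' out-parameter) is driven from the host side roughly as follows; a hedged sketch with an illustrative 'drive' helper, assuming 'co' already holds a function and its arguments:

#include "lua.h"

/* Run a coroutine to completion, discarding whatever it yields.
 * Returns LUA_OK on a normal end, or an error code (with the error
 * object left on 'co') otherwise. */
static int drive (lua_State *main_L, lua_State *co, int nargs) {
  int status, nres;
  for (;;) {
    status = lua_resume(co, main_L, nargs, &nres);
    if (status != LUA_YIELD)
      return status;
    lua_pop(co, nres);   /* discard yielded values */
    nargs = 0;           /* later resumes pass no new arguments */
  }
}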
void luaD_callnoyield (lua_State *L, StkId func, int nResults) { incXCcalls(L); if (getCcalls(L) <= CSTACKERR) /* possible stack overflow? */ luaE_freeCI(L); luaD_call(L, func, nResults); decXCcalls(L); }
void luaD_callnoyield (lua_State *L, StkId func, int nResults) { incXCcalls(L); if (getCcalls(L) <= CSTACKERR) { /* possible C stack overflow? */ luaE_exitCcall(L); /* to compensate decrement in next call */ luaE_enterCcall(L); /* check properly */ } luaD_call(L, func, nResults); decXCcalls(L); }
{'added': [(521, ' if (getCcalls(L) <= CSTACKERR) { /* possible C stack overflow? */'), (522, ' luaE_exitCcall(L); /* to compensate decrement in next call */'), (523, ' luaE_enterCcall(L); /* check properly */'), (524, ' }')], 'deleted': [(518, '** If there is a stack overflow, freeing all CI structures will'), (519, "** force the subsequent call to invoke 'luaE_extendCI', which then"), (520, '** will raise any errors.'), (524, ' if (getCcalls(L) <= CSTACKERR) /* possible stack overflow? */'), (525, ' luaE_freeCI(L);')]}
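The diff above is the whole of the CVE-2020-24342 fix (CWE-119 per the metadata below): the deleted lines show 'luaD_callnoyield' reacting to a possible C-stack overflow by freeing all CallInfo structures and trusting a later 'luaE_extendCI' to raise the error, while the added lines call 'luaE_exitCcall' and then 'luaE_enterCcall' so the stack check runs immediately instead of being deferred. A reduced, self-contained sketch of the patched counting pattern, with illustrative names and assumed limits (this is not the real lstate.c/ldo.c implementation):

#include <setjmp.h>
#include <stdio.h>

enum { CSTACK_BUDGET = 2000, XCALL_COST = 10, ERR_MARGIN = 200 }; /* assumed */

static int ccalls_left = CSTACK_BUDGET;  /* remaining C-call budget */
static jmp_buf on_error;

static void enterCcall(void) {           /* ~ luaE_enterCcall: real check */
  if (--ccalls_left <= ERR_MARGIN)
    longjmp(on_error, 1);                /* raise "C stack overflow" */
}

static void exitCcall(void) {            /* ~ luaE_exitCcall */
  ccalls_left++;
}

static void call_no_yield(void (*f)(void)) {
  ccalls_left -= XCALL_COST;             /* cheap bump, no check (~incXCcalls) */
  if (ccalls_left <= ERR_MARGIN) {       /* possible C stack overflow? */
    exitCcall();                         /* compensate decrement in next call */
    enterCcall();                        /* check properly: may raise here */
  }
  f();
  ccalls_left += XCALL_COST;             /* ~ decXCcalls */
}

static void work(void) { /* the guarded call would go here */ }

int main(void) {
  if (setjmp(on_error) != 0) {
    fputs("C stack overflow\n", stderr);
    return 1;
  }
  call_no_yield(work);
  return 0;
}

The exit/enter pair nets to zero on the counter but forces the real check to run, so a chain of non-yieldable calls can no longer drift past the red zone before any error is raised.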
4
5
581
4,293
7
47
2
https://github.com/lua/lua
CVE-2020-24342
CWE-119
3,163
verifier.c
C
check_alu_op
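The pre-patch verifier.c field that follows opens with a long design comment; the pointer-arithmetic pattern it walks through (copy R10, add a negative constant, pass the result as a map key) can be written out with the kernel's instruction macros. A sketch assuming the kernel-internal <linux/filter.h> header and an already-created map with 4-byte keys whose fd is the illustrative constant 'map_fd':

#include <linux/filter.h>   /* BPF_* instruction macros */

enum { map_fd = 4 };        /* assumed: fd of an existing 4-byte-key map */

/* The exact pattern the header comment describes: R10 (frame pointer)
 * is copied and offset to yield PTR_TO_STACK, the slot is initialized
 * so the stack-read check accepts it as a map key, and R0 comes back
 * as PTR_TO_MAP_VALUE_OR_NULL. */
static const struct bpf_insn lookup_pattern[] = {
    BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),      /* *(u32 *)(fp - 4) = 0  */
    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),      /* R2: frame pointer     */
    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),     /* R2: PTR_TO_STACK, -4  */
    BPF_LD_MAP_FD(BPF_REG_1, map_fd),          /* R1: CONST_PTR_TO_MAP  */
    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
    BPF_MOV64_IMM(BPF_REG_0, 0),               /* don't leak the pointer */
    BPF_EXIT_INSN(),
};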
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/filter.h> #include <net/netlink.h> #include <linux/file.h> #include <linux/vmalloc.h> #include <linux/stringify.h> #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { #define BPF_PROG_TYPE(_id, _name) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE }; /* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. * All paths of conditional branches are analyzed until 'bpf_exit' insn. * * The first pass is depth-first-search to check that the program is a DAG. * It rejects the following programs: * - larger than BPF_MAXINSNS insns * - if loop is present (detected via back-edge) * - unreachable insns exist (shouldn't be a forest. program = one function) * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. * Since it's analyzing all pathes through the program, the length of the * analysis is limited to 64k insn, which may be hit even if total number of * insn is less then 4K, but there are too many branches that change stack/regs. * Number of 'branches to be analyzed' is limited to 1k * * On entry to each instruction, each register has a type, and the instruction * changes the types of the registers depending on instruction semantics. * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is * copied to R1. * * All registers are 64-bit. * R0 - return register * R1-R5 argument passing registers * R6-R9 callee saved registers * R10 - frame pointer read-only * * At the start of BPF program the register R1 contains a pointer to bpf_context * and has type PTR_TO_CTX. * * Verifier tracks arithmetic operations on pointers in case: * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), * 1st insn copies R10 (which has FRAME_PTR) type into R1 * and 2nd arithmetic instruction is pattern matched to recognize * that it wants to construct a pointer to some element within stack. * So after 2nd insn, the register R1 has type PTR_TO_STACK * (and -20 constant is saved for further stack bounds checking). * Meaning that this reg is a pointer to stack plus known immediate constant. * * Most of the time the registers have SCALAR_VALUE type, which * means the register has some value, but it's not a valid pointer. * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer * types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible. 
* * registers used to pass values to function calls are checked against * function argument constraints. * * ARG_PTR_TO_MAP_KEY is one of such argument constraints. * It means that the register type passed to this function must be * PTR_TO_STACK and it will be used inside the function as * 'pointer to map element key' * * For example the argument constraints for bpf_map_lookup_elem(): * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, * .arg1_type = ARG_CONST_MAP_PTR, * .arg2_type = ARG_PTR_TO_MAP_KEY, * * ret_type says that this function returns 'pointer to map elem value or null' * function expects 1st argument to be a const pointer to 'struct bpf_map' and * 2nd argument should be a pointer to stack, which will be used inside * the helper function as a pointer to map element key. * * On the kernel side the helper function looks like: * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) * { * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; * void *key = (void *) (unsigned long) r2; * void *value; * * here kernel can access 'key' and 'map' pointers safely, knowing that * [key, key + map->key_size) bytes are valid and were initialized on * the stack of eBPF program. * } * * Corresponding eBPF program may look like: * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), * here verifier looks at prototype of map_lookup_elem() and sees: * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, * Now verifier knows that this map has key of R1->map_ptr->key_size bytes * * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, * Now verifier checks that [R2, R2 + map's key_size) are within stack limits * and were initialized prior to this call. * If it's ok, then verifier allows this BPF_CALL insn and looks at * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function * returns ether pointer to map value or NULL. * * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' * insn, the register holding that pointer in the true branch changes state to * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false * branch. See check_cond_jmp_op(). * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ struct bpf_verifier_stack_elem { /* verifer state is 'st' * before processing instruction 'insn_idx' * and after processing instruction 'prev_insn_idx' */ struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; }; #define BPF_COMPLEXITY_LIMIT_INSNS 131072 #define BPF_COMPLEXITY_LIMIT_STACK 1024 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; int regno; int access_size; }; static DEFINE_MUTEX(bpf_verifier_lock); /* log_level controls verbosity level of eBPF verifier. 
* verbose() is used to dump the verification trace to the log, so the user * can figure out what's wrong with the program */ static __printf(2, 3) void verbose(struct bpf_verifier_env *env, const char *fmt, ...) { struct bpf_verifer_log *log = &env->log; unsigned int n; va_list args; if (!log->level || !log->ubuf || bpf_verifier_log_full(log)) return; va_start(args, fmt); n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); va_end(args); WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, "verifier log line truncated - local buffer too short\n"); n = min(log->len_total - log->len_used - 1, n); log->kbuf[n] = '\0'; if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else log->ubuf = NULL; } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", }; static void print_verifier_state(struct bpf_verifier_env *env, struct bpf_verifier_state *state) { struct bpf_reg_state *reg; enum bpf_reg_type t; int i; for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d=%s", i, reg_type_str[t]); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); } else { verbose(env, "(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] == STACK_SPILL) verbose(env, " fp%d=%s", -MAX_BPF_STACK + i * BPF_REG_SIZE, reg_type_str[state->stack[i].spilled_ptr.type]); } verbose(env, "\n"); } static int copy_stack_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { if (!src->stack) return 0; if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { /* internal bug, make state invalid to reject the program */ memset(dst, 0, sizeof(*dst)); return -EFAULT; } memcpy(dst->stack, src->stack, sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); return 0; } /* 
do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_verifier_state() to grow the stack size. * Note there is a non-zero 'parent' pointer inside bpf_verifier_state * which this function copies over. It points to previous bpf_verifier_state * which is never reallocated */ static int realloc_verifier_state(struct bpf_verifier_state *state, int size, bool copy_old) { u32 old_size = state->allocated_stack; struct bpf_stack_state *new_stack; int slot = size / BPF_REG_SIZE; if (size <= old_size || !size) { if (copy_old) return 0; state->allocated_stack = slot * BPF_REG_SIZE; if (!size && old_size) { kfree(state->stack); state->stack = NULL; } return 0; } new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), GFP_KERNEL); if (!new_stack) return -ENOMEM; if (copy_old) { if (state->stack) memcpy(new_stack, state->stack, sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); memset(new_stack + old_size / BPF_REG_SIZE, 0, sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); } state->allocated_stack = slot * BPF_REG_SIZE; kfree(state->stack); state->stack = new_stack; return 0; } static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { kfree(state->stack); if (free_self) kfree(state); } /* copy verifier state from src to dst growing dst stack space * when necessary to accommodate larger src stack */ static int copy_verifier_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { int err; err = realloc_verifier_state(dst, src->allocated_stack, false); if (err) return err; memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack)); return copy_stack_state(dst, src); } static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, int *insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem, *head = env->head; int err; if (env->head == NULL) return -ENOENT; if (cur) { err = copy_verifier_state(cur, &head->st); if (err) return err; } if (insn_idx) *insn_idx = head->insn_idx; if (prev_insn_idx) *prev_insn_idx = head->prev_insn_idx; elem = head->next; free_verifier_state(&head->st, false); kfree(head); env->head = elem; env->stack_size--; return 0; } static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose(env, "BPF program is too complex\n"); goto err; } return &elem->st; err: /* pop all elements and return */ while (!pop_stack(env, NULL, NULL)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; static void __mark_reg_not_init(struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. 
*/ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { reg->id = 0; reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. */ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. */ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. 
*/ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { reg->var_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { reg->type = SCALAR_VALUE; reg->id = 0; reg->off = 0; reg->var_off = tnum_unknown; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_unknown(regs + regno); } static void __mark_reg_not_init(struct bpf_reg_state *reg) { __mark_reg_unknown(reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_verifier_env *env, struct bpf_reg_state *regs) { int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) { struct bpf_verifier_state *parent = state->parent; if (regno == BPF_REG_FP) /* We don't need to worry about FP liveness because it's read-only */ return; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->regs[regno].live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->regs[regno].live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_reg_state *regs = env->cur_state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (regs[regno].type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } mark_reg_read(env->cur_state, regno); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case CONST_PTR_TO_MAP: return true; default: return false; } } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE), true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } if (value_regno >= 0 && is_spillable_regtype(state->regs[value_regno].type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } /* save register state */ state->stack[spi].spilled_ptr = state->regs[value_regno]; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_SPILL; } else { /* regular write of data into stack */ state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; for (i = 0; i < size; i++) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = STACK_MISC; } return 0; } static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot) { struct bpf_verifier_state *parent = state->parent; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_stack_read(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; u8 *stype; if (state->allocated_stack <= slot) { verbose(env, "invalid read from stack off %d+0 size %d\n", off, size); return -EACCES; } stype = state->stack[spi].slot_type; if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } for (i = 1; i < BPF_REG_SIZE; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { verbose(env, "corrupted spill memory\n"); return -EACCES; } } if (value_regno >= 0) { /* restore register state from stack */ state->regs[value_regno] = state->stack[spi].spilled_ptr; mark_stack_slot_read(state, spi); } return 0; } else { for (i = 0; i < size; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; } } if (value_regno >= 0) /* have read misc data from the stack */ mark_reg_unknown(env, state->regs, value_regno); return 0; } } /* check read/write into map element returned by bpf_map_lookup_elem() */ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_map *map = regs[regno].map_ptr; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || off + size > map->value_size) { verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; } return 0; } /* check read/write into a map element with possible variable offset */ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *reg = &state->regs[regno]; int err; /* We may have adjusted the register to this map value, so we * need to try adding each of min_value and max_value to off * to make sure our theoretical access will be safe. */ if (env->log.level) print_verifier_state(env, state); /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. If we are using signed variables for our * index'es we need to make sure that whatever we use * will have a set floor within our range. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->smin_value + off, size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the array range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too. 
*/ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->umax_value + off, size, zero_size_allowed); if (err) verbose(env, "R%d max value is outside of the array range\n", regno); return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: /* dst_input() and dst_output() can't write for now */ if (t == BPF_WRITE) return false; /* fallthrough */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; default: return false; } } static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || (u64)off + size > reg->range) { verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; } return 0; } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_packet_access(env, regno, off, size, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. 
*/ *reg_type = info.reg_type; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size) { bool strict = env->strict_alignment; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. 
 */
		return check_pkt_ptr_alignment(env, reg, off, size, strict);
	case PTR_TO_MAP_VALUE:
		pointer_desc = "value ";
		break;
	case PTR_TO_CTX:
		pointer_desc = "context ";
		break;
	case PTR_TO_STACK:
		pointer_desc = "stack ";
		/* The stack spill/unspill code must walk word by word, since
		 * the stack slot tracking works on words, so force strict
		 * alignment checks for stack pointers.
		 */
		strict = true;
		break;
	default:
		break;
	}
	return check_generic_ptr_alignment(env, reg, pointer_desc, off, size,
					   strict);
}

/* check whether memory at (regno + off) is accessible for t = (read | write)
 * if t==write, value_regno is a register which value is stored into memory
 * if t==read, value_regno is a register which will receive the value from memory
 * if t==write && value_regno==-1, some unknown value is stored into memory
 * if t==read && value_regno==-1, don't care what we read from memory
 */
static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
			    int off, int bpf_size, enum bpf_access_type t,
			    int value_regno)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_reg_state *reg = regs + regno;
	int size, err = 0;

	size = bpf_size_to_bytes(bpf_size);
	if (size < 0)
		return size;

	/* alignment checks will add in reg->off themselves */
	err = check_ptr_alignment(env, reg, off, size);
	if (err)
		return err;

	/* for access checks, reg->off is just part of off */
	off += reg->off;

	if (reg->type == PTR_TO_MAP_VALUE) {
		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into map\n", value_regno);
			return -EACCES;
		}

		err = check_map_access(env, regno, off, size, false);
		if (!err && t == BPF_READ && value_regno >= 0)
			mark_reg_unknown(env, regs, value_regno);

	} else if (reg->type == PTR_TO_CTX) {
		enum bpf_reg_type reg_type = SCALAR_VALUE;

		if (t == BPF_WRITE && value_regno >= 0 &&
		    is_pointer_value(env, value_regno)) {
			verbose(env, "R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		/* ctx accesses must be at a fixed offset, so that we can
		 * determine what type of data were returned.
		 */
		if (reg->off) {
			verbose(env,
				"dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
				regno, reg->off, off - reg->off);
			return -EACCES;
		}
		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose(env,
				"variable ctx access var_off=%s off=%d size=%d",
				tn_buf, off, size);
			return -EACCES;
		}
		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
		if (!err && t == BPF_READ && value_regno >= 0) {
			/* ctx access returns either a scalar, or a
			 * PTR_TO_PACKET[_META,_END]. In the latter
			 * case, we know the offset is zero.
			 */
			if (reg_type == SCALAR_VALUE)
				mark_reg_unknown(env, regs, value_regno);
			else
				mark_reg_known_zero(env, regs, value_regno);
			regs[value_regno].id = 0;
			regs[value_regno].off = 0;
			regs[value_regno].range = 0;
			regs[value_regno].type = reg_type;
		}

	} else if (reg->type == PTR_TO_STACK) {
		/* stack accesses must be at a fixed offset, so that we can
		 * determine what type of data were returned.
		 * See check_stack_read().
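		 * With a variable offset we could not tell which stack slot
		 * (and which spilled register, if any) the access touches,
		 * so such accesses are rejected below.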
*/ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } off += reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ regs[value_regno].var_off = tnum_cast(regs[value_regno].var_off, size); __update_reg_bounds(&regs[value_regno]); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", insn->src_reg); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); } /* Does this register contain a constant zero? */ static bool register_is_null(struct bpf_reg_state reg) { return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0); } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. 
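 * For example, a helper taking ARG_PTR_TO_MEM plus ARG_CONST_SIZE ends up
 * here with 'regno' naming the buffer register and 'access_size' derived
 * from the bounds of the size argument.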
 */
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
				int access_size, bool zero_size_allowed,
				struct bpf_call_arg_meta *meta)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_reg_state *regs = state->regs;
	int off, i, slot, spi;

	if (regs[regno].type != PTR_TO_STACK) {
		/* Allow zero-byte read from NULL, regardless of pointer type */
		if (zero_size_allowed && access_size == 0 &&
		    register_is_null(regs[regno]))
			return 0;

		verbose(env, "R%d type=%s expected=%s\n", regno,
			reg_type_str[regs[regno].type],
			reg_type_str[PTR_TO_STACK]);
		return -EACCES;
	}

	/* Only allow fixed-offset stack reads */
	if (!tnum_is_const(regs[regno].var_off)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
		verbose(env, "invalid variable stack read R%d var_off=%s\n",
			regno, tn_buf);
		/* a variable-offset read must be rejected, not just logged */
		return -EACCES;
	}
	off = regs[regno].off + regs[regno].var_off.value;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	if (env->prog->aux->stack_depth < -off)
		env->prog->aux->stack_depth = -off;

	if (meta && meta->raw_mode) {
		meta->access_size = access_size;
		meta->regno = regno;
		return 0;
	}

	for (i = 0; i < access_size; i++) {
		slot = -(off + i) - 1;
		spi = slot / BPF_REG_SIZE;
		if (state->allocated_stack <= slot ||
		    state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
			STACK_MISC) {
			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}

static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
				   int access_size, bool zero_size_allowed,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];

	switch (reg->type) {
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
		return check_packet_access(env, regno, reg->off, access_size,
					   zero_size_allowed);
	case PTR_TO_MAP_VALUE:
		return check_map_access(env, regno, reg->off, access_size,
					zero_size_allowed);
	default: /* scalar_value|ptr_to_stack or invalid ptr */
		return check_stack_boundary(env, regno, access_size,
					    zero_size_allowed, meta);
	}
}

static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type,
			  struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	enum bpf_reg_type expected_type, type = reg->type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	err = check_reg_arg(env, regno, SRC_OP);
	if (err)
		return err;

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose(env, "R%d leaks addr into helper function\n",
				regno);
			return -EACCES;
		}
		return 0;
	}

	if (type_is_pkt_pointer(type) &&
	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
		verbose(env, "helper access to the packet is not allowed\n");
		return -EACCES;
	}

	if (arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
		if (!type_is_pkt_pointer(type) && type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_CONST_SIZE ||
		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
		expected_type = SCALAR_VALUE;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_PTR_TO_MEM ||
		   arg_type == ARG_PTR_TO_MEM_OR_NULL ||
		   arg_type ==
ARG_PTR_TO_UNINIT_MEM) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ if (register_is_null(*reg) && arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->key_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->value_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->value_size, false, NULL); } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* bpf_xxx(..., buf, len) call will access 'len' bytes * from stack pointer 'buf'. Check it * note: regno == len, regno - 1 == buf */ if (regno == 0) { /* kernel subsystem misconfigured verifier */ verbose(env, "ARG_CONST_SIZE cannot be first argument\n"); return -EACCES; } /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. */ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... 
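 * e.g. a BPF_MAP_TYPE_PROG_ARRAY may only be passed to bpf_tail_call(),
 * and conversely bpf_tail_call() only accepts a BPF_MAP_TYPE_PROG_ARRAY.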
*/ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_perf_event_read_value) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. */ case BPF_MAP_TYPE_DEVMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; /* Restrict bpf side of cpumap, open when use-cases appear */ case BPF_MAP_TYPE_CPUMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem) goto error; break; default: break; } /* ... and second from the function itself. */ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_CPUMAP) goto error; break; case BPF_FUNC_sk_redirect_map: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static int check_raw_mode(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; return count > 1 ? -EINVAL : 0; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. 
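 * This is done after calling any helper for which
 * bpf_helper_changes_pkt_data() returns true, since such a helper may
 * reallocate or resize the packet.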
*/ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (reg_is_pkt_pointer_any(&regs[i])) mark_reg_unknown(env, regs, i); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } } static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL only function from proprietary program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. */ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; /* We only support one arg being in raw mode at the moment, which * is sufficient for the helper functions we have right now. */ err = check_raw_mode(fn); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. 
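	 * In raw mode the helper itself fills the buffer, so its previous
	 * contents need not be initialized; writing the slots here makes
	 * subsequent reads of them legal.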
*/ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); if (err) return err; } regs = cur_regs(env); /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { struct bpf_insn_aux_data *insn_aux; regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; insn_aux = &env->insn_aux_data[insn_idx]; if (!insn_aux->map_ptr) insn_aux->map_ptr = meta.map_ptr; else if (insn_aux->map_ptr != meta.map_ptr) insn_aux->map_ptr = BPF_MAP_PTR_POISON; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if (changes_data) clear_all_pkt_pointers(env); return 0; } static void coerce_reg_to_32(struct bpf_reg_state *reg) { /* clear high 32 bits */ reg->var_off = tnum_cast(reg->var_off, 4); /* Update bounds */ __update_reg_bounds(reg); } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ s64 res = (s64)((u64)a + (u64)b); if (b < 0) return res > a; return res < a; } static bool signed_sub_overflows(s64 a, s64 b) { /* Do the sub in u64, where overflow is well-defined */ s64 res = (s64)((u64)a - (u64)b); if (b < 0) return res < a; return res > a; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
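 * e.g. 'r1 += r2' with r1 a PTR_TO_MAP_VALUE and r2 a bounded scalar
 * arrives here with ptr_reg = &r1 and off_reg = &r2.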
*/ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u8 opcode = BPF_OP(insn->code); u32 dst = insn->dst_reg; dst_reg = &regs[dst]; if (WARN_ON_ONCE(known && (smin_val != smax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad sbounds\n"); return -EINVAL; } if (WARN_ON_ONCE(known && (umin_val != umax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad ubounds\n"); return -EINVAL; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ if (!env->allow_ptr_leaks) verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", dst); return -EACCES; } if (ptr_reg->type == CONST_PTR_TO_MAP) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_PACKET_END) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", dst); return -EACCES; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. */ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; switch (opcode) { case BPF_ADD: /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. 
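		 * e.g. after 'r3 += r4' with r3 a packet pointer and r4 in
		 * [0, 64], each bound of r3 grows by the matching bound of
		 * r4 and range drops to zero, forcing a fresh comparison
		 * against pkt_end before the result may be dereferenced.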
*/ if (signed_add_overflows(smin_ptr, smin_val) || signed_add_overflows(smax_ptr, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr + smin_val; dst_reg->smax_value = smax_ptr + smax_val; } if (umin_ptr + umin_val < umin_ptr || umax_ptr + umax_val < umax_ptr) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value = umin_ptr + umin_val; dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ dst_reg->range = 0; } break; case BPF_SUB: if (dst_reg == off_reg) { /* scalar -= pointer. Creates an unknown scalar */ if (!env->allow_ptr_leaks) verbose(env, "R%d tried to subtract pointer from scalar\n", dst); return -EACCES; } /* We don't allow subtraction from FP, because (according to * test_verifier.c test "invalid fp arithmetic", JITs might not * be able to deal with it. */ if (ptr_reg->type == PTR_TO_STACK) { if (!env->allow_ptr_leaks) verbose(env, "R%d subtraction from stack pointer prohibited\n", dst); return -EACCES; } if (known && (ptr_reg->off - smin_val == (s64)(s32)(ptr_reg->off - smin_val))) { /* pointer -= K. Subtract it from fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. */ if (signed_sub_overflows(smin_ptr, smax_val) || signed_sub_overflows(smax_ptr, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr - smax_val; dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value = umin_ptr - umax_val; dst_reg->umax_value = umax_ptr - umin_val; } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) dst_reg->range = 0; } break; case BPF_AND: case BPF_OR: case BPF_XOR: /* bitwise ops on pointers are troublesome, prohibit for now. * (However, in principle we could allow some cases, e.g. * ptr &= ~3 which would reduce min_value by 3.) */ if (!env->allow_ptr_leaks) verbose(env, "R%d bitwise operator %s on pointer prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; default: /* other operators (e.g. 
MUL,LSH) produce non-pointer results */ if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->64 */ coerce_reg_to_32(dst_reg); coerce_reg_to_32(&src_reg); } smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. 
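		 * e.g. if the resulting var_off is (value=0x10, mask=0x3),
		 * every possible value has bit 4 set, so the unsigned
		 * minimum is 0x10.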
*/ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val > 63) { /* Shifts greater than 63 are undefined. This includes * shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } if (src_known) dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); else dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val > 63) { /* Shifts greater than 63 are undefined. This includes * shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. If the value in dst_reg might * be negative, then either: * 1) src_reg might be zero, so the sign bit of the result is * unknown, so we lose our signed bounds * 2) it's known negative, thus the unsigned bounds capture the * signed bounds * 3) the signed bounds cross zero, so they tell us nothing * about the result * If the value in dst_reg is known nonnegative, then again the * unsigned bounts capture the signed bounds. * Thus, in all cases it suffices to blow away our signed bounds * and rely on inferring new ones from the unsigned bounds and * var_off of the result. 
*/ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; if (src_known) dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); else dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max * and var_off. */ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; u8 opcode = BPF_OP(insn->code); int rc; dst_reg = &regs[insn->dst_reg]; src_reg = NULL; if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; if (BPF_SRC(insn->code) == BPF_X) { src_reg = &regs[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { if (dst_reg->type != SCALAR_VALUE) { /* Combining two pointers by any ALU op yields * an arbitrary scalar. */ if (!env->allow_ptr_leaks) { verbose(env, "R%d pointer %s pointer prohibited\n", insn->dst_reg, bpf_alu_string[opcode >> 4]); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); return 0; } else { /* scalar += pointer * This is legal, but we have to reverse our * src/dest handling in computing the range */ rc = adjust_ptr_min_max_vals(env, insn, src_reg, dst_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* scalar += unknown scalar */ __mark_reg_unknown(&off_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } else if (ptr_reg) { /* pointer += scalar */ rc = adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += scalar */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, *src_reg); } return rc; } } else { /* Pretend the src is a reg with a known value, since we only * need to be able to read from this state. 
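		 * e.g. for 'r1 += 2' we materialize off_reg as a known
		 * scalar holding the constant 2 and reuse the
		 * register-source paths.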
*/ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) { /* pointer += K */ rc = adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += K */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); /* high 32 bits are known zero. */ regs[insn->dst_reg].var_off = tnum_cast( regs[insn->dst_reg].var_off, 4); __update_reg_bounds(&regs[insn->dst_reg]); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... 
*/ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void find_good_pkt_pointers(struct bpf_verifier_state *state, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { struct bpf_reg_state *regs = state->regs, *reg; u16 new_range; int i; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. */ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. */ for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == type && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ regs[i].range = max(regs[i].range, new_range); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } } /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. * In JEQ/JNE cases we also adjust the var_off values. 
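 * e.g. for 'if r1 > 7 goto ...' (BPF_JGT, val == 7) the taken branch
 * learns umin_value >= 8 and the fall-through branch learns
 * umax_value <= 7.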
*/ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { /* If the dst_reg is a pointer, we can't learn anything about its * variable offset from the compare (unless src_reg were a pointer into * the same object, but we don't bother with that. * Since false_reg and true_reg have the same type by construction, we * only need to check one of them for pointerness. */ if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. */ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: false_reg->umax_value = min(false_reg->umax_value, val); true_reg->umin_value = max(true_reg->umin_value, val + 1); break; case BPF_JSGT: false_reg->smax_value = min_t(s64, false_reg->smax_value, val); true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); break; case BPF_JLT: false_reg->umin_value = max(false_reg->umin_value, val); true_reg->umax_value = min(true_reg->umax_value, val - 1); break; case BPF_JSLT: false_reg->smin_value = max_t(s64, false_reg->smin_value, val); true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); break; case BPF_JGE: false_reg->umax_value = min(false_reg->umax_value, val - 1); true_reg->umin_value = max(true_reg->umin_value, val); break; case BPF_JSGE: false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); true_reg->smin_value = max_t(s64, true_reg->smin_value, val); break; case BPF_JLE: false_reg->umin_value = max(false_reg->umin_value, val + 1); true_reg->umax_value = min(true_reg->umax_value, val); break; case BPF_JSLE: false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); true_reg->smax_value = min_t(s64, true_reg->smax_value, val); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Same as above, but for the case that dst_reg holds a constant and src_reg is * the variable reg. */ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. 
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. 
*/ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, struct bpf_reg_state *true_dst, struct bpf_reg_state *false_src, struct bpf_reg_state *false_dst, u8 opcode) { switch (opcode) { case BPF_JEQ: __reg_combine_min_max(true_src, true_dst); break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); break; } } static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, bool is_null) { struct bpf_reg_state *reg = &regs[regno]; if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { __mark_reg_known_zero(reg); reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; } else { reg->type = PTR_TO_MAP_VALUE; } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances * to take effect. */ reg->id = 0; } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, bool is_null) { struct bpf_reg_state *regs = state->regs; u32 id = regs[regno].id; int i; for (i = 0; i < MAX_BPF_REG; i++) mark_map_reg(regs, i, id, is_null); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); } } static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg, struct bpf_verifier_state *this_branch, struct bpf_verifier_state *other_branch) { if (BPF_SRC(insn->code) != BPF_X) return false; switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); } else { return false; } break; case BPF_JLT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 
find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *other_branch, *this_branch = env->cur_state; struct bpf_reg_state *regs = this_branch->regs, *dst_reg; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; /* detect if R == 0 where R was initialized to zero earlier */ if (BPF_SRC(insn->code) == BPF_K && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == SCALAR_VALUE && tnum_equals_const(dst_reg->var_off, insn->imm)) { if (opcode == BPF_JEQ) { /* if (imm == imm) goto pc+off; * only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else { /* if (imm != imm) goto pc+off; * only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); if (!other_branch) return -EFAULT; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
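	 * e.g. comparing a PTR_TO_MAP_VALUE with a PTR_TO_PACKET tells us
	 * nothing about either register's offset, so no bounds are learned.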
*/ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch->regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch->regs[insn->src_reg], &other_branch->regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { /* Mark all identical map registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch); return 0; } /* return the map pointer stored inside BPF_LD_IMM64 instruction */ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) { u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; return (struct bpf_map *) (unsigned long) imm64; } /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { verbose(env, "invalid BPF_LD_IMM insn\n"); return -EINVAL; } if (insn->off != 0) { verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); return -EINVAL; } err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); regs[insn->dst_reg].type = CONST_PTR_TO_MAP; regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); return 0; } static bool may_access_skb(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; } } /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, * preserve R6-R9, and store return value into R0 * * Implicit input: * ctx == skb == R6 == CTX * * Explicit input: * SRC == any register * IMM == 32-bit immediate * * Output: * R0 - 8/16/32-bit skb data converted to cpu endianness */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; if (!may_access_skb(env->prog->type)) { verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for 
this program type\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } /* check whether implicit source operand (register R6) is readable */ err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; } if (mode == BPF_IND) { /* check explicit source operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* mark destination R0 register as readable, since it contains * the value fetched from the packet. * Already marked as written above. */ mark_reg_unknown(env, regs, BPF_REG_0); return 0; } static int check_return_code(struct bpf_verifier_env *env) { struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } verbose(env, " should have been 0 or 1\n"); return -EINVAL; } return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static int *insn_stack; /* stack of insns to process */ static int cur_stack; /* current stack index */ static int *insn_state; /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ env->explored_states[w] = STATE_LIST_MARK; if 
(insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (cur_stack >= env->prog->len) return -E2BIG; insn_stack[cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int ret = 0; int i, t; insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return -ENOMEM; insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ cur_stack = 1; peek_stack: if (cur_stack == 0) goto check_state; t = insn_stack[cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else { /* conditional jump with two edges */ env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kfree(insn_state); kfree(insn_stack); return ret; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. 
* Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. */ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) { unsigned int i; for (i = 0; i < ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; idmap[i].cur = cur_id; return true; } if (idmap[i].old == old_id) return idmap[i].cur == cur_id; } /* We ran out of idmap slots, which should be impossible */ WARN_ON_ONCE(1); return false; } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* if we knew anything about the old value, we're not * equal, because we can't know anything about the * scalar value of the pointer in the new value. */ return rold->umin_value == 0 && rold->umax_value == U64_MAX && rold->smin_value == S64_MIN && rold->smax_value == S64_MAX && tnum_is_unknown(rold->var_off); } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. 
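* (e.g. old off == 0 vs current off == 2: an access that was 4-byte aligned * in the old state need not be aligned now, and the old range was * established relative to the old offset) 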
*/ if (rold->off != rcur->off) return false; /* id relations must be preserved */ if (rold->id && !check_ids(rold->id, rcur->id, idmap)) return false; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_STACK: case PTR_TO_PACKET_END: /* Only valid matches are exact, which memcmp() above * would have accepted */ default: /* Don't know what's going on, just say it's not safe */ return false; } /* Shouldn't get here; if we do, say it's not safe */ WARN_ON_ONCE(1); return false; } static bool stacksafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur, struct idpair *idmap) { int i, spi; /* if explored stack has more populated slots than current stack * such stacks are not equivalent */ if (old->allocated_stack > cur->allocated_stack) return false; /* walk slots of the explored stack and ignore any additional * slots in the current stack, since explored(safe) state * didn't use them */ for (i = 0; i < old->allocated_stack; i++) { spi = i / BPF_REG_SIZE; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) continue; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != cur->stack[spi].slot_type[i % BPF_REG_SIZE]) /* Ex: old explored (safe) state has STACK_SPILL in * this stack slot, but current has STACK_MISC -> * these verifier states are not equivalent, * return false to continue verification of this path */ return false; if (i % BPF_REG_SIZE) continue; if (old->stack[spi].slot_type[0] != STACK_SPILL) continue; if (!regsafe(&old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slot are both storing * spilled registers, check that the stored pointer types * are the same as well. * Ex: explored safe path could have stored * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} * but current path has stored: * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} * such verifier states are not equivalent. * return false to continue verification of this path */ return false; } return true; } /* compare two verifier states * * all states stored in state_list are known to be valid, since * verifier reached 'bpf_exit' instruction through them * * this function is called when the verifier is exploring different branches of * execution popped from the state stack. If it sees an old state that has * more strict register state and more strict stack state then this execution * branch doesn't need to be explored further, since verifier already * concluded that more strict state leads to valid finish. * * Therefore two states are equivalent if register state is more conservative * and explored stack state is more conservative than the current one. * Example: * explored current * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) * * In other words if current stack state (one being explored) has more * valid slots than the old one that already passed validation, it means * the verifier can stop exploring and conclude that current state is valid too * * Similarly with registers. 
If explored state has register type as invalid * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { struct idpair *idmap; bool ret = false; int i; idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); /* If we failed to allocate the idmap, just say it's not safe */ if (!idmap) return false; for (i = 0; i < MAX_BPF_REG; i++) { if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } if (!stacksafe(old, cur, idmap)) goto out_free; ret = true; out_free: kfree(idmap); return ret; } /* A write screens off any subsequent reads; but write marks come from the * straight-line code between a state and its parent. When we arrive at a * jump target (in the first iteration of the propagate_liveness() loop), * we didn't arrive by the straight-line code, so read marks in state must * propagate to parent regardless of state's write marks. */ static bool do_propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ bool touched = false; /* any changes made? */ int i; if (!parent) return touched; /* Propagate read liveness of registers... */ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); /* We don't need to worry about FP liveness because it's read-only */ for (i = 0; i < BPF_REG_FP; i++) { if (parent->regs[i].live & REG_LIVE_READ) continue; if (writes && (state->regs[i].live & REG_LIVE_WRITTEN)) continue; if (state->regs[i].live & REG_LIVE_READ) { parent->regs[i].live |= REG_LIVE_READ; touched = true; } } /* ... and stack slots */ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) { if (parent->stack[i].slot_type[0] != STACK_SPILL) continue; if (state->stack[i].slot_type[0] != STACK_SPILL) continue; if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (writes && (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN)) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) { parent->stack[i].spilled_ptr.live |= REG_LIVE_READ; touched = true; } } return touched; } /* "parent" is "a state from which we reach the current state", but initially * it is not the state->parent (i.e. "the state whose straight-line code leads * to the current state"), instead it is the state that happened to arrive at * a (prunable) equivalent of the current state. See comment above * do_propagate_liveness() for consequences of this. * This function is just a more efficient way of calling mark_reg_read() or * mark_stack_slot_read() on each reg in "parent" that is read in "state", * though it requires that parent != state->parent in the call arguments. 
*/ static void propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { while (do_propagate_liveness(state, parent)) { /* Something changed, so we need to feed those changes onward */ state = parent; parent = state->parent; } } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; struct bpf_verifier_state *cur = env->cur_state; int i, err; sl = env->explored_states[insn_idx]; if (!sl) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; while (sl != STATE_LIST_MARK) { if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. * If we have any write marks in env->cur_state, they * will prevent corresponding reads in the continuation * from reaching our parent (an explored_state). Our * own state will get the read marks recorded, but * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ propagate_liveness(&sl->state, cur); return 1; } sl = sl->next; } /* there were no equivalent states, remember current one. * technically the current state is not proven to be safe yet, * but it will either reach bpf_exit (which means it's safe) or * it will be rejected. Since there are no loops, we won't be * seeing this 'insn_idx' instruction again on the way to bpf_exit */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ err = copy_verifier_state(&new_sl->state, cur); if (err) { free_verifier_state(&new_sl->state, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ cur->parent = &new_sl->state; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) */ for (i = 0; i < BPF_REG_FP; i++) cur->regs[i].live = REG_LIVE_NONE; for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++) if (cur->stack[i].slot_type[0] == STACK_SPILL) cur->stack[i].spilled_ptr.live = REG_LIVE_NONE; return 0; } static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { if (env->dev_ops && env->dev_ops->insn_hook) return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); return 0; } static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len; int insn_idx, prev_insn_idx = 0; int insn_processed = 0; bool do_print_state = false; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; env->cur_state = state; init_reg_state(env, state->regs); state->parent = NULL; insn_idx = 0; for (;;) { struct bpf_insn *insn; u8 class; int err; if (insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", insn_idx, insn_cnt); return -EFAULT; } insn = &insns[insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. 
Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d: safe\n", prev_insn_idx, insn_idx); else verbose(env, "%d: safe\n", insn_idx); } goto process_bpf_exit; } if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", insn_idx); else verbose(env, "\nfrom %d to %d:", prev_insn_idx, insn_idx); print_verifier_state(env, state); do_print_state = false; } if (env->log.level) { verbose(env, "%d: ", insn_idx); print_bpf_insn(verbose, env, insn, env->allow_ptr_leaks); } err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); if (err) return err; regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* check for reserved fields is already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg); if (err) return err; prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (src_reg_type != *prev_src_type && (src_reg_type == PTR_TO_CTX || *prev_src_type == PTR_TO_CTX)) { /* A malicious program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it. 
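* For illustration (hypothetical program): one path reaches * r0 = *(u32 *)(r6 + 0) with r6 == ctx while another path reaches the * same insn with r6 pointing into the stack; ctx accesses are rewritten * by convert_ctx_accesses(), so a single rewrite cannot serve both. 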
*/ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn_idx, insn); if (err) return err; insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg); if (err) return err; prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (dst_reg_type != *prev_dst_type && (dst_reg_type == PTR_TO_CTX || *prev_dst_type == PTR_TO_CTX)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } err = check_call(env, insn->imm, insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } /* eBPF calling convetion is such that R0 is used * to return the value from eBPF program. 
* Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &prev_insn_idx, &insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; insn_idx++; env->insn_aux_data[insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } insn_idx++; } verbose(env, "processed %d insns, stack depth %d\n", insn_processed, env->prog->aux->stack_depth); return 0; } static int check_map_prealloc(struct bpf_map *map) { return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || !(map->map_flags & BPF_F_NO_PREALLOC); } static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) { /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use * preallocated hash maps, since doing memory allocation * in overflow_handler can crash depending on where nmi got * triggered. */ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { if (!check_map_prealloc(map)) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) { verbose(env, "perf_event programs can only use preallocated inner hash map\n"); return -EINVAL; } } return 0; } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn->src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; if (insn->src_reg != BPF_PSEUDO_MAP_FD) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn->imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn->imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } /* store map pointer inside BPF_LD_IMM64 instruction */ insn[0].imm = (u32) (unsigned long) 
map; insn[1].imm = ((u64) (unsigned long) map) >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) if (env->used_maps[j] == map) { fdput(f); goto next_insn; } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_bpf_prog_info() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { fdput(f); return PTR_ERR(map); } env->used_maps[env->used_map_cnt++] = map; fdput(f); next_insn: insn++; i++; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. */ return 0; } /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { int i; for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) insn->src_reg = 0; } /* single env->prog->insnsi[off] instruction was replaced with the range * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; int i; if (cnt == 1) return 0; new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); if (!new_data) return -ENOMEM; memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; } static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (!new_prog) return NULL; if (adjust_insn_aux_data(env, new_prog->len, off, len)) return NULL; return new_prog; } /* The verifier does more data flow analysis than llvm and will not explore * branches that are dead at run time. Malicious programs can have dead code * too. Therefore replace all dead at-run-time code with nops. 
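* The nop chosen below is "r0 = r0", i.e. BPF_MOV64_REG(BPF_REG_0, BPF_REG_0), * which changes no state if it is ever executed. 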
*/ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &nop, sizeof(nop)); } } /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; u32 target_size; if (ops->gen_prologue) { cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (!ops->convert_ctx_access) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) continue; ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimize program type specific * convert_ctx_access changes. If conversion is successful, * we will apply the proper mask to the result. 
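* A sketch with made-up numbers: a 1-byte read of a 4-byte context field * is rewritten into a 4-byte read at the field-aligned offset (off & ~3) * followed by an AND with (1 << size * 8) - 1, i.e. 0xff. 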
*/ is_narrower_load = size < ctx_field_size; if (is_narrower_load) { u32 off = insn->off; u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(ctx_field_size - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { if (ctx_field_size <= 4) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); else insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as explicit sequence of BPF instructions * * this function is called after the eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL)) continue; if (insn->imm == BPF_FUNC_get_route_realm) prog->dst_needed = 1; if (insn->imm == BPF_FUNC_get_prandom_u32) bpf_user_rnd_init_once(); if (insn->imm == BPF_FUNC_tail_call) { /* If we tail call into other programs, we * cannot make any assumptions since they can * be replaced dynamically during runtime in * the program array. */ prog->cb_access = 1; env->prog->aux->stack_depth = MAX_BPF_STACK; /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpreter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; continue; } /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * handlers are currently limited to 64 bit only. */ if (ebpf_jit_enabled() && BITS_PER_LONG == 64 && insn->imm == BPF_FUNC_map_lookup_elem) { map_ptr = env->insn_aux_data[i + delta].map_ptr; if (map_ptr == BPF_MAP_PTR_POISON || !map_ptr->ops->map_gen_lookup) goto patch_call_imm; cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->imm == BPF_FUNC_redirect_map) { /* Note, we cannot use prog directly as imm as subsequent * rewrites would still change the prog pointer. The only * stable address we can use is aux, which also works with * prog clones during blinding. 
*/ u64 addr = (unsigned long)prog->aux; struct bpf_insn r4_ld[] = { BPF_LD_IMM64(BPF_REG_4, addr), *insn, }; cnt = ARRAY_SIZE(r4_ld); new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm); /* all functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; if (!env->explored_states) return; for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; if (sl) while (sl != STATE_LIST_MARK) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } } kfree(env->explored_states); } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) { struct bpf_verifier_env *env; struct bpf_verifer_log *log; int ret = -EINVAL; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * (*prog)->len); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || !log->level || !log->ubuf) goto err_unlock; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; if (env->prog->aux->offload) { ret = bpf_prog_offload_verifier_prep(env); if (ret) goto err_unlock; } ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; env->explored_states = kcalloc(env->prog->len, sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; ret = check_cfg(env); if (ret < 0) goto skip_full_check; env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = do_check(env); if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) sanitize_dead_code(env); if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if 
(!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_bpf_prog_info() will release them. */ release_maps(env); *prog = env->prog; err_unlock: mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
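/* A minimal sketch of how bpf_check() is reached (call-path names assumed * from this tree, not shown here): the BPF_PROG_LOAD command of the bpf(2) * syscall copies the instructions from userspace into a bpf_prog and then * calls bpf_check(&prog, attr); only on success is the program JITed or * handed to the interpreter. */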
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/filter.h> #include <net/netlink.h> #include <linux/file.h> #include <linux/vmalloc.h> #include <linux/stringify.h> #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { #define BPF_PROG_TYPE(_id, _name) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE }; /* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. * All paths of conditional branches are analyzed until 'bpf_exit' insn. * * The first pass is depth-first-search to check that the program is a DAG. * It rejects the following programs: * - larger than BPF_MAXINSNS insns * - if loop is present (detected via back-edge) * - unreachable insns exist (shouldn't be a forest. program = one function) * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. * Since it's analyzing all paths through the program, the length of the * analysis is limited to 64k insn, which may be hit even if total number of * insn is less than 4K, but there are too many branches that change stack/regs. * Number of 'branches to be analyzed' is limited to 1k * * On entry to each instruction, each register has a type, and the instruction * changes the types of the registers depending on instruction semantics. * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is * copied to R1. * * All registers are 64-bit. * R0 - return register * R1-R5 argument passing registers * R6-R9 callee saved registers * R10 - frame pointer read-only * * At the start of BPF program the register R1 contains a pointer to bpf_context * and has type PTR_TO_CTX. * * Verifier tracks arithmetic operations on pointers in case: * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), * 1st insn copies R10 (which has FRAME_PTR) type into R1 * and 2nd arithmetic instruction is pattern matched to recognize * that it wants to construct a pointer to some element within stack. * So after 2nd insn, the register R1 has type PTR_TO_STACK * (and -20 constant is saved for further stack bounds checking). * Meaning that this reg is a pointer to stack plus known immediate constant. * * Most of the time the registers have SCALAR_VALUE type, which * means the register has some value, but it's not a valid pointer. * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer * types recognized by check_mem_access() function. * * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible. 
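* (roughly: a load *(uN *)(ptr + off) through such a register is allowed * iff the whole [off, off + sizeof(uN)) window stays inside * [0, value_size); check_map_access() does the precise bounds check) 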
* * registers used to pass values to function calls are checked against * function argument constraints. * * ARG_PTR_TO_MAP_KEY is one of such argument constraints. * It means that the register type passed to this function must be * PTR_TO_STACK and it will be used inside the function as * 'pointer to map element key' * * For example the argument constraints for bpf_map_lookup_elem(): * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, * .arg1_type = ARG_CONST_MAP_PTR, * .arg2_type = ARG_PTR_TO_MAP_KEY, * * ret_type says that this function returns 'pointer to map elem value or null' * function expects 1st argument to be a const pointer to 'struct bpf_map' and * 2nd argument should be a pointer to stack, which will be used inside * the helper function as a pointer to map element key. * * On the kernel side the helper function looks like: * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) * { * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; * void *key = (void *) (unsigned long) r2; * void *value; * * here kernel can access 'key' and 'map' pointers safely, knowing that * [key, key + map->key_size) bytes are valid and were initialized on * the stack of eBPF program. * } * * Corresponding eBPF program may look like: * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), * here verifier looks at prototype of map_lookup_elem() and sees: * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, * Now verifier knows that this map has key of R1->map_ptr->key_size bytes * * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, * Now verifier checks that [R2, R2 + map's key_size) are within stack limits * and were initialized prior to this call. * If it's ok, then verifier allows this BPF_CALL insn and looks at * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function * returns either a pointer to map value or NULL. * * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' * insn, the register holding that pointer in the true branch changes state to * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false * branch. See check_cond_jmp_op(). * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. */ /* verifier_state + insn_idx are pushed to stack when branch is encountered */ struct bpf_verifier_stack_elem { /* verifier state is 'st' * before processing instruction 'insn_idx' * and after processing instruction 'prev_insn_idx' */ struct bpf_verifier_state st; int insn_idx; int prev_insn_idx; struct bpf_verifier_stack_elem *next; }; #define BPF_COMPLEXITY_LIMIT_INSNS 131072 #define BPF_COMPLEXITY_LIMIT_STACK 1024 #define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) struct bpf_call_arg_meta { struct bpf_map *map_ptr; bool raw_mode; bool pkt_access; int regno; int access_size; }; static DEFINE_MUTEX(bpf_verifier_lock); /* log_level controls verbosity level of eBPF verifier. 
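* (with log_level 1 every instruction is printed and the register/stack * state is dumped at branch and pruning points; with log_level > 1 the * state is also printed before each instruction -- see do_check()) 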
* verbose() is used to dump the verification trace to the log, so the user * can figure out what's wrong with the program */ static __printf(2, 3) void verbose(struct bpf_verifier_env *env, const char *fmt, ...) { struct bpf_verifer_log *log = &env->log; unsigned int n; va_list args; if (!log->level || !log->ubuf || bpf_verifier_log_full(log)) return; va_start(args, fmt); n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); va_end(args); WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, "verifier log line truncated - local buffer too short\n"); n = min(log->len_total - log->len_used - 1, n); log->kbuf[n] = '\0'; if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) log->len_used += n; else log->ubuf = NULL; } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", }; static void print_verifier_state(struct bpf_verifier_env *env, struct bpf_verifier_state *state) { struct bpf_reg_state *reg; enum bpf_reg_type t; int i; for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d=%s", i, reg_type_str[t]); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); } else { verbose(env, "(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] == STACK_SPILL) verbose(env, " fp%d=%s", -MAX_BPF_STACK + i * BPF_REG_SIZE, reg_type_str[state->stack[i].spilled_ptr.type]); } verbose(env, "\n"); } static int copy_stack_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { if (!src->stack) return 0; if (WARN_ON_ONCE(dst->allocated_stack < src->allocated_stack)) { /* internal bug, make state invalid to reject the program */ memset(dst, 0, sizeof(*dst)); return -EFAULT; } memcpy(dst->stack, src->stack, sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE)); return 0; } /* 
do_check() starts with zero-sized stack in struct bpf_verifier_state to * make it consume minimal amount of memory. check_stack_write() access from * the program calls into realloc_verifier_state() to grow the stack size. * Note there is a non-zero 'parent' pointer inside bpf_verifier_state * which this function copies over. It points to previous bpf_verifier_state * which is never reallocated */ static int realloc_verifier_state(struct bpf_verifier_state *state, int size, bool copy_old) { u32 old_size = state->allocated_stack; struct bpf_stack_state *new_stack; int slot = size / BPF_REG_SIZE; if (size <= old_size || !size) { if (copy_old) return 0; state->allocated_stack = slot * BPF_REG_SIZE; if (!size && old_size) { kfree(state->stack); state->stack = NULL; } return 0; } new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state), GFP_KERNEL); if (!new_stack) return -ENOMEM; if (copy_old) { if (state->stack) memcpy(new_stack, state->stack, sizeof(*new_stack) * (old_size / BPF_REG_SIZE)); memset(new_stack + old_size / BPF_REG_SIZE, 0, sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE); } state->allocated_stack = slot * BPF_REG_SIZE; kfree(state->stack); state->stack = new_stack; return 0; } static void free_verifier_state(struct bpf_verifier_state *state, bool free_self) { kfree(state->stack); if (free_self) kfree(state); } /* copy verifier state from src to dst growing dst stack space * when necessary to accommodate larger src stack */ static int copy_verifier_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src) { int err; err = realloc_verifier_state(dst, src->allocated_stack, false); if (err) return err; memcpy(dst, src, offsetof(struct bpf_verifier_state, allocated_stack)); return copy_stack_state(dst, src); } static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, int *insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem, *head = env->head; int err; if (env->head == NULL) return -ENOENT; if (cur) { err = copy_verifier_state(cur, &head->st); if (err) return err; } if (insn_idx) *insn_idx = head->insn_idx; if (prev_insn_idx) *prev_insn_idx = head->prev_insn_idx; elem = head->next; free_verifier_state(&head->st, false); kfree(head); env->head = elem; env->stack_size--; return 0; } static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose(env, "BPF program is too complex\n"); goto err; } return &elem->st; err: /* pop all elements and return */ while (!pop_stack(env, NULL, NULL)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; static void __mark_reg_not_init(struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. 
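* E.g. after "r1 = 42" the register's tnum becomes const 42 and all four * bounds (smin/smax/umin/umax) collapse to 42. 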
*/ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { reg->id = 0; reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. */ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. */ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. 
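* (e.g. unsigned bounds [0xfffffffffffffffd, 0xffffffffffffffff] are both * negative when read as s64, so the signed bounds [-3, -1] can be deduced) 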
*/ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { reg->var_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { reg->type = SCALAR_VALUE; reg->id = 0; reg->off = 0; reg->var_off = tnum_unknown; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_unknown(regs + regno); } static void __mark_reg_not_init(struct bpf_reg_state *reg) { __mark_reg_unknown(reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_verifier_env *env, struct bpf_reg_state *regs) { int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) { struct bpf_verifier_state *parent = state->parent; if (regno == BPF_REG_FP) /* We don't need to worry about FP liveness because it's read-only */ return; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->regs[regno].live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->regs[regno].live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_reg_state *regs = env->cur_state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (regs[regno].type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } mark_reg_read(env->cur_state, regno); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case CONST_PTR_TO_MAP: return true; default: return false; } } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; err = realloc_verifier_state(state, round_up(slot + 1, BPF_REG_SIZE), true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } if (value_regno >= 0 && is_spillable_regtype(state->regs[value_regno].type)) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } /* save register state */ state->stack[spi].spilled_ptr = state->regs[value_regno]; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) state->stack[spi].slot_type[i] = STACK_SPILL; } else { /* regular write of data into stack */ state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; for (i = 0; i < size; i++) state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = STACK_MISC; } return 0; } static void mark_stack_slot_read(const struct bpf_verifier_state *state, int slot) { struct bpf_verifier_state *parent = state->parent; while (parent) { /* if read wasn't screened by an earlier write ... */ if (state->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN) break; /* ... 
then we depend on parent's value */ parent->stack[slot].spilled_ptr.live |= REG_LIVE_READ; state = parent; parent = state->parent; } } static int check_stack_read(struct bpf_verifier_env *env, struct bpf_verifier_state *state, int off, int size, int value_regno) { int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; u8 *stype; if (state->allocated_stack <= slot) { verbose(env, "invalid read from stack off %d+0 size %d\n", off, size); return -EACCES; } stype = state->stack[spi].slot_type; if (stype[0] == STACK_SPILL) { if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } for (i = 1; i < BPF_REG_SIZE; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { verbose(env, "corrupted spill memory\n"); return -EACCES; } } if (value_regno >= 0) { /* restore register state from stack */ state->regs[value_regno] = state->stack[spi].spilled_ptr; mark_stack_slot_read(state, spi); } return 0; } else { for (i = 0; i < size; i++) { if (stype[(slot - i) % BPF_REG_SIZE] != STACK_MISC) { verbose(env, "invalid read from stack off %d+%d size %d\n", off, i, size); return -EACCES; } } if (value_regno >= 0) /* have read misc data from the stack */ mark_reg_unknown(env, state->regs, value_regno); return 0; } } /* check read/write into map element returned by bpf_map_lookup_elem() */ static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_map *map = regs[regno].map_ptr; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || off + size > map->value_size) { verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", map->value_size, off, size); return -EACCES; } return 0; } /* check read/write into a map element with possible variable offset */ static int check_map_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *reg = &state->regs[regno]; int err; /* We may have adjusted the register to this map value, so we * need to try adding each of min_value and max_value to off * to make sure our theoretical access will be safe. */ if (env->log.level) print_verifier_state(env, state); /* The minimum value is only important with signed * comparisons where we can't assume the floor of a * value is 0. If we are using signed variables for our * index'es we need to make sure that whatever we use * will have a set floor within our range. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->smin_value + off, size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the array range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too. 
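 * For example (hypothetical map): with value_size = 48, off = 0 and
 * size = 8, an index register proven to lie in [0, 40] passes both the
 * smin probe above (0 + 8 <= 48) and the umax probe below
 * (40 + 8 <= 48), while an unbounded register is rejected outright.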
*/ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->umax_value + off, size, zero_size_allowed); if (err) verbose(env, "R%d max value is outside of the array range\n", regno); return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: /* dst_input() and dst_output() can't write for now */ if (t == BPF_WRITE) return false; /* fallthrough */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; default: return false; } } static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || (u64)off + size > reg->range) { verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; } return 0; } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_packet_access(env, regno, off, size, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. 
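 * For instance, a 1-byte read from inside a 4-byte context field would
 * record ctx_field_size = 4 here, and a later conversion pass loads
 * the full word and masks out the requested byte.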
*/ *reg_type = info.reg_type; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size) { bool strict = env->strict_alignment; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. 
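 * For example, with the emulated NET_IP_ALIGN of 2 used below, a
 * 4-byte load at packet offset 14 (just past an Ethernet header) is
 * accepted because 2 + 14 is a multiple of 4, which is exactly the
 * layout NET_IP_ALIGN exists to produce.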
*/ return check_pkt_ptr_alignment(env, reg, off, size, strict); case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; case PTR_TO_CTX: pointer_desc = "context "; break; case PTR_TO_STACK: pointer_desc = "stack "; break; default: break; } return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict); } /* truncate register to smaller size (in bytes) * must be called with size < BPF_REG_SIZE */ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) { u64 mask; /* clear high bits in bit representation */ reg->var_off = tnum_cast(reg->var_off, size); /* fix arithmetic bounds */ mask = ((u64)1 << (size * 8)) - 1; if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { reg->umin_value &= mask; reg->umax_value &= mask; } else { reg->umin_value = 0; reg->umax_value = mask; } reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value; } /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int bpf_size, enum bpf_access_type t, int value_regno) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; int size, err = 0; size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size); if (err) return err; /* for access checks, reg->off is just part of off */ off += reg->off; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); return -EACCES; } err = check_map_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into ctx\n", value_regno); return -EACCES; } /* ctx accesses must be at a fixed offset, so that we can * determine what type of data were returned. */ if (reg->off) { verbose(env, "dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n", regno, reg->off, off - reg->off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable ctx access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) mark_reg_unknown(env, regs, value_regno); else mark_reg_known_zero(env, regs, value_regno); regs[value_regno].id = 0; regs[value_regno].off = 0; regs[value_regno].range = 0; regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { /* stack accesses must be at a fixed offset, so that we can * determine what type of data were returned. 
* See check_stack_read(). */ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } off += reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } if (env->prog->aux->stack_depth < -off) env->prog->aux->stack_depth = -off; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ coerce_reg_to_size(&regs[value_regno], size); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", insn->src_reg); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); } /* Does this register contain a constant zero? */ static bool register_is_null(struct bpf_reg_state reg) { return reg.type == SCALAR_VALUE && tnum_equals_const(reg.var_off, 0); } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. 
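 * For illustration (hypothetical helper call): with a buffer argument
 * pointing at fp-16 and access_size = 16, the loop below checks that
 * every byte of [fp-16, fp-1] was previously written (STACK_MISC),
 * unless the helper runs in raw mode and will overwrite them anyway.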
 */
static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
				int access_size, bool zero_size_allowed,
				struct bpf_call_arg_meta *meta)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_reg_state *regs = state->regs;
	int off, i, slot, spi;

	if (regs[regno].type != PTR_TO_STACK) {
		/* Allow zero-byte read from NULL, regardless of pointer type */
		if (zero_size_allowed && access_size == 0 &&
		    register_is_null(regs[regno]))
			return 0;

		verbose(env, "R%d type=%s expected=%s\n", regno,
			reg_type_str[regs[regno].type],
			reg_type_str[PTR_TO_STACK]);
		return -EACCES;
	}

	/* Only allow fixed-offset stack reads */
	if (!tnum_is_const(regs[regno].var_off)) {
		char tn_buf[48];

		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
		verbose(env, "invalid variable stack read R%d var_off=%s\n",
			regno, tn_buf);
		return -EACCES;
	}
	off = regs[regno].off + regs[regno].var_off.value;
	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
	    access_size < 0 || (access_size == 0 && !zero_size_allowed)) {
		verbose(env, "invalid stack type R%d off=%d access_size=%d\n",
			regno, off, access_size);
		return -EACCES;
	}

	if (env->prog->aux->stack_depth < -off)
		env->prog->aux->stack_depth = -off;

	if (meta && meta->raw_mode) {
		meta->access_size = access_size;
		meta->regno = regno;
		return 0;
	}

	for (i = 0; i < access_size; i++) {
		slot = -(off + i) - 1;
		spi = slot / BPF_REG_SIZE;
		if (state->allocated_stack <= slot ||
		    state->stack[spi].slot_type[slot % BPF_REG_SIZE] !=
			STACK_MISC) {
			verbose(env, "invalid indirect read from stack off %d+%d size %d\n",
				off, i, access_size);
			return -EACCES;
		}
	}
	return 0;
}

static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
				   int access_size, bool zero_size_allowed,
				   struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];

	switch (reg->type) {
	case PTR_TO_PACKET:
	case PTR_TO_PACKET_META:
		return check_packet_access(env, regno, reg->off, access_size,
					   zero_size_allowed);
	case PTR_TO_MAP_VALUE:
		return check_map_access(env, regno, reg->off, access_size,
					zero_size_allowed);
	default: /* scalar_value|ptr_to_stack or invalid ptr */
		return check_stack_boundary(env, regno, access_size,
					    zero_size_allowed, meta);
	}
}

static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
			  enum bpf_arg_type arg_type,
			  struct bpf_call_arg_meta *meta)
{
	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
	enum bpf_reg_type expected_type, type = reg->type;
	int err = 0;

	if (arg_type == ARG_DONTCARE)
		return 0;

	err = check_reg_arg(env, regno, SRC_OP);
	if (err)
		return err;

	if (arg_type == ARG_ANYTHING) {
		if (is_pointer_value(env, regno)) {
			verbose(env, "R%d leaks addr into helper function\n",
				regno);
			return -EACCES;
		}
		return 0;
	}

	if (type_is_pkt_pointer(type) &&
	    !may_access_direct_pkt_data(env, meta, BPF_READ)) {
		verbose(env, "helper access to the packet is not allowed\n");
		return -EACCES;
	}

	if (arg_type == ARG_PTR_TO_MAP_KEY ||
	    arg_type == ARG_PTR_TO_MAP_VALUE) {
		expected_type = PTR_TO_STACK;
		if (!type_is_pkt_pointer(type) && type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_CONST_SIZE ||
		   arg_type == ARG_CONST_SIZE_OR_ZERO) {
		expected_type = SCALAR_VALUE;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_CONST_MAP_PTR) {
		expected_type = CONST_PTR_TO_MAP;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_PTR_TO_CTX) {
		expected_type = PTR_TO_CTX;
		if (type != expected_type)
			goto err_type;
	} else if (arg_type == ARG_PTR_TO_MEM ||
		   arg_type == ARG_PTR_TO_MEM_OR_NULL ||
		   arg_type ==
ARG_PTR_TO_UNINIT_MEM) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ if (register_is_null(*reg) && arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->key_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } if (type_is_pkt_pointer(type)) err = check_packet_access(env, regno, reg->off, meta->map_ptr->value_size, false); else err = check_stack_boundary(env, regno, meta->map_ptr->value_size, false, NULL); } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* bpf_xxx(..., buf, len) call will access 'len' bytes * from stack pointer 'buf'. Check it * note: regno == len, regno - 1 == buf */ if (regno == 0) { /* kernel subsystem misconfigured verifier */ verbose(env, "ARG_CONST_SIZE cannot be first argument\n"); return -EACCES; } /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. */ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... 
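 * E.g. a BPF_MAP_TYPE_PROG_ARRAY may only be passed to
 * bpf_tail_call(), and conversely bpf_tail_call() only accepts a prog
 * array; checking a single direction would let an unlisted map/helper
 * pairing slip through.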
*/ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_perf_event_read_value) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. */ case BPF_MAP_TYPE_DEVMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; /* Restrict bpf side of cpumap, open when use-cases appear */ case BPF_MAP_TYPE_CPUMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem) goto error; break; default: break; } /* ... and second from the function itself. */ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_CPUMAP) goto error; break; case BPF_FUNC_sk_redirect_map: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static int check_raw_mode(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; return count > 1 ? -EINVAL : 0; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. 
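 * E.g. after a helper such as bpf_skb_pull_data(), which may
 * reallocate the linear data area, every packet pointer previously
 * derived (whether live in a register or spilled to the stack) must be
 * re-derived from the context before it is dereferenced again.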
*/ static void clear_all_pkt_pointers(struct bpf_verifier_env *env) { struct bpf_verifier_state *state = env->cur_state; struct bpf_reg_state *regs = state->regs, *reg; int i; for (i = 0; i < MAX_BPF_REG; i++) if (reg_is_pkt_pointer_any(&regs[i])) mark_reg_unknown(env, regs, i); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg_is_pkt_pointer_any(reg)) __mark_reg_unknown(reg); } } static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL only function from proprietary program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. */ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; /* We only support one arg being in raw mode at the moment, which * is sufficient for the helper functions we have right now. */ err = check_raw_mode(fn); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. 
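 * E.g. for a raw-mode argument pair (buf = fp-8, len = 8) recorded in
 * meta by check_stack_boundary(), the loop below issues eight 1-byte
 * writes so that the slots fp-8..fp-1 become STACK_MISC.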
*/ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1); if (err) return err; } regs = cur_regs(env); /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { struct bpf_insn_aux_data *insn_aux; regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; insn_aux = &env->insn_aux_data[insn_idx]; if (!insn_aux->map_ptr) insn_aux->map_ptr = meta.map_ptr; else if (insn_aux->map_ptr != meta.map_ptr) insn_aux->map_ptr = BPF_MAP_PTR_POISON; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if (changes_data) clear_all_pkt_pointers(env); return 0; } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ s64 res = (s64)((u64)a + (u64)b); if (b < 0) return res > a; return res < a; } static bool signed_sub_overflows(s64 a, s64 b) { /* Do the sub in u64, where overflow is well-defined */ s64 res = (s64)((u64)a - (u64)b); if (b < 0) return res < a; return res > a; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
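 * E.g. "scalar -= pointer" returns -EACCES from here; a privileged
 * caller (env->allow_ptr_leaks set) then retries the operation as
 * plain scalar arithmetic, while an unprivileged program is rejected
 * with the diagnostic emitted below.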
*/ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u8 opcode = BPF_OP(insn->code); u32 dst = insn->dst_reg; dst_reg = &regs[dst]; if (WARN_ON_ONCE(known && (smin_val != smax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad sbounds\n"); return -EINVAL; } if (WARN_ON_ONCE(known && (umin_val != umax_val))) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: known but bad ubounds\n"); return -EINVAL; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ if (!env->allow_ptr_leaks) verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", dst); return -EACCES; } if (ptr_reg->type == CONST_PTR_TO_MAP) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_PACKET_END) { if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", dst); return -EACCES; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. */ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; switch (opcode) { case BPF_ADD: /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. 
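 * For illustration (example bounds): "pkt_ptr(off=8) += scalar in
 * [0, 6]" keeps the fixed off = 8, folds the scalar into the variable
 * offset and bounds, takes a fresh id, and resets range to 0 until the
 * pointer is compared against pkt_end again.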
 */
		if (signed_add_overflows(smin_ptr, smin_val) ||
		    signed_add_overflows(smax_ptr, smax_val)) {
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value = smin_ptr + smin_val;
			dst_reg->smax_value = smax_ptr + smax_val;
		}
		if (umin_ptr + umin_val < umin_ptr ||
		    umax_ptr + umax_val < umax_ptr) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value = umin_ptr + umin_val;
			dst_reg->umax_value = umax_ptr + umax_val;
		}
		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
		dst_reg->off = ptr_reg->off;
		if (reg_is_pkt_pointer(ptr_reg)) {
			dst_reg->id = ++env->id_gen;
			/* something was added to pkt_ptr, set range to zero */
			dst_reg->range = 0;
		}
		break;
	case BPF_SUB:
		if (dst_reg == off_reg) {
			/* scalar -= pointer. Creates an unknown scalar */
			if (!env->allow_ptr_leaks)
				verbose(env, "R%d tried to subtract pointer from scalar\n",
					dst);
			return -EACCES;
		}
		/* We don't allow subtraction from FP, because (according to
		 * the test_verifier.c test "invalid fp arithmetic") JITs might
		 * not be able to deal with it.
		 */
		if (ptr_reg->type == PTR_TO_STACK) {
			if (!env->allow_ptr_leaks)
				verbose(env, "R%d subtraction from stack pointer prohibited\n",
					dst);
			return -EACCES;
		}
		if (known && (ptr_reg->off - smin_val ==
			      (s64)(s32)(ptr_reg->off - smin_val))) {
			/* pointer -= K. Subtract it from fixed offset */
			dst_reg->smin_value = smin_ptr;
			dst_reg->smax_value = smax_ptr;
			dst_reg->umin_value = umin_ptr;
			dst_reg->umax_value = umax_ptr;
			dst_reg->var_off = ptr_reg->var_off;
			dst_reg->id = ptr_reg->id;
			dst_reg->off = ptr_reg->off - smin_val;
			dst_reg->range = ptr_reg->range;
			break;
		}
		/* A new variable offset is created. If the subtrahend is known
		 * nonnegative, then any reg->range we had before is still good.
		 */
		if (signed_sub_overflows(smin_ptr, smax_val) ||
		    signed_sub_overflows(smax_ptr, smin_val)) {
			/* Overflow possible, we know nothing */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value = smin_ptr - smax_val;
			dst_reg->smax_value = smax_ptr - smin_val;
		}
		if (umin_ptr < umax_val) {
			/* Overflow possible, we know nothing */
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			/* Cannot overflow (as long as bounds are consistent) */
			dst_reg->umin_value = umin_ptr - umax_val;
			dst_reg->umax_value = umax_ptr - umin_val;
		}
		dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
		dst_reg->off = ptr_reg->off;
		if (reg_is_pkt_pointer(ptr_reg)) {
			dst_reg->id = ++env->id_gen;
			/* a possibly negative value was subtracted from
			 * pkt_ptr, so the previously checked range no longer
			 * holds; reset it
			 */
			if (smin_val < 0)
				dst_reg->range = 0;
		}
		break;
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
		/* bitwise ops on pointers are troublesome, prohibit for now.
		 * (However, in principle we could allow some cases, e.g.
		 * ptr &= ~3 which would reduce min_value by 3.)
		 */
		if (!env->allow_ptr_leaks)
			verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
				dst, bpf_alu_string[opcode >> 4]);
		return -EACCES;
	default:
		/* other operators (e.g.
MUL,LSH) produce non-pointer results */ if (!env->allow_ptr_leaks) verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->64 */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. 
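 * E.g. for "r1 &= 0xf" (example constant), tnum_and() yields
 * var_off = (0; 0xf), so umin becomes 0 and umax becomes
 * min(old umax, 0xf).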
 */
		dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off);
		dst_reg->umin_value = dst_reg->var_off.value;
		dst_reg->umax_value = min(dst_reg->umax_value, umax_val);
		if (dst_reg->smin_value < 0 || smin_val < 0) {
			/* Lose signed bounds when ANDing negative numbers,
			 * ain't nobody got time for that.
			 */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			/* ANDing two positives gives a positive, so safe to
			 * cast result into s64.
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_OR:
		if (src_known && dst_known) {
			__mark_reg_known(dst_reg, dst_reg->var_off.value |
					 src_reg.var_off.value);
			break;
		}
		/* We get our maximum from the var_off, and our minimum is the
		 * maximum of the operands' minima
		 */
		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
		dst_reg->umax_value = dst_reg->var_off.value |
				      dst_reg->var_off.mask;
		if (dst_reg->smin_value < 0 || smin_val < 0) {
			/* Lose signed bounds when ORing negative numbers,
			 * ain't nobody got time for that.
			 */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			/* ORing two positives gives a positive, so safe to
			 * cast result into s64.
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_LSH:
		if (umax_val > 63) {
			/* Shifts greater than 63 are undefined. This includes
			 * shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* We lose all sign bit information (except what we can pick
		 * up from var_off)
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		/* If we might shift our top bit out, then we know nothing */
		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value <<= umin_val;
			dst_reg->umax_value <<= umax_val;
		}
		if (src_known)
			dst_reg->var_off = tnum_lshift(dst_reg->var_off,
						       umin_val);
		else
			dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_RSH:
		if (umax_val > 63) {
			/* Shifts greater than 63 are undefined. This includes
			 * shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* BPF_RSH is an unsigned shift. If the value in dst_reg might
		 * be negative, then either:
		 * 1) src_reg might be zero, so the sign bit of the result is
		 *    unknown, so we lose our signed bounds
		 * 2) it's known negative, thus the unsigned bounds capture the
		 *    signed bounds
		 * 3) the signed bounds cross zero, so they tell us nothing
		 *    about the result
		 * If the value in dst_reg is known nonnegative, then again the
		 * unsigned bounds capture the signed bounds.
		 * Thus, in all cases it suffices to blow away our signed
		 * bounds and rely on inferring new ones from the unsigned
		 * bounds and var_off of the result.
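 * E.g. (example values) a dst_reg known to lie in [-2, -1] has
 * umin = 0xff...fe and umax = 0xff...ff; shifting those unsigned
 * bounds right by 1 pins the result near 0x7f...ff, which the stale
 * signed bounds could not have expressed directly.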
*/ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; if (src_known) dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); else dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max * and var_off. */ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; u8 opcode = BPF_OP(insn->code); int rc; dst_reg = &regs[insn->dst_reg]; src_reg = NULL; if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; if (BPF_SRC(insn->code) == BPF_X) { src_reg = &regs[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { if (dst_reg->type != SCALAR_VALUE) { /* Combining two pointers by any ALU op yields * an arbitrary scalar. */ if (!env->allow_ptr_leaks) { verbose(env, "R%d pointer %s pointer prohibited\n", insn->dst_reg, bpf_alu_string[opcode >> 4]); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); return 0; } else { /* scalar += pointer * This is legal, but we have to reverse our * src/dest handling in computing the range */ rc = adjust_ptr_min_max_vals(env, insn, src_reg, dst_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* scalar += unknown scalar */ __mark_reg_unknown(&off_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } else if (ptr_reg) { /* pointer += scalar */ rc = adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += scalar */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, *src_reg); } return rc; } } else { /* Pretend the src is a reg with a known value, since we only * need to be able to read from this state. 
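 * E.g. for BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), off_reg below
 * becomes a SCALAR_VALUE with smin = smax = -8 and a constant var_off,
 * exactly as if a register held that immediate.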
*/ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) { /* pointer += K */ rc = adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); if (rc == -EACCES && env->allow_ptr_leaks) { /* unknown scalar += K */ __mark_reg_unknown(dst_reg); return adjust_scalar_min_max_vals( env, insn, dst_reg, off_reg); } return rc; } } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, env->cur_state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); coerce_reg_to_size(&regs[insn->dst_reg], 4); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... 
*/ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void find_good_pkt_pointers(struct bpf_verifier_state *state, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { struct bpf_reg_state *regs = state->regs, *reg; u16 new_range; int i; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. */ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. */ for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == type && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ regs[i].range = max(regs[i].range, new_range); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } } /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. * In JEQ/JNE cases we also adjust the var_off values. 
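 * E.g. for "if (r1 > 7) goto ..." (example constant), the fall-through
 * state learns umax = min(umax, 7) and the branch-taken state learns
 * umin = max(umin, 8), per the BPF_JGT case below.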
 */
static void reg_set_min_max(struct bpf_reg_state *true_reg,
			    struct bpf_reg_state *false_reg, u64 val,
			    u8 opcode)
{
	/* If the dst_reg is a pointer, we can't learn anything about its
	 * variable offset from the compare (unless src_reg were a pointer
	 * into the same object, but we don't bother with that).
	 * Since false_reg and true_reg have the same type by construction, we
	 * only need to check one of them for pointerness.
	 */
	if (__is_pointer_value(false, false_reg))
		return;

	switch (opcode) {
	case BPF_JEQ:
		/* If this is false then we know nothing Jon Snow, but if it is
		 * true then we know for sure.
		 */
		__mark_reg_known(true_reg, val);
		break;
	case BPF_JNE:
		/* If this is true we know nothing Jon Snow, but if it is false
		 * we know the value for sure.
		 */
		__mark_reg_known(false_reg, val);
		break;
	case BPF_JGT:
		false_reg->umax_value = min(false_reg->umax_value, val);
		true_reg->umin_value = max(true_reg->umin_value, val + 1);
		break;
	case BPF_JSGT:
		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
		break;
	case BPF_JLT:
		false_reg->umin_value = max(false_reg->umin_value, val);
		true_reg->umax_value = min(true_reg->umax_value, val - 1);
		break;
	case BPF_JSLT:
		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
		break;
	case BPF_JGE:
		false_reg->umax_value = min(false_reg->umax_value, val - 1);
		true_reg->umin_value = max(true_reg->umin_value, val);
		break;
	case BPF_JSGE:
		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
		break;
	case BPF_JLE:
		false_reg->umin_value = max(false_reg->umin_value, val + 1);
		true_reg->umax_value = min(true_reg->umax_value, val);
		break;
	case BPF_JSLE:
		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
		break;
	default:
		break;
	}

	__reg_deduce_bounds(false_reg);
	__reg_deduce_bounds(true_reg);
	/* We might have learned some bits from the bounds. */
	__reg_bound_offset(false_reg);
	__reg_bound_offset(true_reg);
	/* Intersecting with the old var_off might have improved our bounds
	 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__update_reg_bounds(false_reg);
	__update_reg_bounds(true_reg);
}

/* Same as above, but for the case that dst_reg holds a constant and src_reg is
 * the variable reg.
 */
static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
				struct bpf_reg_state *false_reg,
				u64 val, u8 opcode)
{
	if (__is_pointer_value(false, false_reg))
		return;

	switch (opcode) {
	case BPF_JEQ:
		/* If this is false then we know nothing Jon Snow, but if it is
		 * true then we know for sure.
*/ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. 
*/ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, struct bpf_reg_state *true_dst, struct bpf_reg_state *false_src, struct bpf_reg_state *false_dst, u8 opcode) { switch (opcode) { case BPF_JEQ: __reg_combine_min_max(true_src, true_dst); break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); break; } } static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, bool is_null) { struct bpf_reg_state *reg = &regs[regno]; if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { __mark_reg_known_zero(reg); reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; } else { reg->type = PTR_TO_MAP_VALUE; } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances * to take effect. */ reg->id = 0; } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, bool is_null) { struct bpf_reg_state *regs = state->regs; u32 id = regs[regno].id; int i; for (i = 0; i < MAX_BPF_REG; i++) mark_map_reg(regs, i, id, is_null); for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); } } static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg, struct bpf_verifier_state *this_branch, struct bpf_verifier_state *other_branch) { if (BPF_SRC(insn->code) != BPF_X) return false; switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); } else { return false; } break; case BPF_JLT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ 
find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *other_branch, *this_branch = env->cur_state; struct bpf_reg_state *regs = this_branch->regs, *dst_reg; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; /* detect if R == 0 where R was initialized to zero earlier */ if (BPF_SRC(insn->code) == BPF_K && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == SCALAR_VALUE && tnum_equals_const(dst_reg->var_off, insn->imm)) { if (opcode == BPF_JEQ) { /* if (imm == imm) goto pc+off; * only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else { /* if (imm != imm) goto pc+off; * only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); if (!other_branch) return -EFAULT; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
*/ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch->regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch->regs[insn->src_reg], &other_branch->regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch->regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { /* Mark all identical map registers in each branch as either * safe or unknown depending on R == 0 or R != 0 conditional. */ mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch); return 0; } /* return the map pointer stored inside BPF_LD_IMM64 instruction */ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) { u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; return (struct bpf_map *) (unsigned long) imm64; } /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { verbose(env, "invalid BPF_LD_IMM insn\n"); return -EINVAL; } if (insn->off != 0) { verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); return -EINVAL; } err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); regs[insn->dst_reg].type = CONST_PTR_TO_MAP; regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); return 0; } static bool may_access_skb(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; } } /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, * preserve R6-R9, and store return value into R0 * * Implicit input: * ctx == skb == R6 == CTX * * Explicit input: * SRC == any register * IMM == 32-bit immediate * * Output: * R0 - 8/16/32-bit skb data converted to cpu endianness */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; if (!may_access_skb(env->prog->type)) { verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for 
this program type\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } /* check whether implicit source operand (register R6) is readable */ err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; } if (mode == BPF_IND) { /* check explicit source operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* mark destination R0 register as readable, since it contains * the value fetched from the packet. * Already marked as written above. */ mark_reg_unknown(env, regs, BPF_REG_0); return 0; } static int check_return_code(struct bpf_verifier_env *env) { struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } verbose(env, " should have been 0 or 1\n"); return -EINVAL; } return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static int *insn_stack; /* stack of insns to process */ static int cur_stack; /* current stack index */ static int *insn_state; /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ env->explored_states[w] = STATE_LIST_MARK; if 
(insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (cur_stack >= env->prog->len) return -E2BIG; insn_stack[cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int ret = 0; int i, t; insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return -ENOMEM; insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ cur_stack = 1; peek_stack: if (cur_stack == 0) goto check_state; t = insn_stack[cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else { /* conditional jump with two edges */ env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] != EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kfree(insn_state); kfree(insn_stack); return ret; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. 
* Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. */ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) { unsigned int i; for (i = 0; i < ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; idmap[i].cur = cur_id; return true; } if (idmap[i].old == old_id) return idmap[i].cur == cur_id; } /* We ran out of idmap slots, which should be impossible */ WARN_ON_ONCE(1); return false; } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, live)) == 0) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* if we knew anything about the old value, we're not * equal, because we can't know anything about the * scalar value of the pointer in the new value. */ return rold->umin_value == 0 && rold->umax_value == U64_MAX && rold->smin_value == S64_MIN && rold->smax_value == S64_MAX && tnum_is_unknown(rold->var_off); } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- * checked, doing so could have affected others with the same * id, and we can't check for that because we lost the id when * we converted to a PTR_TO_MAP_VALUE. */ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) return false; if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) return false; /* Check our ids match any regs they're supposed to */ return check_ids(rold->id, rcur->id, idmap); case PTR_TO_PACKET_META: case PTR_TO_PACKET: if (rcur->type != rold->type) return false; /* We must have at least as much range as the old ptr * did, so that any accesses which were safe before are * still safe. This is true even if old range < old off, * since someone could have accessed through (ptr - k), or * even done ptr -= k in a register, to get a safe access. */ if (rold->range > rcur->range) return false; /* If the offsets don't match, we can't trust our alignment; * nor can we be sure that we won't fall out of range. 
*/ if (rold->off != rcur->off) return false; /* id relations must be preserved */ if (rold->id && !check_ids(rold->id, rcur->id, idmap)) return false; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_CTX: case CONST_PTR_TO_MAP: case PTR_TO_STACK: case PTR_TO_PACKET_END: /* Only valid matches are exact, which memcmp() above * would have accepted */ default: /* Don't know what's going on, just say it's not safe */ return false; } /* Shouldn't get here; if we do, say it's not safe */ WARN_ON_ONCE(1); return false; } static bool stacksafe(struct bpf_verifier_state *old, struct bpf_verifier_state *cur, struct idpair *idmap) { int i, spi; /* if explored stack has more populated slots than current stack * such stacks are not equivalent */ if (old->allocated_stack > cur->allocated_stack) return false; /* walk slots of the explored stack and ignore any additional * slots in the current stack, since explored(safe) state * didn't use them */ for (i = 0; i < old->allocated_stack; i++) { spi = i / BPF_REG_SIZE; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) continue; if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != cur->stack[spi].slot_type[i % BPF_REG_SIZE]) /* Ex: old explored (safe) state has STACK_SPILL in * this stack slot, but current has STACK_MISC -> * these verifier states are not equivalent, * return false to continue verification of this path */ return false; if (i % BPF_REG_SIZE) continue; if (old->stack[spi].slot_type[0] != STACK_SPILL) continue; if (!regsafe(&old->stack[spi].spilled_ptr, &cur->stack[spi].spilled_ptr, idmap)) /* when explored and current stack slot are both storing * spilled registers, check that stored pointer types * are the same as well. * Ex: explored safe path could have stored * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} * but current path has stored: * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} * such verifier states are not equivalent. * return false to continue verification of this path */ return false; } return true; } /* compare two verifier states * * all states stored in state_list are known to be valid, since * verifier reached 'bpf_exit' instruction through them * * this function is called when the verifier explores different branches of * execution popped from the state stack. If it sees an old state that has * more strict register state and more strict stack state then this execution * branch doesn't need to be explored further, since verifier already * concluded that more strict state leads to valid finish. * * Therefore two states are equivalent if register state is more conservative * and explored stack state is more conservative than the current one. * Example: * explored current * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) * * In other words if current stack state (one being explored) has more * valid slots than old one that already passed validation, it means * the verifier can stop exploring and conclude that current state is valid too * * Similarly with registers. 
If explored state has register type as invalid * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { struct idpair *idmap; bool ret = false; int i; idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); /* If we failed to allocate the idmap, just say it's not safe */ if (!idmap) return false; for (i = 0; i < MAX_BPF_REG; i++) { if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } if (!stacksafe(old, cur, idmap)) goto out_free; ret = true; out_free: kfree(idmap); return ret; } /* A write screens off any subsequent reads; but write marks come from the * straight-line code between a state and its parent. When we arrive at a * jump target (in the first iteration of the propagate_liveness() loop), * we didn't arrive by the straight-line code, so read marks in state must * propagate to parent regardless of state's write marks. */ static bool do_propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { bool writes = parent == state->parent; /* Observe write marks */ bool touched = false; /* any changes made? */ int i; if (!parent) return touched; /* Propagate read liveness of registers... */ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); /* We don't need to worry about FP liveness because it's read-only */ for (i = 0; i < BPF_REG_FP; i++) { if (parent->regs[i].live & REG_LIVE_READ) continue; if (writes && (state->regs[i].live & REG_LIVE_WRITTEN)) continue; if (state->regs[i].live & REG_LIVE_READ) { parent->regs[i].live |= REG_LIVE_READ; touched = true; } } /* ... and stack slots */ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) { if (parent->stack[i].slot_type[0] != STACK_SPILL) continue; if (state->stack[i].slot_type[0] != STACK_SPILL) continue; if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (writes && (state->stack[i].spilled_ptr.live & REG_LIVE_WRITTEN)) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) { parent->stack[i].spilled_ptr.live |= REG_LIVE_READ; touched = true; } } return touched; } /* "parent" is "a state from which we reach the current state", but initially * it is not the state->parent (i.e. "the state whose straight-line code leads * to the current state"), instead it is the state that happened to arrive at * a (prunable) equivalent of the current state. See comment above * do_propagate_liveness() for consequences of this. * This function is just a more efficient way of calling mark_reg_read() or * mark_stack_slot_read() on each reg in "parent" that is read in "state", * though it requires that parent != state->parent in the call arguments. 
*/ static void propagate_liveness(const struct bpf_verifier_state *state, struct bpf_verifier_state *parent) { while (do_propagate_liveness(state, parent)) { /* Something changed, so we need to feed those changes onward */ state = parent; parent = state->parent; } } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; struct bpf_verifier_state *cur = env->cur_state; int i, err; sl = env->explored_states[insn_idx]; if (!sl) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; while (sl != STATE_LIST_MARK) { if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. * If we have any write marks in env->cur_state, they * will prevent corresponding reads in the continuation * from reaching our parent (an explored_state). Our * own state will get the read marks recorded, but * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ propagate_liveness(&sl->state, cur); return 1; } sl = sl->next; } /* there were no equivalent states, remember current one. * technically the current state is not proven to be safe yet, * but it will either reach bpf_exit (which means it's safe) or * it will be rejected. Since there are no loops, we won't be * seeing this 'insn_idx' instruction again on the way to bpf_exit */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ err = copy_verifier_state(&new_sl->state, cur); if (err) { free_verifier_state(&new_sl->state, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ cur->parent = &new_sl->state; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) */ for (i = 0; i < BPF_REG_FP; i++) cur->regs[i].live = REG_LIVE_NONE; for (i = 0; i < cur->allocated_stack / BPF_REG_SIZE; i++) if (cur->stack[i].slot_type[0] == STACK_SPILL) cur->stack[i].spilled_ptr.live = REG_LIVE_NONE; return 0; } static int ext_analyzer_insn_hook(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) { if (env->dev_ops && env->dev_ops->insn_hook) return env->dev_ops->insn_hook(env, insn_idx, prev_insn_idx); return 0; } static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len; int insn_idx, prev_insn_idx = 0; int insn_processed = 0; bool do_print_state = false; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; env->cur_state = state; init_reg_state(env, state->regs); state->parent = NULL; insn_idx = 0; for (;;) { struct bpf_insn *insn; u8 class; int err; if (insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", insn_idx, insn_cnt); return -EFAULT; } insn = &insns[insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. 
Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d: safe\n", prev_insn_idx, insn_idx); else verbose(env, "%d: safe\n", insn_idx); } goto process_bpf_exit; } if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", insn_idx); else verbose(env, "\nfrom %d to %d:", prev_insn_idx, insn_idx); print_verifier_state(env, state); do_print_state = false; } if (env->log.level) { verbose(env, "%d: ", insn_idx); print_bpf_insn(verbose, env, insn, env->allow_ptr_leaks); } err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx); if (err) return err; regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* check for reserved fields is already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg); if (err) return err; prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (src_reg_type != *prev_src_type && (src_reg_type == PTR_TO_CTX || *prev_src_type == PTR_TO_CTX)) { /* Abuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it. 
*/ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn_idx, insn); if (err) return err; insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg); if (err) return err; prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (dst_reg_type != *prev_dst_type && (dst_reg_type == PTR_TO_CTX || *prev_dst_type == PTR_TO_CTX)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } err = check_call(env, insn->imm, insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } /* eBPF calling convetion is such that R0 is used * to return the value from eBPF program. 
* Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &prev_insn_idx, &insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; insn_idx++; env->insn_aux_data[insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } insn_idx++; } verbose(env, "processed %d insns, stack depth %d\n", insn_processed, env->prog->aux->stack_depth); return 0; } static int check_map_prealloc(struct bpf_map *map) { return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || !(map->map_flags & BPF_F_NO_PREALLOC); } static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) { /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use * preallocated hash maps, since doing memory allocation * in overflow_handler can crash depending on where nmi got * triggered. */ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { if (!check_map_prealloc(map)) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) { verbose(env, "perf_event programs can only use preallocated inner hash map\n"); return -EINVAL; } } return 0; } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn->src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; if (insn->src_reg != BPF_PSEUDO_MAP_FD) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn->imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn->imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } /* store map pointer inside BPF_LD_IMM64 instruction */ insn[0].imm = (u32) (unsigned long) 
map; insn[1].imm = ((u64) (unsigned long) map) >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) if (env->used_maps[j] == map) { fdput(f); goto next_insn; } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_bpf_prog_info() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { fdput(f); return PTR_ERR(map); } env->used_maps[env->used_map_cnt++] = map; fdput(f); next_insn: insn++; i++; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. */ return 0; } /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { int i; for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) insn->src_reg = 0; } /* single env->prog->insni[off] instruction was replaced with the range * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; int i; if (cnt == 1) return 0; new_data = vzalloc(sizeof(struct bpf_insn_aux_data) * prog_len); if (!new_data) return -ENOMEM; memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; } static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (!new_prog) return NULL; if (adjust_insn_aux_data(env, new_prog->len, off, len)) return NULL; return new_prog; } /* The verifier does more data flow analysis than llvm and will not explore * branches that are dead at run time. Malicious programs can have dead code * too. Therefore replace all dead at-run-time code with nops. 
*/ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &nop, sizeof(nop)); } } /* convert load instructions that access fields of 'struct __sk_buff' * into a sequence of instructions that access fields of 'struct sk_buff' */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; u32 target_size; if (ops->gen_prologue) { cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (!ops->convert_ctx_access) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) continue; ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimize program type specific * convert_ctx_access changes. If conversion is successful, * we will apply proper mask to the result. 
*/ is_narrower_load = size < ctx_field_size; if (is_narrower_load) { u32 off = insn->off; u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(ctx_field_size - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { if (ctx_field_size <= 4) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); else insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as an explicit sequence of BPF instructions * * this function is called after eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL)) continue; if (insn->imm == BPF_FUNC_get_route_realm) prog->dst_needed = 1; if (insn->imm == BPF_FUNC_get_prandom_u32) bpf_user_rnd_init_once(); if (insn->imm == BPF_FUNC_tail_call) { /* If we tail call into other programs, we * cannot make any assumptions since they can * be replaced dynamically during runtime in * the program array. */ prog->cb_access = 1; env->prog->aux->stack_depth = MAX_BPF_STACK; /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpreter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; continue; } /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * handlers are currently limited to 64 bit only. */ if (ebpf_jit_enabled() && BITS_PER_LONG == 64 && insn->imm == BPF_FUNC_map_lookup_elem) { map_ptr = env->insn_aux_data[i + delta].map_ptr; if (map_ptr == BPF_MAP_PTR_POISON || !map_ptr->ops->map_gen_lookup) goto patch_call_imm; cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->imm == BPF_FUNC_redirect_map) { /* Note, we cannot use prog directly as imm as subsequent * rewrites would still change the prog pointer. The only * stable address we can use is aux, which also works with * prog clones during blinding. 
*/ u64 addr = (unsigned long)prog->aux; struct bpf_insn r4_ld[] = { BPF_LD_IMM64(BPF_REG_4, addr), *insn, }; cnt = ARRAY_SIZE(r4_ld); new_prog = bpf_patch_insn_data(env, i + delta, r4_ld, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm); /* all functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; if (!env->explored_states) return; for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; if (sl) while (sl != STATE_LIST_MARK) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } } kfree(env->explored_states); } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) { struct bpf_verifier_env *env; struct bpf_verifer_log *log; int ret = -EINVAL; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; env->insn_aux_data = vzalloc(sizeof(struct bpf_insn_aux_data) * (*prog)->len); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || !log->level || !log->ubuf) goto err_unlock; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; if (env->prog->aux->offload) { ret = bpf_prog_offload_verifier_prep(env); if (ret) goto err_unlock; } ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; env->explored_states = kcalloc(env->prog->len, sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; ret = check_cfg(env); if (ret < 0) goto skip_full_check; env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = do_check(env); if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) sanitize_dead_code(env); if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if 
(!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_bpf_prog_info() will release them. */ release_maps(env); *prog = env->prog; err_unlock: mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
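The log handling at the top of bpf_check() maps one-to-one onto the attributes a user-space loader fills in for the bpf(2) syscall. As a minimal standalone sketch (illustration only, not part of this dataset record), the fragment below loads a trivial two-instruction program and requests a verification trace; the log_size floor of 128 bytes enforced in bpf_check() surfaces to user space as -EINVAL.

/* Sketch: invoking the verifier entry point above via bpf(BPF_PROG_LOAD). */
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct bpf_insn prog[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0 }, /* r0 = 0 */
		{ .code = BPF_JMP | BPF_EXIT },                                /* exit   */
	};
	char log[4096] = "";
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (unsigned long)prog;
	attr.insn_cnt  = sizeof(prog) / sizeof(prog[0]);
	attr.license   = (unsigned long)"GPL";
	attr.log_level = 1;            /* request a verification trace */
	attr.log_buf   = (unsigned long)log;
	attr.log_size  = sizeof(log);  /* must be >= 128, see bpf_check() */

	int fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd < 0)
		fprintf(stderr, "verifier rejected program:\n%s\n", log);
	return fd < 0;
}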
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); /* high 32 bits are known zero. */ regs[insn->dst_reg].var_off = tnum_cast( regs[insn->dst_reg].var_off, 4); __update_reg_bounds(&regs[insn->dst_reg]); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; }
static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); coerce_reg_to_size(&regs[insn->dst_reg], 4); } } else { /* case: R = imm * remember the value we stored into this reg */ regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... */ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; }
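The only behavioral difference between the two versions of check_alu_op() above is the 32-bit move case ("R1 = (u32) R2"): the patched code funnels the truncation through coerce_reg_to_size() instead of open-coding tnum_cast() plus __update_reg_bounds(). A hypothetical two-instruction fragment that drives exactly this path (sketch only; the register choice is arbitrary):

#include <linux/bpf.h>

/* r2 = -1 (sign-extended to 64 bits), then a 32-bit move r1 = (u32) r2.
 * The second instruction is what the "R1 = (u32) R2" branch above models:
 * the verifier marks r1 unknown and truncates its tracked state to 4 bytes.
 */
static const struct bpf_insn mov32_example[] = {
	{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_2, .imm = -1 },
	{ .code = BPF_ALU | BPF_MOV | BPF_X, .dst_reg = BPF_REG_1, .src_reg = BPF_REG_2 },
};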
{'added': [(1070, '/* truncate register to smaller size (in bytes)'), (1071, ' * must be called with size < BPF_REG_SIZE'), (1072, ' */'), (1073, 'static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)'), (1074, '{'), (1075, '\tu64 mask;'), (1076, ''), (1077, '\t/* clear high bits in bit representation */'), (1078, '\treg->var_off = tnum_cast(reg->var_off, size);'), (1079, ''), (1080, '\t/* fix arithmetic bounds */'), (1081, '\tmask = ((u64)1 << (size * 8)) - 1;'), (1082, '\tif ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {'), (1083, '\t\treg->umin_value &= mask;'), (1084, '\t\treg->umax_value &= mask;'), (1085, '\t} else {'), (1086, '\t\treg->umin_value = 0;'), (1087, '\t\treg->umax_value = mask;'), (1088, '\t}'), (1089, '\treg->smin_value = reg->umin_value;'), (1090, '\treg->smax_value = reg->umax_value;'), (1091, '}'), (1092, ''), (1226, '\t\tcoerce_reg_to_size(&regs[value_regno], size);'), (2033, '\t\tcoerce_reg_to_size(dst_reg, 4);'), (2034, '\t\tcoerce_reg_to_size(&src_reg, 4);'), (2414, '\t\t\t\tcoerce_reg_to_size(&regs[insn->dst_reg], 4);')], 'deleted': [(1203, '\t\tregs[value_regno].var_off ='), (1204, '\t\t\ttnum_cast(regs[value_regno].var_off, size);'), (1205, '\t\t__update_reg_bounds(&regs[value_regno]);'), (1775, 'static void coerce_reg_to_32(struct bpf_reg_state *reg)'), (1776, '{'), (1777, '\t/* clear high 32 bits */'), (1778, '\treg->var_off = tnum_cast(reg->var_off, 4);'), (1779, '\t/* Update bounds */'), (1780, '\t__update_reg_bounds(reg);'), (1781, '}'), (1782, ''), (2020, '\t\tcoerce_reg_to_32(dst_reg);'), (2021, '\t\tcoerce_reg_to_32(&src_reg);'), (2401, '\t\t\t\t/* high 32 bits are known zero. */'), (2402, '\t\t\t\tregs[insn->dst_reg].var_off = tnum_cast('), (2403, '\t\t\t\t\t\tregs[insn->dst_reg].var_off, 4);'), (2404, '\t\t\t\t__update_reg_bounds(&regs[insn->dst_reg]);')]}
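The substance of this fix (CVE-2017-16996, CWE-119) is visible in the added coerce_reg_to_size() hunk above: truncating a tracked register to a smaller width must clamp the arithmetic bounds (umin/umax, mirrored into smin/smax), not just cast the tnum, otherwise the verifier carries stale 64-bit bounds past a 32-bit move. A minimal standalone sketch of the bounds-clamping step under that reading; RegBounds is a hypothetical stand-in for struct bpf_reg_state and models only the unsigned range:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the verifier's register state; only the
// unsigned range from the diff is modelled here.
struct RegBounds {
  uint64_t umin;
  uint64_t umax;
};

// Mirrors the bounds-fixing part of coerce_reg_to_size() in the diff:
// size is in bytes and, per the diff's comment, must be < BPF_REG_SIZE (8).
void coerce_bounds_to_size(RegBounds* reg, int size) {
  uint64_t mask = (UINT64_C(1) << (size * 8)) - 1;
  if ((reg->umin & ~mask) == (reg->umax & ~mask)) {
    // High bits agree, so truncation keeps the range ordered.
    reg->umin &= mask;
    reg->umax &= mask;
  } else {
    // Truncation could wrap; fall back to the full truncated range.
    reg->umin = 0;
    reg->umax = mask;
  }
}

int main() {
  // A register known to lie in [0x1'0000'0000, 0x1'0000'00FF]:
  RegBounds r = {0x100000000ULL, 0x1000000FFULL};
  coerce_bounds_to_size(&r, 4);           // 32-bit move: R1 = (u32) R2
  assert(r.umin == 0 && r.umax == 0xFF);  // bounds now match the low 32 bits
  return 0;
}
```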
num_lines_added: 27
num_lines_deleted: 17
num_lines_in_file: 3,248
num_tokens_in_file: 20,663
num_lines_in_method: 117
num_tokens_in_method: 811
method_complexity: 49
repo: https://github.com/torvalds/linux
cve_id: CVE-2017-16996
cwe_id: CWE-119
id: 2,425
file_name: activations.cc
programming_language: C++
method_name: tflite::ops::builtin::activations::LogSoftmaxEval
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stddef.h> #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" #include "tensorflow/lite/kernels/internal/reference/logistic.h" #include "tensorflow/lite/kernels/internal/reference/prelu.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/reference/tanh.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #if __aarch64__ && __clang__ #include <arm_neon.h> #endif namespace tflite { namespace ops { namespace builtin { namespace activations { // TODO(b/142762739): We should figure out a multi-threading plan for most of // the activation ops below. 
enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized, }; struct OpData { int32_t input_multiplier = 0; int input_left_shift = 0; int32_t input_range_radius = 0; int diff_min = 0; uint8_t table[256] = {0}; }; struct SoftmaxOpData { struct SoftmaxParams params = {}; float table[256]; #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT uint8_t uint8_table1[256]; uint8_t uint8_table2[256]; #endif static constexpr int kInt16LUTArraySize = 513; int16_t exp_lut[kInt16LUTArraySize]; // int16 LUT for exp(x), where x uniform // distributed between [-10.0 , 0.0] int16_t one_over_one_plus_x_lut[kInt16LUTArraySize]; // int16 LUT for 1 / // (1 + x), where x // uniform distributed // between [0.0 , 1.0] }; struct LogSoftmaxOpData : public OpData { int32_t reverse_scaling_divisor = 0; int32_t reverse_scaling_right_shift = 0; struct SoftmaxParams params = {}; float f_table[256]; }; struct LeakyReluOpData : public OpData { int32_t output_multiplier_alpha = 0; int32_t output_shift_alpha = 0; int32_t output_multiplier_identity = 0; int32_t output_shift_identity = 0; }; struct PreluOpData : public OpData { int32_t output_multiplier_1 = 0; int32_t output_shift_1 = 0; int32_t output_multiplier_2 = 0; int32_t output_shift_2 = 0; bool requires_broadcast; }; struct HardSwishData { HardSwishParams params; }; struct ReluOpData : public OpData { int32_t output_multiplier = 0; int output_shift = 0; }; namespace { TfLiteStatus CheckOutputQuantParams(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* output) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); } return kTfLiteOk; } template <typename T> void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output, const std::function<float(float)>& transform) { static_assert(sizeof(T) == 1, "Lookup table valid only for 8bit"); const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits<T>::max(); int32_t minval = std::numeric_limits<T>::min(); for (int32_t val = minval; val <= maxval; ++val) { const float dequantized = input->params.scale * (val - input->params.zero_point); const float transformed = transform(dequantized); const float rescaled = std::round(transformed * inverse_scale); const int32_t quantized = static_cast<int32_t>(rescaled + output->params.zero_point); data->table[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>( static_cast<T>(std::max(std::min(maxval, quantized), minval))); } } // TODO(b/143696793): move this to optimized_ops. void EvalUsingLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); uint8_t* output_data = GetTensorData<uint8_t>(output); const uint8_t* input_data = GetTensorData<uint8_t>(input); int i = 0; #if __aarch64__ && __clang__ // This code uses ARM64-only instructions. // TODO(b/143709993): Port to ARMv7 // Load the tables into registers. (4*4 128-bit registers) uint8x16x4_t table[4]; table[0] = vld1q_u8_x4(data->table + 16 * 4 * 0); table[1] = vld1q_u8_x4(data->table + 16 * 4 * 1); table[2] = vld1q_u8_x4(data->table + 16 * 4 * 2); table[3] = vld1q_u8_x4(data->table + 16 * 4 * 3); // Vectorized loop; process uint8x16_t (16 elements) at a time. 
constexpr int vectorized_16_loop_step = 16; const int vectorized_16_loop_end = size / vectorized_16_loop_step * vectorized_16_loop_step; for (; i < vectorized_16_loop_end; i += vectorized_16_loop_step) { uint8x16_t input = vld1q_u8(input_data + i); uint8x16_t output = optimized_ops::aarch64_lookup_vector(table, input); vst1q_u8(output_data + i, output); } // Postamble and non-ARM64 code: simple for loop. #endif for (; i < size; ++i) { output_data[i] = data->table[input_data[i]]; } } template <typename T> void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input, TfLiteTensor* output, const ReluOpData* data) { ReluParams params; params.quantized_activation_min = std::max(static_cast<int32_t>(std::numeric_limits<T>::min()), output->params.zero_point + static_cast<int32>(roundf(act_min / output->params.scale))); params.quantized_activation_max = act_max == std::numeric_limits<float>::infinity() ? static_cast<int32_t>(std::numeric_limits<T>::max()) : std::min( static_cast<int32_t>(std::numeric_limits<T>::max()), output->params.zero_point + static_cast<int32>(roundf(act_max / output->params.scale))); params.input_offset = input->params.zero_point; params.output_offset = output->params.zero_point; params.output_multiplier = data->output_multiplier; params.output_shift = data->output_shift; optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } } // namespace void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). return new OpData; } void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new SoftmaxOpData; } void SoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<SoftmaxOpData*>(buffer); } void* LogSoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new LogSoftmaxOpData; } void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { return new PreluOpData; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } void LogSoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LogSoftmaxOpData*>(buffer); } void PreluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<PreluOpData*>(buffer); } void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { return new HardSwishData; } TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new ReluOpData; } void ReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<ReluOpData*>(buffer); } TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); 
TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new LeakyReluOpData; } void LeakyReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LeakyReluOpData*>(buffer); } void HardSwishFree(TfLiteContext* context, void* buffer) { delete static_cast<HardSwishData*>(buffer); } TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_STATUS(GenericPrepare(context, node)); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); HardSwishParams* params = &data->params; const TfLiteTensor* input = GetInput(context, node, 0); params->input_zero_point = input->params.zero_point; params->output_zero_point = output->params.zero_point; const float input_scale = input->params.scale; const float hires_input_scale = (1.0f / 128.0f) * input_scale; const float reluish_scale = 3.0f / 32768.0f; const float output_scale = output->params.scale; const float output_multiplier = hires_input_scale / output_scale; int32_t output_multiplier_fixedpoint_int32; QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32, &params->output_multiplier_exponent); DownScaleInt32ToInt16Multiplier( output_multiplier_fixedpoint_int32, &params->output_multiplier_fixedpoint_int16); TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0); const float reluish_multiplier = hires_input_scale / reluish_scale; int32_t reluish_multiplier_fixedpoint_int32; QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_exponent); DownScaleInt32ToInt16Multiplier( reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_fixedpoint_int16); } return kTfLiteOk; } TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); double alpha_multiplier = input->params.scale * params->alpha / output->params.scale; QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, &data->output_shift_alpha); double identity_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, &data->output_shift_identity); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); 
TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { PopulateLookupTable<uint8_t>( data, input, output, [](float value) { return std::tanh(value); }); } else if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return std::tanh(value); }); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // These operators are implemented in fixed-point arithmetic, // which intrinsically wants symmetric ranges (zero_point==0) // and power-of-two scales (power-of-two is abbreviated below as POT). // While more general support would be possible by means of rescaling, // that would add some overhead and some loss of accuracy and wouldn't // be used at the moment as current quantized LSTM applications are // happy with symmetric, power-of-two-scales quantization. So we just // implement that narrow case only for now. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0 || data->input_left_shift == 1); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. / 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. data->params.uint8_table1 = data->uint8_table1; data->params.uint8_table2 = data->uint8_table2; optimized_ops::PopulateSoftmaxUInt8LookupTable( &data->params, input->params.scale, params->beta); break; #endif case kTfLiteInt16: default: data->params.table = data->table; optimized_ops::PopulateSoftmaxLookupTable( &data->params, input->params.scale, params->beta); } data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); data->params.exp_lut = data->exp_lut; // exp LUT only used on nagative values // we consider exp(-10.0) is insignificant to accumulation gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0, data->params.exp_lut, data->kInt16LUTArraySize); data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut; gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0, data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; double input_scale_beta_rescale = input->params.scale * params->beta / (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] // correspond to [-10.0, 0.0] QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier, &data->params.input_left_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256); static const double kBeta = 1.0; if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255); data->params.table = data->f_table; 
optimized_ops::PopulateSoftmaxLookupTable(&data->params, input->params.scale, kBeta); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127); static const int kScaledDiffIntegerBits = 5; tflite::PreprocessLogSoftmaxScalingExp( kBeta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift, &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift); data->reverse_scaling_right_shift *= -1; data->diff_min = -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, data->input_left_shift); } } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type); output->type = input->type; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { // prelu(x) = x if x >= 0 else x * alpha. // So if we translate that for quantized computation: // // input_float = (input_q - input_zp) * input_scale // output_float = (output_q - output_zp) * output_scale // alpha_float = (alpha_q - alpha_zp) * alpha_scale // // When input_q - input_zp >= 0: // ouput_q = (input_q - input_zp) * input_scale / output_scale + output_q // else: // output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale // * alpha_scale / output_scale + output_q // // So for input_q - input_zp >= 0: // output real multiplier 1 is input_scale / output_scale; // for input_q - input_zp < 0: // output real multiplier 2 is input_scale * alpha_scale/ output_scale. double real_multiplier_1 = input->params.scale / output->params.scale; double real_multiplier_2 = input->params.scale * alpha->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1, &data->output_shift_1); QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2, &data->output_shift_2); } data->requires_broadcast = !HaveSameShapes(input, alpha); // PRelu (parameteric Relu) shares the same alpha value on "shared axis". // This means it's always required to "broadcast" alpha values in PRelu. TfLiteIntArray* output_size = nullptr; TF_LITE_ENSURE_OK( context, CalculateShapeForBroadcast(context, input, alpha, &output_size)); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // After broadcasting, the output shape should always be the same as the // input shape. TF_LITE_ENSURE(context, HaveSameShapes(input, output)); return kTfLiteOk; } TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. 
case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(-1, 1, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int8 supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f); return kTfLiteOk; } break; case kTfLiteUInt8: QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; 
case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteInt16: { TanhParams params; params.input_left_shift = data->input_left_shift; if (kernel_type == kReference || (data->input_multiplier > 0)) { reference_integer_ops::Tanh( data->input_multiplier, data->input_left_shift, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { optimized_ops::Tanh( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } // Sigmoid is also know as "Logistic". 
template <KernelType kernel_type> TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, TfLiteSoftmaxParams* params) { SoftmaxParams op_params; op_params.beta = params->beta; optimized_ops::Softmax(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } template <typename In, typename Out> TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<In>(input), GetTensorShape(output), GetTensorData<Out>(output)); return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return 
kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) { reference_ops::SoftmaxInt16( data->params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Only 1D, 2D, 3D and 4D tensors supported for int16 " "input with int16 output, got %dD.", NumDimensions(input)); return kTfLiteError; } } TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), 
GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); TfLiteTensor* output = GetOutput(context, node, 0); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), 
GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and uint8 and int8 are supported currently, got %d.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output, const LeakyReluOpData* data) { LeakyReluParams op_params; op_params.input_offset = input->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_alpha = data->output_multiplier_alpha; op_params.output_shift_alpha = data->output_shift_alpha; op_params.output_multiplier_identity = data->output_multiplier_identity; op_params.output_shift_identity = data->output_shift_identity; reference_ops::QuantizeLeakyRelu( op_params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); const LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); LeakyReluParams op_params; switch (input->type) { case kTfLiteFloat32: { op_params.alpha = params->alpha; optimized_ops::LeakyRelu( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizeLeakyRelu<uint8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizeLeakyRelu<int8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt16: { QuantizeLeakyRelu<int16_t>(input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, int8, int16 and uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); OpData* data = reinterpret_cast<OpData*>(node->user_data); // Use LUT to handle quantized elu path. if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return value < 0.0 ? 
std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); } TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } } // namespace activations TfLiteRegistration* Register_ELU() { static TfLiteRegistration r = {activations::Init, activations::Free, activations::EluPrepare, activations::EluEval}; return &r; } TfLiteRegistration* Register_RELU() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::ReluEval}; return &r; } TfLiteRegistration* Register_RELU_N1_TO_1() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu1Eval}; return &r; } TfLiteRegistration* Register_RELU6() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu6Eval}; return &r; } TfLiteRegistration* Register_TANH_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kReference>, activations::TanhEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_TANH_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kGenericOptimized>, activations::TanhEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kFixedPointOptimized>, activations::TanhEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_TANH() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. return Register_TANH_GENERIC_OPT(); } TfLiteRegistration* Register_LOGISTIC_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kReference>, activations::SigmoidEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kGenericOptimized>, activations::SigmoidEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kFixedPointOptimized>, activations::SigmoidEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
return Register_LOGISTIC_GENERIC_OPT(); } TfLiteRegistration* Register_SOFTMAX() { static TfLiteRegistration r = { activations::SoftmaxInit, activations::SoftmaxFree, activations::SoftmaxPrepare, activations::SoftmaxEval}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX_REF() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_PRELU_REF() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_PRELU() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LEAKY_RELU() { static TfLiteRegistration r = { activations::LeakyReluInit, activations::LeakyReluFree, activations::LeakyReluPrepare, activations::LeakyReluEval}; return &r; } TfLiteRegistration* Register_HARD_SWISH() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_HARD_SWISH_REF() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kReference>}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
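Before the code_after field: the code_before above leans on a 256-entry lookup-table technique (PopulateLookupTable / EvalUsingLookupTable) for the 8-bit activations: every possible quantized input value is dequantized, transformed, and requantized once at Prepare() time, so Eval() is a single table read per element. A self-contained sketch of that idea, not the TFLite implementation; the scales and zero points below are illustrative only:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Build a 256-entry table mapping each quantized uint8 input to the
// quantized output of f, using (scale, zero_point) affine quantization.
void PopulateTable(uint8_t table[256], float in_scale, int in_zp,
                   float out_scale, int out_zp, float (*f)(float)) {
  for (int v = 0; v < 256; ++v) {
    float x = in_scale * (v - in_zp);  // dequantize
    int q = static_cast<int>(std::round(f(x) / out_scale)) + out_zp;
    table[v] = static_cast<uint8_t>(std::min(255, std::max(0, q)));
  }
}

int main() {
  uint8_t table[256];
  // Illustration values only: input covers roughly [-4, 4),
  // logistic output covers [0, 1) with scale 1/256.
  PopulateTable(table, /*in_scale=*/4.0f / 128, /*in_zp=*/128,
                /*out_scale=*/1.0f / 256, /*out_zp=*/0,
                [](float x) { return 1.0f / (1.0f + std::exp(-x)); });
  const uint8_t in[4] = {0, 64, 128, 255};
  for (uint8_t v : in)  // "Eval": one table lookup per element
    std::printf("%d -> %d\n", v, table[v]);
  return 0;
}
```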
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <stddef.h> #include <algorithm> #include <cmath> #include <cstdint> #include <functional> #include <limits> #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/common.h" #include "tensorflow/lite/kernels/internal/compatibility.h" #include "tensorflow/lite/kernels/internal/cppmath.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/binary_function.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" #include "tensorflow/lite/kernels/internal/reference/logistic.h" #include "tensorflow/lite/kernels/internal/reference/prelu.h" #include "tensorflow/lite/kernels/internal/reference/reference_ops.h" #include "tensorflow/lite/kernels/internal/reference/softmax.h" #include "tensorflow/lite/kernels/internal/reference/tanh.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" #if __aarch64__ && __clang__ #include <arm_neon.h> #endif namespace tflite { namespace ops { namespace builtin { namespace activations { // TODO(b/142762739): We should figure out a multi-threading plan for most of // the activation ops below. 
enum KernelType { kReference, kGenericOptimized, kFixedPointOptimized, }; struct OpData { int32_t input_multiplier = 0; int input_left_shift = 0; int32_t input_range_radius = 0; int diff_min = 0; uint8_t table[256] = {0}; }; struct SoftmaxOpData { struct SoftmaxParams params = {}; float table[256]; #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT uint8_t uint8_table1[256]; uint8_t uint8_table2[256]; #endif static constexpr int kInt16LUTArraySize = 513; int16_t exp_lut[kInt16LUTArraySize]; // int16 LUT for exp(x), where x uniform // distributed between [-10.0 , 0.0] int16_t one_over_one_plus_x_lut[kInt16LUTArraySize]; // int16 LUT for 1 / // (1 + x), where x // uniform distributed // between [0.0 , 1.0] }; struct LogSoftmaxOpData : public OpData { int32_t reverse_scaling_divisor = 0; int32_t reverse_scaling_right_shift = 0; struct SoftmaxParams params = {}; float f_table[256]; }; struct LeakyReluOpData : public OpData { int32_t output_multiplier_alpha = 0; int32_t output_shift_alpha = 0; int32_t output_multiplier_identity = 0; int32_t output_shift_identity = 0; }; struct PreluOpData : public OpData { int32_t output_multiplier_1 = 0; int32_t output_shift_1 = 0; int32_t output_multiplier_2 = 0; int32_t output_shift_2 = 0; bool requires_broadcast; }; struct HardSwishData { HardSwishParams params; }; struct ReluOpData : public OpData { int32_t output_multiplier = 0; int output_shift = 0; }; namespace { TfLiteStatus CheckOutputQuantParams(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* output) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -128); } return kTfLiteOk; } template <typename T> void PopulateLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output, const std::function<float(float)>& transform) { static_assert(sizeof(T) == 1, "Lookup table valid only for 8bit"); const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits<T>::max(); int32_t minval = std::numeric_limits<T>::min(); for (int32_t val = minval; val <= maxval; ++val) { const float dequantized = input->params.scale * (val - input->params.zero_point); const float transformed = transform(dequantized); const float rescaled = std::round(transformed * inverse_scale); const int32_t quantized = static_cast<int32_t>(rescaled + output->params.zero_point); data->table[static_cast<uint8_t>(static_cast<T>(val))] = static_cast<uint8_t>( static_cast<T>(std::max(std::min(maxval, quantized), minval))); } } // TODO(b/143696793): move this to optimized_ops. void EvalUsingLookupTable(struct OpData* data, const TfLiteTensor* input, TfLiteTensor* output) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); uint8_t* output_data = GetTensorData<uint8_t>(output); const uint8_t* input_data = GetTensorData<uint8_t>(input); int i = 0; #if __aarch64__ && __clang__ // This code uses ARM64-only instructions. // TODO(b/143709993): Port to ARMv7 // Load the tables into registers. (4*4 128-bit registers) uint8x16x4_t table[4]; table[0] = vld1q_u8_x4(data->table + 16 * 4 * 0); table[1] = vld1q_u8_x4(data->table + 16 * 4 * 1); table[2] = vld1q_u8_x4(data->table + 16 * 4 * 2); table[3] = vld1q_u8_x4(data->table + 16 * 4 * 3); // Vectorized loop; process uint8x16_t (16 elements) at a time. 
constexpr int vectorized_16_loop_step = 16; const int vectorized_16_loop_end = size / vectorized_16_loop_step * vectorized_16_loop_step; for (; i < vectorized_16_loop_end; i += vectorized_16_loop_step) { uint8x16_t input = vld1q_u8(input_data + i); uint8x16_t output = optimized_ops::aarch64_lookup_vector(table, input); vst1q_u8(output_data + i, output); } // Postamble and non-ARM64 code: simple for loop. #endif for (; i < size; ++i) { output_data[i] = data->table[input_data[i]]; } } template <typename T> void QuantizedReluX(float act_min, float act_max, const TfLiteTensor* input, TfLiteTensor* output, const ReluOpData* data) { ReluParams params; params.quantized_activation_min = std::max(static_cast<int32_t>(std::numeric_limits<T>::min()), output->params.zero_point + static_cast<int32>(roundf(act_min / output->params.scale))); params.quantized_activation_max = act_max == std::numeric_limits<float>::infinity() ? static_cast<int32_t>(std::numeric_limits<T>::max()) : std::min( static_cast<int32_t>(std::numeric_limits<T>::max()), output->params.zero_point + static_cast<int32>(roundf(act_max / output->params.scale))); params.input_offset = input->params.zero_point; params.output_offset = output->params.zero_point; params.output_multiplier = data->output_multiplier; params.output_shift = data->output_shift; optimized_ops::ReluX(params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } } // namespace void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). return new OpData; } void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new SoftmaxOpData; } void SoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<SoftmaxOpData*>(buffer); } void* LogSoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { return new LogSoftmaxOpData; } void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { return new PreluOpData; } void Free(TfLiteContext* context, void* buffer) { delete reinterpret_cast<OpData*>(buffer); } void LogSoftmaxFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LogSoftmaxOpData*>(buffer); } void PreluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<PreluOpData*>(buffer); } void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { return new HardSwishData; } TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new ReluOpData; } void ReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<ReluOpData*>(buffer); } TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; 
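  // GetInputSafe/GetOutputSafe (used throughout this file) validate the
  // tensor index and report kTfLiteError via TF_LITE_ENSURE_OK instead of
  // returning an unchecked pointer -- the CVE-2020-15211 hardening pattern.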
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { return new LeakyReluOpData; } void LeakyReluFree(TfLiteContext* context, void* buffer) { delete reinterpret_cast<LeakyReluOpData*>(buffer); } void HardSwishFree(TfLiteContext* context, void* buffer) { delete static_cast<HardSwishData*>(buffer); } TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_STATUS(GenericPrepare(context, node)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); HardSwishParams* params = &data->params; const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); params->input_zero_point = input->params.zero_point; params->output_zero_point = output->params.zero_point; const float input_scale = input->params.scale; const float hires_input_scale = (1.0f / 128.0f) * input_scale; const float reluish_scale = 3.0f / 32768.0f; const float output_scale = output->params.scale; const float output_multiplier = hires_input_scale / output_scale; int32_t output_multiplier_fixedpoint_int32; QuantizeMultiplier(output_multiplier, &output_multiplier_fixedpoint_int32, &params->output_multiplier_exponent); DownScaleInt32ToInt16Multiplier( output_multiplier_fixedpoint_int32, &params->output_multiplier_fixedpoint_int16); TF_LITE_ENSURE(context, params->output_multiplier_exponent <= 0); const float reluish_multiplier = hires_input_scale / reluish_scale; int32_t reluish_multiplier_fixedpoint_int32; QuantizeMultiplier(reluish_multiplier, &reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_exponent); DownScaleInt32ToInt16Multiplier( reluish_multiplier_fixedpoint_int32, &params->reluish_multiplier_fixedpoint_int16); } return kTfLiteOk; } TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); double alpha_multiplier = input->params.scale * params->alpha / output->params.scale; QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, &data->output_shift_alpha); double identity_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, &data->output_shift_identity); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } 
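// Worked example for the quantized LeakyRelu multipliers above (hypothetical
// values, for illustration only): with input scale 0.1, alpha 0.2 and output
// scale 0.05, alpha_multiplier = 0.1 * 0.2 / 0.05 = 0.4 and
// identity_multiplier = 0.1 / 0.05 = 2.0; QuantizeMultiplier() encodes each
// as a Q31 fixed-point multiplier plus a power-of-two shift.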
template <KernelType kernel_type> TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { PopulateLookupTable<uint8_t>( data, input, output, [](float value) { return std::tanh(value); }); } else if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return std::tanh(value); }); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // These operators are implemented in fixed-point arithmetic, // which intrinsically wants symmetric ranges (zero_point==0) // and power-of-two scales (power-of-two is abbreviated below as POT). // While more general support would be possible by means of rescaling, // that would add some overhead and some loss of accuracy and wouldn't // be used at the moment as current quantized LSTM applications are // happy with symmetric, power-of-two-scales quantization. So we just // implement that narrow case only for now. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0 || data->input_left_shift == 1); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
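      // Restated: rescale from the int16 domain (-2^15, 2^15) down to the
      // (-2^kInputIntegerBits, 2^kInputIntegerBits) fixed-point domain, so
      // the multiplier is scale * 2^(15 - kInputIntegerBits) = scale * 4096.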
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } template <KernelType kernel_type> TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. / 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. 
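      // As in TanhPrepare: the general-scale fallback folds the rescale into
      // one multiplier, scale * 2^(15 - kInputIntegerBits) = scale * 4096.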
data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. data->params.uint8_table1 = data->uint8_table1; data->params.uint8_table2 = data->uint8_table2; optimized_ops::PopulateSoftmaxUInt8LookupTable( &data->params, input->params.scale, params->beta); break; #endif case kTfLiteInt16: default: data->params.table = data->table; optimized_ops::PopulateSoftmaxLookupTable( &data->params, input->params.scale, params->beta); } data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); data->params.exp_lut = data->exp_lut; // exp LUT only used on nagative values // we consider exp(-10.0) is insignificant to accumulation gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0, data->params.exp_lut, data->kInt16LUTArraySize); data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut; gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0, data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; double input_scale_beta_rescale = input->params.scale * params->beta / (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] // correspond to [-10.0, 0.0] QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier, &data->params.input_left_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256); static const 
double kBeta = 1.0; if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255); data->params.table = data->f_table; optimized_ops::PopulateSoftmaxLookupTable(&data->params, input->params.scale, kBeta); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127); static const int kScaledDiffIntegerBits = 5; tflite::PreprocessLogSoftmaxScalingExp( kBeta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift, &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift); data->reverse_scaling_right_shift *= -1; data->diff_min = -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, data->input_left_shift); } } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); } TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const TfLiteTensor* alpha; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha)); PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type); output->type = input->type; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { // prelu(x) = x if x >= 0 else x * alpha. // So if we translate that for quantized computation: // // input_float = (input_q - input_zp) * input_scale // output_float = (output_q - output_zp) * output_scale // alpha_float = (alpha_q - alpha_zp) * alpha_scale // // When input_q - input_zp >= 0: // ouput_q = (input_q - input_zp) * input_scale / output_scale + output_q // else: // output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale // * alpha_scale / output_scale + output_q // // So for input_q - input_zp >= 0: // output real multiplier 1 is input_scale / output_scale; // for input_q - input_zp < 0: // output real multiplier 2 is input_scale * alpha_scale/ output_scale. double real_multiplier_1 = input->params.scale / output->params.scale; double real_multiplier_2 = input->params.scale * alpha->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1, &data->output_shift_1); QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2, &data->output_shift_2); } data->requires_broadcast = !HaveSameShapes(input, alpha); // PRelu (parameteric Relu) shares the same alpha value on "shared axis". // This means it's always required to "broadcast" alpha values in PRelu. TfLiteIntArray* output_size = nullptr; TF_LITE_ENSURE_OK( context, CalculateShapeForBroadcast(context, input, alpha, &output_size)); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // After broadcasting, the output shape should always be the same as the // input shape. 
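  // (CalculateShapeForBroadcast fails for incompatible shapes, so reaching
  // the check below implies alpha broadcasts cleanly onto the input.)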
TF_LITE_ENSURE(context, HaveSameShapes(input, output)); return kTfLiteOk; } TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(-1, 1, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int8 supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { HardSwishData* data = static_cast<HardSwishData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::HardSwish( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { HardSwishParams& params = data->params; if (kernel_type == kReference) { reference_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { optimized_ops::HardSwish( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), 
GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { size_t elements = input->bytes / sizeof(float); const float* in = GetTensorData<float>(input); const float* in_end = in + elements; float* out = GetTensorData<float>(output); for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f); return kTfLiteOk; } break; case kTfLiteUInt8: QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteInt16: { TanhParams params; params.input_left_shift = data->input_left_shift; if (kernel_type == kReference || (data->input_multiplier > 0)) { reference_integer_ops::Tanh( data->input_multiplier, data->input_left_shift, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { optimized_ops::Tanh( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only 
float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } // Sigmoid is also know as "Logistic". template <KernelType kernel_type> TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } TfLiteStatus SoftmaxFloat(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, TfLiteSoftmaxParams* params) { SoftmaxParams op_params; op_params.beta = params->beta; optimized_ops::Softmax(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } template <typename In, typename Out> TfLiteStatus SoftmaxQuantized(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<In>(input), GetTensorShape(output), GetTensorData<Out>(output)); return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int8_t, int8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), 
GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<uint8_t, uint8_t>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT optimized_ops::SoftmaxInt8LUT( data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #else optimized_ops::Softmax(data->params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); #endif return kTfLiteOk; } template <> TfLiteStatus SoftmaxQuantized<int16, int16>(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, SoftmaxOpData* data) { if (NumDimensions(input) >= 1 && NumDimensions(input) <= 4) { reference_ops::SoftmaxInt16( data->params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); return kTfLiteOk; } else { TF_LITE_KERNEL_LOG(context, "Only 1D, 2D, 3D and 4D tensors supported for int16 " "input with int16 output, got %dD.", NumDimensions(input)); return kTfLiteError; } } TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <KernelType kernel_type> TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), 
GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> T ApplyPrelu(T input, T alpha) { return input >= 0.0 ? input : input * alpha; } template <KernelType kernel_type> TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); const TfLiteTensor* alpha; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; 
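      // Multiplier/shift pair 1 covers the identity branch (x >= 0) and pair
      // 2 the alpha branch (x < 0); both were precomputed in PreluPrepare.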
op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and uint8 and int8 are supported currently, got %d.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } template <typename T> void QuantizeLeakyRelu(const TfLiteTensor* input, TfLiteTensor* output, const LeakyReluOpData* data) { LeakyReluParams op_params; op_params.input_offset = input->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_alpha = data->output_multiplier_alpha; op_params.output_shift_alpha = data->output_shift_alpha; op_params.output_multiplier_identity = data->output_multiplier_identity; op_params.output_shift_identity = data->output_shift_identity; reference_ops::QuantizeLeakyRelu( op_params, GetTensorShape(input), GetTensorData<T>(input), GetTensorShape(output), GetTensorData<T>(output)); } TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); const auto* params = reinterpret_cast<TfLiteLeakyReluParams*>(node->builtin_data); const LeakyReluOpData* data = reinterpret_cast<LeakyReluOpData*>(node->user_data); LeakyReluParams op_params; switch (input->type) { case kTfLiteFloat32: { op_params.alpha = params->alpha; optimized_ops::LeakyRelu( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteUInt8: { QuantizeLeakyRelu<uint8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt8: { QuantizeLeakyRelu<int8_t>(input, output, data); return kTfLiteOk; } break; case kTfLiteInt16: { QuantizeLeakyRelu<int16_t>(input, output, data); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32, int8, int16 and uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return 
kTfLiteError; } } TfLiteStatus EluPrepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // Use LUT to handle quantized elu path. if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return value < 0.0 ? std::exp(value) - 1.0f : value; }); } return GenericPrepare(context, node); } TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Elu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; } break; case kTfLiteInt8: { OpData* data = reinterpret_cast<OpData*>(node->user_data); EvalUsingLookupTable(data, input, output); return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and int8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } } // namespace activations TfLiteRegistration* Register_ELU() { static TfLiteRegistration r = {activations::Init, activations::Free, activations::EluPrepare, activations::EluEval}; return &r; } TfLiteRegistration* Register_RELU() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::ReluEval}; return &r; } TfLiteRegistration* Register_RELU_N1_TO_1() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu1Eval}; return &r; } TfLiteRegistration* Register_RELU6() { static TfLiteRegistration r = {activations::ReluInit, activations::ReluFree, activations::ReluPrepare, activations::Relu6Eval}; return &r; } TfLiteRegistration* Register_TANH_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kReference>, activations::TanhEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_TANH_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kGenericOptimized>, activations::TanhEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_TANH_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::TanhPrepare<activations::kFixedPointOptimized>, activations::TanhEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_TANH() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. 
return Register_TANH_GENERIC_OPT(); } TfLiteRegistration* Register_LOGISTIC_REF() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kReference>, activations::SigmoidEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOGISTIC_GENERIC_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kGenericOptimized>, activations::SigmoidEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC_FIXED_POINT_OPT() { static TfLiteRegistration r = { activations::Init, activations::Free, activations::SigmoidPrepare<activations::kFixedPointOptimized>, activations::SigmoidEval<activations::kFixedPointOptimized>}; return &r; } TfLiteRegistration* Register_LOGISTIC() { // TODO(b/134622898): Switch over from the LUT optimized method to the fixed // point optimized method when typical Android hardware performs better on // the latter one. return Register_LOGISTIC_GENERIC_OPT(); } TfLiteRegistration* Register_SOFTMAX() { static TfLiteRegistration r = { activations::SoftmaxInit, activations::SoftmaxFree, activations::SoftmaxPrepare, activations::SoftmaxEval}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX_REF() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_LOG_SOFTMAX() { static TfLiteRegistration r = { activations::LogSoftmaxInit, activations::LogSoftmaxFree, activations::LogSoftmaxPrepare, activations::LogSoftmaxEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_PRELU_REF() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kReference>}; return &r; } TfLiteRegistration* Register_PRELU() { static TfLiteRegistration r = { activations::PreluInit, activations::PreluFree, activations::PreluPrepare, activations::PreluEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_LEAKY_RELU() { static TfLiteRegistration r = { activations::LeakyReluInit, activations::LeakyReluFree, activations::LeakyReluPrepare, activations::LeakyReluEval}; return &r; } TfLiteRegistration* Register_HARD_SWISH() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kGenericOptimized>}; return &r; } TfLiteRegistration* Register_HARD_SWISH_REF() { static TfLiteRegistration r = { activations::HardSwishInit, activations::HardSwishFree, activations::HardSwishPrepare, activations::HardSwishEval<activations::kReference>}; return &r; } } // namespace builtin } // namespace ops } // namespace tflite
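func_before (the method as recorded before the fix -- note the unchecked GetInput/GetOutput calls):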
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
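func_after (the patched method -- tensor lookups now go through GetInputSafe/GetOutputSafe):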
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input; TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input)); TfLiteTensor* output; TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output)); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
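diff (lines added/deleted by the fix). The patch for CVE-2020-15211 is mechanical: every unchecked tensor lookup in the file is replaced by its checked counterpart, so a malformed model carrying an invalid tensor index makes Prepare/Eval fail with kTfLiteError instead of reading out of bounds (CWE-125). A minimal sketch of the pattern, using the same calls that appear in this record:

// Before: GetInput() trusts the index and can hand back an out-of-range pointer.
const TfLiteTensor* input = GetInput(context, node, 0);

// After: GetInputSafe() validates the index first; TF_LITE_ENSURE_OK
// propagates kTfLiteError out of the calling kernel function on failure.
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));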
diff:
{'added': [
  (255, '  const TfLiteTensor* input;'),
  (256, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (257, '  TfLiteTensor* output;'),
  (258, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (277, '  const TfLiteTensor* input;'),
  (278, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (279, '  TfLiteTensor* output;'),
  (280, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (307, '  TfLiteTensor* output;'),
  (308, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (313, '  const TfLiteTensor* input;'),
  (314, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (346, '  const TfLiteTensor* input;'),
  (347, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (348, '  TfLiteTensor* output;'),
  (349, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (377, '  const TfLiteTensor* input;'),
  (378, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (379, '  TfLiteTensor* output;'),
  (380, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (464, '  const TfLiteTensor* input;'),
  (465, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (466, '  TfLiteTensor* output;'),
  (467, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (561, '  const TfLiteTensor* input;'),
  (562, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (563, '  TfLiteTensor* output;'),
  (564, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (631, '  const TfLiteTensor* input;'),
  (632, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (633, '  TfLiteTensor* output;'),
  (634, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (669, '  const TfLiteTensor* input;'),
  (670, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (671, '  TfLiteTensor* output;'),
  (672, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (673, '  const TfLiteTensor* alpha;'),
  (674, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'),
  (726, '  const TfLiteTensor* input;'),
  (727, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (728, '  TfLiteTensor* output;'),
  (729, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (756, '  const TfLiteTensor* input;'),
  (757, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (758, '  TfLiteTensor* output;'),
  (759, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (789, '  const TfLiteTensor* input;'),
  (790, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (791, '  TfLiteTensor* output;'),
  (792, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (842, '  const TfLiteTensor* input;'),
  (843, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (844, '  TfLiteTensor* output;'),
  (845, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (875, '  const TfLiteTensor* input;'),
  (876, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (877, '  TfLiteTensor* output;'),
  (878, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (951, '  const TfLiteTensor* input;'),
  (952, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (953, '  TfLiteTensor* output;'),
  (954, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (1101, '  const TfLiteTensor* input;'),
  (1102, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (1103, '  TfLiteTensor* output;'),
  (1104, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (1158, '  const TfLiteTensor* input;'),
  (1159, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (1160, '  TfLiteTensor* output;'),
  (1161, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (1221, '  const TfLiteTensor* input;'),
  (1222, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (1223, '  const TfLiteTensor* alpha;'),
  (1224, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &alpha));'),
  (1225, '  TfLiteTensor* output;'),
  (1226, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (1335, '  const TfLiteTensor* input;'),
  (1336, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (1337, '  TfLiteTensor* output;'),
  (1338, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (1375, '  const TfLiteTensor* input;'),
  (1376, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (1377, '  TfLiteTensor* output;'),
  (1378, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));'),
  (1391, '  const TfLiteTensor* input;'),
  (1392, '  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));'),
  (1393, '  TfLiteTensor* output;'),
  (1394, '  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));')],
 'deleted': [
  (255, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (256, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (275, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (276, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (303, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (308, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (340, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (341, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (369, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (370, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (454, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (455, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (549, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (550, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (617, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (618, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (653, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (654, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (655, '  const TfLiteTensor* alpha = GetInput(context, node, 1);'),
  (707, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (708, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (735, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (736, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (766, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (767, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (817, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (818, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (848, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (849, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (922, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (923, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (1070, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (1071, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (1125, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (1126, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (1186, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (1187, '  const TfLiteTensor* alpha = GetInput(context, node, 1);'),
  (1188, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (1297, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (1298, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (1335, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (1336, '  TfLiteTensor* output = GetOutput(context, node, 0);'),
  (1349, '  const TfLiteTensor* input = GetInput(context, node, 0);'),
  (1350, '  TfLiteTensor* output = GetOutput(context, node, 0);')]}
num_lines_added: 88
num_lines_deleted: 44
num_lines_in_file: 1,316
num_tokens_in_file: 9,729
num_lines_in_method: 56
num_tokens_in_method: 379
method_complexity: 6
repo: https://github.com/tensorflow/tensorflow
cve_id: CVE-2020-15211
cwe_id: CWE-125
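The fix pattern in this record's diff is mechanical: every unchecked `GetInput`/`GetOutput` call is replaced by its `*Safe` variant plus a `TF_LITE_ENSURE_OK` check, so a bad tensor index fails the kernel's Prepare/Eval step instead of returning a pointer that is later read out of bounds (the CWE-125 above). A minimal before/after sketch of that shape — the wrapper names are mine, but the accessor and macro calls are exactly the ones appearing in the diff:

/* Before: no error check; a bad index can produce an invalid pointer. */
static TfLiteStatus PrepareUnsafe(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  return kTfLiteOk;
}

/* After: the *Safe accessors report failure through the return status. */
static TfLiteStatus PrepareSafe(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  return kTfLiteOk;
}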
id: 1,631
file_name: imagew-main.c
programming_language: C
method_name: iw_process_rows_intermediate_to_final
code_before:
// imagew-main.c
// Part of ImageWorsener, Copyright (c) 2011 by Jason Summers.
// For more information, see the readme.txt file.

#include "imagew-config.h"

#include <stdlib.h>
#include <string.h>
#include <math.h>

#include "imagew-internals.h"

// Given a color type having an alpha channel, returns the index of the
// alpha channel.
// Return value is not meaningful if type does not have an alpha channel.
static int iw_imgtype_alpha_channel_index(int t)
{
  switch(t) {
  case IW_IMGTYPE_RGBA:
    return 3;
  case IW_IMGTYPE_GRAYA:
    return 1;
  }
  return 0;
}

static IW_INLINE iw_tmpsample srgb_to_linear_sample(iw_tmpsample v_srgb)
{
  if(v_srgb<=0.04045) {
    return v_srgb/12.92;
  }
  else {
    return pow( (v_srgb+0.055)/(1.055) , 2.4);
  }
}

static IW_INLINE iw_tmpsample rec709_to_linear_sample(iw_tmpsample v_rec709)
{
  if(v_rec709 < 4.5*0.020) {
    return v_rec709/4.5;
  }
  else {
    return pow( (v_rec709+0.099)/1.099 , 1.0/0.45);
  }
}

static IW_INLINE iw_tmpsample gamma_to_linear_sample(iw_tmpsample v, double gamma)
{
  return pow(v,gamma);
}

static iw_tmpsample x_to_linear_sample(iw_tmpsample v, const struct iw_csdescr *csdescr)
{
  switch(csdescr->cstype) {
  case IW_CSTYPE_SRGB:
    return srgb_to_linear_sample(v);
  case IW_CSTYPE_LINEAR:
    return v;
  case IW_CSTYPE_GAMMA:
    return gamma_to_linear_sample(v,csdescr->gamma);
  case IW_CSTYPE_REC709:
    return rec709_to_linear_sample(v);
  }
  return srgb_to_linear_sample(v);
}

// Public version of x_to_linear_sample().
IW_IMPL(double) iw_convert_sample_to_linear(double v, const struct iw_csdescr *csdescr)
{
  return (double)x_to_linear_sample(v,csdescr);
}

static IW_INLINE iw_tmpsample linear_to_srgb_sample(iw_tmpsample v_linear)
{
  if(v_linear <= 0.0031308) {
    return 12.92*v_linear;
  }
  return 1.055*pow(v_linear,1.0/2.4) - 0.055;
}

static IW_INLINE iw_tmpsample linear_to_rec709_sample(iw_tmpsample v_linear)
{
  // The cutoff point is supposed to be 0.018, but that doesn't make sense,
  // because the curves don't intersect there. They intersect at almost exactly
  // 0.020.
  if(v_linear < 0.020) {
    return 4.5*v_linear;
  }
  return 1.099*pow(v_linear,0.45) - 0.099;
}

static IW_INLINE iw_tmpsample linear_to_gamma_sample(iw_tmpsample v_linear, double gamma)
{
  return pow(v_linear,1.0/gamma);
}
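/*
 * [Illustrative sketch, not part of imagew-main.c] The transfer curves above
 * come in mutually inverse pairs. A standalone round-trip check of the sRGB
 * pair, using the same constants as srgb_to_linear_sample() and
 * linear_to_srgb_sample(); the #if 0 guard keeps it out of any build:
 */
#if 0
#include <math.h>
#include <stdio.h>

static double to_linear(double v)
{
  return (v<=0.04045) ? v/12.92 : pow((v+0.055)/1.055, 2.4);
}

static double from_linear(double v)
{
  return (v<=0.0031308) ? 12.92*v : 1.055*pow(v, 1.0/2.4) - 0.055;
}

int main(void)
{
  double v;
  /* Each sample should survive the sRGB -> linear -> sRGB round trip. */
  for(v=0.0; v<=1.0; v+=0.25) {
    printf("%.2f -> %.6f -> %.2f\n", v, to_linear(v), from_linear(to_linear(v)));
  }
  return 0;
}
#endif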
static iw_float32 iw_get_float32(const iw_byte *m)
{
  int k;
  // !!! Portability warning: Using a union in this way may be nonportable.
  union su_union {
    iw_byte c[4];
    iw_float32 f;
  } volatile su;

  for(k=0;k<4;k++) {
    su.c[k] = m[k];
  }
  return su.f;
}

static void iw_put_float32(iw_byte *m, iw_float32 s)
{
  int k;
  // !!! Portability warning: Using a union in this way may be nonportable.
  union su_union {
    iw_byte c[4];
    iw_float32 f;
  } volatile su;

  su.f = s;
  for(k=0;k<4;k++) {
    m[k] = su.c[k];
  }
}

static iw_tmpsample get_raw_sample_flt32(struct iw_context *ctx, int x, int y, int channel)
{
  size_t z;
  z = y*ctx->img1.bpr + (ctx->img1_numchannels_physical*x + channel)*4;
  return (iw_tmpsample)iw_get_float32(&ctx->img1.pixels[z]);
}

static IW_INLINE unsigned int get_raw_sample_16(struct iw_context *ctx, int x, int y, int channel)
{
  size_t z;
  unsigned short tmpui16;
  z = y*ctx->img1.bpr + (ctx->img1_numchannels_physical*x + channel)*2;
  tmpui16 = ( ((unsigned short)(ctx->img1.pixels[z+0])) <<8) | ctx->img1.pixels[z+1];
  return tmpui16;
}

static IW_INLINE unsigned int get_raw_sample_8(struct iw_context *ctx, int x, int y, int channel)
{
  unsigned short tmpui8;
  tmpui8 = ctx->img1.pixels[y*ctx->img1.bpr + ctx->img1_numchannels_physical*x + channel];
  return tmpui8;
}

// 4 bits/pixel
static IW_INLINE unsigned int get_raw_sample_4(struct iw_context *ctx, int x, int y)
{
  unsigned short tmpui8;
  tmpui8 = ctx->img1.pixels[y*ctx->img1.bpr + x/2];
  if(x&0x1) tmpui8 = tmpui8&0x0f;
  else tmpui8 = tmpui8>>4;
  return tmpui8;
}

// 2 bits/pixel
static IW_INLINE unsigned int get_raw_sample_2(struct iw_context *ctx, int x, int y)
{
  unsigned short tmpui8;
  tmpui8 = ctx->img1.pixels[y*ctx->img1.bpr + x/4];
  tmpui8 = ( tmpui8 >> ((3-x%4)*2) ) & 0x03;
  return tmpui8;
}

// 1 bit/pixel
static IW_INLINE unsigned int get_raw_sample_1(struct iw_context *ctx, int x, int y)
{
  unsigned short tmpui8;
  tmpui8 = ctx->img1.pixels[y*ctx->img1.bpr + x/8];
  if(tmpui8 & (1<<(7-x%8))) return 1;
  return 0;
}

// Translate a pixel position from logical to physical coordinates.
static IW_INLINE void translate_coords(struct iw_context *ctx, int x, int y,
  int *prx, int *pry)
{
  if(ctx->img1.orient_transform==0) {
    // The fast path
    *prx = ctx->input_start_x+x;
    *pry = ctx->input_start_y+y;
    return;
  }

  switch(ctx->img1.orient_transform) {
  case 1: // mirror-x
    *prx = ctx->img1.width - 1 - (ctx->input_start_x+x);
    *pry = ctx->input_start_y+y;
    break;
  case 2: // mirror-y
    *prx = ctx->input_start_x+x;
    *pry = ctx->img1.height - 1 - (ctx->input_start_y+y);
    break;
  case 3: // mirror-x, mirror-y
    *prx = ctx->img1.width - 1 - (ctx->input_start_x+x);
    *pry = ctx->img1.height - 1 - (ctx->input_start_y+y);
    break;
  case 4: // transpose
    *prx = ctx->input_start_y+y;
    *pry = ctx->input_start_x+x;
    break;
  case 5:
    *prx = ctx->input_start_y+y;
    *pry = ctx->img1.width - 1 - (ctx->input_start_x+x);
    break;
  case 6:
    *prx = ctx->img1.height - 1 - (ctx->input_start_y+y);
    *pry = ctx->input_start_x+x;
    break;
  case 7:
    *prx = ctx->img1.height - 1 - (ctx->input_start_y+y);
    *pry = ctx->img1.width - 1 - (ctx->input_start_x+x);
    break;
  default:
    *prx = 0;
    *pry = 0;
    break;
  }
}

// Returns a value from 0 to 2^(ctx->img1.bit_depth)-1.
// x and y are logical coordinates.
static unsigned int get_raw_sample_int(struct iw_context *ctx, int x, int y, int channel)
{
  int rx,ry; // physical coordinates

  translate_coords(ctx,x,y,&rx,&ry);

  switch(ctx->img1.bit_depth) {
  case 8:
    return get_raw_sample_8(ctx,rx,ry,channel);
  case 1:
    return get_raw_sample_1(ctx,rx,ry);
  case 16:
    return get_raw_sample_16(ctx,rx,ry,channel);
  case 4:
    return get_raw_sample_4(ctx,rx,ry);
  case 2:
    return get_raw_sample_2(ctx,rx,ry);
  }
  return 0;
}
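/*
 * [Illustrative sketch, not part of imagew-main.c] translate_coords()
 * implements the eight Exif-style orientations as coordinate rewrites. A
 * standalone illustration of transform 4 (transpose), with the crop offsets
 * input_start_x/input_start_y assumed to be 0:
 */
#if 0
#include <stdio.h>

/* Transform 4 swaps the axes, just as in the case-4 branch above. */
static void transpose_coords(int x, int y, int *prx, int *pry)
{
  *prx = y;
  *pry = x;
}

int main(void)
{
  int rx, ry;
  transpose_coords(4, 2, &rx, &ry);
  printf("logical (4,2) -> physical (%d,%d)\n", rx, ry); /* prints (2,4) */
  return 0;
}
#endif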
// Channel is the input channel number.
// x and y are logical coordinates.
static iw_tmpsample get_raw_sample(struct iw_context *ctx, int x, int y, int channel)
{
  unsigned int v;

  if(channel>=ctx->img1_numchannels_physical) {
    // This is a virtual alpha channel. Return "opaque".
    return 1.0;
  }

  if(ctx->img1.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) {
    int rx, ry;
    translate_coords(ctx,x,y,&rx,&ry);
    if(ctx->img1.bit_depth!=32) return 0.0;
    return get_raw_sample_flt32(ctx,rx,ry,channel);
  }

  v = get_raw_sample_int(ctx,x,y,channel);
  return ((double)v) / ctx->img1_ci[channel].maxcolorcode_dbl;
}

static iw_tmpsample iw_color_to_grayscale(struct iw_context *ctx,
  iw_tmpsample r, iw_tmpsample g, iw_tmpsample b)
{
  iw_tmpsample v0,v1,v2;

  switch(ctx->grayscale_formula) {
  case IW_GSF_WEIGHTED:
    return ctx->grayscale_weight[0]*r +
      ctx->grayscale_weight[1]*g +
      ctx->grayscale_weight[2]*b;
  case IW_GSF_ORDERBYVALUE:
    // Sort the R, G, and B values, then use the corresponding weights.
    if(g<=r) { v0=r; v1=g; }
    else { v0=g; v1=r; }
    if(b<=v1) { v2=b; }
    else {
      v2=v1;
      if(b<=v0) { v1=b; }
      else { v1=v0; v0=b; }
    }
    return ctx->grayscale_weight[0]*v0 +
      ctx->grayscale_weight[1]*v1 +
      ctx->grayscale_weight[2]*v2;
  }
  return 0.0;
}
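/*
 * [Illustrative sketch, not part of imagew-main.c] For IW_GSF_WEIGHTED the
 * gray value is just a weighted sum of the linear R, G, B samples. The
 * weights below are the ones prepare_grayscale() installs for
 * IW_GSF_STANDARD (Rec. 709-style luminance):
 */
#if 0
#include <stdio.h>

int main(void)
{
  double w[3] = {0.212655, 0.715158, 0.072187};
  double r = 1.0, g = 0.5, b = 0.25; /* linear samples */
  double gray = w[0]*r + w[1]*g + w[2]*b;
  printf("gray = %.6f\n", gray); /* 0.212655 + 0.357579 + 0.018047 = 0.588281 */
  return 0;
}
#endif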
// Based on color depth of the input image.
// Assumes this channel's maxcolorcode == ctx->input_maxcolorcode
static iw_tmpsample cvt_int_sample_to_linear(struct iw_context *ctx,
  unsigned int v, const struct iw_csdescr *csdescr)
{
  iw_tmpsample s;

  if(csdescr->cstype==IW_CSTYPE_LINEAR) {
    // Sort of a hack: This is not just an optimization for linear colorspaces,
    // but is necessary to handle alpha channels correctly.
    // The lookup table is not correct for alpha channels.
    return ((double)v) / ctx->input_maxcolorcode;
  }
  else if(ctx->input_color_corr_table) {
    // If the colorspace is not linear, assume we can use the lookup table.
    return ctx->input_color_corr_table[v];
  }

  s = ((double)v) / ctx->input_maxcolorcode;
  return x_to_linear_sample(s,csdescr);
}

// Based on color depth of the output image.
static iw_tmpsample cvt_int_sample_to_linear_output(struct iw_context *ctx,
  unsigned int v, const struct iw_csdescr *csdescr, double overall_maxcolorcode)
{
  iw_tmpsample s;

  if(csdescr->cstype==IW_CSTYPE_LINEAR) {
    return ((double)v) / overall_maxcolorcode;
  }
  else if(ctx->output_rev_color_corr_table) {
    return ctx->output_rev_color_corr_table[v];
  }
  s = ((double)v) / overall_maxcolorcode;
  return x_to_linear_sample(s,csdescr);
}

// Return a sample, converted to a linear colorspace if it isn't already in one.
// Channel is the output channel number.
static iw_tmpsample get_sample_cvt_to_linear(struct iw_context *ctx,
  int x, int y, int channel, const struct iw_csdescr *csdescr)
{
  unsigned int v1,v2,v3;
  iw_tmpsample r,g,b;
  int ch;

  ch = ctx->intermed_ci[channel].corresponding_input_channel;

  if(ctx->img1_ci[ch].disable_fast_get_sample) {
    // The slow way...
    if(ctx->intermed_ci[channel].cvt_to_grayscale) {
      r = x_to_linear_sample(get_raw_sample(ctx,x,y,ch+0),csdescr);
      g = x_to_linear_sample(get_raw_sample(ctx,x,y,ch+1),csdescr);
      b = x_to_linear_sample(get_raw_sample(ctx,x,y,ch+2),csdescr);
      return iw_color_to_grayscale(ctx,r,g,b);
    }
    return x_to_linear_sample(get_raw_sample(ctx,x,y,ch),csdescr);
  }

  // This method is faster, because it may use a gamma lookup table.
  // But all channels have to have the nominal input bitdepth, and it doesn't
  // support floating point samples, or a virtual alpha channel.
  if(ctx->intermed_ci[channel].cvt_to_grayscale) {
    v1 = get_raw_sample_int(ctx,x,y,ch+0);
    v2 = get_raw_sample_int(ctx,x,y,ch+1);
    v3 = get_raw_sample_int(ctx,x,y,ch+2);
    r = cvt_int_sample_to_linear(ctx,v1,csdescr);
    g = cvt_int_sample_to_linear(ctx,v2,csdescr);
    b = cvt_int_sample_to_linear(ctx,v3,csdescr);
    return iw_color_to_grayscale(ctx,r,g,b);
  }

  v1 = get_raw_sample_int(ctx,x,y,ch);
  return cvt_int_sample_to_linear(ctx,v1,csdescr);
}

// s is from 0.0 to 65535.0
static IW_INLINE void put_raw_sample_16(struct iw_context *ctx, double s,
  int x, int y, int channel)
{
  size_t z;
  unsigned short tmpui16;

  tmpui16 = (unsigned short)(0.5+s);
  z = y*ctx->img2.bpr + (ctx->img2_numchannels*x + channel)*2;
  ctx->img2.pixels[z+0] = (iw_byte)(tmpui16>>8);
  ctx->img2.pixels[z+1] = (iw_byte)(tmpui16&0xff);
}

// s is from 0.0 to 255.0
static IW_INLINE void put_raw_sample_8(struct iw_context *ctx, double s,
  int x, int y, int channel)
{
  iw_byte tmpui8;

  tmpui8 = (iw_byte)(0.5+s);
  ctx->img2.pixels[y*ctx->img2.bpr + ctx->img2_numchannels*x + channel] = tmpui8;
}

// Sample must already be scaled and in the target colorspace. E.g. 255.0 might be white.
static void put_raw_sample(struct iw_context *ctx, double s,
  int x, int y, int channel)
{
  switch(ctx->img2.bit_depth) {
  case 8:
    put_raw_sample_8(ctx,s,x,y,channel);
    break;
  case 16:
    put_raw_sample_16(ctx,s,x,y,channel);
    break;
  }
}

// s is from 0.0 to 1.0
static void put_raw_sample_flt32(struct iw_context *ctx, double s,
  int x, int y, int channel)
{
  size_t pos;

  pos = y*ctx->img2.bpr + (ctx->img2_numchannels*x + channel)*4;
  iw_put_float32(&ctx->img2.pixels[pos], (iw_float32)s);
}

static iw_tmpsample linear_to_x_sample(iw_tmpsample samp_lin, const struct iw_csdescr *csdescr)
{
  if(samp_lin > 0.999999999) {
    // This check is done mostly because glibc's pow() function may be
    // very slow for some arguments near 1.
    return 1.0;
  }

  switch(csdescr->cstype) {
  case IW_CSTYPE_SRGB:
    return linear_to_srgb_sample(samp_lin);
  case IW_CSTYPE_LINEAR:
    return samp_lin;
  case IW_CSTYPE_GAMMA:
    return linear_to_gamma_sample(samp_lin,csdescr->gamma);
  case IW_CSTYPE_REC709:
    return linear_to_rec709_sample(samp_lin);
  }
  return linear_to_srgb_sample(samp_lin);
}

// Public version of linear_to_x_sample().
IW_IMPL(double) iw_convert_sample_from_linear(double v, const struct iw_csdescr *csdescr)
{
  return (double)linear_to_x_sample(v,csdescr);
}
// Returns 0 if we should round down, 1 if we should round up.
// TODO: It might be good to use a different-sized matrix for alpha channels
// (e.g. 9x7), but I don't know how to make a good one.
static int iw_ordered_dither(int dithersubtype, double fraction, int x, int y)
{
  double threshold;
  static const float pattern[2][64] = {
   { // Dispersed ordered dither
     0.5/64,48.5/64,12.5/64,60.5/64, 3.5/64,51.5/64,15.5/64,63.5/64,
    32.5/64,16.5/64,44.5/64,28.5/64,35.5/64,19.5/64,47.5/64,31.5/64,
     8.5/64,56.5/64, 4.5/64,52.5/64,11.5/64,59.5/64, 7.5/64,55.5/64,
    40.5/64,24.5/64,36.5/64,20.5/64,43.5/64,27.5/64,39.5/64,23.5/64,
     2.5/64,50.5/64,14.5/64,62.5/64, 1.5/64,49.5/64,13.5/64,61.5/64,
    34.5/64,18.5/64,46.5/64,30.5/64,33.5/64,17.5/64,45.5/64,29.5/64,
    10.5/64,58.5/64, 6.5/64,54.5/64, 9.5/64,57.5/64, 5.5/64,53.5/64,
    42.5/64,26.5/64,38.5/64,22.5/64,41.5/64,25.5/64,37.5/64,21.5/64
   },
   { // Halftone ordered dither
     3.5/64, 9.5/64,17.5/64,27.5/64,25.5/64,15.5/64, 7.5/64, 1.5/64,
    11.5/64,29.5/64,37.5/64,45.5/64,43.5/64,35.5/64,23.5/64, 5.5/64,
    19.5/64,39.5/64,51.5/64,57.5/64,55.5/64,49.5/64,33.5/64,13.5/64,
    31.5/64,47.5/64,59.5/64,63.5/64,61.5/64,53.5/64,41.5/64,21.5/64,
    30.5/64,46.5/64,58.5/64,62.5/64,60.5/64,52.5/64,40.5/64,20.5/64,
    18.5/64,38.5/64,50.5/64,56.5/64,54.5/64,48.5/64,32.5/64,12.5/64,
    10.5/64,28.5/64,36.5/64,44.5/64,42.5/64,34.5/64,22.5/64, 4.5/64,
     2.5/64, 8.5/64,16.5/64,26.5/64,24.5/64,14.5/64, 6.5/64, 0.5/64
   }};

  threshold = pattern[dithersubtype][(x%8) + 8*(y%8)];
  return (fraction >= threshold);
}

// Returns 0 if we should round down, 1 if we should round up.
static int iw_random_dither(struct iw_context *ctx, double fraction, int x, int y,
  int dithersubtype, int channel)
{
  double threshold;

  threshold = ((double)iwpvt_prng_rand(ctx->prng)) / (double)0xffffffff;
  if(fraction>=threshold) return 1;
  return 0;
}

static void iw_errdiff_dither(struct iw_context *ctx,int dithersubtype,
  double err,int x,int y)
{
  int fwd;
  const double *m;

  //        x  0  1
  //  2  3  4  5  6
  //  7  8  9 10 11
  static const double matrix_list[][12] = {
   { 7.0/16, 0.0,                                  // 0 = Floyd-Steinberg
     0.0 , 3.0/16, 5.0/16, 1.0/16, 0.0,
     0.0 , 0.0 , 0.0 , 0.0 , 0.0 },
   { 7.0/48, 5.0/48,                               // 1 = JJN
     3.0/48, 5.0/48, 7.0/48, 5.0/48, 3.0/48,
     1.0/48, 3.0/48, 5.0/48, 3.0/48, 1.0/48 },
   { 8.0/42, 4.0/42,                               // 2 = Stucki
     2.0/42, 4.0/42, 8.0/42, 4.0/42, 2.0/42,
     1.0/42, 2.0/42, 4.0/42, 2.0/42, 1.0/42 },
   { 8.0/32, 4.0/32,                               // 3 = Burkes
     2.0/32, 4.0/32, 8.0/32, 4.0/32, 2.0/32,
     0.0 , 0.0 , 0.0 , 0.0 , 0.0 },
   { 5.0/32, 3.0/32,                               // 4 = Sierra3
     2.0/32, 4.0/32, 5.0/32, 4.0/32, 2.0/32,
     0.0, 2.0/32, 3.0/32, 2.0/32, 0.0 },
   { 4.0/16, 3.0/16,                               // 5 = Sierra2
     1.0/16, 2.0/16, 3.0/16, 2.0/16, 1.0/16,
     0.0 , 0.0 , 0.0 , 0.0 , 0.0 },
   { 2.0/4 , 0.0,                                  // 6 = Sierra42a
     0.0 , 1.0/4 , 1.0/4 , 0.0 , 0.0,
     0.0 , 0.0 , 0.0 , 0.0 , 0.0 },
   { 1.0/8 , 1.0/8,                                // 7 = Atkinson
     0.0 , 1.0/8 , 1.0/8 , 1.0/8 , 0.0,
     0.0 , 0.0 , 1.0/8 , 0.0 , 0.0 }
  };

  if(dithersubtype<=7)
    m = matrix_list[dithersubtype];
  else
    m = matrix_list[0];

  fwd = (y%2)?(-1):1;

  if((x-fwd)>=0 && (x-fwd)<ctx->img2.width) {
    if((x-2*fwd)>=0 && (x-2*fwd)<ctx->img2.width) {
      ctx->dither_errors[1][x-2*fwd] += err*(m[2]);
      ctx->dither_errors[2][x-2*fwd] += err*(m[7]);
    }
    ctx->dither_errors[1][x-fwd] += err*(m[3]);
    ctx->dither_errors[2][x-fwd] += err*(m[8]);
  }
  ctx->dither_errors[1][x] += err*(m[4]);
  ctx->dither_errors[2][x] += err*(m[9]);
  if((x+fwd)>=0 && (x+fwd)<ctx->img2.width) {
    ctx->dither_errors[0][x+fwd] += err*(m[0]);
    ctx->dither_errors[1][x+fwd] += err*(m[5]);
    ctx->dither_errors[2][x+fwd] += err*(m[10]);
    if((x+2*fwd)>=0 && (x+2*fwd)<ctx->img2.width) {
      ctx->dither_errors[0][x+2*fwd] += err*(m[1]);
      ctx->dither_errors[1][x+2*fwd] += err*(m[6]);
      ctx->dither_errors[2][x+2*fwd] += err*(m[11]);
    }
  }
}
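/*
 * [Illustrative sketch, not part of imagew-main.c] The first matrix row above
 * is the classic Floyd-Steinberg kernel; note each kernel's weights sum to 1,
 * so no brightness is created or lost. A standalone one-pixel example of the
 * error-diffusion step with the 7/16, 3/16, 5/16, 1/16 weights:
 */
#if 0
#include <stdio.h>

int main(void)
{
  /* Quantize a 0..1 sample to 0 or 1 and push the error to the neighbours. */
  double next_right = 0.0, below_left = 0.0, below = 0.0, below_right = 0.0;
  double sample = 0.4;
  double quantized = (sample >= 0.5) ? 1.0 : 0.0;
  double err = sample - quantized; /* 0.4 */
  next_right  += err * 7.0/16;
  below_left  += err * 3.0/16;
  below       += err * 5.0/16;
  below_right += err * 1.0/16;
  printf("%.4f %.4f %.4f %.4f\n", next_right, below_left, below, below_right);
  return 0;
}
#endif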
// 'channel' is the output channel.
static int get_nearest_valid_colors(struct iw_context *ctx, iw_tmpsample samp_lin,
  const struct iw_csdescr *csdescr,
  double *s_lin_floor_1, double *s_lin_ceil_1,
  double *s_cvt_floor_full, double *s_cvt_ceil_full,
  double overall_maxcolorcode, int color_count)
{
  iw_tmpsample samp_cvt;
  double samp_cvt_expanded;
  unsigned int floor_int, ceil_int;

  // A preliminary conversion to the target color space.
  samp_cvt = linear_to_x_sample(samp_lin,csdescr);

  if(color_count==0) {
    // The normal case: we want to use this channel's full available depth.
    samp_cvt_expanded = samp_cvt * overall_maxcolorcode;
    if(samp_cvt_expanded>overall_maxcolorcode) samp_cvt_expanded=overall_maxcolorcode;
    if(samp_cvt_expanded<0.0) samp_cvt_expanded=0.0;

    // Find the next-smallest and next-largest valid values that
    // can be stored in this image.
    // We will use one of them, but in order to figure out *which* one,
    // we have to compare their distances in the *linear* color space.
    *s_cvt_floor_full = floor(samp_cvt_expanded);
    *s_cvt_ceil_full  = ceil(samp_cvt_expanded);
  }
  else {
    // We're "posterizing": restricting to a certain number of color shades.
    double posterized_maxcolorcode;
    // Example: color_count = 4, bit_depth = 8;
    // Colors are from 0.0 to 3.0, mapped to 0.0 to 255.0.
    // Reduction factor is 255.0/3.0 = 85.0

    posterized_maxcolorcode = (double)(color_count-1);

    samp_cvt_expanded = samp_cvt * posterized_maxcolorcode;
    if(samp_cvt_expanded>posterized_maxcolorcode) samp_cvt_expanded=posterized_maxcolorcode;
    if(samp_cvt_expanded<0.0) samp_cvt_expanded=0.0;

    // If the number of shades is not 2, 4, 6, 16, 18, 52, 86, or 256 (assuming 8-bit depth),
    // then the shades will not be exactly evenly spaced. For example, if there are 3 shades,
    // they will be 0, 128, and 255. It will often be the case that the shade we want is exactly
    // halfway between the nearest two available shades, and the "0.5000000001" fudge factor is my
    // attempt to make sure it rounds consistently in the same direction.
    *s_cvt_floor_full = floor(0.5000000001 + floor(samp_cvt_expanded) * (overall_maxcolorcode/posterized_maxcolorcode));
    *s_cvt_ceil_full  = floor(0.5000000001 + ceil (samp_cvt_expanded) * (overall_maxcolorcode/posterized_maxcolorcode));
  }

  floor_int = (unsigned int)(*s_cvt_floor_full);
  ceil_int  = (unsigned int)(*s_cvt_ceil_full);
  if(floor_int == ceil_int) {
    return 1;
  }

  // Convert the candidates to our linear color space
  *s_lin_floor_1 = cvt_int_sample_to_linear_output(ctx,floor_int,csdescr,overall_maxcolorcode);
  *s_lin_ceil_1  = cvt_int_sample_to_linear_output(ctx,ceil_int ,csdescr,overall_maxcolorcode);

  return 0;
}
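/*
 * [Illustrative sketch, not part of imagew-main.c] A worked instance of the
 * posterization branch above: color_count=4, 8-bit output, and a sample of
 * 0.30 already converted to the target space. The two candidate pixel values
 * come out as 0 and 85, matching the 255.0/3.0 reduction factor in the
 * comment:
 */
#if 0
#include <math.h>
#include <stdio.h>

int main(void)
{
  double samp_cvt = 0.30;            /* in [0,1], target colorspace */
  double post_max = 4.0 - 1.0;       /* posterized_maxcolorcode */
  double overall_max = 255.0;
  double expanded = samp_cvt * post_max; /* 0.9 */
  double f = floor(0.5000000001 + floor(expanded) * (overall_max/post_max));
  double c = floor(0.5000000001 + ceil(expanded)  * (overall_max/post_max));
  printf("candidates: %.0f and %.0f\n", f, c); /* 0 and 85 */
  return 0;
}
#endif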
// channel is the output channel
static void put_sample_convert_from_linear_flt(struct iw_context *ctx, iw_tmpsample samp_lin,
  int x, int y, int channel, const struct iw_csdescr *csdescr)
{
  put_raw_sample_flt32(ctx,(double)samp_lin,x,y,channel);
}

static double get_final_sample_using_nc_tbl(struct iw_context *ctx, iw_tmpsample samp_lin)
{
  unsigned int x;
  unsigned int d;

  // For numbers 0 through 254, find the smallest one for which the
  // corresponding table value is larger than samp_lin.
  // Do a binary search.
  x = 127;
  d = 64;

  while(1) {
    if(x>254 || ctx->nearest_color_table[x] > samp_lin)
      x -= d;
    else
      x += d;

    if(d==1) {
      if(x>254 || ctx->nearest_color_table[x] > samp_lin)
        return (double)(x);
      else
        return (double)(x+1);
    }

    d = d/2;
  }
}
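/*
 * [Illustrative sketch, not part of imagew-main.c] The fixed-step bisection
 * above is equivalent to this linear scan over the 255 per-entry maxima;
 * 'tbl' stands in for ctx->nearest_color_table:
 */
#if 0
static double nearest_by_scan(const double *tbl, double samp_lin)
{
  int i;
  /* Return the smallest index whose threshold exceeds samp_lin. */
  for(i=0;i<255;i++) {
    if(tbl[i] > samp_lin) return (double)i;
  }
  return 255.0;
}
#endif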
// channel is the output channel
static void put_sample_convert_from_linear(struct iw_context *ctx, iw_tmpsample samp_lin,
  int x, int y, int channel, const struct iw_csdescr *csdescr)
{
  double s_lin_floor_1, s_lin_ceil_1;
  double s_cvt_floor_full, s_cvt_ceil_full;
  double d_floor, d_ceil;
  int is_exact;
  double s_full;
  int ditherfamily;
  int dd; // Dither decision: 0 to use floor, 1 to use ceil.

  // Clamp to the [0.0,1.0] range.
  // The sample type is UINT, so out-of-range samples can't be represented.
  // TODO: I think that out-of-range samples could still have a meaningful
  // effect if we are dithering. More investigation is needed here.
  if(samp_lin<0.0) samp_lin=0.0;
  if(samp_lin>1.0) samp_lin=1.0;

  // TODO: This is getting messy. The conditions under which we use lookup
  // tables are too complicated, and we still don't use them as often as we
  // should. For example, if we are not dithering, we can use a table optimized
  // for telling us the single nearest color. But if we are dithering, then we
  // instead need to know both the next-highest and next-lowest colors, which
  // would require a different table. The same table could be used for both,
  // but not quite as efficiently. Currently, we don't use a lookup table
  // when dithering, except that we may still use one to do some of the
  // intermediate computations. Etc.
  if(ctx->img2_ci[channel].use_nearest_color_table) {
    s_full = get_final_sample_using_nc_tbl(ctx,samp_lin);
    goto okay;
  }

  ditherfamily=ctx->img2_ci[channel].ditherfamily;

  if(ditherfamily==IW_DITHERFAMILY_ERRDIFF) {
    samp_lin += ctx->dither_errors[0][x];
    // If the prior error makes the ideal brightness out of the available range,
    // just throw away any extra.
    if(samp_lin>1.0) samp_lin=1.0;
    else if(samp_lin<0.0) samp_lin=0.0;
  }

  is_exact = get_nearest_valid_colors(ctx,samp_lin,csdescr,
    &s_lin_floor_1, &s_lin_ceil_1,
    &s_cvt_floor_full, &s_cvt_ceil_full,
    ctx->img2_ci[channel].maxcolorcode_dbl,
    ctx->img2_ci[channel].color_count);

  if(is_exact) {
    s_full = s_cvt_floor_full;

    // Hack to keep the PRNG in sync. We have to generate exactly one random
    // number per sample, regardless of whether we use it.
    if(ditherfamily==IW_DITHERFAMILY_RANDOM) {
      (void)iwpvt_prng_rand(ctx->prng);
    }
    goto okay;
  }

  // samp_lin should be between s_lin_floor_1 and s_lin_ceil_1. Figure out
  // which is closer, and use the final pixel value we figured out earlier
  // (either s_cvt_floor_full or s_cvt_ceil_full).
  d_floor = samp_lin-s_lin_floor_1;
  d_ceil  = s_lin_ceil_1-samp_lin;

  if(ditherfamily==IW_DITHERFAMILY_NONE) {
    // Not dithering. Just choose closest value.
    if(d_ceil<=d_floor) s_full=s_cvt_ceil_full;
    else s_full=s_cvt_floor_full;
  }
  else if(ditherfamily==IW_DITHERFAMILY_ERRDIFF) {
    if(d_ceil<=d_floor) {
      // Ceiling is closer. This pixel will be lighter than ideal.
      // so the error is negative, to make other pixels darker.
      iw_errdiff_dither(ctx,ctx->img2_ci[channel].dithersubtype,-d_ceil,x,y);
      s_full=s_cvt_ceil_full;
    }
    else {
      iw_errdiff_dither(ctx,ctx->img2_ci[channel].dithersubtype,d_floor,x,y);
      s_full=s_cvt_floor_full;
    }
  }
  else if(ditherfamily==IW_DITHERFAMILY_ORDERED) {
    dd=iw_ordered_dither(ctx->img2_ci[channel].dithersubtype,
      d_floor/(d_floor+d_ceil),x,y);
    s_full = dd ? s_cvt_ceil_full : s_cvt_floor_full;
  }
  else if(ditherfamily==IW_DITHERFAMILY_RANDOM) {
    dd=iw_random_dither(ctx,d_floor/(d_floor+d_ceil),x,y,ctx->img2_ci[channel].dithersubtype,channel);
    s_full = dd ? s_cvt_ceil_full : s_cvt_floor_full;
  }
  else {
    // Unsupported dither method.
    s_full = 0.0;
  }

okay:
  put_raw_sample(ctx,s_full,x,y,channel);
}

// A stripped-down version of put_sample_convert_from_linear(),
// intended for use with background colors.
static unsigned int calc_sample_convert_from_linear(struct iw_context *ctx,
  iw_tmpsample samp_lin, const struct iw_csdescr *csdescr, double overall_maxcolorcode)
{
  double s_lin_floor_1, s_lin_ceil_1;
  double s_cvt_floor_full, s_cvt_ceil_full;
  double d_floor, d_ceil;
  int is_exact;
  double s_full;

  if(samp_lin<0.0) samp_lin=0.0;
  if(samp_lin>1.0) samp_lin=1.0;

  is_exact = get_nearest_valid_colors(ctx,samp_lin,csdescr,
    &s_lin_floor_1, &s_lin_ceil_1,
    &s_cvt_floor_full, &s_cvt_ceil_full,
    overall_maxcolorcode, 0);

  if(is_exact) {
    s_full = s_cvt_floor_full;
    goto okay;
  }

  d_floor = samp_lin-s_lin_floor_1;
  d_ceil  = s_lin_ceil_1-samp_lin;

  if(d_ceil<=d_floor) s_full=s_cvt_ceil_full;
  else s_full=s_cvt_floor_full;

okay:
  return (unsigned int)(0.5+s_full);
}

static void clamp_output_samples(struct iw_context *ctx, iw_tmpsample *out_pix, int num_out_pix)
{
  int i;

  for(i=0;i<num_out_pix;i++) {
    if(out_pix[i]<0.0) out_pix[i]=0.0;
    else if(out_pix[i]>1.0) out_pix[i]=1.0;
  }
}

// TODO: Maybe this should be a flag in ctx, instead of a function that is
// called repeatedly.
static int iw_bkgd_has_transparency(struct iw_context *ctx)
{
  if(!ctx->apply_bkgd) return 0;
  if(!(ctx->output_profile&IW_PROFILE_TRANSPARENCY)) return 0;
  if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY) return 0;

  if(ctx->bkgd_color_source==IW_BKGD_COLOR_SOURCE_FILE) {
    if(ctx->img1_bkgd_label_inputcs.c[3]<1.0) return 1;
  }
  else if(ctx->bkgd_color_source==IW_BKGD_COLOR_SOURCE_REQ) {
    if(ctx->bkgd_checkerboard) {
      if(ctx->req.bkgd2.c[3]<1.0) return 1;
    }
    if(ctx->req.bkgd.c[3]<1.0) return 1;
  }
  return 0;
}

// 'channel' is an intermediate channel number.
static int iw_process_cols_to_intermediate(struct iw_context *ctx, int channel,
  const struct iw_csdescr *in_csdescr)
{
  int i,j;
  int retval=0;
  iw_tmpsample tmp_alpha;
  iw_tmpsample *inpix_tofree = NULL;
  iw_tmpsample *outpix_tofree = NULL;
  int is_alpha_channel;
  struct iw_resize_settings *rs = NULL;
  struct iw_channelinfo_intermed *int_ci;
  iw_tmpsample *in_pix;
  iw_tmpsample *out_pix;
  int num_in_pix;
  int num_out_pix;

  int_ci = &ctx->intermed_ci[channel];
  is_alpha_channel = (int_ci->channeltype==IW_CHANNELTYPE_ALPHA);

  num_in_pix = ctx->input_h;
  inpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_in_pix * sizeof(iw_tmpsample));
  if(!inpix_tofree) goto done;
  in_pix = inpix_tofree;

  num_out_pix = ctx->intermed_canvas_height;
  outpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_out_pix * sizeof(iw_tmpsample));
  if(!outpix_tofree) goto done;
  out_pix = outpix_tofree;

  rs=&ctx->resize_settings[IW_DIMENSION_V];

  // If the resize context for this dimension already exists, we should be
  // able to reuse it. Otherwise, create a new one.
  if(!rs->rrctx) {
    // TODO: The use of the word "rows" here is misleading, because we are
    // actually resizing columns.
    rs->rrctx = iwpvt_resize_rows_init(ctx,rs,int_ci->channeltype,
      num_in_pix, num_out_pix);
    if(!rs->rrctx) goto done;
  }

  for(i=0;i<ctx->input_w;i++) {
    // Read a column of pixels into ctx->in_pix
    for(j=0;j<ctx->input_h;j++) {
      in_pix[j] = get_sample_cvt_to_linear(ctx,i,j,channel,in_csdescr);

      if(int_ci->need_unassoc_alpha_processing) { // We need opacity information also
        tmp_alpha = get_raw_sample(ctx,i,j,ctx->img1_alpha_channel_index);

        // Multiply color amount by opacity
        in_pix[j] *= tmp_alpha;
      }
      else if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY) {
        // We're doing "Early" background color application.
        // All intermediate channels will need the background color
        // applied to them.
        tmp_alpha = get_raw_sample(ctx,i,j,ctx->img1_alpha_channel_index);
        in_pix[j] = (tmp_alpha)*(in_pix[j]) +
          (1.0-tmp_alpha)*(int_ci->bkgd_color_lin);
      }
    }

    // Now we have a row in the right format.
    // Resize it and store it in the right place in the intermediate array.

    iwpvt_resize_row_main(rs->rrctx,in_pix,out_pix);

    if(ctx->intclamp)
      clamp_output_samples(ctx,out_pix,num_out_pix);

    // The intermediate pixels are in ctx->out_pix. Copy them to the intermediate array.
    for(j=0;j<ctx->intermed_canvas_height;j++) {
      if(is_alpha_channel) {
        ctx->intermediate_alpha32[((size_t)j)*ctx->intermed_canvas_width + i] = (iw_float32)out_pix[j];
      }
      else {
        ctx->intermediate32[((size_t)j)*ctx->intermed_canvas_width + i] = (iw_float32)out_pix[j];
      }
    }
  }

  retval=1;

done:
  if(rs && rs->disable_rrctx_cache && rs->rrctx) {
    // In some cases, the channels may need different resize contexts.
    // Delete the current context, so that it doesn't get reused.
    iwpvt_resize_rows_done(rs->rrctx);
    rs->rrctx = NULL;
  }
  if(inpix_tofree) iw_free(ctx,inpix_tofree);
  if(outpix_tofree) iw_free(ctx,outpix_tofree);
  return retval;
}

// 'handle_alpha_flag' must be set if an alpha channel exists and this is not
// the alpha channel.
static int iw_process_rows_intermediate_to_final(struct iw_context *ctx, int intermed_channel,
  const struct iw_csdescr *out_csdescr)
{
  int i,j;
  int z;
  int k;
  int retval=0;
  iw_tmpsample tmpsamp;
  iw_tmpsample alphasamp = 0.0;
  iw_tmpsample *inpix_tofree = NULL; // Used if we need a separate temp buffer for input samples
  iw_tmpsample *outpix_tofree = NULL; // Used if we need a separate temp buffer for output samples
  // Do any of the output channels use error-diffusion dithering?
  int using_errdiffdither = 0;
  int output_channel;
  int is_alpha_channel;
  int bkgd_has_transparency;
  double tmpbkgdalpha=0.0;
  int alt_bkgd = 0; // Nonzero if we should use bkgd2 for this sample
  struct iw_resize_settings *rs = NULL;
  int ditherfamily, dithersubtype;
  struct iw_channelinfo_intermed *int_ci;
  struct iw_channelinfo_out *out_ci;
  iw_tmpsample *in_pix = NULL;
  iw_tmpsample *out_pix = NULL;
  int num_in_pix;
  int num_out_pix;

  num_in_pix = ctx->intermed_canvas_width;
  num_out_pix = ctx->img2.width;

  int_ci = &ctx->intermed_ci[intermed_channel];
  output_channel = int_ci->corresponding_output_channel;
  out_ci = &ctx->img2_ci[output_channel];
  is_alpha_channel = (int_ci->channeltype==IW_CHANNELTYPE_ALPHA);
  bkgd_has_transparency = iw_bkgd_has_transparency(ctx);

  inpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_in_pix * sizeof(iw_tmpsample));
  in_pix = inpix_tofree;

  // We need an output buffer.
  outpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_out_pix * sizeof(iw_tmpsample));
  if(!outpix_tofree) goto done;
  out_pix = outpix_tofree;

  // Decide if the 'nearest color table' optimization can be used
  if(ctx->nearest_color_table && !is_alpha_channel &&
     out_ci->ditherfamily==IW_DITHERFAMILY_NONE &&
     out_ci->color_count==0)
  {
    out_ci->use_nearest_color_table = 1;
  }
  else {
    out_ci->use_nearest_color_table = 0;
  }

  // Seed the PRNG, if necessary.
  ditherfamily = out_ci->ditherfamily;
  dithersubtype = out_ci->dithersubtype;
  if(ditherfamily==IW_DITHERFAMILY_RANDOM) {
    // Decide what random seed to use. The alpha channel always has its own
    // seed. If using "r" (not "r2") dithering, every channel has its own seed.
    if(dithersubtype==IW_DITHERSUBTYPE_SAMEPATTERN &&
       out_ci->channeltype!=IW_CHANNELTYPE_ALPHA)
    {
      iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed);
    }
    else {
      iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed+out_ci->channeltype);
    }
  }

  // Initialize Floyd-Steinberg dithering.
  if(output_channel>=0 && out_ci->ditherfamily==IW_DITHERFAMILY_ERRDIFF) {
    using_errdiffdither = 1;
    for(i=0;i<ctx->img2.width;i++) {
      for(k=0;k<IW_DITHER_MAXROWS;k++) {
        ctx->dither_errors[k][i] = 0.0;
      }
    }
  }

  rs=&ctx->resize_settings[IW_DIMENSION_H];

  // If the resize context for this dimension already exists, we should be
  // able to reuse it. Otherwise, create a new one.
  if(!rs->rrctx) {
    rs->rrctx = iwpvt_resize_rows_init(ctx,rs,int_ci->channeltype,
      num_in_pix, num_out_pix);
    if(!rs->rrctx) goto done;
  }

  for(j=0;j<ctx->intermed_canvas_height;j++) {

    // As needed, either copy the input pixels to a temp buffer (inpix, which
    // ctx->in_pix already points to), or point ctx->in_pix directly to the
    // intermediate data.
    if(is_alpha_channel) {
      for(i=0;i<num_in_pix;i++) {
        inpix_tofree[i] = ctx->intermediate_alpha32[((size_t)j)*ctx->intermed_canvas_width+i];
      }
    }
    else {
      for(i=0;i<num_in_pix;i++) {
        inpix_tofree[i] = ctx->intermediate32[((size_t)j)*ctx->intermed_canvas_width+i];
      }
    }

    // Resize ctx->in_pix to ctx->out_pix.
    iwpvt_resize_row_main(rs->rrctx,in_pix,out_pix);

    if(ctx->intclamp)
      clamp_output_samples(ctx,out_pix,num_out_pix);

    // If necessary, copy the resized samples to the final_alpha image
    if(is_alpha_channel && outpix_tofree && ctx->final_alpha32) {
      for(i=0;i<num_out_pix;i++) {
        ctx->final_alpha32[((size_t)j)*ctx->img2.width+i] = (iw_float32)outpix_tofree[i];
      }
    }

    // Now convert the out_pix and put them in the final image.

    if(output_channel == -1) {
      // No corresponding output channel.
      // (Presumably because this is an alpha channel that's being
      // removed because we're applying a background.)
      goto here;
    }

    for(z=0;z<ctx->img2.width;z++) {
      // For decent Floyd-Steinberg dithering, we need to process alternate
      // rows in reverse order.
      if(using_errdiffdither && (j%2))
        i=ctx->img2.width-1-z;
      else
        i=z;

      tmpsamp = out_pix[i];

      if(ctx->bkgd_checkerboard) {
        alt_bkgd = (((ctx->bkgd_check_origin[IW_DIMENSION_H]+i)/ctx->bkgd_check_size)%2) !=
          (((ctx->bkgd_check_origin[IW_DIMENSION_V]+j)/ctx->bkgd_check_size)%2);
      }

      if(bkgd_has_transparency) {
        tmpbkgdalpha = alt_bkgd ? ctx->bkgd2alpha : ctx->bkgd1alpha;
      }

      if(int_ci->need_unassoc_alpha_processing) {
        // Convert color samples back to unassociated alpha.
        alphasamp = ctx->final_alpha32[((size_t)j)*ctx->img2.width + i];

        if(alphasamp!=0.0) {
          tmpsamp /= alphasamp;
        }

        if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE) {
          // Apply a background color (or checkerboard pattern).
          double bkcolor;
          bkcolor = alt_bkgd ? out_ci->bkgd2_color_lin : out_ci->bkgd1_color_lin;
          if(bkgd_has_transparency) {
            tmpsamp = tmpsamp*alphasamp + bkcolor*tmpbkgdalpha*(1.0-alphasamp);
          }
          else {
            tmpsamp = tmpsamp*alphasamp + bkcolor*(1.0-alphasamp);
          }
        }
      }
      else if(is_alpha_channel && bkgd_has_transparency) {
        // Composite the alpha of the foreground over the alpha of the background.
        tmpsamp = tmpsamp + tmpbkgdalpha*(1.0-tmpsamp);
      }

      if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT)
        put_sample_convert_from_linear_flt(ctx,tmpsamp,i,j,output_channel,out_csdescr);
      else
        put_sample_convert_from_linear(ctx,tmpsamp,i,j,output_channel,out_csdescr);
    }

    if(using_errdiffdither) {
      // Move "next row" error data to "this row", and clear the "next row".
      // TODO: Obviously, it would be more efficient to just swap pointers
      // to the rows.
      for(i=0;i<ctx->img2.width;i++) {
        // Move data in all rows but the first row up one row.
        for(k=0;k<IW_DITHER_MAXROWS-1;k++) {
          ctx->dither_errors[k][i] = ctx->dither_errors[k+1][i];
        }
        // Clear the last row.
        ctx->dither_errors[IW_DITHER_MAXROWS-1][i] = 0.0;
      }
    }

here:
    ;
  }

  retval=1;

done:
  if(rs && rs->disable_rrctx_cache && rs->rrctx) {
    // In some cases, the channels may need different resize contexts.
    // Delete the current context, so that it doesn't get reused.
    iwpvt_resize_rows_done(rs->rrctx);
    rs->rrctx = NULL;
  }
  if(inpix_tofree) iw_free(ctx,inpix_tofree);
  if(outpix_tofree) iw_free(ctx,outpix_tofree);
  return retval;
}

static int iw_process_one_channel(struct iw_context *ctx, int intermed_channel,
  const struct iw_csdescr *in_csdescr, const struct iw_csdescr *out_csdescr)
{
  if(!iw_process_cols_to_intermediate(ctx,intermed_channel,in_csdescr)) {
    return 0;
  }
  if(!iw_process_rows_intermediate_to_final(ctx,intermed_channel,out_csdescr)) {
    return 0;
  }
  return 1;
}

// Potentially make a lookup table for color correction.
static void iw_make_x_to_linear_table(struct iw_context *ctx, double **ptable,
  const struct iw_image *img, const struct iw_csdescr *csdescr)
{
  int ncolors;
  int i;
  double *tbl;

  if(csdescr->cstype==IW_CSTYPE_LINEAR) return;
  ncolors = (1 << img->bit_depth);
  if(ncolors>256) return;

  // Don't make a table if the image is really small.
  if( ((size_t)img->width)*img->height <= 512 ) return;

  tbl = iw_malloc(ctx,ncolors*sizeof(double));
  if(!tbl) return;

  for(i=0;i<ncolors;i++) {
    tbl[i] = x_to_linear_sample(((double)i)/(ncolors-1), csdescr);
  }
  *ptable = tbl;
}

static void iw_make_nearest_color_table(struct iw_context *ctx, double **ptable,
  const struct iw_image *img, const struct iw_csdescr *csdescr)
{
  int ncolors;
  int nentries;
  int i;
  double *tbl;
  double prev;
  double curr;

  if(ctx->no_gamma) return;
  if(csdescr->cstype==IW_CSTYPE_LINEAR) return;
  if(img->sampletype==IW_SAMPLETYPE_FLOATINGPOINT) return;
  if(img->bit_depth != ctx->img2.bit_depth) return;
  ncolors = (1 << img->bit_depth);
  if(ncolors>256) return;
  nentries = ncolors-1;

  // Don't make a table if the image is really small.
  if( ((size_t)img->width)*img->height <= 512 ) return;

  tbl = iw_malloc(ctx,nentries*sizeof(double));
  if(!tbl) return;

  // Table stores the maximum value for the given entry.
  // The final entry is omitted, since there is no maximum value.
  prev = 0.0;
  for(i=0;i<nentries;i++) {
    // This conversion may appear to be going in the wrong direction
    // (we're converting *from* linear), but it's correct because we will
    // search through its contents to find the corresponding index,
    // instead of vice versa.
    curr = x_to_linear_sample( ((double)(i+1))/(ncolors-1), csdescr);
    tbl[i] = (prev + curr)/2.0;
    prev = curr;
  }
  *ptable = tbl;
}

// Label is returned in linear colorspace.
// Returns 0 if no label available.
static int get_output_bkgd_label_lin(struct iw_context *ctx, struct iw_color *clr)
{
  clr->c[0] = 1.0;
  clr->c[1] = 0.0;
  clr->c[2] = 1.0;
  clr->c[3] = 1.0;

  if(ctx->req.suppress_output_bkgd_label) return 0;

  if(ctx->req.output_bkgd_label_valid) {
    *clr = ctx->req.output_bkgd_label;
    return 1;
  }

  // If the user didn't specify a label, but the input file had one, copy the
  // input file's label.
  if(ctx->img1_bkgd_label_set) {
    *clr = ctx->img1_bkgd_label_lin;
    return 1;
  }

  return 0;
}

static unsigned int iw_scale_to_int(double s, unsigned int maxcolor)
{
  if(s<=0.0) return 0;
  if(s>=1.0) return maxcolor;
  return (unsigned int)(0.5+s*maxcolor);
}

// Quantize the background color label, and store in ctx->img2.bkgdlabel.
// Also convert it to grayscale if needed.
static void iw_process_bkgd_label(struct iw_context *ctx)
{
  int ret;
  int k;
  struct iw_color clr;
  double maxcolor;
  unsigned int tmpu;

  if(!(ctx->output_profile&IW_PROFILE_PNG_BKGD) &&
     !(ctx->output_profile&IW_PROFILE_RGB8_BKGD) &&
     !(ctx->output_profile&IW_PROFILE_RGB16_BKGD))
  {
    return;
  }

  ret = get_output_bkgd_label_lin(ctx,&clr);
  if(!ret) return;

  if(ctx->to_grayscale) {
    iw_tmpsample g;
    g = iw_color_to_grayscale(ctx, clr.c[0], clr.c[1], clr.c[2]);
    clr.c[0] = clr.c[1] = clr.c[2] = g;
  }

  if(ctx->output_profile&IW_PROFILE_RGB8_BKGD) {
    maxcolor=255.0;
  }
  else if(ctx->output_profile&IW_PROFILE_RGB16_BKGD) {
    maxcolor=65535.0;
  }
  else if(ctx->img2.bit_depth==8) {
    maxcolor=255.0;
  }
  else if(ctx->img2.bit_depth==16) {
    maxcolor=65535.0;
  }
  else {
    return;
  }

  // Although the bkgd label is stored as floating point, we're responsible for
  // making sure that, when scaled and rounded to a format suitable for the output
  // format, it will be the correct color.
  for(k=0;k<3;k++) {
    tmpu = calc_sample_convert_from_linear(ctx, clr.c[k], &ctx->img2cs, maxcolor);
    ctx->img2.bkgdlabel.c[k] = ((double)tmpu)/maxcolor;
  }
  // Alpha sample
  tmpu = iw_scale_to_int(clr.c[3],(unsigned int)maxcolor);
  ctx->img2.bkgdlabel.c[3] = ((double)tmpu)/maxcolor;
  ctx->img2.has_bkgdlabel = 1;
}

static void negate_target_image(struct iw_context *ctx)
{
  int channel;
  struct iw_channelinfo_out *ci;
  int i,j;
  size_t pos;
  iw_float32 s;
  unsigned int n;

  for(channel=0; channel<ctx->img2_numchannels; channel++) {
    ci = &ctx->img2_ci[channel];
    if(ci->channeltype == IW_CHANNELTYPE_ALPHA) continue; // Don't negate alpha channels

    if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) {
      for(j=0; j<ctx->img2.height; j++) {
        for(i=0; i<ctx->img2.width; i++) {
          pos = j*ctx->img2.bpr + ctx->img2_numchannels*i*4 + channel*4;
          s = iw_get_float32(&ctx->img2.pixels[pos]);
          iw_put_float32(&ctx->img2.pixels[pos], ((iw_float32)1.0)-s);
        }
      }
    }
    else if(ctx->img2.bit_depth==8) {
      for(j=0; j<ctx->img2.height; j++) {
        for(i=0; i<ctx->img2.width; i++) {
          pos = j*ctx->img2.bpr + ctx->img2_numchannels*i + channel;
          ctx->img2.pixels[pos] = ci->maxcolorcode_int-ctx->img2.pixels[pos];
        }
      }
    }
    else if(ctx->img2.bit_depth==16) {
      for(j=0; j<ctx->img2.height; j++) {
        for(i=0; i<ctx->img2.width; i++) {
          pos = j*ctx->img2.bpr + ctx->img2_numchannels*i*2 + channel*2;
          n = ctx->img2.pixels[pos]*256 + ctx->img2.pixels[pos+1];
          n = ci->maxcolorcode_int - n;
          ctx->img2.pixels[pos] = (n&0xff00)>>8;
          ctx->img2.pixels[pos+1] = n&0x00ff;
        }
      }
    }
  }
}

static int iw_process_internal(struct iw_context *ctx)
{
  int channel;
  int retval=0;
  int i,k;
  int ret;
  // A linear color-correction descriptor to use with alpha channels.
  struct iw_csdescr csdescr_linear;

  ctx->intermediate32=NULL;
  ctx->intermediate_alpha32=NULL;
  ctx->final_alpha32=NULL;
  ctx->intermed_canvas_width = ctx->input_w;
  ctx->intermed_canvas_height = ctx->img2.height;

  iw_make_linear_csdescr(&csdescr_linear);

  ctx->img2.bpr = iw_calc_bytesperrow(ctx->img2.width,ctx->img2.bit_depth*ctx->img2_numchannels);

  ctx->img2.pixels = iw_malloc_large(ctx, ctx->img2.bpr, ctx->img2.height);
  if(!ctx->img2.pixels) {
    goto done;
  }

  ctx->intermediate32 = (iw_float32*)iw_malloc_large(ctx,
    ctx->intermed_canvas_width * ctx->intermed_canvas_height, sizeof(iw_float32));
  if(!ctx->intermediate32) {
    goto done;
  }

  if(ctx->uses_errdiffdither) {
    for(k=0;k<IW_DITHER_MAXROWS;k++) {
      ctx->dither_errors[k] = (double*)iw_malloc(ctx, ctx->img2.width * sizeof(double));
      if(!ctx->dither_errors[k]) goto done;
    }
  }

  if(!ctx->disable_output_lookup_tables) {
    iw_make_x_to_linear_table(ctx,&ctx->output_rev_color_corr_table,&ctx->img2,&ctx->img2cs);
    iw_make_nearest_color_table(ctx,&ctx->nearest_color_table,&ctx->img2,&ctx->img2cs);
  }

  // If an alpha channel is present, we have to process it first.
  if(IW_IMGTYPE_HAS_ALPHA(ctx->intermed_imgtype)) {
    ctx->intermediate_alpha32 = (iw_float32*)iw_malloc_large(ctx,
      ctx->intermed_canvas_width * ctx->intermed_canvas_height, sizeof(iw_float32));
    if(!ctx->intermediate_alpha32) {
      goto done;
    }
    ctx->final_alpha32 = (iw_float32*)iw_malloc_large(ctx,
      ctx->img2.width * ctx->img2.height, sizeof(iw_float32));
    if(!ctx->final_alpha32) {
      goto done;
    }

    if(!iw_process_one_channel(ctx,ctx->intermed_alpha_channel_index,&csdescr_linear,&csdescr_linear)) goto done;
  }

  // Process the non-alpha channels.
  for(channel=0;channel<ctx->intermed_numchannels;channel++) {
    if(ctx->intermed_ci[channel].channeltype!=IW_CHANNELTYPE_ALPHA) {
      if(ctx->no_gamma)
        ret=iw_process_one_channel(ctx,channel,&csdescr_linear,&csdescr_linear);
      else
        ret=iw_process_one_channel(ctx,channel,&ctx->img1cs,&ctx->img2cs);

      if(!ret) goto done;
    }
  }

  iw_process_bkgd_label(ctx);

  if(ctx->req.negate_target) {
    negate_target_image(ctx);
  }

  retval=1;

done:
  if(ctx->intermediate32) { iw_free(ctx,ctx->intermediate32); ctx->intermediate32=NULL; }
  if(ctx->intermediate_alpha32) { iw_free(ctx,ctx->intermediate_alpha32); ctx->intermediate_alpha32=NULL; }
  if(ctx->final_alpha32) { iw_free(ctx,ctx->final_alpha32); ctx->final_alpha32=NULL; }
  for(k=0;k<IW_DITHER_MAXROWS;k++) {
    if(ctx->dither_errors[k]) { iw_free(ctx,ctx->dither_errors[k]); ctx->dither_errors[k]=NULL; }
  }

  // The 'resize contexts' are usually kept around so that they can be reused.
  // Now that we're done with everything, free them.
  for(i=0;i<2;i++) { // horizontal, vertical
    if(ctx->resize_settings[i].rrctx) {
      iwpvt_resize_rows_done(ctx->resize_settings[i].rrctx);
      ctx->resize_settings[i].rrctx = NULL;
    }
  }
  return retval;
}

static int iw_get_channeltype(int imgtype, int channel)
{
  switch(imgtype) {
  case IW_IMGTYPE_GRAY:
    if(channel==0) return IW_CHANNELTYPE_GRAY;
    break;
  case IW_IMGTYPE_GRAYA:
    if(channel==0) return IW_CHANNELTYPE_GRAY;
    if(channel==1) return IW_CHANNELTYPE_ALPHA;
    break;
  case IW_IMGTYPE_RGB:
    if(channel==0) return IW_CHANNELTYPE_RED;
    if(channel==1) return IW_CHANNELTYPE_GREEN;
    if(channel==2) return IW_CHANNELTYPE_BLUE;
    break;
  case IW_IMGTYPE_RGBA:
    if(channel==0) return IW_CHANNELTYPE_RED;
    if(channel==1) return IW_CHANNELTYPE_GREEN;
    if(channel==2) return IW_CHANNELTYPE_BLUE;
    if(channel==3) return IW_CHANNELTYPE_ALPHA;
    break;
  }
  return 0;
}

static void iw_set_input_channeltypes(struct iw_context *ctx)
{
  int i;
  for(i=0;i<ctx->img1_numchannels_logical;i++) {
    ctx->img1_ci[i].channeltype = iw_get_channeltype(ctx->img1_imgtype_logical,i);
  }
}

static void iw_set_intermed_channeltypes(struct iw_context *ctx)
{
  int i;
  for(i=0;i<ctx->intermed_numchannels;i++) {
    ctx->intermed_ci[i].channeltype = iw_get_channeltype(ctx->intermed_imgtype,i);
  }
}

static void iw_set_out_channeltypes(struct iw_context *ctx)
{
  int i;
  for(i=0;i<ctx->img2_numchannels;i++) {
    ctx->img2_ci[i].channeltype = iw_get_channeltype(ctx->img2.imgtype,i);
  }
}

// Set img2.bit_depth based on output_depth_req, etc.
// Set img2.sampletype.
static void decide_output_bit_depth(struct iw_context *ctx)
{
  if(ctx->output_profile&IW_PROFILE_HDRI) {
    ctx->img2.sampletype=IW_SAMPLETYPE_FLOATINGPOINT;
  }
  else {
    ctx->img2.sampletype=IW_SAMPLETYPE_UINT;
  }

  if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) {
    // Floating point output.
    ctx->img2.bit_depth=32;
    return;
  }

  // Below this point, sample type is UINT.

  if(ctx->req.output_depth>8 && (ctx->output_profile&IW_PROFILE_16BPS)) {
    ctx->img2.bit_depth=16;
  }
  else {
    if(ctx->req.output_depth>8) {
      // Caller requested a depth higher than this format can handle.
      iw_warning(ctx,"Reducing depth to 8; required by the output format.");
    }
    ctx->img2.bit_depth=8;
  }
}

// Set the background color samples that will be used when processing the
// image. (All the logic about how to apply a background color is in
// decide_how_to_apply_bkgd(), not here.)
static void prepare_apply_bkgd(struct iw_context *ctx)
{
  struct iw_color bkgd1; // Main background color in linear colorspace
  struct iw_color bkgd2; // Secondary background color ...
  int i;

  if(!ctx->apply_bkgd) return;

  // Start with a default background color.
  bkgd1.c[0]=1.0;
  bkgd1.c[1]=0.0;
  bkgd1.c[2]=1.0;
  bkgd1.c[3]=1.0;
  bkgd2.c[0]=0.0;
  bkgd2.c[1]=0.0;
  bkgd2.c[2]=0.0;
  bkgd2.c[3]=1.0;

  // Possibly overwrite it with the background color from the appropriate
  // source.
  if(ctx->bkgd_color_source == IW_BKGD_COLOR_SOURCE_FILE) {
    bkgd1 = ctx->img1_bkgd_label_lin; // structure copy
    ctx->bkgd_checkerboard = 0;
  }
  else if(ctx->bkgd_color_source == IW_BKGD_COLOR_SOURCE_REQ) {
    bkgd1 = ctx->req.bkgd;
    if(ctx->req.bkgd_checkerboard) {
      bkgd2 = ctx->req.bkgd2;
    }
  }

  // Set up the channelinfo (and ctx->bkgd*alpha) as needed according to the
  // target image type, and whether we are applying the background before or
  // after resizing.

  if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY) {
    ctx->bkgd1alpha = 1.0;
  }
  else {
    ctx->bkgd1alpha = bkgd1.c[3];
    ctx->bkgd2alpha = bkgd2.c[3];
  }

  if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE &&
     (ctx->img2.imgtype==IW_IMGTYPE_RGB || ctx->img2.imgtype==IW_IMGTYPE_RGBA))
  {
    for(i=0;i<3;i++) {
      ctx->img2_ci[i].bkgd1_color_lin = bkgd1.c[i];
    }
    if(ctx->bkgd_checkerboard) {
      for(i=0;i<3;i++) {
        ctx->img2_ci[i].bkgd2_color_lin = bkgd2.c[i];
      }
    }
  }
  else if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE &&
     (ctx->img2.imgtype==IW_IMGTYPE_GRAY || ctx->img2.imgtype==IW_IMGTYPE_GRAYA))
  {
    ctx->img2_ci[0].bkgd1_color_lin = iw_color_to_grayscale(ctx,bkgd1.c[0],bkgd1.c[1],bkgd1.c[2]);
    if(ctx->bkgd_checkerboard) {
      ctx->img2_ci[0].bkgd2_color_lin = iw_color_to_grayscale(ctx,bkgd2.c[0],bkgd2.c[1],bkgd2.c[2]);
    }
  }
  else if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY &&
     ctx->img2.imgtype==IW_IMGTYPE_RGB)
  {
    for(i=0;i<3;i++) {
      ctx->intermed_ci[i].bkgd_color_lin = bkgd1.c[i];
    }
  }
  else if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY &&
     ctx->img2.imgtype==IW_IMGTYPE_GRAY)
  {
    ctx->intermed_ci[0].bkgd_color_lin = iw_color_to_grayscale(ctx,bkgd1.c[0],bkgd1.c[1],bkgd1.c[2]);
  }
}
#define IW_STRAT1_G_G       0x011 // -grayscale
#define IW_STRAT1_G_RGB     0x013 // default
#define IW_STRAT1_GA_G      0x021 // -grayscale, BKGD_STRATEGY_EARLY (never happens?)
#define IW_STRAT1_GA_GA     0x022 // -grayscale
#define IW_STRAT1_GA_RGB    0x023 // BKGD_STRATEGY_EARLY
#define IW_STRAT1_GA_RGBA   0x024 // default
#define IW_STRAT1_RGB_G     0x031 // -grayscale
#define IW_STRAT1_RGB_RGB   0x033 // default
#define IW_STRAT1_RGBA_G    0x041 // -grayscale, BKGD_STRATEGY_EARLY (never happens?)
#define IW_STRAT1_RGBA_GA   0x042 // -grayscale
#define IW_STRAT1_RGBA_RGB  0x043 // BKGD_STRATEGY_EARLY
#define IW_STRAT1_RGBA_RGBA 0x044 // default

#define IW_STRAT2_G_G       0x111 // -grayscale
#define IW_STRAT2_GA_G      0x121 // -grayscale, BKGD_STRATEGY_LATE
#define IW_STRAT2_GA_GA     0x122 // -grayscale
#define IW_STRAT2_RGB_RGB   0x133 // default
#define IW_STRAT2_RGBA_RGB  0x143 // BKGD_STRATEGY_LATE
#define IW_STRAT2_RGBA_RGBA 0x144 // default
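/*
 * [Sketch; my inference, not an imagew API] The hex values above appear to
 * pack the channel counts of the source and destination image types into the
 * low two nibbles (G=1, GA=2, RGB=3, RGBA=4), with 0x100 set for the second
 * (intermediate-to-output) stage; e.g. IW_STRAT1_GA_RGBA == 0x024. A
 * hypothetical decoder for that encoding:
 */
#if 0
static void decode_strategy(int s, int *stage, int *in_ch, int *out_ch)
{
  *stage  = (s >> 8) & 0xf; /* 0 = strat1, 1 = strat2 */
  *in_ch  = (s >> 4) & 0xf; /* 1=G, 2=GA, 3=RGB, 4=RGBA */
  *out_ch = s & 0xf;
}
#endif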
static void iw_restrict_to_range(int r1, int r2, int *pvar)
{
  if(*pvar < r1) *pvar = r1;
  else if(*pvar > r2) *pvar = r2;
}

static void decide_strategy(struct iw_context *ctx, int *ps1, int *ps2)
{
  int s1, s2;

  // Start with a default strategy
  switch(ctx->img1_imgtype_logical) {
  case IW_IMGTYPE_RGBA:
    if(ctx->to_grayscale) {
      s1=IW_STRAT1_RGBA_GA;
      s2=IW_STRAT2_GA_GA;
    }
    else {
      s1=IW_STRAT1_RGBA_RGBA;
      s2=IW_STRAT2_RGBA_RGBA;
    }
    break;
  case IW_IMGTYPE_RGB:
    if(ctx->to_grayscale) {
      s1=IW_STRAT1_RGB_G;
      s2=IW_STRAT2_G_G;
    }
    else {
      s1=IW_STRAT1_RGB_RGB;
      s2=IW_STRAT2_RGB_RGB;
    }
    break;
  case IW_IMGTYPE_GRAYA:
    if(ctx->to_grayscale) {
      s1=IW_STRAT1_GA_GA;
      s2=IW_STRAT2_GA_GA;
    }
    else {
      s1=IW_STRAT1_GA_RGBA;
      s2=IW_STRAT2_RGBA_RGBA;
    }
    break;
  default:
    if(ctx->to_grayscale) {
      s1=IW_STRAT1_G_G;
      s2=IW_STRAT2_G_G;
    }
    else {
      s1=IW_STRAT1_G_RGB;
      s2=IW_STRAT2_RGB_RGB;
    }
  }

  if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY) {
    // Applying background before resizing
    if(s1==IW_STRAT1_RGBA_RGBA) {
      s1=IW_STRAT1_RGBA_RGB;
      s2=IW_STRAT2_RGB_RGB;
    }
    else if(s1==IW_STRAT1_GA_GA) {
      s1=IW_STRAT1_GA_G;
      s2=IW_STRAT2_G_G;
    }
    else if(s1==IW_STRAT1_GA_RGBA) {
      s1=IW_STRAT1_GA_RGB;
      s2=IW_STRAT2_RGB_RGB;
    }
    else if(s1==IW_STRAT1_RGBA_GA) {
      s1=IW_STRAT1_RGBA_G;
      s2=IW_STRAT2_G_G;
    }
  }

  if(ctx->apply_bkgd && !iw_bkgd_has_transparency(ctx)) {
    if(s2==IW_STRAT2_GA_GA) {
      s2=IW_STRAT2_GA_G;
    }
    else if(s2==IW_STRAT2_RGBA_RGBA) {
      s2=IW_STRAT2_RGBA_RGB;
    }
  }

  *ps1 = s1;
  *ps2 = s2;
}

// Choose our strategy for applying a background to the image.
// Uses:
//  - ctx->img1_imgtype_logical (set by init_channel_info())
//  - ctx->req.bkgd_valid (was background set by caller?)
//  - ctx->req.bkgd_checkerboard (set by caller)
//  - ctx->bkgd_check_size (set by caller)
//  - ctx->resize_settings[d].use_offset
// Sets:
//  - ctx->apply_bkgd (flag indicating whether we'll apply a background)
//  - ctx->apply_bkgd_strategy (flag indicating *when* we'll apply a background)
//  - ctx->bkgd_color_source (where to get the background color)
//  - ctx->bkgd_checkerboard
//  - ctx->bkgd_check_size (sanitized)
// May emit a warning if the caller's settings can't be honored.
static void decide_how_to_apply_bkgd(struct iw_context *ctx)
{
  if(!IW_IMGTYPE_HAS_ALPHA(ctx->img1_imgtype_logical)) {
    // If we know the image does not have any transparency,
    // we don't have to do anything.
    ctx->apply_bkgd=0;
    return;
  }

  // Figure out where to get the background color from, on the assumption
  // that we'll use one.
  if(ctx->img1_bkgd_label_set && (ctx->req.use_bkgd_label_from_file || !ctx->req.bkgd_valid)) {
    // The input file has a background color label, and either we are
    // requested to prefer it to the caller's background color, or
    // the caller did not give us a background color.
    // Use the color from the input file.
    ctx->bkgd_color_source = IW_BKGD_COLOR_SOURCE_FILE;
  }
  else if(ctx->req.bkgd_valid) {
    // Use the background color given by the caller.
    ctx->bkgd_color_source = IW_BKGD_COLOR_SOURCE_REQ;
    // Tentatively use the caller's checkerboard setting.
    // This may be overridden if we can't support checkerboard backgrounds
    // for some reason.
    ctx->bkgd_checkerboard = ctx->req.bkgd_checkerboard;
  }
  else {
    // No background color available. If we need one, we'll have to invent one.
    ctx->bkgd_color_source = IW_BKGD_COLOR_SOURCE_NONE;
  }

  if(ctx->bkgd_checkerboard) {
    if(ctx->bkgd_check_size<1) ctx->bkgd_check_size=1;
  }

  if(ctx->req.bkgd_valid) {
    // Caller told us to apply a background.
    ctx->apply_bkgd=1;
  }

  if(!(ctx->output_profile&IW_PROFILE_TRANSPARENCY)) {
    if(!ctx->req.bkgd_valid && !ctx->apply_bkgd) {
      iw_warning(ctx,"This image may have transparency, which is incompatible with the output format. A background color will be applied.");
    }
    ctx->apply_bkgd=1;
  }

  if(ctx->resize_settings[IW_DIMENSION_H].use_offset ||
     ctx->resize_settings[IW_DIMENSION_V].use_offset)
  {
    // If channel offset is enabled, and the image has transparency, we
    // must apply a solid color background (and we must apply it before
    // resizing), regardless of whether the user asked for it. It's the
    // only strategy we support.
    if(!ctx->req.bkgd_valid && !ctx->apply_bkgd) {
      iw_warning(ctx,"This image may have transparency, which is incompatible with a channel offset. A background color will be applied.");
    }
    ctx->apply_bkgd=1;

    if(ctx->bkgd_checkerboard && ctx->req.bkgd_checkerboard) {
      iw_warning(ctx,"Checkerboard backgrounds are not supported when using a channel offset.");
      ctx->bkgd_checkerboard=0;
    }

    ctx->apply_bkgd_strategy=IW_BKGD_STRATEGY_EARLY;
    return;
  }

  if(!ctx->apply_bkgd) {
    // No reason to apply a background color.
    return;
  }

  if(ctx->bkgd_checkerboard) {
    // Non-solid-color backgrounds must be applied after resizing.
    ctx->apply_bkgd_strategy=IW_BKGD_STRATEGY_LATE;
    return;
  }

  // At this point, either Early or Late background application is possible,
  // and (I think) would, in an idealized situation, yield the same result.
  // Things that can cause it to be different include
  //  * using a different resampling algorithm for the alpha channel (this is
  //    no longer supported)
  //  * 'intermediate clamping'
  //
  // Setting this to Late is the safe choice, though it is slower than Early.
  ctx->apply_bkgd_strategy=IW_BKGD_STRATEGY_LATE;
}

static void iw_set_auto_resizetype(struct iw_context *ctx, int size1, int size2,
  int dimension)
{
  // If not changing the size, default to "null" resize if we can.
  // (We can't do that if using a translation or channel offset.)
  if(size2==size1 && !ctx->resize_settings[dimension].use_offset &&
     !ctx->req.out_true_valid &&
     ctx->resize_settings[dimension].translate==0.0)
  {
    iw_set_resize_alg(ctx, dimension, IW_RESIZETYPE_NULL, 1.0, 0.0, 0.0);
    return;
  }

  // Otherwise, default to Catmull-Rom
  iw_set_resize_alg(ctx, dimension, IW_RESIZETYPE_CUBIC, 1.0, 0.0, 0.5);
}

static void init_channel_info(struct iw_context *ctx)
{
  int i;

  ctx->img1_imgtype_logical = ctx->img1.imgtype;
  if(ctx->resize_settings[IW_DIMENSION_H].edge_policy==IW_EDGE_POLICY_TRANSPARENT ||
     ctx->resize_settings[IW_DIMENSION_V].edge_policy==IW_EDGE_POLICY_TRANSPARENT)
  {
    // Add a virtual alpha channel
    if(ctx->img1.imgtype==IW_IMGTYPE_GRAY) {
      ctx->img1_imgtype_logical = IW_IMGTYPE_GRAYA;
    }
    else if(ctx->img1.imgtype==IW_IMGTYPE_RGB)
      ctx->img1_imgtype_logical = IW_IMGTYPE_RGBA;
  }

  ctx->img1_numchannels_physical = iw_imgtype_num_channels(ctx->img1.imgtype);
  ctx->img1_numchannels_logical = iw_imgtype_num_channels(ctx->img1_imgtype_logical);
  ctx->img1_alpha_channel_index = iw_imgtype_alpha_channel_index(ctx->img1_imgtype_logical);

  iw_set_input_channeltypes(ctx);

  ctx->img2.imgtype = ctx->img1_imgtype_logical; // default
  ctx->img2_numchannels = ctx->img1_numchannels_logical; // default
  ctx->intermed_numchannels = ctx->img1_numchannels_logical; // default

  for(i=0;i<ctx->img1_numchannels_logical;i++) {
    ctx->intermed_ci[i].channeltype = ctx->img1_ci[i].channeltype;
    ctx->intermed_ci[i].corresponding_input_channel = i;
    ctx->img2_ci[i].channeltype = ctx->img1_ci[i].channeltype;

    if(i>=ctx->img1_numchannels_physical) {
      // This is a virtual channel, which is handled by get_raw_sample().
      // But some optimizations cause that function to be bypassed, so we
      // have to disable those optimizations.
      ctx->img1_ci[i].disable_fast_get_sample = 1;
    }
  }
}

// Set the weights for the grayscale algorithm, if needed.
static void prepare_grayscale(struct iw_context *ctx)
{
  switch(ctx->grayscale_formula) {
  case IW_GSF_STANDARD:
    ctx->grayscale_formula = IW_GSF_WEIGHTED;
    iw_set_grayscale_weights(ctx,0.212655,0.715158,0.072187);
    break;
  case IW_GSF_COMPATIBLE:
    ctx->grayscale_formula = IW_GSF_WEIGHTED;
    iw_set_grayscale_weights(ctx,0.299,0.587,0.114);
    break;
  }
}

// Set up some things before we do the resize, and check to make
// sure everything looks okay.
static int iw_prepare_processing(struct iw_context *ctx, int w, int h)
{
  int i,j;
  int output_maxcolorcode_int;
  int strategy1, strategy2;
  int flag;

  if(ctx->output_profile==0) {
    iw_set_error(ctx,"Output profile not set");
    return 0;
  }

  if(!ctx->prng) {
    // TODO: It would be better to only create the random number generator
    // if we will need it.
    ctx->prng = iwpvt_prng_create(ctx);
  }

  if(ctx->randomize) {
    // Acquire and record a random seed. This also seeds the PRNG, but
    // that's irrelevant. It will be re-seeded before it is used.
    ctx->random_seed = iwpvt_util_randomize(ctx->prng);
  }

  if(ctx->req.out_true_valid) {
    ctx->resize_settings[IW_DIMENSION_H].out_true_size = ctx->req.out_true_width;
    ctx->resize_settings[IW_DIMENSION_V].out_true_size = ctx->req.out_true_height;
  }
  else {
    ctx->resize_settings[IW_DIMENSION_H].out_true_size = (double)w;
    ctx->resize_settings[IW_DIMENSION_V].out_true_size = (double)h;
  }

  if(!iw_check_image_dimensions(ctx,ctx->img1.width,ctx->img1.height)) {
    return 0;
  }
  if(!iw_check_image_dimensions(ctx,w,h)) {
    return 0;
  }

  if(ctx->to_grayscale) {
    prepare_grayscale(ctx);
  }

  init_channel_info(ctx);

  ctx->img2.width = w;
  ctx->img2.height = h;

  // Figure out the region of the source image to read from.
  if(ctx->input_start_x<0) ctx->input_start_x=0;
  if(ctx->input_start_y<0) ctx->input_start_y=0;
  if(ctx->input_start_x>ctx->img1.width-1) ctx->input_start_x=ctx->img1.width-1;
  if(ctx->input_start_y>ctx->img1.height-1) ctx->input_start_y=ctx->img1.height-1;
  if(ctx->input_w<0) ctx->input_w = ctx->img1.width - ctx->input_start_x;
  if(ctx->input_h<0) ctx->input_h = ctx->img1.height - ctx->input_start_y;

  if(ctx->input_w<1) ctx->input_w = 1;
  if(ctx->input_h<1) ctx->input_h = 1;

  if(ctx->input_w>(ctx->img1.width-ctx->input_start_x)) ctx->input_w=ctx->img1.width-ctx->input_start_x;
  if(ctx->input_h>(ctx->img1.height-ctx->input_start_y)) ctx->input_h=ctx->img1.height-ctx->input_start_y;

  // Decide on the output colorspace.
  if(ctx->req.output_cs_valid) {
    // Try to use colorspace requested by caller.
    ctx->img2cs = ctx->req.output_cs;

    if(ctx->output_profile&IW_PROFILE_ALWAYSLINEAR) {
      if(ctx->img2cs.cstype!=IW_CSTYPE_LINEAR) {
        iw_warning(ctx,"Forcing output colorspace to linear; required by the output format.");
        iw_make_linear_csdescr(&ctx->img2cs);
      }
    }
  }
  else {
    // By default, set the output colorspace to sRGB in most cases.
    if(ctx->output_profile&IW_PROFILE_ALWAYSLINEAR) {
      iw_make_linear_csdescr(&ctx->img2cs);
    }
    else {
      iw_make_srgb_csdescr_2(&ctx->img2cs);
    }
  }

  // Make sure maxcolorcodes are set.
  if(ctx->img1.sampletype!=IW_SAMPLETYPE_FLOATINGPOINT) {
    ctx->input_maxcolorcode_int = (1 << ctx->img1.bit_depth)-1;
    ctx->input_maxcolorcode = (double)ctx->input_maxcolorcode_int;

    for(i=0;i<IW_CI_COUNT;i++) {
      if(ctx->img1_ci[i].maxcolorcode_int<=0) {
        ctx->img1_ci[i].maxcolorcode_int = ctx->input_maxcolorcode_int;
      }
      ctx->img1_ci[i].maxcolorcode_dbl = (double)ctx->img1_ci[i].maxcolorcode_int;
      if(ctx->img1_ci[i].maxcolorcode_int != ctx->input_maxcolorcode_int) {
        // This is overzealous: We could enable it per-channel.
        // But it's probably not worth the trouble.
        ctx->support_reduced_input_bitdepths = 1;
      }
    }
  }

  if(ctx->support_reduced_input_bitdepths ||
     ctx->img1.sampletype==IW_SAMPLETYPE_FLOATINGPOINT)
  {
    for(i=0;i<ctx->img1_numchannels_physical;i++) {
      ctx->img1_ci[i].disable_fast_get_sample=1;
    }
  }

  // Set the .use_offset flags, based on whether the caller set any
  // .channel_offset[]s.
  for(i=0;i<2;i++) { // horizontal, vertical
    for(j=0;j<3;j++) { // red, green, blue
      if(fabs(ctx->resize_settings[i].channel_offset[j])>0.00001) {
        ctx->resize_settings[i].use_offset=1;
      }
    }
  }

  if(ctx->to_grayscale &&
     (ctx->resize_settings[IW_DIMENSION_H].use_offset ||
      ctx->resize_settings[IW_DIMENSION_V].use_offset) )
  {
    iw_warning(ctx,"Disabling channel offset, due to grayscale output.");
    ctx->resize_settings[IW_DIMENSION_H].use_offset=0;
    ctx->resize_settings[IW_DIMENSION_V].use_offset=0;
  }

  decide_how_to_apply_bkgd(ctx);

  // Decide if we can cache the resize settings.
  for(i=0;i<2;i++) {
    if(ctx->resize_settings[i].use_offset ||
       (ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY &&
        ctx->resize_settings[i].edge_policy==IW_EDGE_POLICY_TRANSPARENT))
    {
      // If a channel offset is used, we have to disable caching, because the
      // offset is stored in the cache, and it won't be the same for all channels.
      // If transparent virtual pixels will be converted to the background color
      // during the resize, we have to disable caching, because the background
      // sample value is stored in the cache, and it may be different for each
      // channel.
ctx->resize_settings[i].disable_rrctx_cache=1; } } decide_strategy(ctx,&strategy1,&strategy2); switch(strategy1) { // input-to-intermediate case IW_STRAT1_RGBA_RGBA: ctx->intermed_imgtype = IW_IMGTYPE_RGBA; break; case IW_STRAT1_GA_RGBA: ctx->intermed_imgtype = IW_IMGTYPE_RGBA; ctx->intermed_ci[0].corresponding_input_channel=0; ctx->intermed_ci[1].corresponding_input_channel=0; ctx->intermed_ci[2].corresponding_input_channel=0; ctx->intermed_ci[3].corresponding_input_channel=1; break; case IW_STRAT1_RGB_RGB: case IW_STRAT1_RGBA_RGB: ctx->intermed_imgtype = IW_IMGTYPE_RGB; break; case IW_STRAT1_G_RGB: case IW_STRAT1_GA_RGB: ctx->intermed_imgtype = IW_IMGTYPE_RGB; ctx->intermed_ci[0].corresponding_input_channel=0; ctx->intermed_ci[1].corresponding_input_channel=0; ctx->intermed_ci[2].corresponding_input_channel=0; break; case IW_STRAT1_RGBA_GA: ctx->intermed_imgtype = IW_IMGTYPE_GRAYA; ctx->intermed_ci[0].cvt_to_grayscale=1; ctx->intermed_ci[0].corresponding_input_channel=0; ctx->intermed_ci[1].corresponding_input_channel=3; break; case IW_STRAT1_GA_GA: ctx->intermed_imgtype = IW_IMGTYPE_GRAYA; break; case IW_STRAT1_RGB_G: ctx->intermed_imgtype = IW_IMGTYPE_GRAY; ctx->intermed_ci[0].cvt_to_grayscale=1; ctx->intermed_ci[0].corresponding_input_channel=0; break; case IW_STRAT1_G_G: ctx->intermed_imgtype = IW_IMGTYPE_GRAY; ctx->intermed_ci[0].corresponding_input_channel=0; break; default: iw_set_errorf(ctx,"Internal error, unknown strategy %d",strategy1); return 0; } ctx->intermed_numchannels = iw_imgtype_num_channels(ctx->intermed_imgtype); ctx->intermed_alpha_channel_index = iw_imgtype_alpha_channel_index(ctx->intermed_imgtype); // Start with default mapping: for(i=0;i<ctx->intermed_numchannels;i++) { ctx->intermed_ci[i].corresponding_output_channel = i; } switch(strategy2) { // intermediate-to-output case IW_STRAT2_RGBA_RGBA: ctx->img2.imgtype = IW_IMGTYPE_RGBA; break; case IW_STRAT2_RGB_RGB: ctx->img2.imgtype = IW_IMGTYPE_RGB; break; case IW_STRAT2_RGBA_RGB: ctx->img2.imgtype = IW_IMGTYPE_RGB; ctx->intermed_ci[3].corresponding_output_channel= -1; break; case IW_STRAT2_GA_GA: ctx->img2.imgtype = IW_IMGTYPE_GRAYA; break; case IW_STRAT2_G_G: ctx->img2.imgtype = IW_IMGTYPE_GRAY; break; case IW_STRAT2_GA_G: ctx->img2.imgtype = IW_IMGTYPE_GRAY; ctx->intermed_ci[1].corresponding_output_channel= -1; break; default: iw_set_error(ctx,"Internal error"); return 0; } ctx->img2_numchannels = iw_imgtype_num_channels(ctx->img2.imgtype); iw_set_intermed_channeltypes(ctx); iw_set_out_channeltypes(ctx); // If an alpha channel is present, set a flag on the other channels to indicate // that we have to process them differently. if(IW_IMGTYPE_HAS_ALPHA(ctx->intermed_imgtype)) { for(i=0;i<ctx->intermed_numchannels;i++) { if(ctx->intermed_ci[i].channeltype!=IW_CHANNELTYPE_ALPHA) ctx->intermed_ci[i].need_unassoc_alpha_processing = 1; } } decide_output_bit_depth(ctx); if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) { flag=0; for(i=0;i<IW_NUM_CHANNELTYPES;i++) { if(ctx->req.color_count[i]) flag=1; } if(flag) { iw_warning(ctx,"Posterization is not supported with floating point output."); } } else { output_maxcolorcode_int = (1 << ctx->img2.bit_depth)-1; // Set the default maxcolorcodes for(i=0;i<ctx->img2_numchannels;i++) { ctx->img2_ci[i].maxcolorcode_int = output_maxcolorcode_int; } // Check for special "reduced" colorcodes. 
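		// A "reduced" colorcode caps a channel at fewer levels than its storage
		// bit depth allows. Hypothetical illustration (the format specifics are
		// not from this file): an 8-bit output channel restricted to 5
		// significant bits gets maxcolorcode_int = 31, so a normalized sample s
		// in [0.0,1.0] quantizes as
		//
		//   code = (unsigned int)(0.5 + s*31.0);   // 0..31
		//
		// instead of (0.5 + s*255.0).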
if((ctx->output_profile&IW_PROFILE_REDUCEDBITDEPTHS)) { for(i=0;i<ctx->img2_numchannels;i++) { int mccr; mccr = ctx->req.output_maxcolorcode[ctx->img2_ci[i].channeltype]; if(mccr>0) { if(mccr>output_maxcolorcode_int) mccr=output_maxcolorcode_int; ctx->img2_ci[i].maxcolorcode_int = mccr; } } } // Set some flags, and set the floating-point versions of the maxcolorcodes. for(i=0;i<ctx->img2_numchannels;i++) { if(ctx->img2_ci[i].maxcolorcode_int != output_maxcolorcode_int) { ctx->reduced_output_maxcolor_flag = 1; ctx->disable_output_lookup_tables = 1; } ctx->img2_ci[i].maxcolorcode_dbl = (double)ctx->img2_ci[i].maxcolorcode_int; } } for(i=0;i<ctx->img2_numchannels;i++) { ctx->img2_ci[i].color_count = ctx->req.color_count[ctx->img2_ci[i].channeltype]; if(ctx->img2_ci[i].color_count) { iw_restrict_to_range(2,ctx->img2_ci[i].maxcolorcode_int,&ctx->img2_ci[i].color_count); } if(ctx->img2_ci[i].color_count==1+ctx->img2_ci[i].maxcolorcode_int) { ctx->img2_ci[i].color_count = 0; } ctx->img2_ci[i].ditherfamily = ctx->ditherfamily_by_channeltype[ctx->img2_ci[i].channeltype]; ctx->img2_ci[i].dithersubtype = ctx->dithersubtype_by_channeltype[ctx->img2_ci[i].channeltype]; } // Scan the output channels to see whether certain types of dithering are used. for(i=0;i<ctx->img2_numchannels;i++) { if(ctx->img2_ci[i].ditherfamily==IW_DITHERFAMILY_ERRDIFF) { ctx->uses_errdiffdither=1; } } if(!ctx->support_reduced_input_bitdepths && ctx->img1.sampletype==IW_SAMPLETYPE_UINT) { iw_make_x_to_linear_table(ctx,&ctx->input_color_corr_table,&ctx->img1,&ctx->img1cs); } if(ctx->img1_bkgd_label_set) { // Convert the background color to a linear colorspace. for(i=0;i<3;i++) { ctx->img1_bkgd_label_lin.c[i] = x_to_linear_sample(ctx->img1_bkgd_label_inputcs.c[i],&ctx->img1cs); } ctx->img1_bkgd_label_lin.c[3] = ctx->img1_bkgd_label_inputcs.c[3]; } if(ctx->apply_bkgd) { prepare_apply_bkgd(ctx); } if(ctx->req.output_rendering_intent==IW_INTENT_UNKNOWN) { // User didn't request a specific intent; copy from input file. ctx->img2.rendering_intent = ctx->img1.rendering_intent; } else { ctx->img2.rendering_intent = ctx->req.output_rendering_intent; } if(ctx->resize_settings[IW_DIMENSION_H].family==IW_RESIZETYPE_AUTO) { iw_set_auto_resizetype(ctx,ctx->input_w,ctx->img2.width,IW_DIMENSION_H); } if(ctx->resize_settings[IW_DIMENSION_V].family==IW_RESIZETYPE_AUTO) { iw_set_auto_resizetype(ctx,ctx->input_h,ctx->img2.height,IW_DIMENSION_V); } if(IW_IMGTYPE_HAS_ALPHA(ctx->img2.imgtype)) { if(!ctx->opt_strip_alpha) { // If we're not allowed to strip the alpha channel, also disable // other optimizations that would implicitly remove the alpha // channel. (The optimization routines may do weird things if we // were to allow this.) ctx->opt_palette = 0; ctx->opt_binary_trns = 0; } } return 1; } IW_IMPL(int) iw_process_image(struct iw_context *ctx) { int ret; int retval = 0; if(ctx->use_count>0) { iw_set_error(ctx,"Internal: Incorrect attempt to reprocess image"); goto done; } ctx->use_count++; ret = iw_prepare_processing(ctx,ctx->canvas_width,ctx->canvas_height); if(!ret) goto done; ret = iw_process_internal(ctx); if(!ret) goto done; iwpvt_optimize_image(ctx); retval = 1; done: return retval; }
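// For orientation: iw_process_image() is the only entry point above that a
// caller invokes directly; iw_prepare_processing(), iw_process_internal(),
// and iwpvt_optimize_image() are internal phases. A minimal usage sketch,
// assuming a context that was already created and configured (source image,
// canvas size, output profile) through the library's public setup functions,
// which are not shown in this file:
//
//   struct iw_context *ctx = /* created and configured elsewhere */;
//   if(!iw_process_image(ctx)) {
//       /* an error message was recorded via iw_set_error(); report it */
//   }
//
// Note the ctx->use_count guard: a context cannot be processed twice.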
// imagew-main.c // Part of ImageWorsener, Copyright (c) 2011 by Jason Summers. // For more information, see the readme.txt file. #include "imagew-config.h" #include <stdlib.h> #include <string.h> #include <math.h> #include "imagew-internals.h" // Given a color type having an alpha channel, returns the index of the // alpha channel. // Return value is not meaningful if type does not have an alpha channel. static int iw_imgtype_alpha_channel_index(int t) { switch(t) { case IW_IMGTYPE_RGBA: return 3; case IW_IMGTYPE_GRAYA: return 1; } return 0; } static IW_INLINE iw_tmpsample srgb_to_linear_sample(iw_tmpsample v_srgb) { if(v_srgb<=0.04045) { return v_srgb/12.92; } else { return pow( (v_srgb+0.055)/(1.055) , 2.4); } } static IW_INLINE iw_tmpsample rec709_to_linear_sample(iw_tmpsample v_rec709) { if(v_rec709 < 4.5*0.020) { return v_rec709/4.5; } else { return pow( (v_rec709+0.099)/1.099 , 1.0/0.45); } } static IW_INLINE iw_tmpsample gamma_to_linear_sample(iw_tmpsample v, double gamma) { return pow(v,gamma); } static iw_tmpsample x_to_linear_sample(iw_tmpsample v, const struct iw_csdescr *csdescr) { switch(csdescr->cstype) { case IW_CSTYPE_SRGB: return srgb_to_linear_sample(v); case IW_CSTYPE_LINEAR: return v; case IW_CSTYPE_GAMMA: return gamma_to_linear_sample(v,csdescr->gamma); case IW_CSTYPE_REC709: return rec709_to_linear_sample(v); } return srgb_to_linear_sample(v); } // Public version of x_to_linear_sample(). IW_IMPL(double) iw_convert_sample_to_linear(double v, const struct iw_csdescr *csdescr) { return (double)x_to_linear_sample(v,csdescr); } static IW_INLINE iw_tmpsample linear_to_srgb_sample(iw_tmpsample v_linear) { if(v_linear <= 0.0031308) { return 12.92*v_linear; } return 1.055*pow(v_linear,1.0/2.4) - 0.055; } static IW_INLINE iw_tmpsample linear_to_rec709_sample(iw_tmpsample v_linear) { // The cutoff point is supposed to be 0.018, but that doesn't make sense, // because the curves don't intersect there. They intersect at almost exactly // 0.020. if(v_linear < 0.020) { return 4.5*v_linear; } return 1.099*pow(v_linear,0.45) - 0.099; } static IW_INLINE iw_tmpsample linear_to_gamma_sample(iw_tmpsample v_linear, double gamma) { return pow(v_linear,1.0/gamma); } static iw_float32 iw_get_float32(const iw_byte *m) { int k; // !!! Portability warning: Using a union in this way may be nonportable. union su_union { iw_byte c[4]; iw_float32 f; } volatile su; for(k=0;k<4;k++) { su.c[k] = m[k]; } return su.f; } static void iw_put_float32(iw_byte *m, iw_float32 s) { int k; // !!! Portability warning: Using a union in this way may be nonportable. 
union su_union { iw_byte c[4]; iw_float32 f; } volatile su; su.f = s; for(k=0;k<4;k++) { m[k] = su.c[k]; } } static iw_tmpsample get_raw_sample_flt32(struct iw_context *ctx, int x, int y, int channel) { size_t z; z = y*ctx->img1.bpr + (ctx->img1_numchannels_physical*x + channel)*4; return (iw_tmpsample)iw_get_float32(&ctx->img1.pixels[z]); } static IW_INLINE unsigned int get_raw_sample_16(struct iw_context *ctx, int x, int y, int channel) { size_t z; unsigned short tmpui16; z = y*ctx->img1.bpr + (ctx->img1_numchannels_physical*x + channel)*2; tmpui16 = ( ((unsigned short)(ctx->img1.pixels[z+0])) <<8) | ctx->img1.pixels[z+1]; return tmpui16; } static IW_INLINE unsigned int get_raw_sample_8(struct iw_context *ctx, int x, int y, int channel) { unsigned short tmpui8; tmpui8 = ctx->img1.pixels[y*ctx->img1.bpr + ctx->img1_numchannels_physical*x + channel]; return tmpui8; } // 4 bits/pixel static IW_INLINE unsigned int get_raw_sample_4(struct iw_context *ctx, int x, int y) { unsigned short tmpui8; tmpui8 = ctx->img1.pixels[y*ctx->img1.bpr + x/2]; if(x&0x1) tmpui8 = tmpui8&0x0f; else tmpui8 = tmpui8>>4; return tmpui8; } // 2 bits/pixel static IW_INLINE unsigned int get_raw_sample_2(struct iw_context *ctx, int x, int y) { unsigned short tmpui8; tmpui8 = ctx->img1.pixels[y*ctx->img1.bpr + x/4]; tmpui8 = ( tmpui8 >> ((3-x%4)*2) ) & 0x03; return tmpui8; } // 1 bit/pixel static IW_INLINE unsigned int get_raw_sample_1(struct iw_context *ctx, int x, int y) { unsigned short tmpui8; tmpui8 = ctx->img1.pixels[y*ctx->img1.bpr + x/8]; if(tmpui8 & (1<<(7-x%8))) return 1; return 0; } // Translate a pixel position from logical to physical coordinates. static IW_INLINE void translate_coords(struct iw_context *ctx, int x, int y, int *prx, int *pry) { if(ctx->img1.orient_transform==0) { // The fast path *prx = ctx->input_start_x+x; *pry = ctx->input_start_y+y; return; } switch(ctx->img1.orient_transform) { case 1: // mirror-x *prx = ctx->img1.width - 1 - (ctx->input_start_x+x); *pry = ctx->input_start_y+y; break; case 2: // mirror-y *prx = ctx->input_start_x+x; *pry = ctx->img1.height - 1 - (ctx->input_start_y+y); break; case 3: // mirror-x, mirror-y *prx = ctx->img1.width - 1 - (ctx->input_start_x+x); *pry = ctx->img1.height - 1 - (ctx->input_start_y+y); break; case 4: // transpose *prx = ctx->input_start_y+y; *pry = ctx->input_start_x+x; break; case 5: *prx = ctx->input_start_y+y; *pry = ctx->img1.width - 1 - (ctx->input_start_x+x); break; case 6: *prx = ctx->img1.height - 1 - (ctx->input_start_y+y); *pry = ctx->input_start_x+x; break; case 7: *prx = ctx->img1.height - 1 - (ctx->input_start_y+y); *pry = ctx->img1.width - 1 - (ctx->input_start_x+x); break; default: *prx = 0; *pry = 0; break; } } // Returns a value from 0 to 2^(ctx->img1.bit_depth)-1. // x and y are logical coordinates. static unsigned int get_raw_sample_int(struct iw_context *ctx, int x, int y, int channel) { int rx,ry; // physical coordinates translate_coords(ctx,x,y,&rx,&ry); switch(ctx->img1.bit_depth) { case 8: return get_raw_sample_8(ctx,rx,ry,channel); case 1: return get_raw_sample_1(ctx,rx,ry); case 16: return get_raw_sample_16(ctx,rx,ry,channel); case 4: return get_raw_sample_4(ctx,rx,ry); case 2: return get_raw_sample_2(ctx,rx,ry); } return 0; } // Channel is the input channel number. // x and y are logical coordinates. static iw_tmpsample get_raw_sample(struct iw_context *ctx, int x, int y, int channel) { unsigned int v; if(channel>=ctx->img1_numchannels_physical) { // This is a virtual alpha channel. Return "opaque". 
return 1.0; } if(ctx->img1.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) { int rx, ry; translate_coords(ctx,x,y,&rx,&ry); if(ctx->img1.bit_depth!=32) return 0.0; return get_raw_sample_flt32(ctx,rx,ry,channel); } v = get_raw_sample_int(ctx,x,y,channel); return ((double)v) / ctx->img1_ci[channel].maxcolorcode_dbl; } static iw_tmpsample iw_color_to_grayscale(struct iw_context *ctx, iw_tmpsample r, iw_tmpsample g, iw_tmpsample b) { iw_tmpsample v0,v1,v2; switch(ctx->grayscale_formula) { case IW_GSF_WEIGHTED: return ctx->grayscale_weight[0]*r + ctx->grayscale_weight[1]*g + ctx->grayscale_weight[2]*b; case IW_GSF_ORDERBYVALUE: // Sort the R, G, and B values, then use the corresponding weights. if(g<=r) { v0=r; v1=g; } else { v0=g; v1=r; } if(b<=v1) { v2=b; } else { v2=v1; if(b<=v0) { v1=b; } else { v1=v0; v0=b; } } return ctx->grayscale_weight[0]*v0 + ctx->grayscale_weight[1]*v1 + ctx->grayscale_weight[2]*v2; } return 0.0; } // Based on color depth of the input image. // Assumes this channel's maxcolorcode == ctx->input_maxcolorcode static iw_tmpsample cvt_int_sample_to_linear(struct iw_context *ctx, unsigned int v, const struct iw_csdescr *csdescr) { iw_tmpsample s; if(csdescr->cstype==IW_CSTYPE_LINEAR) { // Sort of a hack: This is not just an optimization for linear colorspaces, // but is necessary to handle alpha channels correctly. // The lookup table is not correct for alpha channels. return ((double)v) / ctx->input_maxcolorcode; } else if(ctx->input_color_corr_table) { // If the colorspace is not linear, assume we can use the lookup table. return ctx->input_color_corr_table[v]; } s = ((double)v) / ctx->input_maxcolorcode; return x_to_linear_sample(s,csdescr); } // Based on color depth of the output image. static iw_tmpsample cvt_int_sample_to_linear_output(struct iw_context *ctx, unsigned int v, const struct iw_csdescr *csdescr, double overall_maxcolorcode) { iw_tmpsample s; if(csdescr->cstype==IW_CSTYPE_LINEAR) { return ((double)v) / overall_maxcolorcode; } else if(ctx->output_rev_color_corr_table) { return ctx->output_rev_color_corr_table[v]; } s = ((double)v) / overall_maxcolorcode; return x_to_linear_sample(s,csdescr); } // Return a sample, converted to a linear colorspace if it isn't already in one. // Channel is the output channel number. static iw_tmpsample get_sample_cvt_to_linear(struct iw_context *ctx, int x, int y, int channel, const struct iw_csdescr *csdescr) { unsigned int v1,v2,v3; iw_tmpsample r,g,b; int ch; ch = ctx->intermed_ci[channel].corresponding_input_channel; if(ctx->img1_ci[ch].disable_fast_get_sample) { // The slow way... if(ctx->intermed_ci[channel].cvt_to_grayscale) { r = x_to_linear_sample(get_raw_sample(ctx,x,y,ch+0),csdescr); g = x_to_linear_sample(get_raw_sample(ctx,x,y,ch+1),csdescr); b = x_to_linear_sample(get_raw_sample(ctx,x,y,ch+2),csdescr); return iw_color_to_grayscale(ctx,r,g,b); } return x_to_linear_sample(get_raw_sample(ctx,x,y,ch),csdescr); } // This method is faster, because it may use a gamma lookup table. // But all channels have to have the nominal input bitdepth, and it doesn't // support floating point samples, or a virtual alpha channel. 
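	// The lookup table mentioned above maps each representable integer sample
	// straight to its linear value, so the pow() in x_to_linear_sample() runs
	// once per code instead of once per pixel. A minimal sketch of the idea
	// (the actual table is built by iw_make_x_to_linear_table(), later in
	// this file):
	//
	//   for(v=0; v<ncolors; v++)
	//       table[v] = x_to_linear_sample(((double)v)/(ncolors-1), csdescr);
	//
	// after which conversion is simply "linear = table[v]".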
if(ctx->intermed_ci[channel].cvt_to_grayscale) { v1 = get_raw_sample_int(ctx,x,y,ch+0); v2 = get_raw_sample_int(ctx,x,y,ch+1); v3 = get_raw_sample_int(ctx,x,y,ch+2); r = cvt_int_sample_to_linear(ctx,v1,csdescr); g = cvt_int_sample_to_linear(ctx,v2,csdescr); b = cvt_int_sample_to_linear(ctx,v3,csdescr); return iw_color_to_grayscale(ctx,r,g,b); } v1 = get_raw_sample_int(ctx,x,y,ch); return cvt_int_sample_to_linear(ctx,v1,csdescr); } // s is from 0.0 to 65535.0 static IW_INLINE void put_raw_sample_16(struct iw_context *ctx, double s, int x, int y, int channel) { size_t z; unsigned short tmpui16; tmpui16 = (unsigned short)(0.5+s); z = y*ctx->img2.bpr + (ctx->img2_numchannels*x + channel)*2; ctx->img2.pixels[z+0] = (iw_byte)(tmpui16>>8); ctx->img2.pixels[z+1] = (iw_byte)(tmpui16&0xff); } // s is from 0.0 to 255.0 static IW_INLINE void put_raw_sample_8(struct iw_context *ctx, double s, int x, int y, int channel) { iw_byte tmpui8; tmpui8 = (iw_byte)(0.5+s); ctx->img2.pixels[y*ctx->img2.bpr + ctx->img2_numchannels*x + channel] = tmpui8; } // Sample must already be scaled and in the target colorspace. E.g. 255.0 might be white. static void put_raw_sample(struct iw_context *ctx, double s, int x, int y, int channel) { switch(ctx->img2.bit_depth) { case 8: put_raw_sample_8(ctx,s,x,y,channel); break; case 16: put_raw_sample_16(ctx,s,x,y,channel); break; } } // s is from 0.0 to 1.0 static void put_raw_sample_flt32(struct iw_context *ctx, double s, int x, int y, int channel) { size_t pos; pos = y*ctx->img2.bpr + (ctx->img2_numchannels*x + channel)*4; iw_put_float32(&ctx->img2.pixels[pos], (iw_float32)s); } static iw_tmpsample linear_to_x_sample(iw_tmpsample samp_lin, const struct iw_csdescr *csdescr) { if(samp_lin > 0.999999999) { // This check is done mostly because glibc's pow() function may be // very slow for some arguments near 1. return 1.0; } switch(csdescr->cstype) { case IW_CSTYPE_SRGB: return linear_to_srgb_sample(samp_lin); case IW_CSTYPE_LINEAR: return samp_lin; case IW_CSTYPE_GAMMA: return linear_to_gamma_sample(samp_lin,csdescr->gamma); case IW_CSTYPE_REC709: return linear_to_rec709_sample(samp_lin); } return linear_to_srgb_sample(samp_lin); } // Public version of linear_to_x_sample(). IW_IMPL(double) iw_convert_sample_from_linear(double v, const struct iw_csdescr *csdescr) { return (double)linear_to_x_sample(v,csdescr); } // Returns 0 if we should round down, 1 if we should round up. // TODO: It might be good to use a different-sized matrix for alpha channels // (e.g. 9x7), but I don't know how to make a good one. 
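// Worked example for the function below: suppose the ideal value lies 30%
// of the way from the floor color to the ceiling color (fraction = 0.3).
// At pixel (x,y)=(1,0) the dispersed-pattern threshold is
// pattern[0][1] = 48.5/64 ~= 0.758, so 0.3 < 0.758 and we round down; at
// (0,0) the threshold is 0.5/64 ~= 0.008, so the same fraction rounds up.
// Over the whole 8x8 tile, about 30% of the pixels round up, preserving
// the average brightness.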
static int iw_ordered_dither(int dithersubtype, double fraction, int x, int y) { double threshold; static const float pattern[2][64] = { { // Dispersed ordered dither 0.5/64,48.5/64,12.5/64,60.5/64, 3.5/64,51.5/64,15.5/64,63.5/64, 32.5/64,16.5/64,44.5/64,28.5/64,35.5/64,19.5/64,47.5/64,31.5/64, 8.5/64,56.5/64, 4.5/64,52.5/64,11.5/64,59.5/64, 7.5/64,55.5/64, 40.5/64,24.5/64,36.5/64,20.5/64,43.5/64,27.5/64,39.5/64,23.5/64, 2.5/64,50.5/64,14.5/64,62.5/64, 1.5/64,49.5/64,13.5/64,61.5/64, 34.5/64,18.5/64,46.5/64,30.5/64,33.5/64,17.5/64,45.5/64,29.5/64, 10.5/64,58.5/64, 6.5/64,54.5/64, 9.5/64,57.5/64, 5.5/64,53.5/64, 42.5/64,26.5/64,38.5/64,22.5/64,41.5/64,25.5/64,37.5/64,21.5/64 }, { // Halftone ordered dither 3.5/64, 9.5/64,17.5/64,27.5/64,25.5/64,15.5/64, 7.5/64, 1.5/64, 11.5/64,29.5/64,37.5/64,45.5/64,43.5/64,35.5/64,23.5/64, 5.5/64, 19.5/64,39.5/64,51.5/64,57.5/64,55.5/64,49.5/64,33.5/64,13.5/64, 31.5/64,47.5/64,59.5/64,63.5/64,61.5/64,53.5/64,41.5/64,21.5/64, 30.5/64,46.5/64,58.5/64,62.5/64,60.5/64,52.5/64,40.5/64,20.5/64, 18.5/64,38.5/64,50.5/64,56.5/64,54.5/64,48.5/64,32.5/64,12.5/64, 10.5/64,28.5/64,36.5/64,44.5/64,42.5/64,34.5/64,22.5/64, 4.5/64, 2.5/64, 8.5/64,16.5/64,26.5/64,24.5/64,14.5/64, 6.5/64, 0.5/64 }}; threshold = pattern[dithersubtype][(x%8) + 8*(y%8)]; return (fraction >= threshold); } // Returns 0 if we should round down, 1 if we should round up. static int iw_random_dither(struct iw_context *ctx, double fraction, int x, int y, int dithersubtype, int channel) { double threshold; threshold = ((double)iwpvt_prng_rand(ctx->prng)) / (double)0xffffffff; if(fraction>=threshold) return 1; return 0; } static void iw_errdiff_dither(struct iw_context *ctx,int dithersubtype, double err,int x,int y) { int fwd; const double *m; // x 0 1 // 2 3 4 5 6 // 7 8 9 10 11 static const double matrix_list[][12] = { { 7.0/16, 0.0, // 0 = Floyd-Steinberg 0.0 , 3.0/16, 5.0/16, 1.0/16, 0.0, 0.0 , 0.0, 0.0, 0.0 , 0.0 }, { 7.0/48, 5.0/48, // 1 = JJN 3.0/48, 5.0/48, 7.0/48, 5.0/48, 3.0/48, 1.0/48, 3.0/48, 5.0/48, 3.0/48, 1.0/48 }, { 8.0/42, 4.0/42, // 2 = Stucki 2.0/42, 4.0/42, 8.0/42, 4.0/42, 2.0/42, 1.0/42, 2.0/42, 4.0/42, 2.0/42, 1.0/42 }, { 8.0/32, 4.0/32, // 3 = Burkes 2.0/32, 4.0/32, 8.0/32, 4.0/32, 2.0/32, 0.0 , 0.0 , 0.0 , 0.0 , 0.0 }, { 5.0/32, 3.0/32, // 4 = Sierra3 2.0/32, 4.0/32, 5.0/32, 4.0/32, 2.0/32, 0.0, 2.0/32, 3.0/32, 2.0/32, 0.0 }, { 4.0/16, 3.0/16, // 5 = Sierra2 1.0/16, 2.0/16, 3.0/16, 2.0/16, 1.0/16, 0.0 , 0.0 , 0.0 , 0.0 , 0.0 }, { 2.0/4 , 0.0, // 6 = Sierra42a 0.0 , 1.0/4 , 1.0/4 , 0.0 , 0.0, 0.0 , 0.0 , 0.0 , 0.0 , 0.0 }, { 1.0/8 , 1.0/8, // 7 = Atkinson 0.0 , 1.0/8 , 1.0/8 , 1.0/8 , 0.0, 0.0 , 0.0 , 1.0/8 , 0.0 , 0.0 } }; if(dithersubtype<=7) m = matrix_list[dithersubtype]; else m = matrix_list[0]; fwd = (y%2)?(-1):1; if((x-fwd)>=0 && (x-fwd)<ctx->img2.width) { if((x-2*fwd)>=0 && (x-2*fwd)<ctx->img2.width) { ctx->dither_errors[1][x-2*fwd] += err*(m[2]); ctx->dither_errors[2][x-2*fwd] += err*(m[7]); } ctx->dither_errors[1][x-fwd] += err*(m[3]); ctx->dither_errors[2][x-fwd] += err*(m[8]); } ctx->dither_errors[1][x] += err*(m[4]); ctx->dither_errors[2][x] += err*(m[9]); if((x+fwd)>=0 && (x+fwd)<ctx->img2.width) { ctx->dither_errors[0][x+fwd] += err*(m[0]); ctx->dither_errors[1][x+fwd] += err*(m[5]); ctx->dither_errors[2][x+fwd] += err*(m[10]); if((x+2*fwd)>=0 && (x+2*fwd)<ctx->img2.width) { ctx->dither_errors[0][x+2*fwd] += err*(m[1]); ctx->dither_errors[1][x+2*fwd] += err*(m[6]); ctx->dither_errors[2][x+2*fwd] += err*(m[11]); } } } // 'channel' is the output channel. 
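// Worked example for the function below (assuming sRGB output, 8-bit depth,
// overall_maxcolorcode=255, color_count=0): a linear sample of 0.5 converts
// to sRGB ~0.735, which expands to ~187.5, so the candidate codes are 187
// and 188. Both are converted back to linear, and the caller chooses (or
// dithers between) whichever is closer to 0.5 in *linear* space, not in the
// gamma-encoded space.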
static int get_nearest_valid_colors(struct iw_context *ctx, iw_tmpsample samp_lin,
	const struct iw_csdescr *csdescr,
	double *s_lin_floor_1, double *s_lin_ceil_1,
	double *s_cvt_floor_full, double *s_cvt_ceil_full,
	double overall_maxcolorcode, int color_count)
{
	iw_tmpsample samp_cvt;
	double samp_cvt_expanded;
	unsigned int floor_int, ceil_int;

	// A preliminary conversion to the target color space.
	samp_cvt = linear_to_x_sample(samp_lin,csdescr);

	if(color_count==0) {
		// The normal case: we want to use this channel's full available depth.
		samp_cvt_expanded = samp_cvt * overall_maxcolorcode;
		if(samp_cvt_expanded>overall_maxcolorcode) samp_cvt_expanded=overall_maxcolorcode;
		if(samp_cvt_expanded<0.0) samp_cvt_expanded=0.0;

		// Find the next-smallest and next-largest valid values that
		// can be stored in this image.
		// We will use one of them, but in order to figure out *which* one,
		// we have to compare their distances in the *linear* color space.
		*s_cvt_floor_full = floor(samp_cvt_expanded);
		*s_cvt_ceil_full = ceil(samp_cvt_expanded);
	}
	else {
		// We're "posterizing": restricting to a certain number of color shades.
		double posterized_maxcolorcode;
		// Example: color_count = 4, bit_depth = 8;
		// Colors are from 0.0 to 3.0, mapped to 0.0 to 255.0.
		// Reduction factor is 255.0/3.0 = 85.0

		posterized_maxcolorcode = (double)(color_count-1);

		samp_cvt_expanded = samp_cvt * posterized_maxcolorcode;
		if(samp_cvt_expanded>posterized_maxcolorcode) samp_cvt_expanded=posterized_maxcolorcode;
		if(samp_cvt_expanded<0.0) samp_cvt_expanded=0.0;

		// If the number of shades is not 2, 4, 6, 16, 18, 52, 86, or 256 (assuming 8-bit depth),
		// then the shades will not be exactly evenly spaced. For example, if there are 3 shades,
		// they will be 0, 128, and 255. It will often be the case that the shade we want is exactly
		// halfway between the nearest two available shades, and the "0.5000000001" fudge factor is my
		// attempt to make sure it rounds consistently in the same direction.

		*s_cvt_floor_full = floor(0.5000000001 + floor(samp_cvt_expanded) * (overall_maxcolorcode/posterized_maxcolorcode));
		*s_cvt_ceil_full = floor(0.5000000001 + ceil (samp_cvt_expanded) * (overall_maxcolorcode/posterized_maxcolorcode));
	}

	floor_int = (unsigned int)(*s_cvt_floor_full);
	ceil_int = (unsigned int)(*s_cvt_ceil_full);
	if(floor_int == ceil_int) {
		return 1;
	}

	// Convert the candidates to our linear color space
	*s_lin_floor_1 = cvt_int_sample_to_linear_output(ctx,floor_int,csdescr,overall_maxcolorcode);
	*s_lin_ceil_1 = cvt_int_sample_to_linear_output(ctx,ceil_int ,csdescr,overall_maxcolorcode);

	return 0;
}

// channel is the output channel
static void put_sample_convert_from_linear_flt(struct iw_context *ctx, iw_tmpsample samp_lin,
	int x, int y, int channel, const struct iw_csdescr *csdescr)
{
	put_raw_sample_flt32(ctx,(double)samp_lin,x,y,channel);
}

static double get_final_sample_using_nc_tbl(struct iw_context *ctx, iw_tmpsample samp_lin)
{
	unsigned int x;
	unsigned int d;

	// For numbers 0 through 254, find the smallest one for which the
	// corresponding table value is larger than samp_lin.
	// Do a binary search.
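	// For reference, an equivalent (but slower) linear form of the search
	// below:
	//
	//   for(x=0; x<=254; x++) {
	//       if(ctx->nearest_color_table[x] > samp_lin) return (double)x;
	//   }
	//   return 255.0;
	//
	// nearest_color_table[x] holds the upper boundary, in linear space, of
	// the range that maps to code x, so the first boundary above samp_lin
	// selects the nearest code.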
	x = 127;
	d = 64;

	while(1) {
		if(x>254 || ctx->nearest_color_table[x] > samp_lin) x -= d;
		else x += d;

		if(d==1) {
			if(x>254 || ctx->nearest_color_table[x] > samp_lin) return (double)(x);
			else return (double)(x+1);
		}

		d = d/2;
	}
}

// channel is the output channel
static void put_sample_convert_from_linear(struct iw_context *ctx, iw_tmpsample samp_lin,
	int x, int y, int channel, const struct iw_csdescr *csdescr)
{
	double s_lin_floor_1, s_lin_ceil_1;
	double s_cvt_floor_full, s_cvt_ceil_full;
	double d_floor, d_ceil;
	int is_exact;
	double s_full;
	int ditherfamily;
	int dd; // Dither decision: 0 to use floor, 1 to use ceil.

	// Clamp to the [0.0,1.0] range.
	// The sample type is UINT, so out-of-range samples can't be represented.
	// TODO: I think that out-of-range samples could still have a meaningful
	// effect if we are dithering. More investigation is needed here.
	if(samp_lin<0.0) samp_lin=0.0;
	if(samp_lin>1.0) samp_lin=1.0;

	// TODO: This is getting messy. The conditions under which we use lookup
	// tables are too complicated, and we still don't use them as often as we
	// should. For example, if we are not dithering, we can use a table optimized
	// for telling us the single nearest color. But if we are dithering, then we
	// instead need to know both the next-highest and next-lowest colors, which
	// would require a different table. The same table could be used for both,
	// but not quite as efficiently. Currently, we don't use a lookup table
	// when dithering, except that we may still use one to do some of the
	// intermediate computations. Etc.
	if(ctx->img2_ci[channel].use_nearest_color_table) {
		s_full = get_final_sample_using_nc_tbl(ctx,samp_lin);
		goto okay;
	}

	ditherfamily=ctx->img2_ci[channel].ditherfamily;

	if(ditherfamily==IW_DITHERFAMILY_ERRDIFF) {
		samp_lin += ctx->dither_errors[0][x];
		// If the prior error makes the ideal brightness out of the available range,
		// just throw away any extra.
		if(samp_lin>1.0) samp_lin=1.0;
		else if(samp_lin<0.0) samp_lin=0.0;
	}

	is_exact = get_nearest_valid_colors(ctx,samp_lin,csdescr,
		&s_lin_floor_1, &s_lin_ceil_1,
		&s_cvt_floor_full, &s_cvt_ceil_full,
		ctx->img2_ci[channel].maxcolorcode_dbl,
		ctx->img2_ci[channel].color_count);

	if(is_exact) {
		s_full = s_cvt_floor_full;

		// Hack to keep the PRNG in sync. We have to generate exactly one random
		// number per sample, regardless of whether we use it.
		if(ditherfamily==IW_DITHERFAMILY_RANDOM) {
			(void)iwpvt_prng_rand(ctx->prng);
		}
		goto okay;
	}

	// samp_lin should be between s_lin_floor_1 and s_lin_ceil_1. Figure out
	// which is closer, and use the final pixel value we figured out earlier
	// (either s_cvt_floor_full or s_cvt_ceil_full).
	d_floor = samp_lin-s_lin_floor_1;
	d_ceil = s_lin_ceil_1-samp_lin;

	if(ditherfamily==IW_DITHERFAMILY_NONE) {
		// Not dithering. Just choose closest value.
		if(d_ceil<=d_floor) s_full=s_cvt_ceil_full;
		else s_full=s_cvt_floor_full;
	}
	else if(ditherfamily==IW_DITHERFAMILY_ERRDIFF) {
		if(d_ceil<=d_floor) {
			// Ceiling is closer. This pixel will be lighter than ideal,
			// so the error is negative, to make other pixels darker.
			iw_errdiff_dither(ctx,ctx->img2_ci[channel].dithersubtype,-d_ceil,x,y);
			s_full=s_cvt_ceil_full;
		}
		else {
			iw_errdiff_dither(ctx,ctx->img2_ci[channel].dithersubtype,d_floor,x,y);
			s_full=s_cvt_floor_full;
		}
	}
	else if(ditherfamily==IW_DITHERFAMILY_ORDERED) {
		dd=iw_ordered_dither(ctx->img2_ci[channel].dithersubtype, d_floor/(d_floor+d_ceil),x,y);
		s_full = dd ?
s_cvt_ceil_full : s_cvt_floor_full; } else if(ditherfamily==IW_DITHERFAMILY_RANDOM) { dd=iw_random_dither(ctx,d_floor/(d_floor+d_ceil),x,y,ctx->img2_ci[channel].dithersubtype,channel); s_full = dd ? s_cvt_ceil_full : s_cvt_floor_full; } else { // Unsupported dither method. s_full = 0.0; } okay: put_raw_sample(ctx,s_full,x,y,channel); } // A stripped-down version of put_sample_convert_from_linear(), // intended for use with background colors. static unsigned int calc_sample_convert_from_linear(struct iw_context *ctx, iw_tmpsample samp_lin, const struct iw_csdescr *csdescr, double overall_maxcolorcode) { double s_lin_floor_1, s_lin_ceil_1; double s_cvt_floor_full, s_cvt_ceil_full; double d_floor, d_ceil; int is_exact; double s_full; if(samp_lin<0.0) samp_lin=0.0; if(samp_lin>1.0) samp_lin=1.0; is_exact = get_nearest_valid_colors(ctx,samp_lin,csdescr, &s_lin_floor_1, &s_lin_ceil_1, &s_cvt_floor_full, &s_cvt_ceil_full, overall_maxcolorcode, 0); if(is_exact) { s_full = s_cvt_floor_full; goto okay; } d_floor = samp_lin-s_lin_floor_1; d_ceil = s_lin_ceil_1-samp_lin; if(d_ceil<=d_floor) s_full=s_cvt_ceil_full; else s_full=s_cvt_floor_full; okay: return (unsigned int)(0.5+s_full); } static void clamp_output_samples(struct iw_context *ctx, iw_tmpsample *out_pix, int num_out_pix) { int i; for(i=0;i<num_out_pix;i++) { if(out_pix[i]<0.0) out_pix[i]=0.0; else if(out_pix[i]>1.0) out_pix[i]=1.0; } } // TODO: Maybe this should be a flag in ctx, instead of a function that is // called repeatedly. static int iw_bkgd_has_transparency(struct iw_context *ctx) { if(!ctx->apply_bkgd) return 0; if(!(ctx->output_profile&IW_PROFILE_TRANSPARENCY)) return 0; if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY) return 0; if(ctx->bkgd_color_source==IW_BKGD_COLOR_SOURCE_FILE) { if(ctx->img1_bkgd_label_inputcs.c[3]<1.0) return 1; } else if(ctx->bkgd_color_source==IW_BKGD_COLOR_SOURCE_REQ) { if(ctx->bkgd_checkerboard) { if(ctx->req.bkgd2.c[3]<1.0) return 1; } if(ctx->req.bkgd.c[3]<1.0) return 1; } return 0; } // 'channel' is an intermediate channel number. static int iw_process_cols_to_intermediate(struct iw_context *ctx, int channel, const struct iw_csdescr *in_csdescr) { int i,j; int retval=0; iw_tmpsample tmp_alpha; iw_tmpsample *inpix_tofree = NULL; iw_tmpsample *outpix_tofree = NULL; int is_alpha_channel; struct iw_resize_settings *rs = NULL; struct iw_channelinfo_intermed *int_ci; iw_tmpsample *in_pix; iw_tmpsample *out_pix; int num_in_pix; int num_out_pix; int_ci = &ctx->intermed_ci[channel]; is_alpha_channel = (int_ci->channeltype==IW_CHANNELTYPE_ALPHA); num_in_pix = ctx->input_h; inpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_in_pix * sizeof(iw_tmpsample)); if(!inpix_tofree) goto done; in_pix = inpix_tofree; num_out_pix = ctx->intermed_canvas_height; outpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_out_pix * sizeof(iw_tmpsample)); if(!outpix_tofree) goto done; out_pix = outpix_tofree; rs=&ctx->resize_settings[IW_DIMENSION_V]; // If the resize context for this dimension already exists, we should be // able to reuse it. Otherwise, create a new one. if(!rs->rrctx) { // TODO: The use of the word "rows" here is misleading, because we are // actually resizing columns. 
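		// The resize is separable: this function scales each source column
		// vertically into the intermediate canvas, and
		// iw_process_rows_intermediate_to_final() then scales each
		// intermediate row horizontally. Conceptually, per channel:
		//
		//   source (input_w x input_h)
		//     -> vertical pass   -> intermediate (input_w x img2.height)
		//     -> horizontal pass -> final (img2.width x img2.height)
		//
		// The same 1-D resampler, iwpvt_resize_row_main(), is reused for
		// both passes.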
		rs->rrctx = iwpvt_resize_rows_init(ctx,rs,int_ci->channeltype,
			num_in_pix, num_out_pix);
		if(!rs->rrctx) goto done;
	}

	for(i=0;i<ctx->input_w;i++) {

		// Read a column of pixels into in_pix
		for(j=0;j<ctx->input_h;j++) {
			in_pix[j] = get_sample_cvt_to_linear(ctx,i,j,channel,in_csdescr);

			if(int_ci->need_unassoc_alpha_processing) { // We need opacity information also
				tmp_alpha = get_raw_sample(ctx,i,j,ctx->img1_alpha_channel_index);

				// Multiply color amount by opacity
				in_pix[j] *= tmp_alpha;
			}
			else if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY) {
				// We're doing "Early" background color application.
				// All intermediate channels will need the background color
				// applied to them.
				tmp_alpha = get_raw_sample(ctx,i,j,ctx->img1_alpha_channel_index);
				in_pix[j] = (tmp_alpha)*(in_pix[j]) +
					(1.0-tmp_alpha)*(int_ci->bkgd_color_lin);
			}
		}

		// Now we have a column in the right format.
		// Resize it and store it in the right place in the intermediate array.

		iwpvt_resize_row_main(rs->rrctx,in_pix,out_pix);

		if(ctx->intclamp)
			clamp_output_samples(ctx,out_pix,num_out_pix);

		// The resized samples are in out_pix. Copy them to the intermediate array.
		for(j=0;j<ctx->intermed_canvas_height;j++) {
			if(is_alpha_channel) {
				ctx->intermediate_alpha32[((size_t)j)*ctx->intermed_canvas_width + i] = (iw_float32)out_pix[j];
			}
			else {
				ctx->intermediate32[((size_t)j)*ctx->intermed_canvas_width + i] = (iw_float32)out_pix[j];
			}
		}
	}

	retval=1;

done:
	if(rs && rs->disable_rrctx_cache && rs->rrctx) {
		// In some cases, the channels may need different resize contexts.
		// Delete the current context, so that it doesn't get reused.
		iwpvt_resize_rows_done(rs->rrctx);
		rs->rrctx = NULL;
	}
	if(inpix_tofree) iw_free(ctx,inpix_tofree);
	if(outpix_tofree) iw_free(ctx,outpix_tofree);
	return retval;
}

static int iw_process_rows_intermediate_to_final(struct iw_context *ctx, int intermed_channel,
	const struct iw_csdescr *out_csdescr)
{
	int i,j;
	int z;
	int k;
	int retval=0;
	iw_tmpsample tmpsamp;
	iw_tmpsample alphasamp = 0.0;
	iw_tmpsample *inpix_tofree = NULL; // Used if we need a separate temp buffer for input samples
	iw_tmpsample *outpix_tofree = NULL; // Used if we need a separate temp buffer for output samples
	// Do any of the output channels use error-diffusion dithering?
	int using_errdiffdither = 0;
	int output_channel;
	int is_alpha_channel;
	int bkgd_has_transparency;
	double tmpbkgdalpha=0.0;
	int alt_bkgd = 0; // Nonzero if we should use bkgd2 for this sample
	struct iw_resize_settings *rs = NULL;
	int ditherfamily, dithersubtype;
	struct iw_channelinfo_intermed *int_ci;
	struct iw_channelinfo_out *out_ci;
	iw_tmpsample *in_pix = NULL;
	iw_tmpsample *out_pix = NULL;
	int num_in_pix;
	int num_out_pix;
	struct iw_channelinfo_out default_ci_out;

	num_in_pix = ctx->intermed_canvas_width;
	num_out_pix = ctx->img2.width;

	int_ci = &ctx->intermed_ci[intermed_channel];
	output_channel = int_ci->corresponding_output_channel;
	if(output_channel>=0) {
		out_ci = &ctx->img2_ci[output_channel];
	}
	else {
		// If there is no output channelinfo struct, create a temporary one to
		// use.
		// TODO: This is admittedly ugly, but we use these settings for a few
		// things even when there is no corresponding output channel, and I
		// don't remember exactly why.
		iw_zeromem(&default_ci_out, sizeof(struct iw_channelinfo_out));
		default_ci_out.channeltype = IW_CHANNELTYPE_NONALPHA;
		out_ci = &default_ci_out;
	}

	is_alpha_channel = (int_ci->channeltype==IW_CHANNELTYPE_ALPHA);
	bkgd_has_transparency = iw_bkgd_has_transparency(ctx);

	inpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_in_pix * sizeof(iw_tmpsample));
	if(!inpix_tofree) goto done;
	in_pix = inpix_tofree;

	// We need an output buffer.
	outpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_out_pix * sizeof(iw_tmpsample));
	if(!outpix_tofree) goto done;
	out_pix = outpix_tofree;

	// Decide if the 'nearest color table' optimization can be used
	if(ctx->nearest_color_table && !is_alpha_channel &&
		out_ci->ditherfamily==IW_DITHERFAMILY_NONE &&
		out_ci->color_count==0)
	{
		out_ci->use_nearest_color_table = 1;
	}
	else {
		out_ci->use_nearest_color_table = 0;
	}

	// Seed the PRNG, if necessary.
	ditherfamily = out_ci->ditherfamily;
	dithersubtype = out_ci->dithersubtype;
	if(ditherfamily==IW_DITHERFAMILY_RANDOM) {
		// Decide what random seed to use. The alpha channel always has its own
		// seed. If using "r" (not "r2") dithering, every channel has its own seed.
		if(dithersubtype==IW_DITHERSUBTYPE_SAMEPATTERN &&
			out_ci->channeltype!=IW_CHANNELTYPE_ALPHA)
		{
			iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed);
		}
		else {
			iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed+out_ci->channeltype);
		}
	}

	// Initialize Floyd-Steinberg dithering.
	if(output_channel>=0 && out_ci->ditherfamily==IW_DITHERFAMILY_ERRDIFF) {
		using_errdiffdither = 1;
		for(i=0;i<ctx->img2.width;i++) {
			for(k=0;k<IW_DITHER_MAXROWS;k++) {
				ctx->dither_errors[k][i] = 0.0;
			}
		}
	}

	rs=&ctx->resize_settings[IW_DIMENSION_H];

	// If the resize context for this dimension already exists, we should be
	// able to reuse it. Otherwise, create a new one.
	if(!rs->rrctx) {
		rs->rrctx = iwpvt_resize_rows_init(ctx,rs,int_ci->channeltype,
			num_in_pix, num_out_pix);
		if(!rs->rrctx) goto done;
	}

	for(j=0;j<ctx->intermed_canvas_height;j++) {

		// Copy this row of input samples into the temp buffer that in_pix
		// points to.
		if(is_alpha_channel) {
			for(i=0;i<num_in_pix;i++) {
				inpix_tofree[i] = ctx->intermediate_alpha32[((size_t)j)*ctx->intermed_canvas_width+i];
			}
		}
		else {
			for(i=0;i<num_in_pix;i++) {
				inpix_tofree[i] = ctx->intermediate32[((size_t)j)*ctx->intermed_canvas_width+i];
			}
		}

		// Resize in_pix to out_pix.
		iwpvt_resize_row_main(rs->rrctx,in_pix,out_pix);

		if(ctx->intclamp)
			clamp_output_samples(ctx,out_pix,num_out_pix);

		// If necessary, copy the resized samples to the final_alpha image
		if(is_alpha_channel && outpix_tofree && ctx->final_alpha32) {
			for(i=0;i<num_out_pix;i++) {
				ctx->final_alpha32[((size_t)j)*ctx->img2.width+i] = (iw_float32)outpix_tofree[i];
			}
		}

		// Now convert the out_pix and put them in the final image.

		if(output_channel == -1) {
			// No corresponding output channel.
			// (Presumably because this is an alpha channel that's being
			// removed because we're applying a background.)
			goto here;
		}

		for(z=0;z<ctx->img2.width;z++) {
			// For decent Floyd-Steinberg dithering, we need to process alternate
			// rows in reverse order.
			if(using_errdiffdither && (j%2))
				i=ctx->img2.width-1-z;
			else
				i=z;

			tmpsamp = out_pix[i];

			if(ctx->bkgd_checkerboard) {
				alt_bkgd = (((ctx->bkgd_check_origin[IW_DIMENSION_H]+i)/ctx->bkgd_check_size)%2) !=
					(((ctx->bkgd_check_origin[IW_DIMENSION_V]+j)/ctx->bkgd_check_size)%2);
			}

			if(bkgd_has_transparency) {
				tmpbkgdalpha = alt_bkgd ?
ctx->bkgd2alpha : ctx->bkgd1alpha; } if(int_ci->need_unassoc_alpha_processing) { // Convert color samples back to unassociated alpha. alphasamp = ctx->final_alpha32[((size_t)j)*ctx->img2.width + i]; if(alphasamp!=0.0) { tmpsamp /= alphasamp; } if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE) { // Apply a background color (or checkerboard pattern). double bkcolor; bkcolor = alt_bkgd ? out_ci->bkgd2_color_lin : out_ci->bkgd1_color_lin; if(bkgd_has_transparency) { tmpsamp = tmpsamp*alphasamp + bkcolor*tmpbkgdalpha*(1.0-alphasamp); } else { tmpsamp = tmpsamp*alphasamp + bkcolor*(1.0-alphasamp); } } } else if(is_alpha_channel && bkgd_has_transparency) { // Composite the alpha of the foreground over the alpha of the background. tmpsamp = tmpsamp + tmpbkgdalpha*(1.0-tmpsamp); } if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) put_sample_convert_from_linear_flt(ctx,tmpsamp,i,j,output_channel,out_csdescr); else put_sample_convert_from_linear(ctx,tmpsamp,i,j,output_channel,out_csdescr); } if(using_errdiffdither) { // Move "next row" error data to "this row", and clear the "next row". // TODO: Obviously, it would be more efficient to just swap pointers // to the rows. for(i=0;i<ctx->img2.width;i++) { // Move data in all rows but the first row up one row. for(k=0;k<IW_DITHER_MAXROWS-1;k++) { ctx->dither_errors[k][i] = ctx->dither_errors[k+1][i]; } // Clear the last row. ctx->dither_errors[IW_DITHER_MAXROWS-1][i] = 0.0; } } here: ; } retval=1; done: if(rs && rs->disable_rrctx_cache && rs->rrctx) { // In some cases, the channels may need different resize contexts. // Delete the current context, so that it doesn't get reused. iwpvt_resize_rows_done(rs->rrctx); rs->rrctx = NULL; } if(inpix_tofree) iw_free(ctx,inpix_tofree); if(outpix_tofree) iw_free(ctx,outpix_tofree); return retval; } static int iw_process_one_channel(struct iw_context *ctx, int intermed_channel, const struct iw_csdescr *in_csdescr, const struct iw_csdescr *out_csdescr) { if(!iw_process_cols_to_intermediate(ctx,intermed_channel,in_csdescr)) { return 0; } if(!iw_process_rows_intermediate_to_final(ctx,intermed_channel,out_csdescr)) { return 0; } return 1; } // Potentially make a lookup table for color correction. static void iw_make_x_to_linear_table(struct iw_context *ctx, double **ptable, const struct iw_image *img, const struct iw_csdescr *csdescr) { int ncolors; int i; double *tbl; if(csdescr->cstype==IW_CSTYPE_LINEAR) return; ncolors = (1 << img->bit_depth); if(ncolors>256) return; // Don't make a table if the image is really small. if( ((size_t)img->width)*img->height <= 512 ) return; tbl = iw_malloc(ctx,ncolors*sizeof(double)); if(!tbl) return; for(i=0;i<ncolors;i++) { tbl[i] = x_to_linear_sample(((double)i)/(ncolors-1), csdescr); } *ptable = tbl; } static void iw_make_nearest_color_table(struct iw_context *ctx, double **ptable, const struct iw_image *img, const struct iw_csdescr *csdescr) { int ncolors; int nentries; int i; double *tbl; double prev; double curr; if(ctx->no_gamma) return; if(csdescr->cstype==IW_CSTYPE_LINEAR) return; if(img->sampletype==IW_SAMPLETYPE_FLOATINGPOINT) return; if(img->bit_depth != ctx->img2.bit_depth) return; ncolors = (1 << img->bit_depth); if(ncolors>256) return; nentries = ncolors-1; // Don't make a table if the image is really small. if( ((size_t)img->width)*img->height <= 512 ) return; tbl = iw_malloc(ctx,nentries*sizeof(double)); if(!tbl) return; // Table stores the maximum value for the given entry. // The final entry is omitted, since there is no maximum value. 
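	// Worked example of the table filled in below, for 8-bit output
	// (ncolors=256): entry i is the midpoint, in linear space, between the
	// linearized values of codes i and i+1, e.g.
	//
	//   tbl[0] = (lin(0/255) + lin(1/255))/2
	//   tbl[1] = (lin(1/255) + lin(2/255))/2
	//
	// so a linear sample below tbl[0] maps to code 0, one between tbl[0]
	// and tbl[1] maps to code 1, and anything above tbl[254] maps to 255.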
	prev = 0.0;
	for(i=0;i<nentries;i++) {
		// This conversion may appear to be going in the wrong direction
		// (we're converting *from* linear), but it's correct because we will
		// search through its contents to find the corresponding index,
		// instead of vice versa.
		curr = x_to_linear_sample( ((double)(i+1))/(ncolors-1), csdescr);
		tbl[i] = (prev + curr)/2.0;
		prev = curr;
	}
	*ptable = tbl;
}

// Label is returned in linear colorspace.
// Returns 0 if no label available.
static int get_output_bkgd_label_lin(struct iw_context *ctx, struct iw_color *clr)
{
	clr->c[0] = 1.0; clr->c[1] = 0.0; clr->c[2] = 1.0; clr->c[3] = 1.0;

	if(ctx->req.suppress_output_bkgd_label) return 0;

	if(ctx->req.output_bkgd_label_valid) {
		*clr = ctx->req.output_bkgd_label;
		return 1;
	}

	// If the user didn't specify a label, but the input file had one, copy the
	// input file's label.
	if(ctx->img1_bkgd_label_set) {
		*clr = ctx->img1_bkgd_label_lin;
		return 1;
	}

	return 0;
}

static unsigned int iw_scale_to_int(double s, unsigned int maxcolor)
{
	if(s<=0.0) return 0;
	if(s>=1.0) return maxcolor;
	return (unsigned int)(0.5+s*maxcolor);
}

// Quantize the background color label, and store in ctx->img2.bkgdlabel.
// Also convert it to grayscale if needed.
static void iw_process_bkgd_label(struct iw_context *ctx)
{
	int ret;
	int k;
	struct iw_color clr;
	double maxcolor;
	unsigned int tmpu;

	if(!(ctx->output_profile&IW_PROFILE_PNG_BKGD) &&
		!(ctx->output_profile&IW_PROFILE_RGB8_BKGD) &&
		!(ctx->output_profile&IW_PROFILE_RGB16_BKGD))
	{
		return;
	}

	ret = get_output_bkgd_label_lin(ctx,&clr);
	if(!ret) return;

	if(ctx->to_grayscale) {
		iw_tmpsample g;
		g = iw_color_to_grayscale(ctx, clr.c[0], clr.c[1], clr.c[2]);
		clr.c[0] = clr.c[1] = clr.c[2] = g;
	}

	if(ctx->output_profile&IW_PROFILE_RGB8_BKGD) {
		maxcolor=255.0;
	}
	else if(ctx->output_profile&IW_PROFILE_RGB16_BKGD) {
		maxcolor=65535.0;
	}
	else if(ctx->img2.bit_depth==8) {
		maxcolor=255.0;
	}
	else if(ctx->img2.bit_depth==16) {
		maxcolor=65535.0;
	}
	else {
		return;
	}

	// Although the bkgd label is stored as floating point, we're responsible for
	// making sure that, when scaled and rounded to the precision of the output
	// format, it will be the correct color.
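	// Example of the round trip performed below, with maxcolor=255 and sRGB
	// output: a linear background component of 0.25 converts to sRGB ~0.537,
	// which quantizes to code 137, and is then stored as 137/255 ~= 0.537 --
	// i.e. the label keeps the exact floating-point representation of the
	// integer code the output file will contain, not the original linear
	// value.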
for(k=0;k<3;k++) { tmpu = calc_sample_convert_from_linear(ctx, clr.c[k], &ctx->img2cs, maxcolor); ctx->img2.bkgdlabel.c[k] = ((double)tmpu)/maxcolor; } // Alpha sample tmpu = iw_scale_to_int(clr.c[3],(unsigned int)maxcolor); ctx->img2.bkgdlabel.c[3] = ((double)tmpu)/maxcolor; ctx->img2.has_bkgdlabel = 1; } static void negate_target_image(struct iw_context *ctx) { int channel; struct iw_channelinfo_out *ci; int i,j; size_t pos; iw_float32 s; unsigned int n; for(channel=0; channel<ctx->img2_numchannels; channel++) { ci = &ctx->img2_ci[channel]; if(ci->channeltype == IW_CHANNELTYPE_ALPHA) continue; // Don't negate alpha channels if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) { for(j=0; j<ctx->img2.height; j++) { for(i=0; i<ctx->img2.width; i++) { pos = j*ctx->img2.bpr + ctx->img2_numchannels*i*4 + channel*4; s = iw_get_float32(&ctx->img2.pixels[pos]); iw_put_float32(&ctx->img2.pixels[pos], ((iw_float32)1.0)-s); } } } else if(ctx->img2.bit_depth==8) { for(j=0; j<ctx->img2.height; j++) { for(i=0; i<ctx->img2.width; i++) { pos = j*ctx->img2.bpr + ctx->img2_numchannels*i + channel; ctx->img2.pixels[pos] = ci->maxcolorcode_int-ctx->img2.pixels[pos]; } } } else if(ctx->img2.bit_depth==16) { for(j=0; j<ctx->img2.height; j++) { for(i=0; i<ctx->img2.width; i++) { pos = j*ctx->img2.bpr + ctx->img2_numchannels*i*2 + channel*2; n = ctx->img2.pixels[pos]*256 + ctx->img2.pixels[pos+1]; n = ci->maxcolorcode_int - n; ctx->img2.pixels[pos] = (n&0xff00)>>8; ctx->img2.pixels[pos+1] = n&0x00ff; } } } } } static int iw_process_internal(struct iw_context *ctx) { int channel; int retval=0; int i,k; int ret; // A linear color-correction descriptor to use with alpha channels. struct iw_csdescr csdescr_linear; ctx->intermediate32=NULL; ctx->intermediate_alpha32=NULL; ctx->final_alpha32=NULL; ctx->intermed_canvas_width = ctx->input_w; ctx->intermed_canvas_height = ctx->img2.height; iw_make_linear_csdescr(&csdescr_linear); ctx->img2.bpr = iw_calc_bytesperrow(ctx->img2.width,ctx->img2.bit_depth*ctx->img2_numchannels); ctx->img2.pixels = iw_malloc_large(ctx, ctx->img2.bpr, ctx->img2.height); if(!ctx->img2.pixels) { goto done; } ctx->intermediate32 = (iw_float32*)iw_malloc_large(ctx, ctx->intermed_canvas_width * ctx->intermed_canvas_height, sizeof(iw_float32)); if(!ctx->intermediate32) { goto done; } if(ctx->uses_errdiffdither) { for(k=0;k<IW_DITHER_MAXROWS;k++) { ctx->dither_errors[k] = (double*)iw_malloc(ctx, ctx->img2.width * sizeof(double)); if(!ctx->dither_errors[k]) goto done; } } if(!ctx->disable_output_lookup_tables) { iw_make_x_to_linear_table(ctx,&ctx->output_rev_color_corr_table,&ctx->img2,&ctx->img2cs); iw_make_nearest_color_table(ctx,&ctx->nearest_color_table,&ctx->img2,&ctx->img2cs); } // If an alpha channel is present, we have to process it first. if(IW_IMGTYPE_HAS_ALPHA(ctx->intermed_imgtype)) { ctx->intermediate_alpha32 = (iw_float32*)iw_malloc_large(ctx, ctx->intermed_canvas_width * ctx->intermed_canvas_height, sizeof(iw_float32)); if(!ctx->intermediate_alpha32) { goto done; } ctx->final_alpha32 = (iw_float32*)iw_malloc_large(ctx, ctx->img2.width * ctx->img2.height, sizeof(iw_float32)); if(!ctx->final_alpha32) { goto done; } if(!iw_process_one_channel(ctx,ctx->intermed_alpha_channel_index,&csdescr_linear,&csdescr_linear)) goto done; } // Process the non-alpha channels. 
for(channel=0;channel<ctx->intermed_numchannels;channel++) { if(ctx->intermed_ci[channel].channeltype!=IW_CHANNELTYPE_ALPHA) { if(ctx->no_gamma) ret=iw_process_one_channel(ctx,channel,&csdescr_linear,&csdescr_linear); else ret=iw_process_one_channel(ctx,channel,&ctx->img1cs,&ctx->img2cs); if(!ret) goto done; } } iw_process_bkgd_label(ctx); if(ctx->req.negate_target) { negate_target_image(ctx); } retval=1; done: if(ctx->intermediate32) { iw_free(ctx,ctx->intermediate32); ctx->intermediate32=NULL; } if(ctx->intermediate_alpha32) { iw_free(ctx,ctx->intermediate_alpha32); ctx->intermediate_alpha32=NULL; } if(ctx->final_alpha32) { iw_free(ctx,ctx->final_alpha32); ctx->final_alpha32=NULL; } for(k=0;k<IW_DITHER_MAXROWS;k++) { if(ctx->dither_errors[k]) { iw_free(ctx,ctx->dither_errors[k]); ctx->dither_errors[k]=NULL; } } // The 'resize contexts' are usually kept around so that they can be reused. // Now that we're done with everything, free them. for(i=0;i<2;i++) { // horizontal, vertical if(ctx->resize_settings[i].rrctx) { iwpvt_resize_rows_done(ctx->resize_settings[i].rrctx); ctx->resize_settings[i].rrctx = NULL; } } return retval; } static int iw_get_channeltype(int imgtype, int channel) { switch(imgtype) { case IW_IMGTYPE_GRAY: if(channel==0) return IW_CHANNELTYPE_GRAY; break; case IW_IMGTYPE_GRAYA: if(channel==0) return IW_CHANNELTYPE_GRAY; if(channel==1) return IW_CHANNELTYPE_ALPHA; break; case IW_IMGTYPE_RGB: if(channel==0) return IW_CHANNELTYPE_RED; if(channel==1) return IW_CHANNELTYPE_GREEN; if(channel==2) return IW_CHANNELTYPE_BLUE; break; case IW_IMGTYPE_RGBA: if(channel==0) return IW_CHANNELTYPE_RED; if(channel==1) return IW_CHANNELTYPE_GREEN; if(channel==2) return IW_CHANNELTYPE_BLUE; if(channel==3) return IW_CHANNELTYPE_ALPHA; break; } return 0; } static void iw_set_input_channeltypes(struct iw_context *ctx) { int i; for(i=0;i<ctx->img1_numchannels_logical;i++) { ctx->img1_ci[i].channeltype = iw_get_channeltype(ctx->img1_imgtype_logical,i); } } static void iw_set_intermed_channeltypes(struct iw_context *ctx) { int i; for(i=0;i<ctx->intermed_numchannels;i++) { ctx->intermed_ci[i].channeltype = iw_get_channeltype(ctx->intermed_imgtype,i); } } static void iw_set_out_channeltypes(struct iw_context *ctx) { int i; for(i=0;i<ctx->img2_numchannels;i++) { ctx->img2_ci[i].channeltype = iw_get_channeltype(ctx->img2.imgtype,i); } } // Set img2.bit_depth based on output_depth_req, etc. // Set img2.sampletype. static void decide_output_bit_depth(struct iw_context *ctx) { if(ctx->output_profile&IW_PROFILE_HDRI) { ctx->img2.sampletype=IW_SAMPLETYPE_FLOATINGPOINT; } else { ctx->img2.sampletype=IW_SAMPLETYPE_UINT; } if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) { // Floating point output. ctx->img2.bit_depth=32; return; } // Below this point, sample type is UINT. if(ctx->req.output_depth>8 && (ctx->output_profile&IW_PROFILE_16BPS)) { ctx->img2.bit_depth=16; } else { if(ctx->req.output_depth>8) { // Caller requested a depth higher than this format can handle. iw_warning(ctx,"Reducing depth to 8; required by the output format."); } ctx->img2.bit_depth=8; } } // Set the background color samples that will be used when processing the // image. (All the logic about how to apply a background color is in // decide_how_to_apply_bkgd(), not here.) static void prepare_apply_bkgd(struct iw_context *ctx) { struct iw_color bkgd1; // Main background color in linear colorspace struct iw_color bkgd2; // Secondary background color ... int i; if(!ctx->apply_bkgd) return; // Start with a default background color. 
	bkgd1.c[0]=1.0; bkgd1.c[1]=0.0; bkgd1.c[2]=1.0; bkgd1.c[3]=1.0;
	bkgd2.c[0]=0.0; bkgd2.c[1]=0.0; bkgd2.c[2]=0.0; bkgd2.c[3]=1.0;

	// Possibly overwrite it with the background color from the appropriate
	// source.
	if(ctx->bkgd_color_source == IW_BKGD_COLOR_SOURCE_FILE) {
		bkgd1 = ctx->img1_bkgd_label_lin; // structure copy
		ctx->bkgd_checkerboard = 0;
	}
	else if(ctx->bkgd_color_source == IW_BKGD_COLOR_SOURCE_REQ) {
		bkgd1 = ctx->req.bkgd;
		if(ctx->req.bkgd_checkerboard) {
			bkgd2 = ctx->req.bkgd2;
		}
	}

	// Set up the channelinfo (and ctx->bkgd*alpha) as needed according to the
	// target image type, and whether we are applying the background before or
	// after resizing.

	if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY) {
		ctx->bkgd1alpha = 1.0;
	}
	else {
		ctx->bkgd1alpha = bkgd1.c[3];
		ctx->bkgd2alpha = bkgd2.c[3];
	}

	if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE &&
		(ctx->img2.imgtype==IW_IMGTYPE_RGB || ctx->img2.imgtype==IW_IMGTYPE_RGBA))
	{
		for(i=0;i<3;i++) {
			ctx->img2_ci[i].bkgd1_color_lin = bkgd1.c[i];
		}
		if(ctx->bkgd_checkerboard) {
			for(i=0;i<3;i++) {
				ctx->img2_ci[i].bkgd2_color_lin = bkgd2.c[i];
			}
		}
	}
	else if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE &&
		(ctx->img2.imgtype==IW_IMGTYPE_GRAY || ctx->img2.imgtype==IW_IMGTYPE_GRAYA))
	{
		ctx->img2_ci[0].bkgd1_color_lin = iw_color_to_grayscale(ctx,bkgd1.c[0],bkgd1.c[1],bkgd1.c[2]);
		if(ctx->bkgd_checkerboard) {
			ctx->img2_ci[0].bkgd2_color_lin = iw_color_to_grayscale(ctx,bkgd2.c[0],bkgd2.c[1],bkgd2.c[2]);
		}
	}
	else if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY &&
		ctx->img2.imgtype==IW_IMGTYPE_RGB)
	{
		for(i=0;i<3;i++) {
			ctx->intermed_ci[i].bkgd_color_lin = bkgd1.c[i];
		}
	}
	else if(ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY &&
		ctx->img2.imgtype==IW_IMGTYPE_GRAY)
	{
		ctx->intermed_ci[0].bkgd_color_lin = iw_color_to_grayscale(ctx,bkgd1.c[0],bkgd1.c[1],bkgd1.c[2]);
	}
}

#define IW_STRAT1_G_G 0x011 // -grayscale
#define IW_STRAT1_G_RGB 0x013 // default
#define IW_STRAT1_GA_G 0x021 // -grayscale, BKGD_STRATEGY_EARLY (never happens?)
#define IW_STRAT1_GA_GA 0x022 // -grayscale
#define IW_STRAT1_GA_RGB 0x023 // BKGD_STRATEGY_EARLY
#define IW_STRAT1_GA_RGBA 0x024 // default
#define IW_STRAT1_RGB_G 0x031 // -grayscale
#define IW_STRAT1_RGB_RGB 0x033 // default
#define IW_STRAT1_RGBA_G 0x041 // -grayscale, BKGD_STRATEGY_EARLY (never happens?)
#define IW_STRAT1_RGBA_GA 0x042 // -grayscale #define IW_STRAT1_RGBA_RGB 0x043 // BKGD_STRATEGY_EARLY #define IW_STRAT1_RGBA_RGBA 0x044 // default #define IW_STRAT2_G_G 0x111 // -grayscale #define IW_STRAT2_GA_G 0x121 // -grayscale, BKGD_STRATEGY_LATE #define IW_STRAT2_GA_GA 0x122 // -grayscale #define IW_STRAT2_RGB_RGB 0x133 // default #define IW_STRAT2_RGBA_RGB 0x143 // BKGD_STRATEGY_LATE #define IW_STRAT2_RGBA_RGBA 0x144 // default static void iw_restrict_to_range(int r1, int r2, int *pvar) { if(*pvar < r1) *pvar = r1; else if(*pvar > r2) *pvar = r2; } static void decide_strategy(struct iw_context *ctx, int *ps1, int *ps2) { int s1, s2; // Start with a default strategy switch(ctx->img1_imgtype_logical) { case IW_IMGTYPE_RGBA: if(ctx->to_grayscale) { s1=IW_STRAT1_RGBA_GA; s2=IW_STRAT2_GA_GA; } else { s1=IW_STRAT1_RGBA_RGBA; s2=IW_STRAT2_RGBA_RGBA; } break; case IW_IMGTYPE_RGB: if(ctx->to_grayscale) { s1=IW_STRAT1_RGB_G; s2=IW_STRAT2_G_G; } else { s1=IW_STRAT1_RGB_RGB; s2=IW_STRAT2_RGB_RGB; } break; case IW_IMGTYPE_GRAYA: if(ctx->to_grayscale) { s1=IW_STRAT1_GA_GA; s2=IW_STRAT2_GA_GA; } else { s1=IW_STRAT1_GA_RGBA; s2=IW_STRAT2_RGBA_RGBA; } break; default: if(ctx->to_grayscale) { s1=IW_STRAT1_G_G; s2=IW_STRAT2_G_G; } else { s1=IW_STRAT1_G_RGB; s2=IW_STRAT2_RGB_RGB; } } if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY) { // Applying background before resizing if(s1==IW_STRAT1_RGBA_RGBA) { s1=IW_STRAT1_RGBA_RGB; s2=IW_STRAT2_RGB_RGB; } else if(s1==IW_STRAT1_GA_GA) { s1=IW_STRAT1_GA_G; s2=IW_STRAT2_G_G; } else if(s1==IW_STRAT1_GA_RGBA) { s1=IW_STRAT1_GA_RGB; s2=IW_STRAT2_RGB_RGB; } else if(s1==IW_STRAT1_RGBA_GA) { s1=IW_STRAT1_RGBA_G; s2=IW_STRAT2_G_G; } } if(ctx->apply_bkgd && !iw_bkgd_has_transparency(ctx)) { if(s2==IW_STRAT2_GA_GA) { s2=IW_STRAT2_GA_G; } else if(s2==IW_STRAT2_RGBA_RGBA) { s2=IW_STRAT2_RGBA_RGB; } } *ps1 = s1; *ps2 = s2; } // Choose our strategy for applying a background to the image. // Uses: // - ctx->img1_imgtype_logical (set by init_channel_info()) // - ctx->req.bkgd_valid (was background set by caller?) // - ctx->req.bkgd_checkerboard (set by caller) // - ctx->bkgd_check_size (set by caller) // - ctx->resize_settings[d].use_offset // Sets: // - ctx->apply_bkgd (flag indicating whether we'll apply a background) // - ctx->apply_bkgd_strategy (flag indicating *when* we'll apply a background) // - ctx->bkgd_color_source (where to get the background color) // - ctx->bkgd_checkerboard // - ctx->bkgd_check_size (sanitized) // May emit a warning if the caller's settings can't be honored. static void decide_how_to_apply_bkgd(struct iw_context *ctx) { if(!IW_IMGTYPE_HAS_ALPHA(ctx->img1_imgtype_logical)) { // If we know the image does not have any transparency, // we don't have to do anything. ctx->apply_bkgd=0; return; } // Figure out where to get the background color from, on the assumption // that we'll use one. if(ctx->img1_bkgd_label_set && (ctx->req.use_bkgd_label_from_file || !ctx->req.bkgd_valid)) { // The input file has a background color label, and either we are // requested to prefer it to the caller's background color, or // the caller did not give us a background color. // Use the color from the input file. ctx->bkgd_color_source = IW_BKGD_COLOR_SOURCE_FILE; } else if(ctx->req.bkgd_valid) { // Use the background color given by the caller. ctx->bkgd_color_source = IW_BKGD_COLOR_SOURCE_REQ; // Tentatively use the caller's checkerboard setting. // This may be overridden if we can't support checkerboard backgrounds // for some reason. 
		ctx->bkgd_checkerboard = ctx->req.bkgd_checkerboard;
	}
	else {
		// No background color available. If we need one, we'll have to invent one.
		ctx->bkgd_color_source = IW_BKGD_COLOR_SOURCE_NONE;
	}

	if(ctx->bkgd_checkerboard) {
		if(ctx->bkgd_check_size<1) ctx->bkgd_check_size=1;
	}

	if(ctx->req.bkgd_valid) {
		// Caller told us to apply a background.
		ctx->apply_bkgd=1;
	}

	if(!(ctx->output_profile&IW_PROFILE_TRANSPARENCY)) {
		if(!ctx->req.bkgd_valid && !ctx->apply_bkgd) {
			iw_warning(ctx,"This image may have transparency, which is incompatible with the output format. A background color will be applied.");
		}
		ctx->apply_bkgd=1;
	}

	if(ctx->resize_settings[IW_DIMENSION_H].use_offset ||
		ctx->resize_settings[IW_DIMENSION_V].use_offset)
	{
		// If channel offset is enabled, and the image has transparency, we
		// must apply a solid color background (and we must apply it before
		// resizing), regardless of whether the user asked for it. It's the
		// only strategy we support.
		if(!ctx->req.bkgd_valid && !ctx->apply_bkgd) {
			iw_warning(ctx,"This image may have transparency, which is incompatible with a channel offset. A background color will be applied.");
		}
		ctx->apply_bkgd=1;

		if(ctx->bkgd_checkerboard && ctx->req.bkgd_checkerboard) {
			iw_warning(ctx,"Checkerboard backgrounds are not supported when using a channel offset.");
			ctx->bkgd_checkerboard=0;
		}
		ctx->apply_bkgd_strategy=IW_BKGD_STRATEGY_EARLY;
		return;
	}

	if(!ctx->apply_bkgd) {
		// No reason to apply a background color.
		return;
	}

	if(ctx->bkgd_checkerboard) {
		// Non-solid-color backgrounds must be applied after resizing.
		ctx->apply_bkgd_strategy=IW_BKGD_STRATEGY_LATE;
		return;
	}

	// At this point, either Early or Late background application is possible,
	// and (I think) would, in an idealized situation, yield the same result.
	// Things that can cause it to be different include
	//  * using a different resampling algorithm for the alpha channel (this is
	//    no longer supported)
	//  * 'intermediate clamping'
	//
	// Setting this to Late is the safe choice, though it is slower than Early.
	ctx->apply_bkgd_strategy=IW_BKGD_STRATEGY_LATE;
}

static void iw_set_auto_resizetype(struct iw_context *ctx, int size1, int size2,
	int dimension)
{
	// If not changing the size, default to "null" resize if we can.
	// (We can't do that if using a translation or channel offset.)
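	// Worked example: a 640x480 -> 640x480 request with no translation and
	// no channel offset selects IW_RESIZETYPE_NULL (a straight copy) for
	// both dimensions. Any actual scaling falls through to the cubic filter
	// call below, iw_set_resize_alg(..., IW_RESIZETYPE_CUBIC, 1.0, 0.0, 0.5),
	// whose trailing 0.0/0.5 are presumably the cubic B and C parameters --
	// B=0, C=0.5 being the Catmull-Rom filter named in the comment.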
if(size2==size1 && !ctx->resize_settings[dimension].use_offset && !ctx->req.out_true_valid && ctx->resize_settings[dimension].translate==0.0) { iw_set_resize_alg(ctx, dimension, IW_RESIZETYPE_NULL, 1.0, 0.0, 0.0); return; } // Otherwise, default to Catmull-Rom iw_set_resize_alg(ctx, dimension, IW_RESIZETYPE_CUBIC, 1.0, 0.0, 0.5); } static void init_channel_info(struct iw_context *ctx) { int i; ctx->img1_imgtype_logical = ctx->img1.imgtype; if(ctx->resize_settings[IW_DIMENSION_H].edge_policy==IW_EDGE_POLICY_TRANSPARENT || ctx->resize_settings[IW_DIMENSION_V].edge_policy==IW_EDGE_POLICY_TRANSPARENT) { // Add a virtual alpha channel if(ctx->img1.imgtype==IW_IMGTYPE_GRAY) { ctx->img1_imgtype_logical = IW_IMGTYPE_GRAYA; } else if(ctx->img1.imgtype==IW_IMGTYPE_RGB) ctx->img1_imgtype_logical = IW_IMGTYPE_RGBA; } ctx->img1_numchannels_physical = iw_imgtype_num_channels(ctx->img1.imgtype); ctx->img1_numchannels_logical = iw_imgtype_num_channels(ctx->img1_imgtype_logical); ctx->img1_alpha_channel_index = iw_imgtype_alpha_channel_index(ctx->img1_imgtype_logical); iw_set_input_channeltypes(ctx); ctx->img2.imgtype = ctx->img1_imgtype_logical; // default ctx->img2_numchannels = ctx->img1_numchannels_logical; // default ctx->intermed_numchannels = ctx->img1_numchannels_logical; // default for(i=0;i<ctx->img1_numchannels_logical;i++) { ctx->intermed_ci[i].channeltype = ctx->img1_ci[i].channeltype; ctx->intermed_ci[i].corresponding_input_channel = i; ctx->img2_ci[i].channeltype = ctx->img1_ci[i].channeltype; if(i>=ctx->img1_numchannels_physical) { // This is a virtual channel, which is handled by get_raw_sample(). // But some optimizations cause that function to be bypassed, so we // have to disable those optimizations. ctx->img1_ci[i].disable_fast_get_sample = 1; } } } // Set the weights for the grayscale algorithm, if needed. static void prepare_grayscale(struct iw_context *ctx) { switch(ctx->grayscale_formula) { case IW_GSF_STANDARD: ctx->grayscale_formula = IW_GSF_WEIGHTED; iw_set_grayscale_weights(ctx,0.212655,0.715158,0.072187); break; case IW_GSF_COMPATIBLE: ctx->grayscale_formula = IW_GSF_WEIGHTED; iw_set_grayscale_weights(ctx,0.299,0.587,0.114); break; } } // Set up some things before we do the resize, and check to make // sure everything looks okay. static int iw_prepare_processing(struct iw_context *ctx, int w, int h) { int i,j; int output_maxcolorcode_int; int strategy1, strategy2; int flag; if(ctx->output_profile==0) { iw_set_error(ctx,"Output profile not set"); return 0; } if(!ctx->prng) { // TODO: It would be better to only create the random number generator // if we will need it. ctx->prng = iwpvt_prng_create(ctx); } if(ctx->randomize) { // Acquire and record a random seed. This also seeds the PRNG, but // that's irrelevant. It will be re-seeded before it is used. ctx->random_seed = iwpvt_util_randomize(ctx->prng); } if(ctx->req.out_true_valid) { ctx->resize_settings[IW_DIMENSION_H].out_true_size = ctx->req.out_true_width; ctx->resize_settings[IW_DIMENSION_V].out_true_size = ctx->req.out_true_height; } else { ctx->resize_settings[IW_DIMENSION_H].out_true_size = (double)w; ctx->resize_settings[IW_DIMENSION_V].out_true_size = (double)h; } if(!iw_check_image_dimensions(ctx,ctx->img1.width,ctx->img1.height)) { return 0; } if(!iw_check_image_dimensions(ctx,w,h)) { return 0; } if(ctx->to_grayscale) { prepare_grayscale(ctx); } init_channel_info(ctx); ctx->img2.width = w; ctx->img2.height = h; // Figure out the region of the source image to read from. 
if(ctx->input_start_x<0) ctx->input_start_x=0; if(ctx->input_start_y<0) ctx->input_start_y=0; if(ctx->input_start_x>ctx->img1.width-1) ctx->input_start_x=ctx->img1.width-1; if(ctx->input_start_y>ctx->img1.height-1) ctx->input_start_x=ctx->img1.height-1; if(ctx->input_w<0) ctx->input_w = ctx->img1.width - ctx->input_start_x; if(ctx->input_h<0) ctx->input_h = ctx->img1.height - ctx->input_start_y; if(ctx->input_w<1) ctx->input_w = 1; if(ctx->input_h<1) ctx->input_h = 1; if(ctx->input_w>(ctx->img1.width-ctx->input_start_x)) ctx->input_w=ctx->img1.width-ctx->input_start_x; if(ctx->input_h>(ctx->img1.height-ctx->input_start_y)) ctx->input_h=ctx->img1.height-ctx->input_start_y; // Decide on the output colorspace. if(ctx->req.output_cs_valid) { // Try to use colorspace requested by caller. ctx->img2cs = ctx->req.output_cs; if(ctx->output_profile&IW_PROFILE_ALWAYSLINEAR) { if(ctx->img2cs.cstype!=IW_CSTYPE_LINEAR) { iw_warning(ctx,"Forcing output colorspace to linear; required by the output format."); iw_make_linear_csdescr(&ctx->img2cs); } } } else { // By default, set the output colorspace to sRGB in most cases. if(ctx->output_profile&IW_PROFILE_ALWAYSLINEAR) { iw_make_linear_csdescr(&ctx->img2cs); } else { iw_make_srgb_csdescr_2(&ctx->img2cs); } } // Make sure maxcolorcodes are set. if(ctx->img1.sampletype!=IW_SAMPLETYPE_FLOATINGPOINT) { ctx->input_maxcolorcode_int = (1 << ctx->img1.bit_depth)-1; ctx->input_maxcolorcode = (double)ctx->input_maxcolorcode_int; for(i=0;i<IW_CI_COUNT;i++) { if(ctx->img1_ci[i].maxcolorcode_int<=0) { ctx->img1_ci[i].maxcolorcode_int = ctx->input_maxcolorcode_int; } ctx->img1_ci[i].maxcolorcode_dbl = (double)ctx->img1_ci[i].maxcolorcode_int; if(ctx->img1_ci[i].maxcolorcode_int != ctx->input_maxcolorcode_int) { // This is overzealous: We could enable it per-channel. // But it's probably not worth the trouble. ctx->support_reduced_input_bitdepths = 1; } } } if(ctx->support_reduced_input_bitdepths || ctx->img1.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) { for(i=0;i<ctx->img1_numchannels_physical;i++) { ctx->img1_ci[i].disable_fast_get_sample=1; } } // Set the .use_offset flags, based on whether the caller set any // .channel_offset[]s. for(i=0;i<2;i++) { // horizontal, vertical for(j=0;j<3;j++) { // red, green, blue if(fabs(ctx->resize_settings[i].channel_offset[j])>0.00001) { ctx->resize_settings[i].use_offset=1; } } } if(ctx->to_grayscale && (ctx->resize_settings[IW_DIMENSION_H].use_offset || ctx->resize_settings[IW_DIMENSION_V].use_offset) ) { iw_warning(ctx,"Disabling channel offset, due to grayscale output."); ctx->resize_settings[IW_DIMENSION_H].use_offset=0; ctx->resize_settings[IW_DIMENSION_V].use_offset=0; } decide_how_to_apply_bkgd(ctx); // Decide if we can cache the resize settings. for(i=0;i<2;i++) { if(ctx->resize_settings[i].use_offset || (ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_EARLY && ctx->resize_settings[i].edge_policy==IW_EDGE_POLICY_TRANSPARENT)) { // If a channel offset is used, we have to disable caching, because the // offset is stored in the cache, and it won't be the same for all channels. // If transparent virtual pixels will be converted to the background color // during the resize, we have to disable caching, because the background // sample value is stored in the cache, and it may be different for each // channel. 
ctx->resize_settings[i].disable_rrctx_cache=1; } } decide_strategy(ctx,&strategy1,&strategy2); switch(strategy1) { // input-to-intermediate case IW_STRAT1_RGBA_RGBA: ctx->intermed_imgtype = IW_IMGTYPE_RGBA; break; case IW_STRAT1_GA_RGBA: ctx->intermed_imgtype = IW_IMGTYPE_RGBA; ctx->intermed_ci[0].corresponding_input_channel=0; ctx->intermed_ci[1].corresponding_input_channel=0; ctx->intermed_ci[2].corresponding_input_channel=0; ctx->intermed_ci[3].corresponding_input_channel=1; break; case IW_STRAT1_RGB_RGB: case IW_STRAT1_RGBA_RGB: ctx->intermed_imgtype = IW_IMGTYPE_RGB; break; case IW_STRAT1_G_RGB: case IW_STRAT1_GA_RGB: ctx->intermed_imgtype = IW_IMGTYPE_RGB; ctx->intermed_ci[0].corresponding_input_channel=0; ctx->intermed_ci[1].corresponding_input_channel=0; ctx->intermed_ci[2].corresponding_input_channel=0; break; case IW_STRAT1_RGBA_GA: ctx->intermed_imgtype = IW_IMGTYPE_GRAYA; ctx->intermed_ci[0].cvt_to_grayscale=1; ctx->intermed_ci[0].corresponding_input_channel=0; ctx->intermed_ci[1].corresponding_input_channel=3; break; case IW_STRAT1_GA_GA: ctx->intermed_imgtype = IW_IMGTYPE_GRAYA; break; case IW_STRAT1_RGB_G: ctx->intermed_imgtype = IW_IMGTYPE_GRAY; ctx->intermed_ci[0].cvt_to_grayscale=1; ctx->intermed_ci[0].corresponding_input_channel=0; break; case IW_STRAT1_G_G: ctx->intermed_imgtype = IW_IMGTYPE_GRAY; ctx->intermed_ci[0].corresponding_input_channel=0; break; default: iw_set_errorf(ctx,"Internal error, unknown strategy %d",strategy1); return 0; } ctx->intermed_numchannels = iw_imgtype_num_channels(ctx->intermed_imgtype); ctx->intermed_alpha_channel_index = iw_imgtype_alpha_channel_index(ctx->intermed_imgtype); // Start with default mapping: for(i=0;i<ctx->intermed_numchannels;i++) { ctx->intermed_ci[i].corresponding_output_channel = i; } switch(strategy2) { // intermediate-to-output case IW_STRAT2_RGBA_RGBA: ctx->img2.imgtype = IW_IMGTYPE_RGBA; break; case IW_STRAT2_RGB_RGB: ctx->img2.imgtype = IW_IMGTYPE_RGB; break; case IW_STRAT2_RGBA_RGB: ctx->img2.imgtype = IW_IMGTYPE_RGB; ctx->intermed_ci[3].corresponding_output_channel= -1; break; case IW_STRAT2_GA_GA: ctx->img2.imgtype = IW_IMGTYPE_GRAYA; break; case IW_STRAT2_G_G: ctx->img2.imgtype = IW_IMGTYPE_GRAY; break; case IW_STRAT2_GA_G: ctx->img2.imgtype = IW_IMGTYPE_GRAY; ctx->intermed_ci[1].corresponding_output_channel= -1; break; default: iw_set_error(ctx,"Internal error"); return 0; } ctx->img2_numchannels = iw_imgtype_num_channels(ctx->img2.imgtype); iw_set_intermed_channeltypes(ctx); iw_set_out_channeltypes(ctx); // If an alpha channel is present, set a flag on the other channels to indicate // that we have to process them differently. if(IW_IMGTYPE_HAS_ALPHA(ctx->intermed_imgtype)) { for(i=0;i<ctx->intermed_numchannels;i++) { if(ctx->intermed_ci[i].channeltype!=IW_CHANNELTYPE_ALPHA) ctx->intermed_ci[i].need_unassoc_alpha_processing = 1; } } decide_output_bit_depth(ctx); if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) { flag=0; for(i=0;i<IW_NUM_CHANNELTYPES;i++) { if(ctx->req.color_count[i]) flag=1; } if(flag) { iw_warning(ctx,"Posterization is not supported with floating point output."); } } else { output_maxcolorcode_int = (1 << ctx->img2.bit_depth)-1; // Set the default maxcolorcodes for(i=0;i<ctx->img2_numchannels;i++) { ctx->img2_ci[i].maxcolorcode_int = output_maxcolorcode_int; } // Check for special "reduced" colorcodes. 
if((ctx->output_profile&IW_PROFILE_REDUCEDBITDEPTHS)) { for(i=0;i<ctx->img2_numchannels;i++) { int mccr; mccr = ctx->req.output_maxcolorcode[ctx->img2_ci[i].channeltype]; if(mccr>0) { if(mccr>output_maxcolorcode_int) mccr=output_maxcolorcode_int; ctx->img2_ci[i].maxcolorcode_int = mccr; } } } // Set some flags, and set the floating-point versions of the maxcolorcodes. for(i=0;i<ctx->img2_numchannels;i++) { if(ctx->img2_ci[i].maxcolorcode_int != output_maxcolorcode_int) { ctx->reduced_output_maxcolor_flag = 1; ctx->disable_output_lookup_tables = 1; } ctx->img2_ci[i].maxcolorcode_dbl = (double)ctx->img2_ci[i].maxcolorcode_int; } } for(i=0;i<ctx->img2_numchannels;i++) { ctx->img2_ci[i].color_count = ctx->req.color_count[ctx->img2_ci[i].channeltype]; if(ctx->img2_ci[i].color_count) { iw_restrict_to_range(2,ctx->img2_ci[i].maxcolorcode_int,&ctx->img2_ci[i].color_count); } if(ctx->img2_ci[i].color_count==1+ctx->img2_ci[i].maxcolorcode_int) { ctx->img2_ci[i].color_count = 0; } ctx->img2_ci[i].ditherfamily = ctx->ditherfamily_by_channeltype[ctx->img2_ci[i].channeltype]; ctx->img2_ci[i].dithersubtype = ctx->dithersubtype_by_channeltype[ctx->img2_ci[i].channeltype]; } // Scan the output channels to see whether certain types of dithering are used. for(i=0;i<ctx->img2_numchannels;i++) { if(ctx->img2_ci[i].ditherfamily==IW_DITHERFAMILY_ERRDIFF) { ctx->uses_errdiffdither=1; } } if(!ctx->support_reduced_input_bitdepths && ctx->img1.sampletype==IW_SAMPLETYPE_UINT) { iw_make_x_to_linear_table(ctx,&ctx->input_color_corr_table,&ctx->img1,&ctx->img1cs); } if(ctx->img1_bkgd_label_set) { // Convert the background color to a linear colorspace. for(i=0;i<3;i++) { ctx->img1_bkgd_label_lin.c[i] = x_to_linear_sample(ctx->img1_bkgd_label_inputcs.c[i],&ctx->img1cs); } ctx->img1_bkgd_label_lin.c[3] = ctx->img1_bkgd_label_inputcs.c[3]; } if(ctx->apply_bkgd) { prepare_apply_bkgd(ctx); } if(ctx->req.output_rendering_intent==IW_INTENT_UNKNOWN) { // User didn't request a specific intent; copy from input file. ctx->img2.rendering_intent = ctx->img1.rendering_intent; } else { ctx->img2.rendering_intent = ctx->req.output_rendering_intent; } if(ctx->resize_settings[IW_DIMENSION_H].family==IW_RESIZETYPE_AUTO) { iw_set_auto_resizetype(ctx,ctx->input_w,ctx->img2.width,IW_DIMENSION_H); } if(ctx->resize_settings[IW_DIMENSION_V].family==IW_RESIZETYPE_AUTO) { iw_set_auto_resizetype(ctx,ctx->input_h,ctx->img2.height,IW_DIMENSION_V); } if(IW_IMGTYPE_HAS_ALPHA(ctx->img2.imgtype)) { if(!ctx->opt_strip_alpha) { // If we're not allowed to strip the alpha channel, also disable // other optimizations that would implicitly remove the alpha // channel. (The optimization routines may do weird things if we // were to allow this.) ctx->opt_palette = 0; ctx->opt_binary_trns = 0; } } return 1; } IW_IMPL(int) iw_process_image(struct iw_context *ctx) { int ret; int retval = 0; if(ctx->use_count>0) { iw_set_error(ctx,"Internal: Incorrect attempt to reprocess image"); goto done; } ctx->use_count++; ret = iw_prepare_processing(ctx,ctx->canvas_width,ctx->canvas_height); if(!ret) goto done; ret = iw_process_internal(ctx); if(!ret) goto done; iwpvt_optimize_image(ctx); retval = 1; done: return retval; }
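A note on the source-region clamping earlier in iw_prepare_processing above: the y-bound check reads `if(ctx->input_start_y>ctx->img1.height-1) ctx->input_start_x=ctx->img1.height-1;`, assigning input_start_x where input_start_y was presumably intended, so an oversized start row is never clamped and the start column is clobbered with a height-derived value instead. Whether this slip is in the original source or was introduced when the file was flattened into this record is not verifiable here. A minimal sketch of the presumably intended clamp, using a hypothetical helper name rather than the library's API:

/* Hypothetical helper sketching the intended crop-rectangle clamp.
 * Mirrors the semantics of the ctx->input_* fields above: negative
 * start values mean 0, negative sizes mean "extend to the edge". */
static void clamp_source_region(int img_w, int img_h,
	int *start_x, int *start_y, int *w, int *h)
{
	if(*start_x < 0) *start_x = 0;
	if(*start_y < 0) *start_y = 0;
	if(*start_x > img_w-1) *start_x = img_w-1;
	if(*start_y > img_h-1) *start_y = img_h-1;  /* assigns *start_y, not *start_x */
	if(*w < 0) *w = img_w - *start_x;
	if(*h < 0) *h = img_h - *start_y;
	if(*w < 1) *w = 1;
	if(*h < 1) *h = 1;
	if(*w > img_w - *start_x) *w = img_w - *start_x;
	if(*h > img_h - *start_y) *h = img_h - *start_y;
}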
static int iw_process_rows_intermediate_to_final(struct iw_context *ctx, int intermed_channel, const struct iw_csdescr *out_csdescr) { int i,j; int z; int k; int retval=0; iw_tmpsample tmpsamp; iw_tmpsample alphasamp = 0.0; iw_tmpsample *inpix_tofree = NULL; // Used if we need a separate temp buffer for input samples iw_tmpsample *outpix_tofree = NULL; // Used if we need a separate temp buffer for output samples // Do any of the output channels use error-diffusion dithering? int using_errdiffdither = 0; int output_channel; int is_alpha_channel; int bkgd_has_transparency; double tmpbkgdalpha=0.0; int alt_bkgd = 0; // Nonzero if we should use bkgd2 for this sample struct iw_resize_settings *rs = NULL; int ditherfamily, dithersubtype; struct iw_channelinfo_intermed *int_ci; struct iw_channelinfo_out *out_ci; iw_tmpsample *in_pix = NULL; iw_tmpsample *out_pix = NULL; int num_in_pix; int num_out_pix; num_in_pix = ctx->intermed_canvas_width; num_out_pix = ctx->img2.width; int_ci = &ctx->intermed_ci[intermed_channel]; output_channel = int_ci->corresponding_output_channel; out_ci = &ctx->img2_ci[output_channel]; is_alpha_channel = (int_ci->channeltype==IW_CHANNELTYPE_ALPHA); bkgd_has_transparency = iw_bkgd_has_transparency(ctx); inpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_in_pix * sizeof(iw_tmpsample)); in_pix = inpix_tofree; // We need an output buffer. outpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_out_pix * sizeof(iw_tmpsample)); if(!outpix_tofree) goto done; out_pix = outpix_tofree; // Decide if the 'nearest color table' optimization can be used if(ctx->nearest_color_table && !is_alpha_channel && out_ci->ditherfamily==IW_DITHERFAMILY_NONE && out_ci->color_count==0) { out_ci->use_nearest_color_table = 1; } else { out_ci->use_nearest_color_table = 0; } // Seed the PRNG, if necessary. ditherfamily = out_ci->ditherfamily; dithersubtype = out_ci->dithersubtype; if(ditherfamily==IW_DITHERFAMILY_RANDOM) { // Decide what random seed to use. The alpha channel always has its own // seed. If using "r" (not "r2") dithering, every channel has its own seed. if(dithersubtype==IW_DITHERSUBTYPE_SAMEPATTERN && out_ci->channeltype!=IW_CHANNELTYPE_ALPHA) { iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed); } else { iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed+out_ci->channeltype); } } // Initialize Floyd-Steinberg dithering. if(output_channel>=0 && out_ci->ditherfamily==IW_DITHERFAMILY_ERRDIFF) { using_errdiffdither = 1; for(i=0;i<ctx->img2.width;i++) { for(k=0;k<IW_DITHER_MAXROWS;k++) { ctx->dither_errors[k][i] = 0.0; } } } rs=&ctx->resize_settings[IW_DIMENSION_H]; // If the resize context for this dimension already exists, we should be // able to reuse it. Otherwise, create a new one. if(!rs->rrctx) { rs->rrctx = iwpvt_resize_rows_init(ctx,rs,int_ci->channeltype, num_in_pix, num_out_pix); if(!rs->rrctx) goto done; } for(j=0;j<ctx->intermed_canvas_height;j++) { // As needed, either copy the input pixels to a temp buffer (inpix, which // ctx->in_pix already points to), or point ctx->in_pix directly to the // intermediate data. if(is_alpha_channel) { for(i=0;i<num_in_pix;i++) { inpix_tofree[i] = ctx->intermediate_alpha32[((size_t)j)*ctx->intermed_canvas_width+i]; } } else { for(i=0;i<num_in_pix;i++) { inpix_tofree[i] = ctx->intermediate32[((size_t)j)*ctx->intermed_canvas_width+i]; } } // Resize ctx->in_pix to ctx->out_pix. 
iwpvt_resize_row_main(rs->rrctx,in_pix,out_pix); if(ctx->intclamp) clamp_output_samples(ctx,out_pix,num_out_pix); // If necessary, copy the resized samples to the final_alpha image if(is_alpha_channel && outpix_tofree && ctx->final_alpha32) { for(i=0;i<num_out_pix;i++) { ctx->final_alpha32[((size_t)j)*ctx->img2.width+i] = (iw_float32)outpix_tofree[i]; } } // Now convert the out_pix and put them in the final image. if(output_channel == -1) { // No corresponding output channel. // (Presumably because this is an alpha channel that's being // removed because we're applying a background.) goto here; } for(z=0;z<ctx->img2.width;z++) { // For decent Floyd-Steinberg dithering, we need to process alternate // rows in reverse order. if(using_errdiffdither && (j%2)) i=ctx->img2.width-1-z; else i=z; tmpsamp = out_pix[i]; if(ctx->bkgd_checkerboard) { alt_bkgd = (((ctx->bkgd_check_origin[IW_DIMENSION_H]+i)/ctx->bkgd_check_size)%2) != (((ctx->bkgd_check_origin[IW_DIMENSION_V]+j)/ctx->bkgd_check_size)%2); } if(bkgd_has_transparency) { tmpbkgdalpha = alt_bkgd ? ctx->bkgd2alpha : ctx->bkgd1alpha; } if(int_ci->need_unassoc_alpha_processing) { // Convert color samples back to unassociated alpha. alphasamp = ctx->final_alpha32[((size_t)j)*ctx->img2.width + i]; if(alphasamp!=0.0) { tmpsamp /= alphasamp; } if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE) { // Apply a background color (or checkerboard pattern). double bkcolor; bkcolor = alt_bkgd ? out_ci->bkgd2_color_lin : out_ci->bkgd1_color_lin; if(bkgd_has_transparency) { tmpsamp = tmpsamp*alphasamp + bkcolor*tmpbkgdalpha*(1.0-alphasamp); } else { tmpsamp = tmpsamp*alphasamp + bkcolor*(1.0-alphasamp); } } } else if(is_alpha_channel && bkgd_has_transparency) { // Composite the alpha of the foreground over the alpha of the background. tmpsamp = tmpsamp + tmpbkgdalpha*(1.0-tmpsamp); } if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) put_sample_convert_from_linear_flt(ctx,tmpsamp,i,j,output_channel,out_csdescr); else put_sample_convert_from_linear(ctx,tmpsamp,i,j,output_channel,out_csdescr); } if(using_errdiffdither) { // Move "next row" error data to "this row", and clear the "next row". // TODO: Obviously, it would be more efficient to just swap pointers // to the rows. for(i=0;i<ctx->img2.width;i++) { // Move data in all rows but the first row up one row. for(k=0;k<IW_DITHER_MAXROWS-1;k++) { ctx->dither_errors[k][i] = ctx->dither_errors[k+1][i]; } // Clear the last row. ctx->dither_errors[IW_DITHER_MAXROWS-1][i] = 0.0; } } here: ; } retval=1; done: if(rs && rs->disable_rrctx_cache && rs->rrctx) { // In some cases, the channels may need different resize contexts. // Delete the current context, so that it doesn't get reused. iwpvt_resize_rows_done(rs->rrctx); rs->rrctx = NULL; } if(inpix_tofree) iw_free(ctx,inpix_tofree); if(outpix_tofree) iw_free(ctx,outpix_tofree); return retval; }
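The per-row loop in the function above walks alternate rows right-to-left for error-diffusion dithering (the `i = ctx->img2.width-1-z` remapping when `j%2` is odd), a "serpentine" scan that avoids the directional artifacts of plain left-to-right Floyd-Steinberg; the dither_errors rows are then shifted up by one scanline, as the in-code TODO notes could be done cheaper by swapping row pointers. A minimal, self-contained sketch of that traversal pattern — the names here are illustrative, not imageworsener API:

#include <stddef.h>

/* Serpentine traversal sketch: visit(x, y, ud) is called once per
 * pixel, left-to-right on even rows and right-to-left on odd rows,
 * matching the i = width-1-z remapping in the loop above. */
static void serpentine_scan(int width, int height,
	void (*visit)(int x, int y, void *ud), void *ud)
{
	int x, y, z;

	for(y = 0; y < height; y++) {
		for(z = 0; z < width; z++) {
			x = (y % 2) ? (width - 1 - z) : z;
			visit(x, y, ud);
		}
	}
}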
static int iw_process_rows_intermediate_to_final(struct iw_context *ctx, int intermed_channel, const struct iw_csdescr *out_csdescr) { int i,j; int z; int k; int retval=0; iw_tmpsample tmpsamp; iw_tmpsample alphasamp = 0.0; iw_tmpsample *inpix_tofree = NULL; // Used if we need a separate temp buffer for input samples iw_tmpsample *outpix_tofree = NULL; // Used if we need a separate temp buffer for output samples // Do any of the output channels use error-diffusion dithering? int using_errdiffdither = 0; int output_channel; int is_alpha_channel; int bkgd_has_transparency; double tmpbkgdalpha=0.0; int alt_bkgd = 0; // Nonzero if we should use bkgd2 for this sample struct iw_resize_settings *rs = NULL; int ditherfamily, dithersubtype; struct iw_channelinfo_intermed *int_ci; struct iw_channelinfo_out *out_ci; iw_tmpsample *in_pix = NULL; iw_tmpsample *out_pix = NULL; int num_in_pix; int num_out_pix; struct iw_channelinfo_out default_ci_out; num_in_pix = ctx->intermed_canvas_width; num_out_pix = ctx->img2.width; int_ci = &ctx->intermed_ci[intermed_channel]; output_channel = int_ci->corresponding_output_channel; if(output_channel>=0) { out_ci = &ctx->img2_ci[output_channel]; } else { // If there is no output channelinfo struct, create a temporary one to // use. // TODO: This is admittedly ugly, but we use these settings for a few // things even when there is no corresponding output channel, and I // don't remember exactly why. iw_zeromem(&default_ci_out, sizeof(struct iw_channelinfo_out)); default_ci_out.channeltype = IW_CHANNELTYPE_NONALPHA; out_ci = &default_ci_out; } is_alpha_channel = (int_ci->channeltype==IW_CHANNELTYPE_ALPHA); bkgd_has_transparency = iw_bkgd_has_transparency(ctx); inpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_in_pix * sizeof(iw_tmpsample)); in_pix = inpix_tofree; // We need an output buffer. outpix_tofree = (iw_tmpsample*)iw_malloc(ctx, num_out_pix * sizeof(iw_tmpsample)); if(!outpix_tofree) goto done; out_pix = outpix_tofree; // Decide if the 'nearest color table' optimization can be used if(ctx->nearest_color_table && !is_alpha_channel && out_ci->ditherfamily==IW_DITHERFAMILY_NONE && out_ci->color_count==0) { out_ci->use_nearest_color_table = 1; } else { out_ci->use_nearest_color_table = 0; } // Seed the PRNG, if necessary. ditherfamily = out_ci->ditherfamily; dithersubtype = out_ci->dithersubtype; if(ditherfamily==IW_DITHERFAMILY_RANDOM) { // Decide what random seed to use. The alpha channel always has its own // seed. If using "r" (not "r2") dithering, every channel has its own seed. if(dithersubtype==IW_DITHERSUBTYPE_SAMEPATTERN && out_ci->channeltype!=IW_CHANNELTYPE_ALPHA) { iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed); } else { iwpvt_prng_set_random_seed(ctx->prng,ctx->random_seed+out_ci->channeltype); } } // Initialize Floyd-Steinberg dithering. if(output_channel>=0 && out_ci->ditherfamily==IW_DITHERFAMILY_ERRDIFF) { using_errdiffdither = 1; for(i=0;i<ctx->img2.width;i++) { for(k=0;k<IW_DITHER_MAXROWS;k++) { ctx->dither_errors[k][i] = 0.0; } } } rs=&ctx->resize_settings[IW_DIMENSION_H]; // If the resize context for this dimension already exists, we should be // able to reuse it. Otherwise, create a new one. if(!rs->rrctx) { rs->rrctx = iwpvt_resize_rows_init(ctx,rs,int_ci->channeltype, num_in_pix, num_out_pix); if(!rs->rrctx) goto done; } for(j=0;j<ctx->intermed_canvas_height;j++) { // As needed, either copy the input pixels to a temp buffer (inpix, which // ctx->in_pix already points to), or point ctx->in_pix directly to the // intermediate data. 
if(is_alpha_channel) { for(i=0;i<num_in_pix;i++) { inpix_tofree[i] = ctx->intermediate_alpha32[((size_t)j)*ctx->intermed_canvas_width+i]; } } else { for(i=0;i<num_in_pix;i++) { inpix_tofree[i] = ctx->intermediate32[((size_t)j)*ctx->intermed_canvas_width+i]; } } // Resize ctx->in_pix to ctx->out_pix. iwpvt_resize_row_main(rs->rrctx,in_pix,out_pix); if(ctx->intclamp) clamp_output_samples(ctx,out_pix,num_out_pix); // If necessary, copy the resized samples to the final_alpha image if(is_alpha_channel && outpix_tofree && ctx->final_alpha32) { for(i=0;i<num_out_pix;i++) { ctx->final_alpha32[((size_t)j)*ctx->img2.width+i] = (iw_float32)outpix_tofree[i]; } } // Now convert the out_pix and put them in the final image. if(output_channel == -1) { // No corresponding output channel. // (Presumably because this is an alpha channel that's being // removed because we're applying a background.) goto here; } for(z=0;z<ctx->img2.width;z++) { // For decent Floyd-Steinberg dithering, we need to process alternate // rows in reverse order. if(using_errdiffdither && (j%2)) i=ctx->img2.width-1-z; else i=z; tmpsamp = out_pix[i]; if(ctx->bkgd_checkerboard) { alt_bkgd = (((ctx->bkgd_check_origin[IW_DIMENSION_H]+i)/ctx->bkgd_check_size)%2) != (((ctx->bkgd_check_origin[IW_DIMENSION_V]+j)/ctx->bkgd_check_size)%2); } if(bkgd_has_transparency) { tmpbkgdalpha = alt_bkgd ? ctx->bkgd2alpha : ctx->bkgd1alpha; } if(int_ci->need_unassoc_alpha_processing) { // Convert color samples back to unassociated alpha. alphasamp = ctx->final_alpha32[((size_t)j)*ctx->img2.width + i]; if(alphasamp!=0.0) { tmpsamp /= alphasamp; } if(ctx->apply_bkgd && ctx->apply_bkgd_strategy==IW_BKGD_STRATEGY_LATE) { // Apply a background color (or checkerboard pattern). double bkcolor; bkcolor = alt_bkgd ? out_ci->bkgd2_color_lin : out_ci->bkgd1_color_lin; if(bkgd_has_transparency) { tmpsamp = tmpsamp*alphasamp + bkcolor*tmpbkgdalpha*(1.0-alphasamp); } else { tmpsamp = tmpsamp*alphasamp + bkcolor*(1.0-alphasamp); } } } else if(is_alpha_channel && bkgd_has_transparency) { // Composite the alpha of the foreground over the alpha of the background. tmpsamp = tmpsamp + tmpbkgdalpha*(1.0-tmpsamp); } if(ctx->img2.sampletype==IW_SAMPLETYPE_FLOATINGPOINT) put_sample_convert_from_linear_flt(ctx,tmpsamp,i,j,output_channel,out_csdescr); else put_sample_convert_from_linear(ctx,tmpsamp,i,j,output_channel,out_csdescr); } if(using_errdiffdither) { // Move "next row" error data to "this row", and clear the "next row". // TODO: Obviously, it would be more efficient to just swap pointers // to the rows. for(i=0;i<ctx->img2.width;i++) { // Move data in all rows but the first row up one row. for(k=0;k<IW_DITHER_MAXROWS-1;k++) { ctx->dither_errors[k][i] = ctx->dither_errors[k+1][i]; } // Clear the last row. ctx->dither_errors[IW_DITHER_MAXROWS-1][i] = 0.0; } } here: ; } retval=1; done: if(rs && rs->disable_rrctx_cache && rs->rrctx) { // In some cases, the channels may need different resize contexts. // Delete the current context, so that it doesn't get reused. iwpvt_resize_rows_done(rs->rrctx); rs->rrctx = NULL; } if(inpix_tofree) iw_free(ctx,inpix_tofree); if(outpix_tofree) iw_free(ctx,outpix_tofree); return retval; }
{'added': [(952, '\tstruct iw_channelinfo_out default_ci_out;'), (959, '\tif(output_channel>=0) {'), (960, '\t\tout_ci = &ctx->img2_ci[output_channel];'), (961, '\t}'), (962, '\telse {'), (963, '\t\t// If there is no output channelinfo struct, create a temporary one to'), (964, '\t\t// use.'), (965, '\t\t// TODO: This is admittedly ugly, but we use these settings for a few'), (966, '\t\t// things even when there is no corresponding output channel, and I'), (967, "\t\t// don't remember exactly why."), (968, '\t\tiw_zeromem(&default_ci_out, sizeof(struct iw_channelinfo_out));'), (969, '\t\tdefault_ci_out.channeltype = IW_CHANNELTYPE_NONALPHA;'), (970, '\t\tout_ci = &default_ci_out;'), (971, '\t}'), (972, '')], 'deleted': [(925, "// 'handle_alpha_flag' must be set if an alpha channel exists and this is not"), (926, '// the alpha channel.'), (960, '\tout_ci = &ctx->img2_ci[output_channel];')]}
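The diff above is the whole CVE-2017-9203 change: in the pre-fix version, `out_ci = &ctx->img2_ci[output_channel]` executes even when `corresponding_output_channel` is -1 (an alpha channel being dropped), so later accesses such as `out_ci->ditherfamily` and `out_ci->color_count` go through an out-of-bounds pointer, consistent with the CWE-787 label below. The fixed version substitutes a zeroed stack struct (`default_ci_out`, via `iw_zeromem`) whenever there is no corresponding output channel. The same guard pattern in isolation, with hypothetical names standing in for the library's types:

#include <string.h>

struct chan_info { int channeltype; int ditherfamily; int color_count; };

/* Returns a usable channel-info pointer even when idx == -1 (no
 * corresponding output channel), mirroring the default_ci_out fix:
 * the array is never indexed with a negative value. */
static const struct chan_info *get_out_ci(const struct chan_info *arr,
	int idx, struct chan_info *fallback)
{
	if(idx >= 0)
		return &arr[idx];
	memset(fallback, 0, sizeof(*fallback));  /* plays the role of iw_zeromem() */
	return fallback;
}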
15
3
1,670
13,487
151
1,086
48
https://github.com/jsummers/imageworsener
CVE-2017-9203
CWE-787
2,725
ngiflib.c
C
WritePixel
#ifndef NGIFLIB_NO_FILE #include <stdio.h> #endif /* NGIFLIB_NO_FILE */ #include "ngiflib.h" /* decodeur GIF en C portable (pas de pb big/little endian) * Thomas BERNARD. janvier 2004. * (c) 2004-2017 Thomas Bernard. All rights reserved */ /* Fonction de debug */ #ifdef DEBUG void fprintf_ngiflib_img(FILE * f, struct ngiflib_img * i) { fprintf(f, " * ngiflib_img @ %p\n", i); fprintf(f, " next = %p\n", i->next); fprintf(f, " parent = %p\n", i->parent); fprintf(f, " palette = %p\n", i->palette); fprintf(f, " %3d couleurs", i->ncolors); if(i->interlaced) fprintf(f, " interlaced"); fprintf(f, "\n taille : %dx%d, pos (%d,%d)\n", i->width, i->height, i->posX, i->posY); fprintf(f, " sort_flag=%x localpalbits=%d\n", i->sort_flag, i->localpalbits); } #endif /* DEBUG */ void GifImgDestroy(struct ngiflib_img * i) { if(i==NULL) return; if(i->next) GifImgDestroy(i->next); if(i->palette && (i->palette != i->parent->palette)) ngiflib_free(i->palette); ngiflib_free(i); } /* Fonction de debug */ #ifdef DEBUG void fprintf_ngiflib_gif(FILE * f, struct ngiflib_gif * g) { struct ngiflib_img * i; fprintf(f, "* ngiflib_gif @ %p %s\n", g, g->signature); fprintf(f, " %dx%d, %d bits, %d couleurs\n", g->width, g->height, g->imgbits, g->ncolors); fprintf(f, " palette = %p, backgroundcolorindex %d\n", g->palette, g->backgroundindex); fprintf(f, " pixelaspectratio = %d\n", g->pixaspectratio); fprintf(f, " frbuff = %p\n", g->frbuff.p8); fprintf(f, " cur_img = %p\n", g->cur_img); fprintf(f, " %d images :\n", g->nimg); i = g->first_img; while(i) { fprintf_ngiflib_img(f, i); i = i->next; } } #endif /* DEBUG */ void GifDestroy(struct ngiflib_gif * g) { if(g==NULL) return; GifImgDestroy(g->first_img); if(g->palette) ngiflib_free(g->palette); if(g->frbuff.p8) ngiflib_free(g->frbuff.p8); ngiflib_free(g); } /* u8 GetByte(struct ngiflib_gif * g); * fonction qui renvoie un octet du fichier .gif * on pourait optimiser en faisant 2 fonctions. */ static u8 GetByte(struct ngiflib_gif * g) { #ifndef NGIFLIB_NO_FILE if(g->mode & NGIFLIB_MODE_FROM_MEM) { #endif /* NGIFLIB_NO_FILE */ return *(g->input.bytes++); #ifndef NGIFLIB_NO_FILE } else { return (u8)(getc(g->input.file)); } #endif /* NGIFLIB_NO_FILE */ } /* u16 GetWord() * Renvoie un mot de 16bits * N'est pas influencee par l'endianess du CPU ! */ static u16 GetWord(struct ngiflib_gif * g) { u16 r = (u16)GetByte(g); r |= ((u16)GetByte(g) << 8); return r; } /* int GetByteStr(struct ngiflib_gif * g, u8 * p, int n); * prend en argument un pointeur sur la destination * et le nombre d'octet a lire. * Renvoie 0 si l'operation a reussi, -1 sinon. */ static int GetByteStr(struct ngiflib_gif * g, u8 * p, int n) { if(!p) return -1; #ifndef NGIFLIB_NO_FILE if(g->mode & NGIFLIB_MODE_FROM_MEM) { #endif /* NGIFLIB_NO_FILE */ ngiflib_memcpy(p, g->input.bytes, n); g->input.bytes += n; return 0; #ifndef NGIFLIB_NO_FILE } else { size_t read; read = fread(p, 1, n, g->input.file); return ((int)read == n) ? 
0 : -1; } #endif /* NGIFLIB_NO_FILE */ } /* void WritePixel(struct ngiflib_img * i, u8 v); * ecrit le pixel de valeur v dans le frame buffer */ static void WritePixel(struct ngiflib_img * i, struct ngiflib_decode_context * context, u8 v) { struct ngiflib_gif * p = i->parent; if(v!=i->gce.transparent_color || !i->gce.transparent_flag) { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ *context->frbuff_p.p8 = v; #ifndef NGIFLIB_INDEXED_ONLY } else *context->frbuff_p.p32 = GifIndexToTrueColor(i->palette, v); #endif /* NGIFLIB_INDEXED_ONLY */ } if(--(context->Xtogo) <= 0) { #ifdef NGIFLIB_ENABLE_CALLBACKS if(p->line_cb) p->line_cb(p, context->line_p, context->curY); #endif /* NGIFLIB_ENABLE_CALLBACKS */ context->Xtogo = i->width; switch(context->pass) { case 0: context->curY++; break; case 1: /* 1st pass : every eighth row starting from 0 */ context->curY += 8; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 4; } break; case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY += 8; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 2; } break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY += 4; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 1; } break; case 4: /* 4th pass : every odd row */ context->curY += 2; break; } #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width; context->frbuff_p.p8 = context->line_p.p8 + i->posX; #else context->frbuff_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #ifndef NGIFLIB_INDEXED_ONLY } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width; context->frbuff_p.p32 = context->line_p.p32 + i->posX; #else context->frbuff_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ } else { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ context->frbuff_p.p8++; #ifndef NGIFLIB_INDEXED_ONLY } else { context->frbuff_p.p32++; } #endif /* NGIFLIB_INDEXED_ONLY */ } } /* void WritePixels(struct ngiflib_img * i, const u8 * pixels, u16 n); * ecrit les pixels dans le frame buffer */ static void WritePixels(struct ngiflib_img * i, struct ngiflib_decode_context * context, const u8 * pixels, u16 n) { u16 tocopy; struct ngiflib_gif * p = i->parent; while(n > 0) { tocopy = (context->Xtogo < n) ? 
context->Xtogo : n; if(!i->gce.transparent_flag) { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ ngiflib_memcpy(context->frbuff_p.p8, pixels, tocopy); pixels += tocopy; context->frbuff_p.p8 += tocopy; #ifndef NGIFLIB_INDEXED_ONLY } else { int j; for(j = (int)tocopy; j > 0; j--) { *(context->frbuff_p.p32++) = GifIndexToTrueColor(i->palette, *pixels++); } } #endif /* NGIFLIB_INDEXED_ONLY */ } else { int j; #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ for(j = (int)tocopy; j > 0; j--) { if(*pixels != i->gce.transparent_color) *context->frbuff_p.p8 = *pixels; pixels++; context->frbuff_p.p8++; } #ifndef NGIFLIB_INDEXED_ONLY } else { for(j = (int)tocopy; j > 0; j--) { if(*pixels != i->gce.transparent_color) { *context->frbuff_p.p32 = GifIndexToTrueColor(i->palette, *pixels); } pixels++; context->frbuff_p.p32++; } } #endif /* NGIFLIB_INDEXED_ONLY */ } context->Xtogo -= tocopy; if(context->Xtogo == 0) { #ifdef NGIFLIB_ENABLE_CALLBACKS if(p->line_cb) p->line_cb(p, context->line_p, context->curY); #endif /* NGIFLIB_ENABLE_CALLBACKS */ context->Xtogo = i->width; switch(context->pass) { case 0: context->curY++; break; case 1: /* 1st pass : every eighth row starting from 0 */ context->curY += 8; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 4; } break; case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY += 8; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 2; } break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY += 4; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 1; } break; case 4: /* 4th pass : every odd row */ context->curY += 2; break; } #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width; context->frbuff_p.p8 = context->line_p.p8 + i->posX; #else context->frbuff_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #ifndef NGIFLIB_INDEXED_ONLY } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width; context->frbuff_p.p32 = context->line_p.p32 + i->posX; #else context->frbuff_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ } n -= tocopy; } } /* * u16 GetGifWord(struct ngiflib_img * i); * Renvoie un code LZW (taille variable) */ static u16 GetGifWord(struct ngiflib_img * i, struct ngiflib_decode_context * context) { u16 r; int bits_todo; u16 newbyte; bits_todo = (int)context->nbbit - (int)context->restbits; if( bits_todo <= 0) { /* nbbit <= restbits */ r = context->lbyte; context->restbits -= context->nbbit; context->lbyte >>= context->nbbit; } else if( bits_todo > 8 ) { /* nbbit > restbits + 8 */ if(context->restbyte >= 2) { context->restbyte -= 2; r = *context->srcbyte++; } else { if(context->restbyte == 0) { context->restbyte = GetByte(i->parent); #if defined(DEBUG) && !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "restbyte = %02X\n", context->restbyte); #endif /* defined(DEBUG) && !defined(NGIFLIB_NO_FILE) */ GetByteStr(i->parent, context->byte_buffer, context->restbyte); context->srcbyte = context->byte_buffer; } r = *context->srcbyte++; if(--context->restbyte == 0) { context->restbyte = GetByte(i->parent); #if 
defined(DEBUG) && !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "restbyte = %02X\n", context->restbyte); #endif /* defined(DEBUG) && !defined(NGIFLIB_NO_FILE) */ GetByteStr(i->parent, context->byte_buffer, context->restbyte); context->srcbyte = context->byte_buffer; } context->restbyte--; } newbyte = *context->srcbyte++; r |= newbyte << 8; r = (r << context->restbits) | context->lbyte; context->restbits = 16 - bits_todo; context->lbyte = newbyte >> (bits_todo - 8); } else /*if( bits_todo > 0 )*/ { /* nbbit > restbits */ if(context->restbyte == 0) { context->restbyte = GetByte(i->parent); #if defined(DEBUG) && !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "restbyte = %02X\n", context->restbyte); #endif /* defined(DEBUG) && !defined(NGIFLIB_NO_FILE) */ GetByteStr(i->parent, context->byte_buffer, context->restbyte); context->srcbyte = context->byte_buffer; } newbyte = *context->srcbyte++; context->restbyte--; r = (newbyte << context->restbits) | context->lbyte; context->restbits = 8 - bits_todo; context->lbyte = newbyte >> bits_todo; } return (r & context->max); /* applique le bon masque pour eliminer les bits en trop */ } /* ------------------------------------------------ */ static void FillGifBackGround(struct ngiflib_gif * g) { long n = (long)g->width*g->height; #ifndef NGIFLIB_INDEXED_ONLY u32 bg_truecolor; #endif /* NGIFLIB_INDEXED_ONLY */ if((g->frbuff.p8==NULL)||(g->palette==NULL)) return; #ifndef NGIFLIB_INDEXED_ONLY if(g->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ ngiflib_memset(g->frbuff.p8, g->backgroundindex, n); #ifndef NGIFLIB_INDEXED_ONLY } else { u32 * p = g->frbuff.p32; bg_truecolor = GifIndexToTrueColor(g->palette, g->backgroundindex); while(n-->0) *p++ = bg_truecolor; } #endif /* NGIFLIB_INDEXED_ONLY */ } /* ------------------------------------------------ */ int CheckGif(u8 * b) { return (b[0]=='G')&&(b[1]=='I')&&(b[2]=='F')&&(b[3]=='8'); } /* ------------------------------------------------ */ static int DecodeGifImg(struct ngiflib_img * i) { struct ngiflib_decode_context context; long npix; u8 * stackp; u8 * stack_top; u16 clr; u16 eof; u16 free; u16 act_code = 0; u16 old_code = 0; u16 read_byt; u16 ab_prfx[4096]; u8 ab_suffx[4096]; u8 ab_stack[4096]; u8 flags; u8 casspecial = 0; if(!i) return -1; i->posX = GetWord(i->parent); /* offsetX */ i->posY = GetWord(i->parent); /* offsetY */ i->width = GetWord(i->parent); /* SizeX */ i->height = GetWord(i->parent); /* SizeY */ if((i->width > i->parent->width) || (i->height > i->parent->height)) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** ERROR *** Image bigger than global GIF canvas !\n"); #endif return -1; } if((i->posX + i->width) > i->parent->width) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting X position\n"); #endif i->posX = i->parent->width - i->width; } if((i->posY + i->height) > i->parent->height) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting Y position\n"); #endif i->posY = i->parent->height - i->height; } context.Xtogo = i->width; context.curY = i->posY; #ifdef NGIFLIB_INDEXED_ONLY #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #else if(i->parent->mode & 
NGIFLIB_MODE_INDEXED) { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width; context.frbuff_p.p32 = context.line_p.p32 + i->posX; #else context.frbuff_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ npix = (long)i->width * i->height; flags = GetByte(i->parent); i->interlaced = (flags & 64) >> 6; context.pass = i->interlaced ? 1 : 0; i->sort_flag = (flags & 32) >> 5; /* is local palette sorted by color frequency ? */ i->localpalbits = (flags & 7) + 1; if(flags&128) { /* palette locale */ int k; int localpalsize = 1 << i->localpalbits; #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Local palette\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ i->palette = (struct ngiflib_rgb *)ngiflib_malloc(sizeof(struct ngiflib_rgb)*localpalsize); for(k=0; k<localpalsize; k++) { i->palette[k].r = GetByte(i->parent); i->palette[k].g = GetByte(i->parent); i->palette[k].b = GetByte(i->parent); } #ifdef NGIFLIB_ENABLE_CALLBACKS if(i->parent->palette_cb) i->parent->palette_cb(i->parent, i->palette, localpalsize); #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { i->palette = i->parent->palette; i->localpalbits = i->parent->imgbits; } i->ncolors = 1 << i->localpalbits; i->imgbits = GetByte(i->parent); /* LZW Minimum Code Size */ if (i->imgbits > 11) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** ERROR *** Invalid LZW Minimum Code Size : %d\n", (int)i->imgbits); #endif return -1; } #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) { if(i->interlaced) fprintf(i->parent->log, "interlaced "); fprintf(i->parent->log, "img pos(%hu,%hu) size %hux%hu palbits=%hhu imgbits=%hhu ncolors=%hu\n", i->posX, i->posY, i->width, i->height, i->localpalbits, i->imgbits, i->ncolors); } #endif /* !defined(NGIFLIB_NO_FILE) */ if(i->imgbits==1) { /* fix for 1bit images ? 
*/ i->imgbits = 2; } clr = 1 << i->imgbits; eof = clr + 1; free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ stackp = stack_top = ab_stack + 4096; context.restbits = 0; /* initialise le "buffer" de lecture */ context.restbyte = 0; /* des codes LZW */ context.lbyte = 0; for(;;) { act_code = GetGifWord(i, &context); if(act_code==eof) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "End of image code\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 0; } if(npix==0) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "assez de pixels, On se casse !\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 1; } if(act_code==clr) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Code clear (%hu) (free=%hu) npix=%ld\n", clr, free, npix); #endif /* !defined(NGIFLIB_NO_FILE) */ /* clear */ free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ act_code = GetGifWord(i, &context); /* the first code after the clear code is concrete */ if (act_code >= clr) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Invalid code %hu just after clear(%hu) !\n", act_code, clr); #endif /* !defined(NGIFLIB_NO_FILE) */ return -1; } casspecial = (u8)act_code; old_code = act_code; if(npix > 0) WritePixel(i, &context, casspecial); npix--; } else if(act_code > free) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Invalid code %hu (free=%hu) !\n", act_code, free); #endif /* !defined(NGIFLIB_NO_FILE) */ return -1; } else { read_byt = act_code; if(act_code == free) { /* code pas encore dans alphabet */ /* printf("Code pas dans alphabet : %d>=%d push %d\n", act_code, free, casspecial); */ *(--stackp) = casspecial; /* dernier debut de chaine ! */ act_code = old_code; } /* printf("actcode=%d\n", act_code); */ while(act_code > clr) { /* code non concret */ /* fillstackloop empile les suffixes ! */ *(--stackp) = ab_suffx[act_code]; act_code = ab_prfx[act_code]; /* prefixe */ } /* act_code est concret */ casspecial = (u8)act_code; /* dernier debut de chaine ! */ *(--stackp) = casspecial; /* push on stack */ if(npix >= (stack_top - stackp)) { WritePixels(i, &context, stackp, stack_top - stackp); /* unstack all pixels at once */ } else if(npix > 0) { /* "pixel overflow" */ WritePixels(i, &context, stackp, npix); } npix -= (stack_top - stackp); stackp = stack_top; /* putchar('\n'); */ if(free < 4096) { /* la taille du dico est 4096 max ! */ ab_prfx[free] = old_code; ab_suffx[free] = (u8)act_code; free++; if((free > context.max) && (context.nbbit < 12)) { context.nbbit++; /* 1 bit de plus pour les codes LZW */ context.max += context.max + 1; } } old_code = read_byt; } } return 0; } /* ------------------------------------------------ * int LoadGif(struct ngiflib_gif *); * s'assurer que nimg=0 au depart ! 
* retourne : * 0 si GIF termin * un nombre negatif si ERREUR * 1 si image Decode * rappeler pour decoder les images suivantes * ------------------------------------------------ */ int LoadGif(struct ngiflib_gif * g) { struct ngiflib_gce gce; u8 sign; u8 tmp; int i; if(!g) return -1; gce.gce_present = 0; if(g->nimg==0) { GetByteStr(g, g->signature, 6); g->signature[6] = '\0'; if( g->signature[0] != 'G' || g->signature[1] != 'I' || g->signature[2] != 'F' || g->signature[3] != '8') { return -1; } #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "%s\n", g->signature); #endif /* !defined(NGIFLIB_NO_FILE) */ g->width = GetWord(g); g->height = GetWord(g); /* allocate frame buffer */ #ifndef NGIFLIB_INDEXED_ONLY if((g->mode & NGIFLIB_MODE_INDEXED)==0) g->frbuff.p32 = ngiflib_malloc(4*(long)g->height*(long)g->width); else #endif /* NGIFLIB_INDEXED_ONLY */ g->frbuff.p8 = ngiflib_malloc((long)g->height*(long)g->width); tmp = GetByte(g);/* <Packed Fields> = Global Color Table Flag 1 Bit Color Resolution 3 Bits Sort Flag 1 Bit Size of Global Color Table 3 Bits */ g->colorresolution = ((tmp & 0x70) >> 4) + 1; g->sort_flag = (tmp & 8) >> 3; g->imgbits = (tmp & 7) + 1; /* Global Palette color resolution */ g->ncolors = 1 << g->imgbits; g->backgroundindex = GetByte(g); #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "%hux%hu %hhubits %hu couleurs bg=%hhu\n", g->width, g->height, g->imgbits, g->ncolors, g->backgroundindex); #endif /* NGIFLIB_INDEXED_ONLY */ g->pixaspectratio = GetByte(g); /* pixel aspect ratio (0 : unspecified) */ if(tmp&0x80) { /* la palette globale suit. */ g->palette = (struct ngiflib_rgb *)ngiflib_malloc(sizeof(struct ngiflib_rgb)*g->ncolors); for(i=0; i<g->ncolors; i++) { g->palette[i].r = GetByte(g); g->palette[i].g = GetByte(g); g->palette[i].b = GetByte(g); #if defined(DEBUG) && !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "%3d %02X %02X %02X\n", i, g->palette[i].r,g->palette[i].g,g->palette[i].b); #endif /* defined(DEBUG) && !defined(NGIFLIB_NO_FILE) */ } #ifdef NGIFLIB_ENABLE_CALLBACKS if(g->palette_cb) g->palette_cb(g, g->palette, g->ncolors); #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { g->palette = NULL; } g->netscape_loop_count = -1; } for(;;) { char appid_auth[11]; u8 id,size; int blockindex; sign = GetByte(g); /* signature du prochain bloc */ #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "BLOCK SIGNATURE 0x%02X '%c'\n", sign, (sign >= 32) ? sign : '.'); #endif /* NGIFLIB_INDEXED_ONLY */ switch(sign) { case 0x3B: /* END OF GIF */ return 0; case '!': /* Extension introducer 0x21 */ id = GetByte(g); blockindex = 0; #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "extension (id=0x%02hhx)\n", id); #endif /* NGIFLIB_NO_FILE */ while( (size = GetByte(g)) ) { u8 ext[256]; GetByteStr(g, ext, size); switch(id) { case 0xF9: /* Graphic Control Extension */ /* The scope of this extension is the first graphic * rendering block to follow. */ gce.gce_present = 1; gce.disposal_method = (ext[0] >> 2) & 7; gce.transparent_flag = ext[0] & 1; gce.user_input_flag = (ext[0] >> 1) & 1; gce.delay_time = ext[1] | (ext[2]<<8); gce.transparent_color = ext[3]; #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "disposal_method=%hhu delay_time=%hu (transp=%hhu)transparent_color=0x%02hhX\n", gce.disposal_method, gce.delay_time, gce.transparent_flag, gce.transparent_color); #endif /* NGIFLIB_INDEXED_ONLY */ /* this propably should be adjusted depending on the disposal_method * of the _previous_ image. 
*/ if(gce.transparent_flag && ((g->nimg == 0) || gce.disposal_method == 2)) { FillGifBackGround(g); } break; case 0xFE: /* Comment Extension. */ #if !defined(NGIFLIB_NO_FILE) if(g->log) { if(blockindex==0) fprintf(g->log, "-------------------- Comment extension --------------------\n"); ext[size] = '\0'; fputs((char *)ext, g->log); } #endif /* NGIFLIB_NO_FILE */ break; case 0xFF: /* application extension */ /* NETSCAPE2.0 extension : * http://www.vurdalakov.net/misc/gif/netscape-looping-application-extension */ if(blockindex==0) { ngiflib_memcpy(appid_auth, ext, 11); #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "---------------- Application extension ---------------\n"); fprintf(g->log, "Application identifier : '%.8s', auth code : %02X %02X %02X (", appid_auth, ext[8], ext[9], ext[10]); fputc((ext[8]<32)?' ':ext[8], g->log); fputc((ext[9]<32)?' ':ext[9], g->log); fputc((ext[10]<32)?' ':ext[10], g->log); fprintf(g->log, ")\n"); } #endif /* NGIFLIB_INDEXED_ONLY */ } else { #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "Datas (as hex) : "); for(i=0; i<size; i++) { fprintf(g->log, "%02x ", ext[i]); } fprintf(g->log, "\nDatas (as text) : '"); for(i=0; i<size; i++) { putc((ext[i]<32)?' ':ext[i], g->log); } fprintf(g->log, "'\n"); } #endif /* NGIFLIB_INDEXED_ONLY */ if(0 == ngiflib_memcmp(appid_auth, "NETSCAPE2.0", 11)) { /* ext[0] : Sub-block ID */ if(ext[0] == 1) { /* 1 : Netscape Looping Extension. */ g->netscape_loop_count = (int)ext[1] | ((int)ext[2] << 8); #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "NETSCAPE loop_count = %d\n", g->netscape_loop_count); } #endif /* NGIFLIB_NO_FILE */ } } } break; case 0x01: /* plain text extension */ #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "Plain text extension blockindex=%d\n", blockindex); for(i=0; i<size; i++) { putc((ext[i]<32)?' ':ext[i], g->log); } putc('\n', g->log); } #endif /* NGIFLIB_INDEXED_ONLY */ break; } blockindex++; } switch(id) { case 0x01: /* plain text extension */ case 0xFE: /* Comment Extension. */ case 0xFF: /* application extension */ #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "-----------------------------------------------------------\n"); } #endif /* NGIFLIB_NO_FILE */ break; } break; case 0x2C: /* Image separator */ if(g->nimg==0) { g->cur_img = ngiflib_malloc(sizeof(struct ngiflib_img)); if(g->cur_img == NULL) return -2; /* memory error */ g->first_img = g->cur_img; } else { g->cur_img->next = ngiflib_malloc(sizeof(struct ngiflib_img)); if(g->cur_img->next == NULL) return -2; /* memory error */ g->cur_img = g->cur_img->next; } ngiflib_memset(g->cur_img, 0, sizeof(struct ngiflib_img)); g->cur_img->parent = g; if(gce.gce_present) { ngiflib_memcpy(&g->cur_img->gce, &gce, sizeof(struct ngiflib_gce)); } else { ngiflib_memset(&g->cur_img->gce, 0, sizeof(struct ngiflib_gce)); } if (DecodeGifImg(g->cur_img) < 0) return -1; g->nimg++; tmp = GetByte(g);/* 0 final */ #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "ZERO TERMINATOR 0x%02X\n", tmp); #endif /* NGIFLIB_INDEXED_ONLY */ return 1; /* image decode */ default: /* unexpected byte */ #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "unexpected signature 0x%02X\n", sign); #endif /* NGIFLIB_INDEXED_ONLY */ return -1; } } } u32 GifIndexToTrueColor(struct ngiflib_rgb * palette, u8 v) { return palette[v].b | (palette[v].g << 8) | (palette[v].r << 16); }
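In this record's WritePixel (and WritePixels), the interlace switch in the pre-fix version above bumps curY by the current pass's stride and, for passes 1-3, advances to the next pass when curY runs past the canvas; but the new pass's start row (posY+4, posY+2, posY+1) is assigned without re-checking that it is itself inside the canvas. For a sufficiently short image, curY can therefore land past the last row, and the frame-buffer pointer recomputation that immediately follows makes the next row of writes go out of bounds. The fixed version below replaces the per-case checks with a while loop that keeps cascading passes until curY is back in range. The standard GIF interlace schedule with that cascading advance, as a standalone sketch:

/* GIF interlace schedule: pass 1 = rows 0,8,16..., pass 2 = 4,12...,
 * pass 3 = 2,6,10..., pass 4 = 1,3,5... (pass 0 = non-interlaced).
 * Returns the next row for a sub-image starting at 'top' on a canvas
 * of 'height' rows, advancing passes while the candidate row falls
 * off the end — mirroring the while loop in the fixed WritePixel. */
static int next_interlaced_row(int y, int *pass, int top, int height)
{
	static const int step[5]  = { 1, 8, 8, 4, 2 };
	static const int start[5] = { 0, 0, 4, 2, 1 };

	y += step[*pass];
	while(*pass >= 1 && *pass < 4 && y >= height) {
		(*pass)++;
		y = top + start[*pass];
	}
	return y;
}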
#ifndef NGIFLIB_NO_FILE #include <stdio.h> #endif /* NGIFLIB_NO_FILE */ #include "ngiflib.h" /* decodeur GIF en C portable (pas de pb big/little endian) * Thomas BERNARD. janvier 2004. * (c) 2004-2019 Thomas Bernard. All rights reserved */ /* Fonction de debug */ #ifdef DEBUG void fprintf_ngiflib_img(FILE * f, struct ngiflib_img * i) { fprintf(f, " * ngiflib_img @ %p\n", i); fprintf(f, " next = %p\n", i->next); fprintf(f, " parent = %p\n", i->parent); fprintf(f, " palette = %p\n", i->palette); fprintf(f, " %3d couleurs", i->ncolors); if(i->interlaced) fprintf(f, " interlaced"); fprintf(f, "\n taille : %dx%d, pos (%d,%d)\n", i->width, i->height, i->posX, i->posY); fprintf(f, " sort_flag=%x localpalbits=%d\n", i->sort_flag, i->localpalbits); } #endif /* DEBUG */ void GifImgDestroy(struct ngiflib_img * i) { if(i==NULL) return; if(i->next) GifImgDestroy(i->next); if(i->palette && (i->palette != i->parent->palette)) ngiflib_free(i->palette); ngiflib_free(i); } /* Fonction de debug */ #ifdef DEBUG void fprintf_ngiflib_gif(FILE * f, struct ngiflib_gif * g) { struct ngiflib_img * i; fprintf(f, "* ngiflib_gif @ %p %s\n", g, g->signature); fprintf(f, " %dx%d, %d bits, %d couleurs\n", g->width, g->height, g->imgbits, g->ncolors); fprintf(f, " palette = %p, backgroundcolorindex %d\n", g->palette, g->backgroundindex); fprintf(f, " pixelaspectratio = %d\n", g->pixaspectratio); fprintf(f, " frbuff = %p\n", g->frbuff.p8); fprintf(f, " cur_img = %p\n", g->cur_img); fprintf(f, " %d images :\n", g->nimg); i = g->first_img; while(i) { fprintf_ngiflib_img(f, i); i = i->next; } } #endif /* DEBUG */ void GifDestroy(struct ngiflib_gif * g) { if(g==NULL) return; GifImgDestroy(g->first_img); if(g->palette) ngiflib_free(g->palette); if(g->frbuff.p8) ngiflib_free(g->frbuff.p8); ngiflib_free(g); } /* u8 GetByte(struct ngiflib_gif * g); * fonction qui renvoie un octet du fichier .gif * on pourait optimiser en faisant 2 fonctions. */ static u8 GetByte(struct ngiflib_gif * g) { #ifndef NGIFLIB_NO_FILE if(g->mode & NGIFLIB_MODE_FROM_MEM) { #endif /* NGIFLIB_NO_FILE */ return *(g->input.bytes++); #ifndef NGIFLIB_NO_FILE } else { return (u8)(getc(g->input.file)); } #endif /* NGIFLIB_NO_FILE */ } /* u16 GetWord() * Renvoie un mot de 16bits * N'est pas influencee par l'endianess du CPU ! */ static u16 GetWord(struct ngiflib_gif * g) { u16 r = (u16)GetByte(g); r |= ((u16)GetByte(g) << 8); return r; } /* int GetByteStr(struct ngiflib_gif * g, u8 * p, int n); * prend en argument un pointeur sur la destination * et le nombre d'octet a lire. * Renvoie 0 si l'operation a reussi, -1 sinon. */ static int GetByteStr(struct ngiflib_gif * g, u8 * p, int n) { if(!p) return -1; #ifndef NGIFLIB_NO_FILE if(g->mode & NGIFLIB_MODE_FROM_MEM) { #endif /* NGIFLIB_NO_FILE */ ngiflib_memcpy(p, g->input.bytes, n); g->input.bytes += n; return 0; #ifndef NGIFLIB_NO_FILE } else { size_t read; read = fread(p, 1, n, g->input.file); return ((int)read == n) ? 
0 : -1; } #endif /* NGIFLIB_NO_FILE */ } /* void WritePixel(struct ngiflib_img * i, u8 v); * ecrit le pixel de valeur v dans le frame buffer */ static void WritePixel(struct ngiflib_img * i, struct ngiflib_decode_context * context, u8 v) { struct ngiflib_gif * p = i->parent; if(v!=i->gce.transparent_color || !i->gce.transparent_flag) { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ *context->frbuff_p.p8 = v; #ifndef NGIFLIB_INDEXED_ONLY } else *context->frbuff_p.p32 = GifIndexToTrueColor(i->palette, v); #endif /* NGIFLIB_INDEXED_ONLY */ } if(--(context->Xtogo) <= 0) { #ifdef NGIFLIB_ENABLE_CALLBACKS if(p->line_cb) p->line_cb(p, context->line_p, context->curY); #endif /* NGIFLIB_ENABLE_CALLBACKS */ context->Xtogo = i->width; switch(context->pass) { case 0: context->curY++; break; case 1: /* 1st pass : every eighth row starting from 0 */ context->curY += 8; break; case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY += 8; break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY += 4; break; case 4: /* 4th pass : every odd row */ context->curY += 2; break; } while(context->pass > 0 && context->pass < 4 && context->curY >= p->height) { switch(++context->pass) { case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY = i->posY + 4; break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY = i->posY + 2; break; case 4: /* 4th pass : every odd row */ context->curY = i->posY + 1; break; } } #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width; context->frbuff_p.p8 = context->line_p.p8 + i->posX; #else context->frbuff_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #ifndef NGIFLIB_INDEXED_ONLY } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width; context->frbuff_p.p32 = context->line_p.p32 + i->posX; #else context->frbuff_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ } else { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ context->frbuff_p.p8++; #ifndef NGIFLIB_INDEXED_ONLY } else { context->frbuff_p.p32++; } #endif /* NGIFLIB_INDEXED_ONLY */ } } /* void WritePixels(struct ngiflib_img * i, const u8 * pixels, u16 n); * ecrit les pixels dans le frame buffer */ static void WritePixels(struct ngiflib_img * i, struct ngiflib_decode_context * context, const u8 * pixels, u16 n) { u16 tocopy; struct ngiflib_gif * p = i->parent; while(n > 0) { tocopy = (context->Xtogo < n) ? 
context->Xtogo : n; if(!i->gce.transparent_flag) { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ ngiflib_memcpy(context->frbuff_p.p8, pixels, tocopy); pixels += tocopy; context->frbuff_p.p8 += tocopy; #ifndef NGIFLIB_INDEXED_ONLY } else { int j; for(j = (int)tocopy; j > 0; j--) { *(context->frbuff_p.p32++) = GifIndexToTrueColor(i->palette, *pixels++); } } #endif /* NGIFLIB_INDEXED_ONLY */ } else { int j; #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ for(j = (int)tocopy; j > 0; j--) { if(*pixels != i->gce.transparent_color) *context->frbuff_p.p8 = *pixels; pixels++; context->frbuff_p.p8++; } #ifndef NGIFLIB_INDEXED_ONLY } else { for(j = (int)tocopy; j > 0; j--) { if(*pixels != i->gce.transparent_color) { *context->frbuff_p.p32 = GifIndexToTrueColor(i->palette, *pixels); } pixels++; context->frbuff_p.p32++; } } #endif /* NGIFLIB_INDEXED_ONLY */ } context->Xtogo -= tocopy; if(context->Xtogo == 0) { #ifdef NGIFLIB_ENABLE_CALLBACKS if(p->line_cb) p->line_cb(p, context->line_p, context->curY); #endif /* NGIFLIB_ENABLE_CALLBACKS */ context->Xtogo = i->width; switch(context->pass) { case 0: context->curY++; break; case 1: /* 1st pass : every eighth row starting from 0 */ context->curY += 8; break; case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY += 8; break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY += 4; break; case 4: /* 4th pass : every odd row */ context->curY += 2; break; } while(context->pass > 0 && context->pass < 4 && context->curY >= p->height) { switch(++context->pass) { case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY = i->posY + 4; break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY = i->posY + 2; break; case 4: /* 4th pass : every odd row */ context->curY = i->posY + 1; break; } } #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width; context->frbuff_p.p8 = context->line_p.p8 + i->posX; #else context->frbuff_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #ifndef NGIFLIB_INDEXED_ONLY } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width; context->frbuff_p.p32 = context->line_p.p32 + i->posX; #else context->frbuff_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ } n -= tocopy; } } /* * u16 GetGifWord(struct ngiflib_img * i); * Renvoie un code LZW (taille variable) */ static u16 GetGifWord(struct ngiflib_img * i, struct ngiflib_decode_context * context) { u16 r; int bits_todo; u16 newbyte; bits_todo = (int)context->nbbit - (int)context->restbits; if( bits_todo <= 0) { /* nbbit <= restbits */ r = context->lbyte; context->restbits -= context->nbbit; context->lbyte >>= context->nbbit; } else if( bits_todo > 8 ) { /* nbbit > restbits + 8 */ if(context->restbyte >= 2) { context->restbyte -= 2; r = *context->srcbyte++; } else { if(context->restbyte == 0) { context->restbyte = GetByte(i->parent); #if defined(DEBUG) && !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "restbyte = %02X\n", context->restbyte); #endif /* defined(DEBUG) && !defined(NGIFLIB_NO_FILE) */ GetByteStr(i->parent, context->byte_buffer, context->restbyte); 
context->srcbyte = context->byte_buffer; } r = *context->srcbyte++; if(--context->restbyte == 0) { context->restbyte = GetByte(i->parent); #if defined(DEBUG) && !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "restbyte = %02X\n", context->restbyte); #endif /* defined(DEBUG) && !defined(NGIFLIB_NO_FILE) */ GetByteStr(i->parent, context->byte_buffer, context->restbyte); context->srcbyte = context->byte_buffer; } context->restbyte--; } newbyte = *context->srcbyte++; r |= newbyte << 8; r = (r << context->restbits) | context->lbyte; context->restbits = 16 - bits_todo; context->lbyte = newbyte >> (bits_todo - 8); } else /*if( bits_todo > 0 )*/ { /* nbbit > restbits */ if(context->restbyte == 0) { context->restbyte = GetByte(i->parent); #if defined(DEBUG) && !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "restbyte = %02X\n", context->restbyte); #endif /* defined(DEBUG) && !defined(NGIFLIB_NO_FILE) */ GetByteStr(i->parent, context->byte_buffer, context->restbyte); context->srcbyte = context->byte_buffer; } newbyte = *context->srcbyte++; context->restbyte--; r = (newbyte << context->restbits) | context->lbyte; context->restbits = 8 - bits_todo; context->lbyte = newbyte >> bits_todo; } return (r & context->max); /* applique le bon masque pour eliminer les bits en trop */ } /* ------------------------------------------------ */ static void FillGifBackGround(struct ngiflib_gif * g) { long n = (long)g->width*g->height; #ifndef NGIFLIB_INDEXED_ONLY u32 bg_truecolor; #endif /* NGIFLIB_INDEXED_ONLY */ if((g->frbuff.p8==NULL)||(g->palette==NULL)) return; #ifndef NGIFLIB_INDEXED_ONLY if(g->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ ngiflib_memset(g->frbuff.p8, g->backgroundindex, n); #ifndef NGIFLIB_INDEXED_ONLY } else { u32 * p = g->frbuff.p32; bg_truecolor = GifIndexToTrueColor(g->palette, g->backgroundindex); while(n-->0) *p++ = bg_truecolor; } #endif /* NGIFLIB_INDEXED_ONLY */ } /* ------------------------------------------------ */ int CheckGif(u8 * b) { return (b[0]=='G')&&(b[1]=='I')&&(b[2]=='F')&&(b[3]=='8'); } /* ------------------------------------------------ */ static int DecodeGifImg(struct ngiflib_img * i) { struct ngiflib_decode_context context; long npix; u8 * stackp; u8 * stack_top; u16 clr; u16 eof; u16 free; u16 act_code = 0; u16 old_code = 0; u16 read_byt; u16 ab_prfx[4096]; u8 ab_suffx[4096]; u8 ab_stack[4096]; u8 flags; u8 casspecial = 0; if(!i) return -1; i->posX = GetWord(i->parent); /* offsetX */ i->posY = GetWord(i->parent); /* offsetY */ i->width = GetWord(i->parent); /* SizeX */ i->height = GetWord(i->parent); /* SizeY */ if((i->width > i->parent->width) || (i->height > i->parent->height)) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** ERROR *** Image bigger than global GIF canvas !\n"); #endif return -1; } if((i->posX + i->width) > i->parent->width) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting X position\n"); #endif i->posX = i->parent->width - i->width; } if((i->posY + i->height) > i->parent->height) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** WARNING *** Adjusting Y position\n"); #endif i->posY = i->parent->height - i->height; } context.Xtogo = i->width; context.curY = i->posY; #ifdef NGIFLIB_INDEXED_ONLY #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = 
i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #else if(i->parent->mode & NGIFLIB_MODE_INDEXED) { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width; context.frbuff_p.p8 = context.line_p.p8 + i->posX; #else context.frbuff_p.p8 = i->parent->frbuff.p8 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context.line_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width; context.frbuff_p.p32 = context.line_p.p32 + i->posX; #else context.frbuff_p.p32 = i->parent->frbuff.p32 + (u32)i->posY*i->parent->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ npix = (long)i->width * i->height; flags = GetByte(i->parent); i->interlaced = (flags & 64) >> 6; context.pass = i->interlaced ? 1 : 0; i->sort_flag = (flags & 32) >> 5; /* is local palette sorted by color frequency ? */ i->localpalbits = (flags & 7) + 1; if(flags&128) { /* palette locale */ int k; int localpalsize = 1 << i->localpalbits; #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Local palette\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ i->palette = (struct ngiflib_rgb *)ngiflib_malloc(sizeof(struct ngiflib_rgb)*localpalsize); for(k=0; k<localpalsize; k++) { i->palette[k].r = GetByte(i->parent); i->palette[k].g = GetByte(i->parent); i->palette[k].b = GetByte(i->parent); } #ifdef NGIFLIB_ENABLE_CALLBACKS if(i->parent->palette_cb) i->parent->palette_cb(i->parent, i->palette, localpalsize); #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { i->palette = i->parent->palette; i->localpalbits = i->parent->imgbits; } i->ncolors = 1 << i->localpalbits; i->imgbits = GetByte(i->parent); /* LZW Minimum Code Size */ if (i->imgbits > 11) { #if !defined(NGIFLIB_NO_FILE) if(i->parent->log) fprintf(i->parent->log, "*** ERROR *** Invalid LZW Minimum Code Size : %d\n", (int)i->imgbits); #endif return -1; } #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) { if(i->interlaced) fprintf(i->parent->log, "interlaced "); fprintf(i->parent->log, "img pos(%hu,%hu) size %hux%hu palbits=%hhu imgbits=%hhu ncolors=%hu\n", i->posX, i->posY, i->width, i->height, i->localpalbits, i->imgbits, i->ncolors); } #endif /* !defined(NGIFLIB_NO_FILE) */ if(i->imgbits==1) { /* fix for 1bit images ? 
*/ i->imgbits = 2; } clr = 1 << i->imgbits; eof = clr + 1; free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ stackp = stack_top = ab_stack + 4096; context.restbits = 0; /* initialise le "buffer" de lecture */ context.restbyte = 0; /* des codes LZW */ context.lbyte = 0; for(;;) { act_code = GetGifWord(i, &context); if(act_code==eof) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "End of image code\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 0; } if(npix==0) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "assez de pixels, On se casse !\n"); #endif /* !defined(NGIFLIB_NO_FILE) */ return 1; } if(act_code==clr) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Code clear (%hu) (free=%hu) npix=%ld\n", clr, free, npix); #endif /* !defined(NGIFLIB_NO_FILE) */ /* clear */ free = clr + 2; context.nbbit = i->imgbits + 1; context.max = clr + clr - 1; /* (1 << context.nbbit) - 1 */ act_code = GetGifWord(i, &context); /* the first code after the clear code is concrete */ if (act_code >= clr) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Invalid code %hu just after clear(%hu) !\n", act_code, clr); #endif /* !defined(NGIFLIB_NO_FILE) */ return -1; } casspecial = (u8)act_code; old_code = act_code; if(npix > 0) WritePixel(i, &context, casspecial); npix--; } else if(act_code > free) { #if !defined(NGIFLIB_NO_FILE) if(i->parent && i->parent->log) fprintf(i->parent->log, "Invalid code %hu (free=%hu) !\n", act_code, free); #endif /* !defined(NGIFLIB_NO_FILE) */ return -1; } else { read_byt = act_code; if(act_code == free) { /* code pas encore dans alphabet */ /* printf("Code pas dans alphabet : %d>=%d push %d\n", act_code, free, casspecial); */ *(--stackp) = casspecial; /* dernier debut de chaine ! */ act_code = old_code; } /* printf("actcode=%d\n", act_code); */ while(act_code > clr) { /* code non concret */ /* fillstackloop empile les suffixes ! */ *(--stackp) = ab_suffx[act_code]; act_code = ab_prfx[act_code]; /* prefixe */ } /* act_code est concret */ casspecial = (u8)act_code; /* dernier debut de chaine ! */ *(--stackp) = casspecial; /* push on stack */ if(npix >= (stack_top - stackp)) { WritePixels(i, &context, stackp, stack_top - stackp); /* unstack all pixels at once */ } else if(npix > 0) { /* "pixel overflow" */ WritePixels(i, &context, stackp, npix); } npix -= (stack_top - stackp); stackp = stack_top; /* putchar('\n'); */ if(free < 4096) { /* la taille du dico est 4096 max ! */ ab_prfx[free] = old_code; ab_suffx[free] = (u8)act_code; free++; if((free > context.max) && (context.nbbit < 12)) { context.nbbit++; /* 1 bit de plus pour les codes LZW */ context.max += context.max + 1; } } old_code = read_byt; } } return 0; } /* ------------------------------------------------ * int LoadGif(struct ngiflib_gif *); * s'assurer que nimg=0 au depart ! 
* retourne : * 0 si GIF termin * un nombre negatif si ERREUR * 1 si image Decode * rappeler pour decoder les images suivantes * ------------------------------------------------ */ int LoadGif(struct ngiflib_gif * g) { struct ngiflib_gce gce; u8 sign; u8 tmp; int i; if(!g) return -1; gce.gce_present = 0; if(g->nimg==0) { GetByteStr(g, g->signature, 6); g->signature[6] = '\0'; if( g->signature[0] != 'G' || g->signature[1] != 'I' || g->signature[2] != 'F' || g->signature[3] != '8') { return -1; } #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "%s\n", g->signature); #endif /* !defined(NGIFLIB_NO_FILE) */ g->width = GetWord(g); g->height = GetWord(g); /* allocate frame buffer */ #ifndef NGIFLIB_INDEXED_ONLY if((g->mode & NGIFLIB_MODE_INDEXED)==0) g->frbuff.p32 = ngiflib_malloc(4*(long)g->height*(long)g->width); else #endif /* NGIFLIB_INDEXED_ONLY */ g->frbuff.p8 = ngiflib_malloc((long)g->height*(long)g->width); tmp = GetByte(g);/* <Packed Fields> = Global Color Table Flag 1 Bit Color Resolution 3 Bits Sort Flag 1 Bit Size of Global Color Table 3 Bits */ g->colorresolution = ((tmp & 0x70) >> 4) + 1; g->sort_flag = (tmp & 8) >> 3; g->imgbits = (tmp & 7) + 1; /* Global Palette color resolution */ g->ncolors = 1 << g->imgbits; g->backgroundindex = GetByte(g); #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "%hux%hu %hhubits %hu couleurs bg=%hhu\n", g->width, g->height, g->imgbits, g->ncolors, g->backgroundindex); #endif /* NGIFLIB_INDEXED_ONLY */ g->pixaspectratio = GetByte(g); /* pixel aspect ratio (0 : unspecified) */ if(tmp&0x80) { /* la palette globale suit. */ g->palette = (struct ngiflib_rgb *)ngiflib_malloc(sizeof(struct ngiflib_rgb)*g->ncolors); for(i=0; i<g->ncolors; i++) { g->palette[i].r = GetByte(g); g->palette[i].g = GetByte(g); g->palette[i].b = GetByte(g); #if defined(DEBUG) && !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "%3d %02X %02X %02X\n", i, g->palette[i].r,g->palette[i].g,g->palette[i].b); #endif /* defined(DEBUG) && !defined(NGIFLIB_NO_FILE) */ } #ifdef NGIFLIB_ENABLE_CALLBACKS if(g->palette_cb) g->palette_cb(g, g->palette, g->ncolors); #endif /* NGIFLIB_ENABLE_CALLBACKS */ } else { g->palette = NULL; } g->netscape_loop_count = -1; } for(;;) { char appid_auth[11]; u8 id,size; int blockindex; sign = GetByte(g); /* signature du prochain bloc */ #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "BLOCK SIGNATURE 0x%02X '%c'\n", sign, (sign >= 32) ? sign : '.'); #endif /* NGIFLIB_INDEXED_ONLY */ switch(sign) { case 0x3B: /* END OF GIF */ return 0; case '!': /* Extension introducer 0x21 */ id = GetByte(g); blockindex = 0; #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "extension (id=0x%02hhx)\n", id); #endif /* NGIFLIB_NO_FILE */ while( (size = GetByte(g)) ) { u8 ext[256]; GetByteStr(g, ext, size); switch(id) { case 0xF9: /* Graphic Control Extension */ /* The scope of this extension is the first graphic * rendering block to follow. */ gce.gce_present = 1; gce.disposal_method = (ext[0] >> 2) & 7; gce.transparent_flag = ext[0] & 1; gce.user_input_flag = (ext[0] >> 1) & 1; gce.delay_time = ext[1] | (ext[2]<<8); gce.transparent_color = ext[3]; #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "disposal_method=%hhu delay_time=%hu (transp=%hhu)transparent_color=0x%02hhX\n", gce.disposal_method, gce.delay_time, gce.transparent_flag, gce.transparent_color); #endif /* NGIFLIB_INDEXED_ONLY */ /* this propably should be adjusted depending on the disposal_method * of the _previous_ image. 
*/ if(gce.transparent_flag && ((g->nimg == 0) || gce.disposal_method == 2)) { FillGifBackGround(g); } break; case 0xFE: /* Comment Extension. */ #if !defined(NGIFLIB_NO_FILE) if(g->log) { if(blockindex==0) fprintf(g->log, "-------------------- Comment extension --------------------\n"); ext[size] = '\0'; fputs((char *)ext, g->log); } #endif /* NGIFLIB_NO_FILE */ break; case 0xFF: /* application extension */ /* NETSCAPE2.0 extension : * http://www.vurdalakov.net/misc/gif/netscape-looping-application-extension */ if(blockindex==0) { ngiflib_memcpy(appid_auth, ext, 11); #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "---------------- Application extension ---------------\n"); fprintf(g->log, "Application identifier : '%.8s', auth code : %02X %02X %02X (", appid_auth, ext[8], ext[9], ext[10]); fputc((ext[8]<32)?' ':ext[8], g->log); fputc((ext[9]<32)?' ':ext[9], g->log); fputc((ext[10]<32)?' ':ext[10], g->log); fprintf(g->log, ")\n"); } #endif /* NGIFLIB_INDEXED_ONLY */ } else { #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "Datas (as hex) : "); for(i=0; i<size; i++) { fprintf(g->log, "%02x ", ext[i]); } fprintf(g->log, "\nDatas (as text) : '"); for(i=0; i<size; i++) { putc((ext[i]<32)?' ':ext[i], g->log); } fprintf(g->log, "'\n"); } #endif /* NGIFLIB_INDEXED_ONLY */ if(0 == ngiflib_memcmp(appid_auth, "NETSCAPE2.0", 11)) { /* ext[0] : Sub-block ID */ if(ext[0] == 1) { /* 1 : Netscape Looping Extension. */ g->netscape_loop_count = (int)ext[1] | ((int)ext[2] << 8); #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "NETSCAPE loop_count = %d\n", g->netscape_loop_count); } #endif /* NGIFLIB_NO_FILE */ } } } break; case 0x01: /* plain text extension */ #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "Plain text extension blockindex=%d\n", blockindex); for(i=0; i<size; i++) { putc((ext[i]<32)?' ':ext[i], g->log); } putc('\n', g->log); } #endif /* NGIFLIB_INDEXED_ONLY */ break; } blockindex++; } switch(id) { case 0x01: /* plain text extension */ case 0xFE: /* Comment Extension. */ case 0xFF: /* application extension */ #if !defined(NGIFLIB_NO_FILE) if(g->log) { fprintf(g->log, "-----------------------------------------------------------\n"); } #endif /* NGIFLIB_NO_FILE */ break; } break; case 0x2C: /* Image separator */ if(g->nimg==0) { g->cur_img = ngiflib_malloc(sizeof(struct ngiflib_img)); if(g->cur_img == NULL) return -2; /* memory error */ g->first_img = g->cur_img; } else { g->cur_img->next = ngiflib_malloc(sizeof(struct ngiflib_img)); if(g->cur_img->next == NULL) return -2; /* memory error */ g->cur_img = g->cur_img->next; } ngiflib_memset(g->cur_img, 0, sizeof(struct ngiflib_img)); g->cur_img->parent = g; if(gce.gce_present) { ngiflib_memcpy(&g->cur_img->gce, &gce, sizeof(struct ngiflib_gce)); } else { ngiflib_memset(&g->cur_img->gce, 0, sizeof(struct ngiflib_gce)); } if (DecodeGifImg(g->cur_img) < 0) return -1; g->nimg++; tmp = GetByte(g);/* 0 final */ #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "ZERO TERMINATOR 0x%02X\n", tmp); #endif /* NGIFLIB_INDEXED_ONLY */ return 1; /* image decode */ default: /* unexpected byte */ #if !defined(NGIFLIB_NO_FILE) if(g->log) fprintf(g->log, "unexpected signature 0x%02X\n", sign); #endif /* NGIFLIB_INDEXED_ONLY */ return -1; } } } u32 GifIndexToTrueColor(struct ngiflib_rgb * palette, u8 v) { return palette[v].b | (palette[v].g << 8) | (palette[v].r << 16); }
static void WritePixel(struct ngiflib_img * i, struct ngiflib_decode_context * context, u8 v) { struct ngiflib_gif * p = i->parent; if(v!=i->gce.transparent_color || !i->gce.transparent_flag) { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ *context->frbuff_p.p8 = v; #ifndef NGIFLIB_INDEXED_ONLY } else *context->frbuff_p.p32 = GifIndexToTrueColor(i->palette, v); #endif /* NGIFLIB_INDEXED_ONLY */ } if(--(context->Xtogo) <= 0) { #ifdef NGIFLIB_ENABLE_CALLBACKS if(p->line_cb) p->line_cb(p, context->line_p, context->curY); #endif /* NGIFLIB_ENABLE_CALLBACKS */ context->Xtogo = i->width; switch(context->pass) { case 0: context->curY++; break; case 1: /* 1st pass : every eighth row starting from 0 */ context->curY += 8; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 4; } break; case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY += 8; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 2; } break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY += 4; if(context->curY >= p->height) { context->pass++; context->curY = i->posY + 1; } break; case 4: /* 4th pass : every odd row */ context->curY += 2; break; } #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width; context->frbuff_p.p8 = context->line_p.p8 + i->posX; #else context->frbuff_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #ifndef NGIFLIB_INDEXED_ONLY } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width; context->frbuff_p.p32 = context->line_p.p32 + i->posX; #else context->frbuff_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ } else { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ context->frbuff_p.p8++; #ifndef NGIFLIB_INDEXED_ONLY } else { context->frbuff_p.p32++; } #endif /* NGIFLIB_INDEXED_ONLY */ } }
static void WritePixel(struct ngiflib_img * i, struct ngiflib_decode_context * context, u8 v) { struct ngiflib_gif * p = i->parent; if(v!=i->gce.transparent_color || !i->gce.transparent_flag) { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ *context->frbuff_p.p8 = v; #ifndef NGIFLIB_INDEXED_ONLY } else *context->frbuff_p.p32 = GifIndexToTrueColor(i->palette, v); #endif /* NGIFLIB_INDEXED_ONLY */ } if(--(context->Xtogo) <= 0) { #ifdef NGIFLIB_ENABLE_CALLBACKS if(p->line_cb) p->line_cb(p, context->line_p, context->curY); #endif /* NGIFLIB_ENABLE_CALLBACKS */ context->Xtogo = i->width; switch(context->pass) { case 0: context->curY++; break; case 1: /* 1st pass : every eighth row starting from 0 */ context->curY += 8; break; case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY += 8; break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY += 4; break; case 4: /* 4th pass : every odd row */ context->curY += 2; break; } while(context->pass > 0 && context->pass < 4 && context->curY >= p->height) { switch(++context->pass) { case 2: /* 2nd pass : every eighth row starting from 4 */ context->curY = i->posY + 4; break; case 3: /* 3rd pass : every fourth row starting from 2 */ context->curY = i->posY + 2; break; case 4: /* 4th pass : every odd row */ context->curY = i->posY + 1; break; } } #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width; context->frbuff_p.p8 = context->line_p.p8 + i->posX; #else context->frbuff_p.p8 = p->frbuff.p8 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ #ifndef NGIFLIB_INDEXED_ONLY } else { #ifdef NGIFLIB_ENABLE_CALLBACKS context->line_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width; context->frbuff_p.p32 = context->line_p.p32 + i->posX; #else context->frbuff_p.p32 = p->frbuff.p32 + (u32)context->curY*p->width + i->posX; #endif /* NGIFLIB_ENABLE_CALLBACKS */ } #endif /* NGIFLIB_INDEXED_ONLY */ } else { #ifndef NGIFLIB_INDEXED_ONLY if(p->mode & NGIFLIB_MODE_INDEXED) { #endif /* NGIFLIB_INDEXED_ONLY */ context->frbuff_p.p8++; #ifndef NGIFLIB_INDEXED_ONLY } else { context->frbuff_p.p32++; } #endif /* NGIFLIB_INDEXED_ONLY */ } }
{'added': [(9, ' * (c) 2004-2019 Thomas Bernard. All rights reserved'), (149, '\t\twhile(context->pass > 0 && context->pass < 4 &&'), (150, '\t\t context->curY >= p->height) {'), (151, '\t\t\tswitch(++context->pass) {'), (152, '\t\t\tcase 2:\t/* 2nd pass : every eighth row starting from 4 */'), (153, '\t\t\t\tcontext->curY = i->posY + 4;'), (154, '\t\t\t\tbreak;'), (155, '\t\t\tcase 3:\t/* 3rd pass : every fourth row starting from 2 */'), (156, '\t\t\t\tcontext->curY = i->posY + 2;'), (157, '\t\t\t\tbreak;'), (158, '\t\t\tcase 4:\t/* 4th pass : every odd row */'), (159, '\t\t\t\tcontext->curY = i->posY + 1;'), (160, '\t\t\t\tbreak;'), (161, '\t\t\t}'), (162, '\t\t}'), (265, '\t\t\twhile(context->pass > 0 && context->pass < 4 &&'), (266, '\t\t\t context->curY >= p->height) {'), (267, '\t\t\t\tswitch(++context->pass) {'), (268, '\t\t\t\tcase 2:\t/* 2nd pass : every eighth row starting from 4 */'), (269, '\t\t\t\t\tcontext->curY = i->posY + 4;'), (270, '\t\t\t\t\tbreak;'), (271, '\t\t\t\tcase 3:\t/* 3rd pass : every fourth row starting from 2 */'), (272, '\t\t\t\t\tcontext->curY = i->posY + 2;'), (273, '\t\t\t\t\tbreak;'), (274, '\t\t\t\tcase 4:\t/* 4th pass : every odd row */'), (275, '\t\t\t\t\tcontext->curY = i->posY + 1;'), (276, '\t\t\t\t\tbreak;'), (277, '\t\t\t\t}'), (278, '\t\t\t}')], 'deleted': [(9, ' * (c) 2004-2017 Thomas Bernard. All rights reserved'), (138, '\t\t\tif(context->curY >= p->height) {'), (139, '\t\t\t\tcontext->pass++;'), (140, '\t\t\t\tcontext->curY = i->posY + 4;'), (141, '\t\t\t}'), (145, '\t\t\tif(context->curY >= p->height) {'), (146, '\t\t\t\tcontext->pass++;'), (147, '\t\t\t\tcontext->curY = i->posY + 2;'), (148, '\t\t\t}'), (152, '\t\t\tif(context->curY >= p->height) {'), (153, '\t\t\t\tcontext->pass++;'), (154, '\t\t\t\tcontext->curY = i->posY + 1;'), (155, '\t\t\t}'), (252, '\t\t\t\tif(context->curY >= p->height) {'), (253, '\t\t\t\t\tcontext->pass++;'), (254, '\t\t\t\t\tcontext->curY = i->posY + 4;'), (255, '\t\t\t\t}'), (259, '\t\t\t\tif(context->curY >= p->height) {'), (260, '\t\t\t\t\tcontext->pass++;'), (261, '\t\t\t\t\tcontext->curY = i->posY + 2;'), (262, '\t\t\t\t}'), (266, '\t\t\t\tif(context->curY >= p->height) {'), (267, '\t\t\t\t\tcontext->pass++;'), (268, '\t\t\t\t\tcontext->curY = i->posY + 1;'), (269, '\t\t\t\t}')]}
29
25
594
5,196
58
441
19
https://github.com/miniupnp/ngiflib
CVE-2019-16346
CWE-787
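For orientation on the row above (CVE-2019-16346, CWE-787): the patch converts ngiflib's per-pass `if (curY >= p->height)` checks into a single `while` loop. Pre-patch, each row step could advance at most one interlace pass, so a very short interlaced image (e.g. 3 rows, where pass 2 starts at row 4) left `curY` pointing past the frame buffer and the next write went out of bounds. A minimal standalone sketch of the corrected pass-advance logic — `next_interlaced_row` and its tables are illustrative simplifications, not ngiflib's actual API:

#include <stdio.h>

/* Advance one row of a GIF interlaced scan.
 * pass 1: rows 0, 8, 16, ...   pass 2: rows 4, 12, ...
 * pass 3: rows 2, 6, 10, ...   pass 4: rows 1, 3, 5, ... */
static int next_interlaced_row(int cur_y, int pos_y, int height, int *pass)
{
    static const int step[5]  = { 1, 8, 8, 4, 2 }; /* stride per pass */
    static const int start[5] = { 0, 0, 4, 2, 1 }; /* first row per pass */

    cur_y += step[*pass];
    /* The fix: keep advancing passes while the row is out of range.
     * A single 'if' here reproduces the pre-patch bug: one overflowing
     * pass could leave cur_y >= height and later writes out of bounds. */
    while (*pass > 0 && *pass < 4 && cur_y >= height) {
        ++*pass;
        cur_y = pos_y + start[*pass];
    }
    return cur_y;
}

int main(void)
{
    /* A 3-row interlaced image: after row 0 in pass 1, the loop must
     * hop over pass 2 (row 4 >= 3) straight to pass 3 (row 2). */
    int pass = 1, height = 3;
    int y = next_interlaced_row(0, 0, height, &pass);
    printf("next row %d in pass %d\n", y, pass); /* prints: next row 2 in pass 3 */
    return 0;
}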
2,839
unsquash-4.c
C
read_filesystem_tables_4
/* * Unsquash a squashfs filesystem. This is a highly compressed read only * filesystem. * * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2019 * Phillip Lougher <phillip@squashfs.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * unsquash-4.c */ #include "unsquashfs.h" #include "squashfs_swap.h" #include "xattr.h" static struct squashfs_fragment_entry *fragment_table; static unsigned int *id_table; static int read_fragment_table(long long *directory_table_end) { int res, i; int bytes = SQUASHFS_FRAGMENT_BYTES(sBlk.s.fragments); int indexes = SQUASHFS_FRAGMENT_INDEXES(sBlk.s.fragments); long long fragment_table_index[indexes]; TRACE("read_fragment_table: %d fragments, reading %d fragment indexes " "from 0x%llx\n", sBlk.s.fragments, indexes, sBlk.s.fragment_table_start); if(sBlk.s.fragments == 0) { *directory_table_end = sBlk.s.fragment_table_start; return TRUE; } fragment_table = malloc(bytes); if(fragment_table == NULL) EXIT_UNSQUASH("read_fragment_table: failed to allocate " "fragment table\n"); res = read_fs_bytes(fd, sBlk.s.fragment_table_start, SQUASHFS_FRAGMENT_INDEX_BYTES(sBlk.s.fragments), fragment_table_index); if(res == FALSE) { ERROR("read_fragment_table: failed to read fragment table " "index\n"); return FALSE; } SQUASHFS_INSWAP_FRAGMENT_INDEXES(fragment_table_index, indexes); for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? 
SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); int length = read_block(fd, fragment_table_index[i], NULL, expected, ((char *) fragment_table) + (i * SQUASHFS_METADATA_SIZE)); TRACE("Read fragment table block %d, from 0x%llx, length %d\n", i, fragment_table_index[i], length); if(length == FALSE) { ERROR("read_fragment_table: failed to read fragment " "table index\n"); return FALSE; } } for(i = 0; i < sBlk.s.fragments; i++) SQUASHFS_INSWAP_FRAGMENT_ENTRY(&fragment_table[i]); *directory_table_end = fragment_table_index[0]; return TRUE; } void read_fragment_4(unsigned int fragment, long long *start_block, int *size) { TRACE("read_fragment: reading fragment %d\n", fragment); struct squashfs_fragment_entry *fragment_entry; fragment_entry = &fragment_table[fragment]; *start_block = fragment_entry->start_block; *size = fragment_entry->size; } struct inode *read_inode_4(unsigned int start_block, unsigned int offset) { static union squashfs_inode_header header; long long start = sBlk.s.inode_table_start + start_block; long long bytes = lookup_entry(inode_table_hash, start); char *block_ptr = inode_table + bytes + offset; static struct inode i; TRACE("read_inode: reading inode [%d:%d]\n", start_block, offset); if(bytes == -1) EXIT_UNSQUASH("read_inode: inode table block %lld not found\n", start); SQUASHFS_SWAP_BASE_INODE_HEADER(block_ptr, &header.base); i.uid = (uid_t) id_table[header.base.uid]; i.gid = (uid_t) id_table[header.base.guid]; i.mode = lookup_type[header.base.inode_type] | header.base.mode; i.type = header.base.inode_type; i.time = header.base.mtime; i.inode_number = header.base.inode_number; switch(header.base.inode_type) { case SQUASHFS_DIR_TYPE: { struct squashfs_dir_inode_header *inode = &header.dir; SQUASHFS_SWAP_DIR_INODE_HEADER(block_ptr, inode); i.data = inode->file_size; i.offset = inode->offset; i.start = inode->start_block; i.xattr = SQUASHFS_INVALID_XATTR; break; } case SQUASHFS_LDIR_TYPE: { struct squashfs_ldir_inode_header *inode = &header.ldir; SQUASHFS_SWAP_LDIR_INODE_HEADER(block_ptr, inode); i.data = inode->file_size; i.offset = inode->offset; i.start = inode->start_block; i.xattr = inode->xattr; break; } case SQUASHFS_FILE_TYPE: { struct squashfs_reg_inode_header *inode = &header.reg; SQUASHFS_SWAP_REG_INODE_HEADER(block_ptr, inode); i.data = inode->file_size; i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG ? 0 : inode->file_size % sBlk.s.block_size; i.fragment = inode->fragment; i.offset = inode->offset; i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ? (i.data + sBlk.s.block_size - 1) >> sBlk.s.block_log : i.data >> sBlk.s.block_log; i.start = inode->start_block; i.sparse = 0; i.block_ptr = block_ptr + sizeof(*inode); i.xattr = SQUASHFS_INVALID_XATTR; break; } case SQUASHFS_LREG_TYPE: { struct squashfs_lreg_inode_header *inode = &header.lreg; SQUASHFS_SWAP_LREG_INODE_HEADER(block_ptr, inode); i.data = inode->file_size; i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG ? 0 : inode->file_size % sBlk.s.block_size; i.fragment = inode->fragment; i.offset = inode->offset; i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ? 
(inode->file_size + sBlk.s.block_size - 1) >> sBlk.s.block_log : inode->file_size >> sBlk.s.block_log; i.start = inode->start_block; i.sparse = inode->sparse != 0; i.block_ptr = block_ptr + sizeof(*inode); i.xattr = inode->xattr; break; } case SQUASHFS_SYMLINK_TYPE: case SQUASHFS_LSYMLINK_TYPE: { struct squashfs_symlink_inode_header *inode = &header.symlink; SQUASHFS_SWAP_SYMLINK_INODE_HEADER(block_ptr, inode); i.symlink = malloc(inode->symlink_size + 1); if(i.symlink == NULL) EXIT_UNSQUASH("read_inode: failed to malloc " "symlink data\n"); strncpy(i.symlink, block_ptr + sizeof(struct squashfs_symlink_inode_header), inode->symlink_size); i.symlink[inode->symlink_size] = '\0'; i.data = inode->symlink_size; if(header.base.inode_type == SQUASHFS_LSYMLINK_TYPE) SQUASHFS_SWAP_INTS(block_ptr + sizeof(struct squashfs_symlink_inode_header) + inode->symlink_size, &i.xattr, 1); else i.xattr = SQUASHFS_INVALID_XATTR; break; } case SQUASHFS_BLKDEV_TYPE: case SQUASHFS_CHRDEV_TYPE: { struct squashfs_dev_inode_header *inode = &header.dev; SQUASHFS_SWAP_DEV_INODE_HEADER(block_ptr, inode); i.data = inode->rdev; i.xattr = SQUASHFS_INVALID_XATTR; break; } case SQUASHFS_LBLKDEV_TYPE: case SQUASHFS_LCHRDEV_TYPE: { struct squashfs_ldev_inode_header *inode = &header.ldev; SQUASHFS_SWAP_LDEV_INODE_HEADER(block_ptr, inode); i.data = inode->rdev; i.xattr = inode->xattr; break; } case SQUASHFS_FIFO_TYPE: case SQUASHFS_SOCKET_TYPE: i.data = 0; i.xattr = SQUASHFS_INVALID_XATTR; break; case SQUASHFS_LFIFO_TYPE: case SQUASHFS_LSOCKET_TYPE: { struct squashfs_lipc_inode_header *inode = &header.lipc; SQUASHFS_SWAP_LIPC_INODE_HEADER(block_ptr, inode); i.data = 0; i.xattr = inode->xattr; break; } default: EXIT_UNSQUASH("Unknown inode type %d in read_inode!\n", header.base.inode_type); } return &i; } struct dir *squashfs_opendir_4(unsigned int block_start, unsigned int offset, struct inode **i) { struct squashfs_dir_header dirh; char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN + 1] __attribute__((aligned)); struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer; long long start; long long bytes; int dir_count, size; struct dir_ent *new_dir; struct dir *dir; TRACE("squashfs_opendir: inode start block %d, offset %d\n", block_start, offset); *i = s_ops.read_inode(block_start, offset); dir = malloc(sizeof(struct dir)); if(dir == NULL) EXIT_UNSQUASH("squashfs_opendir: malloc failed!\n"); dir->dir_count = 0; dir->cur_entry = 0; dir->mode = (*i)->mode; dir->uid = (*i)->uid; dir->guid = (*i)->gid; dir->mtime = (*i)->time; dir->xattr = (*i)->xattr; dir->dirs = NULL; if ((*i)->data == 3) /* * if the directory is empty, skip the unnecessary * lookup_entry, this fixes the corner case with * completely empty filesystems where lookup_entry correctly * returning -1 is incorrectly treated as an error */ return dir; start = sBlk.s.directory_table_start + (*i)->start; bytes = lookup_entry(directory_table_hash, start); if(bytes == -1) EXIT_UNSQUASH("squashfs_opendir: directory block %lld not " "found!\n", start); bytes += (*i)->offset; size = (*i)->data + bytes - 3; while(bytes < size) { SQUASHFS_SWAP_DIR_HEADER(directory_table + bytes, &dirh); dir_count = dirh.count + 1; TRACE("squashfs_opendir: Read directory header @ byte position " "%d, %d directory entries\n", bytes, dir_count); bytes += sizeof(dirh); /* dir_count should never be larger than SQUASHFS_DIR_COUNT */ if(dir_count > SQUASHFS_DIR_COUNT) { ERROR("File system corrupted: too many entries in directory\n"); goto corrupted; } while(dir_count--) { 
SQUASHFS_SWAP_DIR_ENTRY(directory_table + bytes, dire); bytes += sizeof(*dire); /* size should never be SQUASHFS_NAME_LEN or larger */ if(dire->size >= SQUASHFS_NAME_LEN) { ERROR("File system corrupted: filename too long\n"); goto corrupted; } memcpy(dire->name, directory_table + bytes, dire->size + 1); dire->name[dire->size + 1] = '\0'; TRACE("squashfs_opendir: directory entry %s, inode " "%d:%d, type %d\n", dire->name, dirh.start_block, dire->offset, dire->type); if((dir->dir_count % DIR_ENT_SIZE) == 0) { new_dir = realloc(dir->dirs, (dir->dir_count + DIR_ENT_SIZE) * sizeof(struct dir_ent)); if(new_dir == NULL) EXIT_UNSQUASH("squashfs_opendir: " "realloc failed!\n"); dir->dirs = new_dir; } strcpy(dir->dirs[dir->dir_count].name, dire->name); dir->dirs[dir->dir_count].start_block = dirh.start_block; dir->dirs[dir->dir_count].offset = dire->offset; dir->dirs[dir->dir_count].type = dire->type; dir->dir_count ++; bytes += dire->size + 1; } } return dir; corrupted: free(dir->dirs); free(dir); return NULL; } static int read_uids_guids(long long *table_start) { int res, i; int bytes = SQUASHFS_ID_BYTES(sBlk.s.no_ids); int indexes = SQUASHFS_ID_BLOCKS(sBlk.s.no_ids); long long id_index_table[indexes]; TRACE("read_uids_guids: no_ids %d\n", sBlk.s.no_ids); id_table = malloc(bytes); if(id_table == NULL) { ERROR("read_uids_guids: failed to allocate id table\n"); return FALSE; } res = read_fs_bytes(fd, sBlk.s.id_table_start, SQUASHFS_ID_BLOCK_BYTES(sBlk.s.no_ids), id_index_table); if(res == FALSE) { ERROR("read_uids_guids: failed to read id index table\n"); return FALSE; } SQUASHFS_INSWAP_ID_BLOCKS(id_index_table, indexes); /* * id_index_table[0] stores the start of the compressed id blocks. * This by definition is also the end of the previous filesystem * table - this may be the exports table if it is present, or the * fragments table if it isn't. */ *table_start = id_index_table[0]; for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); res = read_block(fd, id_index_table[i], NULL, expected, ((char *) id_table) + i * SQUASHFS_METADATA_SIZE); if(res == FALSE) { ERROR("read_uids_guids: failed to read id table block" "\n"); return FALSE; } } SQUASHFS_INSWAP_INTS(id_table, sBlk.s.no_ids); return TRUE; } static int parse_exports_table(long long *table_start) { int res; int indexes = SQUASHFS_LOOKUP_BLOCKS(sBlk.s.inodes); long long export_index_table[indexes]; res = read_fs_bytes(fd, sBlk.s.lookup_table_start, SQUASHFS_LOOKUP_BLOCK_BYTES(sBlk.s.inodes), export_index_table); if(res == FALSE) { ERROR("parse_exports_table: failed to read export index table\n"); return FALSE; } SQUASHFS_INSWAP_LOOKUP_BLOCKS(export_index_table, indexes); /* * export_index_table[0] stores the start of the compressed export blocks. * This by definition is also the end of the previous filesystem * table - the fragment table. 
*/ *table_start = export_index_table[0]; return TRUE; } int read_filesystem_tables_4() { long long directory_table_end, table_start; if(read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start) == 0) return FALSE; if(read_uids_guids(&table_start) == FALSE) return FALSE; if(parse_exports_table(&table_start) == FALSE) return FALSE; if(read_fragment_table(&directory_table_end) == FALSE) return FALSE; if(read_inode_table(sBlk.s.inode_table_start, sBlk.s.directory_table_start) == FALSE) return FALSE; if(read_directory_table(sBlk.s.directory_table_start, directory_table_end) == FALSE) return FALSE; if(no_xattrs) sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK; return TRUE; }
/* * Unsquash a squashfs filesystem. This is a highly compressed read only * filesystem. * * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2019 * Phillip Lougher <phillip@squashfs.org.uk> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2, * or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * unsquash-4.c */ #include "unsquashfs.h" #include "squashfs_swap.h" #include "xattr.h" static struct squashfs_fragment_entry *fragment_table; static unsigned int *id_table; long long *alloc_index_table(int indexes) { static long long *alloc_table = NULL; static int alloc_size = 0; int length = indexes * sizeof(long long); if(alloc_size < length) { long long *table = realloc(alloc_table, length); if(table == NULL) EXIT_UNSQUASH("alloc_index_table: failed to allocate " "index table\n"); alloc_table = table; alloc_size = length; } return alloc_table; } static int read_fragment_table(long long *table_start) { /* * Note on overflow limits: * Size of SBlk.s.fragments is 2^32 (unsigned int) * Max size of bytes is 2^32*16 or 2^36 * Max indexes is (2^32*16)/8K or 2^23 * Max length is ((2^32*16)/8K)*8 or 2^26 or 64M */ int res, i; long long bytes = SQUASHFS_FRAGMENT_BYTES((long long) sBlk.s.fragments); int indexes = SQUASHFS_FRAGMENT_INDEXES((long long) sBlk.s.fragments); int length = SQUASHFS_FRAGMENT_INDEX_BYTES((long long) sBlk.s.fragments); long long *fragment_table_index; /* * The size of the index table (length bytes) should match the * table start and end points */ if(length != (*table_start - sBlk.s.fragment_table_start)) { ERROR("read_fragment_table: Bad fragment count in super block\n"); return FALSE; } TRACE("read_fragment_table: %d fragments, reading %d fragment indexes " "from 0x%llx\n", sBlk.s.fragments, indexes, sBlk.s.fragment_table_start); fragment_table_index = alloc_index_table(indexes); fragment_table = malloc(bytes); if(fragment_table == NULL) EXIT_UNSQUASH("read_fragment_table: failed to allocate " "fragment table\n"); res = read_fs_bytes(fd, sBlk.s.fragment_table_start, length, fragment_table_index); if(res == FALSE) { ERROR("read_fragment_table: failed to read fragment table " "index\n"); return FALSE; } SQUASHFS_INSWAP_FRAGMENT_INDEXES(fragment_table_index, indexes); for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? 
SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); int length = read_block(fd, fragment_table_index[i], NULL, expected, ((char *) fragment_table) + (i * SQUASHFS_METADATA_SIZE)); TRACE("Read fragment table block %d, from 0x%llx, length %d\n", i, fragment_table_index[i], length); if(length == FALSE) { ERROR("read_fragment_table: failed to read fragment " "table index\n"); return FALSE; } } for(i = 0; i < sBlk.s.fragments; i++) SQUASHFS_INSWAP_FRAGMENT_ENTRY(&fragment_table[i]); *table_start = fragment_table_index[0]; return TRUE; } void read_fragment_4(unsigned int fragment, long long *start_block, int *size) { TRACE("read_fragment: reading fragment %d\n", fragment); struct squashfs_fragment_entry *fragment_entry; fragment_entry = &fragment_table[fragment]; *start_block = fragment_entry->start_block; *size = fragment_entry->size; } struct inode *read_inode_4(unsigned int start_block, unsigned int offset) { static union squashfs_inode_header header; long long start = sBlk.s.inode_table_start + start_block; long long bytes = lookup_entry(inode_table_hash, start); char *block_ptr = inode_table + bytes + offset; static struct inode i; TRACE("read_inode: reading inode [%d:%d]\n", start_block, offset); if(bytes == -1) EXIT_UNSQUASH("read_inode: inode table block %lld not found\n", start); SQUASHFS_SWAP_BASE_INODE_HEADER(block_ptr, &header.base); i.uid = (uid_t) id_table[header.base.uid]; i.gid = (uid_t) id_table[header.base.guid]; i.mode = lookup_type[header.base.inode_type] | header.base.mode; i.type = header.base.inode_type; i.time = header.base.mtime; i.inode_number = header.base.inode_number; switch(header.base.inode_type) { case SQUASHFS_DIR_TYPE: { struct squashfs_dir_inode_header *inode = &header.dir; SQUASHFS_SWAP_DIR_INODE_HEADER(block_ptr, inode); i.data = inode->file_size; i.offset = inode->offset; i.start = inode->start_block; i.xattr = SQUASHFS_INVALID_XATTR; break; } case SQUASHFS_LDIR_TYPE: { struct squashfs_ldir_inode_header *inode = &header.ldir; SQUASHFS_SWAP_LDIR_INODE_HEADER(block_ptr, inode); i.data = inode->file_size; i.offset = inode->offset; i.start = inode->start_block; i.xattr = inode->xattr; break; } case SQUASHFS_FILE_TYPE: { struct squashfs_reg_inode_header *inode = &header.reg; SQUASHFS_SWAP_REG_INODE_HEADER(block_ptr, inode); i.data = inode->file_size; i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG ? 0 : inode->file_size % sBlk.s.block_size; i.fragment = inode->fragment; i.offset = inode->offset; i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ? (i.data + sBlk.s.block_size - 1) >> sBlk.s.block_log : i.data >> sBlk.s.block_log; i.start = inode->start_block; i.sparse = 0; i.block_ptr = block_ptr + sizeof(*inode); i.xattr = SQUASHFS_INVALID_XATTR; break; } case SQUASHFS_LREG_TYPE: { struct squashfs_lreg_inode_header *inode = &header.lreg; SQUASHFS_SWAP_LREG_INODE_HEADER(block_ptr, inode); i.data = inode->file_size; i.frag_bytes = inode->fragment == SQUASHFS_INVALID_FRAG ? 0 : inode->file_size % sBlk.s.block_size; i.fragment = inode->fragment; i.offset = inode->offset; i.blocks = inode->fragment == SQUASHFS_INVALID_FRAG ? 
(inode->file_size + sBlk.s.block_size - 1) >> sBlk.s.block_log : inode->file_size >> sBlk.s.block_log; i.start = inode->start_block; i.sparse = inode->sparse != 0; i.block_ptr = block_ptr + sizeof(*inode); i.xattr = inode->xattr; break; } case SQUASHFS_SYMLINK_TYPE: case SQUASHFS_LSYMLINK_TYPE: { struct squashfs_symlink_inode_header *inode = &header.symlink; SQUASHFS_SWAP_SYMLINK_INODE_HEADER(block_ptr, inode); i.symlink = malloc(inode->symlink_size + 1); if(i.symlink == NULL) EXIT_UNSQUASH("read_inode: failed to malloc " "symlink data\n"); strncpy(i.symlink, block_ptr + sizeof(struct squashfs_symlink_inode_header), inode->symlink_size); i.symlink[inode->symlink_size] = '\0'; i.data = inode->symlink_size; if(header.base.inode_type == SQUASHFS_LSYMLINK_TYPE) SQUASHFS_SWAP_INTS(block_ptr + sizeof(struct squashfs_symlink_inode_header) + inode->symlink_size, &i.xattr, 1); else i.xattr = SQUASHFS_INVALID_XATTR; break; } case SQUASHFS_BLKDEV_TYPE: case SQUASHFS_CHRDEV_TYPE: { struct squashfs_dev_inode_header *inode = &header.dev; SQUASHFS_SWAP_DEV_INODE_HEADER(block_ptr, inode); i.data = inode->rdev; i.xattr = SQUASHFS_INVALID_XATTR; break; } case SQUASHFS_LBLKDEV_TYPE: case SQUASHFS_LCHRDEV_TYPE: { struct squashfs_ldev_inode_header *inode = &header.ldev; SQUASHFS_SWAP_LDEV_INODE_HEADER(block_ptr, inode); i.data = inode->rdev; i.xattr = inode->xattr; break; } case SQUASHFS_FIFO_TYPE: case SQUASHFS_SOCKET_TYPE: i.data = 0; i.xattr = SQUASHFS_INVALID_XATTR; break; case SQUASHFS_LFIFO_TYPE: case SQUASHFS_LSOCKET_TYPE: { struct squashfs_lipc_inode_header *inode = &header.lipc; SQUASHFS_SWAP_LIPC_INODE_HEADER(block_ptr, inode); i.data = 0; i.xattr = inode->xattr; break; } default: EXIT_UNSQUASH("Unknown inode type %d in read_inode!\n", header.base.inode_type); } return &i; } struct dir *squashfs_opendir_4(unsigned int block_start, unsigned int offset, struct inode **i) { struct squashfs_dir_header dirh; char buffer[sizeof(struct squashfs_dir_entry) + SQUASHFS_NAME_LEN + 1] __attribute__((aligned)); struct squashfs_dir_entry *dire = (struct squashfs_dir_entry *) buffer; long long start; long long bytes; int dir_count, size; struct dir_ent *new_dir; struct dir *dir; TRACE("squashfs_opendir: inode start block %d, offset %d\n", block_start, offset); *i = s_ops.read_inode(block_start, offset); dir = malloc(sizeof(struct dir)); if(dir == NULL) EXIT_UNSQUASH("squashfs_opendir: malloc failed!\n"); dir->dir_count = 0; dir->cur_entry = 0; dir->mode = (*i)->mode; dir->uid = (*i)->uid; dir->guid = (*i)->gid; dir->mtime = (*i)->time; dir->xattr = (*i)->xattr; dir->dirs = NULL; if ((*i)->data == 3) /* * if the directory is empty, skip the unnecessary * lookup_entry, this fixes the corner case with * completely empty filesystems where lookup_entry correctly * returning -1 is incorrectly treated as an error */ return dir; start = sBlk.s.directory_table_start + (*i)->start; bytes = lookup_entry(directory_table_hash, start); if(bytes == -1) EXIT_UNSQUASH("squashfs_opendir: directory block %lld not " "found!\n", start); bytes += (*i)->offset; size = (*i)->data + bytes - 3; while(bytes < size) { SQUASHFS_SWAP_DIR_HEADER(directory_table + bytes, &dirh); dir_count = dirh.count + 1; TRACE("squashfs_opendir: Read directory header @ byte position " "%d, %d directory entries\n", bytes, dir_count); bytes += sizeof(dirh); /* dir_count should never be larger than SQUASHFS_DIR_COUNT */ if(dir_count > SQUASHFS_DIR_COUNT) { ERROR("File system corrupted: too many entries in directory\n"); goto corrupted; } while(dir_count--) { 
SQUASHFS_SWAP_DIR_ENTRY(directory_table + bytes, dire); bytes += sizeof(*dire); /* size should never be SQUASHFS_NAME_LEN or larger */ if(dire->size >= SQUASHFS_NAME_LEN) { ERROR("File system corrupted: filename too long\n"); goto corrupted; } memcpy(dire->name, directory_table + bytes, dire->size + 1); dire->name[dire->size + 1] = '\0'; TRACE("squashfs_opendir: directory entry %s, inode " "%d:%d, type %d\n", dire->name, dirh.start_block, dire->offset, dire->type); if((dir->dir_count % DIR_ENT_SIZE) == 0) { new_dir = realloc(dir->dirs, (dir->dir_count + DIR_ENT_SIZE) * sizeof(struct dir_ent)); if(new_dir == NULL) EXIT_UNSQUASH("squashfs_opendir: " "realloc failed!\n"); dir->dirs = new_dir; } strcpy(dir->dirs[dir->dir_count].name, dire->name); dir->dirs[dir->dir_count].start_block = dirh.start_block; dir->dirs[dir->dir_count].offset = dire->offset; dir->dirs[dir->dir_count].type = dire->type; dir->dir_count ++; bytes += dire->size + 1; } } return dir; corrupted: free(dir->dirs); free(dir); return NULL; } static int read_id_table(long long *table_start) { /* * Note on overflow limits: * Size of SBlk.s.no_ids is 2^16 (unsigned short) * Max size of bytes is 2^16*4 or 256K * Max indexes is (2^16*4)/8K or 32 * Max length is ((2^16*4)/8K)*8 or 256 */ int res, i; int bytes = SQUASHFS_ID_BYTES(sBlk.s.no_ids); int indexes = SQUASHFS_ID_BLOCKS(sBlk.s.no_ids); int length = SQUASHFS_ID_BLOCK_BYTES(sBlk.s.no_ids); long long *id_index_table; /* * The size of the index table (length bytes) should match the * table start and end points */ if(length != (*table_start - sBlk.s.id_table_start)) { ERROR("read_id_table: Bad id count in super block\n"); return FALSE; } TRACE("read_id_table: no_ids %d\n", sBlk.s.no_ids); id_index_table = alloc_index_table(indexes); id_table = malloc(bytes); if(id_table == NULL) { ERROR("read_id_table: failed to allocate id table\n"); return FALSE; } res = read_fs_bytes(fd, sBlk.s.id_table_start, length, id_index_table); if(res == FALSE) { ERROR("read_id_table: failed to read id index table\n"); return FALSE; } SQUASHFS_INSWAP_ID_BLOCKS(id_index_table, indexes); /* * id_index_table[0] stores the start of the compressed id blocks. * This by definition is also the end of the previous filesystem * table - this may be the exports table if it is present, or the * fragments table if it isn't. */ *table_start = id_index_table[0]; for(i = 0; i < indexes; i++) { int expected = (i + 1) != indexes ? 
SQUASHFS_METADATA_SIZE : bytes & (SQUASHFS_METADATA_SIZE - 1); res = read_block(fd, id_index_table[i], NULL, expected, ((char *) id_table) + i * SQUASHFS_METADATA_SIZE); if(res == FALSE) { ERROR("read_id_table: failed to read id table block" "\n"); return FALSE; } } SQUASHFS_INSWAP_INTS(id_table, sBlk.s.no_ids); return TRUE; } static int parse_exports_table(long long *table_start) { /* * Note on overflow limits: * Size of SBlk.s.inodes is 2^32 (unsigned int) * Max indexes is (2^32*8)/8K or 2^22 * Max length is ((2^32*8)/8K)*8 or 2^25 */ int res; int indexes = SQUASHFS_LOOKUP_BLOCKS((long long) sBlk.s.inodes); int length = SQUASHFS_LOOKUP_BLOCK_BYTES((long long) sBlk.s.inodes); long long *export_index_table; /* * The size of the index table (length bytes) should match the * table start and end points */ if(length != (*table_start - sBlk.s.lookup_table_start)) { ERROR("parse_exports_table: Bad inode count in super block\n"); return FALSE; } export_index_table = alloc_index_table(indexes); res = read_fs_bytes(fd, sBlk.s.lookup_table_start, length, export_index_table); if(res == FALSE) { ERROR("parse_exports_table: failed to read export index table\n"); return FALSE; } SQUASHFS_INSWAP_LOOKUP_BLOCKS(export_index_table, indexes); /* * export_index_table[0] stores the start of the compressed export blocks. * This by definition is also the end of the previous filesystem * table - the fragment table. */ *table_start = export_index_table[0]; return TRUE; } int read_filesystem_tables_4() { long long table_start; /* Read xattrs */ if(sBlk.s.xattr_id_table_start != SQUASHFS_INVALID_BLK) { /* sanity check super block contents */ if(sBlk.s.xattr_id_table_start >= sBlk.s.bytes_used) { ERROR("read_filesystem_tables: xattr id table start too large in super block\n"); goto corrupted; } if(read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start) == 0) goto corrupted; } else table_start = sBlk.s.bytes_used; /* Read id lookup table */ /* Sanity check super block contents */ if(sBlk.s.id_table_start >= table_start) { ERROR("read_filesystem_tables: id table start too large in super block\n"); goto corrupted; } /* there should always be at least one id */ if(sBlk.s.no_ids == 0) { ERROR("read_filesystem_tables: Bad id count in super block\n"); goto corrupted; } /* * the number of ids can never be more than double the number of inodes * (the maximum is a unique uid and gid for each inode). 
*/ if(sBlk.s.no_ids > (sBlk.s.inodes * 2L)) { ERROR("read_filesystem_tables: Bad id count in super block\n"); goto corrupted; } if(read_id_table(&table_start) == FALSE) goto corrupted; /* Read exports table */ if(sBlk.s.lookup_table_start != SQUASHFS_INVALID_BLK) { /* sanity check super block contents */ if(sBlk.s.lookup_table_start >= table_start) { ERROR("read_filesystem_tables: lookup table start too large in super block\n"); goto corrupted; } if(parse_exports_table(&table_start) == FALSE) goto corrupted; } /* Read fragment table */ if(sBlk.s.fragments != 0) { /* Sanity check super block contents */ if(sBlk.s.fragment_table_start >= table_start) { ERROR("read_filesystem_tables: fragment table start too large in super block\n"); goto corrupted; } /* The number of fragments should not exceed the number of inodes */ if(sBlk.s.fragments > sBlk.s.inodes) { ERROR("read_filesystem_tables: Bad fragment count in super block\n"); goto corrupted; } if(read_fragment_table(&table_start) == FALSE) goto corrupted; } else { /* * Sanity check super block contents - with 0 fragments, * the fragment table should be empty */ if(sBlk.s.fragment_table_start != table_start) { ERROR("read_filesystem_tables: fragment table start invalid in super block\n"); goto corrupted; } } /* Read directory table */ /* Sanity check super block contents */ if(sBlk.s.directory_table_start >= table_start) { ERROR("read_filesystem_tables: directory table start too large in super block\n"); goto corrupted; } if(read_directory_table(sBlk.s.directory_table_start, table_start) == FALSE) goto corrupted; /* Read inode table */ /* Sanity check super block contents */ if(sBlk.s.inode_table_start >= sBlk.s.directory_table_start) { ERROR("read_filesystem_tables: inode table start too large in super block\n"); goto corrupted; } if(read_inode_table(sBlk.s.inode_table_start, sBlk.s.directory_table_start) == FALSE) goto corrupted; if(no_xattrs) sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK; return TRUE; corrupted: ERROR("File system corruption detected\n"); return FALSE; }
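Two ingredients of the unsquash-4.c patch are visible in the rewritten source above. First, the pre-patch readers placed variable-length arrays such as `long long fragment_table_index[indexes]` on the stack, with `indexes` derived from untrusted superblock counts; the patch funnels every index table through the grow-only heap buffer `alloc_index_table()`. A self-contained sketch of that allocator pattern, with a plain `exit` standing in for squashfs-tools' `EXIT_UNSQUASH`:

#include <stdio.h>
#include <stdlib.h>

/* Grow-only scratch buffer shared by all table readers: reallocate only
 * when a caller needs more bytes than any previous caller did. */
static long long *alloc_index_table(int indexes)
{
    static long long *table = NULL;
    static int size = 0;
    int length = indexes * (int)sizeof(long long);

    if (size < length) {
        long long *grown = realloc(table, length);
        if (grown == NULL) {
            perror("alloc_index_table");
            exit(EXIT_FAILURE);
        }
        table = grown;
        size = length;
    }
    return table;
}

int main(void)
{
    long long *idx = alloc_index_table(4);   /* allocates 32 bytes */
    idx[3] = 42;
    idx = alloc_index_table(2);              /* smaller request reuses buffer */
    printf("%lld\n", idx[3]);                /* still 42 */
    return 0;
}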
int read_filesystem_tables_4() { long long directory_table_end, table_start; if(read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start) == 0) return FALSE; if(read_uids_guids(&table_start) == FALSE) return FALSE; if(parse_exports_table(&table_start) == FALSE) return FALSE; if(read_fragment_table(&directory_table_end) == FALSE) return FALSE; if(read_inode_table(sBlk.s.inode_table_start, sBlk.s.directory_table_start) == FALSE) return FALSE; if(read_directory_table(sBlk.s.directory_table_start, directory_table_end) == FALSE) return FALSE; if(no_xattrs) sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK; return TRUE; }
int read_filesystem_tables_4() { long long table_start; /* Read xattrs */ if(sBlk.s.xattr_id_table_start != SQUASHFS_INVALID_BLK) { /* sanity check super block contents */ if(sBlk.s.xattr_id_table_start >= sBlk.s.bytes_used) { ERROR("read_filesystem_tables: xattr id table start too large in super block\n"); goto corrupted; } if(read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start) == 0) goto corrupted; } else table_start = sBlk.s.bytes_used; /* Read id lookup table */ /* Sanity check super block contents */ if(sBlk.s.id_table_start >= table_start) { ERROR("read_filesystem_tables: id table start too large in super block\n"); goto corrupted; } /* there should always be at least one id */ if(sBlk.s.no_ids == 0) { ERROR("read_filesystem_tables: Bad id count in super block\n"); goto corrupted; } /* * the number of ids can never be more than double the number of inodes * (the maximum is a unique uid and gid for each inode). */ if(sBlk.s.no_ids > (sBlk.s.inodes * 2L)) { ERROR("read_filesystem_tables: Bad id count in super block\n"); goto corrupted; } if(read_id_table(&table_start) == FALSE) goto corrupted; /* Read exports table */ if(sBlk.s.lookup_table_start != SQUASHFS_INVALID_BLK) { /* sanity check super block contents */ if(sBlk.s.lookup_table_start >= table_start) { ERROR("read_filesystem_tables: lookup table start too large in super block\n"); goto corrupted; } if(parse_exports_table(&table_start) == FALSE) goto corrupted; } /* Read fragment table */ if(sBlk.s.fragments != 0) { /* Sanity check super block contents */ if(sBlk.s.fragment_table_start >= table_start) { ERROR("read_filesystem_tables: fragment table start too large in super block\n"); goto corrupted; } /* The number of fragments should not exceed the number of inodes */ if(sBlk.s.fragments > sBlk.s.inodes) { ERROR("read_filesystem_tables: Bad fragment count in super block\n"); goto corrupted; } if(read_fragment_table(&table_start) == FALSE) goto corrupted; } else { /* * Sanity check super block contents - with 0 fragments, * the fragment table should be empty */ if(sBlk.s.fragment_table_start != table_start) { ERROR("read_filesystem_tables: fragment table start invalid in super block\n"); goto corrupted; } } /* Read directory table */ /* Sanity check super block contents */ if(sBlk.s.directory_table_start >= table_start) { ERROR("read_filesystem_tables: directory table start too large in super block\n"); goto corrupted; } if(read_directory_table(sBlk.s.directory_table_start, table_start) == FALSE) goto corrupted; /* Read inode table */ /* Sanity check super block contents */ if(sBlk.s.inode_table_start >= sBlk.s.directory_table_start) { ERROR("read_filesystem_tables: inode table start too large in super block\n"); goto corrupted; } if(read_inode_table(sBlk.s.inode_table_start, sBlk.s.directory_table_start) == FALSE) goto corrupted; if(no_xattrs) sBlk.s.xattr_id_table_start = SQUASHFS_INVALID_BLK; return TRUE; corrupted: ERROR("File system corruption detected\n"); return FALSE; }
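Second, each table reader now refuses the image unless the index size implied by the superblock count exactly fills the gap between the table's start offset and the start of the table that follows it — the same invariant the patch enforces for the fragment, id, and export tables. A compact sketch of that check; the macros below are simplified stand-ins for the SQUASHFS_*_BYTES helpers (16-byte entries and the 8K metadata block size are assumptions for the example):

#include <stdio.h>

#define METADATA_SIZE 8192
/* One 8-byte index slot per metadata block of table entries. */
#define INDEXES(count, entry) \
    (((long long)(count) * (entry) + METADATA_SIZE - 1) / METADATA_SIZE)
#define INDEX_BYTES(count, entry) (INDEXES(count, entry) * 8)

/* Validate that a table's index block exactly fills the space between
 * table_start and the start of the next table (next_start). */
static int check_table(const char *name, long long table_start,
                       long long next_start, long long count, int entry_size)
{
    long long length = INDEX_BYTES(count, entry_size);

    if (table_start >= next_start || length != next_start - table_start) {
        fprintf(stderr, "%s: bad count/offset in super block\n", name);
        return 0; /* corrupted: refuse to read the table */
    }
    return 1;
}

int main(void)
{
    /* 3 fragments of 16 bytes -> one metadata block -> one 8-byte index. */
    printf("%d\n", check_table("fragments", 1000, 1008, 3, 16)); /* 1: ok */
    printf("%d\n", check_table("fragments", 1000, 1024, 3, 16)); /* 0: gap mismatch */
    return 0;
}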
{'added': [(32, 'long long *alloc_index_table(int indexes)'), (34, '\tstatic long long *alloc_table = NULL;'), (35, '\tstatic int alloc_size = 0;'), (36, '\tint length = indexes * sizeof(long long);'), (37, ''), (38, '\tif(alloc_size < length) {'), (39, '\t\tlong long *table = realloc(alloc_table, length);'), (40, ''), (41, '\t\tif(table == NULL)'), (42, '\t\t\tEXIT_UNSQUASH("alloc_index_table: failed to allocate "'), (43, '\t\t\t\t"index table\\n");'), (44, ''), (45, '\t\talloc_table = table;'), (46, '\t\talloc_size = length;'), (47, '\t}'), (48, ''), (49, '\treturn alloc_table;'), (50, '}'), (51, ''), (52, ''), (53, 'static int read_fragment_table(long long *table_start)'), (54, '{'), (55, '\t/*'), (56, '\t * Note on overflow limits:'), (57, '\t * Size of SBlk.s.fragments is 2^32 (unsigned int)'), (58, '\t * Max size of bytes is 2^32*16 or 2^36'), (59, '\t * Max indexes is (2^32*16)/8K or 2^23'), (60, '\t * Max length is ((2^32*16)/8K)*8 or 2^26 or 64M'), (61, '\t */'), (63, '\tlong long bytes = SQUASHFS_FRAGMENT_BYTES((long long) sBlk.s.fragments);'), (64, '\tint indexes = SQUASHFS_FRAGMENT_INDEXES((long long) sBlk.s.fragments);'), (65, '\tint length = SQUASHFS_FRAGMENT_INDEX_BYTES((long long) sBlk.s.fragments);'), (66, '\tlong long *fragment_table_index;'), (67, ''), (68, '\t/*'), (69, '\t * The size of the index table (length bytes) should match the'), (70, '\t * table start and end points'), (71, '\t */'), (72, '\tif(length != (*table_start - sBlk.s.fragment_table_start)) {'), (73, '\t\tERROR("read_fragment_table: Bad fragment count in super block\\n");'), (74, '\t\treturn FALSE;'), (75, '\t}'), (81, '\tfragment_table_index = alloc_index_table(indexes);'), (87, '\tres = read_fs_bytes(fd, sBlk.s.fragment_table_start, length,'), (88, '\t\t\t\t\t\t\tfragment_table_index);'), (114, '\t*table_start = fragment_table_index[0];'), (392, 'static int read_id_table(long long *table_start)'), (394, '\t/*'), (395, '\t * Note on overflow limits:'), (396, '\t * Size of SBlk.s.no_ids is 2^16 (unsigned short)'), (397, '\t * Max size of bytes is 2^16*4 or 256K'), (398, '\t * Max indexes is (2^16*4)/8K or 32'), (399, '\t * Max length is ((2^16*4)/8K)*8 or 256'), (400, '\t */'), (404, '\tint length = SQUASHFS_ID_BLOCK_BYTES(sBlk.s.no_ids);'), (405, '\tlong long *id_index_table;'), (406, ''), (407, '\t/*'), (408, '\t * The size of the index table (length bytes) should match the'), (409, '\t * table start and end points'), (410, '\t */'), (411, '\tif(length != (*table_start - sBlk.s.id_table_start)) {'), (412, '\t\tERROR("read_id_table: Bad id count in super block\\n");'), (413, '\t\treturn FALSE;'), (414, '\t}'), (416, '\tTRACE("read_id_table: no_ids %d\\n", sBlk.s.no_ids);'), (418, '\tid_index_table = alloc_index_table(indexes);'), (421, '\t\tERROR("read_id_table: failed to allocate id table\\n");'), (425, '\tres = read_fs_bytes(fd, sBlk.s.id_table_start, length, id_index_table);'), (427, '\t\tERROR("read_id_table: failed to read id index table\\n");'), (446, '\t\t\tERROR("read_id_table: failed to read id table block"'), (460, '\t/*'), (461, '\t * Note on overflow limits:'), (462, '\t * Size of SBlk.s.inodes is 2^32 (unsigned int)'), (463, '\t * Max indexes is (2^32*8)/8K or 2^22'), (464, '\t * Max length is ((2^32*8)/8K)*8 or 2^25'), (465, '\t */'), (467, '\tint indexes = SQUASHFS_LOOKUP_BLOCKS((long long) sBlk.s.inodes);'), (468, '\tint length = SQUASHFS_LOOKUP_BLOCK_BYTES((long long) sBlk.s.inodes);'), (469, '\tlong long *export_index_table;'), (470, ''), (471, '\t/*'), (472, '\t * The size of the 
index table (length bytes) should match the'), (473, '\t * table start and end points'), (474, '\t */'), (475, '\tif(length != (*table_start - sBlk.s.lookup_table_start)) {'), (476, '\t\tERROR("parse_exports_table: Bad inode count in super block\\n");'), (477, '\t\treturn FALSE;'), (478, '\t}'), (480, '\texport_index_table = alloc_index_table(indexes);'), (481, ''), (482, '\tres = read_fs_bytes(fd, sBlk.s.lookup_table_start, length,'), (483, '\t\t\t\t\t\t\texport_index_table);'), (503, '\tlong long table_start;'), (505, '\t/* Read xattrs */'), (506, '\tif(sBlk.s.xattr_id_table_start != SQUASHFS_INVALID_BLK) {'), (507, '\t\t/* sanity check super block contents */'), (508, '\t\tif(sBlk.s.xattr_id_table_start >= sBlk.s.bytes_used) {'), (509, '\t\t\tERROR("read_filesystem_tables: xattr id table start too large in super block\\n");'), (510, '\t\t\tgoto corrupted;'), (511, '\t\t}'), (513, '\t\tif(read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start) == 0)'), (514, '\t\t\tgoto corrupted;'), (515, '\t} else'), (516, '\t\ttable_start = sBlk.s.bytes_used;'), (518, '\t/* Read id lookup table */'), (520, '\t/* Sanity check super block contents */'), (521, '\tif(sBlk.s.id_table_start >= table_start) {'), (522, '\t\tERROR("read_filesystem_tables: id table start too large in super block\\n");'), (523, '\t\tgoto corrupted;'), (524, '\t}'), (526, '\t/* there should always be at least one id */'), (527, '\tif(sBlk.s.no_ids == 0) {'), (528, '\t\tERROR("read_filesystem_tables: Bad id count in super block\\n");'), (529, '\t\tgoto corrupted;'), (530, '\t}'), (531, ''), (532, '\t/*'), (533, '\t * the number of ids can never be more than double the number of inodes'), (534, '\t * (the maximum is a unique uid and gid for each inode).'), (535, '\t */'), (536, '\tif(sBlk.s.no_ids > (sBlk.s.inodes * 2L)) {'), (537, '\t\tERROR("read_filesystem_tables: Bad id count in super block\\n");'), (538, '\t\tgoto corrupted;'), (539, '\t}'), (540, ''), (541, '\tif(read_id_table(&table_start) == FALSE)'), (542, '\t\tgoto corrupted;'), (543, ''), (544, '\t/* Read exports table */'), (545, '\tif(sBlk.s.lookup_table_start != SQUASHFS_INVALID_BLK) {'), (546, ''), (547, '\t\t/* sanity check super block contents */'), (548, '\t\tif(sBlk.s.lookup_table_start >= table_start) {'), (549, '\t\t\tERROR("read_filesystem_tables: lookup table start too large in super block\\n");'), (550, '\t\t\tgoto corrupted;'), (551, '\t\t}'), (552, ''), (553, '\t\tif(parse_exports_table(&table_start) == FALSE)'), (554, '\t\t\tgoto corrupted;'), (555, '\t}'), (556, ''), (557, '\t/* Read fragment table */'), (558, '\tif(sBlk.s.fragments != 0) {'), (559, ''), (560, '\t\t/* Sanity check super block contents */'), (561, '\t\tif(sBlk.s.fragment_table_start >= table_start) {'), (562, '\t\t\tERROR("read_filesystem_tables: fragment table start too large in super block\\n");'), (563, '\t\t\tgoto corrupted;'), (564, '\t\t}'), (565, ''), (566, '\t\t/* The number of fragments should not exceed the number of inodes */'), (567, '\t\tif(sBlk.s.fragments > sBlk.s.inodes) {'), (568, '\t\t\tERROR("read_filesystem_tables: Bad fragment count in super block\\n");'), (569, '\t\t\tgoto corrupted;'), (570, '\t\t}'), (571, ''), (572, '\t\tif(read_fragment_table(&table_start) == FALSE)'), (573, '\t\t\tgoto corrupted;'), (574, '\t} else {'), (575, '\t\t/*'), (576, '\t\t * Sanity check super block contents - with 0 fragments,'), (577, '\t\t * the fragment table should be empty'), (578, '\t\t */'), (579, '\t\tif(sBlk.s.fragment_table_start != table_start) {'), (580, 
'\t\t\tERROR("read_filesystem_tables: fragment table start invalid in super block\\n");'), (581, '\t\t\tgoto corrupted;'), (582, '\t\t}'), (583, '\t}'), (584, ''), (585, '\t/* Read directory table */'), (586, ''), (587, '\t/* Sanity check super block contents */'), (588, '\tif(sBlk.s.directory_table_start >= table_start) {'), (589, '\t\tERROR("read_filesystem_tables: directory table start too large in super block\\n");'), (590, '\t\tgoto corrupted;'), (591, '\t}'), (594, '\t\t\t\ttable_start) == FALSE)'), (595, '\t\tgoto corrupted;'), (596, ''), (597, '\t/* Read inode table */'), (598, ''), (599, '\t/* Sanity check super block contents */'), (600, '\tif(sBlk.s.inode_table_start >= sBlk.s.directory_table_start) {'), (601, '\t\tERROR("read_filesystem_tables: inode table start too large in super block\\n");'), (602, '\t\tgoto corrupted;'), (603, '\t}'), (604, ''), (605, '\tif(read_inode_table(sBlk.s.inode_table_start,'), (606, '\t\t\t\tsBlk.s.directory_table_start) == FALSE)'), (607, '\t\tgoto corrupted;'), (613, ''), (614, 'corrupted:'), (615, '\tERROR("File system corruption detected\\n");'), (616, '\treturn FALSE;')], 'deleted': [(32, 'static int read_fragment_table(long long *directory_table_end)'), (35, '\tint bytes = SQUASHFS_FRAGMENT_BYTES(sBlk.s.fragments);'), (36, '\tint indexes = SQUASHFS_FRAGMENT_INDEXES(sBlk.s.fragments);'), (37, '\tlong long fragment_table_index[indexes];'), (43, '\tif(sBlk.s.fragments == 0) {'), (44, '\t\t*directory_table_end = sBlk.s.fragment_table_start;'), (45, '\t\treturn TRUE;'), (46, '\t}'), (47, ''), (53, '\tres = read_fs_bytes(fd, sBlk.s.fragment_table_start,'), (54, '\t\tSQUASHFS_FRAGMENT_INDEX_BYTES(sBlk.s.fragments),'), (55, '\t\tfragment_table_index);'), (81, '\t*directory_table_end = fragment_table_index[0];'), (359, 'static int read_uids_guids(long long *table_start)'), (364, '\tlong long id_index_table[indexes];'), (366, '\tTRACE("read_uids_guids: no_ids %d\\n", sBlk.s.no_ids);'), (370, '\t\tERROR("read_uids_guids: failed to allocate id table\\n");'), (374, '\tres = read_fs_bytes(fd, sBlk.s.id_table_start,'), (375, '\t\tSQUASHFS_ID_BLOCK_BYTES(sBlk.s.no_ids), id_index_table);'), (377, '\t\tERROR("read_uids_guids: failed to read id index table\\n");'), (396, '\t\t\tERROR("read_uids_guids: failed to read id table block"'), (411, '\tint indexes = SQUASHFS_LOOKUP_BLOCKS(sBlk.s.inodes);'), (412, '\tlong long export_index_table[indexes];'), (414, '\tres = read_fs_bytes(fd, sBlk.s.lookup_table_start,'), (415, '\t\tSQUASHFS_LOOKUP_BLOCK_BYTES(sBlk.s.inodes), export_index_table);'), (435, '\tlong long directory_table_end, table_start;'), (437, '\tif(read_xattrs_from_disk(fd, &sBlk.s, no_xattrs, &table_start) == 0)'), (438, '\t\treturn FALSE;'), (440, '\tif(read_uids_guids(&table_start) == FALSE)'), (441, '\t\treturn FALSE;'), (443, '\tif(parse_exports_table(&table_start) == FALSE)'), (444, '\t\treturn FALSE;'), (446, '\tif(read_fragment_table(&directory_table_end) == FALSE)'), (447, '\t\treturn FALSE;'), (449, '\tif(read_inode_table(sBlk.s.inode_table_start,'), (450, '\t\t\t\tsBlk.s.directory_table_start) == FALSE)'), (451, '\t\treturn FALSE;'), (454, '\t\t\t\tdirectory_table_end) == FALSE)'), (455, '\t\treturn FALSE;')]}
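The "Note on overflow limits" comments added in this diff explain the (long long) casts: sBlk.s.fragments is a 32-bit count, so SQUASHFS_FRAGMENT_BYTES can reach 2^36 and wraps if evaluated in 32-bit int arithmetic, and the pre-fix code additionally sized an on-stack array (long long fragment_table_index[indexes]) from that untrusted count. A minimal standalone sketch of the wrap-around, assuming a 32-bit int and the 16-byte per-fragment on-disk entry size stated in the diff:

#include <stdio.h>

/* Simplified stand-in for the SQUASHFS_FRAGMENT_* size arithmetic;
 * per the diff's note, each fragment entry occupies 16 bytes. */
#define FRAGMENT_ENTRY_SIZE 16

int main(void)
{
	/* 2^28 entries, an attacker-controlled superblock field. */
	unsigned int fragments = 0x10000000u;

	/* Pre-fix pattern: the multiplication happens in 32-bit
	 * arithmetic and wraps to 0 before any widening occurs
	 * (assuming 32-bit int, as on the affected platforms). */
	int bad_bytes = (int)(fragments * FRAGMENT_ENTRY_SIZE);

	/* Post-fix pattern, mirroring the (long long) casts in the
	 * diff: widen first, then multiply. */
	long long good_bytes = (long long)fragments * FRAGMENT_ENTRY_SIZE;

	printf("32-bit result: %d\n", bad_bytes);	/* prints 0 */
	printf("64-bit result: %lld\n", good_bytes);	/* prints 4294967296 */
	return 0;
}

The fix also replaces the stack arrays with alloc_index_table(), a realloc-backed heap buffer, and rejects any computed index-table length that does not equal *table_start - sBlk.s.fragment_table_start.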
num_lines_added: 195
num_lines_deleted: 39
num_lines_in_file: 416
num_tokens_in_file: 2,722
num_lines_in_method: 21
num_tokens_in_method: 127
method_complexity: 8
repo: https://github.com/plougher/squashfs-tools
cve_id: CVE-2015-4645
cwe_id: CWE-190
id: 425
file_name: archive_read_support_format_iso9660.c
programming_language: C
method_name: choose_volume
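The code_before blob for this record opens with a header comment describing libarchive's single-pass strategy: directory entries are queued in a heap keyed by starting sector (heap_add_entry()/heap_get_entry() later in the file), and the reader always seeks forward to the earliest unread extent. A minimal sketch of that scheduling idea follows; it is not libarchive code, and all names in it are hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long long offset;	/* starting byte on disk */
	const char *name;
};

struct heap {
	struct entry **v;
	int used, cap;
};

/* Binary min-heap insert keyed by disk offset: sift the new entry
 * up past every parent with a larger offset. */
static void heap_push(struct heap *h, struct entry *e)
{
	int i;

	if (h->used == h->cap) {
		h->cap = h->cap ? h->cap * 2 : 16;
		h->v = realloc(h->v, h->cap * sizeof(*h->v));
		if (h->v == NULL)
			exit(1);
	}
	for (i = h->used++; i > 0; ) {
		int p = (i - 1) / 2;
		if (h->v[p]->offset <= e->offset)
			break;
		h->v[i] = h->v[p];
		i = p;
	}
	h->v[i] = e;
}

/* Pop the entry with the smallest offset, i.e. the earliest unread
 * extent, then sift the former last element down to restore order. */
static struct entry *heap_pop(struct heap *h)
{
	struct entry *top, *last;
	int i;

	if (h->used == 0)
		return NULL;
	top = h->v[0];
	last = h->v[--h->used];
	for (i = 0; ; ) {
		int c = 2 * i + 1;
		if (c >= h->used)
			break;
		if (c + 1 < h->used && h->v[c + 1]->offset < h->v[c]->offset)
			c++;
		if (last->offset <= h->v[c]->offset)
			break;
		h->v[i] = h->v[c];
		i = c;
	}
	if (h->used > 0)
		h->v[i] = last;
	return top;
}

int main(void)
{
	struct heap h = { NULL, 0, 0 };
	struct entry e1 = { 4096, "b" }, e2 = { 2048, "a" }, e3 = { 8192, "c" };
	struct entry *e;

	heap_push(&h, &e1);
	heap_push(&h, &e2);
	heap_push(&h, &e3);
	while ((e = heap_pop(&h)) != NULL)
		printf("%llu %s\n", e->offset, e->name);	/* a, b, c */
	free(h.v);
	return 0;
}

code_before: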
/*- * Copyright (c) 2003-2007 Tim Kientzle * Copyright (c) 2009 Andreas Henriksson <andreas@fatal.se> * Copyright (c) 2009-2012 Michihiro NAKAJIMA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "archive_platform.h" __FBSDID("$FreeBSD: head/lib/libarchive/archive_read_support_format_iso9660.c 201246 2009-12-30 05:30:35Z kientzle $"); #ifdef HAVE_ERRNO_H #include <errno.h> #endif /* #include <stdint.h> */ /* See archive_platform.h */ #include <stdio.h> #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #include <time.h> #ifdef HAVE_ZLIB_H #include <zlib.h> #endif #include "archive.h" #include "archive_endian.h" #include "archive_entry.h" #include "archive_entry_locale.h" #include "archive_private.h" #include "archive_read_private.h" #include "archive_string.h" /* * An overview of ISO 9660 format: * * Each disk is laid out as follows: * * 32k reserved for private use * * Volume descriptor table. Each volume descriptor * is 2k and specifies basic format information. * The "Primary Volume Descriptor" (PVD) is defined by the * standard and should always be present; other volume * descriptors include various vendor-specific extensions. * * Files and directories. Each file/dir is specified by * an "extent" (starting sector and length in bytes). * Dirs are just files with directory records packed one * after another. The PVD contains a single dir entry * specifying the location of the root directory. Everything * else follows from there. * * This module works by first reading the volume descriptors, then * building a list of directory entries, sorted by starting * sector. At each step, I look for the earliest dir entry that * hasn't yet been read, seek forward to that location and read * that entry. If it's a dir, I slurp in the new dir entries and * add them to the heap; if it's a regular file, I return the * corresponding archive_entry and wait for the client to request * the file body. This strategy allows us to read most compliant * CDs with a single pass through the data, as required by libarchive. */ #define LOGICAL_BLOCK_SIZE 2048 #define SYSTEM_AREA_BLOCK 16 /* Structure of on-disk primary volume descriptor. 
*/ #define PVD_type_offset 0 #define PVD_type_size 1 #define PVD_id_offset (PVD_type_offset + PVD_type_size) #define PVD_id_size 5 #define PVD_version_offset (PVD_id_offset + PVD_id_size) #define PVD_version_size 1 #define PVD_reserved1_offset (PVD_version_offset + PVD_version_size) #define PVD_reserved1_size 1 #define PVD_system_id_offset (PVD_reserved1_offset + PVD_reserved1_size) #define PVD_system_id_size 32 #define PVD_volume_id_offset (PVD_system_id_offset + PVD_system_id_size) #define PVD_volume_id_size 32 #define PVD_reserved2_offset (PVD_volume_id_offset + PVD_volume_id_size) #define PVD_reserved2_size 8 #define PVD_volume_space_size_offset (PVD_reserved2_offset + PVD_reserved2_size) #define PVD_volume_space_size_size 8 #define PVD_reserved3_offset (PVD_volume_space_size_offset + PVD_volume_space_size_size) #define PVD_reserved3_size 32 #define PVD_volume_set_size_offset (PVD_reserved3_offset + PVD_reserved3_size) #define PVD_volume_set_size_size 4 #define PVD_volume_sequence_number_offset (PVD_volume_set_size_offset + PVD_volume_set_size_size) #define PVD_volume_sequence_number_size 4 #define PVD_logical_block_size_offset (PVD_volume_sequence_number_offset + PVD_volume_sequence_number_size) #define PVD_logical_block_size_size 4 #define PVD_path_table_size_offset (PVD_logical_block_size_offset + PVD_logical_block_size_size) #define PVD_path_table_size_size 8 #define PVD_type_1_path_table_offset (PVD_path_table_size_offset + PVD_path_table_size_size) #define PVD_type_1_path_table_size 4 #define PVD_opt_type_1_path_table_offset (PVD_type_1_path_table_offset + PVD_type_1_path_table_size) #define PVD_opt_type_1_path_table_size 4 #define PVD_type_m_path_table_offset (PVD_opt_type_1_path_table_offset + PVD_opt_type_1_path_table_size) #define PVD_type_m_path_table_size 4 #define PVD_opt_type_m_path_table_offset (PVD_type_m_path_table_offset + PVD_type_m_path_table_size) #define PVD_opt_type_m_path_table_size 4 #define PVD_root_directory_record_offset (PVD_opt_type_m_path_table_offset + PVD_opt_type_m_path_table_size) #define PVD_root_directory_record_size 34 #define PVD_volume_set_id_offset (PVD_root_directory_record_offset + PVD_root_directory_record_size) #define PVD_volume_set_id_size 128 #define PVD_publisher_id_offset (PVD_volume_set_id_offset + PVD_volume_set_id_size) #define PVD_publisher_id_size 128 #define PVD_preparer_id_offset (PVD_publisher_id_offset + PVD_publisher_id_size) #define PVD_preparer_id_size 128 #define PVD_application_id_offset (PVD_preparer_id_offset + PVD_preparer_id_size) #define PVD_application_id_size 128 #define PVD_copyright_file_id_offset (PVD_application_id_offset + PVD_application_id_size) #define PVD_copyright_file_id_size 37 #define PVD_abstract_file_id_offset (PVD_copyright_file_id_offset + PVD_copyright_file_id_size) #define PVD_abstract_file_id_size 37 #define PVD_bibliographic_file_id_offset (PVD_abstract_file_id_offset + PVD_abstract_file_id_size) #define PVD_bibliographic_file_id_size 37 #define PVD_creation_date_offset (PVD_bibliographic_file_id_offset + PVD_bibliographic_file_id_size) #define PVD_creation_date_size 17 #define PVD_modification_date_offset (PVD_creation_date_offset + PVD_creation_date_size) #define PVD_modification_date_size 17 #define PVD_expiration_date_offset (PVD_modification_date_offset + PVD_modification_date_size) #define PVD_expiration_date_size 17 #define PVD_effective_date_offset (PVD_expiration_date_offset + PVD_expiration_date_size) #define PVD_effective_date_size 17 #define PVD_file_structure_version_offset 
(PVD_effective_date_offset + PVD_effective_date_size) #define PVD_file_structure_version_size 1 #define PVD_reserved4_offset (PVD_file_structure_version_offset + PVD_file_structure_version_size) #define PVD_reserved4_size 1 #define PVD_application_data_offset (PVD_reserved4_offset + PVD_reserved4_size) #define PVD_application_data_size 512 #define PVD_reserved5_offset (PVD_application_data_offset + PVD_application_data_size) #define PVD_reserved5_size (2048 - PVD_reserved5_offset) /* TODO: It would make future maintenance easier to just hardcode the * above values. In particular, ECMA119 states the offsets as part of * the standard. That would eliminate the need for the following check.*/ #if PVD_reserved5_offset != 1395 #error PVD offset and size definitions are wrong. #endif /* Structure of optional on-disk supplementary volume descriptor. */ #define SVD_type_offset 0 #define SVD_type_size 1 #define SVD_id_offset (SVD_type_offset + SVD_type_size) #define SVD_id_size 5 #define SVD_version_offset (SVD_id_offset + SVD_id_size) #define SVD_version_size 1 /* ... */ #define SVD_reserved1_offset 72 #define SVD_reserved1_size 8 #define SVD_volume_space_size_offset 80 #define SVD_volume_space_size_size 8 #define SVD_escape_sequences_offset (SVD_volume_space_size_offset + SVD_volume_space_size_size) #define SVD_escape_sequences_size 32 /* ... */ #define SVD_logical_block_size_offset 128 #define SVD_logical_block_size_size 4 #define SVD_type_L_path_table_offset 140 #define SVD_type_M_path_table_offset 148 /* ... */ #define SVD_root_directory_record_offset 156 #define SVD_root_directory_record_size 34 #define SVD_file_structure_version_offset 881 #define SVD_reserved2_offset 882 #define SVD_reserved2_size 1 #define SVD_reserved3_offset 1395 #define SVD_reserved3_size 653 /* ... */ /* FIXME: validate correctness of last SVD entry offset. */ /* Structure of an on-disk directory record. */ /* Note: ISO9660 stores each multi-byte integer twice, once in * each byte order. The sizes here are the size of just one * of the two integers. (This is why the offset of a field isn't * the same as the offset+size of the previous field.) */ #define DR_length_offset 0 #define DR_length_size 1 #define DR_ext_attr_length_offset 1 #define DR_ext_attr_length_size 1 #define DR_extent_offset 2 #define DR_extent_size 4 #define DR_size_offset 10 #define DR_size_size 4 #define DR_date_offset 18 #define DR_date_size 7 #define DR_flags_offset 25 #define DR_flags_size 1 #define DR_file_unit_size_offset 26 #define DR_file_unit_size_size 1 #define DR_interleave_offset 27 #define DR_interleave_size 1 #define DR_volume_sequence_number_offset 28 #define DR_volume_sequence_number_size 2 #define DR_name_len_offset 32 #define DR_name_len_size 1 #define DR_name_offset 33 #ifdef HAVE_ZLIB_H static const unsigned char zisofs_magic[8] = { 0x37, 0xE4, 0x53, 0x96, 0xC9, 0xDB, 0xD6, 0x07 }; struct zisofs { /* Set 1 if this file compressed by paged zlib */ int pz; int pz_log2_bs; /* Log2 of block size */ uint64_t pz_uncompressed_size; int initialized; unsigned char *uncompressed_buffer; size_t uncompressed_buffer_size; uint32_t pz_offset; unsigned char header[16]; size_t header_avail; int header_passed; unsigned char *block_pointers; size_t block_pointers_alloc; size_t block_pointers_size; size_t block_pointers_avail; size_t block_off; uint32_t block_avail; z_stream stream; int stream_valid; }; #else struct zisofs { /* Set 1 if this file compressed by paged zlib */ int pz; }; #endif struct content { uint64_t offset;/* Offset on disk. 
*/ uint64_t size; /* File size in bytes. */ struct content *next; }; /* In-memory storage for a directory record. */ struct file_info { struct file_info *use_next; struct file_info *parent; struct file_info *next; struct file_info *re_next; int subdirs; uint64_t key; /* Heap Key. */ uint64_t offset; /* Offset on disk. */ uint64_t size; /* File size in bytes. */ uint32_t ce_offset; /* Offset of CE. */ uint32_t ce_size; /* Size of CE. */ char rr_moved; /* Flag to rr_moved. */ char rr_moved_has_re_only; char re; /* Having RRIP "RE" extension. */ char re_descendant; uint64_t cl_offset; /* Having RRIP "CL" extension. */ int birthtime_is_set; time_t birthtime; /* File created time. */ time_t mtime; /* File last modified time. */ time_t atime; /* File last accessed time. */ time_t ctime; /* File attribute change time. */ uint64_t rdev; /* Device number. */ mode_t mode; uid_t uid; gid_t gid; int64_t number; int nlinks; struct archive_string name; /* Pathname */ unsigned char *utf16be_name; size_t utf16be_bytes; char name_continues; /* Non-zero if name continues */ struct archive_string symlink; char symlink_continues; /* Non-zero if link continues */ /* Set 1 if this file compressed by paged zlib(zisofs) */ int pz; int pz_log2_bs; /* Log2 of block size */ uint64_t pz_uncompressed_size; /* Set 1 if this file is multi extent. */ int multi_extent; struct { struct content *first; struct content **last; } contents; struct { struct file_info *first; struct file_info **last; } rede_files; }; struct heap_queue { struct file_info **files; int allocated; int used; }; struct iso9660 { int magic; #define ISO9660_MAGIC 0x96609660 int opt_support_joliet; int opt_support_rockridge; struct archive_string pathname; char seenRockridge; /* Set true if RR extensions are used. */ char seenSUSP; /* Set true if SUSP is beging used. */ char seenJoliet; unsigned char suspOffset; struct file_info *rr_moved; struct read_ce_queue { struct read_ce_req { uint64_t offset;/* Offset of CE on disk. */ struct file_info *file; } *reqs; int cnt; int allocated; } read_ce_req; int64_t previous_number; struct archive_string previous_pathname; struct file_info *use_files; struct heap_queue pending_files; struct { struct file_info *first; struct file_info **last; } cache_files; struct { struct file_info *first; struct file_info **last; } re_files; uint64_t current_position; ssize_t logical_block_size; uint64_t volume_size; /* Total size of volume in bytes. */ int32_t volume_block;/* Total size of volume in logical blocks. */ struct vd { int location; /* Location of Extent. */ uint32_t size; } primary, joliet; int64_t entry_sparse_offset; int64_t entry_bytes_remaining; size_t entry_bytes_unconsumed; struct zisofs entry_zisofs; struct content *entry_content; struct archive_string_conv *sconv_utf16be; /* * Buffers for a full pathname in UTF-16BE in Joliet extensions. */ #define UTF16_NAME_MAX 1024 unsigned char *utf16be_path; size_t utf16be_path_len; unsigned char *utf16be_previous_path; size_t utf16be_previous_path_len; /* Null buufer used in bidder to improve its performance. 
*/ unsigned char null[2048]; }; static int archive_read_format_iso9660_bid(struct archive_read *, int); static int archive_read_format_iso9660_options(struct archive_read *, const char *, const char *); static int archive_read_format_iso9660_cleanup(struct archive_read *); static int archive_read_format_iso9660_read_data(struct archive_read *, const void **, size_t *, int64_t *); static int archive_read_format_iso9660_read_data_skip(struct archive_read *); static int archive_read_format_iso9660_read_header(struct archive_read *, struct archive_entry *); static const char *build_pathname(struct archive_string *, struct file_info *, int); static int build_pathname_utf16be(unsigned char *, size_t, size_t *, struct file_info *); #if DEBUG static void dump_isodirrec(FILE *, const unsigned char *isodirrec); #endif static time_t time_from_tm(struct tm *); static time_t isodate17(const unsigned char *); static time_t isodate7(const unsigned char *); static int isBootRecord(struct iso9660 *, const unsigned char *); static int isVolumePartition(struct iso9660 *, const unsigned char *); static int isVDSetTerminator(struct iso9660 *, const unsigned char *); static int isJolietSVD(struct iso9660 *, const unsigned char *); static int isSVD(struct iso9660 *, const unsigned char *); static int isEVD(struct iso9660 *, const unsigned char *); static int isPVD(struct iso9660 *, const unsigned char *); static int next_cache_entry(struct archive_read *, struct iso9660 *, struct file_info **); static int next_entry_seek(struct archive_read *, struct iso9660 *, struct file_info **); static struct file_info * parse_file_info(struct archive_read *a, struct file_info *parent, const unsigned char *isodirrec); static int parse_rockridge(struct archive_read *a, struct file_info *file, const unsigned char *start, const unsigned char *end); static int register_CE(struct archive_read *a, int32_t location, struct file_info *file); static int read_CE(struct archive_read *a, struct iso9660 *iso9660); static void parse_rockridge_NM1(struct file_info *, const unsigned char *, int); static void parse_rockridge_SL1(struct file_info *, const unsigned char *, int); static void parse_rockridge_TF1(struct file_info *, const unsigned char *, int); static void parse_rockridge_ZF1(struct file_info *, const unsigned char *, int); static void register_file(struct iso9660 *, struct file_info *); static void release_files(struct iso9660 *); static unsigned toi(const void *p, int n); static inline void re_add_entry(struct iso9660 *, struct file_info *); static inline struct file_info * re_get_entry(struct iso9660 *); static inline int rede_add_entry(struct file_info *); static inline struct file_info * rede_get_entry(struct file_info *); static inline void cache_add_entry(struct iso9660 *iso9660, struct file_info *file); static inline struct file_info *cache_get_entry(struct iso9660 *iso9660); static int heap_add_entry(struct archive_read *a, struct heap_queue *heap, struct file_info *file, uint64_t key); static struct file_info *heap_get_entry(struct heap_queue *heap); #define add_entry(arch, iso9660, file) \ heap_add_entry(arch, &((iso9660)->pending_files), file, file->offset) #define next_entry(iso9660) \ heap_get_entry(&((iso9660)->pending_files)) int archive_read_support_format_iso9660(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; struct iso9660 *iso9660; int r; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_support_format_iso9660"); iso9660 = (struct iso9660 *)calloc(1, 
sizeof(*iso9660)); if (iso9660 == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate iso9660 data"); return (ARCHIVE_FATAL); } iso9660->magic = ISO9660_MAGIC; iso9660->cache_files.first = NULL; iso9660->cache_files.last = &(iso9660->cache_files.first); iso9660->re_files.first = NULL; iso9660->re_files.last = &(iso9660->re_files.first); /* Enable to support Joliet extensions by default. */ iso9660->opt_support_joliet = 1; /* Enable to support Rock Ridge extensions by default. */ iso9660->opt_support_rockridge = 1; r = __archive_read_register_format(a, iso9660, "iso9660", archive_read_format_iso9660_bid, archive_read_format_iso9660_options, archive_read_format_iso9660_read_header, archive_read_format_iso9660_read_data, archive_read_format_iso9660_read_data_skip, NULL, archive_read_format_iso9660_cleanup, NULL, NULL); if (r != ARCHIVE_OK) { free(iso9660); return (r); } return (ARCHIVE_OK); } static int archive_read_format_iso9660_bid(struct archive_read *a, int best_bid) { struct iso9660 *iso9660; ssize_t bytes_read; const unsigned char *p; int seenTerminator; /* If there's already a better bid than we can ever make, don't bother testing. */ if (best_bid > 48) return (-1); iso9660 = (struct iso9660 *)(a->format->data); /* * Skip the first 32k (reserved area) and get the first * 8 sectors of the volume descriptor table. Of course, * if the I/O layer gives us more, we'll take it. */ #define RESERVED_AREA (SYSTEM_AREA_BLOCK * LOGICAL_BLOCK_SIZE) p = __archive_read_ahead(a, RESERVED_AREA + 8 * LOGICAL_BLOCK_SIZE, &bytes_read); if (p == NULL) return (-1); /* Skip the reserved area. */ bytes_read -= RESERVED_AREA; p += RESERVED_AREA; /* Check each volume descriptor. */ seenTerminator = 0; for (; bytes_read > LOGICAL_BLOCK_SIZE; bytes_read -= LOGICAL_BLOCK_SIZE, p += LOGICAL_BLOCK_SIZE) { /* Do not handle undefined Volume Descriptor Type. */ if (p[0] >= 4 && p[0] <= 254) return (0); /* Standard Identifier must be "CD001" */ if (memcmp(p + 1, "CD001", 5) != 0) return (0); if (isPVD(iso9660, p)) continue; if (!iso9660->joliet.location) { if (isJolietSVD(iso9660, p)) continue; } if (isBootRecord(iso9660, p)) continue; if (isEVD(iso9660, p)) continue; if (isSVD(iso9660, p)) continue; if (isVolumePartition(iso9660, p)) continue; if (isVDSetTerminator(iso9660, p)) { seenTerminator = 1; break; } return (0); } /* * ISO 9660 format must have Primary Volume Descriptor and * Volume Descriptor Set Terminator. */ if (seenTerminator && iso9660->primary.location > 16) return (48); /* We didn't find a valid PVD; return a bid of zero. */ return (0); } static int archive_read_format_iso9660_options(struct archive_read *a, const char *key, const char *val) { struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); if (strcmp(key, "joliet") == 0) { if (val == NULL || strcmp(val, "off") == 0 || strcmp(val, "ignore") == 0 || strcmp(val, "disable") == 0 || strcmp(val, "0") == 0) iso9660->opt_support_joliet = 0; else iso9660->opt_support_joliet = 1; return (ARCHIVE_OK); } if (strcmp(key, "rockridge") == 0 || strcmp(key, "Rockridge") == 0) { iso9660->opt_support_rockridge = val != NULL; return (ARCHIVE_OK); } /* Note: The "warn" return is just to inform the options * supervisor that we didn't handle it. It will generate * a suitable error if no one used this option. 
*/ return (ARCHIVE_WARN); } static int isNull(struct iso9660 *iso9660, const unsigned char *h, unsigned offset, unsigned bytes) { while (bytes >= sizeof(iso9660->null)) { if (!memcmp(iso9660->null, h + offset, sizeof(iso9660->null))) return (0); offset += sizeof(iso9660->null); bytes -= sizeof(iso9660->null); } if (bytes) return memcmp(iso9660->null, h + offset, bytes) == 0; else return (1); } static int isBootRecord(struct iso9660 *iso9660, const unsigned char *h) { (void)iso9660; /* UNUSED */ /* Type of the Volume Descriptor Boot Record must be 0. */ if (h[0] != 0) return (0); /* Volume Descriptor Version must be 1. */ if (h[6] != 1) return (0); return (1); } static int isVolumePartition(struct iso9660 *iso9660, const unsigned char *h) { int32_t location; /* Type of the Volume Partition Descriptor must be 3. */ if (h[0] != 3) return (0); /* Volume Descriptor Version must be 1. */ if (h[6] != 1) return (0); /* Unused Field */ if (h[7] != 0) return (0); location = archive_le32dec(h + 72); if (location <= SYSTEM_AREA_BLOCK || location >= iso9660->volume_block) return (0); if ((uint32_t)location != archive_be32dec(h + 76)) return (0); return (1); } static int isVDSetTerminator(struct iso9660 *iso9660, const unsigned char *h) { (void)iso9660; /* UNUSED */ /* Type of the Volume Descriptor Set Terminator must be 255. */ if (h[0] != 255) return (0); /* Volume Descriptor Version must be 1. */ if (h[6] != 1) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, 7, 2048-7)) return (0); return (1); } static int isJolietSVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; /* Check if current sector is a kind of Supplementary Volume * Descriptor. */ if (!isSVD(iso9660, h)) return (0); /* FIXME: do more validations according to joliet spec. */ /* check if this SVD contains joliet extension! */ p = h + SVD_escape_sequences_offset; /* N.B. Joliet spec says p[1] == '\\', but.... */ if (p[0] == '%' && p[1] == '/') { int level = 0; if (p[2] == '@') level = 1; else if (p[2] == 'C') level = 2; else if (p[2] == 'E') level = 3; else /* not joliet */ return (0); iso9660->seenJoliet = level; } else /* not joliet */ return (0); logical_block_size = archive_le16dec(h + SVD_logical_block_size_offset); volume_block = archive_le32dec(h + SVD_volume_space_size_offset); iso9660->logical_block_size = logical_block_size; iso9660->volume_block = volume_block; iso9660->volume_size = logical_block_size * (uint64_t)volume_block; /* Read Root Directory Record in Volume Descriptor. */ p = h + SVD_root_directory_record_offset; iso9660->joliet.location = archive_le32dec(p + DR_extent_offset); iso9660->joliet.size = archive_le32dec(p + DR_size_offset); return (48); } static int isSVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; int32_t location; (void)iso9660; /* UNUSED */ /* Type 2 means it's a SVD. */ if (h[SVD_type_offset] != 2) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, SVD_reserved1_offset, SVD_reserved1_size)) return (0); if (!isNull(iso9660, h, SVD_reserved2_offset, SVD_reserved2_size)) return (0); if (!isNull(iso9660, h, SVD_reserved3_offset, SVD_reserved3_size)) return (0); /* File structure version must be 1 for ISO9660/ECMA119. 
*/ if (h[SVD_file_structure_version_offset] != 1) return (0); logical_block_size = archive_le16dec(h + SVD_logical_block_size_offset); if (logical_block_size <= 0) return (0); volume_block = archive_le32dec(h + SVD_volume_space_size_offset); if (volume_block <= SYSTEM_AREA_BLOCK+4) return (0); /* Location of Occurrence of Type L Path Table must be * available location, * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_le32dec(h+SVD_type_L_path_table_offset); if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block) return (0); /* The Type M Path Table must be at a valid location (WinISO * and probably other programs omit this, so we allow zero) * * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_be32dec(h+SVD_type_M_path_table_offset); if ((location > 0 && location < SYSTEM_AREA_BLOCK+2) || location >= volume_block) return (0); /* Read Root Directory Record in Volume Descriptor. */ p = h + SVD_root_directory_record_offset; if (p[DR_length_offset] != 34) return (0); return (48); } static int isEVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; int32_t location; (void)iso9660; /* UNUSED */ /* Type of the Enhanced Volume Descriptor must be 2. */ if (h[PVD_type_offset] != 2) return (0); /* EVD version must be 2. */ if (h[PVD_version_offset] != 2) return (0); /* Reserved field must be 0. */ if (h[PVD_reserved1_offset] != 0) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved2_offset, PVD_reserved2_size)) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved3_offset, PVD_reserved3_size)) return (0); /* Logical block size must be > 0. */ /* I've looked at Ecma 119 and can't find any stronger * restriction on this field. */ logical_block_size = archive_le16dec(h + PVD_logical_block_size_offset); if (logical_block_size <= 0) return (0); volume_block = archive_le32dec(h + PVD_volume_space_size_offset); if (volume_block <= SYSTEM_AREA_BLOCK+4) return (0); /* File structure version must be 2 for ISO9660:1999. */ if (h[PVD_file_structure_version_offset] != 2) return (0); /* Location of Occurrence of Type L Path Table must be * available location, * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_le32dec(h+PVD_type_1_path_table_offset); if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block) return (0); /* Location of Occurrence of Type M Path Table must be * available location, * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_be32dec(h+PVD_type_m_path_table_offset); if ((location > 0 && location < SYSTEM_AREA_BLOCK+2) || location >= volume_block) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved4_offset, PVD_reserved4_size)) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved5_offset, PVD_reserved5_size)) return (0); /* Read Root Directory Record in Volume Descriptor. */ p = h + PVD_root_directory_record_offset; if (p[DR_length_offset] != 34) return (0); return (48); } static int isPVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; int32_t location; int i; /* Type of the Primary Volume Descriptor must be 1. */ if (h[PVD_type_offset] != 1) return (0); /* PVD version must be 1. */ if (h[PVD_version_offset] != 1) return (0); /* Reserved field must be 0. 
*/ if (h[PVD_reserved1_offset] != 0) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved2_offset, PVD_reserved2_size)) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved3_offset, PVD_reserved3_size)) return (0); /* Logical block size must be > 0. */ /* I've looked at Ecma 119 and can't find any stronger * restriction on this field. */ logical_block_size = archive_le16dec(h + PVD_logical_block_size_offset); if (logical_block_size <= 0) return (0); volume_block = archive_le32dec(h + PVD_volume_space_size_offset); if (volume_block <= SYSTEM_AREA_BLOCK+4) return (0); /* File structure version must be 1 for ISO9660/ECMA119. */ if (h[PVD_file_structure_version_offset] != 1) return (0); /* Location of Occurrence of Type L Path Table must be * available location, * > SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_le32dec(h+PVD_type_1_path_table_offset); if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block) return (0); /* The Type M Path Table must also be at a valid location * (although ECMA 119 requires a Type M Path Table, WinISO and * probably other programs omit it, so we permit a zero here) * * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_be32dec(h+PVD_type_m_path_table_offset); if ((location > 0 && location < SYSTEM_AREA_BLOCK+2) || location >= volume_block) return (0); /* Reserved field must be 0. */ /* But accept NetBSD/FreeBSD "makefs" images with 0x20 here. */ for (i = 0; i < PVD_reserved4_size; ++i) if (h[PVD_reserved4_offset + i] != 0 && h[PVD_reserved4_offset + i] != 0x20) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved5_offset, PVD_reserved5_size)) return (0); /* XXX TODO: Check other values for sanity; reject more * malformed PVDs. XXX */ /* Read Root Directory Record in Volume Descriptor. 
*/ p = h + PVD_root_directory_record_offset; if (p[DR_length_offset] != 34) return (0); if (!iso9660->primary.location) { iso9660->logical_block_size = logical_block_size; iso9660->volume_block = volume_block; iso9660->volume_size = logical_block_size * (uint64_t)volume_block; iso9660->primary.location = archive_le32dec(p + DR_extent_offset); iso9660->primary.size = archive_le32dec(p + DR_size_offset); } return (48); } static int read_children(struct archive_read *a, struct file_info *parent) { struct iso9660 *iso9660; const unsigned char *b, *p; struct file_info *multi; size_t step, skip_size; iso9660 = (struct iso9660 *)(a->format->data); /* flush any remaining bytes from the last round to ensure * we're positioned */ if (iso9660->entry_bytes_unconsumed) { __archive_read_consume(a, iso9660->entry_bytes_unconsumed); iso9660->entry_bytes_unconsumed = 0; } if (iso9660->current_position > parent->offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Ignoring out-of-order directory (%s) %jd > %jd", parent->name.s, (intmax_t)iso9660->current_position, (intmax_t)parent->offset); return (ARCHIVE_WARN); } if (parent->offset + parent->size > iso9660->volume_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Directory is beyond end-of-media: %s", parent->name.s); return (ARCHIVE_WARN); } if (iso9660->current_position < parent->offset) { int64_t skipsize; skipsize = parent->offset - iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = parent->offset; } step = (size_t)(((parent->size + iso9660->logical_block_size -1) / iso9660->logical_block_size) * iso9660->logical_block_size); b = __archive_read_ahead(a, step, NULL); if (b == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->current_position += step; multi = NULL; skip_size = step; while (step) { p = b; b += iso9660->logical_block_size; step -= iso9660->logical_block_size; for (; *p != 0 && p < b && p + *p <= b; p += *p) { struct file_info *child; /* N.B.: these special directory identifiers * are 8 bit "values" even on a * Joliet CD with UCS-2 (16bit) encoding. */ /* Skip '.' entry. */ if (*(p + DR_name_len_offset) == 1 && *(p + DR_name_offset) == '\0') continue; /* Skip '..' entry. */ if (*(p + DR_name_len_offset) == 1 && *(p + DR_name_offset) == '\001') continue; child = parse_file_info(a, parent, p); if (child == NULL) { __archive_read_consume(a, skip_size); return (ARCHIVE_FATAL); } if (child->cl_offset == 0 && (child->multi_extent || multi != NULL)) { struct content *con; if (multi == NULL) { multi = child; multi->contents.first = NULL; multi->contents.last = &(multi->contents.first); } con = malloc(sizeof(struct content)); if (con == NULL) { archive_set_error( &a->archive, ENOMEM, "No memory for multi extent"); __archive_read_consume(a, skip_size); return (ARCHIVE_FATAL); } con->offset = child->offset; con->size = child->size; con->next = NULL; *multi->contents.last = con; multi->contents.last = &(con->next); if (multi == child) { if (add_entry(a, iso9660, child) != ARCHIVE_OK) return (ARCHIVE_FATAL); } else { multi->size += child->size; if (!child->multi_extent) multi = NULL; } } else if (add_entry(a, iso9660, child) != ARCHIVE_OK) return (ARCHIVE_FATAL); } } __archive_read_consume(a, skip_size); /* Read data which recorded by RRIP "CE" extension. 
*/ if (read_CE(a, iso9660) != ARCHIVE_OK) return (ARCHIVE_FATAL); return (ARCHIVE_OK); } static int choose_volume(struct archive_read *a, struct iso9660 *iso9660) { struct file_info *file; int64_t skipsize; struct vd *vd; const void *block; char seenJoliet; vd = &(iso9660->primary); if (!iso9660->opt_support_joliet) iso9660->seenJoliet = 0; if (iso9660->seenJoliet && vd->location > iso9660->joliet.location) /* This condition is unlikely; by way of caution. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * vd->location; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } /* * While reading Root Directory, flag seenJoliet must be zero to * avoid converting special name 0x00(Current Directory) and * next byte to UCS2. */ seenJoliet = iso9660->seenJoliet;/* Save flag. */ iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; /* * If the iso image has both RockRidge and Joliet, we preferentially * use RockRidge Extensions rather than Joliet ones. */ if (vd == &(iso9660->primary) && iso9660->seenRockridge && iso9660->seenJoliet) iso9660->seenJoliet = 0; if (vd == &(iso9660->primary) && !iso9660->seenRockridge && iso9660->seenJoliet) { /* Switch reading data from primary to joliet. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * vd->location; skipsize -= iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position += skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; } /* Store the root directory in the pending list. */ if (add_entry(a, iso9660, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); if (iso9660->seenRockridge) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE; a->archive.archive_format_name = "ISO9660 with Rockridge extensions"; } return (ARCHIVE_OK); } static int archive_read_format_iso9660_read_header(struct archive_read *a, struct archive_entry *entry) { struct iso9660 *iso9660; struct file_info *file; int r, rd_r = ARCHIVE_OK; iso9660 = (struct iso9660 *)(a->format->data); if (!a->archive.archive_format) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660; a->archive.archive_format_name = "ISO9660"; } if (iso9660->current_position == 0) { r = choose_volume(a, iso9660); if (r != ARCHIVE_OK) return (r); } file = NULL;/* Eliminate a warning. */ /* Get the next entry that appears after the current offset. */ r = next_entry_seek(a, iso9660, &file); if (r != ARCHIVE_OK) return (r); if (iso9660->seenJoliet) { /* * Convert UTF-16BE of a filename to local locale MBS * and store the result into a filename field. 
*/ if (iso9660->sconv_utf16be == NULL) { iso9660->sconv_utf16be = archive_string_conversion_from_charset( &(a->archive), "UTF-16BE", 1); if (iso9660->sconv_utf16be == NULL) /* Coundn't allocate memory */ return (ARCHIVE_FATAL); } if (iso9660->utf16be_path == NULL) { iso9660->utf16be_path = malloc(UTF16_NAME_MAX); if (iso9660->utf16be_path == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory"); return (ARCHIVE_FATAL); } } if (iso9660->utf16be_previous_path == NULL) { iso9660->utf16be_previous_path = malloc(UTF16_NAME_MAX); if (iso9660->utf16be_previous_path == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory"); return (ARCHIVE_FATAL); } } iso9660->utf16be_path_len = 0; if (build_pathname_utf16be(iso9660->utf16be_path, UTF16_NAME_MAX, &(iso9660->utf16be_path_len), file) != 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname is too long"); return (ARCHIVE_FATAL); } r = archive_entry_copy_pathname_l(entry, (const char *)iso9660->utf16be_path, iso9660->utf16be_path_len, iso9660->sconv_utf16be); if (r != 0) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "No memory for Pathname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname cannot be converted " "from %s to current locale.", archive_string_conversion_charset_name( iso9660->sconv_utf16be)); rd_r = ARCHIVE_WARN; } } else { const char *path = build_pathname(&iso9660->pathname, file, 0); if (path == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Pathname is too long"); return (ARCHIVE_FATAL); } else { archive_string_empty(&iso9660->pathname); archive_entry_set_pathname(entry, path); } } iso9660->entry_bytes_remaining = file->size; /* Offset for sparse-file-aware clients. */ iso9660->entry_sparse_offset = 0; if (file->offset + file->size > iso9660->volume_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "File is beyond end-of-media: %s", archive_entry_pathname(entry)); iso9660->entry_bytes_remaining = 0; return (ARCHIVE_WARN); } /* Set up the entry structure with information about this entry. */ archive_entry_set_mode(entry, file->mode); archive_entry_set_uid(entry, file->uid); archive_entry_set_gid(entry, file->gid); archive_entry_set_nlink(entry, file->nlinks); if (file->birthtime_is_set) archive_entry_set_birthtime(entry, file->birthtime, 0); else archive_entry_unset_birthtime(entry); archive_entry_set_mtime(entry, file->mtime, 0); archive_entry_set_ctime(entry, file->ctime, 0); archive_entry_set_atime(entry, file->atime, 0); /* N.B.: Rock Ridge supports 64-bit device numbers. */ archive_entry_set_rdev(entry, (dev_t)file->rdev); archive_entry_set_size(entry, iso9660->entry_bytes_remaining); if (file->symlink.s != NULL) archive_entry_copy_symlink(entry, file->symlink.s); /* Note: If the input isn't seekable, we can't rewind to * return the same body again, so if the next entry refers to * the same data, we have to return it as a hardlink to the * original entry. 
*/ if (file->number != -1 && file->number == iso9660->previous_number) { if (iso9660->seenJoliet) { r = archive_entry_copy_hardlink_l(entry, (const char *)iso9660->utf16be_previous_path, iso9660->utf16be_previous_path_len, iso9660->sconv_utf16be); if (r != 0) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "No memory for Linkname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Linkname cannot be converted " "from %s to current locale.", archive_string_conversion_charset_name( iso9660->sconv_utf16be)); rd_r = ARCHIVE_WARN; } } else archive_entry_set_hardlink(entry, iso9660->previous_pathname.s); archive_entry_unset_size(entry); iso9660->entry_bytes_remaining = 0; return (rd_r); } if ((file->mode & AE_IFMT) != AE_IFDIR && file->offset < iso9660->current_position) { int64_t r64; r64 = __archive_read_seek(a, file->offset, SEEK_SET); if (r64 != (int64_t)file->offset) { /* We can't seek backwards to extract it, so issue * a warning. Note that this can only happen if * this entry was added to the heap after we passed * this offset, that is, only if the directory * mentioning this entry is later than the body of * the entry. Such layouts are very unusual; most * ISO9660 writers lay out and record all directory * information first, then store all file bodies. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Ignoring out-of-order file @%jx (%s) %jd < %jd", (intmax_t)file->number, iso9660->pathname.s, (intmax_t)file->offset, (intmax_t)iso9660->current_position); iso9660->entry_bytes_remaining = 0; return (ARCHIVE_WARN); } iso9660->current_position = (uint64_t)r64; } /* Initialize zisofs variables. */ iso9660->entry_zisofs.pz = file->pz; if (file->pz) { #ifdef HAVE_ZLIB_H struct zisofs *zisofs; zisofs = &iso9660->entry_zisofs; zisofs->initialized = 0; zisofs->pz_log2_bs = file->pz_log2_bs; zisofs->pz_uncompressed_size = file->pz_uncompressed_size; zisofs->pz_offset = 0; zisofs->header_avail = 0; zisofs->header_passed = 0; zisofs->block_pointers_avail = 0; #endif archive_entry_set_size(entry, file->pz_uncompressed_size); } iso9660->previous_number = file->number; if (iso9660->seenJoliet) { memcpy(iso9660->utf16be_previous_path, iso9660->utf16be_path, iso9660->utf16be_path_len); iso9660->utf16be_previous_path_len = iso9660->utf16be_path_len; } else archive_strcpy( &iso9660->previous_pathname, iso9660->pathname.s); /* Reset entry_bytes_remaining if the file is multi extent. */ iso9660->entry_content = file->contents.first; if (iso9660->entry_content != NULL) iso9660->entry_bytes_remaining = iso9660->entry_content->size; if (archive_entry_filetype(entry) == AE_IFDIR) { /* Overwrite nlinks by proper link number which is * calculated from number of sub directories. */ archive_entry_set_nlink(entry, 2 + file->subdirs); /* Directory data has been read completely. */ iso9660->entry_bytes_remaining = 0; } if (rd_r != ARCHIVE_OK) return (rd_r); return (ARCHIVE_OK); } static int archive_read_format_iso9660_read_data_skip(struct archive_read *a) { /* Because read_next_header always does an explicit skip * to the next entry, we don't need to do anything here. 
*/ (void)a; /* UNUSED */ return (ARCHIVE_OK); } #ifdef HAVE_ZLIB_H static int zisofs_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { struct iso9660 *iso9660; struct zisofs *zisofs; const unsigned char *p; size_t avail; ssize_t bytes_read; size_t uncompressed_size; int r; iso9660 = (struct iso9660 *)(a->format->data); zisofs = &iso9660->entry_zisofs; p = __archive_read_ahead(a, 1, &bytes_read); if (bytes_read <= 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Truncated zisofs file body"); return (ARCHIVE_FATAL); } if (bytes_read > iso9660->entry_bytes_remaining) bytes_read = (ssize_t)iso9660->entry_bytes_remaining; avail = bytes_read; uncompressed_size = 0; if (!zisofs->initialized) { size_t ceil, xsize; /* Allocate block pointers buffer. */ ceil = (size_t)((zisofs->pz_uncompressed_size + (((int64_t)1) << zisofs->pz_log2_bs) - 1) >> zisofs->pz_log2_bs); xsize = (ceil + 1) * 4; if (zisofs->block_pointers_alloc < xsize) { size_t alloc; if (zisofs->block_pointers != NULL) free(zisofs->block_pointers); alloc = ((xsize >> 10) + 1) << 10; zisofs->block_pointers = malloc(alloc); if (zisofs->block_pointers == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for zisofs decompression"); return (ARCHIVE_FATAL); } zisofs->block_pointers_alloc = alloc; } zisofs->block_pointers_size = xsize; /* Allocate uncompressed data buffer. */ xsize = (size_t)1UL << zisofs->pz_log2_bs; if (zisofs->uncompressed_buffer_size < xsize) { if (zisofs->uncompressed_buffer != NULL) free(zisofs->uncompressed_buffer); zisofs->uncompressed_buffer = malloc(xsize); if (zisofs->uncompressed_buffer == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for zisofs decompression"); return (ARCHIVE_FATAL); } } zisofs->uncompressed_buffer_size = xsize; /* * Read the file header, and check the magic code of zisofs. */ if (zisofs->header_avail < sizeof(zisofs->header)) { xsize = sizeof(zisofs->header) - zisofs->header_avail; if (avail < xsize) xsize = avail; memcpy(zisofs->header + zisofs->header_avail, p, xsize); zisofs->header_avail += xsize; avail -= xsize; p += xsize; } if (!zisofs->header_passed && zisofs->header_avail == sizeof(zisofs->header)) { int err = 0; if (memcmp(zisofs->header, zisofs_magic, sizeof(zisofs_magic)) != 0) err = 1; if (archive_le32dec(zisofs->header + 8) != zisofs->pz_uncompressed_size) err = 1; if (zisofs->header[12] != 4) err = 1; if (zisofs->header[13] != zisofs->pz_log2_bs) err = 1; if (err) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs file body"); return (ARCHIVE_FATAL); } zisofs->header_passed = 1; } /* * Read block pointers. */ if (zisofs->header_passed && zisofs->block_pointers_avail < zisofs->block_pointers_size) { xsize = zisofs->block_pointers_size - zisofs->block_pointers_avail; if (avail < xsize) xsize = avail; memcpy(zisofs->block_pointers + zisofs->block_pointers_avail, p, xsize); zisofs->block_pointers_avail += xsize; avail -= xsize; p += xsize; if (zisofs->block_pointers_avail == zisofs->block_pointers_size) { /* We've got all block pointers and initialize * related variables. */ zisofs->block_off = 0; zisofs->block_avail = 0; /* Complete a initialization */ zisofs->initialized = 1; } } if (!zisofs->initialized) goto next_data; /* We need more data. */ } /* * Get block offsets from block pointers. */ if (zisofs->block_avail == 0) { uint32_t bst, bed; if (zisofs->block_off + 4 >= zisofs->block_pointers_size) { /* There isn't a pair of offsets. 
*/ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs block pointers"); return (ARCHIVE_FATAL); } bst = archive_le32dec( zisofs->block_pointers + zisofs->block_off); if (bst != zisofs->pz_offset + (bytes_read - avail)) { /* TODO: Should we seek offset of current file * by bst ? */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs block pointers(cannot seek)"); return (ARCHIVE_FATAL); } bed = archive_le32dec( zisofs->block_pointers + zisofs->block_off + 4); if (bed < bst) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs block pointers"); return (ARCHIVE_FATAL); } zisofs->block_avail = bed - bst; zisofs->block_off += 4; /* Initialize compression library for new block. */ if (zisofs->stream_valid) r = inflateReset(&zisofs->stream); else r = inflateInit(&zisofs->stream); if (r != Z_OK) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Can't initialize zisofs decompression."); return (ARCHIVE_FATAL); } zisofs->stream_valid = 1; zisofs->stream.total_in = 0; zisofs->stream.total_out = 0; } /* * Make uncompressed data. */ if (zisofs->block_avail == 0) { memset(zisofs->uncompressed_buffer, 0, zisofs->uncompressed_buffer_size); uncompressed_size = zisofs->uncompressed_buffer_size; } else { zisofs->stream.next_in = (Bytef *)(uintptr_t)(const void *)p; if (avail > zisofs->block_avail) zisofs->stream.avail_in = zisofs->block_avail; else zisofs->stream.avail_in = (uInt)avail; zisofs->stream.next_out = zisofs->uncompressed_buffer; zisofs->stream.avail_out = (uInt)zisofs->uncompressed_buffer_size; r = inflate(&zisofs->stream, 0); switch (r) { case Z_OK: /* Decompressor made some progress.*/ case Z_STREAM_END: /* Found end of stream. */ break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "zisofs decompression failed (%d)", r); return (ARCHIVE_FATAL); } uncompressed_size = zisofs->uncompressed_buffer_size - zisofs->stream.avail_out; avail -= zisofs->stream.next_in - p; zisofs->block_avail -= (uint32_t)(zisofs->stream.next_in - p); } next_data: bytes_read -= avail; *buff = zisofs->uncompressed_buffer; *size = uncompressed_size; *offset = iso9660->entry_sparse_offset; iso9660->entry_sparse_offset += uncompressed_size; iso9660->entry_bytes_remaining -= bytes_read; iso9660->current_position += bytes_read; zisofs->pz_offset += (uint32_t)bytes_read; iso9660->entry_bytes_unconsumed += bytes_read; return (ARCHIVE_OK); } #else /* HAVE_ZLIB_H */ static int zisofs_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { (void)buff;/* UNUSED */ (void)size;/* UNUSED */ (void)offset;/* UNUSED */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "zisofs is not supported on this platform."); return (ARCHIVE_FAILED); } #endif /* HAVE_ZLIB_H */ static int archive_read_format_iso9660_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { ssize_t bytes_read; struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); if (iso9660->entry_bytes_unconsumed) { __archive_read_consume(a, iso9660->entry_bytes_unconsumed); iso9660->entry_bytes_unconsumed = 0; } if (iso9660->entry_bytes_remaining <= 0) { if (iso9660->entry_content != NULL) iso9660->entry_content = iso9660->entry_content->next; if (iso9660->entry_content == NULL) { *buff = NULL; *size = 0; *offset = iso9660->entry_sparse_offset; return (ARCHIVE_EOF); } /* Seek forward to the start of the entry. 
*/ if (iso9660->current_position < iso9660->entry_content->offset) { int64_t step; step = iso9660->entry_content->offset - iso9660->current_position; step = __archive_read_consume(a, step); if (step < 0) return ((int)step); iso9660->current_position = iso9660->entry_content->offset; } if (iso9660->entry_content->offset < iso9660->current_position) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Ignoring out-of-order file (%s) %jd < %jd", iso9660->pathname.s, (intmax_t)iso9660->entry_content->offset, (intmax_t)iso9660->current_position); *buff = NULL; *size = 0; *offset = iso9660->entry_sparse_offset; return (ARCHIVE_WARN); } iso9660->entry_bytes_remaining = iso9660->entry_content->size; } if (iso9660->entry_zisofs.pz) return (zisofs_read_data(a, buff, size, offset)); *buff = __archive_read_ahead(a, 1, &bytes_read); if (bytes_read == 0) archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Truncated input file"); if (*buff == NULL) return (ARCHIVE_FATAL); if (bytes_read > iso9660->entry_bytes_remaining) bytes_read = (ssize_t)iso9660->entry_bytes_remaining; *size = bytes_read; *offset = iso9660->entry_sparse_offset; iso9660->entry_sparse_offset += bytes_read; iso9660->entry_bytes_remaining -= bytes_read; iso9660->entry_bytes_unconsumed = bytes_read; iso9660->current_position += bytes_read; return (ARCHIVE_OK); } static int archive_read_format_iso9660_cleanup(struct archive_read *a) { struct iso9660 *iso9660; int r = ARCHIVE_OK; iso9660 = (struct iso9660 *)(a->format->data); release_files(iso9660); free(iso9660->read_ce_req.reqs); archive_string_free(&iso9660->pathname); archive_string_free(&iso9660->previous_pathname); if (iso9660->pending_files.files) free(iso9660->pending_files.files); #ifdef HAVE_ZLIB_H free(iso9660->entry_zisofs.uncompressed_buffer); free(iso9660->entry_zisofs.block_pointers); if (iso9660->entry_zisofs.stream_valid) { if (inflateEnd(&iso9660->entry_zisofs.stream) != Z_OK) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to clean up zlib decompressor"); r = ARCHIVE_FATAL; } } #endif free(iso9660->utf16be_path); free(iso9660->utf16be_previous_path); free(iso9660); (a->format->data) = NULL; return (r); } /* * This routine parses a single ISO directory record, makes sense * of any extensions, and stores the result in memory. */ static struct file_info * parse_file_info(struct archive_read *a, struct file_info *parent, const unsigned char *isodirrec) { struct iso9660 *iso9660; struct file_info *file, *filep; size_t name_len; const unsigned char *rr_start, *rr_end; const unsigned char *p; size_t dr_len; uint64_t fsize, offset; int32_t location; int flags; iso9660 = (struct iso9660 *)(a->format->data); dr_len = (size_t)isodirrec[DR_length_offset]; name_len = (size_t)isodirrec[DR_name_len_offset]; location = archive_le32dec(isodirrec + DR_extent_offset); fsize = toi(isodirrec + DR_size_offset, DR_size_size); /* Sanity check that dr_len needs at least 34. */ if (dr_len < 34) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid length of directory record"); return (NULL); } /* Sanity check that name_len doesn't exceed dr_len. */ if (dr_len - 33 < name_len || name_len == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid length of file identifier"); return (NULL); } /* Sanity check that location doesn't exceed volume block. * Don't check lower limit of location; it's possibility * the location has negative value when file type is symbolic * link or file size is zero. As far as I know latest mkisofs * do that. 
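	 * In other words, the check below only rejects a positive
	 * location whose extent would run past the volume:
	 *   location + ceil(fsize / logical_block_size) > volume_block
	 * is treated as invalid.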
*/ if (location > 0 && (location + ((fsize + iso9660->logical_block_size -1) / iso9660->logical_block_size)) > (uint32_t)iso9660->volume_block) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid location of extent of file"); return (NULL); } /* Sanity check that location doesn't have a negative value * when the file is not empty. it's too large. */ if (fsize != 0 && location < 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid location of extent of file"); return (NULL); } /* Sanity check that this entry does not create a cycle. */ offset = iso9660->logical_block_size * (uint64_t)location; for (filep = parent; filep != NULL; filep = filep->parent) { if (filep->offset == offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Directory structure contains loop"); return (NULL); } } /* Create a new file entry and copy data from the ISO dir record. */ file = (struct file_info *)calloc(1, sizeof(*file)); if (file == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for file entry"); return (NULL); } file->parent = parent; file->offset = offset; file->size = fsize; file->mtime = isodate7(isodirrec + DR_date_offset); file->ctime = file->atime = file->mtime; file->rede_files.first = NULL; file->rede_files.last = &(file->rede_files.first); p = isodirrec + DR_name_offset; /* Rockridge extensions (if any) follow name. Compute this * before fidgeting the name_len below. */ rr_start = p + name_len + (name_len & 1 ? 0 : 1); rr_end = isodirrec + dr_len; if (iso9660->seenJoliet) { /* Joliet names are max 64 chars (128 bytes) according to spec, * but genisoimage/mkisofs allows recording longer Joliet * names which are 103 UCS2 characters(206 bytes) by their * option '-joliet-long'. */ if (name_len > 206) name_len = 206; name_len &= ~1; /* trim trailing first version and dot from filename. * * Remember we were in UTF-16BE land! * SEPARATOR 1 (.) and SEPARATOR 2 (;) are both * 16 bits big endian characters on Joliet. * * TODO: sanitize filename? * Joliet allows any UCS-2 char except: * *, /, :, ;, ? and \. */ /* Chop off trailing ';1' from files. */ if (name_len > 4 && p[name_len-4] == 0 && p[name_len-3] == ';' && p[name_len-2] == 0 && p[name_len-1] == '1') name_len -= 4; #if 0 /* XXX: this somehow manages to strip of single-character file extensions, like '.c'. */ /* Chop off trailing '.' from filenames. */ if (name_len > 2 && p[name_len-2] == 0 && p[name_len-1] == '.') name_len -= 2; #endif if ((file->utf16be_name = malloc(name_len)) == NULL) { archive_set_error(&a->archive, ENOMEM, "No memory for file name"); return (NULL); } memcpy(file->utf16be_name, p, name_len); file->utf16be_bytes = name_len; } else { /* Chop off trailing ';1' from files. */ if (name_len > 2 && p[name_len - 2] == ';' && p[name_len - 1] == '1') name_len -= 2; /* Chop off trailing '.' from filenames. */ if (name_len > 1 && p[name_len - 1] == '.') --name_len; archive_strncpy(&file->name, (const char *)p, name_len); } flags = isodirrec[DR_flags_offset]; if (flags & 0x02) file->mode = AE_IFDIR | 0700; else file->mode = AE_IFREG | 0400; if (flags & 0x80) file->multi_extent = 1; else file->multi_extent = 0; /* * Use a location for the file number, which is treated as an inode * number to find out hardlink target. If Rockridge extensions is * being used, the file number will be overwritten by FILE SERIAL * NUMBER of RRIP "PX" extension. * Note: Old mkisofs did not record that FILE SERIAL NUMBER * in ISO images. * Note2: xorriso set 0 to the location of a symlink file. 
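	 * In short: a zero-size entry with a non-negative location
	 * gets number = -1 so it can never be mistaken for a
	 * hardlink of a real file, while a negative location is
	 * still usable as a unique key.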
*/ if (file->size == 0 && location >= 0) { /* If file->size is zero, its location points wrong place, * and so we should not use it for the file number. * When the location has negative value, it can be used * for the file number. */ file->number = -1; /* Do not appear before any directory entries. */ file->offset = -1; } else file->number = (int64_t)(uint32_t)location; /* Rockridge extensions overwrite information from above. */ if (iso9660->opt_support_rockridge) { if (parent == NULL && rr_end - rr_start >= 7) { p = rr_start; if (memcmp(p, "SP\x07\x01\xbe\xef", 6) == 0) { /* * SP extension stores the suspOffset * (Number of bytes to skip between * filename and SUSP records.) * It is mandatory by the SUSP standard * (IEEE 1281). * * It allows SUSP to coexist with * non-SUSP uses of the System * Use Area by placing non-SUSP data * before SUSP data. * * SP extension must be in the root * directory entry, disable all SUSP * processing if not found. */ iso9660->suspOffset = p[6]; iso9660->seenSUSP = 1; rr_start += 7; } } if (iso9660->seenSUSP) { int r; file->name_continues = 0; file->symlink_continues = 0; rr_start += iso9660->suspOffset; r = parse_rockridge(a, file, rr_start, rr_end); if (r != ARCHIVE_OK) { free(file); return (NULL); } /* * A file size of symbolic link files in ISO images * made by makefs is not zero and its location is * the same as those of next regular file. That is * the same as hard like file and it causes unexpected * error. */ if (file->size > 0 && (file->mode & AE_IFMT) == AE_IFLNK) { file->size = 0; file->number = -1; file->offset = -1; } } else /* If there isn't SUSP, disable parsing * rock ridge extensions. */ iso9660->opt_support_rockridge = 0; } file->nlinks = 1;/* Reset nlink. we'll calculate it later. */ /* Tell file's parent how many children that parent has. */ if (parent != NULL && (flags & 0x02)) parent->subdirs++; if (iso9660->seenRockridge) { if (parent != NULL && parent->parent == NULL && (flags & 0x02) && iso9660->rr_moved == NULL && file->name.s && (strcmp(file->name.s, "rr_moved") == 0 || strcmp(file->name.s, ".rr_moved") == 0)) { iso9660->rr_moved = file; file->rr_moved = 1; file->rr_moved_has_re_only = 1; file->re = 0; parent->subdirs--; } else if (file->re) { /* * Sanity check: file's parent is rr_moved. */ if (parent == NULL || parent->rr_moved == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge RE"); return (NULL); } /* * Sanity check: file does not have "CL" extension. */ if (file->cl_offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge RE and CL"); return (NULL); } /* * Sanity check: The file type must be a directory. */ if ((flags & 0x02) == 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge RE"); return (NULL); } } else if (parent != NULL && parent->rr_moved) file->rr_moved_has_re_only = 0; else if (parent != NULL && (flags & 0x02) && (parent->re || parent->re_descendant)) file->re_descendant = 1; if (file->cl_offset) { struct file_info *r; if (parent == NULL || parent->parent == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); return (NULL); } /* * Sanity check: The file type must be a regular file. */ if ((flags & 0x02) != 0) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); return (NULL); } parent->subdirs++; /* Overwrite an offset and a number of this "CL" entry * to appear before other dirs. "+1" to those is to * make sure to appear after "RE" entry which this * "CL" entry should be connected with. 
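			 * Illustrative example (made-up numbers): if the
			 * "RE" dir was recorded at byte offset 40960, this
			 * "CL" entry gets offset/number 40961, so the heap,
			 * which is keyed by offset, sorts it immediately
			 * after the "RE" entry it refers to.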
*/ file->offset = file->number = file->cl_offset + 1; /* * Sanity check: cl_offset does not point at its * the parents or itself. */ for (r = parent; r; r = r->parent) { if (r->offset == file->cl_offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); return (NULL); } } if (file->cl_offset == file->offset || parent->rr_moved) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); return (NULL); } } } #if DEBUG /* DEBUGGING: Warn about attributes I don't yet fully support. */ if ((flags & ~0x02) != 0) { fprintf(stderr, "\n ** Unrecognized flag: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (toi(isodirrec + DR_volume_sequence_number_offset, 2) != 1) { fprintf(stderr, "\n ** Unrecognized sequence number: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (*(isodirrec + DR_file_unit_size_offset) != 0) { fprintf(stderr, "\n ** Unexpected file unit size: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (*(isodirrec + DR_interleave_offset) != 0) { fprintf(stderr, "\n ** Unexpected interleave: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (*(isodirrec + DR_ext_attr_length_offset) != 0) { fprintf(stderr, "\n ** Unexpected extended attribute length: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } #endif register_file(iso9660, file); return (file); } static int parse_rockridge(struct archive_read *a, struct file_info *file, const unsigned char *p, const unsigned char *end) { struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); while (p + 4 <= end /* Enough space for another entry. */ && p[0] >= 'A' && p[0] <= 'Z' /* Sanity-check 1st char of name. */ && p[1] >= 'A' && p[1] <= 'Z' /* Sanity-check 2nd char of name. */ && p[2] >= 4 /* Sanity-check length. */ && p + p[2] <= end) { /* Sanity-check length. */ const unsigned char *data = p + 4; int data_length = p[2] - 4; int version = p[3]; switch(p[0]) { case 'C': if (p[1] == 'E') { if (version == 1 && data_length == 24) { /* * CE extension comprises: * 8 byte sector containing extension * 8 byte offset w/in above sector * 8 byte length of continuation */ int32_t location = archive_le32dec(data); file->ce_offset = archive_le32dec(data+8); file->ce_size = archive_le32dec(data+16); if (register_CE(a, location, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); } } else if (p[1] == 'L') { if (version == 1 && data_length == 8) { file->cl_offset = (uint64_t) iso9660->logical_block_size * (uint64_t)archive_le32dec(data); iso9660->seenRockridge = 1; } } break; case 'N': if (p[1] == 'M') { if (version == 1) { parse_rockridge_NM1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'P': /* * PD extension is padding; * contents are always ignored. * * PL extension won't appear; * contents are always ignored. */ if (p[1] == 'N') { if (version == 1 && data_length == 16) { file->rdev = toi(data,4); file->rdev <<= 32; file->rdev |= toi(data + 8, 4); iso9660->seenRockridge = 1; } } else if (p[1] == 'X') { /* * PX extension comprises: * 8 bytes for mode, * 8 bytes for nlinks, * 8 bytes for uid, * 8 bytes for gid, * 8 bytes for inode. 
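			 * Each field is stored in ISO9660 both-byte-order
			 * form (4 bytes little-endian followed by 4 bytes
			 * big-endian), which is why the code below reads
			 * 4 bytes with toi() at strides of 8. The inode
			 * (file serial number) field is optional; older
			 * RRIP revisions wrote PX entries without it.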
*/ if (version == 1) { if (data_length >= 8) file->mode = toi(data, 4); if (data_length >= 16) file->nlinks = toi(data + 8, 4); if (data_length >= 24) file->uid = toi(data + 16, 4); if (data_length >= 32) file->gid = toi(data + 24, 4); if (data_length >= 40) file->number = toi(data + 32, 4); iso9660->seenRockridge = 1; } } break; case 'R': if (p[1] == 'E' && version == 1) { file->re = 1; iso9660->seenRockridge = 1; } else if (p[1] == 'R' && version == 1) { /* * RR extension comprises: * one byte flag value * This extension is obsolete, * so contents are always ignored. */ } break; case 'S': if (p[1] == 'L') { if (version == 1) { parse_rockridge_SL1(file, data, data_length); iso9660->seenRockridge = 1; } } else if (p[1] == 'T' && data_length == 0 && version == 1) { /* * ST extension marks end of this * block of SUSP entries. * * It allows SUSP to coexist with * non-SUSP uses of the System * Use Area by placing non-SUSP data * after SUSP data. */ iso9660->seenSUSP = 0; iso9660->seenRockridge = 0; return (ARCHIVE_OK); } break; case 'T': if (p[1] == 'F') { if (version == 1) { parse_rockridge_TF1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'Z': if (p[1] == 'F') { if (version == 1) parse_rockridge_ZF1(file, data, data_length); } break; default: break; } p += p[2]; } return (ARCHIVE_OK); } static int register_CE(struct archive_read *a, int32_t location, struct file_info *file) { struct iso9660 *iso9660; struct read_ce_queue *heap; struct read_ce_req *p; uint64_t offset, parent_offset; int hole, parent; iso9660 = (struct iso9660 *)(a->format->data); offset = ((uint64_t)location) * (uint64_t)iso9660->logical_block_size; if (((file->mode & AE_IFMT) == AE_IFREG && offset >= file->offset) || offset < iso9660->current_position || (((uint64_t)file->ce_offset) + file->ce_size) > (uint64_t)iso9660->logical_block_size || offset + file->ce_offset + file->ce_size > iso9660->volume_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid parameter in SUSP \"CE\" extension"); return (ARCHIVE_FATAL); } /* Expand our CE list as necessary. */ heap = &(iso9660->read_ce_req); if (heap->cnt >= heap->allocated) { int new_size; if (heap->allocated < 16) new_size = 16; else new_size = heap->allocated * 2; /* Overflow might keep us from growing the list. */ if (new_size <= heap->allocated) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } p = calloc(new_size, sizeof(p[0])); if (p == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } if (heap->reqs != NULL) { memcpy(p, heap->reqs, heap->cnt * sizeof(*p)); free(heap->reqs); } heap->reqs = p; heap->allocated = new_size; } /* * Start with hole at end, walk it up tree to find insertion point. */ hole = heap->cnt++; while (hole > 0) { parent = (hole - 1)/2; parent_offset = heap->reqs[parent].offset; if (offset >= parent_offset) { heap->reqs[hole].offset = offset; heap->reqs[hole].file = file; return (ARCHIVE_OK); } /* Move parent into hole <==> move hole up tree. */ heap->reqs[hole] = heap->reqs[parent]; hole = parent; } heap->reqs[0].offset = offset; heap->reqs[0].file = file; return (ARCHIVE_OK); } static void next_CE(struct read_ce_queue *heap) { uint64_t a_offset, b_offset, c_offset; int a, b, c; struct read_ce_req tmp; if (heap->cnt < 1) return; /* * Move the last item in the heap to the root of the tree */ heap->reqs[0] = heap->reqs[--(heap->cnt)]; /* * Rebalance the heap. 
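	 * This is a standard binary min-heap sift-down over an
	 * array: the children of element a live at 2*a + 1 and
	 * 2*a + 2, and the smaller child is swapped upward until
	 * the heap property (parent offset <= child offsets)
	 * holds again.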
*/ a = 0; /* Starting element and its offset */ a_offset = heap->reqs[a].offset; for (;;) { b = a + a + 1; /* First child */ if (b >= heap->cnt) return; b_offset = heap->reqs[b].offset; c = b + 1; /* Use second child if it is smaller. */ if (c < heap->cnt) { c_offset = heap->reqs[c].offset; if (c_offset < b_offset) { b = c; b_offset = c_offset; } } if (a_offset <= b_offset) return; tmp = heap->reqs[a]; heap->reqs[a] = heap->reqs[b]; heap->reqs[b] = tmp; a = b; } } static int read_CE(struct archive_read *a, struct iso9660 *iso9660) { struct read_ce_queue *heap; const unsigned char *b, *p, *end; struct file_info *file; size_t step; int r; /* Read data which RRIP "CE" extension points. */ heap = &(iso9660->read_ce_req); step = iso9660->logical_block_size; while (heap->cnt && heap->reqs[0].offset == iso9660->current_position) { b = __archive_read_ahead(a, step, NULL); if (b == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } do { file = heap->reqs[0].file; if (file->ce_offset + file->ce_size > step) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Malformed CE information"); return (ARCHIVE_FATAL); } p = b + file->ce_offset; end = p + file->ce_size; next_CE(heap); r = parse_rockridge(a, file, p, end); if (r != ARCHIVE_OK) return (ARCHIVE_FATAL); } while (heap->cnt && heap->reqs[0].offset == iso9660->current_position); /* NOTE: Do not move this consume's code to fron of * do-while loop. Registration of nested CE extension * might cause error because of current position. */ __archive_read_consume(a, step); iso9660->current_position += step; } return (ARCHIVE_OK); } static void parse_rockridge_NM1(struct file_info *file, const unsigned char *data, int data_length) { if (!file->name_continues) archive_string_empty(&file->name); file->name_continues = 0; if (data_length < 1) return; /* * NM version 1 extension comprises: * 1 byte flag, value is one of: * = 0: remainder is name * = 1: remainder is name, next NM entry continues name * = 2: "." * = 4: ".." * = 32: Implementation specific * All other values are reserved. */ switch(data[0]) { case 0: if (data_length < 2) return; archive_strncat(&file->name, (const char *)data + 1, data_length - 1); break; case 1: if (data_length < 2) return; archive_strncat(&file->name, (const char *)data + 1, data_length - 1); file->name_continues = 1; break; case 2: archive_strcat(&file->name, "."); break; case 4: archive_strcat(&file->name, ".."); break; default: return; } } static void parse_rockridge_TF1(struct file_info *file, const unsigned char *data, int data_length) { char flag; /* * TF extension comprises: * one byte flag * create time (optional) * modify time (optional) * access time (optional) * attribute time (optional) * Time format and presence of fields * is controlled by flag bits. */ if (data_length < 1) return; flag = data[0]; ++data; --data_length; if (flag & 0x80) { /* Use 17-byte time format. */ if ((flag & 1) && data_length >= 17) { /* Create time. */ file->birthtime_is_set = 1; file->birthtime = isodate17(data); data += 17; data_length -= 17; } if ((flag & 2) && data_length >= 17) { /* Modify time. */ file->mtime = isodate17(data); data += 17; data_length -= 17; } if ((flag & 4) && data_length >= 17) { /* Access time. */ file->atime = isodate17(data); data += 17; data_length -= 17; } if ((flag & 8) && data_length >= 17) { /* Attribute change time. */ file->ctime = isodate17(data); } } else { /* Use 7-byte time format. 
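		 * The 7-byte form is the ISO9660 binary date used in
		 * directory records: years-since-1900, month, day,
		 * hour, minute, second, plus a signed timezone byte
		 * counted in 15-minute units (decoded by isodate7()).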
*/ if ((flag & 1) && data_length >= 7) { /* Create time. */ file->birthtime_is_set = 1; file->birthtime = isodate7(data); data += 7; data_length -= 7; } if ((flag & 2) && data_length >= 7) { /* Modify time. */ file->mtime = isodate7(data); data += 7; data_length -= 7; } if ((flag & 4) && data_length >= 7) { /* Access time. */ file->atime = isodate7(data); data += 7; data_length -= 7; } if ((flag & 8) && data_length >= 7) { /* Attribute change time. */ file->ctime = isodate7(data); } } } static void parse_rockridge_SL1(struct file_info *file, const unsigned char *data, int data_length) { const char *separator = ""; if (!file->symlink_continues || file->symlink.length < 1) archive_string_empty(&file->symlink); file->symlink_continues = 0; /* * Defined flag values: * 0: This is the last SL record for this symbolic link * 1: this symbolic link field continues in next SL entry * All other values are reserved. */ if (data_length < 1) return; switch(*data) { case 0: break; case 1: file->symlink_continues = 1; break; default: return; } ++data; /* Skip flag byte. */ --data_length; /* * SL extension body stores "components". * Basically, this is a complicated way of storing * a POSIX path. It also interferes with using * symlinks for storing non-path data. <sigh> * * Each component is 2 bytes (flag and length) * possibly followed by name data. */ while (data_length >= 2) { unsigned char flag = *data++; unsigned char nlen = *data++; data_length -= 2; archive_strcat(&file->symlink, separator); separator = "/"; switch(flag) { case 0: /* Usual case, this is text. */ if (data_length < nlen) return; archive_strncat(&file->symlink, (const char *)data, nlen); break; case 0x01: /* Text continues in next component. */ if (data_length < nlen) return; archive_strncat(&file->symlink, (const char *)data, nlen); separator = ""; break; case 0x02: /* Current dir. */ archive_strcat(&file->symlink, "."); break; case 0x04: /* Parent dir. */ archive_strcat(&file->symlink, ".."); break; case 0x08: /* Root of filesystem. */ archive_strcat(&file->symlink, "/"); separator = ""; break; case 0x10: /* Undefined (historically "volume root" */ archive_string_empty(&file->symlink); archive_strcat(&file->symlink, "ROOT"); break; case 0x20: /* Undefined (historically "hostname") */ archive_strcat(&file->symlink, "hostname"); break; default: /* TODO: issue a warning ? */ return; } data += nlen; data_length -= nlen; } } static void parse_rockridge_ZF1(struct file_info *file, const unsigned char *data, int data_length) { if (data[0] == 0x70 && data[1] == 0x7a && data_length == 12) { /* paged zlib */ file->pz = 1; file->pz_log2_bs = data[3]; file->pz_uncompressed_size = archive_le32dec(&data[4]); } } static void register_file(struct iso9660 *iso9660, struct file_info *file) { file->use_next = iso9660->use_files; iso9660->use_files = file; } static void release_files(struct iso9660 *iso9660) { struct content *con, *connext; struct file_info *file; file = iso9660->use_files; while (file != NULL) { struct file_info *next = file->use_next; archive_string_free(&file->name); archive_string_free(&file->symlink); free(file->utf16be_name); con = file->contents.first; while (con != NULL) { connext = con->next; free(con); con = connext; } free(file); file = next; } } static int next_entry_seek(struct archive_read *a, struct iso9660 *iso9660, struct file_info **pfile) { struct file_info *file; int r; r = next_cache_entry(a, iso9660, pfile); if (r != ARCHIVE_OK) return (r); file = *pfile; /* Don't waste time seeking for zero-length bodies. 
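	 * (Their recorded offset may be garbage anyway; see the
	 * notes in parse_file_info() about zero-size entries.)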
	 */
	if (file->size == 0)
		file->offset = iso9660->current_position;

	/* Flush any remaining bytes from the last round to ensure
	 * we're positioned. */
	if (iso9660->entry_bytes_unconsumed) {
		__archive_read_consume(a, iso9660->entry_bytes_unconsumed);
		iso9660->entry_bytes_unconsumed = 0;
	}

	/* Seek forward to the start of the entry. */
	if (iso9660->current_position < file->offset) {
		int64_t step;

		step = file->offset - iso9660->current_position;
		step = __archive_read_consume(a, step);
		if (step < 0)
			return ((int)step);
		iso9660->current_position = file->offset;
	}

	/* We found the body of the file; handle it now. */
	return (ARCHIVE_OK);
}

static int
next_cache_entry(struct archive_read *a, struct iso9660 *iso9660,
    struct file_info **pfile)
{
	struct file_info *file;
	struct {
		struct file_info	*first;
		struct file_info	**last;
	}	empty_files;
	int64_t number;
	int count;

	file = cache_get_entry(iso9660);
	if (file != NULL) {
		*pfile = file;
		return (ARCHIVE_OK);
	}

	for (;;) {
		struct file_info *re, *d;

		*pfile = file = next_entry(iso9660);
		if (file == NULL) {
			/*
			 * If directory entries which are descendants of
			 * rr_moved are still remaining, expose them.
			 */
			if (iso9660->re_files.first != NULL &&
			    iso9660->rr_moved != NULL &&
			    iso9660->rr_moved->rr_moved_has_re_only)
				/* Expose "rr_moved" entry. */
				cache_add_entry(iso9660, iso9660->rr_moved);
			while ((re = re_get_entry(iso9660)) != NULL) {
				/* Expose its descendant dirs. */
				while ((d = rede_get_entry(re)) != NULL)
					cache_add_entry(iso9660, d);
			}
			if (iso9660->cache_files.first != NULL)
				return (next_cache_entry(a, iso9660, pfile));
			return (ARCHIVE_EOF);
		}

		if (file->cl_offset) {
			struct file_info *first_re = NULL;
			int nexted_re = 0;

			/*
			 * Find the "RE" dir for the current file,
			 * which has the "CL" flag.
			 */
			while ((re = re_get_entry(iso9660))
			    != first_re) {
				if (first_re == NULL)
					first_re = re;
				if (re->offset == file->cl_offset) {
					re->parent->subdirs--;
					re->parent = file->parent;
					re->re = 0;
					if (re->parent->re_descendant) {
						nexted_re = 1;
						re->re_descendant = 1;
						if (rede_add_entry(re) < 0)
							goto fatal_rr;
						/* Move the list of descendants
						 * to the new ancestor. */
						while ((d = rede_get_entry(
						    re)) != NULL)
							if (rede_add_entry(d)
							    < 0)
								goto fatal_rr;
						break;
					}
					/* Replace the current file
					 * with the "RE" dir. */
					*pfile = file = re;
					/* Expose its descendants. */
					while ((d = rede_get_entry(
					    file)) != NULL)
						cache_add_entry(
						    iso9660, d);
					break;
				} else
					re_add_entry(iso9660, re);
			}
			if (nexted_re) {
				/*
				 * Do not expose this entry at this time
				 * because we have not gotten its full-path
				 * name yet.
				 */
				continue;
			}
		} else if ((file->mode & AE_IFMT) == AE_IFDIR) {
			int r;

			/* Read file entries in this dir. */
			r = read_children(a, file);
			if (r != ARCHIVE_OK)
				return (r);

			/*
			 * Handle the special Rockridge extension dir,
			 * "rr_moved".
			 */
			if (file->rr_moved) {
				/*
				 * If this has only subdirectories which
				 * have "RE" flags, do not expose it at
				 * this time.
				 */
				if (file->rr_moved_has_re_only)
					continue;
				/* Otherwise expose the "rr_moved" entry. */
			} else if (file->re) {
				/*
				 * Do not expose this entry at this time
				 * because we have not gotten its full-path
				 * name yet.
				 */
				re_add_entry(iso9660, file);
				continue;
			} else if (file->re_descendant) {
				/*
				 * If the top level "RE" entry of this entry
				 * is not exposed, we, accordingly, should not
				 * expose this entry at this time because
				 * we cannot make its proper full-path name.
				 */
				if (rede_add_entry(file) == 0)
					continue;
				/* Otherwise we can expose this entry because
				 * it seems its top level "RE" has already been
				 * exposed.
				 */
			}
		}
		break;
	}

	if ((file->mode & AE_IFMT) != AE_IFREG || file->number == -1)
		return (ARCHIVE_OK);

	count = 0;
	number = file->number;
	iso9660->cache_files.first = NULL;
	iso9660->cache_files.last = &(iso9660->cache_files.first);
	empty_files.first = NULL;
	empty_files.last = &empty_files.first;
	/* Collect files which have the same file serial number.
	 * Peek at pending_files so that a file whose number is
	 * different is not put back. */
	while (iso9660->pending_files.used > 0 &&
	    (iso9660->pending_files.files[0]->number == -1 ||
	     iso9660->pending_files.files[0]->number == number)) {
		if (file->number == -1) {
			/* This file has the same offset as others,
			 * but it is the bogus offset which empty files
			 * and symlink files share.
			 * NOTE: That wrong offset was recorded by the
			 * old mkisofs utility. ISO images created by
			 * a recent mkisofs do not have this problem.
			 */
			file->next = NULL;
			*empty_files.last = file;
			empty_files.last = &(file->next);
		} else {
			count++;
			cache_add_entry(iso9660, file);
		}
		file = next_entry(iso9660);
	}
	if (count == 0) {
		*pfile = file;
		return ((file == NULL)?ARCHIVE_EOF:ARCHIVE_OK);
	}
	if (file->number == -1) {
		file->next = NULL;
		*empty_files.last = file;
		empty_files.last = &(file->next);
	} else {
		count++;
		cache_add_entry(iso9660, file);
	}

	if (count > 1) {
		/* The count equals the number of hardlinks, so the
		 * nlinks of every file in cache_files is overwritten
		 * with that count.
		 */
		for (file = iso9660->cache_files.first;
		    file != NULL; file = file->next)
			file->nlinks = count;
	}

	/* If there are empty files, they are appended to the
	 * tail of cache_files. */
	if (empty_files.first != NULL) {
		*iso9660->cache_files.last = empty_files.first;
		iso9660->cache_files.last = empty_files.last;
	}
	*pfile = cache_get_entry(iso9660);
	return ((*pfile == NULL)?ARCHIVE_EOF:ARCHIVE_OK);

fatal_rr:
	archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
	    "Failed to connect 'CL' pointer to 'RE' rr_moved pointer of "
	    "Rockridge extensions: current position = %jd, CL offset = %jd",
	    (intmax_t)iso9660->current_position,
	    (intmax_t)file->cl_offset);
	return (ARCHIVE_FATAL);
}

static inline void
re_add_entry(struct iso9660 *iso9660, struct file_info *file)
{
	file->re_next = NULL;
	*iso9660->re_files.last = file;
	iso9660->re_files.last = &(file->re_next);
}

static inline struct file_info *
re_get_entry(struct iso9660 *iso9660)
{
	struct file_info *file;

	if ((file = iso9660->re_files.first) != NULL) {
		iso9660->re_files.first = file->re_next;
		if (iso9660->re_files.first == NULL)
			iso9660->re_files.last =
			    &(iso9660->re_files.first);
	}
	return (file);
}

static inline int
rede_add_entry(struct file_info *file)
{
	struct file_info *re;

	/*
	 * Find the "RE" entry.
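	 * The nearest ancestor flagged "RE" owns the list of
	 * deferred descendants, so walk up the parent chain until
	 * one is found; returning -1 here means a stray entry with
	 * no "RE" ancestor, which the caller treats as fatal.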
*/ re = file->parent; while (re != NULL && !re->re) re = re->parent; if (re == NULL) return (-1); file->re_next = NULL; *re->rede_files.last = file; re->rede_files.last = &(file->re_next); return (0); } static inline struct file_info * rede_get_entry(struct file_info *re) { struct file_info *file; if ((file = re->rede_files.first) != NULL) { re->rede_files.first = file->re_next; if (re->rede_files.first == NULL) re->rede_files.last = &(re->rede_files.first); } return (file); } static inline void cache_add_entry(struct iso9660 *iso9660, struct file_info *file) { file->next = NULL; *iso9660->cache_files.last = file; iso9660->cache_files.last = &(file->next); } static inline struct file_info * cache_get_entry(struct iso9660 *iso9660) { struct file_info *file; if ((file = iso9660->cache_files.first) != NULL) { iso9660->cache_files.first = file->next; if (iso9660->cache_files.first == NULL) iso9660->cache_files.last = &(iso9660->cache_files.first); } return (file); } static int heap_add_entry(struct archive_read *a, struct heap_queue *heap, struct file_info *file, uint64_t key) { uint64_t file_key, parent_key; int hole, parent; /* Expand our pending files list as necessary. */ if (heap->used >= heap->allocated) { struct file_info **new_pending_files; int new_size = heap->allocated * 2; if (heap->allocated < 1024) new_size = 1024; /* Overflow might keep us from growing the list. */ if (new_size <= heap->allocated) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } new_pending_files = (struct file_info **) malloc(new_size * sizeof(new_pending_files[0])); if (new_pending_files == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } memcpy(new_pending_files, heap->files, heap->allocated * sizeof(new_pending_files[0])); if (heap->files != NULL) free(heap->files); heap->files = new_pending_files; heap->allocated = new_size; } file_key = file->key = key; /* * Start with hole at end, walk it up tree to find insertion point. */ hole = heap->used++; while (hole > 0) { parent = (hole - 1)/2; parent_key = heap->files[parent]->key; if (file_key >= parent_key) { heap->files[hole] = file; return (ARCHIVE_OK); } /* Move parent into hole <==> move hole up tree. */ heap->files[hole] = heap->files[parent]; hole = parent; } heap->files[0] = file; return (ARCHIVE_OK); } static struct file_info * heap_get_entry(struct heap_queue *heap) { uint64_t a_key, b_key, c_key; int a, b, c; struct file_info *r, *tmp; if (heap->used < 1) return (NULL); /* * The first file in the list is the earliest; we'll return this. */ r = heap->files[0]; /* * Move the last item in the heap to the root of the tree */ heap->files[0] = heap->files[--(heap->used)]; /* * Rebalance the heap. */ a = 0; /* Starting element and its heap key */ a_key = heap->files[a]->key; for (;;) { b = a + a + 1; /* First child */ if (b >= heap->used) return (r); b_key = heap->files[b]->key; c = b + 1; /* Use second child if it is smaller. 
*/ if (c < heap->used) { c_key = heap->files[c]->key; if (c_key < b_key) { b = c; b_key = c_key; } } if (a_key <= b_key) return (r); tmp = heap->files[a]; heap->files[a] = heap->files[b]; heap->files[b] = tmp; a = b; } } static unsigned int toi(const void *p, int n) { const unsigned char *v = (const unsigned char *)p; if (n > 1) return v[0] + 256 * toi(v + 1, n - 1); if (n == 1) return v[0]; return (0); } static time_t isodate7(const unsigned char *v) { struct tm tm; int offset; time_t t; memset(&tm, 0, sizeof(tm)); tm.tm_year = v[0]; tm.tm_mon = v[1] - 1; tm.tm_mday = v[2]; tm.tm_hour = v[3]; tm.tm_min = v[4]; tm.tm_sec = v[5]; /* v[6] is the signed timezone offset, in 1/4-hour increments. */ offset = ((const signed char *)v)[6]; if (offset > -48 && offset < 52) { tm.tm_hour -= offset / 4; tm.tm_min -= (offset % 4) * 15; } t = time_from_tm(&tm); if (t == (time_t)-1) return ((time_t)0); return (t); } static time_t isodate17(const unsigned char *v) { struct tm tm; int offset; time_t t; memset(&tm, 0, sizeof(tm)); tm.tm_year = (v[0] - '0') * 1000 + (v[1] - '0') * 100 + (v[2] - '0') * 10 + (v[3] - '0') - 1900; tm.tm_mon = (v[4] - '0') * 10 + (v[5] - '0'); tm.tm_mday = (v[6] - '0') * 10 + (v[7] - '0'); tm.tm_hour = (v[8] - '0') * 10 + (v[9] - '0'); tm.tm_min = (v[10] - '0') * 10 + (v[11] - '0'); tm.tm_sec = (v[12] - '0') * 10 + (v[13] - '0'); /* v[16] is the signed timezone offset, in 1/4-hour increments. */ offset = ((const signed char *)v)[16]; if (offset > -48 && offset < 52) { tm.tm_hour -= offset / 4; tm.tm_min -= (offset % 4) * 15; } t = time_from_tm(&tm); if (t == (time_t)-1) return ((time_t)0); return (t); } static time_t time_from_tm(struct tm *t) { #if HAVE_TIMEGM /* Use platform timegm() if available. */ return (timegm(t)); #elif HAVE__MKGMTIME64 return (_mkgmtime64(t)); #else /* Else use direct calculation using POSIX assumptions. */ /* First, fix up tm_yday based on the year/month/day. */ if (mktime(t) == (time_t)-1) return ((time_t)-1); /* Then we can compute timegm() from first principles. */ return (t->tm_sec + t->tm_min * 60 + t->tm_hour * 3600 + t->tm_yday * 86400 + (t->tm_year - 70) * 31536000 + ((t->tm_year - 69) / 4) * 86400 - ((t->tm_year - 1) / 100) * 86400 + ((t->tm_year + 299) / 400) * 86400); #endif } static const char * build_pathname(struct archive_string *as, struct file_info *file, int depth) { // Plain ISO9660 only allows 8 dir levels; if we get // to 1000, then something is very, very wrong. if (depth > 1000) { return NULL; } if (file->parent != NULL && archive_strlen(&file->parent->name) > 0) { if (build_pathname(as, file->parent, depth + 1) == NULL) { return NULL; } archive_strcat(as, "/"); } if (archive_strlen(&file->name) == 0) archive_strcat(as, "."); else archive_string_concat(as, &file->name); return (as->s); } static int build_pathname_utf16be(unsigned char *p, size_t max, size_t *len, struct file_info *file) { if (file->parent != NULL && file->parent->utf16be_bytes > 0) { if (build_pathname_utf16be(p, max, len, file->parent) != 0) return (-1); p[*len] = 0; p[*len + 1] = '/'; *len += 2; } if (file->utf16be_bytes == 0) { if (*len + 2 > max) return (-1);/* Path is too long! */ p[*len] = 0; p[*len + 1] = '.'; *len += 2; } else { if (*len + file->utf16be_bytes > max) return (-1);/* Path is too long! 
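			 * The caller passes max in bytes; since Joliet
			 * names are raw UTF-16BE, every component and
			 * every separator is a 2-byte unit (0x00 '/'),
			 * and *len always advances in multiples of two.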
			 */
		memcpy(p + *len, file->utf16be_name, file->utf16be_bytes);
		*len += file->utf16be_bytes;
	}
	return (0);
}

#if DEBUG
static void
dump_isodirrec(FILE *out, const unsigned char *isodirrec)
{
	fprintf(out, " l %d,",
	    toi(isodirrec + DR_length_offset, DR_length_size));
	fprintf(out, " a %d,",
	    toi(isodirrec + DR_ext_attr_length_offset, DR_ext_attr_length_size));
	fprintf(out, " ext 0x%x,",
	    toi(isodirrec + DR_extent_offset, DR_extent_size));
	fprintf(out, " s %d,",
	    toi(isodirrec + DR_size_offset, DR_size_size));
	fprintf(out, " f 0x%x,",
	    toi(isodirrec + DR_flags_offset, DR_flags_size));
	fprintf(out, " u %d,",
	    toi(isodirrec + DR_file_unit_size_offset, DR_file_unit_size_size));
	fprintf(out, " ilv %d,",
	    toi(isodirrec + DR_interleave_offset, DR_interleave_size));
	fprintf(out, " seq %d,",
	    toi(isodirrec + DR_volume_sequence_number_offset,
		DR_volume_sequence_number_size));
	fprintf(out, " nl %d:",
	    toi(isodirrec + DR_name_len_offset, DR_name_len_size));
	fprintf(out, " `%.*s'",
	    toi(isodirrec + DR_name_len_offset, DR_name_len_size),
	    isodirrec + DR_name_offset);
}
#endif
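/*
 * A minimal, illustrative sketch of how a client drives this
 * reader through the public libarchive API, kept under "#if 0"
 * (like the disabled fragment in parse_file_info() above) so it
 * never builds. The file name passed in and the 10240-byte read
 * block size are arbitrary example values, not anything this
 * module requires.
 */
#if 0
#include <archive.h>
#include <archive_entry.h>
#include <stdio.h>

static int
list_iso_contents(const char *path)
{
	struct archive *a = archive_read_new();
	struct archive_entry *entry;
	char buf[8192];
	ssize_t n;
	int r;

	archive_read_support_format_iso9660(a);
	if (archive_read_open_filename(a, path, 10240) != ARCHIVE_OK) {
		fprintf(stderr, "%s\n", archive_error_string(a));
		archive_read_free(a);
		return (-1);
	}
	while ((r = archive_read_next_header(a, &entry)) == ARCHIVE_OK) {
		printf("%s\n", archive_entry_pathname(entry));
		/* Drain the body; this format reads strictly forward. */
		while ((n = archive_read_data(a, buf, sizeof(buf))) > 0)
			continue;
		if (n < 0)
			break;
	}
	archive_read_free(a);
	return (r == ARCHIVE_EOF) ? 0 : -1;
}
#endif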
/*- * Copyright (c) 2003-2007 Tim Kientzle * Copyright (c) 2009 Andreas Henriksson <andreas@fatal.se> * Copyright (c) 2009-2012 Michihiro NAKAJIMA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "archive_platform.h" __FBSDID("$FreeBSD: head/lib/libarchive/archive_read_support_format_iso9660.c 201246 2009-12-30 05:30:35Z kientzle $"); #ifdef HAVE_ERRNO_H #include <errno.h> #endif /* #include <stdint.h> */ /* See archive_platform.h */ #include <stdio.h> #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef HAVE_STRING_H #include <string.h> #endif #include <time.h> #ifdef HAVE_ZLIB_H #include <zlib.h> #endif #include "archive.h" #include "archive_endian.h" #include "archive_entry.h" #include "archive_entry_locale.h" #include "archive_private.h" #include "archive_read_private.h" #include "archive_string.h" /* * An overview of ISO 9660 format: * * Each disk is laid out as follows: * * 32k reserved for private use * * Volume descriptor table. Each volume descriptor * is 2k and specifies basic format information. * The "Primary Volume Descriptor" (PVD) is defined by the * standard and should always be present; other volume * descriptors include various vendor-specific extensions. * * Files and directories. Each file/dir is specified by * an "extent" (starting sector and length in bytes). * Dirs are just files with directory records packed one * after another. The PVD contains a single dir entry * specifying the location of the root directory. Everything * else follows from there. * * This module works by first reading the volume descriptors, then * building a list of directory entries, sorted by starting * sector. At each step, I look for the earliest dir entry that * hasn't yet been read, seek forward to that location and read * that entry. If it's a dir, I slurp in the new dir entries and * add them to the heap; if it's a regular file, I return the * corresponding archive_entry and wait for the client to request * the file body. This strategy allows us to read most compliant * CDs with a single pass through the data, as required by libarchive. */ #define LOGICAL_BLOCK_SIZE 2048 #define SYSTEM_AREA_BLOCK 16 /* Structure of on-disk primary volume descriptor. 
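 * The multi-byte integer fields below follow the ECMA-119
 * both-byte-order convention: each number is recorded once
 * little-endian and once big-endian (hence 8-byte "size"
 * entries for 32-bit values), and every offset is fixed by
 * the standard.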
*/ #define PVD_type_offset 0 #define PVD_type_size 1 #define PVD_id_offset (PVD_type_offset + PVD_type_size) #define PVD_id_size 5 #define PVD_version_offset (PVD_id_offset + PVD_id_size) #define PVD_version_size 1 #define PVD_reserved1_offset (PVD_version_offset + PVD_version_size) #define PVD_reserved1_size 1 #define PVD_system_id_offset (PVD_reserved1_offset + PVD_reserved1_size) #define PVD_system_id_size 32 #define PVD_volume_id_offset (PVD_system_id_offset + PVD_system_id_size) #define PVD_volume_id_size 32 #define PVD_reserved2_offset (PVD_volume_id_offset + PVD_volume_id_size) #define PVD_reserved2_size 8 #define PVD_volume_space_size_offset (PVD_reserved2_offset + PVD_reserved2_size) #define PVD_volume_space_size_size 8 #define PVD_reserved3_offset (PVD_volume_space_size_offset + PVD_volume_space_size_size) #define PVD_reserved3_size 32 #define PVD_volume_set_size_offset (PVD_reserved3_offset + PVD_reserved3_size) #define PVD_volume_set_size_size 4 #define PVD_volume_sequence_number_offset (PVD_volume_set_size_offset + PVD_volume_set_size_size) #define PVD_volume_sequence_number_size 4 #define PVD_logical_block_size_offset (PVD_volume_sequence_number_offset + PVD_volume_sequence_number_size) #define PVD_logical_block_size_size 4 #define PVD_path_table_size_offset (PVD_logical_block_size_offset + PVD_logical_block_size_size) #define PVD_path_table_size_size 8 #define PVD_type_1_path_table_offset (PVD_path_table_size_offset + PVD_path_table_size_size) #define PVD_type_1_path_table_size 4 #define PVD_opt_type_1_path_table_offset (PVD_type_1_path_table_offset + PVD_type_1_path_table_size) #define PVD_opt_type_1_path_table_size 4 #define PVD_type_m_path_table_offset (PVD_opt_type_1_path_table_offset + PVD_opt_type_1_path_table_size) #define PVD_type_m_path_table_size 4 #define PVD_opt_type_m_path_table_offset (PVD_type_m_path_table_offset + PVD_type_m_path_table_size) #define PVD_opt_type_m_path_table_size 4 #define PVD_root_directory_record_offset (PVD_opt_type_m_path_table_offset + PVD_opt_type_m_path_table_size) #define PVD_root_directory_record_size 34 #define PVD_volume_set_id_offset (PVD_root_directory_record_offset + PVD_root_directory_record_size) #define PVD_volume_set_id_size 128 #define PVD_publisher_id_offset (PVD_volume_set_id_offset + PVD_volume_set_id_size) #define PVD_publisher_id_size 128 #define PVD_preparer_id_offset (PVD_publisher_id_offset + PVD_publisher_id_size) #define PVD_preparer_id_size 128 #define PVD_application_id_offset (PVD_preparer_id_offset + PVD_preparer_id_size) #define PVD_application_id_size 128 #define PVD_copyright_file_id_offset (PVD_application_id_offset + PVD_application_id_size) #define PVD_copyright_file_id_size 37 #define PVD_abstract_file_id_offset (PVD_copyright_file_id_offset + PVD_copyright_file_id_size) #define PVD_abstract_file_id_size 37 #define PVD_bibliographic_file_id_offset (PVD_abstract_file_id_offset + PVD_abstract_file_id_size) #define PVD_bibliographic_file_id_size 37 #define PVD_creation_date_offset (PVD_bibliographic_file_id_offset + PVD_bibliographic_file_id_size) #define PVD_creation_date_size 17 #define PVD_modification_date_offset (PVD_creation_date_offset + PVD_creation_date_size) #define PVD_modification_date_size 17 #define PVD_expiration_date_offset (PVD_modification_date_offset + PVD_modification_date_size) #define PVD_expiration_date_size 17 #define PVD_effective_date_offset (PVD_expiration_date_offset + PVD_expiration_date_size) #define PVD_effective_date_size 17 #define PVD_file_structure_version_offset 
(PVD_effective_date_offset + PVD_effective_date_size) #define PVD_file_structure_version_size 1 #define PVD_reserved4_offset (PVD_file_structure_version_offset + PVD_file_structure_version_size) #define PVD_reserved4_size 1 #define PVD_application_data_offset (PVD_reserved4_offset + PVD_reserved4_size) #define PVD_application_data_size 512 #define PVD_reserved5_offset (PVD_application_data_offset + PVD_application_data_size) #define PVD_reserved5_size (2048 - PVD_reserved5_offset) /* TODO: It would make future maintenance easier to just hardcode the * above values. In particular, ECMA119 states the offsets as part of * the standard. That would eliminate the need for the following check.*/ #if PVD_reserved5_offset != 1395 #error PVD offset and size definitions are wrong. #endif /* Structure of optional on-disk supplementary volume descriptor. */ #define SVD_type_offset 0 #define SVD_type_size 1 #define SVD_id_offset (SVD_type_offset + SVD_type_size) #define SVD_id_size 5 #define SVD_version_offset (SVD_id_offset + SVD_id_size) #define SVD_version_size 1 /* ... */ #define SVD_reserved1_offset 72 #define SVD_reserved1_size 8 #define SVD_volume_space_size_offset 80 #define SVD_volume_space_size_size 8 #define SVD_escape_sequences_offset (SVD_volume_space_size_offset + SVD_volume_space_size_size) #define SVD_escape_sequences_size 32 /* ... */ #define SVD_logical_block_size_offset 128 #define SVD_logical_block_size_size 4 #define SVD_type_L_path_table_offset 140 #define SVD_type_M_path_table_offset 148 /* ... */ #define SVD_root_directory_record_offset 156 #define SVD_root_directory_record_size 34 #define SVD_file_structure_version_offset 881 #define SVD_reserved2_offset 882 #define SVD_reserved2_size 1 #define SVD_reserved3_offset 1395 #define SVD_reserved3_size 653 /* ... */ /* FIXME: validate correctness of last SVD entry offset. */ /* Structure of an on-disk directory record. */ /* Note: ISO9660 stores each multi-byte integer twice, once in * each byte order. The sizes here are the size of just one * of the two integers. (This is why the offset of a field isn't * the same as the offset+size of the previous field.) */ #define DR_length_offset 0 #define DR_length_size 1 #define DR_ext_attr_length_offset 1 #define DR_ext_attr_length_size 1 #define DR_extent_offset 2 #define DR_extent_size 4 #define DR_size_offset 10 #define DR_size_size 4 #define DR_date_offset 18 #define DR_date_size 7 #define DR_flags_offset 25 #define DR_flags_size 1 #define DR_file_unit_size_offset 26 #define DR_file_unit_size_size 1 #define DR_interleave_offset 27 #define DR_interleave_size 1 #define DR_volume_sequence_number_offset 28 #define DR_volume_sequence_number_size 2 #define DR_name_len_offset 32 #define DR_name_len_size 1 #define DR_name_offset 33 #ifdef HAVE_ZLIB_H static const unsigned char zisofs_magic[8] = { 0x37, 0xE4, 0x53, 0x96, 0xC9, 0xDB, 0xD6, 0x07 }; struct zisofs { /* Set 1 if this file compressed by paged zlib */ int pz; int pz_log2_bs; /* Log2 of block size */ uint64_t pz_uncompressed_size; int initialized; unsigned char *uncompressed_buffer; size_t uncompressed_buffer_size; uint32_t pz_offset; unsigned char header[16]; size_t header_avail; int header_passed; unsigned char *block_pointers; size_t block_pointers_alloc; size_t block_pointers_size; size_t block_pointers_avail; size_t block_off; uint32_t block_avail; z_stream stream; int stream_valid; }; #else struct zisofs { /* Set 1 if this file compressed by paged zlib */ int pz; }; #endif struct content { uint64_t offset;/* Offset on disk. 
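		 * Files larger than a single 32-bit extent can
		 * describe are recorded as a chain of these,
		 * linked through "next"; see the multi_extent
		 * handling in parse_file_info().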
*/ uint64_t size; /* File size in bytes. */ struct content *next; }; /* In-memory storage for a directory record. */ struct file_info { struct file_info *use_next; struct file_info *parent; struct file_info *next; struct file_info *re_next; int subdirs; uint64_t key; /* Heap Key. */ uint64_t offset; /* Offset on disk. */ uint64_t size; /* File size in bytes. */ uint32_t ce_offset; /* Offset of CE. */ uint32_t ce_size; /* Size of CE. */ char rr_moved; /* Flag to rr_moved. */ char rr_moved_has_re_only; char re; /* Having RRIP "RE" extension. */ char re_descendant; uint64_t cl_offset; /* Having RRIP "CL" extension. */ int birthtime_is_set; time_t birthtime; /* File created time. */ time_t mtime; /* File last modified time. */ time_t atime; /* File last accessed time. */ time_t ctime; /* File attribute change time. */ uint64_t rdev; /* Device number. */ mode_t mode; uid_t uid; gid_t gid; int64_t number; int nlinks; struct archive_string name; /* Pathname */ unsigned char *utf16be_name; size_t utf16be_bytes; char name_continues; /* Non-zero if name continues */ struct archive_string symlink; char symlink_continues; /* Non-zero if link continues */ /* Set 1 if this file compressed by paged zlib(zisofs) */ int pz; int pz_log2_bs; /* Log2 of block size */ uint64_t pz_uncompressed_size; /* Set 1 if this file is multi extent. */ int multi_extent; struct { struct content *first; struct content **last; } contents; struct { struct file_info *first; struct file_info **last; } rede_files; }; struct heap_queue { struct file_info **files; int allocated; int used; }; struct iso9660 { int magic; #define ISO9660_MAGIC 0x96609660 int opt_support_joliet; int opt_support_rockridge; struct archive_string pathname; char seenRockridge; /* Set true if RR extensions are used. */ char seenSUSP; /* Set true if SUSP is beging used. */ char seenJoliet; unsigned char suspOffset; struct file_info *rr_moved; struct read_ce_queue { struct read_ce_req { uint64_t offset;/* Offset of CE on disk. */ struct file_info *file; } *reqs; int cnt; int allocated; } read_ce_req; int64_t previous_number; struct archive_string previous_pathname; struct file_info *use_files; struct heap_queue pending_files; struct { struct file_info *first; struct file_info **last; } cache_files; struct { struct file_info *first; struct file_info **last; } re_files; uint64_t current_position; ssize_t logical_block_size; uint64_t volume_size; /* Total size of volume in bytes. */ int32_t volume_block;/* Total size of volume in logical blocks. */ struct vd { int location; /* Location of Extent. */ uint32_t size; } primary, joliet; int64_t entry_sparse_offset; int64_t entry_bytes_remaining; size_t entry_bytes_unconsumed; struct zisofs entry_zisofs; struct content *entry_content; struct archive_string_conv *sconv_utf16be; /* * Buffers for a full pathname in UTF-16BE in Joliet extensions. */ #define UTF16_NAME_MAX 1024 unsigned char *utf16be_path; size_t utf16be_path_len; unsigned char *utf16be_previous_path; size_t utf16be_previous_path_len; /* Null buufer used in bidder to improve its performance. 
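	 * isNull() compares descriptor padding against this
	 * all-zero block in 2048-byte strides, which is cheaper
	 * than scanning the padding byte by byte.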
*/ unsigned char null[2048]; }; static int archive_read_format_iso9660_bid(struct archive_read *, int); static int archive_read_format_iso9660_options(struct archive_read *, const char *, const char *); static int archive_read_format_iso9660_cleanup(struct archive_read *); static int archive_read_format_iso9660_read_data(struct archive_read *, const void **, size_t *, int64_t *); static int archive_read_format_iso9660_read_data_skip(struct archive_read *); static int archive_read_format_iso9660_read_header(struct archive_read *, struct archive_entry *); static const char *build_pathname(struct archive_string *, struct file_info *, int); static int build_pathname_utf16be(unsigned char *, size_t, size_t *, struct file_info *); #if DEBUG static void dump_isodirrec(FILE *, const unsigned char *isodirrec); #endif static time_t time_from_tm(struct tm *); static time_t isodate17(const unsigned char *); static time_t isodate7(const unsigned char *); static int isBootRecord(struct iso9660 *, const unsigned char *); static int isVolumePartition(struct iso9660 *, const unsigned char *); static int isVDSetTerminator(struct iso9660 *, const unsigned char *); static int isJolietSVD(struct iso9660 *, const unsigned char *); static int isSVD(struct iso9660 *, const unsigned char *); static int isEVD(struct iso9660 *, const unsigned char *); static int isPVD(struct iso9660 *, const unsigned char *); static int next_cache_entry(struct archive_read *, struct iso9660 *, struct file_info **); static int next_entry_seek(struct archive_read *, struct iso9660 *, struct file_info **); static struct file_info * parse_file_info(struct archive_read *a, struct file_info *parent, const unsigned char *isodirrec); static int parse_rockridge(struct archive_read *a, struct file_info *file, const unsigned char *start, const unsigned char *end); static int register_CE(struct archive_read *a, int32_t location, struct file_info *file); static int read_CE(struct archive_read *a, struct iso9660 *iso9660); static void parse_rockridge_NM1(struct file_info *, const unsigned char *, int); static void parse_rockridge_SL1(struct file_info *, const unsigned char *, int); static void parse_rockridge_TF1(struct file_info *, const unsigned char *, int); static void parse_rockridge_ZF1(struct file_info *, const unsigned char *, int); static void register_file(struct iso9660 *, struct file_info *); static void release_files(struct iso9660 *); static unsigned toi(const void *p, int n); static inline void re_add_entry(struct iso9660 *, struct file_info *); static inline struct file_info * re_get_entry(struct iso9660 *); static inline int rede_add_entry(struct file_info *); static inline struct file_info * rede_get_entry(struct file_info *); static inline void cache_add_entry(struct iso9660 *iso9660, struct file_info *file); static inline struct file_info *cache_get_entry(struct iso9660 *iso9660); static int heap_add_entry(struct archive_read *a, struct heap_queue *heap, struct file_info *file, uint64_t key); static struct file_info *heap_get_entry(struct heap_queue *heap); #define add_entry(arch, iso9660, file) \ heap_add_entry(arch, &((iso9660)->pending_files), file, file->offset) #define next_entry(iso9660) \ heap_get_entry(&((iso9660)->pending_files)) int archive_read_support_format_iso9660(struct archive *_a) { struct archive_read *a = (struct archive_read *)_a; struct iso9660 *iso9660; int r; archive_check_magic(_a, ARCHIVE_READ_MAGIC, ARCHIVE_STATE_NEW, "archive_read_support_format_iso9660"); iso9660 = (struct iso9660 *)calloc(1, 
sizeof(*iso9660)); if (iso9660 == NULL) { archive_set_error(&a->archive, ENOMEM, "Can't allocate iso9660 data"); return (ARCHIVE_FATAL); } iso9660->magic = ISO9660_MAGIC; iso9660->cache_files.first = NULL; iso9660->cache_files.last = &(iso9660->cache_files.first); iso9660->re_files.first = NULL; iso9660->re_files.last = &(iso9660->re_files.first); /* Enable to support Joliet extensions by default. */ iso9660->opt_support_joliet = 1; /* Enable to support Rock Ridge extensions by default. */ iso9660->opt_support_rockridge = 1; r = __archive_read_register_format(a, iso9660, "iso9660", archive_read_format_iso9660_bid, archive_read_format_iso9660_options, archive_read_format_iso9660_read_header, archive_read_format_iso9660_read_data, archive_read_format_iso9660_read_data_skip, NULL, archive_read_format_iso9660_cleanup, NULL, NULL); if (r != ARCHIVE_OK) { free(iso9660); return (r); } return (ARCHIVE_OK); } static int archive_read_format_iso9660_bid(struct archive_read *a, int best_bid) { struct iso9660 *iso9660; ssize_t bytes_read; const unsigned char *p; int seenTerminator; /* If there's already a better bid than we can ever make, don't bother testing. */ if (best_bid > 48) return (-1); iso9660 = (struct iso9660 *)(a->format->data); /* * Skip the first 32k (reserved area) and get the first * 8 sectors of the volume descriptor table. Of course, * if the I/O layer gives us more, we'll take it. */ #define RESERVED_AREA (SYSTEM_AREA_BLOCK * LOGICAL_BLOCK_SIZE) p = __archive_read_ahead(a, RESERVED_AREA + 8 * LOGICAL_BLOCK_SIZE, &bytes_read); if (p == NULL) return (-1); /* Skip the reserved area. */ bytes_read -= RESERVED_AREA; p += RESERVED_AREA; /* Check each volume descriptor. */ seenTerminator = 0; for (; bytes_read > LOGICAL_BLOCK_SIZE; bytes_read -= LOGICAL_BLOCK_SIZE, p += LOGICAL_BLOCK_SIZE) { /* Do not handle undefined Volume Descriptor Type. */ if (p[0] >= 4 && p[0] <= 254) return (0); /* Standard Identifier must be "CD001" */ if (memcmp(p + 1, "CD001", 5) != 0) return (0); if (isPVD(iso9660, p)) continue; if (!iso9660->joliet.location) { if (isJolietSVD(iso9660, p)) continue; } if (isBootRecord(iso9660, p)) continue; if (isEVD(iso9660, p)) continue; if (isSVD(iso9660, p)) continue; if (isVolumePartition(iso9660, p)) continue; if (isVDSetTerminator(iso9660, p)) { seenTerminator = 1; break; } return (0); } /* * ISO 9660 format must have Primary Volume Descriptor and * Volume Descriptor Set Terminator. */ if (seenTerminator && iso9660->primary.location > 16) return (48); /* We didn't find a valid PVD; return a bid of zero. */ return (0); } static int archive_read_format_iso9660_options(struct archive_read *a, const char *key, const char *val) { struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); if (strcmp(key, "joliet") == 0) { if (val == NULL || strcmp(val, "off") == 0 || strcmp(val, "ignore") == 0 || strcmp(val, "disable") == 0 || strcmp(val, "0") == 0) iso9660->opt_support_joliet = 0; else iso9660->opt_support_joliet = 1; return (ARCHIVE_OK); } if (strcmp(key, "rockridge") == 0 || strcmp(key, "Rockridge") == 0) { iso9660->opt_support_rockridge = val != NULL; return (ARCHIVE_OK); } /* Note: The "warn" return is just to inform the options * supervisor that we didn't handle it. It will generate * a suitable error if no one used this option. 
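	 * (Returning ARCHIVE_WARN from an option handler is the
	 * libarchive-wide convention for "not my option"; it is
	 * not an error by itself.)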
*/ return (ARCHIVE_WARN); } static int isNull(struct iso9660 *iso9660, const unsigned char *h, unsigned offset, unsigned bytes) { while (bytes >= sizeof(iso9660->null)) { if (!memcmp(iso9660->null, h + offset, sizeof(iso9660->null))) return (0); offset += sizeof(iso9660->null); bytes -= sizeof(iso9660->null); } if (bytes) return memcmp(iso9660->null, h + offset, bytes) == 0; else return (1); } static int isBootRecord(struct iso9660 *iso9660, const unsigned char *h) { (void)iso9660; /* UNUSED */ /* Type of the Volume Descriptor Boot Record must be 0. */ if (h[0] != 0) return (0); /* Volume Descriptor Version must be 1. */ if (h[6] != 1) return (0); return (1); } static int isVolumePartition(struct iso9660 *iso9660, const unsigned char *h) { int32_t location; /* Type of the Volume Partition Descriptor must be 3. */ if (h[0] != 3) return (0); /* Volume Descriptor Version must be 1. */ if (h[6] != 1) return (0); /* Unused Field */ if (h[7] != 0) return (0); location = archive_le32dec(h + 72); if (location <= SYSTEM_AREA_BLOCK || location >= iso9660->volume_block) return (0); if ((uint32_t)location != archive_be32dec(h + 76)) return (0); return (1); } static int isVDSetTerminator(struct iso9660 *iso9660, const unsigned char *h) { (void)iso9660; /* UNUSED */ /* Type of the Volume Descriptor Set Terminator must be 255. */ if (h[0] != 255) return (0); /* Volume Descriptor Version must be 1. */ if (h[6] != 1) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, 7, 2048-7)) return (0); return (1); } static int isJolietSVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; /* Check if current sector is a kind of Supplementary Volume * Descriptor. */ if (!isSVD(iso9660, h)) return (0); /* FIXME: do more validations according to joliet spec. */ /* check if this SVD contains joliet extension! */ p = h + SVD_escape_sequences_offset; /* N.B. Joliet spec says p[1] == '\\', but.... */ if (p[0] == '%' && p[1] == '/') { int level = 0; if (p[2] == '@') level = 1; else if (p[2] == 'C') level = 2; else if (p[2] == 'E') level = 3; else /* not joliet */ return (0); iso9660->seenJoliet = level; } else /* not joliet */ return (0); logical_block_size = archive_le16dec(h + SVD_logical_block_size_offset); volume_block = archive_le32dec(h + SVD_volume_space_size_offset); iso9660->logical_block_size = logical_block_size; iso9660->volume_block = volume_block; iso9660->volume_size = logical_block_size * (uint64_t)volume_block; /* Read Root Directory Record in Volume Descriptor. */ p = h + SVD_root_directory_record_offset; iso9660->joliet.location = archive_le32dec(p + DR_extent_offset); iso9660->joliet.size = archive_le32dec(p + DR_size_offset); return (48); } static int isSVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; int32_t location; (void)iso9660; /* UNUSED */ /* Type 2 means it's a SVD. */ if (h[SVD_type_offset] != 2) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, SVD_reserved1_offset, SVD_reserved1_size)) return (0); if (!isNull(iso9660, h, SVD_reserved2_offset, SVD_reserved2_size)) return (0); if (!isNull(iso9660, h, SVD_reserved3_offset, SVD_reserved3_size)) return (0); /* File structure version must be 1 for ISO9660/ECMA119. 
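	 * (ECMA-119 defines only File Structure Version 1, so any other
	 * value here means this sector is not a well-formed SVD.)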
*/ if (h[SVD_file_structure_version_offset] != 1) return (0); logical_block_size = archive_le16dec(h + SVD_logical_block_size_offset); if (logical_block_size <= 0) return (0); volume_block = archive_le32dec(h + SVD_volume_space_size_offset); if (volume_block <= SYSTEM_AREA_BLOCK+4) return (0); /* Location of Occurrence of Type L Path Table must be * available location, * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_le32dec(h+SVD_type_L_path_table_offset); if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block) return (0); /* The Type M Path Table must be at a valid location (WinISO * and probably other programs omit this, so we allow zero) * * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_be32dec(h+SVD_type_M_path_table_offset); if ((location > 0 && location < SYSTEM_AREA_BLOCK+2) || location >= volume_block) return (0); /* Read Root Directory Record in Volume Descriptor. */ p = h + SVD_root_directory_record_offset; if (p[DR_length_offset] != 34) return (0); return (48); } static int isEVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; int32_t location; (void)iso9660; /* UNUSED */ /* Type of the Enhanced Volume Descriptor must be 2. */ if (h[PVD_type_offset] != 2) return (0); /* EVD version must be 2. */ if (h[PVD_version_offset] != 2) return (0); /* Reserved field must be 0. */ if (h[PVD_reserved1_offset] != 0) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved2_offset, PVD_reserved2_size)) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved3_offset, PVD_reserved3_size)) return (0); /* Logical block size must be > 0. */ /* I've looked at Ecma 119 and can't find any stronger * restriction on this field. */ logical_block_size = archive_le16dec(h + PVD_logical_block_size_offset); if (logical_block_size <= 0) return (0); volume_block = archive_le32dec(h + PVD_volume_space_size_offset); if (volume_block <= SYSTEM_AREA_BLOCK+4) return (0); /* File structure version must be 2 for ISO9660:1999. */ if (h[PVD_file_structure_version_offset] != 2) return (0); /* Location of Occurrence of Type L Path Table must be * available location, * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_le32dec(h+PVD_type_1_path_table_offset); if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block) return (0); /* Location of Occurrence of Type M Path Table must be * available location, * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_be32dec(h+PVD_type_m_path_table_offset); if ((location > 0 && location < SYSTEM_AREA_BLOCK+2) || location >= volume_block) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved4_offset, PVD_reserved4_size)) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved5_offset, PVD_reserved5_size)) return (0); /* Read Root Directory Record in Volume Descriptor. */ p = h + PVD_root_directory_record_offset; if (p[DR_length_offset] != 34) return (0); return (48); } static int isPVD(struct iso9660 *iso9660, const unsigned char *h) { const unsigned char *p; ssize_t logical_block_size; int32_t volume_block; int32_t location; int i; /* Type of the Primary Volume Descriptor must be 1. */ if (h[PVD_type_offset] != 1) return (0); /* PVD version must be 1. */ if (h[PVD_version_offset] != 1) return (0); /* Reserved field must be 0. 
*/ if (h[PVD_reserved1_offset] != 0) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved2_offset, PVD_reserved2_size)) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved3_offset, PVD_reserved3_size)) return (0); /* Logical block size must be > 0. */ /* I've looked at Ecma 119 and can't find any stronger * restriction on this field. */ logical_block_size = archive_le16dec(h + PVD_logical_block_size_offset); if (logical_block_size <= 0) return (0); volume_block = archive_le32dec(h + PVD_volume_space_size_offset); if (volume_block <= SYSTEM_AREA_BLOCK+4) return (0); /* File structure version must be 1 for ISO9660/ECMA119. */ if (h[PVD_file_structure_version_offset] != 1) return (0); /* Location of Occurrence of Type L Path Table must be * available location, * > SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_le32dec(h+PVD_type_1_path_table_offset); if (location < SYSTEM_AREA_BLOCK+2 || location >= volume_block) return (0); /* The Type M Path Table must also be at a valid location * (although ECMA 119 requires a Type M Path Table, WinISO and * probably other programs omit it, so we permit a zero here) * * >= SYSTEM_AREA_BLOCK(16) + 2 and < Volume Space Size. */ location = archive_be32dec(h+PVD_type_m_path_table_offset); if ((location > 0 && location < SYSTEM_AREA_BLOCK+2) || location >= volume_block) return (0); /* Reserved field must be 0. */ /* But accept NetBSD/FreeBSD "makefs" images with 0x20 here. */ for (i = 0; i < PVD_reserved4_size; ++i) if (h[PVD_reserved4_offset + i] != 0 && h[PVD_reserved4_offset + i] != 0x20) return (0); /* Reserved field must be 0. */ if (!isNull(iso9660, h, PVD_reserved5_offset, PVD_reserved5_size)) return (0); /* XXX TODO: Check other values for sanity; reject more * malformed PVDs. XXX */ /* Read Root Directory Record in Volume Descriptor. 
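	 * (A root directory record is always 34 bytes long: a 33-byte
	 * fixed part plus a one-byte identifier, which is why the length
	 * check below insists on exactly 34.)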
 */
	p = h + PVD_root_directory_record_offset;
	if (p[DR_length_offset] != 34)
		return (0);

	if (!iso9660->primary.location) {
		iso9660->logical_block_size = logical_block_size;
		iso9660->volume_block = volume_block;
		iso9660->volume_size =
		    logical_block_size * (uint64_t)volume_block;
		iso9660->primary.location =
		    archive_le32dec(p + DR_extent_offset);
		iso9660->primary.size = archive_le32dec(p + DR_size_offset);
	}

	return (48);
}

static int
read_children(struct archive_read *a, struct file_info *parent)
{
	struct iso9660 *iso9660;
	const unsigned char *b, *p;
	struct file_info *multi;
	size_t step, skip_size;

	iso9660 = (struct iso9660 *)(a->format->data);
	/* Flush any remaining bytes from the last round to ensure
	 * we're positioned correctly. */
	if (iso9660->entry_bytes_unconsumed) {
		__archive_read_consume(a, iso9660->entry_bytes_unconsumed);
		iso9660->entry_bytes_unconsumed = 0;
	}
	if (iso9660->current_position > parent->offset) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "Ignoring out-of-order directory (%s) %jd > %jd",
		    parent->name.s,
		    (intmax_t)iso9660->current_position,
		    (intmax_t)parent->offset);
		return (ARCHIVE_WARN);
	}
	if (parent->offset + parent->size > iso9660->volume_size) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "Directory is beyond end-of-media: %s",
		    parent->name.s);
		return (ARCHIVE_WARN);
	}
	if (iso9660->current_position < parent->offset) {
		int64_t skipsize;

		skipsize = parent->offset - iso9660->current_position;
		skipsize = __archive_read_consume(a, skipsize);
		if (skipsize < 0)
			return ((int)skipsize);
		iso9660->current_position = parent->offset;
	}

	step = (size_t)(((parent->size + iso9660->logical_block_size -1) /
	    iso9660->logical_block_size) * iso9660->logical_block_size);
	b = __archive_read_ahead(a, step, NULL);
	if (b == NULL) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "Failed to read full block when scanning "
		    "ISO9660 directory list");
		return (ARCHIVE_FATAL);
	}
	iso9660->current_position += step;
	multi = NULL;
	skip_size = step;
	while (step) {
		p = b;
		b += iso9660->logical_block_size;
		step -= iso9660->logical_block_size;
		for (; *p != 0 && p < b && p + *p <= b; p += *p) {
			struct file_info *child;

			/* N.B.: these special directory identifiers
			 * are 8 bit "values" even on a
			 * Joliet CD with UCS-2 (16bit) encoding.
			 */

			/* Skip '.' entry. */
			if (*(p + DR_name_len_offset) == 1
			    && *(p + DR_name_offset) == '\0')
				continue;
			/* Skip '..' entry. */
			if (*(p + DR_name_len_offset) == 1
			    && *(p + DR_name_offset) == '\001')
				continue;
			child = parse_file_info(a, parent, p);
			if (child == NULL) {
				__archive_read_consume(a, skip_size);
				return (ARCHIVE_FATAL);
			}
			if (child->cl_offset == 0 &&
			    (child->multi_extent || multi != NULL)) {
				struct content *con;

				if (multi == NULL) {
					multi = child;
					multi->contents.first = NULL;
					multi->contents.last =
					    &(multi->contents.first);
				}
				con = malloc(sizeof(struct content));
				if (con == NULL) {
					archive_set_error(
					    &a->archive, ENOMEM,
					    "No memory for multi extent");
					__archive_read_consume(a, skip_size);
					return (ARCHIVE_FATAL);
				}
				con->offset = child->offset;
				con->size = child->size;
				con->next = NULL;
				*multi->contents.last = con;
				multi->contents.last = &(con->next);
				if (multi == child) {
					if (add_entry(a, iso9660, child)
					    != ARCHIVE_OK)
						return (ARCHIVE_FATAL);
				} else {
					multi->size += child->size;
					if (!child->multi_extent)
						multi = NULL;
				}
			} else if (add_entry(a, iso9660, child) != ARCHIVE_OK)
				return (ARCHIVE_FATAL);
		}
	}

	__archive_read_consume(a, skip_size);

	/* Read the data recorded by the RRIP "CE" extension.
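	 * A "CE" entry points into a continuation area elsewhere on the
	 * volume; read_CE() drains the queue that register_CE() built up
	 * while the directory records above were being parsed.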
*/ if (read_CE(a, iso9660) != ARCHIVE_OK) return (ARCHIVE_FATAL); return (ARCHIVE_OK); } static int choose_volume(struct archive_read *a, struct iso9660 *iso9660) { struct file_info *file; int64_t skipsize; struct vd *vd; const void *block; char seenJoliet; vd = &(iso9660->primary); if (!iso9660->opt_support_joliet) iso9660->seenJoliet = 0; if (iso9660->seenJoliet && vd->location > iso9660->joliet.location) /* This condition is unlikely; by way of caution. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } /* * While reading Root Directory, flag seenJoliet must be zero to * avoid converting special name 0x00(Current Directory) and * next byte to UCS2. */ seenJoliet = iso9660->seenJoliet;/* Save flag. */ iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; /* * If the iso image has both RockRidge and Joliet, we preferentially * use RockRidge Extensions rather than Joliet ones. */ if (vd == &(iso9660->primary) && iso9660->seenRockridge && iso9660->seenJoliet) iso9660->seenJoliet = 0; if (vd == &(iso9660->primary) && !iso9660->seenRockridge && iso9660->seenJoliet) { /* Switch reading data from primary to joliet. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location; skipsize -= iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position += skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; } /* Store the root directory in the pending list. */ if (add_entry(a, iso9660, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); if (iso9660->seenRockridge) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE; a->archive.archive_format_name = "ISO9660 with Rockridge extensions"; } return (ARCHIVE_OK); } static int archive_read_format_iso9660_read_header(struct archive_read *a, struct archive_entry *entry) { struct iso9660 *iso9660; struct file_info *file; int r, rd_r = ARCHIVE_OK; iso9660 = (struct iso9660 *)(a->format->data); if (!a->archive.archive_format) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660; a->archive.archive_format_name = "ISO9660"; } if (iso9660->current_position == 0) { r = choose_volume(a, iso9660); if (r != ARCHIVE_OK) return (r); } file = NULL;/* Eliminate a warning. */ /* Get the next entry that appears after the current offset. */ r = next_entry_seek(a, iso9660, &file); if (r != ARCHIVE_OK) return (r); if (iso9660->seenJoliet) { /* * Convert UTF-16BE of a filename to local locale MBS * and store the result into a filename field. 
		 */
		if (iso9660->sconv_utf16be == NULL) {
			iso9660->sconv_utf16be =
			    archive_string_conversion_from_charset(
				&(a->archive), "UTF-16BE", 1);
			if (iso9660->sconv_utf16be == NULL)
				/* Couldn't allocate memory */
				return (ARCHIVE_FATAL);
		}
		if (iso9660->utf16be_path == NULL) {
			iso9660->utf16be_path = malloc(UTF16_NAME_MAX);
			if (iso9660->utf16be_path == NULL) {
				archive_set_error(&a->archive, ENOMEM,
				    "No memory");
				return (ARCHIVE_FATAL);
			}
		}
		if (iso9660->utf16be_previous_path == NULL) {
			iso9660->utf16be_previous_path = malloc(UTF16_NAME_MAX);
			if (iso9660->utf16be_previous_path == NULL) {
				archive_set_error(&a->archive, ENOMEM,
				    "No memory");
				return (ARCHIVE_FATAL);
			}
		}

		iso9660->utf16be_path_len = 0;
		if (build_pathname_utf16be(iso9660->utf16be_path,
		    UTF16_NAME_MAX, &(iso9660->utf16be_path_len), file) != 0) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Pathname is too long");
			return (ARCHIVE_FATAL);
		}

		r = archive_entry_copy_pathname_l(entry,
		    (const char *)iso9660->utf16be_path,
		    iso9660->utf16be_path_len,
		    iso9660->sconv_utf16be);
		if (r != 0) {
			if (errno == ENOMEM) {
				archive_set_error(&a->archive, ENOMEM,
				    "No memory for Pathname");
				return (ARCHIVE_FATAL);
			}
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Pathname cannot be converted "
			    "from %s to current locale.",
			    archive_string_conversion_charset_name(
				iso9660->sconv_utf16be));

			rd_r = ARCHIVE_WARN;
		}
	} else {
		const char *path = build_pathname(&iso9660->pathname, file, 0);
		if (path == NULL) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Pathname is too long");
			return (ARCHIVE_FATAL);
		} else {
			archive_string_empty(&iso9660->pathname);
			archive_entry_set_pathname(entry, path);
		}
	}

	iso9660->entry_bytes_remaining = file->size;
	/* Offset for sparse-file-aware clients. */
	iso9660->entry_sparse_offset = 0;

	if (file->offset + file->size > iso9660->volume_size) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "File is beyond end-of-media: %s",
		    archive_entry_pathname(entry));
		iso9660->entry_bytes_remaining = 0;
		return (ARCHIVE_WARN);
	}

	/* Set up the entry structure with information about this entry. */
	archive_entry_set_mode(entry, file->mode);
	archive_entry_set_uid(entry, file->uid);
	archive_entry_set_gid(entry, file->gid);
	archive_entry_set_nlink(entry, file->nlinks);
	if (file->birthtime_is_set)
		archive_entry_set_birthtime(entry, file->birthtime, 0);
	else
		archive_entry_unset_birthtime(entry);
	archive_entry_set_mtime(entry, file->mtime, 0);
	archive_entry_set_ctime(entry, file->ctime, 0);
	archive_entry_set_atime(entry, file->atime, 0);
	/* N.B.: Rock Ridge supports 64-bit device numbers. */
	archive_entry_set_rdev(entry, (dev_t)file->rdev);
	archive_entry_set_size(entry, iso9660->entry_bytes_remaining);
	if (file->symlink.s != NULL)
		archive_entry_copy_symlink(entry, file->symlink.s);

	/* Note: If the input isn't seekable, we can't rewind to
	 * return the same body again, so if the next entry refers to
	 * the same data, we have to return it as a hardlink to the
	 * original entry.
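	 * (file->number is the inode-like key here: it comes from the
	 * extent location, or from the RRIP "PX" file serial number when
	 * Rockridge is in use, so equal numbers mean one shared body.)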
*/ if (file->number != -1 && file->number == iso9660->previous_number) { if (iso9660->seenJoliet) { r = archive_entry_copy_hardlink_l(entry, (const char *)iso9660->utf16be_previous_path, iso9660->utf16be_previous_path_len, iso9660->sconv_utf16be); if (r != 0) { if (errno == ENOMEM) { archive_set_error(&a->archive, ENOMEM, "No memory for Linkname"); return (ARCHIVE_FATAL); } archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Linkname cannot be converted " "from %s to current locale.", archive_string_conversion_charset_name( iso9660->sconv_utf16be)); rd_r = ARCHIVE_WARN; } } else archive_entry_set_hardlink(entry, iso9660->previous_pathname.s); archive_entry_unset_size(entry); iso9660->entry_bytes_remaining = 0; return (rd_r); } if ((file->mode & AE_IFMT) != AE_IFDIR && file->offset < iso9660->current_position) { int64_t r64; r64 = __archive_read_seek(a, file->offset, SEEK_SET); if (r64 != (int64_t)file->offset) { /* We can't seek backwards to extract it, so issue * a warning. Note that this can only happen if * this entry was added to the heap after we passed * this offset, that is, only if the directory * mentioning this entry is later than the body of * the entry. Such layouts are very unusual; most * ISO9660 writers lay out and record all directory * information first, then store all file bodies. */ archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Ignoring out-of-order file @%jx (%s) %jd < %jd", (intmax_t)file->number, iso9660->pathname.s, (intmax_t)file->offset, (intmax_t)iso9660->current_position); iso9660->entry_bytes_remaining = 0; return (ARCHIVE_WARN); } iso9660->current_position = (uint64_t)r64; } /* Initialize zisofs variables. */ iso9660->entry_zisofs.pz = file->pz; if (file->pz) { #ifdef HAVE_ZLIB_H struct zisofs *zisofs; zisofs = &iso9660->entry_zisofs; zisofs->initialized = 0; zisofs->pz_log2_bs = file->pz_log2_bs; zisofs->pz_uncompressed_size = file->pz_uncompressed_size; zisofs->pz_offset = 0; zisofs->header_avail = 0; zisofs->header_passed = 0; zisofs->block_pointers_avail = 0; #endif archive_entry_set_size(entry, file->pz_uncompressed_size); } iso9660->previous_number = file->number; if (iso9660->seenJoliet) { memcpy(iso9660->utf16be_previous_path, iso9660->utf16be_path, iso9660->utf16be_path_len); iso9660->utf16be_previous_path_len = iso9660->utf16be_path_len; } else archive_strcpy( &iso9660->previous_pathname, iso9660->pathname.s); /* Reset entry_bytes_remaining if the file is multi extent. */ iso9660->entry_content = file->contents.first; if (iso9660->entry_content != NULL) iso9660->entry_bytes_remaining = iso9660->entry_content->size; if (archive_entry_filetype(entry) == AE_IFDIR) { /* Overwrite nlinks by proper link number which is * calculated from number of sub directories. */ archive_entry_set_nlink(entry, 2 + file->subdirs); /* Directory data has been read completely. */ iso9660->entry_bytes_remaining = 0; } if (rd_r != ARCHIVE_OK) return (rd_r); return (ARCHIVE_OK); } static int archive_read_format_iso9660_read_data_skip(struct archive_read *a) { /* Because read_next_header always does an explicit skip * to the next entry, we don't need to do anything here. 
	 */
	(void)a; /* UNUSED */
	return (ARCHIVE_OK);
}

#ifdef HAVE_ZLIB_H

static int
zisofs_read_data(struct archive_read *a,
    const void **buff, size_t *size, int64_t *offset)
{
	struct iso9660 *iso9660;
	struct zisofs *zisofs;
	const unsigned char *p;
	size_t avail;
	ssize_t bytes_read;
	size_t uncompressed_size;
	int r;

	iso9660 = (struct iso9660 *)(a->format->data);
	zisofs = &iso9660->entry_zisofs;

	p = __archive_read_ahead(a, 1, &bytes_read);
	if (bytes_read <= 0) {
		archive_set_error(&a->archive,
		    ARCHIVE_ERRNO_FILE_FORMAT,
		    "Truncated zisofs file body");
		return (ARCHIVE_FATAL);
	}
	if (bytes_read > iso9660->entry_bytes_remaining)
		bytes_read = (ssize_t)iso9660->entry_bytes_remaining;
	avail = bytes_read;
	uncompressed_size = 0;

	if (!zisofs->initialized) {
		size_t ceil, xsize;

		/* Allocate block pointers buffer. */
		ceil = (size_t)((zisofs->pz_uncompressed_size +
			(((int64_t)1) << zisofs->pz_log2_bs) - 1)
			>> zisofs->pz_log2_bs);
		xsize = (ceil + 1) * 4;
		if (zisofs->block_pointers_alloc < xsize) {
			size_t alloc;

			if (zisofs->block_pointers != NULL)
				free(zisofs->block_pointers);
			alloc = ((xsize >> 10) + 1) << 10;
			zisofs->block_pointers = malloc(alloc);
			if (zisofs->block_pointers == NULL) {
				archive_set_error(&a->archive, ENOMEM,
				    "No memory for zisofs decompression");
				return (ARCHIVE_FATAL);
			}
			zisofs->block_pointers_alloc = alloc;
		}
		zisofs->block_pointers_size = xsize;

		/* Allocate uncompressed data buffer. */
		xsize = (size_t)1UL << zisofs->pz_log2_bs;
		if (zisofs->uncompressed_buffer_size < xsize) {
			if (zisofs->uncompressed_buffer != NULL)
				free(zisofs->uncompressed_buffer);
			zisofs->uncompressed_buffer = malloc(xsize);
			if (zisofs->uncompressed_buffer == NULL) {
				archive_set_error(&a->archive, ENOMEM,
				    "No memory for zisofs decompression");
				return (ARCHIVE_FATAL);
			}
		}
		zisofs->uncompressed_buffer_size = xsize;

		/*
		 * Read the file header, and check the magic code of zisofs.
		 */
		if (zisofs->header_avail < sizeof(zisofs->header)) {
			xsize = sizeof(zisofs->header)
			    - zisofs->header_avail;
			if (avail < xsize)
				xsize = avail;
			memcpy(zisofs->header + zisofs->header_avail,
			    p, xsize);
			zisofs->header_avail += xsize;
			avail -= xsize;
			p += xsize;
		}
		if (!zisofs->header_passed &&
		    zisofs->header_avail == sizeof(zisofs->header)) {
			int err = 0;

			if (memcmp(zisofs->header, zisofs_magic,
			    sizeof(zisofs_magic)) != 0)
				err = 1;
			if (archive_le32dec(zisofs->header + 8)
			    != zisofs->pz_uncompressed_size)
				err = 1;
			if (zisofs->header[12] != 4)
				err = 1;
			if (zisofs->header[13] != zisofs->pz_log2_bs)
				err = 1;
			if (err) {
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_FILE_FORMAT,
				    "Illegal zisofs file body");
				return (ARCHIVE_FATAL);
			}
			zisofs->header_passed = 1;
		}
		/*
		 * Read block pointers.
		 */
		if (zisofs->header_passed &&
		    zisofs->block_pointers_avail <
		    zisofs->block_pointers_size) {
			xsize = zisofs->block_pointers_size
			    - zisofs->block_pointers_avail;
			if (avail < xsize)
				xsize = avail;
			memcpy(zisofs->block_pointers
			    + zisofs->block_pointers_avail, p, xsize);
			zisofs->block_pointers_avail += xsize;
			avail -= xsize;
			p += xsize;
			if (zisofs->block_pointers_avail
			    == zisofs->block_pointers_size) {
				/* We've got all block pointers;
				 * initialize related variables. */
				zisofs->block_off = 0;
				zisofs->block_avail = 0;
				/* Complete the initialization. */
				zisofs->initialized = 1;
			}
		}

		if (!zisofs->initialized)
			goto next_data; /* We need more data. */
	}

	/*
	 * Get block offsets from block pointers.
	 */
	if (zisofs->block_avail == 0) {
		uint32_t bst, bed;

		if (zisofs->block_off + 4 >= zisofs->block_pointers_size) {
			/* There isn't a pair of offsets.
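			 * Each chunk needs two 4-byte little-endian
			 * pointers (start and end), so block_off + 4
			 * must still lie inside block_pointers_size.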
*/ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs block pointers"); return (ARCHIVE_FATAL); } bst = archive_le32dec( zisofs->block_pointers + zisofs->block_off); if (bst != zisofs->pz_offset + (bytes_read - avail)) { /* TODO: Should we seek offset of current file * by bst ? */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs block pointers(cannot seek)"); return (ARCHIVE_FATAL); } bed = archive_le32dec( zisofs->block_pointers + zisofs->block_off + 4); if (bed < bst) { archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "Illegal zisofs block pointers"); return (ARCHIVE_FATAL); } zisofs->block_avail = bed - bst; zisofs->block_off += 4; /* Initialize compression library for new block. */ if (zisofs->stream_valid) r = inflateReset(&zisofs->stream); else r = inflateInit(&zisofs->stream); if (r != Z_OK) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Can't initialize zisofs decompression."); return (ARCHIVE_FATAL); } zisofs->stream_valid = 1; zisofs->stream.total_in = 0; zisofs->stream.total_out = 0; } /* * Make uncompressed data. */ if (zisofs->block_avail == 0) { memset(zisofs->uncompressed_buffer, 0, zisofs->uncompressed_buffer_size); uncompressed_size = zisofs->uncompressed_buffer_size; } else { zisofs->stream.next_in = (Bytef *)(uintptr_t)(const void *)p; if (avail > zisofs->block_avail) zisofs->stream.avail_in = zisofs->block_avail; else zisofs->stream.avail_in = (uInt)avail; zisofs->stream.next_out = zisofs->uncompressed_buffer; zisofs->stream.avail_out = (uInt)zisofs->uncompressed_buffer_size; r = inflate(&zisofs->stream, 0); switch (r) { case Z_OK: /* Decompressor made some progress.*/ case Z_STREAM_END: /* Found end of stream. */ break; default: archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "zisofs decompression failed (%d)", r); return (ARCHIVE_FATAL); } uncompressed_size = zisofs->uncompressed_buffer_size - zisofs->stream.avail_out; avail -= zisofs->stream.next_in - p; zisofs->block_avail -= (uint32_t)(zisofs->stream.next_in - p); } next_data: bytes_read -= avail; *buff = zisofs->uncompressed_buffer; *size = uncompressed_size; *offset = iso9660->entry_sparse_offset; iso9660->entry_sparse_offset += uncompressed_size; iso9660->entry_bytes_remaining -= bytes_read; iso9660->current_position += bytes_read; zisofs->pz_offset += (uint32_t)bytes_read; iso9660->entry_bytes_unconsumed += bytes_read; return (ARCHIVE_OK); } #else /* HAVE_ZLIB_H */ static int zisofs_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { (void)buff;/* UNUSED */ (void)size;/* UNUSED */ (void)offset;/* UNUSED */ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, "zisofs is not supported on this platform."); return (ARCHIVE_FAILED); } #endif /* HAVE_ZLIB_H */ static int archive_read_format_iso9660_read_data(struct archive_read *a, const void **buff, size_t *size, int64_t *offset) { ssize_t bytes_read; struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); if (iso9660->entry_bytes_unconsumed) { __archive_read_consume(a, iso9660->entry_bytes_unconsumed); iso9660->entry_bytes_unconsumed = 0; } if (iso9660->entry_bytes_remaining <= 0) { if (iso9660->entry_content != NULL) iso9660->entry_content = iso9660->entry_content->next; if (iso9660->entry_content == NULL) { *buff = NULL; *size = 0; *offset = iso9660->entry_sparse_offset; return (ARCHIVE_EOF); } /* Seek forward to the start of the entry. 
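		 * (Multi-extent contents were queued in directory order,
		 * so in the normal case we only ever consume forward
		 * here; the out-of-order check below catches the rest.)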
		 */
		if (iso9660->current_position <
		    iso9660->entry_content->offset) {
			int64_t step;

			step = iso9660->entry_content->offset -
			    iso9660->current_position;
			step = __archive_read_consume(a, step);
			if (step < 0)
				return ((int)step);
			iso9660->current_position =
			    iso9660->entry_content->offset;
		}
		if (iso9660->entry_content->offset <
		    iso9660->current_position) {
			archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
			    "Ignoring out-of-order file (%s) %jd < %jd",
			    iso9660->pathname.s,
			    (intmax_t)iso9660->entry_content->offset,
			    (intmax_t)iso9660->current_position);
			*buff = NULL;
			*size = 0;
			*offset = iso9660->entry_sparse_offset;
			return (ARCHIVE_WARN);
		}
		iso9660->entry_bytes_remaining = iso9660->entry_content->size;
	}
	if (iso9660->entry_zisofs.pz)
		return (zisofs_read_data(a, buff, size, offset));

	*buff = __archive_read_ahead(a, 1, &bytes_read);
	if (bytes_read == 0)
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "Truncated input file");
	if (*buff == NULL)
		return (ARCHIVE_FATAL);
	if (bytes_read > iso9660->entry_bytes_remaining)
		bytes_read = (ssize_t)iso9660->entry_bytes_remaining;
	*size = bytes_read;
	*offset = iso9660->entry_sparse_offset;
	iso9660->entry_sparse_offset += bytes_read;
	iso9660->entry_bytes_remaining -= bytes_read;
	iso9660->entry_bytes_unconsumed = bytes_read;
	iso9660->current_position += bytes_read;
	return (ARCHIVE_OK);
}

static int
archive_read_format_iso9660_cleanup(struct archive_read *a)
{
	struct iso9660 *iso9660;
	int r = ARCHIVE_OK;

	iso9660 = (struct iso9660 *)(a->format->data);
	release_files(iso9660);
	free(iso9660->read_ce_req.reqs);
	archive_string_free(&iso9660->pathname);
	archive_string_free(&iso9660->previous_pathname);
	if (iso9660->pending_files.files)
		free(iso9660->pending_files.files);
#ifdef HAVE_ZLIB_H
	free(iso9660->entry_zisofs.uncompressed_buffer);
	free(iso9660->entry_zisofs.block_pointers);
	if (iso9660->entry_zisofs.stream_valid) {
		if (inflateEnd(&iso9660->entry_zisofs.stream) != Z_OK) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_MISC,
			    "Failed to clean up zlib decompressor");
			r = ARCHIVE_FATAL;
		}
	}
#endif
	free(iso9660->utf16be_path);
	free(iso9660->utf16be_previous_path);
	free(iso9660);
	(a->format->data) = NULL;
	return (r);
}

/*
 * This routine parses a single ISO directory record, makes sense
 * of any extensions, and stores the result in memory.
 */
static struct file_info *
parse_file_info(struct archive_read *a, struct file_info *parent,
    const unsigned char *isodirrec)
{
	struct iso9660 *iso9660;
	struct file_info *file, *filep;
	size_t name_len;
	const unsigned char *rr_start, *rr_end;
	const unsigned char *p;
	size_t dr_len;
	uint64_t fsize, offset;
	int32_t location;
	int flags;

	iso9660 = (struct iso9660 *)(a->format->data);

	dr_len = (size_t)isodirrec[DR_length_offset];
	name_len = (size_t)isodirrec[DR_name_len_offset];
	location = archive_le32dec(isodirrec + DR_extent_offset);
	fsize = toi(isodirrec + DR_size_offset, DR_size_size);
	/* Sanity check that dr_len is at least 34. */
	if (dr_len < 34) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "Invalid length of directory record");
		return (NULL);
	}
	/* Sanity check that name_len doesn't exceed dr_len. */
	if (dr_len - 33 < name_len || name_len == 0) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "Invalid length of file identifier");
		return (NULL);
	}
	/* Sanity check that location doesn't exceed the volume block.
	 * Don't check the lower limit of location; the location can
	 * legitimately be negative when the file is a symbolic link or
	 * its size is zero. As far as I know, the latest mkisofs
	 * does that.
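	 * (The extent field is read as a signed 32-bit value, so a
	 * "negative" location simply means its top bit was set; it is
	 * rejected below only when the file has a nonzero size.)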
	 */
	if (location > 0 &&
	    (location + ((fsize + iso9660->logical_block_size -1)
	       / iso9660->logical_block_size))
	     > (uint32_t)iso9660->volume_block) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "Invalid location of extent of file");
		return (NULL);
	}
	/* Sanity check that location isn't negative when the file is
	 * not empty; such a value would be too large anyway. */
	if (fsize != 0 && location < 0) {
		archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
		    "Invalid location of extent of file");
		return (NULL);
	}

	/* Sanity check that this entry does not create a cycle. */
	offset = iso9660->logical_block_size * (uint64_t)location;
	for (filep = parent; filep != NULL; filep = filep->parent) {
		if (filep->offset == offset) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_FILE_FORMAT,
			    "Directory structure contains loop");
			return (NULL);
		}
	}

	/* Create a new file entry and copy data from the ISO dir record. */
	file = (struct file_info *)calloc(1, sizeof(*file));
	if (file == NULL) {
		archive_set_error(&a->archive, ENOMEM,
		    "No memory for file entry");
		return (NULL);
	}
	file->parent = parent;
	file->offset = offset;
	file->size = fsize;
	file->mtime = isodate7(isodirrec + DR_date_offset);
	file->ctime = file->atime = file->mtime;
	file->rede_files.first = NULL;
	file->rede_files.last = &(file->rede_files.first);

	p = isodirrec + DR_name_offset;
	/* Rockridge extensions (if any) follow name. Compute this
	 * before fidgeting the name_len below. */
	rr_start = p + name_len + (name_len & 1 ? 0 : 1);
	rr_end = isodirrec + dr_len;

	if (iso9660->seenJoliet) {
		/* Joliet names are max 64 chars (128 bytes) according to
		 * the spec, but genisoimage/mkisofs allows recording longer
		 * Joliet names of 103 UCS-2 characters (206 bytes) via
		 * their option '-joliet-long'. */
		if (name_len > 206)
			name_len = 206;
		name_len &= ~1;

		/* Trim the trailing version number and dot from the
		 * filename.
		 *
		 * Remember we are in UTF-16BE land!
		 * SEPARATOR 1 (.) and SEPARATOR 2 (;) are both
		 * 16-bit big-endian characters on Joliet.
		 *
		 * TODO: sanitize filename?
		 *       Joliet allows any UCS-2 char except:
		 *       *, /, :, ;, ? and \.
		 */
		/* Chop off trailing ';1' from files. */
		if (name_len > 4 && p[name_len-4] == 0 && p[name_len-3] == ';'
		    && p[name_len-2] == 0 && p[name_len-1] == '1')
			name_len -= 4;
#if 0 /* XXX: this somehow manages to strip off single-character
       * file extensions, like '.c'. */
		/* Chop off trailing '.' from filenames. */
		if (name_len > 2 && p[name_len-2] == 0 && p[name_len-1] == '.')
			name_len -= 2;
#endif
		if ((file->utf16be_name = malloc(name_len)) == NULL) {
			archive_set_error(&a->archive, ENOMEM,
			    "No memory for file name");
			return (NULL);
		}
		memcpy(file->utf16be_name, p, name_len);
		file->utf16be_bytes = name_len;
	} else {
		/* Chop off trailing ';1' from files. */
		if (name_len > 2 && p[name_len - 2] == ';' &&
		    p[name_len - 1] == '1')
			name_len -= 2;
		/* Chop off trailing '.' from filenames. */
		if (name_len > 1 && p[name_len - 1] == '.')
			--name_len;

		archive_strncpy(&file->name, (const char *)p, name_len);
	}

	flags = isodirrec[DR_flags_offset];
	if (flags & 0x02)
		file->mode = AE_IFDIR | 0700;
	else
		file->mode = AE_IFREG | 0400;
	if (flags & 0x80)
		file->multi_extent = 1;
	else
		file->multi_extent = 0;
	/*
	 * Use the location for the file number, which is treated as an
	 * inode number to find out the hardlink target. If Rockridge
	 * extensions are being used, the file number will be overwritten
	 * by the FILE SERIAL NUMBER of the RRIP "PX" extension.
	 * Note: Old mkisofs did not record the FILE SERIAL NUMBER
	 *       in ISO images.
	 * Note2: xorriso sets 0 as the location of a symlink file.
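	 * Example: an empty file recorded at location 0 therefore gets
	 * file->number = -1 and file->offset = -1 below, which keeps it
	 * from sorting before any directory in the pending-files heap.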
	 */
	if (file->size == 0 && location >= 0) {
		/* If file->size is zero, its location points to a wrong
		 * place, and so we should not use it for the file number.
		 * When the location has a negative value, it can be used
		 * for the file number.
		 */
		file->number = -1;
		/* Do not appear before any directory entries. */
		file->offset = -1;
	} else
		file->number = (int64_t)(uint32_t)location;

	/* Rockridge extensions overwrite information from above. */
	if (iso9660->opt_support_rockridge) {
		if (parent == NULL && rr_end - rr_start >= 7) {
			p = rr_start;
			if (memcmp(p, "SP\x07\x01\xbe\xef", 6) == 0) {
				/*
				 * SP extension stores the suspOffset
				 * (Number of bytes to skip between
				 * filename and SUSP records.)
				 * It is mandatory by the SUSP standard
				 * (IEEE 1281).
				 *
				 * It allows SUSP to coexist with
				 * non-SUSP uses of the System
				 * Use Area by placing non-SUSP data
				 * before SUSP data.
				 *
				 * SP extension must be in the root
				 * directory entry, disable all SUSP
				 * processing if not found.
				 */
				iso9660->suspOffset = p[6];
				iso9660->seenSUSP = 1;
				rr_start += 7;
			}
		}
		if (iso9660->seenSUSP) {
			int r;

			file->name_continues = 0;
			file->symlink_continues = 0;
			rr_start += iso9660->suspOffset;
			r = parse_rockridge(a, file, rr_start, rr_end);
			if (r != ARCHIVE_OK) {
				free(file);
				return (NULL);
			}
			/*
			 * The file size of a symbolic link in ISO images
			 * made by makefs is not zero, and its location is
			 * the same as that of the next regular file. That
			 * makes it look like a hardlinked file and causes
			 * unexpected errors.
			 */
			if (file->size > 0 &&
			    (file->mode & AE_IFMT) == AE_IFLNK) {
				file->size = 0;
				file->number = -1;
				file->offset = -1;
			}
		} else
			/* If there isn't SUSP, disable parsing
			 * rock ridge extensions. */
			iso9660->opt_support_rockridge = 0;
	}

	file->nlinks = 1;/* Reset nlink; we'll calculate it later. */

	/* Tell file's parent how many children that parent has. */
	if (parent != NULL && (flags & 0x02))
		parent->subdirs++;

	if (iso9660->seenRockridge) {
		if (parent != NULL && parent->parent == NULL &&
		    (flags & 0x02) && iso9660->rr_moved == NULL &&
		    file->name.s &&
		    (strcmp(file->name.s, "rr_moved") == 0 ||
		     strcmp(file->name.s, ".rr_moved") == 0)) {
			iso9660->rr_moved = file;
			file->rr_moved = 1;
			file->rr_moved_has_re_only = 1;
			file->re = 0;
			parent->subdirs--;
		} else if (file->re) {
			/*
			 * Sanity check: file's parent is rr_moved.
			 */
			if (parent == NULL || parent->rr_moved == 0) {
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_MISC,
				    "Invalid Rockridge RE");
				return (NULL);
			}
			/*
			 * Sanity check: file does not have "CL" extension.
			 */
			if (file->cl_offset) {
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_MISC,
				    "Invalid Rockridge RE and CL");
				return (NULL);
			}
			/*
			 * Sanity check: The file type must be a directory.
			 */
			if ((flags & 0x02) == 0) {
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_MISC,
				    "Invalid Rockridge RE");
				return (NULL);
			}
		} else if (parent != NULL && parent->rr_moved)
			file->rr_moved_has_re_only = 0;
		else if (parent != NULL && (flags & 0x02) &&
		    (parent->re || parent->re_descendant))
			file->re_descendant = 1;
		if (file->cl_offset) {
			struct file_info *r;

			if (parent == NULL || parent->parent == NULL) {
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_MISC,
				    "Invalid Rockridge CL");
				return (NULL);
			}
			/*
			 * Sanity check: The file type must be a regular file.
			 */
			if ((flags & 0x02) != 0) {
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_MISC,
				    "Invalid Rockridge CL");
				return (NULL);
			}
			parent->subdirs++;
			/* Overwrite the offset and the number of this "CL"
			 * entry to appear before other dirs. The "+1" is
			 * to make sure it appears after the "RE" entry
			 * which this "CL" entry should be connected with.
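			 * Both the heap key and the inode-like "number"
			 * are taken from cl_offset, so the relocated
			 * directory sorts right after its matching "RE".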
*/ file->offset = file->number = file->cl_offset + 1; /* * Sanity check: cl_offset does not point at its * the parents or itself. */ for (r = parent; r; r = r->parent) { if (r->offset == file->cl_offset) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); return (NULL); } } if (file->cl_offset == file->offset || parent->rr_moved) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid Rockridge CL"); return (NULL); } } } #if DEBUG /* DEBUGGING: Warn about attributes I don't yet fully support. */ if ((flags & ~0x02) != 0) { fprintf(stderr, "\n ** Unrecognized flag: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (toi(isodirrec + DR_volume_sequence_number_offset, 2) != 1) { fprintf(stderr, "\n ** Unrecognized sequence number: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (*(isodirrec + DR_file_unit_size_offset) != 0) { fprintf(stderr, "\n ** Unexpected file unit size: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (*(isodirrec + DR_interleave_offset) != 0) { fprintf(stderr, "\n ** Unexpected interleave: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } else if (*(isodirrec + DR_ext_attr_length_offset) != 0) { fprintf(stderr, "\n ** Unexpected extended attribute length: "); dump_isodirrec(stderr, isodirrec); fprintf(stderr, "\n"); } #endif register_file(iso9660, file); return (file); } static int parse_rockridge(struct archive_read *a, struct file_info *file, const unsigned char *p, const unsigned char *end) { struct iso9660 *iso9660; iso9660 = (struct iso9660 *)(a->format->data); while (p + 4 <= end /* Enough space for another entry. */ && p[0] >= 'A' && p[0] <= 'Z' /* Sanity-check 1st char of name. */ && p[1] >= 'A' && p[1] <= 'Z' /* Sanity-check 2nd char of name. */ && p[2] >= 4 /* Sanity-check length. */ && p + p[2] <= end) { /* Sanity-check length. */ const unsigned char *data = p + 4; int data_length = p[2] - 4; int version = p[3]; switch(p[0]) { case 'C': if (p[1] == 'E') { if (version == 1 && data_length == 24) { /* * CE extension comprises: * 8 byte sector containing extension * 8 byte offset w/in above sector * 8 byte length of continuation */ int32_t location = archive_le32dec(data); file->ce_offset = archive_le32dec(data+8); file->ce_size = archive_le32dec(data+16); if (register_CE(a, location, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); } } else if (p[1] == 'L') { if (version == 1 && data_length == 8) { file->cl_offset = (uint64_t) iso9660->logical_block_size * (uint64_t)archive_le32dec(data); iso9660->seenRockridge = 1; } } break; case 'N': if (p[1] == 'M') { if (version == 1) { parse_rockridge_NM1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'P': /* * PD extension is padding; * contents are always ignored. * * PL extension won't appear; * contents are always ignored. */ if (p[1] == 'N') { if (version == 1 && data_length == 16) { file->rdev = toi(data,4); file->rdev <<= 32; file->rdev |= toi(data + 8, 4); iso9660->seenRockridge = 1; } } else if (p[1] == 'X') { /* * PX extension comprises: * 8 bytes for mode, * 8 bytes for nlinks, * 8 bytes for uid, * 8 bytes for gid, * 8 bytes for inode. 
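			 * Each field is stored in ECMA-119 both-byte
			 * order (a little-endian half followed by a
			 * big-endian half); toi() below decodes only
			 * the little-endian half.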
*/ if (version == 1) { if (data_length >= 8) file->mode = toi(data, 4); if (data_length >= 16) file->nlinks = toi(data + 8, 4); if (data_length >= 24) file->uid = toi(data + 16, 4); if (data_length >= 32) file->gid = toi(data + 24, 4); if (data_length >= 40) file->number = toi(data + 32, 4); iso9660->seenRockridge = 1; } } break; case 'R': if (p[1] == 'E' && version == 1) { file->re = 1; iso9660->seenRockridge = 1; } else if (p[1] == 'R' && version == 1) { /* * RR extension comprises: * one byte flag value * This extension is obsolete, * so contents are always ignored. */ } break; case 'S': if (p[1] == 'L') { if (version == 1) { parse_rockridge_SL1(file, data, data_length); iso9660->seenRockridge = 1; } } else if (p[1] == 'T' && data_length == 0 && version == 1) { /* * ST extension marks end of this * block of SUSP entries. * * It allows SUSP to coexist with * non-SUSP uses of the System * Use Area by placing non-SUSP data * after SUSP data. */ iso9660->seenSUSP = 0; iso9660->seenRockridge = 0; return (ARCHIVE_OK); } break; case 'T': if (p[1] == 'F') { if (version == 1) { parse_rockridge_TF1(file, data, data_length); iso9660->seenRockridge = 1; } } break; case 'Z': if (p[1] == 'F') { if (version == 1) parse_rockridge_ZF1(file, data, data_length); } break; default: break; } p += p[2]; } return (ARCHIVE_OK); } static int register_CE(struct archive_read *a, int32_t location, struct file_info *file) { struct iso9660 *iso9660; struct read_ce_queue *heap; struct read_ce_req *p; uint64_t offset, parent_offset; int hole, parent; iso9660 = (struct iso9660 *)(a->format->data); offset = ((uint64_t)location) * (uint64_t)iso9660->logical_block_size; if (((file->mode & AE_IFMT) == AE_IFREG && offset >= file->offset) || offset < iso9660->current_position || (((uint64_t)file->ce_offset) + file->ce_size) > (uint64_t)iso9660->logical_block_size || offset + file->ce_offset + file->ce_size > iso9660->volume_size) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Invalid parameter in SUSP \"CE\" extension"); return (ARCHIVE_FATAL); } /* Expand our CE list as necessary. */ heap = &(iso9660->read_ce_req); if (heap->cnt >= heap->allocated) { int new_size; if (heap->allocated < 16) new_size = 16; else new_size = heap->allocated * 2; /* Overflow might keep us from growing the list. */ if (new_size <= heap->allocated) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } p = calloc(new_size, sizeof(p[0])); if (p == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } if (heap->reqs != NULL) { memcpy(p, heap->reqs, heap->cnt * sizeof(*p)); free(heap->reqs); } heap->reqs = p; heap->allocated = new_size; } /* * Start with hole at end, walk it up tree to find insertion point. */ hole = heap->cnt++; while (hole > 0) { parent = (hole - 1)/2; parent_offset = heap->reqs[parent].offset; if (offset >= parent_offset) { heap->reqs[hole].offset = offset; heap->reqs[hole].file = file; return (ARCHIVE_OK); } /* Move parent into hole <==> move hole up tree. */ heap->reqs[hole] = heap->reqs[parent]; hole = parent; } heap->reqs[0].offset = offset; heap->reqs[0].file = file; return (ARCHIVE_OK); } static void next_CE(struct read_ce_queue *heap) { uint64_t a_offset, b_offset, c_offset; int a, b, c; struct read_ce_req tmp; if (heap->cnt < 1) return; /* * Move the last item in the heap to the root of the tree */ heap->reqs[0] = heap->reqs[--(heap->cnt)]; /* * Rebalance the heap. 
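	 * Classic sift-down: swap the root with its smaller child until
	 * the min-heap invariant (parent offset <= child offsets) holds.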
	 */
	a = 0; /* Starting element and its offset */
	a_offset = heap->reqs[a].offset;
	for (;;) {
		b = a + a + 1; /* First child */
		if (b >= heap->cnt)
			return;
		b_offset = heap->reqs[b].offset;
		c = b + 1; /* Use second child if it is smaller. */
		if (c < heap->cnt) {
			c_offset = heap->reqs[c].offset;
			if (c_offset < b_offset) {
				b = c;
				b_offset = c_offset;
			}
		}
		if (a_offset <= b_offset)
			return;
		tmp = heap->reqs[a];
		heap->reqs[a] = heap->reqs[b];
		heap->reqs[b] = tmp;
		a = b;
	}
}

static int
read_CE(struct archive_read *a, struct iso9660 *iso9660)
{
	struct read_ce_queue *heap;
	const unsigned char *b, *p, *end;
	struct file_info *file;
	size_t step;
	int r;

	/* Read the data which the RRIP "CE" extension points to. */
	heap = &(iso9660->read_ce_req);
	step = iso9660->logical_block_size;
	while (heap->cnt &&
	    heap->reqs[0].offset == iso9660->current_position) {
		b = __archive_read_ahead(a, step, NULL);
		if (b == NULL) {
			archive_set_error(&a->archive,
			    ARCHIVE_ERRNO_MISC,
			    "Failed to read full block when scanning "
			    "ISO9660 directory list");
			return (ARCHIVE_FATAL);
		}
		do {
			file = heap->reqs[0].file;
			if (file->ce_offset + file->ce_size > step) {
				archive_set_error(&a->archive,
				    ARCHIVE_ERRNO_FILE_FORMAT,
				    "Malformed CE information");
				return (ARCHIVE_FATAL);
			}
			p = b + file->ce_offset;
			end = p + file->ce_size;
			next_CE(heap);
			r = parse_rockridge(a, file, p, end);
			if (r != ARCHIVE_OK)
				return (ARCHIVE_FATAL);
		} while (heap->cnt &&
		    heap->reqs[0].offset == iso9660->current_position);
		/* NOTE: Do not move this consume code to the front of
		 * the do-while loop. Registering a nested CE extension
		 * might fail because of the current position. */
		__archive_read_consume(a, step);
		iso9660->current_position += step;
	}
	return (ARCHIVE_OK);
}

static void
parse_rockridge_NM1(struct file_info *file,
    const unsigned char *data, int data_length)
{
	if (!file->name_continues)
		archive_string_empty(&file->name);
	file->name_continues = 0;
	if (data_length < 1)
		return;
	/*
	 * NM version 1 extension comprises:
	 *   1 byte flag, value is one of:
	 *     = 0: remainder is name
	 *     = 1: remainder is name, next NM entry continues name
	 *     = 2: "."
	 *     = 4: ".."
	 *     = 32: Implementation specific
	 *     All other values are reserved.
	 */
	switch(data[0]) {
	case 0:
		if (data_length < 2)
			return;
		archive_strncat(&file->name,
		    (const char *)data + 1, data_length - 1);
		break;
	case 1:
		if (data_length < 2)
			return;
		archive_strncat(&file->name,
		    (const char *)data + 1, data_length - 1);
		file->name_continues = 1;
		break;
	case 2:
		archive_strcat(&file->name, ".");
		break;
	case 4:
		archive_strcat(&file->name, "..");
		break;
	default:
		return;
	}
}

static void
parse_rockridge_TF1(struct file_info *file, const unsigned char *data,
    int data_length)
{
	char flag;
	/*
	 * TF extension comprises:
	 *   one byte flag
	 *   create time (optional)
	 *   modify time (optional)
	 *   access time (optional)
	 *   attribute time (optional)
	 * The time format and the presence of fields
	 * are controlled by flag bits.
	 */
	if (data_length < 1)
		return;
	flag = data[0];
	++data;
	--data_length;
	if (flag & 0x80) {
		/* Use 17-byte time format. */
		if ((flag & 1) && data_length >= 17) {
			/* Create time. */
			file->birthtime_is_set = 1;
			file->birthtime = isodate17(data);
			data += 17;
			data_length -= 17;
		}
		if ((flag & 2) && data_length >= 17) {
			/* Modify time. */
			file->mtime = isodate17(data);
			data += 17;
			data_length -= 17;
		}
		if ((flag & 4) && data_length >= 17) {
			/* Access time. */
			file->atime = isodate17(data);
			data += 17;
			data_length -= 17;
		}
		if ((flag & 8) && data_length >= 17) {
			/* Attribute change time. */
			file->ctime = isodate17(data);
		}
	} else {
		/* Use 7-byte time format.
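		 * (Seven bytes: years since 1900, month, day, hour,
		 * minute, second, and a signed timezone offset counted
		 * in 15-minute units; decoded by isodate7().)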
		 */
		if ((flag & 1) && data_length >= 7) {
			/* Create time. */
			file->birthtime_is_set = 1;
			file->birthtime = isodate7(data);
			data += 7;
			data_length -= 7;
		}
		if ((flag & 2) && data_length >= 7) {
			/* Modify time. */
			file->mtime = isodate7(data);
			data += 7;
			data_length -= 7;
		}
		if ((flag & 4) && data_length >= 7) {
			/* Access time. */
			file->atime = isodate7(data);
			data += 7;
			data_length -= 7;
		}
		if ((flag & 8) && data_length >= 7) {
			/* Attribute change time. */
			file->ctime = isodate7(data);
		}
	}
}

static void
parse_rockridge_SL1(struct file_info *file, const unsigned char *data,
    int data_length)
{
	const char *separator = "";

	if (!file->symlink_continues || file->symlink.length < 1)
		archive_string_empty(&file->symlink);
	file->symlink_continues = 0;

	/*
	 * Defined flag values:
	 *  0: This is the last SL record for this symbolic link.
	 *  1: This symbolic link field continues in the next SL entry.
	 *  All other values are reserved.
	 */
	if (data_length < 1)
		return;
	switch(*data) {
	case 0:
		break;
	case 1:
		file->symlink_continues = 1;
		break;
	default:
		return;
	}
	++data;  /* Skip flag byte. */
	--data_length;

	/*
	 * SL extension body stores "components".
	 * Basically, this is a complicated way of storing
	 * a POSIX path.  It also interferes with using
	 * symlinks for storing non-path data. <sigh>
	 *
	 * Each component is 2 bytes (flag and length)
	 * possibly followed by name data.
	 */
	while (data_length >= 2) {
		unsigned char flag = *data++;
		unsigned char nlen = *data++;
		data_length -= 2;

		archive_strcat(&file->symlink, separator);
		separator = "/";

		switch(flag) {
		case 0: /* Usual case, this is text. */
			if (data_length < nlen)
				return;
			archive_strncat(&file->symlink,
			    (const char *)data, nlen);
			break;
		case 0x01: /* Text continues in next component. */
			if (data_length < nlen)
				return;
			archive_strncat(&file->symlink,
			    (const char *)data, nlen);
			separator = "";
			break;
		case 0x02: /* Current dir. */
			archive_strcat(&file->symlink, ".");
			break;
		case 0x04: /* Parent dir. */
			archive_strcat(&file->symlink, "..");
			break;
		case 0x08: /* Root of filesystem. */
			archive_strcat(&file->symlink, "/");
			separator = "";
			break;
		case 0x10: /* Undefined (historically "volume root"). */
			archive_string_empty(&file->symlink);
			archive_strcat(&file->symlink, "ROOT");
			break;
		case 0x20: /* Undefined (historically "hostname"). */
			archive_strcat(&file->symlink, "hostname");
			break;
		default: /* TODO: issue a warning ? */
			return;
		}
		data += nlen;
		data_length -= nlen;
	}
}

static void
parse_rockridge_ZF1(struct file_info *file, const unsigned char *data,
    int data_length)
{
	/* Check the length before touching the payload bytes. */
	if (data_length == 12 && data[0] == 0x70 && data[1] == 0x7a) {
		/* paged zlib */
		file->pz = 1;
		file->pz_log2_bs = data[3];
		file->pz_uncompressed_size = archive_le32dec(&data[4]);
	}
}

static void
register_file(struct iso9660 *iso9660, struct file_info *file)
{
	file->use_next = iso9660->use_files;
	iso9660->use_files = file;
}

static void
release_files(struct iso9660 *iso9660)
{
	struct content *con, *connext;
	struct file_info *file;

	file = iso9660->use_files;
	while (file != NULL) {
		struct file_info *next = file->use_next;

		archive_string_free(&file->name);
		archive_string_free(&file->symlink);
		free(file->utf16be_name);
		con = file->contents.first;
		while (con != NULL) {
			connext = con->next;
			free(con);
			con = connext;
		}
		free(file);
		file = next;
	}
}

static int
next_entry_seek(struct archive_read *a, struct iso9660 *iso9660,
    struct file_info **pfile)
{
	struct file_info *file;
	int r;

	r = next_cache_entry(a, iso9660, pfile);
	if (r != ARCHIVE_OK)
		return (r);
	file = *pfile;

	/* Don't waste time seeking for zero-length bodies.
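	 * (Pretending the empty body starts at the current position
	 * makes the forward seek below a no-op.)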
	 */
	if (file->size == 0)
		file->offset = iso9660->current_position;

	/* Flush any remaining bytes from the last round to ensure
	 * we're positioned correctly. */
	if (iso9660->entry_bytes_unconsumed) {
		__archive_read_consume(a, iso9660->entry_bytes_unconsumed);
		iso9660->entry_bytes_unconsumed = 0;
	}

	/* Seek forward to the start of the entry. */
	if (iso9660->current_position < file->offset) {
		int64_t step;

		step = file->offset - iso9660->current_position;
		step = __archive_read_consume(a, step);
		if (step < 0)
			return ((int)step);
		iso9660->current_position = file->offset;
	}

	/* We found the body of the file; handle it now. */
	return (ARCHIVE_OK);
}

static int
next_cache_entry(struct archive_read *a, struct iso9660 *iso9660,
    struct file_info **pfile)
{
	struct file_info *file;
	struct {
		struct file_info	*first;
		struct file_info	**last;
	}	empty_files;
	int64_t number;
	int count;

	file = cache_get_entry(iso9660);
	if (file != NULL) {
		*pfile = file;
		return (ARCHIVE_OK);
	}

	for (;;) {
		struct file_info *re, *d;

		*pfile = file = next_entry(iso9660);
		if (file == NULL) {
			/*
			 * If directory entries which are descendants of
			 * rr_moved still remain, expose them.
			 */
			if (iso9660->re_files.first != NULL &&
			    iso9660->rr_moved != NULL &&
			    iso9660->rr_moved->rr_moved_has_re_only)
				/* Expose "rr_moved" entry. */
				cache_add_entry(iso9660, iso9660->rr_moved);
			while ((re = re_get_entry(iso9660)) != NULL) {
				/* Expose its descendant dirs. */
				while ((d = rede_get_entry(re)) != NULL)
					cache_add_entry(iso9660, d);
			}
			if (iso9660->cache_files.first != NULL)
				return (next_cache_entry(a, iso9660, pfile));
			return (ARCHIVE_EOF);
		}

		if (file->cl_offset) {
			struct file_info *first_re = NULL;
			int nexted_re = 0;

			/*
			 * Find the "RE" dir for the current file, which
			 * has the "CL" flag.
			 */
			while ((re = re_get_entry(iso9660)) != first_re) {
				if (first_re == NULL)
					first_re = re;
				if (re->offset == file->cl_offset) {
					re->parent->subdirs--;
					re->parent = file->parent;
					re->re = 0;
					if (re->parent->re_descendant) {
						nexted_re = 1;
						re->re_descendant = 1;
						if (rede_add_entry(re) < 0)
							goto fatal_rr;
						/* Move a list of descendants
						 * to a new ancestor. */
						while ((d = rede_get_entry(
						    re)) != NULL)
							if (rede_add_entry(d)
							    < 0)
								goto fatal_rr;
						break;
					}
					/* Replace the current file
					 * with the "RE" dir. */
					*pfile = file = re;
					/* Expose its descendants. */
					while ((d = rede_get_entry(
					    file)) != NULL)
						cache_add_entry(
						    iso9660, d);
					break;
				} else
					re_add_entry(iso9660, re);
			}
			if (nexted_re) {
				/*
				 * Do not expose this at this time
				 * because we have not gotten its full-path
				 * name yet.
				 */
				continue;
			}
		} else if ((file->mode & AE_IFMT) == AE_IFDIR) {
			int r;

			/* Read file entries in this dir. */
			r = read_children(a, file);
			if (r != ARCHIVE_OK)
				return (r);

			/*
			 * Handle a special dir of Rockridge extensions,
			 * "rr_moved".
			 */
			if (file->rr_moved) {
				/*
				 * If this has only the subdirectories which
				 * have "RE" flags, do not expose at this time.
				 */
				if (file->rr_moved_has_re_only)
					continue;
				/* Otherwise expose "rr_moved" entry. */
			} else if (file->re) {
				/*
				 * Do not expose this at this time
				 * because we have not gotten its full-path
				 * name yet.
				 */
				re_add_entry(iso9660, file);
				continue;
			} else if (file->re_descendant) {
				/*
				 * If the top level "RE" entry of this entry
				 * is not exposed, we, accordingly, should not
				 * expose this entry at this time because
				 * we cannot make its proper full-path name.
				 */
				if (rede_add_entry(file) == 0)
					continue;
				/* Otherwise we can expose this entry because
				 * it seems its top level "RE" has already been
				 * exposed.
				 */
			}
		}
		break;
	}

	if ((file->mode & AE_IFMT) != AE_IFREG || file->number == -1)
		return (ARCHIVE_OK);

	count = 0;
	number = file->number;
	iso9660->cache_files.first = NULL;
	iso9660->cache_files.last = &(iso9660->cache_files.first);
	empty_files.first = NULL;
	empty_files.last = &empty_files.first;
	/* Collect files which have the same file serial number.
	 * Peek at pending_files so that a file whose number is
	 * different is not put back. */
	while (iso9660->pending_files.used > 0 &&
	    (iso9660->pending_files.files[0]->number == -1 ||
	     iso9660->pending_files.files[0]->number == number)) {
		if (file->number == -1) {
			/* This file has the same offset, but it's the
			 * wrong offset which empty files and symlink
			 * files have.
			 * NOTE: This wrong offset was recorded by old
			 * mkisofs utilities. If the ISO image was created
			 * by the latest mkisofs, this does not happen.
			 */
			file->next = NULL;
			*empty_files.last = file;
			empty_files.last = &(file->next);
		} else {
			count++;
			cache_add_entry(iso9660, file);
		}
		file = next_entry(iso9660);
	}
	if (count == 0) {
		*pfile = file;
		return ((file == NULL)?ARCHIVE_EOF:ARCHIVE_OK);
	}
	if (file->number == -1) {
		file->next = NULL;
		*empty_files.last = file;
		empty_files.last = &(file->next);
	} else {
		count++;
		cache_add_entry(iso9660, file);
	}

	if (count > 1) {
		/* The count equals the number of hardlinks, so the
		 * nlinks of each file in cache_files is overwritten
		 * with that count. */
		for (file = iso9660->cache_files.first;
		    file != NULL; file = file->next)
			file->nlinks = count;
	}
	/* If there are empty files, those files are added
	 * to the tail of the cache_files. */
	if (empty_files.first != NULL) {
		*iso9660->cache_files.last = empty_files.first;
		iso9660->cache_files.last = empty_files.last;
	}
	*pfile = cache_get_entry(iso9660);
	return ((*pfile == NULL)?ARCHIVE_EOF:ARCHIVE_OK);

fatal_rr:
	archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC,
	    "Failed to connect 'CL' pointer to 'RE' rr_moved pointer of "
	    "Rockridge extensions: current position = %jd, CL offset = %jd",
	    (intmax_t)iso9660->current_position, (intmax_t)file->cl_offset);
	return (ARCHIVE_FATAL);
}

static inline void
re_add_entry(struct iso9660 *iso9660, struct file_info *file)
{
	file->re_next = NULL;
	*iso9660->re_files.last = file;
	iso9660->re_files.last = &(file->re_next);
}

static inline struct file_info *
re_get_entry(struct iso9660 *iso9660)
{
	struct file_info *file;

	if ((file = iso9660->re_files.first) != NULL) {
		iso9660->re_files.first = file->re_next;
		if (iso9660->re_files.first == NULL)
			iso9660->re_files.last =
			    &(iso9660->re_files.first);
	}
	return (file);
}

static inline int
rede_add_entry(struct file_info *file)
{
	struct file_info *re;

	/*
	 * Find the "RE" entry.
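	 * Walk up the parent chain to the nearest ancestor flagged
	 * "RE"; the file is queued on that ancestor's rede_files list.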
*/ re = file->parent; while (re != NULL && !re->re) re = re->parent; if (re == NULL) return (-1); file->re_next = NULL; *re->rede_files.last = file; re->rede_files.last = &(file->re_next); return (0); } static inline struct file_info * rede_get_entry(struct file_info *re) { struct file_info *file; if ((file = re->rede_files.first) != NULL) { re->rede_files.first = file->re_next; if (re->rede_files.first == NULL) re->rede_files.last = &(re->rede_files.first); } return (file); } static inline void cache_add_entry(struct iso9660 *iso9660, struct file_info *file) { file->next = NULL; *iso9660->cache_files.last = file; iso9660->cache_files.last = &(file->next); } static inline struct file_info * cache_get_entry(struct iso9660 *iso9660) { struct file_info *file; if ((file = iso9660->cache_files.first) != NULL) { iso9660->cache_files.first = file->next; if (iso9660->cache_files.first == NULL) iso9660->cache_files.last = &(iso9660->cache_files.first); } return (file); } static int heap_add_entry(struct archive_read *a, struct heap_queue *heap, struct file_info *file, uint64_t key) { uint64_t file_key, parent_key; int hole, parent; /* Expand our pending files list as necessary. */ if (heap->used >= heap->allocated) { struct file_info **new_pending_files; int new_size = heap->allocated * 2; if (heap->allocated < 1024) new_size = 1024; /* Overflow might keep us from growing the list. */ if (new_size <= heap->allocated) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } new_pending_files = (struct file_info **) malloc(new_size * sizeof(new_pending_files[0])); if (new_pending_files == NULL) { archive_set_error(&a->archive, ENOMEM, "Out of memory"); return (ARCHIVE_FATAL); } memcpy(new_pending_files, heap->files, heap->allocated * sizeof(new_pending_files[0])); if (heap->files != NULL) free(heap->files); heap->files = new_pending_files; heap->allocated = new_size; } file_key = file->key = key; /* * Start with hole at end, walk it up tree to find insertion point. */ hole = heap->used++; while (hole > 0) { parent = (hole - 1)/2; parent_key = heap->files[parent]->key; if (file_key >= parent_key) { heap->files[hole] = file; return (ARCHIVE_OK); } /* Move parent into hole <==> move hole up tree. */ heap->files[hole] = heap->files[parent]; hole = parent; } heap->files[0] = file; return (ARCHIVE_OK); } static struct file_info * heap_get_entry(struct heap_queue *heap) { uint64_t a_key, b_key, c_key; int a, b, c; struct file_info *r, *tmp; if (heap->used < 1) return (NULL); /* * The first file in the list is the earliest; we'll return this. */ r = heap->files[0]; /* * Move the last item in the heap to the root of the tree */ heap->files[0] = heap->files[--(heap->used)]; /* * Rebalance the heap. */ a = 0; /* Starting element and its heap key */ a_key = heap->files[a]->key; for (;;) { b = a + a + 1; /* First child */ if (b >= heap->used) return (r); b_key = heap->files[b]->key; c = b + 1; /* Use second child if it is smaller. 
*/ if (c < heap->used) { c_key = heap->files[c]->key; if (c_key < b_key) { b = c; b_key = c_key; } } if (a_key <= b_key) return (r); tmp = heap->files[a]; heap->files[a] = heap->files[b]; heap->files[b] = tmp; a = b; } } static unsigned int toi(const void *p, int n) { const unsigned char *v = (const unsigned char *)p; if (n > 1) return v[0] + 256 * toi(v + 1, n - 1); if (n == 1) return v[0]; return (0); } static time_t isodate7(const unsigned char *v) { struct tm tm; int offset; time_t t; memset(&tm, 0, sizeof(tm)); tm.tm_year = v[0]; tm.tm_mon = v[1] - 1; tm.tm_mday = v[2]; tm.tm_hour = v[3]; tm.tm_min = v[4]; tm.tm_sec = v[5]; /* v[6] is the signed timezone offset, in 1/4-hour increments. */ offset = ((const signed char *)v)[6]; if (offset > -48 && offset < 52) { tm.tm_hour -= offset / 4; tm.tm_min -= (offset % 4) * 15; } t = time_from_tm(&tm); if (t == (time_t)-1) return ((time_t)0); return (t); } static time_t isodate17(const unsigned char *v) { struct tm tm; int offset; time_t t; memset(&tm, 0, sizeof(tm)); tm.tm_year = (v[0] - '0') * 1000 + (v[1] - '0') * 100 + (v[2] - '0') * 10 + (v[3] - '0') - 1900; tm.tm_mon = (v[4] - '0') * 10 + (v[5] - '0'); tm.tm_mday = (v[6] - '0') * 10 + (v[7] - '0'); tm.tm_hour = (v[8] - '0') * 10 + (v[9] - '0'); tm.tm_min = (v[10] - '0') * 10 + (v[11] - '0'); tm.tm_sec = (v[12] - '0') * 10 + (v[13] - '0'); /* v[16] is the signed timezone offset, in 1/4-hour increments. */ offset = ((const signed char *)v)[16]; if (offset > -48 && offset < 52) { tm.tm_hour -= offset / 4; tm.tm_min -= (offset % 4) * 15; } t = time_from_tm(&tm); if (t == (time_t)-1) return ((time_t)0); return (t); } static time_t time_from_tm(struct tm *t) { #if HAVE_TIMEGM /* Use platform timegm() if available. */ return (timegm(t)); #elif HAVE__MKGMTIME64 return (_mkgmtime64(t)); #else /* Else use direct calculation using POSIX assumptions. */ /* First, fix up tm_yday based on the year/month/day. */ if (mktime(t) == (time_t)-1) return ((time_t)-1); /* Then we can compute timegm() from first principles. */ return (t->tm_sec + t->tm_min * 60 + t->tm_hour * 3600 + t->tm_yday * 86400 + (t->tm_year - 70) * 31536000 + ((t->tm_year - 69) / 4) * 86400 - ((t->tm_year - 1) / 100) * 86400 + ((t->tm_year + 299) / 400) * 86400); #endif } static const char * build_pathname(struct archive_string *as, struct file_info *file, int depth) { // Plain ISO9660 only allows 8 dir levels; if we get // to 1000, then something is very, very wrong. if (depth > 1000) { return NULL; } if (file->parent != NULL && archive_strlen(&file->parent->name) > 0) { if (build_pathname(as, file->parent, depth + 1) == NULL) { return NULL; } archive_strcat(as, "/"); } if (archive_strlen(&file->name) == 0) archive_strcat(as, "."); else archive_string_concat(as, &file->name); return (as->s); } static int build_pathname_utf16be(unsigned char *p, size_t max, size_t *len, struct file_info *file) { if (file->parent != NULL && file->parent->utf16be_bytes > 0) { if (build_pathname_utf16be(p, max, len, file->parent) != 0) return (-1); p[*len] = 0; p[*len + 1] = '/'; *len += 2; } if (file->utf16be_bytes == 0) { if (*len + 2 > max) return (-1);/* Path is too long! */ p[*len] = 0; p[*len + 1] = '.'; *len += 2; } else { if (*len + file->utf16be_bytes > max) return (-1);/* Path is too long! 
*/ memcpy(p + *len, file->utf16be_name, file->utf16be_bytes); *len += file->utf16be_bytes; } return (0); } #if DEBUG static void dump_isodirrec(FILE *out, const unsigned char *isodirrec) { fprintf(out, " l %d,", toi(isodirrec + DR_length_offset, DR_length_size)); fprintf(out, " a %d,", toi(isodirrec + DR_ext_attr_length_offset, DR_ext_attr_length_size)); fprintf(out, " ext 0x%x,", toi(isodirrec + DR_extent_offset, DR_extent_size)); fprintf(out, " s %d,", toi(isodirrec + DR_size_offset, DR_extent_size)); fprintf(out, " f 0x%x,", toi(isodirrec + DR_flags_offset, DR_flags_size)); fprintf(out, " u %d,", toi(isodirrec + DR_file_unit_size_offset, DR_file_unit_size_size)); fprintf(out, " ilv %d,", toi(isodirrec + DR_interleave_offset, DR_interleave_size)); fprintf(out, " seq %d,", toi(isodirrec + DR_volume_sequence_number_offset, DR_volume_sequence_number_size)); fprintf(out, " nl %d:", toi(isodirrec + DR_name_len_offset, DR_name_len_size)); fprintf(out, " `%.*s'", toi(isodirrec + DR_name_len_offset, DR_name_len_size), isodirrec + DR_name_offset); } #endif
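The pending-files queue above (heap_add_entry()/heap_get_entry()) is a textbook array-backed binary min-heap: the parent of slot i sits at (i - 1) / 2 and its children at 2i + 1 and 2i + 2, so insertion sifts a hole up from the tail and extraction sifts the last element down from the root. Below is a minimal self-contained sketch of the same sift-up/sift-down logic over bare uint64_t keys instead of libarchive's file_info pointers; the names and the fixed capacity are illustrative, not part of libarchive.

#include <stdint.h>
#include <stdio.h>

#define HEAP_CAP 64

/* Toy min-heap over plain keys, mirroring heap_add_entry()/heap_get_entry(). */
static uint64_t heap[HEAP_CAP];
static int used;

static int heap_push(uint64_t key)
{
	int hole, parent;

	if (used >= HEAP_CAP)
		return (-1);
	/* Start with a hole at the end and walk it up the tree. */
	hole = used++;
	while (hole > 0) {
		parent = (hole - 1) / 2;
		if (key >= heap[parent])
			break;
		heap[hole] = heap[parent];	/* move parent down into the hole */
		hole = parent;
	}
	heap[hole] = key;
	return (0);
}

static int heap_pop(uint64_t *key)
{
	int a, b, c;
	uint64_t last;

	if (used < 1)
		return (-1);
	*key = heap[0];			/* root holds the minimum */
	last = heap[--used];		/* sift the last element down from the root */
	a = 0;
	for (;;) {
		b = 2 * a + 1;		/* first child */
		if (b >= used)
			break;
		c = b + 1;		/* use second child if it is smaller */
		if (c < used && heap[c] < heap[b])
			b = c;
		if (last <= heap[b])
			break;
		heap[a] = heap[b];
		a = b;
	}
	heap[a] = last;
	return (0);
}

int main(void)
{
	uint64_t k;

	heap_push(30); heap_push(10); heap_push(20);
	while (heap_pop(&k) == 0)
		printf("%llu\n", (unsigned long long)k);	/* prints 10 20 30 */
	return (0);
}

Because extraction always returns the smallest key first, the reader can keep handing back pending entries in ascending key order without sorting the whole list up front.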
choose_volume(struct archive_read *a, struct iso9660 *iso9660) { struct file_info *file; int64_t skipsize; struct vd *vd; const void *block; char seenJoliet; vd = &(iso9660->primary); if (!iso9660->opt_support_joliet) iso9660->seenJoliet = 0; if (iso9660->seenJoliet && vd->location > iso9660->joliet.location) /* This condition is unlikely; by way of caution. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * vd->location; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } /* * While reading Root Directory, flag seenJoliet must be zero to * avoid converting special name 0x00(Current Directory) and * next byte to UCS2. */ seenJoliet = iso9660->seenJoliet;/* Save flag. */ iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; /* * If the iso image has both RockRidge and Joliet, we preferentially * use RockRidge Extensions rather than Joliet ones. */ if (vd == &(iso9660->primary) && iso9660->seenRockridge && iso9660->seenJoliet) iso9660->seenJoliet = 0; if (vd == &(iso9660->primary) && !iso9660->seenRockridge && iso9660->seenJoliet) { /* Switch reading data from primary to joliet. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * vd->location; skipsize -= iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position += skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; } /* Store the root directory in the pending list. */ if (add_entry(a, iso9660, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); if (iso9660->seenRockridge) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE; a->archive.archive_format_name = "ISO9660 with Rockridge extensions"; } return (ARCHIVE_OK); }
choose_volume(struct archive_read *a, struct iso9660 *iso9660) { struct file_info *file; int64_t skipsize; struct vd *vd; const void *block; char seenJoliet; vd = &(iso9660->primary); if (!iso9660->opt_support_joliet) iso9660->seenJoliet = 0; if (iso9660->seenJoliet && vd->location > iso9660->joliet.location) /* This condition is unlikely; by way of caution. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position = skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } /* * While reading Root Directory, flag seenJoliet must be zero to * avoid converting special name 0x00(Current Directory) and * next byte to UCS2. */ seenJoliet = iso9660->seenJoliet;/* Save flag. */ iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; /* * If the iso image has both RockRidge and Joliet, we preferentially * use RockRidge Extensions rather than Joliet ones. */ if (vd == &(iso9660->primary) && iso9660->seenRockridge && iso9660->seenJoliet) iso9660->seenJoliet = 0; if (vd == &(iso9660->primary) && !iso9660->seenRockridge && iso9660->seenJoliet) { /* Switch reading data from primary to joliet. */ vd = &(iso9660->joliet); skipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location; skipsize -= iso9660->current_position; skipsize = __archive_read_consume(a, skipsize); if (skipsize < 0) return ((int)skipsize); iso9660->current_position += skipsize; block = __archive_read_ahead(a, vd->size, NULL); if (block == NULL) { archive_set_error(&a->archive, ARCHIVE_ERRNO_MISC, "Failed to read full block when scanning " "ISO9660 directory list"); return (ARCHIVE_FATAL); } iso9660->seenJoliet = 0; file = parse_file_info(a, NULL, block); if (file == NULL) return (ARCHIVE_FATAL); iso9660->seenJoliet = seenJoliet; } /* Store the root directory in the pending list. */ if (add_entry(a, iso9660, file) != ARCHIVE_OK) return (ARCHIVE_FATAL); if (iso9660->seenRockridge) { a->archive.archive_format = ARCHIVE_FORMAT_ISO9660_ROCKRIDGE; a->archive.archive_format_name = "ISO9660 with Rockridge extensions"; } return (ARCHIVE_OK); }
{'added': [(1094, '\tskipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location;'), (1132, '\t\tskipsize = LOGICAL_BLOCK_SIZE * (int64_t)vd->location;')], 'deleted': [(1094, '\tskipsize = LOGICAL_BLOCK_SIZE * vd->location;'), (1132, '\t\tskipsize = LOGICAL_BLOCK_SIZE * vd->location;')]}
2
2
2,297
14,310
65
417
18
https://github.com/libarchive/libarchive
CVE-2016-5844
CWE-190
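The two-line diff above is the entire CVE-2016-5844 / CWE-190 fix: LOGICAL_BLOCK_SIZE * vd->location multiplies two 32-bit values, so a crafted volume-descriptor location can overflow before the product is widened into the 64-bit skipsize, and the resulting negative skip is then fed to __archive_read_consume(). Casting one operand to int64_t promotes the multiplication itself to 64 bits. A minimal sketch of the failure mode follows; the location value is illustrative, and note that signed overflow is undefined behavior in C (on common targets it wraps as shown).

#include <stdint.h>
#include <stdio.h>

#define LOGICAL_BLOCK_SIZE 2048

int main(void)
{
	/* Illustrative descriptor location near the top of the 32-bit range. */
	int32_t location = 0x7fffffff / 1024;	/* 2097151 blocks */
	int64_t bad, good;

	/* Pre-fix: the multiply happens in 32 bits and wraps (UB),
	 * then the wrapped value is widened; typically yields -2048. */
	bad = LOGICAL_BLOCK_SIZE * location;

	/* Post-fix: casting one operand makes the multiply 64-bit,
	 * yielding the correct 4294965248. */
	good = LOGICAL_BLOCK_SIZE * (int64_t)location;

	printf("bad  = %lld\ngood = %lld\n", (long long)bad, (long long)good);
	return (0);
}

The same pattern, cast an operand before the multiply rather than widening the result afterward, applies anywhere a 32-bit count is scaled by a block size.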
878
elf.c
C
store_versioninfo_gnu_verneed
/* radare - LGPL - Copyright 2008-2017 - nibble, pancake, alvaro_fe */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <r_types.h> #include <r_util.h> #include "elf.h" #ifdef IFDBG #undef IFDBG #endif #define DO_THE_DBG 0 #define IFDBG if (DO_THE_DBG) #define IFINT if (0) #define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL #define ELF_PAGE_SIZE 12 #define R_ELF_NO_RELRO 0 #define R_ELF_PART_RELRO 1 #define R_ELF_FULL_RELRO 2 #define bprintf if(bin->verbose)eprintf #define READ8(x, i) r_read_ble8(x + i); i += 1; #define READ16(x, i) r_read_ble16(x + i, bin->endian); i += 2; #define READ32(x, i) r_read_ble32(x + i, bin->endian); i += 4; #define READ64(x, i) r_read_ble64(x + i, bin->endian); i += 8; #define GROWTH_FACTOR (1.5) static inline int __strnlen(const char *str, int len) { int l = 0; while (IS_PRINTABLE (*str) && --len) { if (((ut8)*str) == 0xff) { break; } str++; l++; } return l + 1; } static int handle_e_ident(ELFOBJ *bin) { return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) || !strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG); } static int init_ehdr(ELFOBJ *bin) { ut8 e_ident[EI_NIDENT]; ut8 ehdr[sizeof (Elf_(Ehdr))] = {0}; int i, len; if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) { bprintf ("Warning: read (magic)\n"); return false; } sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1," " ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff," " ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0); sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine{EM_NONE=0, EM_M32=1," " EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_486=6, " " EM_860=7, EM_MIPS=8, EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11," " EM_UNKNOWN12=12, EM_UNKNOWN13=13, EM_UNKNOWN14=14, " " EM_PA_RISC=15, EM_PARISC=EM_PA_RISC, EM_nCUBE=16, EM_VPP500=17," " EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, " " EM_S390=22, EM_UNKNOWN22=EM_S390, EM_UNKNOWN23=23, EM_UNKNOWN24=24," " EM_UNKNOWN25=25, EM_UNKNOWN26=26, EM_UNKNOWN27=27, EM_UNKNOWN28=28," " EM_UNKNOWN29=29, EM_UNKNOWN30=30, EM_UNKNOWN31=31, EM_UNKNOWN32=32," " EM_UNKNOWN33=33, EM_UNKNOWN34=34, EM_UNKNOWN35=35, EM_V800=36," " EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40, EM_ALPHA=41, EM_SH=42," " EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46, EM_H8_300H=47," " EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51, EM_COLDFIRE=52," " EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57," " EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_AMD64=62," " EM_X86_64=EM_AMD64, EM_PDSP=63, EM_UNKNOWN64=64, EM_UNKNOWN65=65," " EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69, EM_68HC11=70," " EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75, " " EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80," " EM_HUANY=81, EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86," " EM_V850=87, EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91," " EM_OPENRISC=92, EM_ARC_A5=93, EM_XTENSA=94, EM_NUM=95};", 0); sdb_num_set (bin->kv, "elf_header.offset", 0, 0); sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exqqqxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #else sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exxxxxwwwwww" " ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize" " phentsize phnum shentsize shnum shstrndx", 0); #endif 
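/* Everything after e_ident is endianness-dependent: bin->endian is derived from e_ident[EI_DATA] below, and the READ16/READ32/READ64 macros defined above decode each field at offset i with the endian-aware r_read_ble*() helpers and then advance i by the field width, so the chain of READn calls that follows walks the fixed Elf_(Ehdr) layout one field at a time. */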
bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (Elf_(Ehdr))); if (len < 1) { bprintf ("Warning: read (ehdr)\n"); return false; } memcpy (&bin->ehdr.e_ident, ehdr, 16); i = 16; bin->ehdr.e_type = READ16 (ehdr, i) bin->ehdr.e_machine = READ16 (ehdr, i) bin->ehdr.e_version = READ32 (ehdr, i) #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i) bin->ehdr.e_phoff = READ64 (ehdr, i) bin->ehdr.e_shoff = READ64 (ehdr, i) #else bin->ehdr.e_entry = READ32 (ehdr, i) bin->ehdr.e_phoff = READ32 (ehdr, i) bin->ehdr.e_shoff = READ32 (ehdr, i) #endif bin->ehdr.e_flags = READ32 (ehdr, i) bin->ehdr.e_ehsize = READ16 (ehdr, i) bin->ehdr.e_phentsize = READ16 (ehdr, i) bin->ehdr.e_phnum = READ16 (ehdr, i) bin->ehdr.e_shentsize = READ16 (ehdr, i) bin->ehdr.e_shnum = READ16 (ehdr, i) bin->ehdr.e_shstrndx = READ16 (ehdr, i) return handle_e_ident (bin); // Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } static int init_phdr(ELFOBJ *bin) { ut32 phdr_size; ut8 phdr[sizeof (Elf_(Phdr))] = {0}; int i, j, len; if (!bin->ehdr.e_phnum) { return false; } if (bin->phdr) { return true; } if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) { return false; } if (!phdr_size) { return false; } if (phdr_size > bin->size) { return false; } if (phdr_size > (ut32)bin->size) { return false; } if (bin->ehdr.e_phoff > bin->size) { return false; } if (bin->ehdr.e_phoff + phdr_size > bin->size) { return false; } if (!(bin->phdr = calloc (phdr_size, 1))) { perror ("malloc (phdr)"); return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr)), phdr, sizeof (Elf_(Phdr))); if (len < 1) { bprintf ("Warning: read (phdr)\n"); R_FREE (bin->phdr); return false; } bin->phdr[i].p_type = READ32 (phdr, j) #if R_BIN_ELF64 bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_offset = READ64 (phdr, j) bin->phdr[i].p_vaddr = READ64 (phdr, j) bin->phdr[i].p_paddr = READ64 (phdr, j) bin->phdr[i].p_filesz = READ64 (phdr, j) bin->phdr[i].p_memsz = READ64 (phdr, j) bin->phdr[i].p_align = READ64 (phdr, j) #else bin->phdr[i].p_offset = READ32 (phdr, j) bin->phdr[i].p_vaddr = READ32 (phdr, j) bin->phdr[i].p_paddr = READ32 (phdr, j) bin->phdr[i].p_filesz = READ32 (phdr, j) bin->phdr[i].p_memsz = READ32 (phdr, j) bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_align = READ32 (phdr, j) #endif } sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0); sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0); sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2," "PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000," "PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0); sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1," "PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6," "PF_Read_Write_Exec=7};", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags" " offset vaddr paddr filesz memsz align", 0); #else sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr" " filesz memsz (elf_p_flags)flags align", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse` // > 
pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset` } static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; int i, j, len; if (!bin || bin->shdr) { return true; } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = calloc (1, shdr_size + 1))) { perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { bprintf ("Warning: read (shdr) at 0x%"PFMT64x"\n", (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j) bin->shdr[i].sh_type = READ32 (shdr, j) #if R_BIN_ELF64 bin->shdr[i].sh_flags = READ64 (shdr, j) bin->shdr[i].sh_addr = READ64 (shdr, j) bin->shdr[i].sh_offset = READ64 (shdr, j) bin->shdr[i].sh_size = READ64 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ64 (shdr, j) bin->shdr[i].sh_entsize = READ64 (shdr, j) #else bin->shdr[i].sh_flags = READ32 (shdr, j) bin->shdr[i].sh_addr = READ32 (shdr, j) bin->shdr[i].sh_offset = READ32 (shdr, j) bin->shdr[i].sh_size = READ32 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ32 (shdr, j) bin->shdr[i].sh_entsize = READ32 (shdr, j) #endif } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` } static int init_strtab(ELFOBJ *bin) { if (bin->strtab || !bin->shdr) { return false; } if (bin->ehdr.e_shstrndx != SHN_UNDEF && (bin->ehdr.e_shstrndx >= bin->ehdr.e_shnum || (bin->ehdr.e_shstrndx >= SHN_LORESERVE && bin->ehdr.e_shstrndx < SHN_HIRESERVE))) return false; /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[bin->ehdr.e_shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[bin->ehdr.e_shstrndx].sh_size) { return false; } bin->shstrtab_section = 
bin->strtab_section = &bin->shdr[bin->ehdr.e_shstrndx]; bin->shstrtab_size = bin->strtab_section->sh_size; if (bin->shstrtab_size > bin->size) { return false; } if (!(bin->shstrtab = calloc (1, bin->shstrtab_size + 1))) { perror ("malloc"); bin->shstrtab = NULL; return false; } if (bin->shstrtab_section->sh_offset > bin->size) { R_FREE (bin->shstrtab); return false; } if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) { R_FREE (bin->shstrtab); return false; } if (r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab, bin->shstrtab_section->sh_size + 1) < 1) { bprintf ("Warning: read (shstrtab) at 0x%"PFMT64x"\n", (ut64) bin->shstrtab_section->sh_offset); R_FREE (bin->shstrtab); return false; } bin->shstrtab[bin->shstrtab_section->sh_size] = '\0'; sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0); sdb_num_set (bin->kv, "elf_shstrtab.size", bin->shstrtab_section->sh_size, 0); return true; } static int init_dynamic_section(struct Elf_(r_bin_elf_obj_t) *bin) { Elf_(Dyn) *dyn = NULL; Elf_(Dyn) d = {0}; Elf_(Addr) strtabaddr = 0; ut64 offset = 0; char *strtab = NULL; size_t relentry = 0, strsize = 0; int entries; int i, j, len, r; ut8 sdyn[sizeof (Elf_(Dyn))] = {0}; ut32 dyn_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return false; } for (i = 0; i < bin->ehdr.e_phnum ; i++) { if (bin->phdr[i].p_type == PT_DYNAMIC) { dyn_size = bin->phdr[i].p_filesz; break; } } if (i == bin->ehdr.e_phnum) { return false; } if (bin->phdr[i].p_filesz > bin->size) { return false; } if (bin->phdr[i].p_offset > bin->size) { return false; } if (bin->phdr[i].p_offset + sizeof(Elf_(Dyn)) > bin->size) { return false; } for (entries = 0; entries < (dyn_size / sizeof (Elf_(Dyn))); entries++) { j = 0; len = r_buf_read_at (bin->b, bin->phdr[i].p_offset + entries * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { goto beach; } #if R_BIN_ELF64 d.d_tag = READ64 (sdyn, j) #else d.d_tag = READ32 (sdyn, j) #endif if (d.d_tag == DT_NULL) { break; } } if (entries < 1) { return false; } dyn = (Elf_(Dyn)*)calloc (entries, sizeof (Elf_(Dyn))); if (!dyn) { return false; } if (!UT32_MUL (&dyn_size, entries, sizeof (Elf_(Dyn)))) { goto beach; } if (!dyn_size) { goto beach; } offset = Elf_(r_bin_elf_v2p) (bin, bin->phdr[i].p_vaddr); if (offset > bin->size || offset + dyn_size > bin->size) { goto beach; } for (i = 0; i < entries; i++) { j = 0; r_buf_read_at (bin->b, offset + i * sizeof (Elf_(Dyn)), sdyn, sizeof (Elf_(Dyn))); if (len < 1) { bprintf("Warning: read (dyn)\n"); } #if R_BIN_ELF64 dyn[i].d_tag = READ64 (sdyn, j) dyn[i].d_un.d_ptr = READ64 (sdyn, j) #else dyn[i].d_tag = READ32 (sdyn, j) dyn[i].d_un.d_ptr = READ32 (sdyn, j) #endif switch (dyn[i].d_tag) { case DT_STRTAB: strtabaddr = Elf_(r_bin_elf_v2p) (bin, dyn[i].d_un.d_ptr); break; case DT_STRSZ: strsize = dyn[i].d_un.d_val; break; case DT_PLTREL: bin->is_rela = dyn[i].d_un.d_val; break; case DT_RELAENT: relentry = dyn[i].d_un.d_val; break; default: if ((dyn[i].d_tag >= DT_VERSYM) && (dyn[i].d_tag <= DT_VERNEEDNUM)) { bin->version_info[DT_VERSIONTAGIDX (dyn[i].d_tag)] = dyn[i].d_un.d_val; } break; } } if (!bin->is_rela) { bin->is_rela = sizeof (Elf_(Rela)) == relentry? 
DT_RELA : DT_REL; } if (!strtabaddr || strtabaddr > bin->size || strsize > ST32_MAX || !strsize || strsize > bin->size) { if (!strtabaddr) { bprintf ("Warning: section.shstrtab not found or invalid\n"); } goto beach; } strtab = (char *)calloc (1, strsize + 1); if (!strtab) { goto beach; } if (strtabaddr + strsize > bin->size) { free (strtab); goto beach; } r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize); if (r < 1) { free (strtab); goto beach; } bin->dyn_buf = dyn; bin->dyn_entries = entries; bin->strtab = strtab; bin->strtab_size = strsize; r = Elf_(r_bin_elf_has_relro)(bin); switch (r) { case R_ELF_FULL_RELRO: sdb_set (bin->kv, "elf.relro", "full", 0); break; case R_ELF_PART_RELRO: sdb_set (bin->kv, "elf.relro", "partial", 0); break; default: sdb_set (bin->kv, "elf.relro", "no", 0); break; } sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0); sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0); return true; beach: free (dyn); return false; } static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) { int i; if (!bin->g_sections) { return NULL; } for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH-1)) { return &bin->g_sections[i]; } } return NULL; } static char *get_ver_flags(ut32 flags) { static char buff[32]; buff[0] = 0; if (!flags) { return "none"; } if (flags & VER_FLG_BASE) { strcpy (buff, "BASE "); } if (flags & VER_FLG_WEAK) { if (flags & VER_FLG_BASE) { strcat (buff, "| "); } strcat (buff, "WEAK "); } if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) { strcat (buff, "| <unknown>"); } return buff; } static Sdb *store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { int i; const ut64 num_entries = sz / sizeof (Elf_(Versym)); const char *section_name = ""; const char *link_section_name = ""; Elf_(Shdr) *link_shdr = NULL; Sdb *sdb = sdb_new0(); if (!sdb) { return NULL; } if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) { sdb_free (sdb); return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { sdb_free (sdb); return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!edata) { sdb_free (sdb); return NULL; } ut16 *data = (ut16*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!data) { free (edata); sdb_free (sdb); return NULL; } ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]); if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries); sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", num_entries, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (i = num_entries; i--;) { data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian); } R_FREE (edata); for (i = 0; i < num_entries; i += 4) { int j; int check_def; char key[32] = {0}; Sdb *sdb_entry = sdb_new0 (); snprintf (key, sizeof (key), "entry%d", i / 4); sdb_ns_set (sdb, key, sdb_entry); sdb_num_set (sdb_entry, "idx", i, 0); for (j = 0; (j < 4) && (i + j) < num_entries; ++j) { int k; char *tmp_val = NULL; snprintf (key, sizeof (key), "value%d", j); switch (data[i + j]) { case 0: sdb_set 
(sdb_entry, key, "0 (*local*)", 0); break; case 1: sdb_set (sdb_entry, key, "1 (*global*)", 0); break; default: tmp_val = sdb_fmt (0, "%x ", data[i+j] & 0x7FFF); check_def = true; if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) { Elf_(Verneed) vn; ut8 svn[sizeof (Elf_(Verneed))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]); do { Elf_(Vernaux) vna; ut8 svna[sizeof (Elf_(Vernaux))] = {0}; ut64 a_off; if (offset > bin->size || offset + sizeof (vn) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) { bprintf ("Warning: Cannot read Verneed for Versym\n"); goto beach; } k = 0; vn.vn_version = READ16 (svn, k) vn.vn_cnt = READ16 (svn, k) vn.vn_file = READ32 (svn, k) vn.vn_aux = READ32 (svn, k) vn.vn_next = READ32 (svn, k) a_off = offset + vn.vn_aux; do { if (a_off > bin->size || a_off + sizeof (vna) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, a_off, svna, sizeof (svna)) < 0) { bprintf ("Warning: Cannot read Vernaux for Versym\n"); goto beach; } k = 0; vna.vna_hash = READ32 (svna, k) vna.vna_flags = READ16 (svna, k) vna.vna_other = READ16 (svna, k) vna.vna_name = READ32 (svna, k) vna.vna_next = READ32 (svna, k) a_off += vna.vna_next; } while (vna.vna_other != data[i + j] && vna.vna_next != 0); if (vna.vna_other == data[i + j]) { if (vna.vna_name > bin->strtab_size) { goto beach; } sdb_set (sdb_entry, key, sdb_fmt (0, "%s(%s)", tmp_val, bin->strtab + vna.vna_name), 0); check_def = false; break; } offset += vn.vn_next; } while (vn.vn_next); } ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)]; if (check_def && data[i + j] != 0x8001 && vinfoaddr) { Elf_(Verdef) vd; ut8 svd[sizeof (Elf_(Verdef))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr); if (offset > bin->size || offset + sizeof (vd) > bin->size) { goto beach; } do { if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) { bprintf ("Warning: Cannot read Verdef for Versym\n"); goto beach; } k = 0; vd.vd_version = READ16 (svd, k) vd.vd_flags = READ16 (svd, k) vd.vd_ndx = READ16 (svd, k) vd.vd_cnt = READ16 (svd, k) vd.vd_hash = READ32 (svd, k) vd.vd_aux = READ32 (svd, k) vd.vd_next = READ32 (svd, k) offset += vd.vd_next; } while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0); if (vd.vd_ndx == (data[i + j] & 0x7FFF)) { Elf_(Verdaux) vda; ut8 svda[sizeof (Elf_(Verdaux))] = {0}; ut64 off_vda = offset - vd.vd_next + vd.vd_aux; if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) { bprintf ("Warning: Cannot read Verdaux for Versym\n"); goto beach; } k = 0; vda.vda_name = READ32 (svda, k) vda.vda_next = READ32 (svda, k) if (vda.vda_name > bin->strtab_size) { goto beach; } const char *name = bin->strtab + vda.vda_name; sdb_set (sdb_entry, key, sdb_fmt (0,"%s(%s%-*s)", tmp_val, name, (int)(12 - strlen (name)),")") , 0); } } } } } beach: free (data); return sdb; } static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = 
&bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!defs) { bprintf ("Warning: Cannot allocate memory (Check Elf_(Verdef))\n"); return NULL; } sdb = sdb_new0 (); end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { sdb_free (sdb_verdef); goto out_error; } if ((st32)verdef->vd_next < 1) { eprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = 
&bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo(ELFOBJ *bin) { Sdb *sdb_versioninfo = NULL; int num_verdef = 0; int num_verneed = 0; int num_versym = 0; int i; if (!bin || !bin->shdr) { return NULL; } if (!(sdb_versioninfo = sdb_new0 ())) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { Sdb *sdb = NULL; char 
key[32] = {0}; int size = bin->shdr[i].sh_size; if (size - (i*sizeof(Elf_(Shdr)) > bin->size)) { size = bin->size - (i*sizeof(Elf_(Shdr))); } int left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch (bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; } static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; } for (i = 0; i < bin->ehdr.e_shnum; ++i) { if (bin->shdr[i].sh_name > bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { bprintf("Warning: Cannot allocate memory for dynamic strings\n"); return false; } if (bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; } static int elf_init(ELFOBJ *bin) { bin->phdr = NULL; bin->shdr = NULL; bin->strtab = NULL; bin->shstrtab = NULL; bin->strtab_size = 0; bin->strtab_section = NULL; bin->dyn_buf = NULL; bin->dynstr = NULL; ZERO_FILL (bin->version_info); bin->g_sections = NULL; bin->g_symbols = NULL; bin->g_imports = NULL; /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin)) { bprintf ("Warning: Cannot initialize program headers\n"); } if (!init_shdr (bin)) { bprintf ("Warning: Cannot initialize section headers\n"); } if (!init_strtab (bin)) { bprintf ("Warning: Cannot initialize strings table\n"); } if (!init_dynstr (bin)) { bprintf ("Warning: Cannot initialize dynamic strings\n"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_get_static)(bin)) bprintf ("Warning: Cannot initialize dynamic section\n"); bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; } ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); if (!section) return UT64_MAX; return section->offset; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? 
section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva + section->size: UT64_MAX; } #define REL (is_rela ? (void*)rela : (void*)rel) #define REL_BUF is_rela ? (ut8*)(&rela[k]) : (ut8*)(&rel[k]) #define REL_OFFSET is_rela ? rela[k].r_offset : rel[k].r_offset #define REL_TYPE is_rela ? rela[k].r_info : rel[k].r_info static ut64 get_import_addr(ELFOBJ *bin, int sym) { Elf_(Rel) *rel = NULL; Elf_(Rela) *rela = NULL; ut8 rl[sizeof (Elf_(Rel))] = {0}; ut8 rla[sizeof (Elf_(Rela))] = {0}; RBinElfSection *rel_sec = NULL; Elf_(Addr) plt_sym_addr = -1; ut64 got_addr, got_offset; ut64 plt_addr; int j, k, tsize, len, nrel; bool is_rela = false; const char *rel_sect[] = { ".rel.plt", ".rela.plt", ".rel.dyn", ".rela.dyn", NULL }; const char *rela_sect[] = { ".rela.plt", ".rel.plt", ".rela.dyn", ".rel.dyn", NULL }; if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return -1; } if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) == -1 && (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) == -1) { return -1; } if ((got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got")) == -1 && (got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got.plt")) == -1) { return -1; } if (bin->is_rela == DT_REL) { j = 0; while (!rel_sec && rel_sect[j]) { rel_sec = get_section_by_name (bin, rel_sect[j++]); } tsize = sizeof (Elf_(Rel)); } else if (bin->is_rela == DT_RELA) { j = 0; while (!rel_sec && rela_sect[j]) { rel_sec = get_section_by_name (bin, rela_sect[j++]); } is_rela = true; tsize = sizeof (Elf_(Rela)); } if (!rel_sec) { return -1; } if (rel_sec->size < 1) { return -1; } nrel = (ut32)((int)rel_sec->size / (int)tsize); if (nrel < 1) { return -1; } if (is_rela) { rela = calloc (nrel, tsize); if (!rela) { return -1; } } else { rel = calloc (nrel, tsize); if (!rel) { return -1; } } for (j = k = 0; j < rel_sec->size && k < nrel; j += tsize, k++) { int l = 0; if (rel_sec->offset + j > bin->size) { goto out; } if (rel_sec->offset + j + tsize > bin->size) { goto out; } len = r_buf_read_at ( bin->b, rel_sec->offset + j, is_rela ? rla : rl, is_rela ? 
sizeof (Elf_ (Rela)) : sizeof (Elf_ (Rel))); if (len < 1) { goto out; } #if R_BIN_ELF64 if (is_rela) { rela[k].r_offset = READ64 (rla, l) rela[k].r_info = READ64 (rla, l) rela[k].r_addend = READ64 (rla, l) } else { rel[k].r_offset = READ64 (rl, l) rel[k].r_info = READ64 (rl, l) } #else if (is_rela) { rela[k].r_offset = READ32 (rla, l) rela[k].r_info = READ32 (rla, l) rela[k].r_addend = READ32 (rla, l) } else { rel[k].r_offset = READ32 (rl, l) rel[k].r_info = READ32 (rl, l) } #endif int reloc_type = ELF_R_TYPE (REL_TYPE); int reloc_sym = ELF_R_SYM (REL_TYPE); if (reloc_sym == sym) { int of = REL_OFFSET; of = of - got_addr + got_offset; switch (bin->ehdr.e_machine) { case EM_PPC: case EM_PPC64: { RBinElfSection *s = get_section_by_name (bin, ".plt"); if (s) { ut8 buf[4]; ut64 base; len = r_buf_read_at (bin->b, s->offset, buf, sizeof (buf)); if (len < 4) { goto out; } base = r_read_be32 (buf); base -= (nrel * 16); base += (k * 16); plt_addr = base; free (REL); return plt_addr; } } break; case EM_SPARC: case EM_SPARCV9: case EM_SPARC32PLUS: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return -1; } if (reloc_type == R_386_PC16) { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } else { bprintf ("Unknown sparc reloc type %d\n", reloc_type); } /* SPARC */ break; case EM_ARM: case EM_AARCH64: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (rela); return UT32_MAX; } switch (reloc_type) { case R_386_8: { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } break; case 1026: // arm64 aarch64 plt_sym_addr = plt_addr + k * 16 + 32; goto done; default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); break; } break; case EM_386: case EM_X86_64: switch (reloc_type) { case 1: // unknown relocs found in voidlinux for x86-64 // break; case R_386_GLOB_DAT: case R_386_JMP_SLOT: { ut8 buf[8]; if (of + sizeof(Elf_(Addr)) < bin->size) { // ONLY FOR X86 if (of > bin->size || of + sizeof (Elf_(Addr)) > bin->size) { goto out; } len = r_buf_read_at (bin->b, of, buf, sizeof (Elf_(Addr))); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? r_read_le32 (buf) : r_read_le64 (buf); if (!plt_sym_addr) { //XXX HACK ALERT!!!! full relro?? try to fix it //will there always be .plt.got, what would happen if is .got.plt? RBinElfSection *s = get_section_by_name (bin, ".plt.got"); if (Elf_(r_bin_elf_has_relro)(bin) < R_ELF_PART_RELRO || !s) { goto done; } plt_addr = s->offset; of = of + got_addr - got_offset; while (plt_addr + 2 + 4 < s->offset + s->size) { /*we try to locate the plt entry that correspond with the relocation since got does not point back to .plt. In this case it has the following form ff253a152000 JMP QWORD [RIP + 0x20153A] 6690 NOP ---- ff25ec9f0408 JMP DWORD [reloc.puts_236] plt_addr + 2 to remove jmp opcode and get the imm reading 4 and if RIP (plt_addr + 6) + imm == rel->offset return plt_addr, that will be our sym addr perhaps this hack doesn't work on 32 bits */ len = r_buf_read_at (bin->b, plt_addr + 2, buf, 4); if (len < -1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? 
r_read_le32 (buf) : r_read_le64 (buf); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == of) { plt_sym_addr = plt_addr; goto done; } else if (plt_sym_addr == of) { plt_sym_addr = plt_addr; goto done; } plt_addr += 8; } } else { plt_sym_addr -= 6; } goto done; } break; } default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); free (REL); return of; break; } break; case 8: // MIPS32 BIG ENDIAN relocs { RBinElfSection *s = get_section_by_name (bin, ".rela.plt"); if (s) { ut8 buf[1024]; const ut8 *base; plt_addr = s->rva + s->size; len = r_buf_read_at (bin->b, s->offset + s->size, buf, sizeof (buf)); if (len != sizeof (buf)) { // oops } base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8*)"\x3c\x0f\x00", 3, 4); if (base) { plt_addr += (int)(size_t)(base - buf); } else { plt_addr += 108 + 8; // HARDCODED HACK } plt_addr += k * 16; free (REL); return plt_addr; } } break; default: bprintf ("Unsupported relocs type %d for arch %d\n", reloc_type, bin->ehdr.e_machine); break; } } } done: free (REL); return plt_sym_addr; out: free (REL); return -1; } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 1: 0; } } } return 0; } int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) { int i; bool haveBindNow = false; bool haveGnuRelro = false; if (bin && bin->dyn_buf) { for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_BIND_NOW: haveBindNow = true; break; case DT_FLAGS: for (i++; i < bin->dyn_entries ; i++) { ut32 dTag = bin->dyn_buf[i].d_tag; if (!dTag) { break; } switch (dTag) { case DT_FLAGS_1: if (bin->dyn_buf[i].d_un.d_val & DF_1_NOW) { haveBindNow = true; break; } } } break; } } } if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_RELRO) { haveGnuRelro = true; break; } } } if (haveGnuRelro) { if (haveBindNow) { return R_ELF_FULL_RELRO; } return R_ELF_PART_RELRO; } return R_ELF_NO_RELRO; } /* To compute the base address, one determines the memory address associated with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the base address by truncating the memory address to the nearest multiple of the maximum page size */ ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (!bin) { return 0; } if (bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } if (base == UT64_MAX && bin->ehdr.e_type == ET_REL) { //we return our own base address for ET_REL type //we act as a loader for ELF return 0x08000000; } return base == UT64_MAX ? 0 : base; } ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } return base == UT64_MAX ? 
0 : base; } ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (init_offset)\n"); return 0; } if (buf[0] == 0x68) { // push // x86 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry+11, buf, sizeof (buf)) == -1) { bprintf ("Warning: read (get_fini)\n"); return 0; } if (*buf == 0x68) { // push // x86/32 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) { ut64 entry; if (!bin) { return 0LL; } entry = bin->ehdr.e_entry; if (!entry) { entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init"); if (entry != UT64_MAX) { return entry; } if (entry == UT64_MAX) { return 0; } } return Elf_(r_bin_elf_v2p) (bin, entry); } static ut64 getmainsymbol(ELFOBJ *bin) { struct r_bin_elf_symbol_t *symbol; int i; if (!(symbol = Elf_(r_bin_elf_get_symbols) (bin))) { return UT64_MAX; } for (i = 0; !symbol[i].last; i++) { if (!strcmp (symbol[i].name, "main")) { ut64 paddr = symbol[i].offset; return Elf_(r_bin_elf_p2v) (bin, paddr); } } return UT64_MAX; } ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (entry > bin->size || (entry + sizeof (buf)) > bin->size) { return 0; } if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (main)\n"); return 0; } // ARM64 if (buf[0x18+3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } } // TODO: Use arch to identify arch before memcmp's // ARM ut64 text = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); ut64 text_end = text + bin->size; // ARM-Thumb-Linux if (entry & 1 && !memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00", 6)) { ut32 * ptr = (ut32*)(buf+40-1); if (*ptr &1) { return Elf_(r_bin_elf_v2p) (bin, *ptr -1); } } if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) { // endian stuff here ut32 *addr = (ut32*)(buf+0x34); /* 0x00012000 00b0a0e3 mov fp, 0 0x00012004 00e0a0e3 mov lr, 0 */ if (*addr > text && *addr < (text_end)) { return Elf_(r_bin_elf_v2p) (bin, *addr); } } // MIPS /* get .got, calculate offset of main symbol */ if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) { /* assuming the startup code looks like got = gp-0x7ff0 got[index__libc_start_main] ( got[index_main] ); looking for the instruction generating the first argument to find main lw a0, offset(gp) */ ut64 got_offset; if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; unsigned i; for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 
0x8f840000) { // lw a0, offset(gp) const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; } // ARM if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { ut64 addr = r_read_le32 (&buf[48]); return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-CGC if (buf[0] == 0xe8 && !memcmp (buf + 5, "\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-PIE if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } } // X86-PIE if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { bprintf ("Warning: read (maddr) 2\n"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } } // X86-NONPIE #if R_BIN_ELF64 if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd return r_read_le32 (&buf[157]) + entry + 156 + 5; } if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); } #else if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32 (&buf[23 + 1]); return Elf_(r_bin_elf_v2p) (bin, addr); } #endif /* linux64 pie main -- probably buggy in some cases */ if (buf[29] == 0x48 && buf[30] == 0x8d) { // lea rdi, qword [rip-0x21c4] ut8 *p = buf + 32; st32 maindelta = (st32)r_read_le32 (p); ut64 vmain = (ut64)(entry + 29 + maindelta) + 7; ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain>>16 == ventry>>16) { return (ut64)vmain; } } /* find sym.main if possible */ { ut64 m = getmainsymbol (bin); if (m != UT64_MAX) return m; } return UT64_MAX; } int Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) { int i; if (!bin->shdr) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_type == SHT_SYMTAB) { return false; } } return true; } char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) { int i; if (!bin || !bin->phdr) { return NULL; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { char *str = NULL; ut64 addr = bin->phdr[i].p_offset; int sz = bin->phdr[i].p_memsz; sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0); sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0); if (sz < 1) { return NULL; } str = malloc (sz + 1); if (!str) { return NULL; } if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) { bprintf ("Warning: read (main)\n"); return 0; } str[sz] = 0; sdb_set (bin->kv, "elf_header.intrp", str, 0); return str; } } return NULL; } int Elf_(r_bin_elf_get_static)(ELFOBJ *bin) { int i; if (!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: 
return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_HEXAGON: return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_SH: return strdup ("sh"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup("ia64"); default: return strdup ("x86"); } } char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); 
case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp. Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } } char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { ut32 e_type; if (!bin) { return NULL; } e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16 switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); } char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } } int 
Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) {
	/* Hack for ARCompact */
	if (bin->ehdr.e_machine == EM_ARC_A5) {
		return 16;
	}
	/* Hack for Ps2 */
	if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) {
		const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH;
		if (bin->ehdr.e_type == ET_EXEC) {
			int i;
			bool haveInterp = false;
			for (i = 0; i < bin->ehdr.e_phnum; i++) {
				if (bin->phdr[i].p_type == PT_INTERP) {
					haveInterp = true;
				}
			}
			if (!haveInterp && mipsType == EF_MIPS_ARCH_3) {
				// Playstation2 Hack
				return 64;
			}
		}
		// TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...)
		switch (mipsType) {
		case EF_MIPS_ARCH_1:
		case EF_MIPS_ARCH_2:
		case EF_MIPS_ARCH_3:
		case EF_MIPS_ARCH_4:
		case EF_MIPS_ARCH_5:
		case EF_MIPS_ARCH_32:
			return 32;
		case EF_MIPS_ARCH_64:
			return 64;
		case EF_MIPS_ARCH_32R2:
			return 32;
		case EF_MIPS_ARCH_64R2:
			return 64;
		}
		return 32;
	}
	/* Hack for Thumb */
	if (bin->ehdr.e_machine == EM_ARM) {
		if (bin->ehdr.e_type != ET_EXEC) {
			struct r_bin_elf_symbol_t *symbol;
			if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) {
				int i = 0;
				for (i = 0; !symbol[i].last; i++) {
					ut64 paddr = symbol[i].offset;
					if (paddr & 1) {
						return 16;
					}
				}
			}
		}
		{
			ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin);
			if (entry & 1) {
				return 16;
			}
		}
	}
	switch (bin->ehdr.e_ident[EI_CLASS]) {
	case ELFCLASS32: return 32;
	case ELFCLASS64: return 64;
	case ELFCLASSNONE:
	default:
		return 32; // defaults
	}
}

static inline int noodle(ELFOBJ *bin, const char *s) {
	const ut8 *p = bin->b->buf;
	if (bin->b->length > 64) {
		p += bin->b->length - 64;
	} else {
		return 0;
	}
	return r_mem_mem (p, 64, (const ut8 *)s, strlen (s)) != NULL;
}

static inline int needle(ELFOBJ *bin, const char *s) {
	if (bin->shstrtab) {
		ut32 len = bin->shstrtab_size;
		if (len > 4096) {
			len = 4096; // avoid slow loading .. can be buggy?
		}
		return r_mem_mem ((const ut8*)bin->shstrtab, len,
				(const ut8*)s, strlen (s)) != NULL;
	}
	return 0;
}

// TODO: must return const char * all those strings must be const char os[LINUX] or so
char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) {
	switch (bin->ehdr.e_ident[EI_OSABI]) {
	case ELFOSABI_LINUX: return strdup ("linux");
	case ELFOSABI_SOLARIS: return strdup ("solaris");
	case ELFOSABI_FREEBSD: return strdup ("freebsd");
	case ELFOSABI_HPUX: return strdup ("hpux");
	}
	/* Hack to identify OS */
	if (needle (bin, "openbsd")) return strdup ("openbsd");
	if (needle (bin, "netbsd")) return strdup ("netbsd");
	if (needle (bin, "freebsd")) return strdup ("freebsd");
	if (noodle (bin, "BEOS:APP_VERSION")) return strdup ("beos");
	if (needle (bin, "GNU")) return strdup ("linux");
	return strdup ("linux");
}

ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) {
	if (bin->phdr) {
		int i;
		int num = bin->ehdr.e_phnum;
		for (i = 0; i < num; i++) {
			if (bin->phdr[i].p_type != PT_NOTE) {
				continue;
			}
			int bits = Elf_(r_bin_elf_get_bits)(bin);
			int regdelta = (bits == 64)? 0x84: 0x40; // x64 vs x32
			int regsize = 160; // for x86-64
			ut8 *buf = malloc (regsize);
			if (!buf) {
				// the allocation result was previously used unchecked
				return NULL;
			}
			if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + regdelta, buf, regsize) != regsize) {
				free (buf);
				bprintf ("Cannot read register state from CORE file\n");
				return NULL;
			}
			if (len) {
				*len = regsize;
			}
			return buf;
		}
	}
	bprintf ("Cannot find NOTE section\n");
	return NULL;
}

int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) {
	return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB);
}

/* XXX Init dt_strtab?
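 */

/*
 * Usage sketch (illustrative only): how a caller could consume the register
 * blob returned by Elf_(r_bin_elf_grab_regstate) above. "dump_core_regs" is
 * a hypothetical helper, not part of the radare2 API; it assumes "bin" was
 * created with Elf_(r_bin_elf_new) on a CORE file.
 */
static void dump_core_regs(ELFOBJ *bin) {
	int len = 0;
	ut8 *regs = Elf_(r_bin_elf_grab_regstate) (bin, &len);
	if (!regs) {
		return; // no PT_NOTE segment, or the register area is truncated
	}
	int i;
	for (i = 0; i < len; i++) {
		// 16 bytes per row of hex output
		printf ("%02x%c", regs[i], ((i + 1) % 16)? ' ': '\n');
	}
	free (regs);
}

/*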
*/ char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) { char *ret = NULL; int j; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab) { return NULL; } for (j = 0; j< bin->dyn_entries; j++) { if (bin->dyn_buf[j].d_tag == DT_RPATH || bin->dyn_buf[j].d_tag == DT_RUNPATH) { if (!(ret = calloc (1, ELF_STRING_LENGTH))) { perror ("malloc (rpath)"); return NULL; } if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[ELF_STRING_LENGTH - 1] = '\0'; break; } } return ret; } static size_t get_relocs_num(ELFOBJ *bin) { size_t i, size, ret = 0; /* we need to be careful here, in malformed files the section size might * not be a multiple of a Rel/Rela size; round up so we allocate enough * space. */ #define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize)+(entrysize)-1)/(entrysize)) if (!bin->g_sections) { return 0; } size = bin->is_rela == DT_REL ? sizeof (Elf_(Rel)) : sizeof (Elf_(Rela)); for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela."))) { if (!bin->is_rela) { size = sizeof (Elf_(Rela)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } else if (!strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel."))){ if (!bin->is_rela) { size = sizeof (Elf_(Rel)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } } return ret; #undef NUMENTRIES_ROUNDUP } static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela; #if R_BIN_ELF64 rela.r_offset = READ64 (buf + offset, j) rela.r_info = READ64 (buf + offset, j) rela.r_addend = READ64 (buf + offset, j) #else rela.r_offset = READ32 (buf + offset, j) rela.r_info = READ32 (buf + offset, j) rela.r_addend = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rela.r_offset; r->type = ELF_R_TYPE (rela.r_info); r->sym = ELF_R_SYM (rela.r_info); r->last = 0; r->addend = rela.r_addend; return sizeof (Elf_(Rela)); } else { Elf_(Rel) rel; #if R_BIN_ELF64 rel.r_offset = READ64 (buf + offset, j) rel.r_info = READ64 (buf + offset, j) #else rel.r_offset = READ32 (buf + offset, j) rel.r_info = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rel.r_offset; r->type = ELF_R_TYPE (rel.r_info); r->sym = ELF_R_SYM (rel.r_info); r->last = 0; return sizeof (Elf_(Rel)); } } RBinElfReloc* Elf_(r_bin_elf_get_relocs)(ELFOBJ *bin) { int res, rel, rela, i, j; size_t reloc_num = 0; RBinElfReloc *ret = NULL; if (!bin || !bin->g_sections) { return NULL; } reloc_num = get_relocs_num (bin); if (!reloc_num) { return NULL; } bin->reloc_num = reloc_num; ret = (RBinElfReloc*)calloc ((size_t)reloc_num + 1, sizeof(RBinElfReloc)); if (!ret) { return NULL; } #if DEAD_CODE ut64 section_text_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".text"); if (section_text_offset == -1) { section_text_offset = 0; } #endif for (i = 0, rel = 0; !bin->g_sections[i].last && rel < reloc_num ; i++) { bool is_rela = 0 == strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela.")); bool is_rel = 0 == strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel.")); if (!is_rela && !is_rel) { continue; } for (j = 0; j < bin->g_sections[i].size; j += res) { if (bin->g_sections[i].size > bin->size) { break; } if (bin->g_sections[i].offset > bin->size) { break; } if (rel >= reloc_num) { bprintf 
("Internal error: ELF relocation buffer too small," "please file a bug report."); break; } if (!bin->is_rela) { rela = is_rela? DT_RELA : DT_REL; } else { rela = bin->is_rela; } res = read_reloc (bin, &ret[rel], rela, bin->g_sections[i].offset + j); if (j + res > bin->g_sections[i].size) { bprintf ("Warning: malformed file, relocation entry #%u is partially beyond the end of section %u.\n", rel, i); } if (bin->ehdr.e_type == ET_REL) { if (bin->g_sections[i].info < bin->ehdr.e_shnum && bin->shdr) { ret[rel].rva = bin->shdr[bin->g_sections[i].info].sh_offset + ret[rel].offset; ret[rel].rva = Elf_(r_bin_elf_p2v) (bin, ret[rel].rva); } else { ret[rel].rva = ret[rel].offset; } } else { ret[rel].rva = ret[rel].offset; ret[rel].offset = Elf_(r_bin_elf_v2p) (bin, ret[rel].offset); } ret[rel].last = 0; if (res < 0) { break; } rel++; } } ret[reloc_num].last = 1; return ret; } RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) { RBinElfLib *ret = NULL; int j, k; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab || *(bin->strtab+1) == '0') { return NULL; } for (j = 0, k = 0; j < bin->dyn_entries; j++) if (bin->dyn_buf[j].d_tag == DT_NEEDED) { RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret[k].name, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[k].name[ELF_STRING_LENGTH - 1] = '\0'; ret[k].last = 0; if (ret[k].name[0]) { k++; } } RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; ret[k].last = 1; return ret; } static RBinElfSection* get_sections_from_phdr(ELFOBJ *bin) { RBinElfSection *ret; int i, num_sections = 0; ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0; ut64 reldynsz = 0, relasz = 0, pltgotsz = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) return NULL; for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_REL: reldyn = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELA: relva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELSZ: reldynsz = bin->dyn_buf[i].d_un.d_val; break; case DT_RELASZ: relasz = bin->dyn_buf[i].d_un.d_val; break; case DT_PLTGOT: pltgotva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_PLTRELSZ: pltgotsz = bin->dyn_buf[i].d_un.d_val; break; case DT_JMPREL: relava = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; default: break; } } ret = calloc (num_sections + 1, sizeof(RBinElfSection)); if (!ret) { return NULL; } i = 0; if (reldyn) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, reldyn); ret[i].rva = reldyn; ret[i].size = reldynsz; strcpy (ret[i].name, ".rel.dyn"); ret[i].last = 0; i++; } if (relava) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relava); ret[i].rva = relava; ret[i].size = pltgotsz; strcpy (ret[i].name, ".rela.plt"); ret[i].last = 0; i++; } if (relva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relva); ret[i].rva = relva; ret[i].size = relasz; strcpy (ret[i].name, ".rel.plt"); ret[i].last = 0; i++; } if (pltgotva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, pltgotva); ret[i].rva = pltgotva; ret[i].size = pltgotsz; strcpy (ret[i].name, ".got.plt"); ret[i].last = 0; i++; } ret[i].last = 1; return ret; } RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[20], invalid_s[20]; int i, nidx, unknown_c=0, invalid_c=0; if (!bin) { return NULL; } if 
(bin->g_sections) { return bin->g_sections; } if (!bin->shdr) { //we don't give up search in phdr section return get_sections_from_phdr (bin); } if (!(ret = calloc ((bin->ehdr.e_shnum + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (bin->ehdr.e_type == ET_REL) { ret[i].rva = bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } nidx = bin->shdr[i].sh_name; #define SHNAME (int)bin->shdr[i].sh_name #define SHNLEN ELF_STRING_LENGTH - 4 #define SHSIZE (int)bin->shstrtab_size if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s) - 4, "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, SHNLEN); invalid_c++; } else { if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], SHNLEN); } else { if (bin->shdr[i].sh_type == SHT_NULL) { //to follow the same behaviour as readelf strncpy (ret[i].name, "", sizeof (ret[i].name) - 4); } else { snprintf (unknown_s, sizeof (unknown_s)-4, "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name)-4); unknown_c++; } } } ret[i].name[ELF_STRING_LENGTH-2] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; } static void fill_symbol_bind_and_type (struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { #define s_bind(x) ret->bind = x #define s_type(x) ret->type = x switch (ELF_ST_BIND(sym->st_info)) { case STB_LOCAL: s_bind ("LOCAL"); break; case STB_GLOBAL: s_bind ("GLOBAL"); break; case STB_WEAK: s_bind ("WEAK"); break; case STB_NUM: s_bind ("NUM"); break; case STB_LOOS: s_bind ("LOOS"); break; case STB_HIOS: s_bind ("HIOS"); break; case STB_LOPROC: s_bind ("LOPROC"); break; case STB_HIPROC: s_bind ("HIPROC"); break; default: s_bind ("UNKNOWN"); } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: s_type ("NOTYPE"); break; case STT_OBJECT: s_type ("OBJECT"); break; case STT_FUNC: s_type ("FUNC"); break; case STT_SECTION: s_type ("SECTION"); break; case STT_FILE: s_type ("FILE"); break; case STT_COMMON: s_type ("COMMON"); break; case STT_TLS: s_type ("TLS"); break; case STT_NUM: s_type ("NUM"); break; case STT_LOOS: s_type ("LOOS"); break; case STT_HIOS: s_type ("HIOS"); break; case STT_LOPROC: s_type ("LOPROC"); break; case STT_HIPROC: s_type ("HIPROC"); break; default: s_type ("UNKNOWN"); } } static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, j, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } for (j = 0; j < bin->dyn_entries; j++) { switch (bin->dyn_buf[j].d_tag) { case (DT_SYMTAB): addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_buf[j].d_un.d_ptr); break; case (DT_SYMENT): sym_size = bin->dyn_buf[j].d_un.d_val; break; default: break; } } if (!addr_sym_table) { return NULL; } if (!sym_size) { return NULL; } //since ELF doesn't specify the symbol table size we may read until the end of the buffer nsym = (bin->size - addr_sym_table) / sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if 
(addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; } // we reserve room for 4096 and grow as needed. size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow // You take what you want, but you eat what you take. Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc(sym, (capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 *= GROWTH_FACTOR; } if (ret_ctr >= capacity2) { // maybe grow RBinElfSymbol *temp_ret = realloc (ret, capacity2 * GROWTH_FACTOR * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 *= GROWTH_FACTOR; } // read in one entry r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0; #if R_BIN_ELF64 sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j); #else sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); #endif // zero symbol is always empty // Examine entry and maybe store if (type == R_BIN_ELF_IMPORTS && sym[i].st_shndx == STN_UNDEF) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[i].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[i].st_info) != STT_SECTION && ELF_ST_TYPE (sym[i].st_info) != STT_FILE) { tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; } else { continue; } tmp_offset = Elf_(r_bin_elf_v2p) (bin, toffset); if (tmp_offset > bin->size) { goto done; } if (sym[i].st_name + 2 > bin->strtab_size) { // Since we are reading beyond the symbol table what's happening // is that some entry is trying to dereference the strtab beyond its capacity // is not a symbol so is the end goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[i]); ret[ret_ctr].last = 0; ret_ctr++; } done: ret[ret_ctr].last = 1; // Size everything down to only what is used { nsym = i > 0 ? i : 1; Elf_ (Sym) * temp_sym = (Elf_ (Sym)*) realloc (sym, (nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0 ? 
ret_ctr : 1; RBinElfSymbol *p = (RBinElfSymbol *) realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol)); if (!p) { goto beach; } ret = p; } if (type == R_BIN_ELF_IMPORTS && !bin->imports_by_ord_size) { bin->imports_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*)); } else { bin->imports_by_ord = NULL; } } else if (type == R_BIN_ELF_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) { bin->symbols_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->symbols_by_ord = (RBinSymbol * *) calloc (ret_ctr + 1, sizeof (RBinSymbol*)); }else { bin->symbols_by_ord = NULL; } } free (sym); return ret; beach: free (sym); free (ret); return NULL; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_symbols) { return bin->phdr_symbols; } bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_SYMBOLS); return bin->phdr_symbols; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_imports) { return bin->phdr_imports; } bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORTS); return bin->phdr_imports; } static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); RBinElfSymbol *tmp, *p; if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { /* find match in phdr */ p = phdr_symbols; while (!p->last) { if (p->offset && d->offset == p->offset) { p->in_shdr = true; if (*p->name && strcmp (d->name, p->name)) { strcpy (d->name, p->name); } } p++; } d++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { return -1; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; return nsym + 1; } return nsym; } static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = { 0 }; char *strtab = NULL; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size + 8 > bin->size) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if ((type == R_BIN_ELF_IMPORTS && bin->shdr[i].sh_type == (bin->ehdr.e_type == ET_REL ? SHT_SYMTAB : SHT_DYNSYM)) || (type == R_BIN_ELF_SYMBOLS && bin->shdr[i].sh_type == (Elf_(r_bin_elf_get_stripped) (bin) ? SHT_DYNSYM : SHT_SYMTAB))) { if (bin->shdr[i].sh_link < 1) { /* oops. fix out of range pointers */ continue; } // hack to avoid asan cry if ((bin->shdr[i].sh_link * sizeof(Elf_(Shdr))) >= shdr_size) { /* oops. 
fix out of range pointers */ continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { bprintf ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { bprintf ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { bprintf ("Warning: read (syms strtab)\n"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { bprintf ("invalid shdr %d size\n", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { bprintf ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { bprintf ("Warning: read (sym)\n"); goto beach; } #if R_BIN_ELF64 sym[j].st_name = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) sym[j].st_value = READ64 (s, k) sym[j].st_size = READ64 (s, k) #else sym[j].st_name = READ32 (s, k) sym[j].st_value = READ32 (s, k) sym[j].st_size = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) #endif } free (ret); ret = calloc (nsym, sizeof (RBinElfSymbol)); if (!ret) { bprintf ("Cannot allocate %d symbols\n", nsym); goto beach; } for (k = 1, ret_ctr = 0; k < nsym; k++) { if (type == R_BIN_ELF_IMPORTS && sym[k].st_shndx == STN_UNDEF) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[k].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[k].st_info) != STT_SECTION && ELF_ST_TYPE (sym[k].st_info) != STT_FILE) { //int idx = sym[k].st_shndx; tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; } else { continue; } if (bin->ehdr.e_type == ET_REL) { if (sym[k].st_shndx < bin->ehdr.e_shnum) ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p) (bin, toffset); } ret[ret_ctr].size = tsize; if (sym[k].st_name + 2 > strtab_section->sh_size) { bprintf ("Warning: index out of strtab range\n"); goto beach; } { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[k].st_name; int maxsize = R_MIN (bin->b->length, strtab_section->sh_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const size_t len = __strnlen (strtab + sym[k].st_name, rest); memcpy (ret[ret_ctr].name, &strtab[sym[k].st_name], len); } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[k]); ret[ret_ctr].last = 0; ret_ctr++; } ret[ret_ctr].last = 1; // ugly dirty hack :D R_FREE (strtab); R_FREE (sym); } } if (!ret) { return (type == R_BIN_ELF_SYMBOLS) ? 
			Elf_(r_bin_elf_get_phdr_symbols) (bin) :
			Elf_(r_bin_elf_get_phdr_imports) (bin);
	}
	int max = -1;
	RBinElfSymbol *aux = NULL;
	nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret);
	if (nsym == -1) {
		goto beach;
	}
	aux = ret;
	while (!aux->last) {
		if ((int)aux->ordinal > max) {
			max = aux->ordinal;
		}
		aux++;
	}
	nsym = max;
	if (type == R_BIN_ELF_IMPORTS) {
		R_FREE (bin->imports_by_ord);
		bin->imports_by_ord_size = nsym + 1;
		bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*));
	} else if (type == R_BIN_ELF_SYMBOLS) {
		R_FREE (bin->symbols_by_ord);
		bin->symbols_by_ord_size = nsym + 1;
		bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*));
	}
	return ret;
beach:
	free (ret);
	free (sym);
	free (strtab);
	return NULL;
}

RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) {
	if (!bin->g_symbols) {
		bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_SYMBOLS);
	}
	return bin->g_symbols;
}

RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) {
	if (!bin->g_imports) {
		bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORTS);
	}
	return bin->g_imports;
}

RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) {
	RBinElfField *ret = NULL;
	int i = 0, j;
	if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) {
		return NULL;
	}
	strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH);
	ret[i].offset = 0;
	ret[i++].last = 0;
	strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH);
	ret[i].offset = bin->ehdr.e_shoff;
	ret[i++].last = 0;
	strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH);
	ret[i].offset = bin->ehdr.e_phoff;
	ret[i++].last = 0;
	for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) {
		snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j);
		ret[i].offset = bin->phdr[j].p_offset;
		ret[i].last = 0;
	}
	ret[i].last = 1;
	return ret;
}

void* Elf_(r_bin_elf_free)(ELFOBJ* bin) {
	int i;
	if (!bin) {
		return NULL;
	}
	free (bin->phdr);
	free (bin->shdr);
	free (bin->strtab);
	free (bin->dyn_buf);
	free (bin->shstrtab);
	free (bin->dynstr);
	//free (bin->strtab_section);
	if (bin->imports_by_ord) {
		for (i = 0; i < bin->imports_by_ord_size; i++) {
			free (bin->imports_by_ord[i]);
		}
		free (bin->imports_by_ord);
	}
	if (bin->symbols_by_ord) {
		for (i = 0; i < bin->symbols_by_ord_size; i++) {
			free (bin->symbols_by_ord[i]);
		}
		free (bin->symbols_by_ord);
	}
	r_buf_free (bin->b);
	if (bin->g_symbols != bin->phdr_symbols) {
		R_FREE (bin->phdr_symbols);
	}
	if (bin->g_imports != bin->phdr_imports) {
		R_FREE (bin->phdr_imports);
	}
	R_FREE (bin->g_sections);
	R_FREE (bin->g_symbols);
	R_FREE (bin->g_imports);
	free (bin);
	return NULL;
}

ELFOBJ* Elf_(r_bin_elf_new)(const char* file, bool verbose) {
	ut8 *buf;
	int size;
	ELFOBJ *bin = R_NEW0 (ELFOBJ);
	if (!bin) {
		return NULL;
	}
	// R_NEW0 already zeroes the struct; the extra memset was redundant
	bin->file = file;
	if (!(buf = (ut8*)r_file_slurp (file, &size))) {
		return Elf_(r_bin_elf_free) (bin);
	}
	bin->size = size;
	bin->verbose = verbose;
	bin->b = r_buf_new ();
	if (!r_buf_set_bytes (bin->b, buf, bin->size)) {
		free (buf);
		return Elf_(r_bin_elf_free) (bin);
	}
	if (!elf_init (bin)) {
		free (buf);
		return Elf_(r_bin_elf_free) (bin);
	}
	free (buf);
	return bin;
}

ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) {
	ELFOBJ *bin = R_NEW0 (ELFOBJ);
	if (!bin) {
		// this check was missing: R_NEW0 can fail and bin was dereferenced
		return NULL;
	}
	bin->kv = sdb_new0 ();
	bin->b = r_buf_new ();
	bin->size = (ut32)buf->length;
	bin->verbose = verbose;
	if (!r_buf_set_bytes (bin->b, buf->buf, buf->length)) {
		return Elf_(r_bin_elf_free) (bin);
	}
	if (!elf_init (bin)) {
		return Elf_(r_bin_elf_free) (bin);
	}
	return bin;
}

static int is_in_pphdr (Elf_(Phdr) *p, ut64 addr) {
	return addr >= p->p_offset && addr < p->p_offset + p->p_memsz;
}

static int is_in_vphdr (Elf_(Phdr) *p, ut64 addr) {
	return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_memsz;
}

/* converts a physical address to the virtual address, looking
 * at the program headers in the binary bin */
ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) {
	int i;
	if (!bin) {
		return 0;
	}
	if (!bin->phdr) {
		if (bin->ehdr.e_type == ET_REL) {
			return bin->baddr + paddr;
		}
		return paddr;
	}
	for (i = 0; i < bin->ehdr.e_phnum; ++i) {
		Elf_(Phdr) *p = &bin->phdr[i];
		if (!p) {
			break;
		}
		if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) {
			if (!p->p_vaddr && !p->p_offset) {
				continue;
			}
			return p->p_vaddr + paddr - p->p_offset;
		}
	}
	return paddr;
}

/* converts a virtual address to the relative physical address, looking
 * at the program headers in the binary bin */
ut64 Elf_(r_bin_elf_v2p) (ELFOBJ *bin, ut64 vaddr) {
	int i;
	if (!bin) {
		return 0;
	}
	if (!bin->phdr) {
		if (bin->ehdr.e_type == ET_REL) {
			return vaddr - bin->baddr;
		}
		return vaddr;
	}
	for (i = 0; i < bin->ehdr.e_phnum; ++i) {
		Elf_(Phdr) *p = &bin->phdr[i];
		if (!p) {
			break;
		}
		if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) {
			if (!p->p_offset && !p->p_vaddr) {
				continue;
			}
			return p->p_offset + vaddr - p->p_vaddr;
		}
	}
	return vaddr;
}
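/*
 * A minimal sketch of how the two translators above compose (illustrative
 * only; "check_addr_roundtrip" is a hypothetical helper, not part of the
 * radare2 API). For a file offset that falls inside a PT_LOAD segment,
 * v2p(p2v(x)) == x is expected; addresses outside every segment fall back
 * to the identity mapping in both directions.
 */
static int check_addr_roundtrip(ELFOBJ *bin, ut64 paddr) {
	ut64 vaddr = Elf_(r_bin_elf_p2v) (bin, paddr);
	return Elf_(r_bin_elf_v2p) (bin, vaddr) == paddr;
}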
/* radare - LGPL - Copyright 2008-2017 - nibble, pancake, alvaro_fe */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <r_types.h>
#include <r_util.h>
#include "elf.h"

#ifdef IFDBG
#undef IFDBG
#endif

#define DO_THE_DBG 0
#define IFDBG if (DO_THE_DBG)
#define IFINT if (0)

#define ELF_PAGE_MASK 0xFFFFFFFFFFFFF000LL
#define ELF_PAGE_SIZE 12

#define R_ELF_NO_RELRO 0
#define R_ELF_PART_RELRO 1
#define R_ELF_FULL_RELRO 2

#define bprintf if (bin->verbose) eprintf

#define READ8(x, i) r_read_ble8(x + i); i += 1;
#define READ16(x, i) r_read_ble16(x + i, bin->endian); i += 2;
#define READ32(x, i) r_read_ble32(x + i, bin->endian); i += 4;
#define READ64(x, i) r_read_ble64(x + i, bin->endian); i += 8;

#define GROWTH_FACTOR (1.5)

static inline int __strnlen(const char *str, int len) {
	int l = 0;
	while (IS_PRINTABLE (*str) && --len) {
		if (((ut8)*str) == 0xff) {
			break;
		}
		str++;
		l++;
	}
	return l + 1;
}

static int handle_e_ident(ELFOBJ *bin) {
	return !strncmp ((char *)bin->ehdr.e_ident, ELFMAG, SELFMAG) ||
		!strncmp ((char *)bin->ehdr.e_ident, CGCMAG, SCGCMAG);
}

static int init_ehdr(ELFOBJ *bin) {
	ut8 e_ident[EI_NIDENT];
	ut8 ehdr[sizeof (Elf_(Ehdr))] = {0};
	int i, len;
	if (r_buf_read_at (bin->b, 0, e_ident, EI_NIDENT) == -1) {
		bprintf ("Warning: read (magic)\n");
		return false;
	}
	sdb_set (bin->kv, "elf_type.cparse", "enum elf_type { ET_NONE=0, ET_REL=1,"
		" ET_EXEC=2, ET_DYN=3, ET_CORE=4, ET_LOOS=0xfe00, ET_HIOS=0xfeff,"
		" ET_LOPROC=0xff00, ET_HIPROC=0xffff };", 0);
	sdb_set (bin->kv, "elf_machine.cparse", "enum elf_machine{EM_NONE=0, EM_M32=1,"
		" EM_SPARC=2, EM_386=3, EM_68K=4, EM_88K=5, EM_486=6, "
		" EM_860=7, EM_MIPS=8, EM_S370=9, EM_MIPS_RS3_LE=10, EM_RS6000=11,"
		" EM_UNKNOWN12=12, EM_UNKNOWN13=13, EM_UNKNOWN14=14, "
		" EM_PA_RISC=15, EM_PARISC=EM_PA_RISC, EM_nCUBE=16, EM_VPP500=17,"
		" EM_SPARC32PLUS=18, EM_960=19, EM_PPC=20, EM_PPC64=21, "
		" EM_S390=22, EM_UNKNOWN22=EM_S390, EM_UNKNOWN23=23, EM_UNKNOWN24=24,"
		" EM_UNKNOWN25=25, EM_UNKNOWN26=26, EM_UNKNOWN27=27, EM_UNKNOWN28=28,"
		" EM_UNKNOWN29=29, EM_UNKNOWN30=30, EM_UNKNOWN31=31, EM_UNKNOWN32=32,"
		" EM_UNKNOWN33=33, EM_UNKNOWN34=34, EM_UNKNOWN35=35, EM_V800=36,"
		" EM_FR20=37, EM_RH32=38, EM_RCE=39, EM_ARM=40, EM_ALPHA=41, EM_SH=42,"
		" EM_SPARCV9=43, EM_TRICORE=44, EM_ARC=45, EM_H8_300=46, EM_H8_300H=47,"
		" EM_H8S=48, EM_H8_500=49, EM_IA_64=50, EM_MIPS_X=51, EM_COLDFIRE=52,"
		" EM_68HC12=53, EM_MMA=54, EM_PCP=55, EM_NCPU=56, EM_NDR1=57,"
		" EM_STARCORE=58, EM_ME16=59, EM_ST100=60, EM_TINYJ=61, EM_AMD64=62,"
		" EM_X86_64=EM_AMD64, EM_PDSP=63, EM_UNKNOWN64=64, EM_UNKNOWN65=65,"
		" EM_FX66=66, EM_ST9PLUS=67, EM_ST7=68, EM_68HC16=69, EM_68HC11=70,"
		" EM_68HC08=71, EM_68HC05=72, EM_SVX=73, EM_ST19=74, EM_VAX=75, "
		" EM_CRIS=76, EM_JAVELIN=77, EM_FIREPATH=78, EM_ZSP=79, EM_MMIX=80,"
		" EM_HUANY=81, EM_PRISM=82, EM_AVR=83, EM_FR30=84, EM_D10V=85, EM_D30V=86,"
		" EM_V850=87, EM_M32R=88, EM_MN10300=89, EM_MN10200=90, EM_PJ=91,"
		" EM_OPENRISC=92, EM_ARC_A5=93, EM_XTENSA=94, EM_NUM=95};", 0);
	sdb_num_set (bin->kv, "elf_header.offset", 0, 0);
	sdb_num_set (bin->kv, "elf_header.size", sizeof (Elf_(Ehdr)), 0);
#if R_BIN_ELF64
	sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exqqqxwwwwww"
		" ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize"
		" phentsize phnum shentsize shnum shstrndx", 0);
#else
	sdb_set (bin->kv, "elf_header.format", "[16]z[2]E[2]Exxxxxwwwwww"
		" ident (elf_type)type (elf_machine)machine version entry phoff shoff flags ehsize"
		" phentsize phnum shentsize shnum shstrndx", 0);
#endif
bin->endian = (e_ident[EI_DATA] == ELFDATA2MSB)? 1: 0; memset (&bin->ehdr, 0, sizeof (Elf_(Ehdr))); len = r_buf_read_at (bin->b, 0, ehdr, sizeof (Elf_(Ehdr))); if (len < 1) { bprintf ("Warning: read (ehdr)\n"); return false; } memcpy (&bin->ehdr.e_ident, ehdr, 16); i = 16; bin->ehdr.e_type = READ16 (ehdr, i) bin->ehdr.e_machine = READ16 (ehdr, i) bin->ehdr.e_version = READ32 (ehdr, i) #if R_BIN_ELF64 bin->ehdr.e_entry = READ64 (ehdr, i) bin->ehdr.e_phoff = READ64 (ehdr, i) bin->ehdr.e_shoff = READ64 (ehdr, i) #else bin->ehdr.e_entry = READ32 (ehdr, i) bin->ehdr.e_phoff = READ32 (ehdr, i) bin->ehdr.e_shoff = READ32 (ehdr, i) #endif bin->ehdr.e_flags = READ32 (ehdr, i) bin->ehdr.e_ehsize = READ16 (ehdr, i) bin->ehdr.e_phentsize = READ16 (ehdr, i) bin->ehdr.e_phnum = READ16 (ehdr, i) bin->ehdr.e_shentsize = READ16 (ehdr, i) bin->ehdr.e_shnum = READ16 (ehdr, i) bin->ehdr.e_shstrndx = READ16 (ehdr, i) return handle_e_ident (bin); // Usage example: // > td `k bin/cur/info/elf_type.cparse`; td `k bin/cur/info/elf_machine.cparse` // > pf `k bin/cur/info/elf_header.format` @ `k bin/cur/info/elf_header.offset` } static int init_phdr(ELFOBJ *bin) { ut32 phdr_size; ut8 phdr[sizeof (Elf_(Phdr))] = {0}; int i, j, len; if (!bin->ehdr.e_phnum) { return false; } if (bin->phdr) { return true; } if (!UT32_MUL (&phdr_size, (ut32)bin->ehdr.e_phnum, sizeof (Elf_(Phdr)))) { return false; } if (!phdr_size) { return false; } if (phdr_size > bin->size) { return false; } if (phdr_size > (ut32)bin->size) { return false; } if (bin->ehdr.e_phoff > bin->size) { return false; } if (bin->ehdr.e_phoff + phdr_size > bin->size) { return false; } if (!(bin->phdr = calloc (phdr_size, 1))) { perror ("malloc (phdr)"); return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_phoff + i * sizeof (Elf_(Phdr)), phdr, sizeof (Elf_(Phdr))); if (len < 1) { bprintf ("Warning: read (phdr)\n"); R_FREE (bin->phdr); return false; } bin->phdr[i].p_type = READ32 (phdr, j) #if R_BIN_ELF64 bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_offset = READ64 (phdr, j) bin->phdr[i].p_vaddr = READ64 (phdr, j) bin->phdr[i].p_paddr = READ64 (phdr, j) bin->phdr[i].p_filesz = READ64 (phdr, j) bin->phdr[i].p_memsz = READ64 (phdr, j) bin->phdr[i].p_align = READ64 (phdr, j) #else bin->phdr[i].p_offset = READ32 (phdr, j) bin->phdr[i].p_vaddr = READ32 (phdr, j) bin->phdr[i].p_paddr = READ32 (phdr, j) bin->phdr[i].p_filesz = READ32 (phdr, j) bin->phdr[i].p_memsz = READ32 (phdr, j) bin->phdr[i].p_flags = READ32 (phdr, j) bin->phdr[i].p_align = READ32 (phdr, j) #endif } sdb_num_set (bin->kv, "elf_phdr.offset", bin->ehdr.e_phoff, 0); sdb_num_set (bin->kv, "elf_phdr.size", sizeof (Elf_(Phdr)), 0); sdb_set (bin->kv, "elf_p_type.cparse", "enum elf_p_type {PT_NULL=0,PT_LOAD=1,PT_DYNAMIC=2," "PT_INTERP=3,PT_NOTE=4,PT_SHLIB=5,PT_PHDR=6,PT_LOOS=0x60000000," "PT_HIOS=0x6fffffff,PT_LOPROC=0x70000000,PT_HIPROC=0x7fffffff};", 0); sdb_set (bin->kv, "elf_p_flags.cparse", "enum elf_p_flags {PF_None=0,PF_Exec=1," "PF_Write=2,PF_Write_Exec=3,PF_Read=4,PF_Read_Exec=5,PF_Read_Write=6," "PF_Read_Write_Exec=7};", 0); #if R_BIN_ELF64 sdb_set (bin->kv, "elf_phdr.format", "[4]E[4]Eqqqqqq (elf_p_type)type (elf_p_flags)flags" " offset vaddr paddr filesz memsz align", 0); #else sdb_set (bin->kv, "elf_phdr.format", "[4]Exxxxx[4]Ex (elf_p_type)type offset vaddr paddr" " filesz memsz (elf_p_flags)flags align", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_p_type.cparse`; td `k bin/cur/info/elf_p_flags.cparse` // > 
pf `k bin/cur/info/elf_phdr.format` @ `k bin/cur/info/elf_phdr.offset` } static int init_shdr(ELFOBJ *bin) { ut32 shdr_size; ut8 shdr[sizeof (Elf_(Shdr))] = {0}; int i, j, len; if (!bin || bin->shdr) { return true; } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size < 1) { return false; } if (shdr_size > bin->size) { return false; } if (bin->ehdr.e_shoff > bin->size) { return false; } if (bin->ehdr.e_shoff + shdr_size > bin->size) { return false; } if (!(bin->shdr = calloc (1, shdr_size + 1))) { perror ("malloc (shdr)"); return false; } sdb_num_set (bin->kv, "elf_shdr.offset", bin->ehdr.e_shoff, 0); sdb_num_set (bin->kv, "elf_shdr.size", sizeof (Elf_(Shdr)), 0); sdb_set (bin->kv, "elf_s_type.cparse", "enum elf_s_type {SHT_NULL=0,SHT_PROGBITS=1," "SHT_SYMTAB=2,SHT_STRTAB=3,SHT_RELA=4,SHT_HASH=5,SHT_DYNAMIC=6,SHT_NOTE=7," "SHT_NOBITS=8,SHT_REL=9,SHT_SHLIB=10,SHT_DYNSYM=11,SHT_LOOS=0x60000000," "SHT_HIOS=0x6fffffff,SHT_LOPROC=0x70000000,SHT_HIPROC=0x7fffffff};", 0); for (i = 0; i < bin->ehdr.e_shnum; i++) { j = 0; len = r_buf_read_at (bin->b, bin->ehdr.e_shoff + i * sizeof (Elf_(Shdr)), shdr, sizeof (Elf_(Shdr))); if (len < 1) { bprintf ("Warning: read (shdr) at 0x%"PFMT64x"\n", (ut64) bin->ehdr.e_shoff); R_FREE (bin->shdr); return false; } bin->shdr[i].sh_name = READ32 (shdr, j) bin->shdr[i].sh_type = READ32 (shdr, j) #if R_BIN_ELF64 bin->shdr[i].sh_flags = READ64 (shdr, j) bin->shdr[i].sh_addr = READ64 (shdr, j) bin->shdr[i].sh_offset = READ64 (shdr, j) bin->shdr[i].sh_size = READ64 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ64 (shdr, j) bin->shdr[i].sh_entsize = READ64 (shdr, j) #else bin->shdr[i].sh_flags = READ32 (shdr, j) bin->shdr[i].sh_addr = READ32 (shdr, j) bin->shdr[i].sh_offset = READ32 (shdr, j) bin->shdr[i].sh_size = READ32 (shdr, j) bin->shdr[i].sh_link = READ32 (shdr, j) bin->shdr[i].sh_info = READ32 (shdr, j) bin->shdr[i].sh_addralign = READ32 (shdr, j) bin->shdr[i].sh_entsize = READ32 (shdr, j) #endif } #if R_BIN_ELF64 sdb_set (bin->kv, "elf_s_flags_64.cparse", "enum elf_s_flags_64 {SF64_None=0,SF64_Exec=1," "SF64_Alloc=2,SF64_Alloc_Exec=3,SF64_Write=4,SF64_Write_Exec=5," "SF64_Write_Alloc=6,SF64_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[8]Eqqqxxqq name (elf_s_type)type" " (elf_s_flags_64)flags addr offset size link info addralign entsize", 0); #else sdb_set (bin->kv, "elf_s_flags_32.cparse", "enum elf_s_flags_32 {SF32_None=0,SF32_Exec=1," "SF32_Alloc=2,SF32_Alloc_Exec=3,SF32_Write=4,SF32_Write_Exec=5," "SF32_Write_Alloc=6,SF32_Write_Alloc_Exec=7};", 0); sdb_set (bin->kv, "elf_shdr.format", "x[4]E[4]Exxxxxxx name (elf_s_type)type" " (elf_s_flags_32)flags addr offset size link info addralign entsize", 0); #endif return true; // Usage example: // > td `k bin/cur/info/elf_s_type.cparse`; td `k bin/cur/info/elf_s_flags_64.cparse` // > pf `k bin/cur/info/elf_shdr.format` @ `k bin/cur/info/elf_shdr.offset` } static int init_strtab(ELFOBJ *bin) { if (bin->strtab || !bin->shdr) { return false; } if (bin->ehdr.e_shstrndx != SHN_UNDEF && (bin->ehdr.e_shstrndx >= bin->ehdr.e_shnum || (bin->ehdr.e_shstrndx >= SHN_LORESERVE && bin->ehdr.e_shstrndx < SHN_HIRESERVE))) return false; /* sh_size must be lower than UT32_MAX and not equal to zero, to avoid bugs on malloc() */ if (bin->shdr[bin->ehdr.e_shstrndx].sh_size > UT32_MAX) { return false; } if (!bin->shdr[bin->ehdr.e_shstrndx].sh_size) { return false; } bin->shstrtab_section = 
		bin->strtab_section = &bin->shdr[bin->ehdr.e_shstrndx];
	bin->shstrtab_size = bin->strtab_section->sh_size;
	if (bin->shstrtab_size > bin->size) {
		return false;
	}
	if (!(bin->shstrtab = calloc (1, bin->shstrtab_size + 1))) {
		perror ("malloc");
		bin->shstrtab = NULL;
		return false;
	}
	if (bin->shstrtab_section->sh_offset > bin->size) {
		R_FREE (bin->shstrtab);
		return false;
	}
	if (bin->shstrtab_section->sh_offset + bin->shstrtab_section->sh_size > bin->size) {
		R_FREE (bin->shstrtab);
		return false;
	}
	if (r_buf_read_at (bin->b, bin->shstrtab_section->sh_offset, (ut8*)bin->shstrtab,
				bin->shstrtab_section->sh_size + 1) < 1) {
		bprintf ("Warning: read (shstrtab) at 0x%"PFMT64x"\n",
				(ut64) bin->shstrtab_section->sh_offset);
		R_FREE (bin->shstrtab);
		return false;
	}
	bin->shstrtab[bin->shstrtab_section->sh_size] = '\0';
	sdb_num_set (bin->kv, "elf_shstrtab.offset", bin->shstrtab_section->sh_offset, 0);
	sdb_num_set (bin->kv, "elf_shstrtab.size", bin->shstrtab_section->sh_size, 0);
	return true;
}

static int init_dynamic_section(struct Elf_(r_bin_elf_obj_t) *bin) {
	Elf_(Dyn) *dyn = NULL;
	Elf_(Dyn) d = {0};
	Elf_(Addr) strtabaddr = 0;
	ut64 offset = 0;
	char *strtab = NULL;
	size_t relentry = 0, strsize = 0;
	int entries;
	int i, j, len, r;
	ut8 sdyn[sizeof (Elf_(Dyn))] = {0};
	ut32 dyn_size = 0;
	if (!bin || !bin->phdr || !bin->ehdr.e_phnum) {
		return false;
	}
	for (i = 0; i < bin->ehdr.e_phnum; i++) {
		if (bin->phdr[i].p_type == PT_DYNAMIC) {
			dyn_size = bin->phdr[i].p_filesz;
			break;
		}
	}
	if (i == bin->ehdr.e_phnum) {
		return false;
	}
	if (bin->phdr[i].p_filesz > bin->size) {
		return false;
	}
	if (bin->phdr[i].p_offset > bin->size) {
		return false;
	}
	if (bin->phdr[i].p_offset + sizeof (Elf_(Dyn)) > bin->size) {
		return false;
	}
	for (entries = 0; entries < (dyn_size / sizeof (Elf_(Dyn))); entries++) {
		j = 0;
		len = r_buf_read_at (bin->b, bin->phdr[i].p_offset + entries * sizeof (Elf_(Dyn)),
				sdyn, sizeof (Elf_(Dyn)));
		if (len < 1) {
			goto beach;
		}
#if R_BIN_ELF64
		d.d_tag = READ64 (sdyn, j)
#else
		d.d_tag = READ32 (sdyn, j)
#endif
		if (d.d_tag == DT_NULL) {
			break;
		}
	}
	if (entries < 1) {
		return false;
	}
	dyn = (Elf_(Dyn)*)calloc (entries, sizeof (Elf_(Dyn)));
	if (!dyn) {
		return false;
	}
	if (!UT32_MUL (&dyn_size, entries, sizeof (Elf_(Dyn)))) {
		goto beach;
	}
	if (!dyn_size) {
		goto beach;
	}
	offset = Elf_(r_bin_elf_v2p) (bin, bin->phdr[i].p_vaddr);
	if (offset > bin->size || offset + dyn_size > bin->size) {
		goto beach;
	}
	for (i = 0; i < entries; i++) {
		j = 0;
		// the read result was previously discarded, so "len" kept its stale
		// value from the sizing loop above; assign it so the check is real
		len = r_buf_read_at (bin->b, offset + i * sizeof (Elf_(Dyn)),
				sdyn, sizeof (Elf_(Dyn)));
		if (len < 1) {
			bprintf ("Warning: read (dyn)\n");
		}
#if R_BIN_ELF64
		dyn[i].d_tag = READ64 (sdyn, j)
		dyn[i].d_un.d_ptr = READ64 (sdyn, j)
#else
		dyn[i].d_tag = READ32 (sdyn, j)
		dyn[i].d_un.d_ptr = READ32 (sdyn, j)
#endif
		switch (dyn[i].d_tag) {
		case DT_STRTAB:
			strtabaddr = Elf_(r_bin_elf_v2p) (bin, dyn[i].d_un.d_ptr);
			break;
		case DT_STRSZ:
			strsize = dyn[i].d_un.d_val;
			break;
		case DT_PLTREL:
			bin->is_rela = dyn[i].d_un.d_val;
			break;
		case DT_RELAENT:
			relentry = dyn[i].d_un.d_val;
			break;
		default:
			if ((dyn[i].d_tag >= DT_VERSYM) && (dyn[i].d_tag <= DT_VERNEEDNUM)) {
				bin->version_info[DT_VERSIONTAGIDX (dyn[i].d_tag)] = dyn[i].d_un.d_val;
			}
			break;
		}
	}
	if (!bin->is_rela) {
		bin->is_rela = sizeof (Elf_(Rela)) == relentry?
DT_RELA : DT_REL; } if (!strtabaddr || strtabaddr > bin->size || strsize > ST32_MAX || !strsize || strsize > bin->size) { if (!strtabaddr) { bprintf ("Warning: section.shstrtab not found or invalid\n"); } goto beach; } strtab = (char *)calloc (1, strsize + 1); if (!strtab) { goto beach; } if (strtabaddr + strsize > bin->size) { free (strtab); goto beach; } r = r_buf_read_at (bin->b, strtabaddr, (ut8 *)strtab, strsize); if (r < 1) { free (strtab); goto beach; } bin->dyn_buf = dyn; bin->dyn_entries = entries; bin->strtab = strtab; bin->strtab_size = strsize; r = Elf_(r_bin_elf_has_relro)(bin); switch (r) { case R_ELF_FULL_RELRO: sdb_set (bin->kv, "elf.relro", "full", 0); break; case R_ELF_PART_RELRO: sdb_set (bin->kv, "elf.relro", "partial", 0); break; default: sdb_set (bin->kv, "elf.relro", "no", 0); break; } sdb_num_set (bin->kv, "elf_strtab.offset", strtabaddr, 0); sdb_num_set (bin->kv, "elf_strtab.size", strsize, 0); return true; beach: free (dyn); return false; } static RBinElfSection* get_section_by_name(ELFOBJ *bin, const char *section_name) { int i; if (!bin->g_sections) { return NULL; } for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, section_name, ELF_STRING_LENGTH-1)) { return &bin->g_sections[i]; } } return NULL; } static char *get_ver_flags(ut32 flags) { static char buff[32]; buff[0] = 0; if (!flags) { return "none"; } if (flags & VER_FLG_BASE) { strcpy (buff, "BASE "); } if (flags & VER_FLG_WEAK) { if (flags & VER_FLG_BASE) { strcat (buff, "| "); } strcat (buff, "WEAK "); } if (flags & ~(VER_FLG_BASE | VER_FLG_WEAK)) { strcat (buff, "| <unknown>"); } return buff; } static Sdb *store_versioninfo_gnu_versym(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { int i; const ut64 num_entries = sz / sizeof (Elf_(Versym)); const char *section_name = ""; const char *link_section_name = ""; Elf_(Shdr) *link_shdr = NULL; Sdb *sdb = sdb_new0(); if (!sdb) { return NULL; } if (!bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]) { sdb_free (sdb); return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { sdb_free (sdb); return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; ut8 *edata = (ut8*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!edata) { sdb_free (sdb); return NULL; } ut16 *data = (ut16*) calloc (R_MAX (1, num_entries), sizeof (ut16)); if (!data) { free (edata); sdb_free (sdb); return NULL; } ut64 off = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERSYM)]); if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } r_buf_read_at (bin->b, off, edata, sizeof (ut16) * num_entries); sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", num_entries, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (i = num_entries; i--;) { data[i] = r_read_ble16 (&edata[i * sizeof (ut16)], bin->endian); } R_FREE (edata); for (i = 0; i < num_entries; i += 4) { int j; int check_def; char key[32] = {0}; Sdb *sdb_entry = sdb_new0 (); snprintf (key, sizeof (key), "entry%d", i / 4); sdb_ns_set (sdb, key, sdb_entry); sdb_num_set (sdb_entry, "idx", i, 0); for (j = 0; (j < 4) && (i + j) < num_entries; ++j) { int k; char *tmp_val = NULL; snprintf (key, sizeof (key), "value%d", j); switch (data[i + j]) { case 0: sdb_set 
(sdb_entry, key, "0 (*local*)", 0); break; case 1: sdb_set (sdb_entry, key, "1 (*global*)", 0); break; default: tmp_val = sdb_fmt (0, "%x ", data[i+j] & 0x7FFF); check_def = true; if (bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]) { Elf_(Verneed) vn; ut8 svn[sizeof (Elf_(Verneed))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, bin->version_info[DT_VERSIONTAGIDX (DT_VERNEED)]); do { Elf_(Vernaux) vna; ut8 svna[sizeof (Elf_(Vernaux))] = {0}; ut64 a_off; if (offset > bin->size || offset + sizeof (vn) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, offset, svn, sizeof (svn)) < 0) { bprintf ("Warning: Cannot read Verneed for Versym\n"); goto beach; } k = 0; vn.vn_version = READ16 (svn, k) vn.vn_cnt = READ16 (svn, k) vn.vn_file = READ32 (svn, k) vn.vn_aux = READ32 (svn, k) vn.vn_next = READ32 (svn, k) a_off = offset + vn.vn_aux; do { if (a_off > bin->size || a_off + sizeof (vna) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, a_off, svna, sizeof (svna)) < 0) { bprintf ("Warning: Cannot read Vernaux for Versym\n"); goto beach; } k = 0; vna.vna_hash = READ32 (svna, k) vna.vna_flags = READ16 (svna, k) vna.vna_other = READ16 (svna, k) vna.vna_name = READ32 (svna, k) vna.vna_next = READ32 (svna, k) a_off += vna.vna_next; } while (vna.vna_other != data[i + j] && vna.vna_next != 0); if (vna.vna_other == data[i + j]) { if (vna.vna_name > bin->strtab_size) { goto beach; } sdb_set (sdb_entry, key, sdb_fmt (0, "%s(%s)", tmp_val, bin->strtab + vna.vna_name), 0); check_def = false; break; } offset += vn.vn_next; } while (vn.vn_next); } ut64 vinfoaddr = bin->version_info[DT_VERSIONTAGIDX (DT_VERDEF)]; if (check_def && data[i + j] != 0x8001 && vinfoaddr) { Elf_(Verdef) vd; ut8 svd[sizeof (Elf_(Verdef))] = {0}; ut64 offset = Elf_(r_bin_elf_v2p) (bin, vinfoaddr); if (offset > bin->size || offset + sizeof (vd) > bin->size) { goto beach; } do { if (r_buf_read_at (bin->b, offset, svd, sizeof (svd)) < 0) { bprintf ("Warning: Cannot read Verdef for Versym\n"); goto beach; } k = 0; vd.vd_version = READ16 (svd, k) vd.vd_flags = READ16 (svd, k) vd.vd_ndx = READ16 (svd, k) vd.vd_cnt = READ16 (svd, k) vd.vd_hash = READ32 (svd, k) vd.vd_aux = READ32 (svd, k) vd.vd_next = READ32 (svd, k) offset += vd.vd_next; } while (vd.vd_ndx != (data[i + j] & 0x7FFF) && vd.vd_next != 0); if (vd.vd_ndx == (data[i + j] & 0x7FFF)) { Elf_(Verdaux) vda; ut8 svda[sizeof (Elf_(Verdaux))] = {0}; ut64 off_vda = offset - vd.vd_next + vd.vd_aux; if (off_vda > bin->size || off_vda + sizeof (vda) > bin->size) { goto beach; } if (r_buf_read_at (bin->b, off_vda, svda, sizeof (svda)) < 0) { bprintf ("Warning: Cannot read Verdaux for Versym\n"); goto beach; } k = 0; vda.vda_name = READ32 (svda, k) vda.vda_next = READ32 (svda, k) if (vda.vda_name > bin->strtab_size) { goto beach; } const char *name = bin->strtab + vda.vda_name; sdb_set (sdb_entry, key, sdb_fmt (0,"%s(%s%-*s)", tmp_val, name, (int)(12 - strlen (name)),")") , 0); } } } } } beach: free (data); return sdb; } static Sdb *store_versioninfo_gnu_verdef(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { const char *section_name = ""; const char *link_section_name = ""; char *end = NULL; Elf_(Shdr) *link_shdr = NULL; ut8 dfs[sizeof (Elf_(Verdef))] = {0}; Sdb *sdb; int cnt, i; if (shdr->sh_link > bin->ehdr.e_shnum) { return false; } link_shdr = &bin->shdr[shdr->sh_link]; if (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) { return false; } Elf_(Verdef) *defs = calloc (shdr->sh_size, sizeof (char)); if (!defs) { return false; } if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { 
section_name = &bin->shstrtab[shdr->sh_name]; } if (link_shdr && bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } sdb = sdb_new0 (); if (!sdb) { free (defs); return NULL; } end = (char *)defs + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); for (cnt = 0, i = 0; i >= 0 && cnt < shdr->sh_info && ((char *)defs + i < end); ++cnt) { Sdb *sdb_verdef = sdb_new0 (); char *vstart = ((char*)defs) + i; char key[32] = {0}; Elf_(Verdef) *verdef = (Elf_(Verdef)*)vstart; Elf_(Verdaux) aux = {0}; int j = 0; int isum = 0; r_buf_read_at (bin->b, shdr->sh_offset + i, dfs, sizeof (Elf_(Verdef))); verdef->vd_version = READ16 (dfs, j) verdef->vd_flags = READ16 (dfs, j) verdef->vd_ndx = READ16 (dfs, j) verdef->vd_cnt = READ16 (dfs, j) verdef->vd_hash = READ32 (dfs, j) verdef->vd_aux = READ32 (dfs, j) verdef->vd_next = READ32 (dfs, j) int vdaux = verdef->vd_aux; if (vdaux < 1) { sdb_free (sdb_verdef); goto out_error; } vstart += vdaux; if (vstart > end || vstart + sizeof (Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); goto out_error; } j = 0; aux.vda_name = READ32 (vstart, j) aux.vda_next = READ32 (vstart, j) isum = i + verdef->vd_aux; if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); goto out_error; } sdb_num_set (sdb_verdef, "idx", i, 0); sdb_num_set (sdb_verdef, "vd_version", verdef->vd_version, 0); sdb_num_set (sdb_verdef, "vd_ndx", verdef->vd_ndx, 0); sdb_num_set (sdb_verdef, "vd_cnt", verdef->vd_cnt, 0); sdb_set (sdb_verdef, "vda_name", &bin->dynstr[aux.vda_name], 0); sdb_set (sdb_verdef, "flags", get_ver_flags (verdef->vd_flags), 0); for (j = 1; j < verdef->vd_cnt; ++j) { int k; Sdb *sdb_parent = sdb_new0 (); isum += aux.vda_next; vstart += aux.vda_next; if (vstart > end || vstart + sizeof(Elf_(Verdaux)) > end) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } k = 0; aux.vda_name = READ32 (vstart, k) aux.vda_next = READ32 (vstart, k) if (aux.vda_name > bin->dynstr_size) { sdb_free (sdb_verdef); sdb_free (sdb_parent); goto out_error; } sdb_num_set (sdb_parent, "idx", isum, 0); sdb_num_set (sdb_parent, "parent", j, 0); sdb_set (sdb_parent, "vda_name", &bin->dynstr[aux.vda_name], 0); snprintf (key, sizeof (key), "parent%d", j - 1); sdb_ns_set (sdb_verdef, key, sdb_parent); } snprintf (key, sizeof (key), "verdef%d", cnt); sdb_ns_set (sdb, key, sdb_verdef); if (!verdef->vd_next) { /* sdb_verdef is owned by sdb now; freeing it here would double free */ break; } if ((st32)verdef->vd_next < 1) { bprintf ("Warning: Invalid vd_next in the ELF version\n"); break; } i += verdef->vd_next; } free (defs); return sdb; out_error: free (defs); sdb_free (sdb); return NULL; }
static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link >= bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) {
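/* Verneed parsing mirrors Verdef: each record names a file (vn_file) and
   owns vn_cnt Vernaux entries reached through the relative vn_aux and
   vna_next offsets; vn_next links the next Verneed, and every hop below
   re-checks that the cursor stays within [need, end). */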
section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt ); sdb_ns_set (sdb, key, sdb_version); //if entry->vn_next is 0 it iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; } static Sdb *store_versioninfo(ELFOBJ *bin) { Sdb *sdb_versioninfo = NULL; int num_verdef = 0; int num_verneed = 0; int num_versym = 0; int i; if (!bin || !bin->shdr) { return NULL; } if (!(sdb_versioninfo = sdb_new0 ())) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { Sdb *sdb 
= NULL; char key[32] = {0}; int size = bin->shdr[i].sh_size; if (size > bin->size - (i * sizeof (Elf_(Shdr)))) { size = bin->size - (i * sizeof (Elf_(Shdr))); } int left = size - (i * sizeof (Elf_(Shdr))); left = R_MIN (left, bin->shdr[i].sh_size); if (left < 0) { break; } switch (bin->shdr[i].sh_type) { case SHT_GNU_verdef: sdb = store_versioninfo_gnu_verdef (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verdef%d", num_verdef++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_verneed: sdb = store_versioninfo_gnu_verneed (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "verneed%d", num_verneed++); sdb_ns_set (sdb_versioninfo, key, sdb); break; case SHT_GNU_versym: sdb = store_versioninfo_gnu_versym (bin, &bin->shdr[i], left); snprintf (key, sizeof (key), "versym%d", num_versym++); sdb_ns_set (sdb_versioninfo, key, sdb); break; } } return sdb_versioninfo; }
static bool init_dynstr(ELFOBJ *bin) { int i, r; const char *section_name = NULL; if (!bin || !bin->shdr) { return false; } if (!bin->shstrtab) { return false; } for (i = 0; i < bin->ehdr.e_shnum; ++i) { if (bin->shdr[i].sh_name >= bin->shstrtab_size) { return false; } section_name = &bin->shstrtab[bin->shdr[i].sh_name]; if (bin->shdr[i].sh_type == SHT_STRTAB && !strcmp (section_name, ".dynstr")) { if (!(bin->dynstr = (char*) calloc (bin->shdr[i].sh_size + 1, sizeof (char)))) { bprintf ("Warning: Cannot allocate memory for dynamic strings\n"); return false; } if (bin->shdr[i].sh_offset > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size > bin->size) { return false; } if (bin->shdr[i].sh_offset + bin->shdr[i].sh_size < bin->shdr[i].sh_size) { return false; } r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset, (ut8*)bin->dynstr, bin->shdr[i].sh_size); if (r < 1) { R_FREE (bin->dynstr); bin->dynstr_size = 0; return false; } bin->dynstr_size = bin->shdr[i].sh_size; return true; } } return false; }
static int elf_init(ELFOBJ *bin) { bin->phdr = NULL; bin->shdr = NULL; bin->strtab = NULL; bin->shstrtab = NULL; bin->strtab_size = 0; bin->strtab_section = NULL; bin->dyn_buf = NULL; bin->dynstr = NULL; ZERO_FILL (bin->version_info); bin->g_sections = NULL; bin->g_symbols = NULL; bin->g_imports = NULL; /* bin is not an ELF */ if (!init_ehdr (bin)) { return false; } if (!init_phdr (bin)) { bprintf ("Warning: Cannot initialize program headers\n"); } if (!init_shdr (bin)) { bprintf ("Warning: Cannot initialize section headers\n"); } if (!init_strtab (bin)) { bprintf ("Warning: Cannot initialize strings table\n"); } if (!init_dynstr (bin)) { bprintf ("Warning: Cannot initialize dynamic strings\n"); } bin->baddr = Elf_(r_bin_elf_get_baddr) (bin); if (!init_dynamic_section (bin) && !Elf_(r_bin_elf_get_static)(bin)) { bprintf ("Warning: Cannot initialize dynamic section\n"); } bin->imports_by_ord_size = 0; bin->imports_by_ord = NULL; bin->symbols_by_ord_size = 0; bin->symbols_by_ord = NULL; bin->g_sections = Elf_(r_bin_elf_get_sections) (bin); bin->boffset = Elf_(r_bin_elf_get_boffset) (bin); sdb_ns_set (bin->kv, "versioninfo", store_versioninfo (bin)); return true; }
ut64 Elf_(r_bin_elf_get_section_offset)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); if (!section) return UT64_MAX; return section->offset; } ut64 Elf_(r_bin_elf_get_section_addr)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section?
section->rva: UT64_MAX; } ut64 Elf_(r_bin_elf_get_section_addr_end)(ELFOBJ *bin, const char *section_name) { RBinElfSection *section = get_section_by_name (bin, section_name); return section? section->rva + section->size: UT64_MAX; } #define REL (is_rela ? (void*)rela : (void*)rel) #define REL_BUF is_rela ? (ut8*)(&rela[k]) : (ut8*)(&rel[k]) #define REL_OFFSET is_rela ? rela[k].r_offset : rel[k].r_offset #define REL_TYPE is_rela ? rela[k].r_info : rel[k].r_info static ut64 get_import_addr(ELFOBJ *bin, int sym) { Elf_(Rel) *rel = NULL; Elf_(Rela) *rela = NULL; ut8 rl[sizeof (Elf_(Rel))] = {0}; ut8 rla[sizeof (Elf_(Rela))] = {0}; RBinElfSection *rel_sec = NULL; Elf_(Addr) plt_sym_addr = -1; ut64 got_addr, got_offset; ut64 plt_addr; int j, k, tsize, len, nrel; bool is_rela = false; const char *rel_sect[] = { ".rel.plt", ".rela.plt", ".rel.dyn", ".rela.dyn", NULL }; const char *rela_sect[] = { ".rela.plt", ".rel.plt", ".rela.dyn", ".rel.dyn", NULL }; if ((!bin->shdr || !bin->strtab) && !bin->phdr) { return -1; } if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) == -1 && (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) == -1) { return -1; } if ((got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got")) == -1 && (got_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".got.plt")) == -1) { return -1; } if (bin->is_rela == DT_REL) { j = 0; while (!rel_sec && rel_sect[j]) { rel_sec = get_section_by_name (bin, rel_sect[j++]); } tsize = sizeof (Elf_(Rel)); } else if (bin->is_rela == DT_RELA) { j = 0; while (!rel_sec && rela_sect[j]) { rel_sec = get_section_by_name (bin, rela_sect[j++]); } is_rela = true; tsize = sizeof (Elf_(Rela)); } if (!rel_sec) { return -1; } if (rel_sec->size < 1) { return -1; } nrel = (ut32)((int)rel_sec->size / (int)tsize); if (nrel < 1) { return -1; } if (is_rela) { rela = calloc (nrel, tsize); if (!rela) { return -1; } } else { rel = calloc (nrel, tsize); if (!rel) { return -1; } } for (j = k = 0; j < rel_sec->size && k < nrel; j += tsize, k++) { int l = 0; if (rel_sec->offset + j > bin->size) { goto out; } if (rel_sec->offset + j + tsize > bin->size) { goto out; } len = r_buf_read_at ( bin->b, rel_sec->offset + j, is_rela ? rla : rl, is_rela ? 
sizeof (Elf_ (Rela)) : sizeof (Elf_ (Rel))); if (len < 1) { goto out; } #if R_BIN_ELF64 if (is_rela) { rela[k].r_offset = READ64 (rla, l) rela[k].r_info = READ64 (rla, l) rela[k].r_addend = READ64 (rla, l) } else { rel[k].r_offset = READ64 (rl, l) rel[k].r_info = READ64 (rl, l) } #else if (is_rela) { rela[k].r_offset = READ32 (rla, l) rela[k].r_info = READ32 (rla, l) rela[k].r_addend = READ32 (rla, l) } else { rel[k].r_offset = READ32 (rl, l) rel[k].r_info = READ32 (rl, l) } #endif int reloc_type = ELF_R_TYPE (REL_TYPE); int reloc_sym = ELF_R_SYM (REL_TYPE); if (reloc_sym == sym) { int of = REL_OFFSET; of = of - got_addr + got_offset; switch (bin->ehdr.e_machine) { case EM_PPC: case EM_PPC64: { RBinElfSection *s = get_section_by_name (bin, ".plt"); if (s) { ut8 buf[4]; ut64 base; len = r_buf_read_at (bin->b, s->offset, buf, sizeof (buf)); if (len < 4) { goto out; } base = r_read_be32 (buf); base -= (nrel * 16); base += (k * 16); plt_addr = base; free (REL); return plt_addr; } } break; case EM_SPARC: case EM_SPARCV9: case EM_SPARC32PLUS: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (REL); return -1; } if (reloc_type == R_386_PC16) { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } else { bprintf ("Unknown sparc reloc type %d\n", reloc_type); } /* SPARC */ break; case EM_ARM: case EM_AARCH64: plt_addr = Elf_(r_bin_elf_get_section_addr) (bin, ".plt"); if (plt_addr == -1) { free (REL); return UT32_MAX; } switch (reloc_type) { case R_386_8: { plt_addr += k * 12 + 20; // thumb symbol if (plt_addr & 1) { plt_addr--; } free (REL); return plt_addr; } break; case 1026: // arm64 aarch64 plt_sym_addr = plt_addr + k * 16 + 32; goto done; default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); break; } break; case EM_386: case EM_X86_64: switch (reloc_type) { case 1: // unknown relocs found in voidlinux for x86-64 // break; case R_386_GLOB_DAT: case R_386_JMP_SLOT: { ut8 buf[8]; if (of + sizeof(Elf_(Addr)) < bin->size) { // ONLY FOR X86 if (of > bin->size || of + sizeof (Elf_(Addr)) > bin->size) { goto out; } len = r_buf_read_at (bin->b, of, buf, sizeof (Elf_(Addr))); if (len < 1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ? r_read_le32 (buf) : r_read_le64 (buf); if (!plt_sym_addr) { //XXX HACK ALERT!!!! full relro?? try to fix it //will there always be .plt.got, what would happen if is .got.plt? RBinElfSection *s = get_section_by_name (bin, ".plt.got"); if (Elf_(r_bin_elf_has_relro)(bin) < R_ELF_PART_RELRO || !s) { goto done; } plt_addr = s->offset; of = of + got_addr - got_offset; while (plt_addr + 2 + 4 < s->offset + s->size) { /* we try to locate the plt entry that corresponds to the relocation, since the got does not point back to .plt. In this case it has the following form: ff253a152000 JMP QWORD [RIP + 0x20153A] 6690 NOP ---- ff25ec9f0408 JMP DWORD [reloc.puts_236] plt_addr + 2 skips the jmp opcode so we can read the 4-byte imm; if RIP (plt_addr + 6) + imm == rel->offset, return plt_addr: that will be our sym addr. Perhaps this hack doesn't work on 32 bits */ len = r_buf_read_at (bin->b, plt_addr + 2, buf, 4); if (len < 1) { goto out; } plt_sym_addr = sizeof (Elf_(Addr)) == 4 ?
r_read_le32 (buf) : r_read_le64 (buf); //relative address if ((plt_addr + 6 + Elf_(r_bin_elf_v2p) (bin, plt_sym_addr)) == of) { plt_sym_addr = plt_addr; goto done; } else if (plt_sym_addr == of) { plt_sym_addr = plt_addr; goto done; } plt_addr += 8; } } else { plt_sym_addr -= 6; } goto done; } break; } default: bprintf ("Unsupported relocation type for imports %d\n", reloc_type); free (REL); return of; break; } break; case 8: // MIPS32 BIG ENDIAN relocs { RBinElfSection *s = get_section_by_name (bin, ".rela.plt"); if (s) { ut8 buf[1024]; const ut8 *base; plt_addr = s->rva + s->size; len = r_buf_read_at (bin->b, s->offset + s->size, buf, sizeof (buf)); if (len != sizeof (buf)) { // oops } base = r_mem_mem_aligned (buf, sizeof (buf), (const ut8*)"\x3c\x0f\x00", 3, 4); if (base) { plt_addr += (int)(size_t)(base - buf); } else { plt_addr += 108 + 8; // HARDCODED HACK } plt_addr += k * 16; free (REL); return plt_addr; } } break; default: bprintf ("Unsupported relocs type %d for arch %d\n", reloc_type, bin->ehdr.e_machine); break; } } } done: free (REL); return plt_sym_addr; out: free (REL); return -1; } int Elf_(r_bin_elf_has_nx)(ELFOBJ *bin) { int i; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_STACK) { return (!(bin->phdr[i].p_flags & 1))? 1: 0; } } } return 0; } int Elf_(r_bin_elf_has_relro)(ELFOBJ *bin) { int i; bool haveBindNow = false; bool haveGnuRelro = false; if (bin && bin->dyn_buf) { for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_BIND_NOW: haveBindNow = true; break; case DT_FLAGS: for (i++; i < bin->dyn_entries ; i++) { ut32 dTag = bin->dyn_buf[i].d_tag; if (!dTag) { break; } switch (dTag) { case DT_FLAGS_1: if (bin->dyn_buf[i].d_un.d_val & DF_1_NOW) { haveBindNow = true; break; } } } break; } } } if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_GNU_RELRO) { haveGnuRelro = true; break; } } } if (haveGnuRelro) { if (haveBindNow) { return R_ELF_FULL_RELRO; } return R_ELF_PART_RELRO; } return R_ELF_NO_RELRO; } /* To compute the base address, one determines the memory address associated with the lowest p_vaddr value for a PT_LOAD segment. One then obtains the base address by truncating the memory address to the nearest multiple of the maximum page size */ ut64 Elf_(r_bin_elf_get_baddr)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (!bin) { return 0; } if (bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_vaddr & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } if (base == UT64_MAX && bin->ehdr.e_type == ET_REL) { //we return our own base address for ET_REL type //we act as a loader for ELF return 0x08000000; } return base == UT64_MAX ? 0 : base; } ut64 Elf_(r_bin_elf_get_boffset)(ELFOBJ *bin) { int i; ut64 tmp, base = UT64_MAX; if (bin && bin->phdr) { for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_LOAD) { tmp = (ut64)bin->phdr[i].p_offset & ELF_PAGE_MASK; tmp = tmp - (tmp % (1 << ELF_PAGE_SIZE)); if (tmp < base) { base = tmp; } } } } return base == UT64_MAX ? 
0 : base; } ut64 Elf_(r_bin_elf_get_init_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry + 16, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (init_offset)\n"); return 0; } if (buf[0] == 0x68) { // push // x86 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_fini_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (r_buf_read_at (bin->b, entry+11, buf, sizeof (buf)) == -1) { bprintf ("Warning: read (get_fini)\n"); return 0; } if (*buf == 0x68) { // push // x86/32 only ut64 addr; memmove (buf, buf+1, 4); addr = (ut64)r_read_le32 (buf); return Elf_(r_bin_elf_v2p) (bin, addr); } return 0; } ut64 Elf_(r_bin_elf_get_entry_offset)(ELFOBJ *bin) { ut64 entry; if (!bin) { return 0LL; } entry = bin->ehdr.e_entry; if (!entry) { entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init.text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); if (entry != UT64_MAX) { return entry; } entry = Elf_(r_bin_elf_get_section_offset)(bin, ".init"); if (entry != UT64_MAX) { return entry; } if (entry == UT64_MAX) { return 0; } } return Elf_(r_bin_elf_v2p) (bin, entry); } static ut64 getmainsymbol(ELFOBJ *bin) { struct r_bin_elf_symbol_t *symbol; int i; if (!(symbol = Elf_(r_bin_elf_get_symbols) (bin))) { return UT64_MAX; } for (i = 0; !symbol[i].last; i++) { if (!strcmp (symbol[i].name, "main")) { ut64 paddr = symbol[i].offset; return Elf_(r_bin_elf_p2v) (bin, paddr); } } return UT64_MAX; } ut64 Elf_(r_bin_elf_get_main_offset)(ELFOBJ *bin) { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); ut8 buf[512]; if (!bin) { return 0LL; } if (entry > bin->size || (entry + sizeof (buf)) > bin->size) { return 0; } if (r_buf_read_at (bin->b, entry, buf, sizeof (buf)) < 1) { bprintf ("Warning: read (main)\n"); return 0; } // ARM64 if (buf[0x18+3] == 0x58 && buf[0x2f] == 0x00) { ut32 entry_vaddr = Elf_(r_bin_elf_p2v) (bin, entry); ut32 main_addr = r_read_le32 (&buf[0x30]); if ((main_addr >> 16) == (entry_vaddr >> 16)) { return Elf_(r_bin_elf_v2p) (bin, main_addr); } } // TODO: Use arch to identify arch before memcmp's // ARM ut64 text = Elf_(r_bin_elf_get_section_offset)(bin, ".text"); ut64 text_end = text + bin->size; // ARM-Thumb-Linux if (entry & 1 && !memcmp (buf, "\xf0\x00\x0b\x4f\xf0\x00", 6)) { ut32 * ptr = (ut32*)(buf+40-1); if (*ptr &1) { return Elf_(r_bin_elf_v2p) (bin, *ptr -1); } } if (!memcmp (buf, "\x00\xb0\xa0\xe3\x00\xe0\xa0\xe3", 8)) { // endian stuff here ut32 *addr = (ut32*)(buf+0x34); /* 0x00012000 00b0a0e3 mov fp, 0 0x00012004 00e0a0e3 mov lr, 0 */ if (*addr > text && *addr < (text_end)) { return Elf_(r_bin_elf_v2p) (bin, *addr); } } // MIPS /* get .got, calculate offset of main symbol */ if (!memcmp (buf, "\x21\x00\xe0\x03\x01\x00\x11\x04", 8)) { /* assuming the startup code looks like got = gp-0x7ff0 got[index__libc_start_main] ( got[index_main] ); looking for the instruction generating the first argument to find main lw a0, offset(gp) */ ut64 got_offset; if ((got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got")) != -1 || (got_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".got.plt")) != -1) { const ut64 gp = got_offset + 0x7ff0; unsigned i; for (i = 0; i < sizeof(buf) / sizeof(buf[0]); i += 4) { const ut32 instr = r_read_le32 (&buf[i]); if ((instr & 0xffff0000) == 
0x8f840000) { // lw a0, offset(gp) const short delta = instr & 0x0000ffff; r_buf_read_at (bin->b, /* got_entry_offset = */ gp + delta, buf, 4); return Elf_(r_bin_elf_v2p) (bin, r_read_le32 (&buf[0])); } } } return 0; } // ARM if (!memcmp (buf, "\x24\xc0\x9f\xe5\x00\xb0\xa0\xe3", 8)) { ut64 addr = r_read_le32 (&buf[48]); return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-CGC if (buf[0] == 0xe8 && !memcmp (buf + 5, "\x50\xe8\x00\x00\x00\x00\xb8\x01\x00\x00\x00\x53", 12)) { size_t SIZEOF_CALL = 5; ut64 rel_addr = (ut64)((int)(buf[1] + (buf[2] << 8) + (buf[3] << 16) + (buf[4] << 24))); ut64 addr = Elf_(r_bin_elf_p2v)(bin, entry + SIZEOF_CALL); addr += rel_addr; return Elf_(r_bin_elf_v2p) (bin, addr); } // X86-PIE if (buf[0x00] == 0x48 && buf[0x1e] == 0x8d && buf[0x11] == 0xe8) { ut32 *pmain = (ut32*)(buf + 0x30); ut64 vmain = Elf_(r_bin_elf_p2v) (bin, (ut64)*pmain); ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain >> 16 == ventry >> 16) { return (ut64)vmain; } } // X86-PIE if (buf[0x1d] == 0x48 && buf[0x1e] == 0x8b) { if (!memcmp (buf, "\x31\xed\x49\x89", 4)) {// linux ut64 maddr, baddr; ut8 n32s[sizeof (ut32)] = {0}; maddr = entry + 0x24 + r_read_le32 (buf + 0x20); if (r_buf_read_at (bin->b, maddr, n32s, sizeof (ut32)) == -1) { bprintf ("Warning: read (maddr) 2\n"); return 0; } maddr = (ut64)r_read_le32 (&n32s[0]); baddr = (bin->ehdr.e_entry >> 16) << 16; if (bin->phdr) { baddr = Elf_(r_bin_elf_get_baddr) (bin); } maddr += baddr; return maddr; } } // X86-NONPIE #if R_BIN_ELF64 if (!memcmp (buf, "\x49\x89\xd9", 3) && buf[156] == 0xe8) { // openbsd return r_read_le32 (&buf[157]) + entry + 156 + 5; } if (!memcmp (buf+29, "\x48\xc7\xc7", 3)) { // linux ut64 addr = (ut64)r_read_le32 (&buf[29 + 3]); return Elf_(r_bin_elf_v2p) (bin, addr); } #else if (buf[23] == '\x68') { ut64 addr = (ut64)r_read_le32 (&buf[23 + 1]); return Elf_(r_bin_elf_v2p) (bin, addr); } #endif /* linux64 pie main -- probably buggy in some cases */ if (buf[29] == 0x48 && buf[30] == 0x8d) { // lea rdi, qword [rip-0x21c4] ut8 *p = buf + 32; st32 maindelta = (st32)r_read_le32 (p); ut64 vmain = (ut64)(entry + 29 + maindelta) + 7; ut64 ventry = Elf_(r_bin_elf_p2v) (bin, entry); if (vmain>>16 == ventry>>16) { return (ut64)vmain; } } /* find sym.main if possible */ { ut64 m = getmainsymbol (bin); if (m != UT64_MAX) return m; } return UT64_MAX; } int Elf_(r_bin_elf_get_stripped)(ELFOBJ *bin) { int i; if (!bin->shdr) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if (bin->shdr[i].sh_type == SHT_SYMTAB) { return false; } } return true; } char *Elf_(r_bin_elf_intrp)(ELFOBJ *bin) { int i; if (!bin || !bin->phdr) { return NULL; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { char *str = NULL; ut64 addr = bin->phdr[i].p_offset; int sz = bin->phdr[i].p_memsz; sdb_num_set (bin->kv, "elf_header.intrp_addr", addr, 0); sdb_num_set (bin->kv, "elf_header.intrp_size", sz, 0); if (sz < 1) { return NULL; } str = malloc (sz + 1); if (!str) { return NULL; } if (r_buf_read_at (bin->b, addr, (ut8*)str, sz) < 1) { bprintf ("Warning: read (main)\n"); return 0; } str[sz] = 0; sdb_set (bin->kv, "elf_header.intrp", str, 0); return str; } } return NULL; } int Elf_(r_bin_elf_get_static)(ELFOBJ *bin) { int i; if (!bin->phdr) { return false; } for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { return false; } } return true; } char* Elf_(r_bin_elf_get_data_encoding)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_DATA]) { case ELFDATANONE: return strdup ("none"); case ELFDATA2LSB: 
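/* byte-order example: under ELFDATA2LSB the 32-bit value 0x11223344 is
   stored in the file as 44 33 22 11; ELFDATA2MSB keeps 11 22 33 44. */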
return strdup ("2's complement, little endian"); case ELFDATA2MSB: return strdup ("2's complement, big endian"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_DATA]); } } int Elf_(r_bin_elf_has_va)(ELFOBJ *bin) { return true; } char* Elf_(r_bin_elf_get_arch)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_ARC: case EM_ARC_A5: return strdup ("arc"); case EM_AVR: return strdup ("avr"); case EM_CRIS: return strdup ("cris"); case EM_68K: return strdup ("m68k"); case EM_MIPS: case EM_MIPS_RS3_LE: case EM_MIPS_X: return strdup ("mips"); case EM_MCST_ELBRUS: return strdup ("elbrus"); case EM_TRICORE: return strdup ("tricore"); case EM_ARM: case EM_AARCH64: return strdup ("arm"); case EM_HEXAGON: return strdup ("hexagon"); case EM_BLACKFIN: return strdup ("blackfin"); case EM_SPARC: case EM_SPARC32PLUS: case EM_SPARCV9: return strdup ("sparc"); case EM_PPC: case EM_PPC64: return strdup ("ppc"); case EM_PARISC: return strdup ("hppa"); case EM_PROPELLER: return strdup ("propeller"); case EM_MICROBLAZE: return strdup ("microblaze.gnu"); case EM_RISCV: return strdup ("riscv"); case EM_VAX: return strdup ("vax"); case EM_XTENSA: return strdup ("xtensa"); case EM_LANAI: return strdup ("lanai"); case EM_VIDEOCORE3: case EM_VIDEOCORE4: return strdup ("vc4"); case EM_SH: return strdup ("sh"); case EM_V850: return strdup ("v850"); case EM_IA_64: return strdup("ia64"); default: return strdup ("x86"); } } char* Elf_(r_bin_elf_get_machine_name)(ELFOBJ *bin) { switch (bin->ehdr.e_machine) { case EM_NONE: return strdup ("No machine"); case EM_M32: return strdup ("AT&T WE 32100"); case EM_SPARC: return strdup ("SUN SPARC"); case EM_386: return strdup ("Intel 80386"); case EM_68K: return strdup ("Motorola m68k family"); case EM_88K: return strdup ("Motorola m88k family"); case EM_860: return strdup ("Intel 80860"); case EM_MIPS: return strdup ("MIPS R3000"); case EM_S370: return strdup ("IBM System/370"); case EM_MIPS_RS3_LE: return strdup ("MIPS R3000 little-endian"); case EM_PARISC: return strdup ("HPPA"); case EM_VPP500: return strdup ("Fujitsu VPP500"); case EM_SPARC32PLUS: return strdup ("Sun's \"v8plus\""); case EM_960: return strdup ("Intel 80960"); case EM_PPC: return strdup ("PowerPC"); case EM_PPC64: return strdup ("PowerPC 64-bit"); case EM_S390: return strdup ("IBM S390"); case EM_V800: return strdup ("NEC V800 series"); case EM_FR20: return strdup ("Fujitsu FR20"); case EM_RH32: return strdup ("TRW RH-32"); case EM_RCE: return strdup ("Motorola RCE"); case EM_ARM: return strdup ("ARM"); case EM_BLACKFIN: return strdup ("Analog Devices Blackfin"); case EM_FAKE_ALPHA: return strdup ("Digital Alpha"); case EM_SH: return strdup ("Hitachi SH"); case EM_SPARCV9: return strdup ("SPARC v9 64-bit"); case EM_TRICORE: return strdup ("Siemens Tricore"); case EM_ARC: return strdup ("Argonaut RISC Core"); case EM_H8_300: return strdup ("Hitachi H8/300"); case EM_H8_300H: return strdup ("Hitachi H8/300H"); case EM_H8S: return strdup ("Hitachi H8S"); case EM_H8_500: return strdup ("Hitachi H8/500"); case EM_IA_64: return strdup ("Intel Merced"); case EM_MIPS_X: return strdup ("Stanford MIPS-X"); case EM_COLDFIRE: return strdup ("Motorola Coldfire"); case EM_68HC12: return strdup ("Motorola M68HC12"); case EM_MMA: return strdup ("Fujitsu MMA Multimedia Accelerator"); case EM_PCP: return strdup ("Siemens PCP"); case EM_NCPU: return strdup ("Sony nCPU embeeded RISC"); case EM_NDR1: return strdup ("Denso NDR1 microprocessor"); case EM_STARCORE: return strdup ("Motorola Start*Core processor"); 
case EM_ME16: return strdup ("Toyota ME16 processor"); case EM_ST100: return strdup ("STMicroelectronic ST100 processor"); case EM_TINYJ: return strdup ("Advanced Logic Corp. Tinyj emb.fam"); case EM_X86_64: return strdup ("AMD x86-64 architecture"); case EM_LANAI: return strdup ("32bit LANAI architecture"); case EM_PDSP: return strdup ("Sony DSP Processor"); case EM_FX66: return strdup ("Siemens FX66 microcontroller"); case EM_ST9PLUS: return strdup ("STMicroelectronics ST9+ 8/16 mc"); case EM_ST7: return strdup ("STmicroelectronics ST7 8 bit mc"); case EM_68HC16: return strdup ("Motorola MC68HC16 microcontroller"); case EM_68HC11: return strdup ("Motorola MC68HC11 microcontroller"); case EM_68HC08: return strdup ("Motorola MC68HC08 microcontroller"); case EM_68HC05: return strdup ("Motorola MC68HC05 microcontroller"); case EM_SVX: return strdup ("Silicon Graphics SVx"); case EM_ST19: return strdup ("STMicroelectronics ST19 8 bit mc"); case EM_VAX: return strdup ("Digital VAX"); case EM_CRIS: return strdup ("Axis Communications 32-bit embedded processor"); case EM_JAVELIN: return strdup ("Infineon Technologies 32-bit embedded processor"); case EM_FIREPATH: return strdup ("Element 14 64-bit DSP Processor"); case EM_ZSP: return strdup ("LSI Logic 16-bit DSP Processor"); case EM_MMIX: return strdup ("Donald Knuth's educational 64-bit processor"); case EM_HUANY: return strdup ("Harvard University machine-independent object files"); case EM_PRISM: return strdup ("SiTera Prism"); case EM_AVR: return strdup ("Atmel AVR 8-bit microcontroller"); case EM_FR30: return strdup ("Fujitsu FR30"); case EM_D10V: return strdup ("Mitsubishi D10V"); case EM_D30V: return strdup ("Mitsubishi D30V"); case EM_V850: return strdup ("NEC v850"); case EM_M32R: return strdup ("Mitsubishi M32R"); case EM_MN10300: return strdup ("Matsushita MN10300"); case EM_MN10200: return strdup ("Matsushita MN10200"); case EM_PJ: return strdup ("picoJava"); case EM_OPENRISC: return strdup ("OpenRISC 32-bit embedded processor"); case EM_ARC_A5: return strdup ("ARC Cores Tangent-A5"); case EM_XTENSA: return strdup ("Tensilica Xtensa Architecture"); case EM_AARCH64: return strdup ("ARM aarch64"); case EM_PROPELLER: return strdup ("Parallax Propeller"); case EM_MICROBLAZE: return strdup ("Xilinx MicroBlaze"); case EM_RISCV: return strdup ("RISC V"); case EM_VIDEOCORE3: return strdup ("VideoCore III"); case EM_VIDEOCORE4: return strdup ("VideoCore IV"); default: return r_str_newf ("<unknown>: 0x%x", bin->ehdr.e_machine); } } char* Elf_(r_bin_elf_get_file_type)(ELFOBJ *bin) { ut32 e_type; if (!bin) { return NULL; } e_type = (ut32)bin->ehdr.e_type; // cast to avoid warn in iphone-gcc, must be ut16 switch (e_type) { case ET_NONE: return strdup ("NONE (None)"); case ET_REL: return strdup ("REL (Relocatable file)"); case ET_EXEC: return strdup ("EXEC (Executable file)"); case ET_DYN: return strdup ("DYN (Shared object file)"); case ET_CORE: return strdup ("CORE (Core file)"); } if ((e_type >= ET_LOPROC) && (e_type <= ET_HIPROC)) { return r_str_newf ("Processor Specific: %x", e_type); } if ((e_type >= ET_LOOS) && (e_type <= ET_HIOS)) { return r_str_newf ("OS Specific: %x", e_type); } return r_str_newf ("<unknown>: %x", e_type); } char* Elf_(r_bin_elf_get_elf_class)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASSNONE: return strdup ("none"); case ELFCLASS32: return strdup ("ELF32"); case ELFCLASS64: return strdup ("ELF64"); default: return r_str_newf ("<unknown: %x>", bin->ehdr.e_ident[EI_CLASS]); } } int 
Elf_(r_bin_elf_get_bits)(ELFOBJ *bin) { /* Hack for ARCompact */ if (bin->ehdr.e_machine == EM_ARC_A5) { return 16; } /* Hack for Ps2 */ if (bin->phdr && bin->ehdr.e_machine == EM_MIPS) { const ut32 mipsType = bin->ehdr.e_flags & EF_MIPS_ARCH; if (bin->ehdr.e_type == ET_EXEC) { int i; bool haveInterp = false; for (i = 0; i < bin->ehdr.e_phnum; i++) { if (bin->phdr[i].p_type == PT_INTERP) { haveInterp = true; } } if (!haveInterp && mipsType == EF_MIPS_ARCH_3) { // Playstation2 Hack return 64; } } // TODO: show this specific asm.cpu somewhere in bininfo (mips1, mips2, mips3, mips32r2, ...) switch (mipsType) { case EF_MIPS_ARCH_1: case EF_MIPS_ARCH_2: case EF_MIPS_ARCH_3: case EF_MIPS_ARCH_4: case EF_MIPS_ARCH_5: case EF_MIPS_ARCH_32: return 32; case EF_MIPS_ARCH_64: return 64; case EF_MIPS_ARCH_32R2: return 32; case EF_MIPS_ARCH_64R2: return 64; break; } return 32; } /* Hack for Thumb */ if (bin->ehdr.e_machine == EM_ARM) { if (bin->ehdr.e_type != ET_EXEC) { struct r_bin_elf_symbol_t *symbol; if ((symbol = Elf_(r_bin_elf_get_symbols) (bin))) { int i = 0; for (i = 0; !symbol[i].last; i++) { ut64 paddr = symbol[i].offset; if (paddr & 1) { return 16; } } } } { ut64 entry = Elf_(r_bin_elf_get_entry_offset) (bin); if (entry & 1) { return 16; } } } switch (bin->ehdr.e_ident[EI_CLASS]) { case ELFCLASS32: return 32; case ELFCLASS64: return 64; case ELFCLASSNONE: default: return 32; // defaults } } static inline int noodle(ELFOBJ *bin, const char *s) { const ut8 *p = bin->b->buf; if (bin->b->length > 64) { p += bin->b->length - 64; } else { return 0; } return r_mem_mem (p, 64, (const ut8 *)s, strlen (s)) != NULL; } static inline int needle(ELFOBJ *bin, const char *s) { if (bin->shstrtab) { ut32 len = bin->shstrtab_size; if (len > 4096) { len = 4096; // avoid slow loading .. can be buggy? } return r_mem_mem ((const ut8*)bin->shstrtab, len, (const ut8*)s, strlen (s)) != NULL; } return 0; } // TODO: must return const char * all those strings must be const char os[LINUX] or so char* Elf_(r_bin_elf_get_osabi_name)(ELFOBJ *bin) { switch (bin->ehdr.e_ident[EI_OSABI]) { case ELFOSABI_LINUX: return strdup("linux"); case ELFOSABI_SOLARIS: return strdup("solaris"); case ELFOSABI_FREEBSD: return strdup("freebsd"); case ELFOSABI_HPUX: return strdup("hpux"); } /* Hack to identify OS */ if (needle (bin, "openbsd")) return strdup ("openbsd"); if (needle (bin, "netbsd")) return strdup ("netbsd"); if (needle (bin, "freebsd")) return strdup ("freebsd"); if (noodle (bin, "BEOS:APP_VERSION")) return strdup ("beos"); if (needle (bin, "GNU")) return strdup ("linux"); return strdup ("linux"); } ut8 *Elf_(r_bin_elf_grab_regstate)(ELFOBJ *bin, int *len) { if (bin->phdr) { int i; int num = bin->ehdr.e_phnum; for (i = 0; i < num; i++) { if (bin->phdr[i].p_type != PT_NOTE) { continue; } int bits = Elf_(r_bin_elf_get_bits)(bin); int regdelta = (bits == 64)? 0x84: 0x40; // x64 vs x32 int regsize = 160; // for x86-64 ut8 *buf = malloc (regsize); if (r_buf_read_at (bin->b, bin->phdr[i].p_offset + regdelta, buf, regsize) != regsize) { free (buf); bprintf ("Cannot read register state from CORE file\n"); return NULL; } if (len) { *len = regsize; } return buf; } } bprintf ("Cannot find NOTE section\n"); return NULL; } int Elf_(r_bin_elf_is_big_endian)(ELFOBJ *bin) { return (bin->ehdr.e_ident[EI_DATA] == ELFDATA2MSB); } /* XXX Init dt_strtab? 
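   The lookup below resolves DT_RPATH / DT_RUNPATH by treating d_un.d_val as
   an offset into the DT_STRTAB image already loaded into bin->strtab. A
   minimal sketch of the same idea (illustrative only; "dyn" and "path" are
   hypothetical locals, not part of this file):

       Elf_(Dyn) *dyn = &bin->dyn_buf[j];
       if ((dyn->d_tag == DT_RPATH || dyn->d_tag == DT_RUNPATH) &&
           dyn->d_un.d_val < bin->strtab_size) {
           const char *path = bin->strtab + dyn->d_un.d_val;
       }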
*/ char *Elf_(r_bin_elf_get_rpath)(ELFOBJ *bin) { char *ret = NULL; int j; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab) { return NULL; } for (j = 0; j< bin->dyn_entries; j++) { if (bin->dyn_buf[j].d_tag == DT_RPATH || bin->dyn_buf[j].d_tag == DT_RUNPATH) { if (!(ret = calloc (1, ELF_STRING_LENGTH))) { perror ("malloc (rpath)"); return NULL; } if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[ELF_STRING_LENGTH - 1] = '\0'; break; } } return ret; } static size_t get_relocs_num(ELFOBJ *bin) { size_t i, size, ret = 0; /* we need to be careful here, in malformed files the section size might * not be a multiple of a Rel/Rela size; round up so we allocate enough * space. */ #define NUMENTRIES_ROUNDUP(sectionsize, entrysize) (((sectionsize)+(entrysize)-1)/(entrysize)) if (!bin->g_sections) { return 0; } size = bin->is_rela == DT_REL ? sizeof (Elf_(Rel)) : sizeof (Elf_(Rela)); for (i = 0; !bin->g_sections[i].last; i++) { if (!strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela."))) { if (!bin->is_rela) { size = sizeof (Elf_(Rela)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } else if (!strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel."))){ if (!bin->is_rela) { size = sizeof (Elf_(Rel)); } ret += NUMENTRIES_ROUNDUP (bin->g_sections[i].size, size); } } return ret; #undef NUMENTRIES_ROUNDUP } static int read_reloc(ELFOBJ *bin, RBinElfReloc *r, int is_rela, ut64 offset) { ut8 *buf = bin->b->buf; int j = 0; if (offset + sizeof (Elf_ (Rela)) > bin->size || offset + sizeof (Elf_(Rela)) < offset) { return -1; } if (is_rela == DT_RELA) { Elf_(Rela) rela; #if R_BIN_ELF64 rela.r_offset = READ64 (buf + offset, j) rela.r_info = READ64 (buf + offset, j) rela.r_addend = READ64 (buf + offset, j) #else rela.r_offset = READ32 (buf + offset, j) rela.r_info = READ32 (buf + offset, j) rela.r_addend = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rela.r_offset; r->type = ELF_R_TYPE (rela.r_info); r->sym = ELF_R_SYM (rela.r_info); r->last = 0; r->addend = rela.r_addend; return sizeof (Elf_(Rela)); } else { Elf_(Rel) rel; #if R_BIN_ELF64 rel.r_offset = READ64 (buf + offset, j) rel.r_info = READ64 (buf + offset, j) #else rel.r_offset = READ32 (buf + offset, j) rel.r_info = READ32 (buf + offset, j) #endif r->is_rela = is_rela; r->offset = rel.r_offset; r->type = ELF_R_TYPE (rel.r_info); r->sym = ELF_R_SYM (rel.r_info); r->last = 0; return sizeof (Elf_(Rel)); } } RBinElfReloc* Elf_(r_bin_elf_get_relocs)(ELFOBJ *bin) { int res, rel, rela, i, j; size_t reloc_num = 0; RBinElfReloc *ret = NULL; if (!bin || !bin->g_sections) { return NULL; } reloc_num = get_relocs_num (bin); if (!reloc_num) { return NULL; } bin->reloc_num = reloc_num; ret = (RBinElfReloc*)calloc ((size_t)reloc_num + 1, sizeof(RBinElfReloc)); if (!ret) { return NULL; } #if DEAD_CODE ut64 section_text_offset = Elf_(r_bin_elf_get_section_offset) (bin, ".text"); if (section_text_offset == -1) { section_text_offset = 0; } #endif for (i = 0, rel = 0; !bin->g_sections[i].last && rel < reloc_num ; i++) { bool is_rela = 0 == strncmp (bin->g_sections[i].name, ".rela.", strlen (".rela.")); bool is_rel = 0 == strncmp (bin->g_sections[i].name, ".rel.", strlen (".rel.")); if (!is_rela && !is_rel) { continue; } for (j = 0; j < bin->g_sections[i].size; j += res) { if (bin->g_sections[i].size > bin->size) { break; } if (bin->g_sections[i].offset > bin->size) { break; } if (rel >= reloc_num) { bprintf 
("Internal error: ELF relocation buffer too small," "please file a bug report."); break; } if (!bin->is_rela) { rela = is_rela? DT_RELA : DT_REL; } else { rela = bin->is_rela; } res = read_reloc (bin, &ret[rel], rela, bin->g_sections[i].offset + j); if (j + res > bin->g_sections[i].size) { bprintf ("Warning: malformed file, relocation entry #%u is partially beyond the end of section %u.\n", rel, i); } if (bin->ehdr.e_type == ET_REL) { if (bin->g_sections[i].info < bin->ehdr.e_shnum && bin->shdr) { ret[rel].rva = bin->shdr[bin->g_sections[i].info].sh_offset + ret[rel].offset; ret[rel].rva = Elf_(r_bin_elf_p2v) (bin, ret[rel].rva); } else { ret[rel].rva = ret[rel].offset; } } else { ret[rel].rva = ret[rel].offset; ret[rel].offset = Elf_(r_bin_elf_v2p) (bin, ret[rel].offset); } ret[rel].last = 0; if (res < 0) { break; } rel++; } } ret[reloc_num].last = 1; return ret; } RBinElfLib* Elf_(r_bin_elf_get_libs)(ELFOBJ *bin) { RBinElfLib *ret = NULL; int j, k; if (!bin || !bin->phdr || !bin->dyn_buf || !bin->strtab || *(bin->strtab+1) == '0') { return NULL; } for (j = 0, k = 0; j < bin->dyn_entries; j++) if (bin->dyn_buf[j].d_tag == DT_NEEDED) { RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; if (bin->dyn_buf[j].d_un.d_val > bin->strtab_size) { free (ret); return NULL; } strncpy (ret[k].name, bin->strtab + bin->dyn_buf[j].d_un.d_val, ELF_STRING_LENGTH); ret[k].name[ELF_STRING_LENGTH - 1] = '\0'; ret[k].last = 0; if (ret[k].name[0]) { k++; } } RBinElfLib *r = realloc (ret, (k + 1) * sizeof (RBinElfLib)); if (!r) { perror ("realloc (libs)"); free (ret); return NULL; } ret = r; ret[k].last = 1; return ret; } static RBinElfSection* get_sections_from_phdr(ELFOBJ *bin) { RBinElfSection *ret; int i, num_sections = 0; ut64 reldyn = 0, relava = 0, pltgotva = 0, relva = 0; ut64 reldynsz = 0, relasz = 0, pltgotsz = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) return NULL; for (i = 0; i < bin->dyn_entries; i++) { switch (bin->dyn_buf[i].d_tag) { case DT_REL: reldyn = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELA: relva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_RELSZ: reldynsz = bin->dyn_buf[i].d_un.d_val; break; case DT_RELASZ: relasz = bin->dyn_buf[i].d_un.d_val; break; case DT_PLTGOT: pltgotva = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; case DT_PLTRELSZ: pltgotsz = bin->dyn_buf[i].d_un.d_val; break; case DT_JMPREL: relava = bin->dyn_buf[i].d_un.d_ptr; num_sections++; break; default: break; } } ret = calloc (num_sections + 1, sizeof(RBinElfSection)); if (!ret) { return NULL; } i = 0; if (reldyn) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, reldyn); ret[i].rva = reldyn; ret[i].size = reldynsz; strcpy (ret[i].name, ".rel.dyn"); ret[i].last = 0; i++; } if (relava) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relava); ret[i].rva = relava; ret[i].size = pltgotsz; strcpy (ret[i].name, ".rela.plt"); ret[i].last = 0; i++; } if (relva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, relva); ret[i].rva = relva; ret[i].size = relasz; strcpy (ret[i].name, ".rel.plt"); ret[i].last = 0; i++; } if (pltgotva) { ret[i].offset = Elf_(r_bin_elf_v2p) (bin, pltgotva); ret[i].rva = pltgotva; ret[i].size = pltgotsz; strcpy (ret[i].name, ".got.plt"); ret[i].last = 0; i++; } ret[i].last = 1; return ret; } RBinElfSection* Elf_(r_bin_elf_get_sections)(ELFOBJ *bin) { RBinElfSection *ret = NULL; char unknown_s[20], invalid_s[20]; int i, nidx, unknown_c=0, invalid_c=0; if (!bin) { return NULL; } if 
(bin->g_sections) { return bin->g_sections; } if (!bin->shdr) { //we don't give up search in phdr section return get_sections_from_phdr (bin); } if (!(ret = calloc ((bin->ehdr.e_shnum + 1), sizeof (RBinElfSection)))) { return NULL; } for (i = 0; i < bin->ehdr.e_shnum; i++) { ret[i].offset = bin->shdr[i].sh_offset; ret[i].size = bin->shdr[i].sh_size; ret[i].align = bin->shdr[i].sh_addralign; ret[i].flags = bin->shdr[i].sh_flags; ret[i].link = bin->shdr[i].sh_link; ret[i].info = bin->shdr[i].sh_info; ret[i].type = bin->shdr[i].sh_type; if (bin->ehdr.e_type == ET_REL) { ret[i].rva = bin->baddr + bin->shdr[i].sh_offset; } else { ret[i].rva = bin->shdr[i].sh_addr; } nidx = bin->shdr[i].sh_name; #define SHNAME (int)bin->shdr[i].sh_name #define SHNLEN ELF_STRING_LENGTH - 4 #define SHSIZE (int)bin->shstrtab_size if (nidx < 0 || !bin->shstrtab_section || !bin->shstrtab_size || nidx > bin->shstrtab_size) { snprintf (invalid_s, sizeof (invalid_s) - 4, "invalid%d", invalid_c); strncpy (ret[i].name, invalid_s, SHNLEN); invalid_c++; } else { if (bin->shstrtab && (SHNAME > 0) && (SHNAME < SHSIZE)) { strncpy (ret[i].name, &bin->shstrtab[SHNAME], SHNLEN); } else { if (bin->shdr[i].sh_type == SHT_NULL) { //to follow the same behaviour as readelf strncpy (ret[i].name, "", sizeof (ret[i].name) - 4); } else { snprintf (unknown_s, sizeof (unknown_s)-4, "unknown%d", unknown_c); strncpy (ret[i].name, unknown_s, sizeof (ret[i].name)-4); unknown_c++; } } } ret[i].name[ELF_STRING_LENGTH-2] = '\0'; ret[i].last = 0; } ret[i].last = 1; return ret; } static void fill_symbol_bind_and_type (struct r_bin_elf_symbol_t *ret, Elf_(Sym) *sym) { #define s_bind(x) ret->bind = x #define s_type(x) ret->type = x switch (ELF_ST_BIND(sym->st_info)) { case STB_LOCAL: s_bind ("LOCAL"); break; case STB_GLOBAL: s_bind ("GLOBAL"); break; case STB_WEAK: s_bind ("WEAK"); break; case STB_NUM: s_bind ("NUM"); break; case STB_LOOS: s_bind ("LOOS"); break; case STB_HIOS: s_bind ("HIOS"); break; case STB_LOPROC: s_bind ("LOPROC"); break; case STB_HIPROC: s_bind ("HIPROC"); break; default: s_bind ("UNKNOWN"); } switch (ELF_ST_TYPE (sym->st_info)) { case STT_NOTYPE: s_type ("NOTYPE"); break; case STT_OBJECT: s_type ("OBJECT"); break; case STT_FUNC: s_type ("FUNC"); break; case STT_SECTION: s_type ("SECTION"); break; case STT_FILE: s_type ("FILE"); break; case STT_COMMON: s_type ("COMMON"); break; case STT_TLS: s_type ("TLS"); break; case STT_NUM: s_type ("NUM"); break; case STT_LOOS: s_type ("LOOS"); break; case STT_HIOS: s_type ("HIOS"); break; case STT_LOPROC: s_type ("LOPROC"); break; case STT_HIPROC: s_type ("HIPROC"); break; default: s_type ("UNKNOWN"); } } static RBinElfSymbol* get_symbols_from_phdr(ELFOBJ *bin, int type) { Elf_(Sym) *sym = NULL; Elf_(Addr) addr_sym_table = 0; ut8 s[sizeof (Elf_(Sym))] = {0}; RBinElfSymbol *ret = NULL; int i, j, r, tsize, nsym, ret_ctr; ut64 toffset = 0, tmp_offset; ut32 size, sym_size = 0; if (!bin || !bin->phdr || !bin->ehdr.e_phnum) { return NULL; } for (j = 0; j < bin->dyn_entries; j++) { switch (bin->dyn_buf[j].d_tag) { case (DT_SYMTAB): addr_sym_table = Elf_(r_bin_elf_v2p) (bin, bin->dyn_buf[j].d_un.d_ptr); break; case (DT_SYMENT): sym_size = bin->dyn_buf[j].d_un.d_val; break; default: break; } } if (!addr_sym_table) { return NULL; } if (!sym_size) { return NULL; } //since ELF doesn't specify the symbol table size we may read until the end of the buffer nsym = (bin->size - addr_sym_table) / sym_size; if (!UT32_MUL (&size, nsym, sizeof (Elf_ (Sym)))) { goto beach; } if (size < 1) { goto beach; } if 
(addr_sym_table > bin->size || addr_sym_table + size > bin->size) { goto beach; } if (nsym < 1) { return NULL; } // we reserve room for 4096 and grow as needed. size_t capacity1 = 4096; size_t capacity2 = 4096; sym = (Elf_(Sym)*) calloc (capacity1, sym_size); ret = (RBinElfSymbol *) calloc (capacity2, sizeof (struct r_bin_elf_symbol_t)); if (!sym || !ret) { goto beach; } for (i = 1, ret_ctr = 0; i < nsym; i++) { if (i >= capacity1) { // maybe grow // You take what you want, but you eat what you take. Elf_(Sym)* temp_sym = (Elf_(Sym)*) realloc(sym, (capacity1 * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; capacity1 *= GROWTH_FACTOR; } if (ret_ctr >= capacity2) { // maybe grow RBinElfSymbol *temp_ret = realloc (ret, capacity2 * GROWTH_FACTOR * sizeof (struct r_bin_elf_symbol_t)); if (!temp_ret) { goto beach; } ret = temp_ret; capacity2 *= GROWTH_FACTOR; } // read in one entry r = r_buf_read_at (bin->b, addr_sym_table + i * sizeof (Elf_ (Sym)), s, sizeof (Elf_ (Sym))); if (r < 1) { goto beach; } int j = 0; #if R_BIN_ELF64 sym[i].st_name = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); sym[i].st_value = READ64 (s, j); sym[i].st_size = READ64 (s, j); #else sym[i].st_name = READ32 (s, j); sym[i].st_value = READ32 (s, j); sym[i].st_size = READ32 (s, j); sym[i].st_info = READ8 (s, j); sym[i].st_other = READ8 (s, j); sym[i].st_shndx = READ16 (s, j); #endif // zero symbol is always empty // Examine entry and maybe store if (type == R_BIN_ELF_IMPORTS && sym[i].st_shndx == STN_UNDEF) { if (sym[i].st_value) { toffset = sym[i].st_value; } else if ((toffset = get_import_addr (bin, i)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[i].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[i].st_info) != STT_SECTION && ELF_ST_TYPE (sym[i].st_info) != STT_FILE) { tsize = sym[i].st_size; toffset = (ut64) sym[i].st_value; } else { continue; } tmp_offset = Elf_(r_bin_elf_v2p) (bin, toffset); if (tmp_offset > bin->size) { goto done; } if (sym[i].st_name + 2 > bin->strtab_size) { // Since we are reading beyond the symbol table what's happening // is that some entry is trying to dereference the strtab beyond its capacity // is not a symbol so is the end goto done; } ret[ret_ctr].offset = tmp_offset; ret[ret_ctr].size = tsize; { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[i].st_name; int maxsize = R_MIN (bin->size, bin->strtab_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const int len = __strnlen (bin->strtab + st_name, rest); memcpy (ret[ret_ctr].name, &bin->strtab[st_name], len); } } ret[ret_ctr].ordinal = i; ret[ret_ctr].in_shdr = false; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[i]); ret[ret_ctr].last = 0; ret_ctr++; } done: ret[ret_ctr].last = 1; // Size everything down to only what is used { nsym = i > 0 ? i : 1; Elf_ (Sym) * temp_sym = (Elf_ (Sym)*) realloc (sym, (nsym * GROWTH_FACTOR) * sym_size); if (!temp_sym) { goto beach; } sym = temp_sym; } { ret_ctr = ret_ctr > 0 ? 
ret_ctr : 1; RBinElfSymbol *p = (RBinElfSymbol *) realloc (ret, (ret_ctr + 1) * sizeof (RBinElfSymbol)); if (!p) { goto beach; } ret = p; } if (type == R_BIN_ELF_IMPORTS && !bin->imports_by_ord_size) { bin->imports_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->imports_by_ord = (RBinImport * *) calloc (ret_ctr + 1, sizeof (RBinImport*)); } else { bin->imports_by_ord = NULL; } } else if (type == R_BIN_ELF_SYMBOLS && !bin->symbols_by_ord_size && ret_ctr) { bin->symbols_by_ord_size = ret_ctr + 1; if (ret_ctr > 0) { bin->symbols_by_ord = (RBinSymbol * *) calloc (ret_ctr + 1, sizeof (RBinSymbol*)); }else { bin->symbols_by_ord = NULL; } } free (sym); return ret; beach: free (sym); free (ret); return NULL; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_symbols)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_symbols) { return bin->phdr_symbols; } bin->phdr_symbols = get_symbols_from_phdr (bin, R_BIN_ELF_SYMBOLS); return bin->phdr_symbols; } static RBinElfSymbol *Elf_(r_bin_elf_get_phdr_imports)(ELFOBJ *bin) { if (!bin) { return NULL; } if (bin->phdr_imports) { return bin->phdr_imports; } bin->phdr_imports = get_symbols_from_phdr (bin, R_BIN_ELF_IMPORTS); return bin->phdr_imports; } static int Elf_(fix_symbols)(ELFOBJ *bin, int nsym, int type, RBinElfSymbol **sym) { int count = 0; RBinElfSymbol *ret = *sym; RBinElfSymbol *phdr_symbols = (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); RBinElfSymbol *tmp, *p; if (phdr_symbols) { RBinElfSymbol *d = ret; while (!d->last) { /* find match in phdr */ p = phdr_symbols; while (!p->last) { if (p->offset && d->offset == p->offset) { p->in_shdr = true; if (*p->name && strcmp (d->name, p->name)) { strcpy (d->name, p->name); } } p++; } d++; } p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { count++; } p++; } /*Take those symbols that are not present in the shdr but yes in phdr*/ /*This should only should happen with fucked up binaries*/ if (count > 0) { /*what happens if a shdr says it has only one symbol? we should look anyway into phdr*/ tmp = (RBinElfSymbol*)realloc (ret, (nsym + count + 1) * sizeof (RBinElfSymbol)); if (!tmp) { return -1; } ret = tmp; ret[nsym--].last = 0; p = phdr_symbols; while (!p->last) { if (!p->in_shdr) { memcpy (&ret[++nsym], p, sizeof (RBinElfSymbol)); } p++; } ret[nsym + 1].last = 1; } *sym = ret; return nsym + 1; } return nsym; } static RBinElfSymbol* Elf_(_r_bin_elf_get_symbols_imports)(ELFOBJ *bin, int type) { ut32 shdr_size; int tsize, nsym, ret_ctr = 0, i, j, r, k, newsize; ut64 toffset; ut32 size = 0; RBinElfSymbol *ret = NULL; Elf_(Shdr) *strtab_section = NULL; Elf_(Sym) *sym = NULL; ut8 s[sizeof (Elf_(Sym))] = { 0 }; char *strtab = NULL; if (!bin || !bin->shdr || !bin->ehdr.e_shnum || bin->ehdr.e_shnum == 0xffff) { return (type == R_BIN_ELF_SYMBOLS) ? Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } if (!UT32_MUL (&shdr_size, bin->ehdr.e_shnum, sizeof (Elf_(Shdr)))) { return false; } if (shdr_size + 8 > bin->size) { return false; } for (i = 0; i < bin->ehdr.e_shnum; i++) { if ((type == R_BIN_ELF_IMPORTS && bin->shdr[i].sh_type == (bin->ehdr.e_type == ET_REL ? SHT_SYMTAB : SHT_DYNSYM)) || (type == R_BIN_ELF_SYMBOLS && bin->shdr[i].sh_type == (Elf_(r_bin_elf_get_stripped) (bin) ? SHT_DYNSYM : SHT_SYMTAB))) { if (bin->shdr[i].sh_link < 1) { /* oops. fix out of range pointers */ continue; } // hack to avoid asan cry if ((bin->shdr[i].sh_link * sizeof(Elf_(Shdr))) >= shdr_size) { /* oops. 
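(sh_link names the section header of the associated string table; an index at or beyond the mapped section-header block would send bin->shdr[sh_link] out of bounds, so the entry is skipped to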
fix out of range pointers */ continue; } strtab_section = &bin->shdr[bin->shdr[i].sh_link]; if (strtab_section->sh_size > ST32_MAX || strtab_section->sh_size+8 > bin->size) { bprintf ("size (syms strtab)"); free (ret); free (strtab); return NULL; } if (!strtab) { if (!(strtab = (char *)calloc (1, 8 + strtab_section->sh_size))) { bprintf ("malloc (syms strtab)"); goto beach; } if (strtab_section->sh_offset > bin->size || strtab_section->sh_offset + strtab_section->sh_size > bin->size) { goto beach; } if (r_buf_read_at (bin->b, strtab_section->sh_offset, (ut8*)strtab, strtab_section->sh_size) == -1) { bprintf ("Warning: read (syms strtab)\n"); goto beach; } } newsize = 1 + bin->shdr[i].sh_size; if (newsize < 0 || newsize > bin->size) { bprintf ("invalid shdr %d size\n", i); goto beach; } nsym = (int)(bin->shdr[i].sh_size / sizeof (Elf_(Sym))); if (nsym < 0) { goto beach; } if (!(sym = (Elf_(Sym) *)calloc (nsym, sizeof (Elf_(Sym))))) { bprintf ("calloc (syms)"); goto beach; } if (!UT32_MUL (&size, nsym, sizeof (Elf_(Sym)))) { goto beach; } if (size < 1 || size > bin->size) { goto beach; } if (bin->shdr[i].sh_offset > bin->size) { goto beach; } if (bin->shdr[i].sh_offset + size > bin->size) { goto beach; } for (j = 0; j < nsym; j++) { int k = 0; r = r_buf_read_at (bin->b, bin->shdr[i].sh_offset + j * sizeof (Elf_(Sym)), s, sizeof (Elf_(Sym))); if (r < 1) { bprintf ("Warning: read (sym)\n"); goto beach; } #if R_BIN_ELF64 sym[j].st_name = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) sym[j].st_value = READ64 (s, k) sym[j].st_size = READ64 (s, k) #else sym[j].st_name = READ32 (s, k) sym[j].st_value = READ32 (s, k) sym[j].st_size = READ32 (s, k) sym[j].st_info = READ8 (s, k) sym[j].st_other = READ8 (s, k) sym[j].st_shndx = READ16 (s, k) #endif } free (ret); ret = calloc (nsym, sizeof (RBinElfSymbol)); if (!ret) { bprintf ("Cannot allocate %d symbols\n", nsym); goto beach; } for (k = 1, ret_ctr = 0; k < nsym; k++) { if (type == R_BIN_ELF_IMPORTS && sym[k].st_shndx == STN_UNDEF) { if (sym[k].st_value) { toffset = sym[k].st_value; } else if ((toffset = get_import_addr (bin, k)) == -1){ toffset = 0; } tsize = 16; } else if (type == R_BIN_ELF_SYMBOLS && sym[k].st_shndx != STN_UNDEF && ELF_ST_TYPE (sym[k].st_info) != STT_SECTION && ELF_ST_TYPE (sym[k].st_info) != STT_FILE) { //int idx = sym[k].st_shndx; tsize = sym[k].st_size; toffset = (ut64)sym[k].st_value; } else { continue; } if (bin->ehdr.e_type == ET_REL) { if (sym[k].st_shndx < bin->ehdr.e_shnum) ret[ret_ctr].offset = sym[k].st_value + bin->shdr[sym[k].st_shndx].sh_offset; } else { ret[ret_ctr].offset = Elf_(r_bin_elf_v2p) (bin, toffset); } ret[ret_ctr].size = tsize; if (sym[k].st_name + 2 > strtab_section->sh_size) { bprintf ("Warning: index out of strtab range\n"); goto beach; } { int rest = ELF_STRING_LENGTH - 1; int st_name = sym[k].st_name; int maxsize = R_MIN (bin->b->length, strtab_section->sh_size); if (st_name < 0 || st_name >= maxsize) { ret[ret_ctr].name[0] = 0; } else { const size_t len = __strnlen (strtab + sym[k].st_name, rest); memcpy (ret[ret_ctr].name, &strtab[sym[k].st_name], len); } } ret[ret_ctr].ordinal = k; ret[ret_ctr].name[ELF_STRING_LENGTH - 2] = '\0'; fill_symbol_bind_and_type (&ret[ret_ctr], &sym[k]); ret[ret_ctr].last = 0; ret_ctr++; } ret[ret_ctr].last = 1; // ugly dirty hack :D R_FREE (strtab); R_FREE (sym); } } if (!ret) { return (type == R_BIN_ELF_SYMBOLS) ? 
Elf_(r_bin_elf_get_phdr_symbols) (bin) : Elf_(r_bin_elf_get_phdr_imports) (bin); } int max = -1; RBinElfSymbol *aux = NULL; nsym = Elf_(fix_symbols) (bin, ret_ctr, type, &ret); if (nsym == -1) { goto beach; } aux = ret; while (!aux->last) { if ((int)aux->ordinal > max) { max = aux->ordinal; } aux++; } nsym = max; if (type == R_BIN_ELF_IMPORTS) { R_FREE (bin->imports_by_ord); bin->imports_by_ord_size = nsym + 1; bin->imports_by_ord = (RBinImport**)calloc (R_MAX (1, nsym + 1), sizeof (RBinImport*)); } else if (type == R_BIN_ELF_SYMBOLS) { R_FREE (bin->symbols_by_ord); bin->symbols_by_ord_size = nsym + 1; bin->symbols_by_ord = (RBinSymbol**)calloc (R_MAX (1, nsym + 1), sizeof (RBinSymbol*)); } return ret; beach: free (ret); free (sym); free (strtab); return NULL; } RBinElfSymbol *Elf_(r_bin_elf_get_symbols)(ELFOBJ *bin) { if (!bin->g_symbols) { bin->g_symbols = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_SYMBOLS); } return bin->g_symbols; } RBinElfSymbol *Elf_(r_bin_elf_get_imports)(ELFOBJ *bin) { if (!bin->g_imports) { bin->g_imports = Elf_(_r_bin_elf_get_symbols_imports) (bin, R_BIN_ELF_IMPORTS); } return bin->g_imports; } RBinElfField* Elf_(r_bin_elf_get_fields)(ELFOBJ *bin) { RBinElfField *ret = NULL; int i = 0, j; if (!bin || !(ret = calloc ((bin->ehdr.e_phnum + 3 + 1), sizeof (RBinElfField)))) { return NULL; } strncpy (ret[i].name, "ehdr", ELF_STRING_LENGTH); ret[i].offset = 0; ret[i++].last = 0; strncpy (ret[i].name, "shoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_shoff; ret[i++].last = 0; strncpy (ret[i].name, "phoff", ELF_STRING_LENGTH); ret[i].offset = bin->ehdr.e_phoff; ret[i++].last = 0; for (j = 0; bin->phdr && j < bin->ehdr.e_phnum; i++, j++) { snprintf (ret[i].name, ELF_STRING_LENGTH, "phdr_%i", j); ret[i].offset = bin->phdr[j].p_offset; ret[i].last = 0; } ret[i].last = 1; return ret; } void* Elf_(r_bin_elf_free)(ELFOBJ* bin) { int i; if (!bin) { return NULL; } free (bin->phdr); free (bin->shdr); free (bin->strtab); free (bin->dyn_buf); free (bin->shstrtab); free (bin->dynstr); //free (bin->strtab_section); if (bin->imports_by_ord) { for (i = 0; i<bin->imports_by_ord_size; i++) { free (bin->imports_by_ord[i]); } free (bin->imports_by_ord); } if (bin->symbols_by_ord) { for (i = 0; i<bin->symbols_by_ord_size; i++) { free (bin->symbols_by_ord[i]); } free (bin->symbols_by_ord); } r_buf_free (bin->b); if (bin->g_symbols != bin->phdr_symbols) { R_FREE (bin->phdr_symbols); } if (bin->g_imports != bin->phdr_imports) { R_FREE (bin->phdr_imports); } R_FREE (bin->g_sections); R_FREE (bin->g_symbols); R_FREE (bin->g_imports); free (bin); return NULL; } ELFOBJ* Elf_(r_bin_elf_new)(const char* file, bool verbose) { ut8 *buf; int size; ELFOBJ *bin = R_NEW0 (ELFOBJ); if (!bin) { return NULL; } memset (bin, 0, sizeof (ELFOBJ)); bin->file = file; if (!(buf = (ut8*)r_file_slurp (file, &size))) { return Elf_(r_bin_elf_free) (bin); } bin->size = size; bin->verbose = verbose; bin->b = r_buf_new (); if (!r_buf_set_bytes (bin->b, buf, bin->size)) { free (buf); return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { free (buf); return Elf_(r_bin_elf_free) (bin); } free (buf); return bin; } ELFOBJ* Elf_(r_bin_elf_new_buf)(RBuffer *buf, bool verbose) { ELFOBJ *bin = R_NEW0 (ELFOBJ); bin->kv = sdb_new0 (); bin->b = r_buf_new (); bin->size = (ut32)buf->length; bin->verbose = verbose; if (!r_buf_set_bytes (bin->b, buf->buf, buf->length)) { return Elf_(r_bin_elf_free) (bin); } if (!elf_init (bin)) { return Elf_(r_bin_elf_free) (bin); } return bin; } static int is_in_pphdr (Elf_(Phdr) 
*p, ut64 addr) { return addr >= p->p_offset && addr < p->p_offset + p->p_memsz; } static int is_in_vphdr (Elf_(Phdr) *p, ut64 addr) { return addr >= p->p_vaddr && addr < p->p_vaddr + p->p_memsz; } /* converts a physical address to the virtual address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_p2v) (ELFOBJ *bin, ut64 paddr) { int i; if (!bin) return 0; if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return bin->baddr + paddr; } return paddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_pphdr (p, paddr)) { if (!p->p_vaddr && !p->p_offset) { continue; } return p->p_vaddr + paddr - p->p_offset; } } return paddr; } /* converts a virtual address to the relative physical address, looking * at the program headers in the binary bin */ ut64 Elf_(r_bin_elf_v2p) (ELFOBJ *bin, ut64 vaddr) { int i; if (!bin) { return 0; } if (!bin->phdr) { if (bin->ehdr.e_type == ET_REL) { return vaddr - bin->baddr; } return vaddr; } for (i = 0; i < bin->ehdr.e_phnum; ++i) { Elf_(Phdr) *p = &bin->phdr[i]; if (!p) { break; } if (p->p_type == PT_LOAD && is_in_vphdr (p, vaddr)) { if (!p->p_offset && !p->p_vaddr) { continue; } return p->p_offset + vaddr - p->p_vaddr; } } return vaddr; }
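The p2v/v2p pair above resolves addresses by scanning the PT_LOAD program headers and applying the offset/vaddr delta of the covering segment. A minimal standalone sketch of the same lookup, over a hypothetical seg_t view of the headers rather than the ELFOBJ state used above:

#include <stdint.h>
#include <stddef.h>

typedef struct { uint64_t off, vaddr, memsz; } seg_t; /* hypothetical PT_LOAD view */

/* Map a file offset to a virtual address; fall back to the offset itself
 * when no loadable segment covers it, as the code above does. */
static uint64_t p2v (const seg_t *segs, size_t n, uint64_t paddr) {
	size_t i;
	for (i = 0; i < n; i++) {
		const seg_t *s = &segs[i];
		if (paddr >= s->off && paddr < s->off + s->memsz) {
			return s->vaddr + (paddr - s->off);
		}
	}
	return paddr;
}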
static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += entry->vn_next; snprintf 
(key, sizeof (key), "version%d", cnt); sdb_ns_set (sdb, key, sdb_version); // if entry->vn_next is 0 the loop would iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; }
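The version-needs parser above walks variable-length Verneed/Vernaux chains with a bounds check before every read (vstart + sizeof (...) <= end) and bails out when a vn_next link of zero would stall the cursor. A reduced sketch of that bounded-iteration pattern, using a hypothetical rec_t record:

#include <stdint.h>
#include <string.h>

typedef struct { uint32_t payload; uint32_t next; } rec_t; /* hypothetical on-disk record */

/* Visit a chain of records inside [buf, buf + len) without ever reading
 * past the end or spinning forever on a zero link. */
static int walk_chain (const uint8_t *buf, size_t len) {
	size_t off = 0;
	int count = 0;
	while (off + sizeof (rec_t) <= len) {	/* check before every read */
		rec_t r;
		memcpy (&r, buf + off, sizeof (r));	/* memcpy avoids unaligned access */
		count++;
		if (!r.next || r.next > len - off) {
			break;	/* zero link loops forever; oversized link leaves the buffer */
		}
		off += r.next;
	}
	return count;
}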
static Sdb *store_versioninfo_gnu_verneed(ELFOBJ *bin, Elf_(Shdr) *shdr, int sz) { ut8 *end, *need = NULL; const char *section_name = ""; Elf_(Shdr) *link_shdr = NULL; const char *link_section_name = ""; Sdb *sdb_vernaux = NULL; Sdb *sdb_version = NULL; Sdb *sdb = NULL; int i, cnt; if (!bin || !bin->dynstr) { return NULL; } if (shdr->sh_link > bin->ehdr.e_shnum) { return NULL; } if (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) { return NULL; } sdb = sdb_new0 (); if (!sdb) { return NULL; } link_shdr = &bin->shdr[shdr->sh_link]; if (bin->shstrtab && shdr->sh_name < bin->shstrtab_size) { section_name = &bin->shstrtab[shdr->sh_name]; } if (bin->shstrtab && link_shdr->sh_name < bin->shstrtab_size) { link_section_name = &bin->shstrtab[link_shdr->sh_name]; } if (!(need = (ut8*) calloc (R_MAX (1, shdr->sh_size), sizeof (ut8)))) { bprintf ("Warning: Cannot allocate memory for Elf_(Verneed)\n"); goto beach; } end = need + shdr->sh_size; sdb_set (sdb, "section_name", section_name, 0); sdb_num_set (sdb, "num_entries", shdr->sh_info, 0); sdb_num_set (sdb, "addr", shdr->sh_addr, 0); sdb_num_set (sdb, "offset", shdr->sh_offset, 0); sdb_num_set (sdb, "link", shdr->sh_link, 0); sdb_set (sdb, "link_section_name", link_section_name, 0); if (shdr->sh_offset > bin->size || shdr->sh_offset + shdr->sh_size > bin->size) { goto beach; } if (shdr->sh_offset + shdr->sh_size < shdr->sh_size) { goto beach; } i = r_buf_read_at (bin->b, shdr->sh_offset, need, shdr->sh_size); if (i < 0) goto beach; //XXX we should use DT_VERNEEDNUM instead of sh_info //TODO https://sourceware.org/ml/binutils/2014-11/msg00353.html for (i = 0, cnt = 0; cnt < shdr->sh_info; ++cnt) { int j, isum; ut8 *vstart = need + i; Elf_(Verneed) vvn = {0}; if (vstart + sizeof (Elf_(Verneed)) > end) { goto beach; } Elf_(Verneed) *entry = &vvn; char key[32] = {0}; sdb_version = sdb_new0 (); if (!sdb_version) { goto beach; } j = 0; vvn.vn_version = READ16 (vstart, j) vvn.vn_cnt = READ16 (vstart, j) vvn.vn_file = READ32 (vstart, j) vvn.vn_aux = READ32 (vstart, j) vvn.vn_next = READ32 (vstart, j) sdb_num_set (sdb_version, "vn_version", entry->vn_version, 0); sdb_num_set (sdb_version, "idx", i, 0); if (entry->vn_file > bin->dynstr_size) { goto beach; } { char *s = r_str_ndup (&bin->dynstr[entry->vn_file], 16); sdb_set (sdb_version, "file_name", s, 0); free (s); } sdb_num_set (sdb_version, "cnt", entry->vn_cnt, 0); st32 vnaux = entry->vn_aux; if (vnaux < 1) { goto beach; } vstart += vnaux; for (j = 0, isum = i + entry->vn_aux; j < entry->vn_cnt && vstart + sizeof (Elf_(Vernaux)) <= end; ++j) { int k; Elf_(Vernaux) * aux = NULL; Elf_(Vernaux) vaux = {0}; sdb_vernaux = sdb_new0 (); if (!sdb_vernaux) { goto beach; } aux = (Elf_(Vernaux)*)&vaux; k = 0; vaux.vna_hash = READ32 (vstart, k) vaux.vna_flags = READ16 (vstart, k) vaux.vna_other = READ16 (vstart, k) vaux.vna_name = READ32 (vstart, k) vaux.vna_next = READ32 (vstart, k) if (aux->vna_name > bin->dynstr_size) { goto beach; } sdb_num_set (sdb_vernaux, "idx", isum, 0); if (aux->vna_name > 0 && aux->vna_name + 8 < bin->dynstr_size) { char name [16]; strncpy (name, &bin->dynstr[aux->vna_name], sizeof (name)-1); name[sizeof(name)-1] = 0; sdb_set (sdb_vernaux, "name", name, 0); } sdb_set (sdb_vernaux, "flags", get_ver_flags (aux->vna_flags), 0); sdb_num_set (sdb_vernaux, "version", aux->vna_other, 0); isum += aux->vna_next; vstart += aux->vna_next; snprintf (key, sizeof (key), "vernaux%d", j); sdb_ns_set (sdb_version, key, sdb_vernaux); } if ((int)entry->vn_next < 0) { bprintf ("Invalid vn_next\n"); break; } i += 
entry->vn_next; snprintf (key, sizeof (key), "version%d", cnt); sdb_ns_set (sdb, key, sdb_version); // if entry->vn_next is 0 the loop would iterate infinitely if (!entry->vn_next) { break; } } free (need); return sdb; beach: free (need); sdb_free (sdb_vernaux); sdb_free (sdb_version); sdb_free (sdb); return NULL; }
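Field decoding in both versions of the function relies on the READ8/READ16/READ32 helpers, which by all appearances fetch a fixed-width integer at the current index and advance it; the real macros also honor the binary's endianness, which this little-endian-only sketch omits:

#include <stdint.h>

/* Hypothetical stand-ins for READ16/READ32: decode a little-endian field
 * at *idx and move the cursor past it. */
static uint16_t read16 (const uint8_t *b, int *idx) {
	uint16_t v = (uint16_t)b[*idx] | ((uint16_t)b[*idx + 1] << 8);
	*idx += 2;
	return v;
}

static uint32_t read32 (const uint8_t *b, int *idx) {
	uint32_t lo = read16 (b, idx);
	return lo | ((uint32_t)read16 (b, idx) << 16);
}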
{'added': [(708, '\tif (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) {'), (840, '\tif (shdr->sh_size < 1 || shdr->sh_size > SIZE_MAX) {')], 'deleted': [(708, '\tif (shdr->sh_size < 1) {'), (840, '\tif (shdr->sh_size < 1) {')]}
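That two-line diff is the entire recorded fix: the same size guard added at lines 708 and 840 of the patched file, so a section size read from the binary is validated before it becomes an allocation size. In isolation the hardened check looks roughly like this sketch:

#include <stdint.h>
#include <stdlib.h>

static uint8_t *alloc_section_buf (uint64_t sh_size) {
	if (sh_size < 1 || sh_size > SIZE_MAX) {
		/* The SIZE_MAX half is vacuous where size_t is 64 bits wide,
		 * but it rejects huge 64-bit sizes on 32-bit size_t targets. */
		return NULL;
	}
	return calloc ((size_t)sh_size, sizeof (uint8_t));
}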
2
2
2,830
21,469
136
1,055
28
https://github.com/radare/radare2
CVE-2017-16357
CWE-119
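The CWE-119 label above covers the out-of-bounds reads this record patches; a related defense visible throughout the same file is routing size arithmetic such as e_shnum * sizeof (Elf_(Shdr)) through UT32_MUL before trusting the product. A generic sketch of such a checked multiply (the real UT32_MUL may differ in signature and semantics):

#include <stdint.h>
#include <stdbool.h>

/* Multiply two 32-bit counts, reporting failure instead of wrapping. */
static bool checked_mul_u32 (uint32_t *out, uint32_t a, uint32_t b) {
	uint64_t r = (uint64_t)a * (uint64_t)b;	/* widen so the product cannot wrap */
	if (r > UINT32_MAX) {
		return false;
	}
	*out = (uint32_t)r;
	return true;
}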
678
tee_svc_cryp.c
C
syscall_obj_generate_key
// SPDX-License-Identifier: BSD-2-Clause /* * Copyright (c) 2014, STMicroelectronics International N.V. */ #include <assert.h> #include <compiler.h> #include <crypto/crypto.h> #include <kernel/tee_ta_manager.h> #include <mm/tee_mmu.h> #include <string_ext.h> #include <string.h> #include <sys/queue.h> #include <tee_api_types.h> #include <tee/tee_cryp_utl.h> #include <tee/tee_obj.h> #include <tee/tee_svc_cryp.h> #include <tee/tee_svc.h> #include <trace.h> #include <utee_defines.h> #include <util.h> #include <tee_api_defines_extensions.h> #if defined(CFG_CRYPTO_HKDF) #include <tee/tee_cryp_hkdf.h> #endif #if defined(CFG_CRYPTO_CONCAT_KDF) #include <tee/tee_cryp_concat_kdf.h> #endif #if defined(CFG_CRYPTO_PBKDF2) #include <tee/tee_cryp_pbkdf2.h> #endif typedef void (*tee_cryp_ctx_finalize_func_t) (void *ctx, uint32_t algo); struct tee_cryp_state { TAILQ_ENTRY(tee_cryp_state) link; uint32_t algo; uint32_t mode; vaddr_t key1; vaddr_t key2; void *ctx; tee_cryp_ctx_finalize_func_t ctx_finalize; }; struct tee_cryp_obj_secret { uint32_t key_size; uint32_t alloc_size; /* * Pseudo code visualize layout of structure * Next follows data, such as: * uint8_t data[alloc_size] * key_size must never exceed alloc_size */ }; #define TEE_TYPE_ATTR_OPTIONAL 0x0 #define TEE_TYPE_ATTR_REQUIRED 0x1 #define TEE_TYPE_ATTR_OPTIONAL_GROUP 0x2 #define TEE_TYPE_ATTR_SIZE_INDICATOR 0x4 #define TEE_TYPE_ATTR_GEN_KEY_OPT 0x8 #define TEE_TYPE_ATTR_GEN_KEY_REQ 0x10 /* Handle storing of generic secret keys of varying lengths */ #define ATTR_OPS_INDEX_SECRET 0 /* Convert to/from big-endian byte array and provider-specific bignum */ #define ATTR_OPS_INDEX_BIGNUM 1 /* Convert to/from value attribute depending on direction */ #define ATTR_OPS_INDEX_VALUE 2 struct tee_cryp_obj_type_attrs { uint32_t attr_id; uint16_t flags; uint16_t ops_index; uint16_t raw_offs; uint16_t raw_size; }; #define RAW_DATA(_x, _y) \ .raw_offs = offsetof(_x, _y), .raw_size = MEMBER_SIZE(_x, _y) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_secret_value_attrs[] = { { .attr_id = TEE_ATTR_SECRET_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_pub_key_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, e) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, e) }, { .attr_id = TEE_ATTR_RSA_PRIVATE_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, d) }, { .attr_id = TEE_ATTR_RSA_PRIME1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, p) }, { .attr_id = TEE_ATTR_RSA_PRIME2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, q) }, { .attr_id = TEE_ATTR_RSA_EXPONENT1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = 
ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, dp) }, { .attr_id = TEE_ATTR_RSA_EXPONENT2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, dq) }, { .attr_id = TEE_ATTR_RSA_COEFFICIENT, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, qp) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_pub_key_attrs[] = { { .attr_id = TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, g) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, g) }, { .attr_id = TEE_ATTR_DSA_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, x) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dh_keypair_attrs[] = { { .attr_id = TEE_ATTR_DH_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, p) }, { .attr_id = TEE_ATTR_DH_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, g) }, { .attr_id = TEE_ATTR_DH_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, y) }, { .attr_id = TEE_ATTR_DH_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, x) }, { .attr_id = TEE_ATTR_DH_SUBPRIME, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP | TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, q) }, { .attr_id = TEE_ATTR_DH_X_BITS, .flags = TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct dh_keypair, xbits) }, }; #if defined(CFG_CRYPTO_HKDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_hkdf_ikm_attrs[] = { { .attr_id = TEE_ATTR_HKDF_IKM, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_concat_kdf_z_attrs[] = { { .attr_id = TEE_ATTR_CONCAT_KDF_Z, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; 
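/* Each *_attrs table row above binds one GP attribute ID to its flags, to a
 * marshaller in attr_ops[] (selected by ops_index), and to the raw offset and
 * size of the backing struct field (via the RAW_DATA offsetof helper), so the
 * generic populate/extract paths can serve every key type from these tables. */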
#endif #if defined(CFG_CRYPTO_PBKDF2) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_pbkdf2_passwd_attrs[] = { { .attr_id = TEE_ATTR_PBKDF2_PASSWORD, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif static const struct tee_cryp_obj_type_attrs tee_cryp_obj_ecc_pub_key_attrs[] = { { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_public_key, x) }, { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_public_key, y) }, { .attr_id = TEE_ATTR_ECC_CURVE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct ecc_public_key, curve) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_ecc_keypair_attrs[] = { { .attr_id = TEE_ATTR_ECC_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_keypair, d) }, { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_keypair, x) }, { .attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct ecc_keypair, y) }, { .attr_id = TEE_ATTR_ECC_CURVE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct ecc_keypair, curve) }, }; struct tee_cryp_obj_type_props { TEE_ObjectType obj_type; uint16_t min_size; /* may not be smaller than this */ uint16_t max_size; /* may not be larger than this */ uint16_t alloc_size; /* this many bytes are allocated to hold data */ uint8_t quanta; /* may only be an multiple of this */ uint8_t num_type_attrs; const struct tee_cryp_obj_type_attrs *type_attrs; }; #define PROP(obj_type, quanta, min_size, max_size, alloc_size, type_attrs) \ { (obj_type), (min_size), (max_size), (alloc_size), (quanta), \ ARRAY_SIZE(type_attrs), (type_attrs) } static const struct tee_cryp_obj_type_props tee_cryp_obj_props[] = { PROP(TEE_TYPE_AES, 64, 128, 256, /* valid sizes 128, 192, 256 */ 256 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_DES, 56, 56, 56, /* * Valid size 56 without parity, note that we still allocate * for 64 bits since the key is supplied with parity. */ 64 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_DES3, 56, 112, 168, /* * Valid sizes 112, 168 without parity, note that we still * allocate for with space for the parity since the key is * supplied with parity. 
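* (DES keys carry one parity bit per byte, so a 112- or 168-bit key arrives as 128 or 192 bits; the size bookkeeping in tee_svc_cryp_obj_populate_type subtracts those parity bits again via obj_size -= obj_size / 8.)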
*/ 192 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_MD5, 8, 64, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA1, 8, 80, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA224, 8, 112, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA256, 8, 192, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA384, 8, 256, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA512, 8, 256, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_GENERIC_SECRET, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), #if defined(CFG_CRYPTO_HKDF) PROP(TEE_TYPE_HKDF_IKM, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_hkdf_ikm_attrs), #endif #if defined(CFG_CRYPTO_CONCAT_KDF) PROP(TEE_TYPE_CONCAT_KDF_Z, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_concat_kdf_z_attrs), #endif #if defined(CFG_CRYPTO_PBKDF2) PROP(TEE_TYPE_PBKDF2_PASSWORD, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_pbkdf2_passwd_attrs), #endif PROP(TEE_TYPE_RSA_PUBLIC_KEY, 1, 256, CFG_CORE_BIGNUM_MAX_BITS, sizeof(struct rsa_public_key), tee_cryp_obj_rsa_pub_key_attrs), PROP(TEE_TYPE_RSA_KEYPAIR, 1, 256, CFG_CORE_BIGNUM_MAX_BITS, sizeof(struct rsa_keypair), tee_cryp_obj_rsa_keypair_attrs), PROP(TEE_TYPE_DSA_PUBLIC_KEY, 64, 512, 3072, sizeof(struct dsa_public_key), tee_cryp_obj_dsa_pub_key_attrs), PROP(TEE_TYPE_DSA_KEYPAIR, 64, 512, 3072, sizeof(struct dsa_keypair), tee_cryp_obj_dsa_keypair_attrs), PROP(TEE_TYPE_DH_KEYPAIR, 1, 256, 2048, sizeof(struct dh_keypair), tee_cryp_obj_dh_keypair_attrs), PROP(TEE_TYPE_ECDSA_PUBLIC_KEY, 1, 192, 521, sizeof(struct ecc_public_key), tee_cryp_obj_ecc_pub_key_attrs), PROP(TEE_TYPE_ECDSA_KEYPAIR, 1, 192, 521, sizeof(struct ecc_keypair), tee_cryp_obj_ecc_keypair_attrs), PROP(TEE_TYPE_ECDH_PUBLIC_KEY, 1, 192, 521, sizeof(struct ecc_public_key), tee_cryp_obj_ecc_pub_key_attrs), PROP(TEE_TYPE_ECDH_KEYPAIR, 1, 192, 521, sizeof(struct ecc_keypair), tee_cryp_obj_ecc_keypair_attrs), }; struct attr_ops { TEE_Result (*from_user)(void *attr, const void *buffer, size_t size); TEE_Result (*to_user)(void *attr, struct tee_ta_session *sess, void *buffer, uint64_t *size); TEE_Result (*to_binary)(void *attr, void *data, size_t data_len, size_t *offs); bool (*from_binary)(void *attr, const void *data, size_t data_len, size_t *offs); TEE_Result (*from_obj)(void *attr, void *src_attr); void (*free)(void *attr); void (*clear)(void *attr); }; static TEE_Result op_u32_to_binary_helper(uint32_t v, uint8_t *data, size_t data_len, size_t *offs) { uint32_t field; size_t next_offs; if (ADD_OVERFLOW(*offs, sizeof(field), &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) { field = TEE_U32_TO_BIG_ENDIAN(v); memcpy(data + *offs, &field, sizeof(field)); } (*offs) = next_offs; return TEE_SUCCESS; } static bool op_u32_from_binary_helper(uint32_t *v, const uint8_t *data, size_t data_len, size_t *offs) { uint32_t field; if (!data || (*offs + sizeof(field)) > data_len) return false; memcpy(&field, data + *offs, sizeof(field)); *v = TEE_U32_FROM_BIG_ENDIAN(field); (*offs) += sizeof(field); return true; } static TEE_Result 
op_attr_secret_value_from_user(void *attr, const void *buffer, size_t size) { struct tee_cryp_obj_secret *key = attr; /* Data size has to fit in allocated buffer */ if (size > key->alloc_size) return TEE_ERROR_SECURITY; memcpy(key + 1, buffer, size); key->key_size = size; return TEE_SUCCESS; } static TEE_Result op_attr_secret_value_to_user(void *attr, struct tee_ta_session *sess __unused, void *buffer, uint64_t *size) { TEE_Result res; struct tee_cryp_obj_secret *key = attr; uint64_t s; uint64_t key_size; res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; key_size = key->key_size; res = tee_svc_copy_to_user(size, &key_size, sizeof(key_size)); if (res != TEE_SUCCESS) return res; if (s < key->key_size || !buffer) return TEE_ERROR_SHORT_BUFFER; return tee_svc_copy_to_user(buffer, key + 1, key->key_size); } static TEE_Result op_attr_secret_value_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { TEE_Result res; struct tee_cryp_obj_secret *key = attr; size_t next_offs; res = op_u32_to_binary_helper(key->key_size, data, data_len, offs); if (res != TEE_SUCCESS) return res; if (ADD_OVERFLOW(*offs, key->key_size, &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) memcpy((uint8_t *)data + *offs, key + 1, key->key_size); (*offs) = next_offs; return TEE_SUCCESS; } static bool op_attr_secret_value_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { struct tee_cryp_obj_secret *key = attr; uint32_t s; if (!op_u32_from_binary_helper(&s, data, data_len, offs)) return false; if ((*offs + s) > data_len) return false; /* Data size has to fit in allocated buffer */ if (s > key->alloc_size) return false; key->key_size = s; memcpy(key + 1, (const uint8_t *)data + *offs, s); (*offs) += s; return true; } static TEE_Result op_attr_secret_value_from_obj(void *attr, void *src_attr) { struct tee_cryp_obj_secret *key = attr; struct tee_cryp_obj_secret *src_key = src_attr; if (src_key->key_size > key->alloc_size) return TEE_ERROR_BAD_STATE; memcpy(key + 1, src_key + 1, src_key->key_size); key->key_size = src_key->key_size; return TEE_SUCCESS; } static void op_attr_secret_value_clear(void *attr) { struct tee_cryp_obj_secret *key = attr; key->key_size = 0; memset(key + 1, 0, key->alloc_size); } static TEE_Result op_attr_bignum_from_user(void *attr, const void *buffer, size_t size) { struct bignum **bn = attr; return crypto_bignum_bin2bn(buffer, size, *bn); } static TEE_Result op_attr_bignum_to_user(void *attr, struct tee_ta_session *sess, void *buffer, uint64_t *size) { TEE_Result res; struct bignum **bn = attr; uint64_t req_size; uint64_t s; res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; req_size = crypto_bignum_num_bytes(*bn); res = tee_svc_copy_to_user(size, &req_size, sizeof(req_size)); if (res != TEE_SUCCESS) return res; if (!req_size) return TEE_SUCCESS; if (s < req_size || !buffer) return TEE_ERROR_SHORT_BUFFER; /* Check we can access data using supplied user mode pointer */ res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)buffer, req_size); if (res != TEE_SUCCESS) return res; /* * Write the bignum (wich raw data points to) into an array of * bytes (stored in buffer) */ crypto_bignum_bn2bin(*bn, buffer); return TEE_SUCCESS; } static TEE_Result op_attr_bignum_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { TEE_Result res; struct bignum **bn = attr; uint32_t n = 
crypto_bignum_num_bytes(*bn); size_t next_offs; res = op_u32_to_binary_helper(n, data, data_len, offs); if (res != TEE_SUCCESS) return res; if (ADD_OVERFLOW(*offs, n, &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) crypto_bignum_bn2bin(*bn, (uint8_t *)data + *offs); (*offs) = next_offs; return TEE_SUCCESS; } static bool op_attr_bignum_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { struct bignum **bn = attr; uint32_t n; if (!op_u32_from_binary_helper(&n, data, data_len, offs)) return false; if ((*offs + n) > data_len) return false; if (crypto_bignum_bin2bn((const uint8_t *)data + *offs, n, *bn)) return false; (*offs) += n; return true; } static TEE_Result op_attr_bignum_from_obj(void *attr, void *src_attr) { struct bignum **bn = attr; struct bignum **src_bn = src_attr; crypto_bignum_copy(*bn, *src_bn); return TEE_SUCCESS; } static void op_attr_bignum_clear(void *attr) { struct bignum **bn = attr; crypto_bignum_clear(*bn); } static void op_attr_bignum_free(void *attr) { struct bignum **bn = attr; crypto_bignum_free(*bn); *bn = NULL; } static TEE_Result op_attr_value_from_user(void *attr, const void *buffer, size_t size) { uint32_t *v = attr; if (size != sizeof(uint32_t) * 2) return TEE_ERROR_GENERIC; /* "can't happen */ /* Note that only the first value is copied */ memcpy(v, buffer, sizeof(uint32_t)); return TEE_SUCCESS; } static TEE_Result op_attr_value_to_user(void *attr, struct tee_ta_session *sess __unused, void *buffer, uint64_t *size) { TEE_Result res; uint32_t *v = attr; uint64_t s; uint32_t value[2] = { *v }; uint64_t req_size = sizeof(value); res = tee_svc_copy_from_user(&s, size, sizeof(s)); if (res != TEE_SUCCESS) return res; if (s < req_size || !buffer) return TEE_ERROR_SHORT_BUFFER; return tee_svc_copy_to_user(buffer, value, req_size); } static TEE_Result op_attr_value_to_binary(void *attr, void *data, size_t data_len, size_t *offs) { uint32_t *v = attr; return op_u32_to_binary_helper(*v, data, data_len, offs); } static bool op_attr_value_from_binary(void *attr, const void *data, size_t data_len, size_t *offs) { uint32_t *v = attr; return op_u32_from_binary_helper(v, data, data_len, offs); } static TEE_Result op_attr_value_from_obj(void *attr, void *src_attr) { uint32_t *v = attr; uint32_t *src_v = src_attr; *v = *src_v; return TEE_SUCCESS; } static void op_attr_value_clear(void *attr) { uint32_t *v = attr; *v = 0; } static const struct attr_ops attr_ops[] = { [ATTR_OPS_INDEX_SECRET] = { .from_user = op_attr_secret_value_from_user, .to_user = op_attr_secret_value_to_user, .to_binary = op_attr_secret_value_to_binary, .from_binary = op_attr_secret_value_from_binary, .from_obj = op_attr_secret_value_from_obj, .free = op_attr_secret_value_clear, /* not a typo */ .clear = op_attr_secret_value_clear, }, [ATTR_OPS_INDEX_BIGNUM] = { .from_user = op_attr_bignum_from_user, .to_user = op_attr_bignum_to_user, .to_binary = op_attr_bignum_to_binary, .from_binary = op_attr_bignum_from_binary, .from_obj = op_attr_bignum_from_obj, .free = op_attr_bignum_free, .clear = op_attr_bignum_clear, }, [ATTR_OPS_INDEX_VALUE] = { .from_user = op_attr_value_from_user, .to_user = op_attr_value_to_user, .to_binary = op_attr_value_to_binary, .from_binary = op_attr_value_from_binary, .from_obj = op_attr_value_from_obj, .free = op_attr_value_clear, /* not a typo */ .clear = op_attr_value_clear, }, }; TEE_Result syscall_cryp_obj_get_info(unsigned long obj, TEE_ObjectInfo *info) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = 
tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; res = tee_svc_copy_to_user(info, &o->info, sizeof(o->info)); exit: return res; } TEE_Result syscall_cryp_obj_restrict_usage(unsigned long obj, unsigned long usage) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; o->info.objectUsage &= usage; exit: return res; } static int tee_svc_cryp_obj_find_type_attr_idx( uint32_t attr_id, const struct tee_cryp_obj_type_props *type_props) { size_t n; for (n = 0; n < type_props->num_type_attrs; n++) { if (attr_id == type_props->type_attrs[n].attr_id) return n; } return -1; } static const struct tee_cryp_obj_type_props *tee_svc_find_type_props( TEE_ObjectType obj_type) { size_t n; for (n = 0; n < ARRAY_SIZE(tee_cryp_obj_props); n++) { if (tee_cryp_obj_props[n].obj_type == obj_type) return tee_cryp_obj_props + n; } return NULL; } /* Set an attribute on an object */ static void set_attribute(struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return; o->have_attrs |= BIT(idx); } /* Get an attribute on an object */ static uint32_t get_attribute(const struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return 0; return o->have_attrs & BIT(idx); } TEE_Result syscall_cryp_obj_get_attr(unsigned long obj, unsigned long attr_id, void *buffer, uint64_t *size) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; int idx; const struct attr_ops *ops; void *attr; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return TEE_ERROR_ITEM_NOT_FOUND; /* Check that the object is initialized */ if (!(o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED)) return TEE_ERROR_BAD_PARAMETERS; /* Check that getting the attribute is allowed */ if (!(attr_id & TEE_ATTR_BIT_PROTECTED) && !(o->info.objectUsage & TEE_USAGE_EXTRACTABLE)) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) { /* Unknown object type, "can't happen" */ return TEE_ERROR_BAD_STATE; } idx = tee_svc_cryp_obj_find_type_attr_idx(attr_id, type_props); if ((idx < 0) || ((o->have_attrs & (1 << idx)) == 0)) return TEE_ERROR_ITEM_NOT_FOUND; ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; return ops->to_user(attr, sess, buffer, size); } void tee_obj_attr_free(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].free((uint8_t *)o->attr + ta->raw_offs); } } void tee_obj_attr_clear(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct 
tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].clear((uint8_t *)o->attr + ta->raw_offs); } } TEE_Result tee_obj_attr_to_binary(struct tee_obj *o, void *data, size_t *data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; size_t len = data ? *data_len : 0; TEE_Result res; if (o->info.objectType == TEE_TYPE_DATA) { *data_len = 0; return TEE_SUCCESS; /* pure data object */ } if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; res = attr_ops[ta->ops_index].to_binary(attr, data, len, &offs); if (res != TEE_SUCCESS) return res; } *data_len = offs; if (data && offs > len) return TEE_ERROR_SHORT_BUFFER; return TEE_SUCCESS; } TEE_Result tee_obj_attr_from_binary(struct tee_obj *o, const void *data, size_t data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; if (!attr_ops[ta->ops_index].from_binary(attr, data, data_len, &offs)) return TEE_ERROR_CORRUPT_OBJECT; } return TEE_SUCCESS; } TEE_Result tee_obj_attr_copy_from(struct tee_obj *o, const struct tee_obj *src) { TEE_Result res; const struct tee_cryp_obj_type_props *tp; const struct tee_cryp_obj_type_attrs *ta; size_t n; uint32_t have_attrs = 0; void *attr; void *src_attr; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; if (o->info.objectType == src->info.objectType) { have_attrs = src->have_attrs; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + ta->raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } else { const struct tee_cryp_obj_type_props *tp_src; int idx; if (o->info.objectType == TEE_TYPE_RSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_RSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_DSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_DSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDH_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDH_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else { return TEE_ERROR_BAD_PARAMETERS; } tp_src = tee_svc_find_type_props(src->info.objectType); if (!tp_src) return TEE_ERROR_BAD_STATE; have_attrs = BIT32(tp->num_type_attrs) - 1; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; idx = tee_svc_cryp_obj_find_type_attr_idx(ta->attr_id, tp_src); if (idx < 0) return TEE_ERROR_BAD_STATE; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + tp_src->type_attrs[idx].raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } 
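/* At this point every attribute of the destination type is populated:
 * copied one-to-one when source and destination types match, or projected
 * from the keypair's identically named attributes when a public key is
 * being extracted from a keypair. */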
o->have_attrs = have_attrs; return TEE_SUCCESS; } TEE_Result tee_obj_set_type(struct tee_obj *o, uint32_t obj_type, size_t max_key_size) { TEE_Result res = TEE_SUCCESS; const struct tee_cryp_obj_type_props *type_props; /* Can only set type for newly allocated objs */ if (o->attr) return TEE_ERROR_BAD_STATE; /* * Verify that maxKeySize is supported and find out how * much should be allocated. */ if (obj_type == TEE_TYPE_DATA) { if (max_key_size) return TEE_ERROR_NOT_SUPPORTED; } else { /* Find description of object */ type_props = tee_svc_find_type_props(obj_type); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (max_key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; o->attr = calloc(1, type_props->alloc_size); if (!o->attr) return TEE_ERROR_OUT_OF_MEMORY; } /* If we have a key structure, pre-allocate the bignums inside */ switch (obj_type) { case TEE_TYPE_RSA_PUBLIC_KEY: res = crypto_acipher_alloc_rsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_RSA_KEYPAIR: res = crypto_acipher_alloc_rsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DSA_PUBLIC_KEY: res = crypto_acipher_alloc_dsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_DSA_KEYPAIR: res = crypto_acipher_alloc_dsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DH_KEYPAIR: res = crypto_acipher_alloc_dh_keypair(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_PUBLIC_KEY: case TEE_TYPE_ECDH_PUBLIC_KEY: res = crypto_acipher_alloc_ecc_public_key(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = crypto_acipher_alloc_ecc_keypair(o->attr, max_key_size); break; default: if (obj_type != TEE_TYPE_DATA) { struct tee_cryp_obj_secret *key = o->attr; key->alloc_size = type_props->alloc_size - sizeof(*key); } break; } if (res != TEE_SUCCESS) return res; o->info.objectType = obj_type; o->info.maxKeySize = max_key_size; o->info.objectUsage = TEE_USAGE_DEFAULT; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_alloc(unsigned long obj_type, unsigned long max_key_size, uint32_t *obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; if (obj_type == TEE_TYPE_DATA) return TEE_ERROR_NOT_SUPPORTED; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; o = tee_obj_alloc(); if (!o) return TEE_ERROR_OUT_OF_MEMORY; res = tee_obj_set_type(o, obj_type, max_key_size); if (res != TEE_SUCCESS) { tee_obj_free(o); return res; } tee_obj_add(to_user_ta_ctx(sess->ctx), o); res = tee_svc_copy_kaddr_to_uref(obj, o); if (res != TEE_SUCCESS) tee_obj_close(to_user_ta_ctx(sess->ctx), o); return res; } TEE_Result syscall_cryp_obj_close(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* * If it's busy it's used by an operation, a client should never have * this handle. 
*/ if (o->busy) return TEE_ERROR_ITEM_NOT_FOUND; tee_obj_close(to_user_ta_ctx(sess->ctx), o); return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_reset(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) == 0) { tee_obj_attr_clear(o); o->info.keySize = 0; o->info.objectUsage = TEE_USAGE_DEFAULT; } else { return TEE_ERROR_BAD_PARAMETERS; } /* the object is no more initialized */ o->info.handleFlags &= ~TEE_HANDLE_FLAG_INITIALIZED; return TEE_SUCCESS; } static TEE_Result copy_in_attrs(struct user_ta_ctx *utc, const struct utee_attribute *usr_attrs, uint32_t attr_count, TEE_Attribute *attrs) { TEE_Result res; uint32_t n; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)usr_attrs, attr_count * sizeof(struct utee_attribute)); if (res != TEE_SUCCESS) return res; for (n = 0; n < attr_count; n++) { attrs[n].attributeID = usr_attrs[n].attribute_id; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) { attrs[n].content.value.a = usr_attrs[n].a; attrs[n].content.value.b = usr_attrs[n].b; } else { uintptr_t buf = usr_attrs[n].a; size_t len = usr_attrs[n].b; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, buf, len); if (res != TEE_SUCCESS) return res; attrs[n].content.ref.buffer = (void *)buf; attrs[n].content.ref.length = len; } } return TEE_SUCCESS; } enum attr_usage { ATTR_USAGE_POPULATE, ATTR_USAGE_GENERATE_KEY }; static TEE_Result tee_svc_cryp_check_attr(enum attr_usage usage, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { uint32_t required_flag; uint32_t opt_flag; bool all_opt_needed; uint32_t req_attrs = 0; uint32_t opt_grp_attrs = 0; uint32_t attrs_found = 0; size_t n; uint32_t bit; uint32_t flags; int idx; if (usage == ATTR_USAGE_POPULATE) { required_flag = TEE_TYPE_ATTR_REQUIRED; opt_flag = TEE_TYPE_ATTR_OPTIONAL_GROUP; all_opt_needed = true; } else { required_flag = TEE_TYPE_ATTR_GEN_KEY_REQ; opt_flag = TEE_TYPE_ATTR_GEN_KEY_OPT; all_opt_needed = false; } /* * First find out which attributes are required and which belong to * the optional group */ for (n = 0; n < type_props->num_type_attrs; n++) { bit = 1 << n; flags = type_props->type_attrs[n].flags; if (flags & required_flag) req_attrs |= bit; else if (flags & opt_flag) opt_grp_attrs |= bit; } /* * Verify that all required attributes are in place and * that the same attribute isn't repeated. */ for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; bit = 1 << idx; /* attribute not repeated */ if ((attrs_found & bit) != 0) return TEE_ERROR_ITEM_NOT_FOUND; attrs_found |= bit; } /* Required attribute missing */ if ((attrs_found & req_attrs) != req_attrs) return TEE_ERROR_ITEM_NOT_FOUND; /* * If the flag says that "if one of the optional attributes are included * all of them has to be included" this must be checked. 
*/ if (all_opt_needed && (attrs_found & opt_grp_attrs) != 0 && (attrs_found & opt_grp_attrs) != opt_grp_attrs) return TEE_ERROR_ITEM_NOT_FOUND; return TEE_SUCCESS; } static TEE_Result get_ec_key_size(uint32_t curve, size_t *key_size) { switch (curve) { case TEE_ECC_CURVE_NIST_P192: *key_size = 192; break; case TEE_ECC_CURVE_NIST_P224: *key_size = 224; break; case TEE_ECC_CURVE_NIST_P256: *key_size = 256; break; case TEE_ECC_CURVE_NIST_P384: *key_size = 384; break; case TEE_ECC_CURVE_NIST_P521: *key_size = 521; break; default: return TEE_ERROR_NOT_SUPPORTED; } return TEE_SUCCESS; } static TEE_Result tee_svc_cryp_obj_populate_type( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { TEE_Result res; uint32_t have_attrs = 0; size_t obj_size = 0; size_t n; int idx; const struct attr_ops *ops; void *attr; for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; have_attrs |= BIT32(idx); ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) res = ops->from_user(attr, &attrs[n].content.value, sizeof(attrs[n].content.value)); else res = ops->from_user(attr, attrs[n].content.ref.buffer, attrs[n].content.ref.length); if (res != TEE_SUCCESS) return res; /* * First attr_idx signifies the attribute that gives the size * of the object */ if (type_props->type_attrs[idx].flags & TEE_TYPE_ATTR_SIZE_INDICATOR) { /* * For ECDSA/ECDH we need to translate curve into * object size */ if (attrs[n].attributeID == TEE_ATTR_ECC_CURVE) { res = get_ec_key_size(attrs[n].content.value.a, &obj_size); if (res != TEE_SUCCESS) return res; } else { obj_size += (attrs[n].content.ref.length * 8); } } } /* * We have to do it like this because the parity bits aren't counted * when telling the size of the key in bits. 
*/ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) obj_size -= obj_size / 8; /* Exclude parity in size of key */ o->have_attrs = have_attrs; o->info.keySize = obj_size; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_populate(unsigned long obj, struct utee_attribute *usr_attrs, unsigned long attr_count) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *attrs = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_IMPLEMENTED; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), attr_count, &alloc_size)) return TEE_ERROR_OVERFLOW; attrs = malloc(alloc_size); if (!attrs) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count, attrs); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props, attrs, attr_count); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count); if (res == TEE_SUCCESS) o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; out: free(attrs); return res; } TEE_Result syscall_cryp_obj_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *dst_o; struct tee_obj *src_o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(dst), &dst_o); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(src), &src_o); if (res != TEE_SUCCESS) return res; if ((src_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; res = tee_obj_attr_copy_from(dst_o, src_o); if (res != TEE_SUCCESS) return res; dst_o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; dst_o->info.keySize = src_o->info.keySize; dst_o->info.objectUsage = src_o->info.objectUsage; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_rsa( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct rsa_keypair *key = o->attr; uint32_t e = TEE_U32_TO_BIG_ENDIAN(65537); /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; if (!get_attribute(o, type_props, TEE_ATTR_RSA_PUBLIC_EXPONENT)) crypto_bignum_bin2bn((const uint8_t *)&e, sizeof(e), key->e); res = crypto_acipher_gen_rsa_key(key, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dsa( struct tee_obj *o, const struct 
tee_cryp_obj_type_props *type_props, uint32_t key_size) { TEE_Result res; res = crypto_acipher_gen_dsa_key(o->attr, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dh( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct dh_keypair *tee_dh_key; struct bignum *dh_q = NULL; uint32_t dh_xbits = 0; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_dh_key = (struct dh_keypair *)o->attr; if (get_attribute(o, type_props, TEE_ATTR_DH_SUBPRIME)) dh_q = tee_dh_key->q; if (get_attribute(o, type_props, TEE_ATTR_DH_X_BITS)) dh_xbits = tee_dh_key->xbits; res = crypto_acipher_gen_dh_key(tee_dh_key, dh_q, dh_xbits); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_DH_PUBLIC_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_X_BITS); return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_ecc( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct ecc_keypair *tee_ecc_key; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_ecc_key = (struct ecc_keypair *)o->attr; res = crypto_acipher_gen_ecc_key(tee_ecc_key); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_ECC_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_X); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_Y); set_attribute(o, type_props, TEE_ATTR_ECC_CURVE); return TEE_SUCCESS; } TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) 
goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; } static TEE_Result tee_svc_cryp_get_state(struct tee_ta_session *sess, uint32_t state_id, struct tee_cryp_state **state) { struct tee_cryp_state *s; struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx); TAILQ_FOREACH(s, &utc->cryp_states, link) { if (state_id == (vaddr_t)s) { *state = s; return TEE_SUCCESS; } } return TEE_ERROR_BAD_PARAMETERS; } static void cryp_state_free(struct user_ta_ctx *utc, struct tee_cryp_state *cs) { struct tee_obj *o; if (tee_obj_get(utc, cs->key1, &o) == TEE_SUCCESS) tee_obj_close(utc, o); if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) tee_obj_close(utc, o); TAILQ_REMOVE(&utc->cryp_states, cs, link); if (cs->ctx_finalize != NULL) cs->ctx_finalize(cs->ctx, cs->algo); switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_CIPHER: crypto_cipher_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_AE: crypto_authenc_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_DIGEST: crypto_hash_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_MAC: crypto_mac_free_ctx(cs->ctx, cs->algo); break; default: assert(!cs->ctx); } free(cs); } static TEE_Result tee_svc_cryp_check_key_type(const struct tee_obj *o, uint32_t algo, TEE_OperationMode mode) { uint32_t req_key_type; uint32_t req_key_type2 = 0; switch (TEE_ALG_GET_MAIN_ALG(algo)) { case TEE_MAIN_ALGO_MD5: req_key_type = TEE_TYPE_HMAC_MD5; break; case TEE_MAIN_ALGO_SHA1: req_key_type = TEE_TYPE_HMAC_SHA1; break; case TEE_MAIN_ALGO_SHA224: req_key_type = TEE_TYPE_HMAC_SHA224; break; case TEE_MAIN_ALGO_SHA256: req_key_type = TEE_TYPE_HMAC_SHA256; break; case TEE_MAIN_ALGO_SHA384: req_key_type = TEE_TYPE_HMAC_SHA384; break; case TEE_MAIN_ALGO_SHA512: req_key_type = TEE_TYPE_HMAC_SHA512; break; case TEE_MAIN_ALGO_AES: 
req_key_type = TEE_TYPE_AES; break; case TEE_MAIN_ALGO_DES: req_key_type = TEE_TYPE_DES; break; case TEE_MAIN_ALGO_DES3: req_key_type = TEE_TYPE_DES3; break; case TEE_MAIN_ALGO_RSA: req_key_type = TEE_TYPE_RSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_RSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DSA: req_key_type = TEE_TYPE_DSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_DSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DH: req_key_type = TEE_TYPE_DH_KEYPAIR; break; case TEE_MAIN_ALGO_ECDSA: req_key_type = TEE_TYPE_ECDSA_KEYPAIR; if (mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_ECDSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_ECDH: req_key_type = TEE_TYPE_ECDH_KEYPAIR; break; #if defined(CFG_CRYPTO_HKDF) case TEE_MAIN_ALGO_HKDF: req_key_type = TEE_TYPE_HKDF_IKM; break; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) case TEE_MAIN_ALGO_CONCAT_KDF: req_key_type = TEE_TYPE_CONCAT_KDF_Z; break; #endif #if defined(CFG_CRYPTO_PBKDF2) case TEE_MAIN_ALGO_PBKDF2: req_key_type = TEE_TYPE_PBKDF2_PASSWORD; break; #endif default: return TEE_ERROR_BAD_PARAMETERS; } if (req_key_type != o->info.objectType && req_key_type2 != o->info.objectType) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } TEE_Result syscall_cryp_state_alloc(unsigned long algo, unsigned long mode, unsigned long key1, unsigned long key2, uint32_t *state) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o1 = NULL; struct tee_obj *o2 = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); if (key1 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key1), &o1); if (res != TEE_SUCCESS) return res; if (o1->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o1, algo, mode); if (res != TEE_SUCCESS) return res; } if (key2 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key2), &o2); if (res != TEE_SUCCESS) return res; if (o2->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o2, algo, mode); if (res != TEE_SUCCESS) return res; } cs = calloc(1, sizeof(struct tee_cryp_state)); if (!cs) return TEE_ERROR_OUT_OF_MEMORY; TAILQ_INSERT_TAIL(&utc->cryp_states, cs, link); cs->algo = algo; cs->mode = mode; switch (TEE_ALG_GET_CLASS(algo)) { case TEE_OPERATION_EXTENSION: #ifdef CFG_CRYPTO_RSASSA_NA1 if (algo == TEE_ALG_RSASSA_PKCS1_V1_5) goto rsassa_na1; #endif res = TEE_ERROR_NOT_SUPPORTED; break; case TEE_OPERATION_CIPHER: if ((algo == TEE_ALG_AES_XTS && (key1 == 0 || key2 == 0)) || (algo != TEE_ALG_AES_XTS && (key1 == 0 || key2 != 0))) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_cipher_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_AE: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_authenc_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_MAC: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_mac_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_DIGEST: if (key1 != 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_hash_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_ASYMMETRIC_CIPHER: case TEE_OPERATION_ASYMMETRIC_SIGNATURE: rsassa_na1: __maybe_unused if (key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; case 
TEE_OPERATION_KEY_DERIVATION: if (key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; default: res = TEE_ERROR_NOT_SUPPORTED; break; } if (res != TEE_SUCCESS) goto out; res = tee_svc_copy_kaddr_to_uref(state, cs); if (res != TEE_SUCCESS) goto out; /* Register keys */ if (o1 != NULL) { o1->busy = true; cs->key1 = (vaddr_t)o1; } if (o2 != NULL) { o2->busy = true; cs->key2 = (vaddr_t)o2; } out: if (res != TEE_SUCCESS) cryp_state_free(utc, cs); return res; } TEE_Result syscall_cryp_state_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_cryp_state *cs_dst; struct tee_cryp_state *cs_src; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(dst), &cs_dst); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(src), &cs_src); if (res != TEE_SUCCESS) return res; if (cs_dst->algo != cs_src->algo || cs_dst->mode != cs_src->mode) return TEE_ERROR_BAD_PARAMETERS; switch (TEE_ALG_GET_CLASS(cs_src->algo)) { case TEE_OPERATION_CIPHER: crypto_cipher_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_AE: crypto_authenc_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_DIGEST: crypto_hash_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_MAC: crypto_mac_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; default: return TEE_ERROR_BAD_STATE; } return TEE_SUCCESS; } void tee_svc_cryp_free_states(struct user_ta_ctx *utc) { struct tee_cryp_state_head *states = &utc->cryp_states; while (!TAILQ_EMPTY(states)) cryp_state_free(utc, TAILQ_FIRST(states)); } TEE_Result syscall_cryp_state_free(unsigned long state) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; cryp_state_free(to_user_ta_ctx(sess->ctx), cs); return TEE_SUCCESS; } TEE_Result syscall_hash_init(unsigned long state, const void *iv __maybe_unused, size_t iv_len __maybe_unused) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = crypto_hash_init(cs->ctx, cs->algo); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: { struct tee_obj *o; struct tee_cryp_obj_secret *key; res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key = (struct tee_cryp_obj_secret *)o->attr; res = crypto_mac_init(cs->ctx, cs->algo, (void *)(key + 1), key->key_size); if (res != TEE_SUCCESS) return res; break; } default: return TEE_ERROR_BAD_PARAMETERS; } return TEE_SUCCESS; } TEE_Result syscall_hash_update(unsigned long state, const void *chunk, size_t chunk_size) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; /* No data, but size provided isn't valid parameters. */ if (!chunk && chunk_size) return TEE_ERROR_BAD_PARAMETERS; /* Zero length hash is valid, but nothing we need to do. 
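	 * Returning early here also skips the user-memory access check
	 * below, which is safe since no bytes will be read from the chunk.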
	 */
	if (!chunk_size)
		return TEE_SUCCESS;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)chunk, chunk_size);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	switch (TEE_ALG_GET_CLASS(cs->algo)) {
	case TEE_OPERATION_DIGEST:
		res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size);
		if (res != TEE_SUCCESS)
			return res;
		break;
	case TEE_OPERATION_MAC:
		res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size);
		if (res != TEE_SUCCESS)
			return res;
		break;
	default:
		return TEE_ERROR_BAD_PARAMETERS;
	}

	return TEE_SUCCESS;
}

TEE_Result syscall_hash_final(unsigned long state, const void *chunk,
			      size_t chunk_size, void *hash,
			      uint64_t *hash_len)
{
	TEE_Result res, res2;
	size_t hash_size;
	uint64_t hlen;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;

	/* No data together with a non-zero size is not a valid combination. */
	if (!chunk && chunk_size)
		return TEE_ERROR_BAD_PARAMETERS;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)chunk, chunk_size);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_copy_from_user(&hlen, hash_len, sizeof(hlen));
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)hash, hlen);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	switch (TEE_ALG_GET_CLASS(cs->algo)) {
	case TEE_OPERATION_DIGEST:
		res = tee_hash_get_digest_size(cs->algo, &hash_size);
		if (res != TEE_SUCCESS)
			return res;
		/*
		 * Compare against the size already copied in above, not
		 * against *hash_len: dereferencing the user pointer again
		 * would let user space change the value after it has been
		 * checked.
		 */
		if (hlen < hash_size) {
			res = TEE_ERROR_SHORT_BUFFER;
			goto out;
		}
		if (chunk_size) {
			res = crypto_hash_update(cs->ctx, cs->algo, chunk,
						 chunk_size);
			if (res != TEE_SUCCESS)
				return res;
		}
		res = crypto_hash_final(cs->ctx, cs->algo, hash, hash_size);
		if (res != TEE_SUCCESS)
			return res;
		break;
	case TEE_OPERATION_MAC:
		res = tee_mac_get_digest_size(cs->algo, &hash_size);
		if (res != TEE_SUCCESS)
			return res;
		if (hlen < hash_size) {
			res = TEE_ERROR_SHORT_BUFFER;
			goto out;
		}
		if (chunk_size) {
			res = crypto_mac_update(cs->ctx, cs->algo, chunk,
						chunk_size);
			if (res != TEE_SUCCESS)
				return res;
		}
		res = crypto_mac_final(cs->ctx, cs->algo, hash, hash_size);
		if (res != TEE_SUCCESS)
			return res;
		break;
	default:
		return TEE_ERROR_BAD_PARAMETERS;
	}
out:
	hlen = hash_size;
	res2 = tee_svc_copy_to_user(hash_len, &hlen, sizeof(*hash_len));
	if (res2 != TEE_SUCCESS)
		return res2;
	return res;
}

TEE_Result syscall_cipher_init(unsigned long state, const void *iv,
			       size_t iv_len)
{
	TEE_Result res;
	struct tee_cryp_state *cs;
	struct tee_ta_session *sess;
	struct tee_obj *o;
	struct tee_cryp_obj_secret *key1;
	struct user_ta_ctx *utc;

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;
	utc = to_user_ta_ctx(sess->ctx);

	res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_check_access_rights(utc,
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)iv, iv_len);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_obj_get(utc, cs->key1, &o);
	if (res != TEE_SUCCESS)
		return res;
	if ((o->info.handleFlags &
TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key1 = o->attr; if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) { struct tee_cryp_obj_secret *key2 = o->attr; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, (uint8_t *)(key2 + 1), key2->key_size, iv, iv_len); } else { res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, NULL, 0, iv, iv_len); } if (res != TEE_SUCCESS) return res; cs->ctx_finalize = crypto_cipher_final; return TEE_SUCCESS; } static TEE_Result tee_svc_cipher_update_helper(unsigned long state, bool last_block, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (src_len > 0) { /* Permit src_len == 0 to finalize the operation */ res = tee_do_cipher_update(cs->ctx, cs->algo, cs->mode, last_block, src, src_len, dst); } if (last_block && cs->ctx_finalize != NULL) { cs->ctx_finalize(cs->ctx, cs->algo); cs->ctx_finalize = NULL; } out: if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) && dst_len != NULL) { TEE_Result res2; dlen = src_len; res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_cipher_update(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, false /* last_block */, src, src_len, dst, dst_len); } TEE_Result syscall_cipher_final(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, true /* last_block */, src, src_len, dst, dst_len); } #if defined(CFG_CRYPTO_HKDF) static TEE_Result get_hkdf_params(const TEE_Attribute *params, uint32_t param_count, void **salt, size_t *salt_len, void **info, size_t *info_len, size_t *okm_len) { size_t n; enum { SALT = 0x1, LENGTH = 0x2, INFO = 0x4 }; uint8_t found = 0; *salt = *info = NULL; *salt_len = *info_len = *okm_len = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_HKDF_SALT: if (!(found & SALT)) { *salt = params[n].content.ref.buffer; *salt_len = params[n].content.ref.length; found |= SALT; } break; case TEE_ATTR_HKDF_OKM_LENGTH: if (!(found & LENGTH)) { *okm_len = params[n].content.value.a; found |= LENGTH; } break; case TEE_ATTR_HKDF_INFO: if (!(found & INFO)) { *info = params[n].content.ref.buffer; *info_len = params[n].content.ref.length; found |= INFO; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if (!(found & LENGTH)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif 
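/*
 * Illustrative sketch (not compiled): a TA-side attribute list that
 * get_hkdf_params() above accepts. The buffers and the 32-byte output
 * length are hypothetical values.
 *
 *	TEE_Attribute attrs[3];
 *
 *	TEE_InitRefAttribute(&attrs[0], TEE_ATTR_HKDF_SALT, salt, salt_len);
 *	TEE_InitRefAttribute(&attrs[1], TEE_ATTR_HKDF_INFO, info, info_len);
 *	TEE_InitValueAttribute(&attrs[2], TEE_ATTR_HKDF_OKM_LENGTH, 32, 0);
 *
 * Only TEE_ATTR_HKDF_OKM_LENGTH is mandatory; SALT and INFO are
 * optional, duplicates after the first occurrence are ignored and any
 * other attribute ID is rejected with TEE_ERROR_BAD_PARAMETERS.
 */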
#if defined(CFG_CRYPTO_CONCAT_KDF) static TEE_Result get_concat_kdf_params(const TEE_Attribute *params, uint32_t param_count, void **other_info, size_t *other_info_len, size_t *derived_key_len) { size_t n; enum { LENGTH = 0x1, INFO = 0x2 }; uint8_t found = 0; *other_info = NULL; *other_info_len = *derived_key_len = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_CONCAT_KDF_OTHER_INFO: if (!(found & INFO)) { *other_info = params[n].content.ref.buffer; *other_info_len = params[n].content.ref.length; found |= INFO; } break; case TEE_ATTR_CONCAT_KDF_DKM_LENGTH: if (!(found & LENGTH)) { *derived_key_len = params[n].content.value.a; found |= LENGTH; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if (!(found & LENGTH)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif #if defined(CFG_CRYPTO_PBKDF2) static TEE_Result get_pbkdf2_params(const TEE_Attribute *params, uint32_t param_count, void **salt, size_t *salt_len, size_t *derived_key_len, size_t *iteration_count) { size_t n; enum { SALT = 0x1, LENGTH = 0x2, COUNT = 0x4 }; uint8_t found = 0; *salt = NULL; *salt_len = *derived_key_len = *iteration_count = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_PBKDF2_SALT: if (!(found & SALT)) { *salt = params[n].content.ref.buffer; *salt_len = params[n].content.ref.length; found |= SALT; } break; case TEE_ATTR_PBKDF2_DKM_LENGTH: if (!(found & LENGTH)) { *derived_key_len = params[n].content.value.a; found |= LENGTH; } break; case TEE_ATTR_PBKDF2_ITERATION_COUNT: if (!(found & COUNT)) { *iteration_count = params[n].content.value.a; found |= COUNT; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if ((found & (LENGTH|COUNT)) != (LENGTH|COUNT)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif TEE_Result syscall_cryp_derive_key(unsigned long state, const struct utee_attribute *usr_params, unsigned long param_count, unsigned long derived_key) { TEE_Result res = TEE_ERROR_NOT_SUPPORTED; struct tee_ta_session *sess; struct tee_obj *ko; struct tee_obj *so; struct tee_cryp_state *cs; struct tee_cryp_obj_secret *sk; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *params = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; /* Get key set in operation */ res = tee_obj_get(utc, cs->key1, &ko); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, tee_svc_uref_to_vaddr(derived_key), &so); if (res != TEE_SUCCESS) goto out; /* Find information needed about the object to initialize */ sk = so->attr; /* Find description of object */ type_props = tee_svc_find_type_props(so->info.objectType); if (!type_props) { res = TEE_ERROR_NOT_SUPPORTED; goto out; } if (cs->algo == TEE_ALG_DH_DERIVE_SHARED_SECRET) { size_t alloc_size; struct bignum *pub; struct bignum *ss; if (param_count != 1 || params[0].attributeID != TEE_ATTR_DH_PUBLIC_VALUE) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } alloc_size = params[0].content.ref.length * 8; pub = crypto_bignum_allocate(alloc_size); ss = crypto_bignum_allocate(alloc_size); if (pub && ss) { 
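			/*
			 * Import the peer's raw public value as a bignum,
			 * then write the shared secret into the derived-key
			 * object's secret buffer.
			 */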
crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, pub); res = crypto_acipher_dh_shared_secret(ko->attr, pub, ss); if (res == TEE_SUCCESS) { sk->key_size = crypto_bignum_num_bytes(ss); crypto_bignum_bn2bin(ss, (uint8_t *)(sk + 1)); so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } else { res = TEE_ERROR_OUT_OF_MEMORY; } crypto_bignum_free(pub); crypto_bignum_free(ss); } else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_ECDH) { size_t alloc_size; struct ecc_public_key key_public; uint8_t *pt_secret; unsigned long pt_secret_len; if (param_count != 2 || params[0].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_X || params[1].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_Y) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } switch (cs->algo) { case TEE_ALG_ECDH_P192: alloc_size = 192; break; case TEE_ALG_ECDH_P224: alloc_size = 224; break; case TEE_ALG_ECDH_P256: alloc_size = 256; break; case TEE_ALG_ECDH_P384: alloc_size = 384; break; case TEE_ALG_ECDH_P521: alloc_size = 521; break; default: res = TEE_ERROR_NOT_IMPLEMENTED; goto out; } /* Create the public key */ res = crypto_acipher_alloc_ecc_public_key(&key_public, alloc_size); if (res != TEE_SUCCESS) goto out; key_public.curve = ((struct ecc_keypair *)ko->attr)->curve; crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, key_public.x); crypto_bignum_bin2bn(params[1].content.ref.buffer, params[1].content.ref.length, key_public.y); pt_secret = (uint8_t *)(sk + 1); pt_secret_len = sk->alloc_size; res = crypto_acipher_ecc_shared_secret(ko->attr, &key_public, pt_secret, &pt_secret_len); if (res == TEE_SUCCESS) { sk->key_size = pt_secret_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } /* free the public key */ crypto_acipher_free_ecc_public_key(&key_public); } #if defined(CFG_CRYPTO_HKDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_HKDF) { void *salt, *info; size_t salt_len, info_len, okm_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ik = ko->attr; const uint8_t *ikm = (const uint8_t *)(ik + 1); res = get_hkdf_params(params, param_count, &salt, &salt_len, &info, &info_len, &okm_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (okm_len > ik->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_hkdf(hash_id, ikm, ik->key_size, salt, salt_len, info, info_len, (uint8_t *)(sk + 1), okm_len); if (res == TEE_SUCCESS) { sk->key_size = okm_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_CONCAT_KDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_CONCAT_KDF) { void *info; size_t info_len, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *shared_secret = (const uint8_t *)(ss + 1); res = get_concat_kdf_params(params, param_count, &info, &info_len, &derived_key_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_concat_kdf(hash_id, shared_secret, ss->key_size, info, info_len, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; 
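			/* Expose the derived bytes as the secret value */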
set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_PBKDF2) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_PBKDF2) { void *salt; size_t salt_len, iteration_count, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *password = (const uint8_t *)(ss + 1); res = get_pbkdf2_params(params, param_count, &salt, &salt_len, &derived_key_len, &iteration_count); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_pbkdf2(hash_id, password, ss->key_size, salt, salt_len, iteration_count, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif else res = TEE_ERROR_NOT_SUPPORTED; out: free(params); return res; } TEE_Result syscall_cryp_random_number_generate(void *buf, size_t blen) { TEE_Result res; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)buf, blen); if (res != TEE_SUCCESS) return res; res = crypto_rng_read(buf, blen); if (res != TEE_SUCCESS) return res; return res; } TEE_Result syscall_authenc_init(unsigned long state, const void *nonce, size_t nonce_len, size_t tag_len, size_t aad_len, size_t payload_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; struct tee_cryp_obj_secret *key; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key = o->attr; res = crypto_authenc_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key + 1), key->key_size, nonce, nonce_len, tag_len, aad_len, payload_len); if (res != TEE_SUCCESS) return res; cs->ctx_finalize = (tee_cryp_ctx_finalize_func_t)crypto_authenc_final; return TEE_SUCCESS; } TEE_Result syscall_authenc_update_aad(unsigned long state, const void *aad_data, size_t aad_data_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) aad_data, aad_data_len); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = crypto_authenc_update_aad(cs->ctx, cs->algo, cs->mode, aad_data, aad_data_len); if (res != TEE_SUCCESS) return res; return TEE_SUCCESS; } TEE_Result syscall_authenc_update_payload(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; size_t tmp_dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = 
tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) src_data, src_len); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } tmp_dlen = dlen; res = crypto_authenc_update_payload(cs->ctx, cs->algo, cs->mode, src_data, src_len, dst_data, &tmp_dlen); dlen = tmp_dlen; out: if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_authenc_enc_final(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len, void *tag, uint64_t *tag_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; uint64_t tlen = 0; size_t tmp_dlen; size_t tmp_tlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_ENCRYPT) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src_data, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } res = tee_svc_copy_from_user(&tlen, tag_len, sizeof(tlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)tag, tlen); if (res != TEE_SUCCESS) return res; tmp_dlen = dlen; tmp_tlen = tlen; res = crypto_authenc_enc_final(cs->ctx, cs->algo, src_data, src_len, dst_data, &tmp_dlen, tag, &tmp_tlen); dlen = tmp_dlen; tlen = tmp_tlen; out: if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2; if (dst_len != NULL) { res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } res2 = tee_svc_copy_to_user(tag_len, &tlen, sizeof(*tag_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } TEE_Result syscall_authenc_dec_final(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len, const void *tag, size_t tag_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; size_t tmp_dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_DECRYPT) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src_data, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = 
tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)tag, tag_len); if (res != TEE_SUCCESS) return res; tmp_dlen = dlen; res = crypto_authenc_dec_final(cs->ctx, cs->algo, src_data, src_len, dst_data, &tmp_dlen, tag, tag_len); dlen = tmp_dlen; out: if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) && dst_len != NULL) { TEE_Result res2; res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } static int pkcs1_get_salt_len(const TEE_Attribute *params, uint32_t num_params, size_t default_len) { size_t n; assert(default_len < INT_MAX); for (n = 0; n < num_params; n++) { if (params[n].attributeID == TEE_ATTR_RSA_PSS_SALT_LENGTH) { if (params[n].content.value.a < INT_MAX) return params[n].content.value.a; break; } } /* * If salt length isn't provided use the default value which is * the length of the digest. */ return default_len; } TEE_Result syscall_asymm_operate(unsigned long state, const struct utee_attribute *usr_params, size_t num_params, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen64; size_t dlen; struct tee_obj *o; void *label = NULL; size_t label_len = 0; size_t n; int salt_len; TEE_Attribute *params = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights( utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) src_data, src_len); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&dlen64, dst_len, sizeof(dlen64)); if (res != TEE_SUCCESS) return res; dlen = dlen64; res = tee_mmu_check_access_rights( utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) dst_data, dlen); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * num_params); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, num_params, params); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) goto out; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) { res = TEE_ERROR_GENERIC; goto out; } switch (cs->algo) { case TEE_ALG_RSA_NOPAD: if (cs->mode == TEE_MODE_ENCRYPT) { res = crypto_acipher_rsanopad_encrypt(o->attr, src_data, src_len, dst_data, &dlen); } else if (cs->mode == TEE_MODE_DECRYPT) { res = crypto_acipher_rsanopad_decrypt(o->attr, src_data, src_len, dst_data, &dlen); } else { /* * We will panic because "the mode is not compatible * with the function" */ res = TEE_ERROR_GENERIC; } break; case TEE_ALG_RSAES_PKCS1_V1_5: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA1: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA224: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA256: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA384: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA512: for (n = 0; n < num_params; n++) { if (params[n].attributeID == TEE_ATTR_RSA_OAEP_LABEL) { 
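				/* Only the first label attribute is used */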
label = params[n].content.ref.buffer; label_len = params[n].content.ref.length; break; } } if (cs->mode == TEE_MODE_ENCRYPT) { res = crypto_acipher_rsaes_encrypt(cs->algo, o->attr, label, label_len, src_data, src_len, dst_data, &dlen); } else if (cs->mode == TEE_MODE_DECRYPT) { res = crypto_acipher_rsaes_decrypt( cs->algo, o->attr, label, label_len, src_data, src_len, dst_data, &dlen); } else { res = TEE_ERROR_BAD_PARAMETERS; } break; #if defined(CFG_CRYPTO_RSASSA_NA1) case TEE_ALG_RSASSA_PKCS1_V1_5: #endif case TEE_ALG_RSASSA_PKCS1_V1_5_MD5: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA1: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA224: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA256: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA384: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA512: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA1: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA224: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA384: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA512: if (cs->mode != TEE_MODE_SIGN) { res = TEE_ERROR_BAD_PARAMETERS; break; } salt_len = pkcs1_get_salt_len(params, num_params, src_len); res = crypto_acipher_rsassa_sign(cs->algo, o->attr, salt_len, src_data, src_len, dst_data, &dlen); break; case TEE_ALG_DSA_SHA1: case TEE_ALG_DSA_SHA224: case TEE_ALG_DSA_SHA256: res = crypto_acipher_dsa_sign(cs->algo, o->attr, src_data, src_len, dst_data, &dlen); break; case TEE_ALG_ECDSA_P192: case TEE_ALG_ECDSA_P224: case TEE_ALG_ECDSA_P256: case TEE_ALG_ECDSA_P384: case TEE_ALG_ECDSA_P521: res = crypto_acipher_ecc_sign(cs->algo, o->attr, src_data, src_len, dst_data, &dlen); break; default: res = TEE_ERROR_BAD_PARAMETERS; break; } out: free(params); if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2; dlen64 = dlen; res2 = tee_svc_copy_to_user(dst_len, &dlen64, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } TEE_Result syscall_asymm_verify(unsigned long state, const struct utee_attribute *usr_params, size_t num_params, const void *data, size_t data_len, const void *sig, size_t sig_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; size_t hash_size; int salt_len = 0; TEE_Attribute *params = NULL; uint32_t hash_algo; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_VERIFY) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)data, data_len); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)sig, sig_len); if (res != TEE_SUCCESS) return res; params = malloc(sizeof(TEE_Attribute) * num_params); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, num_params, params); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) goto out; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } switch (TEE_ALG_GET_MAIN_ALG(cs->algo)) { case TEE_MAIN_ALGO_RSA: if (cs->algo != TEE_ALG_RSASSA_PKCS1_V1_5) { hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo); res = tee_hash_get_digest_size(hash_algo, &hash_size); if (res != TEE_SUCCESS) break; if (data_len != hash_size) { res = TEE_ERROR_BAD_PARAMETERS; break; } salt_len = 
pkcs1_get_salt_len(params, num_params,
							   hash_size);
		}
		res = crypto_acipher_rsassa_verify(cs->algo, o->attr, salt_len,
						   data, data_len, sig,
						   sig_len);
		break;

	case TEE_MAIN_ALGO_DSA:
		hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo);
		res = tee_hash_get_digest_size(hash_algo, &hash_size);
		if (res != TEE_SUCCESS)
			break;
		/*
		 * Depending on the DSA algorithm (NIST), the digital
		 * signature output size may be truncated to the size of
		 * the subprime Q. The size of Q must be less than or
		 * equal to the output length of the hash algorithm
		 * involved, so the digest may not be larger than that
		 * output length.
		 */
		if (data_len > hash_size) {
			res = TEE_ERROR_BAD_PARAMETERS;
			break;
		}
		res = crypto_acipher_dsa_verify(cs->algo, o->attr, data,
						data_len, sig, sig_len);
		break;

	case TEE_MAIN_ALGO_ECDSA:
		res = crypto_acipher_ecc_verify(cs->algo, o->attr, data,
						data_len, sig, sig_len);
		break;

	default:
		res = TEE_ERROR_NOT_SUPPORTED;
	}

out:
	free(params);
	return res;
}
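/*
 * Illustrative sketch (not compiled): the TA-side call that ends up in
 * syscall_asymm_verify() above, checking an RSA-PSS/SHA-256 signature.
 * The operation handle and buffers are hypothetical.
 *
 *	TEE_Attribute attr;
 *
 *	TEE_InitValueAttribute(&attr, TEE_ATTR_RSA_PSS_SALT_LENGTH, 32, 0);
 *	res = TEE_AsymmetricVerifyDigest(op, &attr, 1, digest, 32,
 *					 sig, sig_len);
 *
 * For the PSS algorithms data_len must equal the digest size, and the
 * salt-length attribute is optional: pkcs1_get_salt_len() falls back
 * to the digest length when it is absent.
 */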
// SPDX-License-Identifier: BSD-2-Clause /* * Copyright (c) 2014, STMicroelectronics International N.V. */ #include <assert.h> #include <compiler.h> #include <crypto/crypto.h> #include <kernel/tee_ta_manager.h> #include <mm/tee_mmu.h> #include <string_ext.h> #include <string.h> #include <sys/queue.h> #include <tee_api_types.h> #include <tee/tee_cryp_utl.h> #include <tee/tee_obj.h> #include <tee/tee_svc_cryp.h> #include <tee/tee_svc.h> #include <trace.h> #include <utee_defines.h> #include <util.h> #include <tee_api_defines_extensions.h> #if defined(CFG_CRYPTO_HKDF) #include <tee/tee_cryp_hkdf.h> #endif #if defined(CFG_CRYPTO_CONCAT_KDF) #include <tee/tee_cryp_concat_kdf.h> #endif #if defined(CFG_CRYPTO_PBKDF2) #include <tee/tee_cryp_pbkdf2.h> #endif typedef void (*tee_cryp_ctx_finalize_func_t) (void *ctx, uint32_t algo); struct tee_cryp_state { TAILQ_ENTRY(tee_cryp_state) link; uint32_t algo; uint32_t mode; vaddr_t key1; vaddr_t key2; void *ctx; tee_cryp_ctx_finalize_func_t ctx_finalize; }; struct tee_cryp_obj_secret { uint32_t key_size; uint32_t alloc_size; /* * Pseudo code visualize layout of structure * Next follows data, such as: * uint8_t data[alloc_size] * key_size must never exceed alloc_size */ }; #define TEE_TYPE_ATTR_OPTIONAL 0x0 #define TEE_TYPE_ATTR_REQUIRED 0x1 #define TEE_TYPE_ATTR_OPTIONAL_GROUP 0x2 #define TEE_TYPE_ATTR_SIZE_INDICATOR 0x4 #define TEE_TYPE_ATTR_GEN_KEY_OPT 0x8 #define TEE_TYPE_ATTR_GEN_KEY_REQ 0x10 /* Handle storing of generic secret keys of varying lengths */ #define ATTR_OPS_INDEX_SECRET 0 /* Convert to/from big-endian byte array and provider-specific bignum */ #define ATTR_OPS_INDEX_BIGNUM 1 /* Convert to/from value attribute depending on direction */ #define ATTR_OPS_INDEX_VALUE 2 struct tee_cryp_obj_type_attrs { uint32_t attr_id; uint16_t flags; uint16_t ops_index; uint16_t raw_offs; uint16_t raw_size; }; #define RAW_DATA(_x, _y) \ .raw_offs = offsetof(_x, _y), .raw_size = MEMBER_SIZE(_x, _y) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_secret_value_attrs[] = { { .attr_id = TEE_ATTR_SECRET_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_pub_key_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_public_key, e) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_rsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_RSA_MODULUS, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, n) }, { .attr_id = TEE_ATTR_RSA_PUBLIC_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, e) }, { .attr_id = TEE_ATTR_RSA_PRIVATE_EXPONENT, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, d) }, { .attr_id = TEE_ATTR_RSA_PRIME1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, p) }, { .attr_id = TEE_ATTR_RSA_PRIME2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, q) }, { .attr_id = TEE_ATTR_RSA_EXPONENT1, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = 
ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, dp) }, { .attr_id = TEE_ATTR_RSA_EXPONENT2, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, dq) }, { .attr_id = TEE_ATTR_RSA_COEFFICIENT, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct rsa_keypair, qp) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_pub_key_attrs[] = { { .attr_id = TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, g) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_public_key, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dsa_keypair_attrs[] = { { .attr_id = TEE_ATTR_DSA_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, p) }, { .attr_id = TEE_ATTR_DSA_SUBPRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, q) }, { .attr_id = TEE_ATTR_DSA_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, g) }, { .attr_id = TEE_ATTR_DSA_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, x) }, { .attr_id = TEE_ATTR_DSA_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dsa_keypair, y) }, }; static const struct tee_cryp_obj_type_attrs tee_cryp_obj_dh_keypair_attrs[] = { { .attr_id = TEE_ATTR_DH_PRIME, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, p) }, { .attr_id = TEE_ATTR_DH_BASE, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_GEN_KEY_REQ, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, g) }, { .attr_id = TEE_ATTR_DH_PUBLIC_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, y) }, { .attr_id = TEE_ATTR_DH_PRIVATE_VALUE, .flags = TEE_TYPE_ATTR_REQUIRED, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, x) }, { .attr_id = TEE_ATTR_DH_SUBPRIME, .flags = TEE_TYPE_ATTR_OPTIONAL_GROUP | TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_BIGNUM, RAW_DATA(struct dh_keypair, q) }, { .attr_id = TEE_ATTR_DH_X_BITS, .flags = TEE_TYPE_ATTR_GEN_KEY_OPT, .ops_index = ATTR_OPS_INDEX_VALUE, RAW_DATA(struct dh_keypair, xbits) }, }; #if defined(CFG_CRYPTO_HKDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_hkdf_ikm_attrs[] = { { .attr_id = TEE_ATTR_HKDF_IKM, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) static const struct tee_cryp_obj_type_attrs tee_cryp_obj_concat_kdf_z_attrs[] = { { .attr_id = TEE_ATTR_CONCAT_KDF_Z, .flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR, .ops_index = ATTR_OPS_INDEX_SECRET, .raw_offs = 0, .raw_size = 0 }, }; 
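/*
 * As with the other raw secrets above, TEE_TYPE_ATTR_SIZE_INDICATOR marks
 * the attribute whose length determines the reported key size of the
 * object; raw_offs/raw_size carry no meaning for the variable-length
 * SECRET representation, hence zero.
 */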
#endif

#if defined(CFG_CRYPTO_PBKDF2)
static const struct tee_cryp_obj_type_attrs
	tee_cryp_obj_pbkdf2_passwd_attrs[] = {
	{
	.attr_id = TEE_ATTR_PBKDF2_PASSWORD,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_SECRET,
	.raw_offs = 0,
	.raw_size = 0
	},
};
#endif

static const struct tee_cryp_obj_type_attrs tee_cryp_obj_ecc_pub_key_attrs[] = {
	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_public_key, x)
	},

	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_public_key, y)
	},

	{
	.attr_id = TEE_ATTR_ECC_CURVE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_VALUE,
	RAW_DATA(struct ecc_public_key, curve)
	},
};

static const struct tee_cryp_obj_type_attrs tee_cryp_obj_ecc_keypair_attrs[] = {
	{
	.attr_id = TEE_ATTR_ECC_PRIVATE_VALUE,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_keypair, d)
	},

	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_X,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_keypair, x)
	},

	{
	.attr_id = TEE_ATTR_ECC_PUBLIC_VALUE_Y,
	.flags = TEE_TYPE_ATTR_REQUIRED,
	.ops_index = ATTR_OPS_INDEX_BIGNUM,
	RAW_DATA(struct ecc_keypair, y)
	},

	{
	.attr_id = TEE_ATTR_ECC_CURVE,
	.flags = TEE_TYPE_ATTR_REQUIRED | TEE_TYPE_ATTR_SIZE_INDICATOR,
	.ops_index = ATTR_OPS_INDEX_VALUE,
	RAW_DATA(struct ecc_keypair, curve)
	},
};

struct tee_cryp_obj_type_props {
	TEE_ObjectType obj_type;
	uint16_t min_size;	/* may not be smaller than this */
	uint16_t max_size;	/* may not be larger than this */
	uint16_t alloc_size;	/* this many bytes are allocated to hold data */
	uint8_t quanta;		/* may only be a multiple of this */
	uint8_t num_type_attrs;
	const struct tee_cryp_obj_type_attrs *type_attrs;
};

#define PROP(obj_type, quanta, min_size, max_size, alloc_size, type_attrs) \
		{ (obj_type), (min_size), (max_size), (alloc_size), (quanta), \
		  ARRAY_SIZE(type_attrs), (type_attrs) }

static const struct tee_cryp_obj_type_props tee_cryp_obj_props[] = {
	PROP(TEE_TYPE_AES, 64, 128, 256,	/* valid sizes 128, 192, 256 */
	     256 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_DES, 56, 56, 56,
	     /*
	      * Valid size 56 without parity, note that we still allocate
	      * for 64 bits since the key is supplied with parity.
	      */
	     64 / 8 + sizeof(struct tee_cryp_obj_secret),
	     tee_cryp_obj_secret_value_attrs),
	PROP(TEE_TYPE_DES3, 56, 112, 168,
	     /*
	      * Valid sizes 112, 168 without parity, note that we still
	      * allocate with space for the parity since the key is
	      * supplied with parity.
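	      * (key_size as passed to syscall_obj_generate_key() counts
	      * only the non-parity bits, hence its
	      * (key_size + key_size / 7) / 8 byte-size conversion.)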
*/ 192 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_MD5, 8, 64, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA1, 8, 80, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA224, 8, 112, 512, 512 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA256, 8, 192, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA384, 8, 256, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_HMAC_SHA512, 8, 256, 1024, 1024 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), PROP(TEE_TYPE_GENERIC_SECRET, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_secret_value_attrs), #if defined(CFG_CRYPTO_HKDF) PROP(TEE_TYPE_HKDF_IKM, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_hkdf_ikm_attrs), #endif #if defined(CFG_CRYPTO_CONCAT_KDF) PROP(TEE_TYPE_CONCAT_KDF_Z, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_concat_kdf_z_attrs), #endif #if defined(CFG_CRYPTO_PBKDF2) PROP(TEE_TYPE_PBKDF2_PASSWORD, 8, 0, 4096, 4096 / 8 + sizeof(struct tee_cryp_obj_secret), tee_cryp_obj_pbkdf2_passwd_attrs), #endif PROP(TEE_TYPE_RSA_PUBLIC_KEY, 1, 256, CFG_CORE_BIGNUM_MAX_BITS, sizeof(struct rsa_public_key), tee_cryp_obj_rsa_pub_key_attrs), PROP(TEE_TYPE_RSA_KEYPAIR, 1, 256, CFG_CORE_BIGNUM_MAX_BITS, sizeof(struct rsa_keypair), tee_cryp_obj_rsa_keypair_attrs), PROP(TEE_TYPE_DSA_PUBLIC_KEY, 64, 512, 3072, sizeof(struct dsa_public_key), tee_cryp_obj_dsa_pub_key_attrs), PROP(TEE_TYPE_DSA_KEYPAIR, 64, 512, 3072, sizeof(struct dsa_keypair), tee_cryp_obj_dsa_keypair_attrs), PROP(TEE_TYPE_DH_KEYPAIR, 1, 256, 2048, sizeof(struct dh_keypair), tee_cryp_obj_dh_keypair_attrs), PROP(TEE_TYPE_ECDSA_PUBLIC_KEY, 1, 192, 521, sizeof(struct ecc_public_key), tee_cryp_obj_ecc_pub_key_attrs), PROP(TEE_TYPE_ECDSA_KEYPAIR, 1, 192, 521, sizeof(struct ecc_keypair), tee_cryp_obj_ecc_keypair_attrs), PROP(TEE_TYPE_ECDH_PUBLIC_KEY, 1, 192, 521, sizeof(struct ecc_public_key), tee_cryp_obj_ecc_pub_key_attrs), PROP(TEE_TYPE_ECDH_KEYPAIR, 1, 192, 521, sizeof(struct ecc_keypair), tee_cryp_obj_ecc_keypair_attrs), }; struct attr_ops { TEE_Result (*from_user)(void *attr, const void *buffer, size_t size); TEE_Result (*to_user)(void *attr, struct tee_ta_session *sess, void *buffer, uint64_t *size); TEE_Result (*to_binary)(void *attr, void *data, size_t data_len, size_t *offs); bool (*from_binary)(void *attr, const void *data, size_t data_len, size_t *offs); TEE_Result (*from_obj)(void *attr, void *src_attr); void (*free)(void *attr); void (*clear)(void *attr); }; static TEE_Result op_u32_to_binary_helper(uint32_t v, uint8_t *data, size_t data_len, size_t *offs) { uint32_t field; size_t next_offs; if (ADD_OVERFLOW(*offs, sizeof(field), &next_offs)) return TEE_ERROR_OVERFLOW; if (data && next_offs <= data_len) { field = TEE_U32_TO_BIG_ENDIAN(v); memcpy(data + *offs, &field, sizeof(field)); } (*offs) = next_offs; return TEE_SUCCESS; } static bool op_u32_from_binary_helper(uint32_t *v, const uint8_t *data, size_t data_len, size_t *offs) { uint32_t field; if (!data || (*offs + sizeof(field)) > data_len) return false; memcpy(&field, data + *offs, sizeof(field)); *v = TEE_U32_FROM_BIG_ENDIAN(field); (*offs) += sizeof(field); return true; } static TEE_Result 
op_attr_secret_value_from_user(void *attr, const void *buffer, size_t size)
{
	struct tee_cryp_obj_secret *key = attr;

	/* Data size has to fit in allocated buffer */
	if (size > key->alloc_size)
		return TEE_ERROR_SECURITY;
	memcpy(key + 1, buffer, size);
	key->key_size = size;
	return TEE_SUCCESS;
}

static TEE_Result op_attr_secret_value_to_user(void *attr,
					       struct tee_ta_session *sess __unused,
					       void *buffer, uint64_t *size)
{
	TEE_Result res;
	struct tee_cryp_obj_secret *key = attr;
	uint64_t s;
	uint64_t key_size;

	res = tee_svc_copy_from_user(&s, size, sizeof(s));
	if (res != TEE_SUCCESS)
		return res;

	key_size = key->key_size;
	res = tee_svc_copy_to_user(size, &key_size, sizeof(key_size));
	if (res != TEE_SUCCESS)
		return res;

	if (s < key->key_size || !buffer)
		return TEE_ERROR_SHORT_BUFFER;

	return tee_svc_copy_to_user(buffer, key + 1, key->key_size);
}

static TEE_Result op_attr_secret_value_to_binary(void *attr, void *data,
						 size_t data_len, size_t *offs)
{
	TEE_Result res;
	struct tee_cryp_obj_secret *key = attr;
	size_t next_offs;

	res = op_u32_to_binary_helper(key->key_size, data, data_len, offs);
	if (res != TEE_SUCCESS)
		return res;

	if (ADD_OVERFLOW(*offs, key->key_size, &next_offs))
		return TEE_ERROR_OVERFLOW;

	if (data && next_offs <= data_len)
		memcpy((uint8_t *)data + *offs, key + 1, key->key_size);
	(*offs) = next_offs;

	return TEE_SUCCESS;
}

static bool op_attr_secret_value_from_binary(void *attr, const void *data,
					     size_t data_len, size_t *offs)
{
	struct tee_cryp_obj_secret *key = attr;
	uint32_t s;

	if (!op_u32_from_binary_helper(&s, data, data_len, offs))
		return false;

	if ((*offs + s) > data_len)
		return false;

	/* Data size has to fit in allocated buffer */
	if (s > key->alloc_size)
		return false;
	key->key_size = s;
	memcpy(key + 1, (const uint8_t *)data + *offs, s);
	(*offs) += s;
	return true;
}

static TEE_Result op_attr_secret_value_from_obj(void *attr, void *src_attr)
{
	struct tee_cryp_obj_secret *key = attr;
	struct tee_cryp_obj_secret *src_key = src_attr;

	if (src_key->key_size > key->alloc_size)
		return TEE_ERROR_BAD_STATE;
	memcpy(key + 1, src_key + 1, src_key->key_size);
	key->key_size = src_key->key_size;
	return TEE_SUCCESS;
}

static void op_attr_secret_value_clear(void *attr)
{
	struct tee_cryp_obj_secret *key = attr;

	key->key_size = 0;
	memset(key + 1, 0, key->alloc_size);
}

static TEE_Result op_attr_bignum_from_user(void *attr, const void *buffer,
					   size_t size)
{
	struct bignum **bn = attr;

	return crypto_bignum_bin2bn(buffer, size, *bn);
}

static TEE_Result op_attr_bignum_to_user(void *attr,
					 struct tee_ta_session *sess,
					 void *buffer, uint64_t *size)
{
	TEE_Result res;
	struct bignum **bn = attr;
	uint64_t req_size;
	uint64_t s;

	res = tee_svc_copy_from_user(&s, size, sizeof(s));
	if (res != TEE_SUCCESS)
		return res;

	req_size = crypto_bignum_num_bytes(*bn);
	res = tee_svc_copy_to_user(size, &req_size, sizeof(req_size));
	if (res != TEE_SUCCESS)
		return res;
	if (!req_size)
		return TEE_SUCCESS;
	if (s < req_size || !buffer)
		return TEE_ERROR_SHORT_BUFFER;

	/* Check we can access data using supplied user mode pointer */
	res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx),
					  TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)buffer, req_size);
	if (res != TEE_SUCCESS)
		return res;
	/*
	 * Write the bignum (which raw data points to) into an array of
	 * bytes (stored in buffer)
	 */
	crypto_bignum_bn2bin(*bn, buffer);
	return TEE_SUCCESS;
}

static TEE_Result op_attr_bignum_to_binary(void *attr, void *data,
					   size_t data_len, size_t *offs)
{
	TEE_Result res;
	struct bignum **bn = attr;
	uint32_t n =
crypto_bignum_num_bytes(*bn);
	size_t next_offs;

	res = op_u32_to_binary_helper(n, data, data_len, offs);
	if (res != TEE_SUCCESS)
		return res;

	if (ADD_OVERFLOW(*offs, n, &next_offs))
		return TEE_ERROR_OVERFLOW;

	if (data && next_offs <= data_len)
		crypto_bignum_bn2bin(*bn, (uint8_t *)data + *offs);
	(*offs) = next_offs;

	return TEE_SUCCESS;
}

static bool op_attr_bignum_from_binary(void *attr, const void *data,
				       size_t data_len, size_t *offs)
{
	struct bignum **bn = attr;
	uint32_t n;

	if (!op_u32_from_binary_helper(&n, data, data_len, offs))
		return false;

	if ((*offs + n) > data_len)
		return false;

	if (crypto_bignum_bin2bn((const uint8_t *)data + *offs, n, *bn))
		return false;
	(*offs) += n;
	return true;
}

static TEE_Result op_attr_bignum_from_obj(void *attr, void *src_attr)
{
	struct bignum **bn = attr;
	struct bignum **src_bn = src_attr;

	crypto_bignum_copy(*bn, *src_bn);
	return TEE_SUCCESS;
}

static void op_attr_bignum_clear(void *attr)
{
	struct bignum **bn = attr;

	crypto_bignum_clear(*bn);
}

static void op_attr_bignum_free(void *attr)
{
	struct bignum **bn = attr;

	crypto_bignum_free(*bn);
	*bn = NULL;
}

static TEE_Result op_attr_value_from_user(void *attr, const void *buffer,
					  size_t size)
{
	uint32_t *v = attr;

	if (size != sizeof(uint32_t) * 2)
		return TEE_ERROR_GENERIC; /* "can't happen" */

	/* Note that only the first value is copied */
	memcpy(v, buffer, sizeof(uint32_t));
	return TEE_SUCCESS;
}

static TEE_Result op_attr_value_to_user(void *attr,
					struct tee_ta_session *sess __unused,
					void *buffer, uint64_t *size)
{
	TEE_Result res;
	uint32_t *v = attr;
	uint64_t s;
	uint32_t value[2] = { *v };
	uint64_t req_size = sizeof(value);

	res = tee_svc_copy_from_user(&s, size, sizeof(s));
	if (res != TEE_SUCCESS)
		return res;

	if (s < req_size || !buffer)
		return TEE_ERROR_SHORT_BUFFER;

	return tee_svc_copy_to_user(buffer, value, req_size);
}

static TEE_Result op_attr_value_to_binary(void *attr, void *data,
					  size_t data_len, size_t *offs)
{
	uint32_t *v = attr;

	return op_u32_to_binary_helper(*v, data, data_len, offs);
}

static bool op_attr_value_from_binary(void *attr, const void *data,
				      size_t data_len, size_t *offs)
{
	uint32_t *v = attr;

	return op_u32_from_binary_helper(v, data, data_len, offs);
}

static TEE_Result op_attr_value_from_obj(void *attr, void *src_attr)
{
	uint32_t *v = attr;
	uint32_t *src_v = src_attr;

	*v = *src_v;
	return TEE_SUCCESS;
}

static void op_attr_value_clear(void *attr)
{
	uint32_t *v = attr;

	*v = 0;
}

static const struct attr_ops attr_ops[] = {
	[ATTR_OPS_INDEX_SECRET] = {
		.from_user = op_attr_secret_value_from_user,
		.to_user = op_attr_secret_value_to_user,
		.to_binary = op_attr_secret_value_to_binary,
		.from_binary = op_attr_secret_value_from_binary,
		.from_obj = op_attr_secret_value_from_obj,
		.free = op_attr_secret_value_clear, /* not a typo */
		.clear = op_attr_secret_value_clear,
	},
	[ATTR_OPS_INDEX_BIGNUM] = {
		.from_user = op_attr_bignum_from_user,
		.to_user = op_attr_bignum_to_user,
		.to_binary = op_attr_bignum_to_binary,
		.from_binary = op_attr_bignum_from_binary,
		.from_obj = op_attr_bignum_from_obj,
		.free = op_attr_bignum_free,
		.clear = op_attr_bignum_clear,
	},
	[ATTR_OPS_INDEX_VALUE] = {
		.from_user = op_attr_value_from_user,
		.to_user = op_attr_value_to_user,
		.to_binary = op_attr_value_to_binary,
		.from_binary = op_attr_value_from_binary,
		.from_obj = op_attr_value_from_obj,
		.free = op_attr_value_clear, /* not a typo */
		.clear = op_attr_value_clear,
	},
};

TEE_Result syscall_cryp_obj_get_info(unsigned long obj, TEE_ObjectInfo *info)
{
	TEE_Result res;
	struct tee_ta_session *sess;
	struct tee_obj *o;

	res =
tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; res = tee_svc_copy_to_user(info, &o->info, sizeof(o->info)); exit: return res; } TEE_Result syscall_cryp_obj_restrict_usage(unsigned long obj, unsigned long usage) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) goto exit; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) goto exit; o->info.objectUsage &= usage; exit: return res; } static int tee_svc_cryp_obj_find_type_attr_idx( uint32_t attr_id, const struct tee_cryp_obj_type_props *type_props) { size_t n; for (n = 0; n < type_props->num_type_attrs; n++) { if (attr_id == type_props->type_attrs[n].attr_id) return n; } return -1; } static const struct tee_cryp_obj_type_props *tee_svc_find_type_props( TEE_ObjectType obj_type) { size_t n; for (n = 0; n < ARRAY_SIZE(tee_cryp_obj_props); n++) { if (tee_cryp_obj_props[n].obj_type == obj_type) return tee_cryp_obj_props + n; } return NULL; } /* Set an attribute on an object */ static void set_attribute(struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return; o->have_attrs |= BIT(idx); } /* Get an attribute on an object */ static uint32_t get_attribute(const struct tee_obj *o, const struct tee_cryp_obj_type_props *props, uint32_t attr) { int idx = tee_svc_cryp_obj_find_type_attr_idx(attr, props); if (idx < 0) return 0; return o->have_attrs & BIT(idx); } TEE_Result syscall_cryp_obj_get_attr(unsigned long obj, unsigned long attr_id, void *buffer, uint64_t *size) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; int idx; const struct attr_ops *ops; void *attr; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return TEE_ERROR_ITEM_NOT_FOUND; /* Check that the object is initialized */ if (!(o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED)) return TEE_ERROR_BAD_PARAMETERS; /* Check that getting the attribute is allowed */ if (!(attr_id & TEE_ATTR_BIT_PROTECTED) && !(o->info.objectUsage & TEE_USAGE_EXTRACTABLE)) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) { /* Unknown object type, "can't happen" */ return TEE_ERROR_BAD_STATE; } idx = tee_svc_cryp_obj_find_type_attr_idx(attr_id, type_props); if ((idx < 0) || ((o->have_attrs & (1 << idx)) == 0)) return TEE_ERROR_ITEM_NOT_FOUND; ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; return ops->to_user(attr, sess, buffer, size); } void tee_obj_attr_free(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].free((uint8_t *)o->attr + ta->raw_offs); } } void tee_obj_attr_clear(struct tee_obj *o) { const struct tee_cryp_obj_type_props *tp; size_t n; if (!o->attr) return; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return; for (n = 0; n < tp->num_type_attrs; n++) { const struct 
tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; attr_ops[ta->ops_index].clear((uint8_t *)o->attr + ta->raw_offs); } } TEE_Result tee_obj_attr_to_binary(struct tee_obj *o, void *data, size_t *data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; size_t len = data ? *data_len : 0; TEE_Result res; if (o->info.objectType == TEE_TYPE_DATA) { *data_len = 0; return TEE_SUCCESS; /* pure data object */ } if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; res = attr_ops[ta->ops_index].to_binary(attr, data, len, &offs); if (res != TEE_SUCCESS) return res; } *data_len = offs; if (data && offs > len) return TEE_ERROR_SHORT_BUFFER; return TEE_SUCCESS; } TEE_Result tee_obj_attr_from_binary(struct tee_obj *o, const void *data, size_t data_len) { const struct tee_cryp_obj_type_props *tp; size_t n; size_t offs = 0; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; for (n = 0; n < tp->num_type_attrs; n++) { const struct tee_cryp_obj_type_attrs *ta = tp->type_attrs + n; void *attr = (uint8_t *)o->attr + ta->raw_offs; if (!attr_ops[ta->ops_index].from_binary(attr, data, data_len, &offs)) return TEE_ERROR_CORRUPT_OBJECT; } return TEE_SUCCESS; } TEE_Result tee_obj_attr_copy_from(struct tee_obj *o, const struct tee_obj *src) { TEE_Result res; const struct tee_cryp_obj_type_props *tp; const struct tee_cryp_obj_type_attrs *ta; size_t n; uint32_t have_attrs = 0; void *attr; void *src_attr; if (o->info.objectType == TEE_TYPE_DATA) return TEE_SUCCESS; /* pure data object */ if (!o->attr) return TEE_ERROR_BAD_STATE; tp = tee_svc_find_type_props(o->info.objectType); if (!tp) return TEE_ERROR_BAD_STATE; if (o->info.objectType == src->info.objectType) { have_attrs = src->have_attrs; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + ta->raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } else { const struct tee_cryp_obj_type_props *tp_src; int idx; if (o->info.objectType == TEE_TYPE_RSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_RSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_DSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_DSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDSA_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDSA_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else if (o->info.objectType == TEE_TYPE_ECDH_PUBLIC_KEY) { if (src->info.objectType != TEE_TYPE_ECDH_KEYPAIR) return TEE_ERROR_BAD_PARAMETERS; } else { return TEE_ERROR_BAD_PARAMETERS; } tp_src = tee_svc_find_type_props(src->info.objectType); if (!tp_src) return TEE_ERROR_BAD_STATE; have_attrs = BIT32(tp->num_type_attrs) - 1; for (n = 0; n < tp->num_type_attrs; n++) { ta = tp->type_attrs + n; idx = tee_svc_cryp_obj_find_type_attr_idx(ta->attr_id, tp_src); if (idx < 0) return TEE_ERROR_BAD_STATE; attr = (uint8_t *)o->attr + ta->raw_offs; src_attr = (uint8_t *)src->attr + tp_src->type_attrs[idx].raw_offs; res = attr_ops[ta->ops_index].from_obj(attr, src_attr); if (res != TEE_SUCCESS) return res; } } 
o->have_attrs = have_attrs; return TEE_SUCCESS; } TEE_Result tee_obj_set_type(struct tee_obj *o, uint32_t obj_type, size_t max_key_size) { TEE_Result res = TEE_SUCCESS; const struct tee_cryp_obj_type_props *type_props; /* Can only set type for newly allocated objs */ if (o->attr) return TEE_ERROR_BAD_STATE; /* * Verify that maxKeySize is supported and find out how * much should be allocated. */ if (obj_type == TEE_TYPE_DATA) { if (max_key_size) return TEE_ERROR_NOT_SUPPORTED; } else { /* Find description of object */ type_props = tee_svc_find_type_props(obj_type); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (max_key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (max_key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; o->attr = calloc(1, type_props->alloc_size); if (!o->attr) return TEE_ERROR_OUT_OF_MEMORY; } /* If we have a key structure, pre-allocate the bignums inside */ switch (obj_type) { case TEE_TYPE_RSA_PUBLIC_KEY: res = crypto_acipher_alloc_rsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_RSA_KEYPAIR: res = crypto_acipher_alloc_rsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DSA_PUBLIC_KEY: res = crypto_acipher_alloc_dsa_public_key(o->attr, max_key_size); break; case TEE_TYPE_DSA_KEYPAIR: res = crypto_acipher_alloc_dsa_keypair(o->attr, max_key_size); break; case TEE_TYPE_DH_KEYPAIR: res = crypto_acipher_alloc_dh_keypair(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_PUBLIC_KEY: case TEE_TYPE_ECDH_PUBLIC_KEY: res = crypto_acipher_alloc_ecc_public_key(o->attr, max_key_size); break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = crypto_acipher_alloc_ecc_keypair(o->attr, max_key_size); break; default: if (obj_type != TEE_TYPE_DATA) { struct tee_cryp_obj_secret *key = o->attr; key->alloc_size = type_props->alloc_size - sizeof(*key); } break; } if (res != TEE_SUCCESS) return res; o->info.objectType = obj_type; o->info.maxKeySize = max_key_size; o->info.objectUsage = TEE_USAGE_DEFAULT; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_alloc(unsigned long obj_type, unsigned long max_key_size, uint32_t *obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; if (obj_type == TEE_TYPE_DATA) return TEE_ERROR_NOT_SUPPORTED; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; o = tee_obj_alloc(); if (!o) return TEE_ERROR_OUT_OF_MEMORY; res = tee_obj_set_type(o, obj_type, max_key_size); if (res != TEE_SUCCESS) { tee_obj_free(o); return res; } tee_obj_add(to_user_ta_ctx(sess->ctx), o); res = tee_svc_copy_kaddr_to_uref(obj, o); if (res != TEE_SUCCESS) tee_obj_close(to_user_ta_ctx(sess->ctx), o); return res; } TEE_Result syscall_cryp_obj_close(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* * If it's busy it's used by an operation, a client should never have * this handle. 
*/ if (o->busy) return TEE_ERROR_ITEM_NOT_FOUND; tee_obj_close(to_user_ta_ctx(sess->ctx), o); return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_reset(unsigned long obj) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) == 0) { tee_obj_attr_clear(o); o->info.keySize = 0; o->info.objectUsage = TEE_USAGE_DEFAULT; } else { return TEE_ERROR_BAD_PARAMETERS; } /* the object is no more initialized */ o->info.handleFlags &= ~TEE_HANDLE_FLAG_INITIALIZED; return TEE_SUCCESS; } static TEE_Result copy_in_attrs(struct user_ta_ctx *utc, const struct utee_attribute *usr_attrs, uint32_t attr_count, TEE_Attribute *attrs) { TEE_Result res; uint32_t n; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)usr_attrs, attr_count * sizeof(struct utee_attribute)); if (res != TEE_SUCCESS) return res; for (n = 0; n < attr_count; n++) { attrs[n].attributeID = usr_attrs[n].attribute_id; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) { attrs[n].content.value.a = usr_attrs[n].a; attrs[n].content.value.b = usr_attrs[n].b; } else { uintptr_t buf = usr_attrs[n].a; size_t len = usr_attrs[n].b; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, buf, len); if (res != TEE_SUCCESS) return res; attrs[n].content.ref.buffer = (void *)buf; attrs[n].content.ref.length = len; } } return TEE_SUCCESS; } enum attr_usage { ATTR_USAGE_POPULATE, ATTR_USAGE_GENERATE_KEY }; static TEE_Result tee_svc_cryp_check_attr(enum attr_usage usage, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { uint32_t required_flag; uint32_t opt_flag; bool all_opt_needed; uint32_t req_attrs = 0; uint32_t opt_grp_attrs = 0; uint32_t attrs_found = 0; size_t n; uint32_t bit; uint32_t flags; int idx; if (usage == ATTR_USAGE_POPULATE) { required_flag = TEE_TYPE_ATTR_REQUIRED; opt_flag = TEE_TYPE_ATTR_OPTIONAL_GROUP; all_opt_needed = true; } else { required_flag = TEE_TYPE_ATTR_GEN_KEY_REQ; opt_flag = TEE_TYPE_ATTR_GEN_KEY_OPT; all_opt_needed = false; } /* * First find out which attributes are required and which belong to * the optional group */ for (n = 0; n < type_props->num_type_attrs; n++) { bit = 1 << n; flags = type_props->type_attrs[n].flags; if (flags & required_flag) req_attrs |= bit; else if (flags & opt_flag) opt_grp_attrs |= bit; } /* * Verify that all required attributes are in place and * that the same attribute isn't repeated. */ for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; bit = 1 << idx; /* attribute not repeated */ if ((attrs_found & bit) != 0) return TEE_ERROR_ITEM_NOT_FOUND; attrs_found |= bit; } /* Required attribute missing */ if ((attrs_found & req_attrs) != req_attrs) return TEE_ERROR_ITEM_NOT_FOUND; /* * If the flag says that "if one of the optional attributes are included * all of them has to be included" this must be checked. 
*/ if (all_opt_needed && (attrs_found & opt_grp_attrs) != 0 && (attrs_found & opt_grp_attrs) != opt_grp_attrs) return TEE_ERROR_ITEM_NOT_FOUND; return TEE_SUCCESS; } static TEE_Result get_ec_key_size(uint32_t curve, size_t *key_size) { switch (curve) { case TEE_ECC_CURVE_NIST_P192: *key_size = 192; break; case TEE_ECC_CURVE_NIST_P224: *key_size = 224; break; case TEE_ECC_CURVE_NIST_P256: *key_size = 256; break; case TEE_ECC_CURVE_NIST_P384: *key_size = 384; break; case TEE_ECC_CURVE_NIST_P521: *key_size = 521; break; default: return TEE_ERROR_NOT_SUPPORTED; } return TEE_SUCCESS; } static TEE_Result tee_svc_cryp_obj_populate_type( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, const TEE_Attribute *attrs, uint32_t attr_count) { TEE_Result res; uint32_t have_attrs = 0; size_t obj_size = 0; size_t n; int idx; const struct attr_ops *ops; void *attr; for (n = 0; n < attr_count; n++) { idx = tee_svc_cryp_obj_find_type_attr_idx( attrs[n].attributeID, type_props); /* attribute not defined in current object type */ if (idx < 0) return TEE_ERROR_ITEM_NOT_FOUND; have_attrs |= BIT32(idx); ops = attr_ops + type_props->type_attrs[idx].ops_index; attr = (uint8_t *)o->attr + type_props->type_attrs[idx].raw_offs; if (attrs[n].attributeID & TEE_ATTR_BIT_VALUE) res = ops->from_user(attr, &attrs[n].content.value, sizeof(attrs[n].content.value)); else res = ops->from_user(attr, attrs[n].content.ref.buffer, attrs[n].content.ref.length); if (res != TEE_SUCCESS) return res; /* * First attr_idx signifies the attribute that gives the size * of the object */ if (type_props->type_attrs[idx].flags & TEE_TYPE_ATTR_SIZE_INDICATOR) { /* * For ECDSA/ECDH we need to translate curve into * object size */ if (attrs[n].attributeID == TEE_ATTR_ECC_CURVE) { res = get_ec_key_size(attrs[n].content.value.a, &obj_size); if (res != TEE_SUCCESS) return res; } else { obj_size += (attrs[n].content.ref.length * 8); } } } /* * We have to do it like this because the parity bits aren't counted * when telling the size of the key in bits. 
*/ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) obj_size -= obj_size / 8; /* Exclude parity in size of key */ o->have_attrs = have_attrs; o->info.keySize = obj_size; return TEE_SUCCESS; } TEE_Result syscall_cryp_obj_populate(unsigned long obj, struct utee_attribute *usr_attrs, unsigned long attr_count) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *o; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *attrs = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_IMPLEMENTED; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), attr_count, &alloc_size)) return TEE_ERROR_OVERFLOW; attrs = malloc(alloc_size); if (!attrs) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_attrs, attr_count, attrs); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_POPULATE, type_props, attrs, attr_count); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_obj_populate_type(o, type_props, attrs, attr_count); if (res == TEE_SUCCESS) o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; out: free(attrs); return res; } TEE_Result syscall_cryp_obj_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_ta_session *sess; struct tee_obj *dst_o; struct tee_obj *src_o; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(dst), &dst_o); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(src), &src_o); if (res != TEE_SUCCESS) return res; if ((src_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_PARAMETERS; if ((dst_o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_PARAMETERS; res = tee_obj_attr_copy_from(dst_o, src_o); if (res != TEE_SUCCESS) return res; dst_o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; dst_o->info.keySize = src_o->info.keySize; dst_o->info.objectUsage = src_o->info.objectUsage; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_rsa( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct rsa_keypair *key = o->attr; uint32_t e = TEE_U32_TO_BIG_ENDIAN(65537); /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; if (!get_attribute(o, type_props, TEE_ATTR_RSA_PUBLIC_EXPONENT)) crypto_bignum_bin2bn((const uint8_t *)&e, sizeof(e), key->e); res = crypto_acipher_gen_rsa_key(key, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dsa( struct tee_obj *o, const struct 
tee_cryp_obj_type_props *type_props, uint32_t key_size) { TEE_Result res; res = crypto_acipher_gen_dsa_key(o->attr, key_size); if (res != TEE_SUCCESS) return res; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_dh( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct dh_keypair *tee_dh_key; struct bignum *dh_q = NULL; uint32_t dh_xbits = 0; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_dh_key = (struct dh_keypair *)o->attr; if (get_attribute(o, type_props, TEE_ATTR_DH_SUBPRIME)) dh_q = tee_dh_key->q; if (get_attribute(o, type_props, TEE_ATTR_DH_X_BITS)) dh_xbits = tee_dh_key->xbits; res = crypto_acipher_gen_dh_key(tee_dh_key, dh_q, dh_xbits); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_DH_PUBLIC_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_DH_X_BITS); return TEE_SUCCESS; } static TEE_Result tee_svc_obj_generate_key_ecc( struct tee_obj *o, const struct tee_cryp_obj_type_props *type_props, uint32_t key_size __unused, const TEE_Attribute *params, uint32_t param_count) { TEE_Result res; struct ecc_keypair *tee_ecc_key; /* Copy the present attributes into the obj before starting */ res = tee_svc_cryp_obj_populate_type(o, type_props, params, param_count); if (res != TEE_SUCCESS) return res; tee_ecc_key = (struct ecc_keypair *)o->attr; res = crypto_acipher_gen_ecc_key(tee_ecc_key); if (res != TEE_SUCCESS) return res; /* Set bits for the generated public and private key */ set_attribute(o, type_props, TEE_ATTR_ECC_PRIVATE_VALUE); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_X); set_attribute(o, type_props, TEE_ATTR_ECC_PUBLIC_VALUE_Y); set_attribute(o, type_props, TEE_ATTR_ECC_CURVE); return TEE_SUCCESS; } TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), param_count, &alloc_size)) return TEE_ERROR_OVERFLOW; params = malloc(alloc_size); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = 
copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; } static TEE_Result tee_svc_cryp_get_state(struct tee_ta_session *sess, uint32_t state_id, struct tee_cryp_state **state) { struct tee_cryp_state *s; struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx); TAILQ_FOREACH(s, &utc->cryp_states, link) { if (state_id == (vaddr_t)s) { *state = s; return TEE_SUCCESS; } } return TEE_ERROR_BAD_PARAMETERS; } static void cryp_state_free(struct user_ta_ctx *utc, struct tee_cryp_state *cs) { struct tee_obj *o; if (tee_obj_get(utc, cs->key1, &o) == TEE_SUCCESS) tee_obj_close(utc, o); if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) tee_obj_close(utc, o); TAILQ_REMOVE(&utc->cryp_states, cs, link); if (cs->ctx_finalize != NULL) cs->ctx_finalize(cs->ctx, cs->algo); switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_CIPHER: crypto_cipher_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_AE: crypto_authenc_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_DIGEST: crypto_hash_free_ctx(cs->ctx, cs->algo); break; case TEE_OPERATION_MAC: crypto_mac_free_ctx(cs->ctx, cs->algo); break; default: assert(!cs->ctx); } free(cs); } static TEE_Result tee_svc_cryp_check_key_type(const struct tee_obj *o, uint32_t algo, TEE_OperationMode mode) { uint32_t req_key_type; uint32_t req_key_type2 = 0; switch (TEE_ALG_GET_MAIN_ALG(algo)) { case TEE_MAIN_ALGO_MD5: req_key_type = TEE_TYPE_HMAC_MD5; break; case TEE_MAIN_ALGO_SHA1: req_key_type = TEE_TYPE_HMAC_SHA1; break; case TEE_MAIN_ALGO_SHA224: req_key_type = TEE_TYPE_HMAC_SHA224; break; case TEE_MAIN_ALGO_SHA256: req_key_type = TEE_TYPE_HMAC_SHA256; break; case TEE_MAIN_ALGO_SHA384: req_key_type = TEE_TYPE_HMAC_SHA384; break; case 
TEE_MAIN_ALGO_SHA512: req_key_type = TEE_TYPE_HMAC_SHA512; break; case TEE_MAIN_ALGO_AES: req_key_type = TEE_TYPE_AES; break; case TEE_MAIN_ALGO_DES: req_key_type = TEE_TYPE_DES; break; case TEE_MAIN_ALGO_DES3: req_key_type = TEE_TYPE_DES3; break; case TEE_MAIN_ALGO_RSA: req_key_type = TEE_TYPE_RSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_RSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DSA: req_key_type = TEE_TYPE_DSA_KEYPAIR; if (mode == TEE_MODE_ENCRYPT || mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_DSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_DH: req_key_type = TEE_TYPE_DH_KEYPAIR; break; case TEE_MAIN_ALGO_ECDSA: req_key_type = TEE_TYPE_ECDSA_KEYPAIR; if (mode == TEE_MODE_VERIFY) req_key_type2 = TEE_TYPE_ECDSA_PUBLIC_KEY; break; case TEE_MAIN_ALGO_ECDH: req_key_type = TEE_TYPE_ECDH_KEYPAIR; break; #if defined(CFG_CRYPTO_HKDF) case TEE_MAIN_ALGO_HKDF: req_key_type = TEE_TYPE_HKDF_IKM; break; #endif #if defined(CFG_CRYPTO_CONCAT_KDF) case TEE_MAIN_ALGO_CONCAT_KDF: req_key_type = TEE_TYPE_CONCAT_KDF_Z; break; #endif #if defined(CFG_CRYPTO_PBKDF2) case TEE_MAIN_ALGO_PBKDF2: req_key_type = TEE_TYPE_PBKDF2_PASSWORD; break; #endif default: return TEE_ERROR_BAD_PARAMETERS; } if (req_key_type != o->info.objectType && req_key_type2 != o->info.objectType) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } TEE_Result syscall_cryp_state_alloc(unsigned long algo, unsigned long mode, unsigned long key1, unsigned long key2, uint32_t *state) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o1 = NULL; struct tee_obj *o2 = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); if (key1 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key1), &o1); if (res != TEE_SUCCESS) return res; if (o1->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o1, algo, mode); if (res != TEE_SUCCESS) return res; } if (key2 != 0) { res = tee_obj_get(utc, tee_svc_uref_to_vaddr(key2), &o2); if (res != TEE_SUCCESS) return res; if (o2->busy) return TEE_ERROR_BAD_PARAMETERS; res = tee_svc_cryp_check_key_type(o2, algo, mode); if (res != TEE_SUCCESS) return res; } cs = calloc(1, sizeof(struct tee_cryp_state)); if (!cs) return TEE_ERROR_OUT_OF_MEMORY; TAILQ_INSERT_TAIL(&utc->cryp_states, cs, link); cs->algo = algo; cs->mode = mode; switch (TEE_ALG_GET_CLASS(algo)) { case TEE_OPERATION_EXTENSION: #ifdef CFG_CRYPTO_RSASSA_NA1 if (algo == TEE_ALG_RSASSA_PKCS1_V1_5) goto rsassa_na1; #endif res = TEE_ERROR_NOT_SUPPORTED; break; case TEE_OPERATION_CIPHER: if ((algo == TEE_ALG_AES_XTS && (key1 == 0 || key2 == 0)) || (algo != TEE_ALG_AES_XTS && (key1 == 0 || key2 != 0))) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_cipher_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_AE: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_authenc_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_MAC: if (key1 == 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_mac_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_DIGEST: if (key1 != 0 || key2 != 0) { res = TEE_ERROR_BAD_PARAMETERS; } else { res = crypto_hash_alloc_ctx(&cs->ctx, algo); if (res != TEE_SUCCESS) break; } break; case TEE_OPERATION_ASYMMETRIC_CIPHER: case TEE_OPERATION_ASYMMETRIC_SIGNATURE: rsassa_na1: __maybe_unused if 
(key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; case TEE_OPERATION_KEY_DERIVATION: if (key1 == 0 || key2 != 0) res = TEE_ERROR_BAD_PARAMETERS; break; default: res = TEE_ERROR_NOT_SUPPORTED; break; } if (res != TEE_SUCCESS) goto out; res = tee_svc_copy_kaddr_to_uref(state, cs); if (res != TEE_SUCCESS) goto out; /* Register keys */ if (o1 != NULL) { o1->busy = true; cs->key1 = (vaddr_t)o1; } if (o2 != NULL) { o2->busy = true; cs->key2 = (vaddr_t)o2; } out: if (res != TEE_SUCCESS) cryp_state_free(utc, cs); return res; } TEE_Result syscall_cryp_state_copy(unsigned long dst, unsigned long src) { TEE_Result res; struct tee_cryp_state *cs_dst; struct tee_cryp_state *cs_src; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(dst), &cs_dst); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(src), &cs_src); if (res != TEE_SUCCESS) return res; if (cs_dst->algo != cs_src->algo || cs_dst->mode != cs_src->mode) return TEE_ERROR_BAD_PARAMETERS; switch (TEE_ALG_GET_CLASS(cs_src->algo)) { case TEE_OPERATION_CIPHER: crypto_cipher_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_AE: crypto_authenc_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_DIGEST: crypto_hash_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; case TEE_OPERATION_MAC: crypto_mac_copy_state(cs_dst->ctx, cs_src->ctx, cs_src->algo); break; default: return TEE_ERROR_BAD_STATE; } return TEE_SUCCESS; } void tee_svc_cryp_free_states(struct user_ta_ctx *utc) { struct tee_cryp_state_head *states = &utc->cryp_states; while (!TAILQ_EMPTY(states)) cryp_state_free(utc, TAILQ_FIRST(states)); } TEE_Result syscall_cryp_state_free(unsigned long state) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; cryp_state_free(to_user_ta_ctx(sess->ctx), cs); return TEE_SUCCESS; } TEE_Result syscall_hash_init(unsigned long state, const void *iv __maybe_unused, size_t iv_len __maybe_unused) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = crypto_hash_init(cs->ctx, cs->algo); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: { struct tee_obj *o; struct tee_cryp_obj_secret *key; res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key = (struct tee_cryp_obj_secret *)o->attr; res = crypto_mac_init(cs->ctx, cs->algo, (void *)(key + 1), key->key_size); if (res != TEE_SUCCESS) return res; break; } default: return TEE_ERROR_BAD_PARAMETERS; } return TEE_SUCCESS; } TEE_Result syscall_hash_update(unsigned long state, const void *chunk, size_t chunk_size) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; /* No data, but size provided isn't valid parameters. */ if (!chunk && chunk_size) return TEE_ERROR_BAD_PARAMETERS; /* Zero length hash is valid, but nothing we need to do. 
*/ if (!chunk_size) return TEE_SUCCESS; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)chunk, chunk_size); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; break; default: return TEE_ERROR_BAD_PARAMETERS; } return TEE_SUCCESS; } TEE_Result syscall_hash_final(unsigned long state, const void *chunk, size_t chunk_size, void *hash, uint64_t *hash_len) { TEE_Result res, res2; size_t hash_size; uint64_t hlen; struct tee_cryp_state *cs; struct tee_ta_session *sess; /* No data, but size provided isn't valid parameters. */ if (!chunk && chunk_size) return TEE_ERROR_BAD_PARAMETERS; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)chunk, chunk_size); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&hlen, hash_len, sizeof(hlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)hash, hlen); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; switch (TEE_ALG_GET_CLASS(cs->algo)) { case TEE_OPERATION_DIGEST: res = tee_hash_get_digest_size(cs->algo, &hash_size); if (res != TEE_SUCCESS) return res; if (*hash_len < hash_size) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (chunk_size) { res = crypto_hash_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; } res = crypto_hash_final(cs->ctx, cs->algo, hash, hash_size); if (res != TEE_SUCCESS) return res; break; case TEE_OPERATION_MAC: res = tee_mac_get_digest_size(cs->algo, &hash_size); if (res != TEE_SUCCESS) return res; if (*hash_len < hash_size) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (chunk_size) { res = crypto_mac_update(cs->ctx, cs->algo, chunk, chunk_size); if (res != TEE_SUCCESS) return res; } res = crypto_mac_final(cs->ctx, cs->algo, hash, hash_size); if (res != TEE_SUCCESS) return res; break; default: return TEE_ERROR_BAD_PARAMETERS; } out: hlen = hash_size; res2 = tee_svc_copy_to_user(hash_len, &hlen, sizeof(*hash_len)); if (res2 != TEE_SUCCESS) return res2; return res; } TEE_Result syscall_cipher_init(unsigned long state, const void *iv, size_t iv_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; struct tee_cryp_obj_secret *key1; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) iv, iv_len); if (res != TEE_SUCCESS) return res; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & 
TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key1 = o->attr; if (tee_obj_get(utc, cs->key2, &o) == TEE_SUCCESS) { struct tee_cryp_obj_secret *key2 = o->attr; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, (uint8_t *)(key2 + 1), key2->key_size, iv, iv_len); } else { res = crypto_cipher_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key1 + 1), key1->key_size, NULL, 0, iv, iv_len); } if (res != TEE_SUCCESS) return res; cs->ctx_finalize = crypto_cipher_final; return TEE_SUCCESS; } static TEE_Result tee_svc_cipher_update_helper(unsigned long state, bool last_block, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } if (src_len > 0) { /* Permit src_len == 0 to finalize the operation */ res = tee_do_cipher_update(cs->ctx, cs->algo, cs->mode, last_block, src, src_len, dst); } if (last_block && cs->ctx_finalize != NULL) { cs->ctx_finalize(cs->ctx, cs->algo); cs->ctx_finalize = NULL; } out: if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) && dst_len != NULL) { TEE_Result res2; dlen = src_len; res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_cipher_update(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, false /* last_block */, src, src_len, dst, dst_len); } TEE_Result syscall_cipher_final(unsigned long state, const void *src, size_t src_len, void *dst, uint64_t *dst_len) { return tee_svc_cipher_update_helper(state, true /* last_block */, src, src_len, dst, dst_len); } #if defined(CFG_CRYPTO_HKDF) static TEE_Result get_hkdf_params(const TEE_Attribute *params, uint32_t param_count, void **salt, size_t *salt_len, void **info, size_t *info_len, size_t *okm_len) { size_t n; enum { SALT = 0x1, LENGTH = 0x2, INFO = 0x4 }; uint8_t found = 0; *salt = *info = NULL; *salt_len = *info_len = *okm_len = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_HKDF_SALT: if (!(found & SALT)) { *salt = params[n].content.ref.buffer; *salt_len = params[n].content.ref.length; found |= SALT; } break; case TEE_ATTR_HKDF_OKM_LENGTH: if (!(found & LENGTH)) { *okm_len = params[n].content.value.a; found |= LENGTH; } break; case TEE_ATTR_HKDF_INFO: if (!(found & INFO)) { *info = params[n].content.ref.buffer; *info_len = params[n].content.ref.length; found |= INFO; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if (!(found & LENGTH)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif 
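/*
 * Illustrative sketch, not part of the original file: get_hkdf_params()
 * above and the get_concat_kdf_params()/get_pbkdf2_params() helpers below
 * all follow the same single-pass "found" bitmask scan -- the first
 * occurrence of each recognized attribute wins, repeats are silently
 * ignored, unrecognized attributes are rejected, and mandatory attributes
 * are checked at the end. A minimal stand-alone form of the pattern, kept
 * under #if 0 so it is not built:
 */
#if 0
static TEE_Result example_get_params(const TEE_Attribute *params,
				     uint32_t param_count, size_t *okm_len)
{
	enum { LENGTH = 0x1 };
	uint8_t found = 0;
	size_t n;

	*okm_len = 0;
	for (n = 0; n < param_count; n++) {
		switch (params[n].attributeID) {
		case TEE_ATTR_HKDF_OKM_LENGTH:
			/* First occurrence wins, repeats are ignored */
			if (!(found & LENGTH)) {
				*okm_len = params[n].content.value.a;
				found |= LENGTH;
			}
			break;
		default: /* Unexpected attribute */
			return TEE_ERROR_BAD_PARAMETERS;
		}
	}
	/* The output keying material length is mandatory */
	if (!(found & LENGTH))
		return TEE_ERROR_BAD_PARAMETERS;
	return TEE_SUCCESS;
}
#endif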
#if defined(CFG_CRYPTO_CONCAT_KDF) static TEE_Result get_concat_kdf_params(const TEE_Attribute *params, uint32_t param_count, void **other_info, size_t *other_info_len, size_t *derived_key_len) { size_t n; enum { LENGTH = 0x1, INFO = 0x2 }; uint8_t found = 0; *other_info = NULL; *other_info_len = *derived_key_len = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_CONCAT_KDF_OTHER_INFO: if (!(found & INFO)) { *other_info = params[n].content.ref.buffer; *other_info_len = params[n].content.ref.length; found |= INFO; } break; case TEE_ATTR_CONCAT_KDF_DKM_LENGTH: if (!(found & LENGTH)) { *derived_key_len = params[n].content.value.a; found |= LENGTH; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if (!(found & LENGTH)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif #if defined(CFG_CRYPTO_PBKDF2) static TEE_Result get_pbkdf2_params(const TEE_Attribute *params, uint32_t param_count, void **salt, size_t *salt_len, size_t *derived_key_len, size_t *iteration_count) { size_t n; enum { SALT = 0x1, LENGTH = 0x2, COUNT = 0x4 }; uint8_t found = 0; *salt = NULL; *salt_len = *derived_key_len = *iteration_count = 0; for (n = 0; n < param_count; n++) { switch (params[n].attributeID) { case TEE_ATTR_PBKDF2_SALT: if (!(found & SALT)) { *salt = params[n].content.ref.buffer; *salt_len = params[n].content.ref.length; found |= SALT; } break; case TEE_ATTR_PBKDF2_DKM_LENGTH: if (!(found & LENGTH)) { *derived_key_len = params[n].content.value.a; found |= LENGTH; } break; case TEE_ATTR_PBKDF2_ITERATION_COUNT: if (!(found & COUNT)) { *iteration_count = params[n].content.value.a; found |= COUNT; } break; default: /* Unexpected attribute */ return TEE_ERROR_BAD_PARAMETERS; } } if ((found & (LENGTH|COUNT)) != (LENGTH|COUNT)) return TEE_ERROR_BAD_PARAMETERS; return TEE_SUCCESS; } #endif TEE_Result syscall_cryp_derive_key(unsigned long state, const struct utee_attribute *usr_params, unsigned long param_count, unsigned long derived_key) { TEE_Result res = TEE_ERROR_NOT_SUPPORTED; struct tee_ta_session *sess; struct tee_obj *ko; struct tee_obj *so; struct tee_cryp_state *cs; struct tee_cryp_obj_secret *sk; const struct tee_cryp_obj_type_props *type_props; TEE_Attribute *params = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), param_count, &alloc_size)) return TEE_ERROR_OVERFLOW; params = malloc(alloc_size); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; /* Get key set in operation */ res = tee_obj_get(utc, cs->key1, &ko); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, tee_svc_uref_to_vaddr(derived_key), &so); if (res != TEE_SUCCESS) goto out; /* Find information needed about the object to initialize */ sk = so->attr; /* Find description of object */ type_props = tee_svc_find_type_props(so->info.objectType); if (!type_props) { res = TEE_ERROR_NOT_SUPPORTED; goto out; } if (cs->algo == TEE_ALG_DH_DERIVE_SHARED_SECRET) { size_t alloc_size; struct bignum *pub; struct bignum *ss; if (param_count != 1 || params[0].attributeID != TEE_ATTR_DH_PUBLIC_VALUE) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } alloc_size = params[0].content.ref.length * 8; pub = crypto_bignum_allocate(alloc_size); ss 
= crypto_bignum_allocate(alloc_size); if (pub && ss) { crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, pub); res = crypto_acipher_dh_shared_secret(ko->attr, pub, ss); if (res == TEE_SUCCESS) { sk->key_size = crypto_bignum_num_bytes(ss); crypto_bignum_bn2bin(ss, (uint8_t *)(sk + 1)); so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } else { res = TEE_ERROR_OUT_OF_MEMORY; } crypto_bignum_free(pub); crypto_bignum_free(ss); } else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_ECDH) { size_t alloc_size; struct ecc_public_key key_public; uint8_t *pt_secret; unsigned long pt_secret_len; if (param_count != 2 || params[0].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_X || params[1].attributeID != TEE_ATTR_ECC_PUBLIC_VALUE_Y) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } switch (cs->algo) { case TEE_ALG_ECDH_P192: alloc_size = 192; break; case TEE_ALG_ECDH_P224: alloc_size = 224; break; case TEE_ALG_ECDH_P256: alloc_size = 256; break; case TEE_ALG_ECDH_P384: alloc_size = 384; break; case TEE_ALG_ECDH_P521: alloc_size = 521; break; default: res = TEE_ERROR_NOT_IMPLEMENTED; goto out; } /* Create the public key */ res = crypto_acipher_alloc_ecc_public_key(&key_public, alloc_size); if (res != TEE_SUCCESS) goto out; key_public.curve = ((struct ecc_keypair *)ko->attr)->curve; crypto_bignum_bin2bn(params[0].content.ref.buffer, params[0].content.ref.length, key_public.x); crypto_bignum_bin2bn(params[1].content.ref.buffer, params[1].content.ref.length, key_public.y); pt_secret = (uint8_t *)(sk + 1); pt_secret_len = sk->alloc_size; res = crypto_acipher_ecc_shared_secret(ko->attr, &key_public, pt_secret, &pt_secret_len); if (res == TEE_SUCCESS) { sk->key_size = pt_secret_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } /* free the public key */ crypto_acipher_free_ecc_public_key(&key_public); } #if defined(CFG_CRYPTO_HKDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_HKDF) { void *salt, *info; size_t salt_len, info_len, okm_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ik = ko->attr; const uint8_t *ikm = (const uint8_t *)(ik + 1); res = get_hkdf_params(params, param_count, &salt, &salt_len, &info, &info_len, &okm_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (okm_len > ik->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_hkdf(hash_id, ikm, ik->key_size, salt, salt_len, info, info_len, (uint8_t *)(sk + 1), okm_len); if (res == TEE_SUCCESS) { sk->key_size = okm_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_CONCAT_KDF) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_CONCAT_KDF) { void *info; size_t info_len, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *shared_secret = (const uint8_t *)(ss + 1); res = get_concat_kdf_params(params, param_count, &info, &info_len, &derived_key_len); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_concat_kdf(hash_id, shared_secret, ss->key_size, info, info_len, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; 
so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif #if defined(CFG_CRYPTO_PBKDF2) else if (TEE_ALG_GET_MAIN_ALG(cs->algo) == TEE_MAIN_ALGO_PBKDF2) { void *salt; size_t salt_len, iteration_count, derived_key_len; uint32_t hash_id = TEE_ALG_GET_DIGEST_HASH(cs->algo); struct tee_cryp_obj_secret *ss = ko->attr; const uint8_t *password = (const uint8_t *)(ss + 1); res = get_pbkdf2_params(params, param_count, &salt, &salt_len, &derived_key_len, &iteration_count); if (res != TEE_SUCCESS) goto out; /* Requested size must fit into the output object's buffer */ if (derived_key_len > ss->alloc_size) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } res = tee_cryp_pbkdf2(hash_id, password, ss->key_size, salt, salt_len, iteration_count, (uint8_t *)(sk + 1), derived_key_len); if (res == TEE_SUCCESS) { sk->key_size = derived_key_len; so->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; set_attribute(so, type_props, TEE_ATTR_SECRET_VALUE); } } #endif else res = TEE_ERROR_NOT_SUPPORTED; out: free(params); return res; } TEE_Result syscall_cryp_random_number_generate(void *buf, size_t blen) { TEE_Result res; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)buf, blen); if (res != TEE_SUCCESS) return res; res = crypto_rng_read(buf, blen); if (res != TEE_SUCCESS) return res; return res; } TEE_Result syscall_authenc_init(unsigned long state, const void *nonce, size_t nonce_len, size_t tag_len, size_t aad_len, size_t payload_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; struct tee_cryp_obj_secret *key; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), cs->key1, &o); if (res != TEE_SUCCESS) return res; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) return TEE_ERROR_BAD_PARAMETERS; key = o->attr; res = crypto_authenc_init(cs->ctx, cs->algo, cs->mode, (uint8_t *)(key + 1), key->key_size, nonce, nonce_len, tag_len, aad_len, payload_len); if (res != TEE_SUCCESS) return res; cs->ctx_finalize = (tee_cryp_ctx_finalize_func_t)crypto_authenc_final; return TEE_SUCCESS; } TEE_Result syscall_authenc_update_aad(unsigned long state, const void *aad_data, size_t aad_data_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) aad_data, aad_data_len); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = crypto_authenc_update_aad(cs->ctx, cs->algo, cs->mode, aad_data, aad_data_len); if (res != TEE_SUCCESS) return res; return TEE_SUCCESS; } TEE_Result syscall_authenc_update_payload(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; size_t tmp_dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != 
TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) src_data, src_len); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } tmp_dlen = dlen; res = crypto_authenc_update_payload(cs->ctx, cs->algo, cs->mode, src_data, src_len, dst_data, &tmp_dlen); dlen = tmp_dlen; out: if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) res = res2; } return res; } TEE_Result syscall_authenc_enc_final(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len, void *tag, uint64_t *tag_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; uint64_t tlen = 0; size_t tmp_dlen; size_t tmp_tlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_ENCRYPT) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src_data, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; } else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } res = tee_svc_copy_from_user(&tlen, tag_len, sizeof(tlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)tag, tlen); if (res != TEE_SUCCESS) return res; tmp_dlen = dlen; tmp_tlen = tlen; res = crypto_authenc_enc_final(cs->ctx, cs->algo, src_data, src_len, dst_data, &tmp_dlen, tag, &tmp_tlen); dlen = tmp_dlen; tlen = tmp_tlen; out: if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2; if (dst_len != NULL) { res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } res2 = tee_svc_copy_to_user(tag_len, &tlen, sizeof(*tag_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } TEE_Result syscall_authenc_dec_final(unsigned long state, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len, const void *tag, size_t tag_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen; size_t tmp_dlen; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_DECRYPT) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)src_data, src_len); if (res != TEE_SUCCESS) return res; if (!dst_len) { dlen = 0; 
} else { res = tee_svc_copy_from_user(&dlen, dst_len, sizeof(dlen)); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)dst_data, dlen); if (res != TEE_SUCCESS) return res; } if (dlen < src_len) { res = TEE_ERROR_SHORT_BUFFER; goto out; } res = tee_mmu_check_access_rights(to_user_ta_ctx(sess->ctx), TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)tag, tag_len); if (res != TEE_SUCCESS) return res; tmp_dlen = dlen; res = crypto_authenc_dec_final(cs->ctx, cs->algo, src_data, src_len, dst_data, &tmp_dlen, tag, tag_len); dlen = tmp_dlen; out: if ((res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) && dst_len != NULL) { TEE_Result res2; res2 = tee_svc_copy_to_user(dst_len, &dlen, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } static int pkcs1_get_salt_len(const TEE_Attribute *params, uint32_t num_params, size_t default_len) { size_t n; assert(default_len < INT_MAX); for (n = 0; n < num_params; n++) { if (params[n].attributeID == TEE_ATTR_RSA_PSS_SALT_LENGTH) { if (params[n].content.value.a < INT_MAX) return params[n].content.value.a; break; } } /* * If the salt length isn't provided, use the default value, which is * the length of the digest. */ return default_len; } TEE_Result syscall_asymm_operate(unsigned long state, const struct utee_attribute *usr_params, size_t num_params, const void *src_data, size_t src_len, void *dst_data, uint64_t *dst_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; uint64_t dlen64; size_t dlen; struct tee_obj *o; void *label = NULL; size_t label_len = 0; size_t n; int salt_len; TEE_Attribute *params = NULL; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights( utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) src_data, src_len); if (res != TEE_SUCCESS) return res; res = tee_svc_copy_from_user(&dlen64, dst_len, sizeof(dlen64)); if (res != TEE_SUCCESS) return res; dlen = dlen64; res = tee_mmu_check_access_rights( utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_WRITE | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t) dst_data, dlen); if (res != TEE_SUCCESS) return res; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), num_params, &alloc_size)) return TEE_ERROR_OVERFLOW; params = malloc(alloc_size); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, num_params, params); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) goto out; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) { res = TEE_ERROR_GENERIC; goto out; } switch (cs->algo) { case TEE_ALG_RSA_NOPAD: if (cs->mode == TEE_MODE_ENCRYPT) { res = crypto_acipher_rsanopad_encrypt(o->attr, src_data, src_len, dst_data, &dlen); } else if (cs->mode == TEE_MODE_DECRYPT) { res = crypto_acipher_rsanopad_decrypt(o->attr, src_data, src_len, dst_data, &dlen); } else { /* * We will panic because "the mode is not compatible * with the function" */ res = TEE_ERROR_GENERIC; } break; case TEE_ALG_RSAES_PKCS1_V1_5: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA1: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA224: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA256: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA384: case TEE_ALG_RSAES_PKCS1_OAEP_MGF1_SHA512: for (n = 0; n < num_params; n++) { if (params[n].attributeID == 
TEE_ATTR_RSA_OAEP_LABEL) { label = params[n].content.ref.buffer; label_len = params[n].content.ref.length; break; } } if (cs->mode == TEE_MODE_ENCRYPT) { res = crypto_acipher_rsaes_encrypt(cs->algo, o->attr, label, label_len, src_data, src_len, dst_data, &dlen); } else if (cs->mode == TEE_MODE_DECRYPT) { res = crypto_acipher_rsaes_decrypt( cs->algo, o->attr, label, label_len, src_data, src_len, dst_data, &dlen); } else { res = TEE_ERROR_BAD_PARAMETERS; } break; #if defined(CFG_CRYPTO_RSASSA_NA1) case TEE_ALG_RSASSA_PKCS1_V1_5: #endif case TEE_ALG_RSASSA_PKCS1_V1_5_MD5: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA1: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA224: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA256: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA384: case TEE_ALG_RSASSA_PKCS1_V1_5_SHA512: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA1: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA224: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA256: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA384: case TEE_ALG_RSASSA_PKCS1_PSS_MGF1_SHA512: if (cs->mode != TEE_MODE_SIGN) { res = TEE_ERROR_BAD_PARAMETERS; break; } salt_len = pkcs1_get_salt_len(params, num_params, src_len); res = crypto_acipher_rsassa_sign(cs->algo, o->attr, salt_len, src_data, src_len, dst_data, &dlen); break; case TEE_ALG_DSA_SHA1: case TEE_ALG_DSA_SHA224: case TEE_ALG_DSA_SHA256: res = crypto_acipher_dsa_sign(cs->algo, o->attr, src_data, src_len, dst_data, &dlen); break; case TEE_ALG_ECDSA_P192: case TEE_ALG_ECDSA_P224: case TEE_ALG_ECDSA_P256: case TEE_ALG_ECDSA_P384: case TEE_ALG_ECDSA_P521: res = crypto_acipher_ecc_sign(cs->algo, o->attr, src_data, src_len, dst_data, &dlen); break; default: res = TEE_ERROR_BAD_PARAMETERS; break; } out: free(params); if (res == TEE_SUCCESS || res == TEE_ERROR_SHORT_BUFFER) { TEE_Result res2; dlen64 = dlen; res2 = tee_svc_copy_to_user(dst_len, &dlen64, sizeof(*dst_len)); if (res2 != TEE_SUCCESS) return res2; } return res; } TEE_Result syscall_asymm_verify(unsigned long state, const struct utee_attribute *usr_params, size_t num_params, const void *data, size_t data_len, const void *sig, size_t sig_len) { TEE_Result res; struct tee_cryp_state *cs; struct tee_ta_session *sess; struct tee_obj *o; size_t hash_size; int salt_len = 0; TEE_Attribute *params = NULL; uint32_t hash_algo; struct user_ta_ctx *utc; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; utc = to_user_ta_ctx(sess->ctx); res = tee_svc_cryp_get_state(sess, tee_svc_uref_to_vaddr(state), &cs); if (res != TEE_SUCCESS) return res; if (cs->mode != TEE_MODE_VERIFY) return TEE_ERROR_BAD_PARAMETERS; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)data, data_len); if (res != TEE_SUCCESS) return res; res = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ | TEE_MEMORY_ACCESS_ANY_OWNER, (uaddr_t)sig, sig_len); if (res != TEE_SUCCESS) return res; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), num_params, &alloc_size)) return TEE_ERROR_OVERFLOW; params = malloc(alloc_size); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(utc, usr_params, num_params, params); if (res != TEE_SUCCESS) goto out; res = tee_obj_get(utc, cs->key1, &o); if (res != TEE_SUCCESS) goto out; if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) == 0) { res = TEE_ERROR_BAD_PARAMETERS; goto out; } switch (TEE_ALG_GET_MAIN_ALG(cs->algo)) { case TEE_MAIN_ALGO_RSA: if (cs->algo != TEE_ALG_RSASSA_PKCS1_V1_5) { hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo); res = tee_hash_get_digest_size(hash_algo, &hash_size); if (res != TEE_SUCCESS) break; if (data_len != hash_size) { res = TEE_ERROR_BAD_PARAMETERS; break; } salt_len 
= pkcs1_get_salt_len(params, num_params, hash_size); } res = crypto_acipher_rsassa_verify(cs->algo, o->attr, salt_len, data, data_len, sig, sig_len); break; case TEE_MAIN_ALGO_DSA: hash_algo = TEE_DIGEST_HASH_TO_ALGO(cs->algo); res = tee_hash_get_digest_size(hash_algo, &hash_size); if (res != TEE_SUCCESS) break; /* * Depending on the DSA algorithm (NIST), the digital signature * output size may be truncated to the size of a key pair * (Q prime size). Q prime size must be less or equal than the * hash output length of the hash algorithm involved. */ if (data_len > hash_size) { res = TEE_ERROR_BAD_PARAMETERS; break; } res = crypto_acipher_dsa_verify(cs->algo, o->attr, data, data_len, sig, sig_len); break; case TEE_MAIN_ALGO_ECDSA: res = crypto_acipher_ecc_verify(cs->algo, o->attr, data, data_len, sig, sig_len); break; default: res = TEE_ERROR_NOT_SUPPORTED; } out: free(params); return res; }
TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; params = malloc(sizeof(TEE_Attribute) * param_count); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; }
TEE_Result syscall_obj_generate_key(unsigned long obj, unsigned long key_size, const struct utee_attribute *usr_params, unsigned long param_count) { TEE_Result res; struct tee_ta_session *sess; const struct tee_cryp_obj_type_props *type_props; struct tee_obj *o; struct tee_cryp_obj_secret *key; size_t byte_size; TEE_Attribute *params = NULL; res = tee_ta_get_current_session(&sess); if (res != TEE_SUCCESS) return res; res = tee_obj_get(to_user_ta_ctx(sess->ctx), tee_svc_uref_to_vaddr(obj), &o); if (res != TEE_SUCCESS) return res; /* Must be a transient object */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_PERSISTENT) != 0) return TEE_ERROR_BAD_STATE; /* Must not be initialized already */ if ((o->info.handleFlags & TEE_HANDLE_FLAG_INITIALIZED) != 0) return TEE_ERROR_BAD_STATE; /* Find description of object */ type_props = tee_svc_find_type_props(o->info.objectType); if (!type_props) return TEE_ERROR_NOT_SUPPORTED; /* Check that maxKeySize follows restrictions */ if (key_size % type_props->quanta != 0) return TEE_ERROR_NOT_SUPPORTED; if (key_size < type_props->min_size) return TEE_ERROR_NOT_SUPPORTED; if (key_size > type_props->max_size) return TEE_ERROR_NOT_SUPPORTED; size_t alloc_size = 0; if (MUL_OVERFLOW(sizeof(TEE_Attribute), param_count, &alloc_size)) return TEE_ERROR_OVERFLOW; params = malloc(alloc_size); if (!params) return TEE_ERROR_OUT_OF_MEMORY; res = copy_in_attrs(to_user_ta_ctx(sess->ctx), usr_params, param_count, params); if (res != TEE_SUCCESS) goto out; res = tee_svc_cryp_check_attr(ATTR_USAGE_GENERATE_KEY, type_props, params, param_count); if (res != TEE_SUCCESS) goto out; switch (o->info.objectType) { case TEE_TYPE_AES: case TEE_TYPE_DES: case TEE_TYPE_DES3: case TEE_TYPE_HMAC_MD5: case TEE_TYPE_HMAC_SHA1: case TEE_TYPE_HMAC_SHA224: case TEE_TYPE_HMAC_SHA256: case TEE_TYPE_HMAC_SHA384: case TEE_TYPE_HMAC_SHA512: case TEE_TYPE_GENERIC_SECRET: byte_size = key_size / 8; /* * We have to do it like this because the parity bits aren't * counted when telling the size of the key in bits. */ if (o->info.objectType == TEE_TYPE_DES || o->info.objectType == TEE_TYPE_DES3) { byte_size = (key_size + key_size / 7) / 8; } key = (struct tee_cryp_obj_secret *)o->attr; if (byte_size > key->alloc_size) { res = TEE_ERROR_EXCESS_DATA; goto out; } res = crypto_rng_read((void *)(key + 1), byte_size); if (res != TEE_SUCCESS) goto out; key->key_size = byte_size; /* Set bits for all known attributes for this object type */ o->have_attrs = (1 << type_props->num_type_attrs) - 1; break; case TEE_TYPE_RSA_KEYPAIR: res = tee_svc_obj_generate_key_rsa(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DSA_KEYPAIR: res = tee_svc_obj_generate_key_dsa(o, type_props, key_size); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_DH_KEYPAIR: res = tee_svc_obj_generate_key_dh(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; case TEE_TYPE_ECDSA_KEYPAIR: case TEE_TYPE_ECDH_KEYPAIR: res = tee_svc_obj_generate_key_ecc(o, type_props, key_size, params, param_count); if (res != TEE_SUCCESS) goto out; break; default: res = TEE_ERROR_BAD_FORMAT; } out: free(params); if (res == TEE_SUCCESS) { o->info.keySize = key_size; o->info.handleFlags |= TEE_HANDLE_FLAG_INITIALIZED; } return res; }
{'added': [(1762, '\tsize_t alloc_size = 0;'),
           (1763, ''),
           (1764, '\tif (MUL_OVERFLOW(sizeof(TEE_Attribute), param_count, &alloc_size))'),
           (1765, '\t\treturn TEE_ERROR_OVERFLOW;'),
           (1766, ''),
           (1767, '\tparams = malloc(alloc_size);'),
           (2676, '\tsize_t alloc_size = 0;'),
           (2677, ''),
           (2678, '\tif (MUL_OVERFLOW(sizeof(TEE_Attribute), param_count, &alloc_size))'),
           (2679, '\t\treturn TEE_ERROR_OVERFLOW;'),
           (2680, ''),
           (2681, '\tparams = malloc(alloc_size);')],
 'deleted': [(1762, '\tparams = malloc(sizeof(TEE_Attribute) * param_count);'),
             (2671, '\tparams = malloc(sizeof(TEE_Attribute) * param_count);')]}
12
2
2,819
15,929
104
568
36
https://github.com/OP-TEE/optee_os
CVE-2019-1010297
CWE-787
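Note on the fix in this record: the patch guards the attribute-buffer allocation against integer overflow. Before the change, sizeof(TEE_Attribute) * param_count could wrap for a large, caller-controlled param_count, so malloc() returned a short buffer and the subsequent copy_in_attrs() wrote past it (the CWE-787 out-of-bounds write). A minimal standalone sketch of the pattern follows; it is not OP-TEE code, and mul_overflow_sketch() is a hypothetical stand-in for OP-TEE's MUL_OVERFLOW macro, assumed here to wrap the GCC/Clang builtin __builtin_mul_overflow.

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical stand-in for OP-TEE's MUL_OVERFLOW: returns nonzero if
 * a * b does not fit in a size_t, otherwise stores the product. */
static int mul_overflow_sketch(size_t a, size_t b, size_t *res)
{
	return __builtin_mul_overflow(a, b, res);
}

/* Overflow-checked array allocation: reject a wrapping size request
 * instead of letting malloc() see a small, wrapped value. */
static void *alloc_array_sketch(size_t elem_size, size_t count)
{
	size_t alloc_size = 0;

	if (mul_overflow_sketch(elem_size, count, &alloc_size))
		return NULL;	/* caller maps this to TEE_ERROR_OVERFLOW */
	return malloc(alloc_size);
}

The point of checking before the call, rather than after, is that malloc() cannot distinguish a wrapped small request from a legitimate one; the overflow has to be caught while both factors are still visible.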
336
read-packet.c
C
read_32
/* read-packet.c - Read OpenPGP packets * Copyright (C) 2001-2012 Free Software Foundation, Inc. * * Author: Timo Schulz * * This file is part of OpenCDK. * * The OpenCDK library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see <http://www.gnu.org/licenses/> * */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <string.h> #include <stdio.h> #include <time.h> #include <assert.h> #include "opencdk.h" #include "main.h" #include "packet.h" #include "types.h" #include <algorithms.h> #include <str.h> #include <minmax.h> /* The version of the MDC packet considering the lastest OpenPGP draft. */ static int stream_read(cdk_stream_t s, void *buf, size_t buflen, size_t * r_nread) { int res = cdk_stream_read(s, buf, buflen); if (res > 0) { *r_nread = res; return 0; } else { return (cdk_stream_eof(s) ? EOF : _cdk_stream_get_errno(s)); } } /* Try to read 4 octets from the stream. */ static u32 read_32(cdk_stream_t s) { byte buf[4]; size_t nread; assert(s != NULL); stream_read(s, buf, 4, &nread); if (nread != 4) return (u32) - 1; return buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]; } /* Try to read 2 octets from a stream. */ static u16 read_16(cdk_stream_t s) { byte buf[2]; size_t nread; assert(s != NULL); stream_read(s, buf, 2, &nread); if (nread != 2) return (u16) - 1; return buf[0] << 8 | buf[1]; } /* read about S2K at http://tools.ietf.org/html/rfc4880#section-3.7.1 */ static cdk_error_t read_s2k(cdk_stream_t inp, cdk_s2k_t s2k) { size_t nread; s2k->mode = cdk_stream_getc(inp); s2k->hash_algo = cdk_stream_getc(inp); if (s2k->mode == CDK_S2K_SIMPLE) return 0; else if (s2k->mode == CDK_S2K_SALTED || s2k->mode == CDK_S2K_ITERSALTED) { if (stream_read(inp, s2k->salt, DIM(s2k->salt), &nread)) return CDK_Inv_Packet; if (nread != DIM(s2k->salt)) return CDK_Inv_Packet; if (s2k->mode == CDK_S2K_ITERSALTED) s2k->count = cdk_stream_getc(inp); } else if (s2k->mode == CDK_S2K_GNU_EXT) { /* GNU extensions to the S2K : read DETAILS from gnupg */ return 0; } else return CDK_Not_Implemented; return 0; } static cdk_error_t read_mpi(cdk_stream_t inp, bigint_t * ret_m, int secure) { bigint_t m; int err; byte buf[MAX_MPI_BYTES + 2]; size_t nread, nbits; cdk_error_t rc; if (!inp || !ret_m) return CDK_Inv_Value; *ret_m = NULL; nbits = read_16(inp); nread = (nbits + 7) / 8; if (nbits > MAX_MPI_BITS || nbits == 0) { _gnutls_write_log("read_mpi: too large %d bits\n", (int) nbits); return gnutls_assert_val(CDK_MPI_Error); /* Sanity check */ } rc = stream_read(inp, buf + 2, nread, &nread); if (!rc && nread != ((nbits + 7) / 8)) { _gnutls_write_log("read_mpi: too short %d < %d\n", (int) nread, (int) ((nbits + 7) / 8)); return gnutls_assert_val(CDK_MPI_Error); } buf[0] = nbits >> 8; buf[1] = nbits >> 0; nread += 2; err = _gnutls_mpi_init_scan_pgp(&m, buf, nread); if (err < 0) return gnutls_assert_val(map_gnutls_error(err)); *ret_m = m; return rc; } /* Read the encoded packet length directly from the file object INP and return it. 
Reset RET_PARTIAL if this is the last packet in block mode. */ size_t _cdk_pkt_read_len(FILE * inp, size_t * ret_partial) { int c1, c2; size_t pktlen; c1 = fgetc(inp); if (c1 == EOF) return (size_t) EOF; if (c1 < 224 || c1 == 255) *ret_partial = 0; /* End of partial data */ if (c1 < 192) pktlen = c1; else if (c1 >= 192 && c1 <= 223) { c2 = fgetc(inp); if (c2 == EOF) return (size_t) EOF; pktlen = ((c1 - 192) << 8) + c2 + 192; } else if (c1 == 255) { pktlen = fgetc(inp) << 24; pktlen |= fgetc(inp) << 16; pktlen |= fgetc(inp) << 8; pktlen |= fgetc(inp) << 0; } else pktlen = 1 << (c1 & 0x1f); return pktlen; } static cdk_error_t read_pubkey_enc(cdk_stream_t inp, size_t pktlen, cdk_pkt_pubkey_enc_t pke) { size_t i, nenc; if (!inp || !pke) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_pubkey_enc: %d octets\n", (int) pktlen); if (pktlen < 12) return CDK_Inv_Packet; pke->version = cdk_stream_getc(inp); if (pke->version < 2 || pke->version > 3) return CDK_Inv_Packet; pke->keyid[0] = read_32(inp); pke->keyid[1] = read_32(inp); if (!pke->keyid[0] && !pke->keyid[1]) pke->throw_keyid = 1; /* RFC2440 "speculative" keyID */ pke->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); nenc = cdk_pk_get_nenc(pke->pubkey_algo); if (!nenc) return CDK_Inv_Algo; for (i = 0; i < nenc; i++) { cdk_error_t rc = read_mpi(inp, &pke->mpi[i], 0); if (rc) return gnutls_assert_val(rc); } return 0; } static cdk_error_t read_mdc(cdk_stream_t inp, cdk_pkt_mdc_t mdc) { size_t n; cdk_error_t rc; if (!inp || !mdc) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_mdc:\n"); rc = stream_read(inp, mdc->hash, DIM(mdc->hash), &n); if (rc) return rc; return n != DIM(mdc->hash) ? CDK_Inv_Packet : 0; } static cdk_error_t read_compressed(cdk_stream_t inp, size_t pktlen, cdk_pkt_compressed_t c) { if (!inp || !c) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_compressed: %d octets\n", (int) pktlen); c->algorithm = cdk_stream_getc(inp); if (c->algorithm > 3) return CDK_Inv_Packet; /* don't know the size, so we read until EOF */ if (!pktlen) { c->len = 0; c->buf = inp; } /* FIXME: Support partial bodies. */ return 0; } static cdk_error_t read_public_key(cdk_stream_t inp, size_t pktlen, cdk_pkt_pubkey_t pk) { size_t i, ndays, npkey; if (!inp || !pk) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_public_key: %d octets\n", (int) pktlen); pk->is_invalid = 1; /* default to detect missing self signatures */ pk->is_revoked = 0; pk->has_expired = 0; pk->version = cdk_stream_getc(inp); if (pk->version < 2 || pk->version > 4) return CDK_Inv_Packet_Ver; pk->timestamp = read_32(inp); if (pk->version < 4) { ndays = read_16(inp); if (ndays) pk->expiredate = pk->timestamp + ndays * 86400L; } pk->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); npkey = cdk_pk_get_npkey(pk->pubkey_algo); if (!npkey) { gnutls_assert(); _gnutls_write_log("invalid public key algorithm %d\n", pk->pubkey_algo); return CDK_Inv_Algo; } for (i = 0; i < npkey; i++) { cdk_error_t rc = read_mpi(inp, &pk->mpi[i], 0); if (rc) return gnutls_assert_val(rc); } /* This value is just for the first run and will be replaced with the actual key flags from the self signature. 
*/ pk->pubkey_usage = 0; return 0; } static cdk_error_t read_public_subkey(cdk_stream_t inp, size_t pktlen, cdk_pkt_pubkey_t pk) { if (!inp || !pk) return CDK_Inv_Value; return read_public_key(inp, pktlen, pk); } static cdk_error_t read_secret_key(cdk_stream_t inp, size_t pktlen, cdk_pkt_seckey_t sk) { size_t p1, p2, nread; int i, nskey; int rc; if (!inp || !sk || !sk->pk) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_secret_key: %d octets\n", (int) pktlen); p1 = cdk_stream_tell(inp); rc = read_public_key(inp, pktlen, sk->pk); if (rc) return rc; sk->s2k_usage = cdk_stream_getc(inp); sk->protect.sha1chk = 0; if (sk->s2k_usage == 254 || sk->s2k_usage == 255) { sk->protect.sha1chk = (sk->s2k_usage == 254); sk->protect.algo = _pgp_cipher_to_gnutls(cdk_stream_getc(inp)); if (sk->protect.algo == GNUTLS_CIPHER_UNKNOWN) return gnutls_assert_val(CDK_Inv_Algo); sk->protect.s2k = cdk_calloc(1, sizeof *sk->protect.s2k); if (!sk->protect.s2k) return CDK_Out_Of_Core; rc = read_s2k(inp, sk->protect.s2k); if (rc) return rc; /* refer to --export-secret-subkeys in gpg(1) */ if (sk->protect.s2k->mode == CDK_S2K_GNU_EXT) sk->protect.ivlen = 0; else { sk->protect.ivlen = gnutls_cipher_get_block_size(sk->protect.algo); if (!sk->protect.ivlen) return CDK_Inv_Packet; rc = stream_read(inp, sk->protect.iv, sk->protect.ivlen, &nread); if (rc) return rc; if (nread != sk->protect.ivlen) return CDK_Inv_Packet; } } else sk->protect.algo = _pgp_cipher_to_gnutls(sk->s2k_usage); if (sk->protect.algo == GNUTLS_CIPHER_UNKNOWN) return gnutls_assert_val(CDK_Inv_Algo); else if (sk->protect.algo == GNUTLS_CIPHER_NULL) { sk->csum = 0; nskey = cdk_pk_get_nskey(sk->pk->pubkey_algo); if (!nskey) { gnutls_assert(); return CDK_Inv_Algo; } for (i = 0; i < nskey; i++) { rc = read_mpi(inp, &sk->mpi[i], 1); if (rc) return gnutls_assert_val(rc); } sk->csum = read_16(inp); sk->is_protected = 0; } else if (sk->pk->version < 4) { /* The length of each multiprecision integer is stored in plaintext. */ nskey = cdk_pk_get_nskey(sk->pk->pubkey_algo); if (!nskey) { gnutls_assert(); return CDK_Inv_Algo; } for (i = 0; i < nskey; i++) { rc = read_mpi(inp, &sk->mpi[i], 1); if (rc) return gnutls_assert_val(rc); } sk->csum = read_16(inp); sk->is_protected = 1; } else { /* We need to read the rest of the packet because we do not have any information how long the encrypted mpi's are */ p2 = cdk_stream_tell(inp); p2 -= p1; sk->enclen = pktlen - p2; if (sk->enclen < 2) return CDK_Inv_Packet; /* at least 16 bits for the checksum! */ sk->encdata = cdk_calloc(1, sk->enclen + 1); if (!sk->encdata) return CDK_Out_Of_Core; if (stream_read(inp, sk->encdata, sk->enclen, &nread)) return CDK_Inv_Packet; /* Handle the GNU S2K extensions we know (just gnu-dummy right now): */ if (sk->protect.s2k->mode == CDK_S2K_GNU_EXT) { unsigned char gnumode; if ((sk->enclen < strlen("GNU") + 1) || (0 != memcmp("GNU", sk->encdata, strlen("GNU")))) return CDK_Inv_Packet; gnumode = sk->encdata[strlen("GNU")]; /* we only handle gnu-dummy (mode 1). mode 2 should refer to external smart cards. */ if (gnumode != 1) return CDK_Inv_Packet; /* gnu-dummy should have no more data */ if (sk->enclen != strlen("GNU") + 1) return CDK_Inv_Packet; } nskey = cdk_pk_get_nskey(sk->pk->pubkey_algo); if (!nskey) { gnutls_assert(); return CDK_Inv_Algo; } /* We mark each MPI entry with NULL to indicate a protected key. 
*/ for (i = 0; i < nskey; i++) sk->mpi[i] = NULL; sk->is_protected = 1; } sk->is_primary = 1; _cdk_copy_pk_to_sk(sk->pk, sk); return 0; } static cdk_error_t read_secret_subkey(cdk_stream_t inp, size_t pktlen, cdk_pkt_seckey_t sk) { cdk_error_t rc; if (!inp || !sk || !sk->pk) return CDK_Inv_Value; rc = read_secret_key(inp, pktlen, sk); sk->is_primary = 0; return rc; } #define ATTRIBUTE "[attribute]" static cdk_error_t read_attribute(cdk_stream_t inp, size_t pktlen, cdk_pkt_userid_t attr, int name_size) { const byte *p; byte *buf; size_t len, nread; cdk_error_t rc; if (!inp || !attr || !pktlen) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_attribute: %d octets\n", (int) pktlen); _gnutls_str_cpy(attr->name, name_size, ATTRIBUTE); attr->len = MIN(name_size, sizeof(ATTRIBUTE) - 1); buf = cdk_calloc(1, pktlen); if (!buf) return CDK_Out_Of_Core; rc = stream_read(inp, buf, pktlen, &nread); if (rc) { gnutls_assert(); rc = CDK_Inv_Packet; goto error; } p = buf; len = *p++; pktlen--; if (len == 255) { if (pktlen < 4) { gnutls_assert(); rc = CDK_Inv_Packet; goto error; } len = _cdk_buftou32(p); p += 4; pktlen -= 4; } else if (len >= 192) { if (pktlen < 2) { gnutls_assert(); rc = CDK_Inv_Packet; goto error; } len = ((len - 192) << 8) + *p + 192; p++; pktlen--; } if (!len || *p != 1) { /* Currently only 1, meaning an image, is defined. */ rc = CDK_Inv_Packet; goto error; } p++; len--; if (len >= pktlen) { rc = CDK_Inv_Packet; goto error; } attr->attrib_img = cdk_calloc(1, len); if (!attr->attrib_img) { rc = CDK_Out_Of_Core; goto error; } attr->attrib_len = len; memcpy(attr->attrib_img, p, len); cdk_free(buf); return rc; error: cdk_free(buf); return rc; } static cdk_error_t read_user_id(cdk_stream_t inp, size_t pktlen, cdk_pkt_userid_t user_id) { size_t nread; cdk_error_t rc; if (!inp || !user_id) return CDK_Inv_Value; if (!pktlen) return CDK_Inv_Packet; if (DEBUG_PKT) _gnutls_write_log("read_user_id: %lu octets\n", (unsigned long) pktlen); user_id->len = pktlen; rc = stream_read(inp, user_id->name, pktlen, &nread); if (rc) return rc; if (nread != pktlen) return CDK_Inv_Packet; user_id->name[nread] = '\0'; return rc; } static cdk_error_t read_subpkt(cdk_stream_t inp, cdk_subpkt_t * r_ctx, size_t * r_nbytes) { byte c, c1; size_t size, nread, n; cdk_subpkt_t node; cdk_error_t rc; if (!inp || !r_nbytes) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_subpkt:\n"); n = 0; *r_nbytes = 0; c = cdk_stream_getc(inp); n++; if (c == 255) { size = read_32(inp); n += 4; } else if (c >= 192 && c < 255) { c1 = cdk_stream_getc(inp); n++; if (c1 == 0) return 0; size = ((c - 192) << 8) + c1 + 192; } else if (c < 192) size = c; else return CDK_Inv_Packet; node = cdk_subpkt_new(size); if (!node) return CDK_Out_Of_Core; node->size = size; node->type = cdk_stream_getc(inp); if (DEBUG_PKT) _gnutls_write_log(" %d octets %d type\n", node->size, node->type); n++; node->size--; rc = stream_read(inp, node->d, node->size, &nread); n += nread; if (rc) { cdk_subpkt_free(node); return rc; } *r_nbytes = n; if (!*r_ctx) *r_ctx = node; else cdk_subpkt_add(*r_ctx, node); return rc; } static cdk_error_t read_onepass_sig(cdk_stream_t inp, size_t pktlen, cdk_pkt_onepass_sig_t sig) { if (!inp || !sig) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_onepass_sig: %d octets\n", (int) pktlen); if (pktlen != 13) return CDK_Inv_Packet; sig->version = cdk_stream_getc(inp); if (sig->version != 3) return CDK_Inv_Packet_Ver; sig->sig_class = cdk_stream_getc(inp); sig->digest_algo = 
_pgp_hash_algo_to_gnutls(cdk_stream_getc(inp)); sig->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); sig->keyid[0] = read_32(inp); sig->keyid[1] = read_32(inp); sig->last = cdk_stream_getc(inp); return 0; } static cdk_error_t parse_sig_subpackets(cdk_pkt_signature_t sig) { cdk_subpkt_t node; /* Setup the standard packet entries, so we can use V4 signatures similar to V3. */ for (node = sig->unhashed; node; node = node->next) { if (node->type == CDK_SIGSUBPKT_ISSUER && node->size >= 8) { sig->keyid[0] = _cdk_buftou32(node->d); sig->keyid[1] = _cdk_buftou32(node->d + 4); } else if (node->type == CDK_SIGSUBPKT_EXPORTABLE && node->d[0] == 0) { /* Sometimes this packet might be placed in the unhashed area */ sig->flags.exportable = 0; } } for (node = sig->hashed; node; node = node->next) { if (node->type == CDK_SIGSUBPKT_SIG_CREATED && node->size >= 4) sig->timestamp = _cdk_buftou32(node->d); else if (node->type == CDK_SIGSUBPKT_SIG_EXPIRE && node->size >= 4) { sig->expiredate = _cdk_buftou32(node->d); if (sig->expiredate > 0 && sig->expiredate < (u32) gnutls_time(NULL)) sig->flags.expired = 1; } else if (node->type == CDK_SIGSUBPKT_POLICY) sig->flags.policy_url = 1; else if (node->type == CDK_SIGSUBPKT_NOTATION) sig->flags.notation = 1; else if (node->type == CDK_SIGSUBPKT_REVOCABLE && node->d[0] == 0) sig->flags.revocable = 0; else if (node->type == CDK_SIGSUBPKT_EXPORTABLE && node->d[0] == 0) sig->flags.exportable = 0; } if (sig->sig_class == 0x1F) { cdk_desig_revoker_t r, rnode; for (node = sig->hashed; node; node = node->next) { if (node->type == CDK_SIGSUBPKT_REV_KEY) { if (node->size < 22) continue; rnode = cdk_calloc(1, sizeof *rnode); if (!rnode) return CDK_Out_Of_Core; rnode->r_class = node->d[0]; rnode->algid = node->d[1]; memcpy(rnode->fpr, node->d + 2, KEY_FPR_LEN); if (!sig->revkeys) sig->revkeys = rnode; else { for (r = sig->revkeys; r->next; r = r->next); r->next = rnode; } } } } return 0; } static cdk_error_t read_signature(cdk_stream_t inp, size_t pktlen, cdk_pkt_signature_t sig) { size_t nbytes; size_t i, nsig; ssize_t size; cdk_error_t rc; if (!inp || !sig) return gnutls_assert_val(CDK_Inv_Value); if (DEBUG_PKT) _gnutls_write_log("read_signature: %d octets\n", (int) pktlen); if (pktlen < 16) return gnutls_assert_val(CDK_Inv_Packet); sig->version = cdk_stream_getc(inp); if (sig->version < 2 || sig->version > 4) return gnutls_assert_val(CDK_Inv_Packet_Ver); sig->flags.exportable = 1; sig->flags.revocable = 1; if (sig->version < 4) { if (cdk_stream_getc(inp) != 5) return gnutls_assert_val(CDK_Inv_Packet); sig->sig_class = cdk_stream_getc(inp); sig->timestamp = read_32(inp); sig->keyid[0] = read_32(inp); sig->keyid[1] = read_32(inp); sig->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); sig->digest_algo = _pgp_hash_algo_to_gnutls(cdk_stream_getc(inp)); sig->digest_start[0] = cdk_stream_getc(inp); sig->digest_start[1] = cdk_stream_getc(inp); nsig = cdk_pk_get_nsig(sig->pubkey_algo); if (!nsig) return gnutls_assert_val(CDK_Inv_Algo); for (i = 0; i < nsig; i++) { rc = read_mpi(inp, &sig->mpi[i], 0); if (rc) return gnutls_assert_val(rc); } } else { sig->sig_class = cdk_stream_getc(inp); sig->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); sig->digest_algo = _pgp_hash_algo_to_gnutls(cdk_stream_getc(inp)); sig->hashed_size = read_16(inp); size = sig->hashed_size; sig->hashed = NULL; while (size > 0) { rc = read_subpkt(inp, &sig->hashed, &nbytes); if (rc) return gnutls_assert_val(rc); size -= nbytes; } sig->unhashed_size = read_16(inp); size = 
sig->unhashed_size; sig->unhashed = NULL; while (size > 0) { rc = read_subpkt(inp, &sig->unhashed, &nbytes); if (rc) return gnutls_assert_val(rc); size -= nbytes; } rc = parse_sig_subpackets(sig); if (rc) return gnutls_assert_val(rc); sig->digest_start[0] = cdk_stream_getc(inp); sig->digest_start[1] = cdk_stream_getc(inp); nsig = cdk_pk_get_nsig(sig->pubkey_algo); if (!nsig) return gnutls_assert_val(CDK_Inv_Algo); for (i = 0; i < nsig; i++) { rc = read_mpi(inp, &sig->mpi[i], 0); if (rc) return gnutls_assert_val(rc); } } return 0; } static cdk_error_t read_literal(cdk_stream_t inp, size_t pktlen, cdk_pkt_literal_t * ret_pt, int is_partial) { cdk_pkt_literal_t pt = *ret_pt; size_t nread; cdk_error_t rc; if (!inp || !pt) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_literal: %d octets\n", (int) pktlen); pt->mode = cdk_stream_getc(inp); if (pt->mode != 0x62 && pt->mode != 0x74 && pt->mode != 0x75) return CDK_Inv_Packet; if (cdk_stream_eof(inp)) return CDK_Inv_Packet; pt->namelen = cdk_stream_getc(inp); if (pt->namelen > 0) { *ret_pt = pt = cdk_realloc(pt, sizeof *pt + pt->namelen + 2); if (!pt) return CDK_Out_Of_Core; pt->name = (char *) pt + sizeof(*pt); rc = stream_read(inp, pt->name, pt->namelen, &nread); if (rc) return rc; if ((int) nread != pt->namelen) return CDK_Inv_Packet; pt->name[pt->namelen] = '\0'; } pt->timestamp = read_32(inp); pktlen = pktlen - 6 - pt->namelen; if (is_partial) _cdk_stream_set_blockmode(inp, pktlen); pt->buf = inp; pt->len = pktlen; return 0; } /* Read an old packet CTB and return the length of the body. */ static void read_old_length(cdk_stream_t inp, int ctb, size_t * r_len, size_t * r_size) { int llen = ctb & 0x03; if (llen == 0) { *r_len = cdk_stream_getc(inp); (*r_size)++; } else if (llen == 1) { *r_len = read_16(inp); (*r_size) += 2; } else if (llen == 2) { *r_len = read_32(inp); (*r_size) += 4; } else { *r_len = 0; *r_size = 0; } } /* Read a new CTB and decode the body length. */ static void read_new_length(cdk_stream_t inp, size_t * r_len, size_t * r_size, size_t * r_partial) { int c, c1; c = cdk_stream_getc(inp); (*r_size)++; if (c < 192) *r_len = c; else if (c >= 192 && c <= 223) { c1 = cdk_stream_getc(inp); (*r_size)++; *r_len = ((c - 192) << 8) + c1 + 192; } else if (c == 255) { *r_len = read_32(inp); (*r_size) += 4; } else { *r_len = 1 << (c & 0x1f); *r_partial = 1; } } /* Skip the current packet body. */ static cdk_error_t skip_packet(cdk_stream_t inp, size_t pktlen) { byte buf[BUFSIZE]; size_t nread, buflen = DIM(buf); while (pktlen > 0) { cdk_error_t rc; rc = stream_read(inp, buf, pktlen > buflen ? buflen : pktlen, &nread); if (rc) return rc; pktlen -= nread; } assert(pktlen == 0); return 0; } /** * cdk_pkt_read: * @inp: the input stream * @pkt: allocated packet handle to store the packet * * Parse the next packet on the @inp stream and return its contents in @pkt. **/ cdk_error_t cdk_pkt_read(cdk_stream_t inp, cdk_packet_t pkt) { int ctb, is_newctb; int pkttype; size_t pktlen = 0, pktsize = 0, is_partial = 0; cdk_error_t rc; if (!inp || !pkt) return CDK_Inv_Value; ctb = cdk_stream_getc(inp); if (cdk_stream_eof(inp) || ctb == EOF) return CDK_EOF; else if (!ctb) return gnutls_assert_val(CDK_Inv_Packet); pktsize++; if (!(ctb & 0x80)) { _cdk_log_info("cdk_pkt_read: no openpgp data found. " "(ctb=%02X; fpos=%02X)\n", (int) ctb, (int) cdk_stream_tell(inp)); return gnutls_assert_val(CDK_Inv_Packet); } if (ctb & 0x40) { /* RFC2440 packet format. */ pkttype = ctb & 0x3f; is_newctb = 1; } else { /* the old RFC1991 packet format. 
*/ pkttype = ctb & 0x3f; pkttype >>= 2; is_newctb = 0; } if (pkttype > 63) { _cdk_log_info("cdk_pkt_read: unknown type %d\n", pkttype); return gnutls_assert_val(CDK_Inv_Packet); } if (is_newctb) read_new_length(inp, &pktlen, &pktsize, &is_partial); else read_old_length(inp, ctb, &pktlen, &pktsize); pkt->pkttype = pkttype; pkt->pktlen = pktlen; pkt->pktsize = pktsize + pktlen; pkt->old_ctb = is_newctb ? 0 : 1; rc = 0; switch (pkt->pkttype) { case CDK_PKT_ATTRIBUTE: #define NAME_SIZE (pkt->pktlen + 16 + 1) pkt->pkt.user_id = cdk_calloc(1, sizeof *pkt->pkt.user_id + NAME_SIZE); if (!pkt->pkt.user_id) return gnutls_assert_val(CDK_Out_Of_Core); pkt->pkt.user_id->name = (char *) pkt->pkt.user_id + sizeof(*pkt->pkt.user_id); rc = read_attribute(inp, pktlen, pkt->pkt.user_id, NAME_SIZE); pkt->pkttype = CDK_PKT_ATTRIBUTE; if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_USER_ID: pkt->pkt.user_id = cdk_calloc(1, sizeof *pkt->pkt.user_id + pkt->pktlen + 1); if (!pkt->pkt.user_id) return gnutls_assert_val(CDK_Out_Of_Core); pkt->pkt.user_id->name = (char *) pkt->pkt.user_id + sizeof(*pkt->pkt.user_id); rc = read_user_id(inp, pktlen, pkt->pkt.user_id); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_PUBLIC_KEY: pkt->pkt.public_key = cdk_calloc(1, sizeof *pkt->pkt.public_key); if (!pkt->pkt.public_key) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_public_key(inp, pktlen, pkt->pkt.public_key); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_PUBLIC_SUBKEY: pkt->pkt.public_key = cdk_calloc(1, sizeof *pkt->pkt.public_key); if (!pkt->pkt.public_key) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_public_subkey(inp, pktlen, pkt->pkt.public_key); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_SECRET_KEY: pkt->pkt.secret_key = cdk_calloc(1, sizeof *pkt->pkt.secret_key); if (!pkt->pkt.secret_key) return gnutls_assert_val(CDK_Out_Of_Core); pkt->pkt.secret_key->pk = cdk_calloc(1, sizeof *pkt->pkt. secret_key->pk); if (!pkt->pkt.secret_key->pk) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_secret_key(inp, pktlen, pkt->pkt.secret_key); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_SECRET_SUBKEY: pkt->pkt.secret_key = cdk_calloc(1, sizeof *pkt->pkt.secret_key); if (!pkt->pkt.secret_key) return gnutls_assert_val(CDK_Out_Of_Core); pkt->pkt.secret_key->pk = cdk_calloc(1, sizeof *pkt->pkt. 
secret_key->pk); if (!pkt->pkt.secret_key->pk) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_secret_subkey(inp, pktlen, pkt->pkt.secret_key); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_LITERAL: pkt->pkt.literal = cdk_calloc(1, sizeof *pkt->pkt.literal); if (!pkt->pkt.literal) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_literal(inp, pktlen, &pkt->pkt.literal, is_partial); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_ONEPASS_SIG: pkt->pkt.onepass_sig = cdk_calloc(1, sizeof *pkt->pkt.onepass_sig); if (!pkt->pkt.onepass_sig) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_onepass_sig(inp, pktlen, pkt->pkt.onepass_sig); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_SIGNATURE: pkt->pkt.signature = cdk_calloc(1, sizeof *pkt->pkt.signature); if (!pkt->pkt.signature) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_signature(inp, pktlen, pkt->pkt.signature); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_PUBKEY_ENC: pkt->pkt.pubkey_enc = cdk_calloc(1, sizeof *pkt->pkt.pubkey_enc); if (!pkt->pkt.pubkey_enc) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_pubkey_enc(inp, pktlen, pkt->pkt.pubkey_enc); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_COMPRESSED: pkt->pkt.compressed = cdk_calloc(1, sizeof *pkt->pkt.compressed); if (!pkt->pkt.compressed) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_compressed(inp, pktlen, pkt->pkt.compressed); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_MDC: pkt->pkt.mdc = cdk_calloc(1, sizeof *pkt->pkt.mdc); if (!pkt->pkt.mdc) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_mdc(inp, pkt->pkt.mdc); if (rc) return gnutls_assert_val(rc); break; default: /* Skip all packets we don't understand */ rc = skip_packet(inp, pktlen); if (rc) return gnutls_assert_val(rc); break; } return rc; }
/* read-packet.c - Read OpenPGP packets * Copyright (C) 2001-2012 Free Software Foundation, Inc. * * Author: Timo Schulz * * This file is part of OpenCDK. * * The OpenCDK library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 of * the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program. If not, see <http://www.gnu.org/licenses/> * */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <string.h> #include <stdio.h> #include <time.h> #include <assert.h> #include "opencdk.h" #include "main.h" #include "packet.h" #include "types.h" #include <algorithms.h> #include <str.h> #include <minmax.h> /* The version of the MDC packet considering the lastest OpenPGP draft. */ static int stream_read(cdk_stream_t s, void *buf, size_t buflen, size_t * r_nread) { int res = cdk_stream_read(s, buf, buflen); if (res > 0) { *r_nread = res; return 0; } else { return (cdk_stream_eof(s) ? EOF : _cdk_stream_get_errno(s)); } } /* Try to read 4 octets from the stream. */ static u32 read_32(cdk_stream_t s) { byte buf[4]; size_t nread = 0; assert(s != NULL); stream_read(s, buf, 4, &nread); if (nread != 4) return (u32) -1; return buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3]; } /* Try to read 2 octets from a stream. */ static u16 read_16(cdk_stream_t s) { byte buf[2]; size_t nread = 0; assert(s != NULL); stream_read(s, buf, 2, &nread); if (nread != 2) return (u16) - 1; return buf[0] << 8 | buf[1]; } /* read about S2K at http://tools.ietf.org/html/rfc4880#section-3.7.1 */ static cdk_error_t read_s2k(cdk_stream_t inp, cdk_s2k_t s2k) { size_t nread; s2k->mode = cdk_stream_getc(inp); s2k->hash_algo = cdk_stream_getc(inp); if (s2k->mode == CDK_S2K_SIMPLE) return 0; else if (s2k->mode == CDK_S2K_SALTED || s2k->mode == CDK_S2K_ITERSALTED) { if (stream_read(inp, s2k->salt, DIM(s2k->salt), &nread)) return CDK_Inv_Packet; if (nread != DIM(s2k->salt)) return CDK_Inv_Packet; if (s2k->mode == CDK_S2K_ITERSALTED) s2k->count = cdk_stream_getc(inp); } else if (s2k->mode == CDK_S2K_GNU_EXT) { /* GNU extensions to the S2K : read DETAILS from gnupg */ return 0; } else return CDK_Not_Implemented; return 0; } static cdk_error_t read_mpi(cdk_stream_t inp, bigint_t * ret_m, int secure) { bigint_t m; int err; byte buf[MAX_MPI_BYTES + 2]; size_t nread, nbits; cdk_error_t rc; if (!inp || !ret_m) return CDK_Inv_Value; *ret_m = NULL; nbits = read_16(inp); nread = (nbits + 7) / 8; if (nbits > MAX_MPI_BITS || nbits == 0) { _gnutls_write_log("read_mpi: too large %d bits\n", (int) nbits); return gnutls_assert_val(CDK_MPI_Error); /* Sanity check */ } rc = stream_read(inp, buf + 2, nread, &nread); if (!rc && nread != ((nbits + 7) / 8)) { _gnutls_write_log("read_mpi: too short %d < %d\n", (int) nread, (int) ((nbits + 7) / 8)); return gnutls_assert_val(CDK_MPI_Error); } buf[0] = nbits >> 8; buf[1] = nbits >> 0; nread += 2; err = _gnutls_mpi_init_scan_pgp(&m, buf, nread); if (err < 0) return gnutls_assert_val(map_gnutls_error(err)); *ret_m = m; return rc; } /* Read the encoded packet length directly from the file object INP and return it. 
Reset RET_PARTIAL if this is the last packet in block mode. */ size_t _cdk_pkt_read_len(FILE * inp, size_t * ret_partial) { int c1, c2; size_t pktlen; c1 = fgetc(inp); if (c1 == EOF) return (size_t) EOF; if (c1 < 224 || c1 == 255) *ret_partial = 0; /* End of partial data */ if (c1 < 192) pktlen = c1; else if (c1 >= 192 && c1 <= 223) { c2 = fgetc(inp); if (c2 == EOF) return (size_t) EOF; pktlen = ((c1 - 192) << 8) + c2 + 192; } else if (c1 == 255) { pktlen = fgetc(inp) << 24; pktlen |= fgetc(inp) << 16; pktlen |= fgetc(inp) << 8; pktlen |= fgetc(inp) << 0; } else pktlen = 1 << (c1 & 0x1f); return pktlen; } static cdk_error_t read_pubkey_enc(cdk_stream_t inp, size_t pktlen, cdk_pkt_pubkey_enc_t pke) { size_t i, nenc; if (!inp || !pke) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_pubkey_enc: %d octets\n", (int) pktlen); if (pktlen < 12) return CDK_Inv_Packet; pke->version = cdk_stream_getc(inp); if (pke->version < 2 || pke->version > 3) return CDK_Inv_Packet; pke->keyid[0] = read_32(inp); pke->keyid[1] = read_32(inp); if (!pke->keyid[0] && !pke->keyid[1]) pke->throw_keyid = 1; /* RFC2440 "speculative" keyID */ pke->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); nenc = cdk_pk_get_nenc(pke->pubkey_algo); if (!nenc) return CDK_Inv_Algo; for (i = 0; i < nenc; i++) { cdk_error_t rc = read_mpi(inp, &pke->mpi[i], 0); if (rc) return gnutls_assert_val(rc); } return 0; } static cdk_error_t read_mdc(cdk_stream_t inp, cdk_pkt_mdc_t mdc) { size_t n; cdk_error_t rc; if (!inp || !mdc) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_mdc:\n"); rc = stream_read(inp, mdc->hash, DIM(mdc->hash), &n); if (rc) return rc; return n != DIM(mdc->hash) ? CDK_Inv_Packet : 0; } static cdk_error_t read_compressed(cdk_stream_t inp, size_t pktlen, cdk_pkt_compressed_t c) { if (!inp || !c) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_compressed: %d octets\n", (int) pktlen); c->algorithm = cdk_stream_getc(inp); if (c->algorithm > 3) return CDK_Inv_Packet; /* don't know the size, so we read until EOF */ if (!pktlen) { c->len = 0; c->buf = inp; } /* FIXME: Support partial bodies. */ return 0; } static cdk_error_t read_public_key(cdk_stream_t inp, size_t pktlen, cdk_pkt_pubkey_t pk) { size_t i, ndays, npkey; if (!inp || !pk) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_public_key: %d octets\n", (int) pktlen); pk->is_invalid = 1; /* default to detect missing self signatures */ pk->is_revoked = 0; pk->has_expired = 0; pk->version = cdk_stream_getc(inp); if (pk->version < 2 || pk->version > 4) return CDK_Inv_Packet_Ver; pk->timestamp = read_32(inp); if (pk->version < 4) { ndays = read_16(inp); if (ndays) pk->expiredate = pk->timestamp + ndays * 86400L; } pk->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); npkey = cdk_pk_get_npkey(pk->pubkey_algo); if (!npkey) { gnutls_assert(); _gnutls_write_log("invalid public key algorithm %d\n", pk->pubkey_algo); return CDK_Inv_Algo; } for (i = 0; i < npkey; i++) { cdk_error_t rc = read_mpi(inp, &pk->mpi[i], 0); if (rc) return gnutls_assert_val(rc); } /* This value is just for the first run and will be replaced with the actual key flags from the self signature. 
*/ pk->pubkey_usage = 0; return 0; } static cdk_error_t read_public_subkey(cdk_stream_t inp, size_t pktlen, cdk_pkt_pubkey_t pk) { if (!inp || !pk) return CDK_Inv_Value; return read_public_key(inp, pktlen, pk); } static cdk_error_t read_secret_key(cdk_stream_t inp, size_t pktlen, cdk_pkt_seckey_t sk) { size_t p1, p2, nread; int i, nskey; int rc; if (!inp || !sk || !sk->pk) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_secret_key: %d octets\n", (int) pktlen); p1 = cdk_stream_tell(inp); rc = read_public_key(inp, pktlen, sk->pk); if (rc) return rc; sk->s2k_usage = cdk_stream_getc(inp); sk->protect.sha1chk = 0; if (sk->s2k_usage == 254 || sk->s2k_usage == 255) { sk->protect.sha1chk = (sk->s2k_usage == 254); sk->protect.algo = _pgp_cipher_to_gnutls(cdk_stream_getc(inp)); if (sk->protect.algo == GNUTLS_CIPHER_UNKNOWN) return gnutls_assert_val(CDK_Inv_Algo); sk->protect.s2k = cdk_calloc(1, sizeof *sk->protect.s2k); if (!sk->protect.s2k) return CDK_Out_Of_Core; rc = read_s2k(inp, sk->protect.s2k); if (rc) return rc; /* refer to --export-secret-subkeys in gpg(1) */ if (sk->protect.s2k->mode == CDK_S2K_GNU_EXT) sk->protect.ivlen = 0; else { sk->protect.ivlen = gnutls_cipher_get_block_size(sk->protect.algo); if (!sk->protect.ivlen) return CDK_Inv_Packet; rc = stream_read(inp, sk->protect.iv, sk->protect.ivlen, &nread); if (rc) return rc; if (nread != sk->protect.ivlen) return CDK_Inv_Packet; } } else sk->protect.algo = _pgp_cipher_to_gnutls(sk->s2k_usage); if (sk->protect.algo == GNUTLS_CIPHER_UNKNOWN) return gnutls_assert_val(CDK_Inv_Algo); else if (sk->protect.algo == GNUTLS_CIPHER_NULL) { sk->csum = 0; nskey = cdk_pk_get_nskey(sk->pk->pubkey_algo); if (!nskey) { gnutls_assert(); return CDK_Inv_Algo; } for (i = 0; i < nskey; i++) { rc = read_mpi(inp, &sk->mpi[i], 1); if (rc) return gnutls_assert_val(rc); } sk->csum = read_16(inp); sk->is_protected = 0; } else if (sk->pk->version < 4) { /* The length of each multiprecision integer is stored in plaintext. */ nskey = cdk_pk_get_nskey(sk->pk->pubkey_algo); if (!nskey) { gnutls_assert(); return CDK_Inv_Algo; } for (i = 0; i < nskey; i++) { rc = read_mpi(inp, &sk->mpi[i], 1); if (rc) return gnutls_assert_val(rc); } sk->csum = read_16(inp); sk->is_protected = 1; } else { /* We need to read the rest of the packet because we do not have any information how long the encrypted mpi's are */ p2 = cdk_stream_tell(inp); p2 -= p1; sk->enclen = pktlen - p2; if (sk->enclen < 2) return CDK_Inv_Packet; /* at least 16 bits for the checksum! */ sk->encdata = cdk_calloc(1, sk->enclen + 1); if (!sk->encdata) return CDK_Out_Of_Core; if (stream_read(inp, sk->encdata, sk->enclen, &nread)) return CDK_Inv_Packet; /* Handle the GNU S2K extensions we know (just gnu-dummy right now): */ if (sk->protect.s2k->mode == CDK_S2K_GNU_EXT) { unsigned char gnumode; if ((sk->enclen < strlen("GNU") + 1) || (0 != memcmp("GNU", sk->encdata, strlen("GNU")))) return CDK_Inv_Packet; gnumode = sk->encdata[strlen("GNU")]; /* we only handle gnu-dummy (mode 1). mode 2 should refer to external smart cards. */ if (gnumode != 1) return CDK_Inv_Packet; /* gnu-dummy should have no more data */ if (sk->enclen != strlen("GNU") + 1) return CDK_Inv_Packet; } nskey = cdk_pk_get_nskey(sk->pk->pubkey_algo); if (!nskey) { gnutls_assert(); return CDK_Inv_Algo; } /* We mark each MPI entry with NULL to indicate a protected key. 
*/ for (i = 0; i < nskey; i++) sk->mpi[i] = NULL; sk->is_protected = 1; } sk->is_primary = 1; _cdk_copy_pk_to_sk(sk->pk, sk); return 0; } static cdk_error_t read_secret_subkey(cdk_stream_t inp, size_t pktlen, cdk_pkt_seckey_t sk) { cdk_error_t rc; if (!inp || !sk || !sk->pk) return CDK_Inv_Value; rc = read_secret_key(inp, pktlen, sk); sk->is_primary = 0; return rc; } #define ATTRIBUTE "[attribute]" static cdk_error_t read_attribute(cdk_stream_t inp, size_t pktlen, cdk_pkt_userid_t attr, int name_size) { const byte *p; byte *buf; size_t len, nread; cdk_error_t rc; if (!inp || !attr || !pktlen) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_attribute: %d octets\n", (int) pktlen); _gnutls_str_cpy(attr->name, name_size, ATTRIBUTE); attr->len = MIN(name_size, sizeof(ATTRIBUTE) - 1); buf = cdk_calloc(1, pktlen); if (!buf) return CDK_Out_Of_Core; rc = stream_read(inp, buf, pktlen, &nread); if (rc) { gnutls_assert(); rc = CDK_Inv_Packet; goto error; } p = buf; len = *p++; pktlen--; if (len == 255) { if (pktlen < 4) { gnutls_assert(); rc = CDK_Inv_Packet; goto error; } len = _cdk_buftou32(p); p += 4; pktlen -= 4; } else if (len >= 192) { if (pktlen < 2) { gnutls_assert(); rc = CDK_Inv_Packet; goto error; } len = ((len - 192) << 8) + *p + 192; p++; pktlen--; } if (!len || *p != 1) { /* Currently only 1, meaning an image, is defined. */ rc = CDK_Inv_Packet; goto error; } p++; len--; if (len >= pktlen) { rc = CDK_Inv_Packet; goto error; } attr->attrib_img = cdk_calloc(1, len); if (!attr->attrib_img) { rc = CDK_Out_Of_Core; goto error; } attr->attrib_len = len; memcpy(attr->attrib_img, p, len); cdk_free(buf); return rc; error: cdk_free(buf); return rc; } static cdk_error_t read_user_id(cdk_stream_t inp, size_t pktlen, cdk_pkt_userid_t user_id) { size_t nread; cdk_error_t rc; if (!inp || !user_id) return CDK_Inv_Value; if (!pktlen) return CDK_Inv_Packet; if (DEBUG_PKT) _gnutls_write_log("read_user_id: %lu octets\n", (unsigned long) pktlen); user_id->len = pktlen; rc = stream_read(inp, user_id->name, pktlen, &nread); if (rc) return rc; if (nread != pktlen) return CDK_Inv_Packet; user_id->name[nread] = '\0'; return rc; } static cdk_error_t read_subpkt(cdk_stream_t inp, cdk_subpkt_t * r_ctx, size_t * r_nbytes) { int c, c1; size_t size, nread, n; cdk_subpkt_t node; cdk_error_t rc; if (!inp || !r_nbytes) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_subpkt:\n"); n = 0; *r_nbytes = 0; c = cdk_stream_getc(inp); n++; if (c == 255) { size = read_32(inp); if (size == (u32)-1) return CDK_Inv_Packet; n += 4; } else if (c >= 192 && c < 255) { c1 = cdk_stream_getc(inp); if (c1 == EOF) return CDK_Inv_Packet; n++; if (c1 == 0) return 0; size = ((c - 192) << 8) + c1 + 192; } else if (c < 192) size = c; else return CDK_Inv_Packet; node = cdk_subpkt_new(size); if (!node) return CDK_Out_Of_Core; node->size = size; node->type = cdk_stream_getc(inp); if (DEBUG_PKT) _gnutls_write_log(" %d octets %d type\n", node->size, node->type); n++; node->size--; rc = stream_read(inp, node->d, node->size, &nread); n += nread; if (rc) { cdk_subpkt_free(node); return rc; } *r_nbytes = n; if (!*r_ctx) *r_ctx = node; else cdk_subpkt_add(*r_ctx, node); return rc; } static cdk_error_t read_onepass_sig(cdk_stream_t inp, size_t pktlen, cdk_pkt_onepass_sig_t sig) { if (!inp || !sig) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_onepass_sig: %d octets\n", (int) pktlen); if (pktlen != 13) return CDK_Inv_Packet; sig->version = cdk_stream_getc(inp); if (sig->version != 3) return CDK_Inv_Packet_Ver; 
sig->sig_class = cdk_stream_getc(inp); sig->digest_algo = _pgp_hash_algo_to_gnutls(cdk_stream_getc(inp)); sig->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); sig->keyid[0] = read_32(inp); sig->keyid[1] = read_32(inp); sig->last = cdk_stream_getc(inp); return 0; } static cdk_error_t parse_sig_subpackets(cdk_pkt_signature_t sig) { cdk_subpkt_t node; /* Setup the standard packet entries, so we can use V4 signatures similar to V3. */ for (node = sig->unhashed; node; node = node->next) { if (node->type == CDK_SIGSUBPKT_ISSUER && node->size >= 8) { sig->keyid[0] = _cdk_buftou32(node->d); sig->keyid[1] = _cdk_buftou32(node->d + 4); } else if (node->type == CDK_SIGSUBPKT_EXPORTABLE && node->d[0] == 0) { /* Sometimes this packet might be placed in the unhashed area */ sig->flags.exportable = 0; } } for (node = sig->hashed; node; node = node->next) { if (node->type == CDK_SIGSUBPKT_SIG_CREATED && node->size >= 4) sig->timestamp = _cdk_buftou32(node->d); else if (node->type == CDK_SIGSUBPKT_SIG_EXPIRE && node->size >= 4) { sig->expiredate = _cdk_buftou32(node->d); if (sig->expiredate > 0 && sig->expiredate < (u32) gnutls_time(NULL)) sig->flags.expired = 1; } else if (node->type == CDK_SIGSUBPKT_POLICY) sig->flags.policy_url = 1; else if (node->type == CDK_SIGSUBPKT_NOTATION) sig->flags.notation = 1; else if (node->type == CDK_SIGSUBPKT_REVOCABLE && node->d[0] == 0) sig->flags.revocable = 0; else if (node->type == CDK_SIGSUBPKT_EXPORTABLE && node->d[0] == 0) sig->flags.exportable = 0; } if (sig->sig_class == 0x1F) { cdk_desig_revoker_t r, rnode; for (node = sig->hashed; node; node = node->next) { if (node->type == CDK_SIGSUBPKT_REV_KEY) { if (node->size < 22) continue; rnode = cdk_calloc(1, sizeof *rnode); if (!rnode) return CDK_Out_Of_Core; rnode->r_class = node->d[0]; rnode->algid = node->d[1]; memcpy(rnode->fpr, node->d + 2, KEY_FPR_LEN); if (!sig->revkeys) sig->revkeys = rnode; else { for (r = sig->revkeys; r->next; r = r->next); r->next = rnode; } } } } return 0; } static cdk_error_t read_signature(cdk_stream_t inp, size_t pktlen, cdk_pkt_signature_t sig) { size_t nbytes; size_t i, nsig; ssize_t size; cdk_error_t rc; if (!inp || !sig) return gnutls_assert_val(CDK_Inv_Value); if (DEBUG_PKT) _gnutls_write_log("read_signature: %d octets\n", (int) pktlen); if (pktlen < 16) return gnutls_assert_val(CDK_Inv_Packet); sig->version = cdk_stream_getc(inp); if (sig->version < 2 || sig->version > 4) return gnutls_assert_val(CDK_Inv_Packet_Ver); sig->flags.exportable = 1; sig->flags.revocable = 1; if (sig->version < 4) { if (cdk_stream_getc(inp) != 5) return gnutls_assert_val(CDK_Inv_Packet); sig->sig_class = cdk_stream_getc(inp); sig->timestamp = read_32(inp); sig->keyid[0] = read_32(inp); sig->keyid[1] = read_32(inp); sig->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); sig->digest_algo = _pgp_hash_algo_to_gnutls(cdk_stream_getc(inp)); sig->digest_start[0] = cdk_stream_getc(inp); sig->digest_start[1] = cdk_stream_getc(inp); nsig = cdk_pk_get_nsig(sig->pubkey_algo); if (!nsig) return gnutls_assert_val(CDK_Inv_Algo); for (i = 0; i < nsig; i++) { rc = read_mpi(inp, &sig->mpi[i], 0); if (rc) return gnutls_assert_val(rc); } } else { sig->sig_class = cdk_stream_getc(inp); sig->pubkey_algo = _pgp_pub_algo_to_cdk(cdk_stream_getc(inp)); sig->digest_algo = _pgp_hash_algo_to_gnutls(cdk_stream_getc(inp)); sig->hashed_size = read_16(inp); size = sig->hashed_size; sig->hashed = NULL; while (size > 0) { rc = read_subpkt(inp, &sig->hashed, &nbytes); if (rc) return gnutls_assert_val(rc); size -= nbytes; 
} sig->unhashed_size = read_16(inp); size = sig->unhashed_size; sig->unhashed = NULL; while (size > 0) { rc = read_subpkt(inp, &sig->unhashed, &nbytes); if (rc) return gnutls_assert_val(rc); size -= nbytes; } rc = parse_sig_subpackets(sig); if (rc) return gnutls_assert_val(rc); sig->digest_start[0] = cdk_stream_getc(inp); sig->digest_start[1] = cdk_stream_getc(inp); nsig = cdk_pk_get_nsig(sig->pubkey_algo); if (!nsig) return gnutls_assert_val(CDK_Inv_Algo); for (i = 0; i < nsig; i++) { rc = read_mpi(inp, &sig->mpi[i], 0); if (rc) return gnutls_assert_val(rc); } } return 0; } static cdk_error_t read_literal(cdk_stream_t inp, size_t pktlen, cdk_pkt_literal_t * ret_pt, int is_partial) { cdk_pkt_literal_t pt = *ret_pt; size_t nread; cdk_error_t rc; if (!inp || !pt) return CDK_Inv_Value; if (DEBUG_PKT) _gnutls_write_log("read_literal: %d octets\n", (int) pktlen); pt->mode = cdk_stream_getc(inp); if (pt->mode != 0x62 && pt->mode != 0x74 && pt->mode != 0x75) return CDK_Inv_Packet; if (cdk_stream_eof(inp)) return CDK_Inv_Packet; pt->namelen = cdk_stream_getc(inp); if (pt->namelen > 0) { *ret_pt = pt = cdk_realloc(pt, sizeof *pt + pt->namelen + 2); if (!pt) return CDK_Out_Of_Core; pt->name = (char *) pt + sizeof(*pt); rc = stream_read(inp, pt->name, pt->namelen, &nread); if (rc) return rc; if ((int) nread != pt->namelen) return CDK_Inv_Packet; pt->name[pt->namelen] = '\0'; } pt->timestamp = read_32(inp); pktlen = pktlen - 6 - pt->namelen; if (is_partial) _cdk_stream_set_blockmode(inp, pktlen); pt->buf = inp; pt->len = pktlen; return 0; } /* Read an old packet CTB and return the length of the body. */ static void read_old_length(cdk_stream_t inp, int ctb, size_t * r_len, size_t * r_size) { int llen = ctb & 0x03; int c; if (llen == 0) { c = cdk_stream_getc(inp); if (c == EOF) goto fail; *r_len = c; (*r_size)++; } else if (llen == 1) { *r_len = read_16(inp); if (*r_len == (u16)-1) goto fail; (*r_size) += 2; } else if (llen == 2) { *r_len = read_32(inp); if (*r_len == (u32)-1) { goto fail; } (*r_size) += 4; } else { fail: *r_len = 0; *r_size = 0; } } /* Read a new CTB and decode the body length. */ static void read_new_length(cdk_stream_t inp, size_t * r_len, size_t * r_size, size_t * r_partial) { int c, c1; c = cdk_stream_getc(inp); if (c == EOF) return; (*r_size)++; if (c < 192) *r_len = c; else if (c >= 192 && c <= 223) { c1 = cdk_stream_getc(inp); if (c1 == EOF) return; (*r_size)++; *r_len = ((c - 192) << 8) + c1 + 192; } else if (c == 255) { *r_len = read_32(inp); if (*r_len == (u32)-1) { return; } (*r_size) += 4; } else { *r_len = 1 << (c & 0x1f); *r_partial = 1; } } /* Skip the current packet body. */ static cdk_error_t skip_packet(cdk_stream_t inp, size_t pktlen) { byte buf[BUFSIZE]; size_t nread, buflen = DIM(buf); while (pktlen > 0) { cdk_error_t rc; rc = stream_read(inp, buf, pktlen > buflen ? buflen : pktlen, &nread); if (rc) return rc; pktlen -= nread; } assert(pktlen == 0); return 0; } /** * cdk_pkt_read: * @inp: the input stream * @pkt: allocated packet handle to store the packet * * Parse the next packet on the @inp stream and return its contents in @pkt. **/ cdk_error_t cdk_pkt_read(cdk_stream_t inp, cdk_packet_t pkt) { int ctb, is_newctb; int pkttype; size_t pktlen = 0, pktsize = 0, is_partial = 0; cdk_error_t rc; if (!inp || !pkt) return CDK_Inv_Value; ctb = cdk_stream_getc(inp); if (cdk_stream_eof(inp) || ctb == EOF) return CDK_EOF; else if (!ctb) return gnutls_assert_val(CDK_Inv_Packet); pktsize++; if (!(ctb & 0x80)) { _cdk_log_info("cdk_pkt_read: no openpgp data found. 
" "(ctb=%02X; fpos=%02X)\n", (int) ctb, (int) cdk_stream_tell(inp)); return gnutls_assert_val(CDK_Inv_Packet); } if (ctb & 0x40) { /* RFC2440 packet format. */ pkttype = ctb & 0x3f; is_newctb = 1; } else { /* the old RFC1991 packet format. */ pkttype = ctb & 0x3f; pkttype >>= 2; is_newctb = 0; } if (pkttype > 63) { _cdk_log_info("cdk_pkt_read: unknown type %d\n", pkttype); return gnutls_assert_val(CDK_Inv_Packet); } if (is_newctb) read_new_length(inp, &pktlen, &pktsize, &is_partial); else read_old_length(inp, ctb, &pktlen, &pktsize); pkt->pkttype = pkttype; pkt->pktlen = pktlen; pkt->pktsize = pktsize + pktlen; pkt->old_ctb = is_newctb ? 0 : 1; rc = 0; switch (pkt->pkttype) { case CDK_PKT_ATTRIBUTE: #define NAME_SIZE (pkt->pktlen + 16 + 1) pkt->pkt.user_id = cdk_calloc(1, sizeof *pkt->pkt.user_id + NAME_SIZE); if (!pkt->pkt.user_id) return gnutls_assert_val(CDK_Out_Of_Core); pkt->pkt.user_id->name = (char *) pkt->pkt.user_id + sizeof(*pkt->pkt.user_id); rc = read_attribute(inp, pktlen, pkt->pkt.user_id, NAME_SIZE); pkt->pkttype = CDK_PKT_ATTRIBUTE; if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_USER_ID: pkt->pkt.user_id = cdk_calloc(1, sizeof *pkt->pkt.user_id + pkt->pktlen + 1); if (!pkt->pkt.user_id) return gnutls_assert_val(CDK_Out_Of_Core); pkt->pkt.user_id->name = (char *) pkt->pkt.user_id + sizeof(*pkt->pkt.user_id); rc = read_user_id(inp, pktlen, pkt->pkt.user_id); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_PUBLIC_KEY: pkt->pkt.public_key = cdk_calloc(1, sizeof *pkt->pkt.public_key); if (!pkt->pkt.public_key) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_public_key(inp, pktlen, pkt->pkt.public_key); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_PUBLIC_SUBKEY: pkt->pkt.public_key = cdk_calloc(1, sizeof *pkt->pkt.public_key); if (!pkt->pkt.public_key) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_public_subkey(inp, pktlen, pkt->pkt.public_key); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_SECRET_KEY: pkt->pkt.secret_key = cdk_calloc(1, sizeof *pkt->pkt.secret_key); if (!pkt->pkt.secret_key) return gnutls_assert_val(CDK_Out_Of_Core); pkt->pkt.secret_key->pk = cdk_calloc(1, sizeof *pkt->pkt. secret_key->pk); if (!pkt->pkt.secret_key->pk) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_secret_key(inp, pktlen, pkt->pkt.secret_key); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_SECRET_SUBKEY: pkt->pkt.secret_key = cdk_calloc(1, sizeof *pkt->pkt.secret_key); if (!pkt->pkt.secret_key) return gnutls_assert_val(CDK_Out_Of_Core); pkt->pkt.secret_key->pk = cdk_calloc(1, sizeof *pkt->pkt. 
secret_key->pk); if (!pkt->pkt.secret_key->pk) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_secret_subkey(inp, pktlen, pkt->pkt.secret_key); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_LITERAL: pkt->pkt.literal = cdk_calloc(1, sizeof *pkt->pkt.literal); if (!pkt->pkt.literal) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_literal(inp, pktlen, &pkt->pkt.literal, is_partial); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_ONEPASS_SIG: pkt->pkt.onepass_sig = cdk_calloc(1, sizeof *pkt->pkt.onepass_sig); if (!pkt->pkt.onepass_sig) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_onepass_sig(inp, pktlen, pkt->pkt.onepass_sig); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_SIGNATURE: pkt->pkt.signature = cdk_calloc(1, sizeof *pkt->pkt.signature); if (!pkt->pkt.signature) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_signature(inp, pktlen, pkt->pkt.signature); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_PUBKEY_ENC: pkt->pkt.pubkey_enc = cdk_calloc(1, sizeof *pkt->pkt.pubkey_enc); if (!pkt->pkt.pubkey_enc) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_pubkey_enc(inp, pktlen, pkt->pkt.pubkey_enc); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_COMPRESSED: pkt->pkt.compressed = cdk_calloc(1, sizeof *pkt->pkt.compressed); if (!pkt->pkt.compressed) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_compressed(inp, pktlen, pkt->pkt.compressed); if (rc) return gnutls_assert_val(rc); break; case CDK_PKT_MDC: pkt->pkt.mdc = cdk_calloc(1, sizeof *pkt->pkt.mdc); if (!pkt->pkt.mdc) return gnutls_assert_val(CDK_Out_Of_Core); rc = read_mdc(inp, pkt->pkt.mdc); if (rc) return gnutls_assert_val(rc); break; default: /* Skip all packets we don't understand */ rc = skip_packet(inp, pktlen); if (rc) return gnutls_assert_val(rc); break; } return rc; }
func_before:

static u32 read_32(cdk_stream_t s)
{
	byte buf[4];
	size_t nread;

	assert(s != NULL);

	stream_read(s, buf, 4, &nread);
	if (nread != 4)
		return (u32) - 1;
	return buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
}

func_after:

static u32 read_32(cdk_stream_t s)
{
	byte buf[4];
	size_t nread = 0;

	assert(s != NULL);

	stream_read(s, buf, 4, &nread);
	if (nread != 4)
		return (u32) -1;
	return buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
}
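Only one substantive token separates func_before from func_after: `size_t nread;` becomes `size_t nread = 0;` (the `(u32) - 1` to `(u32) -1` change is cosmetic). The fix matters because stream_read() can fail before writing its out-parameter, in which case the old code compares an indeterminate value against 4 and may accept a buffer it never filled. A minimal standalone sketch of the hazard follows; flaky_read and read_be32_unsafe are hypothetical stand-ins, not OpenCDK API:

#include <stddef.h>

/* Hypothetical reader that fails early without writing *nread,
   mirroring how stream_read() can behave on an error path. */
static int flaky_read(unsigned char *buf, size_t want, size_t *nread)
{
	(void) buf;
	(void) want;
	(void) nread;	/* error path: *nread left untouched */
	return -1;
}

static unsigned int read_be32_unsafe(void)
{
	unsigned char buf[4];
	size_t nread;	/* indeterminate, as in func_before */

	flaky_read(buf, 4, &nread);
	if (nread != 4)	/* tests garbage; can spuriously pass */
		return (unsigned int) -1;
	/* If the stale value happens to equal 4, uninitialized bytes of
	   buf are assembled into a length that later reads will trust. */
	return buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
}

With nread initialized to 0, a short read deterministically returns (u32)-1, the sentinel value that the hunks added around the length parsers (see the diff below) explicitly reject.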
diff:

{'added': [(45, ''),
           (59, '\tsize_t nread = 0;'),
           (65, '\t\treturn (u32) -1;'),
           (74, '\tsize_t nread = 0;'),
           (576, '\tint c, c1;'),
           (591, ''),
           (594, '\t\tif (size == (u32)-1)'),
           (595, '\t\t\treturn CDK_Inv_Packet;'),
           (596, ''),
           (600, '\t\tif (c1 == EOF)'),
           (601, '\t\t\treturn CDK_Inv_Packet;'),
           (602, ''),
           (869, '\tint c;'),
           (872, '\t\tc = cdk_stream_getc(inp);'),
           (873, '\t\tif (c == EOF)'),
           (874, '\t\t\tgoto fail;'),
           (875, ''),
           (876, '\t\t*r_len = c;'),
           (880, '\t\tif (*r_len == (u16)-1)'),
           (881, '\t\t\tgoto fail;'),
           (885, '\t\tif (*r_len == (u32)-1) {'),
           (886, '\t\t\tgoto fail;'),
           (887, '\t\t}'),
           (888, ''),
           (891, ' fail:'),
           (906, '\tif (c == EOF)'),
           (907, '\t\treturn;'),
           (908, ''),
           (914, '\t\tif (c1 == EOF)'),
           (915, '\t\t\treturn;'),
           (916, ''),
           (921, '\t\tif (*r_len == (u32)-1) {'),
           (922, '\t\t\treturn;'),
           (923, '\t\t}'),
           (924, '')],
 'deleted': [(58, '\tsize_t nread;'),
             (64, '\t\treturn (u32) - 1;'),
             (73, '\tsize_t nread;'),
             (575, '\tbyte c, c1;'),
             (863, '\t\t*r_len = cdk_stream_getc(inp);')]}
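Beyond read_32, the added hunks all apply one pattern: a byte that previously flowed straight from cdk_stream_getc() into a length variable is first captured in an int and screened for EOF, and sentinel lengths ((u16)-1, (u32)-1) are rejected before use. Spliced out of the diff entries above (the surrounding function bodies are not part of this record):

/* Before (deleted line 863): EOF (-1) widens into a huge length. */
*r_len = cdk_stream_getc(inp);

/* After (added lines 869-876): capture in an int, reject EOF first. */
int c;

c = cdk_stream_getc(inp);
if (c == EOF)
	goto fail;
*r_len = c;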
num_lines_added: 35
num_lines_deleted: 5
num_lines_in_file: 932
num_tokens_in_file: 5,970
num_lines_in_method: 10
num_tokens_in_method: 78
method_complexity: 2
repo: https://gitlab.com/gnutls/gnutls
cve_id: CVE-2017-5335
cwe_id: CWE-125

id: 2,994
file_name: stats_ops.cc
programming_language: C++
method_name: tensorflow::BoostedTreesCalculateBestFeatureSplitOp::Compute

/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <limits>
#include <string>
#include <vector>

#include "third_party/eigen3/Eigen/Core"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/boosted_trees/boosted_trees.pb.h"
#include "tensorflow/core/kernels/boosted_trees/tree_helper.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/logging.h"

namespace tensorflow {

using Matrix =
    Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
using ConstMatrixMap = Eigen::Map<const Matrix>;
using MatrixMap = Eigen::Map<Matrix>;
using ConstVectorMap = Eigen::Map<const Eigen::VectorXf>;
using VectorMap = Eigen::Map<Eigen::VectorXf>;

constexpr char kInequalitySplit[] = "inequality";
constexpr char kEqualitySplit[] = "equality";

// V1 Op. Deprecated. BoostedTreesCalculateBestFeatureSplitOpV2 is V2.
class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel {
 public:
  explicit BoostedTreesCalculateBestGainsPerFeatureOp(
      OpKernelConstruction* const context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_));
    OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_));
  }

  void Compute(OpKernelContext* const context) override {
    // node_id_range
    const Tensor* node_id_range_t;
    OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
    OP_REQUIRES(
        context, node_id_range_t->dims() == 1,
        errors::InvalidArgument("node_id_range must be a rank 1 tensor, but "
                                "given node_id_range has dims of ",
                                node_id_range_t->dims()));
    OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2,
                errors::InvalidArgument(
                    "node_id_range must be a rank 1 tensor with shape=[2], but "
                    "given node_id_range has shape ",
                    node_id_range_t->dim_size(0), " on its first dim"));
    const auto node_id_range = node_id_range_t->vec<int32>();
    const int32_t node_id_first = node_id_range(0);  // inclusive
    const int32_t node_id_last = node_id_range(1);   // exclusive

    // stats_summary_list
    OpInputList stats_summary_list;
    OP_REQUIRES_OK(context, context->input_list("stats_summary_list",
                                                &stats_summary_list));
    const int64_t num_buckets = stats_summary_list[0].dim_size(1);
    // Check for single logit: 1 gradient + 1 hessian value.
    DCHECK_EQ(stats_summary_list[0].dim_size(2), 2);
    std::vector<TTypes<float, 3>::ConstTensor> stats_summary;
    stats_summary.reserve(stats_summary_list.size());
    for (const auto& tensor : stats_summary_list) {
      stats_summary.emplace_back(tensor.tensor<float, 3>());
    }
    const Tensor* l1_t;
    OP_REQUIRES_OK(context, context->input("l1", &l1_t));
    const auto l1 = l1_t->scalar<float>()();
    const Tensor* l2_t;
    OP_REQUIRES_OK(context, context->input("l2", &l2_t));
    const auto l2 = l2_t->scalar<float>()();
    const Tensor* tree_complexity_t;
    OP_REQUIRES_OK(context,
                   context->input("tree_complexity", &tree_complexity_t));
    const auto tree_complexity = tree_complexity_t->scalar<float>()();
    const Tensor* min_node_weight_t;
    OP_REQUIRES_OK(context,
                   context->input("min_node_weight", &min_node_weight_t));
    const auto min_node_weight = min_node_weight_t->scalar<float>()();

    // Allocate output lists of tensors:
    OpOutputList output_node_ids_list;
    OP_REQUIRES_OK(
        context, context->output_list("node_ids_list", &output_node_ids_list));
    OpOutputList output_gains_list;
    OP_REQUIRES_OK(context,
                   context->output_list("gains_list", &output_gains_list));
    OpOutputList output_thresholds_list;
    OP_REQUIRES_OK(context, context->output_list("thresholds_list",
                                                 &output_thresholds_list));
    OpOutputList output_left_node_contribs_list;
    OP_REQUIRES_OK(context,
                   context->output_list("left_node_contribs_list",
                                        &output_left_node_contribs_list));
    OpOutputList output_right_node_contribs_list;
    OP_REQUIRES_OK(context,
                   context->output_list("right_node_contribs_list",
                                        &output_right_node_contribs_list));

    // Use identity later to convert float to Eigen::Matrix type for input to
    // CalculateWeightsAndGains. This op only supports single dimension logits.
    Eigen::MatrixXf identity;
    identity.setIdentity(1, 1);
    // Get the best split info per node for each feature.
    for (int feature_idx = 0; feature_idx < num_features_; ++feature_idx) {
      std::vector<float> cum_grad;
      std::vector<float> cum_hess;
      cum_grad.reserve(num_buckets);
      cum_hess.reserve(num_buckets);

      std::vector<int32> output_node_ids;
      std::vector<float> output_gains;
      std::vector<int32> output_thresholds;
      std::vector<float> output_left_node_contribs;
      std::vector<float> output_right_node_contribs;
      for (int node_id = node_id_first; node_id < node_id_last; ++node_id) {
        // Calculate gains.
        cum_grad.clear();
        cum_hess.clear();
        float total_grad = 0.0;
        float total_hess = 0.0;
        for (int bucket = 0; bucket < num_buckets; ++bucket) {
          // TODO(nponomareva): Consider multi-dimensional gradients/hessians.
          total_grad += stats_summary[feature_idx](node_id, bucket, 0);
          total_hess += stats_summary[feature_idx](node_id, bucket, 1);
          cum_grad.push_back(total_grad);
          cum_hess.push_back(total_hess);
        }
        // Check if node has enough of average hessian.
        if (total_hess < min_node_weight) {
          // Do not split the node because not enough avg hessian.
          continue;
        }
        float best_gain = std::numeric_limits<float>::lowest();
        float best_bucket = 0;
        float best_contrib_for_left = 0.0;
        float best_contrib_for_right = 0.0;
        // Parent gain.
        float parent_gain;
        Eigen::VectorXf unused(1);
        CalculateWeightsAndGains(total_grad * identity, total_hess * identity,
                                 l1, l2, &unused, &parent_gain);

        for (int bucket = 0; bucket < num_buckets; ++bucket) {
          const float cum_grad_bucket = cum_grad[bucket];
          const float cum_hess_bucket = cum_hess[bucket];
          // Left child.
          Eigen::VectorXf contrib_for_left(1);
          float gain_for_left;
          CalculateWeightsAndGains(cum_grad_bucket * identity,
                                   cum_hess_bucket * identity, l1, l2,
                                   &contrib_for_left, &gain_for_left);
          // Right child.
          // use contrib_for_right.
          Eigen::VectorXf contrib_for_right(1);
          float gain_for_right;
          CalculateWeightsAndGains((total_grad - cum_grad_bucket) * identity,
                                   (total_hess - cum_hess_bucket) * identity,
                                   l1, l2, &contrib_for_right,
                                   &gain_for_right);
          if (GainIsLarger(gain_for_left + gain_for_right, best_gain)) {
            best_gain = gain_for_left + gain_for_right;
            best_bucket = bucket;
            best_contrib_for_left = contrib_for_left[0];
            best_contrib_for_right = contrib_for_right[0];
          }
        }  // for bucket
        output_node_ids.push_back(node_id);
        // Remove the parent gain for the parent node.
        output_gains.push_back(best_gain - parent_gain);
        output_thresholds.push_back(best_bucket);
        output_left_node_contribs.push_back(best_contrib_for_left);
        output_right_node_contribs.push_back(best_contrib_for_right);
      }  // for node_id
      const int num_nodes = output_node_ids.size();
      // output_node_ids
      Tensor* output_node_ids_t;
      OP_REQUIRES_OK(context,
                     output_node_ids_list.allocate(feature_idx, {num_nodes},
                                                   &output_node_ids_t));
      auto output_node_ids_vec = output_node_ids_t->vec<int32>();
      // output_gains
      Tensor* output_gains_t;
      OP_REQUIRES_OK(context, output_gains_list.allocate(
                                  feature_idx, {num_nodes}, &output_gains_t));
      auto output_gains_vec = output_gains_t->vec<float>();
      // output_thresholds
      Tensor* output_thresholds_t;
      OP_REQUIRES_OK(context,
                     output_thresholds_list.allocate(feature_idx, {num_nodes},
                                                     &output_thresholds_t));
      auto output_thresholds_vec = output_thresholds_t->vec<int32>();
      // output_left_node_contribs
      Tensor* output_left_node_contribs_t;
      OP_REQUIRES_OK(context, output_left_node_contribs_list.allocate(
                                  feature_idx, {num_nodes, 1},
                                  &output_left_node_contribs_t));
      auto output_left_node_contribs_matrix =
          output_left_node_contribs_t->matrix<float>();
      // output_right_node_contribs
      Tensor* output_right_node_contribs_t;
      OP_REQUIRES_OK(context, output_right_node_contribs_list.allocate(
                                  feature_idx, {num_nodes, 1},
                                  &output_right_node_contribs_t));
      auto output_right_node_contribs_matrix =
          output_right_node_contribs_t->matrix<float>();
      // Sets output tensors from vectors.
      for (int i = 0; i < num_nodes; ++i) {
        output_node_ids_vec(i) = output_node_ids[i];
        // Adjust the gains to penalize by tree complexity.
        output_gains_vec(i) = output_gains[i] - tree_complexity;
        output_thresholds_vec(i) = output_thresholds[i];
        output_left_node_contribs_matrix(i, 0) = output_left_node_contribs[i];
        // This op only supports 1-dimensional logits.
        output_right_node_contribs_matrix(i, 0) =
            output_right_node_contribs[i];
      }
    }  // for f
  }

 private:
  int max_splits_;
  int num_features_;
};

// V1 op that only supports single dimensional logit.
REGISTER_KERNEL_BUILDER(
    Name("BoostedTreesCalculateBestGainsPerFeature").Device(DEVICE_CPU),
    BoostedTreesCalculateBestGainsPerFeatureOp);

// Deprecated op. Use BoostedTreesCalculateBestFeatureSplitOpV2.
class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel {
 public:
  explicit BoostedTreesCalculateBestFeatureSplitOp(
      OpKernelConstruction* const context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_));
    OP_REQUIRES_OK(context, context->GetAttr("split_type", &split_type_));
  }

  void Compute(OpKernelContext* const context) override {
    // node_id_range
    const Tensor* node_id_range_t;
    OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
    OP_REQUIRES(
        context, node_id_range_t->NumElements() == 2,
        errors::InvalidArgument("node_id_range argument must have shape [2]"));
    const auto node_id_range = node_id_range_t->vec<int32>();
    const int32_t node_id_first = node_id_range(0);  // inclusive
    const int32_t node_id_last = node_id_range(1);   // exclusive

    const Tensor* stats_summary_t;
    OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t));
    OP_REQUIRES(
        context, stats_summary_t->shape().dims() == 4,
        errors::InvalidArgument("stats_summary argument must have rank 4"));
    TTypes<float, 4>::ConstTensor stats_summary =
        stats_summary_t->tensor<float, 4>();
    const int32_t feature_dims = stats_summary_t->dim_size(1);
    // The last bucket is for default/missing value.
    const int32_t num_buckets = stats_summary_t->dim_size(2) - 1;
    const int32_t logits_dim = logits_dim_;
    const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim;
    DCHECK_GT(hessian_dim, 0);
    DCHECK_LE(hessian_dim, logits_dim * logits_dim);

    const Tensor* l1_t;
    OP_REQUIRES_OK(context, context->input("l1", &l1_t));
    OP_REQUIRES(context, l1_t->NumElements() == 1,
                errors::InvalidArgument("l1 argument must be a scalar"));
    const auto l1 = l1_t->scalar<float>()();
    DCHECK_GE(l1, 0);
    if (logits_dim_ > 1) {
      // Multi-class L1 regularization not supported yet.
      DCHECK_EQ(l1, 0);
    }

    const Tensor* l2_t;
    OP_REQUIRES_OK(context, context->input("l2", &l2_t));
    OP_REQUIRES(context, l2_t->NumElements() == 1,
                errors::InvalidArgument("l2 argument must be a scalar"));
    const auto l2 = l2_t->scalar<float>()();
    DCHECK_GE(l2, 0);

    const Tensor* tree_complexity_t;
    OP_REQUIRES_OK(context,
                   context->input("tree_complexity", &tree_complexity_t));
    OP_REQUIRES(
        context, tree_complexity_t->NumElements() == 1,
        errors::InvalidArgument("tree_complexity argument must be a scalar"));
    const auto tree_complexity = tree_complexity_t->scalar<float>()();

    const Tensor* min_node_weight_t;
    OP_REQUIRES_OK(context,
                   context->input("min_node_weight", &min_node_weight_t));
    OP_REQUIRES(
        context, min_node_weight_t->NumElements() == 1,
        errors::InvalidArgument("min_node_weight argument must be a scalar"));
    const auto min_node_weight = min_node_weight_t->scalar<float>()();

    std::vector<int32> output_node_ids;
    std::vector<float> output_gains;
    std::vector<int32> output_feature_dimensions;
    std::vector<int32> output_thresholds;
    std::vector<Eigen::VectorXf> output_left_node_contribs;
    std::vector<Eigen::VectorXf> output_right_node_contribs;
    std::vector<std::string> output_split_types;

    // TODO(tanzheny) parallelize the computation.
    // Iterate each node and find the best gain per node.
    for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) {
      float best_gain = std::numeric_limits<float>::lowest();
      int32_t best_bucket = 0;
      int32_t best_f_dim = 0;
      string best_split_type;
      Eigen::VectorXf best_contrib_for_left(logits_dim);
      Eigen::VectorXf best_contrib_for_right(logits_dim);
      float parent_gain;

      // Including default bucket.
      ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0),
                               num_buckets + 1, logits_dim + hessian_dim);
      const Eigen::VectorXf total_grad =
          stats_mat.leftCols(logits_dim).colwise().sum();
      const Eigen::VectorXf total_hess =
          stats_mat.rightCols(hessian_dim).colwise().sum();
      if (total_hess.norm() < min_node_weight) {
        continue;
      }
      Eigen::VectorXf parent_weight(logits_dim);
      CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight,
                               &parent_gain);
      if (split_type_ == "inequality") {
        CalculateBestInequalitySplit(
            stats_summary, node_id, feature_dims, logits_dim, hessian_dim,
            num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket,
            &best_f_dim, &best_split_type, &best_contrib_for_left,
            &best_contrib_for_right);
      } else {
        CalculateBestEqualitySplit(
            stats_summary, total_grad, total_hess, node_id, feature_dims,
            logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain,
            &best_bucket, &best_f_dim, &best_split_type,
            &best_contrib_for_left, &best_contrib_for_right);
      }
      if (best_gain == std::numeric_limits<float>::lowest()) {
        // Do not add the node if no split is found.
        continue;
      }
      output_node_ids.push_back(node_id);
      // Remove the parent gain for the parent node.
      output_gains.push_back(best_gain - parent_gain);
      output_feature_dimensions.push_back(best_f_dim);
      // default direction is fixed for dense splits.
      // TODO(tanzheny) account for default values.
      output_split_types.push_back(best_split_type);
      output_thresholds.push_back(best_bucket);
      output_left_node_contribs.push_back(best_contrib_for_left);
      output_right_node_contribs.push_back(best_contrib_for_right);
    }  // for node id
    const int num_nodes = output_node_ids.size();
    // output_node_ids
    Tensor* output_node_ids_t = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes},
                                                     &output_node_ids_t));
    auto output_node_ids_vec = output_node_ids_t->vec<int32>();
    // output_gains
    Tensor* output_gains_t;
    OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes},
                                                     &output_gains_t));
    auto output_gains_vec = output_gains_t->vec<float>();
    // output_feature_dimensions
    Tensor* output_feature_dimension_t;
    OP_REQUIRES_OK(context,
                   context->allocate_output("feature_dimensions", {num_nodes},
                                            &output_feature_dimension_t));
    auto output_feature_dimensions_vec =
        output_feature_dimension_t->vec<int32>();
    // output_thresholds
    Tensor* output_thresholds_t;
    OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes},
                                                     &output_thresholds_t));
    auto output_thresholds_vec = output_thresholds_t->vec<int32>();
    // output_left_node_contribs
    Tensor* output_left_node_contribs_t;
    OP_REQUIRES_OK(context, context->allocate_output(
                                "left_node_contribs", {num_nodes, logits_dim},
                                &output_left_node_contribs_t));
    auto output_left_node_contribs_matrix =
        output_left_node_contribs_t->matrix<float>();
    // output_right_node_contribs
    Tensor* output_right_node_contribs_t;
    OP_REQUIRES_OK(context, context->allocate_output(
                                "right_node_contribs", {num_nodes, logits_dim},
                                &output_right_node_contribs_t));
    auto output_right_node_contribs_matrix =
        output_right_node_contribs_t->matrix<float>();
    // split type
    Tensor* output_split_types_t;
    OP_REQUIRES_OK(
        context, context->allocate_output("split_with_default_directions",
                                          {num_nodes}, &output_split_types_t));
    auto output_split_types_vec = output_split_types_t->vec<tstring>();

    // Sets output tensors from vectors.
    for (int i = 0; i < num_nodes; ++i) {
      output_node_ids_vec(i) = output_node_ids[i];
      // Adjust the gains to penalize by tree complexity.
      output_gains_vec(i) = output_gains[i] - tree_complexity;
      output_feature_dimensions_vec(i) = output_feature_dimensions[i];
      output_thresholds_vec(i) = output_thresholds[i];
      for (int j = 0; j < logits_dim; ++j) {
        output_left_node_contribs_matrix(i, j) =
            output_left_node_contribs[i][j];
        output_right_node_contribs_matrix(i, j) =
            output_right_node_contribs[i][j];
      }
      output_split_types_vec(i) = output_split_types[i];
    }
  }

 private:
  // TODO(crawles): Simplify inequality path just like equality b/138329196
  // Currently this is not simplify-able due to numerical instability in math
  // i.e. gain = -g.transpose() * hessian_and_reg.colPivHouseholderQr().solve(g)
  // It caused gain to be Inf when g is approaching 0 but not exactly 0 while
  // there is no regularization.
  // Calculate the best inequality split per node.
  void CalculateBestInequalitySplit(
      TTypes<float, 4>::ConstTensor stats_summary, const int32_t node_id,
      const int32_t feature_dims, const int32_t logits_dim,
      const int32_t hessian_dim, const int32_t num_buckets,
      const float min_node_weight, const float l1, const float l2,
      float* best_gain, int32* best_bucket, int32* best_f_dim,
      string* best_split_type, Eigen::VectorXf* best_contrib_for_left,
      Eigen::VectorXf* best_contrib_for_right) {
    std::vector<Eigen::VectorXf> cum_grad;
    std::vector<Eigen::VectorXf> cum_hess;
    // get all cumulative gradients including default bucket.
    cum_grad.reserve(num_buckets);
    cum_hess.reserve(num_buckets);

    for (int f_dim = 0; f_dim < feature_dims; ++f_dim) {
      ConstVectorMap default_stats_vec(
          &stats_summary(node_id, f_dim, num_buckets, 0),
          logits_dim + hessian_dim);
      Eigen::VectorXf missing_bucket_grad = default_stats_vec.head(logits_dim);
      Eigen::VectorXf missing_bucket_hess = default_stats_vec.tail(hessian_dim);
      cum_grad.clear();
      cum_hess.clear();
      Eigen::VectorXf total_grad = Eigen::VectorXf::Zero(logits_dim);
      Eigen::VectorXf total_hess = Eigen::VectorXf::Zero(hessian_dim);
      // sum all the gradients including default bucket.
      for (int bucket = 0; bucket <= num_buckets; ++bucket) {
        for (int i = 0; i < logits_dim; ++i) {
          total_grad[i] += stats_summary(node_id, f_dim, bucket, i);
        }
        for (int i = 0; i < hessian_dim; ++i) {
          // Full hessian.
          total_hess[i] +=
              stats_summary(node_id, f_dim, bucket, logits_dim + i);
        }
        if (bucket < num_buckets) {
          cum_grad.push_back(total_grad);
          cum_hess.push_back(total_hess);
        }
      }
      const string kInequalityDefaultLeft =
          boosted_trees::SplitTypeWithDefault_Name(
              boosted_trees::INEQUALITY_DEFAULT_LEFT);
      const string kInequalityDefaultRight =
          boosted_trees::SplitTypeWithDefault_Name(
              boosted_trees::INEQUALITY_DEFAULT_RIGHT);

      // Iterate from left to right, excluding default bucket.
      for (int bucket = 0; bucket < num_buckets; ++bucket) {
        // default value goes to left node.
        const Eigen::VectorXf total_left_grad =
            cum_grad[bucket] + missing_bucket_grad;
        const Eigen::VectorXf total_left_hess =
            cum_hess[bucket] + missing_bucket_hess;
        MaybeUpdateBestSplit(
            total_left_grad, total_grad - total_left_grad, total_left_hess,
            total_hess - total_left_hess, logits_dim, bucket, f_dim, l1, l2,
            kInequalityDefaultLeft, best_gain, best_bucket, best_f_dim,
            best_split_type, best_contrib_for_left, best_contrib_for_right);
        // default value goes to right node.
        MaybeUpdateBestSplit(
            cum_grad[bucket], total_grad - cum_grad[bucket], cum_hess[bucket],
            total_hess - cum_hess[bucket], logits_dim, bucket, f_dim, l1, l2,
            kInequalityDefaultRight, best_gain, best_bucket, best_f_dim,
            best_split_type, best_contrib_for_left, best_contrib_for_right);
      }  // for bucket
    }
  }

  // Calculate the best equality split per node.
  void CalculateBestEqualitySplit(
      TTypes<float, 4>::ConstTensor stats_summary,
      const Eigen::VectorXf& total_grad, const Eigen::VectorXf& total_hess,
      const int32_t node_id, const int32_t feature_dims,
      const int32_t logits_dim, const int32_t hessian_dim,
      const int32_t num_buckets, const float l1, const float l2,
      float* best_gain, int32* best_bucket, int32* best_f_dim,
      string* best_split_type, Eigen::VectorXf* best_contrib_for_left,
      Eigen::VectorXf* best_contrib_for_right) {
    const string kEqualityDefaultRight =
        boosted_trees::SplitTypeWithDefault_Name(
            boosted_trees::EQUALITY_DEFAULT_RIGHT);
    for (int f_dim = 0; f_dim < feature_dims; ++f_dim) {
      for (int bucket = 0; bucket < num_buckets; ++bucket) {
        ConstVectorMap stats_vec(&stats_summary(node_id, f_dim, bucket, 0),
                                 logits_dim + hessian_dim);
        Eigen::VectorXf curr_grad = stats_vec.head(logits_dim);
        Eigen::VectorXf curr_hess = stats_vec.tail(hessian_dim);
        MaybeUpdateBestSplit(curr_grad, total_grad - curr_grad, curr_hess,
                             total_hess - curr_hess, logits_dim, bucket, f_dim,
                             l1, l2, kEqualityDefaultRight, best_gain,
                             best_bucket, best_f_dim, best_split_type,
                             best_contrib_for_left, best_contrib_for_right);
      }
    }
  }

  void MaybeUpdateBestSplit(const Eigen::VectorXf& grad_for_left,
                            const Eigen::VectorXf& grad_for_right,
                            const Eigen::VectorXf& hess_for_left,
                            const Eigen::VectorXf& hess_for_right,
                            const int32_t logits_dim, const int32_t bucket,
                            const int32_t f_dim, const float l1, const float l2,
                            const string split_type, float* best_gain,
                            int32* best_bucket, int32* best_f_dim,
                            string* best_split_type,
                            Eigen::VectorXf* best_contrib_for_left,
                            Eigen::VectorXf* best_contrib_for_right) {
    // Left child.
    Eigen::VectorXf contrib_for_left(logits_dim);
    float gain_for_left;
    CalculateWeightsAndGains(grad_for_left, hess_for_left, l1, l2,
                             &contrib_for_left, &gain_for_left);
    Eigen::VectorXf contrib_for_right(logits_dim);
    float gain_for_right;
    CalculateWeightsAndGains(grad_for_right, hess_for_right, l1, l2,
                             &contrib_for_right, &gain_for_right);
    if (GainIsLarger(gain_for_left + gain_for_right, *best_gain)) {
      *best_gain = gain_for_left + gain_for_right;
      *best_bucket = bucket;
      *best_f_dim = f_dim;
      *best_contrib_for_left = contrib_for_left;
      *best_contrib_for_right = contrib_for_right;
      *best_split_type = split_type;
    }
  }

  int logits_dim_;
  string split_type_;
};

// Deprecated op. Use BoostedTreesCalculateBestFeatureSplitOpV2.
REGISTER_KERNEL_BUILDER(
    Name("BoostedTreesCalculateBestFeatureSplit").Device(DEVICE_CPU),
    BoostedTreesCalculateBestFeatureSplitOp);

// V2 Op.
class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel {
 public:
  explicit BoostedTreesCalculateBestFeatureSplitV2(
      OpKernelConstruction* const context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_));
    OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_));
  }

  void Compute(OpKernelContext* const context) override {
    // node_id_range
    const Tensor* node_id_range_t;
    OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
    const auto node_id_range = node_id_range_t->vec<int32>();
    OP_REQUIRES(
        context, node_id_range_t->dims() == 1,
        errors::InvalidArgument("node_id_range must be a rank 1 tensor, but "
                                "given node_id_range has dims of ",
                                node_id_range_t->dims()));
    OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2,
                errors::InvalidArgument(
                    "node_id_range must be a rank 1 tensor with shape=[2], but "
                    "given node_id_range has shape ",
                    node_id_range_t->dim_size(0), " on its first dim"));
    const int32_t node_id_first = node_id_range(0);  // Inclusive.
    const int32_t node_id_last = node_id_range(1);   // Exclusive.

    // Get stats_summaries_list.
    OpInputList stats_summaries_list;
    OP_REQUIRES_OK(context, context->input_list("stats_summaries_list",
                                                &stats_summaries_list));

    // Infer dimensions of a stats_summary.
    DCHECK_GT(stats_summaries_list.size(), 0);
    const int32_t feature_dims = stats_summaries_list[0].dim_size(1);
    // The last bucket is for default/missing value.
    const int32_t num_buckets = stats_summaries_list[0].dim_size(2) - 1;
    const int32_t logits_dim = logits_dim_;
    const int32_t hessian_dim =
        stats_summaries_list[0].dim_size(3) - logits_dim;
    DCHECK_GT(hessian_dim, 0);
    DCHECK_LE(hessian_dim, logits_dim * logits_dim);

    // Vector of stats_summaries; each element is stats for feature of shape
    // [max_splits, feature_dim, num_buckets, logits_dim + hessian_dim].
    std::vector<TTypes<float, 4>::ConstTensor> stats_summaries;
    DCHECK_EQ(stats_summaries_list.size(), num_features_);
    stats_summaries.reserve(num_features_);
    for (const auto& tensor : stats_summaries_list) {
      stats_summaries.emplace_back(tensor.tensor<float, 4>());
    }

    // Split types.
    const Tensor* split_types_t;
    OP_REQUIRES_OK(context, context->input("split_types", &split_types_t));
    const auto split_types = split_types_t->vec<tstring>();
    DCHECK_EQ(split_types.size(), num_features_);
    // Validate.
    for (int i = 0; i < num_features_; ++i) {
      if (!(split_types(i) == kInequalitySplit ||
            split_types(i) == kEqualitySplit)) {
        OP_REQUIRES_OK(
            context,
            errors::Aborted(
                "Operation received an exception: Incorrect split type"));
      }
    }

    // Feature ids.
    const Tensor* candidate_feature_ids_t;
    OP_REQUIRES_OK(context, context->input("candidate_feature_ids",
                                           &candidate_feature_ids_t));
    const auto candidate_feature_ids = candidate_feature_ids_t->vec<int32>();
    DCHECK_EQ(candidate_feature_ids.size(), num_features_);

    // L1, L2, tree_complexity, min_node_weight.
    const Tensor* l1_t;
    OP_REQUIRES_OK(context, context->input("l1", &l1_t));
    const auto l1 = l1_t->scalar<float>()();
    DCHECK_GE(l1, 0);
    if (logits_dim_ > 1) {
      // Multi-class L1 regularization not supported yet.
      DCHECK_EQ(l1, 0);
    }
    const Tensor* l2_t;
    OP_REQUIRES_OK(context, context->input("l2", &l2_t));
    const auto l2 = l2_t->scalar<float>()();
    DCHECK_GE(l2, 0);
    const Tensor* tree_complexity_t;
    OP_REQUIRES_OK(context,
                   context->input("tree_complexity", &tree_complexity_t));
    const auto tree_complexity = tree_complexity_t->scalar<float>()();
    const Tensor* min_node_weight_t;
    OP_REQUIRES_OK(context,
                   context->input("min_node_weight", &min_node_weight_t));
    const auto min_node_weight = min_node_weight_t->scalar<float>()();

    std::vector<int32> output_node_ids;
    std::vector<float> output_gains;
    std::vector<int32> output_feature_ids;
    std::vector<int32> output_feature_dimensions;
    std::vector<int32> output_thresholds;
    std::vector<Eigen::VectorXf> output_left_node_contribs;
    std::vector<Eigen::VectorXf> output_right_node_contribs;
    std::vector<string> output_split_types;

    // TODO(tanzheny) parallelize the computation.
    // Iterate each node and find the best gain per node.
    float parent_gain;
    for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) {
      float best_gain = std::numeric_limits<float>::lowest();
      int32_t best_bucket;
      int32_t best_f_id;
      int32_t best_f_dim;
      string best_split_type;
      Eigen::VectorXf best_contrib_for_left(logits_dim);
      Eigen::VectorXf best_contrib_for_right(logits_dim);

      // Sum of gradient and hessian. Compute parent gain using first feature.
      ConstMatrixMap stats_mat(&stats_summaries[0](node_id, 0, 0, 0),
                               num_buckets + 1,  // Including default bucket.
                               logits_dim + hessian_dim);
      const Eigen::VectorXf total_grad =
          stats_mat.leftCols(logits_dim).colwise().sum();
      const Eigen::VectorXf total_hess =
          stats_mat.rightCols(hessian_dim).colwise().sum();
      if (total_hess.norm() < min_node_weight) {
        continue;
      }
      Eigen::VectorXf unused(logits_dim);
      CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &unused,
                               &parent_gain);

      for (int f_idx = 0; f_idx < num_features_; ++f_idx) {
        const string split_type = split_types(f_idx);
        TTypes<float, 4>::ConstTensor stats_summary = stats_summaries[f_idx];
        float f_best_gain = std::numeric_limits<float>::lowest();
        int32_t f_best_bucket;
        int32_t f_best_f_dim;
        string f_best_split_type;
        Eigen::VectorXf f_best_contrib_for_left(logits_dim);
        Eigen::VectorXf f_best_contrib_for_right(logits_dim);

        if (split_type == kInequalitySplit) {
          CalculateBestInequalitySplit(
              stats_summary, node_id, feature_dims, logits_dim, hessian_dim,
              num_buckets, min_node_weight, l1, l2, &f_best_gain,
              &f_best_bucket, &f_best_f_dim, &f_best_split_type,
              &f_best_contrib_for_left, &f_best_contrib_for_right);
        } else {
          CalculateBestEqualitySplit(
              stats_summary, total_grad, total_hess, node_id, feature_dims,
              logits_dim, hessian_dim, num_buckets, l1, l2, &f_best_gain,
              &f_best_bucket, &f_best_f_dim, &f_best_split_type,
              &f_best_contrib_for_left, &f_best_contrib_for_right);
        }
        if (f_best_gain > best_gain) {
          best_gain = f_best_gain;
          best_f_id = candidate_feature_ids(f_idx);
          best_f_dim = f_best_f_dim;
          best_split_type = f_best_split_type;
          best_bucket = f_best_bucket;
          best_contrib_for_left = f_best_contrib_for_left;
          best_contrib_for_right = f_best_contrib_for_right;
        }
      }  // For feature id.
      if (best_gain == std::numeric_limits<float>::lowest()) {
        // Do not add the node if no split is found.
        continue;
      }
      output_node_ids.push_back(node_id);
      // Remove the parent gain for the parent node.
      output_gains.push_back(best_gain - parent_gain);
      output_feature_ids.push_back(best_f_id);
      output_feature_dimensions.push_back(best_f_dim);
      // Default direction is fixed for dense splits.
      // TODO(tanzheny) account for default values.
      output_split_types.push_back(best_split_type);
      output_thresholds.push_back(best_bucket);
      output_left_node_contribs.push_back(best_contrib_for_left);
      output_right_node_contribs.push_back(best_contrib_for_right);
    }  // for node id.
    const int num_nodes = output_node_ids.size();
    // output_node_ids
    Tensor* output_node_ids_t = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes},
                                                     &output_node_ids_t));
    auto output_node_ids_vec = output_node_ids_t->vec<int32>();

    // output_gains
    Tensor* output_gains_t;
    OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes},
                                                     &output_gains_t));
    auto output_gains_vec = output_gains_t->vec<float>();

    // output_feature_ids
    Tensor* output_features_ids_t;
    OP_REQUIRES_OK(context, context->allocate_output("feature_ids", {num_nodes},
                                                     &output_features_ids_t));
    auto output_features_vec = output_features_ids_t->vec<int32>();

    // output_feature_dimensions
    Tensor* output_feature_dimension_t;
    OP_REQUIRES_OK(context,
                   context->allocate_output("feature_dimensions", {num_nodes},
                                            &output_feature_dimension_t));
    auto output_feature_dimensions_vec =
        output_feature_dimension_t->vec<int32>();

    // output_thresholds
    Tensor* output_thresholds_t;
    OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes},
                                                     &output_thresholds_t));
    auto output_thresholds_vec = output_thresholds_t->vec<int32>();

    // output_left_node_contribs
    Tensor* output_left_node_contribs_t;
    OP_REQUIRES_OK(context, context->allocate_output(
                                "left_node_contribs", {num_nodes, logits_dim},
                                &output_left_node_contribs_t));
    auto output_left_node_contribs_matrix =
        output_left_node_contribs_t->matrix<float>();

    // output_right_node_contribs
    Tensor* output_right_node_contribs_t;
    OP_REQUIRES_OK(context, context->allocate_output(
                                "right_node_contribs", {num_nodes, logits_dim},
                                &output_right_node_contribs_t));
    auto output_right_node_contribs_matrix =
        output_right_node_contribs_t->matrix<float>();

    // split type
    Tensor* output_split_types_t;
    OP_REQUIRES_OK(
        context, context->allocate_output("split_with_default_directions",
                                          {num_nodes}, &output_split_types_t));
    auto output_split_types_vec = output_split_types_t->vec<tstring>();

    // Sets output tensors from vectors.
    for (int i = 0; i < num_nodes; ++i) {
      output_node_ids_vec(i) = output_node_ids[i];
      output_features_vec(i) = output_feature_ids[i];
      // Adjust the gains to penalize by tree complexity.
      output_gains_vec(i) = output_gains[i] - tree_complexity;
      output_feature_dimensions_vec(i) = output_feature_dimensions[i];
      output_thresholds_vec(i) = output_thresholds[i];
      for (int j = 0; j < logits_dim; ++j) {
        output_left_node_contribs_matrix(i, j) =
            output_left_node_contribs[i][j];
        output_right_node_contribs_matrix(i, j) =
            output_right_node_contribs[i][j];
      }
      output_split_types_vec(i) = output_split_types[i];
    }
  }

 private:
  // TODO(crawles): Simplify inequality path just like equality b/138329196
  // Currently this is not simplify-able due to numerical instability in math
  // i.e. gain = -g.transpose() * hessian_and_reg.colPivHouseholderQr().solve(g)
  // It caused gain to be Inf when g is approaching 0 but not exactly 0 while
  // there is no regularization.
  // Calculate the best inequality split per node.
  void CalculateBestInequalitySplit(
      TTypes<float, 4>::ConstTensor stats_summary, const int32_t node_id,
      const int32_t feature_dims, const int32_t logits_dim,
      const int32_t hessian_dim, const int32_t num_buckets,
      const float min_node_weight, const float l1, const float l2,
      float* best_gain, int32* best_bucket, int32* best_f_dim,
      string* best_split_type, Eigen::VectorXf* best_contrib_for_left,
      Eigen::VectorXf* best_contrib_for_right) {
    std::vector<Eigen::VectorXf> cum_grad;
    std::vector<Eigen::VectorXf> cum_hess;
    // get all cumulative gradients including default bucket.
    cum_grad.reserve(num_buckets);
    cum_hess.reserve(num_buckets);

    for (int f_dim = 0; f_dim < feature_dims; ++f_dim) {
      ConstVectorMap default_stats_vec(
          &stats_summary(node_id, f_dim, num_buckets, 0),
          logits_dim + hessian_dim);
      Eigen::VectorXf missing_bucket_grad = default_stats_vec.head(logits_dim);
      Eigen::VectorXf missing_bucket_hess = default_stats_vec.tail(hessian_dim);
      cum_grad.clear();
      cum_hess.clear();
      Eigen::VectorXf total_grad = Eigen::VectorXf::Zero(logits_dim);
      Eigen::VectorXf total_hess = Eigen::VectorXf::Zero(hessian_dim);
      // sum all the gradients including default bucket.
      for (int bucket = 0; bucket <= num_buckets; ++bucket) {
        for (int i = 0; i < logits_dim; ++i) {
          total_grad[i] += stats_summary(node_id, f_dim, bucket, i);
        }
        for (int i = 0; i < hessian_dim; ++i) {
          // Full hessian.
          total_hess[i] +=
              stats_summary(node_id, f_dim, bucket, logits_dim + i);
        }
        if (bucket < num_buckets) {
          cum_grad.push_back(total_grad);
          cum_hess.push_back(total_hess);
        }
      }
      const string kInequalityDefaultLeft =
          boosted_trees::SplitTypeWithDefault_Name(
              boosted_trees::INEQUALITY_DEFAULT_LEFT);
      const string kInequalityDefaultRight =
          boosted_trees::SplitTypeWithDefault_Name(
              boosted_trees::INEQUALITY_DEFAULT_RIGHT);

      // Iterate from left to right, excluding default bucket.
      for (int bucket = 0; bucket < num_buckets; ++bucket) {
        // default value goes to left node.
        const Eigen::VectorXf total_left_grad =
            cum_grad[bucket] + missing_bucket_grad;
        const Eigen::VectorXf total_left_hess =
            cum_hess[bucket] + missing_bucket_hess;
        MaybeUpdateBestSplit(
            total_left_grad, total_grad - total_left_grad, total_left_hess,
            total_hess - total_left_hess, logits_dim, bucket, f_dim, l1, l2,
            kInequalityDefaultLeft, best_gain, best_bucket, best_f_dim,
            best_split_type, best_contrib_for_left, best_contrib_for_right);
        // default value goes to right node.
        MaybeUpdateBestSplit(
            cum_grad[bucket], total_grad - cum_grad[bucket], cum_hess[bucket],
            total_hess - cum_hess[bucket], logits_dim, bucket, f_dim, l1, l2,
            kInequalityDefaultRight, best_gain, best_bucket, best_f_dim,
            best_split_type, best_contrib_for_left, best_contrib_for_right);
      }  // for bucket
    }
  }

  // Calculate the best equality split per node.
  void CalculateBestEqualitySplit(
      TTypes<float, 4>::ConstTensor stats_summary,
      const Eigen::VectorXf& total_grad, const Eigen::VectorXf& total_hess,
      const int32_t node_id, const int32_t feature_dims,
      const int32_t logits_dim, const int32_t hessian_dim,
      const int32_t num_buckets, const float l1, const float l2,
      float* best_gain, int32* best_bucket, int32* best_f_dim,
      string* best_split_type, Eigen::VectorXf* best_contrib_for_left,
      Eigen::VectorXf* best_contrib_for_right) {
    const string kEqualityDefaultRight =
        boosted_trees::SplitTypeWithDefault_Name(
            boosted_trees::EQUALITY_DEFAULT_RIGHT);
    for (int f_dim = 0; f_dim < feature_dims; ++f_dim) {
      for (int bucket = 0; bucket < num_buckets; ++bucket) {
        ConstVectorMap stats_vec(&stats_summary(node_id, f_dim, bucket, 0),
                                 logits_dim + hessian_dim);
        Eigen::VectorXf curr_grad = stats_vec.head(logits_dim);
        Eigen::VectorXf curr_hess = stats_vec.tail(hessian_dim);
        MaybeUpdateBestSplit(curr_grad, total_grad - curr_grad, curr_hess,
                             total_hess - curr_hess, logits_dim, bucket, f_dim,
                             l1, l2, kEqualityDefaultRight, best_gain,
                             best_bucket, best_f_dim, best_split_type,
                             best_contrib_for_left, best_contrib_for_right);
      }
    }
  }

  void MaybeUpdateBestSplit(const Eigen::VectorXf& grad_for_left,
                            const Eigen::VectorXf& grad_for_right,
                            const Eigen::VectorXf& hess_for_left,
                            const Eigen::VectorXf& hess_for_right,
                            const int32_t logits_dim, const int32_t bucket,
                            const int32_t f_dim, const float l1, const float l2,
                            const string split_type, float* best_gain,
                            int32* best_bucket, int32* best_f_dim,
                            string* best_split_type,
                            Eigen::VectorXf* best_contrib_for_left,
                            Eigen::VectorXf* best_contrib_for_right) {
    // Left child.
    Eigen::VectorXf contrib_for_left(logits_dim);
    float gain_for_left;
    CalculateWeightsAndGains(grad_for_left, hess_for_left, l1, l2,
                             &contrib_for_left, &gain_for_left);
    Eigen::VectorXf contrib_for_right(logits_dim);
    float gain_for_right;
    CalculateWeightsAndGains(grad_for_right, hess_for_right, l1, l2,
                             &contrib_for_right, &gain_for_right);
    if (GainIsLarger(gain_for_left + gain_for_right, *best_gain)) {
      *best_gain = gain_for_left + gain_for_right;
      *best_bucket = bucket;
      *best_f_dim = f_dim;
      *best_contrib_for_left = contrib_for_left;
      *best_contrib_for_right = contrib_for_right;
      *best_split_type = split_type;
    }
  }

  int num_features_;
  int logits_dim_;
};

// v2 op that supports multi-class.
REGISTER_KERNEL_BUILDER(
    Name("BoostedTreesCalculateBestFeatureSplitV2").Device(DEVICE_CPU),
    BoostedTreesCalculateBestFeatureSplitV2);

// Map from bucket id to vector of statistics.
typedef std::map<int32, std::vector<float>> BucketMap;
typedef BucketMap::iterator BucketMapIterator;
// Map from feature dimension to BucketMap.
typedef std::map<int32, BucketMap> FeatureMap;
typedef FeatureMap::iterator FeatureMapIterator;

class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel {
 public:
  explicit BoostedTreesSparseCalculateBestFeatureSplitOp(
      OpKernelConstruction* const context)
      : OpKernel(context) {
    // TODO(crawles): Using logits_dim_ for multi-class split.
    OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_));
    // TODO(tanzheny): Using this for equality split.
    OP_REQUIRES_OK(context, context->GetAttr("split_type", &split_type_));
  }

  void Compute(OpKernelContext* const context) override {
    // node_id_range
    const Tensor* node_id_range_t;
    OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
    const auto node_id_range = node_id_range_t->vec<int32>();
    const int32_t node_id_first = node_id_range(0);  // inclusive
    const int32_t node_id_last = node_id_range(1);   // exclusive

    const Tensor* stats_summary_indices_t;
    OP_REQUIRES_OK(context, context->input("stats_summary_indices",
                                           &stats_summary_indices_t));
    const auto stats_summary_indices = stats_summary_indices_t->matrix<int32>();
    const int32_t num_sparse_entries = stats_summary_indices_t->dim_size(0);

    const Tensor* stats_summary_values_t;
    OP_REQUIRES_OK(context, context->input("stats_summary_values",
                                           &stats_summary_values_t));
    const auto stats_summary_values = stats_summary_values_t->vec<float>();

    const Tensor* stats_summary_shape_t;
    OP_REQUIRES_OK(
        context, context->input("stats_summary_shape", &stats_summary_shape_t));
    const auto stats_summary_shape = stats_summary_shape_t->vec<int32>();
    const int32_t num_buckets = stats_summary_shape(2) - 1;
    const int32_t stats_dims = stats_summary_shape(3);

    const Tensor* l1_t;
    OP_REQUIRES_OK(context, context->input("l1", &l1_t));
    const auto l1 = l1_t->scalar<float>()();

    const Tensor* l2_t;
    OP_REQUIRES_OK(context, context->input("l2", &l2_t));
    const auto l2 = l2_t->scalar<float>()();

    const Tensor* tree_complexity_t;
    OP_REQUIRES_OK(context,
                   context->input("tree_complexity", &tree_complexity_t));
    const auto tree_complexity = tree_complexity_t->scalar<float>()();

    const Tensor* min_node_weight_t;
    OP_REQUIRES_OK(context,
                   context->input("min_node_weight", &min_node_weight_t));
    const auto min_node_weight = min_node_weight_t->scalar<float>()();

    std::vector<int32> output_node_ids;
    std::vector<float> output_gains;
    std::vector<int32> output_feature_dimensions;
    std::vector<int32> output_thresholds;
    std::vector<float> output_left_node_contribs;
    std::vector<float> output_right_node_contribs;
    std::vector<string> output_split_types;

    FeatureMap f_map;

    int32_t previous_node_id = -1;
    for (int idx = 0; idx < num_sparse_entries; ++idx) {
      int32_t node_id = stats_summary_indices(idx, 0);
      if (node_id != previous_node_id) {
        process_node(f_map, &output_node_ids, &output_gains,
                     &output_feature_dimensions, &output_thresholds,
                     &output_left_node_contribs, &output_right_node_contribs,
                     &output_split_types, previous_node_id, min_node_weight,
                     l1, l2, num_buckets);
        f_map.clear();
      }
      previous_node_id = node_id;
      DCHECK_LE(node_id_first, node_id);
      DCHECK_LT(node_id, node_id_last);
      const int32_t feature_dim = stats_summary_indices(idx, 1);
      const int32_t bucket_id = stats_summary_indices(idx, 2);
      const int32_t stat_dim = stats_summary_indices(idx, 3);
      OP_REQUIRES(context, stat_dim < stats_dims,
                  errors::InvalidArgument(
                      "Stat dim, the sum of logits dim and hessian dim in "
                      "stats_summary_indices, cannot be greater than stats "
                      "dims, the last value in stats_summary_shape, which was ",
                      stats_dims, ". At index (", idx,
                      ", 4), stats_summary_indices contains value ", stat_dim));
      std::pair<FeatureMapIterator, bool> const& f_insert_result = f_map.insert(
          FeatureMapIterator::value_type(feature_dim, BucketMap()));
      auto& b_map = f_insert_result.first->second;
      std::pair<BucketMapIterator, bool> const& b_insert_result =
          b_map.insert(BucketMapIterator::value_type(
              bucket_id, std::vector<float>(stats_dims)));
      auto& stats = b_insert_result.first->second;
      stats[stat_dim] = stats_summary_values(idx);
    }  // for node_id
    // process the last node id
    process_node(f_map, &output_node_ids, &output_gains,
                 &output_feature_dimensions, &output_thresholds,
                 &output_left_node_contribs, &output_right_node_contribs,
                 &output_split_types, previous_node_id, min_node_weight, l1,
                 l2, num_buckets);

    const int num_nodes = output_node_ids.size();
    // output_node_ids
    Tensor* output_node_ids_t = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes},
                                                     &output_node_ids_t));
    auto output_node_ids_vec = output_node_ids_t->vec<int32>();

    // output_gains
    Tensor* output_gains_t;
    OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes},
                                                     &output_gains_t));
    auto output_gains_vec = output_gains_t->vec<float>();

    // output_feature_dimensions
    Tensor* output_feature_dimension_t;
    OP_REQUIRES_OK(context,
                   context->allocate_output("feature_dimensions", {num_nodes},
                                            &output_feature_dimension_t));
    auto output_feature_dimensions_vec =
        output_feature_dimension_t->vec<int32>();

    // output_thresholds
    Tensor* output_thresholds_t;
    OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes},
                                                     &output_thresholds_t));
    auto output_thresholds_vec = output_thresholds_t->vec<int32>();

    // output_left_node_contribs
    Tensor* output_left_node_contribs_t;
    OP_REQUIRES_OK(
        context, context->allocate_output("left_node_contribs", {num_nodes, 1},
                                          &output_left_node_contribs_t));
    auto output_left_node_contribs_matrix =
        output_left_node_contribs_t->matrix<float>();

    // output_right_node_contribs
    Tensor* output_right_node_contribs_t;
    OP_REQUIRES_OK(
        context, context->allocate_output("right_node_contribs", {num_nodes, 1},
                                          &output_right_node_contribs_t));
    auto output_right_node_contribs_matrix =
        output_right_node_contribs_t->matrix<float>();

    // split type
    Tensor* output_split_types_t;
    OP_REQUIRES_OK(
        context, context->allocate_output("split_with_default_directions",
                                          {num_nodes}, &output_split_types_t));
    auto output_split_types_vec = output_split_types_t->vec<tstring>();

    // Sets output tensors from vectors.
    for (int i = 0; i < num_nodes; ++i) {
      output_node_ids_vec(i) = output_node_ids[i];
      // Adjust the gains to penalize by tree complexity.
      output_gains_vec(i) = output_gains[i] - tree_complexity;
      output_feature_dimensions_vec(i) = output_feature_dimensions[i];
      output_thresholds_vec(i) = output_thresholds[i];
      // TODO(crawles): change this for multi-class.
      output_left_node_contribs_matrix(i, 0) = output_left_node_contribs[i];
      output_right_node_contribs_matrix(i, 0) = output_right_node_contribs[i];
      output_split_types_vec(i) = output_split_types[i];
    }
  }

 protected:
  void process_node(const FeatureMap& f_map,
                    std::vector<int32>* output_node_ids,
                    std::vector<float>* output_gains,
                    std::vector<int32>* output_feature_dimensions,
                    std::vector<int32>* output_thresholds,
                    std::vector<float>* output_left_node_contribs,
                    std::vector<float>* output_right_node_contribs,
                    std::vector<string>* output_split_types,
                    const int32_t node_id, const float min_node_weight,
                    const float l1, const float l2,
                    const int32_t num_buckets) {
    float parent_gain;
    Eigen::VectorXf unused(logits_dim_);
    Eigen::MatrixXf identity;
    identity.setIdentity(1, 1);

    // start processing for previous node id.
    float best_gain = std::numeric_limits<float>::lowest();
    float best_bucket = 0;
    float best_f_dim = 0;
    string best_split_type = boosted_trees::SplitTypeWithDefault_Name(
        boosted_trees::INEQUALITY_DEFAULT_LEFT);
    float best_contrib_for_left = 0.0;
    float best_contrib_for_right = 0.0;
    // the sum of gradients including default bucket.
    float total_grad = 0;
    // the sum of hessians including default bucket.
    float total_hess = 0;
    for (auto f_iter = f_map.begin(); f_iter != f_map.end(); ++f_iter) {
      const int32_t feature_dim = f_iter->first;
      const auto buckets_to_stats_map = f_iter->second;

      // The very last bucket contains stats for missing values.
      // TODO(crawles): use vector for multi-class.
      const float default_grad =
          (buckets_to_stats_map.find(num_buckets) == buckets_to_stats_map.end()
               ? 0
               : buckets_to_stats_map.at(num_buckets)[0]);
      const float default_hess =
          (buckets_to_stats_map.find(num_buckets) == buckets_to_stats_map.end()
               ? 0
               : buckets_to_stats_map.at(num_buckets)[1]);

      if (f_iter == f_map.begin()) {
        // first get the sum of grads, including default bucket.
        for (auto b_iter = buckets_to_stats_map.begin();
             b_iter != buckets_to_stats_map.end(); ++b_iter) {
          total_grad += b_iter->second[0];
          total_hess += b_iter->second[1];
        }
        if (total_hess < min_node_weight) {
          // Do not split the node because not enough avg hessian.
          break;
        }
        CalculateWeightsAndGains(total_grad * identity, total_hess * identity,
                                 l1, l2, &unused, &parent_gain);
      }

      float total_left_grad = 0;
      float total_left_hess = 0;
      for (auto b_iter = buckets_to_stats_map.begin();
           b_iter != buckets_to_stats_map.end(); ++b_iter) {
        const int32_t bucket_id = b_iter->first;
        // total_left_stats should exclude stats from default bucket.
        if (bucket_id == num_buckets) {
          break;
        }
        // TODO(crawles): vector for multi-class.
        total_left_grad += b_iter->second[0];
        total_left_hess += b_iter->second[1];
        // From left to right, default right.
        // Left child.
        Eigen::VectorXf contrib_for_left(1);
        float gain_for_left;
        CalculateWeightsAndGains(total_left_grad * identity,
                                 total_left_hess * identity, l1, l2,
                                 &contrib_for_left, &gain_for_left);
        // Right child.
        Eigen::VectorXf contrib_for_right(1);
        float gain_for_right;
        CalculateWeightsAndGains((total_grad - total_left_grad) * identity,
                                 (total_hess - total_left_hess) * identity,
                                 l1, l2, &contrib_for_right, &gain_for_right);
        if (GainIsLarger(gain_for_left + gain_for_right, best_gain)) {
          best_gain = gain_for_left + gain_for_right;
          best_bucket = bucket_id;
          best_f_dim = feature_dim;
          best_split_type = boosted_trees::SplitTypeWithDefault_Name(
              boosted_trees::INEQUALITY_DEFAULT_RIGHT);
          best_contrib_for_left = contrib_for_left[0];
          best_contrib_for_right = contrib_for_right[0];
        }

        // From right to left, default left.
        CalculateWeightsAndGains((total_left_grad + default_grad) * identity,
                                 (total_left_hess + default_hess) * identity,
                                 l1, l2, &contrib_for_left, &gain_for_left);
        CalculateWeightsAndGains(
            (total_grad - default_grad - total_left_grad) * identity,
            (total_hess - default_hess - total_left_hess) * identity, l1, l2,
            &contrib_for_right, &gain_for_right);
        if (GainIsLarger(gain_for_left + gain_for_right, best_gain)) {
          best_gain = gain_for_left + gain_for_right;
          best_bucket = bucket_id;
          best_f_dim = feature_dim;
          best_split_type = boosted_trees::SplitTypeWithDefault_Name(
              boosted_trees::INEQUALITY_DEFAULT_LEFT);
          best_contrib_for_left = contrib_for_left[0];
          best_contrib_for_right = contrib_for_right[0];
        }
      }  // for bucket_id
    }  // for feature_dim
    if (best_gain != std::numeric_limits<float>::lowest()) {
      output_node_ids->push_back(node_id);
      // Remove the parent gain.
      output_gains->push_back(best_gain - parent_gain);
      output_feature_dimensions->push_back(best_f_dim);
      output_split_types->push_back(best_split_type);
      output_thresholds->push_back(best_bucket);
      output_left_node_contribs->push_back(best_contrib_for_left);
      output_right_node_contribs->push_back(best_contrib_for_right);
    }
  }

 private:
  int logits_dim_;
  string split_type_;
};

REGISTER_KERNEL_BUILDER(
    Name("BoostedTreesSparseCalculateBestFeatureSplit").Device(DEVICE_CPU),
    BoostedTreesSparseCalculateBestFeatureSplitOp);

class BoostedTreesMakeStatsSummaryOp : public OpKernel {
 public:
  explicit BoostedTreesMakeStatsSummaryOp(OpKernelConstruction* const context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_));
    OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_));
    OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_));
  }

  void Compute(OpKernelContext* const context) override {
    // node_ids
    const Tensor* node_ids_t;
    OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t));
    const auto node_ids = node_ids_t->vec<int32>();
    // gradients
    const Tensor* gradients_t;
    OP_REQUIRES_OK(context, context->input("gradients", &gradients_t));
    const auto gradients = gradients_t->matrix<float>();
    // hessians
    const Tensor* hessians_t;
    OP_REQUIRES_OK(context, context->input("hessians", &hessians_t));
    const auto hessians = hessians_t->matrix<float>();
    // bucketized_features
    OpInputList bucketized_features_list;
    OP_REQUIRES_OK(context, context->input_list("bucketized_features_list",
                                                &bucketized_features_list));
    // Infer batch size.
    const int64_t batch_size = node_ids_t->dim_size(0);

    // Allocate temporary stats tensor (Rank 4).
    Tensor temp_stats_double_t;
    OP_REQUIRES_OK(context,
                   context->allocate_temp(
                       DT_DOUBLE,
                       {num_features_, max_splits_, num_buckets_, 2},
                       &temp_stats_double_t));
    auto temp_stats_double = temp_stats_double_t.tensor<double, 4>();
    temp_stats_double.setZero();

    // Partition by node, and then bucketize.
    for (int feature_idx = 0; feature_idx < num_features_; ++feature_idx) {
      const auto& features = bucketized_features_list[feature_idx].vec<int32>();
      for (int i = 0; i < batch_size; ++i) {
        const int32_t node = node_ids(i);
        const int32_t bucket = features(i);
        temp_stats_double(feature_idx, node, bucket, 0) += gradients(i, 0);
        temp_stats_double(feature_idx, node, bucket, 1) += hessians(i, 0);
      }
    }

    // Copy temp tensor over to output tensor.
Tensor* output_stats_summary_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output( "stats_summary", temp_stats_double_t.shape(), &output_stats_summary_t)); output_stats_summary_t->tensor<float, 4>() = temp_stats_double.template cast<float>(); } private: int max_splits_; int num_buckets_; int num_features_; }; REGISTER_KERNEL_BUILDER(Name("BoostedTreesMakeStatsSummary").Device(DEVICE_CPU), BoostedTreesMakeStatsSummaryOp); // TODO(tanzheny): Add an option of default value into the API interface. class BoostedTreesAggregateStatsOp : public OpKernel { public: explicit BoostedTreesAggregateStatsOp(OpKernelConstruction* const context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); } void Compute(OpKernelContext* const context) override { // node_ids. const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); const auto node_ids = node_ids_t->vec<int32>(); // gradients. const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); const auto gradients = gradients_t->matrix<float>(); // hessians. const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); const auto hessians = hessians_t->matrix<float>(); // feature. const Tensor* feature_t; OP_REQUIRES_OK(context, context->input("feature", &feature_t)); const auto feature = feature_t->matrix<int32>(); // Infer batch size, feature dimension and stats dimension. const int64_t batch_size = node_ids_t->dim_size(0); const int64_t logits_dims = gradients_t->dim_size(1); const int64_t hessians_dims = hessians_t->dim_size(1); const int64_t stats_dims = logits_dims + hessians_dims; const int64_t feature_dims = feature_t->dim_size(1); // Allocate temporary stats tensor (Rank 4), upcasting to double. // A default bucket is added to the end for missing/default values. Tensor temp_stats_double_t; OP_REQUIRES_OK( context, context->allocate_temp( DT_DOUBLE, {max_splits_, feature_dims, num_buckets_ + 1, stats_dims}, &temp_stats_double_t)); auto temp_stats_double = temp_stats_double_t.tensor<double, 4>(); temp_stats_double.setZero(); for (int i = 0; i < batch_size; ++i) { const int32_t node = node_ids(i); for (int feature_dim = 0; feature_dim < feature_dims; ++feature_dim) { const int32_t feature_value = feature(i, feature_dim); const int32_t bucket = (feature_value == -1) ? num_buckets_ : feature_value; for (int stat_dim = 0; stat_dim < logits_dims; ++stat_dim) { temp_stats_double(node, feature_dim, bucket, stat_dim) += gradients(i, stat_dim); } for (int stat_dim = logits_dims; stat_dim < stats_dims; ++stat_dim) { temp_stats_double(node, feature_dim, bucket, stat_dim) += hessians(i, stat_dim - logits_dims); } } } // Copy temp tensor over to output tensor, downcasting to float. Tensor* output_stats_summary_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output( "stats_summary", temp_stats_double_t.shape(), &output_stats_summary_t)); output_stats_summary_t->tensor<float, 4>() = temp_stats_double.template cast<float>(); } private: int max_splits_; int num_buckets_; }; REGISTER_KERNEL_BUILDER(Name("BoostedTreesAggregateStats").Device(DEVICE_CPU), BoostedTreesAggregateStatsOp); // Key based on node id, feature dimension and bucket id. 
struct StatsPartitionKey { StatsPartitionKey(const int32_t node_id, const int32_t feature_dim, const int32_t bucket_id) : node_id(node_id), feature_dim(feature_dim), bucket_id(bucket_id) {} bool operator==(const StatsPartitionKey& other) const { return (node_id == other.node_id) && (feature_dim == other.feature_dim) && (bucket_id == other.bucket_id); } // Compare for StatsPartitionKey. struct Less { bool operator()(const StatsPartitionKey& a, const StatsPartitionKey& b) const { if (a.node_id < b.node_id) { return true; } if ((a.node_id == b.node_id) && (a.feature_dim < b.feature_dim)) { return true; } if ((a.node_id == b.node_id) && (a.feature_dim == b.feature_dim) && (a.bucket_id < b.bucket_id)) { return true; } return false; } }; // Tree node id. int32 node_id; // Dimension within feature column. int32 feature_dim; // bucketized feature value . int32 bucket_id; }; typedef std::map<StatsPartitionKey, std::vector<float>, StatsPartitionKey::Less> StatsPartitionMap; typedef StatsPartitionMap::iterator StatsPartitionIterator; // Key based on instance and feature dimension. struct InstanceFeatureDimKey { InstanceFeatureDimKey() : instance(-1), feature_dim(-1) {} InstanceFeatureDimKey(const int32_t instance, const int32_t feature_dim) : instance(instance), feature_dim(feature_dim) {} bool operator==(const InstanceFeatureDimKey& other) const { return (instance == other.instance) && (feature_dim == other.feature_dim); } // Compare for InstanceFeatureDimKey. struct Less { bool operator()(const InstanceFeatureDimKey& a, const InstanceFeatureDimKey& b) const { if (a.instance < b.instance) { return true; } if ((a.instance == b.instance) && (a.feature_dim < b.feature_dim)) { return true; } return false; } }; // Instance id within a batch. int32 instance; // Dimension within feature column. int32 feature_dim; }; // Add statistics to StatsPartitionMap for (instance, feature dim, bucket id). static void AddInstanceStatsToMap( const int32_t instance, const int32_t feature_dim, const int32_t bucket_id, const int32_t logits_dims, const int32_t stats_dims, StatsPartitionMap* stats_map, const TTypes<float>::ConstMatrix& gradients, const TTypes<float>::ConstMatrix& hessians, const TTypes<int32>::ConstVec& node_ids) { const int32_t node_id = node_ids(instance); const auto key = StatsPartitionKey(node_id, feature_dim, bucket_id); std::pair<StatsPartitionIterator, bool> const& insert_result = stats_map->insert(StatsPartitionIterator::value_type( key, std::vector<float>(stats_dims, 0.0f))); auto& stats = insert_result.first->second; for (int stat_dim = 0; stat_dim < logits_dims; ++stat_dim) { stats[stat_dim] += gradients(instance, stat_dim); } for (int stat_dim = logits_dims; stat_dim < stats_dims; ++stat_dim) { stats[stat_dim] += hessians(instance, stat_dim - logits_dims); } } // Add statistics to StatsPartitionMap for bucket_id ranging from // (start_instance, start_feature_dim) to (end_instance, end_feature_dim), // inclusive on start and end instances, exclusive on end feature dim. 
static void AddRangeStats(const int start_instance, const int end_instance, const int start_feature_dim, const int end_feature_dim, StatsPartitionMap* stats_map, const TTypes<float>::ConstMatrix& gradients, const TTypes<float>::ConstMatrix& hessians, const TTypes<int32>::ConstVec& node_ids, const int32_t feature_dims, const int32_t bucket_id, const int32_t logits_dims, const int32_t stats_dims) { DCHECK_LE(start_instance, end_instance); if (start_instance == end_instance) { DCHECK_LT(start_feature_dim, end_feature_dim); } for (int32_t instance = start_instance; instance <= end_instance; ++instance) { const int32_t start_f_dim = (instance == start_instance) ? start_feature_dim + 1 : 0; const int32_t end_f_dim = (instance == end_instance) ? end_feature_dim : feature_dims; for (int32_t f_dim = start_f_dim; f_dim < end_f_dim; ++f_dim) { AddInstanceStatsToMap(instance, f_dim, bucket_id, logits_dims, stats_dims, stats_map, gradients, hessians, node_ids); } } } class BoostedTreesSparseAggregateStatsOp : public OpKernel { public: explicit BoostedTreesSparseAggregateStatsOp( OpKernelConstruction* const context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); } void Compute(OpKernelContext* const context) override { // node_ids. const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); const auto node_ids = node_ids_t->vec<int32>(); // gradients. const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); const auto gradients = gradients_t->matrix<float>(); // hessians. const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); const auto hessians = hessians_t->matrix<float>(); // feature indices. const Tensor* feature_indices_t; OP_REQUIRES_OK(context, context->input("feature_indices", &feature_indices_t)); const auto feature_indices = feature_indices_t->matrix<int32>(); // feature values. const Tensor* feature_values_t; OP_REQUIRES_OK(context, context->input("feature_values", &feature_values_t)); const auto feature_values = feature_values_t->vec<int32>(); // feature shape. const Tensor* feature_shape_t; OP_REQUIRES_OK(context, context->input("feature_shape", &feature_shape_t)); OP_REQUIRES(context, TensorShapeUtils::IsVector(feature_shape_t->shape()), errors::InvalidArgument( "Input shapes should be a vector but received shapes ", feature_shape_t->shape().DebugString())); const auto feature_shape = feature_shape_t->vec<int32>(); const int64_t batch_size = gradients_t->dim_size(0); const int64_t logits_dims = gradients_t->dim_size(1); const int64_t hessians_dims = hessians_t->dim_size(1); const int64_t stats_dims = logits_dims + hessians_dims; const int64_t num_sparse_entries = feature_indices_t->dim_size(0); const int32_t feature_dims = feature_shape(1); DCHECK_LE(num_sparse_entries, batch_size * feature_dims); // Aggregate statistics info to map. StatsPartitionMap stats_map; int prev_instance = 0; int prev_f_dim = -1; for (int i = 0; i < num_sparse_entries; ++i) { // the instance number within a batch const int32_t instance = feature_indices(i, 0); DCHECK_LE(instance, batch_size); DCHECK_GE(instance, prev_instance); // the node id within a tree. const int32_t node_id = node_ids(instance); DCHECK_LE(node_id, max_splits_); // the feature dimension. const int32_t f_dim = feature_indices(i, 1); DCHECK_LE(f_dim, feature_dims); // the bucket id of the value. 
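      // (Entries absent from the sparse input never reach this point; they are
      // back-filled into the trailing default bucket by the AddRangeStats
      // calls below.)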
const int32_t bucket_id = feature_values(i); DCHECK_LE(bucket_id, num_buckets_); // Add statistics for the missing entries into default bucket. // The last bucket is default bucket. const int missing_entry_bucket = num_buckets_; AddRangeStats(prev_instance, instance, prev_f_dim, f_dim, &stats_map, gradients, hessians, node_ids, feature_dims, missing_entry_bucket, logits_dims, stats_dims); prev_instance = instance; prev_f_dim = f_dim; // Add statistics for the non-missing entry into // (cur_instance, cur_f_dim, bucket_id). AddInstanceStatsToMap(instance, f_dim, bucket_id, logits_dims, stats_dims, &stats_map, gradients, hessians, node_ids); } AddRangeStats(prev_instance, batch_size - 1, prev_f_dim, feature_dims, &stats_map, gradients, hessians, node_ids, feature_dims, num_buckets_, logits_dims, stats_dims); // Serialize statistics info map to tensor output. const int64_t num_slots = stats_map.size() * stats_dims; Tensor* summary_indices_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("stats_summary_indices", TensorShape({num_slots, 4}), &summary_indices_t)); auto summary_indices = summary_indices_t->matrix<int32>(); Tensor* summary_values_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("stats_summary_values", TensorShape({num_slots}), &summary_values_t)); auto summary_values = summary_values_t->vec<float>(); int entry_index = 0; for (auto& iter : stats_map) { for (int stat_dim = 0; stat_dim < stats_dims; ++stat_dim) { summary_indices(entry_index, 0) = iter.first.node_id; summary_indices(entry_index, 1) = iter.first.feature_dim; summary_indices(entry_index, 2) = iter.first.bucket_id; summary_indices(entry_index, 3) = stat_dim; summary_values(entry_index) = iter.second[stat_dim]; ++entry_index; } } Tensor* summary_shape_t = nullptr; OP_REQUIRES_OK( context, context->allocate_output("stats_summary_shape", TensorShape({4}), &summary_shape_t)); auto summary_shape = summary_shape_t->vec<int32>(); summary_shape(0) = max_splits_; summary_shape(1) = feature_dims; summary_shape(2) = num_buckets_ + 1; summary_shape(3) = stats_dims; } private: int max_splits_; int num_buckets_; }; REGISTER_KERNEL_BUILDER( Name("BoostedTreesSparseAggregateStats").Device(DEVICE_CPU), BoostedTreesSparseAggregateStatsOp); } // namespace tensorflow
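// ----------------------------------------------------------------------------
// A minimal consumer-side sketch (not part of the kernels above) of how the
// (indices, values, shape) triple emitted by BoostedTreesSparseAggregateStats
// maps back onto the logical dense summary
// [max_splits, feature_dims, num_buckets + 1, stats_dims]. The helper name and
// the flat std::vector layout are hypothetical, for illustration only.

#include <array>
#include <cstdint>
#include <vector>

static std::vector<float> DensifySparseStatsSummary(
    const std::vector<std::array<int32_t, 4>>& indices,  // (node, dim, bucket, stat)
    const std::vector<float>& values,
    const std::array<int64_t, 4>& shape) {
  std::vector<float> dense(
      static_cast<size_t>(shape[0] * shape[1] * shape[2] * shape[3]), 0.0f);
  for (size_t i = 0; i < indices.size(); ++i) {
    const std::array<int32_t, 4>& ix = indices[i];
    // Row-major flattening; the dimension order matches stats_summary_shape.
    const int64_t flat =
        ((ix[0] * shape[1] + ix[1]) * shape[2] + ix[2]) * shape[3] + ix[3];
    dense[static_cast<size_t>(flat)] = values[i];
  }
  return dense;
}
// ----------------------------------------------------------------------------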
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <limits> #include <string> #include <vector> #include "third_party/eigen3/Eigen/Core" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/kernels/boosted_trees/boosted_trees.pb.h" #include "tensorflow/core/kernels/boosted_trees/tree_helper.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" namespace tensorflow { using Matrix = Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>; using ConstMatrixMap = Eigen::Map<const Matrix>; using MatrixMap = Eigen::Map<Matrix>; using ConstVectorMap = Eigen::Map<const Eigen::VectorXf>; using VectorMap = Eigen::Map<Eigen::VectorXf>; constexpr char kInequalitySplit[] = "inequality"; constexpr char kEqualitySplit[] = "equality"; // V1 Op. Deprecated. BoostedTreesCalculateBestFeatureSplitOpV2 is V2. class BoostedTreesCalculateBestGainsPerFeatureOp : public OpKernel { public: explicit BoostedTreesCalculateBestGainsPerFeatureOp( OpKernelConstruction* const context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); } void Compute(OpKernelContext* const context) override { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); OP_REQUIRES( context, node_id_range_t->dims() == 1, errors::InvalidArgument("node_id_range must be a rank 1 tensor, but " "given node_id_range has dims of ", node_id_range_t->dims())); OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2, errors::InvalidArgument( "node_id_range must be a rank 1 tensor with shape=[2], but " "given node_id_range has shape ", node_id_range_t->dim_size(0), " on its first dim")); const auto node_id_range = node_id_range_t->vec<int32>(); const int32_t node_id_first = node_id_range(0); // inclusive const int32_t node_id_last = node_id_range(1); // exclusive // stats_summary_list OpInputList stats_summary_list; OP_REQUIRES_OK(context, context->input_list("stats_summary_list", &stats_summary_list)); const int64_t num_buckets = stats_summary_list[0].dim_size(1); // Check for single logit: 1 gradient + 1 hessian value. 
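    // Each per-feature stats_summary tensor is laid out as
    // [max_splits, num_buckets, 2], where slot 0 holds the gradient sum and
    // slot 1 the hessian sum; the check below pins the trailing dimension to
    // exactly those two entries.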
    OP_REQUIRES(context, stats_summary_list[0].dim_size(2) == 2,
                errors::InvalidArgument("stats_summary_list[0] must have a "
                                        "trailing dimension of size 2 "
                                        "(1 gradient + 1 hessian), obtained: ",
                                        stats_summary_list[0].dim_size(2)));
    std::vector<TTypes<float, 3>::ConstTensor> stats_summary;
    stats_summary.reserve(stats_summary_list.size());
    for (const auto& tensor : stats_summary_list) {
      stats_summary.emplace_back(tensor.tensor<float, 3>());
    }
    const Tensor* l1_t;
    OP_REQUIRES_OK(context, context->input("l1", &l1_t));
    const auto l1 = l1_t->scalar<float>()();
    const Tensor* l2_t;
    OP_REQUIRES_OK(context, context->input("l2", &l2_t));
    const auto l2 = l2_t->scalar<float>()();
    const Tensor* tree_complexity_t;
    OP_REQUIRES_OK(context,
                   context->input("tree_complexity", &tree_complexity_t));
    const auto tree_complexity = tree_complexity_t->scalar<float>()();
    const Tensor* min_node_weight_t;
    OP_REQUIRES_OK(context,
                   context->input("min_node_weight", &min_node_weight_t));
    const auto min_node_weight = min_node_weight_t->scalar<float>()();

    // Allocate output lists of tensors:
    OpOutputList output_node_ids_list;
    OP_REQUIRES_OK(
        context, context->output_list("node_ids_list", &output_node_ids_list));
    OpOutputList output_gains_list;
    OP_REQUIRES_OK(context,
                   context->output_list("gains_list", &output_gains_list));
    OpOutputList output_thresholds_list;
    OP_REQUIRES_OK(context, context->output_list("thresholds_list",
                                                 &output_thresholds_list));
    OpOutputList output_left_node_contribs_list;
    OP_REQUIRES_OK(context,
                   context->output_list("left_node_contribs_list",
                                        &output_left_node_contribs_list));
    OpOutputList output_right_node_contribs_list;
    OP_REQUIRES_OK(context,
                   context->output_list("right_node_contribs_list",
                                        &output_right_node_contribs_list));

    // Use identity later to convert float to Eigen::Matrix type for input to
    // CalculateWeightsAndGains. This op only supports single dimension logits.
    Eigen::MatrixXf identity;
    identity.setIdentity(1, 1);
    // Get the best split info per node for each feature.
    for (int feature_idx = 0; feature_idx < num_features_; ++feature_idx) {
      std::vector<float> cum_grad;
      std::vector<float> cum_hess;
      cum_grad.reserve(num_buckets);
      cum_hess.reserve(num_buckets);

      std::vector<int32> output_node_ids;
      std::vector<float> output_gains;
      std::vector<int32> output_thresholds;
      std::vector<float> output_left_node_contribs;
      std::vector<float> output_right_node_contribs;
      for (int node_id = node_id_first; node_id < node_id_last; ++node_id) {
        // Calculate gains.
        cum_grad.clear();
        cum_hess.clear();
        float total_grad = 0.0;
        float total_hess = 0.0;
        for (int bucket = 0; bucket < num_buckets; ++bucket) {
          // TODO(nponomareva): Consider multi-dimensional gradients/hessians.
          total_grad += stats_summary[feature_idx](node_id, bucket, 0);
          total_hess += stats_summary[feature_idx](node_id, bucket, 1);
          cum_grad.push_back(total_grad);
          cum_hess.push_back(total_hess);
        }
        // Check whether the node has a sufficient hessian sum.
        if (total_hess < min_node_weight) {
          // Do not split the node: the hessian sum is too small.
          continue;
        }
        float best_gain = std::numeric_limits<float>::lowest();
        int32_t best_bucket = 0;
        float best_contrib_for_left = 0.0;
        float best_contrib_for_right = 0.0;
        // Parent gain.
        float parent_gain;
        Eigen::VectorXf unused(1);
        CalculateWeightsAndGains(total_grad * identity, total_hess * identity,
                                 l1, l2, &unused, &parent_gain);

        for (int bucket = 0; bucket < num_buckets; ++bucket) {
          const float cum_grad_bucket = cum_grad[bucket];
          const float cum_hess_bucket = cum_hess[bucket];
          // Left child.
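          // (cum_grad/cum_hess hold running prefix sums over buckets, so the
          // candidate left child aggregates buckets [0, bucket] and the right
          // child receives total minus that prefix.)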
Eigen::VectorXf contrib_for_left(1); float gain_for_left; CalculateWeightsAndGains(cum_grad_bucket * identity, cum_hess_bucket * identity, l1, l2, &contrib_for_left, &gain_for_left); // Right child. // use contrib_for_right. Eigen::VectorXf contrib_for_right(1); float gain_for_right; CalculateWeightsAndGains((total_grad - cum_grad_bucket) * identity, (total_hess - cum_hess_bucket) * identity, l1, l2, &contrib_for_right, &gain_for_right); if (GainIsLarger(gain_for_left + gain_for_right, best_gain)) { best_gain = gain_for_left + gain_for_right; best_bucket = bucket; best_contrib_for_left = contrib_for_left[0]; best_contrib_for_right = contrib_for_right[0]; } } // for bucket output_node_ids.push_back(node_id); // Remove the parent gain for the parent node. output_gains.push_back(best_gain - parent_gain); output_thresholds.push_back(best_bucket); output_left_node_contribs.push_back(best_contrib_for_left); output_right_node_contribs.push_back(best_contrib_for_right); } // for node_id const int num_nodes = output_node_ids.size(); // output_node_ids Tensor* output_node_ids_t; OP_REQUIRES_OK(context, output_node_ids_list.allocate(feature_idx, {num_nodes}, &output_node_ids_t)); auto output_node_ids_vec = output_node_ids_t->vec<int32>(); // output_gains Tensor* output_gains_t; OP_REQUIRES_OK(context, output_gains_list.allocate( feature_idx, {num_nodes}, &output_gains_t)); auto output_gains_vec = output_gains_t->vec<float>(); // output_thresholds Tensor* output_thresholds_t; OP_REQUIRES_OK(context, output_thresholds_list.allocate(feature_idx, {num_nodes}, &output_thresholds_t)); auto output_thresholds_vec = output_thresholds_t->vec<int32>(); // output_left_node_contribs Tensor* output_left_node_contribs_t; OP_REQUIRES_OK(context, output_left_node_contribs_list.allocate( feature_idx, {num_nodes, 1}, &output_left_node_contribs_t)); auto output_left_node_contribs_matrix = output_left_node_contribs_t->matrix<float>(); // output_right_node_contribs Tensor* output_right_node_contribs_t; OP_REQUIRES_OK(context, output_right_node_contribs_list.allocate( feature_idx, {num_nodes, 1}, &output_right_node_contribs_t)); auto output_right_node_contribs_matrix = output_right_node_contribs_t->matrix<float>(); // Sets output tensors from vectors. for (int i = 0; i < num_nodes; ++i) { output_node_ids_vec(i) = output_node_ids[i]; // Adjust the gains to penalize by tree complexity. output_gains_vec(i) = output_gains[i] - tree_complexity; output_thresholds_vec(i) = output_thresholds[i]; output_left_node_contribs_matrix(i, 0) = output_left_node_contribs[i]; // This op only supports 1-dimensional logits. output_right_node_contribs_matrix(i, 0) = output_right_node_contribs[i]; } } // for f } private: int max_splits_; int num_features_; }; // V1 op that only supports single dimensional logit. REGISTER_KERNEL_BUILDER( Name("BoostedTreesCalculateBestGainsPerFeature").Device(DEVICE_CPU), BoostedTreesCalculateBestGainsPerFeatureOp); // Deprecated op. Use BoostedTreesCalculateBestFeatureSplitOpV2. 
class BoostedTreesCalculateBestFeatureSplitOp : public OpKernel {
 public:
  explicit BoostedTreesCalculateBestFeatureSplitOp(
      OpKernelConstruction* const context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context,
                   context->GetAttr("logits_dimension", &logits_dim_));
    OP_REQUIRES_OK(context, context->GetAttr("split_type", &split_type_));
  }

  void Compute(OpKernelContext* const context) override {
    // node_id_range
    const Tensor* node_id_range_t;
    OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
    OP_REQUIRES(
        context, node_id_range_t->NumElements() == 2,
        errors::InvalidArgument("node_id_range argument must have shape [2]"));
    const auto node_id_range = node_id_range_t->vec<int32>();
    const int32_t node_id_first = node_id_range(0);  // inclusive
    const int32_t node_id_last = node_id_range(1);   // exclusive

    const Tensor* stats_summary_t;
    OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t));
    OP_REQUIRES(
        context, stats_summary_t->shape().dims() == 4,
        errors::InvalidArgument("stats_summary argument must have rank 4"));
    TTypes<float, 4>::ConstTensor stats_summary =
        stats_summary_t->tensor<float, 4>();
    const int32_t feature_dims = stats_summary_t->dim_size(1);
    // The last bucket is for default/missing value.
    const int32_t num_buckets = stats_summary_t->dim_size(2) - 1;
    const int32_t logits_dim = logits_dim_;
    const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim;
    OP_REQUIRES(context, hessian_dim > 0,
                errors::InvalidArgument("hessian dim should be > 0, got ",
                                        hessian_dim));
    OP_REQUIRES(context, hessian_dim <= logits_dim * logits_dim,
                errors::InvalidArgument(
                    "hessian dim should be <= ", logits_dim * logits_dim,
                    " but got: ", hessian_dim));

    const Tensor* l1_t;
    OP_REQUIRES_OK(context, context->input("l1", &l1_t));
    OP_REQUIRES(context, l1_t->NumElements() == 1,
                errors::InvalidArgument("l1 argument must be a scalar"));
    const auto l1 = l1_t->scalar<float>()();
    DCHECK_GE(l1, 0);
    if (logits_dim_ > 1) {
      // Multi-class L1 regularization not supported yet.
      DCHECK_EQ(l1, 0);
    }

    const Tensor* l2_t;
    OP_REQUIRES_OK(context, context->input("l2", &l2_t));
    OP_REQUIRES(context, l2_t->NumElements() == 1,
                errors::InvalidArgument("l2 argument must be a scalar"));
    const auto l2 = l2_t->scalar<float>()();
    DCHECK_GE(l2, 0);

    const Tensor* tree_complexity_t;
    OP_REQUIRES_OK(context,
                   context->input("tree_complexity", &tree_complexity_t));
    OP_REQUIRES(
        context, tree_complexity_t->NumElements() == 1,
        errors::InvalidArgument("tree_complexity argument must be a scalar"));
    const auto tree_complexity = tree_complexity_t->scalar<float>()();

    const Tensor* min_node_weight_t;
    OP_REQUIRES_OK(context,
                   context->input("min_node_weight", &min_node_weight_t));
    OP_REQUIRES(
        context, min_node_weight_t->NumElements() == 1,
        errors::InvalidArgument("min_node_weight argument must be a scalar"));
    const auto min_node_weight = min_node_weight_t->scalar<float>()();

    std::vector<int32> output_node_ids;
    std::vector<float> output_gains;
    std::vector<int32> output_feature_dimensions;
    std::vector<int32> output_thresholds;
    std::vector<Eigen::VectorXf> output_left_node_contribs;
    std::vector<Eigen::VectorXf> output_right_node_contribs;
    std::vector<std::string> output_split_types;

    // TODO(tanzheny) parallelize the computation.
    // Iterate each node and find the best gain per node.
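    // Per-node flow: build a (num_buckets + 1) x (logits_dim + hessian_dim)
    // view of the node's stats, derive parent totals by summing over buckets,
    // skip the node if the hessian norm is below min_node_weight, then search
    // inequality or equality splits and keep the best candidate.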
    for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) {
      float best_gain = std::numeric_limits<float>::lowest();
      int32_t best_bucket = 0;
      int32_t best_f_dim = 0;
      string best_split_type;
      Eigen::VectorXf best_contrib_for_left(logits_dim);
      Eigen::VectorXf best_contrib_for_right(logits_dim);
      float parent_gain;

      // Including default bucket.
      ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0),
                               num_buckets + 1, logits_dim + hessian_dim);
      const Eigen::VectorXf total_grad =
          stats_mat.leftCols(logits_dim).colwise().sum();
      const Eigen::VectorXf total_hess =
          stats_mat.rightCols(hessian_dim).colwise().sum();
      if (total_hess.norm() < min_node_weight) {
        continue;
      }
      Eigen::VectorXf parent_weight(logits_dim);
      CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight,
                               &parent_gain);

      if (split_type_ == "inequality") {
        CalculateBestInequalitySplit(
            stats_summary, node_id, feature_dims, logits_dim, hessian_dim,
            num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket,
            &best_f_dim, &best_split_type, &best_contrib_for_left,
            &best_contrib_for_right);
      } else {
        CalculateBestEqualitySplit(
            stats_summary, total_grad, total_hess, node_id, feature_dims,
            logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain,
            &best_bucket, &best_f_dim, &best_split_type,
            &best_contrib_for_left, &best_contrib_for_right);
      }

      if (best_gain == std::numeric_limits<float>::lowest()) {
        // Do not add the node if no split is found.
        continue;
      }
      output_node_ids.push_back(node_id);
      // Remove the parent gain for the parent node.
      output_gains.push_back(best_gain - parent_gain);
      output_feature_dimensions.push_back(best_f_dim);
      // default direction is fixed for dense splits.
      // TODO(tanzheny) account for default values.
      output_split_types.push_back(best_split_type);
      output_thresholds.push_back(best_bucket);
      output_left_node_contribs.push_back(best_contrib_for_left);
      output_right_node_contribs.push_back(best_contrib_for_right);
    }  // for node id

    const int num_nodes = output_node_ids.size();
    // output_node_ids
    Tensor* output_node_ids_t = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes},
                                                     &output_node_ids_t));
    auto output_node_ids_vec = output_node_ids_t->vec<int32>();

    // output_gains
    Tensor* output_gains_t;
    OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes},
                                                     &output_gains_t));
    auto output_gains_vec = output_gains_t->vec<float>();

    // output_feature_dimensions
    Tensor* output_feature_dimension_t;
    OP_REQUIRES_OK(context,
                   context->allocate_output("feature_dimensions", {num_nodes},
                                            &output_feature_dimension_t));
    auto output_feature_dimensions_vec =
        output_feature_dimension_t->vec<int32>();

    // output_thresholds
    Tensor* output_thresholds_t;
    OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes},
                                                     &output_thresholds_t));
    auto output_thresholds_vec = output_thresholds_t->vec<int32>();

    // output_left_node_contribs
    Tensor* output_left_node_contribs_t;
    OP_REQUIRES_OK(context, context->allocate_output(
                                "left_node_contribs", {num_nodes, logits_dim},
                                &output_left_node_contribs_t));
    auto output_left_node_contribs_matrix =
        output_left_node_contribs_t->matrix<float>();

    // output_right_node_contribs
    Tensor* output_right_node_contribs_t;
    OP_REQUIRES_OK(context, context->allocate_output(
                                "right_node_contribs", {num_nodes, logits_dim},
                                &output_right_node_contribs_t));
    auto output_right_node_contribs_matrix =
        output_right_node_contribs_t->matrix<float>();

    // split type
    Tensor* output_split_types_t;
    OP_REQUIRES_OK(
        context,
context->allocate_output("split_with_default_directions", {num_nodes}, &output_split_types_t)); auto output_split_types_vec = output_split_types_t->vec<tstring>(); // Sets output tensors from vectors. for (int i = 0; i < num_nodes; ++i) { output_node_ids_vec(i) = output_node_ids[i]; // Adjust the gains to penalize by tree complexity. output_gains_vec(i) = output_gains[i] - tree_complexity; output_feature_dimensions_vec(i) = output_feature_dimensions[i]; output_thresholds_vec(i) = output_thresholds[i]; for (int j = 0; j < logits_dim; ++j) { output_left_node_contribs_matrix(i, j) = output_left_node_contribs[i][j]; output_right_node_contribs_matrix(i, j) = output_right_node_contribs[i][j]; } output_split_types_vec(i) = output_split_types[i]; } } private: // TODO(crawles): Simplify inequality path just like equality b/138329196 // Currently this is not simplify-able due to numerical instability in math // i.e. gain = -g.transpose() * hessian_and_reg.colPivHouseholderQr().solve(g) // It caused gain to be Inf when g is approaching 0 but not exactly 0 while // there is no regularization. // Calculate the best inequality split per node. void CalculateBestInequalitySplit( TTypes<float, 4>::ConstTensor stats_summary, const int32_t node_id, const int32_t feature_dims, const int32_t logits_dim, const int32_t hessian_dim, const int32_t num_buckets, const float min_node_weight, const float l1, const float l2, float* best_gain, int32* best_bucket, int32* best_f_dim, string* best_split_type, Eigen::VectorXf* best_contrib_for_left, Eigen::VectorXf* best_contrib_for_right) { std::vector<Eigen::VectorXf> cum_grad; std::vector<Eigen::VectorXf> cum_hess; // get all cumulative gradients including default bucket. cum_grad.reserve(num_buckets); cum_hess.reserve(num_buckets); for (int f_dim = 0; f_dim < feature_dims; ++f_dim) { ConstVectorMap default_stats_vec( &stats_summary(node_id, f_dim, num_buckets, 0), logits_dim + hessian_dim); Eigen::VectorXf missing_bucket_grad = default_stats_vec.head(logits_dim); Eigen::VectorXf missing_bucket_hess = default_stats_vec.tail(hessian_dim); cum_grad.clear(); cum_hess.clear(); Eigen::VectorXf total_grad = Eigen::VectorXf::Zero(logits_dim); Eigen::VectorXf total_hess = Eigen::VectorXf::Zero(hessian_dim); // sum all the gradients including default bucket. for (int bucket = 0; bucket <= num_buckets; ++bucket) { for (int i = 0; i < logits_dim; ++i) { total_grad[i] += stats_summary(node_id, f_dim, bucket, i); } for (int i = 0; i < hessian_dim; ++i) { // Full hessian. total_hess[i] += stats_summary(node_id, f_dim, bucket, logits_dim + i); } if (bucket < num_buckets) { cum_grad.push_back(total_grad); cum_hess.push_back(total_hess); } } const string kInequalityDefaultLeft = boosted_trees::SplitTypeWithDefault_Name( boosted_trees::INEQUALITY_DEFAULT_LEFT); const string kInequalityDefaultRight = boosted_trees::SplitTypeWithDefault_Name( boosted_trees::INEQUALITY_DEFAULT_RIGHT); // Iterate from left to right, excluding default bucket. for (int bucket = 0; bucket < num_buckets; ++bucket) { // default value goes to left node. 
const Eigen::VectorXf total_left_grad = cum_grad[bucket] + missing_bucket_grad; const Eigen::VectorXf total_left_hess = cum_hess[bucket] + missing_bucket_hess; MaybeUpdateBestSplit( total_left_grad, total_grad - total_left_grad, total_left_hess, total_hess - total_left_hess, logits_dim, bucket, f_dim, l1, l2, kInequalityDefaultLeft, best_gain, best_bucket, best_f_dim, best_split_type, best_contrib_for_left, best_contrib_for_right); // default value goes to right node. MaybeUpdateBestSplit( cum_grad[bucket], total_grad - cum_grad[bucket], cum_hess[bucket], total_hess - cum_hess[bucket], logits_dim, bucket, f_dim, l1, l2, kInequalityDefaultRight, best_gain, best_bucket, best_f_dim, best_split_type, best_contrib_for_left, best_contrib_for_right); } // for bucket } } // Calculate the best equality split per node. void CalculateBestEqualitySplit( TTypes<float, 4>::ConstTensor stats_summary, const Eigen::VectorXf& total_grad, const Eigen::VectorXf& total_hess, const int32_t node_id, const int32_t feature_dims, const int32_t logits_dim, const int32_t hessian_dim, const int32_t num_buckets, const float l1, const float l2, float* best_gain, int32* best_bucket, int32* best_f_dim, string* best_split_type, Eigen::VectorXf* best_contrib_for_left, Eigen::VectorXf* best_contrib_for_right) { const string kEqualityDefaultRight = boosted_trees::SplitTypeWithDefault_Name( boosted_trees::EQUALITY_DEFAULT_RIGHT); for (int f_dim = 0; f_dim < feature_dims; ++f_dim) { for (int bucket = 0; bucket < num_buckets; ++bucket) { ConstVectorMap stats_vec(&stats_summary(node_id, f_dim, bucket, 0), logits_dim + hessian_dim); Eigen::VectorXf curr_grad = stats_vec.head(logits_dim); Eigen::VectorXf curr_hess = stats_vec.tail(hessian_dim); MaybeUpdateBestSplit(curr_grad, total_grad - curr_grad, curr_hess, total_hess - curr_hess, logits_dim, bucket, f_dim, l1, l2, kEqualityDefaultRight, best_gain, best_bucket, best_f_dim, best_split_type, best_contrib_for_left, best_contrib_for_right); } } } void MaybeUpdateBestSplit(const Eigen::VectorXf& grad_for_left, const Eigen::VectorXf& grad_for_right, const Eigen::VectorXf& hess_for_left, const Eigen::VectorXf& hess_for_right, const int32_t logits_dim, const int32_t bucket, const int32_t f_dim, const float l1, const float l2, const string split_type, float* best_gain, int32* best_bucket, int32* best_f_dim, string* best_split_type, Eigen::VectorXf* best_contrib_for_left, Eigen::VectorXf* best_contrib_for_right) { // Left child. Eigen::VectorXf contrib_for_left(logits_dim); float gain_for_left; CalculateWeightsAndGains(grad_for_left, hess_for_left, l1, l2, &contrib_for_left, &gain_for_left); Eigen::VectorXf contrib_for_right(logits_dim); float gain_for_right; CalculateWeightsAndGains(grad_for_right, hess_for_right, l1, l2, &contrib_for_right, &gain_for_right); if (GainIsLarger(gain_for_left + gain_for_right, *best_gain)) { *best_gain = gain_for_left + gain_for_right; *best_bucket = bucket; *best_f_dim = f_dim; *best_contrib_for_left = contrib_for_left; *best_contrib_for_right = contrib_for_right; *best_split_type = split_type; } } int logits_dim_; string split_type_; }; // Deprecated op. Use BoostedTreesCalculateBestFeatureSplitOpV2. REGISTER_KERNEL_BUILDER( Name("BoostedTreesCalculateBestFeatureSplit").Device(DEVICE_CPU), BoostedTreesCalculateBestFeatureSplitOp); // V2 Op. 
class BoostedTreesCalculateBestFeatureSplitV2 : public OpKernel {
 public:
  explicit BoostedTreesCalculateBestFeatureSplitV2(
      OpKernelConstruction* const context)
      : OpKernel(context) {
    OP_REQUIRES_OK(context,
                   context->GetAttr("logits_dimension", &logits_dim_));
    OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_));
  }

  void Compute(OpKernelContext* const context) override {
    // node_id_range
    const Tensor* node_id_range_t;
    OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t));
    const auto node_id_range = node_id_range_t->vec<int32>();
    OP_REQUIRES(
        context, node_id_range_t->dims() == 1,
        errors::InvalidArgument("node_id_range must be a rank 1 tensor, but "
                                "given node_id_range has dims of ",
                                node_id_range_t->dims()));
    OP_REQUIRES(context, node_id_range_t->dim_size(0) == 2,
                errors::InvalidArgument(
                    "node_id_range must be a rank 1 tensor with shape=[2], but "
                    "given node_id_range has shape ",
                    node_id_range_t->dim_size(0), " on its first dim"));
    const int32_t node_id_first = node_id_range(0);  // Inclusive.
    const int32_t node_id_last = node_id_range(1);   // Exclusive.

    // Get stats_summaries_list.
    OpInputList stats_summaries_list;
    OP_REQUIRES_OK(context, context->input_list("stats_summaries_list",
                                                &stats_summaries_list));

    // Infer dimensions of a stats_summary.
    DCHECK_GT(stats_summaries_list.size(), 0);
    const int32_t feature_dims = stats_summaries_list[0].dim_size(1);
    // The last bucket is for default/missing value.
    const int32_t num_buckets = stats_summaries_list[0].dim_size(2) - 1;
    const int32_t logits_dim = logits_dim_;
    const int32_t hessian_dim =
        stats_summaries_list[0].dim_size(3) - logits_dim;
    OP_REQUIRES(context, hessian_dim > 0,
                errors::InvalidArgument("hessian dim should be > 0, got ",
                                        hessian_dim));
    OP_REQUIRES(context, hessian_dim <= logits_dim * logits_dim,
                errors::InvalidArgument(
                    "hessian dim should be <= ", logits_dim * logits_dim,
                    " but got: ", hessian_dim));

    // Vector of stats_summaries; each element is stats for feature of shape
    // [max_splits, feature_dim, num_buckets, logits_dim + hessian_dim].
    std::vector<TTypes<float, 4>::ConstTensor> stats_summaries;
    DCHECK_EQ(stats_summaries_list.size(), num_features_);
    stats_summaries.reserve(num_features_);
    for (const auto& tensor : stats_summaries_list) {
      stats_summaries.emplace_back(tensor.tensor<float, 4>());
    }

    // Split types.
    const Tensor* split_types_t;
    OP_REQUIRES_OK(context, context->input("split_types", &split_types_t));
    const auto split_types = split_types_t->vec<tstring>();
    DCHECK_EQ(split_types.size(), num_features_);
    // Validate.
    for (int i = 0; i < num_features_; ++i) {
      if (!(split_types(i) == kInequalitySplit ||
            split_types(i) == kEqualitySplit)) {
        OP_REQUIRES_OK(
            context,
            errors::Aborted(
                "Operation received an exception: Incorrect split type"));
      }
    }
    // Feature ids.
    const Tensor* candidate_feature_ids_t;
    OP_REQUIRES_OK(context, context->input("candidate_feature_ids",
                                           &candidate_feature_ids_t));
    const auto candidate_feature_ids = candidate_feature_ids_t->vec<int32>();
    DCHECK_EQ(candidate_feature_ids.size(), num_features_);

    // L1, L2, tree_complexity, min_node_weight.
    const Tensor* l1_t;
    OP_REQUIRES_OK(context, context->input("l1", &l1_t));
    const auto l1 = l1_t->scalar<float>()();
    DCHECK_GE(l1, 0);
    if (logits_dim_ > 1) {
      // Multi-class L1 regularization not supported yet.
DCHECK_EQ(l1, 0); } const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); const auto l2 = l2_t->scalar<float>()(); DCHECK_GE(l2, 0); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); const auto min_node_weight = min_node_weight_t->scalar<float>()(); std::vector<int32> output_node_ids; std::vector<float> output_gains; std::vector<int32> output_feature_ids; std::vector<int32> output_feature_dimensions; std::vector<int32> output_thresholds; std::vector<Eigen::VectorXf> output_left_node_contribs; std::vector<Eigen::VectorXf> output_right_node_contribs; std::vector<string> output_split_types; // TODO(tanzheny) parallelize the computation. // Iterate each node and find the best gain per node. float parent_gain; for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) { float best_gain = std::numeric_limits<float>::lowest(); int32_t best_bucket; int32_t best_f_id; int32_t best_f_dim; string best_split_type; Eigen::VectorXf best_contrib_for_left(logits_dim); Eigen::VectorXf best_contrib_for_right(logits_dim); // Sum of gradient and hessian. Compute parent gain using first feature. ConstMatrixMap stats_mat(&stats_summaries[0](node_id, 0, 0, 0), num_buckets + 1, // Including default bucket. logits_dim + hessian_dim); const Eigen::VectorXf total_grad = stats_mat.leftCols(logits_dim).colwise().sum(); const Eigen::VectorXf total_hess = stats_mat.rightCols(hessian_dim).colwise().sum(); if (total_hess.norm() < min_node_weight) { continue; } Eigen::VectorXf unused(logits_dim); CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &unused, &parent_gain); for (int f_idx = 0; f_idx < num_features_; ++f_idx) { const string split_type = split_types(f_idx); TTypes<float, 4>::ConstTensor stats_summary = stats_summaries[f_idx]; float f_best_gain = std::numeric_limits<float>::lowest(); int32_t f_best_bucket; int32_t f_best_f_dim; string f_best_split_type; Eigen::VectorXf f_best_contrib_for_left(logits_dim); Eigen::VectorXf f_best_contrib_for_right(logits_dim); if (split_type == kInequalitySplit) { CalculateBestInequalitySplit( stats_summary, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, min_node_weight, l1, l2, &f_best_gain, &f_best_bucket, &f_best_f_dim, &f_best_split_type, &f_best_contrib_for_left, &f_best_contrib_for_right); } else { CalculateBestEqualitySplit( stats_summary, total_grad, total_hess, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, l1, l2, &f_best_gain, &f_best_bucket, &f_best_f_dim, &f_best_split_type, &f_best_contrib_for_left, &f_best_contrib_for_right); } if (f_best_gain > best_gain) { best_gain = f_best_gain; best_f_id = candidate_feature_ids(f_idx); best_f_dim = f_best_f_dim; best_split_type = f_best_split_type; best_bucket = f_best_bucket; best_contrib_for_left = f_best_contrib_for_left; best_contrib_for_right = f_best_contrib_for_right; } } // For feature id. if (best_gain == std::numeric_limits<float>::lowest()) { // Do not add the node if no split is found. continue; } output_node_ids.push_back(node_id); // Remove the parent gain for the parent node. output_gains.push_back(best_gain - parent_gain); output_feature_ids.push_back(best_f_id); output_feature_dimensions.push_back(best_f_dim); // Default direction is fixed for dense splits. // TODO(tanzheny) account for default values. 
output_split_types.push_back(best_split_type); output_thresholds.push_back(best_bucket); output_left_node_contribs.push_back(best_contrib_for_left); output_right_node_contribs.push_back(best_contrib_for_right); } // for node id. const int num_nodes = output_node_ids.size(); // output_node_ids Tensor* output_node_ids_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes}, &output_node_ids_t)); auto output_node_ids_vec = output_node_ids_t->vec<int32>(); // output_gains Tensor* output_gains_t; OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes}, &output_gains_t)); auto output_gains_vec = output_gains_t->vec<float>(); // output_feature_ids Tensor* output_features_ids_t; OP_REQUIRES_OK(context, context->allocate_output("feature_ids", {num_nodes}, &output_features_ids_t)); auto output_features_vec = output_features_ids_t->vec<int32>(); // output_feature_dimensions Tensor* output_feature_dimension_t; OP_REQUIRES_OK(context, context->allocate_output("feature_dimensions", {num_nodes}, &output_feature_dimension_t)); auto output_feature_dimensions_vec = output_feature_dimension_t->vec<int32>(); // output_thresholds Tensor* output_thresholds_t; OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes}, &output_thresholds_t)); auto output_thresholds_vec = output_thresholds_t->vec<int32>(); // output_left_node_contribs Tensor* output_left_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "left_node_contribs", {num_nodes, logits_dim}, &output_left_node_contribs_t)); auto output_left_node_contribs_matrix = output_left_node_contribs_t->matrix<float>(); // output_right_node_contribs Tensor* output_right_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "right_node_contribs", {num_nodes, logits_dim}, &output_right_node_contribs_t)); auto output_right_node_contribs_matrix = output_right_node_contribs_t->matrix<float>(); // split type Tensor* output_split_types_t; OP_REQUIRES_OK( context, context->allocate_output("split_with_default_directions", {num_nodes}, &output_split_types_t)); auto output_split_types_vec = output_split_types_t->vec<tstring>(); // Sets output tensors from vectors. for (int i = 0; i < num_nodes; ++i) { output_node_ids_vec(i) = output_node_ids[i]; output_features_vec(i) = output_feature_ids[i]; // Adjust the gains to penalize by tree complexity. output_gains_vec(i) = output_gains[i] - tree_complexity; output_feature_dimensions_vec(i) = output_feature_dimensions[i]; output_thresholds_vec(i) = output_thresholds[i]; for (int j = 0; j < logits_dim; ++j) { output_left_node_contribs_matrix(i, j) = output_left_node_contribs[i][j]; output_right_node_contribs_matrix(i, j) = output_right_node_contribs[i][j]; } output_split_types_vec(i) = output_split_types[i]; } } private: // TODO(crawles): Simplify inequality path just like equality b/138329196 // Currently this is not simplify-able due to numerical instability in math // i.e. gain = -g.transpose() * hessian_and_reg.colPivHouseholderQr().solve(g) // It caused gain to be Inf when g is approaching 0 but not exactly 0 while // there is no regularization. // Calculate the best inequality split per node. 
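  // A single-logit specialization of the gain formula quoted above (a sketch;
  // the authoritative implementation is CalculateWeightsAndGains in
  // tree_helper.h, and the l1 == 0 simplification is an assumption made here
  // for illustration):
  //
  //   w    = -g / (h + l2)       // leaf weight (contribution)
  //   gain =  g * g / (h + l2)   // i.e. -g * w
  //
  // A split is then scored as gain(left) + gain(right) - gain(parent).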
void CalculateBestInequalitySplit( TTypes<float, 4>::ConstTensor stats_summary, const int32_t node_id, const int32_t feature_dims, const int32_t logits_dim, const int32_t hessian_dim, const int32_t num_buckets, const float min_node_weight, const float l1, const float l2, float* best_gain, int32* best_bucket, int32* best_f_dim, string* best_split_type, Eigen::VectorXf* best_contrib_for_left, Eigen::VectorXf* best_contrib_for_right) { std::vector<Eigen::VectorXf> cum_grad; std::vector<Eigen::VectorXf> cum_hess; // get all cumulative gradients including default bucket. cum_grad.reserve(num_buckets); cum_hess.reserve(num_buckets); for (int f_dim = 0; f_dim < feature_dims; ++f_dim) { ConstVectorMap default_stats_vec( &stats_summary(node_id, f_dim, num_buckets, 0), logits_dim + hessian_dim); Eigen::VectorXf missing_bucket_grad = default_stats_vec.head(logits_dim); Eigen::VectorXf missing_bucket_hess = default_stats_vec.tail(hessian_dim); cum_grad.clear(); cum_hess.clear(); Eigen::VectorXf total_grad = Eigen::VectorXf::Zero(logits_dim); Eigen::VectorXf total_hess = Eigen::VectorXf::Zero(hessian_dim); // sum all the gradients including default bucket. for (int bucket = 0; bucket <= num_buckets; ++bucket) { for (int i = 0; i < logits_dim; ++i) { total_grad[i] += stats_summary(node_id, f_dim, bucket, i); } for (int i = 0; i < hessian_dim; ++i) { // Full hessian. total_hess[i] += stats_summary(node_id, f_dim, bucket, logits_dim + i); } if (bucket < num_buckets) { cum_grad.push_back(total_grad); cum_hess.push_back(total_hess); } } const string kInequalityDefaultLeft = boosted_trees::SplitTypeWithDefault_Name( boosted_trees::INEQUALITY_DEFAULT_LEFT); const string kInequalityDefaultRight = boosted_trees::SplitTypeWithDefault_Name( boosted_trees::INEQUALITY_DEFAULT_RIGHT); // Iterate from left to right, excluding default bucket. for (int bucket = 0; bucket < num_buckets; ++bucket) { // default value goes to left node. const Eigen::VectorXf total_left_grad = cum_grad[bucket] + missing_bucket_grad; const Eigen::VectorXf total_left_hess = cum_hess[bucket] + missing_bucket_hess; MaybeUpdateBestSplit( total_left_grad, total_grad - total_left_grad, total_left_hess, total_hess - total_left_hess, logits_dim, bucket, f_dim, l1, l2, kInequalityDefaultLeft, best_gain, best_bucket, best_f_dim, best_split_type, best_contrib_for_left, best_contrib_for_right); // default value goes to right node. MaybeUpdateBestSplit( cum_grad[bucket], total_grad - cum_grad[bucket], cum_hess[bucket], total_hess - cum_hess[bucket], logits_dim, bucket, f_dim, l1, l2, kInequalityDefaultRight, best_gain, best_bucket, best_f_dim, best_split_type, best_contrib_for_left, best_contrib_for_right); } // for bucket } } // Calculate the best equality split per node. 
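  // (An equality split routes rows whose bucketized value equals the chosen
  // bucket to the left child and everything else, including missing values,
  // to the right child, which is why only EQUALITY_DEFAULT_RIGHT is emitted.)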
void CalculateBestEqualitySplit( TTypes<float, 4>::ConstTensor stats_summary, const Eigen::VectorXf& total_grad, const Eigen::VectorXf& total_hess, const int32_t node_id, const int32_t feature_dims, const int32_t logits_dim, const int32_t hessian_dim, const int32_t num_buckets, const float l1, const float l2, float* best_gain, int32* best_bucket, int32* best_f_dim, string* best_split_type, Eigen::VectorXf* best_contrib_for_left, Eigen::VectorXf* best_contrib_for_right) { const string kEqualityDefaultRight = boosted_trees::SplitTypeWithDefault_Name( boosted_trees::EQUALITY_DEFAULT_RIGHT); for (int f_dim = 0; f_dim < feature_dims; ++f_dim) { for (int bucket = 0; bucket < num_buckets; ++bucket) { ConstVectorMap stats_vec(&stats_summary(node_id, f_dim, bucket, 0), logits_dim + hessian_dim); Eigen::VectorXf curr_grad = stats_vec.head(logits_dim); Eigen::VectorXf curr_hess = stats_vec.tail(hessian_dim); MaybeUpdateBestSplit(curr_grad, total_grad - curr_grad, curr_hess, total_hess - curr_hess, logits_dim, bucket, f_dim, l1, l2, kEqualityDefaultRight, best_gain, best_bucket, best_f_dim, best_split_type, best_contrib_for_left, best_contrib_for_right); } } } void MaybeUpdateBestSplit(const Eigen::VectorXf& grad_for_left, const Eigen::VectorXf& grad_for_right, const Eigen::VectorXf& hess_for_left, const Eigen::VectorXf& hess_for_right, const int32_t logits_dim, const int32_t bucket, const int32_t f_dim, const float l1, const float l2, const string split_type, float* best_gain, int32* best_bucket, int32* best_f_dim, string* best_split_type, Eigen::VectorXf* best_contrib_for_left, Eigen::VectorXf* best_contrib_for_right) { // Left child. Eigen::VectorXf contrib_for_left(logits_dim); float gain_for_left; CalculateWeightsAndGains(grad_for_left, hess_for_left, l1, l2, &contrib_for_left, &gain_for_left); Eigen::VectorXf contrib_for_right(logits_dim); float gain_for_right; CalculateWeightsAndGains(grad_for_right, hess_for_right, l1, l2, &contrib_for_right, &gain_for_right); if (GainIsLarger(gain_for_left + gain_for_right, *best_gain)) { *best_gain = gain_for_left + gain_for_right; *best_bucket = bucket; *best_f_dim = f_dim; *best_contrib_for_left = contrib_for_left; *best_contrib_for_right = contrib_for_right; *best_split_type = split_type; } } int num_features_; int logits_dim_; }; // v2 op that supports multi-class. REGISTER_KERNEL_BUILDER( Name("BoostedTreesCalculateBestFeatureSplitV2").Device(DEVICE_CPU), BoostedTreesCalculateBestFeatureSplitV2); // Map from bucket id to vector of statistics. typedef std::map<int32, std::vector<float>> BucketMap; typedef BucketMap::iterator BucketMapIterator; // Map from feature dimension to BucketMap. typedef std::map<int32, BucketMap> FeatureMap; typedef FeatureMap::iterator FeatureMapIterator; class BoostedTreesSparseCalculateBestFeatureSplitOp : public OpKernel { public: explicit BoostedTreesSparseCalculateBestFeatureSplitOp( OpKernelConstruction* const context) : OpKernel(context) { // TODO(crawles): Using logits_dim_ for multi-class split. OP_REQUIRES_OK(context, context->GetAttr("logits_dimension", &logits_dim_)); // TODO(tanzheny): Using this for equality split. 
OP_REQUIRES_OK(context, context->GetAttr("split_type", &split_type_)); } void Compute(OpKernelContext* const context) override { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); const auto node_id_range = node_id_range_t->vec<int32>(); OP_REQUIRES( context, node_id_range.size() == 2, errors::InvalidArgument("node_id_range should have 2 entries, got: ", node_id_range.size())); const int32_t node_id_first = node_id_range(0); // inclusive const int32_t node_id_last = node_id_range(1); // exclusive const Tensor* stats_summary_indices_t; OP_REQUIRES_OK(context, context->input("stats_summary_indices", &stats_summary_indices_t)); const auto stats_summary_indices = stats_summary_indices_t->matrix<int32>(); const int32_t num_sparse_entries = stats_summary_indices_t->dim_size(0); const Tensor* stats_summary_values_t; OP_REQUIRES_OK(context, context->input("stats_summary_values", &stats_summary_values_t)); const auto stats_summary_values = stats_summary_values_t->vec<float>(); const Tensor* stats_summary_shape_t; OP_REQUIRES_OK( context, context->input("stats_summary_shape", &stats_summary_shape_t)); const auto stats_summary_shape = stats_summary_shape_t->vec<int32>(); const int32_t num_buckets = stats_summary_shape(2) - 1; const int32_t stats_dims = stats_summary_shape(3); const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); const auto l1 = l1_t->scalar<float>()(); const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); const auto l2 = l2_t->scalar<float>()(); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); const auto min_node_weight = min_node_weight_t->scalar<float>()(); std::vector<int32> output_node_ids; std::vector<float> output_gains; std::vector<int32> output_feature_dimensions; std::vector<int32> output_thresholds; std::vector<float> output_left_node_contribs; std::vector<float> output_right_node_contribs; std::vector<string> output_split_types; FeatureMap f_map; int32_t previous_node_id = -1; for (int idx = 0; idx < num_sparse_entries; ++idx) { int32_t node_id = stats_summary_indices(idx, 0); if (node_id != previous_node_id) { process_node(f_map, &output_node_ids, &output_gains, &output_feature_dimensions, &output_thresholds, &output_left_node_contribs, &output_right_node_contribs, &output_split_types, previous_node_id, min_node_weight, l1, l2, num_buckets); f_map.clear(); } previous_node_id = node_id; DCHECK_LE(node_id_first, node_id); DCHECK_LT(node_id, node_id_last); const int32_t feature_dim = stats_summary_indices(idx, 1); const int32_t bucket_id = stats_summary_indices(idx, 2); const int32_t stat_dim = stats_summary_indices(idx, 3); OP_REQUIRES(context, stat_dim < stats_dims, errors::InvalidArgument( "Stat dim, the sum of logits dim and hessian dim in " "stats_summary_indices, cannot be greater than stats " "dims, the last value in stats_summary_shape, which was ", stats_dims, ". 
At index (", idx, ", 4), stats_summary_indices contains value ", stat_dim)); OP_REQUIRES(context, stat_dim >= 0, errors::InvalidArgument( "Stat dim, the sum of logits dim and hessian dim in " "stats_summary_indices, should be >= 0, which was ", stat_dim, " at index ", idx)); std::pair<FeatureMapIterator, bool> const& f_insert_result = f_map.insert( FeatureMapIterator::value_type(feature_dim, BucketMap())); auto& b_map = f_insert_result.first->second; std::pair<BucketMapIterator, bool> const& b_insert_result = b_map.insert(BucketMapIterator::value_type( bucket_id, std::vector<float>(stats_dims))); auto& stats = b_insert_result.first->second; stats[stat_dim] = stats_summary_values(idx); } // for node_id // process the last node id process_node(f_map, &output_node_ids, &output_gains, &output_feature_dimensions, &output_thresholds, &output_left_node_contribs, &output_right_node_contribs, &output_split_types, previous_node_id, min_node_weight, l1, l2, num_buckets); const int num_nodes = output_node_ids.size(); // output_node_ids Tensor* output_node_ids_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes}, &output_node_ids_t)); auto output_node_ids_vec = output_node_ids_t->vec<int32>(); // output_gains Tensor* output_gains_t; OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes}, &output_gains_t)); auto output_gains_vec = output_gains_t->vec<float>(); // output_feature_dimensions Tensor* output_feature_dimension_t; OP_REQUIRES_OK(context, context->allocate_output("feature_dimensions", {num_nodes}, &output_feature_dimension_t)); auto output_feature_dimensions_vec = output_feature_dimension_t->vec<int32>(); // output_thresholds Tensor* output_thresholds_t; OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes}, &output_thresholds_t)); auto output_thresholds_vec = output_thresholds_t->vec<int32>(); // output_left_node_contribs Tensor* output_left_node_contribs_t; OP_REQUIRES_OK( context, context->allocate_output("left_node_contribs", {num_nodes, 1}, &output_left_node_contribs_t)); auto output_left_node_contribs_matrix = output_left_node_contribs_t->matrix<float>(); // output_right_node_contribs Tensor* output_right_node_contribs_t; OP_REQUIRES_OK( context, context->allocate_output("right_node_contribs", {num_nodes, 1}, &output_right_node_contribs_t)); auto output_right_node_contribs_matrix = output_right_node_contribs_t->matrix<float>(); // split type Tensor* output_split_types_t; OP_REQUIRES_OK( context, context->allocate_output("split_with_default_directions", {num_nodes}, &output_split_types_t)); auto output_split_types_vec = output_split_types_t->vec<tstring>(); // Sets output tensors from vectors. for (int i = 0; i < num_nodes; ++i) { output_node_ids_vec(i) = output_node_ids[i]; // Adjust the gains to penalize by tree complexity. output_gains_vec(i) = output_gains[i] - tree_complexity; output_feature_dimensions_vec(i) = output_feature_dimensions[i]; output_thresholds_vec(i) = output_thresholds[i]; // TODO(crawles): change this for multi-class. 
output_left_node_contribs_matrix(i, 0) = output_left_node_contribs[i]; output_right_node_contribs_matrix(i, 0) = output_right_node_contribs[i]; output_split_types_vec(i) = output_split_types[i]; } } protected: void process_node(const FeatureMap& f_map, std::vector<int32>* output_node_ids, std::vector<float>* output_gains, std::vector<int32>* output_feature_dimensions, std::vector<int32>* output_thresholds, std::vector<float>* output_left_node_contribs, std::vector<float>* output_right_node_contribs, std::vector<string>* output_split_types, const int32_t node_id, const float min_node_weight, const float l1, const float l2, const int32_t num_buckets) { float parent_gain; Eigen::VectorXf unused(logits_dim_); Eigen::MatrixXf identity; identity.setIdentity(1, 1); // start processing for previous node id. float best_gain = std::numeric_limits<float>::lowest(); float best_bucket = 0; float best_f_dim = 0; string best_split_type = boosted_trees::SplitTypeWithDefault_Name( boosted_trees::INEQUALITY_DEFAULT_LEFT); float best_contrib_for_left = 0.0; float best_contrib_for_right = 0.0; // the sum of gradients including default bucket. float total_grad = 0; // the sum of hessians including default bucket. float total_hess = 0; for (auto f_iter = f_map.begin(); f_iter != f_map.end(); ++f_iter) { const int32_t feature_dim = f_iter->first; const auto buckets_to_stats_map = f_iter->second; // The very last bucket contains stats for missing values. // TODO(crawles): use vector for multi-class. const float default_grad = (buckets_to_stats_map.find(num_buckets) == buckets_to_stats_map.end() ? 0 : buckets_to_stats_map.at(num_buckets)[0]); const float default_hess = (buckets_to_stats_map.find(num_buckets) == buckets_to_stats_map.end() ? 0 : buckets_to_stats_map.at(num_buckets)[1]); if (f_iter == f_map.begin()) { // first get the sum of grads, including default bucket. for (auto b_iter = buckets_to_stats_map.begin(); b_iter != buckets_to_stats_map.end(); ++b_iter) { total_grad += b_iter->second[0]; total_hess += b_iter->second[1]; } if (total_hess < min_node_weight) { // Do not split the node because not enough avg hessian. break; } CalculateWeightsAndGains(total_grad * identity, total_hess * identity, l1, l2, &unused, &parent_gain); } float total_left_grad = 0; float total_left_hess = 0; for (auto b_iter = buckets_to_stats_map.begin(); b_iter != buckets_to_stats_map.end(); ++b_iter) { const int32_t bucket_id = b_iter->first; // total_left_stats should exclude stats from default bucket. if (bucket_id == num_buckets) { break; } // TODO(crawles): vector for multi-class. total_left_grad += b_iter->second[0]; total_left_hess += b_iter->second[1]; // From left to right, default right. // Left child. Eigen::VectorXf contrib_for_left(1); float gain_for_left; CalculateWeightsAndGains(total_left_grad * identity, total_left_hess * identity, l1, l2, &contrib_for_left, &gain_for_left); // Right child. Eigen::VectorXf contrib_for_right(1); float gain_for_right; CalculateWeightsAndGains((total_grad - total_left_grad) * identity, (total_hess - total_left_hess) * identity, l1, l2, &contrib_for_right, &gain_for_right); if (GainIsLarger(gain_for_left + gain_for_right, best_gain)) { best_gain = gain_for_left + gain_for_right; best_bucket = bucket_id; best_f_dim = feature_dim; best_split_type = boosted_trees::SplitTypeWithDefault_Name( boosted_trees::INEQUALITY_DEFAULT_RIGHT); best_contrib_for_left = contrib_for_left[0]; best_contrib_for_right = contrib_for_right[0]; } // From right to left, default left. 
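        // (Default-left mirrors the default-right case above: the missing
        // bucket's stats join the left prefix, so left = prefix + default and
        // right = total - default - prefix.)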
CalculateWeightsAndGains((total_left_grad + default_grad) * identity, (total_left_hess + default_hess) * identity, l1, l2, &contrib_for_left, &gain_for_left); CalculateWeightsAndGains( (total_grad - default_grad - total_left_grad) * identity, (total_hess - default_hess - total_left_hess) * identity, l1, l2, &contrib_for_right, &gain_for_right); if (GainIsLarger(gain_for_left + gain_for_right, best_gain)) { best_gain = gain_for_left + gain_for_right; best_bucket = bucket_id; best_f_dim = feature_dim; best_split_type = boosted_trees::SplitTypeWithDefault_Name( boosted_trees::INEQUALITY_DEFAULT_LEFT); best_contrib_for_left = contrib_for_left[0]; best_contrib_for_right = contrib_for_right[0]; } } // for bucket_id } // for feature_dim if (best_gain != std::numeric_limits<float>::lowest()) { output_node_ids->push_back(node_id); // Remove the parent gain. output_gains->push_back(best_gain - parent_gain); output_feature_dimensions->push_back(best_f_dim); output_split_types->push_back(best_split_type); output_thresholds->push_back(best_bucket); output_left_node_contribs->push_back(best_contrib_for_left); output_right_node_contribs->push_back(best_contrib_for_right); } } private: int logits_dim_; string split_type_; }; REGISTER_KERNEL_BUILDER( Name("BoostedTreesSparseCalculateBestFeatureSplit").Device(DEVICE_CPU), BoostedTreesSparseCalculateBestFeatureSplitOp); class BoostedTreesMakeStatsSummaryOp : public OpKernel { public: explicit BoostedTreesMakeStatsSummaryOp(OpKernelConstruction* const context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); OP_REQUIRES_OK(context, context->GetAttr("num_features", &num_features_)); } void Compute(OpKernelContext* const context) override { // node_ids const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); const auto node_ids = node_ids_t->vec<int32>(); // gradients const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); const auto gradients = gradients_t->matrix<float>(); OP_REQUIRES( context, node_ids.size() == gradients.dimension(0), errors::InvalidArgument( "node_ids size should match 0th dim of gradients. node ids " "size: ", node_ids.size(), ", gradients dim0: ", gradients.dimension(0))); // hessians const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); const auto hessians = hessians_t->matrix<float>(); // bucketized_features OpInputList bucketized_features_list; OP_REQUIRES_OK(context, context->input_list("bucketized_features_list", &bucketized_features_list)); // Infer batch size. const int64_t batch_size = node_ids_t->dim_size(0); // Allocate temporary stats tensor (Rank 4). Tensor temp_stats_double_t; OP_REQUIRES_OK(context, context->allocate_temp( DT_DOUBLE, {num_features_, max_splits_, num_buckets_, 2}, &temp_stats_double_t)); auto temp_stats_double = temp_stats_double_t.tensor<double, 4>(); temp_stats_double.setZero(); // Partition by node, and then bucketize. for (int feature_idx = 0; feature_idx < num_features_; ++feature_idx) { const auto& features = bucketized_features_list[feature_idx].vec<int32>(); for (int i = 0; i < batch_size; ++i) { const int32_t node = node_ids(i); const int32_t bucket = features(i); temp_stats_double(feature_idx, node, bucket, 0) += gradients(i, 0); temp_stats_double(feature_idx, node, bucket, 1) += hessians(i, 0); } } // Copy temp tensor over to output tensor. 
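    // (Stats were accumulated in double above to limit round-off across large
    // batches; the op's output dtype is float, hence the cast below.)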
Tensor* output_stats_summary_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output( "stats_summary", temp_stats_double_t.shape(), &output_stats_summary_t)); output_stats_summary_t->tensor<float, 4>() = temp_stats_double.template cast<float>(); } private: int max_splits_; int num_buckets_; int num_features_; }; REGISTER_KERNEL_BUILDER(Name("BoostedTreesMakeStatsSummary").Device(DEVICE_CPU), BoostedTreesMakeStatsSummaryOp); // TODO(tanzheny): Add an option of default value into the API interface. class BoostedTreesAggregateStatsOp : public OpKernel { public: explicit BoostedTreesAggregateStatsOp(OpKernelConstruction* const context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); } void Compute(OpKernelContext* const context) override { // node_ids. const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); const auto node_ids = node_ids_t->vec<int32>(); // gradients. const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); const auto gradients = gradients_t->matrix<float>(); OP_REQUIRES( context, node_ids.size() == gradients.dimension(0), errors::InvalidArgument( "node_ids size should match 0th dim of gradients. node ids " "size: ", node_ids.size(), ", gradients dim0: ", gradients.dimension(0))); // hessians. const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); const auto hessians = hessians_t->matrix<float>(); // feature. const Tensor* feature_t; OP_REQUIRES_OK(context, context->input("feature", &feature_t)); const auto feature = feature_t->matrix<int32>(); // Infer batch size, feature dimension and stats dimension. const int64_t batch_size = node_ids_t->dim_size(0); const int64_t logits_dims = gradients_t->dim_size(1); const int64_t hessians_dims = hessians_t->dim_size(1); const int64_t stats_dims = logits_dims + hessians_dims; const int64_t feature_dims = feature_t->dim_size(1); // Allocate temporary stats tensor (Rank 4), upcasting to double. // A default bucket is added to the end for missing/default values. Tensor temp_stats_double_t; OP_REQUIRES_OK( context, context->allocate_temp( DT_DOUBLE, {max_splits_, feature_dims, num_buckets_ + 1, stats_dims}, &temp_stats_double_t)); auto temp_stats_double = temp_stats_double_t.tensor<double, 4>(); temp_stats_double.setZero(); for (int i = 0; i < batch_size; ++i) { const int32_t node = node_ids(i); OP_REQUIRES(context, node >= 0, errors::InvalidArgument( "node_ids ", i, "th entry should be >=0, got: ", node)); for (int feature_dim = 0; feature_dim < feature_dims; ++feature_dim) { const int32_t feature_value = feature(i, feature_dim); const int32_t bucket = (feature_value == -1) ? num_buckets_ : feature_value; for (int stat_dim = 0; stat_dim < logits_dims; ++stat_dim) { temp_stats_double(node, feature_dim, bucket, stat_dim) += gradients(i, stat_dim); } for (int stat_dim = logits_dims; stat_dim < stats_dims; ++stat_dim) { temp_stats_double(node, feature_dim, bucket, stat_dim) += hessians(i, stat_dim - logits_dims); } } } // Copy temp tensor over to output tensor, downcasting to float. 
Tensor* output_stats_summary_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output( "stats_summary", temp_stats_double_t.shape(), &output_stats_summary_t)); output_stats_summary_t->tensor<float, 4>() = temp_stats_double.template cast<float>(); } private: int max_splits_; int num_buckets_; }; REGISTER_KERNEL_BUILDER(Name("BoostedTreesAggregateStats").Device(DEVICE_CPU), BoostedTreesAggregateStatsOp); // Key based on node id, feature dimension and bucket id. struct StatsPartitionKey { StatsPartitionKey(const int32_t node_id, const int32_t feature_dim, const int32_t bucket_id) : node_id(node_id), feature_dim(feature_dim), bucket_id(bucket_id) {} bool operator==(const StatsPartitionKey& other) const { return (node_id == other.node_id) && (feature_dim == other.feature_dim) && (bucket_id == other.bucket_id); } // Compare for StatsPartitionKey. struct Less { bool operator()(const StatsPartitionKey& a, const StatsPartitionKey& b) const { if (a.node_id < b.node_id) { return true; } if ((a.node_id == b.node_id) && (a.feature_dim < b.feature_dim)) { return true; } if ((a.node_id == b.node_id) && (a.feature_dim == b.feature_dim) && (a.bucket_id < b.bucket_id)) { return true; } return false; } }; // Tree node id. int32 node_id; // Dimension within feature column. int32 feature_dim; // bucketized feature value . int32 bucket_id; }; typedef std::map<StatsPartitionKey, std::vector<float>, StatsPartitionKey::Less> StatsPartitionMap; typedef StatsPartitionMap::iterator StatsPartitionIterator; // Key based on instance and feature dimension. struct InstanceFeatureDimKey { InstanceFeatureDimKey() : instance(-1), feature_dim(-1) {} InstanceFeatureDimKey(const int32_t instance, const int32_t feature_dim) : instance(instance), feature_dim(feature_dim) {} bool operator==(const InstanceFeatureDimKey& other) const { return (instance == other.instance) && (feature_dim == other.feature_dim); } // Compare for InstanceFeatureDimKey. struct Less { bool operator()(const InstanceFeatureDimKey& a, const InstanceFeatureDimKey& b) const { if (a.instance < b.instance) { return true; } if ((a.instance == b.instance) && (a.feature_dim < b.feature_dim)) { return true; } return false; } }; // Instance id within a batch. int32 instance; // Dimension within feature column. int32 feature_dim; }; // Add statistics to StatsPartitionMap for (instance, feature dim, bucket id). static void AddInstanceStatsToMap( const int32_t instance, const int32_t feature_dim, const int32_t bucket_id, const int32_t logits_dims, const int32_t stats_dims, StatsPartitionMap* stats_map, const TTypes<float>::ConstMatrix& gradients, const TTypes<float>::ConstMatrix& hessians, const TTypes<int32>::ConstVec& node_ids) { const int32_t node_id = node_ids(instance); const auto key = StatsPartitionKey(node_id, feature_dim, bucket_id); std::pair<StatsPartitionIterator, bool> const& insert_result = stats_map->insert(StatsPartitionIterator::value_type( key, std::vector<float>(stats_dims, 0.0f))); auto& stats = insert_result.first->second; for (int stat_dim = 0; stat_dim < logits_dims; ++stat_dim) { stats[stat_dim] += gradients(instance, stat_dim); } for (int stat_dim = logits_dims; stat_dim < stats_dims; ++stat_dim) { stats[stat_dim] += hessians(instance, stat_dim - logits_dims); } } // Add statistics to StatsPartitionMap for bucket_id ranging from // (start_instance, start_feature_dim) to (end_instance, end_feature_dim), // inclusive on start and end instances, exclusive on end feature dim. 
static void AddRangeStats(const int start_instance, const int end_instance, const int start_feature_dim, const int end_feature_dim, StatsPartitionMap* stats_map, const TTypes<float>::ConstMatrix& gradients, const TTypes<float>::ConstMatrix& hessians, const TTypes<int32>::ConstVec& node_ids, const int32_t feature_dims, const int32_t bucket_id, const int32_t logits_dims, const int32_t stats_dims) { DCHECK_LE(start_instance, end_instance); if (start_instance == end_instance) { DCHECK_LT(start_feature_dim, end_feature_dim); } for (int32_t instance = start_instance; instance <= end_instance; ++instance) { const int32_t start_f_dim = (instance == start_instance) ? start_feature_dim + 1 : 0; const int32_t end_f_dim = (instance == end_instance) ? end_feature_dim : feature_dims; for (int32_t f_dim = start_f_dim; f_dim < end_f_dim; ++f_dim) { AddInstanceStatsToMap(instance, f_dim, bucket_id, logits_dims, stats_dims, stats_map, gradients, hessians, node_ids); } } } class BoostedTreesSparseAggregateStatsOp : public OpKernel { public: explicit BoostedTreesSparseAggregateStatsOp( OpKernelConstruction* const context) : OpKernel(context) { OP_REQUIRES_OK(context, context->GetAttr("max_splits", &max_splits_)); OP_REQUIRES_OK(context, context->GetAttr("num_buckets", &num_buckets_)); } void Compute(OpKernelContext* const context) override { // node_ids. const Tensor* node_ids_t; OP_REQUIRES_OK(context, context->input("node_ids", &node_ids_t)); const auto node_ids = node_ids_t->vec<int32>(); // gradients. const Tensor* gradients_t; OP_REQUIRES_OK(context, context->input("gradients", &gradients_t)); const auto gradients = gradients_t->matrix<float>(); // hessians. const Tensor* hessians_t; OP_REQUIRES_OK(context, context->input("hessians", &hessians_t)); const auto hessians = hessians_t->matrix<float>(); // feature indices. const Tensor* feature_indices_t; OP_REQUIRES_OK(context, context->input("feature_indices", &feature_indices_t)); const auto feature_indices = feature_indices_t->matrix<int32>(); // feature values. const Tensor* feature_values_t; OP_REQUIRES_OK(context, context->input("feature_values", &feature_values_t)); const auto feature_values = feature_values_t->vec<int32>(); // feature shape. const Tensor* feature_shape_t; OP_REQUIRES_OK(context, context->input("feature_shape", &feature_shape_t)); OP_REQUIRES(context, TensorShapeUtils::IsVector(feature_shape_t->shape()), errors::InvalidArgument( "Input shapes should be a vector but received shapes ", feature_shape_t->shape().DebugString())); const auto feature_shape = feature_shape_t->vec<int32>(); const int64_t batch_size = gradients_t->dim_size(0); const int64_t logits_dims = gradients_t->dim_size(1); const int64_t hessians_dims = hessians_t->dim_size(1); const int64_t stats_dims = logits_dims + hessians_dims; const int64_t num_sparse_entries = feature_indices_t->dim_size(0); const int32_t feature_dims = feature_shape(1); OP_REQUIRES(context, num_sparse_entries <= batch_size * feature_dims, errors::InvalidArgument( "feature_indices dim0 should be <= gradients dim0 * " "feature_shape[1]. features_indices dim0: ", num_sparse_entries, " gradients dim0: ", batch_size, ", feature_shape[1]: ", feature_dims)); // Aggregate statistics info to map. StatsPartitionMap stats_map; int prev_instance = 0; int prev_f_dim = -1; for (int i = 0; i < num_sparse_entries; ++i) { // the instance number within a batch const int32_t instance = feature_indices(i, 0); DCHECK_LE(instance, batch_size); DCHECK_GE(instance, prev_instance); // the node id within a tree. 
const int32_t node_id = node_ids(instance); DCHECK_LE(node_id, max_splits_); // the feature dimension. const int32_t f_dim = feature_indices(i, 1); DCHECK_LE(f_dim, feature_dims); // the bucket id of the value. const int32_t bucket_id = feature_values(i); DCHECK_LE(bucket_id, num_buckets_); // Add statistics for the missing entries into default bucket. // The last bucket is default bucket. const int missing_entry_bucket = num_buckets_; AddRangeStats(prev_instance, instance, prev_f_dim, f_dim, &stats_map, gradients, hessians, node_ids, feature_dims, missing_entry_bucket, logits_dims, stats_dims); prev_instance = instance; prev_f_dim = f_dim; // Add statistics for the non-missing entry into // (cur_instance, cur_f_dim, bucket_id). AddInstanceStatsToMap(instance, f_dim, bucket_id, logits_dims, stats_dims, &stats_map, gradients, hessians, node_ids); } AddRangeStats(prev_instance, batch_size - 1, prev_f_dim, feature_dims, &stats_map, gradients, hessians, node_ids, feature_dims, num_buckets_, logits_dims, stats_dims); // Serialize statistics info map to tensor output. const int64_t num_slots = stats_map.size() * stats_dims; Tensor* summary_indices_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("stats_summary_indices", TensorShape({num_slots, 4}), &summary_indices_t)); auto summary_indices = summary_indices_t->matrix<int32>(); Tensor* summary_values_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("stats_summary_values", TensorShape({num_slots}), &summary_values_t)); auto summary_values = summary_values_t->vec<float>(); int entry_index = 0; for (auto& iter : stats_map) { for (int stat_dim = 0; stat_dim < stats_dims; ++stat_dim) { summary_indices(entry_index, 0) = iter.first.node_id; summary_indices(entry_index, 1) = iter.first.feature_dim; summary_indices(entry_index, 2) = iter.first.bucket_id; summary_indices(entry_index, 3) = stat_dim; summary_values(entry_index) = iter.second[stat_dim]; ++entry_index; } } Tensor* summary_shape_t = nullptr; OP_REQUIRES_OK( context, context->allocate_output("stats_summary_shape", TensorShape({4}), &summary_shape_t)); auto summary_shape = summary_shape_t->vec<int32>(); summary_shape(0) = max_splits_; summary_shape(1) = feature_dims; summary_shape(2) = num_buckets_ + 1; summary_shape(3) = stats_dims; } private: int max_splits_; int num_buckets_; }; REGISTER_KERNEL_BUILDER( Name("BoostedTreesSparseAggregateStats").Device(DEVICE_CPU), BoostedTreesSparseAggregateStatsOp); } // namespace tensorflow
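The code_after blob above ends with BoostedTreesSparseAggregateStatsOp, which accumulates per-(node, feature_dim, bucket) gradient/hessian sums in an ordered StatsPartitionMap and then flattens that map into COO-style sparse outputs: one [node_id, feature_dim, bucket_id, stat_dim] index row plus one value per statistic, with stats_summary_shape recording the dense bounds. A minimal standalone sketch of that flattening step follows; the Key struct and names are simplified stand-ins for the kernel's StatsPartitionKey, not the TensorFlow API.

#include <array>
#include <cstdint>
#include <iostream>
#include <map>
#include <tuple>
#include <vector>

// Simplified stand-in for StatsPartitionKey / StatsPartitionMap.
struct Key {
  int32_t node_id, feature_dim, bucket_id;
  bool operator<(const Key& o) const {
    return std::tie(node_id, feature_dim, bucket_id) <
           std::tie(o.node_id, o.feature_dim, o.bucket_id);
  }
};

int main() {
  const int stats_dims = 2;  // e.g. 1 logit dim + 1 hessian dim
  std::map<Key, std::vector<float>> stats_map = {
      {{0, 0, 1}, {0.5f, 2.0f}},
      {{0, 1, 3}, {-1.0f, 4.0f}},
  };
  // Flatten: one 4-column index row and one value per statistic,
  // mirroring the kernel's stats_summary_indices / stats_summary_values.
  std::vector<std::array<int32_t, 4>> indices;
  std::vector<float> values;
  for (const auto& [key, stats] : stats_map) {
    for (int stat_dim = 0; stat_dim < stats_dims; ++stat_dim) {
      indices.push_back(
          {key.node_id, key.feature_dim, key.bucket_id, stat_dim});
      values.push_back(stats[stat_dim]);
    }
  }
  for (size_t i = 0; i < values.size(); ++i)
    std::cout << indices[i][0] << ' ' << indices[i][1] << ' '
              << indices[i][2] << ' ' << indices[i][3] << " -> "
              << values[i] << '\n';
}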
void Compute(OpKernelContext* const context) override { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); OP_REQUIRES( context, node_id_range_t->NumElements() == 2, errors::InvalidArgument("node_id_range argument must have shape [2]")); const auto node_id_range = node_id_range_t->vec<int32>(); const int32_t node_id_first = node_id_range(0); // inclusive const int32_t node_id_last = node_id_range(1); // exclusive const Tensor* stats_summary_t; OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t)); OP_REQUIRES( context, stats_summary_t->shape().dims() == 4, errors::InvalidArgument("stats_summary argument must have rank 4")); TTypes<float, 4>::ConstTensor stats_summary = stats_summary_t->tensor<float, 4>(); const int32_t feature_dims = stats_summary_t->dim_size(1); // The last bucket is for default/missing value. const int32_t num_buckets = stats_summary_t->dim_size(2) - 1; const int32_t logits_dim = logits_dim_; const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim; DCHECK_GT(hessian_dim, 0); DCHECK_LE(hessian_dim, logits_dim * logits_dim); const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); OP_REQUIRES(context, l1_t->NumElements() == 1, errors::InvalidArgument("l1 argument must be a scalar")); const auto l1 = l1_t->scalar<float>()(); DCHECK_GE(l1, 0); if (logits_dim_ > 1) { // Multi-class L1 regularization not supported yet. DCHECK_EQ(l1, 0); } const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); OP_REQUIRES(context, l2_t->NumElements() == 1, errors::InvalidArgument("l2 argument must be a scalar")); const auto l2 = l2_t->scalar<float>()(); DCHECK_GE(l2, 0); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); OP_REQUIRES( context, tree_complexity_t->NumElements() == 1, errors::InvalidArgument("tree_complexity argument must be a scalar")); const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); OP_REQUIRES( context, min_node_weight_t->NumElements() == 1, errors::InvalidArgument("min_node_weight argument must be a scalar")); const auto min_node_weight = min_node_weight_t->scalar<float>()(); std::vector<int32> output_node_ids; std::vector<float> output_gains; std::vector<int32> output_feature_dimensions; std::vector<int32> output_thresholds; std::vector<Eigen::VectorXf> output_left_node_contribs; std::vector<Eigen::VectorXf> output_right_node_contribs; std::vector<std::string> output_split_types; // TODO(tanzheny) parallelize the computation. // Iterate each node and find the best gain per node. for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) { float best_gain = std::numeric_limits<float>::lowest(); int32_t best_bucket = 0; int32_t best_f_dim = 0; string best_split_type; Eigen::VectorXf best_contrib_for_left(logits_dim); Eigen::VectorXf best_contrib_for_right(logits_dim); float parent_gain; // Including default bucket. 
ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0), num_buckets + 1, logits_dim + hessian_dim); const Eigen::VectorXf total_grad = stats_mat.leftCols(logits_dim).colwise().sum(); const Eigen::VectorXf total_hess = stats_mat.rightCols(hessian_dim).colwise().sum(); if (total_hess.norm() < min_node_weight) { continue; } Eigen::VectorXf parent_weight(logits_dim); CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight, &parent_gain); if (split_type_ == "inequality") { CalculateBestInequalitySplit( stats_summary, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } else { CalculateBestEqualitySplit( stats_summary, total_grad, total_hess, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } if (best_gain == std::numeric_limits<float>::lowest()) { // Do not add the node if not split if found. continue; } output_node_ids.push_back(node_id); // Remove the parent gain for the parent node. output_gains.push_back(best_gain - parent_gain); output_feature_dimensions.push_back(best_f_dim); // default direction is fixed for dense splits. // TODO(tanzheny) account for default values. output_split_types.push_back(best_split_type); output_thresholds.push_back(best_bucket); output_left_node_contribs.push_back(best_contrib_for_left); output_right_node_contribs.push_back(best_contrib_for_right); } // for node id const int num_nodes = output_node_ids.size(); // output_node_ids Tensor* output_node_ids_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes}, &output_node_ids_t)); auto output_node_ids_vec = output_node_ids_t->vec<int32>(); // output_gains Tensor* output_gains_t; OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes}, &output_gains_t)); auto output_gains_vec = output_gains_t->vec<float>(); // output_feature_dimensions Tensor* output_feature_dimension_t; OP_REQUIRES_OK(context, context->allocate_output("feature_dimensions", {num_nodes}, &output_feature_dimension_t)); auto output_feature_dimensions_vec = output_feature_dimension_t->vec<int32>(); // output_thresholds Tensor* output_thresholds_t; OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes}, &output_thresholds_t)); auto output_thresholds_vec = output_thresholds_t->vec<int32>(); // output_left_node_contribs Tensor* output_left_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "left_node_contribs", {num_nodes, logits_dim}, &output_left_node_contribs_t)); auto output_left_node_contribs_matrix = output_left_node_contribs_t->matrix<float>(); // output_right_node_contribs Tensor* output_right_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "right_node_contribs", {num_nodes, logits_dim}, &output_right_node_contribs_t)); auto output_right_node_contribs_matrix = output_right_node_contribs_t->matrix<float>(); // split type Tensor* output_split_types_t; OP_REQUIRES_OK( context, context->allocate_output("split_with_default_directions", {num_nodes}, &output_split_types_t)); auto output_split_types_vec = output_split_types_t->vec<tstring>(); // Sets output tensors from vectors. for (int i = 0; i < num_nodes; ++i) { output_node_ids_vec(i) = output_node_ids[i]; // Adjust the gains to penalize by tree complexity. 
output_gains_vec(i) = output_gains[i] - tree_complexity; output_feature_dimensions_vec(i) = output_feature_dimensions[i]; output_thresholds_vec(i) = output_thresholds[i]; for (int j = 0; j < logits_dim; ++j) { output_left_node_contribs_matrix(i, j) = output_left_node_contribs[i][j]; output_right_node_contribs_matrix(i, j) = output_right_node_contribs[i][j]; } output_split_types_vec(i) = output_split_types[i]; } }
void Compute(OpKernelContext* const context) override { // node_id_range const Tensor* node_id_range_t; OP_REQUIRES_OK(context, context->input("node_id_range", &node_id_range_t)); OP_REQUIRES( context, node_id_range_t->NumElements() == 2, errors::InvalidArgument("node_id_range argument must have shape [2]")); const auto node_id_range = node_id_range_t->vec<int32>(); const int32_t node_id_first = node_id_range(0); // inclusive const int32_t node_id_last = node_id_range(1); // exclusive const Tensor* stats_summary_t; OP_REQUIRES_OK(context, context->input("stats_summary", &stats_summary_t)); OP_REQUIRES( context, stats_summary_t->shape().dims() == 4, errors::InvalidArgument("stats_summary argument must have rank 4")); TTypes<float, 4>::ConstTensor stats_summary = stats_summary_t->tensor<float, 4>(); const int32_t feature_dims = stats_summary_t->dim_size(1); // The last bucket is for default/missing value. const int32_t num_buckets = stats_summary_t->dim_size(2) - 1; const int32_t logits_dim = logits_dim_; const int32_t hessian_dim = stats_summary_t->dim_size(3) - logits_dim; OP_REQUIRES(context, hessian_dim > 0, errors::InvalidArgument("hessian dim should be < 0, got ", hessian_dim)); OP_REQUIRES(context, hessian_dim <= logits_dim * logits_dim, errors::InvalidArgument( "hessian dim should be <= ", logits_dim * logits_dim, " but got: ", hessian_dim)); const Tensor* l1_t; OP_REQUIRES_OK(context, context->input("l1", &l1_t)); OP_REQUIRES(context, l1_t->NumElements() == 1, errors::InvalidArgument("l1 argument must be a scalar")); const auto l1 = l1_t->scalar<float>()(); DCHECK_GE(l1, 0); if (logits_dim_ > 1) { // Multi-class L1 regularization not supported yet. DCHECK_EQ(l1, 0); } const Tensor* l2_t; OP_REQUIRES_OK(context, context->input("l2", &l2_t)); OP_REQUIRES(context, l2_t->NumElements() == 1, errors::InvalidArgument("l2 argument must be a scalar")); const auto l2 = l2_t->scalar<float>()(); DCHECK_GE(l2, 0); const Tensor* tree_complexity_t; OP_REQUIRES_OK(context, context->input("tree_complexity", &tree_complexity_t)); OP_REQUIRES( context, tree_complexity_t->NumElements() == 1, errors::InvalidArgument("tree_complexity argument must be a scalar")); const auto tree_complexity = tree_complexity_t->scalar<float>()(); const Tensor* min_node_weight_t; OP_REQUIRES_OK(context, context->input("min_node_weight", &min_node_weight_t)); OP_REQUIRES( context, min_node_weight_t->NumElements() == 1, errors::InvalidArgument("min_node_weight argument must be a scalar")); const auto min_node_weight = min_node_weight_t->scalar<float>()(); std::vector<int32> output_node_ids; std::vector<float> output_gains; std::vector<int32> output_feature_dimensions; std::vector<int32> output_thresholds; std::vector<Eigen::VectorXf> output_left_node_contribs; std::vector<Eigen::VectorXf> output_right_node_contribs; std::vector<std::string> output_split_types; // TODO(tanzheny) parallelize the computation. // Iterate each node and find the best gain per node. for (int32_t node_id = node_id_first; node_id < node_id_last; ++node_id) { float best_gain = std::numeric_limits<float>::lowest(); int32_t best_bucket = 0; int32_t best_f_dim = 0; string best_split_type; Eigen::VectorXf best_contrib_for_left(logits_dim); Eigen::VectorXf best_contrib_for_right(logits_dim); float parent_gain; // Including default bucket. 
ConstMatrixMap stats_mat(&stats_summary(node_id, 0, 0, 0), num_buckets + 1, logits_dim + hessian_dim); const Eigen::VectorXf total_grad = stats_mat.leftCols(logits_dim).colwise().sum(); const Eigen::VectorXf total_hess = stats_mat.rightCols(hessian_dim).colwise().sum(); if (total_hess.norm() < min_node_weight) { continue; } Eigen::VectorXf parent_weight(logits_dim); CalculateWeightsAndGains(total_grad, total_hess, l1, l2, &parent_weight, &parent_gain); if (split_type_ == "inequality") { CalculateBestInequalitySplit( stats_summary, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, min_node_weight, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } else { CalculateBestEqualitySplit( stats_summary, total_grad, total_hess, node_id, feature_dims, logits_dim, hessian_dim, num_buckets, l1, l2, &best_gain, &best_bucket, &best_f_dim, &best_split_type, &best_contrib_for_left, &best_contrib_for_right); } if (best_gain == std::numeric_limits<float>::lowest()) { // Do not add the node if not split if found. continue; } output_node_ids.push_back(node_id); // Remove the parent gain for the parent node. output_gains.push_back(best_gain - parent_gain); output_feature_dimensions.push_back(best_f_dim); // default direction is fixed for dense splits. // TODO(tanzheny) account for default values. output_split_types.push_back(best_split_type); output_thresholds.push_back(best_bucket); output_left_node_contribs.push_back(best_contrib_for_left); output_right_node_contribs.push_back(best_contrib_for_right); } // for node id const int num_nodes = output_node_ids.size(); // output_node_ids Tensor* output_node_ids_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("node_ids", {num_nodes}, &output_node_ids_t)); auto output_node_ids_vec = output_node_ids_t->vec<int32>(); // output_gains Tensor* output_gains_t; OP_REQUIRES_OK(context, context->allocate_output("gains", {num_nodes}, &output_gains_t)); auto output_gains_vec = output_gains_t->vec<float>(); // output_feature_dimensions Tensor* output_feature_dimension_t; OP_REQUIRES_OK(context, context->allocate_output("feature_dimensions", {num_nodes}, &output_feature_dimension_t)); auto output_feature_dimensions_vec = output_feature_dimension_t->vec<int32>(); // output_thresholds Tensor* output_thresholds_t; OP_REQUIRES_OK(context, context->allocate_output("thresholds", {num_nodes}, &output_thresholds_t)); auto output_thresholds_vec = output_thresholds_t->vec<int32>(); // output_left_node_contribs Tensor* output_left_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "left_node_contribs", {num_nodes, logits_dim}, &output_left_node_contribs_t)); auto output_left_node_contribs_matrix = output_left_node_contribs_t->matrix<float>(); // output_right_node_contribs Tensor* output_right_node_contribs_t; OP_REQUIRES_OK(context, context->allocate_output( "right_node_contribs", {num_nodes, logits_dim}, &output_right_node_contribs_t)); auto output_right_node_contribs_matrix = output_right_node_contribs_t->matrix<float>(); // split type Tensor* output_split_types_t; OP_REQUIRES_OK( context, context->allocate_output("split_with_default_directions", {num_nodes}, &output_split_types_t)); auto output_split_types_vec = output_split_types_t->vec<tstring>(); // Sets output tensors from vectors. for (int i = 0; i < num_nodes; ++i) { output_node_ids_vec(i) = output_node_ids[i]; // Adjust the gains to penalize by tree complexity. 
output_gains_vec(i) = output_gains[i] - tree_complexity; output_feature_dimensions_vec(i) = output_feature_dimensions[i]; output_thresholds_vec(i) = output_thresholds[i]; for (int j = 0; j < logits_dim; ++j) { output_left_node_contribs_matrix(i, j) = output_left_node_contribs[i][j]; output_right_node_contribs_matrix(i, j) = output_right_node_contribs[i][j]; } output_split_types_vec(i) = output_split_types[i]; } }
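The only substantive difference between func_before and func_after above is input validation: func_before checks hessian_dim with DCHECK_GT/DCHECK_LE, which compile to no-ops in release builds, so a malformed stats_summary could reach stats_mat.rightCols(hessian_dim) with a non-positive or oversized dimension, while func_after replaces them with OP_REQUIRES, which fails the op with an InvalidArgument status at runtime. (The patched message text reads "should be < 0" even though the predicate enforces hessian_dim > 0; that wording slip is preserved here from the upstream commit.) Below is a minimal standalone sketch of the pattern, with a toy Status type and REQUIRE macro standing in for TensorFlow's Status/OP_REQUIRES; these names are hypothetical, not the real API.

#include <iostream>
#include <string>

// Toy stand-in for tensorflow::Status / errors::InvalidArgument.
struct Status {
  bool ok = true;
  std::string message;
};

// Toy stand-in for OP_REQUIRES: record the error and bail out of the op.
#define REQUIRE(status, cond, msg)  \
  do {                              \
    if (!(cond)) {                  \
      (status) = {false, (msg)};    \
      return;                       \
    }                               \
  } while (0)

void compute(int logits_dim, int last_dim_size, Status& status) {
  const int hessian_dim = last_dim_size - logits_dim;
  // Release builds strip DCHECKs, so this alone would not stop bad input:
  //   DCHECK_GT(hessian_dim, 0);
  // The hardened version fails the op instead:
  REQUIRE(status, hessian_dim > 0, "hessian dim should be > 0");
  REQUIRE(status, hessian_dim <= logits_dim * logits_dim,
          "hessian dim should be <= logits_dim * logits_dim");
  std::cout << "validated hessian_dim = " << hessian_dim << '\n';
}

int main() {
  Status s;
  compute(/*logits_dim=*/1, /*last_dim_size=*/1, s);  // hessian_dim == 0
  if (!s.ok) std::cout << "InvalidArgument: " << s.message << '\n';
}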
{'added': [(75, ' OP_REQUIRES(context, stats_summary_list[0].dim_size(2) == 2,'), (76, ' errors::InvalidArgument("stats_summary_list[0] must have "'), (77, ' "exactly 2 dimensions, obtained: ",'), (78, ' stats_summary_list[0].dim_size(2)));'), (281, ' OP_REQUIRES(context, hessian_dim > 0,'), (282, ' errors::InvalidArgument("hessian dim should be < 0, got ",'), (283, ' hessian_dim));'), (284, ' OP_REQUIRES(context, hessian_dim <= logits_dim * logits_dim,'), (285, ' errors::InvalidArgument('), (286, ' "hessian dim should be <= ", logits_dim * logits_dim,'), (287, ' " but got: ", hessian_dim));'), (635, ' OP_REQUIRES(context, hessian_dim > 0,'), (636, ' errors::InvalidArgument("hessian dim should be < 0, got ",'), (637, ' hessian_dim));'), (638, ' OP_REQUIRES(context, hessian_dim <= logits_dim * logits_dim,'), (639, ' errors::InvalidArgument('), (640, ' "hessian dim should be <= ", logits_dim * logits_dim,'), (641, ' " but got: ", hessian_dim));'), (1018, ' OP_REQUIRES('), (1019, ' context, node_id_range.size() == 2,'), (1020, ' errors::InvalidArgument("node_id_range should have 2 entries, got: ",'), (1021, ' node_id_range.size()));'), (1095, ' OP_REQUIRES(context, stat_dim >= 0,'), (1096, ' errors::InvalidArgument('), (1097, ' "Stat dim, the sum of logits dim and hessian dim in "'), (1098, ' "stats_summary_indices, should be >= 0, which was ",'), (1099, ' stat_dim, " at index ", idx));'), (1332, ' OP_REQUIRES('), (1333, ' context, node_ids.size() == gradients.dimension(0),'), (1334, ' errors::InvalidArgument('), (1335, ' "node_ids size should match 0th dim of gradients. node ids "'), (1336, ' "size: ",'), (1337, ' node_ids.size(), ", gradients dim0: ", gradients.dimension(0)));'), (1407, ' OP_REQUIRES('), (1408, ' context, node_ids.size() == gradients.dimension(0),'), (1409, ' errors::InvalidArgument('), (1410, ' "node_ids size should match 0th dim of gradients. node ids "'), (1411, ' "size: ",'), (1412, ' node_ids.size(), ", gradients dim0: ", gradients.dimension(0)));'), (1413, ''), (1444, ' OP_REQUIRES(context, node >= 0,'), (1445, ' errors::InvalidArgument('), (1446, ' "node_ids ", i, "th entry should be >=0, got: ", node));'), (1653, ' OP_REQUIRES(context, num_sparse_entries <= batch_size * feature_dims,'), (1654, ' errors::InvalidArgument('), (1655, ' "feature_indices dim0 should be <= gradients dim0 * "'), (1656, ' "feature_shape[1]. features_indices dim0: ",'), (1657, ' num_sparse_entries, " gradients dim0: ", batch_size,'), (1658, ' ", feature_shape[1]: ", feature_dims));')], 'deleted': [(75, ' DCHECK_EQ(stats_summary_list[0].dim_size(2), 2);'), (278, ' DCHECK_GT(hessian_dim, 0);'), (279, ' DCHECK_LE(hessian_dim, logits_dim * logits_dim);'), (627, ' DCHECK_GT(hessian_dim, 0);'), (628, ' DCHECK_LE(hessian_dim, logits_dim * logits_dim);'), (1615, ' DCHECK_LE(num_sparse_entries, batch_size * feature_dims);')]}
49
6
1,404
10,654
152
1,219
8
https://github.com/tensorflow/tensorflow
CVE-2021-41208
CWE-476
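This record files CVE-2021-41208 under CWE-476 (NULL pointer dereference); plausibly the path that label refers to is the per-node ConstMatrixMap built over &stats_summary(node_id, 0, 0, 0), since an Eigen map over a raw pointer performs no bounds checking of its own and an unvalidated hessian_dim can give rightCols() an invalid column count. A hedged sketch of the guard the patch adds, using a plain column-major Eigen::Map with illustrative values rather than TensorFlow code:

#include <Eigen/Dense>
#include <iostream>
#include <vector>

using ConstMatrixMap = Eigen::Map<const Eigen::MatrixXf>;

int main() {
  const int logits_dim = 1, last_dim_size = 2;  // stats layout: [grad | hess]
  const int num_buckets_incl_default = 3;
  const int hessian_dim = last_dim_size - logits_dim;

  std::vector<float> stats(num_buckets_incl_default * last_dim_size, 1.0f);

  // Guard first: a non-positive or oversized hessian_dim must never
  // reach rightCols(), because the map does no bounds checking.
  if (hessian_dim <= 0 || hessian_dim > logits_dim * logits_dim) {
    std::cerr << "invalid hessian_dim: " << hessian_dim << '\n';
    return 1;
  }
  ConstMatrixMap stats_mat(stats.data(), num_buckets_incl_default,
                           logits_dim + hessian_dim);
  const float total_hess = stats_mat.rightCols(hessian_dim).sum();
  std::cout << "total hessian = " << total_hess << '\n';
}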
41
dnxhddec.c
C
dnxhd_decode_header
/* * VC3/DNxHD decoder. * Copyright (c) 2007 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com> * Copyright (c) 2011 MirriAd Ltd * Copyright (c) 2015 Christophe Gisquet * * 10 bit support added by MirriAd Ltd, Joseph Artsimovich <joseph@mirriad.com> * Slice multithreading and MB interlaced support added by Christophe Gisquet * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/imgutils.h" #include "libavutil/timer.h" #include "avcodec.h" #include "blockdsp.h" #define UNCHECKED_BITSTREAM_READER 1 #include "get_bits.h" #include "dnxhddata.h" #include "idctdsp.h" #include "internal.h" #include "profiles.h" #include "thread.h" typedef struct RowContext { DECLARE_ALIGNED(16, int16_t, blocks)[12][64]; int luma_scale[64]; int chroma_scale[64]; GetBitContext gb; int last_dc[3]; int last_qscale; int errors; /** -1:not set yet 0:off=RGB 1:on=YUV 2:variable */ int format; } RowContext; typedef struct DNXHDContext { AVCodecContext *avctx; RowContext *rows; BlockDSPContext bdsp; const uint8_t* buf; int buf_size; int64_t cid; ///< compression id unsigned int width, height; enum AVPixelFormat pix_fmt; unsigned int mb_width, mb_height; uint32_t mb_scan_index[512]; int data_offset; // End of mb_scan_index, where macroblocks start int cur_field; ///< current interlaced field VLC ac_vlc, dc_vlc, run_vlc; IDCTDSPContext idsp; ScanTable scantable; const CIDEntry *cid_table; int bit_depth; // 8, 10, 12 or 0 if not initialized at all. 
int is_444; int mbaff; int act; int (*decode_dct_block)(const struct DNXHDContext *ctx, RowContext *row, int n); } DNXHDContext; #define DNXHD_VLC_BITS 9 #define DNXHD_DC_VLC_BITS 7 static int dnxhd_decode_dct_block_8(const DNXHDContext *ctx, RowContext *row, int n); static int dnxhd_decode_dct_block_10(const DNXHDContext *ctx, RowContext *row, int n); static int dnxhd_decode_dct_block_10_444(const DNXHDContext *ctx, RowContext *row, int n); static int dnxhd_decode_dct_block_12(const DNXHDContext *ctx, RowContext *row, int n); static int dnxhd_decode_dct_block_12_444(const DNXHDContext *ctx, RowContext *row, int n); static av_cold int dnxhd_decode_init(AVCodecContext *avctx) { DNXHDContext *ctx = avctx->priv_data; ctx->avctx = avctx; ctx->cid = -1; avctx->colorspace = AVCOL_SPC_BT709; avctx->coded_width = FFALIGN(avctx->width, 16); avctx->coded_height = FFALIGN(avctx->height, 16); ctx->rows = av_mallocz_array(avctx->thread_count, sizeof(RowContext)); if (!ctx->rows) return AVERROR(ENOMEM); return 0; } static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid, int bitdepth) { if (cid != ctx->cid) { int index; if ((index = ff_dnxhd_get_cid_table(cid)) < 0) { av_log(ctx->avctx, AV_LOG_ERROR, "unsupported cid %"PRIu32"\n", cid); return AVERROR(ENOSYS); } if (ff_dnxhd_cid_table[index].bit_depth != bitdepth && ff_dnxhd_cid_table[index].bit_depth != DNXHD_VARIABLE) { av_log(ctx->avctx, AV_LOG_ERROR, "bit depth mismatches %d %d\n", ff_dnxhd_cid_table[index].bit_depth, bitdepth); return AVERROR_INVALIDDATA; } ctx->cid_table = &ff_dnxhd_cid_table[index]; av_log(ctx->avctx, AV_LOG_VERBOSE, "Profile cid %"PRIu32".\n", cid); ff_free_vlc(&ctx->ac_vlc); ff_free_vlc(&ctx->dc_vlc); ff_free_vlc(&ctx->run_vlc); init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257, ctx->cid_table->ac_bits, 1, 1, ctx->cid_table->ac_codes, 2, 2, 0); init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, bitdepth > 8 ? 
14 : 12, ctx->cid_table->dc_bits, 1, 1, ctx->cid_table->dc_codes, 1, 1, 0); init_vlc(&ctx->run_vlc, DNXHD_VLC_BITS, 62, ctx->cid_table->run_bits, 1, 1, ctx->cid_table->run_codes, 2, 2, 0); ctx->cid = cid; } return 0; } static av_cold int dnxhd_decode_init_thread_copy(AVCodecContext *avctx) { DNXHDContext *ctx = avctx->priv_data; ctx->avctx = avctx; // make sure VLC tables will be loaded when cid is parsed ctx->cid = -1; ctx->rows = av_mallocz_array(avctx->thread_count, sizeof(RowContext)); if (!ctx->rows) return AVERROR(ENOMEM); return 0; } static int dnxhd_get_profile(int cid) { switch(cid) { case 1270: return FF_PROFILE_DNXHR_444; case 1271: return FF_PROFILE_DNXHR_HQX; case 1272: return FF_PROFILE_DNXHR_HQ; case 1273: return FF_PROFILE_DNXHR_SQ; case 1274: return FF_PROFILE_DNXHR_LB; } return FF_PROFILE_DNXHD; } static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame, const uint8_t *buf, int buf_size, int first_field) { int i, cid, ret; int old_bit_depth = ctx->bit_depth, bitdepth; uint64_t header_prefix; if (buf_size < 0x280) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < 640).\n", buf_size); return AVERROR_INVALIDDATA; } header_prefix = ff_dnxhd_parse_header_prefix(buf); if (header_prefix == 0) { av_log(ctx->avctx, AV_LOG_ERROR, "unknown header 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X\n", buf[0], buf[1], buf[2], buf[3], buf[4]); return AVERROR_INVALIDDATA; } if (buf[5] & 2) { /* interlaced */ ctx->cur_field = buf[5] & 1; frame->interlaced_frame = 1; frame->top_field_first = first_field ^ ctx->cur_field; av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field); } else { ctx->cur_field = 0; } ctx->mbaff = (buf[0x6] >> 5) & 1; ctx->height = AV_RB16(buf + 0x18); ctx->width = AV_RB16(buf + 0x1a); switch(buf[0x21] >> 5) { case 1: bitdepth = 8; break; case 2: bitdepth = 10; break; case 3: bitdepth = 12; break; default: av_log(ctx->avctx, AV_LOG_ERROR, "Unknown bitdepth indicator (%d)\n", buf[0x21] >> 5); return AVERROR_INVALIDDATA; } cid = AV_RB32(buf + 0x28); ctx->avctx->profile = dnxhd_get_profile(cid); if ((ret = dnxhd_init_vlc(ctx, cid, bitdepth)) < 0) return ret; if (ctx->mbaff && ctx->cid_table->cid != 1260) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive MB interlace flag in an unsupported profile.\n"); ctx->act = buf[0x2C] & 7; if (ctx->act && ctx->cid_table->cid != 1256 && ctx->cid_table->cid != 1270) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive color transform in an unsupported profile.\n"); ctx->is_444 = (buf[0x2C] >> 6) & 1; if (ctx->is_444) { if (bitdepth == 8) { avpriv_request_sample(ctx->avctx, "4:4:4 8 bits"); return AVERROR_INVALIDDATA; } else if (bitdepth == 10) { ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; ctx->pix_fmt = ctx->act ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_GBRP10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_12_444; ctx->pix_fmt = ctx->act ? 
AV_PIX_FMT_YUV444P12 : AV_PIX_FMT_GBRP12; } } else if (bitdepth == 12) { ctx->decode_dct_block = dnxhd_decode_dct_block_12; ctx->pix_fmt = AV_PIX_FMT_YUV422P12; } else if (bitdepth == 10) { if (ctx->avctx->profile == FF_PROFILE_DNXHR_HQX) ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; else ctx->decode_dct_block = dnxhd_decode_dct_block_10; ctx->pix_fmt = AV_PIX_FMT_YUV422P10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_8; ctx->pix_fmt = AV_PIX_FMT_YUV422P; } ctx->avctx->bits_per_raw_sample = ctx->bit_depth = bitdepth; if (ctx->bit_depth != old_bit_depth) { ff_blockdsp_init(&ctx->bdsp, ctx->avctx); ff_idctdsp_init(&ctx->idsp, ctx->avctx); ff_init_scantable(ctx->idsp.idct_permutation, &ctx->scantable, ff_zigzag_direct); } // make sure profile size constraints are respected // DNx100 allows 1920->1440 and 1280->960 subsampling if (ctx->width != ctx->cid_table->width && ctx->cid_table->width != DNXHD_VARIABLE) { av_reduce(&ctx->avctx->sample_aspect_ratio.num, &ctx->avctx->sample_aspect_ratio.den, ctx->width, ctx->cid_table->width, 255); ctx->width = ctx->cid_table->width; } if (buf_size < ctx->cid_table->coding_unit_size) { av_log(ctx->avctx, AV_LOG_ERROR, "incorrect frame size (%d < %u).\n", buf_size, ctx->cid_table->coding_unit_size); return AVERROR_INVALIDDATA; } ctx->mb_width = (ctx->width + 15)>> 4; ctx->mb_height = AV_RB16(buf + 0x16c); if ((ctx->height + 15) >> 4 == ctx->mb_height && frame->interlaced_frame) ctx->height <<= 1; av_log(ctx->avctx, AV_LOG_VERBOSE, "%dx%d, 4:%s %d bits, MBAFF=%d ACT=%d\n", ctx->width, ctx->height, ctx->is_444 ? "4:4" : "2:2", ctx->bit_depth, ctx->mbaff, ctx->act); // Newer format supports variable mb_scan_index sizes if (ctx->mb_height > 68 && ff_dnxhd_check_header_prefix_hr(header_prefix)) { ctx->data_offset = 0x170 + (ctx->mb_height << 2); } else { if (ctx->mb_height > 68 || (ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } ctx->data_offset = 0x280; } if (buf_size < ctx->data_offset) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < %d).\n", buf_size, ctx->data_offset); return AVERROR_INVALIDDATA; } if (ctx->mb_height > FF_ARRAY_ELEMS(ctx->mb_scan_index)) { av_log(ctx->avctx, AV_LOG_ERROR, "mb_height too big (%d > %"SIZE_SPECIFIER").\n", ctx->mb_height, FF_ARRAY_ELEMS(ctx->mb_scan_index)); return AVERROR_INVALIDDATA; } for (i = 0; i < ctx->mb_height; i++) { ctx->mb_scan_index[i] = AV_RB32(buf + 0x170 + (i << 2)); ff_dlog(ctx->avctx, "mb scan index %d, pos %d: %"PRIu32"\n", i, 0x170 + (i << 2), ctx->mb_scan_index[i]); if (buf_size - ctx->data_offset < ctx->mb_scan_index[i]) { av_log(ctx->avctx, AV_LOG_ERROR, "invalid mb scan index (%"PRIu32" vs %u).\n", ctx->mb_scan_index[i], buf_size - ctx->data_offset); return AVERROR_INVALIDDATA; } } return 0; } static av_always_inline int dnxhd_decode_dct_block(const DNXHDContext *ctx, RowContext *row, int n, int index_bits, int level_bias, int level_shift, int dc_shift) { int i, j, index1, index2, len, flags; int level, component, sign; const int *scale; const uint8_t *weight_matrix; const uint8_t *ac_info = ctx->cid_table->ac_info; int16_t *block = row->blocks[n]; const int eob_index = ctx->cid_table->eob_index; int ret = 0; OPEN_READER(bs, &row->gb); ctx->bdsp.clear_block(block); if (!ctx->is_444) { if (n & 2) { component = 1 + (n & 1); scale = row->chroma_scale; weight_matrix = ctx->cid_table->chroma_weight; } else { component = 0; scale = row->luma_scale; 
weight_matrix = ctx->cid_table->luma_weight; } } else { component = (n >> 1) % 3; if (component) { scale = row->chroma_scale; weight_matrix = ctx->cid_table->chroma_weight; } else { scale = row->luma_scale; weight_matrix = ctx->cid_table->luma_weight; } } UPDATE_CACHE(bs, &row->gb); GET_VLC(len, bs, &row->gb, ctx->dc_vlc.table, DNXHD_DC_VLC_BITS, 1); if (len) { level = GET_CACHE(bs, &row->gb); LAST_SKIP_BITS(bs, &row->gb, len); sign = ~level >> 31; level = (NEG_USR32(sign ^ level, len) ^ sign) - sign; row->last_dc[component] += level * (1 << dc_shift); } block[0] = row->last_dc[component]; i = 0; UPDATE_CACHE(bs, &row->gb); GET_VLC(index1, bs, &row->gb, ctx->ac_vlc.table, DNXHD_VLC_BITS, 2); while (index1 != eob_index) { level = ac_info[2*index1+0]; flags = ac_info[2*index1+1]; sign = SHOW_SBITS(bs, &row->gb, 1); SKIP_BITS(bs, &row->gb, 1); if (flags & 1) { level += SHOW_UBITS(bs, &row->gb, index_bits) << 7; SKIP_BITS(bs, &row->gb, index_bits); } if (flags & 2) { UPDATE_CACHE(bs, &row->gb); GET_VLC(index2, bs, &row->gb, ctx->run_vlc.table, DNXHD_VLC_BITS, 2); i += ctx->cid_table->run[index2]; } if (++i > 63) { av_log(ctx->avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", n, i); ret = -1; break; } j = ctx->scantable.permutated[i]; level *= scale[i]; level += scale[i] >> 1; if (level_bias < 32 || weight_matrix[i] != level_bias) level += level_bias; // 1<<(level_shift-1) level >>= level_shift; block[j] = (level ^ sign) - sign; UPDATE_CACHE(bs, &row->gb); GET_VLC(index1, bs, &row->gb, ctx->ac_vlc.table, DNXHD_VLC_BITS, 2); } CLOSE_READER(bs, &row->gb); return ret; } static int dnxhd_decode_dct_block_8(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 4, 32, 6, 0); } static int dnxhd_decode_dct_block_10(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 6, 8, 4, 0); } static int dnxhd_decode_dct_block_10_444(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 6, 32, 6, 0); } static int dnxhd_decode_dct_block_12(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 6, 8, 4, 2); } static int dnxhd_decode_dct_block_12_444(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 6, 32, 4, 2); } static int dnxhd_decode_macroblock(const DNXHDContext *ctx, RowContext *row, AVFrame *frame, int x, int y) { int shift1 = ctx->bit_depth >= 10; int dct_linesize_luma = frame->linesize[0]; int dct_linesize_chroma = frame->linesize[1]; uint8_t *dest_y, *dest_u, *dest_v; int dct_y_offset, dct_x_offset; int qscale, i, act; int interlaced_mb = 0; if (ctx->mbaff) { interlaced_mb = get_bits1(&row->gb); qscale = get_bits(&row->gb, 10); } else { qscale = get_bits(&row->gb, 11); } act = get_bits1(&row->gb); if (act) { if (!ctx->act) { static int act_warned; if (!act_warned) { act_warned = 1; av_log(ctx->avctx, AV_LOG_ERROR, "ACT flag set, in violation of frame header.\n"); } } else if (row->format == -1) { row->format = act; } else if (row->format != act) { row->format = 2; // Variable } } if (qscale != row->last_qscale) { for (i = 0; i < 64; i++) { row->luma_scale[i] = qscale * ctx->cid_table->luma_weight[i]; row->chroma_scale[i] = qscale * ctx->cid_table->chroma_weight[i]; } row->last_qscale = qscale; } for (i = 0; i < 8 + 4 * ctx->is_444; i++) { if (ctx->decode_dct_block(ctx, row, i) < 0) return AVERROR_INVALIDDATA; } if (frame->interlaced_frame) { dct_linesize_luma <<= 1; dct_linesize_chroma <<= 1; } dest_y = 
frame->data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1)); dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444)); dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444)); if (frame->interlaced_frame && ctx->cur_field) { dest_y += frame->linesize[0]; dest_u += frame->linesize[1]; dest_v += frame->linesize[2]; } if (interlaced_mb) { dct_linesize_luma <<= 1; dct_linesize_chroma <<= 1; } dct_y_offset = interlaced_mb ? frame->linesize[0] : (dct_linesize_luma << 3); dct_x_offset = 8 << shift1; if (!ctx->is_444) { ctx->idsp.idct_put(dest_y, dct_linesize_luma, row->blocks[0]); ctx->idsp.idct_put(dest_y + dct_x_offset, dct_linesize_luma, row->blocks[1]); ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, row->blocks[4]); ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, row->blocks[5]); if (!(ctx->avctx->flags & AV_CODEC_FLAG_GRAY)) { dct_y_offset = interlaced_mb ? frame->linesize[1] : (dct_linesize_chroma << 3); ctx->idsp.idct_put(dest_u, dct_linesize_chroma, row->blocks[2]); ctx->idsp.idct_put(dest_v, dct_linesize_chroma, row->blocks[3]); ctx->idsp.idct_put(dest_u + dct_y_offset, dct_linesize_chroma, row->blocks[6]); ctx->idsp.idct_put(dest_v + dct_y_offset, dct_linesize_chroma, row->blocks[7]); } } else { ctx->idsp.idct_put(dest_y, dct_linesize_luma, row->blocks[0]); ctx->idsp.idct_put(dest_y + dct_x_offset, dct_linesize_luma, row->blocks[1]); ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, row->blocks[6]); ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, row->blocks[7]); if (!(ctx->avctx->flags & AV_CODEC_FLAG_GRAY)) { dct_y_offset = interlaced_mb ? frame->linesize[1] : (dct_linesize_chroma << 3); ctx->idsp.idct_put(dest_u, dct_linesize_chroma, row->blocks[2]); ctx->idsp.idct_put(dest_u + dct_x_offset, dct_linesize_chroma, row->blocks[3]); ctx->idsp.idct_put(dest_u + dct_y_offset, dct_linesize_chroma, row->blocks[8]); ctx->idsp.idct_put(dest_u + dct_y_offset + dct_x_offset, dct_linesize_chroma, row->blocks[9]); ctx->idsp.idct_put(dest_v, dct_linesize_chroma, row->blocks[4]); ctx->idsp.idct_put(dest_v + dct_x_offset, dct_linesize_chroma, row->blocks[5]); ctx->idsp.idct_put(dest_v + dct_y_offset, dct_linesize_chroma, row->blocks[10]); ctx->idsp.idct_put(dest_v + dct_y_offset + dct_x_offset, dct_linesize_chroma, row->blocks[11]); } } return 0; } static int dnxhd_decode_row(AVCodecContext *avctx, void *data, int rownb, int threadnb) { const DNXHDContext *ctx = avctx->priv_data; uint32_t offset = ctx->mb_scan_index[rownb]; RowContext *row = ctx->rows + threadnb; int x; row->last_dc[0] = row->last_dc[1] = row->last_dc[2] = 1 << (ctx->bit_depth + 2); // for levels +2^(bitdepth-1) init_get_bits(&row->gb, ctx->buf + offset, (ctx->buf_size - offset) << 3); for (x = 0; x < ctx->mb_width; x++) { //START_TIMER; int ret = dnxhd_decode_macroblock(ctx, row, data, x, rownb); if (ret < 0) { row->errors++; return ret; } //STOP_TIMER("decode macroblock"); } return 0; } static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; DNXHDContext *ctx = avctx->priv_data; ThreadFrame frame = { .f = data }; AVFrame *picture = data; int first_field = 1; int ret, i; ff_dlog(avctx, "frame size %d\n", buf_size); for (i = 0; i < avctx->thread_count; i++) ctx->rows[i].format = -1; decode_coding_unit: if ((ret = dnxhd_decode_header(ctx, picture, buf, buf_size, 
first_field)) < 0) return ret; if ((avctx->width || avctx->height) && (ctx->width != avctx->width || ctx->height != avctx->height)) { av_log(avctx, AV_LOG_WARNING, "frame size changed: %dx%d -> %ux%u\n", avctx->width, avctx->height, ctx->width, ctx->height); first_field = 1; } if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) { av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n", av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt)); first_field = 1; } avctx->pix_fmt = ctx->pix_fmt; ret = ff_set_dimensions(avctx, ctx->width, ctx->height); if (ret < 0) return ret; if (first_field) { if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) return ret; picture->pict_type = AV_PICTURE_TYPE_I; picture->key_frame = 1; } ctx->buf_size = buf_size - ctx->data_offset; ctx->buf = buf + ctx->data_offset; avctx->execute2(avctx, dnxhd_decode_row, picture, NULL, ctx->mb_height); if (first_field && picture->interlaced_frame) { buf += ctx->cid_table->coding_unit_size; buf_size -= ctx->cid_table->coding_unit_size; first_field = 0; goto decode_coding_unit; } ret = 0; for (i = 0; i < avctx->thread_count; i++) { ret += ctx->rows[i].errors; ctx->rows[i].errors = 0; } if (ctx->act) { static int act_warned; int format = ctx->rows[0].format; for (i = 1; i < avctx->thread_count; i++) { if (ctx->rows[i].format != format && ctx->rows[i].format != -1 /* not run */) { format = 2; break; } } switch (format) { case -1: case 2: if (!act_warned) { act_warned = 1; av_log(ctx->avctx, AV_LOG_ERROR, "Unsupported: variable ACT flag.\n"); } break; case 0: ctx->pix_fmt = ctx->bit_depth==10 ? AV_PIX_FMT_GBRP10 : AV_PIX_FMT_GBRP12; break; case 1: ctx->pix_fmt = ctx->bit_depth==10 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV444P12; break; } } avctx->pix_fmt = ctx->pix_fmt; if (ret) { av_log(ctx->avctx, AV_LOG_ERROR, "%d lines with errors\n", ret); return AVERROR_INVALIDDATA; } *got_frame = 1; return avpkt->size; } static av_cold int dnxhd_decode_close(AVCodecContext *avctx) { DNXHDContext *ctx = avctx->priv_data; ff_free_vlc(&ctx->ac_vlc); ff_free_vlc(&ctx->dc_vlc); ff_free_vlc(&ctx->run_vlc); av_freep(&ctx->rows); return 0; } AVCodec ff_dnxhd_decoder = { .name = "dnxhd", .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_DNXHD, .priv_data_size = sizeof(DNXHDContext), .init = dnxhd_decode_init, .close = dnxhd_decode_close, .decode = dnxhd_decode_frame, .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS, .init_thread_copy = ONLY_IF_THREADS_ENABLED(dnxhd_decode_init_thread_copy), .profiles = NULL_IF_CONFIG_SMALL(ff_dnxhd_profiles), };
/* * VC3/DNxHD decoder. * Copyright (c) 2007 SmartJog S.A., Baptiste Coudurier <baptiste dot coudurier at smartjog dot com> * Copyright (c) 2011 MirriAd Ltd * Copyright (c) 2015 Christophe Gisquet * * 10 bit support added by MirriAd Ltd, Joseph Artsimovich <joseph@mirriad.com> * Slice multithreading and MB interlaced support added by Christophe Gisquet * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "libavutil/imgutils.h" #include "libavutil/timer.h" #include "avcodec.h" #include "blockdsp.h" #define UNCHECKED_BITSTREAM_READER 1 #include "get_bits.h" #include "dnxhddata.h" #include "idctdsp.h" #include "internal.h" #include "profiles.h" #include "thread.h" typedef struct RowContext { DECLARE_ALIGNED(16, int16_t, blocks)[12][64]; int luma_scale[64]; int chroma_scale[64]; GetBitContext gb; int last_dc[3]; int last_qscale; int errors; /** -1:not set yet 0:off=RGB 1:on=YUV 2:variable */ int format; } RowContext; typedef struct DNXHDContext { AVCodecContext *avctx; RowContext *rows; BlockDSPContext bdsp; const uint8_t* buf; int buf_size; int64_t cid; ///< compression id unsigned int width, height; enum AVPixelFormat pix_fmt; unsigned int mb_width, mb_height; uint32_t mb_scan_index[512]; int data_offset; // End of mb_scan_index, where macroblocks start int cur_field; ///< current interlaced field VLC ac_vlc, dc_vlc, run_vlc; IDCTDSPContext idsp; ScanTable scantable; const CIDEntry *cid_table; int bit_depth; // 8, 10, 12 or 0 if not initialized at all. 
int is_444; int mbaff; int act; int (*decode_dct_block)(const struct DNXHDContext *ctx, RowContext *row, int n); } DNXHDContext; #define DNXHD_VLC_BITS 9 #define DNXHD_DC_VLC_BITS 7 static int dnxhd_decode_dct_block_8(const DNXHDContext *ctx, RowContext *row, int n); static int dnxhd_decode_dct_block_10(const DNXHDContext *ctx, RowContext *row, int n); static int dnxhd_decode_dct_block_10_444(const DNXHDContext *ctx, RowContext *row, int n); static int dnxhd_decode_dct_block_12(const DNXHDContext *ctx, RowContext *row, int n); static int dnxhd_decode_dct_block_12_444(const DNXHDContext *ctx, RowContext *row, int n); static av_cold int dnxhd_decode_init(AVCodecContext *avctx) { DNXHDContext *ctx = avctx->priv_data; ctx->avctx = avctx; ctx->cid = -1; avctx->colorspace = AVCOL_SPC_BT709; avctx->coded_width = FFALIGN(avctx->width, 16); avctx->coded_height = FFALIGN(avctx->height, 16); ctx->rows = av_mallocz_array(avctx->thread_count, sizeof(RowContext)); if (!ctx->rows) return AVERROR(ENOMEM); return 0; } static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid, int bitdepth) { if (cid != ctx->cid) { int index; if ((index = ff_dnxhd_get_cid_table(cid)) < 0) { av_log(ctx->avctx, AV_LOG_ERROR, "unsupported cid %"PRIu32"\n", cid); return AVERROR(ENOSYS); } if (ff_dnxhd_cid_table[index].bit_depth != bitdepth && ff_dnxhd_cid_table[index].bit_depth != DNXHD_VARIABLE) { av_log(ctx->avctx, AV_LOG_ERROR, "bit depth mismatches %d %d\n", ff_dnxhd_cid_table[index].bit_depth, bitdepth); return AVERROR_INVALIDDATA; } ctx->cid_table = &ff_dnxhd_cid_table[index]; av_log(ctx->avctx, AV_LOG_VERBOSE, "Profile cid %"PRIu32".\n", cid); ff_free_vlc(&ctx->ac_vlc); ff_free_vlc(&ctx->dc_vlc); ff_free_vlc(&ctx->run_vlc); init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257, ctx->cid_table->ac_bits, 1, 1, ctx->cid_table->ac_codes, 2, 2, 0); init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, bitdepth > 8 ? 
14 : 12, ctx->cid_table->dc_bits, 1, 1, ctx->cid_table->dc_codes, 1, 1, 0); init_vlc(&ctx->run_vlc, DNXHD_VLC_BITS, 62, ctx->cid_table->run_bits, 1, 1, ctx->cid_table->run_codes, 2, 2, 0); ctx->cid = cid; } return 0; } static av_cold int dnxhd_decode_init_thread_copy(AVCodecContext *avctx) { DNXHDContext *ctx = avctx->priv_data; ctx->avctx = avctx; // make sure VLC tables will be loaded when cid is parsed ctx->cid = -1; ctx->rows = av_mallocz_array(avctx->thread_count, sizeof(RowContext)); if (!ctx->rows) return AVERROR(ENOMEM); return 0; } static int dnxhd_get_profile(int cid) { switch(cid) { case 1270: return FF_PROFILE_DNXHR_444; case 1271: return FF_PROFILE_DNXHR_HQX; case 1272: return FF_PROFILE_DNXHR_HQ; case 1273: return FF_PROFILE_DNXHR_SQ; case 1274: return FF_PROFILE_DNXHR_LB; } return FF_PROFILE_DNXHD; } static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame, const uint8_t *buf, int buf_size, int first_field) { int i, cid, ret; int old_bit_depth = ctx->bit_depth, bitdepth; uint64_t header_prefix; if (buf_size < 0x280) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < 640).\n", buf_size); return AVERROR_INVALIDDATA; } header_prefix = ff_dnxhd_parse_header_prefix(buf); if (header_prefix == 0) { av_log(ctx->avctx, AV_LOG_ERROR, "unknown header 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X\n", buf[0], buf[1], buf[2], buf[3], buf[4]); return AVERROR_INVALIDDATA; } if (buf[5] & 2) { /* interlaced */ ctx->cur_field = buf[5] & 1; frame->interlaced_frame = 1; frame->top_field_first = first_field ^ ctx->cur_field; av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field); } else { ctx->cur_field = 0; } ctx->mbaff = (buf[0x6] >> 5) & 1; ctx->height = AV_RB16(buf + 0x18); ctx->width = AV_RB16(buf + 0x1a); switch(buf[0x21] >> 5) { case 1: bitdepth = 8; break; case 2: bitdepth = 10; break; case 3: bitdepth = 12; break; default: av_log(ctx->avctx, AV_LOG_ERROR, "Unknown bitdepth indicator (%d)\n", buf[0x21] >> 5); return AVERROR_INVALIDDATA; } cid = AV_RB32(buf + 0x28); ctx->avctx->profile = dnxhd_get_profile(cid); if ((ret = dnxhd_init_vlc(ctx, cid, bitdepth)) < 0) return ret; if (ctx->mbaff && ctx->cid_table->cid != 1260) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive MB interlace flag in an unsupported profile.\n"); ctx->act = buf[0x2C] & 7; if (ctx->act && ctx->cid_table->cid != 1256 && ctx->cid_table->cid != 1270) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive color transform in an unsupported profile.\n"); ctx->is_444 = (buf[0x2C] >> 6) & 1; if (ctx->is_444) { if (bitdepth == 8) { avpriv_request_sample(ctx->avctx, "4:4:4 8 bits"); return AVERROR_INVALIDDATA; } else if (bitdepth == 10) { ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; ctx->pix_fmt = ctx->act ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_GBRP10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_12_444; ctx->pix_fmt = ctx->act ? 
AV_PIX_FMT_YUV444P12 : AV_PIX_FMT_GBRP12; } } else if (bitdepth == 12) { ctx->decode_dct_block = dnxhd_decode_dct_block_12; ctx->pix_fmt = AV_PIX_FMT_YUV422P12; } else if (bitdepth == 10) { if (ctx->avctx->profile == FF_PROFILE_DNXHR_HQX) ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; else ctx->decode_dct_block = dnxhd_decode_dct_block_10; ctx->pix_fmt = AV_PIX_FMT_YUV422P10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_8; ctx->pix_fmt = AV_PIX_FMT_YUV422P; } ctx->avctx->bits_per_raw_sample = ctx->bit_depth = bitdepth; if (ctx->bit_depth != old_bit_depth) { ff_blockdsp_init(&ctx->bdsp, ctx->avctx); ff_idctdsp_init(&ctx->idsp, ctx->avctx); ff_init_scantable(ctx->idsp.idct_permutation, &ctx->scantable, ff_zigzag_direct); } // make sure profile size constraints are respected // DNx100 allows 1920->1440 and 1280->960 subsampling if (ctx->width != ctx->cid_table->width && ctx->cid_table->width != DNXHD_VARIABLE) { av_reduce(&ctx->avctx->sample_aspect_ratio.num, &ctx->avctx->sample_aspect_ratio.den, ctx->width, ctx->cid_table->width, 255); ctx->width = ctx->cid_table->width; } if (buf_size < ctx->cid_table->coding_unit_size) { av_log(ctx->avctx, AV_LOG_ERROR, "incorrect frame size (%d < %u).\n", buf_size, ctx->cid_table->coding_unit_size); return AVERROR_INVALIDDATA; } ctx->mb_width = (ctx->width + 15)>> 4; ctx->mb_height = AV_RB16(buf + 0x16c); if ((ctx->height + 15) >> 4 == ctx->mb_height && frame->interlaced_frame) ctx->height <<= 1; av_log(ctx->avctx, AV_LOG_VERBOSE, "%dx%d, 4:%s %d bits, MBAFF=%d ACT=%d\n", ctx->width, ctx->height, ctx->is_444 ? "4:4" : "2:2", ctx->bit_depth, ctx->mbaff, ctx->act); // Newer format supports variable mb_scan_index sizes if (ctx->mb_height > 68 && ff_dnxhd_check_header_prefix_hr(header_prefix)) { ctx->data_offset = 0x170 + (ctx->mb_height << 2); } else { if (ctx->mb_height > 68) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } ctx->data_offset = 0x280; } if ((ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } if (buf_size < ctx->data_offset) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < %d).\n", buf_size, ctx->data_offset); return AVERROR_INVALIDDATA; } if (ctx->mb_height > FF_ARRAY_ELEMS(ctx->mb_scan_index)) { av_log(ctx->avctx, AV_LOG_ERROR, "mb_height too big (%d > %"SIZE_SPECIFIER").\n", ctx->mb_height, FF_ARRAY_ELEMS(ctx->mb_scan_index)); return AVERROR_INVALIDDATA; } for (i = 0; i < ctx->mb_height; i++) { ctx->mb_scan_index[i] = AV_RB32(buf + 0x170 + (i << 2)); ff_dlog(ctx->avctx, "mb scan index %d, pos %d: %"PRIu32"\n", i, 0x170 + (i << 2), ctx->mb_scan_index[i]); if (buf_size - ctx->data_offset < ctx->mb_scan_index[i]) { av_log(ctx->avctx, AV_LOG_ERROR, "invalid mb scan index (%"PRIu32" vs %u).\n", ctx->mb_scan_index[i], buf_size - ctx->data_offset); return AVERROR_INVALIDDATA; } } return 0; } static av_always_inline int dnxhd_decode_dct_block(const DNXHDContext *ctx, RowContext *row, int n, int index_bits, int level_bias, int level_shift, int dc_shift) { int i, j, index1, index2, len, flags; int level, component, sign; const int *scale; const uint8_t *weight_matrix; const uint8_t *ac_info = ctx->cid_table->ac_info; int16_t *block = row->blocks[n]; const int eob_index = ctx->cid_table->eob_index; int ret = 0; OPEN_READER(bs, &row->gb); ctx->bdsp.clear_block(block); if (!ctx->is_444) { if (n & 2) { component = 1 + (n & 1); scale = 
row->chroma_scale; weight_matrix = ctx->cid_table->chroma_weight; } else { component = 0; scale = row->luma_scale; weight_matrix = ctx->cid_table->luma_weight; } } else { component = (n >> 1) % 3; if (component) { scale = row->chroma_scale; weight_matrix = ctx->cid_table->chroma_weight; } else { scale = row->luma_scale; weight_matrix = ctx->cid_table->luma_weight; } } UPDATE_CACHE(bs, &row->gb); GET_VLC(len, bs, &row->gb, ctx->dc_vlc.table, DNXHD_DC_VLC_BITS, 1); if (len) { level = GET_CACHE(bs, &row->gb); LAST_SKIP_BITS(bs, &row->gb, len); sign = ~level >> 31; level = (NEG_USR32(sign ^ level, len) ^ sign) - sign; row->last_dc[component] += level * (1 << dc_shift); } block[0] = row->last_dc[component]; i = 0; UPDATE_CACHE(bs, &row->gb); GET_VLC(index1, bs, &row->gb, ctx->ac_vlc.table, DNXHD_VLC_BITS, 2); while (index1 != eob_index) { level = ac_info[2*index1+0]; flags = ac_info[2*index1+1]; sign = SHOW_SBITS(bs, &row->gb, 1); SKIP_BITS(bs, &row->gb, 1); if (flags & 1) { level += SHOW_UBITS(bs, &row->gb, index_bits) << 7; SKIP_BITS(bs, &row->gb, index_bits); } if (flags & 2) { UPDATE_CACHE(bs, &row->gb); GET_VLC(index2, bs, &row->gb, ctx->run_vlc.table, DNXHD_VLC_BITS, 2); i += ctx->cid_table->run[index2]; } if (++i > 63) { av_log(ctx->avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", n, i); ret = -1; break; } j = ctx->scantable.permutated[i]; level *= scale[i]; level += scale[i] >> 1; if (level_bias < 32 || weight_matrix[i] != level_bias) level += level_bias; // 1<<(level_shift-1) level >>= level_shift; block[j] = (level ^ sign) - sign; UPDATE_CACHE(bs, &row->gb); GET_VLC(index1, bs, &row->gb, ctx->ac_vlc.table, DNXHD_VLC_BITS, 2); } CLOSE_READER(bs, &row->gb); return ret; } static int dnxhd_decode_dct_block_8(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 4, 32, 6, 0); } static int dnxhd_decode_dct_block_10(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 6, 8, 4, 0); } static int dnxhd_decode_dct_block_10_444(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 6, 32, 6, 0); } static int dnxhd_decode_dct_block_12(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 6, 8, 4, 2); } static int dnxhd_decode_dct_block_12_444(const DNXHDContext *ctx, RowContext *row, int n) { return dnxhd_decode_dct_block(ctx, row, n, 6, 32, 4, 2); } static int dnxhd_decode_macroblock(const DNXHDContext *ctx, RowContext *row, AVFrame *frame, int x, int y) { int shift1 = ctx->bit_depth >= 10; int dct_linesize_luma = frame->linesize[0]; int dct_linesize_chroma = frame->linesize[1]; uint8_t *dest_y, *dest_u, *dest_v; int dct_y_offset, dct_x_offset; int qscale, i, act; int interlaced_mb = 0; if (ctx->mbaff) { interlaced_mb = get_bits1(&row->gb); qscale = get_bits(&row->gb, 10); } else { qscale = get_bits(&row->gb, 11); } act = get_bits1(&row->gb); if (act) { if (!ctx->act) { static int act_warned; if (!act_warned) { act_warned = 1; av_log(ctx->avctx, AV_LOG_ERROR, "ACT flag set, in violation of frame header.\n"); } } else if (row->format == -1) { row->format = act; } else if (row->format != act) { row->format = 2; // Variable } } if (qscale != row->last_qscale) { for (i = 0; i < 64; i++) { row->luma_scale[i] = qscale * ctx->cid_table->luma_weight[i]; row->chroma_scale[i] = qscale * ctx->cid_table->chroma_weight[i]; } row->last_qscale = qscale; } for (i = 0; i < 8 + 4 * ctx->is_444; i++) { if (ctx->decode_dct_block(ctx, row, i) < 0) return 
AVERROR_INVALIDDATA; } if (frame->interlaced_frame) { dct_linesize_luma <<= 1; dct_linesize_chroma <<= 1; } dest_y = frame->data[0] + ((y * dct_linesize_luma) << 4) + (x << (4 + shift1)); dest_u = frame->data[1] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444)); dest_v = frame->data[2] + ((y * dct_linesize_chroma) << 4) + (x << (3 + shift1 + ctx->is_444)); if (frame->interlaced_frame && ctx->cur_field) { dest_y += frame->linesize[0]; dest_u += frame->linesize[1]; dest_v += frame->linesize[2]; } if (interlaced_mb) { dct_linesize_luma <<= 1; dct_linesize_chroma <<= 1; } dct_y_offset = interlaced_mb ? frame->linesize[0] : (dct_linesize_luma << 3); dct_x_offset = 8 << shift1; if (!ctx->is_444) { ctx->idsp.idct_put(dest_y, dct_linesize_luma, row->blocks[0]); ctx->idsp.idct_put(dest_y + dct_x_offset, dct_linesize_luma, row->blocks[1]); ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, row->blocks[4]); ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, row->blocks[5]); if (!(ctx->avctx->flags & AV_CODEC_FLAG_GRAY)) { dct_y_offset = interlaced_mb ? frame->linesize[1] : (dct_linesize_chroma << 3); ctx->idsp.idct_put(dest_u, dct_linesize_chroma, row->blocks[2]); ctx->idsp.idct_put(dest_v, dct_linesize_chroma, row->blocks[3]); ctx->idsp.idct_put(dest_u + dct_y_offset, dct_linesize_chroma, row->blocks[6]); ctx->idsp.idct_put(dest_v + dct_y_offset, dct_linesize_chroma, row->blocks[7]); } } else { ctx->idsp.idct_put(dest_y, dct_linesize_luma, row->blocks[0]); ctx->idsp.idct_put(dest_y + dct_x_offset, dct_linesize_luma, row->blocks[1]); ctx->idsp.idct_put(dest_y + dct_y_offset, dct_linesize_luma, row->blocks[6]); ctx->idsp.idct_put(dest_y + dct_y_offset + dct_x_offset, dct_linesize_luma, row->blocks[7]); if (!(ctx->avctx->flags & AV_CODEC_FLAG_GRAY)) { dct_y_offset = interlaced_mb ? 
frame->linesize[1] : (dct_linesize_chroma << 3); ctx->idsp.idct_put(dest_u, dct_linesize_chroma, row->blocks[2]); ctx->idsp.idct_put(dest_u + dct_x_offset, dct_linesize_chroma, row->blocks[3]); ctx->idsp.idct_put(dest_u + dct_y_offset, dct_linesize_chroma, row->blocks[8]); ctx->idsp.idct_put(dest_u + dct_y_offset + dct_x_offset, dct_linesize_chroma, row->blocks[9]); ctx->idsp.idct_put(dest_v, dct_linesize_chroma, row->blocks[4]); ctx->idsp.idct_put(dest_v + dct_x_offset, dct_linesize_chroma, row->blocks[5]); ctx->idsp.idct_put(dest_v + dct_y_offset, dct_linesize_chroma, row->blocks[10]); ctx->idsp.idct_put(dest_v + dct_y_offset + dct_x_offset, dct_linesize_chroma, row->blocks[11]); } } return 0; } static int dnxhd_decode_row(AVCodecContext *avctx, void *data, int rownb, int threadnb) { const DNXHDContext *ctx = avctx->priv_data; uint32_t offset = ctx->mb_scan_index[rownb]; RowContext *row = ctx->rows + threadnb; int x; row->last_dc[0] = row->last_dc[1] = row->last_dc[2] = 1 << (ctx->bit_depth + 2); // for levels +2^(bitdepth-1) init_get_bits(&row->gb, ctx->buf + offset, (ctx->buf_size - offset) << 3); for (x = 0; x < ctx->mb_width; x++) { //START_TIMER; int ret = dnxhd_decode_macroblock(ctx, row, data, x, rownb); if (ret < 0) { row->errors++; return ret; } //STOP_TIMER("decode macroblock"); } return 0; } static int dnxhd_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt) { const uint8_t *buf = avpkt->data; int buf_size = avpkt->size; DNXHDContext *ctx = avctx->priv_data; ThreadFrame frame = { .f = data }; AVFrame *picture = data; int first_field = 1; int ret, i; ff_dlog(avctx, "frame size %d\n", buf_size); for (i = 0; i < avctx->thread_count; i++) ctx->rows[i].format = -1; decode_coding_unit: if ((ret = dnxhd_decode_header(ctx, picture, buf, buf_size, first_field)) < 0) return ret; if ((avctx->width || avctx->height) && (ctx->width != avctx->width || ctx->height != avctx->height)) { av_log(avctx, AV_LOG_WARNING, "frame size changed: %dx%d -> %ux%u\n", avctx->width, avctx->height, ctx->width, ctx->height); first_field = 1; } if (avctx->pix_fmt != AV_PIX_FMT_NONE && avctx->pix_fmt != ctx->pix_fmt) { av_log(avctx, AV_LOG_WARNING, "pix_fmt changed: %s -> %s\n", av_get_pix_fmt_name(avctx->pix_fmt), av_get_pix_fmt_name(ctx->pix_fmt)); first_field = 1; } avctx->pix_fmt = ctx->pix_fmt; ret = ff_set_dimensions(avctx, ctx->width, ctx->height); if (ret < 0) return ret; if (first_field) { if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0) return ret; picture->pict_type = AV_PICTURE_TYPE_I; picture->key_frame = 1; } ctx->buf_size = buf_size - ctx->data_offset; ctx->buf = buf + ctx->data_offset; avctx->execute2(avctx, dnxhd_decode_row, picture, NULL, ctx->mb_height); if (first_field && picture->interlaced_frame) { buf += ctx->cid_table->coding_unit_size; buf_size -= ctx->cid_table->coding_unit_size; first_field = 0; goto decode_coding_unit; } ret = 0; for (i = 0; i < avctx->thread_count; i++) { ret += ctx->rows[i].errors; ctx->rows[i].errors = 0; } if (ctx->act) { static int act_warned; int format = ctx->rows[0].format; for (i = 1; i < avctx->thread_count; i++) { if (ctx->rows[i].format != format && ctx->rows[i].format != -1 /* not run */) { format = 2; break; } } switch (format) { case -1: case 2: if (!act_warned) { act_warned = 1; av_log(ctx->avctx, AV_LOG_ERROR, "Unsupported: variable ACT flag.\n"); } break; case 0: ctx->pix_fmt = ctx->bit_depth==10 ? AV_PIX_FMT_GBRP10 : AV_PIX_FMT_GBRP12; break; case 1: ctx->pix_fmt = ctx->bit_depth==10 ? 
AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV444P12; break; } } avctx->pix_fmt = ctx->pix_fmt; if (ret) { av_log(ctx->avctx, AV_LOG_ERROR, "%d lines with errors\n", ret); return AVERROR_INVALIDDATA; } *got_frame = 1; return avpkt->size; } static av_cold int dnxhd_decode_close(AVCodecContext *avctx) { DNXHDContext *ctx = avctx->priv_data; ff_free_vlc(&ctx->ac_vlc); ff_free_vlc(&ctx->dc_vlc); ff_free_vlc(&ctx->run_vlc); av_freep(&ctx->rows); return 0; } AVCodec ff_dnxhd_decoder = { .name = "dnxhd", .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"), .type = AVMEDIA_TYPE_VIDEO, .id = AV_CODEC_ID_DNXHD, .priv_data_size = sizeof(DNXHDContext), .init = dnxhd_decode_init, .close = dnxhd_decode_close, .decode = dnxhd_decode_frame, .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS, .init_thread_copy = ONLY_IF_THREADS_ENABLED(dnxhd_decode_init_thread_copy), .profiles = NULL_IF_CONFIG_SMALL(ff_dnxhd_profiles), };
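The DC coefficient decode inside dnxhd_decode_dct_block() above compresses its sign handling into three branchless lines (sign = ~level >> 31; ...). The standalone sketch below reproduces that idiom so it can be stepped through in isolation; NEG_USR32 is copied from FFmpeg's mathops.h, while the cache values in main() are arbitrary stand-ins for GET_CACHE() output, not real bitstream data. An arithmetic right shift of negative ints is assumed, as in the original.

#include <stdint.h>
#include <stdio.h>

/* From FFmpeg's mathops.h: top 's' bits of a 32-bit value. */
#define NEG_USR32(a, s) (((uint32_t)(a)) >> (32 - (s)))

/* Decode a 'len'-bit signed DC delta from the top bits of a 32-bit cache.
 * 'sign' is 0 when the MSB is 1 (positive) and -1 when the MSB is 0; the
 * XOR/subtract pair then negates the complemented magnitude without a branch. */
static int32_t decode_dc_delta(uint32_t cache, int len)
{
    int32_t level = (int32_t)cache;
    int32_t sign  = ~level >> 31;                 /* 0 or -1 (arithmetic shift) */
    return (int32_t)((NEG_USR32(sign ^ level, len) ^ sign) - sign);
}

int main(void)
{
    /* top 7 bits = 1000011b: MSB set -> positive magnitude 67 */
    printf("%d\n", (int)decode_dc_delta(0x86000000u, 7));   /* prints 67 */
    /* top 7 bits = 0000011b: MSB clear -> complement gives 124, negated */
    printf("%d\n", (int)decode_dc_delta(0x06000000u, 7));   /* prints -124 */
    return 0;
}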
static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame, const uint8_t *buf, int buf_size, int first_field) { int i, cid, ret; int old_bit_depth = ctx->bit_depth, bitdepth; uint64_t header_prefix; if (buf_size < 0x280) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < 640).\n", buf_size); return AVERROR_INVALIDDATA; } header_prefix = ff_dnxhd_parse_header_prefix(buf); if (header_prefix == 0) { av_log(ctx->avctx, AV_LOG_ERROR, "unknown header 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X\n", buf[0], buf[1], buf[2], buf[3], buf[4]); return AVERROR_INVALIDDATA; } if (buf[5] & 2) { /* interlaced */ ctx->cur_field = buf[5] & 1; frame->interlaced_frame = 1; frame->top_field_first = first_field ^ ctx->cur_field; av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field); } else { ctx->cur_field = 0; } ctx->mbaff = (buf[0x6] >> 5) & 1; ctx->height = AV_RB16(buf + 0x18); ctx->width = AV_RB16(buf + 0x1a); switch(buf[0x21] >> 5) { case 1: bitdepth = 8; break; case 2: bitdepth = 10; break; case 3: bitdepth = 12; break; default: av_log(ctx->avctx, AV_LOG_ERROR, "Unknown bitdepth indicator (%d)\n", buf[0x21] >> 5); return AVERROR_INVALIDDATA; } cid = AV_RB32(buf + 0x28); ctx->avctx->profile = dnxhd_get_profile(cid); if ((ret = dnxhd_init_vlc(ctx, cid, bitdepth)) < 0) return ret; if (ctx->mbaff && ctx->cid_table->cid != 1260) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive MB interlace flag in an unsupported profile.\n"); ctx->act = buf[0x2C] & 7; if (ctx->act && ctx->cid_table->cid != 1256 && ctx->cid_table->cid != 1270) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive color transform in an unsupported profile.\n"); ctx->is_444 = (buf[0x2C] >> 6) & 1; if (ctx->is_444) { if (bitdepth == 8) { avpriv_request_sample(ctx->avctx, "4:4:4 8 bits"); return AVERROR_INVALIDDATA; } else if (bitdepth == 10) { ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; ctx->pix_fmt = ctx->act ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_GBRP10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_12_444; ctx->pix_fmt = ctx->act ? 
AV_PIX_FMT_YUV444P12 : AV_PIX_FMT_GBRP12; } } else if (bitdepth == 12) { ctx->decode_dct_block = dnxhd_decode_dct_block_12; ctx->pix_fmt = AV_PIX_FMT_YUV422P12; } else if (bitdepth == 10) { if (ctx->avctx->profile == FF_PROFILE_DNXHR_HQX) ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; else ctx->decode_dct_block = dnxhd_decode_dct_block_10; ctx->pix_fmt = AV_PIX_FMT_YUV422P10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_8; ctx->pix_fmt = AV_PIX_FMT_YUV422P; } ctx->avctx->bits_per_raw_sample = ctx->bit_depth = bitdepth; if (ctx->bit_depth != old_bit_depth) { ff_blockdsp_init(&ctx->bdsp, ctx->avctx); ff_idctdsp_init(&ctx->idsp, ctx->avctx); ff_init_scantable(ctx->idsp.idct_permutation, &ctx->scantable, ff_zigzag_direct); } // make sure profile size constraints are respected // DNx100 allows 1920->1440 and 1280->960 subsampling if (ctx->width != ctx->cid_table->width && ctx->cid_table->width != DNXHD_VARIABLE) { av_reduce(&ctx->avctx->sample_aspect_ratio.num, &ctx->avctx->sample_aspect_ratio.den, ctx->width, ctx->cid_table->width, 255); ctx->width = ctx->cid_table->width; } if (buf_size < ctx->cid_table->coding_unit_size) { av_log(ctx->avctx, AV_LOG_ERROR, "incorrect frame size (%d < %u).\n", buf_size, ctx->cid_table->coding_unit_size); return AVERROR_INVALIDDATA; } ctx->mb_width = (ctx->width + 15)>> 4; ctx->mb_height = AV_RB16(buf + 0x16c); if ((ctx->height + 15) >> 4 == ctx->mb_height && frame->interlaced_frame) ctx->height <<= 1; av_log(ctx->avctx, AV_LOG_VERBOSE, "%dx%d, 4:%s %d bits, MBAFF=%d ACT=%d\n", ctx->width, ctx->height, ctx->is_444 ? "4:4" : "2:2", ctx->bit_depth, ctx->mbaff, ctx->act); // Newer format supports variable mb_scan_index sizes if (ctx->mb_height > 68 && ff_dnxhd_check_header_prefix_hr(header_prefix)) { ctx->data_offset = 0x170 + (ctx->mb_height << 2); } else { if (ctx->mb_height > 68 || (ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } ctx->data_offset = 0x280; } if (buf_size < ctx->data_offset) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < %d).\n", buf_size, ctx->data_offset); return AVERROR_INVALIDDATA; } if (ctx->mb_height > FF_ARRAY_ELEMS(ctx->mb_scan_index)) { av_log(ctx->avctx, AV_LOG_ERROR, "mb_height too big (%d > %"SIZE_SPECIFIER").\n", ctx->mb_height, FF_ARRAY_ELEMS(ctx->mb_scan_index)); return AVERROR_INVALIDDATA; } for (i = 0; i < ctx->mb_height; i++) { ctx->mb_scan_index[i] = AV_RB32(buf + 0x170 + (i << 2)); ff_dlog(ctx->avctx, "mb scan index %d, pos %d: %"PRIu32"\n", i, 0x170 + (i << 2), ctx->mb_scan_index[i]); if (buf_size - ctx->data_offset < ctx->mb_scan_index[i]) { av_log(ctx->avctx, AV_LOG_ERROR, "invalid mb scan index (%"PRIu32" vs %u).\n", ctx->mb_scan_index[i], buf_size - ctx->data_offset); return AVERROR_INVALIDDATA; } } return 0; }
static int dnxhd_decode_header(DNXHDContext *ctx, AVFrame *frame, const uint8_t *buf, int buf_size, int first_field) { int i, cid, ret; int old_bit_depth = ctx->bit_depth, bitdepth; uint64_t header_prefix; if (buf_size < 0x280) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < 640).\n", buf_size); return AVERROR_INVALIDDATA; } header_prefix = ff_dnxhd_parse_header_prefix(buf); if (header_prefix == 0) { av_log(ctx->avctx, AV_LOG_ERROR, "unknown header 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X\n", buf[0], buf[1], buf[2], buf[3], buf[4]); return AVERROR_INVALIDDATA; } if (buf[5] & 2) { /* interlaced */ ctx->cur_field = buf[5] & 1; frame->interlaced_frame = 1; frame->top_field_first = first_field ^ ctx->cur_field; av_log(ctx->avctx, AV_LOG_DEBUG, "interlaced %d, cur field %d\n", buf[5] & 3, ctx->cur_field); } else { ctx->cur_field = 0; } ctx->mbaff = (buf[0x6] >> 5) & 1; ctx->height = AV_RB16(buf + 0x18); ctx->width = AV_RB16(buf + 0x1a); switch(buf[0x21] >> 5) { case 1: bitdepth = 8; break; case 2: bitdepth = 10; break; case 3: bitdepth = 12; break; default: av_log(ctx->avctx, AV_LOG_ERROR, "Unknown bitdepth indicator (%d)\n", buf[0x21] >> 5); return AVERROR_INVALIDDATA; } cid = AV_RB32(buf + 0x28); ctx->avctx->profile = dnxhd_get_profile(cid); if ((ret = dnxhd_init_vlc(ctx, cid, bitdepth)) < 0) return ret; if (ctx->mbaff && ctx->cid_table->cid != 1260) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive MB interlace flag in an unsupported profile.\n"); ctx->act = buf[0x2C] & 7; if (ctx->act && ctx->cid_table->cid != 1256 && ctx->cid_table->cid != 1270) av_log(ctx->avctx, AV_LOG_WARNING, "Adaptive color transform in an unsupported profile.\n"); ctx->is_444 = (buf[0x2C] >> 6) & 1; if (ctx->is_444) { if (bitdepth == 8) { avpriv_request_sample(ctx->avctx, "4:4:4 8 bits"); return AVERROR_INVALIDDATA; } else if (bitdepth == 10) { ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; ctx->pix_fmt = ctx->act ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_GBRP10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_12_444; ctx->pix_fmt = ctx->act ? 
AV_PIX_FMT_YUV444P12 : AV_PIX_FMT_GBRP12; } } else if (bitdepth == 12) { ctx->decode_dct_block = dnxhd_decode_dct_block_12; ctx->pix_fmt = AV_PIX_FMT_YUV422P12; } else if (bitdepth == 10) { if (ctx->avctx->profile == FF_PROFILE_DNXHR_HQX) ctx->decode_dct_block = dnxhd_decode_dct_block_10_444; else ctx->decode_dct_block = dnxhd_decode_dct_block_10; ctx->pix_fmt = AV_PIX_FMT_YUV422P10; } else { ctx->decode_dct_block = dnxhd_decode_dct_block_8; ctx->pix_fmt = AV_PIX_FMT_YUV422P; } ctx->avctx->bits_per_raw_sample = ctx->bit_depth = bitdepth; if (ctx->bit_depth != old_bit_depth) { ff_blockdsp_init(&ctx->bdsp, ctx->avctx); ff_idctdsp_init(&ctx->idsp, ctx->avctx); ff_init_scantable(ctx->idsp.idct_permutation, &ctx->scantable, ff_zigzag_direct); } // make sure profile size constraints are respected // DNx100 allows 1920->1440 and 1280->960 subsampling if (ctx->width != ctx->cid_table->width && ctx->cid_table->width != DNXHD_VARIABLE) { av_reduce(&ctx->avctx->sample_aspect_ratio.num, &ctx->avctx->sample_aspect_ratio.den, ctx->width, ctx->cid_table->width, 255); ctx->width = ctx->cid_table->width; } if (buf_size < ctx->cid_table->coding_unit_size) { av_log(ctx->avctx, AV_LOG_ERROR, "incorrect frame size (%d < %u).\n", buf_size, ctx->cid_table->coding_unit_size); return AVERROR_INVALIDDATA; } ctx->mb_width = (ctx->width + 15)>> 4; ctx->mb_height = AV_RB16(buf + 0x16c); if ((ctx->height + 15) >> 4 == ctx->mb_height && frame->interlaced_frame) ctx->height <<= 1; av_log(ctx->avctx, AV_LOG_VERBOSE, "%dx%d, 4:%s %d bits, MBAFF=%d ACT=%d\n", ctx->width, ctx->height, ctx->is_444 ? "4:4" : "2:2", ctx->bit_depth, ctx->mbaff, ctx->act); // Newer format supports variable mb_scan_index sizes if (ctx->mb_height > 68 && ff_dnxhd_check_header_prefix_hr(header_prefix)) { ctx->data_offset = 0x170 + (ctx->mb_height << 2); } else { if (ctx->mb_height > 68) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } ctx->data_offset = 0x280; } if ((ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) { av_log(ctx->avctx, AV_LOG_ERROR, "mb height too big: %d\n", ctx->mb_height); return AVERROR_INVALIDDATA; } if (buf_size < ctx->data_offset) { av_log(ctx->avctx, AV_LOG_ERROR, "buffer too small (%d < %d).\n", buf_size, ctx->data_offset); return AVERROR_INVALIDDATA; } if (ctx->mb_height > FF_ARRAY_ELEMS(ctx->mb_scan_index)) { av_log(ctx->avctx, AV_LOG_ERROR, "mb_height too big (%d > %"SIZE_SPECIFIER").\n", ctx->mb_height, FF_ARRAY_ELEMS(ctx->mb_scan_index)); return AVERROR_INVALIDDATA; } for (i = 0; i < ctx->mb_height; i++) { ctx->mb_scan_index[i] = AV_RB32(buf + 0x170 + (i << 2)); ff_dlog(ctx->avctx, "mb scan index %d, pos %d: %"PRIu32"\n", i, 0x170 + (i << 2), ctx->mb_scan_index[i]); if (buf_size - ctx->data_offset < ctx->mb_scan_index[i]) { av_log(ctx->avctx, AV_LOG_ERROR, "invalid mb scan index (%"PRIu32" vs %u).\n", ctx->mb_scan_index[i], buf_size - ctx->data_offset); return AVERROR_INVALIDDATA; } } return 0; }
{'added': [(301, ' if (ctx->mb_height > 68) {'), (308, ' if ((ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) {'), (309, ' av_log(ctx->avctx, AV_LOG_ERROR,'), (310, ' "mb height too big: %d\\n", ctx->mb_height);'), (311, ' return AVERROR_INVALIDDATA;'), (312, ' }')], 'deleted': [(301, ' if (ctx->mb_height > 68 ||'), (302, ' (ctx->mb_height << frame->interlaced_frame) > (ctx->height + 15) >> 4) {')]}
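Reading the diff together with func_before/func_after: before the patch, the (mb_height << interlaced_frame) > (height + 15) >> 4 consistency check was OR-ed into the else branch, so streams taking the variable-size HR index path (mb_height > 68 with an HR header prefix) never hit it, letting a header-supplied mb_height address macroblock rows beyond what the decoded frame height allows — the out-of-bounds access behind this record's CVE-2017-11719 (CWE-125). The patch splits the condition and hoists the consistency check so it runs on both paths. Below is a schematic of the fixed control flow, not the full decoder; is_hr stands in for ff_dnxhd_check_header_prefix_hr().

/* Schematic of dnxhd_decode_header()'s patched validation order. */
static int validate_mb_height(int mb_height, int height, int interlaced, int is_hr)
{
    int data_offset;

    if (mb_height > 68 && is_hr) {
        data_offset = 0x170 + (mb_height << 2);   /* variable-size scan index */
    } else {
        if (mb_height > 68)
            return -1;                            /* too big for a 0x280 header */
        data_offset = 0x280;
    }
    /* After the fix this check runs on BOTH paths; before the fix it was
     * folded into the else branch above, so the HR path skipped it. */
    if ((mb_height << interlaced) > (height + 15) >> 4)
        return -1;
    return data_offset;
}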
6
2
599
4605
138
1095
36
https://github.com/FFmpeg/FFmpeg
CVE-2017-11719
CWE-125
1752
websockets.c
C
webSocketsDecodeHybi
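The before/after pair for this record centers on webSocketsDecodeHybi(), LibVNCServer's decoder for RFC 6455 (IETF hybi) frames. For reference while reading it, the sketch below unpacks the two header bytes the function interprets first; the field meanings come from RFC 6455 itself, and the helper is illustrative rather than part of the library.

#include <stdint.h>

typedef struct {
    int fin;           /* final-fragment flag */
    unsigned opcode;   /* 0x1 text, 0x2 binary, 0x8 close, 0x9 ping, 0xA pong */
    int masked;        /* client-to-server frames MUST set this */
    unsigned len7;     /* 0..125 literal; 126 -> u16 follows; 127 -> u64 follows */
} ws_basic_header;

static ws_basic_header parse_b0_b1(uint8_t b0, uint8_t b1)
{
    ws_basic_header h;
    h.fin    = (b0 & 0x80) >> 7;
    h.opcode =  b0 & 0x0f;
    h.masked = (b1 & 0x80) >> 7;
    h.len7   =  b1 & 0x7f;
    return h;
}

A 7-bit length of 126 or 127 signals that a 16- or 64-bit extended length follows, which is exactly the flength == 126 / flength == 127 branching visible in both versions below.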
/* * websockets.c - deal with WebSockets clients. * * This code should be independent of any changes in the RFB protocol. It is * an additional handshake and framing of normal sockets: * http://www.whatwg.org/specs/web-socket-protocol/ * */ /* * Copyright (C) 2010 Joel Martin * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #ifdef __STRICT_ANSI__ #define _BSD_SOURCE #endif #include <rfb/rfb.h> /* errno */ #include <errno.h> #ifndef _MSC_VER #include <resolv.h> /* __b64_ntop */ #endif #ifdef LIBVNCSERVER_HAVE_ENDIAN_H #include <endian.h> #elif LIBVNCSERVER_HAVE_SYS_ENDIAN_H #include <sys/endian.h> #endif #ifdef LIBVNCSERVER_HAVE_SYS_TYPES_H #include <sys/types.h> #endif #include <string.h> #if LIBVNCSERVER_UNISTD_H #include <unistd.h> #endif #include "rfb/rfbconfig.h" #include "rfbssl.h" #include "rfbcrypto.h" #if defined(__APPLE__) #include <libkern/OSByteOrder.h> #define WS_NTOH64(n) OSSwapBigToHostInt64(n) #define WS_NTOH32(n) OSSwapBigToHostInt32(n) #define WS_NTOH16(n) OSSwapBigToHostInt16(n) #define WS_HTON64(n) OSSwapHostToBigInt64(n) #define WS_HTON16(n) OSSwapHostToBigInt16(n) #else #define WS_NTOH64(n) htobe64(n) #define WS_NTOH32(n) htobe32(n) #define WS_NTOH16(n) htobe16(n) #define WS_HTON64(n) htobe64(n) #define WS_HTON16(n) htobe16(n) #endif #define B64LEN(__x) (((__x + 2) / 3) * 12 / 3) #define WSHLENMAX 14 /* 2 + sizeof(uint64_t) + sizeof(uint32_t) */ enum { WEBSOCKETS_VERSION_HIXIE, WEBSOCKETS_VERSION_HYBI }; #if 0 #include <sys/syscall.h> static int gettid() { return (int)syscall(SYS_gettid); } #endif typedef int (*wsEncodeFunc)(rfbClientPtr cl, const char *src, int len, char **dst); typedef int (*wsDecodeFunc)(rfbClientPtr cl, char *dst, int len); typedef struct ws_ctx_s { char codeBufDecode[B64LEN(UPDATE_BUF_SIZE) + WSHLENMAX]; /* base64 + maximum frame header length */ char codeBufEncode[B64LEN(UPDATE_BUF_SIZE) + WSHLENMAX]; /* base64 + maximum frame header length */ char readbuf[8192]; int readbufstart; int readbuflen; int dblen; char carryBuf[3]; /* For base64 carry-over */ int carrylen; int version; int base64; wsEncodeFunc encode; wsDecodeFunc decode; } ws_ctx_t; typedef union ws_mask_s { char c[4]; uint32_t u; } ws_mask_t; /* XXX: The union and the structs do not need to be named. * We are working around a bug present in GCC < 4.6 which prevented * it from recognizing anonymous structs and unions. 
* See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=4784 */ typedef struct #if __GNUC__ __attribute__ ((__packed__)) #endif ws_header_s { unsigned char b0; unsigned char b1; union { struct #if __GNUC__ __attribute__ ((__packed__)) #endif { uint16_t l16; ws_mask_t m16; } s16; struct #if __GNUC__ __attribute__ ((__packed__)) #endif { uint64_t l64; ws_mask_t m64; } s64; ws_mask_t m; } u; } ws_header_t; enum { WS_OPCODE_CONTINUATION = 0x0, WS_OPCODE_TEXT_FRAME, WS_OPCODE_BINARY_FRAME, WS_OPCODE_CLOSE = 0x8, WS_OPCODE_PING, WS_OPCODE_PONG }; #define FLASH_POLICY_RESPONSE "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\" /></cross-domain-policy>\n" #define SZ_FLASH_POLICY_RESPONSE 93 /* * draft-ietf-hybi-thewebsocketprotocol-10 * 5.2.2. Sending the Server's Opening Handshake */ #define GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" #define SERVER_HANDSHAKE_HIXIE "HTTP/1.1 101 Web Socket Protocol Handshake\r\n\ Upgrade: WebSocket\r\n\ Connection: Upgrade\r\n\ %sWebSocket-Origin: %s\r\n\ %sWebSocket-Location: %s://%s%s\r\n\ %sWebSocket-Protocol: %s\r\n\ \r\n%s" #define SERVER_HANDSHAKE_HYBI "HTTP/1.1 101 Switching Protocols\r\n\ Upgrade: websocket\r\n\ Connection: Upgrade\r\n\ Sec-WebSocket-Accept: %s\r\n\ Sec-WebSocket-Protocol: %s\r\n\ \r\n" #define SERVER_HANDSHAKE_HYBI_NO_PROTOCOL "HTTP/1.1 101 Switching Protocols\r\n\ Upgrade: websocket\r\n\ Connection: Upgrade\r\n\ Sec-WebSocket-Accept: %s\r\n\ \r\n" #define WEBSOCKETS_CLIENT_CONNECT_WAIT_MS 100 #define WEBSOCKETS_CLIENT_SEND_WAIT_MS 100 #define WEBSOCKETS_MAX_HANDSHAKE_LEN 4096 #if defined(__linux__) && defined(NEED_TIMEVAL) struct timeval { long int tv_sec,tv_usec; } ; #endif static rfbBool webSocketsHandshake(rfbClientPtr cl, char *scheme); void webSocketsGenMd5(char * target, char *key1, char *key2, char *key3); static int webSocketsEncodeHybi(rfbClientPtr cl, const char *src, int len, char **dst); static int webSocketsEncodeHixie(rfbClientPtr cl, const char *src, int len, char **dst); static int webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len); static int webSocketsDecodeHixie(rfbClientPtr cl, char *dst, int len); static int min (int a, int b) { return a < b ? 
a : b; } static void webSocketsGenSha1Key(char *target, int size, char *key) { struct iovec iov[2]; unsigned char hash[20]; iov[0].iov_base = key; iov[0].iov_len = strlen(key); iov[1].iov_base = GUID; iov[1].iov_len = sizeof(GUID) - 1; digestsha1(iov, 2, hash); if (-1 == b64_ntop(hash, sizeof(hash), target, size)) rfbErr("b64_ntop failed\n"); } /* * rfbWebSocketsHandshake is called to handle new WebSockets connections */ rfbBool webSocketsCheck (rfbClientPtr cl) { char bbuf[4], *scheme; int ret; ret = rfbPeekExactTimeout(cl, bbuf, 4, WEBSOCKETS_CLIENT_CONNECT_WAIT_MS); if ((ret < 0) && (errno == ETIMEDOUT)) { rfbLog("Normal socket connection\n"); return TRUE; } else if (ret <= 0) { rfbErr("webSocketsHandshake: unknown connection error\n"); return FALSE; } if (strncmp(bbuf, "<", 1) == 0) { rfbLog("Got Flash policy request, sending response\n"); if (rfbWriteExact(cl, FLASH_POLICY_RESPONSE, SZ_FLASH_POLICY_RESPONSE) < 0) { rfbErr("webSocketsHandshake: failed sending Flash policy response"); } return FALSE; } else if (strncmp(bbuf, "\x16", 1) == 0 || strncmp(bbuf, "\x80", 1) == 0) { rfbLog("Got TLS/SSL WebSockets connection\n"); if (-1 == rfbssl_init(cl)) { rfbErr("webSocketsHandshake: rfbssl_init failed\n"); return FALSE; } ret = rfbPeekExactTimeout(cl, bbuf, 4, WEBSOCKETS_CLIENT_CONNECT_WAIT_MS); scheme = "wss"; } else { scheme = "ws"; } if (strncmp(bbuf, "GET ", 4) != 0) { rfbErr("webSocketsHandshake: invalid client header\n"); return FALSE; } rfbLog("Got '%s' WebSockets handshake\n", scheme); if (!webSocketsHandshake(cl, scheme)) { return FALSE; } /* Start WebSockets framing */ return TRUE; } static rfbBool webSocketsHandshake(rfbClientPtr cl, char *scheme) { char *buf, *response, *line; int n, linestart = 0, len = 0, llen, base64 = TRUE; char prefix[5], trailer[17]; char *path = NULL, *host = NULL, *origin = NULL, *protocol = NULL; char *key1 = NULL, *key2 = NULL, *key3 = NULL; char *sec_ws_origin = NULL; char *sec_ws_key = NULL; char sec_ws_version = 0; ws_ctx_t *wsctx = NULL; buf = (char *) malloc(WEBSOCKETS_MAX_HANDSHAKE_LEN); if (!buf) { rfbLogPerror("webSocketsHandshake: malloc"); return FALSE; } response = (char *) malloc(WEBSOCKETS_MAX_HANDSHAKE_LEN); if (!response) { free(buf); rfbLogPerror("webSocketsHandshake: malloc"); return FALSE; } while (len < WEBSOCKETS_MAX_HANDSHAKE_LEN-1) { if ((n = rfbReadExactTimeout(cl, buf+len, 1, WEBSOCKETS_CLIENT_SEND_WAIT_MS)) <= 0) { if ((n < 0) && (errno == ETIMEDOUT)) { break; } if (n == 0) rfbLog("webSocketsHandshake: client gone\n"); else rfbLogPerror("webSocketsHandshake: read"); free(response); free(buf); return FALSE; } len += 1; llen = len - linestart; if (((llen >= 2)) && (buf[len-1] == '\n')) { line = buf+linestart; if ((llen == 2) && (strncmp("\r\n", line, 2) == 0)) { if (key1 && key2) { if ((n = rfbReadExact(cl, buf+len, 8)) <= 0) { if ((n < 0) && (errno == ETIMEDOUT)) { break; } if (n == 0) rfbLog("webSocketsHandshake: client gone\n"); else rfbLogPerror("webSocketsHandshake: read"); free(response); free(buf); return FALSE; } rfbLog("Got key3\n"); key3 = buf+len; len += 8; } else { buf[len] = '\0'; } break; } else if ((llen >= 16) && ((strncmp("GET ", line, min(llen,4))) == 0)) { /* 16 = 4 ("GET ") + 1 ("/.*") + 11 (" HTTP/1.1\r\n") */ path = line+4; buf[len-11] = '\0'; /* Trim trailing " HTTP/1.1\r\n" */ cl->wspath = strdup(path); /* rfbLog("Got path: %s\n", path); */ } else if ((strncasecmp("host: ", line, min(llen,6))) == 0) { host = line+6; buf[len-2] = '\0'; /* rfbLog("Got host: %s\n", host); */ } else if ((strncasecmp("origin: 
", line, min(llen,8))) == 0) { origin = line+8; buf[len-2] = '\0'; /* rfbLog("Got origin: %s\n", origin); */ } else if ((strncasecmp("sec-websocket-key1: ", line, min(llen,20))) == 0) { key1 = line+20; buf[len-2] = '\0'; /* rfbLog("Got key1: %s\n", key1); */ } else if ((strncasecmp("sec-websocket-key2: ", line, min(llen,20))) == 0) { key2 = line+20; buf[len-2] = '\0'; /* rfbLog("Got key2: %s\n", key2); */ /* HyBI */ } else if ((strncasecmp("sec-websocket-protocol: ", line, min(llen,24))) == 0) { protocol = line+24; buf[len-2] = '\0'; rfbLog("Got protocol: %s\n", protocol); } else if ((strncasecmp("sec-websocket-origin: ", line, min(llen,22))) == 0) { sec_ws_origin = line+22; buf[len-2] = '\0'; } else if ((strncasecmp("sec-websocket-key: ", line, min(llen,19))) == 0) { sec_ws_key = line+19; buf[len-2] = '\0'; } else if ((strncasecmp("sec-websocket-version: ", line, min(llen,23))) == 0) { sec_ws_version = strtol(line+23, NULL, 10); buf[len-2] = '\0'; } linestart = len; } } if (!(path && host && (origin || sec_ws_origin))) { rfbErr("webSocketsHandshake: incomplete client handshake\n"); free(response); free(buf); return FALSE; } if ((protocol) && (strstr(protocol, "binary"))) { if (! sec_ws_version) { rfbErr("webSocketsHandshake: 'binary' protocol not supported with Hixie\n"); free(response); free(buf); return FALSE; } rfbLog(" - webSocketsHandshake: using binary/raw encoding\n"); base64 = FALSE; protocol = "binary"; } else { rfbLog(" - webSocketsHandshake: using base64 encoding\n"); base64 = TRUE; if ((protocol) && (strstr(protocol, "base64"))) { protocol = "base64"; } else { protocol = ""; } } /* * Generate the WebSockets server response based on the the headers sent * by the client. */ if (sec_ws_version) { char accept[B64LEN(SHA1_HASH_SIZE) + 1]; rfbLog(" - WebSockets client version hybi-%02d\n", sec_ws_version); webSocketsGenSha1Key(accept, sizeof(accept), sec_ws_key); if(strlen(protocol) > 0) len = snprintf(response, WEBSOCKETS_MAX_HANDSHAKE_LEN, SERVER_HANDSHAKE_HYBI, accept, protocol); else len = snprintf(response, WEBSOCKETS_MAX_HANDSHAKE_LEN, SERVER_HANDSHAKE_HYBI_NO_PROTOCOL, accept); } else { /* older hixie handshake, this could be removed if * a final standard is established */ if (!(key1 && key2 && key3)) { rfbLog(" - WebSockets client version hixie-75\n"); prefix[0] = '\0'; trailer[0] = '\0'; } else { rfbLog(" - WebSockets client version hixie-76\n"); snprintf(prefix, 5, "Sec-"); webSocketsGenMd5(trailer, key1, key2, key3); } len = snprintf(response, WEBSOCKETS_MAX_HANDSHAKE_LEN, SERVER_HANDSHAKE_HIXIE, prefix, origin, prefix, scheme, host, path, prefix, protocol, trailer); } if (rfbWriteExact(cl, response, len) < 0) { rfbErr("webSocketsHandshake: failed sending WebSockets response\n"); free(response); free(buf); return FALSE; } /* rfbLog("webSocketsHandshake: %s\n", response); */ free(response); free(buf); wsctx = calloc(1, sizeof(ws_ctx_t)); if (sec_ws_version) { wsctx->version = WEBSOCKETS_VERSION_HYBI; wsctx->encode = webSocketsEncodeHybi; wsctx->decode = webSocketsDecodeHybi; } else { wsctx->version = WEBSOCKETS_VERSION_HIXIE; wsctx->encode = webSocketsEncodeHixie; wsctx->decode = webSocketsDecodeHixie; } wsctx->base64 = base64; cl->wsctx = (wsCtx *)wsctx; return TRUE; } void webSocketsGenMd5(char * target, char *key1, char *key2, char *key3) { unsigned int i, spaces1 = 0, spaces2 = 0; unsigned long num1 = 0, num2 = 0; unsigned char buf[17]; struct iovec iov[1]; for (i=0; i < strlen(key1); i++) { if (key1[i] == ' ') { spaces1 += 1; } if ((key1[i] >= 48) && (key1[i] <= 57)) 
{ num1 = num1 * 10 + (key1[i] - 48); } } num1 = num1 / spaces1; for (i=0; i < strlen(key2); i++) { if (key2[i] == ' ') { spaces2 += 1; } if ((key2[i] >= 48) && (key2[i] <= 57)) { num2 = num2 * 10 + (key2[i] - 48); } } num2 = num2 / spaces2; /* Pack it big-endian */ buf[0] = (num1 & 0xff000000) >> 24; buf[1] = (num1 & 0xff0000) >> 16; buf[2] = (num1 & 0xff00) >> 8; buf[3] = num1 & 0xff; buf[4] = (num2 & 0xff000000) >> 24; buf[5] = (num2 & 0xff0000) >> 16; buf[6] = (num2 & 0xff00) >> 8; buf[7] = num2 & 0xff; strncpy((char *)buf+8, key3, 8); buf[16] = '\0'; iov[0].iov_base = buf; iov[0].iov_len = 16; digestmd5(iov, 1, target); target[16] = '\0'; return; } static int webSocketsEncodeHixie(rfbClientPtr cl, const char *src, int len, char **dst) { int sz = 0; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; wsctx->codeBufEncode[sz++] = '\x00'; len = b64_ntop((unsigned char *)src, len, wsctx->codeBufEncode+sz, sizeof(wsctx->codeBufEncode) - (sz + 1)); if (len < 0) { return len; } sz += len; wsctx->codeBufEncode[sz++] = '\xff'; *dst = wsctx->codeBufEncode; return sz; } static int ws_read(rfbClientPtr cl, char *buf, int len) { int n; if (cl->sslctx) { n = rfbssl_read(cl, buf, len); } else { n = read(cl->sock, buf, len); } return n; } static int ws_peek(rfbClientPtr cl, char *buf, int len) { int n; if (cl->sslctx) { n = rfbssl_peek(cl, buf, len); } else { while (-1 == (n = recv(cl->sock, buf, len, MSG_PEEK))) { if (errno != EAGAIN) break; } } return n; } static int webSocketsDecodeHixie(rfbClientPtr cl, char *dst, int len) { int retlen = 0, n, i, avail, modlen, needlen; char *buf, *end = NULL; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; buf = wsctx->codeBufDecode; n = ws_peek(cl, buf, len*2+2); if (n <= 0) { /* save errno because rfbErr() will tamper it */ int olderrno = errno; rfbErr("%s: peek (%d) %m\n", __func__, errno); errno = olderrno; return n; } /* Base64 encoded WebSockets stream */ if (buf[0] == '\xff') { i = ws_read(cl, buf, 1); /* Consume marker */ buf++; n--; } if (n == 0) { errno = EAGAIN; return -1; } if (buf[0] == '\x00') { i = ws_read(cl, buf, 1); /* Consume marker */ buf++; n--; } if (n == 0) { errno = EAGAIN; return -1; } /* end = memchr(buf, '\xff', len*2+2); */ end = memchr(buf, '\xff', n); if (!end) { end = buf + n; } avail = end - buf; len -= wsctx->carrylen; /* Determine how much base64 data we need */ modlen = len + (len+2)/3; needlen = modlen; if (needlen % 4) { needlen += 4 - (needlen % 4); } if (needlen > avail) { /* rfbLog("Waiting for more base64 data\n"); */ errno = EAGAIN; return -1; } /* Any carryover from previous decode */ for (i=0; i < wsctx->carrylen; i++) { /* rfbLog("Adding carryover %d\n", wsctx->carryBuf[i]); */ dst[i] = wsctx->carryBuf[i]; retlen += 1; } /* Decode the rest of what we need */ buf[needlen] = '\x00'; /* Replace end marker with end of string */ /* rfbLog("buf: %s\n", buf); */ n = b64_pton(buf, (unsigned char *)dst+retlen, 2+len); if (n < len) { rfbErr("Base64 decode error\n"); errno = EIO; return -1; } retlen += n; /* Consume the data from socket */ i = ws_read(cl, buf, needlen); wsctx->carrylen = n - len; retlen -= wsctx->carrylen; for (i=0; i < wsctx->carrylen; i++) { /* rfbLog("Saving carryover %d\n", dst[retlen + i]); */ wsctx->carryBuf[i] = dst[retlen + i]; } /* rfbLog("<< webSocketsDecode, retlen: %d\n", retlen); */ return retlen; } static int webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len) { char *buf, *payload; uint32_t *payload32; int ret = -1, result = -1; int total = 0; ws_mask_t mask; ws_header_t *header; int i; unsigned char opcode; 
ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; int flength, fhlen; /* int fin; */ /* not used atm */ /* rfbLog(" <== %s[%d]: %d cl: %p, wsctx: %p-%p (%d)\n", __func__, gettid(), len, cl, wsctx, (char *)wsctx + sizeof(ws_ctx_t), sizeof(ws_ctx_t)); */ if (wsctx->readbuflen) { /* simply return what we have */ if (wsctx->readbuflen > len) { memcpy(dst, wsctx->readbuf + wsctx->readbufstart, len); result = len; wsctx->readbuflen -= len; wsctx->readbufstart += len; } else { memcpy(dst, wsctx->readbuf + wsctx->readbufstart, wsctx->readbuflen); result = wsctx->readbuflen; wsctx->readbuflen = 0; wsctx->readbufstart = 0; } goto spor; } buf = wsctx->codeBufDecode; header = (ws_header_t *)wsctx->codeBufDecode; ret = ws_peek(cl, buf, B64LEN(len) + WSHLENMAX); if (ret < 2) { /* save errno because rfbErr() will tamper it */ if (-1 == ret) { int olderrno = errno; rfbErr("%s: peek; %m\n", __func__); errno = olderrno; } else if (0 == ret) { result = 0; } else { errno = EAGAIN; } goto spor; } opcode = header->b0 & 0x0f; /* fin = (header->b0 & 0x80) >> 7; */ /* not used atm */ flength = header->b1 & 0x7f; /* * 4.3. Client-to-Server Masking * * The client MUST mask all frames sent to the server. A server MUST * close the connection upon receiving a frame with the MASK bit set to 0. **/ if (!(header->b1 & 0x80)) { rfbErr("%s: got frame without mask\n", __func__, ret); errno = EIO; goto spor; } if (flength < 126) { fhlen = 2; mask = header->u.m; } else if (flength == 126 && 4 <= ret) { flength = WS_NTOH16(header->u.s16.l16); fhlen = 4; mask = header->u.s16.m16; } else if (flength == 127 && 10 <= ret) { flength = WS_NTOH64(header->u.s64.l64); fhlen = 10; mask = header->u.s64.m64; } else { /* Incomplete frame header */ rfbErr("%s: incomplete frame header\n", __func__, ret); errno = EIO; goto spor; } /* absolute length of frame */ total = fhlen + flength + 4; payload = buf + fhlen + 4; /* header length + mask */ if (-1 == (ret = ws_read(cl, buf, total))) { int olderrno = errno; rfbErr("%s: read; %m", __func__); errno = olderrno; return ret; } else if (ret < total) { /* GT TODO: hmm? */ rfbLog("%s: read; got partial data\n", __func__); } else { buf[ret] = '\0'; } /* process 1 frame (32 bit op) */ payload32 = (uint32_t *)payload; for (i = 0; i < flength / 4; i++) { payload32[i] ^= mask.u; } /* process the remaining bytes (if any) */ for (i*=4; i < flength; i++) { payload[i] ^= mask.c[i % 4]; } switch (opcode) { case WS_OPCODE_CLOSE: rfbLog("got closure, reason %d\n", WS_NTOH16(((uint16_t *)payload)[0])); errno = ECONNRESET; break; case WS_OPCODE_TEXT_FRAME: if (-1 == (flength = b64_pton(payload, (unsigned char *)wsctx->codeBufDecode, sizeof(wsctx->codeBufDecode)))) { rfbErr("%s: Base64 decode error; %m\n", __func__); break; } payload = wsctx->codeBufDecode; /* fall through */ case WS_OPCODE_BINARY_FRAME: if (flength > len) { memcpy(wsctx->readbuf, payload + len, flength - len); wsctx->readbufstart = 0; wsctx->readbuflen = flength - len; flength = len; } memcpy(dst, payload, flength); result = flength; break; default: rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\n", __func__, (int)opcode, header->b0, header->b1); } /* single point of return, if someone has questions :-) */ spor: /* rfbLog("%s: ret: %d/%d\n", __func__, result, len); */ return result; } static int webSocketsEncodeHybi(rfbClientPtr cl, const char *src, int len, char **dst) { int blen, ret = -1, sz = 0; unsigned char opcode = '\0'; /* TODO: option! 
*/ ws_header_t *header; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; /* Optional opcode: * 0x0 - continuation * 0x1 - text frame (base64 encode buf) * 0x2 - binary frame (use raw buf) * 0x8 - connection close * 0x9 - ping * 0xA - pong **/ if (!len) { /* nothing to encode */ return 0; } header = (ws_header_t *)wsctx->codeBufEncode; if (wsctx->base64) { opcode = WS_OPCODE_TEXT_FRAME; /* calculate the resulting size */ blen = B64LEN(len); } else { opcode = WS_OPCODE_BINARY_FRAME; blen = len; } header->b0 = 0x80 | (opcode & 0x0f); if (blen <= 125) { header->b1 = (uint8_t)blen; sz = 2; } else if (blen <= 65536) { header->b1 = 0x7e; header->u.s16.l16 = WS_HTON16((uint16_t)blen); sz = 4; } else { header->b1 = 0x7f; header->u.s64.l64 = WS_HTON64(blen); sz = 10; } if (wsctx->base64) { if (-1 == (ret = b64_ntop((unsigned char *)src, len, wsctx->codeBufEncode + sz, sizeof(wsctx->codeBufEncode) - sz))) { rfbErr("%s: Base 64 encode failed\n", __func__); } else { if (ret != blen) rfbErr("%s: Base 64 encode; something weird happened\n", __func__); ret += sz; } } else { memcpy(wsctx->codeBufEncode + sz, src, len); ret = sz + len; } *dst = wsctx->codeBufEncode; return ret; } int webSocketsEncode(rfbClientPtr cl, const char *src, int len, char **dst) { return ((ws_ctx_t *)cl->wsctx)->encode(cl, src, len, dst); } int webSocketsDecode(rfbClientPtr cl, char *dst, int len) { return ((ws_ctx_t *)cl->wsctx)->decode(cl, dst, len); } /* returns TRUE if client sent a close frame or a single 'end of frame' * marker was received, FALSE otherwise * * Note: This is a Hixie-only hack! **/ rfbBool webSocketCheckDisconnect(rfbClientPtr cl) { ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; /* With Base64 encoding we need at least 4 bytes */ char peekbuf[4]; int n; if (wsctx->version == WEBSOCKETS_VERSION_HYBI) return FALSE; if (cl->sslctx) n = rfbssl_peek(cl, peekbuf, 4); else n = recv(cl->sock, peekbuf, 4, MSG_PEEK); if (n <= 0) { if (n != 0) rfbErr("%s: peek; %m", __func__); rfbCloseClient(cl); return TRUE; } if (peekbuf[0] == '\xff') { int doclose = 0; /* Make sure we don't miss a client disconnect on an end frame * marker. Because we use a peek buffer in some cases it is not * applicable to wait for more data per select(). */ switch (n) { case 3: if (peekbuf[1] == '\xff' && peekbuf[2] == '\x00') doclose = 1; break; case 2: if (peekbuf[1] == '\x00') doclose = 1; break; default: return FALSE; } if (cl->sslctx) n = rfbssl_read(cl, peekbuf, n); else n = read(cl->sock, peekbuf, n); if (doclose) { rfbErr("%s: websocket close frame received\n", __func__); rfbCloseClient(cl); } return TRUE; } return FALSE; } /* returns TRUE if there is data waiting to be read in our internal buffer * or if is there any pending data in the buffer of the SSL implementation */ rfbBool webSocketsHasDataInBuffer(rfbClientPtr cl) { ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; if (wsctx && wsctx->readbuflen) return TRUE; return (cl->sslctx && rfbssl_pending(cl) > 0); }
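Both versions of the decoder share the same unmasking step: client-to-server payload bytes are XORed with the 4-byte mask, a 32-bit word at a time with a byte-wise tail. The self-contained sketch below mirrors that loop; the one deliberate change is using memcpy() instead of the original's (uint32_t *)payload cast, which sidesteps the strict-aliasing and alignment concerns the cast raises. The inputs are made up.

#include <stdint.h>
#include <string.h>

/* XOR 'payload' in place with the repeating 4-byte wire mask. */
static void ws_unmask(uint8_t *payload, size_t len, const uint8_t mask[4])
{
    uint32_t m;
    size_t i;
    memcpy(&m, mask, 4);                 /* same byte order as mask.u in ws_mask_t */
    for (i = 0; i + 4 <= len; i += 4) {  /* 32-bit chunks */
        uint32_t w;
        memcpy(&w, payload + i, 4);
        w ^= m;
        memcpy(payload + i, &w, 4);
    }
    for (; i < len; i++)                 /* remaining 0..3 bytes */
        payload[i] ^= mask[i % 4];
}

The rewritten code_after that follows replaces code_before's single-shot peek/decode with an explicit WS_HYBI_STATE_* state machine (hybiReadHeader(), hybiReturnData(), hybiDecodeCleanup()), so header parsing, payload reads, and carry-over between calls are each tracked separately in ws_ctx_t.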
/* * websockets.c - deal with WebSockets clients. * * This code should be independent of any changes in the RFB protocol. It is * an additional handshake and framing of normal sockets: * http://www.whatwg.org/specs/web-socket-protocol/ * */ /* * Copyright (C) 2010 Joel Martin * * This is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This software is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this software; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, * USA. */ #ifdef __STRICT_ANSI__ #define _BSD_SOURCE #endif #include <rfb/rfb.h> /* errno */ #include <errno.h> #ifndef _MSC_VER #include <resolv.h> /* __b64_ntop */ #endif #ifdef LIBVNCSERVER_HAVE_ENDIAN_H #include <endian.h> #elif LIBVNCSERVER_HAVE_SYS_ENDIAN_H #include <sys/endian.h> #endif #ifdef LIBVNCSERVER_HAVE_SYS_TYPES_H #include <sys/types.h> #endif #include <string.h> #if LIBVNCSERVER_UNISTD_H #include <unistd.h> #endif #include "rfb/rfbconfig.h" #include "rfbssl.h" #include "rfbcrypto.h" #if defined(__APPLE__) #include <libkern/OSByteOrder.h> #define WS_NTOH64(n) OSSwapBigToHostInt64(n) #define WS_NTOH32(n) OSSwapBigToHostInt32(n) #define WS_NTOH16(n) OSSwapBigToHostInt16(n) #define WS_HTON64(n) OSSwapHostToBigInt64(n) #define WS_HTON16(n) OSSwapHostToBigInt16(n) #else #define WS_NTOH64(n) htobe64(n) #define WS_NTOH32(n) htobe32(n) #define WS_NTOH16(n) htobe16(n) #define WS_HTON64(n) htobe64(n) #define WS_HTON16(n) htobe16(n) #endif #define B64LEN(__x) (((__x + 2) / 3) * 12 / 3) #define WSHLENMAX 14 /* 2 + sizeof(uint64_t) + sizeof(uint32_t) */ #define WS_HYBI_MASK_LEN 4 #define ARRAYSIZE(a) ((sizeof(a) / sizeof((a[0]))) / (size_t)(!(sizeof(a) % sizeof((a[0]))))) enum { WEBSOCKETS_VERSION_HIXIE, WEBSOCKETS_VERSION_HYBI }; #if 0 #include <sys/syscall.h> static int gettid() { return (int)syscall(SYS_gettid); } #endif typedef int (*wsEncodeFunc)(rfbClientPtr cl, const char *src, int len, char **dst); typedef int (*wsDecodeFunc)(rfbClientPtr cl, char *dst, int len); enum { /* header not yet received completely */ WS_HYBI_STATE_HEADER_PENDING, /* data available */ WS_HYBI_STATE_DATA_AVAILABLE, WS_HYBI_STATE_DATA_NEEDED, /* received a complete frame */ WS_HYBI_STATE_FRAME_COMPLETE, /* received part of a 'close' frame */ WS_HYBI_STATE_CLOSE_REASON_PENDING, /* */ WS_HYBI_STATE_ERR }; typedef union ws_mask_s { char c[4]; uint32_t u; } ws_mask_t; /* XXX: The union and the structs do not need to be named. * We are working around a bug present in GCC < 4.6 which prevented * it from recognizing anonymous structs and unions. 
* See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=4784 */ typedef struct #if __GNUC__ __attribute__ ((__packed__)) #endif ws_header_s { unsigned char b0; unsigned char b1; union { struct #if __GNUC__ __attribute__ ((__packed__)) #endif { uint16_t l16; ws_mask_t m16; } s16; struct #if __GNUC__ __attribute__ ((__packed__)) #endif { uint64_t l64; ws_mask_t m64; } s64; ws_mask_t m; } u; } ws_header_t; typedef struct ws_header_data_s { ws_header_t *data; /** bytes read */ int nRead; /** mask value */ ws_mask_t mask; /** length of frame header including payload len, but without mask */ int headerLen; /** length of the payload data */ int payloadLen; /** opcode */ unsigned char opcode; } ws_header_data_t; typedef struct ws_ctx_s { char codeBufDecode[B64LEN(UPDATE_BUF_SIZE) + WSHLENMAX]; /* base64 + maximum frame header length */ char codeBufEncode[B64LEN(UPDATE_BUF_SIZE) + WSHLENMAX]; /* base64 + maximum frame header length */ char *writePos; unsigned char *readPos; int readlen; int hybiDecodeState; char carryBuf[3]; /* For base64 carry-over */ int carrylen; int version; int base64; ws_header_data_t header; int nReadRaw; int nToRead; wsEncodeFunc encode; wsDecodeFunc decode; } ws_ctx_t; enum { WS_OPCODE_CONTINUATION = 0x0, WS_OPCODE_TEXT_FRAME, WS_OPCODE_BINARY_FRAME, WS_OPCODE_CLOSE = 0x8, WS_OPCODE_PING, WS_OPCODE_PONG }; #define FLASH_POLICY_RESPONSE "<cross-domain-policy><allow-access-from domain=\"*\" to-ports=\"*\" /></cross-domain-policy>\n" #define SZ_FLASH_POLICY_RESPONSE 93 /* * draft-ietf-hybi-thewebsocketprotocol-10 * 5.2.2. Sending the Server's Opening Handshake */ #define GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" #define SERVER_HANDSHAKE_HIXIE "HTTP/1.1 101 Web Socket Protocol Handshake\r\n\ Upgrade: WebSocket\r\n\ Connection: Upgrade\r\n\ %sWebSocket-Origin: %s\r\n\ %sWebSocket-Location: %s://%s%s\r\n\ %sWebSocket-Protocol: %s\r\n\ \r\n%s" #define SERVER_HANDSHAKE_HYBI "HTTP/1.1 101 Switching Protocols\r\n\ Upgrade: websocket\r\n\ Connection: Upgrade\r\n\ Sec-WebSocket-Accept: %s\r\n\ Sec-WebSocket-Protocol: %s\r\n\ \r\n" #define SERVER_HANDSHAKE_HYBI_NO_PROTOCOL "HTTP/1.1 101 Switching Protocols\r\n\ Upgrade: websocket\r\n\ Connection: Upgrade\r\n\ Sec-WebSocket-Accept: %s\r\n\ \r\n" #define WEBSOCKETS_CLIENT_CONNECT_WAIT_MS 100 #define WEBSOCKETS_CLIENT_SEND_WAIT_MS 100 #define WEBSOCKETS_MAX_HANDSHAKE_LEN 4096 #if defined(__linux__) && defined(NEED_TIMEVAL) struct timeval { long int tv_sec,tv_usec; } ; #endif static rfbBool webSocketsHandshake(rfbClientPtr cl, char *scheme); void webSocketsGenMd5(char * target, char *key1, char *key2, char *key3); static int webSocketsEncodeHybi(rfbClientPtr cl, const char *src, int len, char **dst); static int webSocketsEncodeHixie(rfbClientPtr cl, const char *src, int len, char **dst); static int webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len); static int webSocketsDecodeHixie(rfbClientPtr cl, char *dst, int len); static void hybiDecodeCleanup(ws_ctx_t *wsctx); static int min (int a, int b) { return a < b ? 
a : b; } static void webSocketsGenSha1Key(char *target, int size, char *key) { struct iovec iov[2]; unsigned char hash[20]; iov[0].iov_base = key; iov[0].iov_len = strlen(key); iov[1].iov_base = GUID; iov[1].iov_len = sizeof(GUID) - 1; digestsha1(iov, 2, hash); if (-1 == b64_ntop(hash, sizeof(hash), target, size)) rfbErr("b64_ntop failed\n"); } /* * rfbWebSocketsHandshake is called to handle new WebSockets connections */ rfbBool webSocketsCheck (rfbClientPtr cl) { char bbuf[4], *scheme; int ret; ret = rfbPeekExactTimeout(cl, bbuf, 4, WEBSOCKETS_CLIENT_CONNECT_WAIT_MS); if ((ret < 0) && (errno == ETIMEDOUT)) { rfbLog("Normal socket connection\n"); return TRUE; } else if (ret <= 0) { rfbErr("webSocketsHandshake: unknown connection error\n"); return FALSE; } if (strncmp(bbuf, "<", 1) == 0) { rfbLog("Got Flash policy request, sending response\n"); if (rfbWriteExact(cl, FLASH_POLICY_RESPONSE, SZ_FLASH_POLICY_RESPONSE) < 0) { rfbErr("webSocketsHandshake: failed sending Flash policy response"); } return FALSE; } else if (strncmp(bbuf, "\x16", 1) == 0 || strncmp(bbuf, "\x80", 1) == 0) { rfbLog("Got TLS/SSL WebSockets connection\n"); if (-1 == rfbssl_init(cl)) { rfbErr("webSocketsHandshake: rfbssl_init failed\n"); return FALSE; } ret = rfbPeekExactTimeout(cl, bbuf, 4, WEBSOCKETS_CLIENT_CONNECT_WAIT_MS); scheme = "wss"; } else { scheme = "ws"; } if (strncmp(bbuf, "GET ", 4) != 0) { rfbErr("webSocketsHandshake: invalid client header\n"); return FALSE; } rfbLog("Got '%s' WebSockets handshake\n", scheme); if (!webSocketsHandshake(cl, scheme)) { return FALSE; } /* Start WebSockets framing */ return TRUE; } static rfbBool webSocketsHandshake(rfbClientPtr cl, char *scheme) { char *buf, *response, *line; int n, linestart = 0, len = 0, llen, base64 = TRUE; char prefix[5], trailer[17]; char *path = NULL, *host = NULL, *origin = NULL, *protocol = NULL; char *key1 = NULL, *key2 = NULL, *key3 = NULL; char *sec_ws_origin = NULL; char *sec_ws_key = NULL; char sec_ws_version = 0; ws_ctx_t *wsctx = NULL; buf = (char *) malloc(WEBSOCKETS_MAX_HANDSHAKE_LEN); if (!buf) { rfbLogPerror("webSocketsHandshake: malloc"); return FALSE; } response = (char *) malloc(WEBSOCKETS_MAX_HANDSHAKE_LEN); if (!response) { free(buf); rfbLogPerror("webSocketsHandshake: malloc"); return FALSE; } while (len < WEBSOCKETS_MAX_HANDSHAKE_LEN-1) { if ((n = rfbReadExactTimeout(cl, buf+len, 1, WEBSOCKETS_CLIENT_SEND_WAIT_MS)) <= 0) { if ((n < 0) && (errno == ETIMEDOUT)) { break; } if (n == 0) rfbLog("webSocketsHandshake: client gone\n"); else rfbLogPerror("webSocketsHandshake: read"); free(response); free(buf); return FALSE; } len += 1; llen = len - linestart; if (((llen >= 2)) && (buf[len-1] == '\n')) { line = buf+linestart; if ((llen == 2) && (strncmp("\r\n", line, 2) == 0)) { if (key1 && key2) { if ((n = rfbReadExact(cl, buf+len, 8)) <= 0) { if ((n < 0) && (errno == ETIMEDOUT)) { break; } if (n == 0) rfbLog("webSocketsHandshake: client gone\n"); else rfbLogPerror("webSocketsHandshake: read"); free(response); free(buf); return FALSE; } rfbLog("Got key3\n"); key3 = buf+len; len += 8; } else { buf[len] = '\0'; } break; } else if ((llen >= 16) && ((strncmp("GET ", line, min(llen,4))) == 0)) { /* 16 = 4 ("GET ") + 1 ("/.*") + 11 (" HTTP/1.1\r\n") */ path = line+4; buf[len-11] = '\0'; /* Trim trailing " HTTP/1.1\r\n" */ cl->wspath = strdup(path); /* rfbLog("Got path: %s\n", path); */ } else if ((strncasecmp("host: ", line, min(llen,6))) == 0) { host = line+6; buf[len-2] = '\0'; /* rfbLog("Got host: %s\n", host); */ } else if ((strncasecmp("origin: 
", line, min(llen,8))) == 0) { origin = line+8; buf[len-2] = '\0'; /* rfbLog("Got origin: %s\n", origin); */ } else if ((strncasecmp("sec-websocket-key1: ", line, min(llen,20))) == 0) { key1 = line+20; buf[len-2] = '\0'; /* rfbLog("Got key1: %s\n", key1); */ } else if ((strncasecmp("sec-websocket-key2: ", line, min(llen,20))) == 0) { key2 = line+20; buf[len-2] = '\0'; /* rfbLog("Got key2: %s\n", key2); */ /* HyBI */ } else if ((strncasecmp("sec-websocket-protocol: ", line, min(llen,24))) == 0) { protocol = line+24; buf[len-2] = '\0'; rfbLog("Got protocol: %s\n", protocol); } else if ((strncasecmp("sec-websocket-origin: ", line, min(llen,22))) == 0) { sec_ws_origin = line+22; buf[len-2] = '\0'; } else if ((strncasecmp("sec-websocket-key: ", line, min(llen,19))) == 0) { sec_ws_key = line+19; buf[len-2] = '\0'; } else if ((strncasecmp("sec-websocket-version: ", line, min(llen,23))) == 0) { sec_ws_version = strtol(line+23, NULL, 10); buf[len-2] = '\0'; } linestart = len; } } if (!(path && host && (origin || sec_ws_origin))) { rfbErr("webSocketsHandshake: incomplete client handshake\n"); free(response); free(buf); return FALSE; } if ((protocol) && (strstr(protocol, "binary"))) { if (! sec_ws_version) { rfbErr("webSocketsHandshake: 'binary' protocol not supported with Hixie\n"); free(response); free(buf); return FALSE; } rfbLog(" - webSocketsHandshake: using binary/raw encoding\n"); base64 = FALSE; protocol = "binary"; } else { rfbLog(" - webSocketsHandshake: using base64 encoding\n"); base64 = TRUE; if ((protocol) && (strstr(protocol, "base64"))) { protocol = "base64"; } else { protocol = ""; } } /* * Generate the WebSockets server response based on the the headers sent * by the client. */ if (sec_ws_version) { char accept[B64LEN(SHA1_HASH_SIZE) + 1]; rfbLog(" - WebSockets client version hybi-%02d\n", sec_ws_version); webSocketsGenSha1Key(accept, sizeof(accept), sec_ws_key); if(strlen(protocol) > 0) len = snprintf(response, WEBSOCKETS_MAX_HANDSHAKE_LEN, SERVER_HANDSHAKE_HYBI, accept, protocol); else len = snprintf(response, WEBSOCKETS_MAX_HANDSHAKE_LEN, SERVER_HANDSHAKE_HYBI_NO_PROTOCOL, accept); } else { /* older hixie handshake, this could be removed if * a final standard is established */ if (!(key1 && key2 && key3)) { rfbLog(" - WebSockets client version hixie-75\n"); prefix[0] = '\0'; trailer[0] = '\0'; } else { rfbLog(" - WebSockets client version hixie-76\n"); snprintf(prefix, 5, "Sec-"); webSocketsGenMd5(trailer, key1, key2, key3); } len = snprintf(response, WEBSOCKETS_MAX_HANDSHAKE_LEN, SERVER_HANDSHAKE_HIXIE, prefix, origin, prefix, scheme, host, path, prefix, protocol, trailer); } if (rfbWriteExact(cl, response, len) < 0) { rfbErr("webSocketsHandshake: failed sending WebSockets response\n"); free(response); free(buf); return FALSE; } /* rfbLog("webSocketsHandshake: %s\n", response); */ free(response); free(buf); wsctx = calloc(1, sizeof(ws_ctx_t)); if (sec_ws_version) { wsctx->version = WEBSOCKETS_VERSION_HYBI; wsctx->encode = webSocketsEncodeHybi; wsctx->decode = webSocketsDecodeHybi; } else { wsctx->version = WEBSOCKETS_VERSION_HIXIE; wsctx->encode = webSocketsEncodeHixie; wsctx->decode = webSocketsDecodeHixie; } wsctx->base64 = base64; hybiDecodeCleanup(wsctx); cl->wsctx = (wsCtx *)wsctx; return TRUE; } void webSocketsGenMd5(char * target, char *key1, char *key2, char *key3) { unsigned int i, spaces1 = 0, spaces2 = 0; unsigned long num1 = 0, num2 = 0; unsigned char buf[17]; struct iovec iov[1]; for (i=0; i < strlen(key1); i++) { if (key1[i] == ' ') { spaces1 += 1; } if ((key1[i] 
>= 48) && (key1[i] <= 57)) { num1 = num1 * 10 + (key1[i] - 48); } } num1 = num1 / spaces1; for (i=0; i < strlen(key2); i++) { if (key2[i] == ' ') { spaces2 += 1; } if ((key2[i] >= 48) && (key2[i] <= 57)) { num2 = num2 * 10 + (key2[i] - 48); } } num2 = num2 / spaces2; /* Pack it big-endian */ buf[0] = (num1 & 0xff000000) >> 24; buf[1] = (num1 & 0xff0000) >> 16; buf[2] = (num1 & 0xff00) >> 8; buf[3] = num1 & 0xff; buf[4] = (num2 & 0xff000000) >> 24; buf[5] = (num2 & 0xff0000) >> 16; buf[6] = (num2 & 0xff00) >> 8; buf[7] = num2 & 0xff; strncpy((char *)buf+8, key3, 8); buf[16] = '\0'; iov[0].iov_base = buf; iov[0].iov_len = 16; digestmd5(iov, 1, target); target[16] = '\0'; return; } static int webSocketsEncodeHixie(rfbClientPtr cl, const char *src, int len, char **dst) { int sz = 0; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; wsctx->codeBufEncode[sz++] = '\x00'; len = b64_ntop((unsigned char *)src, len, wsctx->codeBufEncode+sz, sizeof(wsctx->codeBufEncode) - (sz + 1)); if (len < 0) { return len; } sz += len; wsctx->codeBufEncode[sz++] = '\xff'; *dst = wsctx->codeBufEncode; return sz; } static int ws_read(rfbClientPtr cl, char *buf, int len) { int n; if (cl->sslctx) { n = rfbssl_read(cl, buf, len); } else { n = read(cl->sock, buf, len); } return n; } static int ws_peek(rfbClientPtr cl, char *buf, int len) { int n; if (cl->sslctx) { n = rfbssl_peek(cl, buf, len); } else { while (-1 == (n = recv(cl->sock, buf, len, MSG_PEEK))) { if (errno != EAGAIN) break; } } return n; } static int webSocketsDecodeHixie(rfbClientPtr cl, char *dst, int len) { int retlen = 0, n, i, avail, modlen, needlen; char *buf, *end = NULL; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; buf = wsctx->codeBufDecode; n = ws_peek(cl, buf, len*2+2); if (n <= 0) { /* save errno because rfbErr() will tamper it */ int olderrno = errno; rfbErr("%s: peek (%d) %m\n", __func__, errno); errno = olderrno; return n; } /* Base64 encoded WebSockets stream */ if (buf[0] == '\xff') { i = ws_read(cl, buf, 1); /* Consume marker */ buf++; n--; } if (n == 0) { errno = EAGAIN; return -1; } if (buf[0] == '\x00') { i = ws_read(cl, buf, 1); /* Consume marker */ buf++; n--; } if (n == 0) { errno = EAGAIN; return -1; } /* end = memchr(buf, '\xff', len*2+2); */ end = memchr(buf, '\xff', n); if (!end) { end = buf + n; } avail = end - buf; len -= wsctx->carrylen; /* Determine how much base64 data we need */ modlen = len + (len+2)/3; needlen = modlen; if (needlen % 4) { needlen += 4 - (needlen % 4); } if (needlen > avail) { /* rfbLog("Waiting for more base64 data\n"); */ errno = EAGAIN; return -1; } /* Any carryover from previous decode */ for (i=0; i < wsctx->carrylen; i++) { /* rfbLog("Adding carryover %d\n", wsctx->carryBuf[i]); */ dst[i] = wsctx->carryBuf[i]; retlen += 1; } /* Decode the rest of what we need */ buf[needlen] = '\x00'; /* Replace end marker with end of string */ /* rfbLog("buf: %s\n", buf); */ n = b64_pton(buf, (unsigned char *)dst+retlen, 2+len); if (n < len) { rfbErr("Base64 decode error\n"); errno = EIO; return -1; } retlen += n; /* Consume the data from socket */ i = ws_read(cl, buf, needlen); wsctx->carrylen = n - len; retlen -= wsctx->carrylen; for (i=0; i < wsctx->carrylen; i++) { /* rfbLog("Saving carryover %d\n", dst[retlen + i]); */ wsctx->carryBuf[i] = dst[retlen + i]; } /* rfbLog("<< webSocketsDecode, retlen: %d\n", retlen); */ return retlen; } static int hybiRemaining(ws_ctx_t *wsctx) { return wsctx->nToRead - wsctx->nReadRaw; } static void hybiDecodeCleanup(ws_ctx_t *wsctx) { wsctx->header.payloadLen = 0; wsctx->header.mask.u = 0; 
wsctx->nReadRaw = 0; wsctx->nToRead= 0; wsctx->carrylen = 0; wsctx->readPos = (unsigned char *)wsctx->codeBufDecode; wsctx->readlen = 0; wsctx->hybiDecodeState = WS_HYBI_STATE_HEADER_PENDING; wsctx->writePos = NULL; rfbLog("cleaned up wsctx\n"); } /** * Return payload data that has been decoded/unmasked from * a websocket frame. * * @param[out] dst destination buffer * @param[in] len bytes to copy to destination buffer * @param[in,out] wsctx internal state of decoding procedure * @param[out] number of bytes actually written to dst buffer * @return next hybi decoding state */ static int hybiReturnData(char *dst, int len, ws_ctx_t *wsctx, int *nWritten) { int nextState = WS_HYBI_STATE_ERR; /* if we have something already decoded copy and return */ if (wsctx->readlen > 0) { /* simply return what we have */ if (wsctx->readlen > len) { rfbLog("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\n", len, wsctx->readPos, wsctx->readlen); memcpy(dst, wsctx->readPos, len); *nWritten = len; wsctx->readlen -= len; wsctx->readPos += len; nextState = WS_HYBI_STATE_DATA_AVAILABLE; } else { rfbLog("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\n", wsctx->readlen, wsctx->readPos, wsctx->readlen); memcpy(dst, wsctx->readPos, wsctx->readlen); *nWritten = wsctx->readlen; wsctx->readlen = 0; wsctx->readPos = NULL; if (hybiRemaining(wsctx) == 0) { nextState = WS_HYBI_STATE_FRAME_COMPLETE; } else { nextState = WS_HYBI_STATE_DATA_NEEDED; } } rfbLog("after copy: readPos=%p, readLen=%d\n", wsctx->readPos, wsctx->readlen); } else if (wsctx->hybiDecodeState == WS_HYBI_STATE_CLOSE_REASON_PENDING) { nextState = WS_HYBI_STATE_CLOSE_REASON_PENDING; } return nextState; } /** * Read an RFC 6455 websocket frame (IETF hybi working group). * * Internal state is updated according to bytes received and the * decoding of header information. * * @param[in] cl client ptr with ptr to raw socket and ws_ctx_t ptr * @param[out] sockRet emulated recv return value * @return next hybi decoding state; WS_HYBI_STATE_HEADER_PENDING indicates * that the header was not received completely. */ static int hybiReadHeader(rfbClientPtr cl, int *sockRet) { int ret; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; char *headerDst = wsctx->codeBufDecode + wsctx->nReadRaw; int n = WSHLENMAX - wsctx->nReadRaw; rfbLog("header_read to %p with len=%d\n", headerDst, n); ret = ws_read(cl, headerDst, n); rfbLog("read %d bytes from socket\n", ret); if (ret <= 0) { if (-1 == ret) { /* save errno because rfbErr() will tamper it */ int olderrno = errno; rfbErr("%s: peek; %m\n", __func__); errno = olderrno; *sockRet = -1; } else { *sockRet = 0; } return WS_HYBI_STATE_ERR; } wsctx->nReadRaw += ret; if (wsctx->nReadRaw < 2) { /* cannot decode header with less than two bytes */ errno = EAGAIN; *sockRet = -1; return WS_HYBI_STATE_HEADER_PENDING; } /* first two header bytes received; interpret header data and get rest */ wsctx->header.data = (ws_header_t *)wsctx->codeBufDecode; wsctx->header.opcode = wsctx->header.data->b0 & 0x0f; /* fin = (header->b0 & 0x80) >> 7; */ /* not used atm */ wsctx->header.payloadLen = wsctx->header.data->b1 & 0x7f; rfbLog("first header bytes received; opcode=%d lenbyte=%d\n", wsctx->header.opcode, wsctx->header.payloadLen); /* * 4.3. Client-to-Server Masking * * The client MUST mask all frames sent to the server. A server MUST * close the connection upon receiving a frame with the MASK bit set to 0. 
**/ if (!(wsctx->header.data->b1 & 0x80)) { rfbErr("%s: got frame without mask ret=%d\n", __func__, ret); errno = EIO; *sockRet = -1; return WS_HYBI_STATE_ERR; } if (wsctx->header.payloadLen < 126 && wsctx->nReadRaw >= 6) { wsctx->header.headerLen = 2 + WS_HYBI_MASK_LEN; wsctx->header.mask = wsctx->header.data->u.m; } else if (wsctx->header.payloadLen == 126 && 8 <= wsctx->nReadRaw) { wsctx->header.headerLen = 4 + WS_HYBI_MASK_LEN; wsctx->header.payloadLen = WS_NTOH16(wsctx->header.data->u.s16.l16); wsctx->header.mask = wsctx->header.data->u.s16.m16; } else if (wsctx->header.payloadLen == 127 && 14 <= wsctx->nReadRaw) { wsctx->header.headerLen = 10 + WS_HYBI_MASK_LEN; wsctx->header.payloadLen = WS_NTOH64(wsctx->header.data->u.s64.l64); wsctx->header.mask = wsctx->header.data->u.s64.m64; } else { /* Incomplete frame header, try again */ rfbErr("%s: incomplete frame header; ret=%d\n", __func__, ret); errno = EAGAIN; *sockRet = -1; return WS_HYBI_STATE_HEADER_PENDING; } /* absolute length of frame */ wsctx->nToRead = wsctx->header.headerLen + wsctx->header.payloadLen; /* set payload pointer just after header */ wsctx->writePos = wsctx->codeBufDecode + wsctx->nReadRaw; wsctx->readPos = (unsigned char *)(wsctx->codeBufDecode + wsctx->header.headerLen); rfbLog("header complete: state=%d flen=%d writeTo=%p\n", wsctx->hybiDecodeState, wsctx->nToRead, wsctx->writePos); return WS_HYBI_STATE_DATA_NEEDED; } static int hybiWsFrameComplete(ws_ctx_t *wsctx) { return wsctx != NULL && hybiRemaining(wsctx) == 0; } static char * hybiPayloadStart(ws_ctx_t *wsctx) { return wsctx->codeBufDecode + wsctx->header.headerLen; } /** * Read the remaining payload bytes from associated raw socket. * * - try to read remaining bytes from socket * - unmask all multiples of 4 * - if frame incomplete but some bytes are left, these are copied to * the carry buffer * - if opcode is TEXT: Base64-decode all unmasked received bytes * - set state for reading decoded data * - reset write position to begin of buffer (+ header) * --> before we retrieve more data we let the caller clear all bytes * from the reception buffer * - execute return data routine * * Sets errno corresponding to what it gets from the underlying * socket or EIO if some internal sanity check fails. 
* * @param[in] cl client ptr with raw socket reference * @param[out] dst destination buffer * @param[in] len size of destination buffer * @param[out] sockRet emulated recv return value * @return next hybi decode state */ static int hybiReadAndDecode(rfbClientPtr cl, char *dst, int len, int *sockRet) { int n; int i; int toReturn; int toDecode; int bufsize; int nextRead; unsigned char *data; uint32_t *data32; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; /* if data was carried over, copy to start of buffer */ memcpy(wsctx->writePos, wsctx->carryBuf, wsctx->carrylen); wsctx->writePos += wsctx->carrylen; /* -1 accounts for potential '\0' terminator for base64 decoding */ bufsize = wsctx->codeBufDecode + ARRAYSIZE(wsctx->codeBufDecode) - wsctx->writePos - 1; if (hybiRemaining(wsctx) > bufsize) { nextRead = bufsize; } else { nextRead = hybiRemaining(wsctx); } rfbLog("calling read with buf=%p and len=%d (decodebuf=%p headerLen=%d\n)", wsctx->writePos, nextRead, wsctx->codeBufDecode, wsctx->header.headerLen); if (wsctx->nReadRaw < wsctx->nToRead) { /* decode more data */ if (-1 == (n = ws_read(cl, wsctx->writePos, nextRead))) { int olderrno = errno; rfbErr("%s: read; %m", __func__); errno = olderrno; *sockRet = -1; return WS_HYBI_STATE_ERR; } else if (n == 0) { *sockRet = 0; return WS_HYBI_STATE_ERR; } wsctx->nReadRaw += n; rfbLog("read %d bytes from socket; nRead=%d\n", n, wsctx->nReadRaw); } else { n = 0; } wsctx->writePos += n; if (wsctx->nReadRaw >= wsctx->nToRead) { if (wsctx->nReadRaw > wsctx->nToRead) { rfbErr("%s: internal error, read past websocket frame", __func__); errno=EIO; *sockRet = -1; return WS_HYBI_STATE_ERR; } } toDecode = wsctx->writePos - hybiPayloadStart(wsctx); rfbLog("toDecode=%d from n=%d carrylen=%d headerLen=%d\n", toDecode, n, wsctx->carrylen, wsctx->header.headerLen); if (toDecode < 0) { rfbErr("%s: internal error; negative number of bytes to decode: %d", __func__, toDecode); errno=EIO; *sockRet = -1; return WS_HYBI_STATE_ERR; } /* for a possible base64 decoding, we decode multiples of 4 bytes until * the whole frame is received and carry over any remaining bytes in the carry buf*/ data = (unsigned char *)hybiPayloadStart(wsctx); data32= (uint32_t *)data; for (i = 0; i < (toDecode >> 2); i++) { data32[i] ^= wsctx->header.mask.u; } rfbLog("mask decoding; i=%d toDecode=%d\n", i, toDecode); if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { /* process the remaining bytes (if any) */ for (i*=4; i < toDecode; i++) { data[i] ^= wsctx->header.mask.c[i % 4]; } /* all data is here, no carrying */ wsctx->carrylen = 0; } else { /* carry over remaining, non-multiple-of-four bytes */ wsctx->carrylen = toDecode - (i * 4); if (wsctx->carrylen < 0 || wsctx->carrylen > ARRAYSIZE(wsctx->carryBuf)) { rfbErr("%s: internal error, invalid carry over size: carrylen=%d, toDecode=%d, i=%d", __func__, wsctx->carrylen, toDecode, i); *sockRet = -1; errno = EIO; return WS_HYBI_STATE_ERR; } rfbLog("carrying over %d bytes from %p to %p\n", wsctx->carrylen, wsctx->writePos + (i * 4), wsctx->carryBuf); memcpy(wsctx->carryBuf, data + (i * 4), wsctx->carrylen); } toReturn = toDecode - wsctx->carrylen; switch (wsctx->header.opcode) { case WS_OPCODE_CLOSE: /* this data is not returned as payload data */ if (hybiWsFrameComplete(wsctx)) { rfbLog("got closure, reason %d\n", WS_NTOH16(((uint16_t *)data)[0])); errno = ECONNRESET; *sockRet = -1; return WS_HYBI_STATE_FRAME_COMPLETE; } else { rfbErr("%s: close reason with long frame not supported", __func__); errno = EIO; *sockRet = -1; return 
WS_HYBI_STATE_ERR; } break; case WS_OPCODE_TEXT_FRAME: data[toReturn] = '\0'; rfbLog("Initiate Base64 decoding in %p with max size %d and '\\0' at %p\n", data, bufsize, data + toReturn); if (-1 == (wsctx->readlen = b64_pton((char *)data, data, bufsize))) { rfbErr("Base64 decode error in %s; data=%p bufsize=%d", __func__, data, bufsize); rfbErr("%s: Base64 decode error; %m\n", __func__); } wsctx->writePos = hybiPayloadStart(wsctx); break; case WS_OPCODE_BINARY_FRAME: wsctx->readlen = toReturn; wsctx->writePos = hybiPayloadStart(wsctx); break; default: rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\n", __func__, (int)wsctx->header.opcode, wsctx->header.data->b0, wsctx->header.data->b1); } wsctx->readPos = data; return hybiReturnData(dst, len, wsctx, sockRet); } /** * Read function for websocket-socket emulation. * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-------+-+-------------+-------------------------------+ * |F|R|R|R| opcode|M| Payload len | Extended payload length | * |I|S|S|S| (4) |A| (7) | (16/64) | * |N|V|V|V| |S| | (if payload len==126/127) | * | |1|2|3| |K| | | * +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - + * | Extended payload length continued, if payload len == 127 | * + - - - - - - - - - - - - - - - +-------------------------------+ * | |Masking-key, if MASK set to 1 | * +-------------------------------+-------------------------------+ * | Masking-key (continued) | Payload Data | * +-------------------------------- - - - - - - - - - - - - - - - + * : Payload Data continued ... : * + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + * | Payload Data continued ... | * +---------------------------------------------------------------+ * * Using the decode buffer, this function: * - reads the complete header from the underlying socket * - reads any remaining data bytes * - unmasks the payload data using the provided mask * - decodes Base64 encoded text data * - copies len bytes of decoded payload data into dst * * Emulates a read call on a socket. 
*/ static int webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len) { int result = -1; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; /* int fin; */ /* not used atm */ /* rfbLog(" <== %s[%d]: %d cl: %p, wsctx: %p-%p (%d)\n", __func__, gettid(), len, cl, wsctx, (char *)wsctx + sizeof(ws_ctx_t), sizeof(ws_ctx_t)); */ rfbLog("%s_enter: len=%d; " "CTX: readlen=%d readPos=%p " "writeTo=%p " "state=%d toRead=%d remaining=%d " " nReadRaw=%d carrylen=%d carryBuf=%p\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->nToRead, hybiRemaining(wsctx), wsctx->nReadRaw, wsctx->carrylen, wsctx->carryBuf); switch (wsctx->hybiDecodeState){ case WS_HYBI_STATE_HEADER_PENDING: wsctx->hybiDecodeState = hybiReadHeader(cl, &result); if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { goto spor; } if (wsctx->hybiDecodeState != WS_HYBI_STATE_HEADER_PENDING) { /* when header is complete, try to read some more data */ wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result); } break; case WS_HYBI_STATE_DATA_AVAILABLE: wsctx->hybiDecodeState = hybiReturnData(dst, len, wsctx, &result); break; case WS_HYBI_STATE_DATA_NEEDED: wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result); break; case WS_HYBI_STATE_CLOSE_REASON_PENDING: wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result); break; default: /* invalid state */ rfbErr("%s: called with invalid state %d\n", wsctx->hybiDecodeState); result = -1; errno = EIO; wsctx->hybiDecodeState = WS_HYBI_STATE_ERR; } /* single point of return, if someone has questions :-) */ spor: /* rfbLog("%s: ret: %d/%d\n", __func__, result, len); */ if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { rfbLog("frame received successfully, cleaning up: read=%d hlen=%d plen=%d\n", wsctx->header.nRead, wsctx->header.headerLen, wsctx->header.payloadLen); /* frame finished, cleanup state */ hybiDecodeCleanup(wsctx); } else if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { hybiDecodeCleanup(wsctx); } rfbLog("%s_exit: len=%d; " "CTX: readlen=%d readPos=%p " "writePos=%p " "state=%d toRead=%d remaining=%d " "nRead=%d carrylen=%d carryBuf=%p " "result=%d\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->nToRead, hybiRemaining(wsctx), wsctx->nReadRaw, wsctx->carrylen, wsctx->carryBuf, result); return result; } static int webSocketsEncodeHybi(rfbClientPtr cl, const char *src, int len, char **dst) { int blen, ret = -1, sz = 0; unsigned char opcode = '\0'; /* TODO: option! 
*/ ws_header_t *header; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; /* Optional opcode: * 0x0 - continuation * 0x1 - text frame (base64 encode buf) * 0x2 - binary frame (use raw buf) * 0x8 - connection close * 0x9 - ping * 0xA - pong **/ if (!len) { /* nothing to encode */ return 0; } header = (ws_header_t *)wsctx->codeBufEncode; if (wsctx->base64) { opcode = WS_OPCODE_TEXT_FRAME; /* calculate the resulting size */ blen = B64LEN(len); } else { opcode = WS_OPCODE_BINARY_FRAME; blen = len; } header->b0 = 0x80 | (opcode & 0x0f); if (blen <= 125) { header->b1 = (uint8_t)blen; sz = 2; } else if (blen <= 65536) { header->b1 = 0x7e; header->u.s16.l16 = WS_HTON16((uint16_t)blen); sz = 4; } else { header->b1 = 0x7f; header->u.s64.l64 = WS_HTON64(blen); sz = 10; } if (wsctx->base64) { if (-1 == (ret = b64_ntop((unsigned char *)src, len, wsctx->codeBufEncode + sz, sizeof(wsctx->codeBufEncode) - sz))) { rfbErr("%s: Base 64 encode failed\n", __func__); } else { if (ret != blen) rfbErr("%s: Base 64 encode; something weird happened\n", __func__); ret += sz; } } else { memcpy(wsctx->codeBufEncode + sz, src, len); ret = sz + len; } *dst = wsctx->codeBufEncode; return ret; } int webSocketsEncode(rfbClientPtr cl, const char *src, int len, char **dst) { return ((ws_ctx_t *)cl->wsctx)->encode(cl, src, len, dst); } int webSocketsDecode(rfbClientPtr cl, char *dst, int len) { return ((ws_ctx_t *)cl->wsctx)->decode(cl, dst, len); } /* returns TRUE if client sent a close frame or a single 'end of frame' * marker was received, FALSE otherwise * * Note: This is a Hixie-only hack! **/ rfbBool webSocketCheckDisconnect(rfbClientPtr cl) { ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; /* With Base64 encoding we need at least 4 bytes */ char peekbuf[4]; int n; if (wsctx->version == WEBSOCKETS_VERSION_HYBI) return FALSE; if (cl->sslctx) n = rfbssl_peek(cl, peekbuf, 4); else n = recv(cl->sock, peekbuf, 4, MSG_PEEK); if (n <= 0) { if (n != 0) rfbErr("%s: peek; %m", __func__); rfbCloseClient(cl); return TRUE; } if (peekbuf[0] == '\xff') { int doclose = 0; /* Make sure we don't miss a client disconnect on an end frame * marker. Because we use a peek buffer in some cases it is not * applicable to wait for more data per select(). */ switch (n) { case 3: if (peekbuf[1] == '\xff' && peekbuf[2] == '\x00') doclose = 1; break; case 2: if (peekbuf[1] == '\x00') doclose = 1; break; default: return FALSE; } if (cl->sslctx) n = rfbssl_read(cl, peekbuf, n); else n = read(cl->sock, peekbuf, n); if (doclose) { rfbErr("%s: websocket close frame received\n", __func__); rfbCloseClient(cl); } return TRUE; } return FALSE; } /* returns TRUE if there is data waiting to be read in our internal buffer * or if is there any pending data in the buffer of the SSL implementation */ rfbBool webSocketsHasDataInBuffer(rfbClientPtr cl) { ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; if (wsctx && wsctx->readlen) return TRUE; return (cl->sslctx && rfbssl_pending(cl) > 0); }
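Editorial note on the handshake code above: webSocketsGenSha1Key implements the RFC 6455 accept-key derivation, SHA-1 over the client's Sec-WebSocket-Key concatenated with the fixed GUID, then base64. The sketch below reproduces that computation standalone so it can be tested in isolation; it uses OpenSSL's SHA1() and EVP_EncodeBlock() instead of the file's digestsha1()/b64_ntop() helpers (an assumption of this sketch, not the library's code), and the sample key/answer pair is the well-known one from RFC 6455, section 1.3.

/* Standalone Sec-WebSocket-Accept derivation; build with -lcrypto. */
#include <stdio.h>
#include <string.h>
#include <openssl/sha.h>
#include <openssl/evp.h>

#define WS_GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" /* RFC 6455 GUID */

static void ws_accept_key(const char *client_key, char out[29])
{
    unsigned char digest[SHA_DIGEST_LENGTH];
    char cat[128];

    /* accept = base64(SHA1(key || GUID)) */
    snprintf(cat, sizeof(cat), "%s%s", client_key, WS_GUID);
    SHA1((const unsigned char *)cat, strlen(cat), digest);
    EVP_EncodeBlock((unsigned char *)out, digest, SHA_DIGEST_LENGTH); /* 28 chars + NUL */
}

int main(void)
{
    char accept[29];
    ws_accept_key("dGhlIHNhbXBsZSBub25jZQ==", accept);
    printf("Sec-WebSocket-Accept: %s\n", accept); /* s3pPLMBiTxaQ9kYGzzhZRbK+xOo= */
    return 0;
}

The row's func_before field, the decoder as it stood before the fix, follows.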
webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len) { char *buf, *payload; uint32_t *payload32; int ret = -1, result = -1; int total = 0; ws_mask_t mask; ws_header_t *header; int i; unsigned char opcode; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; int flength, fhlen; /* int fin; */ /* not used atm */ /* rfbLog(" <== %s[%d]: %d cl: %p, wsctx: %p-%p (%d)\n", __func__, gettid(), len, cl, wsctx, (char *)wsctx + sizeof(ws_ctx_t), sizeof(ws_ctx_t)); */ if (wsctx->readbuflen) { /* simply return what we have */ if (wsctx->readbuflen > len) { memcpy(dst, wsctx->readbuf + wsctx->readbufstart, len); result = len; wsctx->readbuflen -= len; wsctx->readbufstart += len; } else { memcpy(dst, wsctx->readbuf + wsctx->readbufstart, wsctx->readbuflen); result = wsctx->readbuflen; wsctx->readbuflen = 0; wsctx->readbufstart = 0; } goto spor; } buf = wsctx->codeBufDecode; header = (ws_header_t *)wsctx->codeBufDecode; ret = ws_peek(cl, buf, B64LEN(len) + WSHLENMAX); if (ret < 2) { /* save errno because rfbErr() will tamper it */ if (-1 == ret) { int olderrno = errno; rfbErr("%s: peek; %m\n", __func__); errno = olderrno; } else if (0 == ret) { result = 0; } else { errno = EAGAIN; } goto spor; } opcode = header->b0 & 0x0f; /* fin = (header->b0 & 0x80) >> 7; */ /* not used atm */ flength = header->b1 & 0x7f; /* * 4.3. Client-to-Server Masking * * The client MUST mask all frames sent to the server. A server MUST * close the connection upon receiving a frame with the MASK bit set to 0. **/ if (!(header->b1 & 0x80)) { rfbErr("%s: got frame without mask\n", __func__, ret); errno = EIO; goto spor; } if (flength < 126) { fhlen = 2; mask = header->u.m; } else if (flength == 126 && 4 <= ret) { flength = WS_NTOH16(header->u.s16.l16); fhlen = 4; mask = header->u.s16.m16; } else if (flength == 127 && 10 <= ret) { flength = WS_NTOH64(header->u.s64.l64); fhlen = 10; mask = header->u.s64.m64; } else { /* Incomplete frame header */ rfbErr("%s: incomplete frame header\n", __func__, ret); errno = EIO; goto spor; } /* absolute length of frame */ total = fhlen + flength + 4; payload = buf + fhlen + 4; /* header length + mask */ if (-1 == (ret = ws_read(cl, buf, total))) { int olderrno = errno; rfbErr("%s: read; %m", __func__); errno = olderrno; return ret; } else if (ret < total) { /* GT TODO: hmm? */ rfbLog("%s: read; got partial data\n", __func__); } else { buf[ret] = '\0'; } /* process 1 frame (32 bit op) */ payload32 = (uint32_t *)payload; for (i = 0; i < flength / 4; i++) { payload32[i] ^= mask.u; } /* process the remaining bytes (if any) */ for (i*=4; i < flength; i++) { payload[i] ^= mask.c[i % 4]; } switch (opcode) { case WS_OPCODE_CLOSE: rfbLog("got closure, reason %d\n", WS_NTOH16(((uint16_t *)payload)[0])); errno = ECONNRESET; break; case WS_OPCODE_TEXT_FRAME: if (-1 == (flength = b64_pton(payload, (unsigned char *)wsctx->codeBufDecode, sizeof(wsctx->codeBufDecode)))) { rfbErr("%s: Base64 decode error; %m\n", __func__); break; } payload = wsctx->codeBufDecode; /* fall through */ case WS_OPCODE_BINARY_FRAME: if (flength > len) { memcpy(wsctx->readbuf, payload + len, flength - len); wsctx->readbufstart = 0; wsctx->readbuflen = flength - len; flength = len; } memcpy(dst, payload, flength); result = flength; break; default: rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\n", __func__, (int)opcode, header->b0, header->b1); } /* single point of return, if someone has questions :-) */ spor: /* rfbLog("%s: ret: %d/%d\n", __func__, result, len); */ return result; }
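The function above is the row's func_before: the single-shot decoder that CVE-2017-18922 targets. flength comes straight off the wire (up to a 64-bit extended length), total = fhlen + flength + 4 is derived from it, and ws_read() then fills the fixed-size codeBufDecode with up to total bytes, followed by buf[ret] = '\0', with no check against the buffer's capacity; that unchecked write is the out-of-bounds write (CWE-787) this row records. Below is a minimal sketch of the clamp the rewritten decoder applies before every read; the names mirror the fixed code, but this is an illustration, not the library's exact logic.

/* Cap each socket read at the free space left in the decode buffer.
 * "remaining" plays the role of hybiRemaining(); "bufsize" is the free
 * space in codeBufDecode minus 1 byte reserved for the '\0' terminator
 * that b64_pton() needs. */
static int clamp_next_read(int remaining, int bufsize)
{
    return remaining > bufsize ? bufsize : remaining;
}

/* usage, following the shape of the fixed hybiReadAndDecode():
 *   nextRead = clamp_next_read(hybiRemaining(wsctx), bufsize);
 *   n = ws_read(cl, wsctx->writePos, nextRead); */

The row's func_after field follows.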
webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len) { int result = -1; ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx; /* int fin; */ /* not used atm */ /* rfbLog(" <== %s[%d]: %d cl: %p, wsctx: %p-%p (%d)\n", __func__, gettid(), len, cl, wsctx, (char *)wsctx + sizeof(ws_ctx_t), sizeof(ws_ctx_t)); */ rfbLog("%s_enter: len=%d; " "CTX: readlen=%d readPos=%p " "writeTo=%p " "state=%d toRead=%d remaining=%d " " nReadRaw=%d carrylen=%d carryBuf=%p\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->nToRead, hybiRemaining(wsctx), wsctx->nReadRaw, wsctx->carrylen, wsctx->carryBuf); switch (wsctx->hybiDecodeState){ case WS_HYBI_STATE_HEADER_PENDING: wsctx->hybiDecodeState = hybiReadHeader(cl, &result); if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { goto spor; } if (wsctx->hybiDecodeState != WS_HYBI_STATE_HEADER_PENDING) { /* when header is complete, try to read some more data */ wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result); } break; case WS_HYBI_STATE_DATA_AVAILABLE: wsctx->hybiDecodeState = hybiReturnData(dst, len, wsctx, &result); break; case WS_HYBI_STATE_DATA_NEEDED: wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result); break; case WS_HYBI_STATE_CLOSE_REASON_PENDING: wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result); break; default: /* invalid state */ rfbErr("%s: called with invalid state %d\n", wsctx->hybiDecodeState); result = -1; errno = EIO; wsctx->hybiDecodeState = WS_HYBI_STATE_ERR; } /* single point of return, if someone has questions :-) */ spor: /* rfbLog("%s: ret: %d/%d\n", __func__, result, len); */ if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) { rfbLog("frame received successfully, cleaning up: read=%d hlen=%d plen=%d\n", wsctx->header.nRead, wsctx->header.headerLen, wsctx->header.payloadLen); /* frame finished, cleanup state */ hybiDecodeCleanup(wsctx); } else if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) { hybiDecodeCleanup(wsctx); } rfbLog("%s_exit: len=%d; " "CTX: readlen=%d readPos=%p " "writePos=%p " "state=%d toRead=%d remaining=%d " "nRead=%d carrylen=%d carryBuf=%p " "result=%d\n", __func__, len, wsctx->readlen, wsctx->readPos, wsctx->writePos, wsctx->hybiDecodeState, wsctx->nToRead, hybiRemaining(wsctx), wsctx->nReadRaw, wsctx->carrylen, wsctx->carryBuf, result); return result; }
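func_after above replaces the single-shot decode with a resumable state machine, so a frame no longer has to fit one read: partial input surfaces as -1 with errno set to EAGAIN, and decoding resumes on the next call. (One aside on the recorded code: in the default branch, rfbErr's format string has both %s and %d but only wsctx->hybiDecodeState is passed, so the __func__ argument is missing.) Below is a sketch of a caller that drains an exact number of payload bytes through the exported webSocketsDecode() wrapper; the busy retry stands in for waiting on socket readability, and wsReadExact is a hypothetical helper, not part of the library.

#include <errno.h>

/* Treat the websocket decoder like recv(): accumulate until len bytes
 * have been produced, retrying when a frame is only partially buffered. */
static int wsReadExact(rfbClientPtr cl, char *dst, int len)
{
    int got = 0;
    while (got < len) {
        int n = webSocketsDecode(cl, dst + got, len - got);
        if (n > 0)
            got += n;
        else if (n < 0 && errno == EAGAIN)
            continue;      /* frame incomplete; a real caller would poll() here */
        else
            return n;      /* 0: peer closed, -1: hard error */
    }
    return got;
}

The row's diff field follows as a Python-style dict with 'added' and 'deleted' lists of (line, text) tuples.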
{'added': [(80, '#define WS_HYBI_MASK_LEN 4'), (81, ''), (82, '#define ARRAYSIZE(a) ((sizeof(a) / sizeof((a[0]))) / (size_t)(!(sizeof(a) % sizeof((a[0])))))'), (99, ''), (100, 'enum {'), (101, ' /* header not yet received completely */'), (102, ' WS_HYBI_STATE_HEADER_PENDING,'), (103, ' /* data available */'), (104, ' WS_HYBI_STATE_DATA_AVAILABLE,'), (105, ' WS_HYBI_STATE_DATA_NEEDED,'), (106, ' /* received a complete frame */'), (107, ' WS_HYBI_STATE_FRAME_COMPLETE,'), (108, " /* received part of a 'close' frame */"), (109, ' WS_HYBI_STATE_CLOSE_REASON_PENDING,'), (110, ' /* */'), (111, ' WS_HYBI_STATE_ERR'), (112, '};'), (152, 'typedef struct ws_header_data_s {'), (153, ' ws_header_t *data;'), (154, ' /** bytes read */'), (155, ' int nRead;'), (156, ' /** mask value */'), (157, ' ws_mask_t mask;'), (158, ' /** length of frame header including payload len, but without mask */'), (159, ' int headerLen;'), (160, ' /** length of the payload data */'), (161, ' int payloadLen;'), (162, ' /** opcode */'), (163, ' unsigned char opcode;'), (164, '} ws_header_data_t;'), (165, ''), (166, 'typedef struct ws_ctx_s {'), (167, ' char codeBufDecode[B64LEN(UPDATE_BUF_SIZE) + WSHLENMAX]; /* base64 + maximum frame header length */'), (168, ' char codeBufEncode[B64LEN(UPDATE_BUF_SIZE) + WSHLENMAX]; /* base64 + maximum frame header length */'), (169, ' char *writePos;'), (170, ' unsigned char *readPos;'), (171, ' int readlen;'), (172, ' int hybiDecodeState;'), (173, ' char carryBuf[3]; /* For base64 carry-over */'), (174, ' int carrylen;'), (175, ' int version;'), (176, ' int base64;'), (177, ' ws_header_data_t header;'), (178, ' int nReadRaw;'), (179, ' int nToRead;'), (180, ' wsEncodeFunc encode;'), (181, ' wsDecodeFunc decode;'), (182, '} ws_ctx_t;'), (183, ''), (244, 'static void hybiDecodeCleanup(ws_ctx_t *wsctx);'), (245, ''), (507, ' hybiDecodeCleanup(wsctx);'), (511, ''), (703, 'hybiRemaining(ws_ctx_t *wsctx)'), (705, ' return wsctx->nToRead - wsctx->nReadRaw;'), (706, '}'), (708, 'static void'), (709, 'hybiDecodeCleanup(ws_ctx_t *wsctx)'), (710, '{'), (711, ' wsctx->header.payloadLen = 0;'), (712, ' wsctx->header.mask.u = 0;'), (713, ' wsctx->nReadRaw = 0;'), (714, ' wsctx->nToRead= 0;'), (715, ' wsctx->carrylen = 0;'), (716, ' wsctx->readPos = (unsigned char *)wsctx->codeBufDecode;'), (717, ' wsctx->readlen = 0;'), (718, ' wsctx->hybiDecodeState = WS_HYBI_STATE_HEADER_PENDING;'), (719, ' wsctx->writePos = NULL;'), (720, ' rfbLog("cleaned up wsctx\\n");'), (721, '}'), (723, '/**'), (724, ' * Return payload data that has been decoded/unmasked from'), (725, ' * a websocket frame.'), (726, ' *'), (727, ' * @param[out] dst destination buffer'), (728, ' * @param[in] len bytes to copy to destination buffer'), (729, ' * @param[in,out] wsctx internal state of decoding procedure'), (730, ' * @param[out] number of bytes actually written to dst buffer'), (731, ' * @return next hybi decoding state'), (732, ' */'), (733, 'static int'), (734, 'hybiReturnData(char *dst, int len, ws_ctx_t *wsctx, int *nWritten)'), (735, '{'), (736, ' int nextState = WS_HYBI_STATE_ERR;'), (737, ''), (738, ' /* if we have something already decoded copy and return */'), (739, ' if (wsctx->readlen > 0) {'), (740, ' /* simply return what we have */'), (741, ' if (wsctx->readlen > len) {'), (742, ' rfbLog("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\\n", len, wsctx->readPos, wsctx->readlen);'), (743, ' memcpy(dst, wsctx->readPos, len);'), (744, ' *nWritten = len;'), (745, ' wsctx->readlen -= len;'), (746, ' wsctx->readPos += 
len;'), (747, ' nextState = WS_HYBI_STATE_DATA_AVAILABLE;'), (748, ' } else {'), (749, ' rfbLog("copy to %d bytes to dst buffer; readPos=%p, readLen=%d\\n", wsctx->readlen, wsctx->readPos, wsctx->readlen);'), (750, ' memcpy(dst, wsctx->readPos, wsctx->readlen);'), (751, ' *nWritten = wsctx->readlen;'), (752, ' wsctx->readlen = 0;'), (753, ' wsctx->readPos = NULL;'), (754, ' if (hybiRemaining(wsctx) == 0) {'), (755, ' nextState = WS_HYBI_STATE_FRAME_COMPLETE;'), (757, ' nextState = WS_HYBI_STATE_DATA_NEEDED;'), (760, ' rfbLog("after copy: readPos=%p, readLen=%d\\n", wsctx->readPos, wsctx->readlen);'), (761, ' } else if (wsctx->hybiDecodeState == WS_HYBI_STATE_CLOSE_REASON_PENDING) {'), (762, ' nextState = WS_HYBI_STATE_CLOSE_REASON_PENDING;'), (763, ' }'), (764, ' return nextState;'), (765, '}'), (767, '/**'), (768, ' * Read an RFC 6455 websocket frame (IETF hybi working group).'), (769, ' *'), (770, ' * Internal state is updated according to bytes received and the'), (771, ' * decoding of header information.'), (772, ' *'), (773, ' * @param[in] cl client ptr with ptr to raw socket and ws_ctx_t ptr'), (774, ' * @param[out] sockRet emulated recv return value'), (775, ' * @return next hybi decoding state; WS_HYBI_STATE_HEADER_PENDING indicates'), (776, ' * that the header was not received completely.'), (777, ' */'), (778, 'static int'), (779, 'hybiReadHeader(rfbClientPtr cl, int *sockRet)'), (780, '{'), (781, ' int ret;'), (782, ' ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx;'), (783, ' char *headerDst = wsctx->codeBufDecode + wsctx->nReadRaw;'), (784, ' int n = WSHLENMAX - wsctx->nReadRaw;'), (785, ''), (786, ' rfbLog("header_read to %p with len=%d\\n", headerDst, n);'), (787, ' ret = ws_read(cl, headerDst, n);'), (788, ' rfbLog("read %d bytes from socket\\n", ret);'), (789, ' if (ret <= 0) {'), (790, ' if (-1 == ret) {'), (791, ' /* save errno because rfbErr() will tamper it */'), (792, ' int olderrno = errno;'), (793, ' rfbErr("%s: peek; %m\\n", __func__);'), (794, ' errno = olderrno;'), (795, ' *sockRet = -1;'), (796, ' } else {'), (797, ' *sockRet = 0;'), (799, ' return WS_HYBI_STATE_ERR;'), (800, ' }'), (801, ''), (802, ' wsctx->nReadRaw += ret;'), (803, ' if (wsctx->nReadRaw < 2) {'), (804, ' /* cannot decode header with less than two bytes */'), (805, ' errno = EAGAIN;'), (806, ' *sockRet = -1;'), (807, ' return WS_HYBI_STATE_HEADER_PENDING;'), (808, ' }'), (809, ''), (810, ' /* first two header bytes received; interpret header data and get rest */'), (811, ' wsctx->header.data = (ws_header_t *)wsctx->codeBufDecode;'), (812, ''), (813, ' wsctx->header.opcode = wsctx->header.data->b0 & 0x0f;'), (814, ''), (815, ' /* fin = (header->b0 & 0x80) >> 7; */ /* not used atm */'), (816, ' wsctx->header.payloadLen = wsctx->header.data->b1 & 0x7f;'), (817, ' rfbLog("first header bytes received; opcode=%d lenbyte=%d\\n", wsctx->header.opcode, wsctx->header.payloadLen);'), (818, ''), (819, ' /*'), (820, ' * 4.3. Client-to-Server Masking'), (821, ' *'), (822, ' * The client MUST mask all frames sent to the server. 
A server MUST'), (823, ' * close the connection upon receiving a frame with the MASK bit set to 0.'), (824, ' **/'), (825, ' if (!(wsctx->header.data->b1 & 0x80)) {'), (826, ' rfbErr("%s: got frame without mask ret=%d\\n", __func__, ret);'), (827, ' errno = EIO;'), (828, ' *sockRet = -1;'), (829, ' return WS_HYBI_STATE_ERR;'), (830, ' }'), (831, ''), (832, ' if (wsctx->header.payloadLen < 126 && wsctx->nReadRaw >= 6) {'), (833, ' wsctx->header.headerLen = 2 + WS_HYBI_MASK_LEN;'), (834, ' wsctx->header.mask = wsctx->header.data->u.m;'), (835, ' } else if (wsctx->header.payloadLen == 126 && 8 <= wsctx->nReadRaw) {'), (836, ' wsctx->header.headerLen = 4 + WS_HYBI_MASK_LEN;'), (837, ' wsctx->header.payloadLen = WS_NTOH16(wsctx->header.data->u.s16.l16);'), (838, ' wsctx->header.mask = wsctx->header.data->u.s16.m16;'), (839, ' } else if (wsctx->header.payloadLen == 127 && 14 <= wsctx->nReadRaw) {'), (840, ' wsctx->header.headerLen = 10 + WS_HYBI_MASK_LEN;'), (841, ' wsctx->header.payloadLen = WS_NTOH64(wsctx->header.data->u.s64.l64);'), (842, ' wsctx->header.mask = wsctx->header.data->u.s64.m64;'), (843, ' } else {'), (844, ' /* Incomplete frame header, try again */'), (845, ' rfbErr("%s: incomplete frame header; ret=%d\\n", __func__, ret);'), (846, ' errno = EAGAIN;'), (847, ' *sockRet = -1;'), (848, ' return WS_HYBI_STATE_HEADER_PENDING;'), (849, ' }'), (850, ''), (851, ' /* absolute length of frame */'), (852, ' wsctx->nToRead = wsctx->header.headerLen + wsctx->header.payloadLen;'), (853, ''), (854, ' /* set payload pointer just after header */'), (855, ' wsctx->writePos = wsctx->codeBufDecode + wsctx->nReadRaw;'), (856, ''), (857, ' wsctx->readPos = (unsigned char *)(wsctx->codeBufDecode + wsctx->header.headerLen);'), (858, ''), (859, ' rfbLog("header complete: state=%d flen=%d writeTo=%p\\n", wsctx->hybiDecodeState, wsctx->nToRead, wsctx->writePos);'), (860, ''), (861, ' return WS_HYBI_STATE_DATA_NEEDED;'), (862, '}'), (864, 'static int'), (865, 'hybiWsFrameComplete(ws_ctx_t *wsctx)'), (866, '{'), (867, ' return wsctx != NULL && hybiRemaining(wsctx) == 0;'), (868, '}'), (870, 'static char *'), (871, 'hybiPayloadStart(ws_ctx_t *wsctx)'), (872, '{'), (873, ' return wsctx->codeBufDecode + wsctx->header.headerLen;'), (874, '}'), (877, '/**'), (878, ' * Read the remaining payload bytes from associated raw socket.'), (879, ' *'), (880, ' * - try to read remaining bytes from socket'), (881, ' * - unmask all multiples of 4'), (882, ' * - if frame incomplete but some bytes are left, these are copied to'), (883, ' * the carry buffer'), (884, ' * - if opcode is TEXT: Base64-decode all unmasked received bytes'), (885, ' * - set state for reading decoded data'), (886, ' * - reset write position to begin of buffer (+ header)'), (887, ' * --> before we retrieve more data we let the caller clear all bytes'), (888, ' * from the reception buffer'), (889, ' * - execute return data routine'), (890, ' *'), (891, ' * Sets errno corresponding to what it gets from the underlying'), (892, ' * socket or EIO if some internal sanity check fails.'), (893, ' *'), (894, ' * @param[in] cl client ptr with raw socket reference'), (895, ' * @param[out] dst destination buffer'), (896, ' * @param[in] len size of destination buffer'), (897, ' * @param[out] sockRet emulated recv return value'), (898, ' * @return next hybi decode state'), (899, ' */'), (900, 'static int'), (901, 'hybiReadAndDecode(rfbClientPtr cl, char *dst, int len, int *sockRet)'), (902, '{'), (903, ' int n;'), (904, ' int i;'), (905, ' int toReturn;'), (906, ' 
int toDecode;'), (907, ' int bufsize;'), (908, ' int nextRead;'), (909, ' unsigned char *data;'), (910, ' uint32_t *data32;'), (911, ' ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx;'), (912, ''), (913, ' /* if data was carried over, copy to start of buffer */'), (914, ' memcpy(wsctx->writePos, wsctx->carryBuf, wsctx->carrylen);'), (915, ' wsctx->writePos += wsctx->carrylen;'), (916, ''), (917, " /* -1 accounts for potential '\\0' terminator for base64 decoding */"), (918, ' bufsize = wsctx->codeBufDecode + ARRAYSIZE(wsctx->codeBufDecode) - wsctx->writePos - 1;'), (919, ' if (hybiRemaining(wsctx) > bufsize) {'), (920, ' nextRead = bufsize;'), (921, ' } else {'), (922, ' nextRead = hybiRemaining(wsctx);'), (923, ' }'), (924, ''), (925, ' rfbLog("calling read with buf=%p and len=%d (decodebuf=%p headerLen=%d\\n)", wsctx->writePos, nextRead, wsctx->codeBufDecode, wsctx->header.headerLen);'), (926, ''), (927, ' if (wsctx->nReadRaw < wsctx->nToRead) {'), (928, ' /* decode more data */'), (929, ' if (-1 == (n = ws_read(cl, wsctx->writePos, nextRead))) {'), (933, ' *sockRet = -1;'), (934, ' return WS_HYBI_STATE_ERR;'), (935, ' } else if (n == 0) {'), (936, ' *sockRet = 0;'), (937, ' return WS_HYBI_STATE_ERR;'), (939, ' wsctx->nReadRaw += n;'), (940, ' rfbLog("read %d bytes from socket; nRead=%d\\n", n, wsctx->nReadRaw);'), (941, ' } else {'), (942, ' n = 0;'), (943, ' }'), (944, ''), (945, ' wsctx->writePos += n;'), (946, ''), (947, ' if (wsctx->nReadRaw >= wsctx->nToRead) {'), (948, ' if (wsctx->nReadRaw > wsctx->nToRead) {'), (949, ' rfbErr("%s: internal error, read past websocket frame", __func__);'), (950, ' errno=EIO;'), (951, ' *sockRet = -1;'), (952, ' return WS_HYBI_STATE_ERR;'), (954, ' }'), (955, ''), (956, ' toDecode = wsctx->writePos - hybiPayloadStart(wsctx);'), (957, ' rfbLog("toDecode=%d from n=%d carrylen=%d headerLen=%d\\n", toDecode, n, wsctx->carrylen, wsctx->header.headerLen);'), (958, ' if (toDecode < 0) {'), (959, ' rfbErr("%s: internal error; negative number of bytes to decode: %d", __func__, toDecode);'), (960, ' errno=EIO;'), (961, ' *sockRet = -1;'), (962, ' return WS_HYBI_STATE_ERR;'), (963, ' }'), (964, ''), (965, ' /* for a possible base64 decoding, we decode multiples of 4 bytes until'), (966, ' * the whole frame is received and carry over any remaining bytes in the carry buf*/'), (967, ' data = (unsigned char *)hybiPayloadStart(wsctx);'), (968, ' data32= (uint32_t *)data;'), (969, ''), (970, ' for (i = 0; i < (toDecode >> 2); i++) {'), (971, ' data32[i] ^= wsctx->header.mask.u;'), (972, ' }'), (973, ' rfbLog("mask decoding; i=%d toDecode=%d\\n", i, toDecode);'), (974, ''), (975, ' if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) {'), (977, ' for (i*=4; i < toDecode; i++) {'), (978, ' data[i] ^= wsctx->header.mask.c[i % 4];'), (979, ' }'), (980, ''), (981, ' /* all data is here, no carrying */'), (982, ' wsctx->carrylen = 0;'), (983, ' } else {'), (984, ' /* carry over remaining, non-multiple-of-four bytes */'), (985, ' wsctx->carrylen = toDecode - (i * 4);'), (986, ' if (wsctx->carrylen < 0 || wsctx->carrylen > ARRAYSIZE(wsctx->carryBuf)) {'), (987, ' rfbErr("%s: internal error, invalid carry over size: carrylen=%d, toDecode=%d, i=%d", __func__, wsctx->carrylen, toDecode, i);'), (988, ' *sockRet = -1;'), (989, ' errno = EIO;'), (990, ' return WS_HYBI_STATE_ERR;'), (991, ' }'), (992, ' rfbLog("carrying over %d bytes from %p to %p\\n", wsctx->carrylen, wsctx->writePos + (i * 4), wsctx->carryBuf);'), (993, ' memcpy(wsctx->carryBuf, data + (i * 4), 
wsctx->carrylen);'), (994, ' }'), (995, ''), (996, ' toReturn = toDecode - wsctx->carrylen;'), (997, ''), (998, ' switch (wsctx->header.opcode) {'), (999, ' case WS_OPCODE_CLOSE:'), (1000, ''), (1001, ' /* this data is not returned as payload data */'), (1002, ' if (hybiWsFrameComplete(wsctx)) {'), (1003, ' rfbLog("got closure, reason %d\\n", WS_NTOH16(((uint16_t *)data)[0]));'), (1004, ' errno = ECONNRESET;'), (1005, ' *sockRet = -1;'), (1006, ' return WS_HYBI_STATE_FRAME_COMPLETE;'), (1007, ' } else {'), (1008, ' rfbErr("%s: close reason with long frame not supported", __func__);'), (1009, ' errno = EIO;'), (1010, ' *sockRet = -1;'), (1011, ' return WS_HYBI_STATE_ERR;'), (1012, ' }'), (1013, ' break;'), (1014, ' case WS_OPCODE_TEXT_FRAME:'), (1015, " data[toReturn] = '\\0';"), (1016, ' rfbLog("Initiate Base64 decoding in %p with max size %d and \'\\\\0\' at %p\\n", data, bufsize, data + toReturn);'), (1017, ' if (-1 == (wsctx->readlen = b64_pton((char *)data, data, bufsize))) {'), (1018, ' rfbErr("Base64 decode error in %s; data=%p bufsize=%d", __func__, data, bufsize);'), (1019, ' rfbErr("%s: Base64 decode error; %m\\n", __func__);'), (1020, ' }'), (1021, ' wsctx->writePos = hybiPayloadStart(wsctx);'), (1022, ' break;'), (1023, ' case WS_OPCODE_BINARY_FRAME:'), (1024, ' wsctx->readlen = toReturn;'), (1025, ' wsctx->writePos = hybiPayloadStart(wsctx);'), (1026, ' break;'), (1027, ' default:'), (1028, ' rfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\\n", __func__, (int)wsctx->header.opcode, wsctx->header.data->b0, wsctx->header.data->b1);'), (1029, ' }'), (1030, ' wsctx->readPos = data;'), (1031, ''), (1032, ' return hybiReturnData(dst, len, wsctx, sockRet);'), (1033, '}'), (1034, ''), (1035, '/**'), (1036, ' * Read function for websocket-socket emulation.'), (1037, ' *'), (1038, ' * 0 1 2 3'), (1039, ' * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1'), (1040, ' * +-+-+-+-+-------+-+-------------+-------------------------------+'), (1041, ' * |F|R|R|R| opcode|M| Payload len | Extended payload length |'), (1042, ' * |I|S|S|S| (4) |A| (7) | (16/64) |'), (1043, ' * |N|V|V|V| |S| | (if payload len==126/127) |'), (1044, ' * | |1|2|3| |K| | |'), (1045, ' * +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +'), (1046, ' * | Extended payload length continued, if payload len == 127 |'), (1047, ' * + - - - - - - - - - - - - - - - +-------------------------------+'), (1048, ' * | |Masking-key, if MASK set to 1 |'), (1049, ' * +-------------------------------+-------------------------------+'), (1050, ' * | Masking-key (continued) | Payload Data |'), (1051, ' * +-------------------------------- - - - - - - - - - - - - - - - +'), (1052, ' * : Payload Data continued ... :'), (1053, ' * + - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +'), (1054, ' * | Payload Data continued ... 
|'), (1055, ' * +---------------------------------------------------------------+'), (1056, ' *'), (1057, ' * Using the decode buffer, this function:'), (1058, ' * - reads the complete header from the underlying socket'), (1059, ' * - reads any remaining data bytes'), (1060, ' * - unmasks the payload data using the provided mask'), (1061, ' * - decodes Base64 encoded text data'), (1062, ' * - copies len bytes of decoded payload data into dst'), (1063, ' *'), (1064, ' * Emulates a read call on a socket.'), (1065, ' */'), (1066, 'static int'), (1067, 'webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len)'), (1068, '{'), (1069, ' int result = -1;'), (1070, ' ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx;'), (1071, ' /* int fin; */ /* not used atm */'), (1072, ''), (1073, ' /* rfbLog(" <== %s[%d]: %d cl: %p, wsctx: %p-%p (%d)\\n", __func__, gettid(), len, cl, wsctx, (char *)wsctx + sizeof(ws_ctx_t), sizeof(ws_ctx_t)); */'), (1074, ' rfbLog("%s_enter: len=%d; "'), (1075, ' "CTX: readlen=%d readPos=%p "'), (1076, ' "writeTo=%p "'), (1077, ' "state=%d toRead=%d remaining=%d "'), (1078, ' " nReadRaw=%d carrylen=%d carryBuf=%p\\n",'), (1079, ' __func__, len,'), (1080, ' wsctx->readlen, wsctx->readPos,'), (1081, ' wsctx->writePos,'), (1082, ' wsctx->hybiDecodeState, wsctx->nToRead, hybiRemaining(wsctx),'), (1083, ' wsctx->nReadRaw, wsctx->carrylen, wsctx->carryBuf);'), (1084, ''), (1085, ' switch (wsctx->hybiDecodeState){'), (1086, ' case WS_HYBI_STATE_HEADER_PENDING:'), (1087, ' wsctx->hybiDecodeState = hybiReadHeader(cl, &result);'), (1088, ' if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) {'), (1089, ' goto spor;'), (1090, ' }'), (1091, ' if (wsctx->hybiDecodeState != WS_HYBI_STATE_HEADER_PENDING) {'), (1092, ''), (1093, ' /* when header is complete, try to read some more data */'), (1094, ' wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result);'), (1095, ' }'), (1096, ' break;'), (1097, ' case WS_HYBI_STATE_DATA_AVAILABLE:'), (1098, ' wsctx->hybiDecodeState = hybiReturnData(dst, len, wsctx, &result);'), (1099, ' break;'), (1100, ' case WS_HYBI_STATE_DATA_NEEDED:'), (1101, ' wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result);'), (1102, ' break;'), (1103, ' case WS_HYBI_STATE_CLOSE_REASON_PENDING:'), (1104, ' wsctx->hybiDecodeState = hybiReadAndDecode(cl, dst, len, &result);'), (1105, ' break;'), (1107, ' /* invalid state */'), (1108, ' rfbErr("%s: called with invalid state %d\\n", wsctx->hybiDecodeState);'), (1109, ' result = -1;'), (1110, ' errno = EIO;'), (1111, ' wsctx->hybiDecodeState = WS_HYBI_STATE_ERR;'), (1117, ' if (wsctx->hybiDecodeState == WS_HYBI_STATE_FRAME_COMPLETE) {'), (1118, ' rfbLog("frame received successfully, cleaning up: read=%d hlen=%d plen=%d\\n", wsctx->header.nRead, wsctx->header.headerLen, wsctx->header.payloadLen);'), (1119, ' /* frame finished, cleanup state */'), (1120, ' hybiDecodeCleanup(wsctx);'), (1121, ' } else if (wsctx->hybiDecodeState == WS_HYBI_STATE_ERR) {'), (1122, ' hybiDecodeCleanup(wsctx);'), (1123, ' }'), (1124, ' rfbLog("%s_exit: len=%d; "'), (1125, ' "CTX: readlen=%d readPos=%p "'), (1126, ' "writePos=%p "'), (1127, ' "state=%d toRead=%d remaining=%d "'), (1128, ' "nRead=%d carrylen=%d carryBuf=%p "'), (1129, ' "result=%d\\n",'), (1130, ' __func__, len,'), (1131, ' wsctx->readlen, wsctx->readPos,'), (1132, ' wsctx->writePos,'), (1133, ' wsctx->hybiDecodeState, wsctx->nToRead, hybiRemaining(wsctx),'), (1134, ' wsctx->nReadRaw, wsctx->carrylen, wsctx->carryBuf,'), (1135, ' result);'), (1285, ' if (wsctx && wsctx->readlen)')], 
'deleted': [(96, 'typedef struct ws_ctx_s {'), (97, ' char codeBufDecode[B64LEN(UPDATE_BUF_SIZE) + WSHLENMAX]; /* base64 + maximum frame header length */'), (98, '\tchar codeBufEncode[B64LEN(UPDATE_BUF_SIZE) + WSHLENMAX]; /* base64 + maximum frame header length */'), (99, '\tchar readbuf[8192];'), (100, ' int readbufstart;'), (101, ' int readbuflen;'), (102, ' int dblen;'), (103, ' char carryBuf[3]; /* For base64 carry-over */'), (104, ' int carrylen;'), (105, ' int version;'), (106, ' int base64;'), (107, ' wsEncodeFunc encode;'), (108, ' wsDecodeFunc decode;'), (109, '} ws_ctx_t;'), (473, ''), (665, 'webSocketsDecodeHybi(rfbClientPtr cl, char *dst, int len)'), (667, ' char *buf, *payload;'), (668, ' uint32_t *payload32;'), (669, ' int ret = -1, result = -1;'), (670, ' int total = 0;'), (671, ' ws_mask_t mask;'), (672, ' ws_header_t *header;'), (673, ' int i;'), (674, ' unsigned char opcode;'), (675, ' ws_ctx_t *wsctx = (ws_ctx_t *)cl->wsctx;'), (676, ' int flength, fhlen;'), (677, ' /* int fin; */ /* not used atm */'), (679, ' /* rfbLog(" <== %s[%d]: %d cl: %p, wsctx: %p-%p (%d)\\n", __func__, gettid(), len, cl, wsctx, (char *)wsctx + sizeof(ws_ctx_t), sizeof(ws_ctx_t)); */'), (681, ' if (wsctx->readbuflen) {'), (682, ' /* simply return what we have */'), (683, ' if (wsctx->readbuflen > len) {'), (684, '\tmemcpy(dst, wsctx->readbuf + wsctx->readbufstart, len);'), (685, '\tresult = len;'), (686, '\twsctx->readbuflen -= len;'), (687, '\twsctx->readbufstart += len;'), (689, '\tmemcpy(dst, wsctx->readbuf + wsctx->readbufstart, wsctx->readbuflen);'), (690, '\tresult = wsctx->readbuflen;'), (691, '\twsctx->readbuflen = 0;'), (692, '\twsctx->readbufstart = 0;'), (694, ' goto spor;'), (697, ' buf = wsctx->codeBufDecode;'), (698, ' header = (ws_header_t *)wsctx->codeBufDecode;'), (699, ''), (700, ' ret = ws_peek(cl, buf, B64LEN(len) + WSHLENMAX);'), (701, ''), (702, ' if (ret < 2) {'), (703, ' /* save errno because rfbErr() will tamper it */'), (704, ' if (-1 == ret) {'), (705, ' int olderrno = errno;'), (706, ' rfbErr("%s: peek; %m\\n", __func__);'), (707, ' errno = olderrno;'), (708, ' } else if (0 == ret) {'), (709, ' result = 0;'), (710, ' } else {'), (711, ' errno = EAGAIN;'), (712, ' }'), (713, ' goto spor;'), (716, ' opcode = header->b0 & 0x0f;'), (717, ' /* fin = (header->b0 & 0x80) >> 7; */ /* not used atm */'), (718, ' flength = header->b1 & 0x7f;'), (720, ' /*'), (721, ' * 4.3. Client-to-Server Masking'), (722, ' *'), (723, ' * The client MUST mask all frames sent to the server. 
A server MUST'), (724, ' * close the connection upon receiving a frame with the MASK bit set to 0.'), (725, ' **/'), (726, ' if (!(header->b1 & 0x80)) {'), (727, '\trfbErr("%s: got frame without mask\\n", __func__, ret);'), (728, '\terrno = EIO;'), (729, '\tgoto spor;'), (730, ' }'), (731, ''), (732, ' if (flength < 126) {'), (733, '\tfhlen = 2;'), (734, '\tmask = header->u.m;'), (735, ' } else if (flength == 126 && 4 <= ret) {'), (736, '\tflength = WS_NTOH16(header->u.s16.l16);'), (737, '\tfhlen = 4;'), (738, '\tmask = header->u.s16.m16;'), (739, ' } else if (flength == 127 && 10 <= ret) {'), (740, '\tflength = WS_NTOH64(header->u.s64.l64);'), (741, '\tfhlen = 10;'), (742, '\tmask = header->u.s64.m64;'), (743, ' } else {'), (744, ' /* Incomplete frame header */'), (745, ' rfbErr("%s: incomplete frame header\\n", __func__, ret);'), (746, ' errno = EIO;'), (747, ' goto spor;'), (748, ' }'), (750, ' /* absolute length of frame */'), (751, ' total = fhlen + flength + 4;'), (752, ' payload = buf + fhlen + 4; /* header length + mask */'), (754, ' if (-1 == (ret = ws_read(cl, buf, total))) {'), (758, ' return ret;'), (759, ' } else if (ret < total) {'), (760, ' /* GT TODO: hmm? */'), (761, ' rfbLog("%s: read; got partial data\\n", __func__);'), (762, ' } else {'), (763, " buf[ret] = '\\0';"), (765, ''), (766, ' /* process 1 frame (32 bit op) */'), (767, ' payload32 = (uint32_t *)payload;'), (768, ' for (i = 0; i < flength / 4; i++) {'), (769, '\tpayload32[i] ^= mask.u;'), (772, ' for (i*=4; i < flength; i++) {'), (773, '\tpayload[i] ^= mask.c[i % 4];'), (774, ' }'), (775, ''), (776, ' switch (opcode) {'), (777, ' case WS_OPCODE_CLOSE:'), (778, '\trfbLog("got closure, reason %d\\n", WS_NTOH16(((uint16_t *)payload)[0]));'), (779, '\terrno = ECONNRESET;'), (780, '\tbreak;'), (781, ' case WS_OPCODE_TEXT_FRAME:'), (782, '\tif (-1 == (flength = b64_pton(payload, (unsigned char *)wsctx->codeBufDecode, sizeof(wsctx->codeBufDecode)))) {'), (783, '\t rfbErr("%s: Base64 decode error; %m\\n", __func__);'), (784, '\t break;'), (785, '\t}'), (786, '\tpayload = wsctx->codeBufDecode;'), (787, '\t/* fall through */'), (788, ' case WS_OPCODE_BINARY_FRAME:'), (789, '\tif (flength > len) {'), (790, '\t memcpy(wsctx->readbuf, payload + len, flength - len);'), (791, '\t wsctx->readbufstart = 0;'), (792, '\t wsctx->readbuflen = flength - len;'), (793, '\t flength = len;'), (794, '\t}'), (795, '\tmemcpy(dst, payload, flength);'), (796, '\tresult = flength;'), (797, '\tbreak;'), (799, '\trfbErr("%s: unhandled opcode %d, b0: %02x, b1: %02x\\n", __func__, (int)opcode, header->b0, header->b1);'), (954, ' if (wsctx && wsctx->readbuflen)')]}
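Among the added lines above, the fix introduces an ARRAYSIZE macro (added line 82) and uses it to bound carryBuf and the decode buffer. The second division term is a partial compile-time guard against passing a pointer: for a true array the modulus is zero, so the term collapses to dividing by 1; when the element size does not divide the operand's size, the constant expression becomes a divide-by-zero and the compiler rejects it. A small demonstration, with the caveat that the guard is a heuristic rather than a full pointer detector:

#include <stdio.h>

#define ARRAYSIZE(a) ((sizeof(a) / sizeof((a[0]))) / (size_t)(!(sizeof(a) % sizeof((a[0])))))

int main(void)
{
    char carryBuf[3];                        /* like ws_ctx_t.carryBuf in the fix */
    printf("%zu\n", ARRAYSIZE(carryBuf));    /* prints 3 */

    /* For a char* or int* the modulus is still 0, so the guard does not
     * fire and the macro silently yields sizeof(ptr)/sizeof(elem); it only
     * traps when sizeof(a) is not a multiple of sizeof(a[0]). */
    return 0;
}

The remaining scalar fields of the row follow.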
num_lines_added: 463
num_lines_deleted: 132
num_lines_in_file: 872
num_tokens_in_file: 5,734
num_lines_in_method: 110
num_tokens_in_method: 729
method_complexity: 21
repo: https://github.com/LibVNC/libvncserver
cve_id: CVE-2017-18922
cwe_id: CWE-787
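Editorial note: CWE-787 is Out-of-bounds Write, matching the defect above, where a wire-supplied length drives a write past a fixed buffer. A deliberately toy restatement of the shape and its clamp; none of these names are library code:

#include <string.h>

#define BUF_CAP 16   /* dst is assumed to hold BUF_CAP bytes */

static void oob_write(char *dst, const char *src, size_t wire_len)
{
    memcpy(dst, src, wire_len);   /* CWE-787 when wire_len > BUF_CAP */
}

static void clamped_write(char *dst, const char *src, size_t wire_len)
{
    memcpy(dst, src, wire_len < BUF_CAP ? wire_len : BUF_CAP);
}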
id: 721
file_name: redis.c
programming_language: C
method_name: genRedisInfoString
code_before:
/* * Copyright (c) 2009-2010, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "redis.h" #ifdef HAVE_BACKTRACE #include <execinfo.h> #include <ucontext.h> #endif /* HAVE_BACKTRACE */ #include <time.h> #include <signal.h> #include <sys/wait.h> #include <errno.h> #include <assert.h> #include <ctype.h> #include <stdarg.h> #include <arpa/inet.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/uio.h> #include <limits.h> #include <float.h> #include <math.h> #include <pthread.h> #include <sys/resource.h> /* Our shared "common" objects */ struct sharedObjectsStruct shared; /* Global vars that are actally used as constants. The following double * values are used for double on-disk serialization, and are initialized * at runtime to avoid strange compiler optimizations. 
*/ double R_Zero, R_PosInf, R_NegInf, R_Nan; /*================================= Globals ================================= */ /* Global vars */ struct redisServer server; /* server global state */ struct redisCommand *commandTable; struct redisCommand readonlyCommandTable[] = { {"get",getCommand,2,0,NULL,1,1,1}, {"set",setCommand,3,REDIS_CMD_DENYOOM,NULL,0,0,0}, {"setnx",setnxCommand,3,REDIS_CMD_DENYOOM,NULL,0,0,0}, {"setex",setexCommand,4,REDIS_CMD_DENYOOM,NULL,0,0,0}, {"append",appendCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"strlen",strlenCommand,2,0,NULL,1,1,1}, {"del",delCommand,-2,0,NULL,0,0,0}, {"exists",existsCommand,2,0,NULL,1,1,1}, {"setbit",setbitCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"getbit",getbitCommand,3,0,NULL,1,1,1}, {"setrange",setrangeCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"getrange",getrangeCommand,4,0,NULL,1,1,1}, {"substr",getrangeCommand,4,0,NULL,1,1,1}, {"incr",incrCommand,2,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"decr",decrCommand,2,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"mget",mgetCommand,-2,0,NULL,1,-1,1}, {"rpush",rpushCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"lpush",lpushCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"rpushx",rpushxCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"lpushx",lpushxCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"linsert",linsertCommand,5,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"rpop",rpopCommand,2,0,NULL,1,1,1}, {"lpop",lpopCommand,2,0,NULL,1,1,1}, {"brpop",brpopCommand,-3,0,NULL,1,1,1}, {"brpoplpush",brpoplpushCommand,4,REDIS_CMD_DENYOOM,NULL,1,2,1}, {"blpop",blpopCommand,-3,0,NULL,1,1,1}, {"llen",llenCommand,2,0,NULL,1,1,1}, {"lindex",lindexCommand,3,0,NULL,1,1,1}, {"lset",lsetCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"lrange",lrangeCommand,4,0,NULL,1,1,1}, {"ltrim",ltrimCommand,4,0,NULL,1,1,1}, {"lrem",lremCommand,4,0,NULL,1,1,1}, {"rpoplpush",rpoplpushCommand,3,REDIS_CMD_DENYOOM,NULL,1,2,1}, {"sadd",saddCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"srem",sremCommand,3,0,NULL,1,1,1}, {"smove",smoveCommand,4,0,NULL,1,2,1}, {"sismember",sismemberCommand,3,0,NULL,1,1,1}, {"scard",scardCommand,2,0,NULL,1,1,1}, {"spop",spopCommand,2,0,NULL,1,1,1}, {"srandmember",srandmemberCommand,2,0,NULL,1,1,1}, {"sinter",sinterCommand,-2,REDIS_CMD_DENYOOM,NULL,1,-1,1}, {"sinterstore",sinterstoreCommand,-3,REDIS_CMD_DENYOOM,NULL,2,-1,1}, {"sunion",sunionCommand,-2,REDIS_CMD_DENYOOM,NULL,1,-1,1}, {"sunionstore",sunionstoreCommand,-3,REDIS_CMD_DENYOOM,NULL,2,-1,1}, {"sdiff",sdiffCommand,-2,REDIS_CMD_DENYOOM,NULL,1,-1,1}, {"sdiffstore",sdiffstoreCommand,-3,REDIS_CMD_DENYOOM,NULL,2,-1,1}, {"smembers",sinterCommand,2,0,NULL,1,1,1}, {"zadd",zaddCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"zincrby",zincrbyCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"zrem",zremCommand,3,0,NULL,1,1,1}, {"zremrangebyscore",zremrangebyscoreCommand,4,0,NULL,1,1,1}, {"zremrangebyrank",zremrangebyrankCommand,4,0,NULL,1,1,1}, {"zunionstore",zunionstoreCommand,-4,REDIS_CMD_DENYOOM,zunionInterBlockClientOnSwappedKeys,0,0,0}, {"zinterstore",zinterstoreCommand,-4,REDIS_CMD_DENYOOM,zunionInterBlockClientOnSwappedKeys,0,0,0}, {"zrange",zrangeCommand,-4,0,NULL,1,1,1}, {"zrangebyscore",zrangebyscoreCommand,-4,0,NULL,1,1,1}, {"zrevrangebyscore",zrevrangebyscoreCommand,-4,0,NULL,1,1,1}, {"zcount",zcountCommand,4,0,NULL,1,1,1}, {"zrevrange",zrevrangeCommand,-4,0,NULL,1,1,1}, {"zcard",zcardCommand,2,0,NULL,1,1,1}, {"zscore",zscoreCommand,3,0,NULL,1,1,1}, {"zrank",zrankCommand,3,0,NULL,1,1,1}, {"zrevrank",zrevrankCommand,3,0,NULL,1,1,1}, {"hset",hsetCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, 
{"hsetnx",hsetnxCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"hget",hgetCommand,3,0,NULL,1,1,1}, {"hmset",hmsetCommand,-4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"hmget",hmgetCommand,-3,0,NULL,1,1,1}, {"hincrby",hincrbyCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"hdel",hdelCommand,3,0,NULL,1,1,1}, {"hlen",hlenCommand,2,0,NULL,1,1,1}, {"hkeys",hkeysCommand,2,0,NULL,1,1,1}, {"hvals",hvalsCommand,2,0,NULL,1,1,1}, {"hgetall",hgetallCommand,2,0,NULL,1,1,1}, {"hexists",hexistsCommand,3,0,NULL,1,1,1}, {"incrby",incrbyCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"decrby",decrbyCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"getset",getsetCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"mset",msetCommand,-3,REDIS_CMD_DENYOOM,NULL,1,-1,2}, {"msetnx",msetnxCommand,-3,REDIS_CMD_DENYOOM,NULL,1,-1,2}, {"randomkey",randomkeyCommand,1,0,NULL,0,0,0}, {"select",selectCommand,2,0,NULL,0,0,0}, {"move",moveCommand,3,0,NULL,1,1,1}, {"rename",renameCommand,3,0,NULL,1,1,1}, {"renamenx",renamenxCommand,3,0,NULL,1,1,1}, {"expire",expireCommand,3,0,NULL,0,0,0}, {"expireat",expireatCommand,3,0,NULL,0,0,0}, {"keys",keysCommand,2,0,NULL,0,0,0}, {"dbsize",dbsizeCommand,1,0,NULL,0,0,0}, {"auth",authCommand,2,0,NULL,0,0,0}, {"ping",pingCommand,1,0,NULL,0,0,0}, {"echo",echoCommand,2,0,NULL,0,0,0}, {"save",saveCommand,1,0,NULL,0,0,0}, {"bgsave",bgsaveCommand,1,0,NULL,0,0,0}, {"bgrewriteaof",bgrewriteaofCommand,1,0,NULL,0,0,0}, {"shutdown",shutdownCommand,1,0,NULL,0,0,0}, {"lastsave",lastsaveCommand,1,0,NULL,0,0,0}, {"type",typeCommand,2,0,NULL,1,1,1}, {"multi",multiCommand,1,0,NULL,0,0,0}, {"exec",execCommand,1,REDIS_CMD_DENYOOM,execBlockClientOnSwappedKeys,0,0,0}, {"discard",discardCommand,1,0,NULL,0,0,0}, {"sync",syncCommand,1,0,NULL,0,0,0}, {"flushdb",flushdbCommand,1,0,NULL,0,0,0}, {"flushall",flushallCommand,1,0,NULL,0,0,0}, {"sort",sortCommand,-2,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"info",infoCommand,1,0,NULL,0,0,0}, {"monitor",monitorCommand,1,0,NULL,0,0,0}, {"ttl",ttlCommand,2,0,NULL,1,1,1}, {"persist",persistCommand,2,0,NULL,1,1,1}, {"slaveof",slaveofCommand,3,0,NULL,0,0,0}, {"debug",debugCommand,-2,0,NULL,0,0,0}, {"config",configCommand,-2,0,NULL,0,0,0}, {"subscribe",subscribeCommand,-2,0,NULL,0,0,0}, {"unsubscribe",unsubscribeCommand,-1,0,NULL,0,0,0}, {"psubscribe",psubscribeCommand,-2,0,NULL,0,0,0}, {"punsubscribe",punsubscribeCommand,-1,0,NULL,0,0,0}, {"publish",publishCommand,3,REDIS_CMD_FORCE_REPLICATION,NULL,0,0,0}, {"watch",watchCommand,-2,0,NULL,0,0,0}, {"unwatch",unwatchCommand,1,0,NULL,0,0,0} }; /*============================ Utility functions ============================ */ void redisLog(int level, const char *fmt, ...) { const int syslogLevelMap[] = { LOG_DEBUG, LOG_INFO, LOG_NOTICE, LOG_WARNING }; const char *c = ".-*#"; time_t now = time(NULL); va_list ap; FILE *fp; char buf[64]; char msg[REDIS_MAX_LOGMSG_LEN]; if (level < server.verbosity) return; fp = (server.logfile == NULL) ? stdout : fopen(server.logfile,"a"); if (!fp) return; va_start(ap, fmt); vsnprintf(msg, sizeof(msg), fmt, ap); va_end(ap); strftime(buf,sizeof(buf),"%d %b %H:%M:%S",localtime(&now)); fprintf(fp,"[%d] %s %c %s\n",(int)getpid(),buf,c[level],msg); fflush(fp); if (server.logfile) fclose(fp); if (server.syslog_enabled) syslog(syslogLevelMap[level], "%s", msg); } /* Redis generally does not try to recover from out of memory conditions * when allocating objects or strings, it is not clear if it will be possible * to report this condition to the client since the networking layer itself * is based on heap allocation for send buffers, so we simply abort. 
/*============================ Utility functions ============================ */

void redisLog(int level, const char *fmt, ...) {
    const int syslogLevelMap[] = { LOG_DEBUG, LOG_INFO, LOG_NOTICE, LOG_WARNING };
    const char *c = ".-*#";
    time_t now = time(NULL);
    va_list ap;
    FILE *fp;
    char buf[64];
    char msg[REDIS_MAX_LOGMSG_LEN];

    if (level < server.verbosity) return;

    fp = (server.logfile == NULL) ? stdout : fopen(server.logfile,"a");
    if (!fp) return;

    va_start(ap, fmt);
    vsnprintf(msg, sizeof(msg), fmt, ap);
    va_end(ap);

    strftime(buf,sizeof(buf),"%d %b %H:%M:%S",localtime(&now));
    fprintf(fp,"[%d] %s %c %s\n",(int)getpid(),buf,c[level],msg);
    fflush(fp);

    if (server.logfile) fclose(fp);

    if (server.syslog_enabled) syslog(syslogLevelMap[level], "%s", msg);
}

/* Redis generally does not try to recover from out of memory conditions
 * when allocating objects or strings: since the networking layer itself is
 * based on heap-allocated send buffers, it is not clear that the condition
 * could even be reported to the client, so we simply abort.
 * At least the code will be simpler to read... */
void oom(const char *msg) {
    redisLog(REDIS_WARNING, "%s: Out of memory\n",msg);
    sleep(1);
    abort();
}

/*====================== Hash table type implementation ==================== */

/* This is a hash table type that uses the SDS dynamic strings library as
 * keys and Redis objects as values (objects can hold SDS strings,
 * lists, sets). */

void dictVanillaFree(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);
    zfree(val);
}

void dictListDestructor(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);
    listRelease((list*)val);
}

int dictSdsKeyCompare(void *privdata, const void *key1, const void *key2)
{
    int l1,l2;
    DICT_NOTUSED(privdata);

    l1 = sdslen((sds)key1);
    l2 = sdslen((sds)key2);
    if (l1 != l2) return 0;
    return memcmp(key1, key2, l1) == 0;
}

/* A case insensitive version used for the command lookup table. */
int dictSdsKeyCaseCompare(void *privdata, const void *key1, const void *key2)
{
    DICT_NOTUSED(privdata);
    return strcasecmp(key1, key2) == 0;
}

void dictRedisObjectDestructor(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);

    if (val == NULL) return; /* Values of swapped out keys are set to NULL */
    decrRefCount(val);
}

void dictSdsDestructor(void *privdata, void *val)
{
    DICT_NOTUSED(privdata);
    sdsfree(val);
}

int dictObjKeyCompare(void *privdata, const void *key1, const void *key2)
{
    const robj *o1 = key1, *o2 = key2;
    return dictSdsKeyCompare(privdata,o1->ptr,o2->ptr);
}

unsigned int dictObjHash(const void *key) {
    const robj *o = key;
    return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
}

unsigned int dictSdsHash(const void *key) {
    return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
}

unsigned int dictSdsCaseHash(const void *key) {
    return dictGenCaseHashFunction((unsigned char*)key, sdslen((char*)key));
}

int dictEncObjKeyCompare(void *privdata, const void *key1, const void *key2)
{
    robj *o1 = (robj*) key1, *o2 = (robj*) key2;
    int cmp;

    if (o1->encoding == REDIS_ENCODING_INT &&
        o2->encoding == REDIS_ENCODING_INT)
            return o1->ptr == o2->ptr;

    o1 = getDecodedObject(o1);
    o2 = getDecodedObject(o2);
    cmp = dictSdsKeyCompare(privdata,o1->ptr,o2->ptr);
    decrRefCount(o1);
    decrRefCount(o2);
    return cmp;
}

unsigned int dictEncObjHash(const void *key) {
    robj *o = (robj*) key;

    if (o->encoding == REDIS_ENCODING_RAW) {
        return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
    } else {
        if (o->encoding == REDIS_ENCODING_INT) {
            char buf[32];
            int len;

            len = ll2string(buf,32,(long)o->ptr);
            return dictGenHashFunction((unsigned char*)buf, len);
        } else {
            unsigned int hash;

            o = getDecodedObject(o);
            hash = dictGenHashFunction(o->ptr, sdslen((sds)o->ptr));
            decrRefCount(o);
            return hash;
        }
    }
}
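/* Worked example (editor's note, derived from the two functions above): the
 * key "123" may be stored REDIS_ENCODING_RAW (an sds string) or
 * REDIS_ENCODING_INT (the value 123 stuffed directly into o->ptr).
 * dictEncObjHash prints an INT-encoded key back into a 32-byte buffer with
 * ll2string() before hashing, so both encodings of "123" hash identically,
 * and dictEncObjKeyCompare decodes before comparing for the same reason;
 * only two INT-encoded keys can be compared by raw pointer value. */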
/* Sets type */
dictType setDictType = {
    dictEncObjHash,            /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictEncObjKeyCompare,      /* key compare */
    dictRedisObjectDestructor, /* key destructor */
    NULL                       /* val destructor */
};

/* Sorted sets hash (note: a skiplist is used in addition to the hash table) */
dictType zsetDictType = {
    dictEncObjHash,            /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictEncObjKeyCompare,      /* key compare */
    dictRedisObjectDestructor, /* key destructor */
    NULL                       /* val destructor */
};

/* Db->dict, keys are sds strings, vals are Redis objects. */
dictType dbDictType = {
    dictSdsHash,                /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictSdsKeyCompare,          /* key compare */
    dictSdsDestructor,          /* key destructor */
    dictRedisObjectDestructor   /* val destructor */
};

/* Db->expires */
dictType keyptrDictType = {
    dictSdsHash,               /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictSdsKeyCompare,         /* key compare */
    NULL,                      /* key destructor */
    NULL                       /* val destructor */
};

/* Command table. sds string -> command struct pointer. */
dictType commandTableDictType = {
    dictSdsCaseHash,           /* hash function */
    NULL,                      /* key dup */
    NULL,                      /* val dup */
    dictSdsKeyCaseCompare,     /* key compare */
    dictSdsDestructor,         /* key destructor */
    NULL                       /* val destructor */
};

/* Hash type hash table (note that small hashes are represented with zipmaps) */
dictType hashDictType = {
    dictEncObjHash,             /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictEncObjKeyCompare,       /* key compare */
    dictRedisObjectDestructor,  /* key destructor */
    dictRedisObjectDestructor   /* val destructor */
};

/* Keylist hash table type has unencoded redis objects as keys and
 * lists as values. It's used for blocking operations (BLPOP) and to
 * map swapped keys to a list of clients waiting for these keys to be
 * loaded. */
dictType keylistDictType = {
    dictObjHash,                /* hash function */
    NULL,                       /* key dup */
    NULL,                       /* val dup */
    dictObjKeyCompare,          /* key compare */
    dictRedisObjectDestructor,  /* key destructor */
    dictListDestructor          /* val destructor */
};

int htNeedsResize(dict *dict) {
    long long size, used;

    size = dictSlots(dict);
    used = dictSize(dict);
    return (size && used && size > DICT_HT_INITIAL_SIZE &&
            (used*100/size < REDIS_HT_MINFILL));
}

/* If the percentage of used slots in the HT drops below REDIS_HT_MINFILL
 * we resize the hash table to save memory */
void tryResizeHashTables(void) {
    int j;

    for (j = 0; j < server.dbnum; j++) {
        if (htNeedsResize(server.db[j].dict))
            dictResize(server.db[j].dict);
        if (htNeedsResize(server.db[j].expires))
            dictResize(server.db[j].expires);
    }
}
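/* Illustration (editor's sketch; REDIS_HT_MINFILL's actual value lives in
 * redis.h, 10 is only an assumed figure here): a table holding 50 keys in
 * 1024 slots gives used*100/size == 4, below the assumed fill threshold of
 * 10, so htNeedsResize() returns true and tryResizeHashTables() shrinks it
 * on the next eligible cron run. */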
/* Our hash table implementation performs rehashing incrementally while
 * we write/read from the hash table. Still if the server is idle, the hash
 * table will use two tables for a long time. So we try to use 1 millisecond
 * of CPU time at every serverCron() loop in order to rehash some keys. */
void incrementallyRehash(void) {
    int j;

    for (j = 0; j < server.dbnum; j++) {
        if (dictIsRehashing(server.db[j].dict)) {
            dictRehashMilliseconds(server.db[j].dict,1);
            break; /* already used our millisecond for this loop... */
        }
    }
}

/* This function is called once a background process of some kind terminates,
 * as we want to avoid resizing the hash tables when there is a child in order
 * to play well with copy-on-write (otherwise when a resize happens lots of
 * memory pages are copied). The goal of this function is to update the
 * ability for dict.c to resize the hash tables according to whether we
 * have running children or not. */
void updateDictResizePolicy(void) {
    if (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1)
        dictEnableResize();
    else
        dictDisableResize();
}

/* ======================= Cron: called every 100 ms ======================== */

/* Try to expire a few timed out keys. The algorithm used is adaptive and
 * will use few CPU cycles if there are few expiring keys, otherwise
 * it will get more aggressive to avoid using too much memory for keys
 * that could already be removed from the keyspace. */
void activeExpireCycle(void) {
    int j;

    for (j = 0; j < server.dbnum; j++) {
        int expired;
        redisDb *db = server.db+j;

        /* Continue to expire if at the end of the cycle more than 25%
         * of the keys were expired. */
        do {
            long num = dictSize(db->expires);
            time_t now = time(NULL);

            expired = 0;
            if (num > REDIS_EXPIRELOOKUPS_PER_CRON)
                num = REDIS_EXPIRELOOKUPS_PER_CRON;
            while (num--) {
                dictEntry *de;
                time_t t;

                if ((de = dictGetRandomKey(db->expires)) == NULL) break;
                t = (time_t) dictGetEntryVal(de);
                if (now > t) {
                    sds key = dictGetEntryKey(de);
                    robj *keyobj = createStringObject(key,sdslen(key));

                    propagateExpire(db,keyobj);
                    dbDelete(db,keyobj);
                    decrRefCount(keyobj);
                    expired++;
                    server.stat_expiredkeys++;
                }
            }
        } while (expired > REDIS_EXPIRELOOKUPS_PER_CRON/4);
    }
}

void updateLRUClock(void) {
    server.lruclock = (time(NULL)/REDIS_LRU_CLOCK_RESOLUTION) &
                                                REDIS_LRU_CLOCK_MAX;
}

int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) {
    int j, loops = server.cronloops++;
    REDIS_NOTUSED(eventLoop);
    REDIS_NOTUSED(id);
    REDIS_NOTUSED(clientData);

    /* We take a cached value of the unix time in the global state because
     * with virtual memory and aging we would otherwise have to store the
     * current time in objects at every object access, and accuracy is not
     * needed. Accessing a global var is faster than calling time(NULL). */
    server.unixtime = time(NULL);

    /* We have just 22 bits per object for LRU information,
     * so we use an (eventually wrapping) LRU clock with 10 seconds
     * resolution: 2^22 values at 10-second resolution cover more or less
     * 1.5 years.
     *
     * Note that even if the clock wraps after 1.5 years it's not a problem:
     * everything will still work, but some objects will appear younger
     * to Redis. For this to happen a given object should never be touched
     * for 1.5 years.
     *
     * Note that you can change the resolution by altering the
     * REDIS_LRU_CLOCK_RESOLUTION define. */
    updateLRUClock();

    /* We received a SIGTERM: shut down here in a safe way, as it is
     * not ok doing so inside the signal handler. */
    if (server.shutdown_asap) {
        if (prepareForShutdown() == REDIS_OK) exit(0);
        redisLog(REDIS_WARNING,"SIGTERM received but errors trying to shut down the server, check the logs for more information");
    }

    /* Show some info about non-empty databases */
    for (j = 0; j < server.dbnum; j++) {
        long long size, used, vkeys;

        size = dictSlots(server.db[j].dict);
        used = dictSize(server.db[j].dict);
        vkeys = dictSize(server.db[j].expires);
        if (!(loops % 50) && (used || vkeys)) {
            redisLog(REDIS_VERBOSE,"DB %d: %lld keys (%lld volatile) in %lld slots HT.",j,used,vkeys,size);
            /* dictPrintStats(server.dict); */
        }
    }
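    /* Timing note (editor's addition): serverCron is registered with a 1 ms
     * initial delay in initServer() and returns 100 below, so the event loop
     * re-arms it roughly every 100 ms. Guards like !(loops % 50) therefore
     * fire about every 5 seconds, and !(loops % 10) about every second. */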
    /* We don't want to resize the hash tables while a background saving
     * is in progress: the saving child is created using fork(), which is
     * implemented with copy-on-write semantics in most modern systems.
     * Resizing the HT while the saving child is at work would move a lot
     * of memory around and cause a lot of pages to be copied in the
     * parent. */
    if (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1) {
        if (!(loops % 10)) tryResizeHashTables();
        if (server.activerehashing) incrementallyRehash();
    }

    /* Show information about connected clients */
    if (!(loops % 50)) {
        redisLog(REDIS_VERBOSE,"%d clients connected (%d slaves), %zu bytes in use",
            listLength(server.clients)-listLength(server.slaves),
            listLength(server.slaves),
            zmalloc_used_memory());
    }

    /* Close connections of timed out clients */
    if ((server.maxidletime && !(loops % 100)) || server.bpop_blocked_clients)
        closeTimedoutClients();

    /* Check if a background saving or AOF rewrite in progress terminated */
    if (server.bgsavechildpid != -1 || server.bgrewritechildpid != -1) {
        int statloc;
        pid_t pid;

        if ((pid = wait3(&statloc,WNOHANG,NULL)) != 0) {
            if (pid == server.bgsavechildpid) {
                backgroundSaveDoneHandler(statloc);
            } else {
                backgroundRewriteDoneHandler(statloc);
            }
            updateDictResizePolicy();
        }
    } else {
        /* If there is no background saving in progress check if
         * we have to save now */
         time_t now = time(NULL);
         for (j = 0; j < server.saveparamslen; j++) {
            struct saveparam *sp = server.saveparams+j;

            if (server.dirty >= sp->changes &&
                now-server.lastsave > sp->seconds) {
                redisLog(REDIS_NOTICE,"%d changes in %d seconds. Saving...",
                    sp->changes, sp->seconds);
                rdbSaveBackground(server.dbfilename);
                break;
            }
         }
    }

    /* Expire a few keys per cycle, only if this is a master.
     * On slaves we wait for DEL operations synthesized by the master
     * in order to guarantee a strict consistency. */
    if (server.masterhost == NULL) activeExpireCycle();

    /* Swap a few keys on disk if we are over the memory limit and VM
     * is enabled. Try to free objects from the free list first. */
    if (vmCanSwapOut()) {
        while (server.vm_enabled && zmalloc_used_memory() >
                server.vm_max_memory)
        {
            int retval = (server.vm_max_threads == 0) ?
                        vmSwapOneObjectBlocking() :
                        vmSwapOneObjectThreaded();
            if (retval == REDIS_ERR && !(loops % 300) &&
                zmalloc_used_memory() >
                (server.vm_max_memory+server.vm_max_memory/10))
            {
                redisLog(REDIS_WARNING,"WARNING: vm-max-memory limit exceeded by more than 10%% but unable to swap more objects out!");
            }
            /* Note that when using threaded I/O we free just one object,
             * because when the I/O thread in charge of swapping this
             * object out finishes, the handler of completed jobs
             * will try to swap more objects if we are still out of memory. */
            if (retval == REDIS_ERR || server.vm_max_threads > 0) break;
        }
    }

    /* Replication cron function -- used to reconnect to master and
     * to detect transfer failures. */
    if (!(loops % 10)) replicationCron();

    return 100;
}

/* This function gets called every time Redis is entering the
 * main loop of the event driven library, that is, before sleeping
 * for ready file descriptors. */
void beforeSleep(struct aeEventLoop *eventLoop) {
    REDIS_NOTUSED(eventLoop);
    listNode *ln;
    redisClient *c;

    /* Wake up clients that got all the swapped keys they requested */
    if (server.vm_enabled && listLength(server.io_ready_clients)) {
        listIter li;

        listRewind(server.io_ready_clients,&li);
        while((ln = listNext(&li))) {
            c = ln->value;
            struct redisCommand *cmd;

            /* Resume the client. */
            listDelNode(server.io_ready_clients,ln);
            c->flags &= (~REDIS_IO_WAIT);
            server.vm_blocked_clients--;
            aeCreateFileEvent(server.el, c->fd, AE_READABLE,
                readQueryFromClient, c);
            cmd = lookupCommand(c->argv[0]->ptr);
            redisAssert(cmd != NULL);
            call(c,cmd);
            resetClient(c);
            /* There may be more data to process in the input buffer.
*/ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } } /* Try to process pending commands for clients that were just unblocked. */ while (listLength(server.unblocked_clients)) { ln = listFirst(server.unblocked_clients); redisAssert(ln != NULL); c = ln->value; listDelNode(server.unblocked_clients,ln); /* Process remaining data in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } /* Write the AOF buffer on disk */ flushAppendOnlyFile(); } /* =========================== Server initialization ======================== */ void createSharedObjects(void) { int j; shared.crlf = createObject(REDIS_STRING,sdsnew("\r\n")); shared.ok = createObject(REDIS_STRING,sdsnew("+OK\r\n")); shared.err = createObject(REDIS_STRING,sdsnew("-ERR\r\n")); shared.emptybulk = createObject(REDIS_STRING,sdsnew("$0\r\n\r\n")); shared.czero = createObject(REDIS_STRING,sdsnew(":0\r\n")); shared.cone = createObject(REDIS_STRING,sdsnew(":1\r\n")); shared.cnegone = createObject(REDIS_STRING,sdsnew(":-1\r\n")); shared.nullbulk = createObject(REDIS_STRING,sdsnew("$-1\r\n")); shared.nullmultibulk = createObject(REDIS_STRING,sdsnew("*-1\r\n")); shared.emptymultibulk = createObject(REDIS_STRING,sdsnew("*0\r\n")); shared.pong = createObject(REDIS_STRING,sdsnew("+PONG\r\n")); shared.queued = createObject(REDIS_STRING,sdsnew("+QUEUED\r\n")); shared.wrongtypeerr = createObject(REDIS_STRING,sdsnew( "-ERR Operation against a key holding the wrong kind of value\r\n")); shared.nokeyerr = createObject(REDIS_STRING,sdsnew( "-ERR no such key\r\n")); shared.syntaxerr = createObject(REDIS_STRING,sdsnew( "-ERR syntax error\r\n")); shared.sameobjecterr = createObject(REDIS_STRING,sdsnew( "-ERR source and destination objects are the same\r\n")); shared.outofrangeerr = createObject(REDIS_STRING,sdsnew( "-ERR index out of range\r\n")); shared.loadingerr = createObject(REDIS_STRING,sdsnew( "-LOADING Redis is loading the dataset in memory\r\n")); shared.space = createObject(REDIS_STRING,sdsnew(" ")); shared.colon = createObject(REDIS_STRING,sdsnew(":")); shared.plus = createObject(REDIS_STRING,sdsnew("+")); shared.select0 = createStringObject("select 0\r\n",10); shared.select1 = createStringObject("select 1\r\n",10); shared.select2 = createStringObject("select 2\r\n",10); shared.select3 = createStringObject("select 3\r\n",10); shared.select4 = createStringObject("select 4\r\n",10); shared.select5 = createStringObject("select 5\r\n",10); shared.select6 = createStringObject("select 6\r\n",10); shared.select7 = createStringObject("select 7\r\n",10); shared.select8 = createStringObject("select 8\r\n",10); shared.select9 = createStringObject("select 9\r\n",10); shared.messagebulk = createStringObject("$7\r\nmessage\r\n",13); shared.pmessagebulk = createStringObject("$8\r\npmessage\r\n",14); shared.subscribebulk = createStringObject("$9\r\nsubscribe\r\n",15); shared.unsubscribebulk = createStringObject("$11\r\nunsubscribe\r\n",18); shared.psubscribebulk = createStringObject("$10\r\npsubscribe\r\n",17); shared.punsubscribebulk = createStringObject("$12\r\npunsubscribe\r\n",19); shared.mbulk3 = createStringObject("*3\r\n",4); shared.mbulk4 = createStringObject("*4\r\n",4); for (j = 0; j < REDIS_SHARED_INTEGERS; j++) { shared.integers[j] = createObject(REDIS_STRING,(void*)(long)j); shared.integers[j]->encoding = REDIS_ENCODING_INT; } } void initServerConfig() { server.port = REDIS_SERVERPORT; server.bindaddr = NULL; server.unixsocket = NULL; server.ipfd = -1; server.sofd = -1; server.dbnum = 
REDIS_DEFAULT_DBNUM;
    server.verbosity = REDIS_VERBOSE;
    server.maxidletime = REDIS_MAXIDLETIME;
    server.saveparams = NULL;
    server.loading = 0;
    server.logfile = NULL; /* NULL = log on standard output */
    server.syslog_enabled = 0;
    server.syslog_ident = zstrdup("redis");
    server.syslog_facility = LOG_LOCAL0;
    server.glueoutputbuf = 1;
    server.daemonize = 0;
    server.appendonly = 0;
    server.appendfsync = APPENDFSYNC_EVERYSEC;
    server.no_appendfsync_on_rewrite = 0;
    server.lastfsync = time(NULL);
    server.appendfd = -1;
    server.appendseldb = -1; /* Make sure the first time will not match */
    server.pidfile = zstrdup("/var/run/redis.pid");
    server.dbfilename = zstrdup("dump.rdb");
    server.appendfilename = zstrdup("appendonly.aof");
    server.requirepass = NULL;
    server.rdbcompression = 1;
    server.activerehashing = 1;
    server.maxclients = 0;
    server.bpop_blocked_clients = 0;
    server.maxmemory = 0;
    server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_LRU;
    server.maxmemory_samples = 3;
    server.vm_enabled = 0;
    server.vm_swap_file = zstrdup("/tmp/redis-%p.vm");
    server.vm_page_size = 256;       /* 256 bytes per page */
    server.vm_pages = 1024*1024*100; /* ~104 million pages */
    server.vm_max_memory = 1024LL*1024*1024*1; /* 1 GB of RAM */
    server.vm_max_threads = 4;
    server.vm_blocked_clients = 0;
    server.hash_max_zipmap_entries = REDIS_HASH_MAX_ZIPMAP_ENTRIES;
    server.hash_max_zipmap_value = REDIS_HASH_MAX_ZIPMAP_VALUE;
    server.list_max_ziplist_entries = REDIS_LIST_MAX_ZIPLIST_ENTRIES;
    server.list_max_ziplist_value = REDIS_LIST_MAX_ZIPLIST_VALUE;
    server.set_max_intset_entries = REDIS_SET_MAX_INTSET_ENTRIES;
    server.shutdown_asap = 0;

    updateLRUClock();
    resetServerSaveParams();

    appendServerSaveParams(60*60,1);  /* save after 1 hour and 1 change */
    appendServerSaveParams(300,100);  /* save after 5 minutes and 100 changes */
    appendServerSaveParams(60,10000); /* save after 1 minute and 10000 changes */
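    /* Editor's note: these defaults correspond to the familiar redis.conf
     * snapshot directives, i.e. the trio above is equivalent to:
     *
     *   save 3600 1
     *   save 300 100
     *   save 60 10000
     *
     * (arguments are <seconds> <changes>, as the comments above indicate). */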
    /* Replication related */
    server.isslave = 0;
    server.masterauth = NULL;
    server.masterhost = NULL;
    server.masterport = 6379;
    server.master = NULL;
    server.replstate = REDIS_REPL_NONE;
    server.repl_serve_stale_data = 1;

    /* Double constants initialization */
    R_Zero = 0.0;
    R_PosInf = 1.0/R_Zero;
    R_NegInf = -1.0/R_Zero;
    R_Nan = R_Zero/R_Zero;

    /* Command table -- we initialize it here as it is part of the
     * initial configuration, since command names may be changed via
     * redis.conf using the rename-command directive. */
    server.commands = dictCreate(&commandTableDictType,NULL);
    populateCommandTable();
    server.delCommand = lookupCommandByCString("del");
    server.multiCommand = lookupCommandByCString("multi");
}

void initServer() {
    int j;

    signal(SIGHUP, SIG_IGN);
    signal(SIGPIPE, SIG_IGN);
    setupSigSegvAction();

    if (server.syslog_enabled) {
        openlog(server.syslog_ident, LOG_PID | LOG_NDELAY | LOG_NOWAIT,
            server.syslog_facility);
    }

    server.mainthread = pthread_self();
    server.clients = listCreate();
    server.slaves = listCreate();
    server.monitors = listCreate();
    server.unblocked_clients = listCreate();
    createSharedObjects();
    server.el = aeCreateEventLoop();
    server.db = zmalloc(sizeof(redisDb)*server.dbnum);
    server.ipfd = anetTcpServer(server.neterr,server.port,server.bindaddr);
    if (server.ipfd == ANET_ERR) {
        redisLog(REDIS_WARNING, "Opening port: %s", server.neterr);
        exit(1);
    }
    if (server.unixsocket != NULL) {
        unlink(server.unixsocket); /* don't care if this fails */
        server.sofd = anetUnixServer(server.neterr,server.unixsocket);
        if (server.sofd == ANET_ERR) {
            redisLog(REDIS_WARNING, "Opening socket: %s", server.neterr);
            exit(1);
        }
    }
    if (server.ipfd < 0 && server.sofd < 0) {
        redisLog(REDIS_WARNING, "Configured to not listen anywhere, exiting.");
        exit(1);
    }
    for (j = 0; j < server.dbnum; j++) {
        server.db[j].dict = dictCreate(&dbDictType,NULL);
        server.db[j].expires = dictCreate(&keyptrDictType,NULL);
        server.db[j].blocking_keys = dictCreate(&keylistDictType,NULL);
        server.db[j].watched_keys = dictCreate(&keylistDictType,NULL);
        if (server.vm_enabled)
            server.db[j].io_keys = dictCreate(&keylistDictType,NULL);
        server.db[j].id = j;
    }
    server.pubsub_channels = dictCreate(&keylistDictType,NULL);
    server.pubsub_patterns = listCreate();
    listSetFreeMethod(server.pubsub_patterns,freePubsubPattern);
    listSetMatchMethod(server.pubsub_patterns,listMatchPubsubPattern);
    server.cronloops = 0;
    server.bgsavechildpid = -1;
    server.bgrewritechildpid = -1;
    server.bgrewritebuf = sdsempty();
    server.aofbuf = sdsempty();
    server.lastsave = time(NULL);
    server.dirty = 0;
    server.stat_numcommands = 0;
    server.stat_numconnections = 0;
    server.stat_expiredkeys = 0;
    server.stat_evictedkeys = 0;
    server.stat_starttime = time(NULL);
    server.stat_keyspace_misses = 0;
    server.stat_keyspace_hits = 0;
    server.unixtime = time(NULL);
    aeCreateTimeEvent(server.el, 1, serverCron, NULL, NULL);
    if (server.ipfd > 0 && aeCreateFileEvent(server.el,server.ipfd,AE_READABLE,
        acceptTcpHandler,NULL) == AE_ERR) oom("creating file event");
    if (server.sofd > 0 && aeCreateFileEvent(server.el,server.sofd,AE_READABLE,
        acceptUnixHandler,NULL) == AE_ERR) oom("creating file event");

    if (server.appendonly) {
        server.appendfd = open(server.appendfilename,O_WRONLY|O_APPEND|O_CREAT,0644);
        if (server.appendfd == -1) {
            redisLog(REDIS_WARNING, "Can't open the append-only file: %s",
                strerror(errno));
            exit(1);
        }
    }

    if (server.vm_enabled) vmInit();
}
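/* Editor's note on the flow set up above: main() (below) calls initServer()
 * and then hands control to aeMain(), so everything after startup is driven
 * by the event loop created here. The accept handlers registered on
 * ipfd/sofd feed new clients in, and the timer registered with
 * aeCreateTimeEvent() keeps serverCron() running. */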
/* Populates the Redis Command Table, starting from the hard-coded list
 * at the top of this file. */
void populateCommandTable(void) {
    int j;
    int numcommands = sizeof(readonlyCommandTable)/sizeof(struct redisCommand);

    for (j = 0; j < numcommands; j++) {
        struct redisCommand *c = readonlyCommandTable+j;
        int retval;

        retval = dictAdd(server.commands, sdsnew(c->name), c);
        assert(retval == DICT_OK);
    }
}

/* ====================== Commands lookup and execution ===================== */

struct redisCommand *lookupCommand(sds name) {
    return dictFetchValue(server.commands, name);
}

struct redisCommand *lookupCommandByCString(char *s) {
    struct redisCommand *cmd;
    sds name = sdsnew(s);

    cmd = dictFetchValue(server.commands, name);
    sdsfree(name);
    return cmd;
}

/* Call() is the core of Redis execution of a command */
void call(redisClient *c, struct redisCommand *cmd) {
    long long dirty;

    dirty = server.dirty;
    cmd->proc(c);
    dirty = server.dirty-dirty;

    if (server.appendonly && dirty)
        feedAppendOnlyFile(cmd,c->db->id,c->argv,c->argc);
    if ((dirty || cmd->flags & REDIS_CMD_FORCE_REPLICATION) &&
        listLength(server.slaves))
        replicationFeedSlaves(server.slaves,c->db->id,c->argv,c->argc);
    if (listLength(server.monitors))
        replicationFeedMonitors(server.monitors,c->db->id,c->argv,c->argc);
    server.stat_numcommands++;
}

/* If this function gets called we already read a whole
 * command, and the arguments are in the client argv/argc fields.
 * processCommand() executes the command or prepares the
 * server for a bulk read from the client.
 *
 * If 1 is returned the client is still alive and valid and
 * other operations can be performed by the caller. Otherwise
 * if 0 is returned the client was destroyed (i.e. after QUIT). */
int processCommand(redisClient *c) {
    struct redisCommand *cmd;

    /* The QUIT command is handled separately. Normal command procs will
     * go through checking for replication and QUIT will cause trouble
     * when FORCE_REPLICATION is enabled and would be implemented in
     * a regular command proc. */
    if (!strcasecmp(c->argv[0]->ptr,"quit")) {
        addReply(c,shared.ok);
        c->flags |= REDIS_CLOSE_AFTER_REPLY;
        return REDIS_ERR;
    }

    /* Now lookup the command and check ASAP about trivial error conditions
     * such as wrong arity, bad command name and so forth. */
    cmd = lookupCommand(c->argv[0]->ptr);
    if (!cmd) {
        addReplyErrorFormat(c,"unknown command '%s'",
            (char*)c->argv[0]->ptr);
        return REDIS_OK;
    } else if ((cmd->arity > 0 && cmd->arity != c->argc) ||
               (c->argc < -cmd->arity)) {
        addReplyErrorFormat(c,"wrong number of arguments for '%s' command",
            cmd->name);
        return REDIS_OK;
    }
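    /* Arity convention (editor's sketch of the check above): a positive
     * arity demands an exact argc, a negative arity demands at least -arity
     * arguments. For example GET is declared with arity 2 in the command
     * table, so "GET k v" is rejected, while DEL's arity of -2 accepts
     * "DEL k1 k2 k3". */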
    /* Check if the user is authenticated */
    if (server.requirepass && !c->authenticated && cmd->proc != authCommand) {
        addReplyError(c,"operation not permitted");
        return REDIS_OK;
    }

    /* Handle the maxmemory directive.
     *
     * First we try to free some memory if possible (if there are volatile
     * keys in the dataset). If there are none, the only thing we can do
     * is return an error. */
    if (server.maxmemory) freeMemoryIfNeeded();
    if (server.maxmemory && (cmd->flags & REDIS_CMD_DENYOOM) &&
        zmalloc_used_memory() > server.maxmemory)
    {
        addReplyError(c,"command not allowed when used memory > 'maxmemory'");
        return REDIS_OK;
    }

    /* Only allow SUBSCRIBE and UNSUBSCRIBE in the context of Pub/Sub */
    if ((dictSize(c->pubsub_channels) > 0 || listLength(c->pubsub_patterns) > 0)
        &&
        cmd->proc != subscribeCommand && cmd->proc != unsubscribeCommand &&
        cmd->proc != psubscribeCommand && cmd->proc != punsubscribeCommand) {
        addReplyError(c,"only (P)SUBSCRIBE / (P)UNSUBSCRIBE / QUIT allowed in this context");
        return REDIS_OK;
    }

    /* Only allow INFO and SLAVEOF when slave-serve-stale-data is no and
     * we are a slave with a broken link with master. */
    if (server.masterhost && server.replstate != REDIS_REPL_CONNECTED &&
        server.repl_serve_stale_data == 0 &&
        cmd->proc != infoCommand && cmd->proc != slaveofCommand)
    {
        addReplyError(c,
            "link with MASTER is down and slave-serve-stale-data is set to no");
        return REDIS_OK;
    }

    /* Loading DB? Return an error if the command is not INFO */
    if (server.loading && cmd->proc != infoCommand) {
        addReply(c, shared.loadingerr);
        return REDIS_OK;
    }

    /* Exec the command */
    if (c->flags & REDIS_MULTI &&
        cmd->proc != execCommand && cmd->proc != discardCommand &&
        cmd->proc != multiCommand && cmd->proc != watchCommand)
    {
        queueMultiCommand(c,cmd);
        addReply(c,shared.queued);
    } else {
        if (server.vm_enabled && server.vm_max_threads > 0 &&
            blockClientOnSwappedKeys(c,cmd)) return REDIS_ERR;
        call(c,cmd);
    }
    return REDIS_OK;
}

/*================================== Shutdown =============================== */

int prepareForShutdown() {
    redisLog(REDIS_WARNING,"User requested shutdown, saving DB...");
    /* Kill the saving child if there is a background saving in progress.
       We want to avoid race conditions, for instance our saving child may
       overwrite the synchronous saving done by SHUTDOWN. */
    if (server.bgsavechildpid != -1) {
        redisLog(REDIS_WARNING,"There is a live saving child. Killing it!");
        kill(server.bgsavechildpid,SIGKILL);
        rdbRemoveTempFile(server.bgsavechildpid);
    }
    if (server.appendonly) {
        /* Append only file: fsync() the AOF and exit */
        aof_fsync(server.appendfd);
        if (server.vm_enabled) unlink(server.vm_swap_file);
    } else if (server.saveparamslen > 0) {
        /* Snapshotting. Perform a SYNC SAVE and exit */
        if (rdbSave(server.dbfilename) != REDIS_OK) {
            /* Ooops.. error saving! The best we can do is to continue
             * operating. Note that if there was a background saving process,
             * in the next cron() Redis will be notified that the background
             * saving aborted, handling special stuff like slaves pending for
             * synchronization... */
            redisLog(REDIS_WARNING,"Error trying to save the DB, can't exit");
            return REDIS_ERR;
        }
    } else {
        redisLog(REDIS_WARNING,"Not saving DB.");
    }
    if (server.daemonize) unlink(server.pidfile);
    redisLog(REDIS_WARNING,"Server exit now, bye bye...");
    return REDIS_OK;
}

/*================================== Commands =============================== */

void authCommand(redisClient *c) {
    if (!server.requirepass || !strcmp(c->argv[1]->ptr, server.requirepass)) {
      c->authenticated = 1;
      addReply(c,shared.ok);
    } else {
      c->authenticated = 0;
      addReplyError(c,"invalid password");
    }
}

void pingCommand(redisClient *c) {
    addReply(c,shared.pong);
}

void echoCommand(redisClient *c) {
    addReplyBulk(c,c->argv[1]);
}

/* Convert a number of bytes into a human readable string in the form
 * of 100B, 2G, 100M, 4K, and so forth.
*/
void bytesToHuman(char *s, unsigned long long n) {
    double d;

    if (n < 1024) {
        /* Bytes */
        sprintf(s,"%lluB",n);
        return;
    } else if (n < (1024*1024)) {
        d = (double)n/(1024);
        sprintf(s,"%.2fK",d);
    } else if (n < (1024LL*1024*1024)) {
        d = (double)n/(1024*1024);
        sprintf(s,"%.2fM",d);
    } else if (n < (1024LL*1024*1024*1024)) {
        d = (double)n/(1024LL*1024*1024);
        sprintf(s,"%.2fG",d);
    } else {
        /* Fall back to raw bytes for n >= 1024 GB so that s is always
         * initialized (previously this branch was missing and the buffer
         * was left untouched). */
        sprintf(s,"%lluB",n);
    }
}

/* Create the string returned by the INFO command. This is decoupled
 * from the INFO command itself as we need to report the same information
 * on memory corruption problems. */
sds genRedisInfoString(void) {
    sds info;
    time_t uptime = time(NULL)-server.stat_starttime;
    int j;
    char hmem[64];
    struct rusage self_ru, c_ru;

    getrusage(RUSAGE_SELF, &self_ru);
    getrusage(RUSAGE_CHILDREN, &c_ru);

    bytesToHuman(hmem,zmalloc_used_memory());
    info = sdscatprintf(sdsempty(),
        "redis_version:%s\r\n"
        "redis_git_sha1:%s\r\n"
        "redis_git_dirty:%d\r\n"
        "arch_bits:%s\r\n"
        "multiplexing_api:%s\r\n"
        "process_id:%ld\r\n"
        "uptime_in_seconds:%ld\r\n"
        "uptime_in_days:%ld\r\n"
        "lru_clock:%ld\r\n"
        "used_cpu_sys:%.2f\r\n"
        "used_cpu_user:%.2f\r\n"
        "used_cpu_sys_childrens:%.2f\r\n"
        "used_cpu_user_childrens:%.2f\r\n"
        "connected_clients:%d\r\n"
        "connected_slaves:%d\r\n"
        "blocked_clients:%d\r\n"
        "used_memory:%zu\r\n"
        "used_memory_human:%s\r\n"
        "used_memory_rss:%zu\r\n"
        "mem_fragmentation_ratio:%.2f\r\n"
        "use_tcmalloc:%d\r\n"
        "loading:%d\r\n"
        "aof_enabled:%d\r\n"
        "changes_since_last_save:%lld\r\n"
        "bgsave_in_progress:%d\r\n"
        "last_save_time:%ld\r\n"
        "bgrewriteaof_in_progress:%d\r\n"
        "total_connections_received:%lld\r\n"
        "total_commands_processed:%lld\r\n"
        "expired_keys:%lld\r\n"
        "evicted_keys:%lld\r\n"
        "keyspace_hits:%lld\r\n"
        "keyspace_misses:%lld\r\n"
        "hash_max_zipmap_entries:%zu\r\n"
        "hash_max_zipmap_value:%zu\r\n"
        "pubsub_channels:%ld\r\n"
        "pubsub_patterns:%u\r\n"
        "vm_enabled:%d\r\n"
        "role:%s\r\n"
        ,REDIS_VERSION,
        redisGitSHA1(),
        strtol(redisGitDirty(),NULL,10) > 0,
        (sizeof(long) == 8) ? "64" : "32",
        aeGetApiName(),
        (long) getpid(),
        uptime,
        uptime/(3600*24),
        (unsigned long) server.lruclock,
        (float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000,
        (float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000,
        (float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000,
        (float)c_ru.ru_stime.tv_sec+(float)c_ru.ru_stime.tv_usec/1000000,
        listLength(server.clients)-listLength(server.slaves),
        listLength(server.slaves),
        server.bpop_blocked_clients,
        zmalloc_used_memory(),
        hmem,
        zmalloc_get_rss(),
        zmalloc_get_fragmentation_ratio(),
#ifdef USE_TCMALLOC
        1,
#else
        0,
#endif
        server.loading,
        server.appendonly,
        server.dirty,
        server.bgsavechildpid != -1,
        server.lastsave,
        server.bgrewritechildpid != -1,
        server.stat_numconnections,
        server.stat_numcommands,
        server.stat_expiredkeys,
        server.stat_evictedkeys,
        server.stat_keyspace_hits,
        server.stat_keyspace_misses,
        server.hash_max_zipmap_entries,
        server.hash_max_zipmap_value,
        dictSize(server.pubsub_channels),
        listLength(server.pubsub_patterns),
        server.vm_enabled != 0,
        server.masterhost == NULL ? "master" : "slave"
    );
    if (server.masterhost) {
        info = sdscatprintf(info,
            "master_host:%s\r\n"
            "master_port:%d\r\n"
            "master_link_status:%s\r\n"
            "master_last_io_seconds_ago:%d\r\n"
            "master_sync_in_progress:%d\r\n"
            ,server.masterhost,
            server.masterport,
            (server.replstate == REDIS_REPL_CONNECTED) ?
                "up" : "down",
            server.master ?
((int)(time(NULL)-server.master->lastinteraction)) : -1,
            server.replstate == REDIS_REPL_TRANSFER
        );
        if (server.replstate == REDIS_REPL_TRANSFER) {
            info = sdscatprintf(info,
                "master_sync_left_bytes:%ld\r\n"
                "master_sync_last_io_seconds_ago:%d\r\n"
                ,(long)server.repl_transfer_left,
                (int)(time(NULL)-server.repl_transfer_lastio)
            );
        }
    }
    if (server.vm_enabled) {
        lockThreadedIO();
        info = sdscatprintf(info,
            "vm_conf_max_memory:%llu\r\n"
            "vm_conf_page_size:%llu\r\n"
            "vm_conf_pages:%llu\r\n"
            "vm_stats_used_pages:%llu\r\n"
            "vm_stats_swapped_objects:%llu\r\n"
            "vm_stats_swappin_count:%llu\r\n"
            "vm_stats_swappout_count:%llu\r\n"
            "vm_stats_io_newjobs_len:%lu\r\n"
            "vm_stats_io_processing_len:%lu\r\n"
            "vm_stats_io_processed_len:%lu\r\n"
            "vm_stats_io_active_threads:%lu\r\n"
            "vm_stats_blocked_clients:%lu\r\n"
            ,(unsigned long long) server.vm_max_memory,
            (unsigned long long) server.vm_page_size,
            (unsigned long long) server.vm_pages,
            (unsigned long long) server.vm_stats_used_pages,
            (unsigned long long) server.vm_stats_swapped_objects,
            (unsigned long long) server.vm_stats_swapins,
            (unsigned long long) server.vm_stats_swapouts,
            (unsigned long) listLength(server.io_newjobs),
            (unsigned long) listLength(server.io_processing),
            (unsigned long) listLength(server.io_processed),
            (unsigned long) server.io_active_threads,
            (unsigned long) server.vm_blocked_clients
        );
        unlockThreadedIO();
    }
    if (server.loading) {
        double perc;
        time_t eta, elapsed;
        off_t remaining_bytes = server.loading_total_bytes-
                                server.loading_loaded_bytes;

        perc = ((double)server.loading_loaded_bytes /
               server.loading_total_bytes) * 100;

        elapsed = time(NULL)-server.loading_start_time;
        if (elapsed == 0) {
            eta = 1; /* A fake 1 second figure if we don't have enough info */
        } else {
            eta = (elapsed*remaining_bytes)/server.loading_loaded_bytes;
        }

        info = sdscatprintf(info,
            "loading_start_time:%ld\r\n"
            "loading_total_bytes:%llu\r\n"
            "loading_loaded_bytes:%llu\r\n"
            "loading_loaded_perc:%.2f\r\n"
            "loading_eta_seconds:%ld\r\n"
            ,(unsigned long) server.loading_start_time,
            (unsigned long long) server.loading_total_bytes,
            (unsigned long long) server.loading_loaded_bytes,
            perc,
            eta
        );
    }
    for (j = 0; j < server.dbnum; j++) {
        long long keys, vkeys;

        keys = dictSize(server.db[j].dict);
        vkeys = dictSize(server.db[j].expires);
        if (keys || vkeys) {
            info = sdscatprintf(info, "db%d:keys=%lld,expires=%lld\r\n",
                j, keys, vkeys);
        }
    }
    return info;
}

void infoCommand(redisClient *c) {
    sds info = genRedisInfoString();
    addReplySds(c,sdscatprintf(sdsempty(),"$%lu\r\n",
        (unsigned long)sdslen(info)));
    addReplySds(c,info);
    addReply(c,shared.crlf);
}

void monitorCommand(redisClient *c) {
    /* ignore MONITOR if already a slave or in monitor mode */
    if (c->flags & REDIS_SLAVE) return;

    c->flags |= (REDIS_SLAVE|REDIS_MONITOR);
    c->slaveseldb = 0;
    listAddNodeTail(server.monitors,c);
    addReply(c,shared.ok);
}

/* ============================ Maxmemory directive ======================== */

/* This function gets called when 'maxmemory' is set in the config file to
 * limit the max memory used by the server, and we are out of memory.
 * This function will try to, in order:
 *
 * - Free objects from the free list
 * - Try to remove keys with an EXPIRE set
 *
 * If it is not possible to free enough memory to reach used-memory <
 * maxmemory, the server will start refusing commands that would enlarge
 * the memory usage even more.
 */
void freeMemoryIfNeeded(void) {
    /* Remove keys according to the active policy as long as we are
     * over the memory limit. */
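    /* Editor's sketch of the sampling below: under the LRU and TTL policies
     * each pass draws server.maxmemory_samples random keys (3 by default,
     * see initServerConfig()) and evicts the best candidate among them --
     * highest estimateObjectIdleTime() for *-lru, smallest expire time for
     * volatile-ttl -- so eviction is approximate rather than a strict
     * global LRU/TTL order. */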
    if (server.maxmemory_policy == REDIS_MAXMEMORY_NO_EVICTION) return;

    while (server.maxmemory && zmalloc_used_memory() > server.maxmemory) {
        int j, k, freed = 0;

        for (j = 0; j < server.dbnum; j++) {
            long bestval = 0; /* just to prevent warning */
            sds bestkey = NULL;
            struct dictEntry *de;
            redisDb *db = server.db+j;
            dict *dict;

            if (server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_LRU ||
                server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_RANDOM)
            {
                dict = server.db[j].dict;
            } else {
                dict = server.db[j].expires;
            }
            if (dictSize(dict) == 0) continue;

            /* volatile-random and allkeys-random policy */
            if (server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_RANDOM ||
                server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_RANDOM)
            {
                de = dictGetRandomKey(dict);
                bestkey = dictGetEntryKey(de);
            }

            /* volatile-lru and allkeys-lru policy */
            else if (server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_LRU ||
                server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_LRU)
            {
                for (k = 0; k < server.maxmemory_samples; k++) {
                    sds thiskey;
                    long thisval;
                    robj *o;

                    de = dictGetRandomKey(dict);
                    thiskey = dictGetEntryKey(de);
                    /* When policy is volatile-lru we need an additional
                     * lookup to locate the real key, as dict is set to
                     * db->expires. */
                    if (server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_LRU)
                        de = dictFind(db->dict, thiskey);
                    o = dictGetEntryVal(de);
                    thisval = estimateObjectIdleTime(o);

                    /* Higher idle time is a better candidate for deletion */
                    if (bestkey == NULL || thisval > bestval) {
                        bestkey = thiskey;
                        bestval = thisval;
                    }
                }
            }

            /* volatile-ttl */
            else if (server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_TTL) {
                for (k = 0; k < server.maxmemory_samples; k++) {
                    sds thiskey;
                    long thisval;

                    de = dictGetRandomKey(dict);
                    thiskey = dictGetEntryKey(de);
                    thisval = (long) dictGetEntryVal(de);

                    /* Keys expiring sooner (smaller expire unix timestamp)
                     * are better candidates for deletion */
                    if (bestkey == NULL || thisval < bestval) {
                        bestkey = thiskey;
                        bestval = thisval;
                    }
                }
            }

            /* Finally remove the selected key. */
            if (bestkey) {
                robj *keyobj = createStringObject(bestkey,sdslen(bestkey));
                dbDelete(db,keyobj);
                server.stat_evictedkeys++;
                decrRefCount(keyobj);
                freed++;
            }
        }
        if (!freed) return; /* nothing to free... */
    }
}

/* =================================== Main! ================================ */

#ifdef __linux__
int linuxOvercommitMemoryValue(void) {
    FILE *fp = fopen("/proc/sys/vm/overcommit_memory","r");
    char buf[64];

    if (!fp) return -1;
    if (fgets(buf,64,fp) == NULL) {
        fclose(fp);
        return -1;
    }
    fclose(fp);

    return atoi(buf);
}

void linuxOvercommitMemoryWarning(void) {
    if (linuxOvercommitMemoryValue() == 0) {
        redisLog(REDIS_WARNING,"WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.");
    }
}
#endif /* __linux__ */

void createPidFile(void) {
    /* Try to write the pid file in a best-effort way. */
    FILE *fp = fopen(server.pidfile,"w");
    if (fp) {
        fprintf(fp,"%d\n",getpid());
        fclose(fp);
    }
}

void daemonize(void) {
    int fd;

    if (fork() != 0) exit(0); /* parent exits */
    setsid(); /* create a new session */

    /* All output goes to /dev/null. If Redis is daemonized but
     * the 'logfile' is set to 'stdout' in the configuration file
     * it will not log at all.
*/ if ((fd = open("/dev/null", O_RDWR, 0)) != -1) { dup2(fd, STDIN_FILENO); dup2(fd, STDOUT_FILENO); dup2(fd, STDERR_FILENO); if (fd > STDERR_FILENO) close(fd); } } void version() { printf("Redis server version %s (%s:%d)\n", REDIS_VERSION, redisGitSHA1(), atoi(redisGitDirty()) > 0); exit(0); } void usage() { fprintf(stderr,"Usage: ./redis-server [/path/to/redis.conf]\n"); fprintf(stderr," ./redis-server - (read config from stdin)\n"); exit(1); } int main(int argc, char **argv) { time_t start; initServerConfig(); if (argc == 2) { if (strcmp(argv[1], "-v") == 0 || strcmp(argv[1], "--version") == 0) version(); if (strcmp(argv[1], "--help") == 0) usage(); resetServerSaveParams(); loadServerConfig(argv[1]); } else if ((argc > 2)) { usage(); } else { redisLog(REDIS_WARNING,"Warning: no config file specified, using the default config. In order to specify a config file use 'redis-server /path/to/redis.conf'"); } if (server.daemonize) daemonize(); initServer(); if (server.daemonize) createPidFile(); redisLog(REDIS_NOTICE,"Server started, Redis version " REDIS_VERSION); #ifdef __linux__ linuxOvercommitMemoryWarning(); #endif start = time(NULL); if (server.appendonly) { if (loadAppendOnlyFile(server.appendfilename) == REDIS_OK) redisLog(REDIS_NOTICE,"DB loaded from append only file: %ld seconds",time(NULL)-start); } else { if (rdbLoad(server.dbfilename) == REDIS_OK) redisLog(REDIS_NOTICE,"DB loaded from disk: %ld seconds",time(NULL)-start); } if (server.ipfd > 0) redisLog(REDIS_NOTICE,"The server is now ready to accept connections on port %d", server.port); if (server.sofd > 0) redisLog(REDIS_NOTICE,"The server is now ready to accept connections at %s", server.unixsocket); aeSetBeforeSleepProc(server.el,beforeSleep); aeMain(server.el); aeDeleteEventLoop(server.el); return 0; } /* ============================= Backtrace support ========================= */ #ifdef HAVE_BACKTRACE void *getMcontextEip(ucontext_t *uc) { #if defined(__FreeBSD__) return (void*) uc->uc_mcontext.mc_eip; #elif defined(__dietlibc__) return (void*) uc->uc_mcontext.eip; #elif defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6) #if __x86_64__ return (void*) uc->uc_mcontext->__ss.__rip; #else return (void*) uc->uc_mcontext->__ss.__eip; #endif #elif defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6) #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__) return (void*) uc->uc_mcontext->__ss.__rip; #else return (void*) uc->uc_mcontext->__ss.__eip; #endif #elif defined(__i386__) return (void*) uc->uc_mcontext.gregs[14]; /* Linux 32 */ #elif defined(__X86_64__) || defined(__x86_64__) return (void*) uc->uc_mcontext.gregs[16]; /* Linux 64 */ #elif defined(__ia64__) /* Linux IA64 */ return (void*) uc->uc_mcontext.sc_ip; #else return NULL; #endif } void segvHandler(int sig, siginfo_t *info, void *secret) { void *trace[100]; char **messages = NULL; int i, trace_size = 0; ucontext_t *uc = (ucontext_t*) secret; sds infostring; struct sigaction act; REDIS_NOTUSED(info); redisLog(REDIS_WARNING, "======= Ooops! Redis %s got signal: -%d- =======", REDIS_VERSION, sig); infostring = genRedisInfoString(); redisLog(REDIS_WARNING, "%s",infostring); /* It's not safe to sdsfree() the returned string under memory * corruption conditions. 
Let it leak as we are going to abort. */
    trace_size = backtrace(trace, 100);
    /* overwrite sigaction with caller's address */
    if (getMcontextEip(uc) != NULL) {
        trace[1] = getMcontextEip(uc);
    }
    messages = backtrace_symbols(trace, trace_size);

    for (i=1; i<trace_size; ++i)
        redisLog(REDIS_WARNING,"%s", messages[i]);

    /* free(messages); Don't call free() with possibly corrupted memory. */
    if (server.daemonize) unlink(server.pidfile);

    /* Make sure we exit with the right signal at the end. So for instance
     * the core will be dumped if enabled. */
    sigemptyset (&act.sa_mask);
    /* When the SA_SIGINFO flag is set in sa_flags then sa_sigaction
     * is used. Otherwise, sa_handler is used */
    act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND;
    act.sa_handler = SIG_DFL;
    sigaction (sig, &act, NULL);
    kill(getpid(),sig);
}

void sigtermHandler(int sig) {
    REDIS_NOTUSED(sig);

    redisLog(REDIS_WARNING,"SIGTERM received, scheduling shutdown...");
    server.shutdown_asap = 1;
}

void setupSigSegvAction(void) {
    struct sigaction act;

    sigemptyset (&act.sa_mask);
    /* When the SA_SIGINFO flag is set in sa_flags then sa_sigaction
     * is used. Otherwise, sa_handler is used */
    act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND | SA_SIGINFO;
    act.sa_sigaction = segvHandler;
    sigaction (SIGSEGV, &act, NULL);
    sigaction (SIGBUS, &act, NULL);
    sigaction (SIGFPE, &act, NULL);
    sigaction (SIGILL, &act, NULL);

    act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND;
    act.sa_handler = sigtermHandler;
    sigaction (SIGTERM, &act, NULL);
    return;
}
#else /* HAVE_BACKTRACE */
void setupSigSegvAction(void) {
}
#endif /* HAVE_BACKTRACE */

/* The End */
/* * Copyright (c) 2009-2010, Salvatore Sanfilippo <antirez at gmail dot com> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include "redis.h" #ifdef HAVE_BACKTRACE #include <execinfo.h> #include <ucontext.h> #endif /* HAVE_BACKTRACE */ #include <time.h> #include <signal.h> #include <sys/wait.h> #include <errno.h> #include <assert.h> #include <ctype.h> #include <stdarg.h> #include <arpa/inet.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/uio.h> #include <limits.h> #include <float.h> #include <math.h> #include <pthread.h> #include <sys/resource.h> /* Our shared "common" objects */ struct sharedObjectsStruct shared; /* Global vars that are actally used as constants. The following double * values are used for double on-disk serialization, and are initialized * at runtime to avoid strange compiler optimizations. 
*/ double R_Zero, R_PosInf, R_NegInf, R_Nan; /*================================= Globals ================================= */ /* Global vars */ struct redisServer server; /* server global state */ struct redisCommand *commandTable; struct redisCommand readonlyCommandTable[] = { {"get",getCommand,2,0,NULL,1,1,1}, {"set",setCommand,3,REDIS_CMD_DENYOOM,NULL,0,0,0}, {"setnx",setnxCommand,3,REDIS_CMD_DENYOOM,NULL,0,0,0}, {"setex",setexCommand,4,REDIS_CMD_DENYOOM,NULL,0,0,0}, {"append",appendCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"strlen",strlenCommand,2,0,NULL,1,1,1}, {"del",delCommand,-2,0,NULL,0,0,0}, {"exists",existsCommand,2,0,NULL,1,1,1}, {"setbit",setbitCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"getbit",getbitCommand,3,0,NULL,1,1,1}, {"setrange",setrangeCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"getrange",getrangeCommand,4,0,NULL,1,1,1}, {"substr",getrangeCommand,4,0,NULL,1,1,1}, {"incr",incrCommand,2,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"decr",decrCommand,2,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"mget",mgetCommand,-2,0,NULL,1,-1,1}, {"rpush",rpushCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"lpush",lpushCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"rpushx",rpushxCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"lpushx",lpushxCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"linsert",linsertCommand,5,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"rpop",rpopCommand,2,0,NULL,1,1,1}, {"lpop",lpopCommand,2,0,NULL,1,1,1}, {"brpop",brpopCommand,-3,0,NULL,1,1,1}, {"brpoplpush",brpoplpushCommand,4,REDIS_CMD_DENYOOM,NULL,1,2,1}, {"blpop",blpopCommand,-3,0,NULL,1,1,1}, {"llen",llenCommand,2,0,NULL,1,1,1}, {"lindex",lindexCommand,3,0,NULL,1,1,1}, {"lset",lsetCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"lrange",lrangeCommand,4,0,NULL,1,1,1}, {"ltrim",ltrimCommand,4,0,NULL,1,1,1}, {"lrem",lremCommand,4,0,NULL,1,1,1}, {"rpoplpush",rpoplpushCommand,3,REDIS_CMD_DENYOOM,NULL,1,2,1}, {"sadd",saddCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"srem",sremCommand,3,0,NULL,1,1,1}, {"smove",smoveCommand,4,0,NULL,1,2,1}, {"sismember",sismemberCommand,3,0,NULL,1,1,1}, {"scard",scardCommand,2,0,NULL,1,1,1}, {"spop",spopCommand,2,0,NULL,1,1,1}, {"srandmember",srandmemberCommand,2,0,NULL,1,1,1}, {"sinter",sinterCommand,-2,REDIS_CMD_DENYOOM,NULL,1,-1,1}, {"sinterstore",sinterstoreCommand,-3,REDIS_CMD_DENYOOM,NULL,2,-1,1}, {"sunion",sunionCommand,-2,REDIS_CMD_DENYOOM,NULL,1,-1,1}, {"sunionstore",sunionstoreCommand,-3,REDIS_CMD_DENYOOM,NULL,2,-1,1}, {"sdiff",sdiffCommand,-2,REDIS_CMD_DENYOOM,NULL,1,-1,1}, {"sdiffstore",sdiffstoreCommand,-3,REDIS_CMD_DENYOOM,NULL,2,-1,1}, {"smembers",sinterCommand,2,0,NULL,1,1,1}, {"zadd",zaddCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"zincrby",zincrbyCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"zrem",zremCommand,3,0,NULL,1,1,1}, {"zremrangebyscore",zremrangebyscoreCommand,4,0,NULL,1,1,1}, {"zremrangebyrank",zremrangebyrankCommand,4,0,NULL,1,1,1}, {"zunionstore",zunionstoreCommand,-4,REDIS_CMD_DENYOOM,zunionInterBlockClientOnSwappedKeys,0,0,0}, {"zinterstore",zinterstoreCommand,-4,REDIS_CMD_DENYOOM,zunionInterBlockClientOnSwappedKeys,0,0,0}, {"zrange",zrangeCommand,-4,0,NULL,1,1,1}, {"zrangebyscore",zrangebyscoreCommand,-4,0,NULL,1,1,1}, {"zrevrangebyscore",zrevrangebyscoreCommand,-4,0,NULL,1,1,1}, {"zcount",zcountCommand,4,0,NULL,1,1,1}, {"zrevrange",zrevrangeCommand,-4,0,NULL,1,1,1}, {"zcard",zcardCommand,2,0,NULL,1,1,1}, {"zscore",zscoreCommand,3,0,NULL,1,1,1}, {"zrank",zrankCommand,3,0,NULL,1,1,1}, {"zrevrank",zrevrankCommand,3,0,NULL,1,1,1}, {"hset",hsetCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, 
{"hsetnx",hsetnxCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"hget",hgetCommand,3,0,NULL,1,1,1}, {"hmset",hmsetCommand,-4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"hmget",hmgetCommand,-3,0,NULL,1,1,1}, {"hincrby",hincrbyCommand,4,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"hdel",hdelCommand,3,0,NULL,1,1,1}, {"hlen",hlenCommand,2,0,NULL,1,1,1}, {"hkeys",hkeysCommand,2,0,NULL,1,1,1}, {"hvals",hvalsCommand,2,0,NULL,1,1,1}, {"hgetall",hgetallCommand,2,0,NULL,1,1,1}, {"hexists",hexistsCommand,3,0,NULL,1,1,1}, {"incrby",incrbyCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"decrby",decrbyCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"getset",getsetCommand,3,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"mset",msetCommand,-3,REDIS_CMD_DENYOOM,NULL,1,-1,2}, {"msetnx",msetnxCommand,-3,REDIS_CMD_DENYOOM,NULL,1,-1,2}, {"randomkey",randomkeyCommand,1,0,NULL,0,0,0}, {"select",selectCommand,2,0,NULL,0,0,0}, {"move",moveCommand,3,0,NULL,1,1,1}, {"rename",renameCommand,3,0,NULL,1,1,1}, {"renamenx",renamenxCommand,3,0,NULL,1,1,1}, {"expire",expireCommand,3,0,NULL,0,0,0}, {"expireat",expireatCommand,3,0,NULL,0,0,0}, {"keys",keysCommand,2,0,NULL,0,0,0}, {"dbsize",dbsizeCommand,1,0,NULL,0,0,0}, {"auth",authCommand,2,0,NULL,0,0,0}, {"ping",pingCommand,1,0,NULL,0,0,0}, {"echo",echoCommand,2,0,NULL,0,0,0}, {"save",saveCommand,1,0,NULL,0,0,0}, {"bgsave",bgsaveCommand,1,0,NULL,0,0,0}, {"bgrewriteaof",bgrewriteaofCommand,1,0,NULL,0,0,0}, {"shutdown",shutdownCommand,1,0,NULL,0,0,0}, {"lastsave",lastsaveCommand,1,0,NULL,0,0,0}, {"type",typeCommand,2,0,NULL,1,1,1}, {"multi",multiCommand,1,0,NULL,0,0,0}, {"exec",execCommand,1,REDIS_CMD_DENYOOM,execBlockClientOnSwappedKeys,0,0,0}, {"discard",discardCommand,1,0,NULL,0,0,0}, {"sync",syncCommand,1,0,NULL,0,0,0}, {"flushdb",flushdbCommand,1,0,NULL,0,0,0}, {"flushall",flushallCommand,1,0,NULL,0,0,0}, {"sort",sortCommand,-2,REDIS_CMD_DENYOOM,NULL,1,1,1}, {"info",infoCommand,1,0,NULL,0,0,0}, {"monitor",monitorCommand,1,0,NULL,0,0,0}, {"ttl",ttlCommand,2,0,NULL,1,1,1}, {"persist",persistCommand,2,0,NULL,1,1,1}, {"slaveof",slaveofCommand,3,0,NULL,0,0,0}, {"debug",debugCommand,-2,0,NULL,0,0,0}, {"config",configCommand,-2,0,NULL,0,0,0}, {"subscribe",subscribeCommand,-2,0,NULL,0,0,0}, {"unsubscribe",unsubscribeCommand,-1,0,NULL,0,0,0}, {"psubscribe",psubscribeCommand,-2,0,NULL,0,0,0}, {"punsubscribe",punsubscribeCommand,-1,0,NULL,0,0,0}, {"publish",publishCommand,3,REDIS_CMD_FORCE_REPLICATION,NULL,0,0,0}, {"watch",watchCommand,-2,0,NULL,0,0,0}, {"unwatch",unwatchCommand,1,0,NULL,0,0,0} }; /*============================ Utility functions ============================ */ void redisLog(int level, const char *fmt, ...) { const int syslogLevelMap[] = { LOG_DEBUG, LOG_INFO, LOG_NOTICE, LOG_WARNING }; const char *c = ".-*#"; time_t now = time(NULL); va_list ap; FILE *fp; char buf[64]; char msg[REDIS_MAX_LOGMSG_LEN]; if (level < server.verbosity) return; fp = (server.logfile == NULL) ? stdout : fopen(server.logfile,"a"); if (!fp) return; va_start(ap, fmt); vsnprintf(msg, sizeof(msg), fmt, ap); va_end(ap); strftime(buf,sizeof(buf),"%d %b %H:%M:%S",localtime(&now)); fprintf(fp,"[%d] %s %c %s\n",(int)getpid(),buf,c[level],msg); fflush(fp); if (server.logfile) fclose(fp); if (server.syslog_enabled) syslog(syslogLevelMap[level], "%s", msg); } /* Redis generally does not try to recover from out of memory conditions * when allocating objects or strings, it is not clear if it will be possible * to report this condition to the client since the networking layer itself * is based on heap allocation for send buffers, so we simply abort. 
* At least the code will be simpler to read... */ void oom(const char *msg) { redisLog(REDIS_WARNING, "%s: Out of memory\n",msg); sleep(1); abort(); } /*====================== Hash table type implementation ==================== */ /* This is an hash table type that uses the SDS dynamic strings libary as * keys and radis objects as values (objects can hold SDS strings, * lists, sets). */ void dictVanillaFree(void *privdata, void *val) { DICT_NOTUSED(privdata); zfree(val); } void dictListDestructor(void *privdata, void *val) { DICT_NOTUSED(privdata); listRelease((list*)val); } int dictSdsKeyCompare(void *privdata, const void *key1, const void *key2) { int l1,l2; DICT_NOTUSED(privdata); l1 = sdslen((sds)key1); l2 = sdslen((sds)key2); if (l1 != l2) return 0; return memcmp(key1, key2, l1) == 0; } /* A case insensitive version used for the command lookup table. */ int dictSdsKeyCaseCompare(void *privdata, const void *key1, const void *key2) { DICT_NOTUSED(privdata); return strcasecmp(key1, key2) == 0; } void dictRedisObjectDestructor(void *privdata, void *val) { DICT_NOTUSED(privdata); if (val == NULL) return; /* Values of swapped out keys as set to NULL */ decrRefCount(val); } void dictSdsDestructor(void *privdata, void *val) { DICT_NOTUSED(privdata); sdsfree(val); } int dictObjKeyCompare(void *privdata, const void *key1, const void *key2) { const robj *o1 = key1, *o2 = key2; return dictSdsKeyCompare(privdata,o1->ptr,o2->ptr); } unsigned int dictObjHash(const void *key) { const robj *o = key; return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr)); } unsigned int dictSdsHash(const void *key) { return dictGenHashFunction((unsigned char*)key, sdslen((char*)key)); } unsigned int dictSdsCaseHash(const void *key) { return dictGenCaseHashFunction((unsigned char*)key, sdslen((char*)key)); } int dictEncObjKeyCompare(void *privdata, const void *key1, const void *key2) { robj *o1 = (robj*) key1, *o2 = (robj*) key2; int cmp; if (o1->encoding == REDIS_ENCODING_INT && o2->encoding == REDIS_ENCODING_INT) return o1->ptr == o2->ptr; o1 = getDecodedObject(o1); o2 = getDecodedObject(o2); cmp = dictSdsKeyCompare(privdata,o1->ptr,o2->ptr); decrRefCount(o1); decrRefCount(o2); return cmp; } unsigned int dictEncObjHash(const void *key) { robj *o = (robj*) key; if (o->encoding == REDIS_ENCODING_RAW) { return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr)); } else { if (o->encoding == REDIS_ENCODING_INT) { char buf[32]; int len; len = ll2string(buf,32,(long)o->ptr); return dictGenHashFunction((unsigned char*)buf, len); } else { unsigned int hash; o = getDecodedObject(o); hash = dictGenHashFunction(o->ptr, sdslen((sds)o->ptr)); decrRefCount(o); return hash; } } } /* Sets type */ dictType setDictType = { dictEncObjHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictEncObjKeyCompare, /* key compare */ dictRedisObjectDestructor, /* key destructor */ NULL /* val destructor */ }; /* Sorted sets hash (note: a skiplist is used in addition to the hash table) */ dictType zsetDictType = { dictEncObjHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictEncObjKeyCompare, /* key compare */ dictRedisObjectDestructor, /* key destructor */ NULL /* val destructor */ }; /* Db->dict, keys are sds strings, vals are Redis objects. 
*/ dictType dbDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ dictSdsDestructor, /* key destructor */ dictRedisObjectDestructor /* val destructor */ }; /* Db->expires */ dictType keyptrDictType = { dictSdsHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCompare, /* key compare */ NULL, /* key destructor */ NULL /* val destructor */ }; /* Command table. sds string -> command struct pointer. */ dictType commandTableDictType = { dictSdsCaseHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictSdsKeyCaseCompare, /* key compare */ dictSdsDestructor, /* key destructor */ NULL /* val destructor */ }; /* Hash type hash table (note that small hashes are represented with zipmaps) */ dictType hashDictType = { dictEncObjHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictEncObjKeyCompare, /* key compare */ dictRedisObjectDestructor, /* key destructor */ dictRedisObjectDestructor /* val destructor */ }; /* Keylist hash table type has unencoded Redis objects as keys and * lists as values. It's used for blocking operations (BLPOP) and to * map swapped keys to a list of clients waiting for these keys to be loaded. */ dictType keylistDictType = { dictObjHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictObjKeyCompare, /* key compare */ dictRedisObjectDestructor, /* key destructor */ dictListDestructor /* val destructor */ }; int htNeedsResize(dict *dict) { long long size, used; size = dictSlots(dict); used = dictSize(dict); return (size && used && size > DICT_HT_INITIAL_SIZE && (used*100/size < REDIS_HT_MINFILL)); } /* If the percentage of used slots in the HT drops below REDIS_HT_MINFILL * we resize the hash table to save memory */ void tryResizeHashTables(void) { int j; for (j = 0; j < server.dbnum; j++) { if (htNeedsResize(server.db[j].dict)) dictResize(server.db[j].dict); if (htNeedsResize(server.db[j].expires)) dictResize(server.db[j].expires); } } /* Our hash table implementation performs rehashing incrementally while * we write/read from the hash table. Still, if the server is idle, the hash * table will use two tables for a long time. So we try to use 1 millisecond * of CPU time at every serverCron() loop in order to rehash some keys. */ void incrementallyRehash(void) { int j; for (j = 0; j < server.dbnum; j++) { if (dictIsRehashing(server.db[j].dict)) { dictRehashMilliseconds(server.db[j].dict,1); break; /* already used our millisecond for this loop... */ } } } /* This function is called once a background process of some kind terminates, * as we want to avoid resizing the hash tables when there is a child in order * to play well with copy-on-write (otherwise when a resize happens lots of * memory pages are copied). The goal of this function is to update dict.c's * ability to resize the hash tables according to whether or not we have * running children. */ void updateDictResizePolicy(void) { if (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1) dictEnableResize(); else dictDisableResize(); } /* ======================= Cron: called every 100 ms ======================== */ /* Try to expire a few timed out keys. The algorithm used is adaptive and * will use few CPU cycles if there are few expiring keys, otherwise * it will get more aggressive to avoid using too much memory for * keys that can be removed from the keyspace.
*/ void activeExpireCycle(void) { int j; for (j = 0; j < server.dbnum; j++) { int expired; redisDb *db = server.db+j; /* Continue to expire if at the end of the cycle more than 25% * of the keys were expired. */ do { long num = dictSize(db->expires); time_t now = time(NULL); expired = 0; if (num > REDIS_EXPIRELOOKUPS_PER_CRON) num = REDIS_EXPIRELOOKUPS_PER_CRON; while (num--) { dictEntry *de; time_t t; if ((de = dictGetRandomKey(db->expires)) == NULL) break; t = (time_t) dictGetEntryVal(de); if (now > t) { sds key = dictGetEntryKey(de); robj *keyobj = createStringObject(key,sdslen(key)); propagateExpire(db,keyobj); dbDelete(db,keyobj); decrRefCount(keyobj); expired++; server.stat_expiredkeys++; } } } while (expired > REDIS_EXPIRELOOKUPS_PER_CRON/4); } } void updateLRUClock(void) { server.lruclock = (time(NULL)/REDIS_LRU_CLOCK_RESOLUTION) & REDIS_LRU_CLOCK_MAX; } int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { int j, loops = server.cronloops++; REDIS_NOTUSED(eventLoop); REDIS_NOTUSED(id); REDIS_NOTUSED(clientData); /* We take a cached value of the unix time in the global state because * with virtual memory and aging we need to store the current time * in objects at every object access, and accuracy is not needed. * Accessing a global variable is faster than calling time(NULL). */ server.unixtime = time(NULL); /* We have just 22 bits per object for LRU information. * So we use an (eventually wrapping) LRU clock with 10 seconds resolution. * 2^22 values with 10 seconds resolution is more or less 1.5 years. * * Note that even if this will wrap after 1.5 years it's not a problem: * everything will still work, but some objects will appear younger * to Redis. For this to happen a given object should never be touched * for 1.5 years. * * Note that you can change the resolution by altering the * REDIS_LRU_CLOCK_RESOLUTION define. */ updateLRUClock(); /* We received a SIGTERM: shut down here in a safe way, as it is * not safe to do so inside the signal handler. */ if (server.shutdown_asap) { if (prepareForShutdown() == REDIS_OK) exit(0); redisLog(REDIS_WARNING,"SIGTERM received but errors trying to shut down the server, check the logs for more information"); } /* Show some info about non-empty databases */ for (j = 0; j < server.dbnum; j++) { long long size, used, vkeys; size = dictSlots(server.db[j].dict); used = dictSize(server.db[j].dict); vkeys = dictSize(server.db[j].expires); if (!(loops % 50) && (used || vkeys)) { redisLog(REDIS_VERBOSE,"DB %d: %lld keys (%lld volatile) in %lld slots HT.",j,used,vkeys,size); /* dictPrintStats(server.dict); */ } } /* We don't want to resize the hash tables while a background saving * is in progress: the saving child is created using fork(), which is * implemented with copy-on-write semantics in most modern systems, so * if we resize the HT while the saving child is at work, memory * movements in the parent will cause many pages to be copied.
*/ if (server.bgsavechildpid == -1 && server.bgrewritechildpid == -1) { if (!(loops % 10)) tryResizeHashTables(); if (server.activerehashing) incrementallyRehash(); } /* Show information about connected clients */ if (!(loops % 50)) { redisLog(REDIS_VERBOSE,"%d clients connected (%d slaves), %zu bytes in use", listLength(server.clients)-listLength(server.slaves), listLength(server.slaves), zmalloc_used_memory()); } /* Close connections of timedout clients */ if ((server.maxidletime && !(loops % 100)) || server.bpop_blocked_clients) closeTimedoutClients(); /* Check if a background saving or AOF rewrite in progress terminated */ if (server.bgsavechildpid != -1 || server.bgrewritechildpid != -1) { int statloc; pid_t pid; if ((pid = wait3(&statloc,WNOHANG,NULL)) != 0) { if (pid == server.bgsavechildpid) { backgroundSaveDoneHandler(statloc); } else { backgroundRewriteDoneHandler(statloc); } updateDictResizePolicy(); } } else { /* If there is not a background saving in progress check if * we have to save now */ time_t now = time(NULL); for (j = 0; j < server.saveparamslen; j++) { struct saveparam *sp = server.saveparams+j; if (server.dirty >= sp->changes && now-server.lastsave > sp->seconds) { redisLog(REDIS_NOTICE,"%d changes in %d seconds. Saving...", sp->changes, sp->seconds); rdbSaveBackground(server.dbfilename); break; } } } /* Expire a few keys per cycle, only if this is a master. * On slaves we wait for DEL operations synthesized by the master * in order to guarantee a strict consistency. */ if (server.masterhost == NULL) activeExpireCycle(); /* Remove a few cached objects from memory if we are over the * configured memory limit */ while (server.ds_enabled && zmalloc_used_memory() > server.cache_max_memory) { cacheFreeOneEntry(); } /* Replication cron function -- used to reconnect to master and * to detect transfer failures. */ if (!(loops % 10)) replicationCron(); return 100; } /* This function gets called every time Redis is entering the * main loop of the event driven library, that is, before to sleep * for ready file descriptors. */ void beforeSleep(struct aeEventLoop *eventLoop) { REDIS_NOTUSED(eventLoop); listNode *ln; redisClient *c; /* Awake clients that got all the on disk keys they requested */ if (server.ds_enabled && listLength(server.io_ready_clients)) { listIter li; listRewind(server.io_ready_clients,&li); while((ln = listNext(&li))) { c = ln->value; struct redisCommand *cmd; /* Resume the client. */ listDelNode(server.io_ready_clients,ln); c->flags &= (~REDIS_IO_WAIT); server.cache_blocked_clients--; aeCreateFileEvent(server.el, c->fd, AE_READABLE, readQueryFromClient, c); cmd = lookupCommand(c->argv[0]->ptr); redisAssert(cmd != NULL); call(c,cmd); resetClient(c); /* There may be more data to process in the input buffer. */ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } } /* Try to process pending commands for clients that were just unblocked. */ while (listLength(server.unblocked_clients)) { ln = listFirst(server.unblocked_clients); redisAssert(ln != NULL); c = ln->value; listDelNode(server.unblocked_clients,ln); /* Process remaining data in the input buffer. 
*/ if (c->querybuf && sdslen(c->querybuf) > 0) processInputBuffer(c); } /* Write the AOF buffer on disk */ flushAppendOnlyFile(); } /* =========================== Server initialization ======================== */ void createSharedObjects(void) { int j; shared.crlf = createObject(REDIS_STRING,sdsnew("\r\n")); shared.ok = createObject(REDIS_STRING,sdsnew("+OK\r\n")); shared.err = createObject(REDIS_STRING,sdsnew("-ERR\r\n")); shared.emptybulk = createObject(REDIS_STRING,sdsnew("$0\r\n\r\n")); shared.czero = createObject(REDIS_STRING,sdsnew(":0\r\n")); shared.cone = createObject(REDIS_STRING,sdsnew(":1\r\n")); shared.cnegone = createObject(REDIS_STRING,sdsnew(":-1\r\n")); shared.nullbulk = createObject(REDIS_STRING,sdsnew("$-1\r\n")); shared.nullmultibulk = createObject(REDIS_STRING,sdsnew("*-1\r\n")); shared.emptymultibulk = createObject(REDIS_STRING,sdsnew("*0\r\n")); shared.pong = createObject(REDIS_STRING,sdsnew("+PONG\r\n")); shared.queued = createObject(REDIS_STRING,sdsnew("+QUEUED\r\n")); shared.wrongtypeerr = createObject(REDIS_STRING,sdsnew( "-ERR Operation against a key holding the wrong kind of value\r\n")); shared.nokeyerr = createObject(REDIS_STRING,sdsnew( "-ERR no such key\r\n")); shared.syntaxerr = createObject(REDIS_STRING,sdsnew( "-ERR syntax error\r\n")); shared.sameobjecterr = createObject(REDIS_STRING,sdsnew( "-ERR source and destination objects are the same\r\n")); shared.outofrangeerr = createObject(REDIS_STRING,sdsnew( "-ERR index out of range\r\n")); shared.loadingerr = createObject(REDIS_STRING,sdsnew( "-LOADING Redis is loading the dataset in memory\r\n")); shared.space = createObject(REDIS_STRING,sdsnew(" ")); shared.colon = createObject(REDIS_STRING,sdsnew(":")); shared.plus = createObject(REDIS_STRING,sdsnew("+")); shared.select0 = createStringObject("select 0\r\n",10); shared.select1 = createStringObject("select 1\r\n",10); shared.select2 = createStringObject("select 2\r\n",10); shared.select3 = createStringObject("select 3\r\n",10); shared.select4 = createStringObject("select 4\r\n",10); shared.select5 = createStringObject("select 5\r\n",10); shared.select6 = createStringObject("select 6\r\n",10); shared.select7 = createStringObject("select 7\r\n",10); shared.select8 = createStringObject("select 8\r\n",10); shared.select9 = createStringObject("select 9\r\n",10); shared.messagebulk = createStringObject("$7\r\nmessage\r\n",13); shared.pmessagebulk = createStringObject("$8\r\npmessage\r\n",14); shared.subscribebulk = createStringObject("$9\r\nsubscribe\r\n",15); shared.unsubscribebulk = createStringObject("$11\r\nunsubscribe\r\n",18); shared.psubscribebulk = createStringObject("$10\r\npsubscribe\r\n",17); shared.punsubscribebulk = createStringObject("$12\r\npunsubscribe\r\n",19); shared.mbulk3 = createStringObject("*3\r\n",4); shared.mbulk4 = createStringObject("*4\r\n",4); for (j = 0; j < REDIS_SHARED_INTEGERS; j++) { shared.integers[j] = createObject(REDIS_STRING,(void*)(long)j); shared.integers[j]->encoding = REDIS_ENCODING_INT; } } void initServerConfig() { server.port = REDIS_SERVERPORT; server.bindaddr = NULL; server.unixsocket = NULL; server.ipfd = -1; server.sofd = -1; server.dbnum = REDIS_DEFAULT_DBNUM; server.verbosity = REDIS_VERBOSE; server.maxidletime = REDIS_MAXIDLETIME; server.saveparams = NULL; server.loading = 0; server.logfile = NULL; /* NULL = log on standard output */ server.syslog_enabled = 0; server.syslog_ident = zstrdup("redis"); server.syslog_facility = LOG_LOCAL0; server.glueoutputbuf = 1; server.daemonize = 0; server.appendonly = 0; 
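    /* Descriptive note on the fields that follow: appendfsync selects how
     * often the AOF is fsync()ed, lastfsync remembers when that last
     * happened, appendfd is the open AOF file descriptor (-1 while closed),
     * and appendseldb tracks the last db SELECTed in the AOF stream
     * (initialized to -1 so the first write always emits a SELECT). */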
server.appendfsync = APPENDFSYNC_EVERYSEC; server.no_appendfsync_on_rewrite = 0; server.lastfsync = time(NULL); server.appendfd = -1; server.appendseldb = -1; /* Make sure the first time will not match */ server.pidfile = zstrdup("/var/run/redis.pid"); server.dbfilename = zstrdup("dump.rdb"); server.appendfilename = zstrdup("appendonly.aof"); server.requirepass = NULL; server.rdbcompression = 1; server.activerehashing = 1; server.maxclients = 0; server.bpop_blocked_clients = 0; server.maxmemory = 0; server.maxmemory_policy = REDIS_MAXMEMORY_VOLATILE_LRU; server.maxmemory_samples = 3; server.ds_enabled = 0; server.ds_path = zstrdup("/tmp/redis.ds"); server.cache_max_memory = 64LL*1024*1024; /* 64 MB of RAM */ server.cache_blocked_clients = 0; server.hash_max_zipmap_entries = REDIS_HASH_MAX_ZIPMAP_ENTRIES; server.hash_max_zipmap_value = REDIS_HASH_MAX_ZIPMAP_VALUE; server.list_max_ziplist_entries = REDIS_LIST_MAX_ZIPLIST_ENTRIES; server.list_max_ziplist_value = REDIS_LIST_MAX_ZIPLIST_VALUE; server.set_max_intset_entries = REDIS_SET_MAX_INTSET_ENTRIES; server.shutdown_asap = 0; updateLRUClock(); resetServerSaveParams(); appendServerSaveParams(60*60,1); /* save after 1 hour and 1 change */ appendServerSaveParams(300,100); /* save after 5 minutes and 100 changes */ appendServerSaveParams(60,10000); /* save after 1 minute and 10000 changes */ /* Replication related */ server.isslave = 0; server.masterauth = NULL; server.masterhost = NULL; server.masterport = 6379; server.master = NULL; server.replstate = REDIS_REPL_NONE; server.repl_serve_stale_data = 1; /* Double constants initialization */ R_Zero = 0.0; R_PosInf = 1.0/R_Zero; R_NegInf = -1.0/R_Zero; R_Nan = R_Zero/R_Zero; /* Command table -- we intiialize it here as it is part of the * initial configuration, since command names may be changed via * redis.conf using the rename-command directive. 
*/ server.commands = dictCreate(&commandTableDictType,NULL); populateCommandTable(); server.delCommand = lookupCommandByCString("del"); server.multiCommand = lookupCommandByCString("multi"); } void initServer() { int j; signal(SIGHUP, SIG_IGN); signal(SIGPIPE, SIG_IGN); setupSigSegvAction(); if (server.syslog_enabled) { openlog(server.syslog_ident, LOG_PID | LOG_NDELAY | LOG_NOWAIT, server.syslog_facility); } server.mainthread = pthread_self(); server.clients = listCreate(); server.slaves = listCreate(); server.monitors = listCreate(); server.unblocked_clients = listCreate(); createSharedObjects(); server.el = aeCreateEventLoop(); server.db = zmalloc(sizeof(redisDb)*server.dbnum); server.ipfd = anetTcpServer(server.neterr,server.port,server.bindaddr); if (server.ipfd == ANET_ERR) { redisLog(REDIS_WARNING, "Opening port: %s", server.neterr); exit(1); } if (server.unixsocket != NULL) { unlink(server.unixsocket); /* don't care if this fails */ server.sofd = anetUnixServer(server.neterr,server.unixsocket); if (server.sofd == ANET_ERR) { redisLog(REDIS_WARNING, "Opening socket: %s", server.neterr); exit(1); } } if (server.ipfd < 0 && server.sofd < 0) { redisLog(REDIS_WARNING, "Configured to not listen anywhere, exiting."); exit(1); } for (j = 0; j < server.dbnum; j++) { server.db[j].dict = dictCreate(&dbDictType,NULL); server.db[j].expires = dictCreate(&keyptrDictType,NULL); server.db[j].blocking_keys = dictCreate(&keylistDictType,NULL); server.db[j].watched_keys = dictCreate(&keylistDictType,NULL); if (server.ds_enabled) server.db[j].io_keys = dictCreate(&keylistDictType,NULL); server.db[j].id = j; } server.pubsub_channels = dictCreate(&keylistDictType,NULL); server.pubsub_patterns = listCreate(); listSetFreeMethod(server.pubsub_patterns,freePubsubPattern); listSetMatchMethod(server.pubsub_patterns,listMatchPubsubPattern); server.cronloops = 0; server.bgsavechildpid = -1; server.bgrewritechildpid = -1; server.bgrewritebuf = sdsempty(); server.aofbuf = sdsempty(); server.lastsave = time(NULL); server.dirty = 0; server.stat_numcommands = 0; server.stat_numconnections = 0; server.stat_expiredkeys = 0; server.stat_evictedkeys = 0; server.stat_starttime = time(NULL); server.stat_keyspace_misses = 0; server.stat_keyspace_hits = 0; server.unixtime = time(NULL); aeCreateTimeEvent(server.el, 1, serverCron, NULL, NULL); if (server.ipfd > 0 && aeCreateFileEvent(server.el,server.ipfd,AE_READABLE, acceptTcpHandler,NULL) == AE_ERR) oom("creating file event"); if (server.sofd > 0 && aeCreateFileEvent(server.el,server.sofd,AE_READABLE, acceptUnixHandler,NULL) == AE_ERR) oom("creating file event"); if (server.appendonly) { server.appendfd = open(server.appendfilename,O_WRONLY|O_APPEND|O_CREAT,0644); if (server.appendfd == -1) { redisLog(REDIS_WARNING, "Can't open the append-only file: %s", strerror(errno)); exit(1); } } if (server.ds_enabled) dsInit(); } /* Populates the Redis Command Table starting from the hard coded list * we have on top of redis.c file. 
*/ void populateCommandTable(void) { int j; int numcommands = sizeof(readonlyCommandTable)/sizeof(struct redisCommand); for (j = 0; j < numcommands; j++) { struct redisCommand *c = readonlyCommandTable+j; int retval; retval = dictAdd(server.commands, sdsnew(c->name), c); assert(retval == DICT_OK); } } /* ====================== Commands lookup and execution ===================== */ struct redisCommand *lookupCommand(sds name) { return dictFetchValue(server.commands, name); } struct redisCommand *lookupCommandByCString(char *s) { struct redisCommand *cmd; sds name = sdsnew(s); cmd = dictFetchValue(server.commands, name); sdsfree(name); return cmd; } /* call() is the core of Redis command execution */ void call(redisClient *c, struct redisCommand *cmd) { long long dirty; dirty = server.dirty; cmd->proc(c); dirty = server.dirty-dirty; if (server.appendonly && dirty) feedAppendOnlyFile(cmd,c->db->id,c->argv,c->argc); if ((dirty || cmd->flags & REDIS_CMD_FORCE_REPLICATION) && listLength(server.slaves)) replicationFeedSlaves(server.slaves,c->db->id,c->argv,c->argc); if (listLength(server.monitors)) replicationFeedMonitors(server.monitors,c->db->id,c->argv,c->argc); server.stat_numcommands++; } /* If this function gets called we already read a whole * command; arguments are in the client argv/argc fields. * processCommand() executes the command or prepares the * server for a bulk read from the client. * * If 1 is returned the client is still alive and valid, * and other operations can be performed by the caller. Otherwise, * if 0 is returned, the client was destroyed (i.e. after QUIT). */ int processCommand(redisClient *c) { struct redisCommand *cmd; /* The QUIT command is handled separately. Normal command procs go * through replication checks, and QUIT would cause trouble if * FORCE_REPLICATION is enabled and it were implemented as a * regular command proc. */ if (!strcasecmp(c->argv[0]->ptr,"quit")) { addReply(c,shared.ok); c->flags |= REDIS_CLOSE_AFTER_REPLY; return REDIS_ERR; } /* Now lookup the command and check ASAP for trivial error conditions * such as wrong arity, bad command name and so forth. */ cmd = lookupCommand(c->argv[0]->ptr); if (!cmd) { addReplyErrorFormat(c,"unknown command '%s'", (char*)c->argv[0]->ptr); return REDIS_OK; } else if ((cmd->arity > 0 && cmd->arity != c->argc) || (c->argc < -cmd->arity)) { addReplyErrorFormat(c,"wrong number of arguments for '%s' command", cmd->name); return REDIS_OK; } /* Check if the user is authenticated */ if (server.requirepass && !c->authenticated && cmd->proc != authCommand) { addReplyError(c,"operation not permitted"); return REDIS_OK; } /* Handle the maxmemory directive. * * First we try to free some memory if possible (if there are volatile * keys in the dataset). If there are none, the only thing we can do * is return an error.
*/ if (server.maxmemory) freeMemoryIfNeeded(); if (server.maxmemory && (cmd->flags & REDIS_CMD_DENYOOM) && zmalloc_used_memory() > server.maxmemory) { addReplyError(c,"command not allowed when used memory > 'maxmemory'"); return REDIS_OK; } /* Only allow SUBSCRIBE and UNSUBSCRIBE in the context of Pub/Sub */ if ((dictSize(c->pubsub_channels) > 0 || listLength(c->pubsub_patterns) > 0) && cmd->proc != subscribeCommand && cmd->proc != unsubscribeCommand && cmd->proc != psubscribeCommand && cmd->proc != punsubscribeCommand) { addReplyError(c,"only (P)SUBSCRIBE / (P)UNSUBSCRIBE / QUIT allowed in this context"); return REDIS_OK; } /* Only allow INFO and SLAVEOF when slave-serve-stale-data is no and * we are a slave with a broken link with master. */ if (server.masterhost && server.replstate != REDIS_REPL_CONNECTED && server.repl_serve_stale_data == 0 && cmd->proc != infoCommand && cmd->proc != slaveofCommand) { addReplyError(c, "link with MASTER is down and slave-serve-stale-data is set to no"); return REDIS_OK; } /* Loading DB? Return an error if the command is not INFO */ if (server.loading && cmd->proc != infoCommand) { addReply(c, shared.loadingerr); return REDIS_OK; } /* Exec the command */ if (c->flags & REDIS_MULTI && cmd->proc != execCommand && cmd->proc != discardCommand && cmd->proc != multiCommand && cmd->proc != watchCommand) { queueMultiCommand(c,cmd); addReply(c,shared.queued); } else { if (server.ds_enabled && blockClientOnSwappedKeys(c,cmd)) return REDIS_ERR; call(c,cmd); } return REDIS_OK; } /*================================== Shutdown =============================== */ int prepareForShutdown() { redisLog(REDIS_WARNING,"User requested shutdown, saving DB..."); /* Kill the saving child if there is a background saving in progress. We want to avoid race conditions, for instance our saving child may overwrite the synchronous saving did by SHUTDOWN. */ if (server.bgsavechildpid != -1) { redisLog(REDIS_WARNING,"There is a live saving child. Killing it!"); kill(server.bgsavechildpid,SIGKILL); rdbRemoveTempFile(server.bgsavechildpid); } if (server.appendonly) { /* Append only file: fsync() the AOF and exit */ aof_fsync(server.appendfd); } else if (server.saveparamslen > 0) { /* Snapshotting. Perform a SYNC SAVE and exit */ if (rdbSave(server.dbfilename) != REDIS_OK) { /* Ooops.. error saving! The best we can do is to continue * operating. Note that if there was a background saving process, * in the next cron() Redis will be notified that the background * saving aborted, handling special stuff like slaves pending for * synchronization... */ redisLog(REDIS_WARNING,"Error trying to save the DB, can't exit"); return REDIS_ERR; } } else { redisLog(REDIS_WARNING,"Not saving DB."); } if (server.daemonize) unlink(server.pidfile); redisLog(REDIS_WARNING,"Server exit now, bye bye..."); return REDIS_OK; } /*================================== Commands =============================== */ void authCommand(redisClient *c) { if (!server.requirepass || !strcmp(c->argv[1]->ptr, server.requirepass)) { c->authenticated = 1; addReply(c,shared.ok); } else { c->authenticated = 0; addReplyError(c,"invalid password"); } } void pingCommand(redisClient *c) { addReply(c,shared.pong); } void echoCommand(redisClient *c) { addReplyBulk(c,c->argv[1]); } /* Convert an amount of bytes into a human readable string in the form * of 100B, 2G, 100M, 4K, and so forth. 
*/ void bytesToHuman(char *s, unsigned long long n) { double d; if (n < 1024) { /* Bytes */ sprintf(s,"%lluB",n); return; } else if (n < (1024*1024)) { d = (double)n/(1024); sprintf(s,"%.2fK",d); } else if (n < (1024LL*1024*1024)) { d = (double)n/(1024*1024); sprintf(s,"%.2fM",d); } else if (n < (1024LL*1024*1024*1024)) { d = (double)n/(1024LL*1024*1024); sprintf(s,"%.2fG",d); } } /* Create the string returned by the INFO command. This is decoupled * by the INFO command itself as we need to report the same information * on memory corruption problems. */ sds genRedisInfoString(void) { sds info; time_t uptime = time(NULL)-server.stat_starttime; int j; char hmem[64]; struct rusage self_ru, c_ru; getrusage(RUSAGE_SELF, &self_ru); getrusage(RUSAGE_CHILDREN, &c_ru); bytesToHuman(hmem,zmalloc_used_memory()); info = sdscatprintf(sdsempty(), "redis_version:%s\r\n" "redis_git_sha1:%s\r\n" "redis_git_dirty:%d\r\n" "arch_bits:%s\r\n" "multiplexing_api:%s\r\n" "process_id:%ld\r\n" "uptime_in_seconds:%ld\r\n" "uptime_in_days:%ld\r\n" "lru_clock:%ld\r\n" "used_cpu_sys:%.2f\r\n" "used_cpu_user:%.2f\r\n" "used_cpu_sys_childrens:%.2f\r\n" "used_cpu_user_childrens:%.2f\r\n" "connected_clients:%d\r\n" "connected_slaves:%d\r\n" "blocked_clients:%d\r\n" "used_memory:%zu\r\n" "used_memory_human:%s\r\n" "used_memory_rss:%zu\r\n" "mem_fragmentation_ratio:%.2f\r\n" "use_tcmalloc:%d\r\n" "loading:%d\r\n" "aof_enabled:%d\r\n" "changes_since_last_save:%lld\r\n" "bgsave_in_progress:%d\r\n" "last_save_time:%ld\r\n" "bgrewriteaof_in_progress:%d\r\n" "total_connections_received:%lld\r\n" "total_commands_processed:%lld\r\n" "expired_keys:%lld\r\n" "evicted_keys:%lld\r\n" "keyspace_hits:%lld\r\n" "keyspace_misses:%lld\r\n" "hash_max_zipmap_entries:%zu\r\n" "hash_max_zipmap_value:%zu\r\n" "pubsub_channels:%ld\r\n" "pubsub_patterns:%u\r\n" "ds_enabled:%d\r\n" "role:%s\r\n" ,REDIS_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, (sizeof(long) == 8) ? "64" : "32", aeGetApiName(), (long) getpid(), uptime, uptime/(3600*24), (unsigned long) server.lruclock, (float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000, (float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000, (float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000, (float)c_ru.ru_stime.tv_sec+(float)c_ru.ru_stime.tv_usec/1000000, listLength(server.clients)-listLength(server.slaves), listLength(server.slaves), server.bpop_blocked_clients, zmalloc_used_memory(), hmem, zmalloc_get_rss(), zmalloc_get_fragmentation_ratio(), #ifdef USE_TCMALLOC 1, #else 0, #endif server.loading, server.appendonly, server.dirty, server.bgsavechildpid != -1, server.lastsave, server.bgrewritechildpid != -1, server.stat_numconnections, server.stat_numcommands, server.stat_expiredkeys, server.stat_evictedkeys, server.stat_keyspace_hits, server.stat_keyspace_misses, server.hash_max_zipmap_entries, server.hash_max_zipmap_value, dictSize(server.pubsub_channels), listLength(server.pubsub_patterns), server.ds_enabled != 0, server.masterhost == NULL ? "master" : "slave" ); if (server.masterhost) { info = sdscatprintf(info, "master_host:%s\r\n" "master_port:%d\r\n" "master_link_status:%s\r\n" "master_last_io_seconds_ago:%d\r\n" "master_sync_in_progress:%d\r\n" ,server.masterhost, server.masterport, (server.replstate == REDIS_REPL_CONNECTED) ? "up" : "down", server.master ? 
((int)(time(NULL)-server.master->lastinteraction)) : -1, server.replstate == REDIS_REPL_TRANSFER ); if (server.replstate == REDIS_REPL_TRANSFER) { info = sdscatprintf(info, "master_sync_left_bytes:%ld\r\n" "master_sync_last_io_seconds_ago:%d\r\n" ,(long)server.repl_transfer_left, (int)(time(NULL)-server.repl_transfer_lastio) ); } } if (server.ds_enabled) { lockThreadedIO(); info = sdscatprintf(info, "cache_max_memory:%llu\r\n" "cache_blocked_clients:%lu\r\n" ,(unsigned long long) server.cache_max_memory, (unsigned long) server.cache_blocked_clients ); unlockThreadedIO(); } if (server.loading) { double perc; time_t eta, elapsed; off_t remaining_bytes = server.loading_total_bytes- server.loading_loaded_bytes; perc = ((double)server.loading_loaded_bytes / server.loading_total_bytes) * 100; elapsed = time(NULL)-server.loading_start_time; if (elapsed == 0) { eta = 1; /* A fake 1 second figure if we don't have enough info */ } else { eta = (elapsed*remaining_bytes)/server.loading_loaded_bytes; } info = sdscatprintf(info, "loading_start_time:%ld\r\n" "loading_total_bytes:%llu\r\n" "loading_loaded_bytes:%llu\r\n" "loading_loaded_perc:%.2f\r\n" "loading_eta_seconds:%ld\r\n" ,(unsigned long) server.loading_start_time, (unsigned long long) server.loading_total_bytes, (unsigned long long) server.loading_loaded_bytes, perc, eta ); } for (j = 0; j < server.dbnum; j++) { long long keys, vkeys; keys = dictSize(server.db[j].dict); vkeys = dictSize(server.db[j].expires); if (keys || vkeys) { info = sdscatprintf(info, "db%d:keys=%lld,expires=%lld\r\n", j, keys, vkeys); } } return info; } void infoCommand(redisClient *c) { sds info = genRedisInfoString(); addReplySds(c,sdscatprintf(sdsempty(),"$%lu\r\n", (unsigned long)sdslen(info))); addReplySds(c,info); addReply(c,shared.crlf); } void monitorCommand(redisClient *c) { /* ignore MONITOR if already a slave or in monitor mode */ if (c->flags & REDIS_SLAVE) return; c->flags |= (REDIS_SLAVE|REDIS_MONITOR); c->slaveseldb = 0; listAddNodeTail(server.monitors,c); addReply(c,shared.ok); } /* ============================ Maxmemory directive ======================== */ /* This function gets called when 'maxmemory' is set in the config file to limit * the max memory used by the server, and we are out of memory. * This function will try to, in order: * * - Free objects from the free list * - Try to remove keys with an EXPIRE set * * If it is not possible to free enough memory to reach used-memory < maxmemory, * the server will start refusing commands that would enlarge the * memory usage even more. */ void freeMemoryIfNeeded(void) { /* Remove keys according to the active policy as long as we are * over the memory limit.
*/ if (server.maxmemory_policy == REDIS_MAXMEMORY_NO_EVICTION) return; while (server.maxmemory && zmalloc_used_memory() > server.maxmemory) { int j, k, freed = 0; for (j = 0; j < server.dbnum; j++) { long bestval = 0; /* just to prevent warning */ sds bestkey = NULL; struct dictEntry *de; redisDb *db = server.db+j; dict *dict; if (server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_LRU || server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_RANDOM) { dict = server.db[j].dict; } else { dict = server.db[j].expires; } if (dictSize(dict) == 0) continue; /* volatile-random and allkeys-random policy */ if (server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_RANDOM || server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_RANDOM) { de = dictGetRandomKey(dict); bestkey = dictGetEntryKey(de); } /* volatile-lru and allkeys-lru policy */ else if (server.maxmemory_policy == REDIS_MAXMEMORY_ALLKEYS_LRU || server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_LRU) { for (k = 0; k < server.maxmemory_samples; k++) { sds thiskey; long thisval; robj *o; de = dictGetRandomKey(dict); thiskey = dictGetEntryKey(de); /* When policy is volatile-lru we need an additonal lookup * to locate the real key, as dict is set to db->expires. */ if (server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_LRU) de = dictFind(db->dict, thiskey); o = dictGetEntryVal(de); thisval = estimateObjectIdleTime(o); /* Higher idle time is better candidate for deletion */ if (bestkey == NULL || thisval > bestval) { bestkey = thiskey; bestval = thisval; } } } /* volatile-ttl */ else if (server.maxmemory_policy == REDIS_MAXMEMORY_VOLATILE_TTL) { for (k = 0; k < server.maxmemory_samples; k++) { sds thiskey; long thisval; de = dictGetRandomKey(dict); thiskey = dictGetEntryKey(de); thisval = (long) dictGetEntryVal(de); /* Expire sooner (minor expire unix timestamp) is better * candidate for deletion */ if (bestkey == NULL || thisval < bestval) { bestkey = thiskey; bestval = thisval; } } } /* Finally remove the selected key. */ if (bestkey) { robj *keyobj = createStringObject(bestkey,sdslen(bestkey)); dbDelete(db,keyobj); server.stat_evictedkeys++; decrRefCount(keyobj); freed++; } } if (!freed) return; /* nothing to free... */ } } /* =================================== Main! ================================ */ #ifdef __linux__ int linuxOvercommitMemoryValue(void) { FILE *fp = fopen("/proc/sys/vm/overcommit_memory","r"); char buf[64]; if (!fp) return -1; if (fgets(buf,64,fp) == NULL) { fclose(fp); return -1; } fclose(fp); return atoi(buf); } void linuxOvercommitMemoryWarning(void) { if (linuxOvercommitMemoryValue() == 0) { redisLog(REDIS_WARNING,"WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect."); } } #endif /* __linux__ */ void createPidFile(void) { /* Try to write the pid file in a best-effort way. */ FILE *fp = fopen(server.pidfile,"w"); if (fp) { fprintf(fp,"%d\n",getpid()); fclose(fp); } } void daemonize(void) { int fd; if (fork() != 0) exit(0); /* parent exits */ setsid(); /* create a new session */ /* Every output goes to /dev/null. If Redis is daemonized but * the 'logfile' is set to 'stdout' in the configuration file * it will not log at all. 
*/ if ((fd = open("/dev/null", O_RDWR, 0)) != -1) { dup2(fd, STDIN_FILENO); dup2(fd, STDOUT_FILENO); dup2(fd, STDERR_FILENO); if (fd > STDERR_FILENO) close(fd); } } void version() { printf("Redis server version %s (%s:%d)\n", REDIS_VERSION, redisGitSHA1(), atoi(redisGitDirty()) > 0); exit(0); } void usage() { fprintf(stderr,"Usage: ./redis-server [/path/to/redis.conf]\n"); fprintf(stderr," ./redis-server - (read config from stdin)\n"); exit(1); } int main(int argc, char **argv) { time_t start; initServerConfig(); if (argc == 2) { if (strcmp(argv[1], "-v") == 0 || strcmp(argv[1], "--version") == 0) version(); if (strcmp(argv[1], "--help") == 0) usage(); resetServerSaveParams(); loadServerConfig(argv[1]); } else if ((argc > 2)) { usage(); } else { redisLog(REDIS_WARNING,"Warning: no config file specified, using the default config. In order to specify a config file use 'redis-server /path/to/redis.conf'"); } if (server.daemonize) daemonize(); initServer(); if (server.daemonize) createPidFile(); redisLog(REDIS_NOTICE,"Server started, Redis version " REDIS_VERSION); #ifdef __linux__ linuxOvercommitMemoryWarning(); #endif start = time(NULL); if (server.appendonly) { if (loadAppendOnlyFile(server.appendfilename) == REDIS_OK) redisLog(REDIS_NOTICE,"DB loaded from append only file: %ld seconds",time(NULL)-start); } else { if (rdbLoad(server.dbfilename) == REDIS_OK) redisLog(REDIS_NOTICE,"DB loaded from disk: %ld seconds",time(NULL)-start); } if (server.ipfd > 0) redisLog(REDIS_NOTICE,"The server is now ready to accept connections on port %d", server.port); if (server.sofd > 0) redisLog(REDIS_NOTICE,"The server is now ready to accept connections at %s", server.unixsocket); aeSetBeforeSleepProc(server.el,beforeSleep); aeMain(server.el); aeDeleteEventLoop(server.el); return 0; } /* ============================= Backtrace support ========================= */ #ifdef HAVE_BACKTRACE void *getMcontextEip(ucontext_t *uc) { #if defined(__FreeBSD__) return (void*) uc->uc_mcontext.mc_eip; #elif defined(__dietlibc__) return (void*) uc->uc_mcontext.eip; #elif defined(__APPLE__) && !defined(MAC_OS_X_VERSION_10_6) #if __x86_64__ return (void*) uc->uc_mcontext->__ss.__rip; #else return (void*) uc->uc_mcontext->__ss.__eip; #endif #elif defined(__APPLE__) && defined(MAC_OS_X_VERSION_10_6) #if defined(_STRUCT_X86_THREAD_STATE64) && !defined(__i386__) return (void*) uc->uc_mcontext->__ss.__rip; #else return (void*) uc->uc_mcontext->__ss.__eip; #endif #elif defined(__i386__) return (void*) uc->uc_mcontext.gregs[14]; /* Linux 32 */ #elif defined(__X86_64__) || defined(__x86_64__) return (void*) uc->uc_mcontext.gregs[16]; /* Linux 64 */ #elif defined(__ia64__) /* Linux IA64 */ return (void*) uc->uc_mcontext.sc_ip; #else return NULL; #endif } void segvHandler(int sig, siginfo_t *info, void *secret) { void *trace[100]; char **messages = NULL; int i, trace_size = 0; ucontext_t *uc = (ucontext_t*) secret; sds infostring; struct sigaction act; REDIS_NOTUSED(info); redisLog(REDIS_WARNING, "======= Ooops! Redis %s got signal: -%d- =======", REDIS_VERSION, sig); infostring = genRedisInfoString(); redisLog(REDIS_WARNING, "%s",infostring); /* It's not safe to sdsfree() the returned string under memory * corruption conditions. 
Let it leak as we are going to abort */ trace_size = backtrace(trace, 100); /* overwrite sigaction with caller's address */ if (getMcontextEip(uc) != NULL) { trace[1] = getMcontextEip(uc); } messages = backtrace_symbols(trace, trace_size); for (i=1; i<trace_size; ++i) redisLog(REDIS_WARNING,"%s", messages[i]); /* free(messages); Don't call free() with possibly corrupted memory. */ if (server.daemonize) unlink(server.pidfile); /* Make sure we exit with the right signal at the end. So for instance * the core will be dumped if enabled. */ sigemptyset (&act.sa_mask); /* When the SA_SIGINFO flag is set in sa_flags then sa_sigaction * is used. Otherwise, sa_handler is used */ act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND; act.sa_handler = SIG_DFL; sigaction (sig, &act, NULL); kill(getpid(),sig); } void sigtermHandler(int sig) { REDIS_NOTUSED(sig); redisLog(REDIS_WARNING,"SIGTERM received, scheduling shutting down..."); server.shutdown_asap = 1; } void setupSigSegvAction(void) { struct sigaction act; sigemptyset (&act.sa_mask); /* When the SA_SIGINFO flag is set in sa_flags then sa_sigaction * is used. Otherwise, sa_handler is used */ act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND | SA_SIGINFO; act.sa_sigaction = segvHandler; sigaction (SIGSEGV, &act, NULL); sigaction (SIGBUS, &act, NULL); sigaction (SIGFPE, &act, NULL); sigaction (SIGILL, &act, NULL); sigaction (SIGBUS, &act, NULL); act.sa_flags = SA_NODEFER | SA_ONSTACK | SA_RESETHAND; act.sa_handler = sigtermHandler; sigaction (SIGTERM, &act, NULL); return; } #else /* HAVE_BACKTRACE */ void setupSigSegvAction(void) { } #endif /* HAVE_BACKTRACE */ /* The End */
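As a standalone illustration of the crash-reporting pattern segvHandler() uses above (dump a backtrace, restore the default disposition, re-raise the signal so a core dump can still be produced), here is a minimal hedged sketch. It relies only on the glibc backtrace API and invented names, not on the Redis handler itself:

#include <execinfo.h>
#include <signal.h>
#include <unistd.h>

static void crash_handler(int sig)
{
    void *trace[64];
    int n = backtrace(trace, 64);

    /* Write symbolized frames straight to stderr; unlike
     * backtrace_symbols() this avoids calling malloc() in a crash path. */
    backtrace_symbols_fd(trace, n, STDERR_FILENO);

    /* Restore the default action and re-raise, so the process still
     * terminates with the right signal (and dumps core if enabled). */
    signal(sig, SIG_DFL);
    kill(getpid(), sig);
}

int main(void)
{
    signal(SIGSEGV, crash_handler);
    raise(SIGSEGV);          /* trigger the handler for demonstration */
    return 0;
}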
sds genRedisInfoString(void) { sds info; time_t uptime = time(NULL)-server.stat_starttime; int j; char hmem[64]; struct rusage self_ru, c_ru; getrusage(RUSAGE_SELF, &self_ru); getrusage(RUSAGE_CHILDREN, &c_ru); bytesToHuman(hmem,zmalloc_used_memory()); info = sdscatprintf(sdsempty(), "redis_version:%s\r\n" "redis_git_sha1:%s\r\n" "redis_git_dirty:%d\r\n" "arch_bits:%s\r\n" "multiplexing_api:%s\r\n" "process_id:%ld\r\n" "uptime_in_seconds:%ld\r\n" "uptime_in_days:%ld\r\n" "lru_clock:%ld\r\n" "used_cpu_sys:%.2f\r\n" "used_cpu_user:%.2f\r\n" "used_cpu_sys_childrens:%.2f\r\n" "used_cpu_user_childrens:%.2f\r\n" "connected_clients:%d\r\n" "connected_slaves:%d\r\n" "blocked_clients:%d\r\n" "used_memory:%zu\r\n" "used_memory_human:%s\r\n" "used_memory_rss:%zu\r\n" "mem_fragmentation_ratio:%.2f\r\n" "use_tcmalloc:%d\r\n" "loading:%d\r\n" "aof_enabled:%d\r\n" "changes_since_last_save:%lld\r\n" "bgsave_in_progress:%d\r\n" "last_save_time:%ld\r\n" "bgrewriteaof_in_progress:%d\r\n" "total_connections_received:%lld\r\n" "total_commands_processed:%lld\r\n" "expired_keys:%lld\r\n" "evicted_keys:%lld\r\n" "keyspace_hits:%lld\r\n" "keyspace_misses:%lld\r\n" "hash_max_zipmap_entries:%zu\r\n" "hash_max_zipmap_value:%zu\r\n" "pubsub_channels:%ld\r\n" "pubsub_patterns:%u\r\n" "vm_enabled:%d\r\n" "role:%s\r\n" ,REDIS_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, (sizeof(long) == 8) ? "64" : "32", aeGetApiName(), (long) getpid(), uptime, uptime/(3600*24), (unsigned long) server.lruclock, (float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000, (float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000, (float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000, (float)c_ru.ru_stime.tv_sec+(float)c_ru.ru_stime.tv_usec/1000000, listLength(server.clients)-listLength(server.slaves), listLength(server.slaves), server.bpop_blocked_clients, zmalloc_used_memory(), hmem, zmalloc_get_rss(), zmalloc_get_fragmentation_ratio(), #ifdef USE_TCMALLOC 1, #else 0, #endif server.loading, server.appendonly, server.dirty, server.bgsavechildpid != -1, server.lastsave, server.bgrewritechildpid != -1, server.stat_numconnections, server.stat_numcommands, server.stat_expiredkeys, server.stat_evictedkeys, server.stat_keyspace_hits, server.stat_keyspace_misses, server.hash_max_zipmap_entries, server.hash_max_zipmap_value, dictSize(server.pubsub_channels), listLength(server.pubsub_patterns), server.vm_enabled != 0, server.masterhost == NULL ? "master" : "slave" ); if (server.masterhost) { info = sdscatprintf(info, "master_host:%s\r\n" "master_port:%d\r\n" "master_link_status:%s\r\n" "master_last_io_seconds_ago:%d\r\n" "master_sync_in_progress:%d\r\n" ,server.masterhost, server.masterport, (server.replstate == REDIS_REPL_CONNECTED) ? "up" : "down", server.master ? 
((int)(time(NULL)-server.master->lastinteraction)) : -1, server.replstate == REDIS_REPL_TRANSFER ); if (server.replstate == REDIS_REPL_TRANSFER) { info = sdscatprintf(info, "master_sync_left_bytes:%ld\r\n" "master_sync_last_io_seconds_ago:%d\r\n" ,(long)server.repl_transfer_left, (int)(time(NULL)-server.repl_transfer_lastio) ); } } if (server.vm_enabled) { lockThreadedIO(); info = sdscatprintf(info, "vm_conf_max_memory:%llu\r\n" "vm_conf_page_size:%llu\r\n" "vm_conf_pages:%llu\r\n" "vm_stats_used_pages:%llu\r\n" "vm_stats_swapped_objects:%llu\r\n" "vm_stats_swappin_count:%llu\r\n" "vm_stats_swappout_count:%llu\r\n" "vm_stats_io_newjobs_len:%lu\r\n" "vm_stats_io_processing_len:%lu\r\n" "vm_stats_io_processed_len:%lu\r\n" "vm_stats_io_active_threads:%lu\r\n" "vm_stats_blocked_clients:%lu\r\n" ,(unsigned long long) server.vm_max_memory, (unsigned long long) server.vm_page_size, (unsigned long long) server.vm_pages, (unsigned long long) server.vm_stats_used_pages, (unsigned long long) server.vm_stats_swapped_objects, (unsigned long long) server.vm_stats_swapins, (unsigned long long) server.vm_stats_swapouts, (unsigned long) listLength(server.io_newjobs), (unsigned long) listLength(server.io_processing), (unsigned long) listLength(server.io_processed), (unsigned long) server.io_active_threads, (unsigned long) server.vm_blocked_clients ); unlockThreadedIO(); } if (server.loading) { double perc; time_t eta, elapsed; off_t remaining_bytes = server.loading_total_bytes- server.loading_loaded_bytes; perc = ((double)server.loading_loaded_bytes / server.loading_total_bytes) * 100; elapsed = time(NULL)-server.loading_start_time; if (elapsed == 0) { eta = 1; /* A fake 1 second figure if we don't have enough info */ } else { eta = (elapsed*remaining_bytes)/server.loading_loaded_bytes; } info = sdscatprintf(info, "loading_start_time:%ld\r\n" "loading_total_bytes:%llu\r\n" "loading_loaded_bytes:%llu\r\n" "loading_loaded_perc:%.2f\r\n" "loading_eta_seconds:%ld\r\n" ,(unsigned long) server.loading_start_time, (unsigned long long) server.loading_total_bytes, (unsigned long long) server.loading_loaded_bytes, perc, eta ); } for (j = 0; j < server.dbnum; j++) { long long keys, vkeys; keys = dictSize(server.db[j].dict); vkeys = dictSize(server.db[j].expires); if (keys || vkeys) { info = sdscatprintf(info, "db%d:keys=%lld,expires=%lld\r\n", j, keys, vkeys); } } return info; }
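The two genRedisInfoString() dumps shown here differ only in which statistics block they append; the underlying pattern in both is the sds string builder. Below is a minimal sketch of that pattern, assuming only the sdsempty()/sdscatprintf()/sdsfree() calls from the sds library used throughout this file; every field name is made up for illustration:

#include "sds.h"   /* sds, sdsempty(), sdscatprintf(), sdsfree() */

/* Build a tiny INFO-like report. */
sds toy_info_string(int clients, int is_slave)
{
    sds info = sdscatprintf(sdsempty(),
        "toy_connected_clients:%d\r\n", clients);

    /* Optional sections are appended conditionally, exactly as the real
     * function does for master/slave, VM/diskstore and loading stats. */
    if (is_slave)
        info = sdscatprintf(info, "toy_role:slave\r\n");

    return info;   /* caller owns the string: sdsfree(info) when done */
}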
sds genRedisInfoString(void) { sds info; time_t uptime = time(NULL)-server.stat_starttime; int j; char hmem[64]; struct rusage self_ru, c_ru; getrusage(RUSAGE_SELF, &self_ru); getrusage(RUSAGE_CHILDREN, &c_ru); bytesToHuman(hmem,zmalloc_used_memory()); info = sdscatprintf(sdsempty(), "redis_version:%s\r\n" "redis_git_sha1:%s\r\n" "redis_git_dirty:%d\r\n" "arch_bits:%s\r\n" "multiplexing_api:%s\r\n" "process_id:%ld\r\n" "uptime_in_seconds:%ld\r\n" "uptime_in_days:%ld\r\n" "lru_clock:%ld\r\n" "used_cpu_sys:%.2f\r\n" "used_cpu_user:%.2f\r\n" "used_cpu_sys_childrens:%.2f\r\n" "used_cpu_user_childrens:%.2f\r\n" "connected_clients:%d\r\n" "connected_slaves:%d\r\n" "blocked_clients:%d\r\n" "used_memory:%zu\r\n" "used_memory_human:%s\r\n" "used_memory_rss:%zu\r\n" "mem_fragmentation_ratio:%.2f\r\n" "use_tcmalloc:%d\r\n" "loading:%d\r\n" "aof_enabled:%d\r\n" "changes_since_last_save:%lld\r\n" "bgsave_in_progress:%d\r\n" "last_save_time:%ld\r\n" "bgrewriteaof_in_progress:%d\r\n" "total_connections_received:%lld\r\n" "total_commands_processed:%lld\r\n" "expired_keys:%lld\r\n" "evicted_keys:%lld\r\n" "keyspace_hits:%lld\r\n" "keyspace_misses:%lld\r\n" "hash_max_zipmap_entries:%zu\r\n" "hash_max_zipmap_value:%zu\r\n" "pubsub_channels:%ld\r\n" "pubsub_patterns:%u\r\n" "ds_enabled:%d\r\n" "role:%s\r\n" ,REDIS_VERSION, redisGitSHA1(), strtol(redisGitDirty(),NULL,10) > 0, (sizeof(long) == 8) ? "64" : "32", aeGetApiName(), (long) getpid(), uptime, uptime/(3600*24), (unsigned long) server.lruclock, (float)self_ru.ru_utime.tv_sec+(float)self_ru.ru_utime.tv_usec/1000000, (float)self_ru.ru_stime.tv_sec+(float)self_ru.ru_stime.tv_usec/1000000, (float)c_ru.ru_utime.tv_sec+(float)c_ru.ru_utime.tv_usec/1000000, (float)c_ru.ru_stime.tv_sec+(float)c_ru.ru_stime.tv_usec/1000000, listLength(server.clients)-listLength(server.slaves), listLength(server.slaves), server.bpop_blocked_clients, zmalloc_used_memory(), hmem, zmalloc_get_rss(), zmalloc_get_fragmentation_ratio(), #ifdef USE_TCMALLOC 1, #else 0, #endif server.loading, server.appendonly, server.dirty, server.bgsavechildpid != -1, server.lastsave, server.bgrewritechildpid != -1, server.stat_numconnections, server.stat_numcommands, server.stat_expiredkeys, server.stat_evictedkeys, server.stat_keyspace_hits, server.stat_keyspace_misses, server.hash_max_zipmap_entries, server.hash_max_zipmap_value, dictSize(server.pubsub_channels), listLength(server.pubsub_patterns), server.ds_enabled != 0, server.masterhost == NULL ? "master" : "slave" ); if (server.masterhost) { info = sdscatprintf(info, "master_host:%s\r\n" "master_port:%d\r\n" "master_link_status:%s\r\n" "master_last_io_seconds_ago:%d\r\n" "master_sync_in_progress:%d\r\n" ,server.masterhost, server.masterport, (server.replstate == REDIS_REPL_CONNECTED) ? "up" : "down", server.master ? 
((int)(time(NULL)-server.master->lastinteraction)) : -1, server.replstate == REDIS_REPL_TRANSFER ); if (server.replstate == REDIS_REPL_TRANSFER) { info = sdscatprintf(info, "master_sync_left_bytes:%ld\r\n" "master_sync_last_io_seconds_ago:%d\r\n" ,(long)server.repl_transfer_left, (int)(time(NULL)-server.repl_transfer_lastio) ); } } if (server.ds_enabled) { lockThreadedIO(); info = sdscatprintf(info, "cache_max_memory:%llu\r\n" "cache_blocked_clients:%lu\r\n" ,(unsigned long long) server.cache_max_memory, (unsigned long) server.cache_blocked_clients ); unlockThreadedIO(); } if (server.loading) { double perc; time_t eta, elapsed; off_t remaining_bytes = server.loading_total_bytes- server.loading_loaded_bytes; perc = ((double)server.loading_loaded_bytes / server.loading_total_bytes) * 100; elapsed = time(NULL)-server.loading_start_time; if (elapsed == 0) { eta = 1; /* A fake 1 second figure if we don't have enough info */ } else { eta = (elapsed*remaining_bytes)/server.loading_loaded_bytes; } info = sdscatprintf(info, "loading_start_time:%ld\r\n" "loading_total_bytes:%llu\r\n" "loading_loaded_bytes:%llu\r\n" "loading_loaded_perc:%.2f\r\n" "loading_eta_seconds:%ld\r\n" ,(unsigned long) server.loading_start_time, (unsigned long long) server.loading_total_bytes, (unsigned long long) server.loading_loaded_bytes, perc, eta ); } for (j = 0; j < server.dbnum; j++) { long long keys, vkeys; keys = dictSize(server.db[j].dict); vkeys = dictSize(server.db[j].expires); if (keys || vkeys) { info = sdscatprintf(info, "db%d:keys=%lld,expires=%lld\r\n", j, keys, vkeys); } } return info; }
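One line both variants report is mem_fragmentation_ratio, which is simply the resident set size divided by the allocator-accounted bytes. A self-contained sketch of that arithmetic follows; the sample numbers are invented:

#include <stdio.h>
#include <stddef.h>

/* RSS / used: a ratio above 1.0 suggests the process holds pages the
 * allocator has already freed (fragmentation); well below 1.0 it usually
 * means part of the dataset has been swapped out. */
static double frag_ratio(size_t rss, size_t used)
{
    return used ? (double)rss / (double)used : 0.0;
}

int main(void)
{
    /* e.g. 120 MB resident against 100 MB allocated -> 1.20 */
    printf("mem_fragmentation_ratio:%.2f\n",
           frag_ratio((size_t)120 << 20, (size_t)100 << 20));
    return 0;
}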
{'added': [(621, ' /* Remove a few cached objects from memory if we are over the'), (622, ' * configured memory limit */'), (623, ' while (server.ds_enabled && zmalloc_used_memory() >'), (624, ' server.cache_max_memory)'), (625, ' {'), (626, ' cacheFreeOneEntry();'), (644, ' /* Awake clients that got all the on disk keys they requested */'), (645, ' if (server.ds_enabled && listLength(server.io_ready_clients)) {'), (656, ' server.cache_blocked_clients--;'), (775, ' server.ds_enabled = 0;'), (776, ' server.ds_path = zstrdup("/tmp/redis.ds");'), (777, ' server.cache_max_memory = 64LL*1024*1024; /* 64 MB of RAM */'), (778, ' server.cache_blocked_clients = 0;'), (858, ' if (server.ds_enabled)'), (896, ' if (server.ds_enabled) dsInit();'), (1035, ' if (server.ds_enabled && blockClientOnSwappedKeys(c,cmd))'), (1036, ' return REDIS_ERR;'), (1169, ' "ds_enabled:%d\\r\\n"'), (1212, ' server.ds_enabled != 0,'), (1239, ' if (server.ds_enabled) {'), (1242, ' "cache_max_memory:%llu\\r\\n"'), (1243, ' "cache_blocked_clients:%lu\\r\\n"'), (1244, ' ,(unsigned long long) server.cache_max_memory,'), (1245, ' (unsigned long) server.cache_blocked_clients')], 'deleted': [(621, ' /* Swap a few keys on disk if we are over the memory limit and VM'), (622, ' * is enbled. Try to free objects from the free list first. */'), (623, ' if (vmCanSwapOut()) {'), (624, ' while (server.vm_enabled && zmalloc_used_memory() >'), (625, ' server.vm_max_memory)'), (626, ' {'), (627, ' int retval = (server.vm_max_threads == 0) ?'), (628, ' vmSwapOneObjectBlocking() :'), (629, ' vmSwapOneObjectThreaded();'), (630, ' if (retval == REDIS_ERR && !(loops % 300) &&'), (631, ' zmalloc_used_memory() >'), (632, ' (server.vm_max_memory+server.vm_max_memory/10))'), (633, ' {'), (634, ' redisLog(REDIS_WARNING,"WARNING: vm-max-memory limit exceeded by more than 10%% but unable to swap more objects out!");'), (635, ' }'), (636, ' /* Note that when using threade I/O we free just one object,'), (637, ' * because anyway when the I/O thread in charge to swap this'), (638, ' * object out will finish, the handler of completed jobs'), (639, ' * will try to swap more objects if we are still out of memory. 
*/'), (640, ' if (retval == REDIS_ERR || server.vm_max_threads > 0) break;'), (641, ' }'), (659, ' /* Awake clients that got all the swapped keys they requested */'), (660, ' if (server.vm_enabled && listLength(server.io_ready_clients)) {'), (671, ' server.vm_blocked_clients--;'), (790, ' server.vm_enabled = 0;'), (791, ' server.vm_swap_file = zstrdup("/tmp/redis-%p.vm");'), (792, ' server.vm_page_size = 256; /* 256 bytes per page */'), (793, ' server.vm_pages = 1024*1024*100; /* 104 millions of pages */'), (794, ' server.vm_max_memory = 1024LL*1024*1024*1; /* 1 GB of RAM */'), (795, ' server.vm_max_threads = 4;'), (796, ' server.vm_blocked_clients = 0;'), (876, ' if (server.vm_enabled)'), (914, ' if (server.vm_enabled) vmInit();'), (1053, ' if (server.vm_enabled && server.vm_max_threads > 0 &&'), (1054, ' blockClientOnSwappedKeys(c,cmd)) return REDIS_ERR;'), (1075, ' if (server.vm_enabled) unlink(server.vm_swap_file);'), (1188, ' "vm_enabled:%d\\r\\n"'), (1231, ' server.vm_enabled != 0,'), (1258, ' if (server.vm_enabled) {'), (1261, ' "vm_conf_max_memory:%llu\\r\\n"'), (1262, ' "vm_conf_page_size:%llu\\r\\n"'), (1263, ' "vm_conf_pages:%llu\\r\\n"'), (1264, ' "vm_stats_used_pages:%llu\\r\\n"'), (1265, ' "vm_stats_swapped_objects:%llu\\r\\n"'), (1266, ' "vm_stats_swappin_count:%llu\\r\\n"'), (1267, ' "vm_stats_swappout_count:%llu\\r\\n"'), (1268, ' "vm_stats_io_newjobs_len:%lu\\r\\n"'), (1269, ' "vm_stats_io_processing_len:%lu\\r\\n"'), (1270, ' "vm_stats_io_processed_len:%lu\\r\\n"'), (1271, ' "vm_stats_io_active_threads:%lu\\r\\n"'), (1272, ' "vm_stats_blocked_clients:%lu\\r\\n"'), (1273, ' ,(unsigned long long) server.vm_max_memory,'), (1274, ' (unsigned long long) server.vm_page_size,'), (1275, ' (unsigned long long) server.vm_pages,'), (1276, ' (unsigned long long) server.vm_stats_used_pages,'), (1277, ' (unsigned long long) server.vm_stats_swapped_objects,'), (1278, ' (unsigned long long) server.vm_stats_swapins,'), (1279, ' (unsigned long long) server.vm_stats_swapouts,'), (1280, ' (unsigned long) listLength(server.io_newjobs),'), (1281, ' (unsigned long) listLength(server.io_processing),'), (1282, ' (unsigned long) listLength(server.io_processed),'), (1283, ' (unsigned long) server.io_active_threads,'), (1284, ' (unsigned long) server.vm_blocked_clients')]}
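The heart of the added hunk above is a single back-pressure loop: while memory use exceeds the cache budget, free one cached entry at a time. Stripped of the Redis globals, the shape is as follows (the accessor names are hypothetical):

#include <stddef.h>

/* Evict entries one at a time until usage drops under the budget.
 * evict_one() must make progress (free at least something) or report
 * failure, otherwise this loop would never terminate. */
static void enforce_cache_budget(size_t (*used_memory)(void),
                                 size_t budget,
                                 int (*evict_one)(void))
{
    while (used_memory() > budget)
        if (evict_one() != 0)
            break;   /* nothing left to evict */
}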
24
63
1,198
9,350
180
849
14
https://github.com/antirez/redis
CVE-2013-0178
CWE-20
2,555
verifier.c
C
adjust_scalar_min_max_vals
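The method named above, adjust_scalar_min_max_vals, maintains signed and unsigned min/max bounds for scalar registers as the verifier walks each ALU instruction. Below is a toy, self-contained version of the idea for the addition case only; the struct and helper are invented for illustration (the kernel's real state lives in struct bpf_reg_state), so treat this as a sketch of the technique rather than the kernel code:

#include <stdint.h>

struct toy_bounds { int64_t smin, smax; uint64_t umin, umax; };

static int sadd_overflows(int64_t a, int64_t b)
{
    return (b > 0 && a > INT64_MAX - b) || (b < 0 && a < INT64_MIN - b);
}

/* Propagate bounds through dst += src. If an endpoint computation can
 * overflow or wrap, the only sound answer is "unknown", i.e. full range. */
static void toy_adjust_add(struct toy_bounds *dst, const struct toy_bounds *src)
{
    if (sadd_overflows(dst->smin, src->smin) ||
        sadd_overflows(dst->smax, src->smax)) {
        dst->smin = INT64_MIN;
        dst->smax = INT64_MAX;
    } else {
        dst->smin += src->smin;
        dst->smax += src->smax;
    }
    /* unsigned wraparound check: a + b < b means the sum wrapped */
    if (dst->umin + src->umin < src->umin ||
        dst->umax + src->umax < src->umax) {
        dst->umin = 0;
        dst->umax = UINT64_MAX;
    } else {
        dst->umin += src->umin;
        dst->umax += src->umax;
    }
}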
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * Copyright (c) 2016 Facebook * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/bpf.h> #include <linux/bpf_verifier.h> #include <linux/filter.h> #include <net/netlink.h> #include <linux/file.h> #include <linux/vmalloc.h> #include <linux/stringify.h> #include <linux/bsearch.h> #include <linux/sort.h> #include <linux/perf_event.h> #include "disasm.h" static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { #define BPF_PROG_TYPE(_id, _name) \ [_id] = & _name ## _verifier_ops, #define BPF_MAP_TYPE(_id, _ops) #include <linux/bpf_types.h> #undef BPF_PROG_TYPE #undef BPF_MAP_TYPE }; /* bpf_check() is a static code analyzer that walks eBPF program * instruction by instruction and updates register/stack state. * All paths of conditional branches are analyzed until 'bpf_exit' insn. * * The first pass is depth-first-search to check that the program is a DAG. * It rejects the following programs: * - larger than BPF_MAXINSNS insns * - if loop is present (detected via back-edge) * - unreachable insns exist (shouldn't be a forest. program = one function) * - out of bounds or malformed jumps * The second pass is all possible path descent from the 1st insn. * Since it's analyzing all pathes through the program, the length of the * analysis is limited to 64k insn, which may be hit even if total number of * insn is less then 4K, but there are too many branches that change stack/regs. * Number of 'branches to be analyzed' is limited to 1k * * On entry to each instruction, each register has a type, and the instruction * changes the types of the registers depending on instruction semantics. * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is * copied to R1. * * All registers are 64-bit. * R0 - return register * R1-R5 argument passing registers * R6-R9 callee saved registers * R10 - frame pointer read-only * * At the start of BPF program the register R1 contains a pointer to bpf_context * and has type PTR_TO_CTX. * * Verifier tracks arithmetic operations on pointers in case: * BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), * BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20), * 1st insn copies R10 (which has FRAME_PTR) type into R1 * and 2nd arithmetic instruction is pattern matched to recognize * that it wants to construct a pointer to some element within stack. * So after 2nd insn, the register R1 has type PTR_TO_STACK * (and -20 constant is saved for further stack bounds checking). * Meaning that this reg is a pointer to stack plus known immediate constant. * * Most of the time the registers have SCALAR_VALUE type, which * means the register has some value, but it's not a valid pointer. * (like pointer plus pointer becomes SCALAR_VALUE type) * * When verifier sees load or store instructions the type of base register * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer * types recognized by check_mem_access() function. 
* * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' * and the range of [ptr, ptr + map's value_size) is accessible. * * registers used to pass values to function calls are checked against * function argument constraints. * * ARG_PTR_TO_MAP_KEY is one of such argument constraints. * It means that the register type passed to this function must be * PTR_TO_STACK and it will be used inside the function as * 'pointer to map element key' * * For example the argument constraints for bpf_map_lookup_elem(): * .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, * .arg1_type = ARG_CONST_MAP_PTR, * .arg2_type = ARG_PTR_TO_MAP_KEY, * * ret_type says that this function returns 'pointer to map elem value or null' * function expects 1st argument to be a const pointer to 'struct bpf_map' and * 2nd argument should be a pointer to stack, which will be used inside * the helper function as a pointer to map element key. * * On the kernel side the helper function looks like: * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) * { * struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; * void *key = (void *) (unsigned long) r2; * void *value; * * here kernel can access 'key' and 'map' pointers safely, knowing that * [key, key + map->key_size) bytes are valid and were initialized on * the stack of eBPF program. * } * * Corresponding eBPF program may look like: * BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), // after this insn R2 type is FRAME_PTR * BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK * BPF_LD_MAP_FD(BPF_REG_1, map_fd), // after this insn R1 type is CONST_PTR_TO_MAP * BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), * here verifier looks at prototype of map_lookup_elem() and sees: * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok, * Now verifier knows that this map has key of R1->map_ptr->key_size bytes * * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far, * Now verifier checks that [R2, R2 + map's key_size) are within stack limits * and were initialized prior to this call. * If it's ok, then verifier allows this BPF_CALL insn and looks at * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function * returns ether pointer to map value or NULL. * * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off' * insn, the register holding that pointer in the true branch changes state to * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false * branch. See check_cond_jmp_op(). * * After the call R0 is set to return type of the function and registers R1-R5 * are set to NOT_INIT to indicate that they are no longer readable. 
 */

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) +	\
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_state = (unsigned long)map |
			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
};

static DEFINE_MUTEX(bpf_verifier_lock);

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{ struct bpf_verifier_env *env = private_data; va_list args; if (!bpf_verifier_log_needed(&env->log)) return; va_start(args, fmt); bpf_verifier_vlog(&env->log, fmt, args); va_end(args); } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", }; static void print_liveness(struct bpf_verifier_env *env, enum bpf_reg_liveness live) { if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN)) verbose(env, "_"); if (live & REG_LIVE_READ) verbose(env, "r"); if (live & REG_LIVE_WRITTEN) verbose(env, "w"); } static struct bpf_func_state *func(struct bpf_verifier_env *env, const struct bpf_reg_state *reg) { struct bpf_verifier_state *cur = env->cur_state; return cur->frame[reg->frameno]; } static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { const struct bpf_reg_state *reg; enum bpf_reg_type t; int i; if (state->frameno) verbose(env, " frame%d:", state->frameno); for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d", i); print_liveness(env, reg->live); verbose(env, "=%s", reg_type_str[t]); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); if (t == PTR_TO_STACK) verbose(env, ",call_%d", func(env, reg)->callsite); } else { verbose(env, "(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] == STACK_SPILL) { verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); print_liveness(env, state->stack[i].spilled_ptr.live); verbose(env, "=%s", reg_type_str[state->stack[i].spilled_ptr.type]); } if (state->stack[i].slot_type[0] == STACK_ZERO) verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE); } verbose(env, "\n"); } static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src) { if (!src->stack) return 0; if (WARN_ON_ONCE(dst->allocated_stack < 
		    src->allocated_stack)) {
		/* internal bug, make state invalid to reject the program */
		memset(dst, 0, sizeof(*dst));
		return -EFAULT;
	}
	memcpy(dst->stack, src->stack,
	       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
	return 0;
}

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. A stack write checked by
 * check_stack_write() calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which this function copies over. It points to previous bpf_verifier_state
 * which is never reallocated
 */
static int realloc_func_state(struct bpf_func_state *state, int size,
			      bool copy_old)
{
	u32 old_size = state->allocated_stack;
	struct bpf_stack_state *new_stack;
	int slot = size / BPF_REG_SIZE;

	if (size <= old_size || !size) {
		if (copy_old)
			return 0;
		state->allocated_stack = slot * BPF_REG_SIZE;
		if (!size && old_size) {
			kfree(state->stack);
			state->stack = NULL;
		}
		return 0;
	}
	new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
				  GFP_KERNEL);
	if (!new_stack)
		return -ENOMEM;
	if (copy_old) {
		if (state->stack)
			memcpy(new_stack, state->stack,
			       sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
		memset(new_stack + old_size / BPF_REG_SIZE, 0,
		       sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
	}
	state->allocated_stack = slot * BPF_REG_SIZE;
	kfree(state->stack);
	state->stack = new_stack;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->stack);
	kfree(state);
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	/* if dst has more stack frames than src has, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->curframe = src->curframe;
	dst_state->parent = src->parent;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct
bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose(env, "BPF program is too complex\n"); goto err; } return &elem->st; err: free_verifier_state(env->cur_state, true); env->cur_state = NULL; /* pop all elements and return */ while (!pop_stack(env, NULL, NULL)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; static void __mark_reg_not_init(struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. */ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { reg->id = 0; reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void __mark_reg_const_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); reg->off = 0; reg->type = SCALAR_VALUE; } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. */ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 
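	 * As a worked (illustrative) instance of that negative case: -3 and
	 * -1 viewed as u64 are 0xfffffffffffffffd and 0xffffffffffffffff, so
	 * taking max() for the minimum and min() for the maximum below keeps
	 * exactly the same set of values in both domains.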
*/ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. */ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { reg->var_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { reg->type = SCALAR_VALUE; reg->id = 0; reg->off = 0; reg->var_off = tnum_unknown; reg->frameno = 0; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_unknown(regs + regno); } static void __mark_reg_not_init(struct bpf_reg_state *reg) { __mark_reg_unknown(reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) { struct bpf_reg_state *regs = state->regs; int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); regs[BPF_REG_FP].frameno = state->frameno; /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); } #define BPF_MAIN_FUNC (-1) static void init_func_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int callsite, int frameno, int subprogno) { state->callsite = callsite; state->frameno = frameno; state->subprogno = subprogno; init_reg_state(env, state); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static int cmp_subprogs(const void *a, const void *b) { return ((struct bpf_subprog_info *)a)->start - ((struct bpf_subprog_info *)b)->start; } static int find_subprog(struct bpf_verifier_env *env, int off) { struct 
bpf_subprog_info *p; p = bsearch(&off, env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs); if (!p) return -ENOENT; return p - env->subprog_info; } static int add_subprog(struct bpf_verifier_env *env, int off) { int insn_cnt = env->prog->len; int ret; if (off >= insn_cnt || off < 0) { verbose(env, "call to invalid destination\n"); return -EINVAL; } ret = find_subprog(env, off); if (ret >= 0) return 0; if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { verbose(env, "too many subprograms\n"); return -E2BIG; } env->subprog_info[env->subprog_cnt++].start = off; sort(env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs, NULL); return 0; } static int check_subprogs(struct bpf_verifier_env *env) { int i, ret, subprog_start, subprog_end, off, cur_subprog = 0; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; /* Add entry function. */ ret = add_subprog(env, 0); if (ret < 0) return ret; /* determine subprog starts. The end is one before the next starts */ for (i = 0; i < insn_cnt; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; if (!env->allow_ptr_leaks) { verbose(env, "function calls to other bpf functions are allowed for root only\n"); return -EPERM; } if (bpf_prog_is_dev_bound(env->prog->aux)) { verbose(env, "function calls in offloaded programs are not supported yet\n"); return -EINVAL; } ret = add_subprog(env, i + insn[i].imm + 1); if (ret < 0) return ret; } /* Add a fake 'exit' subprog which could simplify subprog iteration * logic. 'subprog_cnt' should not be increased. */ subprog[env->subprog_cnt].start = insn_cnt; if (env->log.level > 1) for (i = 0; i < env->subprog_cnt; i++) verbose(env, "func#%d @%d\n", i, subprog[i].start); /* now check that all jumps are within the same subprog */ subprog_start = subprog[cur_subprog].start; subprog_end = subprog[cur_subprog + 1].start; for (i = 0; i < insn_cnt; i++) { u8 code = insn[i].code; if (BPF_CLASS(code) != BPF_JMP) goto next; if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) goto next; off = i + insn[i].off + 1; if (off < subprog_start || off >= subprog_end) { verbose(env, "jump out of range from insn %d to %d\n", i, off); return -EINVAL; } next: if (i == subprog_end - 1) { /* to avoid fall-through from one subprog into another * the last insn of the subprog should be either exit * or unconditional jump back */ if (code != (BPF_JMP | BPF_EXIT) && code != (BPF_JMP | BPF_JA)) { verbose(env, "last insn is not an exit or jmp\n"); return -EINVAL; } subprog_start = subprog_end; cur_subprog++; if (cur_subprog < env->subprog_cnt) subprog_end = subprog[cur_subprog + 1].start; } } return 0; } static struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env, const struct bpf_verifier_state *state, struct bpf_verifier_state *parent, u32 regno) { struct bpf_verifier_state *tmp = NULL; /* 'parent' could be a state of caller and * 'state' could be a state of callee. In such case * parent->curframe < state->curframe * and it's ok for r1 - r5 registers * * 'parent' could be a callee's state after it bpf_exit-ed. 
* In such case parent->curframe > state->curframe * and it's ok for r0 only */ if (parent->curframe == state->curframe || (parent->curframe < state->curframe && regno >= BPF_REG_1 && regno <= BPF_REG_5) || (parent->curframe > state->curframe && regno == BPF_REG_0)) return parent; if (parent->curframe > state->curframe && regno >= BPF_REG_6) { /* for callee saved regs we have to skip the whole chain * of states that belong to callee and mark as LIVE_READ * the registers before the call */ tmp = parent; while (tmp && tmp->curframe != state->curframe) { tmp = tmp->parent; } if (!tmp) goto bug; parent = tmp; } else { goto bug; } return parent; bug: verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp); verbose(env, "regno %d parent frame %d current frame %d\n", regno, parent->curframe, state->curframe); return NULL; } static int mark_reg_read(struct bpf_verifier_env *env, const struct bpf_verifier_state *state, struct bpf_verifier_state *parent, u32 regno) { bool writes = parent == state->parent; /* Observe write marks */ if (regno == BPF_REG_FP) /* We don't need to worry about FP liveness because it's read-only */ return 0; while (parent) { /* if read wasn't screened by an earlier write ... */ if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN) break; parent = skip_callee(env, state, parent, regno); if (!parent) return -EFAULT; /* ... then we depend on parent's value */ parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ; state = parent; parent = state->parent; writes = true; } return 0; } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (regs[regno].type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } return mark_reg_read(env, vstate, vstate->parent, regno); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case CONST_PTR_TO_MAP: return true; default: return false; } } /* Does this register contain a constant zero? 
*/ static bool register_is_null(struct bpf_reg_state *reg) { return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_func_state *state, /* func where register points to */ int off, int size, int value_regno, int insn_idx) { struct bpf_func_state *cur; /* state of the current function */ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; enum bpf_reg_type type; err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } cur = env->cur_state->frame[env->cur_state->curframe]; if (value_regno >= 0 && is_spillable_regtype((type = cur->regs[value_regno].type))) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } if (state != cur && type == PTR_TO_STACK) { verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } /* save register state */ state->stack[spi].spilled_ptr = cur->regs[value_regno]; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) { if (state->stack[spi].slot_type[i] == STACK_MISC && !env->allow_ptr_leaks) { int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; int soff = (-spi - 1) * BPF_REG_SIZE; /* detected reuse of integer stack slot with a pointer * which means either llvm is reusing stack slot or * an attacker is trying to exploit CVE-2018-3639 * (speculative store bypass) * Have to sanitize that slot with preemptive * store of zero. */ if (*poff && *poff != soff) { /* disallow programs where single insn stores * into two different stack slots, since verifier * cannot sanitize them */ verbose(env, "insn %d cannot access two stack slots fp%d and fp%d", insn_idx, *poff, soff); return -EINVAL; } *poff = soff; } state->stack[spi].slot_type[i] = STACK_SPILL; } } else { u8 type = STACK_MISC; /* regular write of data into stack */ state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon * when stack slots are partially written. 
		 * This heuristic means that read propagation will be
		 * conservative, since it will add reg_live_read marks to
		 * stack slots all the way to the first state when a program
		 * writes+reads less than 8 bytes
		 */
		if (size == BPF_REG_SIZE)
			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		/* when we zero initialize stack slots mark them as such */
		if (value_regno >= 0 &&
		    register_is_null(&cur->regs[value_regno]))
			type = STACK_ZERO;

		for (i = 0; i < size; i++)
			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
				type;
	}
	return 0;
}

/* registers of every function are unique and mark_reg_read() propagates
 * the liveness in the following cases:
 * - from callee into caller for R1 - R5 that were used as arguments
 * - from caller into callee for R0 that is used as the result of the call
 * - from caller to the same caller skipping states of the callee for R6 - R9,
 *   since R6 - R9 are callee saved by implicit function prologue and
 *   caller's R6 != callee's R6, so when we propagate liveness up to
 *   parent states we need to skip callee states for R6 - R9.
 *
 * stack slot marking is different, since stacks of caller and callee are
 * accessible in both (since caller can pass a pointer to caller's stack to
 * callee which can pass it to another function), hence mark_stack_slot_read()
 * has to propagate the stack liveness to all parent states at given frame
 * number.
 * Consider code:
 * f1() {
 *         ptr = fp - 8;
 *         *ptr = ctx;
 *         call f2 {
 *                 .. = *ptr;
 *         }
 *         .. = *ptr;
 * }
 * First *ptr is reading from f1's stack and mark_stack_slot_read() has
 * to mark liveness at the f1's frame and not f2's frame.
 * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
 * to propagate liveness to f2 states at f1's frame level and further into
 * f1 states at f1's frame level until write into that stack slot
 */
static void mark_stack_slot_read(struct bpf_verifier_env *env,
				 const struct bpf_verifier_state *state,
				 struct bpf_verifier_state *parent,
				 int slot, int frameno)
{
	bool writes = parent == state->parent; /* Observe write marks */

	while (parent) {
		if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
			/* since LIVE_WRITTEN mark is only done for full 8-byte
			 * write the read marks are conservative and parent
			 * state may not even have the stack allocated. In such
			 * case end the propagation, since the loop reached
			 * beginning of the function
			 */
			break;
		/* if read wasn't screened by an earlier write ... */
		if (writes &&
		    state->frame[frameno]->stack[slot].spilled_ptr.live & REG_LIVE_WRITTEN)
			break;
		/* ... then we depend on parent's value */
		parent->frame[frameno]->stack[slot].spilled_ptr.live |=
			REG_LIVE_READ;
		state = parent;
		parent = state->parent;
		writes = true;
	}
}

static int check_stack_read(struct bpf_verifier_env *env,
			    struct bpf_func_state *reg_state /* func where register points to */,
			    int off, int size, int value_regno)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
	u8 *stype;

	if (reg_state->allocated_stack <= slot) {
		verbose(env, "invalid read from stack off %d+0 size %d\n",
			off, size);
		return -EACCES;
	}
	stype = reg_state->stack[spi].slot_type;

	if (stype[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose(env, "invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
				verbose(env, "corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0) {
			/* restore register state from stack */
			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
			/* mark reg as written since spilled pointer state likely
			 * has its liveness marks cleared by is_state_visited()
			 * which resets stack/reg liveness for state transitions
			 */
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		mark_stack_slot_read(env, vstate, vstate->parent, spi,
				     reg_state->frameno);
		return 0;
	} else {
		int zeros = 0;

		for (i = 0; i < size; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
				continue;
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
				zeros++;
				continue;
			}
			verbose(env, "invalid read from stack off %d+%d size %d\n",
				off, i, size);
			return -EACCES;
		}
		mark_stack_slot_read(env, vstate, vstate->parent, spi,
				     reg_state->frameno);
		if (value_regno >= 0) {
			if (zeros == size) {
				/* any size read into register is zero extended,
				 * so the whole register == const_zero
				 */
				__mark_reg_const_zero(&state->regs[value_regno]);
			} else {
				/* have read misc data from the stack */
				mark_reg_unknown(env, state->regs, value_regno);
			}
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		return 0;
	}
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
			      int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map = regs[regno].map_ptr;

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    off + size > map->value_size) {
		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}

/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
			    int off, int size, bool zero_size_allowed)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regno];
	int err;

	/* We may have adjusted the register to this map value, so we
	 * need to try adding each of min_value and max_value to off
	 * to make sure our theoretical access will be safe.
	 */
	if (env->log.level)
		print_verifier_state(env, state);
	/* The minimum value is only important with signed
	 * comparisons where we can't assume the floor of a
	 * value is 0. If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
	 * will have a set floor within our range.
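	 * E.g. (a hypothetical C-level source): with
	 *   int idx = ...; if (idx < 10) v = val[idx];
	 * only the upper bound is proven; a negative idx would still reach
	 * the access, which is what the smin_value check below rejects.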
*/ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->smin_value + off, size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the array range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too. */ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->umax_value + off, size, zero_size_allowed); if (err) verbose(env, "R%d max value is outside of the array range\n", regno); return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_SK_REUSEPORT: /* dst_input() and dst_output() can't write for now */ if (t == BPF_WRITE) return false; /* fallthrough */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_SK_MSG: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; default: return false; } } static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || (u64)off + size > reg->range) { verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; } return 0; } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_packet_access(env, regno, off, size, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, env->prog, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. 
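		 * For instance (illustrative): a 1-byte load from a 4-byte
		 * ctx field may later be converted into a 4-byte load of the
		 * whole field plus a shift and mask, driven by the
		 * ctx_field_size recorded below.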
*/ *reg_type = info.reg_type; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); } static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = cur_regs(env) + regno; return reg->type == PTR_TO_CTX; } static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = cur_regs(env) + regno; return type_is_pkt_pointer(reg->type); } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict_alignment_once) { bool strict = env->strict_alignment || strict_alignment_once; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; case PTR_TO_CTX: pointer_desc = "context "; break; case PTR_TO_STACK: pointer_desc = "stack "; /* The stack spill tracking logic in check_stack_write() * and check_stack_read() relies on stack accesses being * aligned. 
*/ strict = true; break; default: break; } return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict); } static int update_stack_depth(struct bpf_verifier_env *env, const struct bpf_func_state *func, int off) { u16 stack = env->subprog_info[func->subprogno].stack_depth; if (stack >= -off) return 0; /* update known max for given subprogram */ env->subprog_info[func->subprogno].stack_depth = -off; return 0; } /* starting from main bpf function walk all instructions of the function * and recursively walk all callees that given function can call. * Ignore jump and exit insns. * Since recursion is prevented by check_cfg() this algorithm * only needs a local stack of MAX_CALL_FRAMES to remember callsites */ static int check_max_stack_depth(struct bpf_verifier_env *env) { int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int ret_insn[MAX_CALL_FRAMES]; int ret_prog[MAX_CALL_FRAMES]; process_func: /* round up to 32-bytes, since this is granularity * of interpreter stack size */ depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); if (depth > MAX_BPF_STACK) { verbose(env, "combined stack size of %d calls is %d. Too large\n", frame + 1, depth); return -EACCES; } continue_func: subprog_end = subprog[idx + 1].start; for (; i < subprog_end; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; /* remember insn and function to return to */ ret_insn[frame] = i + 1; ret_prog[frame] = idx; /* find the callee */ i = i + insn[i].imm + 1; idx = find_subprog(env, i); if (idx < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i); return -EFAULT; } frame++; if (frame >= MAX_CALL_FRAMES) { WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); return -EFAULT; } goto process_func; } /* end of for() loop means the last insn of the 'subprog' * was reached. Doesn't matter whether it was JA or EXIT */ if (frame == 0) return 0; depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); frame--; i = ret_insn[frame]; idx = ret_prog[frame]; goto continue_func; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON static int get_callee_stack_depth(struct bpf_verifier_env *env, const struct bpf_insn *insn, int idx) { int start = idx + insn->imm + 1, subprog; subprog = find_subprog(env, start); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", start); return -EFAULT; } return env->subprog_info[subprog].stack_depth; } #endif static int check_ctx_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno) { /* Access to ctx or passing it to a helper is only allowed in * its original, unmodified form. 
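	 * E.g. (hypothetical): after BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
	 * a load through R1 is rejected here, since reg->off is now 8.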
*/ if (reg->off) { verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", regno, reg->off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); return -EACCES; } return 0; } /* truncate register to smaller size (in bytes) * must be called with size < BPF_REG_SIZE */ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) { u64 mask; /* clear high bits in bit representation */ reg->var_off = tnum_cast(reg->var_off, size); /* fix arithmetic bounds */ mask = ((u64)1 << (size * 8)) - 1; if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { reg->umin_value &= mask; reg->umax_value &= mask; } else { reg->umin_value = 0; reg->umax_value = mask; } reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value; } /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int bpf_size, enum bpf_access_type t, int value_regno, bool strict_alignment_once) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; struct bpf_func_state *state; int size, err = 0; size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); if (err) return err; /* for access checks, reg->off is just part of off */ off += reg->off; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); return -EACCES; } err = check_map_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into ctx\n", value_regno); return -EACCES; } err = check_ctx_reg(env, reg, regno); if (err < 0) return err; err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) mark_reg_unknown(env, regs, value_regno); else mark_reg_known_zero(env, regs, value_regno); regs[value_regno].id = 0; regs[value_regno].off = 0; regs[value_regno].range = 0; regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { /* stack accesses must be at a fixed offset, so that we can * determine what type of data were returned. * See check_stack_read(). 
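		 * E.g. (hypothetical): after R2 = r10; R2 += R3 with an
		 * unknown R3, var_off is no longer constant, so the access
		 * is rejected just below.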
*/ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } off += reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } state = func(env, reg); err = update_stack_depth(env, state, off); if (err) return err; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno, insn_idx); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ coerce_reg_to_size(&regs[value_regno], size); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", insn->src_reg); return -EACCES; } if (is_ctx_reg(env, insn->dst_reg) || is_pkt_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ? "context" : "packet"); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1, true); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, true); } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. 
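 * For example (illustrative): a helper taking ARG_PTR_TO_MEM plus
 * ARG_CONST_SIZE that is passed fp-16 with size 16 must find the bytes at
 * fp-16..fp-1 inside the stack and initialized.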
*/ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *reg = cur_regs(env) + regno; struct bpf_func_state *state = func(env, reg); int off, i, slot, spi; if (reg->type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(reg)) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[reg->type], reg_type_str[PTR_TO_STACK]); return -EACCES; } /* Only allow fixed-offset stack reads */ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "invalid variable stack read R%d var_off=%s\n", regno, tn_buf); return -EACCES; } off = reg->off + reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); return -EACCES; } if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = 0; i < access_size; i++) { u8 *stype; slot = -(off + i) - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot) goto err; stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; if (*stype == STACK_MISC) goto mark; if (*stype == STACK_ZERO) { /* helper can write anything into the stack */ *stype = STACK_MISC; goto mark; } err: verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; mark: /* reading any byte out of 8-byte 'spill_slot' will cause * the whole slot to be marked as 'read' */ mark_stack_slot_read(env, env->cur_state, env->cur_state->parent, spi, state->frameno); } return update_stack_depth(env, state, off); } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } } static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_MEM || type == ARG_PTR_TO_MEM_OR_NULL || type == ARG_PTR_TO_UNINIT_MEM; } static bool arg_type_is_mem_size(enum bpf_arg_type type) { return type == ARG_CONST_SIZE || type == ARG_CONST_SIZE_OR_ZERO; } static int check_func_arg(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; enum bpf_reg_type expected_type, type = reg->type; int err = 0; if (arg_type == ARG_DONTCARE) return 0; err = check_reg_arg(env, regno, SRC_OP); if (err) return err; if (arg_type == ARG_ANYTHING) { if (is_pointer_value(env, regno)) { verbose(env, "R%d leaks addr into helper function\n", regno); return -EACCES; } return 0; } if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) { verbose(env, "helper access to the packet is not allowed\n"); return -EACCES; } if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = 
PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { expected_type = SCALAR_VALUE; if (type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_CTX) { expected_type = PTR_TO_CTX; if (type != expected_type) goto err_type; err = check_ctx_reg(env, reg, regno); if (err < 0) return err; } else if (arg_type_is_mem_ptr(arg_type)) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ if (register_is_null(reg) && arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, false, NULL); } else if (arg_type_is_mem_size(arg_type)) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* remember the mem_size which may be used later * to refine return values. */ meta->msize_smax_value = reg->smax_value; meta->msize_umax_value = reg->umax_value; /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. 
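			 * E.g. (illustrative): a raw-mode helper called with
			 * a variable 'len' must find the stack initialized up
			 * to len's maximum bound, since how much the helper
			 * actually writes can no longer be tracked.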
*/ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... */ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_perf_event_read_value) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; case BPF_MAP_TYPE_CGROUP_STORAGE: if (func_id != BPF_FUNC_get_local_storage) goto error; break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. */ case BPF_MAP_TYPE_DEVMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; /* Restrict bpf side of cpumap and xskmap, open when use-cases * appear. */ case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_XSKMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_map) goto error; break; case BPF_MAP_TYPE_SOCKHASH: if (func_id != BPF_FUNC_sk_redirect_hash && func_id != BPF_FUNC_sock_hash_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_hash) goto error; break; case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: if (func_id != BPF_FUNC_sk_select_reuseport) goto error; break; default: break; } /* ... and second from the function itself. 
*/ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; if (env->subprog_cnt > 1) { verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); return -EINVAL; } break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_CPUMAP && map->map_type != BPF_MAP_TYPE_XSKMAP) goto error; break; case BPF_FUNC_sk_redirect_map: case BPF_FUNC_msg_redirect_map: case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sk_redirect_hash: case BPF_FUNC_msg_redirect_hash: case BPF_FUNC_sock_hash_update: if (map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; case BPF_FUNC_get_local_storage: if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE) goto error; break; case BPF_FUNC_sk_select_reuseport: if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static bool check_raw_mode_ok(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; /* We only support one arg being in raw mode at the moment, * which is sufficient for the helper functions we have * right now. */ return count <= 1; } static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, enum bpf_arg_type arg_next) { return (arg_type_is_mem_ptr(arg_curr) && !arg_type_is_mem_size(arg_next)) || (!arg_type_is_mem_ptr(arg_curr) && arg_type_is_mem_size(arg_next)); } static bool check_arg_pair_ok(const struct bpf_func_proto *fn) { /* bpf_xxx(..., buf, len) call will access 'len' * bytes from memory 'buf'. Both arg types need * to be paired, so make sure there's no buggy * helper function specification. */ if (arg_type_is_mem_size(fn->arg1_type) || arg_type_is_mem_ptr(fn->arg5_type) || check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) return false; return true; } static int check_func_proto(const struct bpf_func_proto *fn) { return check_raw_mode_ok(fn) && check_arg_pair_ok(fn) ? 0 : -EINVAL; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. 
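 * This is triggered (as one illustrative case) by helpers such as
 * bpf_skb_pull_data() that may reallocate packet memory, so previously
 * derived packet pointers must be re-derived from ctx after the call.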
 */
static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
				     struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs, *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (reg_is_pkt_pointer_any(&regs[i]))
			mark_reg_unknown(env, regs, i);

	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] != STACK_SPILL)
			continue;
		reg = &state->stack[i].spilled_ptr;
		if (reg_is_pkt_pointer_any(reg))
			__mark_reg_unknown(reg);
	}
}

static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	int i;

	for (i = 0; i <= vstate->curframe; i++)
		__clear_all_pkt_pointers(env, vstate->frame[i]);
}

static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			   int *insn_idx)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_func_state *caller, *callee;
	int i, subprog, target_insn;

	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
		verbose(env, "the call stack of %d frames is too deep\n",
			state->curframe + 2);
		return -E2BIG;
	}

	target_insn = *insn_idx + insn->imm;
	subprog = find_subprog(env, target_insn + 1);
	if (subprog < 0) {
		verbose(env, "verifier bug. No program starts at insn %d\n",
			target_insn + 1);
		return -EFAULT;
	}

	caller = state->frame[state->curframe];
	if (state->frame[state->curframe + 1]) {
		verbose(env, "verifier bug. Frame %d already allocated\n",
			state->curframe + 1);
		return -EFAULT;
	}

	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
	if (!callee)
		return -ENOMEM;
	state->frame[state->curframe + 1] = callee;

	/* callee cannot access r0, r6 - r9 for reading and has to write
	 * into its own stack before reading from it.
	 * callee can read/write into caller's stack
	 */
	init_func_state(env, callee,
			/* remember the callsite, it will be used by bpf_exit */
			*insn_idx /* callsite */,
			state->curframe + 1 /* frameno within this callchain */,
			subprog /* subprog number within this prog */);

	/* copy r1 - r5 args that callee can access */
	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
		callee->regs[i] = caller->regs[i];

	/* after the call registers r0 - r5 were scratched */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, caller->regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}

	/* only increment it after check_reg_arg() finished */
	state->curframe++;

	/* and go analyze first insn of the callee */
	*insn_idx = target_insn;

	if (env->log.level) {
		verbose(env, "caller:\n");
		print_verifier_state(env, caller);
		verbose(env, "callee:\n");
		print_verifier_state(env, callee);
	}
	return 0;
}

static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_func_state *caller, *callee;
	struct bpf_reg_state *r0;

	callee = state->frame[state->curframe];
	r0 = &callee->regs[BPF_REG_0];
	if (r0->type == PTR_TO_STACK) {
		/* technically it's ok to return caller's stack pointer
		 * (or caller's caller's pointer) back to the caller,
		 * since these pointers are valid.
Only current stack * pointer will be invalid as soon as function exits, * but let's be conservative */ verbose(env, "cannot return stack pointer to the caller\n"); return -EINVAL; } state->curframe--; caller = state->frame[state->curframe]; /* return to the caller whatever r0 had in the callee */ caller->regs[BPF_REG_0] = *r0; *insn_idx = callee->callsite + 1; if (env->log.level) { verbose(env, "returning from callee:\n"); print_verifier_state(env, callee); verbose(env, "to caller at %d:\n", *insn_idx); print_verifier_state(env, caller); } /* clear everything in the callee */ free_func_state(callee); state->frame[state->curframe + 1] = NULL; return 0; } static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, int func_id, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *ret_reg = &regs[BPF_REG_0]; if (ret_type != RET_INTEGER || (func_id != BPF_FUNC_get_stack && func_id != BPF_FUNC_probe_read_str)) return; ret_reg->smax_value = meta->msize_smax_value; ret_reg->umax_value = meta->msize_umax_value; __reg_deduce_bounds(ret_reg); __reg_bound_offset(ret_reg); } static int record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, int func_id, int insn_idx) { struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; if (func_id != BPF_FUNC_tail_call && func_id != BPF_FUNC_map_lookup_elem && func_id != BPF_FUNC_map_update_elem && func_id != BPF_FUNC_map_delete_elem) return 0; if (meta->map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } if (!BPF_MAP_PTR(aux->map_state)) bpf_map_ptr_store(aux, meta->map_ptr, meta->map_ptr->unpriv_array); else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr) bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, meta->map_ptr->unpriv_array); return 0; } static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id, env->prog); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. 
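	 * Sketch of the invariant being enforced (illustrative): a
	 * helper such as bpf_skb_store_bytes(skb, ...) that can move
	 * packet data must take the skb/ctx as its first argument, i.e.
	 * in r1; a changes_pkt_data helper with arg1 != ctx is a
	 * kernel-side misconfiguration and is reported just below.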
*/ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; err = check_func_proto(fn); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; err = record_func_map(env, &meta, func_id, insn_idx); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. */ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1, false); if (err) return err; } regs = cur_regs(env); /* check that flags argument in get_local_storage(map, flags) is 0, * this is required because get_local_storage() can't return an error. */ if (func_id == BPF_FUNC_get_local_storage && !register_is_null(&regs[BPF_REG_2])) { verbose(env, "get_local_storage() doesn't support non-zero flags\n"); return -EINVAL; } /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || fn->ret_type == RET_PTR_TO_MAP_VALUE) { if (fn->ret_type == RET_PTR_TO_MAP_VALUE) regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; else regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } do_refine_retval_range(regs, fn->ret_type, func_id, &meta); err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { const char *err_str; #ifdef CONFIG_PERF_EVENTS err = get_callchain_buffers(sysctl_perf_event_max_stack); err_str = "cannot get callchain buffer for func %s#%d\n"; #else err = -ENOTSUPP; err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; #endif if (err) { verbose(env, err_str, func_id_name(func_id), func_id); return err; } env->prog->has_callchain_buf = true; } if (changes_data) clear_all_pkt_pointers(env); return 0; } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ s64 res = (s64)((u64)a + 
(u64)b); if (b < 0) return res > a; return res < a; } static bool signed_sub_overflows(s64 a, s64 b) { /* Do the sub in u64, where overflow is well-defined */ s64 res = (s64)((u64)a - (u64)b); if (b < 0) return res < a; return res > a; } static bool check_reg_sane_offset(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, enum bpf_reg_type type) { bool known = tnum_is_const(reg->var_off); s64 val = reg->var_off.value; s64 smin = reg->smin_value; if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { verbose(env, "math between %s pointer and %lld is not allowed\n", reg_type_str[type], val); return false; } if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { verbose(env, "%s pointer offset %d is not allowed\n", reg_type_str[type], reg->off); return false; } if (smin == S64_MIN) { verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", reg_type_str[type]); return false; } if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { verbose(env, "value %lld makes %s pointer be out of bounds\n", smin, reg_type_str[type]); return false; } return true; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. */ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u8 opcode = BPF_OP(insn->code); u32 dst = insn->dst_reg; dst_reg = &regs[dst]; if ((known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", dst); return -EACCES; } if (ptr_reg->type == CONST_PTR_TO_MAP) { verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_PACKET_END) { verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", dst); return -EACCES; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. 
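	 *
	 * Illustrative sketch (hypothetical registers):
	 *
	 *   r3 += r1   // scalar += pkt ptr: r3 inherits r1's type and id
	 *   r3 += 4    // known constant: folded into the fixed 'off'
	 *   r3 += r4   // unknown scalar: goes into var_off, fresh id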
*/ dst_reg->type = ptr_reg->type; dst_reg->id = ptr_reg->id; if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) return -EINVAL; switch (opcode) { case BPF_ADD: /* We can take a fixed offset as long as it doesn't overflow * the s32 'off' field */ if (known && (ptr_reg->off + smin_val == (s64)(s32)(ptr_reg->off + smin_val))) { /* pointer += K. Accumulate it into fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->off = ptr_reg->off + smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. Note that off_reg->off * == 0, since it's a scalar. * dst_reg gets the pointer type and since some positive * integer value was added to the pointer, give it a new 'id' * if it's a PTR_TO_PACKET. * this creates a new 'base' pointer, off_reg (variable) gets * added into the variable offset, and we copy the fixed offset * from ptr_reg. */ if (signed_add_overflows(smin_ptr, smin_val) || signed_add_overflows(smax_ptr, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr + smin_val; dst_reg->smax_value = smax_ptr + smax_val; } if (umin_ptr + umin_val < umin_ptr || umax_ptr + umax_val < umax_ptr) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value = umin_ptr + umin_val; dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ dst_reg->range = 0; } break; case BPF_SUB: if (dst_reg == off_reg) { /* scalar -= pointer. Creates an unknown scalar */ verbose(env, "R%d tried to subtract pointer from scalar\n", dst); return -EACCES; } /* We don't allow subtraction from FP, because (according to * test_verifier.c test "invalid fp arithmetic", JITs might not * be able to deal with it. */ if (ptr_reg->type == PTR_TO_STACK) { verbose(env, "R%d subtraction from stack pointer prohibited\n", dst); return -EACCES; } if (known && (ptr_reg->off - smin_val == (s64)(s32)(ptr_reg->off - smin_val))) { /* pointer -= K. Subtract it from fixed offset */ dst_reg->smin_value = smin_ptr; dst_reg->smax_value = smax_ptr; dst_reg->umin_value = umin_ptr; dst_reg->umax_value = umax_ptr; dst_reg->var_off = ptr_reg->var_off; dst_reg->id = ptr_reg->id; dst_reg->off = ptr_reg->off - smin_val; dst_reg->range = ptr_reg->range; break; } /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. 
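		 *
		 * Concrete example (illustrative): a pkt pointer with
		 * range = 8 minus a scalar known to be in [0, 4] still
		 * satisfies every check made for bytes [ptr, ptr + 8),
		 * since the result can only move down; only a possibly
		 * negative subtrahend forces range = 0 below.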
*/ if (signed_sub_overflows(smin_ptr, smax_val) || signed_sub_overflows(smax_ptr, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr - smax_val; dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value = umin_ptr - umax_val; dst_reg->umax_value = umax_ptr - umin_val; } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) dst_reg->range = 0; } break; case BPF_AND: case BPF_OR: case BPF_XOR: /* bitwise ops on pointers are troublesome, prohibit. */ verbose(env, "R%d bitwise operator %s on pointer prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; default: /* other operators (e.g. MUL,LSH) produce non-pointer results */ verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) return -EINVAL; __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* WARNING: This function does calculations on 64-bit values, but the actual * execution may occur on 32-bit values. Therefore, things like bitshifts * need extra checks in the 32-bit case. */ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. 
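		 *
		 * Such bounds can only arise on paths that cannot
		 * execute, e.g. (illustrative):
		 *
		 *   if r2 <= 5 goto out    // fall-through: umin = 6
		 *   if r2 >= 3 goto out    // fall-through: umax = 2
		 *
		 * leaving umin_value > umax_value on the dead path.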
*/ __mark_reg_unknown(dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { __mark_reg_unknown(dst_reg); return 0; } switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. 
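			 *
			 * E.g. (illustrative): dst in [0, 255] ANDed
			 * with src in [0, 15] yields at most 15 and
			 * the sign bit stays clear, so the signed
			 * bounds may simply mirror the unsigned ones.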
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_OR:
		if (src_known && dst_known) {
			__mark_reg_known(dst_reg, dst_reg->var_off.value |
						  src_reg.var_off.value);
			break;
		}
		/* We get our maximum from the var_off, and our minimum is the
		 * maximum of the operands' minima
		 */
		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
		dst_reg->umax_value = dst_reg->var_off.value |
				      dst_reg->var_off.mask;
		if (dst_reg->smin_value < 0 || smin_val < 0) {
			/* Lose signed bounds when ORing negative numbers,
			 * ain't nobody got time for that.
			 */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			/* ORing two positives gives a positive, so safe to
			 * cast result into s64.
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_LSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* We lose all sign bit information (except what we can pick
		 * up from var_off)
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		/* If we might shift our top bit out, then we know nothing */
		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value <<= umin_val;
			dst_reg->umax_value <<= umax_val;
		}
		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_RSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* BPF_RSH is an unsigned shift. If the value in dst_reg might
		 * be negative, then either:
		 * 1) src_reg might be zero, so the sign bit of the result is
		 *    unknown, so we lose our signed bounds
		 * 2) it's known negative, thus the unsigned bounds capture the
		 *    signed bounds
		 * 3) the signed bounds cross zero, so they tell us nothing
		 *    about the result
		 * If the value in dst_reg is known nonnegative, then again the
		 * unsigned bounds capture the signed bounds.
		 * Thus, in all cases it suffices to blow away our signed bounds
		 * and rely on inferring new ones from the unsigned bounds and
		 * var_off of the result.
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		if (insn_bitness == 32) {
			/* A 32-bit RSH operates on the low 32 bits only, so
			 * the input must be truncated before the shift;
			 * shifting the full 64-bit bounds would pull high
			 * bits into the 32-bit result and yield bogus bounds.
			 */
			dst_reg->var_off = tnum_rshift(
					tnum_cast(dst_reg->var_off, 4),
					umin_val);
			dst_reg->umin_value = (u32)dst_reg->umin_value >>
					      umax_val;
			dst_reg->umax_value = (u32)dst_reg->umax_value >>
					      umin_val;
		} else {
			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
						       umin_val);
			dst_reg->umin_value >>= umax_val;
			dst_reg->umax_value >>= umin_val;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_ARSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}

		/* Upon reaching here, src_known is true and
		 * umax_val is equal to umin_val.
		 */
		dst_reg->smin_value >>= umin_val;
		dst_reg->smax_value >>= umin_val;
		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);

		/* blow away the dst_reg umin_value/umax_value and rely on
		 * dst_reg var_off to refine the result.
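		 *
		 * E.g. (illustrative): arithmetic-shifting a var_off of
		 * (value = 0x0, mask = 0xf0) right by a known 4 gives
		 * (value = 0x0, mask = 0xf), from which fresh unsigned
		 * bounds are rebuilt by __update_reg_bounds() below.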
*/ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max * and var_off. */ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; u8 opcode = BPF_OP(insn->code); dst_reg = &regs[insn->dst_reg]; src_reg = NULL; if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; if (BPF_SRC(insn->code) == BPF_X) { src_reg = &regs[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { if (dst_reg->type != SCALAR_VALUE) { /* Combining two pointers by any ALU op yields * an arbitrary scalar. Disallow all math except * pointer subtraction */ if (opcode == BPF_SUB && env->allow_ptr_leaks) { mark_reg_unknown(env, regs, insn->dst_reg); return 0; } verbose(env, "R%d pointer %s pointer prohibited\n", insn->dst_reg, bpf_alu_string[opcode >> 4]); return -EACCES; } else { /* scalar += pointer * This is legal, but we have to reverse our * src/dest handling in computing the range */ return adjust_ptr_min_max_vals(env, insn, src_reg, dst_reg); } } else if (ptr_reg) { /* pointer += scalar */ return adjust_ptr_min_max_vals(env, insn, dst_reg, src_reg); } } else { /* Pretend the src is a reg with a known value, since we only * need to be able to read from this state. 
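		 *
		 * E.g. (illustrative): for BPF_ADD|BPF_K with imm = 8,
		 * off_reg becomes a known-constant scalar 8, so
		 * 'r2 += 8' follows the same bounds-tracking path as
		 * 'r3 = 8; r2 += r3' would.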
*/ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) /* pointer += K */ return adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand, mark as required later */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); coerce_reg_to_size(&regs[insn->dst_reg], 4); } } else { /* case: R = imm * remember the value we stored into this reg */ /* clear any state __mark_reg_known doesn't set */ mark_reg_unknown(env, regs, insn->dst_reg); regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... 
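		 * e.g. (illustrative) BPF_ALU64|BPF_X|BPF_ADD for
		 * 'r1 += r2'; they all funnel into
		 * adjust_reg_min_max_vals() at the end of this branch.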
*/ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) { verbose(env, "BPF_ARSH not supported for 32 bit ALU\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *reg; u16 new_range; int i, j; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. */ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 
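	 *
	 * Illustrative sketch: after 'r3 = r2' both registers carry the
	 * same id = n, so once one branch proves a bound for r2, the
	 * loops below propagate the new range to r3 (and to any spilled
	 * copy of the pointer) as well, although only r2 was compared
	 * against pkt_end.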
*/ for (i = 0; i < MAX_BPF_REG; i++) if (regs[i].type == type && regs[i].id == dst_reg->id) /* keep the maximum range already checked */ regs[i].range = max(regs[i].range, new_range); for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; reg = &state->stack[i].spilled_ptr; if (reg->type == type && reg->id == dst_reg->id) reg->range = max(reg->range, new_range); } } } /* Adjusts the register min/max values in the case that the dst_reg is the * variable register that we are working on, and src_reg is a constant or we're * simply doing a BPF_K check. * In JEQ/JNE cases we also adjust the var_off values. */ static void reg_set_min_max(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { /* If the dst_reg is a pointer, we can't learn anything about its * variable offset from the compare (unless src_reg were a pointer into * the same object, but we don't bother with that. * Since false_reg and true_reg have the same type by construction, we * only need to check one of them for pointerness. */ if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. */ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: false_reg->umax_value = min(false_reg->umax_value, val); true_reg->umin_value = max(true_reg->umin_value, val + 1); break; case BPF_JSGT: false_reg->smax_value = min_t(s64, false_reg->smax_value, val); true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); break; case BPF_JLT: false_reg->umin_value = max(false_reg->umin_value, val); true_reg->umax_value = min(true_reg->umax_value, val - 1); break; case BPF_JSLT: false_reg->smin_value = max_t(s64, false_reg->smin_value, val); true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); break; case BPF_JGE: false_reg->umax_value = min(false_reg->umax_value, val - 1); true_reg->umin_value = max(true_reg->umin_value, val); break; case BPF_JSGE: false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); true_reg->smin_value = max_t(s64, true_reg->smin_value, val); break; case BPF_JLE: false_reg->umin_value = max(false_reg->umin_value, val + 1); true_reg->umax_value = min(true_reg->umax_value, val); break; case BPF_JSLE: false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); true_reg->smax_value = min_t(s64, true_reg->smax_value, val); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Same as above, but for the case that dst_reg holds a constant and src_reg is * the variable reg. 
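 *
 * I.e. (illustrative) 'if 10 > r2 goto l' teaches us the same facts
 * about r2 as 'if r2 < 10 goto l', so every opcode below mirrors its
 * counterpart in reg_set_min_max() with the true/false branches swapped.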
*/ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. */ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. 
*/ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, struct bpf_reg_state *true_dst, struct bpf_reg_state *false_src, struct bpf_reg_state *false_dst, u8 opcode) { switch (opcode) { case BPF_JEQ: __reg_combine_min_max(true_src, true_dst); break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); break; } } static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, bool is_null) { struct bpf_reg_state *reg = &regs[regno]; if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { __mark_reg_known_zero(reg); reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; } else { reg->type = PTR_TO_MAP_VALUE; } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances * to take effect. */ reg->id = 0; } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno, bool is_null) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs; u32 id = regs[regno].id; int i, j; for (i = 0; i < MAX_BPF_REG; i++) mark_map_reg(regs, i, id, is_null); for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); } } } static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg, struct bpf_verifier_state *this_branch, struct bpf_verifier_state *other_branch) { if (BPF_SRC(insn->code) != BPF_X) return false; switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); } else { return false; } break; case BPF_JLT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || 
(dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *this_branch = env->cur_state; struct bpf_verifier_state *other_branch; struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; struct bpf_reg_state *dst_reg, *other_branch_regs; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; /* detect if R == 0 where R was initialized to zero earlier */ if (BPF_SRC(insn->code) == BPF_K && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == SCALAR_VALUE && tnum_is_const(dst_reg->var_off)) { if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) || (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) { /* if (imm == imm) goto pc+off; * only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else { /* if (imm != imm) goto pc+off; * only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); if (!other_branch) return -EFAULT; other_branch_regs = other_branch->frame[other_branch->curframe]->regs; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
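	 *
	 * E.g. (illustrative): after 'if r2 > 7 goto l' on a scalar r2,
	 * the fall-through branch learns umax_value = 7 and the target
	 * branch learns umin_value = 8; comparing, say, a map-value
	 * pointer against a packet pointer would teach us nothing,
	 * since their bases differ.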
*/ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch_regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch_regs[insn->src_reg], &other_branch_regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { /* Mark all identical map registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch->frame[this_branch->curframe]); return 0; } /* return the map pointer stored inside BPF_LD_IMM64 instruction */ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) { u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; return (struct bpf_map *) (unsigned long) imm64; } /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { verbose(env, "invalid BPF_LD_IMM insn\n"); return -EINVAL; } if (insn->off != 0) { verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); return -EINVAL; } err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); regs[insn->dst_reg].type = CONST_PTR_TO_MAP; regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); return 0; } static bool may_access_skb(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; } } /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, * preserve R6-R9, and store return value into R0 * * Implicit input: * ctx == skb == R6 == CTX * * Explicit input: * SRC == any register * IMM == 32-bit immediate * * Output: * R0 - 8/16/32-bit skb data converted to cpu endianness */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; if (!may_access_skb(env->prog->type)) { verbose(env, "BPF_LD_[ABS|IND] 
instructions not allowed for this program type\n"); return -EINVAL; } if (!env->ops->gen_ld_abs) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (env->subprog_cnt > 1) { /* when program has LD_ABS insn JITs and interpreter assume * that r1 == ctx == skb which is not the case for callees * that can have arbitrary arguments. It's problematic * for main prog as well since JITs would need to analyze * all functions in order to make proper register save/restore * decisions in the main prog. Hence disallow LD_ABS with calls */ verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } /* check whether implicit source operand (register R6) is readable */ err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; } if (mode == BPF_IND) { /* check explicit source operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* mark destination R0 register as readable, since it contains * the value fetched from the packet. * Already marked as written above. */ mark_reg_unknown(env, regs, BPF_REG_0); return 0; } static int check_return_code(struct bpf_verifier_env *env) { struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } verbose(env, " should have been 0 or 1\n"); return -EINVAL; } return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static int *insn_stack; /* stack 
of insns to process */ static int cur_stack; /* current stack index */ static int *insn_state; /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ env->explored_states[w] = STATE_LIST_MARK; if (insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (cur_stack >= env->prog->len) return -E2BIG; insn_stack[cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int ret = 0; int i, t; ret = check_subprogs(env); if (ret < 0) return ret; insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return -ENOMEM; insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ cur_stack = 1; peek_stack: if (cur_stack == 0) goto check_state; t = insn_stack[cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; if (insns[t].src_reg == BPF_PSEUDO_CALL) { env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else { /* conditional jump with two edges */ env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] 
!= EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kfree(insn_state); kfree(insn_stack); return ret; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. */ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) { unsigned int i; for (i = 0; i < ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; idmap[i].cur = cur_id; return true; } if (idmap[i].old == old_id) return idmap[i].cur == cur_id; } /* We ran out of idmap slots, which should be impossible */ WARN_ON_ONCE(1); return false; } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { bool equal; if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0; if (rold->type == PTR_TO_STACK) /* two stack pointers are equal only if they're pointing to * the same stack frame, since fp-8 in foo != fp-8 in bar */ return equal && rold->frameno == rcur->frameno; if (equal) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* We're trying to use a pointer in place of a scalar. * Even if the scalar was unbounded, this could lead to * pointer leaks because scalars are allowed to leak * while pointers are not. We could make this safe in * special cases if root is calling us, but it's * probably not worth the hassle. */ return false; } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. 
 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
 * checked, doing so could have affected others with the same
 * id, and we can't check for that because we lost the id when
 * we converted to a PTR_TO_MAP_VALUE.
 */
		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
			return false;
		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
			return false;
		/* Check our ids match any regs they're supposed to */
		return check_ids(rold->id, rcur->id, idmap);
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET:
		if (rcur->type != rold->type)
			return false;
		/* We must have at least as much range as the old ptr
		 * did, so that any accesses which were safe before are
		 * still safe. This is true even if old range < old off,
		 * since someone could have accessed through (ptr - k), or
		 * even done ptr -= k in a register, to get a safe access.
		 */
		if (rold->range > rcur->range)
			return false;
		/* If the offsets don't match, we can't trust our alignment;
		 * nor can we be sure that we won't fall out of range.
		 */
		if (rold->off != rcur->off)
			return false;
		/* id relations must be preserved */
		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
			return false;
		/* new val must satisfy old val knowledge */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_CTX:
	case CONST_PTR_TO_MAP:
	case PTR_TO_PACKET_END:
		/* Only valid matches are exact, which memcmp() above
		 * would have accepted
		 */
	default:
		/* Don't know what's going on, just say it's not safe */
		return false;
	}

	/* Shouldn't get here; if we do, say it's not safe */
	WARN_ON_ONCE(1);
	return false;
}

static bool stacksafe(struct bpf_func_state *old,
		      struct bpf_func_state *cur,
		      struct idpair *idmap)
{
	int i, spi;

	/* if explored stack has more populated slots than current stack
	 * such stacks are not equivalent
	 */
	if (old->allocated_stack > cur->allocated_stack)
		return false;

	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 * didn't use them
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		spi = i / BPF_REG_SIZE;

		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
			/* explored state didn't use this */
			continue;

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
			continue;
		/* if old state was safe with misc data in the stack
		 * it will be safe with zero-initialized stack.
		 * The opposite is not true
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (old->stack[spi].slot_type[0] != STACK_SPILL)
			continue;
		if (!regsafe(&old->stack[spi].spilled_ptr,
			     &cur->stack[spi].spilled_ptr,
			     idmap))
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointer types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
* return false to continue verification of this path */ return false; } return true; } /* compare two verifier states * * all states stored in state_list are known to be valid, since * verifier reached 'bpf_exit' instruction through them * * this function is called when verifier exploring different branches of * execution popped from the state stack. If it sees an old state that has * more strict register state and more strict stack state then this execution * branch doesn't need to be explored further, since verifier already * concluded that more strict state leads to valid finish. * * Therefore two states are equivalent if register state is more conservative * and explored stack state is more conservative than the current one. * Example: * explored current * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) * * In other words if current stack state (one being explored) has more * valid slots than old one that already passed validation, it means * the verifier can stop exploring and conclude that current state is valid too * * Similarly with registers. If explored state has register type as invalid * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ static bool func_states_equal(struct bpf_func_state *old, struct bpf_func_state *cur) { struct idpair *idmap; bool ret = false; int i; idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); /* If we failed to allocate the idmap, just say it's not safe */ if (!idmap) return false; for (i = 0; i < MAX_BPF_REG; i++) { if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } if (!stacksafe(old, cur, idmap)) goto out_free; ret = true; out_free: kfree(idmap); return ret; } static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { int i; if (old->curframe != cur->curframe) return false; /* for states to be equal callsites have to be the same * and all frame states need to be equivalent */ for (i = 0; i <= old->curframe; i++) { if (old->frame[i]->callsite != cur->frame[i]->callsite) return false; if (!func_states_equal(old->frame[i], cur->frame[i])) return false; } return true; } /* A write screens off any subsequent reads; but write marks come from the * straight-line code between a state and its parent. When we arrive at an * equivalent state (jump target or such) we didn't arrive by the straight-line * code, so read marks in the state must propagate to the parent regardless * of the state's write marks. That's what 'parent == state->parent' comparison * in mark_reg_read() and mark_stack_slot_read() is for. */ static int propagate_liveness(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate, struct bpf_verifier_state *vparent) { int i, frame, err = 0; struct bpf_func_state *state, *parent; if (vparent->curframe != vstate->curframe) { WARN(1, "propagate_live: parent frame %d current frame %d\n", vparent->curframe, vstate->curframe); return -EFAULT; } /* Propagate read liveness of registers... */ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); /* We don't need to worry about FP liveness because it's read-only */ for (i = 0; i < BPF_REG_FP; i++) { if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) continue; if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { err = mark_reg_read(env, vstate, vparent, i); if (err) return err; } } /* ... 
and stack slots */ for (frame = 0; frame <= vstate->curframe; frame++) { state = vstate->frame[frame]; parent = vparent->frame[frame]; for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) { if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) mark_stack_slot_read(env, vstate, vparent, i, frame); } } return err; } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; struct bpf_verifier_state *cur = env->cur_state; int i, j, err; sl = env->explored_states[insn_idx]; if (!sl) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; while (sl != STATE_LIST_MARK) { if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. * If we have any write marks in env->cur_state, they * will prevent corresponding reads in the continuation * from reaching our parent (an explored_state). Our * own state will get the read marks recorded, but * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ err = propagate_liveness(env, &sl->state, cur); if (err) return err; return 1; } sl = sl->next; } /* there were no equivalent states, remember current one. * technically the current state is not proven to be safe yet, * but it will either reach outer most bpf_exit (which means it's safe) * or it will be rejected. Since there are no loops, we won't be * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) * again on the way to bpf_exit */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ err = copy_verifier_state(&new_sl->state, cur); if (err) { free_verifier_state(&new_sl->state, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ cur->parent = &new_sl->state; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) 
 */
	for (i = 0; i < BPF_REG_FP; i++)
		cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE;

	/* all stack frames are accessible from callee, clear them all */
	for (j = 0; j <= cur->curframe; j++) {
		struct bpf_func_state *frame = cur->frame[j];

		for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++)
			frame->stack[i].spilled_ptr.live = REG_LIVE_NONE;
	}
	return 0;
}

static int do_check(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *state;
	struct bpf_insn *insns = env->prog->insnsi;
	struct bpf_reg_state *regs;
	int insn_cnt = env->prog->len, i;
	int insn_idx, prev_insn_idx = 0;
	int insn_processed = 0;
	bool do_print_state = false;

	state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	state->curframe = 0;
	state->parent = NULL;
	state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
	if (!state->frame[0]) {
		kfree(state);
		return -ENOMEM;
	}
	env->cur_state = state;
	init_func_state(env, state->frame[0],
			BPF_MAIN_FUNC /* callsite */,
			0 /* frameno */,
			0 /* subprogno, zero == main subprog */);
	insn_idx = 0;
	for (;;) {
		struct bpf_insn *insn;
		u8 class;
		int err;

		if (insn_idx >= insn_cnt) {
			verbose(env, "invalid insn idx %d insn_cnt %d\n",
				insn_idx, insn_cnt);
			return -EFAULT;
		}

		insn = &insns[insn_idx];
		class = BPF_CLASS(insn->code);

		if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
			verbose(env,
				"BPF program is too large. Processed %d insn\n",
				insn_processed);
			return -E2BIG;
		}

		err = is_state_visited(env, insn_idx);
		if (err < 0)
			return err;
		if (err == 1) {
			/* found equivalent state, can prune the search */
			if (env->log.level) {
				if (do_print_state)
					verbose(env, "\nfrom %d to %d: safe\n",
						prev_insn_idx, insn_idx);
				else
					verbose(env, "%d: safe\n", insn_idx);
			}
			goto process_bpf_exit;
		}

		if (need_resched())
			cond_resched();

		if (env->log.level > 1 || (env->log.level && do_print_state)) {
			if (env->log.level > 1)
				verbose(env, "%d:", insn_idx);
			else
				verbose(env, "\nfrom %d to %d:",
					prev_insn_idx, insn_idx);
			print_verifier_state(env, state->frame[state->curframe]);
			do_print_state = false;
		}

		if (env->log.level) {
			const struct bpf_insn_cbs cbs = {
				.cb_print	= verbose,
				.private_data	= env,
			};

			verbose(env, "%d: ", insn_idx);
			print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
		}

		if (bpf_prog_is_dev_bound(env->prog->aux)) {
			err = bpf_prog_offload_verify_insn(env, insn_idx,
							   prev_insn_idx);
			if (err)
				return err;
		}

		regs = cur_regs(env);
		env->insn_aux_data[insn_idx].seen = true;
		if (class == BPF_ALU || class == BPF_ALU64) {
			err = check_alu_op(env, insn);
			if (err)
				return err;

		} else if (class == BPF_LDX) {
			enum bpf_reg_type *prev_src_type, src_reg_type;

			/* check for reserved fields is already done */

			/* check src operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;

			err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
			if (err)
				return err;

			src_reg_type = regs[insn->src_reg].type;

			/* check that memory (src_reg + off) is readable,
			 * the state of dst_reg will be updated by this func
			 */
			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_READ,
					       insn->dst_reg, false);
			if (err)
				return err;

			prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;

			if (*prev_src_type == NOT_INIT) {
				/* saw a valid insn
				 * dst_reg = *(u32 *)(src_reg + off)
				 * save type to validate intersecting paths
				 */
				*prev_src_type = src_reg_type;

			} else if (src_reg_type != *prev_src_type &&
				   (src_reg_type == PTR_TO_CTX ||
				    *prev_src_type == PTR_TO_CTX)) {
				/* Abuser program is trying to use the same insn
				 * dst_reg = *(u32*) (src_reg + off)
				 * with different pointer types:
				 * src_reg == ctx in one branch and
				 * src_reg == stack|map in some other branch.
				 * Reject it.
				 */
				verbose(env, "same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_STX) {
			enum bpf_reg_type *prev_dst_type, dst_reg_type;

			if (BPF_MODE(insn->code) == BPF_XADD) {
				err = check_xadd(env, insn_idx, insn);
				if (err)
					return err;
				insn_idx++;
				continue;
			}

			/* check src1 operand */
			err = check_reg_arg(env, insn->src_reg, SRC_OP);
			if (err)
				return err;
			/* check src2 operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			dst_reg_type = regs[insn->dst_reg].type;

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       insn->src_reg, false);
			if (err)
				return err;

			prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;

			if (*prev_dst_type == NOT_INIT) {
				*prev_dst_type = dst_reg_type;
			} else if (dst_reg_type != *prev_dst_type &&
				   (dst_reg_type == PTR_TO_CTX ||
				    *prev_dst_type == PTR_TO_CTX)) {
				verbose(env, "same insn cannot be used with different pointers\n");
				return -EINVAL;
			}

		} else if (class == BPF_ST) {
			if (BPF_MODE(insn->code) != BPF_MEM ||
			    insn->src_reg != BPF_REG_0) {
				verbose(env, "BPF_ST uses reserved fields\n");
				return -EINVAL;
			}
			/* check src operand */
			err = check_reg_arg(env, insn->dst_reg, SRC_OP);
			if (err)
				return err;

			if (is_ctx_reg(env, insn->dst_reg)) {
				verbose(env, "BPF_ST stores into R%d context is not allowed\n",
					insn->dst_reg);
				return -EACCES;
			}

			/* check that memory (dst_reg + off) is writeable */
			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
					       BPF_SIZE(insn->code), BPF_WRITE,
					       -1, false);
			if (err)
				return err;

		} else if (class == BPF_JMP) {
			u8 opcode = BPF_OP(insn->code);

			if (opcode == BPF_CALL) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->off != 0 ||
				    (insn->src_reg != BPF_REG_0 &&
				     insn->src_reg != BPF_PSEUDO_CALL) ||
				    insn->dst_reg != BPF_REG_0) {
					verbose(env, "BPF_CALL uses reserved fields\n");
					return -EINVAL;
				}

				if (insn->src_reg == BPF_PSEUDO_CALL)
					err = check_func_call(env, insn, &insn_idx);
				else
					err = check_helper_call(env, insn->imm, insn_idx);
				if (err)
					return err;

			} else if (opcode == BPF_JA) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose(env, "BPF_JA uses reserved fields\n");
					return -EINVAL;
				}

				insn_idx += insn->off + 1;
				continue;

			} else if (opcode == BPF_EXIT) {
				if (BPF_SRC(insn->code) != BPF_K ||
				    insn->imm != 0 ||
				    insn->src_reg != BPF_REG_0 ||
				    insn->dst_reg != BPF_REG_0) {
					verbose(env, "BPF_EXIT uses reserved fields\n");
					return -EINVAL;
				}

				if (state->curframe) {
					/* exit from nested function */
					prev_insn_idx = insn_idx;
					err = prepare_func_exit(env, &insn_idx);
					if (err)
						return err;
					do_print_state = true;
					continue;
				}

				/* eBPF calling convention is such that R0 is used
				 * to return the value from eBPF program.
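				 * For instance (an illustrative fragment, not
				 * taken from this file):
				 *    BPF_MOV64_IMM(BPF_REG_0, 0),
				 *    BPF_EXIT_INSN()
				 * is the minimal conforming epilogue, while
				 * reaching BPF_EXIT without ever having written
				 * R0 is rejected by the check below.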
* Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &prev_insn_idx, &insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; insn_idx++; env->insn_aux_data[insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } insn_idx++; } verbose(env, "processed %d insns (limit %d), stack depth ", insn_processed, BPF_COMPLEXITY_LIMIT_INSNS); for (i = 0; i < env->subprog_cnt; i++) { u32 depth = env->subprog_info[i].stack_depth; verbose(env, "%d", depth); if (i + 1 < env->subprog_cnt) verbose(env, "+"); } verbose(env, "\n"); env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; return 0; } static int check_map_prealloc(struct bpf_map *map) { return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || !(map->map_flags & BPF_F_NO_PREALLOC); } static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) { /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use * preallocated hash maps, since doing memory allocation * in overflow_handler can crash depending on where nmi got * triggered. 
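 * (illustrative: a non-preallocated hash map insert from such a
 * handler would have to call into the memory allocator from NMI
 * context)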
*/ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { if (!check_map_prealloc(map)) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) { verbose(env, "perf_event programs can only use preallocated inner hash map\n"); return -EINVAL; } } if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); return -EINVAL; } return 0; } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn->src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; if (insn->src_reg != BPF_PSEUDO_MAP_FD) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn->imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn->imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } /* store map pointer inside BPF_LD_IMM64 instruction */ insn[0].imm = (u32) (unsigned long) map; insn[1].imm = ((u64) (unsigned long) map) >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) if (env->used_maps[j] == map) { fdput(f); goto next_insn; } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_used_maps() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { fdput(f); return PTR_ERR(map); } env->used_maps[env->used_map_cnt++] = map; if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE && bpf_cgroup_storage_assign(env->prog, map)) { verbose(env, "only one cgroup storage is allowed\n"); fdput(f); return -EBUSY; } fdput(f); next_insn: insn++; i++; continue; } /* Basic sanity check before we invest more work here. */ if (!bpf_opcode_in_insntable(insn->code)) { verbose(env, "unknown opcode %02x\n", insn->code); return -EINVAL; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. 
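 * For example (illustrative), a loader emits the insn pair
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd)
 * and after this pass insn[0].imm / insn[1].imm of that pair hold the
 * lower / upper 32 bits of the struct bpf_map pointer instead of the
 * file descriptor.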
 */
	return 0;
}

/* drop refcnt of maps used by the rejected program */
static void release_maps(struct bpf_verifier_env *env)
{
	int i;

	if (env->prog->aux->cgroup_storage)
		bpf_cgroup_storage_release(env->prog,
					   env->prog->aux->cgroup_storage);

	for (i = 0; i < env->used_map_cnt; i++)
		bpf_map_put(env->used_maps[i]);
}

/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
{
	struct bpf_insn *insn = env->prog->insnsi;
	int insn_cnt = env->prog->len;
	int i;

	for (i = 0; i < insn_cnt; i++, insn++)
		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
			insn->src_reg = 0;
}

/* single env->prog->insnsi[off] instruction was replaced with the range
 * insnsi[off, off + cnt). Adjust corresponding insn_aux_data by copying
 * [0, off) and [off, end) to new locations, so the patched range stays zero
 */
static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
				u32 off, u32 cnt)
{
	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
	int i;

	if (cnt == 1)
		return 0;
	new_data = vzalloc(array_size(prog_len,
				      sizeof(struct bpf_insn_aux_data)));
	if (!new_data)
		return -ENOMEM;
	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
	memcpy(new_data + off + cnt - 1, old_data + off,
	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
	for (i = off; i < off + cnt - 1; i++)
		new_data[i].seen = true;
	env->insn_aux_data = new_data;
	vfree(old_data);
	return 0;
}

static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
{
	int i;

	if (len == 1)
		return;
	/* NOTE: fake 'exit' subprog should be updated as well. */
	for (i = 0; i <= env->subprog_cnt; i++) {
		if (env->subprog_info[i].start < off)
			continue;
		env->subprog_info[i].start += len - 1;
	}
}

static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
					    const struct bpf_insn *patch, u32 len)
{
	struct bpf_prog *new_prog;

	new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
	if (!new_prog)
		return NULL;
	if (adjust_insn_aux_data(env, new_prog->len, off, len))
		return NULL;
	adjust_subprog_starts(env, off, len);
	return new_prog;
}

/* The verifier does more data flow analysis than llvm and will not
 * explore branches that are dead at run time. Malicious programs can
 * have dead code too. Therefore replace all dead at-run-time code
 * with 'ja -1'.
 *
 * Just nops are not optimal, e.g. if they would sit at the end of the
 * program and through another bug we would manage to jump there, then
 * we'd execute beyond program memory otherwise. Returning exception
 * code also wouldn't work since we can have subprogs where the dead
 * code could be located.
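 * A 'ja -1' at insn index t, by the jump rule target = t + off + 1,
 * branches back to t itself: a stray jump into sanitized code spins
 * in place instead of running past the end of program memory.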
*/ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &trap, sizeof(trap)); } } /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; u32 target_size; if (ops->gen_prologue) { cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (!ops->convert_ctx_access || bpf_prog_is_dev_bound(env->prog->aux)) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (type == BPF_WRITE && env->insn_aux_data[i + delta].sanitize_stack_off) { struct bpf_insn patch[] = { /* Sanitize suspicious stack slot with zero. * There are no memory dependencies for this store, * since it's only using frame pointer and immediate * constant of zero */ BPF_ST_MEM(BPF_DW, BPF_REG_FP, env->insn_aux_data[i + delta].sanitize_stack_off, 0), /* the original STX instruction will immediately * overwrite the same stack slot with appropriate value */ *insn, }; cnt = ARRAY_SIZE(patch); new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) continue; ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimum program type specific * convert_ctx_access changes. If conversion is successful, * we will apply proper mask to the result. 
*/ is_narrower_load = size < ctx_field_size; if (is_narrower_load) { u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); u32 off = insn->off; u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(size_default - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { if (ctx_field_size <= 4) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); else insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } static int jit_subprogs(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog, **func, *tmp; int i, j, subprog_start, subprog_end = 0, len, subprog; struct bpf_insn *insn; void *old_bpf_func; int err = -ENOMEM; if (env->subprog_cnt <= 1) return 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; /* Upon error here we cannot fall back to interpreter but * need a hard reject of the program. Thus -EFAULT is * propagated in any case. */ subprog = find_subprog(env, i + insn->imm + 1); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i + insn->imm + 1); return -EFAULT; } /* temporarily remember subprog id inside insn instead of * aux_data, since next loop will split up all insns into funcs */ insn->off = subprog; /* remember original imm in case JIT fails and fallback * to interpreter will be needed */ env->insn_aux_data[i].call_imm = insn->imm; /* point imm to __bpf_call_base+1 from JITs point of view */ insn->imm = 1; } func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); if (!func) goto out_undo_insn; for (i = 0; i < env->subprog_cnt; i++) { subprog_start = subprog_end; subprog_end = env->subprog_info[i + 1].start; len = subprog_end - subprog_start; func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER); if (!func[i]) goto out_free; memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], len * sizeof(struct bpf_insn)); func[i]->type = prog->type; func[i]->len = len; if (bpf_prog_calc_tag(func[i])) goto out_free; func[i]->is_func = 1; /* Use bpf_prog_F_tag to indicate functions in stack traces. 
		 * Long term would need debug info to populate names
		 */
		func[i]->aux->name[0] = 'F';
		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
		func[i]->jit_requested = 1;
		func[i] = bpf_int_jit_compile(func[i]);
		if (!func[i]->jited) {
			err = -ENOTSUPP;
			goto out_free;
		}
		cond_resched();
	}
	/* at this point all bpf functions were successfully JITed
	 * now populate all bpf_calls with correct addresses and
	 * run last pass of JIT
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		insn = func[i]->insnsi;
		for (j = 0; j < func[i]->len; j++, insn++) {
			if (insn->code != (BPF_JMP | BPF_CALL) ||
			    insn->src_reg != BPF_PSEUDO_CALL)
				continue;
			subprog = insn->off;
			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
				func[subprog]->bpf_func -
				__bpf_call_base;
		}

		/* we use the aux data to keep a list of the start addresses
		 * of the JITed images for each function in the program
		 *
		 * for some architectures, such as powerpc64, the imm field
		 * might not be large enough to hold the offset of the start
		 * address of the callee's JITed image from __bpf_call_base
		 *
		 * in such cases, we can lookup the start address of a callee
		 * by using its subprog id, available from the off field of
		 * the call instruction, as an index for this list
		 */
		func[i]->aux->func = func;
		func[i]->aux->func_cnt = env->subprog_cnt;
	}
	for (i = 0; i < env->subprog_cnt; i++) {
		old_bpf_func = func[i]->bpf_func;
		tmp = bpf_int_jit_compile(func[i]);
		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
			err = -ENOTSUPP;
			goto out_free;
		}
		cond_resched();
	}
	/* finally lock prog and jit images for all functions and
	 * populate kallsyms
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		bpf_prog_lock_ro(func[i]);
		bpf_prog_kallsyms_add(func[i]);
	}
	/* Last step: make now unused interpreter insns from main
	 * prog consistent for later dump requests, so they can
	 * later look the same as if they were interpreted only.
*/ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = env->insn_aux_data[i].call_imm; subprog = find_subprog(env, i + insn->off + 1); insn->imm = subprog; } prog->jited = 1; prog->bpf_func = func[0]->bpf_func; prog->aux->func = func; prog->aux->func_cnt = env->subprog_cnt; return 0; out_free: for (i = 0; i < env->subprog_cnt; i++) if (func[i]) bpf_jit_free(func[i]); kfree(func); out_undo_insn: /* cleanup main prog to be interpreted */ prog->jit_requested = 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = 0; insn->imm = env->insn_aux_data[i].call_imm; } return err; } static int fixup_call_args(struct bpf_verifier_env *env) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; int i, depth; #endif int err; err = 0; if (env->prog->jit_requested) { err = jit_subprogs(env); if (err == 0) return 0; if (err == -EFAULT) return err; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON for (i = 0; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; depth = get_callee_stack_depth(env, insn, i); if (depth < 0) return depth; bpf_patch_call_args(insn, depth); } err = 0; #endif return err; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as explicit sequence of BPF instructions * * this function is called after eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; const struct bpf_map_ops *ops; struct bpf_insn_aux_data *aux; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_MOD | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; struct bpf_insn mask_and_div[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx div 0 -> 0 */ BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), BPF_JMP_IMM(BPF_JA, 0, 0, 1), *insn, }; struct bpf_insn mask_and_mod[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx mod 0 -> Rx */ BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), *insn, }; struct bpf_insn *patchlet; if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { patchlet = mask_and_div + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); } else { patchlet = mask_and_mod + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 
					1 : 0);
			}

			new_prog = bpf_patch_insn_data(env, i + delta,
						       patchlet, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		if (BPF_CLASS(insn->code) == BPF_LD &&
		    (BPF_MODE(insn->code) == BPF_ABS ||
		     BPF_MODE(insn->code) == BPF_IND)) {
			cnt = env->ops->gen_ld_abs(insn, insn_buf);
			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
				verbose(env, "bpf verifier is misconfigured\n");
				return -EINVAL;
			}

			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		if (insn->code != (BPF_JMP | BPF_CALL))
			continue;
		if (insn->src_reg == BPF_PSEUDO_CALL)
			continue;

		if (insn->imm == BPF_FUNC_get_route_realm)
			prog->dst_needed = 1;
		if (insn->imm == BPF_FUNC_get_prandom_u32)
			bpf_user_rnd_init_once();
		if (insn->imm == BPF_FUNC_override_return)
			prog->kprobe_override = 1;
		if (insn->imm == BPF_FUNC_tail_call) {
			/* If we tail call into other programs, we
			 * cannot make any assumptions since they can
			 * be replaced dynamically during runtime in
			 * the program array.
			 */
			prog->cb_access = 1;
			env->prog->aux->stack_depth = MAX_BPF_STACK;

			/* mark bpf_tail_call as different opcode to avoid
			 * conditional branch in the interpreter for every normal
			 * call and to prevent accidental JITing by JIT compiler
			 * that doesn't support bpf_tail_call yet
			 */
			insn->imm = 0;
			insn->code = BPF_JMP | BPF_TAIL_CALL;

			aux = &env->insn_aux_data[i + delta];
			if (!bpf_map_ptr_unpriv(aux))
				continue;

			/* instead of changing every JIT dealing with tail_call
			 * emit two extra insns:
			 * if (index >= max_entries) goto out;
			 * index &= array->index_mask;
			 * to avoid out-of-bounds cpu speculation
			 */
			if (bpf_map_ptr_poisoned(aux)) {
				verbose(env, "tail_call abusing map_ptr\n");
				return -EINVAL;
			}

			map_ptr = BPF_MAP_PTR(aux->map_state);
			insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
						  map_ptr->max_entries, 2);
			insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
						    container_of(map_ptr,
								 struct bpf_array,
								 map)->index_mask);
			insn_buf[2] = *insn;
			cnt = 3;
			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
			if (!new_prog)
				return -ENOMEM;

			delta    += cnt - 1;
			env->prog = prog = new_prog;
			insn      = new_prog->insnsi + i + delta;
			continue;
		}

		/* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup
		 * and other inlining handlers are currently limited to 64 bit
		 * only.
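		 * (illustrative: the inlined sequences perform native 64-bit
		 * arithmetic on the map address patched into the insn stream,
		 * which has no direct 32-bit equivalent)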
*/ if (prog->jit_requested && BITS_PER_LONG == 64 && (insn->imm == BPF_FUNC_map_lookup_elem || insn->imm == BPF_FUNC_map_update_elem || insn->imm == BPF_FUNC_map_delete_elem)) { aux = &env->insn_aux_data[i + delta]; if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; map_ptr = BPF_MAP_PTR(aux->map_state); ops = map_ptr->ops; if (insn->imm == BPF_FUNC_map_lookup_elem && ops->map_gen_lookup) { cnt = ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_delete_elem, (int (*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_update_elem, (int (*)(struct bpf_map *map, void *key, void *value, u64 flags))NULL)); switch (insn->imm) { case BPF_FUNC_map_lookup_elem: insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - __bpf_call_base; continue; case BPF_FUNC_map_update_elem: insn->imm = BPF_CAST_CALL(ops->map_update_elem) - __bpf_call_base; continue; case BPF_FUNC_map_delete_elem: insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - __bpf_call_base; continue; } goto patch_call_imm; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; if (!env->explored_states) return; for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; if (sl) while (sl != STATE_LIST_MARK) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } } kfree(env->explored_states); } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) { struct bpf_verifier_env *env; struct bpf_verifier_log *log; int ret = -EINVAL; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; env->insn_aux_data = vzalloc(array_size(sizeof(struct bpf_insn_aux_data), (*prog)->len)); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || !log->level || !log->ubuf) goto err_unlock; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; ret = 
replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; if (bpf_prog_is_dev_bound(env->prog->aux)) { ret = bpf_prog_offload_verifier_prep(env); if (ret) goto skip_full_check; } env->explored_states = kcalloc(env->prog->len, sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = check_cfg(env); if (ret < 0) goto skip_full_check; ret = do_check(env); if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) sanitize_dead_code(env); if (ret == 0) ret = check_max_stack_depth(env); if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); if (ret == 0) ret = fixup_call_args(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if (!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_used_maps() will release them. */ release_maps(env); *prog = env->prog; err_unlock: mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
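/* Illustrative sketch (an exposition aid, not part of the original
 * source): the smallest program that survives the whole pipeline
 * above. check_cfg() sees a two-insn DAG with no back-edge, do_check()
 * sees R0 written before BPF_EXIT, and none of the fixup passes have
 * anything to patch:
 *
 *	struct bpf_insn min_prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	// R0 = 0, the return value
 *		BPF_EXIT_INSN(),		// reachable, terminates the DAG
 *	};
 */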
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/filter.h>
#include <net/netlink.h>
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/stringify.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/perf_event.h>

#include "disasm.h"

static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name) \
	[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program
 * instruction by instruction and updates register/stack state.
 * All paths of conditional branches are analyzed until 'bpf_exit' insn.
 *
 * The first pass is depth-first-search to check that the program is a DAG.
 * It rejects the following programs:
 * - larger than BPF_MAXINSNS insns
 * - if loop is present (detected via back-edge)
 * - unreachable insns exist (shouldn't be a forest. program = one function)
 * - out of bounds or malformed jumps
 * The second pass is all possible path descent from the 1st insn.
 * Since it's analyzing all paths through the program, the length of the
 * analysis is limited to 64k insn, which may be hit even if total number of
 * insn is less than 4K, but there are too many branches that change stack/regs.
 * Number of 'branches to be analyzed' is limited to 1k
 *
 * On entry to each instruction, each register has a type, and the instruction
 * changes the types of the registers depending on instruction semantics.
 * If instruction is BPF_MOV64_REG(BPF_REG_1, BPF_REG_5), then type of R5 is
 * copied to R1.
 *
 * All registers are 64-bit.
 * R0 - return register
 * R1-R5 argument passing registers
 * R6-R9 callee saved registers
 * R10 - frame pointer read-only
 *
 * At the start of BPF program the register R1 contains a pointer to bpf_context
 * and has type PTR_TO_CTX.
 *
 * Verifier tracks arithmetic operations on pointers in case:
 *    BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -20),
 * 1st insn copies R10 (which has FRAME_PTR) type into R1
 * and 2nd arithmetic instruction is pattern matched to recognize
 * that it wants to construct a pointer to some element within stack.
 * So after 2nd insn, the register R1 has type PTR_TO_STACK
 * (and -20 constant is saved for further stack bounds checking).
 * Meaning that this reg is a pointer to stack plus known immediate constant.
 *
 * Most of the time the registers have SCALAR_VALUE type, which
 * means the register has some value, but it's not a valid pointer.
 * (like pointer plus pointer becomes SCALAR_VALUE type)
 *
 * When verifier sees load or store instructions the type of base register
 * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK. These are three pointer
 * types recognized by check_mem_access() function.
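 * For instance (illustrative), given the PTR_TO_STACK example above, a
 * subsequent
 *    BPF_ST_MEM(BPF_W, BPF_REG_1, 0, 42)
 * is checked by check_mem_access() as a 4-byte write to fp-20 against
 * the stack bounds.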
 *
 * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value'
 * and the range of [ptr, ptr + map's value_size) is accessible.
 *
 * registers used to pass values to function calls are checked against
 * function argument constraints.
 *
 * ARG_PTR_TO_MAP_KEY is one of such argument constraints.
 * It means that the register type passed to this function must be
 * PTR_TO_STACK and it will be used inside the function as
 * 'pointer to map element key'
 *
 * For example the argument constraints for bpf_map_lookup_elem():
 *   .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
 *   .arg1_type = ARG_CONST_MAP_PTR,
 *   .arg2_type = ARG_PTR_TO_MAP_KEY,
 *
 * ret_type says that this function returns 'pointer to map elem value or null'
 * function expects 1st argument to be a const pointer to 'struct bpf_map' and
 * 2nd argument should be a pointer to stack, which will be used inside
 * the helper function as a pointer to map element key.
 *
 * On the kernel side the helper function looks like:
 * u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 * {
 *    struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
 *    void *key = (void *) (unsigned long) r2;
 *    void *value;
 *
 *    here kernel can access 'key' and 'map' pointers safely, knowing that
 *    [key, key + map->key_size) bytes are valid and were initialized on
 *    the stack of eBPF program.
 * }
 *
 * Corresponding eBPF program may look like:
 *    BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),  // after this insn R2 type is FRAME_PTR
 *    BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), // after this insn R2 type is PTR_TO_STACK
 *    BPF_LD_MAP_FD(BPF_REG_1, map_fd),      // after this insn R1 type is CONST_PTR_TO_MAP
 *    BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 * here verifier looks at prototype of map_lookup_elem() and sees:
 * .arg1_type == ARG_CONST_MAP_PTR and R1->type == CONST_PTR_TO_MAP, which is ok,
 * Now verifier knows that this map has key of R1->map_ptr->key_size bytes
 *
 * Then .arg2_type == ARG_PTR_TO_MAP_KEY and R2->type == PTR_TO_STACK, ok so far,
 * Now verifier checks that [R2, R2 + map's key_size) are within stack limits
 * and were initialized prior to this call.
 * If it's ok, then verifier allows this BPF_CALL insn and looks at
 * .ret_type which is RET_PTR_TO_MAP_VALUE_OR_NULL, so it sets
 * R0->type = PTR_TO_MAP_VALUE_OR_NULL which means bpf_map_lookup_elem() function
 * returns either a pointer to map value or NULL.
 *
 * When type PTR_TO_MAP_VALUE_OR_NULL passes through 'if (reg != 0) goto +off'
 * insn, the register holding that pointer in the true branch changes state to
 * PTR_TO_MAP_VALUE and the same register changes state to CONST_IMM in the false
 * branch. See check_cond_jmp_op().
 *
 * After the call R0 is set to return type of the function and registers R1-R5
 * are set to NOT_INIT to indicate that they are no longer readable.
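 * (so, illustratively, a later BPF_MOV64_REG(BPF_REG_3, BPF_REG_1)
 * with no intervening write to R1 is rejected as a read of an
 * uninitialized register)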
 */

/* verifier_state + insn_idx are pushed to stack when branch is encountered */
struct bpf_verifier_stack_elem {
	/* verifier state is 'st'
	 * before processing instruction 'insn_idx'
	 * and after processing instruction 'prev_insn_idx'
	 */
	struct bpf_verifier_state st;
	int insn_idx;
	int prev_insn_idx;
	struct bpf_verifier_stack_elem *next;
};

#define BPF_COMPLEXITY_LIMIT_INSNS	131072
#define BPF_COMPLEXITY_LIMIT_STACK	1024

#define BPF_MAP_PTR_UNPRIV	1UL
#define BPF_MAP_PTR_POISON	((void *)((0xeB9FUL << 1) + \
					  POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X)		((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))

static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
	return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON;
}

static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux)
{
	return aux->map_state & BPF_MAP_PTR_UNPRIV;
}

static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux,
			      const struct bpf_map *map, bool unpriv)
{
	BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV);
	unpriv |= bpf_map_ptr_unpriv(aux);
	aux->map_state = (unsigned long)map |
			 (unpriv ? BPF_MAP_PTR_UNPRIV : 0UL);
}

struct bpf_call_arg_meta {
	struct bpf_map *map_ptr;
	bool raw_mode;
	bool pkt_access;
	int regno;
	int access_size;
	s64 msize_smax_value;
	u64 msize_umax_value;
};

static DEFINE_MUTEX(bpf_verifier_lock);

void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	unsigned int n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
		  "verifier log line truncated - local buffer too short\n");

	n = min(log->len_total - log->len_used - 1, n);
	log->kbuf[n] = '\0';

	if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1))
		log->len_used += n;
	else
		log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);

__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...)
{ struct bpf_verifier_env *env = private_data; va_list args; if (!bpf_verifier_log_needed(&env->log)) return; va_start(args, fmt); bpf_verifier_vlog(&env->log, fmt, args); va_end(args); } static bool type_is_pkt_pointer(enum bpf_reg_type type) { return type == PTR_TO_PACKET || type == PTR_TO_PACKET_META; } /* string representation of 'enum bpf_reg_type' */ static const char * const reg_type_str[] = { [NOT_INIT] = "?", [SCALAR_VALUE] = "inv", [PTR_TO_CTX] = "ctx", [CONST_PTR_TO_MAP] = "map_ptr", [PTR_TO_MAP_VALUE] = "map_value", [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", [PTR_TO_STACK] = "fp", [PTR_TO_PACKET] = "pkt", [PTR_TO_PACKET_META] = "pkt_meta", [PTR_TO_PACKET_END] = "pkt_end", }; static void print_liveness(struct bpf_verifier_env *env, enum bpf_reg_liveness live) { if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN)) verbose(env, "_"); if (live & REG_LIVE_READ) verbose(env, "r"); if (live & REG_LIVE_WRITTEN) verbose(env, "w"); } static struct bpf_func_state *func(struct bpf_verifier_env *env, const struct bpf_reg_state *reg) { struct bpf_verifier_state *cur = env->cur_state; return cur->frame[reg->frameno]; } static void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state) { const struct bpf_reg_state *reg; enum bpf_reg_type t; int i; if (state->frameno) verbose(env, " frame%d:", state->frameno); for (i = 0; i < MAX_BPF_REG; i++) { reg = &state->regs[i]; t = reg->type; if (t == NOT_INIT) continue; verbose(env, " R%d", i); print_liveness(env, reg->live); verbose(env, "=%s", reg_type_str[t]); if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && tnum_is_const(reg->var_off)) { /* reg->off should be 0 for SCALAR_VALUE */ verbose(env, "%lld", reg->var_off.value + reg->off); if (t == PTR_TO_STACK) verbose(env, ",call_%d", func(env, reg)->callsite); } else { verbose(env, "(id=%d", reg->id); if (t != SCALAR_VALUE) verbose(env, ",off=%d", reg->off); if (type_is_pkt_pointer(t)) verbose(env, ",r=%d", reg->range); else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || t == PTR_TO_MAP_VALUE_OR_NULL) verbose(env, ",ks=%d,vs=%d", reg->map_ptr->key_size, reg->map_ptr->value_size); if (tnum_is_const(reg->var_off)) { /* Typically an immediate SCALAR_VALUE, but * could be a pointer whose offset is too big * for reg->off */ verbose(env, ",imm=%llx", reg->var_off.value); } else { if (reg->smin_value != reg->umin_value && reg->smin_value != S64_MIN) verbose(env, ",smin_value=%lld", (long long)reg->smin_value); if (reg->smax_value != reg->umax_value && reg->smax_value != S64_MAX) verbose(env, ",smax_value=%lld", (long long)reg->smax_value); if (reg->umin_value != 0) verbose(env, ",umin_value=%llu", (unsigned long long)reg->umin_value); if (reg->umax_value != U64_MAX) verbose(env, ",umax_value=%llu", (unsigned long long)reg->umax_value); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, ",var_off=%s", tn_buf); } } verbose(env, ")"); } } for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] == STACK_SPILL) { verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); print_liveness(env, state->stack[i].spilled_ptr.live); verbose(env, "=%s", reg_type_str[state->stack[i].spilled_ptr.type]); } if (state->stack[i].slot_type[0] == STACK_ZERO) verbose(env, " fp%d=0", (-i - 1) * BPF_REG_SIZE); } verbose(env, "\n"); } static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src) { if (!src->stack) return 0; if (WARN_ON_ONCE(dst->allocated_stack < 
			  src->allocated_stack)) {
		/* internal bug, make state invalid to reject the program */
		memset(dst, 0, sizeof(*dst));
		return -EFAULT;
	}
	memcpy(dst->stack, src->stack,
	       sizeof(*src->stack) * (src->allocated_stack / BPF_REG_SIZE));
	return 0;
}

/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
 * make it consume minimal amount of memory. check_stack_write() access from
 * the program calls into realloc_func_state() to grow the stack size.
 * Note there is a non-zero 'parent' pointer inside bpf_verifier_state
 * which this function copies over. It points to previous bpf_verifier_state
 * which is never reallocated
 */
static int realloc_func_state(struct bpf_func_state *state, int size,
			      bool copy_old)
{
	u32 old_size = state->allocated_stack;
	struct bpf_stack_state *new_stack;
	int slot = size / BPF_REG_SIZE;

	if (size <= old_size || !size) {
		if (copy_old)
			return 0;
		state->allocated_stack = slot * BPF_REG_SIZE;
		if (!size && old_size) {
			kfree(state->stack);
			state->stack = NULL;
		}
		return 0;
	}
	new_stack = kmalloc_array(slot, sizeof(struct bpf_stack_state),
				  GFP_KERNEL);
	if (!new_stack)
		return -ENOMEM;
	if (copy_old) {
		if (state->stack)
			memcpy(new_stack, state->stack,
			       sizeof(*new_stack) * (old_size / BPF_REG_SIZE));
		memset(new_stack + old_size / BPF_REG_SIZE, 0,
		       sizeof(*new_stack) * (size - old_size) / BPF_REG_SIZE);
	}
	state->allocated_stack = slot * BPF_REG_SIZE;
	kfree(state->stack);
	state->stack = new_stack;
	return 0;
}

static void free_func_state(struct bpf_func_state *state)
{
	if (!state)
		return;
	kfree(state->stack);
	kfree(state);
}

static void free_verifier_state(struct bpf_verifier_state *state,
				bool free_self)
{
	int i;

	for (i = 0; i <= state->curframe; i++) {
		free_func_state(state->frame[i]);
		state->frame[i] = NULL;
	}
	if (free_self)
		kfree(state);
}

/* copy verifier state from src to dst growing dst stack space
 * when necessary to accommodate larger src stack
 */
static int copy_func_state(struct bpf_func_state *dst,
			   const struct bpf_func_state *src)
{
	int err;

	err = realloc_func_state(dst, src->allocated_stack, false);
	if (err)
		return err;
	memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
	return copy_stack_state(dst, src);
}

static int copy_verifier_state(struct bpf_verifier_state *dst_state,
			       const struct bpf_verifier_state *src)
{
	struct bpf_func_state *dst;
	int i, err;

	/* if dst has more stack frames than src, free them */
	for (i = src->curframe + 1; i <= dst_state->curframe; i++) {
		free_func_state(dst_state->frame[i]);
		dst_state->frame[i] = NULL;
	}
	dst_state->curframe = src->curframe;
	dst_state->parent = src->parent;
	for (i = 0; i <= src->curframe; i++) {
		dst = dst_state->frame[i];
		if (!dst) {
			dst = kzalloc(sizeof(*dst), GFP_KERNEL);
			if (!dst)
				return -ENOMEM;
			dst_state->frame[i] = dst;
		}
		err = copy_func_state(dst, src->frame[i]);
		if (err)
			return err;
	}
	return 0;
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
		     int *insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct bpf_verifier_stack_elem *elem, *head = env->head;
	int err;

	if (env->head == NULL)
		return -ENOENT;

	if (cur) {
		err = copy_verifier_state(cur, &head->st);
		if (err)
			return err;
	}
	if (insn_idx)
		*insn_idx = head->insn_idx;
	if (prev_insn_idx)
		*prev_insn_idx = head->prev_insn_idx;
	elem = head->next;
	free_verifier_state(&head->st, false);
	kfree(head);
	env->head = elem;
	env->stack_size--;
	return 0;
}

static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
					     int insn_idx, int prev_insn_idx)
{
	struct bpf_verifier_state *cur = env->cur_state;
	struct
bpf_verifier_stack_elem *elem; int err; elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); if (!elem) goto err; elem->insn_idx = insn_idx; elem->prev_insn_idx = prev_insn_idx; elem->next = env->head; env->head = elem; env->stack_size++; err = copy_verifier_state(&elem->st, cur); if (err) goto err; if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { verbose(env, "BPF program is too complex\n"); goto err; } return &elem->st; err: free_verifier_state(env->cur_state, true); env->cur_state = NULL; /* pop all elements and return */ while (!pop_stack(env, NULL, NULL)); return NULL; } #define CALLER_SAVED_REGS 6 static const int caller_saved[CALLER_SAVED_REGS] = { BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 }; static void __mark_reg_not_init(struct bpf_reg_state *reg); /* Mark the unknown part of a register (variable offset or scalar value) as * known to have the value @imm. */ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) { reg->id = 0; reg->var_off = tnum_const(imm); reg->smin_value = (s64)imm; reg->smax_value = (s64)imm; reg->umin_value = imm; reg->umax_value = imm; } /* Mark the 'variable offset' part of a register as zero. This should be * used only on registers holding a pointer type. */ static void __mark_reg_known_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); } static void __mark_reg_const_zero(struct bpf_reg_state *reg) { __mark_reg_known(reg, 0); reg->off = 0; reg->type = SCALAR_VALUE; } static void mark_reg_known_zero(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); /* Something bad happened, let's kill all regs */ for (regno = 0; regno < MAX_BPF_REG; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_known_zero(regs + regno); } static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) { return type_is_pkt_pointer(reg->type); } static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) { return reg_is_pkt_pointer(reg) || reg->type == PTR_TO_PACKET_END; } /* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, enum bpf_reg_type which) { /* The register can already have a range from prior markings. * This is fine as long as it hasn't been advanced from its * origin. */ return reg->type == which && reg->id == 0 && reg->off == 0 && tnum_equals_const(reg->var_off, 0); } /* Attempts to improve min/max values based on var_off information */ static void __update_reg_bounds(struct bpf_reg_state *reg) { /* min signed is max(sign bit) | min(other bits) */ reg->smin_value = max_t(s64, reg->smin_value, reg->var_off.value | (reg->var_off.mask & S64_MIN)); /* max signed is min(sign bit) | max(other bits) */ reg->smax_value = min_t(s64, reg->smax_value, reg->var_off.value | (reg->var_off.mask & S64_MAX)); reg->umin_value = max(reg->umin_value, reg->var_off.value); reg->umax_value = min(reg->umax_value, reg->var_off.value | reg->var_off.mask); } /* Uses signed min/max values to inform unsigned, and vice-versa */ static void __reg_deduce_bounds(struct bpf_reg_state *reg) { /* Learn sign from signed bounds. * If we cannot cross the sign boundary, then signed and unsigned bounds * are the same, so combine. This works even in the negative case, e.g. * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. 
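	 * The nonnegative case works the same way, e.g.
	 * 0 s<= x s<= 7 implies 0 u<= x u<= 7, so below both pairs are
	 * tightened to their intersection with max()/min().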
*/ if (reg->smin_value >= 0 || reg->smax_value < 0) { reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); return; } /* Learn sign from unsigned bounds. Signed bounds cross the sign * boundary, so we must be careful. */ if ((s64)reg->umax_value >= 0) { /* Positive. We can't learn anything from the smin, but smax * is positive, hence safe. */ reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, reg->umax_value); } else if ((s64)reg->umin_value < 0) { /* Negative. We can't learn anything from the smax, but smin * is negative, hence safe. */ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, reg->umin_value); reg->smax_value = reg->umax_value; } } /* Attempts to improve var_off based on unsigned min/max information */ static void __reg_bound_offset(struct bpf_reg_state *reg) { reg->var_off = tnum_intersect(reg->var_off, tnum_range(reg->umin_value, reg->umax_value)); } /* Reset the min/max bounds of a register */ static void __mark_reg_unbounded(struct bpf_reg_state *reg) { reg->smin_value = S64_MIN; reg->smax_value = S64_MAX; reg->umin_value = 0; reg->umax_value = U64_MAX; } /* Mark a register as having a completely unknown (scalar) value. */ static void __mark_reg_unknown(struct bpf_reg_state *reg) { reg->type = SCALAR_VALUE; reg->id = 0; reg->off = 0; reg->var_off = tnum_unknown; reg->frameno = 0; __mark_reg_unbounded(reg); } static void mark_reg_unknown(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_unknown(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_unknown(regs + regno); } static void __mark_reg_not_init(struct bpf_reg_state *reg) { __mark_reg_unknown(reg); reg->type = NOT_INIT; } static void mark_reg_not_init(struct bpf_verifier_env *env, struct bpf_reg_state *regs, u32 regno) { if (WARN_ON(regno >= MAX_BPF_REG)) { verbose(env, "mark_reg_not_init(regs, %u)\n", regno); /* Something bad happened, let's kill all regs except FP */ for (regno = 0; regno < BPF_REG_FP; regno++) __mark_reg_not_init(regs + regno); return; } __mark_reg_not_init(regs + regno); } static void init_reg_state(struct bpf_verifier_env *env, struct bpf_func_state *state) { struct bpf_reg_state *regs = state->regs; int i; for (i = 0; i < MAX_BPF_REG; i++) { mark_reg_not_init(env, regs, i); regs[i].live = REG_LIVE_NONE; } /* frame pointer */ regs[BPF_REG_FP].type = PTR_TO_STACK; mark_reg_known_zero(env, regs, BPF_REG_FP); regs[BPF_REG_FP].frameno = state->frameno; /* 1st arg to a function */ regs[BPF_REG_1].type = PTR_TO_CTX; mark_reg_known_zero(env, regs, BPF_REG_1); } #define BPF_MAIN_FUNC (-1) static void init_func_state(struct bpf_verifier_env *env, struct bpf_func_state *state, int callsite, int frameno, int subprogno) { state->callsite = callsite; state->frameno = frameno; state->subprogno = subprogno; init_reg_state(env, state); } enum reg_arg_type { SRC_OP, /* register is used as source operand */ DST_OP, /* register is used as destination operand */ DST_OP_NO_MARK /* same as above, check only, don't mark */ }; static int cmp_subprogs(const void *a, const void *b) { return ((struct bpf_subprog_info *)a)->start - ((struct bpf_subprog_info *)b)->start; } static int find_subprog(struct bpf_verifier_env *env, int off) { struct 
bpf_subprog_info *p; p = bsearch(&off, env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs); if (!p) return -ENOENT; return p - env->subprog_info; } static int add_subprog(struct bpf_verifier_env *env, int off) { int insn_cnt = env->prog->len; int ret; if (off >= insn_cnt || off < 0) { verbose(env, "call to invalid destination\n"); return -EINVAL; } ret = find_subprog(env, off); if (ret >= 0) return 0; if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { verbose(env, "too many subprograms\n"); return -E2BIG; } env->subprog_info[env->subprog_cnt++].start = off; sort(env->subprog_info, env->subprog_cnt, sizeof(env->subprog_info[0]), cmp_subprogs, NULL); return 0; } static int check_subprogs(struct bpf_verifier_env *env) { int i, ret, subprog_start, subprog_end, off, cur_subprog = 0; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; /* Add entry function. */ ret = add_subprog(env, 0); if (ret < 0) return ret; /* determine subprog starts. The end is one before the next starts */ for (i = 0; i < insn_cnt; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; if (!env->allow_ptr_leaks) { verbose(env, "function calls to other bpf functions are allowed for root only\n"); return -EPERM; } if (bpf_prog_is_dev_bound(env->prog->aux)) { verbose(env, "function calls in offloaded programs are not supported yet\n"); return -EINVAL; } ret = add_subprog(env, i + insn[i].imm + 1); if (ret < 0) return ret; } /* Add a fake 'exit' subprog which could simplify subprog iteration * logic. 'subprog_cnt' should not be increased. */ subprog[env->subprog_cnt].start = insn_cnt; if (env->log.level > 1) for (i = 0; i < env->subprog_cnt; i++) verbose(env, "func#%d @%d\n", i, subprog[i].start); /* now check that all jumps are within the same subprog */ subprog_start = subprog[cur_subprog].start; subprog_end = subprog[cur_subprog + 1].start; for (i = 0; i < insn_cnt; i++) { u8 code = insn[i].code; if (BPF_CLASS(code) != BPF_JMP) goto next; if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) goto next; off = i + insn[i].off + 1; if (off < subprog_start || off >= subprog_end) { verbose(env, "jump out of range from insn %d to %d\n", i, off); return -EINVAL; } next: if (i == subprog_end - 1) { /* to avoid fall-through from one subprog into another * the last insn of the subprog should be either exit * or unconditional jump back */ if (code != (BPF_JMP | BPF_EXIT) && code != (BPF_JMP | BPF_JA)) { verbose(env, "last insn is not an exit or jmp\n"); return -EINVAL; } subprog_start = subprog_end; cur_subprog++; if (cur_subprog < env->subprog_cnt) subprog_end = subprog[cur_subprog + 1].start; } } return 0; } static struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env, const struct bpf_verifier_state *state, struct bpf_verifier_state *parent, u32 regno) { struct bpf_verifier_state *tmp = NULL; /* 'parent' could be a state of caller and * 'state' could be a state of callee. In such case * parent->curframe < state->curframe * and it's ok for r1 - r5 registers * * 'parent' could be a callee's state after it bpf_exit-ed. 
* In such case parent->curframe > state->curframe * and it's ok for r0 only */ if (parent->curframe == state->curframe || (parent->curframe < state->curframe && regno >= BPF_REG_1 && regno <= BPF_REG_5) || (parent->curframe > state->curframe && regno == BPF_REG_0)) return parent; if (parent->curframe > state->curframe && regno >= BPF_REG_6) { /* for callee saved regs we have to skip the whole chain * of states that belong to callee and mark as LIVE_READ * the registers before the call */ tmp = parent; while (tmp && tmp->curframe != state->curframe) { tmp = tmp->parent; } if (!tmp) goto bug; parent = tmp; } else { goto bug; } return parent; bug: verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp); verbose(env, "regno %d parent frame %d current frame %d\n", regno, parent->curframe, state->curframe); return NULL; } static int mark_reg_read(struct bpf_verifier_env *env, const struct bpf_verifier_state *state, struct bpf_verifier_state *parent, u32 regno) { bool writes = parent == state->parent; /* Observe write marks */ if (regno == BPF_REG_FP) /* We don't need to worry about FP liveness because it's read-only */ return 0; while (parent) { /* if read wasn't screened by an earlier write ... */ if (writes && state->frame[state->curframe]->regs[regno].live & REG_LIVE_WRITTEN) break; parent = skip_callee(env, state, parent, regno); if (!parent) return -EFAULT; /* ... then we depend on parent's value */ parent->frame[parent->curframe]->regs[regno].live |= REG_LIVE_READ; state = parent; parent = state->parent; writes = true; } return 0; } static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, enum reg_arg_type t) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs; if (regno >= MAX_BPF_REG) { verbose(env, "R%d is invalid\n", regno); return -EINVAL; } if (t == SRC_OP) { /* check whether register used as source operand can be read */ if (regs[regno].type == NOT_INIT) { verbose(env, "R%d !read_ok\n", regno); return -EACCES; } return mark_reg_read(env, vstate, vstate->parent, regno); } else { /* check whether register used as dest operand can be written to */ if (regno == BPF_REG_FP) { verbose(env, "frame pointer is read only\n"); return -EACCES; } regs[regno].live |= REG_LIVE_WRITTEN; if (t == DST_OP) mark_reg_unknown(env, regs, regno); } return 0; } static bool is_spillable_regtype(enum bpf_reg_type type) { switch (type) { case PTR_TO_MAP_VALUE: case PTR_TO_MAP_VALUE_OR_NULL: case PTR_TO_STACK: case PTR_TO_CTX: case PTR_TO_PACKET: case PTR_TO_PACKET_META: case PTR_TO_PACKET_END: case CONST_PTR_TO_MAP: return true; default: return false; } } /* Does this register contain a constant zero? 
*/ static bool register_is_null(struct bpf_reg_state *reg) { return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); } /* check_stack_read/write functions track spill/fill of registers, * stack boundary and alignment are checked in check_mem_access() */ static int check_stack_write(struct bpf_verifier_env *env, struct bpf_func_state *state, /* func where register points to */ int off, int size, int value_regno, int insn_idx) { struct bpf_func_state *cur; /* state of the current function */ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; enum bpf_reg_type type; err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), true); if (err) return err; /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, * so it's aligned access and [off, off + size) are within stack limits */ if (!env->allow_ptr_leaks && state->stack[spi].slot_type[0] == STACK_SPILL && size != BPF_REG_SIZE) { verbose(env, "attempt to corrupt spilled pointer on stack\n"); return -EACCES; } cur = env->cur_state->frame[env->cur_state->curframe]; if (value_regno >= 0 && is_spillable_regtype((type = cur->regs[value_regno].type))) { /* register containing pointer is being spilled into stack */ if (size != BPF_REG_SIZE) { verbose(env, "invalid size of register spill\n"); return -EACCES; } if (state != cur && type == PTR_TO_STACK) { verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); return -EINVAL; } /* save register state */ state->stack[spi].spilled_ptr = cur->regs[value_regno]; state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; for (i = 0; i < BPF_REG_SIZE; i++) { if (state->stack[spi].slot_type[i] == STACK_MISC && !env->allow_ptr_leaks) { int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; int soff = (-spi - 1) * BPF_REG_SIZE; /* detected reuse of integer stack slot with a pointer * which means either llvm is reusing stack slot or * an attacker is trying to exploit CVE-2018-3639 * (speculative store bypass) * Have to sanitize that slot with preemptive * store of zero. */ if (*poff && *poff != soff) { /* disallow programs where single insn stores * into two different stack slots, since verifier * cannot sanitize them */ verbose(env, "insn %d cannot access two stack slots fp%d and fp%d", insn_idx, *poff, soff); return -EINVAL; } *poff = soff; } state->stack[spi].slot_type[i] = STACK_SPILL; } } else { u8 type = STACK_MISC; /* regular write of data into stack */ state->stack[spi].spilled_ptr = (struct bpf_reg_state) {}; /* only mark the slot as written if all 8 bytes were written * otherwise read propagation may incorrectly stop too soon * when stack slots are partially written. 
		 * This heuristic means that read propagation will be
		 * conservative, since it will add reg_live_read marks
		 * to stack slots all the way to the first state when a
		 * program writes and reads less than 8 bytes.
		 */
		if (size == BPF_REG_SIZE)
			state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;

		/* when we zero initialize stack slots mark them as such */
		if (value_regno >= 0 &&
		    register_is_null(&cur->regs[value_regno]))
			type = STACK_ZERO;

		for (i = 0; i < size; i++)
			state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
				type;
	}
	return 0;
}

/* registers of every function are unique and mark_reg_read() propagates
 * the liveness in the following cases:
 * - from callee into caller for R1 - R5 that were used as arguments
 * - from caller into callee for R0 that is used as the result of the call
 * - from caller to the same caller skipping states of the callee for R6 - R9,
 *   since R6 - R9 are callee saved by the implicit function prologue and
 *   caller's R6 != callee's R6, so when we propagate liveness up to
 *   parent states we need to skip callee states for R6 - R9.
 *
 * stack slot marking is different, since stacks of the caller and the callee
 * are accessible in both (the caller can pass a pointer to the caller's stack
 * to the callee, which can pass it to another function), hence
 * mark_stack_slot_read() has to propagate the stack liveness to all parent
 * states at the given frame number.
 * Consider code:
 * f1() {
 *         ptr = fp - 8;
 *         *ptr = ctx;
 *         call f2 {
 *                 .. = *ptr;
 *         }
 *         .. = *ptr;
 * }
 * First *ptr is reading from f1's stack and mark_stack_slot_read() has
 * to mark liveness at f1's frame and not f2's frame.
 * Second *ptr is also reading from f1's stack and mark_stack_slot_read() has
 * to propagate liveness to f2 states at f1's frame level and further into
 * f1 states at f1's frame level until a write into that stack slot
 */
static void mark_stack_slot_read(struct bpf_verifier_env *env,
				 const struct bpf_verifier_state *state,
				 struct bpf_verifier_state *parent,
				 int slot, int frameno)
{
	bool writes = parent == state->parent; /* Observe write marks */

	while (parent) {
		if (parent->frame[frameno]->allocated_stack <= slot * BPF_REG_SIZE)
			/* since the LIVE_WRITTEN mark is only done for a full
			 * 8-byte write, the read marks are conservative and
			 * the parent state may not even have the stack
			 * allocated. In such case end the propagation, since
			 * the loop reached the beginning of the function
			 */
			break;
		/* if read wasn't screened by an earlier write ... */
		if (writes &&
		    state->frame[frameno]->stack[slot].spilled_ptr.live &
		    REG_LIVE_WRITTEN)
			break;
		/* ...
		 * then we depend on parent's value
		 */
		parent->frame[frameno]->stack[slot].spilled_ptr.live |=
			REG_LIVE_READ;
		state = parent;
		parent = state->parent;
		writes = true;
	}
}

static int check_stack_read(struct bpf_verifier_env *env,
			    struct bpf_func_state *reg_state /* func where register points to */,
			    int off, int size, int value_regno)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
	u8 *stype;

	if (reg_state->allocated_stack <= slot) {
		verbose(env, "invalid read from stack off %d+0 size %d\n",
			off, size);
		return -EACCES;
	}
	stype = reg_state->stack[spi].slot_type;

	if (stype[0] == STACK_SPILL) {
		if (size != BPF_REG_SIZE) {
			verbose(env, "invalid size of register spill\n");
			return -EACCES;
		}
		for (i = 1; i < BPF_REG_SIZE; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) {
				verbose(env, "corrupted spill memory\n");
				return -EACCES;
			}
		}

		if (value_regno >= 0) {
			/* restore register state from stack */
			state->regs[value_regno] = reg_state->stack[spi].spilled_ptr;
			/* mark reg as written since spilled pointer state likely
			 * has its liveness marks cleared by is_state_visited()
			 * which resets stack/reg liveness for state transitions
			 */
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		mark_stack_slot_read(env, vstate, vstate->parent, spi,
				     reg_state->frameno);
		return 0;
	} else {
		int zeros = 0;

		for (i = 0; i < size; i++) {
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC)
				continue;
			if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) {
				zeros++;
				continue;
			}
			verbose(env, "invalid read from stack off %d+%d size %d\n",
				off, i, size);
			return -EACCES;
		}
		mark_stack_slot_read(env, vstate, vstate->parent, spi,
				     reg_state->frameno);
		if (value_regno >= 0) {
			if (zeros == size) {
				/* any size read into register is zero extended,
				 * so the whole register == const_zero
				 */
				__mark_reg_const_zero(&state->regs[value_regno]);
			} else {
				/* have read misc data from the stack */
				mark_reg_unknown(env, state->regs, value_regno);
			}
			state->regs[value_regno].live |= REG_LIVE_WRITTEN;
		}
		return 0;
	}
}

/* check read/write into map element returned by bpf_map_lookup_elem() */
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
			      int size, bool zero_size_allowed)
{
	struct bpf_reg_state *regs = cur_regs(env);
	struct bpf_map *map = regs[regno].map_ptr;

	if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) ||
	    off + size > map->value_size) {
		verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n",
			map->value_size, off, size);
		return -EACCES;
	}
	return 0;
}

/* check read/write into a map element with possible variable offset */
static int check_map_access(struct bpf_verifier_env *env, u32 regno,
			    int off, int size, bool zero_size_allowed)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *reg = &state->regs[regno];
	int err;

	/* We may have adjusted the register to this map value, so we
	 * need to try adding each of min_value and max_value to off
	 * to make sure our theoretical access will be safe.
	 */
	if (env->log.level)
		print_verifier_state(env, state);
	/* The minimum value is only important with signed
	 * comparisons where we can't assume the floor of a
	 * value is 0. If we are using signed variables for our
	 * indexes we need to make sure that whatever we use
	 * will have a set floor within our range.
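	 * E.g. an index register known only as -2 s<= idx s<= 2 could make
	 * map_value + idx point below the start of the value area, so any
	 * register with a negative smin_value is rejected just below.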
*/ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->smin_value + off, size, zero_size_allowed); if (err) { verbose(env, "R%d min value is outside of the array range\n", regno); return err; } /* If we haven't set a max value then we need to bail since we can't be * sure we won't do bad things. * If reg->umax_value + off could overflow, treat that as unbounded too. */ if (reg->umax_value >= BPF_MAX_VAR_OFF) { verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", regno); return -EACCES; } err = __check_map_access(env, regno, reg->umax_value + off, size, zero_size_allowed); if (err) verbose(env, "R%d max value is outside of the array range\n", regno); return err; } #define MAX_PACKET_OFF 0xffff static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_access_type t) { switch (env->prog->type) { case BPF_PROG_TYPE_LWT_IN: case BPF_PROG_TYPE_LWT_OUT: case BPF_PROG_TYPE_LWT_SEG6LOCAL: case BPF_PROG_TYPE_SK_REUSEPORT: /* dst_input() and dst_output() can't write for now */ if (t == BPF_WRITE) return false; /* fallthrough */ case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: case BPF_PROG_TYPE_XDP: case BPF_PROG_TYPE_LWT_XMIT: case BPF_PROG_TYPE_SK_SKB: case BPF_PROG_TYPE_SK_MSG: if (meta) return meta->pkt_access; env->seen_direct_write = true; return true; default: return false; } } static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || (u64)off + size > reg->range) { verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", off, size, regno, reg->id, reg->off, reg->range); return -EACCES; } return 0; } static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, int size, bool zero_size_allowed) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = &regs[regno]; int err; /* We may have added a variable offset to the packet pointer; but any * reg->range we have comes after that. We are only checking the fixed * offset. */ /* We don't allow negative numbers, because we aren't tracking enough * detail to prove they're safe. */ if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", regno); return -EACCES; } err = __check_packet_access(env, regno, off, size, zero_size_allowed); if (err) { verbose(env, "R%d offset is outside of the packet\n", regno); return err; } return err; } /* check access to 'struct bpf_context' fields. Supports fixed offsets only */ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, enum bpf_access_type t, enum bpf_reg_type *reg_type) { struct bpf_insn_access_aux info = { .reg_type = *reg_type, }; if (env->ops->is_valid_access && env->ops->is_valid_access(off, size, t, env->prog, &info)) { /* A non zero info.ctx_field_size indicates that this field is a * candidate for later verifier transformation to load the whole * field and then apply a mask when accessed with a narrower * access than actual ctx access size. A zero info.ctx_field_size * will only allow for whole field access and rejects any other * type of narrower access. 
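		 * E.g. a 1-byte load from an 8-byte ctx field can be accepted
		 * here and is later rewritten by the verifier into a full
		 * field load plus shift/mask (see convert_ctx_accesses()).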
*/ *reg_type = info.reg_type; env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; /* remember the offset of last byte accessed in ctx */ if (env->prog->aux->max_ctx_offset < off + size) env->prog->aux->max_ctx_offset = off + size; return 0; } verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); return -EACCES; } static bool __is_pointer_value(bool allow_ptr_leaks, const struct bpf_reg_state *reg) { if (allow_ptr_leaks) return false; return reg->type != SCALAR_VALUE; } static bool is_pointer_value(struct bpf_verifier_env *env, int regno) { return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); } static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = cur_regs(env) + regno; return reg->type == PTR_TO_CTX; } static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) { const struct bpf_reg_state *reg = cur_regs(env) + regno; return type_is_pkt_pointer(reg->type); } static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict) { struct tnum reg_off; int ip_align; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; /* For platforms that do not have a Kconfig enabling * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of * NET_IP_ALIGN is universally set to '2'. And on platforms * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get * to this code only in strict mode where we want to emulate * the NET_IP_ALIGN==2 checking. Therefore use an * unconditional IP align value of '2'. */ ip_align = 2; reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned packet access off %d+%s+%d+%d size %d\n", ip_align, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_generic_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, const char *pointer_desc, int off, int size, bool strict) { struct tnum reg_off; /* Byte size accesses are always allowed. */ if (!strict || size == 1) return 0; reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); if (!tnum_is_aligned(reg_off, size)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", pointer_desc, tn_buf, reg->off, off, size); return -EACCES; } return 0; } static int check_ptr_alignment(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int off, int size, bool strict_alignment_once) { bool strict = env->strict_alignment || strict_alignment_once; const char *pointer_desc = ""; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: /* Special case, because of NET_IP_ALIGN. Given metadata sits * right in front, treat it the very same way. */ return check_pkt_ptr_alignment(env, reg, off, size, strict); case PTR_TO_MAP_VALUE: pointer_desc = "value "; break; case PTR_TO_CTX: pointer_desc = "context "; break; case PTR_TO_STACK: pointer_desc = "stack "; /* The stack spill tracking logic in check_stack_write() * and check_stack_read() relies on stack accesses being * aligned. 
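		 * E.g. an 8-byte store to fp-12 would straddle two spill
		 * slots, so alignment stays enforced here even when
		 * env->strict_alignment is not set.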
*/ strict = true; break; default: break; } return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, strict); } static int update_stack_depth(struct bpf_verifier_env *env, const struct bpf_func_state *func, int off) { u16 stack = env->subprog_info[func->subprogno].stack_depth; if (stack >= -off) return 0; /* update known max for given subprogram */ env->subprog_info[func->subprogno].stack_depth = -off; return 0; } /* starting from main bpf function walk all instructions of the function * and recursively walk all callees that given function can call. * Ignore jump and exit insns. * Since recursion is prevented by check_cfg() this algorithm * only needs a local stack of MAX_CALL_FRAMES to remember callsites */ static int check_max_stack_depth(struct bpf_verifier_env *env) { int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; struct bpf_subprog_info *subprog = env->subprog_info; struct bpf_insn *insn = env->prog->insnsi; int ret_insn[MAX_CALL_FRAMES]; int ret_prog[MAX_CALL_FRAMES]; process_func: /* round up to 32-bytes, since this is granularity * of interpreter stack size */ depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); if (depth > MAX_BPF_STACK) { verbose(env, "combined stack size of %d calls is %d. Too large\n", frame + 1, depth); return -EACCES; } continue_func: subprog_end = subprog[idx + 1].start; for (; i < subprog_end; i++) { if (insn[i].code != (BPF_JMP | BPF_CALL)) continue; if (insn[i].src_reg != BPF_PSEUDO_CALL) continue; /* remember insn and function to return to */ ret_insn[frame] = i + 1; ret_prog[frame] = idx; /* find the callee */ i = i + insn[i].imm + 1; idx = find_subprog(env, i); if (idx < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i); return -EFAULT; } frame++; if (frame >= MAX_CALL_FRAMES) { WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); return -EFAULT; } goto process_func; } /* end of for() loop means the last insn of the 'subprog' * was reached. Doesn't matter whether it was JA or EXIT */ if (frame == 0) return 0; depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); frame--; i = ret_insn[frame]; idx = ret_prog[frame]; goto continue_func; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON static int get_callee_stack_depth(struct bpf_verifier_env *env, const struct bpf_insn *insn, int idx) { int start = idx + insn->imm + 1, subprog; subprog = find_subprog(env, start); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", start); return -EFAULT; } return env->subprog_info[subprog].stack_depth; } #endif static int check_ctx_reg(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, int regno) { /* Access to ctx or passing it to a helper is only allowed in * its original, unmodified form. 
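	 * E.g. "r1 += 8; r0 = *(u32 *)(r1 + 0)" is rejected here, while the
	 * equivalent "r0 = *(u32 *)(r1 + 8)" on the unmodified ctx pointer
	 * is fine, because ctx rewriting only understands the fixed offset
	 * encoded in the load instruction itself.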
*/ if (reg->off) { verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", regno, reg->off); return -EACCES; } if (!tnum_is_const(reg->var_off) || reg->var_off.value) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); return -EACCES; } return 0; } /* truncate register to smaller size (in bytes) * must be called with size < BPF_REG_SIZE */ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) { u64 mask; /* clear high bits in bit representation */ reg->var_off = tnum_cast(reg->var_off, size); /* fix arithmetic bounds */ mask = ((u64)1 << (size * 8)) - 1; if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { reg->umin_value &= mask; reg->umax_value &= mask; } else { reg->umin_value = 0; reg->umax_value = mask; } reg->smin_value = reg->umin_value; reg->smax_value = reg->umax_value; } /* check whether memory at (regno + off) is accessible for t = (read | write) * if t==write, value_regno is a register which value is stored into memory * if t==read, value_regno is a register which will receive the value from memory * if t==write && value_regno==-1, some unknown value is stored into memory * if t==read && value_regno==-1, don't care what we read from memory */ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off, int bpf_size, enum bpf_access_type t, int value_regno, bool strict_alignment_once) { struct bpf_reg_state *regs = cur_regs(env); struct bpf_reg_state *reg = regs + regno; struct bpf_func_state *state; int size, err = 0; size = bpf_size_to_bytes(bpf_size); if (size < 0) return size; /* alignment checks will add in reg->off themselves */ err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); if (err) return err; /* for access checks, reg->off is just part of off */ off += reg->off; if (reg->type == PTR_TO_MAP_VALUE) { if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into map\n", value_regno); return -EACCES; } err = check_map_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else if (reg->type == PTR_TO_CTX) { enum bpf_reg_type reg_type = SCALAR_VALUE; if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into ctx\n", value_regno); return -EACCES; } err = check_ctx_reg(env, reg, regno); if (err < 0) return err; err = check_ctx_access(env, insn_idx, off, size, t, &reg_type); if (!err && t == BPF_READ && value_regno >= 0) { /* ctx access returns either a scalar, or a * PTR_TO_PACKET[_META,_END]. In the latter * case, we know the offset is zero. */ if (reg_type == SCALAR_VALUE) mark_reg_unknown(env, regs, value_regno); else mark_reg_known_zero(env, regs, value_regno); regs[value_regno].id = 0; regs[value_regno].off = 0; regs[value_regno].range = 0; regs[value_regno].type = reg_type; } } else if (reg->type == PTR_TO_STACK) { /* stack accesses must be at a fixed offset, so that we can * determine what type of data were returned. * See check_stack_read(). 
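		 * E.g. "r2 = r10; r2 += r3; r1 = *(u64 *)(r2 + 0)" with a
		 * variable r3 is rejected below, since the verifier could not
		 * tell which slot (and which slot_type) the access would hit.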
*/ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "variable stack access var_off=%s off=%d size=%d", tn_buf, off, size); return -EACCES; } off += reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK) { verbose(env, "invalid stack off=%d size=%d\n", off, size); return -EACCES; } state = func(env, reg); err = update_stack_depth(env, state, off); if (err) return err; if (t == BPF_WRITE) err = check_stack_write(env, state, off, size, value_regno, insn_idx); else err = check_stack_read(env, state, off, size, value_regno); } else if (reg_is_pkt_pointer(reg)) { if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { verbose(env, "cannot write into packet\n"); return -EACCES; } if (t == BPF_WRITE && value_regno >= 0 && is_pointer_value(env, value_regno)) { verbose(env, "R%d leaks addr into packet\n", value_regno); return -EACCES; } err = check_packet_access(env, regno, off, size, false); if (!err && t == BPF_READ && value_regno >= 0) mark_reg_unknown(env, regs, value_regno); } else { verbose(env, "R%d invalid mem access '%s'\n", regno, reg_type_str[reg->type]); return -EACCES; } if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && regs[value_regno].type == SCALAR_VALUE) { /* b/h/w load zero-extends, mark upper bits as known 0 */ coerce_reg_to_size(&regs[value_regno], size); } return err; } static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int err; if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || insn->imm != 0) { verbose(env, "BPF_XADD uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d leaks addr into mem\n", insn->src_reg); return -EACCES; } if (is_ctx_reg(env, insn->dst_reg) || is_pkt_reg(env, insn->dst_reg)) { verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ? "context" : "packet"); return -EACCES; } /* check whether atomic_add can read the memory */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, -1, true); if (err) return err; /* check whether atomic_add can write into the same memory */ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, true); } /* when register 'regno' is passed into function that will read 'access_size' * bytes from that pointer, make sure that it's within stack boundary * and all elements of stack are initialized. * Unlike most pointer bounds-checking functions, this one doesn't take an * 'off' argument, so it has to add in reg->off itself. 
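 * E.g. for a helper taking (ARG_PTR_TO_MEM, ARG_CONST_SIZE) args called as
 * bpf_xxx(fp - 16, 16), this checks that fp-16..fp-1 lie within the stack
 * and that each of those bytes was initialized, unless meta->raw_mode says
 * the helper will overwrite them anyway.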
*/ static int check_stack_boundary(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *reg = cur_regs(env) + regno; struct bpf_func_state *state = func(env, reg); int off, i, slot, spi; if (reg->type != PTR_TO_STACK) { /* Allow zero-byte read from NULL, regardless of pointer type */ if (zero_size_allowed && access_size == 0 && register_is_null(reg)) return 0; verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[reg->type], reg_type_str[PTR_TO_STACK]); return -EACCES; } /* Only allow fixed-offset stack reads */ if (!tnum_is_const(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "invalid variable stack read R%d var_off=%s\n", regno, tn_buf); return -EACCES; } off = reg->off + reg->var_off.value; if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || access_size < 0 || (access_size == 0 && !zero_size_allowed)) { verbose(env, "invalid stack type R%d off=%d access_size=%d\n", regno, off, access_size); return -EACCES; } if (meta && meta->raw_mode) { meta->access_size = access_size; meta->regno = regno; return 0; } for (i = 0; i < access_size; i++) { u8 *stype; slot = -(off + i) - 1; spi = slot / BPF_REG_SIZE; if (state->allocated_stack <= slot) goto err; stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; if (*stype == STACK_MISC) goto mark; if (*stype == STACK_ZERO) { /* helper can write anything into the stack */ *stype = STACK_MISC; goto mark; } err: verbose(env, "invalid indirect read from stack off %d+%d size %d\n", off, i, access_size); return -EACCES; mark: /* reading any byte out of 8-byte 'spill_slot' will cause * the whole slot to be marked as 'read' */ mark_stack_slot_read(env, env->cur_state, env->cur_state->parent, spi, state->frameno); } return update_stack_depth(env, state, off); } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, int access_size, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; switch (reg->type) { case PTR_TO_PACKET: case PTR_TO_PACKET_META: return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_VALUE: return check_map_access(env, regno, reg->off, access_size, zero_size_allowed); default: /* scalar_value|ptr_to_stack or invalid ptr */ return check_stack_boundary(env, regno, access_size, zero_size_allowed, meta); } } static bool arg_type_is_mem_ptr(enum bpf_arg_type type) { return type == ARG_PTR_TO_MEM || type == ARG_PTR_TO_MEM_OR_NULL || type == ARG_PTR_TO_UNINIT_MEM; } static bool arg_type_is_mem_size(enum bpf_arg_type type) { return type == ARG_CONST_SIZE || type == ARG_CONST_SIZE_OR_ZERO; } static int check_func_arg(struct bpf_verifier_env *env, u32 regno, enum bpf_arg_type arg_type, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; enum bpf_reg_type expected_type, type = reg->type; int err = 0; if (arg_type == ARG_DONTCARE) return 0; err = check_reg_arg(env, regno, SRC_OP); if (err) return err; if (arg_type == ARG_ANYTHING) { if (is_pointer_value(env, regno)) { verbose(env, "R%d leaks addr into helper function\n", regno); return -EACCES; } return 0; } if (type_is_pkt_pointer(type) && !may_access_direct_pkt_data(env, meta, BPF_READ)) { verbose(env, "helper access to the packet is not allowed\n"); return -EACCES; } if (arg_type == ARG_PTR_TO_MAP_KEY || arg_type == ARG_PTR_TO_MAP_VALUE) { expected_type = 
PTR_TO_STACK; if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_SIZE || arg_type == ARG_CONST_SIZE_OR_ZERO) { expected_type = SCALAR_VALUE; if (type != expected_type) goto err_type; } else if (arg_type == ARG_CONST_MAP_PTR) { expected_type = CONST_PTR_TO_MAP; if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_CTX) { expected_type = PTR_TO_CTX; if (type != expected_type) goto err_type; err = check_ctx_reg(env, reg, regno); if (err < 0) return err; } else if (arg_type_is_mem_ptr(arg_type)) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ if (register_is_null(reg) && arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && type != expected_type) goto err_type; meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; } else { verbose(env, "unsupported arg_type %d\n", arg_type); return -EFAULT; } if (arg_type == ARG_CONST_MAP_PTR) { /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ meta->map_ptr = reg->map_ptr; } else if (arg_type == ARG_PTR_TO_MAP_KEY) { /* bpf_map_xxx(..., map_ptr, ..., key) call: * check that [key, key + map->key_size) are within * stack limits and initialized */ if (!meta->map_ptr) { /* in function declaration map_ptr must come before * map_key, so that it's verified and known before * we have to check map_key here. Otherwise it means * that kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, false, NULL); } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { /* bpf_map_xxx(..., map_ptr, ..., value) call: * check [value, value + map->value_size) validity */ if (!meta->map_ptr) { /* kernel subsystem misconfigured verifier */ verbose(env, "invalid map_ptr to access map->value\n"); return -EACCES; } err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, false, NULL); } else if (arg_type_is_mem_size(arg_type)) { bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); /* remember the mem_size which may be used later * to refine return values. */ meta->msize_smax_value = reg->smax_value; meta->msize_umax_value = reg->umax_value; /* The register is SCALAR_VALUE; the access check * happens using its boundaries. */ if (!tnum_is_const(reg->var_off)) /* For unprivileged variable accesses, disable raw * mode so that the program is required to * initialize all the memory that the helper could * just partially fill up. 
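			 * E.g. if a raw-mode helper gets buf = fp - 64 and a
			 * size only known to be <= 64, clearing meta here
			 * forces all 64 bytes to be initialized beforehand.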
*/ meta = NULL; if (reg->smin_value < 0) { verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", regno); return -EACCES; } if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, zero_size_allowed, meta); if (err) return err; } if (reg->umax_value >= BPF_MAX_VAR_SIZ) { verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", regno); return -EACCES; } err = check_helper_mem_access(env, regno - 1, reg->umax_value, zero_size_allowed, meta); } return err; err_type: verbose(env, "R%d type=%s expected=%s\n", regno, reg_type_str[type], reg_type_str[expected_type]); return -EACCES; } static int check_map_func_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, int func_id) { if (!map) return 0; /* We need a two way check, first is from map perspective ... */ switch (map->map_type) { case BPF_MAP_TYPE_PROG_ARRAY: if (func_id != BPF_FUNC_tail_call) goto error; break; case BPF_MAP_TYPE_PERF_EVENT_ARRAY: if (func_id != BPF_FUNC_perf_event_read && func_id != BPF_FUNC_perf_event_output && func_id != BPF_FUNC_perf_event_read_value) goto error; break; case BPF_MAP_TYPE_STACK_TRACE: if (func_id != BPF_FUNC_get_stackid) goto error; break; case BPF_MAP_TYPE_CGROUP_ARRAY: if (func_id != BPF_FUNC_skb_under_cgroup && func_id != BPF_FUNC_current_task_under_cgroup) goto error; break; case BPF_MAP_TYPE_CGROUP_STORAGE: if (func_id != BPF_FUNC_get_local_storage) goto error; break; /* devmap returns a pointer to a live net_device ifindex that we cannot * allow to be modified from bpf side. So do not allow lookup elements * for now. */ case BPF_MAP_TYPE_DEVMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; /* Restrict bpf side of cpumap and xskmap, open when use-cases * appear. */ case BPF_MAP_TYPE_CPUMAP: case BPF_MAP_TYPE_XSKMAP: if (func_id != BPF_FUNC_redirect_map) goto error; break; case BPF_MAP_TYPE_ARRAY_OF_MAPS: case BPF_MAP_TYPE_HASH_OF_MAPS: if (func_id != BPF_FUNC_map_lookup_elem) goto error; break; case BPF_MAP_TYPE_SOCKMAP: if (func_id != BPF_FUNC_sk_redirect_map && func_id != BPF_FUNC_sock_map_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_map) goto error; break; case BPF_MAP_TYPE_SOCKHASH: if (func_id != BPF_FUNC_sk_redirect_hash && func_id != BPF_FUNC_sock_hash_update && func_id != BPF_FUNC_map_delete_elem && func_id != BPF_FUNC_msg_redirect_hash) goto error; break; case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: if (func_id != BPF_FUNC_sk_select_reuseport) goto error; break; default: break; } /* ... and second from the function itself. 
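	 * E.g. BPF_MAP_TYPE_PROG_ARRAY only passed the switch above when
	 * func_id is bpf_tail_call(), and the switch below rejects
	 * bpf_tail_call() on every other map type, so the two directions
	 * stay consistent.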
*/ switch (func_id) { case BPF_FUNC_tail_call: if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) goto error; if (env->subprog_cnt > 1) { verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n"); return -EINVAL; } break; case BPF_FUNC_perf_event_read: case BPF_FUNC_perf_event_output: case BPF_FUNC_perf_event_read_value: if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) goto error; break; case BPF_FUNC_get_stackid: if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) goto error; break; case BPF_FUNC_current_task_under_cgroup: case BPF_FUNC_skb_under_cgroup: if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) goto error; break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && map->map_type != BPF_MAP_TYPE_CPUMAP && map->map_type != BPF_MAP_TYPE_XSKMAP) goto error; break; case BPF_FUNC_sk_redirect_map: case BPF_FUNC_msg_redirect_map: case BPF_FUNC_sock_map_update: if (map->map_type != BPF_MAP_TYPE_SOCKMAP) goto error; break; case BPF_FUNC_sk_redirect_hash: case BPF_FUNC_msg_redirect_hash: case BPF_FUNC_sock_hash_update: if (map->map_type != BPF_MAP_TYPE_SOCKHASH) goto error; break; case BPF_FUNC_get_local_storage: if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE) goto error; break; case BPF_FUNC_sk_select_reuseport: if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) goto error; break; default: break; } return 0; error: verbose(env, "cannot pass map_type %d into func %s#%d\n", map->map_type, func_id_name(func_id), func_id); return -EINVAL; } static bool check_raw_mode_ok(const struct bpf_func_proto *fn) { int count = 0; if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) count++; if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) count++; /* We only support one arg being in raw mode at the moment, * which is sufficient for the helper functions we have * right now. */ return count <= 1; } static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, enum bpf_arg_type arg_next) { return (arg_type_is_mem_ptr(arg_curr) && !arg_type_is_mem_size(arg_next)) || (!arg_type_is_mem_ptr(arg_curr) && arg_type_is_mem_size(arg_next)); } static bool check_arg_pair_ok(const struct bpf_func_proto *fn) { /* bpf_xxx(..., buf, len) call will access 'len' * bytes from memory 'buf'. Both arg types need * to be paired, so make sure there's no buggy * helper function specification. */ if (arg_type_is_mem_size(fn->arg1_type) || arg_type_is_mem_ptr(fn->arg5_type) || check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) return false; return true; } static int check_func_proto(const struct bpf_func_proto *fn) { return check_raw_mode_ok(fn) && check_arg_pair_ok(fn) ? 0 : -EINVAL; } /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] * are now invalid, so turn them into unknown SCALAR_VALUE. 
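 * E.g. after a helper like bpf_skb_pull_data() the linear packet data may
 * have been reallocated; the program must re-load data/data_end from the
 * ctx and redo its bounds checks before touching packet bytes again.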
 */
static void __clear_all_pkt_pointers(struct bpf_verifier_env *env,
				     struct bpf_func_state *state)
{
	struct bpf_reg_state *regs = state->regs, *reg;
	int i;

	for (i = 0; i < MAX_BPF_REG; i++)
		if (reg_is_pkt_pointer_any(&regs[i]))
			mark_reg_unknown(env, regs, i);

	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		if (state->stack[i].slot_type[0] != STACK_SPILL)
			continue;
		reg = &state->stack[i].spilled_ptr;
		if (reg_is_pkt_pointer_any(reg))
			__mark_reg_unknown(reg);
	}
}

static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	int i;

	for (i = 0; i <= vstate->curframe; i++)
		__clear_all_pkt_pointers(env, vstate->frame[i]);
}

static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
			   int *insn_idx)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_func_state *caller, *callee;
	int i, subprog, target_insn;

	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
		verbose(env, "the call stack of %d frames is too deep\n",
			state->curframe + 2);
		return -E2BIG;
	}

	target_insn = *insn_idx + insn->imm;
	subprog = find_subprog(env, target_insn + 1);
	if (subprog < 0) {
		verbose(env, "verifier bug. No program starts at insn %d\n",
			target_insn + 1);
		return -EFAULT;
	}

	caller = state->frame[state->curframe];
	if (state->frame[state->curframe + 1]) {
		verbose(env, "verifier bug. Frame %d already allocated\n",
			state->curframe + 1);
		return -EFAULT;
	}

	callee = kzalloc(sizeof(*callee), GFP_KERNEL);
	if (!callee)
		return -ENOMEM;
	state->frame[state->curframe + 1] = callee;

	/* callee cannot access r0, r6 - r9 for reading and has to write
	 * into its own stack before reading from it.
	 * callee can read/write into caller's stack
	 */
	init_func_state(env, callee,
			/* remember the callsite, it will be used by bpf_exit */
			*insn_idx /* callsite */,
			state->curframe + 1 /* frameno within this callchain */,
			subprog /* subprog number within this prog */);

	/* copy r1 - r5 args that callee can access */
	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
		callee->regs[i] = caller->regs[i];

	/* after the call registers r0 - r5 were scratched */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(env, caller->regs, caller_saved[i]);
		check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK);
	}

	/* only increment it after check_reg_arg() finished */
	state->curframe++;

	/* and go analyze the first insn of the callee */
	*insn_idx = target_insn;

	if (env->log.level) {
		verbose(env, "caller:\n");
		print_verifier_state(env, caller);
		verbose(env, "callee:\n");
		print_verifier_state(env, callee);
	}
	return 0;
}

static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
	struct bpf_verifier_state *state = env->cur_state;
	struct bpf_func_state *caller, *callee;
	struct bpf_reg_state *r0;

	callee = state->frame[state->curframe];
	r0 = &callee->regs[BPF_REG_0];
	if (r0->type == PTR_TO_STACK) {
		/* technically it's ok to return caller's stack pointer
		 * (or caller's caller's pointer) back to the caller,
		 * since these pointers are valid.
Only current stack * pointer will be invalid as soon as function exits, * but let's be conservative */ verbose(env, "cannot return stack pointer to the caller\n"); return -EINVAL; } state->curframe--; caller = state->frame[state->curframe]; /* return to the caller whatever r0 had in the callee */ caller->regs[BPF_REG_0] = *r0; *insn_idx = callee->callsite + 1; if (env->log.level) { verbose(env, "returning from callee:\n"); print_verifier_state(env, callee); verbose(env, "to caller at %d:\n", *insn_idx); print_verifier_state(env, caller); } /* clear everything in the callee */ free_func_state(callee); state->frame[state->curframe + 1] = NULL; return 0; } static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type, int func_id, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *ret_reg = &regs[BPF_REG_0]; if (ret_type != RET_INTEGER || (func_id != BPF_FUNC_get_stack && func_id != BPF_FUNC_probe_read_str)) return; ret_reg->smax_value = meta->msize_smax_value; ret_reg->umax_value = meta->msize_umax_value; __reg_deduce_bounds(ret_reg); __reg_bound_offset(ret_reg); } static int record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, int func_id, int insn_idx) { struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; if (func_id != BPF_FUNC_tail_call && func_id != BPF_FUNC_map_lookup_elem && func_id != BPF_FUNC_map_update_elem && func_id != BPF_FUNC_map_delete_elem) return 0; if (meta->map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } if (!BPF_MAP_PTR(aux->map_state)) bpf_map_ptr_store(aux, meta->map_ptr, meta->map_ptr->unpriv_array); else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr) bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, meta->map_ptr->unpriv_array); return 0; } static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) { const struct bpf_func_proto *fn = NULL; struct bpf_reg_state *regs; struct bpf_call_arg_meta meta; bool changes_data; int i, err; /* find function prototype */ if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { verbose(env, "invalid func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } if (env->ops->get_func_proto) fn = env->ops->get_func_proto(func_id, env->prog); if (!fn) { verbose(env, "unknown func %s#%d\n", func_id_name(func_id), func_id); return -EINVAL; } /* eBPF programs must be GPL compatible to use GPL-ed functions */ if (!env->prog->gpl_compatible && fn->gpl_only) { verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); return -EINVAL; } /* With LD_ABS/IND some JITs save/restore skb from r1. 
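	 * Any helper that can change packet data therefore must take the
	 * ctx pointer in r1; the check right below enforces that
	 * (fn->arg1_type must be ARG_PTR_TO_CTX).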
*/ changes_data = bpf_helper_changes_pkt_data(fn->func); if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", func_id_name(func_id), func_id); return -EINVAL; } memset(&meta, 0, sizeof(meta)); meta.pkt_access = fn->pkt_access; err = check_func_proto(fn); if (err) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(func_id), func_id); return err; } /* check args */ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); if (err) return err; err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); if (err) return err; err = record_func_map(env, &meta, func_id, insn_idx); if (err) return err; /* Mark slots with STACK_MISC in case of raw mode, stack offset * is inferred from register state. */ for (i = 0; i < meta.access_size; i++) { err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1, false); if (err) return err; } regs = cur_regs(env); /* check that flags argument in get_local_storage(map, flags) is 0, * this is required because get_local_storage() can't return an error. */ if (func_id == BPF_FUNC_get_local_storage && !register_is_null(&regs[BPF_REG_2])) { verbose(env, "get_local_storage() doesn't support non-zero flags\n"); return -EINVAL; } /* reset caller saved regs */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* update return register (already marked as written above) */ if (fn->ret_type == RET_INTEGER) { /* sets type to SCALAR_VALUE */ mark_reg_unknown(env, regs, BPF_REG_0); } else if (fn->ret_type == RET_VOID) { regs[BPF_REG_0].type = NOT_INIT; } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || fn->ret_type == RET_PTR_TO_MAP_VALUE) { if (fn->ret_type == RET_PTR_TO_MAP_VALUE) regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; else regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; /* There is no offset yet applied, variable or fixed */ mark_reg_known_zero(env, regs, BPF_REG_0); regs[BPF_REG_0].off = 0; /* remember map_ptr, so that check_map_access() * can check 'value_size' boundary of memory access * to map element returned from bpf_map_lookup_elem() */ if (meta.map_ptr == NULL) { verbose(env, "kernel subsystem misconfigured verifier\n"); return -EINVAL; } regs[BPF_REG_0].map_ptr = meta.map_ptr; regs[BPF_REG_0].id = ++env->id_gen; } else { verbose(env, "unknown return type %d of func %s#%d\n", fn->ret_type, func_id_name(func_id), func_id); return -EINVAL; } do_refine_retval_range(regs, fn->ret_type, func_id, &meta); err = check_map_func_compatibility(env, meta.map_ptr, func_id); if (err) return err; if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { const char *err_str; #ifdef CONFIG_PERF_EVENTS err = get_callchain_buffers(sysctl_perf_event_max_stack); err_str = "cannot get callchain buffer for func %s#%d\n"; #else err = -ENOTSUPP; err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; #endif if (err) { verbose(env, err_str, func_id_name(func_id), func_id); return err; } env->prog->has_callchain_buf = true; } if (changes_data) clear_all_pkt_pointers(env); return 0; } static bool signed_add_overflows(s64 a, s64 b) { /* Do the add in u64, where overflow is well-defined */ s64 res = (s64)((u64)a + 
(u64)b); if (b < 0) return res > a; return res < a; } static bool signed_sub_overflows(s64 a, s64 b) { /* Do the sub in u64, where overflow is well-defined */ s64 res = (s64)((u64)a - (u64)b); if (b < 0) return res < a; return res > a; } static bool check_reg_sane_offset(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, enum bpf_reg_type type) { bool known = tnum_is_const(reg->var_off); s64 val = reg->var_off.value; s64 smin = reg->smin_value; if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { verbose(env, "math between %s pointer and %lld is not allowed\n", reg_type_str[type], val); return false; } if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { verbose(env, "%s pointer offset %d is not allowed\n", reg_type_str[type], reg->off); return false; } if (smin == S64_MIN) { verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", reg_type_str[type]); return false; } if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { verbose(env, "value %lld makes %s pointer be out of bounds\n", smin, reg_type_str[type]); return false; } return true; } /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. * Caller should also handle BPF_MOV case separately. * If we return -EACCES, caller may want to try again treating pointer as a * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. */ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, const struct bpf_reg_state *ptr_reg, const struct bpf_reg_state *off_reg) { struct bpf_verifier_state *vstate = env->cur_state; struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg; bool known = tnum_is_const(off_reg->var_off); s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; u8 opcode = BPF_OP(insn->code); u32 dst = insn->dst_reg; dst_reg = &regs[dst]; if ((known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops on pointers produce (meaningless) scalars */ verbose(env, "R%d 32-bit pointer arithmetic prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", dst); return -EACCES; } if (ptr_reg->type == CONST_PTR_TO_MAP) { verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", dst); return -EACCES; } if (ptr_reg->type == PTR_TO_PACKET_END) { verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", dst); return -EACCES; } /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. * The id may be overwritten later if we create a new variable offset. 
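	 * Illustrative example: with r1 = pkt(id=2, off=0) and r0 a known
	 * scalar, "r0 += r1" must leave r0 typed as a packet pointer with
	 * id 2; keeping it a scalar would let later pkt_end checks miss it.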
	 */
	dst_reg->type = ptr_reg->type;
	dst_reg->id = ptr_reg->id;

	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
		return -EINVAL;

	switch (opcode) {
	case BPF_ADD:
		/* We can take a fixed offset as long as it doesn't overflow
		 * the s32 'off' field
		 */
		if (known && (ptr_reg->off + smin_val ==
			      (s64)(s32)(ptr_reg->off + smin_val))) {
			/* pointer += K. Accumulate it into fixed offset */
			dst_reg->smin_value = smin_ptr;
			dst_reg->smax_value = smax_ptr;
			dst_reg->umin_value = umin_ptr;
			dst_reg->umax_value = umax_ptr;
			dst_reg->var_off = ptr_reg->var_off;
			dst_reg->off = ptr_reg->off + smin_val;
			dst_reg->range = ptr_reg->range;
			break;
		}
		/* A new variable offset is created. Note that off_reg->off
		 * == 0, since it's a scalar.
		 * dst_reg gets the pointer type and since some positive
		 * integer value was added to the pointer, give it a new 'id'
		 * if it's a PTR_TO_PACKET.
		 * this creates a new 'base' pointer, off_reg (variable) gets
		 * added into the variable offset, and we copy the fixed offset
		 * from ptr_reg.
		 */
		if (signed_add_overflows(smin_ptr, smin_val) ||
		    signed_add_overflows(smax_ptr, smax_val)) {
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			dst_reg->smin_value = smin_ptr + smin_val;
			dst_reg->smax_value = smax_ptr + smax_val;
		}
		if (umin_ptr + umin_val < umin_ptr ||
		    umax_ptr + umax_val < umax_ptr) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value = umin_ptr + umin_val;
			dst_reg->umax_value = umax_ptr + umax_val;
		}
		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
		dst_reg->off = ptr_reg->off;
		if (reg_is_pkt_pointer(ptr_reg)) {
			dst_reg->id = ++env->id_gen;
			/* something was added to pkt_ptr, set range to zero */
			dst_reg->range = 0;
		}
		break;
	case BPF_SUB:
		if (dst_reg == off_reg) {
			/* scalar -= pointer. Creates an unknown scalar */
			verbose(env, "R%d tried to subtract pointer from scalar\n",
				dst);
			return -EACCES;
		}
		/* We don't allow subtraction from FP, because (according to
		 * test_verifier.c test "invalid fp arithmetic", JITs might not
		 * be able to deal with it).
		 */
		if (ptr_reg->type == PTR_TO_STACK) {
			verbose(env, "R%d subtraction from stack pointer prohibited\n",
				dst);
			return -EACCES;
		}
		if (known && (ptr_reg->off - smin_val ==
			      (s64)(s32)(ptr_reg->off - smin_val))) {
			/* pointer -= K. Subtract it from fixed offset */
			dst_reg->smin_value = smin_ptr;
			dst_reg->smax_value = smax_ptr;
			dst_reg->umin_value = umin_ptr;
			dst_reg->umax_value = umax_ptr;
			dst_reg->var_off = ptr_reg->var_off;
			dst_reg->id = ptr_reg->id;
			dst_reg->off = ptr_reg->off - smin_val;
			dst_reg->range = ptr_reg->range;
			break;
		}
		/* A new variable offset is created. If the subtrahend is known
		 * nonnegative, then any reg->range we had before is still good.
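		 * E.g. if r2=pkt(id=n, off=8, r=8), i.e. r2 + 8 <= pkt_end was
		 * already proven, subtracting a scalar known to be >= 0 can
		 * only move r2 backwards, so r2 + 8 <= pkt_end still holds and
		 * the range may be kept; see the smin_val < 0 check below.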
*/ if (signed_sub_overflows(smin_ptr, smax_val) || signed_sub_overflows(smax_ptr, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = smin_ptr - smax_val; dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value = umin_ptr - umax_val; dst_reg->umax_value = umax_ptr - umin_val; } dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; if (reg_is_pkt_pointer(ptr_reg)) { dst_reg->id = ++env->id_gen; /* something was added to pkt_ptr, set range to zero */ if (smin_val < 0) dst_reg->range = 0; } break; case BPF_AND: case BPF_OR: case BPF_XOR: /* bitwise ops on pointers are troublesome, prohibit. */ verbose(env, "R%d bitwise operator %s on pointer prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; default: /* other operators (e.g. MUL,LSH) produce non-pointer results */ verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", dst, bpf_alu_string[opcode >> 4]); return -EACCES; } if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) return -EINVAL; __update_reg_bounds(dst_reg); __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; } /* WARNING: This function does calculations on 64-bit values, but the actual * execution may occur on 32-bit values. Therefore, things like bitshifts * need extra checks in the 32-bit case. */ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; if (insn_bitness == 32) { /* Relevant for 32-bit RSH: Information can propagate towards * LSB, so it isn't sufficient to only truncate the output to * 32 bits. */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. 
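	 * E.g. a path guarded by both "r1 > 5" and "r1 < 3" is dead; the
	 * bounds tracked along it can end up with smin_value > smax_value,
	 * and marking the result unknown is the conservative answer.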
*/ __mark_reg_unknown(dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { __mark_reg_unknown(dst_reg); return 0; } switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. 
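			 * E.g. with dst in [0, 6] and src in [0, 10], the AND
			 * result is at most min(6, 10) = 6 and never negative,
			 * so copying the unsigned bounds into the signed ones
			 * is exact here.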
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_OR:
		if (src_known && dst_known) {
			__mark_reg_known(dst_reg, dst_reg->var_off.value |
						  src_reg.var_off.value);
			break;
		}
		/* We get our maximum from the var_off, and our minimum is the
		 * maximum of the operands' minima
		 */
		dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off);
		dst_reg->umin_value = max(dst_reg->umin_value, umin_val);
		dst_reg->umax_value = dst_reg->var_off.value |
				      dst_reg->var_off.mask;
		if (dst_reg->smin_value < 0 || smin_val < 0) {
			/* Lose signed bounds when ORing negative numbers,
			 * ain't nobody got time for that.
			 */
			dst_reg->smin_value = S64_MIN;
			dst_reg->smax_value = S64_MAX;
		} else {
			/* ORing two positives gives a positive, so safe to
			 * cast result into s64.
			 */
			dst_reg->smin_value = dst_reg->umin_value;
			dst_reg->smax_value = dst_reg->umax_value;
		}
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_LSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* We lose all sign bit information (except what we can pick
		 * up from var_off)
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		/* If we might shift our top bit out, then we know nothing */
		if (dst_reg->umax_value > 1ULL << (63 - umax_val)) {
			dst_reg->umin_value = 0;
			dst_reg->umax_value = U64_MAX;
		} else {
			dst_reg->umin_value <<= umin_val;
			dst_reg->umax_value <<= umax_val;
		}
		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_RSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* BPF_RSH is an unsigned shift. If the value in dst_reg might
		 * be negative, then either:
		 * 1) src_reg might be zero, so the sign bit of the result is
		 *    unknown, so we lose our signed bounds
		 * 2) it's known negative, thus the unsigned bounds capture the
		 *    signed bounds
		 * 3) the signed bounds cross zero, so they tell us nothing
		 *    about the result
		 * If the value in dst_reg is known nonnegative, then again the
		 * unsigned bounds capture the signed bounds.
		 * Thus, in all cases it suffices to blow away our signed bounds
		 * and rely on inferring new ones from the unsigned bounds and
		 * var_off of the result.
		 */
		dst_reg->smin_value = S64_MIN;
		dst_reg->smax_value = S64_MAX;
		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
		dst_reg->umin_value >>= umax_val;
		dst_reg->umax_value >>= umin_val;
		/* We may learn something more from the var_off */
		__update_reg_bounds(dst_reg);
		break;
	case BPF_ARSH:
		if (umax_val >= insn_bitness) {
			/* Shifts greater than 31 or 63 are undefined.
			 * This includes shifts by a negative number.
			 */
			mark_reg_unknown(env, regs, insn->dst_reg);
			break;
		}
		/* Upon reaching here, src_known is true and
		 * umax_val is equal to umin_val.
		 */
		dst_reg->smin_value >>= umin_val;
		dst_reg->smax_value >>= umin_val;
		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
		/* blow away the dst_reg umin_value/umax_value and rely on
		 * dst_reg var_off to refine the result.
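		 * E.g. for smin=-16, smax=4 and shift 2 the new signed bounds
		 * are [-4, 1], but a negative value viewed as u64 is huge, so
		 * the old umin/umax cannot simply be shifted; they are reset
		 * here and recomputed from var_off by __update_reg_bounds().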
		 */
		dst_reg->umin_value = 0;
		dst_reg->umax_value = U64_MAX;
		__update_reg_bounds(dst_reg);
		break;
	default:
		mark_reg_unknown(env, regs, insn->dst_reg);
		break;
	}

	if (BPF_CLASS(insn->code) != BPF_ALU64) {
		/* 32-bit ALU ops are (32,32)->32 */
		coerce_reg_to_size(dst_reg, 4);
	}

	__reg_deduce_bounds(dst_reg);
	__reg_bound_offset(dst_reg);
	return 0;
}

/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max
 * and var_off.
 */
static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
				   struct bpf_insn *insn)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_func_state *state = vstate->frame[vstate->curframe];
	struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg;
	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
	u8 opcode = BPF_OP(insn->code);

	dst_reg = &regs[insn->dst_reg];
	src_reg = NULL;
	if (dst_reg->type != SCALAR_VALUE)
		ptr_reg = dst_reg;
	if (BPF_SRC(insn->code) == BPF_X) {
		src_reg = &regs[insn->src_reg];
		if (src_reg->type != SCALAR_VALUE) {
			if (dst_reg->type != SCALAR_VALUE) {
				/* Combining two pointers by any ALU op yields
				 * an arbitrary scalar. Disallow all math except
				 * pointer subtraction
				 */
				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
					mark_reg_unknown(env, regs, insn->dst_reg);
					return 0;
				}
				verbose(env, "R%d pointer %s pointer prohibited\n",
					insn->dst_reg,
					bpf_alu_string[opcode >> 4]);
				return -EACCES;
			} else {
				/* scalar += pointer
				 * This is legal, but we have to reverse our
				 * src/dest handling in computing the range
				 */
				return adjust_ptr_min_max_vals(env, insn,
							       src_reg, dst_reg);
			}
		} else if (ptr_reg) {
			/* pointer += scalar */
			return adjust_ptr_min_max_vals(env, insn,
						       dst_reg, src_reg);
		}
	} else {
		/* Pretend the src is a reg with a known value, since we only
		 * need to be able to read from this state.
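		 * E.g. for "r1 += 4" (BPF_ADD | BPF_K, imm = 4), off_reg
		 * becomes a scalar with var_off = const 4 and bounds [4, 4],
		 * so the same adjust_*_min_max_vals() helpers serve both the
		 * register and the immediate forms of the instruction.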
*/ off_reg.type = SCALAR_VALUE; __mark_reg_known(&off_reg, insn->imm); src_reg = &off_reg; if (ptr_reg) /* pointer += K */ return adjust_ptr_min_max_vals(env, insn, ptr_reg, src_reg); } /* Got here implies adding two SCALAR_VALUEs */ if (WARN_ON_ONCE(ptr_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: unexpected ptr_reg\n"); return -EINVAL; } if (WARN_ON(!src_reg)) { print_verifier_state(env, state); verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); } /* check validity of 32-bit and 64-bit arithmetic operations */ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); int err; if (opcode == BPF_END || opcode == BPF_NEG) { if (opcode == BPF_NEG) { if (BPF_SRC(insn->code) != 0 || insn->src_reg != BPF_REG_0 || insn->off != 0 || insn->imm != 0) { verbose(env, "BPF_NEG uses reserved fields\n"); return -EINVAL; } } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0 || (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || BPF_CLASS(insn->code) == BPF_ALU64) { verbose(env, "BPF_END uses reserved fields\n"); return -EINVAL; } } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer arithmetic prohibited\n", insn->dst_reg); return -EACCES; } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; } else if (opcode == BPF_MOV) { if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_MOV uses reserved fields\n"); return -EINVAL; } } /* check dest operand, mark as required later */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; if (BPF_SRC(insn->code) == BPF_X) { if (BPF_CLASS(insn->code) == BPF_ALU64) { /* case: R1 = R2 * copy register state to dest reg */ regs[insn->dst_reg] = regs[insn->src_reg]; regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; } else { /* R1 = (u32) R2 */ if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d partial copy of pointer\n", insn->src_reg); return -EACCES; } mark_reg_unknown(env, regs, insn->dst_reg); coerce_reg_to_size(&regs[insn->dst_reg], 4); } } else { /* case: R = imm * remember the value we stored into this reg */ /* clear any state __mark_reg_known doesn't set */ mark_reg_unknown(env, regs, insn->dst_reg); regs[insn->dst_reg].type = SCALAR_VALUE; if (BPF_CLASS(insn->code) == BPF_ALU64) { __mark_reg_known(regs + insn->dst_reg, insn->imm); } else { __mark_reg_known(regs + insn->dst_reg, (u32)insn->imm); } } } else if (opcode > BPF_END) { verbose(env, "invalid BPF_ALU opcode %x\n", opcode); return -EINVAL; } else { /* all other ALU ops: and, sub, xor, add, ... 
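		 * E.g. BPF_ADD | BPF_X requires imm == 0 and off == 0, while
		 * BPF_ADD | BPF_K requires src_reg == 0; anything else is
		 * rejected below as "BPF_ALU uses reserved fields".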
*/ if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } else { if (insn->src_reg != BPF_REG_0 || insn->off != 0) { verbose(env, "BPF_ALU uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if ((opcode == BPF_MOD || opcode == BPF_DIV) && BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { verbose(env, "div by zero\n"); return -EINVAL; } if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) { verbose(env, "BPF_ARSH not supported for 32 bit ALU\n"); return -EINVAL; } if ((opcode == BPF_LSH || opcode == BPF_RSH || opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) { int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32; if (insn->imm < 0 || insn->imm >= size) { verbose(env, "invalid shift %d\n", insn->imm); return -EINVAL; } } /* check dest operand */ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; return adjust_reg_min_max_vals(env, insn); } return 0; } static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, struct bpf_reg_state *dst_reg, enum bpf_reg_type type, bool range_right_open) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *reg; u16 new_range; int i, j; if (dst_reg->off < 0 || (dst_reg->off == 0 && range_right_open)) /* This doesn't give us any range */ return; if (dst_reg->umax_value > MAX_PACKET_OFF || dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) /* Risk of overflow. For instance, ptr + (1<<63) may be less * than pkt_end, but that's because it's also less than pkt. */ return; new_range = dst_reg->off; if (range_right_open) new_range--; /* Examples for register markings: * * pkt_data in dst register: * * r2 = r3; * r2 += 8; * if (r2 > pkt_end) goto <handle exception> * <access okay> * * r2 = r3; * r2 += 8; * if (r2 < pkt_end) goto <access okay> * <handle exception> * * Where: * r2 == dst_reg, pkt_end == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * pkt_data in src register: * * r2 = r3; * r2 += 8; * if (pkt_end >= r2) goto <access okay> * <handle exception> * * r2 = r3; * r2 += 8; * if (pkt_end <= r2) goto <handle exception> * <access okay> * * Where: * pkt_end == dst_reg, r2 == src_reg * r2=pkt(id=n,off=8,r=0) * r3=pkt(id=n,off=0,r=0) * * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) * and [r3, r3 + 8-1) respectively is safe to access depending on * the check. */ /* If our ids match, then we must have the same max_value. And we * don't care about the other reg's fixed offset, since if it's too big * the range won't allow anything. * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. 
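	 * Spilled copies of the checked pointer are updated too (second
	 * loop below), so a packet pointer saved to the stack before the
	 * bounds check keeps the widened range when it is filled back.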
	 */
	for (i = 0; i < MAX_BPF_REG; i++)
		if (regs[i].type == type && regs[i].id == dst_reg->id)
			/* keep the maximum range already checked */
			regs[i].range = max(regs[i].range, new_range);

	for (j = 0; j <= vstate->curframe; j++) {
		state = vstate->frame[j];
		for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
			if (state->stack[i].slot_type[0] != STACK_SPILL)
				continue;
			reg = &state->stack[i].spilled_ptr;
			if (reg->type == type && reg->id == dst_reg->id)
				reg->range = max(reg->range, new_range);
		}
	}
}

/* Adjusts the register min/max values in the case that the dst_reg is the
 * variable register that we are working on, and src_reg is a constant or we're
 * simply doing a BPF_K check.
 * In JEQ/JNE cases we also adjust the var_off values.
 */
static void reg_set_min_max(struct bpf_reg_state *true_reg,
			    struct bpf_reg_state *false_reg, u64 val,
			    u8 opcode)
{
	/* If the dst_reg is a pointer, we can't learn anything about its
	 * variable offset from the compare (unless src_reg were a pointer into
	 * the same object, but we don't bother with that).
	 * Since false_reg and true_reg have the same type by construction, we
	 * only need to check one of them for pointerness.
	 */
	if (__is_pointer_value(false, false_reg))
		return;

	switch (opcode) {
	case BPF_JEQ:
		/* If this is false then we know nothing Jon Snow, but if it is
		 * true then we know for sure.
		 */
		__mark_reg_known(true_reg, val);
		break;
	case BPF_JNE:
		/* If this is true we know nothing Jon Snow, but if it is false
		 * we know the value for sure.
		 */
		__mark_reg_known(false_reg, val);
		break;
	case BPF_JGT:
		false_reg->umax_value = min(false_reg->umax_value, val);
		true_reg->umin_value = max(true_reg->umin_value, val + 1);
		break;
	case BPF_JSGT:
		false_reg->smax_value = min_t(s64, false_reg->smax_value, val);
		true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1);
		break;
	case BPF_JLT:
		false_reg->umin_value = max(false_reg->umin_value, val);
		true_reg->umax_value = min(true_reg->umax_value, val - 1);
		break;
	case BPF_JSLT:
		false_reg->smin_value = max_t(s64, false_reg->smin_value, val);
		true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1);
		break;
	case BPF_JGE:
		false_reg->umax_value = min(false_reg->umax_value, val - 1);
		true_reg->umin_value = max(true_reg->umin_value, val);
		break;
	case BPF_JSGE:
		false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1);
		true_reg->smin_value = max_t(s64, true_reg->smin_value, val);
		break;
	case BPF_JLE:
		false_reg->umin_value = max(false_reg->umin_value, val + 1);
		true_reg->umax_value = min(true_reg->umax_value, val);
		break;
	case BPF_JSLE:
		false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1);
		true_reg->smax_value = min_t(s64, true_reg->smax_value, val);
		break;
	default:
		break;
	}

	__reg_deduce_bounds(false_reg);
	__reg_deduce_bounds(true_reg);
	/* We might have learned some bits from the bounds. */
	__reg_bound_offset(false_reg);
	__reg_bound_offset(true_reg);
	/* Intersecting with the old var_off might have improved our bounds
	 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
	 * then new var_off is (0; 0x7f...fc) which improves our umax.
	 */
	__update_reg_bounds(false_reg);
	__update_reg_bounds(true_reg);
}

/* Same as above, but for the case that dst_reg holds a constant and src_reg is
 * the variable reg.
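 * E.g. for "if (10 > r2)" with r2 variable, the true branch clamps r2's
 * umax_value to 9 while the false branch raises its umin_value to 10,
 * the mirror image of what reg_set_min_max() does for BPF_JGT.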
*/ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, struct bpf_reg_state *false_reg, u64 val, u8 opcode) { if (__is_pointer_value(false, false_reg)) return; switch (opcode) { case BPF_JEQ: /* If this is false then we know nothing Jon Snow, but if it is * true then we know for sure. */ __mark_reg_known(true_reg, val); break; case BPF_JNE: /* If this is true we know nothing Jon Snow, but if it is false * we know the value for sure; */ __mark_reg_known(false_reg, val); break; case BPF_JGT: true_reg->umax_value = min(true_reg->umax_value, val - 1); false_reg->umin_value = max(false_reg->umin_value, val); break; case BPF_JSGT: true_reg->smax_value = min_t(s64, true_reg->smax_value, val - 1); false_reg->smin_value = max_t(s64, false_reg->smin_value, val); break; case BPF_JLT: true_reg->umin_value = max(true_reg->umin_value, val + 1); false_reg->umax_value = min(false_reg->umax_value, val); break; case BPF_JSLT: true_reg->smin_value = max_t(s64, true_reg->smin_value, val + 1); false_reg->smax_value = min_t(s64, false_reg->smax_value, val); break; case BPF_JGE: true_reg->umax_value = min(true_reg->umax_value, val); false_reg->umin_value = max(false_reg->umin_value, val + 1); break; case BPF_JSGE: true_reg->smax_value = min_t(s64, true_reg->smax_value, val); false_reg->smin_value = max_t(s64, false_reg->smin_value, val + 1); break; case BPF_JLE: true_reg->umin_value = max(true_reg->umin_value, val); false_reg->umax_value = min(false_reg->umax_value, val - 1); break; case BPF_JSLE: true_reg->smin_value = max_t(s64, true_reg->smin_value, val); false_reg->smax_value = min_t(s64, false_reg->smax_value, val - 1); break; default: break; } __reg_deduce_bounds(false_reg); __reg_deduce_bounds(true_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(false_reg); __reg_bound_offset(true_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. */ __update_reg_bounds(false_reg); __update_reg_bounds(true_reg); } /* Regs are known to be equal, so intersect their min/max/var_off */ static void __reg_combine_min_max(struct bpf_reg_state *src_reg, struct bpf_reg_state *dst_reg) { src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, dst_reg->umin_value); src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, dst_reg->umax_value); src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, dst_reg->smin_value); src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, dst_reg->smax_value); src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, dst_reg->var_off); /* We might have learned new bounds from the var_off. */ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); /* We might have learned something about the sign bit. */ __reg_deduce_bounds(src_reg); __reg_deduce_bounds(dst_reg); /* We might have learned some bits from the bounds. */ __reg_bound_offset(src_reg); __reg_bound_offset(dst_reg); /* Intersecting with the old var_off might have improved our bounds * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), * then new var_off is (0; 0x7f...fc) which improves our umax. 
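	 * Illustrative: if one copy is known to be a multiple of 4 (low two
	 * var_off bits known zero) and the other is known to be < 16, the
	 * intersection constrains both registers to {0, 4, 8, 12}.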
*/ __update_reg_bounds(src_reg); __update_reg_bounds(dst_reg); } static void reg_combine_min_max(struct bpf_reg_state *true_src, struct bpf_reg_state *true_dst, struct bpf_reg_state *false_src, struct bpf_reg_state *false_dst, u8 opcode) { switch (opcode) { case BPF_JEQ: __reg_combine_min_max(true_src, true_dst); break; case BPF_JNE: __reg_combine_min_max(false_src, false_dst); break; } } static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, bool is_null) { struct bpf_reg_state *reg = &regs[regno]; if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { /* Old offset (both fixed and variable parts) should * have been known-zero, because we don't allow pointer * arithmetic on pointers that might be NULL. */ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || !tnum_equals_const(reg->var_off, 0) || reg->off)) { __mark_reg_known_zero(reg); reg->off = 0; } if (is_null) { reg->type = SCALAR_VALUE; } else if (reg->map_ptr->inner_map_meta) { reg->type = CONST_PTR_TO_MAP; reg->map_ptr = reg->map_ptr->inner_map_meta; } else { reg->type = PTR_TO_MAP_VALUE; } /* We don't need id from this point onwards anymore, thus we * should better reset it, so that state pruning has chances * to take effect. */ reg->id = 0; } } /* The logic is similar to find_good_pkt_pointers(), both could eventually * be folded together at some point. */ static void mark_map_regs(struct bpf_verifier_state *vstate, u32 regno, bool is_null) { struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs; u32 id = regs[regno].id; int i, j; for (i = 0; i < MAX_BPF_REG; i++) mark_map_reg(regs, i, id, is_null); for (j = 0; j <= vstate->curframe; j++) { state = vstate->frame[j]; for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { if (state->stack[i].slot_type[0] != STACK_SPILL) continue; mark_map_reg(&state->stack[i].spilled_ptr, 0, id, is_null); } } } static bool try_match_pkt_pointers(const struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg, struct bpf_verifier_state *this_branch, struct bpf_verifier_state *other_branch) { if (BPF_SRC(insn->code) != BPF_X) return false; switch (BPF_OP(insn->code)) { case BPF_JGT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end > pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, true); } else { return false; } break; case BPF_JLT: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end < pkt_data', pkt_data > pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JGE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || 
(dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ find_good_pkt_pointers(this_branch, dst_reg, dst_reg->type, true); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */ find_good_pkt_pointers(other_branch, src_reg, src_reg->type, false); } else { return false; } break; case BPF_JLE: if ((dst_reg->type == PTR_TO_PACKET && src_reg->type == PTR_TO_PACKET_END) || (dst_reg->type == PTR_TO_PACKET_META && reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ find_good_pkt_pointers(other_branch, dst_reg, dst_reg->type, false); } else if ((dst_reg->type == PTR_TO_PACKET_END && src_reg->type == PTR_TO_PACKET) || (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && src_reg->type == PTR_TO_PACKET_META)) { /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ find_good_pkt_pointers(this_branch, src_reg, src_reg->type, true); } else { return false; } break; default: return false; } return true; } static int check_cond_jmp_op(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx) { struct bpf_verifier_state *this_branch = env->cur_state; struct bpf_verifier_state *other_branch; struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; struct bpf_reg_state *dst_reg, *other_branch_regs; u8 opcode = BPF_OP(insn->code); int err; if (opcode > BPF_JSLE) { verbose(env, "invalid BPF_JMP opcode %x\n", opcode); return -EINVAL; } if (BPF_SRC(insn->code) == BPF_X) { if (insn->imm != 0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; if (is_pointer_value(env, insn->src_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->src_reg); return -EACCES; } } else { if (insn->src_reg != BPF_REG_0) { verbose(env, "BPF_JMP uses reserved fields\n"); return -EINVAL; } } /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg = &regs[insn->dst_reg]; /* detect if R == 0 where R was initialized to zero earlier */ if (BPF_SRC(insn->code) == BPF_K && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == SCALAR_VALUE && tnum_is_const(dst_reg->var_off)) { if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) || (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) { /* if (imm == imm) goto pc+off; * only follow the goto, ignore fall-through */ *insn_idx += insn->off; return 0; } else { /* if (imm != imm) goto pc+off; * only follow fall-through branch, since * that's where the program will go */ return 0; } } other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); if (!other_branch) return -EFAULT; other_branch_regs = other_branch->frame[other_branch->curframe]->regs; /* detect if we are comparing against a constant value so we can adjust * our min/max values for our dst register. * this is only legit if both are scalars (or pointers to the same * object, I suppose, but we don't support that right now), because * otherwise the different base pointers mean the offsets aren't * comparable. 
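	 * E.g. comparing a map-value pointer against a stack pointer says
	 * nothing useful about either offset, so the min/max adjustment
	 * below is only applied when the operands are SCALAR_VALUEs.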
*/ if (BPF_SRC(insn->code) == BPF_X) { if (dst_reg->type == SCALAR_VALUE && regs[insn->src_reg].type == SCALAR_VALUE) { if (tnum_is_const(regs[insn->src_reg].var_off)) reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, regs[insn->src_reg].var_off.value, opcode); else if (tnum_is_const(dst_reg->var_off)) reg_set_min_max_inv(&other_branch_regs[insn->src_reg], &regs[insn->src_reg], dst_reg->var_off.value, opcode); else if (opcode == BPF_JEQ || opcode == BPF_JNE) /* Comparing for equality, we can combine knowledge */ reg_combine_min_max(&other_branch_regs[insn->src_reg], &other_branch_regs[insn->dst_reg], &regs[insn->src_reg], &regs[insn->dst_reg], opcode); } } else if (dst_reg->type == SCALAR_VALUE) { reg_set_min_max(&other_branch_regs[insn->dst_reg], dst_reg, insn->imm, opcode); } /* detect if R == 0 where R is returned from bpf_map_lookup_elem() */ if (BPF_SRC(insn->code) == BPF_K && insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { /* Mark all identical map registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ mark_map_regs(this_branch, insn->dst_reg, opcode == BPF_JNE); mark_map_regs(other_branch, insn->dst_reg, opcode == BPF_JEQ); } else if (!try_match_pkt_pointers(insn, dst_reg, &regs[insn->src_reg], this_branch, other_branch) && is_pointer_value(env, insn->dst_reg)) { verbose(env, "R%d pointer comparison prohibited\n", insn->dst_reg); return -EACCES; } if (env->log.level) print_verifier_state(env, this_branch->frame[this_branch->curframe]); return 0; } /* return the map pointer stored inside BPF_LD_IMM64 instruction */ static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) { u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; return (struct bpf_map *) (unsigned long) imm64; } /* verify BPF_LD_IMM64 instruction */ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); int err; if (BPF_SIZE(insn->code) != BPF_DW) { verbose(env, "invalid BPF_LD_IMM insn\n"); return -EINVAL; } if (insn->off != 0) { verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); return -EINVAL; } err = check_reg_arg(env, insn->dst_reg, DST_OP); if (err) return err; if (insn->src_reg == 0) { u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; regs[insn->dst_reg].type = SCALAR_VALUE; __mark_reg_known(&regs[insn->dst_reg], imm); return 0; } /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); regs[insn->dst_reg].type = CONST_PTR_TO_MAP; regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); return 0; } static bool may_access_skb(enum bpf_prog_type type) { switch (type) { case BPF_PROG_TYPE_SOCKET_FILTER: case BPF_PROG_TYPE_SCHED_CLS: case BPF_PROG_TYPE_SCHED_ACT: return true; default: return false; } } /* verify safety of LD_ABS|LD_IND instructions: * - they can only appear in the programs where ctx == skb * - since they are wrappers of function calls, they scratch R1-R5 registers, * preserve R6-R9, and store return value into R0 * * Implicit input: * ctx == skb == R6 == CTX * * Explicit input: * SRC == any register * IMM == 32-bit immediate * * Output: * R0 - 8/16/32-bit skb data converted to cpu endianness */ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) { struct bpf_reg_state *regs = cur_regs(env); u8 mode = BPF_MODE(insn->code); int i, err; if (!may_access_skb(env->prog->type)) { verbose(env, "BPF_LD_[ABS|IND] 
instructions not allowed for this program type\n"); return -EINVAL; } if (!env->ops->gen_ld_abs) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (env->subprog_cnt > 1) { /* when program has LD_ABS insn JITs and interpreter assume * that r1 == ctx == skb which is not the case for callees * that can have arbitrary arguments. It's problematic * for main prog as well since JITs would need to analyze * all functions in order to make proper register save/restore * decisions in the main prog. Hence disallow LD_ABS with calls */ verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); return -EINVAL; } if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || BPF_SIZE(insn->code) == BPF_DW || (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); return -EINVAL; } /* check whether implicit source operand (register R6) is readable */ err = check_reg_arg(env, BPF_REG_6, SRC_OP); if (err) return err; if (regs[BPF_REG_6].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); return -EINVAL; } if (mode == BPF_IND) { /* check explicit source operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; } /* reset caller saved regs to unreadable */ for (i = 0; i < CALLER_SAVED_REGS; i++) { mark_reg_not_init(env, regs, caller_saved[i]); check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); } /* mark destination R0 register as readable, since it contains * the value fetched from the packet. * Already marked as written above. */ mark_reg_unknown(env, regs, BPF_REG_0); return 0; } static int check_return_code(struct bpf_verifier_env *env) { struct bpf_reg_state *reg; struct tnum range = tnum_range(0, 1); switch (env->prog->type) { case BPF_PROG_TYPE_CGROUP_SKB: case BPF_PROG_TYPE_CGROUP_SOCK: case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: case BPF_PROG_TYPE_SOCK_OPS: case BPF_PROG_TYPE_CGROUP_DEVICE: break; default: return 0; } reg = cur_regs(env) + BPF_REG_0; if (reg->type != SCALAR_VALUE) { verbose(env, "At program exit the register R0 is not a known value (%s)\n", reg_type_str[reg->type]); return -EINVAL; } if (!tnum_in(range, reg->var_off)) { verbose(env, "At program exit the register R0 "); if (!tnum_is_unknown(reg->var_off)) { char tn_buf[48]; tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); verbose(env, "has value %s", tn_buf); } else { verbose(env, "has unknown scalar value"); } verbose(env, " should have been 0 or 1\n"); return -EINVAL; } return 0; } /* non-recursive DFS pseudo code * 1 procedure DFS-iterative(G,v): * 2 label v as discovered * 3 let S be a stack * 4 S.push(v) * 5 while S is not empty * 6 t <- S.pop() * 7 if t is what we're looking for: * 8 return t * 9 for all edges e in G.adjacentEdges(t) do * 10 if edge e is already labelled * 11 continue with the next edge * 12 w <- G.adjacentVertex(t,e) * 13 if vertex w is not discovered and not explored * 14 label e as tree-edge * 15 label w as discovered * 16 S.push(w) * 17 continue at 5 * 18 else if vertex w is discovered * 19 label e as back-edge * 20 else * 21 // vertex w is explored * 22 label e as forward- or cross-edge * 23 label t as explored * 24 S.pop() * * convention: * 0x10 - discovered * 0x11 - discovered and fall-through edge labelled * 0x12 - discovered and fall-through and branch edges labelled * 0x20 - explored */ enum { DISCOVERED = 0x10, EXPLORED = 0x20, FALLTHROUGH = 1, BRANCH = 2, }; #define STATE_LIST_MARK ((struct bpf_verifier_state_list *) -1L) static int *insn_stack; /* stack 
of insns to process */ static int cur_stack; /* current stack index */ static int *insn_state; /* t, w, e - match pseudo-code above: * t - index of current instruction * w - next instruction * e - edge */ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env) { if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) return 0; if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH)) return 0; if (w < 0 || w >= env->prog->len) { verbose(env, "jump out of range from insn %d to %d\n", t, w); return -EINVAL; } if (e == BRANCH) /* mark branch target for state pruning */ env->explored_states[w] = STATE_LIST_MARK; if (insn_state[w] == 0) { /* tree-edge */ insn_state[t] = DISCOVERED | e; insn_state[w] = DISCOVERED; if (cur_stack >= env->prog->len) return -E2BIG; insn_stack[cur_stack++] = w; return 1; } else if ((insn_state[w] & 0xF0) == DISCOVERED) { verbose(env, "back-edge from insn %d to %d\n", t, w); return -EINVAL; } else if (insn_state[w] == EXPLORED) { /* forward- or cross-edge */ insn_state[t] = DISCOVERED | e; } else { verbose(env, "insn state internal bug\n"); return -EFAULT; } return 0; } /* non-recursive depth-first-search to detect loops in BPF program * loop == back-edge in directed graph */ static int check_cfg(struct bpf_verifier_env *env) { struct bpf_insn *insns = env->prog->insnsi; int insn_cnt = env->prog->len; int ret = 0; int i, t; ret = check_subprogs(env); if (ret < 0) return ret; insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_state) return -ENOMEM; insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); if (!insn_stack) { kfree(insn_state); return -ENOMEM; } insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ insn_stack[0] = 0; /* 0 is the first instruction */ cur_stack = 1; peek_stack: if (cur_stack == 0) goto check_state; t = insn_stack[cur_stack - 1]; if (BPF_CLASS(insns[t].code) == BPF_JMP) { u8 opcode = BPF_OP(insns[t].code); if (opcode == BPF_EXIT) { goto mark_explored; } else if (opcode == BPF_CALL) { ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; if (insns[t].src_reg == BPF_PSEUDO_CALL) { env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else if (opcode == BPF_JA) { if (BPF_SRC(insns[t].code) != BPF_K) { ret = -EINVAL; goto err_free; } /* unconditional jump with single edge */ ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; /* tell verifier to check for equivalent states * after every call and jump */ if (t + 1 < insn_cnt) env->explored_states[t + 1] = STATE_LIST_MARK; } else { /* conditional jump with two edges */ env->explored_states[t] = STATE_LIST_MARK; ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } } else { /* all other non-branch instructions with single * fall-through edge */ ret = push_insn(t, t + 1, FALLTHROUGH, env); if (ret == 1) goto peek_stack; else if (ret < 0) goto err_free; } mark_explored: insn_state[t] = EXPLORED; if (cur_stack-- <= 0) { verbose(env, "pop stack internal bug\n"); ret = -EFAULT; goto err_free; } goto peek_stack; check_state: for (i = 0; i < insn_cnt; i++) { if (insn_state[i] 
!= EXPLORED) { verbose(env, "unreachable insn %d\n", i); ret = -EINVAL; goto err_free; } } ret = 0; /* cfg looks good */ err_free: kfree(insn_state); kfree(insn_stack); return ret; } /* check %cur's range satisfies %old's */ static bool range_within(struct bpf_reg_state *old, struct bpf_reg_state *cur) { return old->umin_value <= cur->umin_value && old->umax_value >= cur->umax_value && old->smin_value <= cur->smin_value && old->smax_value >= cur->smax_value; } /* Maximum number of register states that can exist at once */ #define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) struct idpair { u32 old; u32 cur; }; /* If in the old state two registers had the same id, then they need to have * the same id in the new state as well. But that id could be different from * the old state, so we need to track the mapping from old to new ids. * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent * regs with old id 5 must also have new id 9 for the new state to be safe. But * regs with a different old id could still have new id 9, we don't care about * that. * So we look through our idmap to see if this old id has been seen before. If * so, we require the new id to match; otherwise, we add the id pair to the map. */ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) { unsigned int i; for (i = 0; i < ID_MAP_SIZE; i++) { if (!idmap[i].old) { /* Reached an empty slot; haven't seen this id before */ idmap[i].old = old_id; idmap[i].cur = cur_id; return true; } if (idmap[i].old == old_id) return idmap[i].cur == cur_id; } /* We ran out of idmap slots, which should be impossible */ WARN_ON_ONCE(1); return false; } /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, struct idpair *idmap) { bool equal; if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, frameno)) == 0; if (rold->type == PTR_TO_STACK) /* two stack pointers are equal only if they're pointing to * the same stack frame, since fp-8 in foo != fp-8 in bar */ return equal && rold->frameno == rcur->frameno; if (equal) return true; if (rold->type == NOT_INIT) /* explored state can't have used this */ return true; if (rcur->type == NOT_INIT) return false; switch (rold->type) { case SCALAR_VALUE: if (rcur->type == SCALAR_VALUE) { /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); } else { /* We're trying to use a pointer in place of a scalar. * Even if the scalar was unbounded, this could lead to * pointer leaks because scalars are allowed to leak * while pointers are not. We could make this safe in * special cases if root is calling us, but it's * probably not worth the hassle. */ return false; } case PTR_TO_MAP_VALUE: /* If the new min/max/var_off satisfy the old ones and * everything else matches, we are OK. * We don't care about the 'id' value, because nothing * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) */ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && range_within(rold, rcur) && tnum_in(rold->var_off, rcur->var_off); case PTR_TO_MAP_VALUE_OR_NULL: /* a PTR_TO_MAP_VALUE could be safe to use as a * PTR_TO_MAP_VALUE_OR_NULL into the same map. 
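	 * (Illustrative: the old, safe state never relied on the pointer
	 * being non-NULL, so a still-maybe-NULL pointer in the current
	 * state is acceptable in its place.)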
	 * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL-
	 * checked, doing so could have affected others with the same
	 * id, and we can't check for that because we lost the id when
	 * we converted to a PTR_TO_MAP_VALUE.
	 */
		if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL)
			return false;
		if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)))
			return false;
		/* Check our ids match any regs they're supposed to */
		return check_ids(rold->id, rcur->id, idmap);
	case PTR_TO_PACKET_META:
	case PTR_TO_PACKET:
		if (rcur->type != rold->type)
			return false;
		/* We must have at least as much range as the old ptr
		 * did, so that any accesses which were safe before are
		 * still safe. This is true even if old range < old off,
		 * since someone could have accessed through (ptr - k), or
		 * even done ptr -= k in a register, to get a safe access.
		 */
		if (rold->range > rcur->range)
			return false;
		/* If the offsets don't match, we can't trust our alignment;
		 * nor can we be sure that we won't fall out of range.
		 */
		if (rold->off != rcur->off)
			return false;
		/* id relations must be preserved */
		if (rold->id && !check_ids(rold->id, rcur->id, idmap))
			return false;
		/* new val must satisfy old val knowledge */
		return range_within(rold, rcur) &&
		       tnum_in(rold->var_off, rcur->var_off);
	case PTR_TO_CTX:
	case CONST_PTR_TO_MAP:
	case PTR_TO_PACKET_END:
		/* Only valid matches are exact, which memcmp() above
		 * would have accepted
		 */
	default:
		/* Don't know what's going on, just say it's not safe */
		return false;
	}

	/* Shouldn't get here; if we do, say it's not safe */
	WARN_ON_ONCE(1);
	return false;
}

static bool stacksafe(struct bpf_func_state *old,
		      struct bpf_func_state *cur,
		      struct idpair *idmap)
{
	int i, spi;

	/* if explored stack has more populated slots than current stack
	 * such stacks are not equivalent
	 */
	if (old->allocated_stack > cur->allocated_stack)
		return false;

	/* walk slots of the explored stack and ignore any additional
	 * slots in the current stack, since explored(safe) state
	 * didn't use them
	 */
	for (i = 0; i < old->allocated_stack; i++) {
		spi = i / BPF_REG_SIZE;

		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
			/* explored state didn't use this */
			continue;

		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
			continue;
		/* if old state was safe with misc data in the stack
		 * it will be safe with zero-initialized stack.
		 * The opposite is not true
		 */
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC &&
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO)
			continue;
		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
		    cur->stack[spi].slot_type[i % BPF_REG_SIZE])
			/* Ex: old explored (safe) state has STACK_SPILL in
			 * this stack slot, but current has STACK_MISC ->
			 * these verifier states are not equivalent,
			 * return false to continue verification of this path
			 */
			return false;
		if (i % BPF_REG_SIZE)
			continue;
		if (old->stack[spi].slot_type[0] != STACK_SPILL)
			continue;
		if (!regsafe(&old->stack[spi].spilled_ptr,
			     &cur->stack[spi].spilled_ptr,
			     idmap))
			/* when explored and current stack slot are both storing
			 * spilled registers, check that stored pointer types
			 * are the same as well.
			 * Ex: explored safe path could have stored
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8}
			 * but current path has stored:
			 * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16}
			 * such verifier states are not equivalent.
* return false to continue verification of this path */ return false; } return true; } /* compare two verifier states * * all states stored in state_list are known to be valid, since * verifier reached 'bpf_exit' instruction through them * * this function is called when verifier exploring different branches of * execution popped from the state stack. If it sees an old state that has * more strict register state and more strict stack state then this execution * branch doesn't need to be explored further, since verifier already * concluded that more strict state leads to valid finish. * * Therefore two states are equivalent if register state is more conservative * and explored stack state is more conservative than the current one. * Example: * explored current * (slot1=INV slot2=MISC) == (slot1=MISC slot2=MISC) * (slot1=MISC slot2=MISC) != (slot1=INV slot2=MISC) * * In other words if current stack state (one being explored) has more * valid slots than old one that already passed validation, it means * the verifier can stop exploring and conclude that current state is valid too * * Similarly with registers. If explored state has register type as invalid * whereas register type in current state is meaningful, it means that * the current state will reach 'bpf_exit' instruction safely */ static bool func_states_equal(struct bpf_func_state *old, struct bpf_func_state *cur) { struct idpair *idmap; bool ret = false; int i; idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); /* If we failed to allocate the idmap, just say it's not safe */ if (!idmap) return false; for (i = 0; i < MAX_BPF_REG; i++) { if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) goto out_free; } if (!stacksafe(old, cur, idmap)) goto out_free; ret = true; out_free: kfree(idmap); return ret; } static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, struct bpf_verifier_state *cur) { int i; if (old->curframe != cur->curframe) return false; /* for states to be equal callsites have to be the same * and all frame states need to be equivalent */ for (i = 0; i <= old->curframe; i++) { if (old->frame[i]->callsite != cur->frame[i]->callsite) return false; if (!func_states_equal(old->frame[i], cur->frame[i])) return false; } return true; } /* A write screens off any subsequent reads; but write marks come from the * straight-line code between a state and its parent. When we arrive at an * equivalent state (jump target or such) we didn't arrive by the straight-line * code, so read marks in the state must propagate to the parent regardless * of the state's write marks. That's what 'parent == state->parent' comparison * in mark_reg_read() and mark_stack_slot_read() is for. */ static int propagate_liveness(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate, struct bpf_verifier_state *vparent) { int i, frame, err = 0; struct bpf_func_state *state, *parent; if (vparent->curframe != vstate->curframe) { WARN(1, "propagate_live: parent frame %d current frame %d\n", vparent->curframe, vstate->curframe); return -EFAULT; } /* Propagate read liveness of registers... */ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); /* We don't need to worry about FP liveness because it's read-only */ for (i = 0; i < BPF_REG_FP; i++) { if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ) continue; if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) { err = mark_reg_read(env, vstate, vparent, i); if (err) return err; } } /* ... 
and stack slots */ for (frame = 0; frame <= vstate->curframe; frame++) { state = vstate->frame[frame]; parent = vparent->frame[frame]; for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && i < parent->allocated_stack / BPF_REG_SIZE; i++) { if (parent->stack[i].spilled_ptr.live & REG_LIVE_READ) continue; if (state->stack[i].spilled_ptr.live & REG_LIVE_READ) mark_stack_slot_read(env, vstate, vparent, i, frame); } } return err; } static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl; struct bpf_verifier_state *cur = env->cur_state; int i, j, err; sl = env->explored_states[insn_idx]; if (!sl) /* this 'insn_idx' instruction wasn't marked, so we will not * be doing state search here */ return 0; while (sl != STATE_LIST_MARK) { if (states_equal(env, &sl->state, cur)) { /* reached equivalent register/stack state, * prune the search. * Registers read by the continuation are read by us. * If we have any write marks in env->cur_state, they * will prevent corresponding reads in the continuation * from reaching our parent (an explored_state). Our * own state will get the read marks recorded, but * they'll be immediately forgotten as we're pruning * this state and will pop a new one. */ err = propagate_liveness(env, &sl->state, cur); if (err) return err; return 1; } sl = sl->next; } /* there were no equivalent states, remember current one. * technically the current state is not proven to be safe yet, * but it will either reach outer most bpf_exit (which means it's safe) * or it will be rejected. Since there are no loops, we won't be * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) * again on the way to bpf_exit */ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); if (!new_sl) return -ENOMEM; /* add new state to the head of linked list */ err = copy_verifier_state(&new_sl->state, cur); if (err) { free_verifier_state(&new_sl->state, false); kfree(new_sl); return err; } new_sl->next = env->explored_states[insn_idx]; env->explored_states[insn_idx] = new_sl; /* connect new state to parentage chain */ cur->parent = &new_sl->state; /* clear write marks in current state: the writes we did are not writes * our child did, so they don't screen off its reads from us. * (There are no read marks in current state, because reads always mark * their parent and current state never has children yet. Only * explored_states can get read marks.) 
*/ for (i = 0; i < BPF_REG_FP; i++) cur->frame[cur->curframe]->regs[i].live = REG_LIVE_NONE; /* all stack frames are accessible from callee, clear them all */ for (j = 0; j <= cur->curframe; j++) { struct bpf_func_state *frame = cur->frame[j]; for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; } return 0; } static int do_check(struct bpf_verifier_env *env) { struct bpf_verifier_state *state; struct bpf_insn *insns = env->prog->insnsi; struct bpf_reg_state *regs; int insn_cnt = env->prog->len, i; int insn_idx, prev_insn_idx = 0; int insn_processed = 0; bool do_print_state = false; state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); if (!state) return -ENOMEM; state->curframe = 0; state->parent = NULL; state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); if (!state->frame[0]) { kfree(state); return -ENOMEM; } env->cur_state = state; init_func_state(env, state->frame[0], BPF_MAIN_FUNC /* callsite */, 0 /* frameno */, 0 /* subprogno, zero == main subprog */); insn_idx = 0; for (;;) { struct bpf_insn *insn; u8 class; int err; if (insn_idx >= insn_cnt) { verbose(env, "invalid insn idx %d insn_cnt %d\n", insn_idx, insn_cnt); return -EFAULT; } insn = &insns[insn_idx]; class = BPF_CLASS(insn->code); if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { verbose(env, "BPF program is too large. Processed %d insn\n", insn_processed); return -E2BIG; } err = is_state_visited(env, insn_idx); if (err < 0) return err; if (err == 1) { /* found equivalent state, can prune the search */ if (env->log.level) { if (do_print_state) verbose(env, "\nfrom %d to %d: safe\n", prev_insn_idx, insn_idx); else verbose(env, "%d: safe\n", insn_idx); } goto process_bpf_exit; } if (need_resched()) cond_resched(); if (env->log.level > 1 || (env->log.level && do_print_state)) { if (env->log.level > 1) verbose(env, "%d:", insn_idx); else verbose(env, "\nfrom %d to %d:", prev_insn_idx, insn_idx); print_verifier_state(env, state->frame[state->curframe]); do_print_state = false; } if (env->log.level) { const struct bpf_insn_cbs cbs = { .cb_print = verbose, .private_data = env, }; verbose(env, "%d: ", insn_idx); print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); } if (bpf_prog_is_dev_bound(env->prog->aux)) { err = bpf_prog_offload_verify_insn(env, insn_idx, prev_insn_idx); if (err) return err; } regs = cur_regs(env); env->insn_aux_data[insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) return err; } else if (class == BPF_LDX) { enum bpf_reg_type *prev_src_type, src_reg_type; /* check for reserved fields is already done */ /* check src operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); if (err) return err; src_reg_type = regs[insn->src_reg].type; /* check that memory (src_reg + off) is readable, * the state of dst_reg will be updated by this func */ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, BPF_SIZE(insn->code), BPF_READ, insn->dst_reg, false); if (err) return err; prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_src_type == NOT_INIT) { /* saw a valid insn * dst_reg = *(u32 *)(src_reg + off) * save type to validate intersecting paths */ *prev_src_type = src_reg_type; } else if (src_reg_type != *prev_src_type && (src_reg_type == PTR_TO_CTX || *prev_src_type == PTR_TO_CTX)) { /* ABuser program is trying to use the same insn * dst_reg = *(u32*) (src_reg + off) * with 
different pointer types: * src_reg == ctx in one branch and * src_reg == stack|map in some other branch. * Reject it. */ verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_STX) { enum bpf_reg_type *prev_dst_type, dst_reg_type; if (BPF_MODE(insn->code) == BPF_XADD) { err = check_xadd(env, insn_idx, insn); if (err) return err; insn_idx++; continue; } /* check src1 operand */ err = check_reg_arg(env, insn->src_reg, SRC_OP); if (err) return err; /* check src2 operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; dst_reg_type = regs[insn->dst_reg].type; /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, insn->src_reg, false); if (err) return err; prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; if (*prev_dst_type == NOT_INIT) { *prev_dst_type = dst_reg_type; } else if (dst_reg_type != *prev_dst_type && (dst_reg_type == PTR_TO_CTX || *prev_dst_type == PTR_TO_CTX)) { verbose(env, "same insn cannot be used with different pointers\n"); return -EINVAL; } } else if (class == BPF_ST) { if (BPF_MODE(insn->code) != BPF_MEM || insn->src_reg != BPF_REG_0) { verbose(env, "BPF_ST uses reserved fields\n"); return -EINVAL; } /* check src operand */ err = check_reg_arg(env, insn->dst_reg, SRC_OP); if (err) return err; if (is_ctx_reg(env, insn->dst_reg)) { verbose(env, "BPF_ST stores into R%d context is not allowed\n", insn->dst_reg); return -EACCES; } /* check that memory (dst_reg + off) is writeable */ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, false); if (err) return err; } else if (class == BPF_JMP) { u8 opcode = BPF_OP(insn->code); if (opcode == BPF_CALL) { if (BPF_SRC(insn->code) != BPF_K || insn->off != 0 || (insn->src_reg != BPF_REG_0 && insn->src_reg != BPF_PSEUDO_CALL) || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_CALL uses reserved fields\n"); return -EINVAL; } if (insn->src_reg == BPF_PSEUDO_CALL) err = check_func_call(env, insn, &insn_idx); else err = check_helper_call(env, insn->imm, insn_idx); if (err) return err; } else if (opcode == BPF_JA) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_JA uses reserved fields\n"); return -EINVAL; } insn_idx += insn->off + 1; continue; } else if (opcode == BPF_EXIT) { if (BPF_SRC(insn->code) != BPF_K || insn->imm != 0 || insn->src_reg != BPF_REG_0 || insn->dst_reg != BPF_REG_0) { verbose(env, "BPF_EXIT uses reserved fields\n"); return -EINVAL; } if (state->curframe) { /* exit from nested function */ prev_insn_idx = insn_idx; err = prepare_func_exit(env, &insn_idx); if (err) return err; do_print_state = true; continue; } /* eBPF calling convetion is such that R0 is used * to return the value from eBPF program. 
* Make sure that it's readable at this time * of bpf_exit, which means that program wrote * something into it earlier */ err = check_reg_arg(env, BPF_REG_0, SRC_OP); if (err) return err; if (is_pointer_value(env, BPF_REG_0)) { verbose(env, "R0 leaks addr as return value\n"); return -EACCES; } err = check_return_code(env); if (err) return err; process_bpf_exit: err = pop_stack(env, &prev_insn_idx, &insn_idx); if (err < 0) { if (err != -ENOENT) return err; break; } else { do_print_state = true; continue; } } else { err = check_cond_jmp_op(env, insn, &insn_idx); if (err) return err; } } else if (class == BPF_LD) { u8 mode = BPF_MODE(insn->code); if (mode == BPF_ABS || mode == BPF_IND) { err = check_ld_abs(env, insn); if (err) return err; } else if (mode == BPF_IMM) { err = check_ld_imm(env, insn); if (err) return err; insn_idx++; env->insn_aux_data[insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; } } else { verbose(env, "unknown insn class %d\n", class); return -EINVAL; } insn_idx++; } verbose(env, "processed %d insns (limit %d), stack depth ", insn_processed, BPF_COMPLEXITY_LIMIT_INSNS); for (i = 0; i < env->subprog_cnt; i++) { u32 depth = env->subprog_info[i].stack_depth; verbose(env, "%d", depth); if (i + 1 < env->subprog_cnt) verbose(env, "+"); } verbose(env, "\n"); env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; return 0; } static int check_map_prealloc(struct bpf_map *map) { return (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || !(map->map_flags & BPF_F_NO_PREALLOC); } static int check_map_prog_compatibility(struct bpf_verifier_env *env, struct bpf_map *map, struct bpf_prog *prog) { /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use * preallocated hash maps, since doing memory allocation * in overflow_handler can crash depending on where nmi got * triggered. 
*/ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { if (!check_map_prealloc(map)) { verbose(env, "perf_event programs can only use preallocated hash map\n"); return -EINVAL; } if (map->inner_map_meta && !check_map_prealloc(map->inner_map_meta)) { verbose(env, "perf_event programs can only use preallocated inner hash map\n"); return -EINVAL; } } if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); return -EINVAL; } return 0; } /* look for pseudo eBPF instructions that access map FDs and * replace them with actual map pointers */ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i, j, err; err = bpf_prog_calc_tag(env->prog); if (err) return err; for (i = 0; i < insn_cnt; i++, insn++) { if (BPF_CLASS(insn->code) == BPF_LDX && (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { verbose(env, "BPF_LDX uses reserved fields\n"); return -EINVAL; } if (BPF_CLASS(insn->code) == BPF_STX && ((BPF_MODE(insn->code) != BPF_MEM && BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { verbose(env, "BPF_STX uses reserved fields\n"); return -EINVAL; } if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { struct bpf_map *map; struct fd f; if (i == insn_cnt - 1 || insn[1].code != 0 || insn[1].dst_reg != 0 || insn[1].src_reg != 0 || insn[1].off != 0) { verbose(env, "invalid bpf_ld_imm64 insn\n"); return -EINVAL; } if (insn->src_reg == 0) /* valid generic load 64-bit imm */ goto next_insn; if (insn->src_reg != BPF_PSEUDO_MAP_FD) { verbose(env, "unrecognized bpf_ld_imm64 insn\n"); return -EINVAL; } f = fdget(insn->imm); map = __bpf_map_get(f); if (IS_ERR(map)) { verbose(env, "fd %d is not pointing to valid bpf_map\n", insn->imm); return PTR_ERR(map); } err = check_map_prog_compatibility(env, map, env->prog); if (err) { fdput(f); return err; } /* store map pointer inside BPF_LD_IMM64 instruction */ insn[0].imm = (u32) (unsigned long) map; insn[1].imm = ((u64) (unsigned long) map) >> 32; /* check whether we recorded this map already */ for (j = 0; j < env->used_map_cnt; j++) if (env->used_maps[j] == map) { fdput(f); goto next_insn; } if (env->used_map_cnt >= MAX_USED_MAPS) { fdput(f); return -E2BIG; } /* hold the map. If the program is rejected by verifier, * the map will be released by release_maps() or it * will be used by the valid program until it's unloaded * and all maps are released in free_used_maps() */ map = bpf_map_inc(map, false); if (IS_ERR(map)) { fdput(f); return PTR_ERR(map); } env->used_maps[env->used_map_cnt++] = map; if (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE && bpf_cgroup_storage_assign(env->prog, map)) { verbose(env, "only one cgroup storage is allowed\n"); fdput(f); return -EBUSY; } fdput(f); next_insn: insn++; i++; continue; } /* Basic sanity check before we invest more work here. */ if (!bpf_opcode_in_insntable(insn->code)) { verbose(env, "unknown opcode %02x\n", insn->code); return -EINVAL; } } /* now all pseudo BPF_LD_IMM64 instructions load valid * 'struct bpf_map *' into a register instead of user map_fd. * These pointers will be used later by verifier to validate map access. 
*/ return 0; } /* drop refcnt of maps used by the rejected program */ static void release_maps(struct bpf_verifier_env *env) { int i; if (env->prog->aux->cgroup_storage) bpf_cgroup_storage_release(env->prog, env->prog->aux->cgroup_storage); for (i = 0; i < env->used_map_cnt; i++) bpf_map_put(env->used_maps[i]); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) { struct bpf_insn *insn = env->prog->insnsi; int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++, insn++) if (insn->code == (BPF_LD | BPF_IMM | BPF_DW)) insn->src_reg = 0; } /* single env->prog->insni[off] instruction was replaced with the range * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying * [0, off) and [off, end) to new locations, so the patched range stays zero */ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; int i; if (cnt == 1) return 0; new_data = vzalloc(array_size(prog_len, sizeof(struct bpf_insn_aux_data))); if (!new_data) return -ENOMEM; memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); for (i = off; i < off + cnt - 1; i++) new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; } static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) { int i; if (len == 1) return; /* NOTE: fake 'exit' subprog should be updated as well. */ for (i = 0; i <= env->subprog_cnt; i++) { if (env->subprog_info[i].start < off) continue; env->subprog_info[i].start += len - 1; } } static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, const struct bpf_insn *patch, u32 len) { struct bpf_prog *new_prog; new_prog = bpf_patch_insn_single(env->prog, off, patch, len); if (!new_prog) return NULL; if (adjust_insn_aux_data(env, new_prog->len, off, len)) return NULL; adjust_subprog_starts(env, off, len); return new_prog; } /* The verifier does more data flow analysis than llvm and will not * explore branches that are dead at run time. Malicious programs can * have dead code too. Therefore replace all dead at-run-time code * with 'ja -1'. * * Just nops are not optimal, e.g. if they would sit at the end of the * program and through another bug we would manage to jump there, then * we'd execute beyond program memory otherwise. Returning exception * code also wouldn't work since we can have subprogs where the dead * code could be located. 
*/ static void sanitize_dead_code(struct bpf_verifier_env *env) { struct bpf_insn_aux_data *aux_data = env->insn_aux_data; struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); struct bpf_insn *insn = env->prog->insnsi; const int insn_cnt = env->prog->len; int i; for (i = 0; i < insn_cnt; i++) { if (aux_data[i].seen) continue; memcpy(insn + i, &trap, sizeof(trap)); } } /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ static int convert_ctx_accesses(struct bpf_verifier_env *env) { const struct bpf_verifier_ops *ops = env->ops; int i, cnt, size, ctx_field_size, delta = 0; const int insn_cnt = env->prog->len; struct bpf_insn insn_buf[16], *insn; struct bpf_prog *new_prog; enum bpf_access_type type; bool is_narrower_load; u32 target_size; if (ops->gen_prologue) { cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, env->prog); if (cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } else if (cnt) { new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); if (!new_prog) return -ENOMEM; env->prog = new_prog; delta += cnt - 1; } } if (!ops->convert_ctx_access || bpf_prog_is_dev_bound(env->prog->aux)) return 0; insn = env->prog->insnsi + delta; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || insn->code == (BPF_LDX | BPF_MEM | BPF_H) || insn->code == (BPF_LDX | BPF_MEM | BPF_W) || insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) type = BPF_READ; else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || insn->code == (BPF_STX | BPF_MEM | BPF_H) || insn->code == (BPF_STX | BPF_MEM | BPF_W) || insn->code == (BPF_STX | BPF_MEM | BPF_DW)) type = BPF_WRITE; else continue; if (type == BPF_WRITE && env->insn_aux_data[i + delta].sanitize_stack_off) { struct bpf_insn patch[] = { /* Sanitize suspicious stack slot with zero. * There are no memory dependencies for this store, * since it's only using frame pointer and immediate * constant of zero */ BPF_ST_MEM(BPF_DW, BPF_REG_FP, env->insn_aux_data[i + delta].sanitize_stack_off, 0), /* the original STX instruction will immediately * overwrite the same stack slot with appropriate value */ *insn, }; cnt = ARRAY_SIZE(patch); new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX) continue; ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; size = BPF_LDST_BYTES(insn); /* If the read access is a narrower load of the field, * convert to a 4/8-byte load, to minimum program type specific * convert_ctx_access changes. If conversion is successful, * we will apply proper mask to the result. 
*/ is_narrower_load = size < ctx_field_size; if (is_narrower_load) { u32 size_default = bpf_ctx_off_adjust_machine(ctx_field_size); u32 off = insn->off; u8 size_code; if (type == BPF_WRITE) { verbose(env, "bpf verifier narrow ctx access misconfigured\n"); return -EINVAL; } size_code = BPF_H; if (ctx_field_size == 4) size_code = BPF_W; else if (ctx_field_size == 8) size_code = BPF_DW; insn->off = off & ~(size_default - 1); insn->code = BPF_LDX | BPF_MEM | size_code; } target_size = 0; cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog, &target_size); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || (ctx_field_size && !target_size)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } if (is_narrower_load && size < target_size) { if (ctx_field_size <= 4) insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); else insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, (1 << size * 8) - 1); } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; /* keep walking new program and skip insns we just inserted */ env->prog = new_prog; insn = new_prog->insnsi + i + delta; } return 0; } static int jit_subprogs(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog, **func, *tmp; int i, j, subprog_start, subprog_end = 0, len, subprog; struct bpf_insn *insn; void *old_bpf_func; int err = -ENOMEM; if (env->subprog_cnt <= 1) return 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; /* Upon error here we cannot fall back to interpreter but * need a hard reject of the program. Thus -EFAULT is * propagated in any case. */ subprog = find_subprog(env, i + insn->imm + 1); if (subprog < 0) { WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", i + insn->imm + 1); return -EFAULT; } /* temporarily remember subprog id inside insn instead of * aux_data, since next loop will split up all insns into funcs */ insn->off = subprog; /* remember original imm in case JIT fails and fallback * to interpreter will be needed */ env->insn_aux_data[i].call_imm = insn->imm; /* point imm to __bpf_call_base+1 from JITs point of view */ insn->imm = 1; } func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); if (!func) goto out_undo_insn; for (i = 0; i < env->subprog_cnt; i++) { subprog_start = subprog_end; subprog_end = env->subprog_info[i + 1].start; len = subprog_end - subprog_start; func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER); if (!func[i]) goto out_free; memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], len * sizeof(struct bpf_insn)); func[i]->type = prog->type; func[i]->len = len; if (bpf_prog_calc_tag(func[i])) goto out_free; func[i]->is_func = 1; /* Use bpf_prog_F_tag to indicate functions in stack traces. 
* Long term would need debug info to populate names */ func[i]->aux->name[0] = 'F'; func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; func[i]->jit_requested = 1; func[i] = bpf_int_jit_compile(func[i]); if (!func[i]->jited) { err = -ENOTSUPP; goto out_free; } cond_resched(); } /* at this point all bpf functions were successfully JITed * now populate all bpf_calls with correct addresses and * run last pass of JIT */ for (i = 0; i < env->subprog_cnt; i++) { insn = func[i]->insnsi; for (j = 0; j < func[i]->len; j++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; subprog = insn->off; insn->imm = (u64 (*)(u64, u64, u64, u64, u64)) func[subprog]->bpf_func - __bpf_call_base; } /* we use the aux data to keep a list of the start addresses * of the JITed images for each function in the program * * for some architectures, such as powerpc64, the imm field * might not be large enough to hold the offset of the start * address of the callee's JITed image from __bpf_call_base * * in such cases, we can look up the start address of a callee * by using its subprog id, available from the off field of * the call instruction, as an index for this list */ func[i]->aux->func = func; func[i]->aux->func_cnt = env->subprog_cnt; } for (i = 0; i < env->subprog_cnt; i++) { old_bpf_func = func[i]->bpf_func; tmp = bpf_int_jit_compile(func[i]); if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); err = -ENOTSUPP; goto out_free; } cond_resched(); } /* finally lock prog and jit images for all functions and * populate kallsyms */ for (i = 0; i < env->subprog_cnt; i++) { bpf_prog_lock_ro(func[i]); bpf_prog_kallsyms_add(func[i]); } /* Last step: make now unused interpreter insns from main * prog consistent for later dump requests, so they can * later look the same as if they were interpreted only.
*/ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = env->insn_aux_data[i].call_imm; subprog = find_subprog(env, i + insn->off + 1); insn->imm = subprog; } prog->jited = 1; prog->bpf_func = func[0]->bpf_func; prog->aux->func = func; prog->aux->func_cnt = env->subprog_cnt; return 0; out_free: for (i = 0; i < env->subprog_cnt; i++) if (func[i]) bpf_jit_free(func[i]); kfree(func); out_undo_insn: /* cleanup main prog to be interpreted */ prog->jit_requested = 0; for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; insn->off = 0; insn->imm = env->insn_aux_data[i].call_imm; } return err; } static int fixup_call_args(struct bpf_verifier_env *env) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; int i, depth; #endif int err; err = 0; if (env->prog->jit_requested) { err = jit_subprogs(env); if (err == 0) return 0; if (err == -EFAULT) return err; } #ifndef CONFIG_BPF_JIT_ALWAYS_ON for (i = 0; i < prog->len; i++, insn++) { if (insn->code != (BPF_JMP | BPF_CALL) || insn->src_reg != BPF_PSEUDO_CALL) continue; depth = get_callee_stack_depth(env, insn, i); if (depth < 0) return depth; bpf_patch_call_args(insn, depth); } err = 0; #endif return err; } /* fixup insn->imm field of bpf_call instructions * and inline eligible helpers as explicit sequence of BPF instructions * * this function is called after eBPF program passed verification */ static int fixup_bpf_calls(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; struct bpf_insn *insn = prog->insnsi; const struct bpf_func_proto *fn; const int insn_cnt = prog->len; const struct bpf_map_ops *ops; struct bpf_insn_aux_data *aux; struct bpf_insn insn_buf[16]; struct bpf_prog *new_prog; struct bpf_map *map_ptr; int i, cnt, delta = 0; for (i = 0; i < insn_cnt; i++, insn++) { if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_MOD | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; struct bpf_insn mask_and_div[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx div 0 -> 0 */ BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2), BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), BPF_JMP_IMM(BPF_JA, 0, 0, 1), *insn, }; struct bpf_insn mask_and_mod[] = { BPF_MOV32_REG(insn->src_reg, insn->src_reg), /* Rx mod 0 -> Rx */ BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1), *insn, }; struct bpf_insn *patchlet; if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { patchlet = mask_and_div + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0); } else { patchlet = mask_and_mod + (is64 ? 1 : 0); cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 
1 : 0); } new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (BPF_CLASS(insn->code) == BPF_LD && (BPF_MODE(insn->code) == BPF_ABS || BPF_MODE(insn->code) == BPF_IND)) { cnt = env->ops->gen_ld_abs(insn, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } if (insn->code != (BPF_JMP | BPF_CALL)) continue; if (insn->src_reg == BPF_PSEUDO_CALL) continue; if (insn->imm == BPF_FUNC_get_route_realm) prog->dst_needed = 1; if (insn->imm == BPF_FUNC_get_prandom_u32) bpf_user_rnd_init_once(); if (insn->imm == BPF_FUNC_override_return) prog->kprobe_override = 1; if (insn->imm == BPF_FUNC_tail_call) { /* If we tail call into other programs, we * cannot make any assumptions since they can * be replaced dynamically during runtime in * the program array. */ prog->cb_access = 1; env->prog->aux->stack_depth = MAX_BPF_STACK; /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpeter for every normal * call and to prevent accidental JITing by JIT compiler * that doesn't support bpf_tail_call yet */ insn->imm = 0; insn->code = BPF_JMP | BPF_TAIL_CALL; aux = &env->insn_aux_data[i + delta]; if (!bpf_map_ptr_unpriv(aux)) continue; /* instead of changing every JIT dealing with tail_call * emit two extra insns: * if (index >= max_entries) goto out; * index &= array->index_mask; * to avoid out-of-bounds cpu speculation */ if (bpf_map_ptr_poisoned(aux)) { verbose(env, "tail_call abusing map_ptr\n"); return -EINVAL; } map_ptr = BPF_MAP_PTR(aux->map_state); insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, map_ptr->max_entries, 2); insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, container_of(map_ptr, struct bpf_array, map)->index_mask); insn_buf[2] = *insn; cnt = 3; new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup * and other inlining handlers are currently limited to 64 bit * only. 
*/ if (prog->jit_requested && BITS_PER_LONG == 64 && (insn->imm == BPF_FUNC_map_lookup_elem || insn->imm == BPF_FUNC_map_update_elem || insn->imm == BPF_FUNC_map_delete_elem)) { aux = &env->insn_aux_data[i + delta]; if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; map_ptr = BPF_MAP_PTR(aux->map_state); ops = map_ptr->ops; if (insn->imm == BPF_FUNC_map_lookup_elem && ops->map_gen_lookup) { cnt = ops->map_gen_lookup(map_ptr, insn_buf); if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { verbose(env, "bpf verifier is misconfigured\n"); return -EINVAL; } new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); if (!new_prog) return -ENOMEM; delta += cnt - 1; env->prog = prog = new_prog; insn = new_prog->insnsi + i + delta; continue; } BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, (void *(*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_delete_elem, (int (*)(struct bpf_map *map, void *key))NULL)); BUILD_BUG_ON(!__same_type(ops->map_update_elem, (int (*)(struct bpf_map *map, void *key, void *value, u64 flags))NULL)); switch (insn->imm) { case BPF_FUNC_map_lookup_elem: insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - __bpf_call_base; continue; case BPF_FUNC_map_update_elem: insn->imm = BPF_CAST_CALL(ops->map_update_elem) - __bpf_call_base; continue; case BPF_FUNC_map_delete_elem: insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - __bpf_call_base; continue; } goto patch_call_imm; } patch_call_imm: fn = env->ops->get_func_proto(insn->imm, env->prog); /* all functions that have prototype and verifier allowed * programs to call them, must be real in-kernel functions */ if (!fn->func) { verbose(env, "kernel subsystem misconfigured func %s#%d\n", func_id_name(insn->imm), insn->imm); return -EFAULT; } insn->imm = fn->func - __bpf_call_base; } return 0; } static void free_states(struct bpf_verifier_env *env) { struct bpf_verifier_state_list *sl, *sln; int i; if (!env->explored_states) return; for (i = 0; i < env->prog->len; i++) { sl = env->explored_states[i]; if (sl) while (sl != STATE_LIST_MARK) { sln = sl->next; free_verifier_state(&sl->state, false); kfree(sl); sl = sln; } } kfree(env->explored_states); } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) { struct bpf_verifier_env *env; struct bpf_verifier_log *log; int ret = -EINVAL; /* no program is valid */ if (ARRAY_SIZE(bpf_verifier_ops) == 0) return -EINVAL; /* 'struct bpf_verifier_env' can be global, but since it's not small, * allocate/free it every time bpf_check() is called */ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); if (!env) return -ENOMEM; log = &env->log; env->insn_aux_data = vzalloc(array_size(sizeof(struct bpf_insn_aux_data), (*prog)->len)); ret = -ENOMEM; if (!env->insn_aux_data) goto err_free_env; env->prog = *prog; env->ops = bpf_verifier_ops[env->prog->type]; /* grab the mutex to protect few globals used by verifier */ mutex_lock(&bpf_verifier_lock); if (attr->log_level || attr->log_buf || attr->log_size) { /* user requested verbose verifier output * and supplied buffer to store the verification trace */ log->level = attr->log_level; log->ubuf = (char __user *) (unsigned long) attr->log_buf; log->len_total = attr->log_size; ret = -EINVAL; /* log attributes have to be sane */ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || !log->level || !log->ubuf) goto err_unlock; } env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) env->strict_alignment = true; ret = 
replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; if (bpf_prog_is_dev_bound(env->prog->aux)) { ret = bpf_prog_offload_verifier_prep(env); if (ret) goto skip_full_check; } env->explored_states = kcalloc(env->prog->len, sizeof(struct bpf_verifier_state_list *), GFP_USER); ret = -ENOMEM; if (!env->explored_states) goto skip_full_check; env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ret = check_cfg(env); if (ret < 0) goto skip_full_check; ret = do_check(env); if (env->cur_state) { free_verifier_state(env->cur_state, true); env->cur_state = NULL; } skip_full_check: while (!pop_stack(env, NULL, NULL)); free_states(env); if (ret == 0) sanitize_dead_code(env); if (ret == 0) ret = check_max_stack_depth(env); if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); if (ret == 0) ret = fixup_bpf_calls(env); if (ret == 0) ret = fixup_call_args(env); if (log->level && bpf_verifier_log_full(log)) ret = -ENOSPC; if (log->level && !log->ubuf) { ret = -EFAULT; goto err_release_maps; } if (ret == 0 && env->used_map_cnt) { /* if program passed verifier, update used_maps in bpf_prog_info */ env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt, sizeof(env->used_maps[0]), GFP_KERNEL); if (!env->prog->aux->used_maps) { ret = -ENOMEM; goto err_release_maps; } memcpy(env->prog->aux->used_maps, env->used_maps, sizeof(env->used_maps[0]) * env->used_map_cnt); env->prog->aux->used_map_cnt = env->used_map_cnt; /* program is valid. Convert pseudo bpf_ld_imm64 into generic * bpf_ld_imm64 instructions */ convert_pseudo_ld_imm64(env); } err_release_maps: if (!env->prog->aux->used_maps) /* if we didn't copy map pointers into bpf_prog_info, release * them now. Otherwise free_used_maps() will release them. */ release_maps(env); *prog = env->prog; err_unlock: mutex_unlock(&bpf_verifier_lock); vfree(env->insn_aux_data); err_free_env: kfree(env); return ret; }
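The tail of bpf_check() above shows the knobs a loader drives from userspace: attr->log_level, attr->log_buf and attr->log_size feed the verifier log, and a log_size below 128 bytes is rejected outright. A minimal loader sketch, assuming the Linux uapi headers and a raw syscall (glibc ships no bpf() wrapper); the two-instruction program is about the smallest one do_check() accepts, since BPF_EXIT requires R0 to have been written first:

#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* "r0 = 0; exit" - do_check() reads R0 at BPF_EXIT via
	 * check_reg_arg(env, BPF_REG_0, SRC_OP), so R0 must be written */
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	char log[4096] = "";
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (__u64)(unsigned long)insns;
	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
	attr.license   = (__u64)(unsigned long)"GPL";
	attr.log_buf   = (__u64)(unsigned long)log;
	attr.log_size  = sizeof(log);	/* must be >= 128, see bpf_check() */
	attr.log_level = 2;		/* requests the verbose state dumps */

	int fd = (int)syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	if (fd < 0)
		fprintf(stderr, "verifier rejected program, log:\n%s\n", log);
	return fd < 0;
}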
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { __mark_reg_unknown(dst_reg); return 0; } switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. Our maximum is the minimum of the operands' maxima. 
*/ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. If the value in dst_reg might * be negative, then either: * 1) src_reg might be zero, so the sign bit of the result is * unknown, so we lose our signed bounds * 2) it's known negative, thus the unsigned bounds capture the * signed bounds * 3) the signed bounds cross zero, so they tell us nothing * about the result * If the value in dst_reg is known nonnegative, then again the * unsigned bounds capture the signed bounds. * Thus, in all cases it suffices to blow away our signed bounds * and rely on inferring new ones from the unsigned bounds and * var_off of the result. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_ARSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number.
*/ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* Upon reaching here, src_known is true and * umax_val is equal to umin_val. */ dst_reg->smin_value >>= umin_val; dst_reg->smax_value >>= umin_val; dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val); /* blow away the dst_reg umin_value/umax_value and rely on * dst_reg var_off to refine the result. */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; }
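Both the vulnerable and the fixed version of adjust_scalar_min_max_vals() lean on coerce_reg_to_size(), which this record does not reproduce. A sketch of its semantics, reconstructed from the call sites above; the actual helper elsewhere in verifier.c may differ in detail:

/* Illustrative only: truncate the tracked state of `reg` to the low
 * `size` bytes, mirroring what a 32-bit ALU result looks like at run time. */
static void coerce_reg_to_size_sketch(struct bpf_reg_state *reg, int size)
{
	u64 mask = ((u64)1 << (size * 8)) - 1;

	/* forget everything known about the high bits */
	reg->var_off = tnum_cast(reg->var_off, size);

	/* if umin and umax agree on the bits being dropped, the low parts
	 * remain valid bounds; otherwise the low word can land anywhere
	 * in [0, mask] */
	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
		reg->umin_value &= mask;
		reg->umax_value &= mask;
	} else {
		reg->umin_value = 0;
		reg->umax_value = mask;
	}
	/* the truncated value is nonnegative as an s64, so the unsigned
	 * bounds double as the signed ones */
	reg->smin_value = reg->umin_value;
	reg->smax_value = reg->umax_value;
}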
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, struct bpf_insn *insn, struct bpf_reg_state *dst_reg, struct bpf_reg_state src_reg) { struct bpf_reg_state *regs = cur_regs(env); u8 opcode = BPF_OP(insn->code); bool src_known, dst_known; s64 smin_val, smax_val; u64 umin_val, umax_val; u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; if (insn_bitness == 32) { /* Relevant for 32-bit RSH: Information can propagate towards * LSB, so it isn't sufficient to only truncate the output to * 32 bits. */ coerce_reg_to_size(dst_reg, 4); coerce_reg_to_size(&src_reg, 4); } smin_val = src_reg.smin_value; smax_val = src_reg.smax_value; umin_val = src_reg.umin_value; umax_val = src_reg.umax_value; src_known = tnum_is_const(src_reg.var_off); dst_known = tnum_is_const(dst_reg->var_off); if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || smin_val > smax_val || umin_val > umax_val) { /* Taint dst register if offset had invalid bounds derived from * e.g. dead branches. */ __mark_reg_unknown(dst_reg); return 0; } if (!src_known && opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { __mark_reg_unknown(dst_reg); return 0; } switch (opcode) { case BPF_ADD: if (signed_add_overflows(dst_reg->smin_value, smin_val) || signed_add_overflows(dst_reg->smax_value, smax_val)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value += smin_val; dst_reg->smax_value += smax_val; } if (dst_reg->umin_value + umin_val < umin_val || dst_reg->umax_value + umax_val < umax_val) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value += umin_val; dst_reg->umax_value += umax_val; } dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); break; case BPF_SUB: if (signed_sub_overflows(dst_reg->smin_value, smax_val) || signed_sub_overflows(dst_reg->smax_value, smin_val)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value -= smax_val; dst_reg->smax_value -= smin_val; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { /* Cannot overflow (as long as bounds are consistent) */ dst_reg->umin_value -= umax_val; dst_reg->umax_value -= umin_val; } dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); break; case BPF_MUL: dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); if (smin_val < 0 || dst_reg->smin_value < 0) { /* Ain't nobody got time to multiply that sign */ __mark_reg_unbounded(dst_reg); __update_reg_bounds(dst_reg); break; } /* Both values are positive, so we can work with unsigned and * copy the result to signed (unless it exceeds S64_MAX). */ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { /* Potential overflow, we know nothing */ __mark_reg_unbounded(dst_reg); /* (except what we can learn from the var_off) */ __update_reg_bounds(dst_reg); break; } dst_reg->umin_value *= umin_val; dst_reg->umax_value *= umax_val; if (dst_reg->umax_value > S64_MAX) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } break; case BPF_AND: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value & src_reg.var_off.value); break; } /* We get our minimum from the var_off, since that's inherently * bitwise. 
Our maximum is the minimum of the operands' maxima. */ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = dst_reg->var_off.value; dst_reg->umax_value = min(dst_reg->umax_value, umax_val); if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ANDing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ANDing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_OR: if (src_known && dst_known) { __mark_reg_known(dst_reg, dst_reg->var_off.value | src_reg.var_off.value); break; } /* We get our maximum from the var_off, and our minimum is the * maximum of the operands' minima */ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); dst_reg->umin_value = max(dst_reg->umin_value, umin_val); dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask; if (dst_reg->smin_value < 0 || smin_val < 0) { /* Lose signed bounds when ORing negative numbers, * ain't nobody got time for that. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; } else { /* ORing two positives gives a positive, so safe to * cast result into s64. */ dst_reg->smin_value = dst_reg->umin_value; dst_reg->smax_value = dst_reg->umax_value; } /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_LSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* We lose all sign bit information (except what we can pick * up from var_off) */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; /* If we might shift our top bit out, then we know nothing */ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; } else { dst_reg->umin_value <<= umin_val; dst_reg->umax_value <<= umax_val; } dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_RSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number. */ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* BPF_RSH is an unsigned shift. If the value in dst_reg might * be negative, then either: * 1) src_reg might be zero, so the sign bit of the result is * unknown, so we lose our signed bounds * 2) it's known negative, thus the unsigned bounds capture the * signed bounds * 3) the signed bounds cross zero, so they tell us nothing * about the result * If the value in dst_reg is known nonnegative, then again the * unsigned bounds capture the signed bounds. * Thus, in all cases it suffices to blow away our signed bounds * and rely on inferring new ones from the unsigned bounds and * var_off of the result. */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); dst_reg->umin_value >>= umax_val; dst_reg->umax_value >>= umin_val; /* We may learn something more from the var_off */ __update_reg_bounds(dst_reg); break; case BPF_ARSH: if (umax_val >= insn_bitness) { /* Shifts greater than 31 or 63 are undefined. * This includes shifts by a negative number.
*/ mark_reg_unknown(env, regs, insn->dst_reg); break; } /* Upon reaching here, src_known is true and * umax_val is equal to umin_val. */ dst_reg->smin_value >>= umin_val; dst_reg->smax_value >>= umin_val; dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val); /* blow away the dst_reg umin_value/umax_value and rely on * dst_reg var_off to refine the result. */ dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; __update_reg_bounds(dst_reg); break; default: mark_reg_unknown(env, regs, insn->dst_reg); break; } if (BPF_CLASS(insn->code) != BPF_ALU64) { /* 32-bit ALU ops are (32,32)->32 */ coerce_reg_to_size(dst_reg, 4); } __reg_deduce_bounds(dst_reg); __reg_bound_offset(dst_reg); return 0; }
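The only difference between func_before and func_after is where the 32-bit truncation happens, and for right shifts that ordering is the entire bug: a 32-bit shift at run time operates on an already-truncated input, while the vulnerable verifier shifted the full 64-bit tracked value and truncated only the output. A standalone arithmetic demonstration in plain C, using the hypothetical constant 0x100000000 (bit 32 set, low word zero):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tracked = 0x100000000ULL;	/* bit 32 set, low word zero */

	/* what the hardware does for a 32-bit ALU shift: the input is
	 * truncated to 32 bits *before* the operation */
	uint32_t runtime    = (uint32_t)tracked >> 1;	/* 0x0 */

	/* vulnerable ordering (func_before): shift the full 64-bit tracked
	 * value, then truncate only the output */
	uint32_t before_fix = (uint32_t)(tracked >> 1);	/* 0x80000000 */

	/* fixed ordering (func_after): coerce the input first, then shift */
	uint32_t after_fix  = (uint32_t)tracked >> 1;	/* 0x0 */

	printf("runtime=%#" PRIx32 " before_fix=%#" PRIx32
	       " after_fix=%#" PRIx32 "\n", runtime, before_fix, after_fix);
	return 0;
}

Bit 32 of the input, which a real 32-bit shift can never observe, leaks into bit 31 of the tracked value, so the unpatched verifier becomes certain the result is 0x80000000 while the run-time value is 0. ADD, SUB, AND and left shifts only move information towards the MSB, which is why truncating only the output had been considered sufficient before this fix.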
{'added': [(2899, '\tif (insn_bitness == 32) {'), (2900, '\t\t/* Relevant for 32-bit RSH: Information can propagate towards'), (2901, "\t\t * LSB, so it isn't sufficient to only truncate the output to"), (2902, '\t\t * 32 bits.'), (2903, '\t\t */'), (2904, '\t\tcoerce_reg_to_size(dst_reg, 4);'), (2905, '\t\tcoerce_reg_to_size(&src_reg, 4);'), (2906, '\t}'), (2907, '')], 'deleted': [(3134, '\t\tcoerce_reg_to_size(&src_reg, 4);')]}
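The same change rendered as a conventional diff for readability (reconstructed from the added/deleted tuples above; 2899-2907 are post-fix line numbers, 3134 is the pre-fix line number of the removed call):

@@ adjust_scalar_min_max_vals: after the insn_bitness declaration @@
+	if (insn_bitness == 32) {
+		/* Relevant for 32-bit RSH: Information can propagate towards
+		 * LSB, so it isn't sufficient to only truncate the output to
+		 * 32 bits.
+		 */
+		coerce_reg_to_size(dst_reg, 4);
+		coerce_reg_to_size(&src_reg, 4);
+	}
+
@@ adjust_scalar_min_max_vals: final 32-bit truncation block @@
-		coerce_reg_to_size(&src_reg, 4);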
num_lines_added: 9
num_lines_deleted: 1
num_lines_in_file: 4,230
num_tokens_in_file: 27,473
num_lines_in_method: 177
num_tokens_in_method: 1,072
method_complexity: 44
repo: https://github.com/torvalds/linux
cve_id: CVE-2018-18445
cwe_id: CWE-125
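The CWE-125 classification (out-of-bounds read) reflects how such a tracking divergence can be weaponized. A hypothetical attack shape in BPF assembler syntax, building on the arithmetic demonstration above (illustrative pseudocode, not a tested proof of concept; assume r0 already holds a NULL-checked map-value pointer):

	r1 = 0x100000000 ll	; BPF_LD_IMM64: bit 32 set, low word zero
	w1 >>= 1		; 32-bit RSH: run-time r1 = 0, but the
				; unpatched verifier tracks r1 == 0x80000000
	r2 = 0x80000000 ll
	r1 -= r2		; verifier: r1 == 0; run time: r1 wraps to
				; 0xffffffff80000000 (-2^31 as s64)
	r0 += r1		; verifier sees offset 0 into the map value,
				; so check_mem_access() permits the load
	r3 = *(u64 *)(r0 + 0)	; run time reads ~2 GiB below the map value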
id: 2,055
file_name: avc_ext.c
programming_language: C
method_name: AV1_RewriteESDescriptorEx
/* * GPAC - Multimedia Framework C SDK * * Authors: Jean Le Feuvre * Copyright (c) Telecom ParisTech 2000-2021 * All rights reserved * * This file is part of GPAC / ISO Media File Format sub-project * * GPAC is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * GPAC is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <gpac/internal/isomedia_dev.h> #include <gpac/constants.h> #include <gpac/internal/media_dev.h> #ifndef GPAC_DISABLE_ISOM Bool gf_isom_is_nalu_based_entry(GF_MediaBox *mdia, GF_SampleEntryBox *_entry) { GF_MPEGVisualSampleEntryBox *entry; if (!gf_isom_is_video_handler_type(mdia->handler->handlerType)) return GF_FALSE; if (!_entry) return GF_FALSE; entry = (GF_MPEGVisualSampleEntryBox*)_entry; switch (_entry->type) { case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_SVC2: case GF_ISOM_BOX_TYPE_MVC1: case GF_ISOM_BOX_TYPE_MVC2: case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_LHV1: case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_MHV1: case GF_ISOM_BOX_TYPE_MHC1: case GF_ISOM_BOX_TYPE_HVT1: case GF_ISOM_BOX_TYPE_LHT1: return GF_TRUE; case GF_ISOM_BOX_TYPE_GNRV: case GF_ISOM_BOX_TYPE_GNRA: case GF_ISOM_BOX_TYPE_GNRM: return GF_FALSE; default: break; } if (!gf_isom_is_video_handler_type(entry->internal_type)) return GF_FALSE; if (entry->avc_config || entry->svc_config || entry->mvc_config || entry->hevc_config || entry->lhvc_config) { GF_ProtectionSchemeInfoBox *schi = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (!schi || !schi->scheme_type) return GF_TRUE; switch (schi->scheme_type->scheme_type) { case GF_ISOM_CENC_SCHEME: case GF_ISOM_CBC_SCHEME: case GF_ISOM_CENS_SCHEME: case GF_ISOM_CBCS_SCHEME: return GF_TRUE; default: break; } } return GF_FALSE; } static void rewrite_nalus_list(GF_List *nalus, GF_BitStream *bs, Bool rewrite_start_codes, u32 nal_unit_size_field) { u32 i, count = gf_list_count(nalus); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(nalus, i); if (rewrite_start_codes) gf_bs_write_u32(bs, 1); else gf_bs_write_int(bs, sl->size, 8*nal_unit_size_field); gf_bs_write_data(bs, sl->data, sl->size); } } static GF_Err process_extractor(GF_ISOFile *file, GF_MediaBox *mdia, u32 sampleNumber, u64 sampleDTS, u32 nal_size, u16 nal_hdr, u32 nal_unit_size_field, Bool is_hevc, Bool rewrite_ps, Bool rewrite_start_codes, u32 extractor_mode) { GF_Err e; u32 di, ref_track_index, ref_track_num, data_offset, data_length, cur_extract_mode, ref_extract_mode, ref_nalu_size, nb_bytes_nalh; GF_TrackReferenceTypeBox *dpnd; GF_TrackBox *ref_trak; s8 sample_offset; u32 last_byte, ref_sample_num, prev_ref_sample_num; Bool header_written = GF_FALSE; nb_bytes_nalh = is_hevc ? 
2 : 1; switch (extractor_mode) { case 0: last_byte = (u32) gf_bs_get_position(mdia->nalu_parser) + nal_size - (is_hevc ? 2 : 1); if (!is_hevc) gf_bs_read_int(mdia->nalu_parser, 24); //1 byte for HEVC , 3 bytes for AVC of NALUHeader in extractor while (gf_bs_get_position(mdia->nalu_parser) < last_byte) { u32 xmode = 0; //hevc extractors use constructors if (is_hevc) xmode = gf_bs_read_u8(mdia->nalu_parser); if (xmode) { u8 done=0, len = gf_bs_read_u8(mdia->nalu_parser); while (done<len) { u8 c = gf_bs_read_u8(mdia->nalu_parser); done++; if (header_written) { gf_bs_write_u8(mdia->nalu_out_bs, c); } else if (done==nal_unit_size_field) { if (rewrite_start_codes) { gf_bs_write_int(mdia->nalu_out_bs, 1, 32); } else { gf_bs_write_u8(mdia->nalu_out_bs, c); } header_written = GF_TRUE; } else if (!rewrite_start_codes) { gf_bs_write_u8(mdia->nalu_out_bs, c); } } continue; } ref_track_index = gf_bs_read_u8(mdia->nalu_parser); sample_offset = (s8) gf_bs_read_int(mdia->nalu_parser, 8); data_offset = gf_bs_read_int(mdia->nalu_parser, nal_unit_size_field*8); data_length = gf_bs_read_int(mdia->nalu_parser, nal_unit_size_field*8); Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_SCAL, &dpnd); ref_track_num = 0; if (dpnd && ref_track_index && (ref_track_index<=dpnd->trackIDCount)) ref_track_num = gf_isom_get_track_by_id(file, dpnd->trackIDs[ref_track_index-1]); if (!ref_track_num) { GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("ISOBMF: Extractor target track is not present in file - skipping.\n")); return GF_OK; } cur_extract_mode = gf_isom_get_nalu_extract_mode(file, ref_track_num); //we must be in inspect mode only otherwise the reference sample will not be the one stored on file (change in start codes, PS inserted or other NALUs inserted) //and this will corrupt extraction (wrong data offsets) ref_extract_mode = GF_ISOM_NALU_EXTRACT_INSPECT; gf_isom_set_nalu_extract_mode(file, ref_track_num, ref_extract_mode); ref_trak = gf_isom_get_track_from_file(file, ref_track_num); if (!ref_trak) return GF_ISOM_INVALID_FILE; if (!mdia->extracted_samp) { mdia->extracted_samp = gf_isom_sample_new(); if (!mdia->extracted_samp) return GF_IO_ERR; } if (!mdia->extracted_bs) { mdia->extracted_bs = gf_bs_new("a", 1, GF_BITSTREAM_READ); if (!mdia->extracted_bs) return GF_IO_ERR; } e = stbl_findEntryForTime(ref_trak->Media->information->sampleTable, sampleDTS, 0, &ref_sample_num, &prev_ref_sample_num); if (e) return e; if (!ref_sample_num) ref_sample_num = prev_ref_sample_num; if (!ref_sample_num) return GF_ISOM_INVALID_FILE; if ((sample_offset<0) && (ref_sample_num > (u32) -sample_offset)) return GF_ISOM_INVALID_FILE; ref_sample_num = (u32) ( (s32) ref_sample_num + sample_offset); e = Media_GetSample(ref_trak->Media, ref_sample_num, &mdia->extracted_samp, &di, GF_FALSE, NULL); if (e) return e; if (!mdia->extracted_samp->alloc_size) mdia->extracted_samp->alloc_size = mdia->extracted_samp->dataLength; #if 0 if (!header_written && rewrite_start_codes) { gf_bs_write_int(dst_bs, 1, 32); if (is_hevc) { gf_bs_write_int(dst_bs, 0, 1); gf_bs_write_int(dst_bs, GF_HEVC_NALU_ACCESS_UNIT, 6); gf_bs_write_int(dst_bs, 0, 9); /*pic-type - by default we signal all slice types possible*/ gf_bs_write_int(dst_bs, 2, 3); gf_bs_write_int(dst_bs, 0, 5); } else { gf_bs_write_int(dst_bs, (ref_samp->data[0] & 0x60) | GF_AVC_NALU_ACCESS_UNIT, 8); gf_bs_write_int(dst_bs, 0xF0 , 8); /*7 "all supported NALUs" (=111) + rbsp trailing (10000)*/; } } #endif gf_bs_reassign_buffer(mdia->extracted_bs, mdia->extracted_samp->data + data_offset, 
mdia->extracted_samp->dataLength - data_offset); if (mdia->extracted_samp->dataLength - data_offset >= data_length) { while (data_length && gf_bs_available(mdia->extracted_bs)) { if (!header_written) { ref_nalu_size = gf_bs_read_int(mdia->extracted_bs, 8*nal_unit_size_field); assert(data_length>nal_unit_size_field); data_length -= nal_unit_size_field; if (data_length > gf_bs_available(mdia->extracted_bs)) { data_length = (u32)gf_bs_available(mdia->extracted_bs); } } else { ref_nalu_size = data_length; } if (ref_nalu_size > mdia->tmp_nal_copy_buffer_alloc) { mdia->tmp_nal_copy_buffer_alloc = ref_nalu_size; mdia->tmp_nal_copy_buffer = (char*) gf_realloc(mdia->tmp_nal_copy_buffer, sizeof(char) * ref_nalu_size ); } gf_bs_read_data(mdia->extracted_bs, mdia->tmp_nal_copy_buffer, ref_nalu_size); if (!header_written) { if (rewrite_start_codes) gf_bs_write_u32(mdia->nalu_out_bs, 1); else gf_bs_write_int(mdia->nalu_out_bs, ref_nalu_size, 8*nal_unit_size_field); } assert(data_length >= ref_nalu_size); gf_bs_write_data(mdia->nalu_out_bs, mdia->tmp_nal_copy_buffer, ref_nalu_size); data_length -= ref_nalu_size; header_written = GF_FALSE; } } else { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("ISOBMF: Extractor size is larger than referred sample size - skipping.\n")); } gf_isom_set_nalu_extract_mode(file, ref_track_num, cur_extract_mode); if (!is_hevc) break; } break; case 1: //skip to end of this NALU gf_bs_skip_bytes(mdia->nalu_parser, nal_size - nb_bytes_nalh); break; case 2: if (nal_size - nb_bytes_nalh > mdia->tmp_nal_copy_buffer_alloc) { mdia->tmp_nal_copy_buffer_alloc = nal_size - nb_bytes_nalh; mdia->tmp_nal_copy_buffer = (char*) gf_realloc(mdia->tmp_nal_copy_buffer, sizeof(char) * (nal_size - nb_bytes_nalh) ); } gf_bs_read_data(mdia->nalu_parser, mdia->tmp_nal_copy_buffer, nal_size - nb_bytes_nalh); if (rewrite_start_codes) gf_bs_write_u32(mdia->nalu_out_bs, 1); else gf_bs_write_int(mdia->nalu_out_bs, nal_size, 8*nal_unit_size_field); gf_bs_write_u8(mdia->nalu_out_bs, nal_hdr); gf_bs_write_data(mdia->nalu_out_bs, mdia->tmp_nal_copy_buffer, nal_size - nb_bytes_nalh); break; } return GF_OK; } #ifndef GPAC_DISABLE_HEVC /* returns the SAP type as defined in the 14496-12 specification */ static GF_ISOSAPType sap_type_from_nal_type(u8 nal_type) { switch (nal_type) { case GF_HEVC_NALU_SLICE_CRA: return SAP_TYPE_3; case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_BLA_N_LP: return SAP_TYPE_1; case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_W_LP: return SAP_TYPE_2; default: return RAP_NO; } } #endif static GF_ISOSAPType is_sample_idr(GF_MediaBox *mdia, GF_ISOSample *sample, GF_MPEGVisualSampleEntryBox *entry) { Bool is_hevc = GF_FALSE; u32 nalu_size_field = 0; if (entry->avc_config && entry->avc_config->config) nalu_size_field = entry->avc_config->config->nal_unit_size; else if (entry->svc_config && entry->svc_config->config) nalu_size_field = entry->svc_config->config->nal_unit_size; else if (entry->mvc_config && entry->mvc_config->config) nalu_size_field = entry->mvc_config->config->nal_unit_size; else if (entry->hevc_config && entry->hevc_config->config) { nalu_size_field = entry->hevc_config->config->nal_unit_size; is_hevc = GF_TRUE; } else if (entry->lhvc_config && entry->lhvc_config->config) { nalu_size_field = entry->lhvc_config->config->nal_unit_size; is_hevc = GF_TRUE; } if (!nalu_size_field) return RAP_NO; if (!mdia->nalu_parser) mdia->nalu_parser = gf_bs_new(sample->data, sample->dataLength, GF_BITSTREAM_READ); else 
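/* parser already exists, just rebind it to this sample's payload */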
gf_bs_reassign_buffer(mdia->nalu_parser, sample->data, sample->dataLength); if (!mdia->nalu_parser) return RAP_NO; while (gf_bs_available(mdia->nalu_parser)) { u8 nal_type; u32 size = gf_bs_read_int(mdia->nalu_parser, 8*nalu_size_field); if (is_hevc) { #ifndef GPAC_DISABLE_HEVC u16 nal_hdr = gf_bs_read_u16(mdia->nalu_parser); nal_type = (nal_hdr&0x7E00) >> 9; switch (nal_type) { case GF_HEVC_NALU_SLICE_CRA: return SAP_TYPE_3; case GF_HEVC_NALU_SLICE_IDR_N_LP: case GF_HEVC_NALU_SLICE_BLA_N_LP: return SAP_TYPE_1; case GF_HEVC_NALU_SLICE_IDR_W_DLP: case GF_HEVC_NALU_SLICE_BLA_W_DLP: case GF_HEVC_NALU_SLICE_BLA_W_LP: return SAP_TYPE_2; case GF_HEVC_NALU_ACCESS_UNIT: case GF_HEVC_NALU_FILLER_DATA: case GF_HEVC_NALU_SEI_PREFIX: case GF_HEVC_NALU_VID_PARAM: case GF_HEVC_NALU_SEQ_PARAM: case GF_HEVC_NALU_PIC_PARAM: break; default: return RAP_NO; } gf_bs_skip_bytes(mdia->nalu_parser, size - 2); #endif } else { u8 nal_hdr = gf_bs_read_u8(mdia->nalu_parser); nal_type = nal_hdr & 0x1F; if (nal_type==GF_AVC_NALU_IDR_SLICE) return SAP_TYPE_1; if (nal_type<GF_AVC_NALU_IDR_SLICE) return RAP_NO; gf_bs_skip_bytes(mdia->nalu_parser, size - 1); } } return RAP_NO; } static void nalu_merge_ps(GF_BitStream *ps_bs, Bool rewrite_start_codes, u32 nal_unit_size_field, GF_MPEGVisualSampleEntryBox *entry, Bool is_hevc, Bool *has_vps) { u32 i, count; if (is_hevc) { if (entry->hevc_config) { count = gf_list_count(entry->hevc_config->config->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(entry->hevc_config->config->param_array, i); if (ar->type == GF_HEVC_NALU_VID_PARAM) { if (! *has_vps) *has_vps = GF_TRUE; else continue; } rewrite_nalus_list(ar->nalus, ps_bs, rewrite_start_codes, nal_unit_size_field); } } if (entry->lhvc_config) { count = gf_list_count(entry->lhvc_config->config->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(entry->lhvc_config->config->param_array, i); if (ar->type == GF_HEVC_NALU_VID_PARAM) { if (! *has_vps) *has_vps = GF_TRUE; else continue; } rewrite_nalus_list(ar->nalus, ps_bs, rewrite_start_codes, nal_unit_size_field); } } } else { if (entry->avc_config) { rewrite_nalus_list(entry->avc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field); rewrite_nalus_list(entry->avc_config->config->sequenceParameterSetExtensions, ps_bs, rewrite_start_codes, nal_unit_size_field); rewrite_nalus_list(entry->avc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field); } /*add svc config */ if (entry->svc_config) { rewrite_nalus_list(entry->svc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field); rewrite_nalus_list(entry->svc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field); } /*add mvc config */ if (entry->mvc_config) { rewrite_nalus_list(entry->mvc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field); rewrite_nalus_list(entry->mvc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field); } } } GF_Err gf_isom_nalu_sample_rewrite(GF_MediaBox *mdia, GF_ISOSample *sample, u32 sampleNumber, GF_MPEGVisualSampleEntryBox *entry) { Bool is_hevc = GF_FALSE; //if only one sync given in the sample sync table, insert sps/pps/vps before cra/bla in hevc // Bool check_cra_bla = (mdia->information->sampleTable->SyncSample && mdia->information->sampleTable->SyncSample->nb_entries>1) ? 
0 : 1; Bool check_cra_bla = GF_TRUE; Bool insert_nalu_delim = GF_TRUE; Bool force_sei_inspect = GF_FALSE; GF_Err e = GF_OK; GF_BitStream *sei_suffix_bs = NULL; Bool ps_transfered = GF_FALSE; u32 nal_size, nal_unit_size_field, extractor_mode; Bool rewrite_ps, rewrite_start_codes, insert_vdrd_code; u8 nal_type; u32 nal_hdr, sabt_ref, i, track_num; u32 temporal_id = 0; GF_ISOFile *file = mdia->mediaTrack->moov->mov; GF_TrackReferenceTypeBox *scal = NULL; Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_SCAL, &scal); rewrite_ps = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG) ? GF_TRUE : GF_FALSE; rewrite_start_codes = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG) ? GF_TRUE : GF_FALSE; insert_vdrd_code = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_VDRD_FLAG) ? GF_TRUE : GF_FALSE; if (!entry->svc_config && !entry->mvc_config && !entry->lhvc_config) insert_vdrd_code = GF_FALSE; extractor_mode = mdia->mediaTrack->extractor_mode&0x0000FFFF; if (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_TILE_ONLY) { insert_nalu_delim = GF_FALSE; } track_num = 1 + gf_list_find(mdia->mediaTrack->moov->trackList, mdia->mediaTrack); if ( (extractor_mode != GF_ISOM_NALU_EXTRACT_INSPECT) && !(mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_TILE_ONLY) ) { u32 ref_track, di; //aggregate all sabt samples with the same DTS if (entry->lhvc_config && !entry->hevc_config && !(mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_LAYER_ONLY)) { if (gf_isom_get_reference_count(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_SCAL) <= 0) { //FIXME - for now we only support two layers (base + enh) in implicit if ( gf_isom_get_reference_count(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_BASE) >= 1) { GF_ISOSample *base_samp; gf_isom_get_reference(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_BASE, 1, &ref_track); switch (gf_isom_get_media_subtype(mdia->mediaTrack->moov->mov , ref_track, 1)) { case GF_ISOM_SUBTYPE_HVC1: case GF_ISOM_SUBTYPE_HVC2: case GF_ISOM_SUBTYPE_HEV1: case GF_ISOM_SUBTYPE_HEV2: if (!mdia->extracted_samp) { mdia->extracted_samp = gf_isom_sample_new(); if (!mdia->extracted_samp) return GF_OUT_OF_MEM; } base_samp = gf_isom_get_sample_ex(mdia->mediaTrack->moov->mov, ref_track, sampleNumber + mdia->mediaTrack->sample_count_at_seg_start, &di, mdia->extracted_samp, NULL); if (base_samp && base_samp->data) { if (!sample->alloc_size || (sample->alloc_size<sample->dataLength+base_samp->dataLength) ) { sample->data = gf_realloc(sample->data, sample->dataLength+base_samp->dataLength); if (sample->alloc_size) sample->alloc_size = sample->dataLength+base_samp->dataLength; } memmove(sample->data + base_samp->dataLength, sample->data , sample->dataLength); memcpy(sample->data, base_samp->data, base_samp->dataLength); sample->dataLength += base_samp->dataLength; } Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_BASE, &scal); break; } } } } sabt_ref = gf_isom_get_reference_count(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_SABT); if ((s32) sabt_ref > 0) { force_sei_inspect = GF_TRUE; for (i=0; i<sabt_ref; i++) { GF_ISOSample *tile_samp; gf_isom_get_reference(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_SABT, i+1, &ref_track); if (!mdia->extracted_samp) { mdia->extracted_samp = gf_isom_sample_new(); if (!mdia->extracted_samp) return GF_OUT_OF_MEM; } tile_samp = gf_isom_get_sample_ex(mdia->mediaTrack->moov->mov, ref_track, sampleNumber + mdia->mediaTrack->sample_count_at_seg_start, &di, mdia->extracted_samp, NULL); if 
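/* append each sabt tile sample after the current payload, growing the sample buffer when needed */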
(tile_samp && tile_samp->data) {
					if (!sample->alloc_size || (sample->alloc_size<sample->dataLength+tile_samp->dataLength) ) {
						sample->data = gf_realloc(sample->data, sample->dataLength+tile_samp->dataLength);
						if (sample->alloc_size) sample->alloc_size = sample->dataLength+tile_samp->dataLength;
					}
					memcpy(sample->data + sample->dataLength, tile_samp->data, tile_samp->dataLength);
					sample->dataLength += tile_samp->dataLength;
				}
			}
		}
	}

	if ( gf_isom_get_reference_count(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_TBAS) >= 1) {
		u32 ref_track;
		u32 idx = gf_list_find(mdia->information->sampleTable->SampleDescription->child_boxes, entry);
		GF_TrackBox *tbas;
		gf_isom_get_reference(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_TBAS, 1, &ref_track);
		tbas = (GF_TrackBox *)gf_list_get(mdia->mediaTrack->moov->trackList, ref_track-1);
		entry = gf_list_get(tbas->Media->information->sampleTable->SampleDescription->child_boxes, idx);
	}
	//the tbas redirection above may fail to resolve an entry, check it before any dereference
	if (!entry) return GF_BAD_PARAM;

	if (sample->IsRAP < SAP_TYPE_2) {
		if (mdia->information->sampleTable->no_sync_found || (!sample->IsRAP && check_cra_bla) ) {
			sample->IsRAP = is_sample_idr(mdia, sample, entry);
		}
	}
	if (!sample->IsRAP)
		rewrite_ps = GF_FALSE;

	if (extractor_mode != GF_ISOM_NALU_EXTRACT_LAYER_ONLY)
		insert_vdrd_code = GF_FALSE;

	//this is a compatible HEVC, don't insert VDRD, insert NALU delim
	if (entry->lhvc_config && entry->hevc_config)
		insert_vdrd_code = GF_FALSE;

	if (extractor_mode == GF_ISOM_NALU_EXTRACT_INSPECT) {
		if (!rewrite_ps && !rewrite_start_codes)
			return GF_OK;
	}

	nal_unit_size_field = 0;
	/*if svc rewrite*/
	if (entry->svc_config && entry->svc_config->config)
		nal_unit_size_field = entry->svc_config->config->nal_unit_size;
	/*if mvc rewrite*/
	if (entry->mvc_config && entry->mvc_config->config)
		nal_unit_size_field = entry->mvc_config->config->nal_unit_size;
	/*if lhvc rewrite*/
	else if (entry->lhvc_config && entry->lhvc_config->config) {
		is_hevc = GF_TRUE;
		nal_unit_size_field = entry->lhvc_config->config->nal_unit_size;
	}
	/*otherwise do nothing*/
	else if (!rewrite_ps && !rewrite_start_codes && !scal && !force_sei_inspect) {
		return GF_OK;
	}

	if (!nal_unit_size_field) {
		if (entry->avc_config && entry->avc_config->config)
			nal_unit_size_field = entry->avc_config->config->nal_unit_size;
		else if (entry->lhvc_config && entry->lhvc_config->config) {
			nal_unit_size_field = entry->lhvc_config->config->nal_unit_size;
			is_hevc = GF_TRUE;
		}
		else if (entry->hevc_config && entry->hevc_config->config) {
			nal_unit_size_field = entry->hevc_config->config->nal_unit_size;
			is_hevc = GF_TRUE;
		}
	}
	if (!nal_unit_size_field)
		return GF_ISOM_INVALID_FILE;

	//setup PS rewriter
	if (!mdia->nalu_ps_bs)
		mdia->nalu_ps_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
	gf_bs_seek(mdia->nalu_ps_bs, 0);

	//setup sample reader
	if (mdia->in_sample_buffer_alloc<sample->dataLength) {
		mdia->in_sample_buffer_alloc = sample->dataLength;
		mdia->in_sample_buffer = gf_realloc(mdia->in_sample_buffer, sample->dataLength);
	}
	memcpy(mdia->in_sample_buffer, sample->data, sample->dataLength);

	if (!mdia->nalu_parser) {
		mdia->nalu_parser = gf_bs_new(mdia->in_sample_buffer, sample->dataLength, GF_BITSTREAM_READ);
		if (!mdia->nalu_parser && sample->data) return GF_ISOM_INVALID_FILE;
	} else {
		e = gf_bs_reassign_buffer(mdia->nalu_parser, mdia->in_sample_buffer, sample->dataLength);
		if (e) return e;
	}
	//setup output
	if (!mdia->nalu_out_bs) {
		u8 *output;
		u32 outSize;
		mdia->nalu_out_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
		gf_bs_get_content(mdia->nalu_out_bs, &output, &outSize);
	}
	gf_bs_reassign_buffer(mdia->nalu_out_bs,
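/* the rewrite happens in place over sample->data; the final buffer is retrieved once rewriting completes */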
sample->data, sample->alloc_size ? sample->alloc_size : sample->dataLength);

	/*rewrite start code with NALU delim*/
	if (rewrite_start_codes) {
		//we are SVC, don't write NALU delim, only insert VDRD NALU
		if (insert_vdrd_code) {
			if (is_hevc) {
				//spec is not clear here, we don't insert a NALU AU delimiter before the layer starts since it breaks openHEVC
				// insert_nalu_delim=0;
			} else {
				gf_bs_write_int(mdia->nalu_out_bs, 1, 32);
				gf_bs_write_int(mdia->nalu_out_bs, GF_AVC_NALU_VDRD , 8);
				insert_nalu_delim=0;
			}
		}

		//AVC/HEVC base, insert NALU delim
		if (insert_nalu_delim) {
			gf_bs_write_int(mdia->nalu_out_bs, 1, 32);
			if (is_hevc) {
#ifndef GPAC_DISABLE_HEVC
				gf_bs_write_int(mdia->nalu_out_bs, 0, 1);
				gf_bs_write_int(mdia->nalu_out_bs, GF_HEVC_NALU_ACCESS_UNIT, 6);
				gf_bs_write_int(mdia->nalu_out_bs, insert_vdrd_code ? 1 : 0, 6); //we should pick the layerID of the following nalus ...
				gf_bs_write_int(mdia->nalu_out_bs, 1, 3); //nuh_temporal_id_plus1 - cannot be 0, we use 1 by default, and overwrite it if needed at the end

				/*pic-type - by default we signal all slice types possible*/
				gf_bs_write_int(mdia->nalu_out_bs, 2, 3);
				gf_bs_write_int(mdia->nalu_out_bs, 0, 5);
#endif
			} else {
				gf_bs_write_int(mdia->nalu_out_bs, (sample->data[0] & 0x60) | GF_AVC_NALU_ACCESS_UNIT, 8);
				gf_bs_write_int(mdia->nalu_out_bs, 0xF0 , 8); /*7 "all supported NALUs" (=111) + rbsp trailing (10000)*/;
			}
		}
	}

	if (rewrite_ps) {
		Bool has_vps = GF_FALSE;
		//in inspect mode or single-layer mode just use the xPS from this layer
		if (extractor_mode == GF_ISOM_NALU_EXTRACT_DEFAULT) {
			if (scal) {
				for (i=0; i<scal->trackIDCount; i++) {
					GF_TrackBox *a_track = GetTrackbyID(mdia->mediaTrack->moov, scal->trackIDs[i]);
					GF_MPEGVisualSampleEntryBox *an_entry = NULL;
					if (a_track && a_track->Media && a_track->Media->information && a_track->Media->information->sampleTable && a_track->Media->information->sampleTable->SampleDescription)
						an_entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(a_track->Media->information->sampleTable->SampleDescription->child_boxes, 0);

					if (an_entry)
						nalu_merge_ps(mdia->nalu_ps_bs, rewrite_start_codes, nal_unit_size_field, an_entry, is_hevc, &has_vps);
				}
			}
		}
		nalu_merge_ps(mdia->nalu_ps_bs, rewrite_start_codes, nal_unit_size_field, entry, is_hevc, &has_vps);

		if (is_hevc) {
			/*little optimization if we are not asked to rewrite start codes: copy over the sample*/
			if (!rewrite_start_codes && !entry->lhvc_config && !scal) {
				if (!ps_transfered) {
					nal_type = (sample->data[nal_unit_size_field] & 0x7E) >> 1;
					//temp fix - if we detect xPS in the beginning of the sample do NOT copy the ps bitstream
					//this is not correct since we are not sure whether they are the same xPS or not, but it crashes openHEVC ...
					switch (nal_type) {
#ifndef GPAC_DISABLE_HEVC
					case GF_HEVC_NALU_VID_PARAM:
					case GF_HEVC_NALU_SEQ_PARAM:
					case GF_HEVC_NALU_PIC_PARAM:
						break;
#endif
					default:
						gf_bs_transfer(mdia->nalu_out_bs, mdia->nalu_ps_bs, GF_TRUE);
						break;
					}
				}
				gf_bs_write_data(mdia->nalu_out_bs, mdia->in_sample_buffer, sample->dataLength);
				gf_bs_get_content_no_truncate(mdia->nalu_out_bs, &sample->data, &sample->dataLength, &sample->alloc_size);
				return GF_OK;
			}
		}
	} else {
		ps_transfered = GF_TRUE;
	}

	/*little optimization if we are not asked to rewrite extractors or start codes: copy over the sample*/
	if (!scal && !rewrite_start_codes && !rewrite_ps && !force_sei_inspect) {
		if (!ps_transfered) {
			gf_bs_transfer(mdia->nalu_out_bs, mdia->nalu_ps_bs, GF_TRUE);
		}
		gf_bs_write_data(mdia->nalu_out_bs, mdia->in_sample_buffer, sample->dataLength);
		gf_bs_get_content_no_truncate(mdia->nalu_out_bs, &sample->data, &sample->dataLength, &sample->alloc_size);
		return GF_OK;
	}

	if (!mdia->tmp_nal_copy_buffer) {
		mdia->tmp_nal_copy_buffer = gf_malloc(sizeof(char) * 4096);
		mdia->tmp_nal_copy_buffer_alloc = 4096;
	}

	while (gf_bs_available(mdia->nalu_parser)) {
		nal_size = gf_bs_read_int(mdia->nalu_parser, 8*nal_unit_size_field);
		if (gf_bs_get_position(mdia->nalu_parser) + nal_size > sample->dataLength) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("Sample %u (size %u) rewrite: corrupted NAL Unit (size %u)\n", sampleNumber, sample->dataLength, nal_size));
			goto exit;
		}
		if (nal_size > mdia->tmp_nal_copy_buffer_alloc) {
			mdia->tmp_nal_copy_buffer_alloc = nal_size;
			mdia->tmp_nal_copy_buffer = (char*) gf_realloc(mdia->tmp_nal_copy_buffer, sizeof(char)*nal_size);
		}
		if (is_hevc) {
			nal_hdr = gf_bs_read_u16(mdia->nalu_parser);
			nal_type = (nal_hdr&0x7E00) >> 9;
		} else {
			nal_hdr = gf_bs_read_u8(mdia->nalu_parser);
			nal_type = nal_hdr & 0x1F;
		}

		if (is_hevc) {
#ifndef GPAC_DISABLE_HEVC
			GF_BitStream *write_to_bs = mdia->nalu_out_bs;
#endif
			if (!ps_transfered) {
				gf_bs_transfer(mdia->nalu_out_bs, mdia->nalu_ps_bs, GF_TRUE);
				ps_transfered = GF_TRUE;
			}

#ifndef GPAC_DISABLE_HEVC
			/*we already wrote this stuff*/
			if (nal_type==GF_HEVC_NALU_ACCESS_UNIT) {
				gf_bs_skip_bytes(mdia->nalu_parser, nal_size-2);
				continue;
			}
			switch (nal_type) {
			//extractor
			case 49:
				e = process_extractor(file, mdia, sampleNumber, sample->DTS, nal_size, nal_hdr, nal_unit_size_field, GF_TRUE, rewrite_ps, rewrite_start_codes, extractor_mode);
				if (e) goto exit;
				break;

			case GF_HEVC_NALU_SLICE_TSA_N:
			case GF_HEVC_NALU_SLICE_STSA_N:
			case GF_HEVC_NALU_SLICE_TSA_R:
			case GF_HEVC_NALU_SLICE_STSA_R:
				if (temporal_id < (nal_hdr & 0x7))
					temporal_id = (nal_hdr & 0x7);
				/*rewrite nal*/
				gf_bs_read_data(mdia->nalu_parser, mdia->tmp_nal_copy_buffer, nal_size-2);
				if (rewrite_start_codes)
					gf_bs_write_u32(mdia->nalu_out_bs, 1);
				else
					gf_bs_write_int(mdia->nalu_out_bs, nal_size, 8*nal_unit_size_field);

				gf_bs_write_u16(mdia->nalu_out_bs, nal_hdr);
				gf_bs_write_data(mdia->nalu_out_bs, mdia->tmp_nal_copy_buffer, nal_size-2);
				break;

			case GF_HEVC_NALU_SLICE_BLA_W_LP:
			case GF_HEVC_NALU_SLICE_BLA_W_DLP:
			case GF_HEVC_NALU_SLICE_BLA_N_LP:
			case GF_HEVC_NALU_SLICE_IDR_W_DLP:
			case GF_HEVC_NALU_SLICE_IDR_N_LP:
			case GF_HEVC_NALU_SLICE_CRA:
				//insert xPS before CRA/BLA
				if (check_cra_bla && !sample->IsRAP) {
					sample->IsRAP = sap_type_from_nal_type(nal_type);
					if (sei_suffix_bs) gf_bs_del(sei_suffix_bs);
					return gf_isom_nalu_sample_rewrite(mdia, sample, sampleNumber, entry);
				}
				//fall through to the default rewrite when the sample is already a RAP
			default:
				/*rewrite nal*/
				if (nal_size<2) {
					GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid nal size %d in sample %d\n", nal_size, sampleNumber));
					e = GF_NON_COMPLIANT_BITSTREAM;
					goto exit;
				}
				gf_bs_read_data(mdia->nalu_parser, mdia->tmp_nal_copy_buffer, nal_size-2);

				if (nal_type==GF_HEVC_NALU_SEI_SUFFIX) {
					if (!sei_suffix_bs) sei_suffix_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
					write_to_bs = sei_suffix_bs;
				}
				if (rewrite_start_codes)
					gf_bs_write_u32(write_to_bs, 1);
				else
					gf_bs_write_int(write_to_bs, nal_size, 8*nal_unit_size_field);

				gf_bs_write_u16(write_to_bs, nal_hdr);
				gf_bs_write_data(write_to_bs, mdia->tmp_nal_copy_buffer, nal_size-2);
			}
#endif

			//done with HEVC
			continue;
		}

		switch(nal_type) {
		case GF_AVC_NALU_ACCESS_UNIT:
			/*we already wrote this stuff*/
			gf_bs_skip_bytes(mdia->nalu_parser, nal_size-1);
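			/* the 1-byte AVC NAL header was already consumed, only nal_size-1 payload bytes remain */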
			continue;
		//extractor
		case 31:
			e = process_extractor(file, mdia, sampleNumber, sample->DTS, nal_size, nal_hdr, nal_unit_size_field, GF_FALSE, rewrite_ps, rewrite_start_codes, extractor_mode);
			if (e) goto exit;
			break;
//		case GF_AVC_NALU_SEI:
		case GF_AVC_NALU_SEQ_PARAM:
		case GF_AVC_NALU_PIC_PARAM:
		case GF_AVC_NALU_SEQ_PARAM_EXT:
		case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
			// we will rewrite the sps/pps if and only if there is no sps/pps in bitstream
			if (!ps_transfered) {
				ps_transfered = GF_TRUE;
			}
		default:
			if (!ps_transfered) {
				gf_bs_transfer(mdia->nalu_out_bs, mdia->nalu_ps_bs, GF_TRUE);
				ps_transfered = GF_TRUE;
			}
			gf_bs_read_data(mdia->nalu_parser, mdia->tmp_nal_copy_buffer, nal_size-1);
			if (rewrite_start_codes)
				gf_bs_write_u32(mdia->nalu_out_bs, 1);
			else
				gf_bs_write_int(mdia->nalu_out_bs, nal_size, 8*nal_unit_size_field);

			gf_bs_write_u8(mdia->nalu_out_bs, nal_hdr);
			gf_bs_write_data(mdia->nalu_out_bs, mdia->tmp_nal_copy_buffer, nal_size-1);
		}
	}
	if (sei_suffix_bs) {
		gf_bs_transfer(mdia->nalu_out_bs, sei_suffix_bs, GF_FALSE);
	}
	/*done*/
	gf_bs_get_content_no_truncate(mdia->nalu_out_bs, &sample->data, &sample->dataLength, &sample->alloc_size);

	/*rewrite temporal ID of AU delim NALU (first one)*/
	if (rewrite_start_codes && is_hevc && temporal_id) {
		sample->data[6] = (sample->data[6] & 0xF8) | (temporal_id+1);
	}

exit:
	if (sei_suffix_bs)
		gf_bs_del(sei_suffix_bs);

	return e;
}

GF_HEVCConfig *HEVC_DuplicateConfig(GF_HEVCConfig *cfg)
{
	u8 *data;
	u32 data_size;
	GF_HEVCConfig *new_cfg;
	GF_BitStream *bs;

	if (!cfg) return NULL;
	bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);

	gf_odf_hevc_cfg_write_bs(cfg, bs);
	gf_bs_get_content(bs, &data, &data_size);
	gf_bs_del(bs);

	bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ);
	new_cfg = gf_odf_hevc_cfg_read_bs(bs, cfg->is_lhvc);
	new_cfg->is_lhvc = cfg->is_lhvc;
	gf_bs_del(bs);

	gf_free(data);
	return new_cfg;
}

GF_VVCConfig *VVC_DuplicateConfig(GF_VVCConfig *cfg)
{
	u8 *data;
	u32 data_size;
	GF_VVCConfig *new_cfg;
	GF_BitStream *bs;

	if (!cfg) return NULL;
	bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);

	gf_odf_vvc_cfg_write_bs(cfg, bs);
	gf_bs_get_content(bs, &data, &data_size);
	gf_bs_del(bs);

	bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ);
	new_cfg = gf_odf_vvc_cfg_read_bs(bs);
	gf_bs_del(bs);

	gf_free(data);
	return new_cfg;
}

static GF_AVCConfig *AVC_DuplicateConfig(GF_AVCConfig *cfg)
{
	u32 i, count;
	GF_NALUFFParam *p1, *p2;
	GF_AVCConfig *cfg_new;
	if (!cfg) return NULL;
	cfg_new = gf_odf_avc_cfg_new();
	cfg_new->AVCLevelIndication = cfg->AVCLevelIndication;
	cfg_new->AVCProfileIndication = cfg->AVCProfileIndication;
	cfg_new->configurationVersion = cfg->configurationVersion;
	cfg_new->nal_unit_size = cfg->nal_unit_size;
	cfg_new->profile_compatibility = cfg->profile_compatibility;
	cfg_new->complete_representation = cfg->complete_representation;
	cfg_new->chroma_bit_depth = cfg->chroma_bit_depth;
	cfg_new->luma_bit_depth = cfg->luma_bit_depth;
	cfg_new->chroma_format = cfg->chroma_format;

	count = gf_list_count(cfg->sequenceParameterSets);
	for (i=0; i<count; i++) {
		p1 = (GF_NALUFFParam*)gf_list_get(cfg->sequenceParameterSets, i);
		p2 = (GF_NALUFFParam*)gf_malloc(sizeof(GF_NALUFFParam));
		p2->size = p1->size;
		p2->id = p1->id;
		p2->data = (char *)gf_malloc(sizeof(char)*p1->size);
		memcpy(p2->data, p1->data, sizeof(char)*p1->size);
		gf_list_add(cfg_new->sequenceParameterSets, p2);
	}

	count = gf_list_count(cfg->pictureParameterSets);
	for (i=0; i<count; i++) {
		p1 = (GF_NALUFFParam*)gf_list_get(cfg->pictureParameterSets, i);
		p2 = (GF_NALUFFParam*)gf_malloc(sizeof(GF_NALUFFParam));
		p2->size = p1->size;
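		/* deep-copy the PPS payload so the duplicated config owns its own buffers */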
p2->id = p1->id; p2->data = (char*)gf_malloc(sizeof(char)*p1->size); memcpy(p2->data, p1->data, sizeof(char)*p1->size); gf_list_add(cfg_new->pictureParameterSets, p2); } if (cfg->sequenceParameterSetExtensions) { cfg_new->sequenceParameterSetExtensions = gf_list_new(); count = gf_list_count(cfg->sequenceParameterSetExtensions); for (i=0; i<count; i++) { p1 = (GF_NALUFFParam*)gf_list_get(cfg->sequenceParameterSetExtensions, i); p2 = (GF_NALUFFParam*)gf_malloc(sizeof(GF_NALUFFParam)); p2->size = p1->size; p2->id = p1->id; p2->data = (char*)gf_malloc(sizeof(char)*p1->size); memcpy(p2->data, p1->data, sizeof(char)*p1->size); gf_list_add(cfg_new->sequenceParameterSetExtensions, p2); } } return cfg_new; } static void merge_avc_config(GF_AVCConfig *dst_cfg, GF_AVCConfig *src_cfg) { GF_AVCConfig *cfg; if (!src_cfg || !dst_cfg) return; cfg = AVC_DuplicateConfig(src_cfg); if (!cfg) return; while (gf_list_count(cfg->sequenceParameterSets)) { GF_NALUFFParam *p = (GF_NALUFFParam*)gf_list_get(cfg->sequenceParameterSets, 0); gf_list_rem(cfg->sequenceParameterSets, 0); gf_list_insert(dst_cfg->sequenceParameterSets, p, 0); } while (gf_list_count(cfg->pictureParameterSets)) { GF_NALUFFParam *p = (GF_NALUFFParam*)gf_list_get(cfg->pictureParameterSets, 0); gf_list_rem(cfg->pictureParameterSets, 0); gf_list_insert(dst_cfg->pictureParameterSets, p, 0); } gf_odf_avc_cfg_del(cfg); } void merge_hevc_config(GF_HEVCConfig *dst_cfg, GF_HEVCConfig *src_cfg, Bool force_insert) { GF_HEVCConfig *cfg = HEVC_DuplicateConfig(src_cfg); //merge all xPS u32 i, j, count = cfg->param_array ? gf_list_count(cfg->param_array) : 0; for (i=0; i<count; i++) { GF_NALUFFParamArray *ar_h = NULL; u32 count2 = dst_cfg->param_array ? gf_list_count(dst_cfg->param_array) : 0; GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(cfg->param_array, i); for (j=0; j<count2; j++) { ar_h = (GF_NALUFFParamArray*)gf_list_get(dst_cfg->param_array, j); if (ar_h->type==ar->type) { break; } ar_h = NULL; } if (!ar_h) { gf_list_add(dst_cfg->param_array, ar); gf_list_rem(cfg->param_array, i); count--; i--; } else { while (gf_list_count(ar->nalus)) { GF_NALUFFParam *p = (GF_NALUFFParam*)gf_list_get(ar->nalus, 0); gf_list_rem(ar->nalus, 0); if (force_insert) gf_list_insert(ar_h->nalus, p, 0); else gf_list_add(ar_h->nalus, p); } } } gf_odf_hevc_cfg_del(cfg); #define CHECK_CODE(__code) if (dst_cfg->__code < src_cfg->__code) dst_cfg->__code = src_cfg->__code; CHECK_CODE(configurationVersion) CHECK_CODE(profile_idc) CHECK_CODE(profile_space) CHECK_CODE(tier_flag) CHECK_CODE(general_profile_compatibility_flags) CHECK_CODE(progressive_source_flag) CHECK_CODE(interlaced_source_flag) CHECK_CODE(constraint_indicator_flags) CHECK_CODE(level_idc) CHECK_CODE(min_spatial_segmentation_idc) } void merge_all_config(GF_AVCConfig *avc_cfg, GF_HEVCConfig *hevc_cfg, GF_MediaBox *mdia) { u32 i; GF_TrackReferenceTypeBox *scal = NULL; Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_SCAL, &scal); if (!scal) return; for (i=0; i<scal->trackIDCount; i++) { GF_TrackBox *a_track = GetTrackbyID(mdia->mediaTrack->moov, scal->trackIDs[i]); GF_MPEGVisualSampleEntryBox *an_entry = NULL; if (a_track && a_track->Media && a_track->Media->information && a_track->Media->information->sampleTable && a_track->Media->information->sampleTable->SampleDescription) an_entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(a_track->Media->information->sampleTable->SampleDescription->child_boxes, 0); if (!an_entry) continue; if (avc_cfg && an_entry->svc_config && an_entry->svc_config->config) 
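/* fold this layer's SVC parameter sets into the aggregated AVC config */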
			merge_avc_config(avc_cfg, an_entry->svc_config->config);
		if (avc_cfg && an_entry->mvc_config && an_entry->mvc_config->config)
			merge_avc_config(avc_cfg, an_entry->mvc_config->config);
		if (avc_cfg && an_entry->avc_config && an_entry->avc_config->config)
			merge_avc_config(avc_cfg, an_entry->avc_config->config);
		if (hevc_cfg && an_entry->lhvc_config && an_entry->lhvc_config->config)
			merge_hevc_config(hevc_cfg, an_entry->lhvc_config->config, GF_TRUE);
		if (hevc_cfg && an_entry->hevc_config && an_entry->hevc_config->config)
			merge_hevc_config(hevc_cfg, an_entry->hevc_config->config, GF_TRUE);
	}

	if (hevc_cfg) hevc_cfg->is_lhvc = GF_FALSE;
}

void AVC_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *avc, GF_MediaBox *mdia)
{
	GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)avc, GF_FALSE);

	if (avc->emul_esd) gf_odf_desc_del((GF_Descriptor *)avc->emul_esd);
	avc->emul_esd = gf_odf_desc_esd_new(2);
	avc->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	/*AVC OTI is 0x21, AVC parameter set stream OTI (not supported in gpac) is 0x22, SVC OTI is 0x24*/
	/*if we have only an SVC (resp. MVC) stream, set objectTypeIndication to the SVC (resp. MVC) OTI; otherwise use the AVC OTI*/
	if (avc->svc_config && !avc->avc_config)
		avc->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_SVC;
	else if (avc->mvc_config && !avc->avc_config)
		avc->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_MVC;
	else
		avc->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_AVC;

	if (btrt) {
		avc->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB;
		avc->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate;
		avc->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate;
	}
	GF_MPEG4ExtensionDescriptorsBox *mdesc = (GF_MPEG4ExtensionDescriptorsBox *) gf_isom_box_find_child(avc->child_boxes, GF_ISOM_BOX_TYPE_M4DS);
	if (mdesc) {
		u32 i=0;
		GF_Descriptor *desc,*clone;
		while ((desc = (GF_Descriptor *)gf_list_enum(mdesc->descriptors, &i))) {
			clone = NULL;
			gf_odf_desc_copy(desc, &clone);
			if (gf_odf_desc_add_desc((GF_Descriptor *)avc->emul_esd, clone) != GF_OK)
				gf_odf_desc_del(clone);
		}
	}
	if (avc->avc_config) {
		GF_AVCConfig *avcc = avc->avc_config->config ?
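		/* always work on a duplicate so the config stored in the sample entry is left untouched */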
AVC_DuplicateConfig(avc->avc_config->config) : NULL; /*merge SVC config*/ if (avc->svc_config) { merge_avc_config(avcc, avc->svc_config->config); } /*merge MVC config*/ if (avc->mvc_config) { merge_avc_config(avcc, avc->mvc_config->config); } if (avcc) { if (mdia) merge_all_config(avcc, NULL, mdia); gf_odf_avc_cfg_write(avcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength); gf_odf_avc_cfg_del(avcc); } } else if (avc->svc_config) { GF_AVCConfig *svcc = AVC_DuplicateConfig(avc->svc_config->config); if (mdia) merge_all_config(svcc, NULL, mdia); gf_odf_avc_cfg_write(svcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength); gf_odf_avc_cfg_del(svcc); } else if (avc->mvc_config) { GF_AVCConfig *mvcc = AVC_DuplicateConfig(avc->mvc_config->config); if (mdia) merge_all_config(mvcc, NULL, mdia); gf_odf_avc_cfg_write(mvcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength); gf_odf_avc_cfg_del(mvcc); } } void AVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *avc) { AVC_RewriteESDescriptorEx(avc, NULL); } void HEVC_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *hevc, GF_MediaBox *mdia) { GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)hevc, GF_FALSE); if (hevc->emul_esd) gf_odf_desc_del((GF_Descriptor *)hevc->emul_esd); hevc->emul_esd = gf_odf_desc_esd_new(2); hevc->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL; hevc->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_HEVC; if (btrt) { hevc->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB; hevc->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate; hevc->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate; } GF_MPEG4ExtensionDescriptorsBox *mdesc = (GF_MPEG4ExtensionDescriptorsBox *) gf_isom_box_find_child(hevc->child_boxes, GF_ISOM_BOX_TYPE_M4DS); if (mdesc) { u32 i=0; GF_Descriptor *desc,*clone; i=0; while ((desc = (GF_Descriptor *)gf_list_enum(mdesc->descriptors, &i))) { clone = NULL; gf_odf_desc_copy(desc, &clone); if (gf_odf_desc_add_desc((GF_Descriptor *)hevc->emul_esd, clone) != GF_OK) gf_odf_desc_del(clone); } } if (hevc->hevc_config || hevc->lhvc_config) { GF_HEVCConfig *hcfg = HEVC_DuplicateConfig(hevc->hevc_config ? 
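/* prefer the HEVC config, falling back to the L-HEVC one for layered-only entries */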
hevc->hevc_config->config : hevc->lhvc_config->config); if (hevc->hevc_config && hevc->lhvc_config) { //merge LHVC config to HEVC conf, so we add entry rather than insert merge_hevc_config(hcfg, hevc->lhvc_config->config, GF_FALSE); } if (mdia) merge_all_config(NULL, hcfg, mdia); if (hcfg) { if (mdia && ((mdia->mediaTrack->extractor_mode&0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT)) { hcfg->is_lhvc=GF_FALSE; } gf_odf_hevc_cfg_write(hcfg, &hevc->emul_esd->decoderConfig->decoderSpecificInfo->data, &hevc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength); gf_odf_hevc_cfg_del(hcfg); } } } void HEVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *hevc) { HEVC_RewriteESDescriptorEx(hevc, NULL); } GF_Err AVC_HEVC_UpdateESD(GF_MPEGVisualSampleEntryBox *avc, GF_ESD *esd) { GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)avc, GF_TRUE); GF_MPEG4ExtensionDescriptorsBox *mdesc = (GF_MPEG4ExtensionDescriptorsBox *) gf_isom_box_find_child(avc->child_boxes, GF_ISOM_BOX_TYPE_M4DS); if (mdesc) { gf_isom_box_del_parent(&avc->child_boxes, (GF_Box *) mdesc); } btrt->avgBitrate = esd->decoderConfig->avgBitrate; btrt->maxBitrate = esd->decoderConfig->maxBitrate; btrt->bufferSizeDB = esd->decoderConfig->bufferSizeDB; if (gf_list_count(esd->IPIDataSet) || gf_list_count(esd->IPMPDescriptorPointers) || esd->langDesc || gf_list_count(esd->extensionDescriptors) || esd->ipiPtr || esd->qos || esd->RegDescriptor) { mdesc = (GF_MPEG4ExtensionDescriptorsBox *) gf_isom_box_new_parent(&avc->child_boxes, GF_ISOM_BOX_TYPE_M4DS); if (!mdesc) return GF_OUT_OF_MEM; if (esd->RegDescriptor) { gf_list_add(mdesc->descriptors, esd->RegDescriptor); esd->RegDescriptor = NULL; } if (esd->qos) { gf_list_add(mdesc->descriptors, esd->qos); esd->qos = NULL; } if (esd->ipiPtr) { gf_list_add(mdesc->descriptors, esd->ipiPtr); esd->ipiPtr= NULL; } while (gf_list_count(esd->IPIDataSet)) { GF_Descriptor *desc = (GF_Descriptor *)gf_list_get(esd->IPIDataSet, 0); gf_list_rem(esd->IPIDataSet, 0); gf_list_add(mdesc->descriptors, desc); } while (gf_list_count(esd->IPMPDescriptorPointers)) { GF_Descriptor *desc = (GF_Descriptor *)gf_list_get(esd->IPMPDescriptorPointers, 0); gf_list_rem(esd->IPMPDescriptorPointers, 0); gf_list_add(mdesc->descriptors, desc); } if (esd->langDesc) { gf_list_add(mdesc->descriptors, esd->langDesc); esd->langDesc = NULL; } while (gf_list_count(esd->extensionDescriptors)) { GF_Descriptor *desc = (GF_Descriptor *)gf_list_get(esd->extensionDescriptors, 0); gf_list_rem(esd->extensionDescriptors, 0); gf_list_add(mdesc->descriptors, desc); } } if (!avc->lhvc_config && (esd->decoderConfig->objectTypeIndication==GF_CODECID_HEVC)) { if (!avc->hevc_config) { avc->hevc_config = (GF_HEVCConfigurationBox *)gf_isom_box_new_parent(&avc->child_boxes, GF_ISOM_BOX_TYPE_HVCC); if (!avc->hevc_config) return GF_OUT_OF_MEM; } if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) { if (avc->hevc_config->config) gf_odf_hevc_cfg_del(avc->hevc_config->config); avc->hevc_config->config = gf_odf_hevc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, GF_FALSE); } } else if (!avc->svc_config && !avc->mvc_config && (esd->decoderConfig->objectTypeIndication==GF_CODECID_AVC)) { if (!avc->avc_config) { avc->avc_config = (GF_AVCConfigurationBox *)gf_isom_box_new_parent(&avc->child_boxes, GF_ISOM_BOX_TYPE_AVCC); if (!avc->avc_config) return GF_OUT_OF_MEM; } if (esd->decoderConfig->decoderSpecificInfo && 
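/* only replace the stored config when the ESD actually carries decoder specific info */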
esd->decoderConfig->decoderSpecificInfo->data) {
			if (avc->avc_config->config) gf_odf_avc_cfg_del(avc->avc_config->config);
			avc->avc_config->config = gf_odf_avc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
		}
	}
	gf_odf_desc_del((GF_Descriptor *)esd);
	if (avc->hevc_config) {
		HEVC_RewriteESDescriptor(avc);
	} else {
		AVC_RewriteESDescriptor(avc);
	}
	return GF_OK;
}

#if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC)
void gf_hevc_parse_ps(GF_HEVCConfig* hevccfg, HEVCState* hevc, u32 nal_type)
{
	u32 i, j;
	if (!hevccfg) return;

	for (i = 0; i < gf_list_count(hevccfg->param_array); i++) {
		GF_NALUFFParamArray* ar = gf_list_get(hevccfg->param_array, i);
		if (ar->type != nal_type) continue;
		for (j = 0; j < gf_list_count(ar->nalus); j++) {
			u8 ntype, tid, lid;
			GF_NALUFFParam* sl = gf_list_get(ar->nalus, j);
			gf_hevc_parse_nalu(sl->data, sl->size, hevc, &ntype, &tid, &lid);
		}
	}
}
#endif

static GF_Err gf_isom_check_mvc(GF_ISOFile *the_file, GF_TrackBox *trak, GF_MPEGVisualSampleEntryBox *entry)
{
	u32 i;
	GF_Box *mvci;
	GF_MultiviewGroupBox *mvcg;
	GF_ViewIdentifierBox *vwid;

	if (entry->mvc_config) {}
	else if (entry->avc_config && entry->avc_config->config && entry->avc_config->config->sequenceParameterSetExtensions) {}
	else return GF_OK;

	mvci = gf_isom_box_find_child(trak->Media->information->child_boxes, GF_ISOM_BOX_TYPE_MVCI);
	if (!mvci) {
		mvci = gf_isom_box_new_parent(&trak->Media->information->child_boxes, GF_ISOM_BOX_TYPE_MVCI);
		if (!mvci) return GF_OUT_OF_MEM;
	}
	mvcg = (GF_MultiviewGroupBox *) gf_isom_box_find_child(mvci->child_boxes, GF_ISOM_BOX_TYPE_MVCG);
	if (!mvcg) {
		mvcg = (GF_MultiviewGroupBox *)gf_isom_box_new_parent(&mvci->child_boxes, GF_ISOM_BOX_TYPE_MVCG);
		if (!mvcg) return GF_OUT_OF_MEM;
	}
	//this is very crude, we should try to parse the bitstream to fill these
	mvcg->num_entries = 0;
	if (mvcg->entries) {
		gf_free(mvcg->entries);
		mvcg->entries = NULL;
	}
	if (entry->avc_config) {
		if (gf_list_count(entry->avc_config->config->sequenceParameterSets))
			mvcg->num_entries += 1;
		mvcg->num_entries += gf_list_count(entry->avc_config->config->sequenceParameterSetExtensions);
	}
	if (entry->mvc_config && entry->mvc_config->config) {
		mvcg->num_entries += gf_list_count(entry->mvc_config->config->sequenceParameterSets);
	}
	mvcg->entries = gf_malloc(sizeof(MVCIEntry)*mvcg->num_entries);
	if (!mvcg->entries) return GF_OUT_OF_MEM;
	memset(mvcg->entries, 0, sizeof(MVCIEntry)*mvcg->num_entries);
	for (i=0; i<mvcg->num_entries; i++) {
		mvcg->entries[i].entry_type = 2;
		mvcg->entries[i].output_view_id = i;
	}
	vwid = (GF_ViewIdentifierBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_VWID);
	if (!vwid) {
		vwid = (GF_ViewIdentifierBox *)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_VWID);
		if (!vwid) return GF_OUT_OF_MEM;
	}
	if (vwid->views) gf_free(vwid->views);
	vwid->num_views = mvcg->num_entries;
	vwid->views = gf_malloc(sizeof(ViewIDEntry)*vwid->num_views);
	if (!vwid->views) return GF_OUT_OF_MEM;
	memset(vwid->views, 0, sizeof(ViewIDEntry)*vwid->num_views);

	for (i=0; i<vwid->num_views; i++) {
		vwid->views[i].base_view_type = i ?
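/* the first view (i==0) is flagged as the base view */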
0 : 1; vwid->views[i].view_id = i; vwid->views[i].view_order_index = i; } return GF_OK; } static GF_AV1Config* AV1_DuplicateConfig(GF_AV1Config const * const cfg) { u32 i = 0; GF_AV1Config *out = gf_malloc(sizeof(GF_AV1Config)); out->marker = cfg->marker; out->version = cfg->version; out->seq_profile = cfg->seq_profile; out->seq_level_idx_0 = cfg->seq_level_idx_0; out->seq_tier_0 = cfg->seq_tier_0; out->high_bitdepth = cfg->high_bitdepth; out->twelve_bit = cfg->twelve_bit; out->monochrome = cfg->monochrome; out->chroma_subsampling_x = cfg->chroma_subsampling_x; out->chroma_subsampling_y = cfg->chroma_subsampling_y; out->chroma_sample_position = cfg->chroma_sample_position; out->initial_presentation_delay_present = cfg->initial_presentation_delay_present; out->initial_presentation_delay_minus_one = cfg->initial_presentation_delay_minus_one; out->obu_array = gf_list_new(); for (i = 0; i<gf_list_count(cfg->obu_array); ++i) { GF_AV1_OBUArrayEntry *dst = gf_malloc(sizeof(GF_AV1_OBUArrayEntry)), *src = gf_list_get(cfg->obu_array, i); dst->obu_length = src->obu_length; dst->obu_type = src->obu_type; dst->obu = gf_malloc((size_t)dst->obu_length); memcpy(dst->obu, src->obu, (size_t)src->obu_length); gf_list_add(out->obu_array, dst); } return out; } void AV1_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *av1, GF_MediaBox *mdia) { GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)av1, GF_FALSE); if (av1->emul_esd) gf_odf_desc_del((GF_Descriptor *)av1->emul_esd); av1->emul_esd = gf_odf_desc_esd_new(2); av1->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL; av1->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_AV1; if (btrt) { av1->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB; av1->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate; av1->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate; } if (av1->av1_config) { GF_AV1Config *av1_cfg = AV1_DuplicateConfig(av1->av1_config->config); if (av1_cfg) { gf_odf_av1_cfg_write(av1_cfg, &av1->emul_esd->decoderConfig->decoderSpecificInfo->data, &av1->emul_esd->decoderConfig->decoderSpecificInfo->dataLength); gf_odf_av1_cfg_del(av1_cfg); } } } void AV1_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *av1) { AV1_RewriteESDescriptorEx(av1, NULL); } static GF_VPConfig* VP_DuplicateConfig(GF_VPConfig const * const cfg) { GF_VPConfig *out = gf_odf_vp_cfg_new(); if (out) { out->profile = cfg->profile; out->level = cfg->level; out->bit_depth = cfg->bit_depth; out->chroma_subsampling = cfg->chroma_subsampling; out->video_fullRange_flag = cfg->video_fullRange_flag; out->colour_primaries = cfg->colour_primaries; out->transfer_characteristics = cfg->transfer_characteristics; out->matrix_coefficients = cfg->matrix_coefficients; } return out; } void VP9_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *vp9, GF_MediaBox *mdia) { GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)vp9, GF_FALSE); if (vp9->emul_esd) gf_odf_desc_del((GF_Descriptor *)vp9->emul_esd); vp9->emul_esd = gf_odf_desc_esd_new(2); vp9->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL; if (vp9->type == GF_ISOM_BOX_TYPE_VP08) vp9->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_VP8; else vp9->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_VP9; if (btrt) { vp9->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB; vp9->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate; vp9->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate; } if (vp9->vp_config) { GF_VPConfig *vp9_cfg = 
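/* clone the stored VP config before serializing it into the emulated ESD */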
VP_DuplicateConfig(vp9->vp_config->config); if (vp9_cfg) { gf_odf_vp_cfg_write(vp9_cfg, &vp9->emul_esd->decoderConfig->decoderSpecificInfo->data, &vp9->emul_esd->decoderConfig->decoderSpecificInfo->dataLength, GF_FALSE); gf_odf_vp_cfg_del(vp9_cfg); } } } void VP9_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *vp9) { VP9_RewriteESDescriptorEx(vp9, NULL); } static GF_DOVIDecoderConfigurationRecord* DOVI_DuplicateConfig(GF_DOVIDecoderConfigurationRecord *cfg) { GF_DOVIDecoderConfigurationRecord* out = NULL; GF_SAFEALLOC(out, GF_DOVIDecoderConfigurationRecord); if (!out) return NULL; out->dv_version_major = cfg->dv_version_major; out->dv_version_minor = cfg->dv_version_minor; out->dv_profile = cfg->dv_profile; out->dv_level = cfg->dv_level; out->rpu_present_flag = cfg->rpu_present_flag; out->el_present_flag = cfg->el_present_flag; out->bl_present_flag = cfg->bl_present_flag; return out; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_EXPORT GF_Err gf_isom_avc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { GF_TrackBox *trak; GF_Err e; GF_SampleDescriptionBox *stsd; u32 dataRefIndex; GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; } if (!the_file->keep_utc && !gf_sys_is_test_mode() ) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_AVC1); if (!entry) return GF_OUT_OF_MEM; *outDescriptionIndex = gf_list_count(stsd->child_boxes); entry->avc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_AVCC); if (!entry->avc_config) return GF_OUT_OF_MEM; entry->avc_config->config = AVC_DuplicateConfig(cfg); if (!entry->avc_config->config) return GF_OUT_OF_MEM; entry->dataReferenceIndex = dataRefIndex; AVC_RewriteESDescriptor(entry); return e; } static GF_Err gf_isom_avc_config_update_ex(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_AVCConfig *cfg, u32 op_type, Bool keep_xps) { GF_TrackBox *trak; GF_Err e; u32 i; GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return GF_BAD_PARAM; entry = (GF_MPEGVisualSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return GF_BAD_PARAM; switch (entry->type) { case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: break; default: return GF_BAD_PARAM; } switch (op_type) { /*AVCC replacement*/ case 0: if (!cfg) return GF_BAD_PARAM; if (!entry->avc_config) { entry->avc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_AVCC); if 
(!entry->avc_config) return GF_OUT_OF_MEM; } if (entry->avc_config->config) gf_odf_avc_cfg_del(entry->avc_config->config); entry->avc_config->config = AVC_DuplicateConfig(cfg); if (!entry->avc_config->config) return GF_OUT_OF_MEM; entry->type = GF_ISOM_BOX_TYPE_AVC1; break; /*SVCC replacement*/ case 1: if (!cfg) return GF_BAD_PARAM; if (!entry->svc_config) { entry->svc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_SVCC); if (!entry->svc_config) return GF_OUT_OF_MEM; } if (entry->svc_config->config) gf_odf_avc_cfg_del(entry->svc_config->config); entry->svc_config->config = AVC_DuplicateConfig(cfg); if (!entry->svc_config->config) return GF_OUT_OF_MEM; entry->type = GF_ISOM_BOX_TYPE_AVC1; break; /*SVCC replacement and AVC removal*/ case 2: if (!cfg) return GF_BAD_PARAM; if (entry->avc_config) { gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->avc_config); entry->avc_config = NULL; } if (!entry->svc_config) { entry->svc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_SVCC); if (!entry->svc_config) return GF_OUT_OF_MEM; } if (entry->svc_config->config) gf_odf_avc_cfg_del(entry->svc_config->config); entry->svc_config->config = AVC_DuplicateConfig(cfg); if (!entry->svc_config->config) return GF_OUT_OF_MEM; entry->type = GF_ISOM_BOX_TYPE_SVC1; break; /*AVCC removal and switch to avc3*/ case 3: if (!entry->avc_config || !entry->avc_config->config) return GF_BAD_PARAM; if (!keep_xps) { for (i=0; i<3; i++) { GF_AVCConfigurationBox *a_cfg = entry->avc_config; if (i==1) a_cfg = entry->svc_config; else if (i==2) a_cfg = entry->mvc_config; if (!a_cfg) continue; while (gf_list_count(a_cfg->config->sequenceParameterSets)) { GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(a_cfg->config->sequenceParameterSets, 0); gf_list_rem(a_cfg->config->sequenceParameterSets, 0); if (sl->data) gf_free(sl->data); gf_free(sl); } while (gf_list_count(a_cfg->config->pictureParameterSets)) { GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(a_cfg->config->pictureParameterSets, 0); gf_list_rem(a_cfg->config->pictureParameterSets, 0); if (sl->data) gf_free(sl->data); gf_free(sl); } while (gf_list_count(a_cfg->config->sequenceParameterSetExtensions)) { GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(a_cfg->config->sequenceParameterSetExtensions, 0); gf_list_rem(a_cfg->config->sequenceParameterSetExtensions, 0); if (sl->data) gf_free(sl->data); gf_free(sl); } } } if (entry->type == GF_ISOM_BOX_TYPE_AVC1) entry->type = GF_ISOM_BOX_TYPE_AVC3; else if (entry->type == GF_ISOM_BOX_TYPE_AVC2) entry->type = GF_ISOM_BOX_TYPE_AVC4; break; /*MVCC replacement*/ case 4: if (!cfg) return GF_BAD_PARAM; if (!entry->mvc_config) { entry->mvc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_MVCC); if (!entry->mvc_config) return GF_OUT_OF_MEM; } if (entry->mvc_config->config) gf_odf_avc_cfg_del(entry->mvc_config->config); entry->mvc_config->config = AVC_DuplicateConfig(cfg); if (!entry->mvc_config->config) return GF_OUT_OF_MEM; entry->type = GF_ISOM_BOX_TYPE_AVC1; e = gf_isom_check_mvc(the_file, trak, entry); if (e) return e; break; /*MVCC replacement and AVC removal*/ case 5: if (!cfg) return GF_BAD_PARAM; if (entry->avc_config) { gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->avc_config); entry->avc_config = NULL; } if (!entry->mvc_config) { entry->mvc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_MVCC); if (!entry->mvc_config) 
return GF_OUT_OF_MEM; } if (entry->mvc_config->config) gf_odf_avc_cfg_del(entry->mvc_config->config); entry->mvc_config->config = AVC_DuplicateConfig(cfg); if (!entry->mvc_config->config) return GF_OUT_OF_MEM; entry->type = GF_ISOM_BOX_TYPE_MVC1; e = gf_isom_check_mvc(the_file, trak, entry); if (e) return e; break; } AVC_RewriteESDescriptor(entry); return GF_OK; } GF_EXPORT GF_Err gf_isom_avc_set_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, Bool keep_xps) { return gf_isom_avc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, 3, keep_xps); } GF_EXPORT GF_Err gf_isom_avc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_AVCConfig *cfg) { return gf_isom_avc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, 0, GF_FALSE); } GF_EXPORT GF_Err gf_isom_svc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_AVCConfig *cfg, Bool is_add) { return gf_isom_avc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, is_add ? 1 : 2, GF_FALSE); } GF_Err gf_isom_mvc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_AVCConfig *cfg, Bool is_add) { return gf_isom_avc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, is_add ? 4 : 5, GF_FALSE); } static GF_Err gf_isom_svc_mvc_config_del(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, Bool is_mvc) { GF_TrackBox *trak; GF_Err e; GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return GF_BAD_PARAM; entry = (GF_MPEGVisualSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return GF_BAD_PARAM; switch (entry->type) { case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: break; default: return GF_BAD_PARAM; } if (is_mvc && entry->mvc_config) { gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->mvc_config); entry->mvc_config = NULL; } else if (!is_mvc && entry->svc_config) { gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->svc_config); entry->svc_config = NULL; } AVC_RewriteESDescriptor(entry); return GF_OK; } GF_Err gf_isom_svc_config_del(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { return gf_isom_svc_mvc_config_del(the_file, trackNumber, DescriptionIndex, GF_FALSE); } GF_Err gf_isom_mvc_config_del(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { return gf_isom_svc_mvc_config_del(the_file, trackNumber, DescriptionIndex, GF_TRUE); } static GF_Err gf_isom_svc_mvc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AVCConfig *cfg, Bool is_mvc, char *URLname, char *URNname, u32 *outDescriptionIndex) { GF_TrackBox *trak; GF_Err e; u32 dataRefIndex; GF_SampleDescriptionBox *stsd; GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, URLname, URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, URLname, URNname, &dataRefIndex); if (e) return e; } if 
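/* refresh the media modification time unless UTC preservation is requested */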
(!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry if (is_mvc) { entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_MVC1); if (!entry) return GF_OUT_OF_MEM; entry->mvc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_MVCC); if (!entry->mvc_config) return GF_OUT_OF_MEM; entry->mvc_config->config = AVC_DuplicateConfig(cfg); if (!entry->mvc_config->config) return GF_OUT_OF_MEM; } else { entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes,GF_ISOM_BOX_TYPE_SVC1); if (!entry) return GF_OUT_OF_MEM; entry->svc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes,GF_ISOM_BOX_TYPE_SVCC); if (!entry->svc_config) return GF_OUT_OF_MEM; entry->svc_config->config = AVC_DuplicateConfig(cfg); if (!entry->svc_config->config) return GF_OUT_OF_MEM; } entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); AVC_RewriteESDescriptor(entry); return e; } GF_EXPORT GF_Err gf_isom_svc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { return gf_isom_svc_mvc_config_new(the_file, trackNumber, cfg, GF_FALSE, (char *) URLname, (char *) URNname,outDescriptionIndex); } GF_EXPORT GF_Err gf_isom_mvc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { return gf_isom_svc_mvc_config_new(the_file, trackNumber, cfg, GF_TRUE, (char *) URLname, (char *) URNname,outDescriptionIndex); } GF_EXPORT GF_Err gf_isom_hevc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_HEVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { GF_TrackBox *trak; GF_Err e; u32 dataRefIndex; GF_SampleDescriptionBox *stsd; GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; } if (!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_HVC1); if (!entry) return GF_OUT_OF_MEM; entry->hevc_config = (GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_HVCC); if (!entry->hevc_config) return GF_OUT_OF_MEM; entry->hevc_config->config = HEVC_DuplicateConfig(cfg); if (!entry->hevc_config->config) return GF_OUT_OF_MEM; entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); HEVC_RewriteESDescriptor(entry); return e; } GF_EXPORT GF_Err gf_isom_vvc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_VVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { GF_TrackBox *trak; GF_Err e; u32 dataRefIndex; GF_SampleDescriptionBox *stsd; 
GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; } if (!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_VVC1); if (!entry) return GF_OUT_OF_MEM; entry->vvc_config = (GF_VVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_VVCC); if (!entry->vvc_config) return GF_OUT_OF_MEM; entry->vvc_config->config = VVC_DuplicateConfig(cfg); if (!entry->vvc_config->config) return GF_OUT_OF_MEM; entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); return e; } GF_EXPORT GF_Err gf_isom_vp_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_VPConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex, u32 vpx_type) { GF_TrackBox *trak; GF_Err e; u32 dataRefIndex; GF_SampleDescriptionBox *stsd; GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; } if (!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry entry = (GF_MPEGVisualSampleEntryBox *)gf_isom_box_new_parent(&stsd->child_boxes, vpx_type); if (!entry) return GF_OUT_OF_MEM; entry->vp_config = (GF_VPConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_VPCC); if (!entry->vp_config) return GF_OUT_OF_MEM; entry->vp_config->config = VP_DuplicateConfig(cfg); if (!entry->vp_config->config) return GF_OUT_OF_MEM; strncpy(entry->compressor_name, "\012VPC Coding", sizeof(entry->compressor_name)-1); entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); return e; } GF_EXPORT GF_Err gf_isom_av1_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AV1Config *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { GF_TrackBox *trak; GF_Err e; u32 dataRefIndex; GF_MPEGVisualSampleEntryBox *entry; GF_SampleDescriptionBox *stsd; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, 
trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; } if (!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry entry = (GF_MPEGVisualSampleEntryBox *)gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_AV01); if (!entry) return GF_OUT_OF_MEM; entry->av1_config = (GF_AV1ConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_AV1C); if (!entry->av1_config) return GF_OUT_OF_MEM; entry->av1_config->config = AV1_DuplicateConfig(cfg); if (!entry->av1_config->config) return GF_OUT_OF_MEM; entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); return e; } typedef enum { GF_ISOM_HVCC_UPDATE = 0, GF_ISOM_HVCC_SET_INBAND, GF_ISOM_HVCC_SET_TILE, GF_ISOM_HVCC_SET_TILE_BASE_TRACK, GF_ISOM_HVCC_SET_LHVC, GF_ISOM_HVCC_SET_LHVC_WITH_BASE, GF_ISOM_HVCC_SET_LHVC_WITH_BASE_BACKWARD, GF_ISOM_HVCC_SET_HEVC_TILE_BASE, GF_ISOM_LHCC_SET_INBAND } HevcConfigUpdateType; static Bool nalu_cleanup_config(GF_List *param_array, Bool set_inband, Bool keep_xps) { u32 i; Bool array_incomplete = set_inband; if (!param_array) return 0; for (i=0; i<gf_list_count(param_array); i++) { GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(param_array, i); /*we want to force inband signaling*/ if (set_inband) { ar->array_completeness = 0; if (keep_xps) { array_incomplete=1; continue; } while (gf_list_count(ar->nalus)) { GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(ar->nalus, 0); gf_list_rem(ar->nalus, 0); if (sl->data) gf_free(sl->data); gf_free(sl); } gf_list_del(ar->nalus); gf_free(ar); ar=NULL; gf_list_rem(param_array, i); i--; continue; } if (ar && !ar->array_completeness) array_incomplete = 1; } return array_incomplete; } static GF_Err gf_isom_hevc_config_update_ex(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_HEVCConfig *cfg, u32 operand_type, Bool keep_xps) { u32 array_incomplete; GF_TrackBox *trak; GF_Err e; GF_MPEGVisualSampleEntryBox *entry; GF_SampleDescriptionBox *stsd; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return GF_BAD_PARAM; stsd = trak->Media->information->sampleTable->SampleDescription; entry = (GF_MPEGVisualSampleEntryBox *)gf_list_get(stsd->child_boxes, DescriptionIndex-1); if (!entry) return GF_BAD_PARAM; switch (entry->type) { case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_LHV1: case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_HVT1: break; default: return GF_BAD_PARAM; } if (operand_type == GF_ISOM_HVCC_SET_TILE_BASE_TRACK) { if (entry->type==GF_ISOM_BOX_TYPE_HVC1) entry->type = GF_ISOM_BOX_TYPE_HVC2; else if (entry->type==GF_ISOM_BOX_TYPE_HEV1) entry->type = GF_ISOM_BOX_TYPE_HEV2; } else if (operand_type == GF_ISOM_HVCC_SET_TILE) { if (!entry->hevc_config) entry->hevc_config = (GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_HVCC); if (!entry->hevc_config) return GF_OUT_OF_MEM; if (entry->hevc_config->config) gf_odf_hevc_cfg_del(entry->hevc_config->config); entry->hevc_config->config = NULL; entry->type = GF_ISOM_BOX_TYPE_HVT1; } else if (operand_type < GF_ISOM_HVCC_SET_LHVC) { if ((operand_type != GF_ISOM_HVCC_SET_INBAND) && !entry->hevc_config) { entry->hevc_config = 
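/* lazily created: an update with an explicit config needs an hvcC box, while inband signaling leaves it absent */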
(GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_HVCC); if (!entry->hevc_config) return GF_OUT_OF_MEM; } if (cfg) { if (entry->hevc_config->config) gf_odf_hevc_cfg_del(entry->hevc_config->config); entry->hevc_config->config = HEVC_DuplicateConfig(cfg); } else { operand_type=GF_ISOM_HVCC_SET_INBAND; } array_incomplete = (operand_type==GF_ISOM_HVCC_SET_INBAND) ? 1 : 0; if (entry->hevc_config && nalu_cleanup_config(entry->hevc_config->config ? entry->hevc_config->config->param_array : NULL, (operand_type==GF_ISOM_HVCC_SET_INBAND) ? GF_TRUE:GF_FALSE, keep_xps) ) { array_incomplete=1; } if (entry->lhvc_config && nalu_cleanup_config(entry->lhvc_config->config ? entry->lhvc_config->config->param_array : NULL, (operand_type==GF_ISOM_HVCC_SET_INBAND), keep_xps) ) array_incomplete=1; switch (entry->type) { case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC1: entry->type = array_incomplete ? GF_ISOM_BOX_TYPE_HEV1 : GF_ISOM_BOX_TYPE_HVC1; break; case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_HVC2: entry->type = array_incomplete ? GF_ISOM_BOX_TYPE_HEV2 : GF_ISOM_BOX_TYPE_HVC2; break; case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_LHV1: entry->type = array_incomplete ? GF_ISOM_BOX_TYPE_LHE1 : GF_ISOM_BOX_TYPE_LHV1; break; } } else { /*SVCC replacement/removal with HEVC base, backward compatible signaling*/ if ((operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE_BACKWARD) || (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE) || (operand_type==GF_ISOM_HVCC_SET_HEVC_TILE_BASE) ) { if (!entry->hevc_config) return GF_BAD_PARAM; if (!cfg) { if (entry->lhvc_config) { gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->lhvc_config); entry->lhvc_config = NULL; } if (entry->type==GF_ISOM_BOX_TYPE_LHE1) entry->type = (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE) ? GF_ISOM_BOX_TYPE_HEV2 : GF_ISOM_BOX_TYPE_HEV1; else if (entry->type==GF_ISOM_BOX_TYPE_HEV1) entry->type = (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE) ? GF_ISOM_BOX_TYPE_HEV2 : GF_ISOM_BOX_TYPE_HEV1; else entry->type = (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE) ? 
GF_ISOM_BOX_TYPE_HVC2 : GF_ISOM_BOX_TYPE_HVC1; } else { if (operand_type != GF_ISOM_HVCC_SET_HEVC_TILE_BASE) { if (!entry->lhvc_config) { entry->lhvc_config = (GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_LHVC); if (!entry->lhvc_config) return GF_OUT_OF_MEM; } if (entry->lhvc_config->config) gf_odf_hevc_cfg_del(entry->lhvc_config->config); entry->lhvc_config->config = HEVC_DuplicateConfig(cfg); if (!entry->lhvc_config->config) return GF_OUT_OF_MEM; } if (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE_BACKWARD) { if (entry->type==GF_ISOM_BOX_TYPE_HEV2) entry->type = GF_ISOM_BOX_TYPE_HEV1; else entry->type = GF_ISOM_BOX_TYPE_HVC1; } else { if (entry->type==GF_ISOM_BOX_TYPE_HEV1) entry->type = GF_ISOM_BOX_TYPE_HEV2; else entry->type = GF_ISOM_BOX_TYPE_HVC2; } } } /*LHEVC track without base*/ else if (operand_type==GF_ISOM_HVCC_SET_LHVC) { if (entry->hevc_config) { gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->hevc_config); entry->hevc_config=NULL; } if (!cfg) return GF_BAD_PARAM; if (!entry->lhvc_config) { entry->lhvc_config = (GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_LHVC); if (!entry->lhvc_config) return GF_OUT_OF_MEM; } if (entry->lhvc_config->config) gf_odf_hevc_cfg_del(entry->lhvc_config->config); entry->lhvc_config->config = HEVC_DuplicateConfig(cfg); if (!entry->lhvc_config->config) return GF_OUT_OF_MEM; if ((entry->type==GF_ISOM_BOX_TYPE_HEV1) || (entry->type==GF_ISOM_BOX_TYPE_HEV2)) entry->type = GF_ISOM_BOX_TYPE_LHE1; else entry->type = GF_ISOM_BOX_TYPE_LHV1; } /*LHEVC inband, no config change*/ else if (operand_type==GF_ISOM_LHCC_SET_INBAND) { entry->type = GF_ISOM_BOX_TYPE_LHE1; } } HEVC_RewriteESDescriptor(entry); return GF_OK; } GF_EXPORT GF_Err gf_isom_hevc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_HEVCConfig *cfg) { return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_UPDATE, GF_FALSE); } GF_EXPORT GF_Err gf_isom_hevc_set_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, Bool keep_xps) { return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, GF_ISOM_HVCC_SET_INBAND, keep_xps); } GF_EXPORT GF_Err gf_isom_lhvc_force_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, GF_ISOM_LHCC_SET_INBAND, GF_FALSE); } GF_EXPORT GF_Err gf_isom_hevc_set_tile_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_HEVCConfig *cfg, Bool is_base_track) { return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, is_base_track ? 
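/* base tracks are retyped to hvc2/hev2 by the update; plain tile tracks become hvt1 with an empty config */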
GF_ISOM_HVCC_SET_TILE_BASE_TRACK : GF_ISOM_HVCC_SET_TILE, GF_FALSE); } GF_EXPORT GF_Err gf_isom_lhvc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_HEVCConfig *cfg, GF_ISOMLHEVCTrackType track_type) { if (cfg) cfg->is_lhvc = GF_TRUE; switch (track_type) { case GF_ISOM_LEHVC_ONLY: return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_LHVC, GF_FALSE); case GF_ISOM_LEHVC_WITH_BASE: return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_LHVC_WITH_BASE, GF_FALSE); case GF_ISOM_LEHVC_WITH_BASE_BACKWARD: return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_LHVC_WITH_BASE_BACKWARD, GF_FALSE); case GF_ISOM_HEVC_TILE_BASE: return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_HEVC_TILE_BASE, GF_FALSE); default: return GF_BAD_PARAM; } } typedef enum { GF_ISOM_VVCC_UPDATE = 0, GF_ISOM_VVCC_SET_INBAND, } VvcConfigUpdateType; static GF_Err gf_isom_vvc_config_update_ex(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_VVCConfig *cfg, u32 operand_type, Bool keep_xps) { u32 array_incomplete; GF_TrackBox *trak; GF_Err e; GF_MPEGVisualSampleEntryBox *entry; GF_SampleDescriptionBox *stsd; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return GF_BAD_PARAM; stsd = trak->Media->information->sampleTable->SampleDescription; entry = (GF_MPEGVisualSampleEntryBox *)gf_list_get(stsd->child_boxes, DescriptionIndex-1); if (!entry) return GF_BAD_PARAM; switch (entry->type) { case GF_ISOM_BOX_TYPE_VVC1: case GF_ISOM_BOX_TYPE_VVI1: break; default: return GF_BAD_PARAM; } if (operand_type <= GF_ISOM_VVCC_SET_INBAND) { if ((operand_type != GF_ISOM_VVCC_SET_INBAND) && !entry->vvc_config) { entry->vvc_config = (GF_VVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_VVCC); if (!entry->vvc_config) return GF_OUT_OF_MEM; } if (cfg) { if (entry->vvc_config->config) gf_odf_vvc_cfg_del(entry->vvc_config->config); entry->vvc_config->config = VVC_DuplicateConfig(cfg); } else { operand_type = GF_ISOM_VVCC_SET_INBAND; } array_incomplete = (operand_type==GF_ISOM_VVCC_SET_INBAND) ? 1 : 0; if (entry->vvc_config && nalu_cleanup_config(entry->vvc_config->config ? entry->vvc_config->config->param_array : NULL, (operand_type==GF_ISOM_VVCC_SET_INBAND), keep_xps) ) { array_incomplete=1; } switch (entry->type) { case GF_ISOM_BOX_TYPE_VVC1: case GF_ISOM_BOX_TYPE_VVI1: entry->type = array_incomplete ? 
GF_ISOM_BOX_TYPE_VVI1 : GF_ISOM_BOX_TYPE_VVC1; break; } } return GF_OK; } GF_EXPORT GF_Err gf_isom_vvc_set_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, Bool keep_xps) { return gf_isom_vvc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, GF_ISOM_VVCC_SET_INBAND, keep_xps); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_EXPORT GF_Box *gf_isom_clone_config_box(GF_Box *box) { u8 *data=NULL; u32 size=0; GF_Err e; GF_Box *clone=NULL; GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); e = gf_isom_box_size(box); if (!e) e = gf_isom_box_write(box, bs); gf_bs_get_content(bs, &data, &size); gf_bs_del(bs); if (!e) { bs = gf_bs_new(data, size, GF_BITSTREAM_READ); e = gf_isom_box_parse(&clone, bs); gf_bs_del(bs); } if (data) gf_free(data); if (e) { if (clone) gf_isom_box_del(clone); clone = NULL; } return clone; } GF_EXPORT GF_AVCConfig *gf_isom_avc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_avc_svc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_AVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->avc_config) return NULL; return AVC_DuplicateConfig(entry->avc_config->config); } GF_EXPORT GF_HEVCConfig *gf_isom_hevc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; if (gf_isom_get_reference_count(the_file, trackNumber, GF_ISOM_REF_TBAS)) { u32 ref_track; GF_Err e = gf_isom_get_reference(the_file, trackNumber, GF_ISOM_REF_TBAS, 1, &ref_track); if (e == GF_OK) { trackNumber = ref_track; } } trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_hevc_lhvc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_HEVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->hevc_config) return NULL; return HEVC_DuplicateConfig(entry->hevc_config->config); } GF_EXPORT GF_ISOMVVCType gf_isom_get_vvc_type(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { u32 type; GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return GF_ISOM_VVCTYPE_NONE; if (!gf_isom_is_video_handler_type(trak->Media->handler->handlerType)) return GF_ISOM_VVCTYPE_NONE; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return GF_ISOM_VVCTYPE_NONE; type = entry->type; if (type == GF_ISOM_BOX_TYPE_ENCV) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) type = sinf->original_format->data_format; } else if (type == GF_ISOM_BOX_TYPE_RESV) { if (entry->rinf && entry->rinf->original_format) type = entry->rinf->original_format->data_format; } switch (type) { case GF_ISOM_BOX_TYPE_VVC1: case GF_ISOM_BOX_TYPE_VVI1: return GF_ISOM_VVCTYPE_ONLY; default: return GF_ISOM_VVCTYPE_NONE; } return 
GF_ISOM_VVCTYPE_NONE; } GF_EXPORT GF_VVCConfig *gf_isom_vvc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; /*todo, add support for subpic track and nvcl tracks*/ trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_vvc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_VVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->vvc_config) return NULL; return VVC_DuplicateConfig(entry->vvc_config->config); } GF_EXPORT GF_AVCConfig *gf_isom_svc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_avc_svc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_AVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->svc_config) return NULL; return AVC_DuplicateConfig(entry->svc_config->config); } GF_EXPORT GF_AVCConfig *gf_isom_mvc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_avc_svc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_AVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->mvc_config) return NULL; return AVC_DuplicateConfig(entry->mvc_config->config); } GF_EXPORT GF_AV1Config *gf_isom_av1_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; if (gf_isom_get_reference_count(the_file, trackNumber, GF_ISOM_REF_TBAS)) { u32 ref_track; GF_Err e = gf_isom_get_reference(the_file, trackNumber, GF_ISOM_REF_TBAS, 1, &ref_track); if (e == GF_OK) { trackNumber = ref_track; } } trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex - 1); if (!entry || !entry->av1_config) return NULL; return AV1_DuplicateConfig(entry->av1_config->config); } GF_EXPORT GF_VPConfig *gf_isom_vp_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex - 1); if (!entry || !entry->vp_config) return NULL; return VP_DuplicateConfig(entry->vp_config->config); } GF_EXPORT GF_DOVIDecoderConfigurationRecord *gf_isom_dovi_config_get(GF_ISOFile* the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox* trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, 
trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex - 1); if (!entry || !entry->dovi_config) return NULL; return DOVI_DuplicateConfig(&entry->dovi_config->DOVIConfig); } GF_EXPORT GF_ISOMAVCType gf_isom_get_avc_svc_type(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { u32 type; GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !trak->Media->handler || !DescriptionIndex) return GF_ISOM_AVCTYPE_NONE; if (!gf_isom_is_video_handler_type(trak->Media->handler->handlerType)) return GF_ISOM_AVCTYPE_NONE; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return GF_ISOM_AVCTYPE_NONE; type = entry->type; if (type == GF_ISOM_BOX_TYPE_ENCV) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) type = sinf->original_format->data_format; } else if (type == GF_ISOM_BOX_TYPE_RESV) { if (entry->rinf && entry->rinf->original_format) type = entry->rinf->original_format->data_format; } switch (type) { case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: break; default: return GF_ISOM_AVCTYPE_NONE; } if (entry->avc_config && !entry->svc_config && !entry->mvc_config) return GF_ISOM_AVCTYPE_AVC_ONLY; if (entry->avc_config && entry->svc_config) return GF_ISOM_AVCTYPE_AVC_SVC; if (entry->avc_config && entry->mvc_config) return GF_ISOM_AVCTYPE_AVC_MVC; if (!entry->avc_config && entry->svc_config) return GF_ISOM_AVCTYPE_SVC_ONLY; if (!entry->avc_config && entry->mvc_config) return GF_ISOM_AVCTYPE_MVC_ONLY; return GF_ISOM_AVCTYPE_NONE; } GF_EXPORT GF_ISOMHEVCType gf_isom_get_hevc_lhvc_type(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { u32 type; GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return GF_ISOM_HEVCTYPE_NONE; if (!gf_isom_is_video_handler_type(trak->Media->handler->handlerType)) return GF_ISOM_HEVCTYPE_NONE; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return GF_ISOM_HEVCTYPE_NONE; type = entry->type; if (type == GF_ISOM_BOX_TYPE_ENCV) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) type = sinf->original_format->data_format; } else if (type == GF_ISOM_BOX_TYPE_RESV) { if (entry->rinf && entry->rinf->original_format) type = entry->rinf->original_format->data_format; } if (type == GF_ISOM_BOX_TYPE_DVHE) { type = GF_ISOM_BOX_TYPE_HEV1; } switch (type) { case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_LHV1: case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_HVT1: break; default: return GF_ISOM_HEVCTYPE_NONE; } if (entry->hevc_config && !entry->lhvc_config) return GF_ISOM_HEVCTYPE_HEVC_ONLY; if (entry->hevc_config && entry->lhvc_config) return GF_ISOM_HEVCTYPE_HEVC_LHVC; if 
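/* an lhvC without an hvcC marks a layered-HEVC-only sample description */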
(!entry->hevc_config && entry->lhvc_config) return GF_ISOM_HEVCTYPE_LHVC_ONLY; return GF_ISOM_HEVCTYPE_NONE; } GF_EXPORT GF_HEVCConfig *gf_isom_lhvc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_HEVCConfig *lhvc; GF_OperatingPointsInformation *oinf=NULL; GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_hevc_lhvc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_HEVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->lhvc_config) return NULL; lhvc = HEVC_DuplicateConfig(entry->lhvc_config->config); if (!lhvc) return NULL; gf_isom_get_oinf_info(the_file, trackNumber, &oinf); if (oinf) { LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_last(oinf->profile_tier_levels); if (ptl) { lhvc->profile_space = ptl->general_profile_space; lhvc->tier_flag = ptl->general_tier_flag; lhvc->profile_idc = ptl->general_profile_idc; lhvc->general_profile_compatibility_flags = ptl->general_profile_compatibility_flags; lhvc->constraint_indicator_flags = ptl->general_constraint_indicator_flags; } } return lhvc; } void btrt_box_del(GF_Box *s) { GF_BitRateBox *ptr = (GF_BitRateBox *)s; if (ptr) gf_free(ptr); } GF_Err btrt_box_read(GF_Box *s, GF_BitStream *bs) { GF_BitRateBox *ptr = (GF_BitRateBox *)s; ISOM_DECREASE_SIZE(ptr, 12) ptr->bufferSizeDB = gf_bs_read_u32(bs); ptr->maxBitrate = gf_bs_read_u32(bs); ptr->avgBitrate = gf_bs_read_u32(bs); return GF_OK; } GF_Box *btrt_box_new() { GF_BitRateBox *tmp = (GF_BitRateBox *) gf_malloc(sizeof(GF_BitRateBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_BitRateBox)); tmp->type = GF_ISOM_BOX_TYPE_BTRT; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err btrt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_BitRateBox *ptr = (GF_BitRateBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->bufferSizeDB); gf_bs_write_u32(bs, ptr->maxBitrate); gf_bs_write_u32(bs, ptr->avgBitrate); return GF_OK; } GF_Err btrt_box_size(GF_Box *s) { GF_BitRateBox *ptr = (GF_BitRateBox *)s; ptr->size += 12; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void m4ds_box_del(GF_Box *s) { GF_MPEG4ExtensionDescriptorsBox *ptr = (GF_MPEG4ExtensionDescriptorsBox *)s; gf_odf_desc_list_del(ptr->descriptors); gf_list_del(ptr->descriptors); gf_free(ptr); } GF_Err m4ds_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; char *enc_od; GF_MPEG4ExtensionDescriptorsBox *ptr = (GF_MPEG4ExtensionDescriptorsBox *)s; u32 od_size = (u32) ptr->size; if (!od_size) return GF_OK; enc_od = (char *)gf_malloc(sizeof(char) * od_size); gf_bs_read_data(bs, enc_od, od_size); e = gf_odf_desc_list_read((char *)enc_od, od_size, ptr->descriptors); gf_free(enc_od); return e; } GF_Box *m4ds_box_new() { GF_MPEG4ExtensionDescriptorsBox *tmp = (GF_MPEG4ExtensionDescriptorsBox *) gf_malloc(sizeof(GF_MPEG4ExtensionDescriptorsBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_MPEG4ExtensionDescriptorsBox)); tmp->type = GF_ISOM_BOX_TYPE_M4DS; tmp->descriptors = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err m4ds_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u8 *enc_ods; u32 enc_od_size; GF_MPEG4ExtensionDescriptorsBox *ptr = (GF_MPEG4ExtensionDescriptorsBox *) s; if 
(!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; enc_ods = NULL; enc_od_size = 0; e = gf_odf_desc_list_write(ptr->descriptors, &enc_ods, &enc_od_size); if (e) return e; if (enc_od_size) { gf_bs_write_data(bs, enc_ods, enc_od_size); gf_free(enc_ods); } return GF_OK; } GF_Err m4ds_box_size(GF_Box *s) { GF_Err e; u32 descSize = 0; GF_MPEG4ExtensionDescriptorsBox *ptr = (GF_MPEG4ExtensionDescriptorsBox *)s; e = gf_odf_desc_list_size(ptr->descriptors, &descSize); ptr->size += descSize; return e; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void avcc_box_del(GF_Box *s) { GF_AVCConfigurationBox *ptr = (GF_AVCConfigurationBox *)s; if (ptr->config) gf_odf_avc_cfg_del(ptr->config); ptr->config = NULL; gf_free(ptr); } GF_Err avcc_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, count; GF_AVCConfigurationBox *ptr = (GF_AVCConfigurationBox *)s; if (ptr->config) gf_odf_avc_cfg_del(ptr->config); ptr->config = gf_odf_avc_cfg_new(); ISOM_DECREASE_SIZE(ptr, 7) //7 includes the 2 counts of sps and pps ptr->config->configurationVersion = gf_bs_read_u8(bs); ptr->config->AVCProfileIndication = gf_bs_read_u8(bs); ptr->config->profile_compatibility = gf_bs_read_u8(bs); ptr->config->AVCLevelIndication = gf_bs_read_u8(bs); if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { gf_bs_read_int(bs, 6); } else { ptr->config->complete_representation = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 5); } ptr->config->nal_unit_size = 1 + gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 3); count = gf_bs_read_int(bs, 5); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *) gf_malloc(sizeof(GF_NALUFFParam)); ISOM_DECREASE_SIZE(ptr, 2) sl->size = gf_bs_read_u16(bs); if (!sl->size || (gf_bs_available(bs) < sl->size) || (ptr->size < sl->size) ) { gf_free(sl); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("AVCC: Not enough bits to parse. Aborting.\n")); return GF_ISOM_INVALID_FILE; } sl->data = (char *)gf_malloc(sizeof(char) * sl->size); gf_bs_read_data(bs, sl->data, sl->size); gf_list_add(ptr->config->sequenceParameterSets, sl); ptr->size -= sl->size; } count = gf_bs_read_u8(bs); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_malloc(sizeof(GF_NALUFFParam)); ISOM_DECREASE_SIZE(ptr, 2) sl->size = gf_bs_read_u16(bs); if (!sl->size || (gf_bs_available(bs) < sl->size) || (ptr->size<sl->size)) { gf_free(sl); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("AVCC: Not enough bits to parse. Aborting.\n")); return GF_ISOM_INVALID_FILE; } sl->data = (char *)gf_malloc(sizeof(char) * sl->size); gf_bs_read_data(bs, sl->data, sl->size); gf_list_add(ptr->config->pictureParameterSets, sl); ptr->size -= sl->size; } if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { if (gf_avc_is_rext_profile(ptr->config->AVCProfileIndication)) { if (!ptr->size) { #ifndef GPAC_DISABLE_AV_PARSERS AVCState avc; s32 idx; GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(ptr->config->sequenceParameterSets, 0); idx = sl ? 
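/* no trailing rext bytes left in the box: recover chroma format and bit depths by parsing the first SPS, when the AV parsers are compiled in */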
gf_avc_read_sps(sl->data+1, sl->size-1, &avc, 0, NULL) : -1; if (idx>=0) { ptr->config->chroma_format = avc.sps[idx].chroma_format; ptr->config->luma_bit_depth = 8 + avc.sps[idx].luma_bit_depth_m8; ptr->config->chroma_bit_depth = 8 + avc.sps[idx].chroma_bit_depth_m8; } #else /*set default values ...*/ ptr->config->chroma_format = 1; ptr->config->luma_bit_depth = 8; ptr->config->chroma_bit_depth = 8; #endif return GF_OK; } ISOM_DECREASE_SIZE(ptr, 4) gf_bs_read_int(bs, 6); ptr->config->chroma_format = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 5); ptr->config->luma_bit_depth = 8 + gf_bs_read_int(bs, 3); gf_bs_read_int(bs, 5); ptr->config->chroma_bit_depth = 8 + gf_bs_read_int(bs, 3); count = gf_bs_read_int(bs, 8); if (count*2 > ptr->size) { //ffmpeg just ignores this part while allocating bytes (filled with garbage?) GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("AVCC: invalid numOfSequenceParameterSetExt value. Skipping.\n")); return GF_OK; } if (count) { ptr->config->sequenceParameterSetExtensions = gf_list_new(); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_malloc(sizeof(GF_NALUFFParam)); ISOM_DECREASE_SIZE(ptr, 2) sl->size = gf_bs_read_u16(bs); if ((gf_bs_available(bs) < sl->size) || (ptr->size<sl->size)) { gf_free(sl); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("AVCC: Not enough bits to parse. Aborting.\n")); return GF_ISOM_INVALID_FILE; } sl->data = (char *)gf_malloc(sizeof(char) * sl->size); gf_bs_read_data(bs, sl->data, sl->size); gf_list_add(ptr->config->sequenceParameterSetExtensions, sl); ptr->size -= sl->size; } } } } return GF_OK; } GF_Box *avcc_box_new() { GF_AVCConfigurationBox *tmp = (GF_AVCConfigurationBox *) gf_malloc(sizeof(GF_AVCConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_AVCConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_AVCC; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err avcc_box_write(GF_Box *s, GF_BitStream *bs) { u32 i, count; GF_Err e; GF_AVCConfigurationBox *ptr = (GF_AVCConfigurationBox *) s; if (!s) return GF_BAD_PARAM; if (!ptr->config) return GF_OK; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->config->configurationVersion); gf_bs_write_u8(bs, ptr->config->AVCProfileIndication); gf_bs_write_u8(bs, ptr->config->profile_compatibility); gf_bs_write_u8(bs, ptr->config->AVCLevelIndication); if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { gf_bs_write_int(bs, 0x3F, 6); } else { gf_bs_write_int(bs, ptr->config->complete_representation, 1); gf_bs_write_int(bs, 0x1F, 5); } gf_bs_write_int(bs, ptr->config->nal_unit_size - 1, 2); gf_bs_write_int(bs, 0x7, 3); count = gf_list_count(ptr->config->sequenceParameterSets); gf_bs_write_int(bs, count, 5); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *) gf_list_get(ptr->config->sequenceParameterSets, i); gf_bs_write_u16(bs, sl->size); gf_bs_write_data(bs, sl->data, sl->size); } count = gf_list_count(ptr->config->pictureParameterSets); gf_bs_write_u8(bs, count); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *) gf_list_get(ptr->config->pictureParameterSets, i); gf_bs_write_u16(bs, sl->size); gf_bs_write_data(bs, sl->data, sl->size); } if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { if (gf_avc_is_rext_profile(ptr->config->AVCProfileIndication)) { gf_bs_write_int(bs, 0xFF, 6); gf_bs_write_int(bs, ptr->config->chroma_format, 2); gf_bs_write_int(bs, 0xFF, 5); gf_bs_write_int(bs, ptr->config->luma_bit_depth - 8, 3); gf_bs_write_int(bs, 0xFF, 5); gf_bs_write_int(bs, ptr->config->chroma_bit_depth - 8, 3); count = 
ptr->config->sequenceParameterSetExtensions ? gf_list_count(ptr->config->sequenceParameterSetExtensions) : 0; gf_bs_write_u8(bs, count); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *) gf_list_get(ptr->config->sequenceParameterSetExtensions, i); gf_bs_write_u16(bs, sl->size); gf_bs_write_data(bs, sl->data, sl->size); } } } return GF_OK; } GF_Err avcc_box_size(GF_Box *s) { u32 i, count; GF_AVCConfigurationBox *ptr = (GF_AVCConfigurationBox *)s; if (!ptr->config) { ptr->size = 0; return GF_OK; } ptr->size += 7; count = gf_list_count(ptr->config->sequenceParameterSets); for (i=0; i<count; i++) ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ptr->config->sequenceParameterSets, i))->size; count = gf_list_count(ptr->config->pictureParameterSets); for (i=0; i<count; i++) ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ptr->config->pictureParameterSets, i))->size; if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { if (gf_avc_is_rext_profile(ptr->config->AVCProfileIndication)) { ptr->size += 4; count = ptr->config->sequenceParameterSetExtensions ?gf_list_count(ptr->config->sequenceParameterSetExtensions) : 0; for (i=0; i<count; i++) ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ptr->config->sequenceParameterSetExtensions, i))->size; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hvcc_box_del(GF_Box *s) { GF_HEVCConfigurationBox *ptr = (GF_HEVCConfigurationBox*)s; if (ptr->config) gf_odf_hevc_cfg_del(ptr->config); gf_free(ptr); } GF_Err hvcc_box_read(GF_Box *s, GF_BitStream *bs) { u64 consumed; GF_HEVCConfigurationBox *ptr = (GF_HEVCConfigurationBox *)s; if (ptr->config) gf_odf_hevc_cfg_del(ptr->config); consumed = gf_bs_get_position(bs); ptr->config = gf_odf_hevc_cfg_read_bs(bs, (s->type == GF_ISOM_BOX_TYPE_HVCC) ? GF_FALSE : GF_TRUE); consumed = gf_bs_get_position(bs) - consumed ; ISOM_DECREASE_SIZE(ptr, (u32)consumed) return ptr->config ? 
GF_OK : GF_ISOM_INVALID_FILE; } GF_Box *hvcc_box_new() { GF_HEVCConfigurationBox *tmp = (GF_HEVCConfigurationBox *) gf_malloc(sizeof(GF_HEVCConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_HEVCConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_HVCC; return (GF_Box *)tmp; }
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err hvcc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_HEVCConfigurationBox *ptr = (GF_HEVCConfigurationBox *) s; if (!s) return GF_BAD_PARAM; if (!ptr->config) return GF_OK; e = gf_isom_box_write_header(s, bs); if (e) return e; return gf_odf_hevc_cfg_write_bs(ptr->config, bs); } GF_Err hvcc_box_size(GF_Box *s) { u32 i, count, j, subcount; GF_HEVCConfigurationBox *ptr = (GF_HEVCConfigurationBox *)s; if (!ptr->config) { ptr->size = 0; return GF_OK; } if (!ptr->config->is_lhvc) ptr->size += 23; else ptr->size += 6; count = gf_list_count(ptr->config->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(ptr->config->param_array, i); ptr->size += 3; subcount = gf_list_count(ar->nalus); for (j=0; j<subcount; j++) { ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ar->nalus, j))->size; } } return GF_OK; }
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void vvcc_box_del(GF_Box *s) { GF_VVCConfigurationBox *ptr = (GF_VVCConfigurationBox*)s; if (ptr->config) gf_odf_vvc_cfg_del(ptr->config); gf_free(ptr); } GF_Err vvcc_box_read(GF_Box *s, GF_BitStream *bs) { u64 consumed; GF_VVCConfigurationBox *ptr = (GF_VVCConfigurationBox *)s; if (ptr->config) gf_odf_vvc_cfg_del(ptr->config); consumed = gf_bs_get_position(bs); ptr->config = gf_odf_vvc_cfg_read_bs(bs); consumed = gf_bs_get_position(bs) - consumed; ISOM_DECREASE_SIZE(ptr, (u32)consumed) return ptr->config ? GF_OK : GF_ISOM_INVALID_FILE; } GF_Box *vvcc_box_new() { GF_VVCConfigurationBox *tmp = (GF_VVCConfigurationBox *) gf_malloc(sizeof(GF_VVCConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_VVCConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_VVCC; return (GF_Box *)tmp; }
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err vvcc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_VVCConfigurationBox *ptr = (GF_VVCConfigurationBox *) s; if (!s) return GF_BAD_PARAM; if (!ptr->config) return GF_OK; e = gf_isom_box_write_header(s, bs); if (e) return e; return gf_odf_vvc_cfg_write_bs(ptr->config, bs); } GF_Err vvcc_box_size(GF_Box *s) { u32 i, count, j, subcount; GF_VVCConfigurationBox *ptr = (GF_VVCConfigurationBox *)s; if (!ptr->config) { ptr->size = 0; return GF_OK; } ptr->size += 6; if (ptr->config->ptl_present) { if (!ptr->config->general_constraint_info) ptr->config->num_constraint_info = 0; if (!ptr->config->sub_profiles_idc) ptr->config->num_sub_profiles = 0; ptr->size += 2 + 2 + ptr->config->num_constraint_info + 2 + ptr->config->num_sub_profiles*4; if (ptr->config->numTemporalLayers>1) ptr->size += 1; for (i=0; i<ptr->config->numTemporalLayers; i++) { if (ptr->config->ptl_sublayer_present_mask & (1<<i)) ptr->size+=1; } } count = gf_list_count(ptr->config->param_array); for (i=0; i<count; i++) { GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(ptr->config->param_array, i); ptr->size += 3; subcount = gf_list_count(ar->nalus); for (j=0; j<subcount; j++) { ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ar->nalus, j))->size; } } return GF_OK; }
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_Box *av1c_box_new() { GF_AV1ConfigurationBox *tmp = (GF_AV1ConfigurationBox *)gf_malloc(sizeof(GF_AV1ConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_AV1ConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_AV1C; return 
(GF_Box *)tmp; } void av1c_box_del(GF_Box *s) { GF_AV1ConfigurationBox *ptr = (GF_AV1ConfigurationBox*)s; if (ptr->config) gf_odf_av1_cfg_del(ptr->config); gf_free(ptr); } GF_Err av1c_box_read(GF_Box *s, GF_BitStream *bs) { u64 pos, read; GF_AV1ConfigurationBox *ptr = (GF_AV1ConfigurationBox*)s; if (ptr->config) gf_odf_av1_cfg_del(ptr->config); pos = gf_bs_get_position(bs); ptr->config = gf_odf_av1_cfg_read_bs_size(bs, (u32) ptr->size); read = gf_bs_get_position(bs) - pos; if (read < ptr->size) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[ISOBMFF] AV1ConfigurationBox: read only "LLU" bytes (expected "LLU").\n", read, ptr->size)); if (read > ptr->size) GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[ISOBMFF] AV1ConfigurationBox overflow read "LLU" bytes, of box size "LLU".\n", read, ptr->size)); return GF_OK; }
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err av1c_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_AV1ConfigurationBox *ptr = (GF_AV1ConfigurationBox*)s; if (!s) return GF_BAD_PARAM; if (!ptr->config) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; return gf_odf_av1_cfg_write_bs(ptr->config, bs); } GF_Err av1c_box_size(GF_Box *s) { u32 i; GF_AV1ConfigurationBox *ptr = (GF_AV1ConfigurationBox *)s; if (!ptr->config) { ptr->size = 0; return GF_BAD_PARAM; } ptr->size += 4; for (i = 0; i < gf_list_count(ptr->config->obu_array); ++i) { GF_AV1_OBUArrayEntry *a = gf_list_get(ptr->config->obu_array, i); ptr->size += a->obu_length; } return GF_OK; }
#endif /*GPAC_DISABLE_ISOM_WRITE*/
void vpcc_box_del(GF_Box *s) { GF_VPConfigurationBox *ptr = (GF_VPConfigurationBox*)s; if (ptr->config) gf_odf_vp_cfg_del(ptr->config); ptr->config = NULL; gf_free(ptr); } GF_Err vpcc_box_read(GF_Box *s, GF_BitStream *bs) { u64 pos; GF_VPConfigurationBox *ptr = (GF_VPConfigurationBox *)s; if (ptr->config) gf_odf_vp_cfg_del(ptr->config); ptr->config = NULL; pos = gf_bs_get_position(bs); ptr->config = gf_odf_vp_cfg_read_bs(bs, ptr->version == 0); pos = gf_bs_get_position(bs) - pos; if (pos < ptr->size) GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[ISOBMFF] VPConfigurationBox: read only "LLU" bytes (expected "LLU").\n", pos, ptr->size)); if (pos > ptr->size) GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[ISOBMFF] VPConfigurationBox overflow read "LLU" bytes, of box size "LLU".\n", pos, ptr->size)); return ptr->config ? 
GF_OK : GF_ISOM_INVALID_FILE; } GF_Box *vpcc_box_new() { GF_VPConfigurationBox *tmp = (GF_VPConfigurationBox *) gf_malloc(sizeof(GF_VPConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_VPConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_VPCC; tmp->version = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err vpcc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_VPConfigurationBox *ptr = (GF_VPConfigurationBox *) s; if (!s) return GF_BAD_PARAM; if (!ptr->config) return GF_OK; e = gf_isom_full_box_write(s, bs); if (e) return e; return gf_odf_vp_cfg_write_bs(ptr->config, bs, ptr->version == 0); } #endif GF_Err vpcc_box_size(GF_Box *s) { GF_VPConfigurationBox *ptr = (GF_VPConfigurationBox *)s; if (!ptr->config) { ptr->size = 0; return GF_OK; } if (ptr->version == 0) { ptr->size += 6; } else { if (ptr->config->codec_initdata_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[ISOBMFF] VPConfigurationBox: codec_initdata_size MUST be 0, was %d\n", ptr->config->codec_initdata_size)); return GF_ISOM_INVALID_FILE; } ptr->size += 8; } return GF_OK; } GF_Box *SmDm_box_new() { ISOM_DECL_BOX_ALLOC(GF_SMPTE2086MasteringDisplayMetadataBox, GF_ISOM_BOX_TYPE_SMDM); return (GF_Box *)tmp; } void SmDm_box_del(GF_Box *a) { GF_SMPTE2086MasteringDisplayMetadataBox *p = (GF_SMPTE2086MasteringDisplayMetadataBox *)a; gf_free(p); } GF_Err SmDm_box_read(GF_Box *s, GF_BitStream *bs) { GF_SMPTE2086MasteringDisplayMetadataBox *p = (GF_SMPTE2086MasteringDisplayMetadataBox *)s; ISOM_DECREASE_SIZE(p, 24) p->primaryRChromaticity_x = gf_bs_read_u16(bs); p->primaryRChromaticity_y = gf_bs_read_u16(bs); p->primaryGChromaticity_x = gf_bs_read_u16(bs); p->primaryGChromaticity_y = gf_bs_read_u16(bs); p->primaryBChromaticity_x = gf_bs_read_u16(bs); p->primaryBChromaticity_y = gf_bs_read_u16(bs); p->whitePointChromaticity_x = gf_bs_read_u16(bs); p->whitePointChromaticity_y = gf_bs_read_u16(bs); p->luminanceMax = gf_bs_read_u32(bs); p->luminanceMin = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err SmDm_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SMPTE2086MasteringDisplayMetadataBox *p = (GF_SMPTE2086MasteringDisplayMetadataBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, p->primaryRChromaticity_x); gf_bs_write_u16(bs, p->primaryRChromaticity_y); gf_bs_write_u16(bs, p->primaryGChromaticity_x); gf_bs_write_u16(bs, p->primaryGChromaticity_y); gf_bs_write_u16(bs, p->primaryBChromaticity_x); gf_bs_write_u16(bs, p->primaryBChromaticity_y); gf_bs_write_u16(bs, p->whitePointChromaticity_x); gf_bs_write_u16(bs, p->whitePointChromaticity_y); gf_bs_write_u32(bs, p->luminanceMax); gf_bs_write_u32(bs, p->luminanceMin); return GF_OK; } GF_Err SmDm_box_size(GF_Box *s) { GF_SMPTE2086MasteringDisplayMetadataBox *p = (GF_SMPTE2086MasteringDisplayMetadataBox*)s; p->size += 24; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *CoLL_box_new() { ISOM_DECL_BOX_ALLOC(GF_VPContentLightLevelBox, GF_ISOM_BOX_TYPE_COLL); return (GF_Box *)tmp; } void CoLL_box_del(GF_Box *a) { GF_VPContentLightLevelBox *p = (GF_VPContentLightLevelBox *)a; gf_free(p); } GF_Err CoLL_box_read(GF_Box *s, GF_BitStream *bs) { GF_VPContentLightLevelBox *p = (GF_VPContentLightLevelBox *)s; ISOM_DECREASE_SIZE(p, 4) p->maxCLL = gf_bs_read_u16(bs); p->maxFALL = gf_bs_read_u16(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err CoLL_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_VPContentLightLevelBox *p = (GF_VPContentLightLevelBox*)s; e = 
gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, p->maxCLL); gf_bs_write_u16(bs, p->maxFALL); return GF_OK; } GF_Err CoLL_box_size(GF_Box *s) { GF_VPContentLightLevelBox *p = (GF_VPContentLightLevelBox*)s; p->size += 4; return GF_OK; }
#endif /*GPAC_DISABLE_ISOM_WRITE*/
GF_OperatingPointsInformation *gf_isom_oinf_new_entry() { GF_OperatingPointsInformation* ptr; GF_SAFEALLOC(ptr, GF_OperatingPointsInformation); if (ptr) { ptr->profile_tier_levels = gf_list_new(); ptr->operating_points = gf_list_new(); ptr->dependency_layers = gf_list_new(); } return ptr; } void gf_isom_oinf_del_entry(void *entry) { GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry; if (!ptr) return; if (ptr->profile_tier_levels) { while (gf_list_count(ptr->profile_tier_levels)) { LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_get(ptr->profile_tier_levels, 0); gf_free(ptl); gf_list_rem(ptr->profile_tier_levels, 0); } gf_list_del(ptr->profile_tier_levels); } if (ptr->operating_points) { while (gf_list_count(ptr->operating_points)) { LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, 0); gf_free(op); gf_list_rem(ptr->operating_points, 0); } gf_list_del(ptr->operating_points); } if (ptr->dependency_layers) { while (gf_list_count(ptr->dependency_layers)) { LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, 0); gf_free(dep); gf_list_rem(ptr->dependency_layers, 0); } gf_list_del(ptr->dependency_layers); } gf_free(ptr); return; } GF_Err gf_isom_oinf_read_entry(void *entry, GF_BitStream *bs) { GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry; u32 i, j, count; if (!ptr) return GF_BAD_PARAM; ptr->scalability_mask = gf_bs_read_u16(bs); gf_bs_read_int(bs, 2);//reserved count = gf_bs_read_int(bs, 6); for (i = 0; i < count; i++) { LHEVC_ProfileTierLevel *ptl; GF_SAFEALLOC(ptl, LHEVC_ProfileTierLevel); if (!ptl) return GF_OUT_OF_MEM; ptl->general_profile_space = gf_bs_read_int(bs, 2); ptl->general_tier_flag = gf_bs_read_int(bs, 1); ptl->general_profile_idc = gf_bs_read_int(bs, 5); ptl->general_profile_compatibility_flags = gf_bs_read_u32(bs); ptl->general_constraint_indicator_flags = gf_bs_read_long_int(bs, 48); ptl->general_level_idc = gf_bs_read_u8(bs); gf_list_add(ptr->profile_tier_levels, ptl); } count = gf_bs_read_u16(bs); for (i = 0; i < count; i++) { LHEVC_OperatingPoint *op; GF_SAFEALLOC(op, LHEVC_OperatingPoint); if (!op) return GF_OUT_OF_MEM; op->output_layer_set_idx = gf_bs_read_u16(bs); op->max_temporal_id = gf_bs_read_u8(bs); op->layer_count = gf_bs_read_u8(bs); if (op->layer_count > GF_ARRAY_LENGTH(op->layers_info)) { gf_free(op); return GF_NON_COMPLIANT_BITSTREAM; } for (j = 0; j < op->layer_count; j++) { op->layers_info[j].ptl_idx = gf_bs_read_u8(bs); op->layers_info[j].layer_id = gf_bs_read_int(bs, 6); op->layers_info[j].is_outputlayer = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; op->layers_info[j].is_alternate_outputlayer = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; } op->minPicWidth = gf_bs_read_u16(bs); op->minPicHeight = gf_bs_read_u16(bs); op->maxPicWidth = gf_bs_read_u16(bs); op->maxPicHeight = gf_bs_read_u16(bs); op->maxChromaFormat = gf_bs_read_int(bs, 2); op->maxBitDepth = gf_bs_read_int(bs, 3) + 8; gf_bs_read_int(bs, 1);//reserved op->frame_rate_info_flag = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE; op->bit_rate_info_flag = gf_bs_read_int(bs, 1) ? 
GF_TRUE : GF_FALSE; if (op->frame_rate_info_flag) { op->avgFrameRate = gf_bs_read_u16(bs); gf_bs_read_int(bs, 6); //reserved op->constantFrameRate = gf_bs_read_int(bs, 2); } if (op->bit_rate_info_flag) { op->maxBitRate = gf_bs_read_u32(bs); op->avgBitRate = gf_bs_read_u32(bs); } gf_list_add(ptr->operating_points, op); } count = gf_bs_read_u8(bs); for (i = 0; i < count; i++) { LHEVC_DependentLayer *dep; GF_SAFEALLOC(dep, LHEVC_DependentLayer); if (!dep) return GF_OUT_OF_MEM; dep->dependent_layerID = gf_bs_read_u8(bs); dep->num_layers_dependent_on = gf_bs_read_u8(bs); if (dep->num_layers_dependent_on > GF_ARRAY_LENGTH(dep->dependent_on_layerID)) { gf_free(dep); return GF_NON_COMPLIANT_BITSTREAM; } for (j = 0; j < dep->num_layers_dependent_on; j++) dep->dependent_on_layerID[j] = gf_bs_read_u8(bs); for (j = 0; j < 16; j++) { if (ptr->scalability_mask & (1 << j)) dep->dimension_identifier[j] = gf_bs_read_u8(bs); } gf_list_add(ptr->dependency_layers, dep); } return GF_OK; } GF_Err gf_isom_oinf_write_entry(void *entry, GF_BitStream *bs) { GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry; u32 i, j, count; if (!ptr) return GF_OK; gf_bs_write_u16(bs, ptr->scalability_mask); gf_bs_write_int(bs, 0xFF, 2);//reserved count=gf_list_count(ptr->profile_tier_levels); gf_bs_write_int(bs, count, 6); for (i = 0; i < count; i++) { LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_get(ptr->profile_tier_levels, i); gf_bs_write_int(bs, ptl->general_profile_space, 2); gf_bs_write_int(bs, ptl->general_tier_flag, 1); gf_bs_write_int(bs, ptl->general_profile_idc, 5); gf_bs_write_u32(bs, ptl->general_profile_compatibility_flags); gf_bs_write_long_int(bs, ptl->general_constraint_indicator_flags, 48); gf_bs_write_u8(bs, ptl->general_level_idc); } count=gf_list_count(ptr->operating_points); gf_bs_write_u16(bs, count); for (i = 0; i < count; i++) { LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, i); gf_bs_write_u16(bs, op->output_layer_set_idx); gf_bs_write_u8(bs, op->max_temporal_id); gf_bs_write_u8(bs, op->layer_count); for (j = 0; j < op->layer_count; j++) { gf_bs_write_u8(bs, op->layers_info[j].ptl_idx); gf_bs_write_int(bs, op->layers_info[j].layer_id, 6); op->layers_info[j].is_outputlayer ? gf_bs_write_int(bs, 0x1, 1) : gf_bs_write_int(bs, 0x0, 1); op->layers_info[j].is_alternate_outputlayer ? gf_bs_write_int(bs, 0x1, 1) : gf_bs_write_int(bs, 0x0, 1); } gf_bs_write_u16(bs, op->minPicWidth); gf_bs_write_u16(bs, op->minPicHeight); gf_bs_write_u16(bs, op->maxPicWidth); gf_bs_write_u16(bs, op->maxPicHeight); gf_bs_write_int(bs, op->maxChromaFormat, 2); gf_bs_write_int(bs, op->maxBitDepth - 8, 3); gf_bs_write_int(bs, 0x1, 1);//reserved op->frame_rate_info_flag ? gf_bs_write_int(bs, 0x1, 1) : gf_bs_write_int(bs, 0x0, 1); op->bit_rate_info_flag ? 
gf_bs_write_int(bs, 0x1, 1) : gf_bs_write_int(bs, 0x0, 1); if (op->frame_rate_info_flag) { gf_bs_write_u16(bs, op->avgFrameRate); gf_bs_write_int(bs, 0xFF, 6); //reserved gf_bs_write_int(bs, op->constantFrameRate, 2); } if (op->bit_rate_info_flag) { gf_bs_write_u32(bs, op->maxBitRate); gf_bs_write_u32(bs, op->avgBitRate); } } count=gf_list_count(ptr->dependency_layers); gf_bs_write_u8(bs, count); for (i = 0; i < count; i++) { LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, i); gf_bs_write_u8(bs, dep->dependent_layerID); gf_bs_write_u8(bs, dep->num_layers_dependent_on); for (j = 0; j < dep->num_layers_dependent_on; j++) gf_bs_write_u8(bs, dep->dependent_on_layerID[j]); for (j = 0; j < 16; j++) { if (ptr->scalability_mask & (1 << j)) gf_bs_write_u8(bs, dep->dimension_identifier[j]); } } return GF_OK; } u32 gf_isom_oinf_size_entry(void *entry) { GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry; u32 size = 0, i ,j, count; if (!ptr) return 0; size += 3; //scalability_mask + reserved + num_profile_tier_level count=gf_list_count(ptr->profile_tier_levels); size += count * 12; //general_profile_space + general_tier_flag + general_profile_idc + general_profile_compatibility_flags + general_constraint_indicator_flags + general_level_idc size += 2;//num_operating_points count=gf_list_count(ptr->operating_points); for (i = 0; i < count; i++) { LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, i); size += 2/*output_layer_set_idx*/ + 1/*max_temporal_id*/ + 1/*layer_count*/; size += op->layer_count * 2; size += 9; if (op->frame_rate_info_flag) { size += 3; } if (op->bit_rate_info_flag) { size += 8; } } size += 1;//max_layer_count count=gf_list_count(ptr->dependency_layers); for (i = 0; i < count; i++) { LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, i); size += 1/*dependent_layerID*/ + 1/*num_layers_dependent_on*/; size += dep->num_layers_dependent_on * 1;//dependent_on_layerID for (j = 0; j < 16; j++) { if (ptr->scalability_mask & (1 << j)) size += 1;//dimension_identifier } } return size; } GF_LHVCLayerInformation *gf_isom_linf_new_entry() { GF_LHVCLayerInformation* ptr; GF_SAFEALLOC(ptr, GF_LHVCLayerInformation); if (ptr) ptr->num_layers_in_track = gf_list_new(); return ptr; } void gf_isom_linf_del_entry(void *entry) { GF_LHVCLayerInformation* ptr = (GF_LHVCLayerInformation *)entry; if (!ptr) return; while (gf_list_count(ptr->num_layers_in_track)) { LHVCLayerInfoItem *li = (LHVCLayerInfoItem *)gf_list_get(ptr->num_layers_in_track, 0); gf_free(li); gf_list_rem(ptr->num_layers_in_track, 0); } gf_list_del(ptr->num_layers_in_track); gf_free(ptr); return; } GF_Err gf_isom_linf_read_entry(void *entry, GF_BitStream *bs) { GF_LHVCLayerInformation* ptr = (GF_LHVCLayerInformation *)entry; u32 i, count; if (!ptr) return GF_BAD_PARAM; gf_bs_read_int(bs, 2); count = gf_bs_read_int(bs, 6); for (i = 0; i < count; i++) { LHVCLayerInfoItem *li; GF_SAFEALLOC(li, LHVCLayerInfoItem); if (!li) return GF_OUT_OF_MEM; gf_bs_read_int(bs, 4); li->layer_id = gf_bs_read_int(bs, 6); li->min_TemporalId = gf_bs_read_int(bs, 3); li->max_TemporalId = gf_bs_read_int(bs, 3); gf_bs_read_int(bs, 1); li->sub_layer_presence_flags = gf_bs_read_int(bs, 7); gf_list_add(ptr->num_layers_in_track, li); } return GF_OK; } GF_Err gf_isom_linf_write_entry(void *entry, GF_BitStream *bs) { GF_LHVCLayerInformation* ptr = (GF_LHVCLayerInformation *)entry; u32 i, count; if (!ptr) return GF_OK; gf_bs_write_int(bs, 
0, 2); count=gf_list_count(ptr->num_layers_in_track); gf_bs_write_int(bs, count, 6); for (i = 0; i < count; i++) { LHVCLayerInfoItem *li = (LHVCLayerInfoItem *)gf_list_get(ptr->num_layers_in_track, i); gf_bs_write_int(bs, 0, 4); gf_bs_write_int(bs, li->layer_id, 6); gf_bs_write_int(bs, li->min_TemporalId, 3); gf_bs_write_int(bs, li->max_TemporalId, 3); gf_bs_write_int(bs, 0, 1); gf_bs_write_int(bs, li->sub_layer_presence_flags, 7); } return GF_OK; } u32 gf_isom_linf_size_entry(void *entry) { GF_LHVCLayerInformation* ptr = (GF_LHVCLayerInformation *)entry; u32 size = 0, count; if (!ptr) return 0; size += 1; count=gf_list_count(ptr->num_layers_in_track); size += count * 3; return size; } #endif /*GPAC_DISABLE_ISOM*/
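/*
 * Editor's usage sketch (illustrative, not part of the original sources): how a
 * caller might use the configuration accessors defined above. The file name and
 * the track/description indices are assumptions; error handling is trimmed.
 *
 *	GF_ISOFile *movie = gf_isom_open("movie.mp4", GF_ISOM_OPEN_READ, NULL);
 *	if (movie) {
 *		//gf_isom_avc_config_get returns a duplicate: the caller owns and frees it
 *		GF_AVCConfig *avcc = gf_isom_avc_config_get(movie, 1, 1);
 *		if (avcc) {
 *			//inspect e.g. avcc->AVCProfileIndication and avcc->AVCLevelIndication
 *			gf_odf_avc_cfg_del(avcc);
 *		}
 *		gf_isom_close(movie);
 *	}
 */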
/*
 *			GPAC - Multimedia Framework C SDK
 *
 *			Authors: Jean Le Feuvre
 *			Copyright (c) Telecom ParisTech 2000-2021
 *					All rights reserved
 *
 *  This file is part of GPAC / ISO Media File Format sub-project
 *
 *  GPAC is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU Lesser General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  GPAC is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <gpac/internal/isomedia_dev.h>
#include <gpac/constants.h>
#include <gpac/internal/media_dev.h>

#ifndef GPAC_DISABLE_ISOM

Bool gf_isom_is_nalu_based_entry(GF_MediaBox *mdia, GF_SampleEntryBox *_entry)
{
	GF_MPEGVisualSampleEntryBox *entry;
	if (!gf_isom_is_video_handler_type(mdia->handler->handlerType))
		return GF_FALSE;
	if (!_entry) return GF_FALSE;
	entry = (GF_MPEGVisualSampleEntryBox*)_entry;
	switch (_entry->type) {
	case GF_ISOM_BOX_TYPE_AVC1:
	case GF_ISOM_BOX_TYPE_AVC2:
	case GF_ISOM_BOX_TYPE_AVC3:
	case GF_ISOM_BOX_TYPE_AVC4:
	case GF_ISOM_BOX_TYPE_SVC1:
	case GF_ISOM_BOX_TYPE_SVC2:
	case GF_ISOM_BOX_TYPE_MVC1:
	case GF_ISOM_BOX_TYPE_MVC2:
	case GF_ISOM_BOX_TYPE_HVC1:
	case GF_ISOM_BOX_TYPE_HEV1:
	case GF_ISOM_BOX_TYPE_HVC2:
	case GF_ISOM_BOX_TYPE_HEV2:
	case GF_ISOM_BOX_TYPE_LHV1:
	case GF_ISOM_BOX_TYPE_LHE1:
	case GF_ISOM_BOX_TYPE_MHV1:
	case GF_ISOM_BOX_TYPE_MHC1:
	case GF_ISOM_BOX_TYPE_HVT1:
	case GF_ISOM_BOX_TYPE_LHT1:
		return GF_TRUE;
	case GF_ISOM_BOX_TYPE_GNRV:
	case GF_ISOM_BOX_TYPE_GNRA:
	case GF_ISOM_BOX_TYPE_GNRM:
		return GF_FALSE;
	default:
		break;
	}

	if (!gf_isom_is_video_handler_type(entry->internal_type)) return GF_FALSE;

	if (entry->avc_config || entry->svc_config || entry->mvc_config || entry->hevc_config || entry->lhvc_config) {
		GF_ProtectionSchemeInfoBox *schi = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF);
		if (!schi || !schi->scheme_type) return GF_TRUE;
		switch (schi->scheme_type->scheme_type) {
		case GF_ISOM_CENC_SCHEME:
		case GF_ISOM_CBC_SCHEME:
		case GF_ISOM_CENS_SCHEME:
		case GF_ISOM_CBCS_SCHEME:
			return GF_TRUE;
		default:
			break;
		}
	}
	return GF_FALSE;
}

static void rewrite_nalus_list(GF_List *nalus, GF_BitStream *bs, Bool rewrite_start_codes, u32 nal_unit_size_field)
{
	u32 i, count = gf_list_count(nalus);
	for (i=0; i<count; i++) {
		GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(nalus, i);
		if (rewrite_start_codes)
			gf_bs_write_u32(bs, 1);
		else
			gf_bs_write_int(bs, sl->size, 8*nal_unit_size_field);
		gf_bs_write_data(bs, sl->data, sl->size);
	}
}

static GF_Err process_extractor(GF_ISOFile *file, GF_MediaBox *mdia, u32 sampleNumber, u64 sampleDTS, u32 nal_size, u16 nal_hdr, u32 nal_unit_size_field, Bool is_hevc, Bool rewrite_ps, Bool rewrite_start_codes, u32 extractor_mode)
{
	GF_Err e;
	u32 di, ref_track_index, ref_track_num, data_offset, data_length, cur_extract_mode, ref_extract_mode, ref_nalu_size, nb_bytes_nalh;
	GF_TrackReferenceTypeBox *dpnd;
	GF_TrackBox *ref_trak;
	s8 sample_offset;
	u32 last_byte, ref_sample_num, prev_ref_sample_num;
	Bool header_written = GF_FALSE;
	nb_bytes_nalh = is_hevc ? 2 : 1;

	switch (extractor_mode) {
	case 0:
		last_byte = (u32) gf_bs_get_position(mdia->nalu_parser) + nal_size - (is_hevc ? 2 : 1);
		if (!is_hevc) gf_bs_read_int(mdia->nalu_parser, 24); //1 byte for HEVC, 3 bytes for AVC of NALUHeader in extractor
		while (gf_bs_get_position(mdia->nalu_parser) < last_byte) {
			u32 xmode = 0;
			//hevc extractors use constructors
			if (is_hevc) xmode = gf_bs_read_u8(mdia->nalu_parser);
			if (xmode) {
				u8 done=0, len = gf_bs_read_u8(mdia->nalu_parser);
				while (done<len) {
					u8 c = gf_bs_read_u8(mdia->nalu_parser);
					done++;
					if (header_written) {
						gf_bs_write_u8(mdia->nalu_out_bs, c);
					} else if (done==nal_unit_size_field) {
						if (rewrite_start_codes) {
							gf_bs_write_int(mdia->nalu_out_bs, 1, 32);
						} else {
							gf_bs_write_u8(mdia->nalu_out_bs, c);
						}
						header_written = GF_TRUE;
					} else if (!rewrite_start_codes) {
						gf_bs_write_u8(mdia->nalu_out_bs, c);
					}
				}
				continue;
			}

			ref_track_index = gf_bs_read_u8(mdia->nalu_parser);
			sample_offset = (s8) gf_bs_read_int(mdia->nalu_parser, 8);
			data_offset = gf_bs_read_int(mdia->nalu_parser, nal_unit_size_field*8);
			data_length = gf_bs_read_int(mdia->nalu_parser, nal_unit_size_field*8);

			Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_SCAL, &dpnd);
			ref_track_num = 0;
			if (dpnd && ref_track_index && (ref_track_index<=dpnd->trackIDCount))
				ref_track_num = gf_isom_get_track_by_id(file, dpnd->trackIDs[ref_track_index-1]);

			if (!ref_track_num) {
				GF_LOG(GF_LOG_INFO, GF_LOG_CONTAINER, ("ISOBMF: Extractor target track is not present in file - skipping.\n"));
				return GF_OK;
			}

			cur_extract_mode = gf_isom_get_nalu_extract_mode(file, ref_track_num);

			//we must be in inspect mode only otherwise the reference sample will not be the one stored on file (change in start codes, PS inserted or other NALUs inserted)
			//and this will corrupt extraction (wrong data offsets)
			ref_extract_mode = GF_ISOM_NALU_EXTRACT_INSPECT;
			gf_isom_set_nalu_extract_mode(file, ref_track_num, ref_extract_mode);

			ref_trak = gf_isom_get_track_from_file(file, ref_track_num);
			if (!ref_trak) return GF_ISOM_INVALID_FILE;

			if (!mdia->extracted_samp) {
				mdia->extracted_samp = gf_isom_sample_new();
				if (!mdia->extracted_samp) return GF_IO_ERR;
			}
			if (!mdia->extracted_bs) {
				mdia->extracted_bs = gf_bs_new("a", 1, GF_BITSTREAM_READ);
				if (!mdia->extracted_bs) return GF_IO_ERR;
			}

			e = stbl_findEntryForTime(ref_trak->Media->information->sampleTable, sampleDTS, 0, &ref_sample_num, &prev_ref_sample_num);
			if (e) return e;
			if (!ref_sample_num) ref_sample_num = prev_ref_sample_num;
			if (!ref_sample_num) return GF_ISOM_INVALID_FILE;
			if ((sample_offset<0) && (ref_sample_num > (u32) -sample_offset)) return GF_ISOM_INVALID_FILE;
			ref_sample_num = (u32) ( (s32) ref_sample_num + sample_offset);

			e = Media_GetSample(ref_trak->Media, ref_sample_num, &mdia->extracted_samp, &di, GF_FALSE, NULL);
			if (e) return e;
			if (!mdia->extracted_samp->alloc_size)
				mdia->extracted_samp->alloc_size = mdia->extracted_samp->dataLength;

#if 0
			if (!header_written && rewrite_start_codes) {
				gf_bs_write_int(dst_bs, 1, 32);
				if (is_hevc) {
					gf_bs_write_int(dst_bs, 0, 1);
					gf_bs_write_int(dst_bs, GF_HEVC_NALU_ACCESS_UNIT, 6);
					gf_bs_write_int(dst_bs, 0, 9);
					/*pic-type - by default we signal all slice types possible*/
					gf_bs_write_int(dst_bs, 2, 3);
					gf_bs_write_int(dst_bs, 0, 5);
				} else {
					gf_bs_write_int(dst_bs, (ref_samp->data[0] & 0x60) | GF_AVC_NALU_ACCESS_UNIT, 8);
					gf_bs_write_int(dst_bs, 0xF0 , 8); /*7 "all supported NALUs" (=111) + rbsp trailing (10000)*/;
				}
			}
#endif

			gf_bs_reassign_buffer(mdia->extracted_bs, mdia->extracted_samp->data + data_offset, mdia->extracted_samp->dataLength - data_offset);

			if (mdia->extracted_samp->dataLength - data_offset >= data_length) {
				while (data_length && gf_bs_available(mdia->extracted_bs)) {
					if (!header_written) {
						ref_nalu_size = gf_bs_read_int(mdia->extracted_bs, 8*nal_unit_size_field);
						assert(data_length>nal_unit_size_field);
						data_length -= nal_unit_size_field;
						if (data_length > gf_bs_available(mdia->extracted_bs)) {
							data_length = (u32)gf_bs_available(mdia->extracted_bs);
						}
					} else {
						ref_nalu_size = data_length;
					}

					if (ref_nalu_size > mdia->tmp_nal_copy_buffer_alloc) {
						mdia->tmp_nal_copy_buffer_alloc = ref_nalu_size;
						mdia->tmp_nal_copy_buffer = (char*) gf_realloc(mdia->tmp_nal_copy_buffer, sizeof(char) * ref_nalu_size );
					}
					gf_bs_read_data(mdia->extracted_bs, mdia->tmp_nal_copy_buffer, ref_nalu_size);

					if (!header_written) {
						if (rewrite_start_codes)
							gf_bs_write_u32(mdia->nalu_out_bs, 1);
						else
							gf_bs_write_int(mdia->nalu_out_bs, ref_nalu_size, 8*nal_unit_size_field);
					}
					assert(data_length >= ref_nalu_size);
					gf_bs_write_data(mdia->nalu_out_bs, mdia->tmp_nal_copy_buffer, ref_nalu_size);
					data_length -= ref_nalu_size;

					header_written = GF_FALSE;
				}
			} else {
				GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("ISOBMF: Extractor size is larger than referred sample size - skipping.\n"));
			}

			gf_isom_set_nalu_extract_mode(file, ref_track_num, cur_extract_mode);

			if (!is_hevc) break;
		}
		break;
	case 1:
		//skip to end of this NALU
		gf_bs_skip_bytes(mdia->nalu_parser, nal_size - nb_bytes_nalh);
		break;
	case 2:
		if (nal_size - nb_bytes_nalh > mdia->tmp_nal_copy_buffer_alloc) {
			mdia->tmp_nal_copy_buffer_alloc = nal_size - nb_bytes_nalh;
			mdia->tmp_nal_copy_buffer = (char*) gf_realloc(mdia->tmp_nal_copy_buffer, sizeof(char) * (nal_size - nb_bytes_nalh) );
		}
		gf_bs_read_data(mdia->nalu_parser, mdia->tmp_nal_copy_buffer, nal_size - nb_bytes_nalh);
		if (rewrite_start_codes)
			gf_bs_write_u32(mdia->nalu_out_bs, 1);
		else
			gf_bs_write_int(mdia->nalu_out_bs, nal_size, 8*nal_unit_size_field);

		gf_bs_write_u8(mdia->nalu_out_bs, nal_hdr);
		gf_bs_write_data(mdia->nalu_out_bs, mdia->tmp_nal_copy_buffer, nal_size - nb_bytes_nalh);
		break;
	}
	return GF_OK;
}

#ifndef GPAC_DISABLE_HEVC
/* returns the SAP type as defined in the 14496-12 specification */
static GF_ISOSAPType sap_type_from_nal_type(u8 nal_type)
{
	switch (nal_type) {
	case GF_HEVC_NALU_SLICE_CRA:
		return SAP_TYPE_3;
	case GF_HEVC_NALU_SLICE_IDR_N_LP:
	case GF_HEVC_NALU_SLICE_BLA_N_LP:
		return SAP_TYPE_1;
	case GF_HEVC_NALU_SLICE_IDR_W_DLP:
	case GF_HEVC_NALU_SLICE_BLA_W_DLP:
	case GF_HEVC_NALU_SLICE_BLA_W_LP:
		return SAP_TYPE_2;
	default:
		return RAP_NO;
	}
}
#endif
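/* The three extractor_mode values handled in process_extractor() follow the
   track's NALU extract mode: 0 resolves the extractor against the referenced
   track's sample (default extraction), 1 drops it entirely (layer-only
   extraction), and 2 copies the extractor NALU through untouched (inspect
   mode). A minimal usage sketch, kept under #if 0; the helper name and the
   idea of reading sample 1 are assumptions for illustration: */
#if 0
static void extract_annexb_demo(GF_ISOFile *file, u32 track)
{
	u32 di;
	GF_ISOSample *samp;
	//resolve extractors and rewrite to Annex B start codes with in-band parameter sets
	gf_isom_set_nalu_extract_mode(file, track,
		GF_ISOM_NALU_EXTRACT_DEFAULT | GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG | GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG);
	samp = gf_isom_get_sample(file, track, 1, &di);
	//samp->data now starts with a 0x00000001 start code rather than a length field
	if (samp) gf_isom_sample_del(&samp);
}
#endif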
static GF_ISOSAPType is_sample_idr(GF_MediaBox *mdia, GF_ISOSample *sample, GF_MPEGVisualSampleEntryBox *entry)
{
	Bool is_hevc = GF_FALSE;
	u32 nalu_size_field = 0;
	if (entry->avc_config && entry->avc_config->config) nalu_size_field = entry->avc_config->config->nal_unit_size;
	else if (entry->svc_config && entry->svc_config->config) nalu_size_field = entry->svc_config->config->nal_unit_size;
	else if (entry->mvc_config && entry->mvc_config->config) nalu_size_field = entry->mvc_config->config->nal_unit_size;
	else if (entry->hevc_config && entry->hevc_config->config) {
		nalu_size_field = entry->hevc_config->config->nal_unit_size;
		is_hevc = GF_TRUE;
	}
	else if (entry->lhvc_config && entry->lhvc_config->config) {
		nalu_size_field = entry->lhvc_config->config->nal_unit_size;
		is_hevc = GF_TRUE;
	}
	if (!nalu_size_field) return RAP_NO;

	if (!mdia->nalu_parser)
		mdia->nalu_parser = gf_bs_new(sample->data, sample->dataLength, GF_BITSTREAM_READ);
	else
		gf_bs_reassign_buffer(mdia->nalu_parser, sample->data, sample->dataLength);
	if (!mdia->nalu_parser) return RAP_NO;

	while (gf_bs_available(mdia->nalu_parser)) {
		u8 nal_type;
		u32 size = gf_bs_read_int(mdia->nalu_parser, 8*nalu_size_field);

		if (is_hevc) {
#ifndef GPAC_DISABLE_HEVC
			u16 nal_hdr = gf_bs_read_u16(mdia->nalu_parser);
			nal_type = (nal_hdr&0x7E00) >> 9;

			switch (nal_type) {
			case GF_HEVC_NALU_SLICE_CRA:
				return SAP_TYPE_3;
			case GF_HEVC_NALU_SLICE_IDR_N_LP:
			case GF_HEVC_NALU_SLICE_BLA_N_LP:
				return SAP_TYPE_1;
			case GF_HEVC_NALU_SLICE_IDR_W_DLP:
			case GF_HEVC_NALU_SLICE_BLA_W_DLP:
			case GF_HEVC_NALU_SLICE_BLA_W_LP:
				return SAP_TYPE_2;
			case GF_HEVC_NALU_ACCESS_UNIT:
			case GF_HEVC_NALU_FILLER_DATA:
			case GF_HEVC_NALU_SEI_PREFIX:
			case GF_HEVC_NALU_VID_PARAM:
			case GF_HEVC_NALU_SEQ_PARAM:
			case GF_HEVC_NALU_PIC_PARAM:
				break;
			default:
				return RAP_NO;
			}
			gf_bs_skip_bytes(mdia->nalu_parser, size - 2);
#endif
		} else {
			u8 nal_hdr = gf_bs_read_u8(mdia->nalu_parser);
			nal_type = nal_hdr & 0x1F;

			if (nal_type==GF_AVC_NALU_IDR_SLICE) return SAP_TYPE_1;
			if (nal_type<GF_AVC_NALU_IDR_SLICE) return RAP_NO;
			gf_bs_skip_bytes(mdia->nalu_parser, size - 1);
		}
	}
	return RAP_NO;
}

static void nalu_merge_ps(GF_BitStream *ps_bs, Bool rewrite_start_codes, u32 nal_unit_size_field, GF_MPEGVisualSampleEntryBox *entry, Bool is_hevc, Bool *has_vps)
{
	u32 i, count;
	if (is_hevc) {
		if (entry->hevc_config) {
			count = gf_list_count(entry->hevc_config->config->param_array);
			for (i=0; i<count; i++) {
				GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(entry->hevc_config->config->param_array, i);
				if (ar->type == GF_HEVC_NALU_VID_PARAM) {
					if (! *has_vps) *has_vps = GF_TRUE;
					else continue;
				}
				rewrite_nalus_list(ar->nalus, ps_bs, rewrite_start_codes, nal_unit_size_field);
			}
		}
		if (entry->lhvc_config) {
			count = gf_list_count(entry->lhvc_config->config->param_array);
			for (i=0; i<count; i++) {
				GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(entry->lhvc_config->config->param_array, i);
				if (ar->type == GF_HEVC_NALU_VID_PARAM) {
					if (! *has_vps) *has_vps = GF_TRUE;
					else continue;
				}
				rewrite_nalus_list(ar->nalus, ps_bs, rewrite_start_codes, nal_unit_size_field);
			}
		}
	} else {
		if (entry->avc_config) {
			rewrite_nalus_list(entry->avc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
			rewrite_nalus_list(entry->avc_config->config->sequenceParameterSetExtensions, ps_bs, rewrite_start_codes, nal_unit_size_field);
			rewrite_nalus_list(entry->avc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
		}
		/*add svc config*/
		if (entry->svc_config) {
			rewrite_nalus_list(entry->svc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
			rewrite_nalus_list(entry->svc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
		}
		/*add mvc config*/
		if (entry->mvc_config) {
			rewrite_nalus_list(entry->mvc_config->config->sequenceParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
			rewrite_nalus_list(entry->mvc_config->config->pictureParameterSets, ps_bs, rewrite_start_codes, nal_unit_size_field);
		}
	}
}
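/* The HEVC NAL header parsed in is_sample_idr() is 16 bits:
   forbidden_zero_bit(1) + nal_unit_type(6) + nuh_layer_id(6) + nuh_temporal_id_plus1(3),
   hence the (nal_hdr & 0x7E00) >> 9 mask for the type. A small helper making
   all three fields explicit; the function name is hypothetical and it is kept
   under #if 0 as an illustration: */
#if 0
static void hevc_nal_hdr_fields(u16 nal_hdr, u8 *nal_type, u8 *layer_id, u8 *temporal_id_plus1)
{
	*nal_type = (nal_hdr & 0x7E00) >> 9;     //bits 14..9
	*layer_id = (nal_hdr & 0x01F8) >> 3;     //bits 8..3
	*temporal_id_plus1 = nal_hdr & 0x0007;   //bits 2..0
}
#endif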
GF_Err gf_isom_nalu_sample_rewrite(GF_MediaBox *mdia, GF_ISOSample *sample, u32 sampleNumber, GF_MPEGVisualSampleEntryBox *entry)
{
	Bool is_hevc = GF_FALSE;
	//if only one sync given in the sample sync table, insert sps/pps/vps before cra/bla in hevc
//	Bool check_cra_bla = (mdia->information->sampleTable->SyncSample && mdia->information->sampleTable->SyncSample->nb_entries>1) ? 0 : 1;
	Bool check_cra_bla = GF_TRUE;
	Bool insert_nalu_delim = GF_TRUE;
	Bool force_sei_inspect = GF_FALSE;
	GF_Err e = GF_OK;
	GF_BitStream *sei_suffix_bs = NULL;
	Bool ps_transfered = GF_FALSE;
	u32 nal_size, nal_unit_size_field, extractor_mode;
	Bool rewrite_ps, rewrite_start_codes, insert_vdrd_code;
	u8 nal_type;
	u32 nal_hdr, sabt_ref, i, track_num;
	u32 temporal_id = 0;
	GF_ISOFile *file = mdia->mediaTrack->moov->mov;
	GF_TrackReferenceTypeBox *scal = NULL;

	Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_SCAL, &scal);

	rewrite_ps = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_INBAND_PS_FLAG) ? GF_TRUE : GF_FALSE;
	rewrite_start_codes = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG) ? GF_TRUE : GF_FALSE;
	insert_vdrd_code = (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_VDRD_FLAG) ? GF_TRUE : GF_FALSE;
	if (!entry->svc_config && !entry->mvc_config && !entry->lhvc_config) insert_vdrd_code = GF_FALSE;
	extractor_mode = mdia->mediaTrack->extractor_mode&0x0000FFFF;

	if (mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_TILE_ONLY) {
		insert_nalu_delim = GF_FALSE;
	}

	track_num = 1 + gf_list_find(mdia->mediaTrack->moov->trackList, mdia->mediaTrack);

	if ( (extractor_mode != GF_ISOM_NALU_EXTRACT_INSPECT) && !(mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_TILE_ONLY) ) {
		u32 ref_track, di;
		//aggregate all sabt samples with the same DTS
		if (entry->lhvc_config && !entry->hevc_config && !(mdia->mediaTrack->extractor_mode & GF_ISOM_NALU_EXTRACT_LAYER_ONLY)) {
			if (gf_isom_get_reference_count(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_SCAL) <= 0) {
				//FIXME - for now we only support two layers (base + enh) in implicit
				if ( gf_isom_get_reference_count(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_BASE) >= 1) {
					GF_ISOSample *base_samp;
					gf_isom_get_reference(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_BASE, 1, &ref_track);
					switch (gf_isom_get_media_subtype(mdia->mediaTrack->moov->mov , ref_track, 1)) {
					case GF_ISOM_SUBTYPE_HVC1:
					case GF_ISOM_SUBTYPE_HVC2:
					case GF_ISOM_SUBTYPE_HEV1:
					case GF_ISOM_SUBTYPE_HEV2:
						if (!mdia->extracted_samp) {
							mdia->extracted_samp = gf_isom_sample_new();
							if (!mdia->extracted_samp) return GF_OUT_OF_MEM;
						}
						base_samp = gf_isom_get_sample_ex(mdia->mediaTrack->moov->mov, ref_track, sampleNumber + mdia->mediaTrack->sample_count_at_seg_start, &di, mdia->extracted_samp, NULL);
						if (base_samp && base_samp->data) {
							if (!sample->alloc_size || (sample->alloc_size<sample->dataLength+base_samp->dataLength) ) {
								sample->data = gf_realloc(sample->data, sample->dataLength+base_samp->dataLength);
								if (sample->alloc_size) sample->alloc_size = sample->dataLength+base_samp->dataLength;
							}
							memmove(sample->data + base_samp->dataLength, sample->data , sample->dataLength);
							memcpy(sample->data, base_samp->data, base_samp->dataLength);
							sample->dataLength += base_samp->dataLength;
						}
						Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_BASE, &scal);
						break;
					}
				}
			}
		}
		sabt_ref = gf_isom_get_reference_count(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_SABT);
		if ((s32) sabt_ref > 0) {
			force_sei_inspect = GF_TRUE;
			for (i=0; i<sabt_ref; i++) {
				GF_ISOSample *tile_samp;
				gf_isom_get_reference(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_SABT, i+1, &ref_track);
				if (!mdia->extracted_samp) {
					mdia->extracted_samp = gf_isom_sample_new();
					if (!mdia->extracted_samp) return GF_OUT_OF_MEM;
				}
				tile_samp = gf_isom_get_sample_ex(mdia->mediaTrack->moov->mov, ref_track, sampleNumber + mdia->mediaTrack->sample_count_at_seg_start, &di, mdia->extracted_samp, NULL);
				if (tile_samp && tile_samp->data) {
					if (!sample->alloc_size || (sample->alloc_size<sample->dataLength+tile_samp->dataLength) ) {
						sample->data = gf_realloc(sample->data, sample->dataLength+tile_samp->dataLength);
						if (sample->alloc_size) sample->alloc_size = sample->dataLength+tile_samp->dataLength;
					}
					memcpy(sample->data + sample->dataLength, tile_samp->data, tile_samp->dataLength);
					sample->dataLength += tile_samp->dataLength;
				}
			}
		}
	}

	if ( gf_isom_get_reference_count(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_TBAS) >= 1) {
		u32 ref_track;
		u32 idx = gf_list_find(mdia->information->sampleTable->SampleDescription->child_boxes, entry);
		GF_TrackBox *tbas;
		gf_isom_get_reference(mdia->mediaTrack->moov->mov, track_num, GF_ISOM_REF_TBAS, 1, &ref_track);
		tbas = (GF_TrackBox *)gf_list_get(mdia->mediaTrack->moov->trackList, ref_track-1);
		entry = gf_list_get(tbas->Media->information->sampleTable->SampleDescription->child_boxes, idx);
	}

	if (sample->IsRAP < SAP_TYPE_2) {
		if (mdia->information->sampleTable->no_sync_found || (!sample->IsRAP && check_cra_bla) ) {
			sample->IsRAP = is_sample_idr(mdia, sample, entry);
		}
	}
	if (!sample->IsRAP)
		rewrite_ps = GF_FALSE;

	if (extractor_mode != GF_ISOM_NALU_EXTRACT_LAYER_ONLY) insert_vdrd_code = GF_FALSE;

	if (!entry) return GF_BAD_PARAM;

	//this is a compatible HEVC, don't insert VDRD, insert NALU delim
	if (entry->lhvc_config && entry->hevc_config)
		insert_vdrd_code = GF_FALSE;

	if (extractor_mode == GF_ISOM_NALU_EXTRACT_INSPECT) {
		if (!rewrite_ps && !rewrite_start_codes)
			return GF_OK;
	}

	nal_unit_size_field = 0;
	/*if svc rewrite*/
	if (entry->svc_config && entry->svc_config->config)
		nal_unit_size_field = entry->svc_config->config->nal_unit_size;
	/*if mvc rewrite*/
	if (entry->mvc_config && entry->mvc_config->config)
		nal_unit_size_field = entry->mvc_config->config->nal_unit_size;
	/*if lhvc rewrite*/
	else if (entry->lhvc_config && entry->lhvc_config->config) {
		is_hevc = GF_TRUE;
		nal_unit_size_field = entry->lhvc_config->config->nal_unit_size;
	}
	/*otherwise do nothing*/
	else if (!rewrite_ps && !rewrite_start_codes && !scal && !force_sei_inspect) {
		return GF_OK;
	}

	if (!nal_unit_size_field) {
		if (entry->avc_config && entry->avc_config->config)
			nal_unit_size_field = entry->avc_config->config->nal_unit_size;
		else if (entry->lhvc_config && entry->lhvc_config->config) {
			nal_unit_size_field = entry->lhvc_config->config->nal_unit_size;
			is_hevc = GF_TRUE;
		}
		else if (entry->hevc_config && entry->hevc_config->config) {
			nal_unit_size_field = entry->hevc_config->config->nal_unit_size;
			is_hevc = GF_TRUE;
		}
	}
	if (!nal_unit_size_field) return GF_ISOM_INVALID_FILE;

	//setup PS rewriter
	if (!mdia->nalu_ps_bs)
		mdia->nalu_ps_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
	gf_bs_seek(mdia->nalu_ps_bs, 0);

	//setup sample reader
	if (mdia->in_sample_buffer_alloc<sample->dataLength) {
		mdia->in_sample_buffer_alloc = sample->dataLength;
		mdia->in_sample_buffer = gf_realloc(mdia->in_sample_buffer, sample->dataLength);
	}
	memcpy(mdia->in_sample_buffer, sample->data, sample->dataLength);

	if (!mdia->nalu_parser) {
		mdia->nalu_parser = gf_bs_new(mdia->in_sample_buffer, sample->dataLength, GF_BITSTREAM_READ);
		if (!mdia->nalu_parser && sample->data) return GF_ISOM_INVALID_FILE;
	} else {
		e = gf_bs_reassign_buffer(mdia->nalu_parser, mdia->in_sample_buffer, sample->dataLength);
		if (e) return e;
	}
	//setup output
	if (!mdia->nalu_out_bs) {
		u8 *output;
		u32 outSize;
		mdia->nalu_out_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
		gf_bs_get_content(mdia->nalu_out_bs, &output, &outSize);
	}
	gf_bs_reassign_buffer(mdia->nalu_out_bs, sample->data, sample->alloc_size ? sample->alloc_size : sample->dataLength);

	/*rewrite start code with NALU delim*/
	if (rewrite_start_codes) {

		//we are SVC, don't write NALU delim, only insert VDRD NALU
		if (insert_vdrd_code) {
			if (is_hevc) {
				//spec is not clear here, we don't insert an NALU AU delimiter before the layer starts since it breaks openHEVC
//				insert_nalu_delim=0;
			} else {
				gf_bs_write_int(mdia->nalu_out_bs, 1, 32);
				gf_bs_write_int(mdia->nalu_out_bs, GF_AVC_NALU_VDRD , 8);
				insert_nalu_delim=0;
			}
		}

		//AVC/HEVC base, insert NALU delim
		if (insert_nalu_delim) {
			gf_bs_write_int(mdia->nalu_out_bs, 1, 32);
			if (is_hevc) {
#ifndef GPAC_DISABLE_HEVC
				gf_bs_write_int(mdia->nalu_out_bs, 0, 1);
				gf_bs_write_int(mdia->nalu_out_bs, GF_HEVC_NALU_ACCESS_UNIT, 6);
				gf_bs_write_int(mdia->nalu_out_bs, insert_vdrd_code ? 1 : 0, 6); //we should pick the layerID of the following nalus ...
				gf_bs_write_int(mdia->nalu_out_bs, 1, 3); //nuh_temporal_id_plus1 - cannot be 0, we use 1 by default, and overwrite it if needed at the end

				/*pic-type - by default we signal all slice types possible*/
				gf_bs_write_int(mdia->nalu_out_bs, 2, 3);
				gf_bs_write_int(mdia->nalu_out_bs, 0, 5);
#endif
			} else {
				gf_bs_write_int(mdia->nalu_out_bs, (sample->data[0] & 0x60) | GF_AVC_NALU_ACCESS_UNIT, 8);
				gf_bs_write_int(mdia->nalu_out_bs, 0xF0 , 8); /*7 "all supported NALUs" (=111) + rbsp trailing (10000)*/;
			}
		}
	}

	if (rewrite_ps) {
		Bool has_vps = GF_FALSE;
		//in inspect mode or single-layer mode just use the xPS from this layer
		if (extractor_mode == GF_ISOM_NALU_EXTRACT_DEFAULT) {
			if (scal) {
				for (i=0; i<scal->trackIDCount; i++) {
					GF_TrackBox *a_track = GetTrackbyID(mdia->mediaTrack->moov, scal->trackIDs[i]);
					GF_MPEGVisualSampleEntryBox *an_entry = NULL;
					if (a_track && a_track->Media && a_track->Media->information && a_track->Media->information->sampleTable && a_track->Media->information->sampleTable->SampleDescription)
						an_entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(a_track->Media->information->sampleTable->SampleDescription->child_boxes, 0);

					if (an_entry)
						nalu_merge_ps(mdia->nalu_ps_bs, rewrite_start_codes, nal_unit_size_field, an_entry, is_hevc, &has_vps);
				}
			}
		}
		nalu_merge_ps(mdia->nalu_ps_bs, rewrite_start_codes, nal_unit_size_field, entry, is_hevc, &has_vps);

		if (is_hevc) {
			/*little optimization if we are not asked to rewrite start codes: copy over the sample*/
			if (!rewrite_start_codes && !entry->lhvc_config && !scal) {
				if (! ps_transfered) {
					nal_type = (sample->data[nal_unit_size_field] & 0x7E) >> 1;
					//temp fix - if we detect xPS in the beginning of the sample do NOT copy the ps bitstream
					//this is not correct since we are not sure whether they are the same xPS or not, but it crashes openHEVC ...
					switch (nal_type) {
#ifndef GPAC_DISABLE_HEVC
					case GF_HEVC_NALU_VID_PARAM:
					case GF_HEVC_NALU_SEQ_PARAM:
					case GF_HEVC_NALU_PIC_PARAM:
						break;
#endif
					default:
						gf_bs_transfer(mdia->nalu_out_bs, mdia->nalu_ps_bs, GF_TRUE);
						break;
					}
				}
				gf_bs_write_data(mdia->nalu_out_bs, mdia->in_sample_buffer, sample->dataLength);
				gf_bs_get_content_no_truncate(mdia->nalu_out_bs, &sample->data, &sample->dataLength, &sample->alloc_size);
				return GF_OK;
			}
		}
	} else {
		ps_transfered = GF_TRUE;
	}

	/*little optimization if we are not asked to rewrite extractors or start codes: copy over the sample*/
	if (!scal && !rewrite_start_codes && !rewrite_ps && !force_sei_inspect) {
		if (! ps_transfered) {
			gf_bs_transfer(mdia->nalu_out_bs, mdia->nalu_ps_bs, GF_TRUE);
		}
		gf_bs_write_data(mdia->nalu_out_bs, mdia->in_sample_buffer, sample->dataLength);
		gf_bs_get_content_no_truncate(mdia->nalu_out_bs, &sample->data, &sample->dataLength, &sample->alloc_size);
		return GF_OK;
	}

	if (!mdia->tmp_nal_copy_buffer) {
		mdia->tmp_nal_copy_buffer = gf_malloc(sizeof(char) * 4096);
		mdia->tmp_nal_copy_buffer_alloc = 4096;
	}

	while (gf_bs_available(mdia->nalu_parser)) {
		nal_size = gf_bs_read_int(mdia->nalu_parser, 8*nal_unit_size_field);
		if (gf_bs_get_position(mdia->nalu_parser) + nal_size > sample->dataLength) {
			GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("Sample %u (size %u) rewrite: corrupted NAL Unit (size %u)\n", sampleNumber, sample->dataLength, nal_size));
			goto exit;
		}
		if (nal_size > mdia->tmp_nal_copy_buffer_alloc) {
			mdia->tmp_nal_copy_buffer_alloc = nal_size;
			mdia->tmp_nal_copy_buffer = (char*) gf_realloc(mdia->tmp_nal_copy_buffer, sizeof(char)*nal_size);
		}
		if (is_hevc) {
			nal_hdr = gf_bs_read_u16(mdia->nalu_parser);
			nal_type = (nal_hdr&0x7E00) >> 9;
		} else {
			nal_hdr = gf_bs_read_u8(mdia->nalu_parser);
			nal_type = nal_hdr & 0x1F;
		}

		if (is_hevc) {
#ifndef GPAC_DISABLE_HEVC
			GF_BitStream *write_to_bs = mdia->nalu_out_bs;
#endif
			if (!ps_transfered) {
				gf_bs_transfer(mdia->nalu_out_bs, mdia->nalu_ps_bs, GF_TRUE);
				ps_transfered = GF_TRUE;
			}
#ifndef GPAC_DISABLE_HEVC
			/*we already wrote this stuff*/
			if (nal_type==GF_HEVC_NALU_ACCESS_UNIT) {
				gf_bs_skip_bytes(mdia->nalu_parser, nal_size-2);
				continue;
			}
			switch (nal_type) {
			//extractor
			case 49:
				e = process_extractor(file, mdia, sampleNumber, sample->DTS, nal_size, nal_hdr, nal_unit_size_field, GF_TRUE, rewrite_ps, rewrite_start_codes, extractor_mode);
				if (e) goto exit;
				break;

			case GF_HEVC_NALU_SLICE_TSA_N:
			case GF_HEVC_NALU_SLICE_STSA_N:
			case GF_HEVC_NALU_SLICE_TSA_R:
			case GF_HEVC_NALU_SLICE_STSA_R:
				if (temporal_id < (nal_hdr & 0x7))
					temporal_id = (nal_hdr & 0x7);
				/*rewrite nal*/
				gf_bs_read_data(mdia->nalu_parser, mdia->tmp_nal_copy_buffer, nal_size-2);
				if (rewrite_start_codes)
					gf_bs_write_u32(mdia->nalu_out_bs, 1);
				else
					gf_bs_write_int(mdia->nalu_out_bs, nal_size, 8*nal_unit_size_field);

				gf_bs_write_u16(mdia->nalu_out_bs, nal_hdr);
				gf_bs_write_data(mdia->nalu_out_bs, mdia->tmp_nal_copy_buffer, nal_size-2);
				break;

			case GF_HEVC_NALU_SLICE_BLA_W_LP:
			case GF_HEVC_NALU_SLICE_BLA_W_DLP:
			case GF_HEVC_NALU_SLICE_BLA_N_LP:
			case GF_HEVC_NALU_SLICE_IDR_W_DLP:
			case GF_HEVC_NALU_SLICE_IDR_N_LP:
			case GF_HEVC_NALU_SLICE_CRA:
				//insert xPS before CRA/BLA
				if (check_cra_bla && !sample->IsRAP) {
					sample->IsRAP = sap_type_from_nal_type(nal_type);
					if (sei_suffix_bs) gf_bs_del(sei_suffix_bs);
					return gf_isom_nalu_sample_rewrite(mdia, sample, sampleNumber, entry);
				}
			default:
				/*rewrite nal*/
				if (nal_size<2) {
					GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[iso file] Invalid nal size %d in sample %d\n", nal_type, sampleNumber));
					e = GF_NON_COMPLIANT_BITSTREAM;
					goto exit;
				}
				gf_bs_read_data(mdia->nalu_parser, mdia->tmp_nal_copy_buffer, nal_size-2);

				if (nal_type==GF_HEVC_NALU_SEI_SUFFIX) {
					if (!sei_suffix_bs) sei_suffix_bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
					write_to_bs = sei_suffix_bs;
				}
				if (rewrite_start_codes)
					gf_bs_write_u32(write_to_bs, 1);
				else
					gf_bs_write_int(write_to_bs, nal_size, 8*nal_unit_size_field);

				gf_bs_write_u16(write_to_bs, nal_hdr);
				gf_bs_write_data(write_to_bs, mdia->tmp_nal_copy_buffer, nal_size-2);
			}
#endif
			//done with HEVC
			continue;
		}

		switch(nal_type) {
		case GF_AVC_NALU_ACCESS_UNIT:
			/*we already wrote this stuff*/
			gf_bs_skip_bytes(mdia->nalu_parser, nal_size-1);
			continue;
		//extractor
		case 31:
			e = process_extractor(file, mdia, sampleNumber, sample->DTS, nal_size, nal_hdr, nal_unit_size_field, GF_FALSE, rewrite_ps, rewrite_start_codes, extractor_mode);
			if (e) goto exit;
			break;
//		case GF_AVC_NALU_SEI:
		case GF_AVC_NALU_SEQ_PARAM:
		case GF_AVC_NALU_PIC_PARAM:
		case GF_AVC_NALU_SEQ_PARAM_EXT:
		case GF_AVC_NALU_SVC_SUBSEQ_PARAM:
			// we will rewrite the sps/pps if and only if there is no sps/pps in the bitstream
			if (!ps_transfered) {
				ps_transfered = GF_TRUE;
			}
		default:
			if (!ps_transfered) {
				gf_bs_transfer(mdia->nalu_out_bs, mdia->nalu_ps_bs, GF_TRUE);
				ps_transfered = GF_TRUE;
			}
			gf_bs_read_data(mdia->nalu_parser, mdia->tmp_nal_copy_buffer, nal_size-1);
			if (rewrite_start_codes)
				gf_bs_write_u32(mdia->nalu_out_bs, 1);
			else
				gf_bs_write_int(mdia->nalu_out_bs, nal_size, 8*nal_unit_size_field);

			gf_bs_write_u8(mdia->nalu_out_bs, nal_hdr);
			gf_bs_write_data(mdia->nalu_out_bs, mdia->tmp_nal_copy_buffer, nal_size-1);
		}
	}
	if (sei_suffix_bs) {
		gf_bs_transfer(mdia->nalu_out_bs, sei_suffix_bs, GF_FALSE);
	}

	/*done*/
	gf_bs_get_content_no_truncate(mdia->nalu_out_bs, &sample->data, &sample->dataLength, &sample->alloc_size);

	/*rewrite temporal ID of AU delim NALU (first one)*/
	if (rewrite_start_codes && is_hevc && temporal_id) {
		sample->data[6] = (sample->data[6] & 0xF8) | (temporal_id+1);
	}

exit:
	if (sei_suffix_bs) gf_bs_del(sei_suffix_bs);
	return e;
}
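/* The core transform applied by gf_isom_nalu_sample_rewrite() when
   GF_ISOM_NALU_EXTRACT_ANNEXB_FLAG is set: each NAL unit in an ISOBMFF sample
   is stored as an N-byte big-endian length followed by the payload, and the
   Annex B output replaces that length with the 0x00000001 start code. A
   self-contained sketch of the conversion, independent of the media box state
   above (hypothetical helper, kept under #if 0): */
#if 0
static u32 length_prefixed_to_annexb(const u8 *src, u32 src_len, u8 *dst, u32 nal_unit_size_field)
{
	u32 in = 0, out = 0;
	while (in + nal_unit_size_field <= src_len) {
		u32 k, nal_size = 0;
		for (k = 0; k < nal_unit_size_field; k++)
			nal_size = (nal_size << 8) | src[in++];
		if (in + nal_size > src_len) break; //corrupted NAL unit, same check as the rewrite loop
		dst[out++] = 0; dst[out++] = 0; dst[out++] = 0; dst[out++] = 1;
		memcpy(dst + out, src + in, nal_size);
		out += nal_size;
		in += nal_size;
	}
	//dst must be sized for the worst case: src_len plus 4-nal_unit_size_field extra bytes per NAL
	return out;
}
#endif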
GF_HEVCConfig *HEVC_DuplicateConfig(GF_HEVCConfig *cfg)
{
	u8 *data;
	u32 data_size;
	GF_HEVCConfig *new_cfg;
	GF_BitStream *bs;

	if (!cfg) return NULL;
	bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
	gf_odf_hevc_cfg_write_bs(cfg, bs);
	gf_bs_get_content(bs, &data, &data_size);
	gf_bs_del(bs);

	bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ);
	new_cfg = gf_odf_hevc_cfg_read_bs(bs, cfg->is_lhvc);
	new_cfg->is_lhvc = cfg->is_lhvc;
	gf_bs_del(bs);

	gf_free(data);
	return new_cfg;
}

GF_VVCConfig *VVC_DuplicateConfig(GF_VVCConfig *cfg)
{
	u8 *data;
	u32 data_size;
	GF_VVCConfig *new_cfg;
	GF_BitStream *bs;

	if (!cfg) return NULL;
	bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
	gf_odf_vvc_cfg_write_bs(cfg, bs);
	gf_bs_get_content(bs, &data, &data_size);
	gf_bs_del(bs);

	bs = gf_bs_new(data, data_size, GF_BITSTREAM_READ);
	new_cfg = gf_odf_vvc_cfg_read_bs(bs);
	gf_bs_del(bs);

	gf_free(data);
	return new_cfg;
}

static GF_AVCConfig *AVC_DuplicateConfig(GF_AVCConfig *cfg)
{
	u32 i, count;
	GF_NALUFFParam *p1, *p2;
	GF_AVCConfig *cfg_new;
	if (!cfg) return NULL;
	cfg_new = gf_odf_avc_cfg_new();
	cfg_new->AVCLevelIndication = cfg->AVCLevelIndication;
	cfg_new->AVCProfileIndication = cfg->AVCProfileIndication;
	cfg_new->configurationVersion = cfg->configurationVersion;
	cfg_new->nal_unit_size = cfg->nal_unit_size;
	cfg_new->profile_compatibility = cfg->profile_compatibility;
	cfg_new->complete_representation = cfg->complete_representation;
	cfg_new->chroma_bit_depth = cfg->chroma_bit_depth;
	cfg_new->luma_bit_depth = cfg->luma_bit_depth;
	cfg_new->chroma_format = cfg->chroma_format;

	count = gf_list_count(cfg->sequenceParameterSets);
	for (i=0; i<count; i++) {
		p1 = (GF_NALUFFParam*)gf_list_get(cfg->sequenceParameterSets, i);
		p2 = (GF_NALUFFParam*)gf_malloc(sizeof(GF_NALUFFParam));
		p2->size = p1->size;
		p2->id = p1->id;
		p2->data = (char *)gf_malloc(sizeof(char)*p1->size);
		memcpy(p2->data, p1->data, sizeof(char)*p1->size);
		gf_list_add(cfg_new->sequenceParameterSets, p2);
	}

	count = gf_list_count(cfg->pictureParameterSets);
	for (i=0; i<count; i++) {
		p1 = (GF_NALUFFParam*)gf_list_get(cfg->pictureParameterSets, i);
		p2 = (GF_NALUFFParam*)gf_malloc(sizeof(GF_NALUFFParam));
		p2->size = p1->size;
		p2->id = p1->id;
		p2->data = (char*)gf_malloc(sizeof(char)*p1->size);
		memcpy(p2->data, p1->data, sizeof(char)*p1->size);
		gf_list_add(cfg_new->pictureParameterSets, p2);
	}

	if (cfg->sequenceParameterSetExtensions) {
		cfg_new->sequenceParameterSetExtensions = gf_list_new();
		count = gf_list_count(cfg->sequenceParameterSetExtensions);
		for (i=0; i<count; i++) {
			p1 = (GF_NALUFFParam*)gf_list_get(cfg->sequenceParameterSetExtensions, i);
			p2 = (GF_NALUFFParam*)gf_malloc(sizeof(GF_NALUFFParam));
			p2->size = p1->size;
			p2->id = p1->id;
			p2->data = (char*)gf_malloc(sizeof(char)*p1->size);
			memcpy(p2->data, p1->data, sizeof(char)*p1->size);
			gf_list_add(cfg_new->sequenceParameterSetExtensions, p2);
		}
	}
	return cfg_new;
}

static void merge_avc_config(GF_AVCConfig *dst_cfg, GF_AVCConfig *src_cfg)
{
	GF_AVCConfig *cfg;
	if (!src_cfg || !dst_cfg) return;
	cfg = AVC_DuplicateConfig(src_cfg);
	if (!cfg) return;

	while (gf_list_count(cfg->sequenceParameterSets)) {
		GF_NALUFFParam *p = (GF_NALUFFParam*)gf_list_get(cfg->sequenceParameterSets, 0);
		gf_list_rem(cfg->sequenceParameterSets, 0);
		gf_list_insert(dst_cfg->sequenceParameterSets, p, 0);
	}
	while (gf_list_count(cfg->pictureParameterSets)) {
		GF_NALUFFParam *p = (GF_NALUFFParam*)gf_list_get(cfg->pictureParameterSets, 0);
		gf_list_rem(cfg->pictureParameterSets, 0);
		gf_list_insert(dst_cfg->pictureParameterSets, p, 0);
	}
	gf_odf_avc_cfg_del(cfg);
}

void merge_hevc_config(GF_HEVCConfig *dst_cfg, GF_HEVCConfig *src_cfg, Bool force_insert)
{
	GF_HEVCConfig *cfg = HEVC_DuplicateConfig(src_cfg);
	//merge all xPS
	u32 i, j, count = cfg->param_array ? gf_list_count(cfg->param_array) : 0;
	for (i=0; i<count; i++) {
		GF_NALUFFParamArray *ar_h = NULL;
		u32 count2 = dst_cfg->param_array ? gf_list_count(dst_cfg->param_array) : 0;
		GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(cfg->param_array, i);
		for (j=0; j<count2; j++) {
			ar_h = (GF_NALUFFParamArray*)gf_list_get(dst_cfg->param_array, j);
			if (ar_h->type==ar->type) {
				break;
			}
			ar_h = NULL;
		}
		if (!ar_h) {
			gf_list_add(dst_cfg->param_array, ar);
			gf_list_rem(cfg->param_array, i);
			count--;
			i--;
		} else {
			while (gf_list_count(ar->nalus)) {
				GF_NALUFFParam *p = (GF_NALUFFParam*)gf_list_get(ar->nalus, 0);
				gf_list_rem(ar->nalus, 0);
				if (force_insert)
					gf_list_insert(ar_h->nalus, p, 0);
				else
					gf_list_add(ar_h->nalus, p);
			}
		}
	}
	gf_odf_hevc_cfg_del(cfg);

#define CHECK_CODE(__code) if (dst_cfg->__code < src_cfg->__code) dst_cfg->__code = src_cfg->__code;

	CHECK_CODE(configurationVersion)
	CHECK_CODE(profile_idc)
	CHECK_CODE(profile_space)
	CHECK_CODE(tier_flag)
	CHECK_CODE(general_profile_compatibility_flags)
	CHECK_CODE(progressive_source_flag)
	CHECK_CODE(interlaced_source_flag)
	CHECK_CODE(constraint_indicator_flags)
	CHECK_CODE(level_idc)
	CHECK_CODE(min_spatial_segmentation_idc)
}

void merge_all_config(GF_AVCConfig *avc_cfg, GF_HEVCConfig *hevc_cfg, GF_MediaBox *mdia)
{
	u32 i;
	GF_TrackReferenceTypeBox *scal = NULL;
	Track_FindRef(mdia->mediaTrack, GF_ISOM_REF_SCAL, &scal);

	if (!scal) return;

	for (i=0; i<scal->trackIDCount; i++) {
		GF_TrackBox *a_track = GetTrackbyID(mdia->mediaTrack->moov, scal->trackIDs[i]);
		GF_MPEGVisualSampleEntryBox *an_entry = NULL;
		if (a_track && a_track->Media && a_track->Media->information && a_track->Media->information->sampleTable && a_track->Media->information->sampleTable->SampleDescription)
			an_entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(a_track->Media->information->sampleTable->SampleDescription->child_boxes, 0);

		if (!an_entry) continue;

		if (avc_cfg && an_entry->svc_config && an_entry->svc_config->config)
			merge_avc_config(avc_cfg, an_entry->svc_config->config);
		if (avc_cfg && an_entry->mvc_config && an_entry->mvc_config->config)
			merge_avc_config(avc_cfg, an_entry->mvc_config->config);
		if (avc_cfg && an_entry->avc_config && an_entry->avc_config->config)
			merge_avc_config(avc_cfg, an_entry->avc_config->config);
		if (hevc_cfg && an_entry->lhvc_config && an_entry->lhvc_config->config)
			merge_hevc_config(hevc_cfg, an_entry->lhvc_config->config, GF_TRUE);
		if (hevc_cfg && an_entry->hevc_config && an_entry->hevc_config->config)
			merge_hevc_config(hevc_cfg, an_entry->hevc_config->config, GF_TRUE);
	}

	if (hevc_cfg) hevc_cfg->is_lhvc = GF_FALSE;
}
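/* merge_hevc_config() above folds the xPS arrays of src_cfg into dst_cfg and,
   via the CHECK_CODE macro, raises dst_cfg's profile/tier/level codes to the
   maximum of both, so the merged hvcC still covers every layer. A hedged usage
   sketch (the two config pointers are assumptions), kept under #if 0: */
#if 0
static void merge_lhvc_into_hevc(GF_HEVCConfig *hvcc, GF_HEVCConfig *lhvcc)
{
	//GF_FALSE: append the enhancement-layer parameter sets after the base ones
	merge_hevc_config(hvcc, lhvcc, GF_FALSE);
	//hvcc now advertises the highest level_idc / profile codes of the two configs
}
#endif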
void AVC_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *avc, GF_MediaBox *mdia)
{
	GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)avc, GF_FALSE);

	if (avc->emul_esd) gf_odf_desc_del((GF_Descriptor *)avc->emul_esd);
	avc->emul_esd = gf_odf_desc_esd_new(2);
	avc->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	/*AVC OTI is 0x21, AVC parameter set stream OTI (not supported in gpac) is 0x22, SVC OTI is 0x24*/
	/*if we have an SVC-only (resp. MVC-only) stream, set objectTypeIndication to the SVC (resp. MVC) OTI; otherwise use the AVC OTI*/
	if (avc->svc_config && !avc->avc_config)
		avc->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_SVC;
	else if (avc->mvc_config && !avc->avc_config)
		avc->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_MVC;
	else
		avc->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_AVC;

	if (btrt) {
		avc->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB;
		avc->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate;
		avc->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate;
	}
	GF_MPEG4ExtensionDescriptorsBox *mdesc = (GF_MPEG4ExtensionDescriptorsBox *) gf_isom_box_find_child(avc->child_boxes, GF_ISOM_BOX_TYPE_M4DS);
	if (mdesc) {
		u32 i=0;
		GF_Descriptor *desc,*clone;
		i=0;
		while ((desc = (GF_Descriptor *)gf_list_enum(mdesc->descriptors, &i))) {
			clone = NULL;
			gf_odf_desc_copy(desc, &clone);
			if (gf_odf_desc_add_desc((GF_Descriptor *)avc->emul_esd, clone) != GF_OK)
				gf_odf_desc_del(clone);
		}
	}
	if (avc->avc_config) {
		GF_AVCConfig *avcc = avc->avc_config->config ? AVC_DuplicateConfig(avc->avc_config->config) : NULL;
		/*merge SVC config*/
		if (avc->svc_config) {
			merge_avc_config(avcc, avc->svc_config->config);
		}
		/*merge MVC config*/
		if (avc->mvc_config) {
			merge_avc_config(avcc, avc->mvc_config->config);
		}
		if (avcc) {
			if (mdia) merge_all_config(avcc, NULL, mdia);
			gf_odf_avc_cfg_write(avcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
			gf_odf_avc_cfg_del(avcc);
		}
	} else if (avc->svc_config) {
		GF_AVCConfig *svcc = AVC_DuplicateConfig(avc->svc_config->config);
		if (mdia) merge_all_config(svcc, NULL, mdia);
		gf_odf_avc_cfg_write(svcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
		gf_odf_avc_cfg_del(svcc);
	} else if (avc->mvc_config) {
		GF_AVCConfig *mvcc = AVC_DuplicateConfig(avc->mvc_config->config);
		if (mdia) merge_all_config(mvcc, NULL, mdia);
		gf_odf_avc_cfg_write(mvcc, &avc->emul_esd->decoderConfig->decoderSpecificInfo->data, &avc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
		gf_odf_avc_cfg_del(mvcc);
	}
}

void AVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *avc)
{
	AVC_RewriteESDescriptorEx(avc, NULL);
}

void HEVC_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *hevc, GF_MediaBox *mdia)
{
	GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)hevc, GF_FALSE);

	if (hevc->emul_esd) gf_odf_desc_del((GF_Descriptor *)hevc->emul_esd);
	hevc->emul_esd = gf_odf_desc_esd_new(2);
	hevc->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	hevc->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_HEVC;

	if (btrt) {
		hevc->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB;
		hevc->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate;
		hevc->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate;
	}
	GF_MPEG4ExtensionDescriptorsBox *mdesc = (GF_MPEG4ExtensionDescriptorsBox *) gf_isom_box_find_child(hevc->child_boxes, GF_ISOM_BOX_TYPE_M4DS);
	if (mdesc) {
		u32 i=0;
		GF_Descriptor *desc,*clone;
		i=0;
		while ((desc = (GF_Descriptor *)gf_list_enum(mdesc->descriptors, &i))) {
			clone = NULL;
			gf_odf_desc_copy(desc, &clone);
			if (gf_odf_desc_add_desc((GF_Descriptor *)hevc->emul_esd, clone) != GF_OK)
				gf_odf_desc_del(clone);
		}
	}
	if (hevc->hevc_config || hevc->lhvc_config) {
		GF_HEVCConfig *hcfg = HEVC_DuplicateConfig(hevc->hevc_config ? hevc->hevc_config->config : hevc->lhvc_config->config);

		if (hevc->hevc_config && hevc->lhvc_config) {
			//merge LHVC config to HEVC conf, so we add entry rather than insert
			merge_hevc_config(hcfg, hevc->lhvc_config->config, GF_FALSE);
		}

		if (mdia) merge_all_config(NULL, hcfg, mdia);

		if (hcfg) {
			if (mdia && ((mdia->mediaTrack->extractor_mode&0x0000FFFF) != GF_ISOM_NALU_EXTRACT_INSPECT)) {
				hcfg->is_lhvc=GF_FALSE;
			}

			gf_odf_hevc_cfg_write(hcfg, &hevc->emul_esd->decoderConfig->decoderSpecificInfo->data, &hevc->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
			gf_odf_hevc_cfg_del(hcfg);
		}
	}
}

void HEVC_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *hevc)
{
	HEVC_RewriteESDescriptorEx(hevc, NULL);
}

GF_Err AVC_HEVC_UpdateESD(GF_MPEGVisualSampleEntryBox *avc, GF_ESD *esd)
{
	GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)avc, GF_TRUE);

	GF_MPEG4ExtensionDescriptorsBox *mdesc = (GF_MPEG4ExtensionDescriptorsBox *) gf_isom_box_find_child(avc->child_boxes, GF_ISOM_BOX_TYPE_M4DS);
	if (mdesc) {
		gf_isom_box_del_parent(&avc->child_boxes, (GF_Box *) mdesc);
	}
	btrt->avgBitrate = esd->decoderConfig->avgBitrate;
	btrt->maxBitrate = esd->decoderConfig->maxBitrate;
	btrt->bufferSizeDB = esd->decoderConfig->bufferSizeDB;

	if (gf_list_count(esd->IPIDataSet)
	        || gf_list_count(esd->IPMPDescriptorPointers)
	        || esd->langDesc
	        || gf_list_count(esd->extensionDescriptors)
	        || esd->ipiPtr || esd->qos || esd->RegDescriptor) {

		mdesc = (GF_MPEG4ExtensionDescriptorsBox *) gf_isom_box_new_parent(&avc->child_boxes, GF_ISOM_BOX_TYPE_M4DS);
		if (!mdesc) return GF_OUT_OF_MEM;

		if (esd->RegDescriptor) {
			gf_list_add(mdesc->descriptors, esd->RegDescriptor);
			esd->RegDescriptor = NULL;
		}
		if (esd->qos) {
			gf_list_add(mdesc->descriptors, esd->qos);
			esd->qos = NULL;
		}
		if (esd->ipiPtr) {
			gf_list_add(mdesc->descriptors, esd->ipiPtr);
			esd->ipiPtr= NULL;
		}

		while (gf_list_count(esd->IPIDataSet)) {
			GF_Descriptor *desc = (GF_Descriptor *)gf_list_get(esd->IPIDataSet, 0);
			gf_list_rem(esd->IPIDataSet, 0);
			gf_list_add(mdesc->descriptors, desc);
		}
		while (gf_list_count(esd->IPMPDescriptorPointers)) {
			GF_Descriptor *desc = (GF_Descriptor *)gf_list_get(esd->IPMPDescriptorPointers, 0);
			gf_list_rem(esd->IPMPDescriptorPointers, 0);
			gf_list_add(mdesc->descriptors, desc);
		}
		if (esd->langDesc) {
			gf_list_add(mdesc->descriptors, esd->langDesc);
			esd->langDesc = NULL;
		}
		while (gf_list_count(esd->extensionDescriptors)) {
			GF_Descriptor *desc = (GF_Descriptor *)gf_list_get(esd->extensionDescriptors, 0);
			gf_list_rem(esd->extensionDescriptors, 0);
			gf_list_add(mdesc->descriptors, desc);
		}
	}

	if (!avc->lhvc_config && (esd->decoderConfig->objectTypeIndication==GF_CODECID_HEVC)) {
		if (!avc->hevc_config) {
			avc->hevc_config = (GF_HEVCConfigurationBox *)gf_isom_box_new_parent(&avc->child_boxes, GF_ISOM_BOX_TYPE_HVCC);
			if (!avc->hevc_config) return GF_OUT_OF_MEM;
		}
		if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
			if (avc->hevc_config->config) gf_odf_hevc_cfg_del(avc->hevc_config->config);
			avc->hevc_config->config = gf_odf_hevc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength, GF_FALSE);
		}
	} else if (!avc->svc_config && !avc->mvc_config && (esd->decoderConfig->objectTypeIndication==GF_CODECID_AVC)) {
		if (!avc->avc_config) {
			avc->avc_config = (GF_AVCConfigurationBox *)gf_isom_box_new_parent(&avc->child_boxes, GF_ISOM_BOX_TYPE_AVCC);
			if (!avc->avc_config) return GF_OUT_OF_MEM;
		}
		if (esd->decoderConfig->decoderSpecificInfo && esd->decoderConfig->decoderSpecificInfo->data) {
			if (avc->avc_config->config) gf_odf_avc_cfg_del(avc->avc_config->config);
			avc->avc_config->config = gf_odf_avc_cfg_read(esd->decoderConfig->decoderSpecificInfo->data, esd->decoderConfig->decoderSpecificInfo->dataLength);
		}
	}

	gf_odf_desc_del((GF_Descriptor *)esd);
	if (avc->hevc_config) {
		HEVC_RewriteESDescriptor(avc);
	} else {
		AVC_RewriteESDescriptor(avc);
	}
	return GF_OK;
}

#if !defined(GPAC_DISABLE_AV_PARSERS) && !defined(GPAC_DISABLE_HEVC)
void gf_hevc_parse_ps(GF_HEVCConfig* hevccfg, HEVCState* hevc, u32 nal_type)
{
	u32 i, j;
	if (!hevccfg) return;

	for (i = 0; i < gf_list_count(hevccfg->param_array); i++) {
		GF_NALUFFParamArray* ar = gf_list_get(hevccfg->param_array, i);
		if (ar->type != nal_type) continue;
		for (j = 0; j < gf_list_count(ar->nalus); j++) {
			u8 ntype, tid, lid;
			GF_NALUFFParam* sl = gf_list_get(ar->nalus, j);
			gf_hevc_parse_nalu(sl->data, sl->size, hevc, &ntype, &tid, &lid);
		}
	}
}
#endif
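/* gf_hevc_parse_ps() above feeds every parameter set of one NAL type from an
   hvcC record into an HEVCState, so later slice parsing can resolve SPS/PPS
   ids. A hedged usage sketch (cfg is an assumption; VPS first, then SPS, then
   PPS, since each level references the previous one), kept under #if 0: */
#if 0
static void load_all_ps(GF_HEVCConfig *cfg, HEVCState *hevc)
{
	memset(hevc, 0, sizeof(HEVCState));
	gf_hevc_parse_ps(cfg, hevc, GF_HEVC_NALU_VID_PARAM);
	gf_hevc_parse_ps(cfg, hevc, GF_HEVC_NALU_SEQ_PARAM);
	gf_hevc_parse_ps(cfg, hevc, GF_HEVC_NALU_PIC_PARAM);
}
#endif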
static GF_Err gf_isom_check_mvc(GF_ISOFile *the_file, GF_TrackBox *trak, GF_MPEGVisualSampleEntryBox *entry)
{
	u32 i;
	GF_Box *mvci;
	GF_MultiviewGroupBox *mvcg;
	GF_ViewIdentifierBox *vwid;

	if (entry->mvc_config) {}
	else if (entry->avc_config && entry->avc_config->config && entry->avc_config->config->sequenceParameterSetExtensions) {}
	else return GF_OK;

	mvci = gf_isom_box_find_child(trak->Media->information->child_boxes, GF_ISOM_BOX_TYPE_MVCI);
	if (!mvci) {
		mvci = gf_isom_box_new_parent(&trak->Media->information->child_boxes, GF_ISOM_BOX_TYPE_MVCI);
		if (!mvci) return GF_OUT_OF_MEM;
	}
	mvcg = (GF_MultiviewGroupBox *) gf_isom_box_find_child(mvci->child_boxes, GF_ISOM_BOX_TYPE_MVCG);
	if (!mvcg) {
		mvcg = (GF_MultiviewGroupBox *)gf_isom_box_new_parent(&mvci->child_boxes, GF_ISOM_BOX_TYPE_MVCG);
		if (!mvcg) return GF_OUT_OF_MEM;
	}
	//this is very crude, we should try to parse the bitstream to fill these
	mvcg->num_entries = 0;
	if (mvcg->entries) {
		gf_free(mvcg->entries);
		mvcg->entries = NULL;
	}
	if (entry->avc_config) {
		if (gf_list_count(entry->avc_config->config->sequenceParameterSets))
			mvcg->num_entries += 1;
		mvcg->num_entries += gf_list_count(entry->avc_config->config->sequenceParameterSetExtensions);
	}
	if (entry->mvc_config && entry->mvc_config->config) {
		mvcg->num_entries += gf_list_count(entry->mvc_config->config->sequenceParameterSets);
	}
	mvcg->entries = gf_malloc(sizeof(MVCIEntry)*mvcg->num_entries);
	if (!mvcg->entries) return GF_OUT_OF_MEM;
	memset(mvcg->entries, 0, sizeof(MVCIEntry)*mvcg->num_entries);
	for (i=0; i<mvcg->num_entries; i++) {
		mvcg->entries[i].entry_type = 2;
		mvcg->entries[i].output_view_id = i;
	}
	vwid = (GF_ViewIdentifierBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_VWID);
	if (!vwid) {
		vwid = (GF_ViewIdentifierBox *)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_VWID);
		if (!vwid) return GF_OUT_OF_MEM;
	}
	if (vwid->views) gf_free(vwid->views);
	vwid->num_views = mvcg->num_entries;
	vwid->views = gf_malloc(sizeof(ViewIDEntry)*vwid->num_views);
	if (!vwid->views) return GF_OUT_OF_MEM;
	memset(vwid->views, 0, sizeof(ViewIDEntry)*vwid->num_views);

	for (i=0; i<vwid->num_views; i++) {
		vwid->views[i].base_view_type = i ? 0 : 1;
		vwid->views[i].view_id = i;
		vwid->views[i].view_order_index = i;
	}
	return GF_OK;
}

static GF_AV1Config* AV1_DuplicateConfig(GF_AV1Config const * const cfg)
{
	u32 i = 0;
	GF_AV1Config *out = gf_malloc(sizeof(GF_AV1Config));

	out->marker = cfg->marker;
	out->version = cfg->version;
	out->seq_profile = cfg->seq_profile;
	out->seq_level_idx_0 = cfg->seq_level_idx_0;
	out->seq_tier_0 = cfg->seq_tier_0;
	out->high_bitdepth = cfg->high_bitdepth;
	out->twelve_bit = cfg->twelve_bit;
	out->monochrome = cfg->monochrome;
	out->chroma_subsampling_x = cfg->chroma_subsampling_x;
	out->chroma_subsampling_y = cfg->chroma_subsampling_y;
	out->chroma_sample_position = cfg->chroma_sample_position;

	out->initial_presentation_delay_present = cfg->initial_presentation_delay_present;
	out->initial_presentation_delay_minus_one = cfg->initial_presentation_delay_minus_one;
	out->obu_array = gf_list_new();
	for (i = 0; i<gf_list_count(cfg->obu_array); ++i) {
		GF_AV1_OBUArrayEntry *dst = gf_malloc(sizeof(GF_AV1_OBUArrayEntry)), *src = gf_list_get(cfg->obu_array, i);
		dst->obu_length = src->obu_length;
		dst->obu_type = src->obu_type;
		dst->obu = gf_malloc((size_t)dst->obu_length);
		memcpy(dst->obu, src->obu, (size_t)src->obu_length);
		gf_list_add(out->obu_array, dst);
	}
	return out;
}

void AV1_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *av1, GF_MediaBox *mdia)
{
	GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)av1, GF_FALSE);

	if (av1->emul_esd) gf_odf_desc_del((GF_Descriptor *)av1->emul_esd);
	av1->emul_esd = gf_odf_desc_esd_new(2);
	av1->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	av1->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_AV1;

	if (btrt) {
		av1->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB;
		av1->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate;
		av1->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate;
	}

	if (av1->av1_config && av1->av1_config->config) {
		GF_AV1Config *av1_cfg = AV1_DuplicateConfig(av1->av1_config->config);
		if (av1_cfg) {
			gf_odf_av1_cfg_write(av1_cfg, &av1->emul_esd->decoderConfig->decoderSpecificInfo->data, &av1->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
			gf_odf_av1_cfg_del(av1_cfg);
		}
	}
}

void AV1_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *av1)
{
	AV1_RewriteESDescriptorEx(av1, NULL);
}

static GF_VPConfig* VP_DuplicateConfig(GF_VPConfig const * const cfg)
{
	GF_VPConfig *out = gf_odf_vp_cfg_new();
	if (out) {
		out->profile = cfg->profile;
		out->level = cfg->level;
		out->bit_depth = cfg->bit_depth;
		out->chroma_subsampling = cfg->chroma_subsampling;
		out->video_fullRange_flag = cfg->video_fullRange_flag;
		out->colour_primaries = cfg->colour_primaries;
		out->transfer_characteristics = cfg->transfer_characteristics;
		out->matrix_coefficients = cfg->matrix_coefficients;
	}
	return out;
}

void VP9_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *vp9, GF_MediaBox *mdia)
{
	GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)vp9, GF_FALSE);

	if (vp9->emul_esd) gf_odf_desc_del((GF_Descriptor *)vp9->emul_esd);
	vp9->emul_esd = gf_odf_desc_esd_new(2);
	vp9->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	if (vp9->type == GF_ISOM_BOX_TYPE_VP08)
		vp9->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_VP8;
	else
		vp9->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_VP9;

	if (btrt) {
		vp9->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB;
		vp9->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate;
		vp9->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate;
	}

	if (vp9->vp_config) {
		GF_VPConfig *vp9_cfg = VP_DuplicateConfig(vp9->vp_config->config);
		if (vp9_cfg) {
			gf_odf_vp_cfg_write(vp9_cfg, &vp9->emul_esd->decoderConfig->decoderSpecificInfo->data, &vp9->emul_esd->decoderConfig->decoderSpecificInfo->dataLength, GF_FALSE);
			gf_odf_vp_cfg_del(vp9_cfg);
		}
	}
}

void VP9_RewriteESDescriptor(GF_MPEGVisualSampleEntryBox *vp9)
{
	VP9_RewriteESDescriptorEx(vp9, NULL);
}

static GF_DOVIDecoderConfigurationRecord* DOVI_DuplicateConfig(GF_DOVIDecoderConfigurationRecord *cfg)
{
	GF_DOVIDecoderConfigurationRecord* out = NULL;
	GF_SAFEALLOC(out, GF_DOVIDecoderConfigurationRecord);
	if (!out) return NULL;

	out->dv_version_major = cfg->dv_version_major;
	out->dv_version_minor = cfg->dv_version_minor;
	out->dv_profile = cfg->dv_profile;
	out->dv_level = cfg->dv_level;
	out->rpu_present_flag = cfg->rpu_present_flag;
	out->el_present_flag = cfg->el_present_flag;
	out->bl_present_flag = cfg->bl_present_flag;

	return out;
}
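/* All the *_RewriteESDescriptor(Ex) helpers above follow one pattern: rebuild
   the emulated MPEG-4 ESD (emul_esd) from the codec-native sample entry, i.e.
   set streamType and objectTypeIndication, mirror the 'btrt' bitrate box, then
   serialize a duplicated decoder config into decoderSpecificInfo. A hedged
   sketch of that shared skeleton (the entry pointer and the comment-marked
   step are assumptions), kept under #if 0: */
#if 0
static void rewrite_esd_skeleton(GF_MPEGVisualSampleEntryBox *ent)
{
	if (ent->emul_esd) gf_odf_desc_del((GF_Descriptor *)ent->emul_esd);
	ent->emul_esd = gf_odf_desc_esd_new(2);
	ent->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	//codec-specific part: pick the GF_CODECID_* value and write a duplicated
	//config record into ent->emul_esd->decoderConfig->decoderSpecificInfo
}
#endif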
#ifndef GPAC_DISABLE_ISOM_WRITE
GF_EXPORT
GF_Err gf_isom_avc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex)
{
	GF_TrackBox *trak;
	GF_Err e;
	GF_SampleDescriptionBox *stsd;
	u32 dataRefIndex;
	GF_MPEGVisualSampleEntryBox *entry;

	e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE);
	if (e) return e;

	trak = gf_isom_get_track_from_file(the_file, trackNumber);
	if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM;

	//get or create the data ref
	e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex);
	if (e) return e;
	if (!dataRefIndex) {
		e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex);
		if (e) return e;
	}
	if (!the_file->keep_utc && !gf_sys_is_test_mode() )
		trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time();

	stsd = trak->Media->information->sampleTable->SampleDescription;
	//create a new entry
	entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_AVC1);
	if (!entry) return GF_OUT_OF_MEM;
	*outDescriptionIndex = gf_list_count(stsd->child_boxes);

	entry->avc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_AVCC);
	if (!entry->avc_config) return GF_OUT_OF_MEM;
	entry->avc_config->config = AVC_DuplicateConfig(cfg);
	if (!entry->avc_config->config) return GF_OUT_OF_MEM;
	entry->dataReferenceIndex = dataRefIndex;
	AVC_RewriteESDescriptor(entry);
	return e;
}

static GF_Err gf_isom_avc_config_update_ex(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_AVCConfig *cfg, u32 op_type, Bool keep_xps)
{
	GF_TrackBox *trak;
	GF_Err e;
	u32 i;
	GF_MPEGVisualSampleEntryBox *entry;

	e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE);
	if (e) return e;
	trak = gf_isom_get_track_from_file(the_file, trackNumber);
	if (!trak || !trak->Media || !DescriptionIndex) return GF_BAD_PARAM;
	entry = (GF_MPEGVisualSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1);
	if (!entry) return GF_BAD_PARAM;
	switch (entry->type) {
	case GF_ISOM_BOX_TYPE_AVC1:
	case GF_ISOM_BOX_TYPE_AVC2:
	case GF_ISOM_BOX_TYPE_AVC3:
	case GF_ISOM_BOX_TYPE_AVC4:
	case GF_ISOM_BOX_TYPE_SVC1:
	case GF_ISOM_BOX_TYPE_MVC1:
		break;
	default:
		return GF_BAD_PARAM;
	}

	switch (op_type) {
	/*AVCC replacement*/
	case 0:
		if (!cfg) return GF_BAD_PARAM;
		if (!entry->avc_config) {
			entry->avc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_AVCC);
			if (!entry->avc_config) return GF_OUT_OF_MEM;
		}
		if (entry->avc_config->config) gf_odf_avc_cfg_del(entry->avc_config->config);
		entry->avc_config->config = AVC_DuplicateConfig(cfg);
		if (!entry->avc_config->config) return GF_OUT_OF_MEM;
		entry->type = GF_ISOM_BOX_TYPE_AVC1;
		break;
	/*SVCC replacement*/
	case 1:
		if (!cfg) return GF_BAD_PARAM;
		if (!entry->svc_config) {
			entry->svc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_SVCC);
			if (!entry->svc_config) return GF_OUT_OF_MEM;
		}
		if (entry->svc_config->config) gf_odf_avc_cfg_del(entry->svc_config->config);
		entry->svc_config->config = AVC_DuplicateConfig(cfg);
		if (!entry->svc_config->config) return GF_OUT_OF_MEM;
		entry->type = GF_ISOM_BOX_TYPE_AVC1;
		break;
	/*SVCC replacement and AVC removal*/
	case 2:
		if (!cfg) return GF_BAD_PARAM;
		if (entry->avc_config) {
			gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->avc_config);
			entry->avc_config = NULL;
		}
		if (!entry->svc_config) {
			entry->svc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_SVCC);
			if (!entry->svc_config) return GF_OUT_OF_MEM;
		}
		if (entry->svc_config->config) gf_odf_avc_cfg_del(entry->svc_config->config);
		entry->svc_config->config = AVC_DuplicateConfig(cfg);
		if (!entry->svc_config->config) return GF_OUT_OF_MEM;
		entry->type = GF_ISOM_BOX_TYPE_SVC1;
		break;
	/*AVCC removal and switch to avc3*/
	case 3:
		if (!entry->avc_config || !entry->avc_config->config) return GF_BAD_PARAM;
		if (!keep_xps) {
			for (i=0; i<3; i++) {
				GF_AVCConfigurationBox *a_cfg = entry->avc_config;
				if (i==1) a_cfg = entry->svc_config;
				else if (i==2) a_cfg = entry->mvc_config;
				if (!a_cfg) continue;

				while (gf_list_count(a_cfg->config->sequenceParameterSets)) {
					GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(a_cfg->config->sequenceParameterSets, 0);
					gf_list_rem(a_cfg->config->sequenceParameterSets, 0);
					if (sl->data) gf_free(sl->data);
					gf_free(sl);
				}

				while (gf_list_count(a_cfg->config->pictureParameterSets)) {
					GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(a_cfg->config->pictureParameterSets, 0);
					gf_list_rem(a_cfg->config->pictureParameterSets, 0);
					if (sl->data) gf_free(sl->data);
					gf_free(sl);
				}

				while (gf_list_count(a_cfg->config->sequenceParameterSetExtensions)) {
					GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(a_cfg->config->sequenceParameterSetExtensions, 0);
					gf_list_rem(a_cfg->config->sequenceParameterSetExtensions, 0);
					if (sl->data) gf_free(sl->data);
					gf_free(sl);
				}
			}
		}

		if (entry->type == GF_ISOM_BOX_TYPE_AVC1)
			entry->type = GF_ISOM_BOX_TYPE_AVC3;
		else if (entry->type == GF_ISOM_BOX_TYPE_AVC2)
			entry->type = GF_ISOM_BOX_TYPE_AVC4;
		break;
	/*MVCC replacement*/
	case 4:
		if (!cfg) return GF_BAD_PARAM;
		if (!entry->mvc_config) {
			entry->mvc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_MVCC);
			if (!entry->mvc_config) return GF_OUT_OF_MEM;
		}
		if (entry->mvc_config->config) gf_odf_avc_cfg_del(entry->mvc_config->config);
		entry->mvc_config->config = AVC_DuplicateConfig(cfg);
		if (!entry->mvc_config->config) return GF_OUT_OF_MEM;
		entry->type = GF_ISOM_BOX_TYPE_AVC1;
		e = gf_isom_check_mvc(the_file, trak, entry);
		if (e) return e;
		break;
	/*MVCC replacement and AVC removal*/
	case 5:
		if (!cfg) return GF_BAD_PARAM;
		if (entry->avc_config) {
			gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->avc_config);
			entry->avc_config = NULL;
		}
		if (!entry->mvc_config) {
			entry->mvc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_MVCC);
			if (!entry->mvc_config) return GF_OUT_OF_MEM;
		}
		if (entry->mvc_config->config) gf_odf_avc_cfg_del(entry->mvc_config->config);
		entry->mvc_config->config = AVC_DuplicateConfig(cfg);
		if (!entry->mvc_config->config) return GF_OUT_OF_MEM;
		entry->type = GF_ISOM_BOX_TYPE_MVC1;
		e = gf_isom_check_mvc(the_file, trak, entry);
		if (e) return e;
		break;
	}
	AVC_RewriteESDescriptor(entry);
	return GF_OK;
}

GF_EXPORT
GF_Err gf_isom_avc_set_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, Bool keep_xps)
{
	return gf_isom_avc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, 3, keep_xps);
}

GF_EXPORT
GF_Err gf_isom_avc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_AVCConfig *cfg)
{
	return gf_isom_avc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, 0, GF_FALSE);
}

GF_EXPORT
GF_Err gf_isom_svc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_AVCConfig *cfg, Bool is_add)
{
	return gf_isom_avc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, is_add ? 1 : 2, GF_FALSE);
}

GF_Err gf_isom_mvc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_AVCConfig *cfg, Bool is_add)
{
	return gf_isom_avc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, is_add ? 4 : 5, GF_FALSE);
}

static GF_Err gf_isom_svc_mvc_config_del(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, Bool is_mvc)
{
	GF_TrackBox *trak;
	GF_Err e;
	GF_MPEGVisualSampleEntryBox *entry;
	e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE);
	if (e) return e;
	trak = gf_isom_get_track_from_file(the_file, trackNumber);
	if (!trak || !trak->Media || !DescriptionIndex) return GF_BAD_PARAM;
	entry = (GF_MPEGVisualSampleEntryBox *)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1);
	if (!entry) return GF_BAD_PARAM;
	switch (entry->type) {
	case GF_ISOM_BOX_TYPE_AVC1:
	case GF_ISOM_BOX_TYPE_AVC2:
	case GF_ISOM_BOX_TYPE_AVC3:
	case GF_ISOM_BOX_TYPE_AVC4:
	case GF_ISOM_BOX_TYPE_SVC1:
	case GF_ISOM_BOX_TYPE_MVC1:
		break;
	default:
		return GF_BAD_PARAM;
	}

	if (is_mvc && entry->mvc_config) {
		gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->mvc_config);
		entry->mvc_config = NULL;
	}
	else if (!is_mvc && entry->svc_config) {
		gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->svc_config);
		entry->svc_config = NULL;
	}
	AVC_RewriteESDescriptor(entry);
	return GF_OK;
}

GF_Err gf_isom_svc_config_del(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex)
{
	return gf_isom_svc_mvc_config_del(the_file, trackNumber, DescriptionIndex, GF_FALSE);
}

GF_Err gf_isom_mvc_config_del(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex)
{
	return gf_isom_svc_mvc_config_del(the_file, trackNumber, DescriptionIndex, GF_TRUE);
}

static GF_Err gf_isom_svc_mvc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AVCConfig *cfg, Bool is_mvc, char *URLname, char *URNname, u32 *outDescriptionIndex)
{
	GF_TrackBox *trak;
	GF_Err e;
	u32 dataRefIndex;
	GF_SampleDescriptionBox *stsd;
	GF_MPEGVisualSampleEntryBox *entry;

	e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE);
	if (e) return e;
	trak = gf_isom_get_track_from_file(the_file, trackNumber);
	if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM;

	//get or create the data ref
	e = Media_FindDataRef(trak->Media->information->dataInformation->dref, URLname, URNname, &dataRefIndex);
	if (e) return e;
	if (!dataRefIndex) {
		e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, URLname, URNname, &dataRefIndex);
		if
(e) return e; } if (!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry if (is_mvc) { entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_MVC1); if (!entry) return GF_OUT_OF_MEM; entry->mvc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_MVCC); if (!entry->mvc_config) return GF_OUT_OF_MEM; entry->mvc_config->config = AVC_DuplicateConfig(cfg); if (!entry->mvc_config->config) return GF_OUT_OF_MEM; } else { entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes,GF_ISOM_BOX_TYPE_SVC1); if (!entry) return GF_OUT_OF_MEM; entry->svc_config = (GF_AVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes,GF_ISOM_BOX_TYPE_SVCC); if (!entry->svc_config) return GF_OUT_OF_MEM; entry->svc_config->config = AVC_DuplicateConfig(cfg); if (!entry->svc_config->config) return GF_OUT_OF_MEM; } entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); AVC_RewriteESDescriptor(entry); return e; } GF_EXPORT GF_Err gf_isom_svc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { return gf_isom_svc_mvc_config_new(the_file, trackNumber, cfg, GF_FALSE, (char *) URLname, (char *) URNname,outDescriptionIndex); } GF_EXPORT GF_Err gf_isom_mvc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { return gf_isom_svc_mvc_config_new(the_file, trackNumber, cfg, GF_TRUE, (char *) URLname, (char *) URNname,outDescriptionIndex); } GF_EXPORT GF_Err gf_isom_hevc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_HEVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { GF_TrackBox *trak; GF_Err e; u32 dataRefIndex; GF_SampleDescriptionBox *stsd; GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; } if (!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_HVC1); if (!entry) return GF_OUT_OF_MEM; entry->hevc_config = (GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_HVCC); if (!entry->hevc_config) return GF_OUT_OF_MEM; entry->hevc_config->config = HEVC_DuplicateConfig(cfg); if (!entry->hevc_config->config) return GF_OUT_OF_MEM; entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); HEVC_RewriteESDescriptor(entry); return e; } GF_EXPORT GF_Err gf_isom_vvc_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_VVCConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { GF_TrackBox *trak; GF_Err e; u32 dataRefIndex; 
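/* Hedged usage sketch (illustrative only; file, trackNum, vvc_cfg and di are
   hypothetical caller-side names): like the AVC/HEVC variants above, this
   function appends a new sample description and returns its 1-based index:
       u32 di;
       e = gf_isom_vvc_config_new(file, trackNum, vvc_cfg, NULL, NULL, &di);
   passing NULL for URLname/URNname keeps the media data self-contained. */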
GF_SampleDescriptionBox *stsd; GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; } if (!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry entry = (GF_MPEGVisualSampleEntryBox *) gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_VVC1); if (!entry) return GF_OUT_OF_MEM; entry->vvc_config = (GF_VVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_VVCC); if (!entry->vvc_config) return GF_OUT_OF_MEM; entry->vvc_config->config = VVC_DuplicateConfig(cfg); if (!entry->vvc_config->config) return GF_OUT_OF_MEM; entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); return e; } GF_EXPORT GF_Err gf_isom_vp_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_VPConfig *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex, u32 vpx_type) { GF_TrackBox *trak; GF_Err e; u32 dataRefIndex; GF_SampleDescriptionBox *stsd; GF_MPEGVisualSampleEntryBox *entry; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; } if (!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry entry = (GF_MPEGVisualSampleEntryBox *)gf_isom_box_new_parent(&stsd->child_boxes, vpx_type); if (!entry) return GF_OUT_OF_MEM; entry->vp_config = (GF_VPConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_VPCC); if (!entry->vp_config) return GF_OUT_OF_MEM; entry->vp_config->config = VP_DuplicateConfig(cfg); if (!entry->vp_config->config) return GF_OUT_OF_MEM; strncpy(entry->compressor_name, "\012VPC Coding", sizeof(entry->compressor_name)-1); entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); return e; } GF_EXPORT GF_Err gf_isom_av1_config_new(GF_ISOFile *the_file, u32 trackNumber, GF_AV1Config *cfg, const char *URLname, const char *URNname, u32 *outDescriptionIndex) { GF_TrackBox *trak; GF_Err e; u32 dataRefIndex; GF_MPEGVisualSampleEntryBox *entry; GF_SampleDescriptionBox *stsd; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !cfg) return GF_BAD_PARAM; //get or create the data ref e = Media_FindDataRef(trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; if (!dataRefIndex) { e = Media_CreateDataRef(the_file, 
trak->Media->information->dataInformation->dref, (char *)URLname, (char *)URNname, &dataRefIndex); if (e) return e; } if (!the_file->keep_utc) trak->Media->mediaHeader->modificationTime = gf_isom_get_mp4time(); stsd = trak->Media->information->sampleTable->SampleDescription; //create a new entry entry = (GF_MPEGVisualSampleEntryBox *)gf_isom_box_new_parent(&stsd->child_boxes, GF_ISOM_BOX_TYPE_AV01); if (!entry) return GF_OUT_OF_MEM; entry->av1_config = (GF_AV1ConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_AV1C); if (!entry->av1_config) return GF_OUT_OF_MEM; entry->av1_config->config = AV1_DuplicateConfig(cfg); if (!entry->av1_config->config) return GF_OUT_OF_MEM; entry->dataReferenceIndex = dataRefIndex; *outDescriptionIndex = gf_list_count(stsd->child_boxes); return e; } typedef enum { GF_ISOM_HVCC_UPDATE = 0, GF_ISOM_HVCC_SET_INBAND, GF_ISOM_HVCC_SET_TILE, GF_ISOM_HVCC_SET_TILE_BASE_TRACK, GF_ISOM_HVCC_SET_LHVC, GF_ISOM_HVCC_SET_LHVC_WITH_BASE, GF_ISOM_HVCC_SET_LHVC_WITH_BASE_BACKWARD, GF_ISOM_HVCC_SET_HEVC_TILE_BASE, GF_ISOM_LHCC_SET_INBAND } HevcConfigUpdateType; static Bool nalu_cleanup_config(GF_List *param_array, Bool set_inband, Bool keep_xps) { u32 i; Bool array_incomplete = set_inband; if (!param_array) return 0; for (i=0; i<gf_list_count(param_array); i++) { GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(param_array, i); /*we want to force inband signaling*/ if (set_inband) { ar->array_completeness = 0; if (keep_xps) { array_incomplete=1; continue; } while (gf_list_count(ar->nalus)) { GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(ar->nalus, 0); gf_list_rem(ar->nalus, 0); if (sl->data) gf_free(sl->data); gf_free(sl); } gf_list_del(ar->nalus); gf_free(ar); ar=NULL; gf_list_rem(param_array, i); i--; continue; } if (ar && !ar->array_completeness) array_incomplete = 1; } return array_incomplete; } static GF_Err gf_isom_hevc_config_update_ex(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_HEVCConfig *cfg, u32 operand_type, Bool keep_xps) { u32 array_incomplete; GF_TrackBox *trak; GF_Err e; GF_MPEGVisualSampleEntryBox *entry; GF_SampleDescriptionBox *stsd; e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE); if (e) return e; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return GF_BAD_PARAM; stsd = trak->Media->information->sampleTable->SampleDescription; entry = (GF_MPEGVisualSampleEntryBox *)gf_list_get(stsd->child_boxes, DescriptionIndex-1); if (!entry) return GF_BAD_PARAM; switch (entry->type) { case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_LHV1: case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_HVT1: break; default: return GF_BAD_PARAM; } if (operand_type == GF_ISOM_HVCC_SET_TILE_BASE_TRACK) { if (entry->type==GF_ISOM_BOX_TYPE_HVC1) entry->type = GF_ISOM_BOX_TYPE_HVC2; else if (entry->type==GF_ISOM_BOX_TYPE_HEV1) entry->type = GF_ISOM_BOX_TYPE_HEV2; } else if (operand_type == GF_ISOM_HVCC_SET_TILE) { if (!entry->hevc_config) entry->hevc_config = (GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_HVCC); if (!entry->hevc_config) return GF_OUT_OF_MEM; if (entry->hevc_config->config) gf_odf_hevc_cfg_del(entry->hevc_config->config); entry->hevc_config->config = NULL; entry->type = GF_ISOM_BOX_TYPE_HVT1; } else if (operand_type < GF_ISOM_HVCC_SET_LHVC) { if ((operand_type != GF_ISOM_HVCC_SET_INBAND) && !entry->hevc_config) { entry->hevc_config = 
(GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_HVCC); if (!entry->hevc_config) return GF_OUT_OF_MEM; } if (cfg) { if (entry->hevc_config->config) gf_odf_hevc_cfg_del(entry->hevc_config->config); entry->hevc_config->config = HEVC_DuplicateConfig(cfg); } else { operand_type=GF_ISOM_HVCC_SET_INBAND; } array_incomplete = (operand_type==GF_ISOM_HVCC_SET_INBAND) ? 1 : 0; if (entry->hevc_config && nalu_cleanup_config(entry->hevc_config->config ? entry->hevc_config->config->param_array : NULL, (operand_type==GF_ISOM_HVCC_SET_INBAND) ? GF_TRUE:GF_FALSE, keep_xps) ) { array_incomplete=1; } if (entry->lhvc_config && nalu_cleanup_config(entry->lhvc_config->config ? entry->lhvc_config->config->param_array : NULL, (operand_type==GF_ISOM_HVCC_SET_INBAND), keep_xps) ) array_incomplete=1; switch (entry->type) { case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC1: entry->type = array_incomplete ? GF_ISOM_BOX_TYPE_HEV1 : GF_ISOM_BOX_TYPE_HVC1; break; case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_HVC2: entry->type = array_incomplete ? GF_ISOM_BOX_TYPE_HEV2 : GF_ISOM_BOX_TYPE_HVC2; break; case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_LHV1: entry->type = array_incomplete ? GF_ISOM_BOX_TYPE_LHE1 : GF_ISOM_BOX_TYPE_LHV1; break; } } else { /*SVCC replacement/removal with HEVC base, backward compatible signaling*/ if ((operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE_BACKWARD) || (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE) || (operand_type==GF_ISOM_HVCC_SET_HEVC_TILE_BASE) ) { if (!entry->hevc_config) return GF_BAD_PARAM; if (!cfg) { if (entry->lhvc_config) { gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->lhvc_config); entry->lhvc_config = NULL; } if (entry->type==GF_ISOM_BOX_TYPE_LHE1) entry->type = (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE) ? GF_ISOM_BOX_TYPE_HEV2 : GF_ISOM_BOX_TYPE_HEV1; else if (entry->type==GF_ISOM_BOX_TYPE_HEV1) entry->type = (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE) ? GF_ISOM_BOX_TYPE_HEV2 : GF_ISOM_BOX_TYPE_HEV1; else entry->type = (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE) ? 
GF_ISOM_BOX_TYPE_HVC2 : GF_ISOM_BOX_TYPE_HVC1; } else { if (operand_type != GF_ISOM_HVCC_SET_HEVC_TILE_BASE) { if (!entry->lhvc_config) { entry->lhvc_config = (GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_LHVC); if (!entry->lhvc_config) return GF_OUT_OF_MEM; } if (entry->lhvc_config->config) gf_odf_hevc_cfg_del(entry->lhvc_config->config); entry->lhvc_config->config = HEVC_DuplicateConfig(cfg); if (!entry->lhvc_config->config) return GF_OUT_OF_MEM; } if (operand_type==GF_ISOM_HVCC_SET_LHVC_WITH_BASE_BACKWARD) { if (entry->type==GF_ISOM_BOX_TYPE_HEV2) entry->type = GF_ISOM_BOX_TYPE_HEV1; else entry->type = GF_ISOM_BOX_TYPE_HVC1; } else { if (entry->type==GF_ISOM_BOX_TYPE_HEV1) entry->type = GF_ISOM_BOX_TYPE_HEV2; else entry->type = GF_ISOM_BOX_TYPE_HVC2; } } } /*LHEVC track without base*/ else if (operand_type==GF_ISOM_HVCC_SET_LHVC) { if (entry->hevc_config) { gf_isom_box_del_parent(&entry->child_boxes, (GF_Box*)entry->hevc_config); entry->hevc_config=NULL; } if (!cfg) return GF_BAD_PARAM; if (!entry->lhvc_config) { entry->lhvc_config = (GF_HEVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_LHVC); if (!entry->lhvc_config) return GF_OUT_OF_MEM; } if (entry->lhvc_config->config) gf_odf_hevc_cfg_del(entry->lhvc_config->config); entry->lhvc_config->config = HEVC_DuplicateConfig(cfg); if (!entry->lhvc_config->config) return GF_OUT_OF_MEM; if ((entry->type==GF_ISOM_BOX_TYPE_HEV1) || (entry->type==GF_ISOM_BOX_TYPE_HEV2)) entry->type = GF_ISOM_BOX_TYPE_LHE1; else entry->type = GF_ISOM_BOX_TYPE_LHV1; } /*LHEVC inband, no config change*/ else if (operand_type==GF_ISOM_LHCC_SET_INBAND) { entry->type = GF_ISOM_BOX_TYPE_LHE1; } } HEVC_RewriteESDescriptor(entry); return GF_OK; } GF_EXPORT GF_Err gf_isom_hevc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_HEVCConfig *cfg) { return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_UPDATE, GF_FALSE); } GF_EXPORT GF_Err gf_isom_hevc_set_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, Bool keep_xps) { return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, GF_ISOM_HVCC_SET_INBAND, keep_xps); } GF_EXPORT GF_Err gf_isom_lhvc_force_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, GF_ISOM_LHCC_SET_INBAND, GF_FALSE); } GF_EXPORT GF_Err gf_isom_hevc_set_tile_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_HEVCConfig *cfg, Bool is_base_track) { return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, is_base_track ? 
GF_ISOM_HVCC_SET_TILE_BASE_TRACK : GF_ISOM_HVCC_SET_TILE, GF_FALSE);
}

GF_EXPORT
GF_Err gf_isom_lhvc_config_update(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_HEVCConfig *cfg, GF_ISOMLHEVCTrackType track_type)
{
	if (cfg) cfg->is_lhvc = GF_TRUE;
	switch (track_type) {
	case GF_ISOM_LEHVC_ONLY:
		return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_LHVC, GF_FALSE);
	case GF_ISOM_LEHVC_WITH_BASE:
		return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_LHVC_WITH_BASE, GF_FALSE);
	case GF_ISOM_LEHVC_WITH_BASE_BACKWARD:
		return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_LHVC_WITH_BASE_BACKWARD, GF_FALSE);
	case GF_ISOM_HEVC_TILE_BASE:
		return gf_isom_hevc_config_update_ex(the_file, trackNumber, DescriptionIndex, cfg, GF_ISOM_HVCC_SET_HEVC_TILE_BASE, GF_FALSE);
	default:
		return GF_BAD_PARAM;
	}
}

typedef enum {
	GF_ISOM_VVCC_UPDATE = 0,
	GF_ISOM_VVCC_SET_INBAND,
} VvcConfigUpdateType;

static GF_Err gf_isom_vvc_config_update_ex(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, GF_VVCConfig *cfg, u32 operand_type, Bool keep_xps)
{
	u32 array_incomplete;
	GF_TrackBox *trak;
	GF_Err e;
	GF_MPEGVisualSampleEntryBox *entry;
	GF_SampleDescriptionBox *stsd;
	e = CanAccessMovie(the_file, GF_ISOM_OPEN_WRITE);
	if (e) return e;
	trak = gf_isom_get_track_from_file(the_file, trackNumber);
	if (!trak || !trak->Media || !DescriptionIndex) return GF_BAD_PARAM;
	stsd = trak->Media->information->sampleTable->SampleDescription;
	entry = (GF_MPEGVisualSampleEntryBox *)gf_list_get(stsd->child_boxes, DescriptionIndex-1);
	if (!entry) return GF_BAD_PARAM;
	switch (entry->type) {
	case GF_ISOM_BOX_TYPE_VVC1:
	case GF_ISOM_BOX_TYPE_VVI1:
		break;
	default:
		return GF_BAD_PARAM;
	}

	if (operand_type <= GF_ISOM_VVCC_SET_INBAND) {
		//create the vvcC box if not present, unless we only rewrite for inband parameter sets
		if ((operand_type != GF_ISOM_VVCC_SET_INBAND) && !entry->vvc_config) {
			entry->vvc_config = (GF_VVCConfigurationBox*)gf_isom_box_new_parent(&entry->child_boxes, GF_ISOM_BOX_TYPE_VVCC);
			if (!entry->vvc_config) return GF_OUT_OF_MEM;
		}

		if (cfg) {
			if (entry->vvc_config->config) gf_odf_vvc_cfg_del(entry->vvc_config->config);
			entry->vvc_config->config = VVC_DuplicateConfig(cfg);
		} else {
			operand_type = GF_ISOM_VVCC_SET_INBAND;
		}
		array_incomplete = (operand_type==GF_ISOM_VVCC_SET_INBAND) ? 1 : 0;
		if (entry->vvc_config && nalu_cleanup_config(entry->vvc_config->config ? entry->vvc_config->config->param_array : NULL, (operand_type==GF_ISOM_VVCC_SET_INBAND), keep_xps)) {
			array_incomplete=1;
		}

		switch (entry->type) {
		case GF_ISOM_BOX_TYPE_VVC1:
		case GF_ISOM_BOX_TYPE_VVI1:
			entry->type = array_incomplete ?
GF_ISOM_BOX_TYPE_VVI1 : GF_ISOM_BOX_TYPE_VVC1; break; } } return GF_OK; } GF_EXPORT GF_Err gf_isom_vvc_set_inband_config(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex, Bool keep_xps) { return gf_isom_vvc_config_update_ex(the_file, trackNumber, DescriptionIndex, NULL, GF_ISOM_VVCC_SET_INBAND, keep_xps); } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_EXPORT GF_Box *gf_isom_clone_config_box(GF_Box *box) { u8 *data=NULL; u32 size=0; GF_Err e; GF_Box *clone=NULL; GF_BitStream *bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE); e = gf_isom_box_size(box); if (!e) e = gf_isom_box_write(box, bs); gf_bs_get_content(bs, &data, &size); gf_bs_del(bs); if (!e) { bs = gf_bs_new(data, size, GF_BITSTREAM_READ); e = gf_isom_box_parse(&clone, bs); gf_bs_del(bs); } if (data) gf_free(data); if (e) { if (clone) gf_isom_box_del(clone); clone = NULL; } return clone; } GF_EXPORT GF_AVCConfig *gf_isom_avc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_avc_svc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_AVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->avc_config) return NULL; return AVC_DuplicateConfig(entry->avc_config->config); } GF_EXPORT GF_HEVCConfig *gf_isom_hevc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; if (gf_isom_get_reference_count(the_file, trackNumber, GF_ISOM_REF_TBAS)) { u32 ref_track; GF_Err e = gf_isom_get_reference(the_file, trackNumber, GF_ISOM_REF_TBAS, 1, &ref_track); if (e == GF_OK) { trackNumber = ref_track; } } trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_hevc_lhvc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_HEVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->hevc_config) return NULL; return HEVC_DuplicateConfig(entry->hevc_config->config); } GF_EXPORT GF_ISOMVVCType gf_isom_get_vvc_type(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { u32 type; GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return GF_ISOM_VVCTYPE_NONE; if (!gf_isom_is_video_handler_type(trak->Media->handler->handlerType)) return GF_ISOM_VVCTYPE_NONE; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return GF_ISOM_VVCTYPE_NONE; type = entry->type; if (type == GF_ISOM_BOX_TYPE_ENCV) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) type = sinf->original_format->data_format; } else if (type == GF_ISOM_BOX_TYPE_RESV) { if (entry->rinf && entry->rinf->original_format) type = entry->rinf->original_format->data_format; } switch (type) { case GF_ISOM_BOX_TYPE_VVC1: case GF_ISOM_BOX_TYPE_VVI1: return GF_ISOM_VVCTYPE_ONLY; default: return GF_ISOM_VVCTYPE_NONE; } return 
GF_ISOM_VVCTYPE_NONE; } GF_EXPORT GF_VVCConfig *gf_isom_vvc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; /*todo, add support for subpic track and nvcl tracks*/ trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_vvc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_VVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->vvc_config) return NULL; return VVC_DuplicateConfig(entry->vvc_config->config); } GF_EXPORT GF_AVCConfig *gf_isom_svc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_avc_svc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_AVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->svc_config) return NULL; return AVC_DuplicateConfig(entry->svc_config->config); } GF_EXPORT GF_AVCConfig *gf_isom_mvc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_avc_svc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_AVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->mvc_config) return NULL; return AVC_DuplicateConfig(entry->mvc_config->config); } GF_EXPORT GF_AV1Config *gf_isom_av1_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; if (gf_isom_get_reference_count(the_file, trackNumber, GF_ISOM_REF_TBAS)) { u32 ref_track; GF_Err e = gf_isom_get_reference(the_file, trackNumber, GF_ISOM_REF_TBAS, 1, &ref_track); if (e == GF_OK) { trackNumber = ref_track; } } trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex - 1); if (!entry || !entry->av1_config|| !entry->av1_config->config) return NULL; return AV1_DuplicateConfig(entry->av1_config->config); } GF_EXPORT GF_VPConfig *gf_isom_vp_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex - 1); if (!entry || !entry->vp_config) return NULL; return VP_DuplicateConfig(entry->vp_config->config); } GF_EXPORT GF_DOVIDecoderConfigurationRecord *gf_isom_dovi_config_get(GF_ISOFile* the_file, u32 trackNumber, u32 DescriptionIndex) { GF_TrackBox* trak; GF_MPEGVisualSampleEntryBox *entry; trak = 
gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex - 1); if (!entry || !entry->dovi_config) return NULL; return DOVI_DuplicateConfig(&entry->dovi_config->DOVIConfig); } GF_EXPORT GF_ISOMAVCType gf_isom_get_avc_svc_type(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { u32 type; GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !trak->Media->handler || !DescriptionIndex) return GF_ISOM_AVCTYPE_NONE; if (!gf_isom_is_video_handler_type(trak->Media->handler->handlerType)) return GF_ISOM_AVCTYPE_NONE; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return GF_ISOM_AVCTYPE_NONE; type = entry->type; if (type == GF_ISOM_BOX_TYPE_ENCV) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) type = sinf->original_format->data_format; } else if (type == GF_ISOM_BOX_TYPE_RESV) { if (entry->rinf && entry->rinf->original_format) type = entry->rinf->original_format->data_format; } switch (type) { case GF_ISOM_BOX_TYPE_AVC1: case GF_ISOM_BOX_TYPE_AVC2: case GF_ISOM_BOX_TYPE_AVC3: case GF_ISOM_BOX_TYPE_AVC4: case GF_ISOM_BOX_TYPE_SVC1: case GF_ISOM_BOX_TYPE_MVC1: break; default: return GF_ISOM_AVCTYPE_NONE; } if (entry->avc_config && !entry->svc_config && !entry->mvc_config) return GF_ISOM_AVCTYPE_AVC_ONLY; if (entry->avc_config && entry->svc_config) return GF_ISOM_AVCTYPE_AVC_SVC; if (entry->avc_config && entry->mvc_config) return GF_ISOM_AVCTYPE_AVC_MVC; if (!entry->avc_config && entry->svc_config) return GF_ISOM_AVCTYPE_SVC_ONLY; if (!entry->avc_config && entry->mvc_config) return GF_ISOM_AVCTYPE_MVC_ONLY; return GF_ISOM_AVCTYPE_NONE; } GF_EXPORT GF_ISOMHEVCType gf_isom_get_hevc_lhvc_type(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { u32 type; GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return GF_ISOM_HEVCTYPE_NONE; if (!gf_isom_is_video_handler_type(trak->Media->handler->handlerType)) return GF_ISOM_HEVCTYPE_NONE; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return GF_ISOM_HEVCTYPE_NONE; type = entry->type; if (type == GF_ISOM_BOX_TYPE_ENCV) { GF_ProtectionSchemeInfoBox *sinf = (GF_ProtectionSchemeInfoBox *) gf_isom_box_find_child(entry->child_boxes, GF_ISOM_BOX_TYPE_SINF); if (sinf && sinf->original_format) type = sinf->original_format->data_format; } else if (type == GF_ISOM_BOX_TYPE_RESV) { if (entry->rinf && entry->rinf->original_format) type = entry->rinf->original_format->data_format; } if (type == GF_ISOM_BOX_TYPE_DVHE) { type = GF_ISOM_BOX_TYPE_HEV1; } switch (type) { case GF_ISOM_BOX_TYPE_HVC1: case GF_ISOM_BOX_TYPE_HEV1: case GF_ISOM_BOX_TYPE_HVC2: case GF_ISOM_BOX_TYPE_HEV2: case GF_ISOM_BOX_TYPE_LHV1: case GF_ISOM_BOX_TYPE_LHE1: case GF_ISOM_BOX_TYPE_HVT1: break; default: return GF_ISOM_HEVCTYPE_NONE; } if (entry->hevc_config && !entry->lhvc_config) return GF_ISOM_HEVCTYPE_HEVC_ONLY; if (entry->hevc_config && entry->lhvc_config) return 
GF_ISOM_HEVCTYPE_HEVC_LHVC; if (!entry->hevc_config && entry->lhvc_config) return GF_ISOM_HEVCTYPE_LHVC_ONLY; return GF_ISOM_HEVCTYPE_NONE; } GF_EXPORT GF_HEVCConfig *gf_isom_lhvc_config_get(GF_ISOFile *the_file, u32 trackNumber, u32 DescriptionIndex) { GF_HEVCConfig *lhvc; GF_OperatingPointsInformation *oinf=NULL; GF_TrackBox *trak; GF_MPEGVisualSampleEntryBox *entry; trak = gf_isom_get_track_from_file(the_file, trackNumber); if (!trak || !trak->Media || !DescriptionIndex) return NULL; if (gf_isom_get_hevc_lhvc_type(the_file, trackNumber, DescriptionIndex)==GF_ISOM_HEVCTYPE_NONE) return NULL; entry = (GF_MPEGVisualSampleEntryBox*)gf_list_get(trak->Media->information->sampleTable->SampleDescription->child_boxes, DescriptionIndex-1); if (!entry) return NULL; if (!entry->lhvc_config) return NULL; lhvc = HEVC_DuplicateConfig(entry->lhvc_config->config); if (!lhvc) return NULL; gf_isom_get_oinf_info(the_file, trackNumber, &oinf); if (oinf) { LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_last(oinf->profile_tier_levels); if (ptl) { lhvc->profile_space = ptl->general_profile_space; lhvc->tier_flag = ptl->general_tier_flag; lhvc->profile_idc = ptl->general_profile_idc; lhvc->general_profile_compatibility_flags = ptl->general_profile_compatibility_flags; lhvc->constraint_indicator_flags = ptl->general_constraint_indicator_flags; } } return lhvc; } void btrt_box_del(GF_Box *s) { GF_BitRateBox *ptr = (GF_BitRateBox *)s; if (ptr) gf_free(ptr); } GF_Err btrt_box_read(GF_Box *s, GF_BitStream *bs) { GF_BitRateBox *ptr = (GF_BitRateBox *)s; ISOM_DECREASE_SIZE(ptr, 12) ptr->bufferSizeDB = gf_bs_read_u32(bs); ptr->maxBitrate = gf_bs_read_u32(bs); ptr->avgBitrate = gf_bs_read_u32(bs); return GF_OK; } GF_Box *btrt_box_new() { GF_BitRateBox *tmp = (GF_BitRateBox *) gf_malloc(sizeof(GF_BitRateBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_BitRateBox)); tmp->type = GF_ISOM_BOX_TYPE_BTRT; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err btrt_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_BitRateBox *ptr = (GF_BitRateBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u32(bs, ptr->bufferSizeDB); gf_bs_write_u32(bs, ptr->maxBitrate); gf_bs_write_u32(bs, ptr->avgBitrate); return GF_OK; } GF_Err btrt_box_size(GF_Box *s) { GF_BitRateBox *ptr = (GF_BitRateBox *)s; ptr->size += 12; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void m4ds_box_del(GF_Box *s) { GF_MPEG4ExtensionDescriptorsBox *ptr = (GF_MPEG4ExtensionDescriptorsBox *)s; gf_odf_desc_list_del(ptr->descriptors); gf_list_del(ptr->descriptors); gf_free(ptr); } GF_Err m4ds_box_read(GF_Box *s, GF_BitStream *bs) { GF_Err e; char *enc_od; GF_MPEG4ExtensionDescriptorsBox *ptr = (GF_MPEG4ExtensionDescriptorsBox *)s; u32 od_size = (u32) ptr->size; if (!od_size) return GF_OK; enc_od = (char *)gf_malloc(sizeof(char) * od_size); gf_bs_read_data(bs, enc_od, od_size); e = gf_odf_desc_list_read((char *)enc_od, od_size, ptr->descriptors); gf_free(enc_od); return e; } GF_Box *m4ds_box_new() { GF_MPEG4ExtensionDescriptorsBox *tmp = (GF_MPEG4ExtensionDescriptorsBox *) gf_malloc(sizeof(GF_MPEG4ExtensionDescriptorsBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_MPEG4ExtensionDescriptorsBox)); tmp->type = GF_ISOM_BOX_TYPE_M4DS; tmp->descriptors = gf_list_new(); return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err m4ds_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; u8 *enc_ods; u32 enc_od_size; GF_MPEG4ExtensionDescriptorsBox *ptr = 
(GF_MPEG4ExtensionDescriptorsBox *) s; if (!s) return GF_BAD_PARAM; e = gf_isom_box_write_header(s, bs); if (e) return e; enc_ods = NULL; enc_od_size = 0; e = gf_odf_desc_list_write(ptr->descriptors, &enc_ods, &enc_od_size); if (e) return e; if (enc_od_size) { gf_bs_write_data(bs, enc_ods, enc_od_size); gf_free(enc_ods); } return GF_OK; } GF_Err m4ds_box_size(GF_Box *s) { GF_Err e; u32 descSize = 0; GF_MPEG4ExtensionDescriptorsBox *ptr = (GF_MPEG4ExtensionDescriptorsBox *)s; e = gf_odf_desc_list_size(ptr->descriptors, &descSize); ptr->size += descSize; return e; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void avcc_box_del(GF_Box *s) { GF_AVCConfigurationBox *ptr = (GF_AVCConfigurationBox *)s; if (ptr->config) gf_odf_avc_cfg_del(ptr->config); ptr->config = NULL; gf_free(ptr); } GF_Err avcc_box_read(GF_Box *s, GF_BitStream *bs) { u32 i, count; GF_AVCConfigurationBox *ptr = (GF_AVCConfigurationBox *)s; if (ptr->config) gf_odf_avc_cfg_del(ptr->config); ptr->config = gf_odf_avc_cfg_new(); ISOM_DECREASE_SIZE(ptr, 7) //7 includes the 2 counts of sps and pps ptr->config->configurationVersion = gf_bs_read_u8(bs); ptr->config->AVCProfileIndication = gf_bs_read_u8(bs); ptr->config->profile_compatibility = gf_bs_read_u8(bs); ptr->config->AVCLevelIndication = gf_bs_read_u8(bs); if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { gf_bs_read_int(bs, 6); } else { ptr->config->complete_representation = gf_bs_read_int(bs, 1); gf_bs_read_int(bs, 5); } ptr->config->nal_unit_size = 1 + gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 3); count = gf_bs_read_int(bs, 5); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *) gf_malloc(sizeof(GF_NALUFFParam)); ISOM_DECREASE_SIZE(ptr, 2) sl->size = gf_bs_read_u16(bs); if (!sl->size || (gf_bs_available(bs) < sl->size) || (ptr->size < sl->size) ) { gf_free(sl); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("AVCC: Not enough bits to parse. Aborting.\n")); return GF_ISOM_INVALID_FILE; } sl->data = (char *)gf_malloc(sizeof(char) * sl->size); gf_bs_read_data(bs, sl->data, sl->size); gf_list_add(ptr->config->sequenceParameterSets, sl); ptr->size -= sl->size; } count = gf_bs_read_u8(bs); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_malloc(sizeof(GF_NALUFFParam)); ISOM_DECREASE_SIZE(ptr, 2) sl->size = gf_bs_read_u16(bs); if (!sl->size || (gf_bs_available(bs) < sl->size) || (ptr->size<sl->size)) { gf_free(sl); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("AVCC: Not enough bits to parse. Aborting.\n")); return GF_ISOM_INVALID_FILE; } sl->data = (char *)gf_malloc(sizeof(char) * sl->size); gf_bs_read_data(bs, sl->data, sl->size); gf_list_add(ptr->config->pictureParameterSets, sl); ptr->size -= sl->size; } if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { if (gf_avc_is_rext_profile(ptr->config->AVCProfileIndication)) { if (!ptr->size) { #ifndef GPAC_DISABLE_AV_PARSERS AVCState avc; s32 idx; GF_NALUFFParam *sl = (GF_NALUFFParam*)gf_list_get(ptr->config->sequenceParameterSets, 0); idx = sl ? 
gf_avc_read_sps(sl->data+1, sl->size-1, &avc, 0, NULL) : -1; if (idx>=0) { ptr->config->chroma_format = avc.sps[idx].chroma_format; ptr->config->luma_bit_depth = 8 + avc.sps[idx].luma_bit_depth_m8; ptr->config->chroma_bit_depth = 8 + avc.sps[idx].chroma_bit_depth_m8; } #else /*set default values ...*/ ptr->config->chroma_format = 1; ptr->config->luma_bit_depth = 8; ptr->config->chroma_bit_depth = 8; #endif return GF_OK; } ISOM_DECREASE_SIZE(ptr, 4) gf_bs_read_int(bs, 6); ptr->config->chroma_format = gf_bs_read_int(bs, 2); gf_bs_read_int(bs, 5); ptr->config->luma_bit_depth = 8 + gf_bs_read_int(bs, 3); gf_bs_read_int(bs, 5); ptr->config->chroma_bit_depth = 8 + gf_bs_read_int(bs, 3); count = gf_bs_read_int(bs, 8); if (count*2 > ptr->size) { //ffmpeg just ignores this part while allocating bytes (filled with garbage?) GF_LOG(GF_LOG_WARNING, GF_LOG_CODING, ("AVCC: invalid numOfSequenceParameterSetExt value. Skipping.\n")); return GF_OK; } if (count) { ptr->config->sequenceParameterSetExtensions = gf_list_new(); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *)gf_malloc(sizeof(GF_NALUFFParam)); ISOM_DECREASE_SIZE(ptr, 2) sl->size = gf_bs_read_u16(bs); if ((gf_bs_available(bs) < sl->size) || (ptr->size<sl->size)) { gf_free(sl); GF_LOG(GF_LOG_ERROR, GF_LOG_CODING, ("AVCC: Not enough bits to parse. Aborting.\n")); return GF_ISOM_INVALID_FILE; } sl->data = (char *)gf_malloc(sizeof(char) * sl->size); gf_bs_read_data(bs, sl->data, sl->size); gf_list_add(ptr->config->sequenceParameterSetExtensions, sl); ptr->size -= sl->size; } } } } return GF_OK; } GF_Box *avcc_box_new() { GF_AVCConfigurationBox *tmp = (GF_AVCConfigurationBox *) gf_malloc(sizeof(GF_AVCConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_AVCConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_AVCC; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err avcc_box_write(GF_Box *s, GF_BitStream *bs) { u32 i, count; GF_Err e; GF_AVCConfigurationBox *ptr = (GF_AVCConfigurationBox *) s; if (!s) return GF_BAD_PARAM; if (!ptr->config) return GF_OK; e = gf_isom_box_write_header(s, bs); if (e) return e; gf_bs_write_u8(bs, ptr->config->configurationVersion); gf_bs_write_u8(bs, ptr->config->AVCProfileIndication); gf_bs_write_u8(bs, ptr->config->profile_compatibility); gf_bs_write_u8(bs, ptr->config->AVCLevelIndication); if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { gf_bs_write_int(bs, 0x3F, 6); } else { gf_bs_write_int(bs, ptr->config->complete_representation, 1); gf_bs_write_int(bs, 0x1F, 5); } gf_bs_write_int(bs, ptr->config->nal_unit_size - 1, 2); gf_bs_write_int(bs, 0x7, 3); count = gf_list_count(ptr->config->sequenceParameterSets); gf_bs_write_int(bs, count, 5); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *) gf_list_get(ptr->config->sequenceParameterSets, i); gf_bs_write_u16(bs, sl->size); gf_bs_write_data(bs, sl->data, sl->size); } count = gf_list_count(ptr->config->pictureParameterSets); gf_bs_write_u8(bs, count); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *) gf_list_get(ptr->config->pictureParameterSets, i); gf_bs_write_u16(bs, sl->size); gf_bs_write_data(bs, sl->data, sl->size); } if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { if (gf_avc_is_rext_profile(ptr->config->AVCProfileIndication)) { gf_bs_write_int(bs, 0xFF, 6); gf_bs_write_int(bs, ptr->config->chroma_format, 2); gf_bs_write_int(bs, 0xFF, 5); gf_bs_write_int(bs, ptr->config->luma_bit_depth - 8, 3); gf_bs_write_int(bs, 0xFF, 5); gf_bs_write_int(bs, ptr->config->chroma_bit_depth - 8, 3); count = 
ptr->config->sequenceParameterSetExtensions ? gf_list_count(ptr->config->sequenceParameterSetExtensions) : 0; gf_bs_write_u8(bs, count); for (i=0; i<count; i++) { GF_NALUFFParam *sl = (GF_NALUFFParam *) gf_list_get(ptr->config->sequenceParameterSetExtensions, i); gf_bs_write_u16(bs, sl->size); gf_bs_write_data(bs, sl->data, sl->size); } } } return GF_OK; } GF_Err avcc_box_size(GF_Box *s) { u32 i, count; GF_AVCConfigurationBox *ptr = (GF_AVCConfigurationBox *)s; if (!ptr->config) { ptr->size = 0; return GF_OK; } ptr->size += 7; count = gf_list_count(ptr->config->sequenceParameterSets); for (i=0; i<count; i++) ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ptr->config->sequenceParameterSets, i))->size; count = gf_list_count(ptr->config->pictureParameterSets); for (i=0; i<count; i++) ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ptr->config->pictureParameterSets, i))->size; if (ptr->type==GF_ISOM_BOX_TYPE_AVCC) { if (gf_avc_is_rext_profile(ptr->config->AVCProfileIndication)) { ptr->size += 4; count = ptr->config->sequenceParameterSetExtensions ?gf_list_count(ptr->config->sequenceParameterSetExtensions) : 0; for (i=0; i<count; i++) ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ptr->config->sequenceParameterSetExtensions, i))->size; } } return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ void hvcc_box_del(GF_Box *s) { GF_HEVCConfigurationBox *ptr = (GF_HEVCConfigurationBox*)s; if (ptr->config) gf_odf_hevc_cfg_del(ptr->config); gf_free(ptr); } GF_Err hvcc_box_read(GF_Box *s, GF_BitStream *bs) { u64 consumed; GF_HEVCConfigurationBox *ptr = (GF_HEVCConfigurationBox *)s; if (ptr->config) gf_odf_hevc_cfg_del(ptr->config); consumed = gf_bs_get_position(bs); ptr->config = gf_odf_hevc_cfg_read_bs(bs, (s->type == GF_ISOM_BOX_TYPE_HVCC) ? GF_FALSE : GF_TRUE); consumed = gf_bs_get_position(bs) - consumed ; ISOM_DECREASE_SIZE(ptr, (u32)consumed) return ptr->config ? 
GF_OK : GF_ISOM_INVALID_FILE;
}

GF_Box *hvcc_box_new()
{
	GF_HEVCConfigurationBox *tmp = (GF_HEVCConfigurationBox *) gf_malloc(sizeof(GF_HEVCConfigurationBox));
	if (tmp == NULL) return NULL;
	memset(tmp, 0, sizeof(GF_HEVCConfigurationBox));
	tmp->type = GF_ISOM_BOX_TYPE_HVCC;
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err hvcc_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_HEVCConfigurationBox *ptr = (GF_HEVCConfigurationBox *) s;
	if (!s) return GF_BAD_PARAM;
	if (!ptr->config) return GF_OK;
	e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	return gf_odf_hevc_cfg_write_bs(ptr->config, bs);
}

GF_Err hvcc_box_size(GF_Box *s)
{
	u32 i, count, j, subcount;
	GF_HEVCConfigurationBox *ptr = (GF_HEVCConfigurationBox *)s;
	if (!ptr->config) {
		ptr->size = 0;
		return GF_OK;
	}
	if (!ptr->config->is_lhvc) ptr->size += 23;
	else ptr->size += 6;

	count = gf_list_count(ptr->config->param_array);
	for (i=0; i<count; i++) {
		GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(ptr->config->param_array, i);
		ptr->size += 3;
		subcount = gf_list_count(ar->nalus);
		for (j=0; j<subcount; j++) {
			ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ar->nalus, j))->size;
		}
	}
	return GF_OK;
}
#endif

void vvcc_box_del(GF_Box *s)
{
	GF_VVCConfigurationBox *ptr = (GF_VVCConfigurationBox*)s;
	if (ptr->config) gf_odf_vvc_cfg_del(ptr->config);
	gf_free(ptr);
}

GF_Err vvcc_box_read(GF_Box *s, GF_BitStream *bs)
{
	u64 consumed;
	GF_VVCConfigurationBox *ptr = (GF_VVCConfigurationBox *)s;
	if (ptr->config) gf_odf_vvc_cfg_del(ptr->config);
	consumed = gf_bs_get_position(bs);
	ptr->config = gf_odf_vvc_cfg_read_bs(bs);
	consumed = gf_bs_get_position(bs) - consumed;
	ISOM_DECREASE_SIZE(ptr, (u32)consumed)
	return ptr->config ? GF_OK : GF_ISOM_INVALID_FILE;
}

GF_Box *vvcc_box_new()
{
	GF_VVCConfigurationBox *tmp = (GF_VVCConfigurationBox *) gf_malloc(sizeof(GF_VVCConfigurationBox));
	if (tmp == NULL) return NULL;
	memset(tmp, 0, sizeof(GF_VVCConfigurationBox));
	tmp->type = GF_ISOM_BOX_TYPE_VVCC;
	return (GF_Box *)tmp;
}

#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err vvcc_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_VVCConfigurationBox *ptr = (GF_VVCConfigurationBox *) s;
	if (!s) return GF_BAD_PARAM;
	if (!ptr->config) return GF_OK;
	e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	return gf_odf_vvc_cfg_write_bs(ptr->config, bs);
}

GF_Err vvcc_box_size(GF_Box *s)
{
	u32 i, count, j, subcount;
	GF_VVCConfigurationBox *ptr = (GF_VVCConfigurationBox *)s;
	if (!ptr->config) {
		ptr->size = 0;
		return GF_OK;
	}
	ptr->size += 6;
	if (ptr->config->ptl_present) {
		if (!ptr->config->general_constraint_info) ptr->config->num_constraint_info = 0;
		if (!ptr->config->sub_profiles_idc) ptr->config->num_sub_profiles = 0;
		ptr->size += 2 + 2 + ptr->config->num_constraint_info + 2 + ptr->config->num_sub_profiles*4;
		if (ptr->config->numTemporalLayers>1) ptr->size += 1;
		for (i=0; i<ptr->config->numTemporalLayers; i++) {
			if (ptr->config->ptl_sublayer_present_mask & (1<<i)) ptr->size+=1;
		}
	}
	count = gf_list_count(ptr->config->param_array);
	for (i=0; i<count; i++) {
		GF_NALUFFParamArray *ar = (GF_NALUFFParamArray*)gf_list_get(ptr->config->param_array, i);
		ptr->size += 3;
		subcount = gf_list_count(ar->nalus);
		for (j=0; j<subcount; j++) {
			ptr->size += 2 + ((GF_NALUFFParam *)gf_list_get(ar->nalus, j))->size;
		}
	}
	return GF_OK;
}
#endif

GF_Box *av1c_box_new()
{
	GF_AV1ConfigurationBox *tmp = (GF_AV1ConfigurationBox *)gf_malloc(sizeof(GF_AV1ConfigurationBox));
	if (tmp == NULL) return NULL;
	memset(tmp, 0, sizeof(GF_AV1ConfigurationBox));
	tmp->type = GF_ISOM_BOX_TYPE_AV1C;
	return
(GF_Box *)tmp;
}

void av1c_box_del(GF_Box *s)
{
	GF_AV1ConfigurationBox *ptr = (GF_AV1ConfigurationBox*)s;
	if (ptr->config) gf_odf_av1_cfg_del(ptr->config);
	gf_free(ptr);
}

GF_Err av1c_box_read(GF_Box *s, GF_BitStream *bs)
{
	u64 pos, read;
	GF_AV1ConfigurationBox *ptr = (GF_AV1ConfigurationBox*)s;
	if (ptr->config) gf_odf_av1_cfg_del(ptr->config);
	pos = gf_bs_get_position(bs);
	ptr->config = gf_odf_av1_cfg_read_bs_size(bs, (u32) ptr->size);
	read = gf_bs_get_position(bs) - pos;
	if (read < ptr->size)
		GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[ISOBMFF] AV1ConfigurationBox: read only "LLU" bytes (expected "LLU").\n", read, ptr->size));
	if (read > ptr->size)
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[ISOBMFF] AV1ConfigurationBox overflow read "LLU" bytes, of box size "LLU".\n", read, ptr->size));
	return GF_OK;
}

#ifndef GPAC_DISABLE_ISOM_WRITE
GF_Err av1c_box_write(GF_Box *s, GF_BitStream *bs)
{
	GF_Err e;
	GF_AV1ConfigurationBox *ptr = (GF_AV1ConfigurationBox*)s;
	if (!s) return GF_BAD_PARAM;
	if (!ptr->config) return GF_BAD_PARAM;
	e = gf_isom_box_write_header(s, bs);
	if (e) return e;
	return gf_odf_av1_cfg_write_bs(ptr->config, bs);
}

GF_Err av1c_box_size(GF_Box *s)
{
	u32 i;
	GF_AV1ConfigurationBox *ptr = (GF_AV1ConfigurationBox *)s;
	if (!ptr->config) {
		ptr->size = 0;
		return GF_BAD_PARAM;
	}
	ptr->size += 4;
	for (i = 0; i < gf_list_count(ptr->config->obu_array); ++i) {
		GF_AV1_OBUArrayEntry *a = gf_list_get(ptr->config->obu_array, i);
		ptr->size += a->obu_length;
	}
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

void vpcc_box_del(GF_Box *s)
{
	GF_VPConfigurationBox *ptr = (GF_VPConfigurationBox*)s;
	if (ptr->config) gf_odf_vp_cfg_del(ptr->config);
	ptr->config = NULL;
	gf_free(ptr);
}

GF_Err vpcc_box_read(GF_Box *s, GF_BitStream *bs)
{
	u64 pos;
	GF_VPConfigurationBox *ptr = (GF_VPConfigurationBox *)s;
	if (ptr->config) gf_odf_vp_cfg_del(ptr->config);
	ptr->config = NULL;
	pos = gf_bs_get_position(bs);
	ptr->config = gf_odf_vp_cfg_read_bs(bs, ptr->version == 0);
	pos = gf_bs_get_position(bs) - pos;
	if (pos < ptr->size)
		GF_LOG(GF_LOG_WARNING, GF_LOG_CONTAINER, ("[ISOBMFF] VPConfigurationBox: read only "LLU" bytes (expected "LLU").\n", pos, ptr->size));
	if (pos > ptr->size)
		GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[ISOBMFF] VPConfigurationBox overflow read "LLU" bytes, of box size "LLU".\n", pos, ptr->size));
	return ptr->config ?
GF_OK : GF_ISOM_INVALID_FILE; } GF_Box *vpcc_box_new() { GF_VPConfigurationBox *tmp = (GF_VPConfigurationBox *) gf_malloc(sizeof(GF_VPConfigurationBox)); if (tmp == NULL) return NULL; memset(tmp, 0, sizeof(GF_VPConfigurationBox)); tmp->type = GF_ISOM_BOX_TYPE_VPCC; tmp->version = 1; return (GF_Box *)tmp; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err vpcc_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_VPConfigurationBox *ptr = (GF_VPConfigurationBox *) s; if (!s) return GF_BAD_PARAM; if (!ptr->config) return GF_OK; e = gf_isom_full_box_write(s, bs); if (e) return e; return gf_odf_vp_cfg_write_bs(ptr->config, bs, ptr->version == 0); } #endif GF_Err vpcc_box_size(GF_Box *s) { GF_VPConfigurationBox *ptr = (GF_VPConfigurationBox *)s; if (!ptr->config) { ptr->size = 0; return GF_OK; } if (ptr->version == 0) { ptr->size += 6; } else { if (ptr->config->codec_initdata_size) { GF_LOG(GF_LOG_ERROR, GF_LOG_CONTAINER, ("[ISOBMFF] VPConfigurationBox: codec_initdata_size MUST be 0, was %d\n", ptr->config->codec_initdata_size)); return GF_ISOM_INVALID_FILE; } ptr->size += 8; } return GF_OK; } GF_Box *SmDm_box_new() { ISOM_DECL_BOX_ALLOC(GF_SMPTE2086MasteringDisplayMetadataBox, GF_ISOM_BOX_TYPE_SMDM); return (GF_Box *)tmp; } void SmDm_box_del(GF_Box *a) { GF_SMPTE2086MasteringDisplayMetadataBox *p = (GF_SMPTE2086MasteringDisplayMetadataBox *)a; gf_free(p); } GF_Err SmDm_box_read(GF_Box *s, GF_BitStream *bs) { GF_SMPTE2086MasteringDisplayMetadataBox *p = (GF_SMPTE2086MasteringDisplayMetadataBox *)s; ISOM_DECREASE_SIZE(p, 24) p->primaryRChromaticity_x = gf_bs_read_u16(bs); p->primaryRChromaticity_y = gf_bs_read_u16(bs); p->primaryGChromaticity_x = gf_bs_read_u16(bs); p->primaryGChromaticity_y = gf_bs_read_u16(bs); p->primaryBChromaticity_x = gf_bs_read_u16(bs); p->primaryBChromaticity_y = gf_bs_read_u16(bs); p->whitePointChromaticity_x = gf_bs_read_u16(bs); p->whitePointChromaticity_y = gf_bs_read_u16(bs); p->luminanceMax = gf_bs_read_u32(bs); p->luminanceMin = gf_bs_read_u32(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err SmDm_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_SMPTE2086MasteringDisplayMetadataBox *p = (GF_SMPTE2086MasteringDisplayMetadataBox*)s; e = gf_isom_full_box_write(s, bs); if (e) return e; gf_bs_write_u16(bs, p->primaryRChromaticity_x); gf_bs_write_u16(bs, p->primaryRChromaticity_y); gf_bs_write_u16(bs, p->primaryGChromaticity_x); gf_bs_write_u16(bs, p->primaryGChromaticity_y); gf_bs_write_u16(bs, p->primaryBChromaticity_x); gf_bs_write_u16(bs, p->primaryBChromaticity_y); gf_bs_write_u16(bs, p->whitePointChromaticity_x); gf_bs_write_u16(bs, p->whitePointChromaticity_y); gf_bs_write_u32(bs, p->luminanceMax); gf_bs_write_u32(bs, p->luminanceMin); return GF_OK; } GF_Err SmDm_box_size(GF_Box *s) { GF_SMPTE2086MasteringDisplayMetadataBox *p = (GF_SMPTE2086MasteringDisplayMetadataBox*)s; p->size += 24; return GF_OK; } #endif /*GPAC_DISABLE_ISOM_WRITE*/ GF_Box *CoLL_box_new() { ISOM_DECL_BOX_ALLOC(GF_VPContentLightLevelBox, GF_ISOM_BOX_TYPE_COLL); return (GF_Box *)tmp; } void CoLL_box_del(GF_Box *a) { GF_VPContentLightLevelBox *p = (GF_VPContentLightLevelBox *)a; gf_free(p); } GF_Err CoLL_box_read(GF_Box *s, GF_BitStream *bs) { GF_VPContentLightLevelBox *p = (GF_VPContentLightLevelBox *)s; ISOM_DECREASE_SIZE(p, 4) p->maxCLL = gf_bs_read_u16(bs); p->maxFALL = gf_bs_read_u16(bs); return GF_OK; } #ifndef GPAC_DISABLE_ISOM_WRITE GF_Err CoLL_box_write(GF_Box *s, GF_BitStream *bs) { GF_Err e; GF_VPContentLightLevelBox *p = (GF_VPContentLightLevelBox*)s; e = 
gf_isom_full_box_write(s, bs);
	if (e) return e;
	gf_bs_write_u16(bs, p->maxCLL);
	gf_bs_write_u16(bs, p->maxFALL);
	return GF_OK;
}

GF_Err CoLL_box_size(GF_Box *s)
{
	GF_VPContentLightLevelBox *p = (GF_VPContentLightLevelBox*)s;
	p->size += 4;
	return GF_OK;
}
#endif /*GPAC_DISABLE_ISOM_WRITE*/

GF_OperatingPointsInformation *gf_isom_oinf_new_entry()
{
	GF_OperatingPointsInformation* ptr;
	GF_SAFEALLOC(ptr, GF_OperatingPointsInformation);
	if (ptr) {
		ptr->profile_tier_levels = gf_list_new();
		ptr->operating_points = gf_list_new();
		ptr->dependency_layers = gf_list_new();
	}
	return ptr;
}

void gf_isom_oinf_del_entry(void *entry)
{
	GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry;
	if (!ptr) return;
	if (ptr->profile_tier_levels) {
		while (gf_list_count(ptr->profile_tier_levels)) {
			LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_get(ptr->profile_tier_levels, 0);
			gf_free(ptl);
			gf_list_rem(ptr->profile_tier_levels, 0);
		}
		gf_list_del(ptr->profile_tier_levels);
	}
	if (ptr->operating_points) {
		while (gf_list_count(ptr->operating_points)) {
			LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, 0);
			gf_free(op);
			gf_list_rem(ptr->operating_points, 0);
		}
		gf_list_del(ptr->operating_points);
	}
	if (ptr->dependency_layers) {
		while (gf_list_count(ptr->dependency_layers)) {
			LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, 0);
			gf_free(dep);
			gf_list_rem(ptr->dependency_layers, 0);
		}
		gf_list_del(ptr->dependency_layers);
	}
	gf_free(ptr);
	return;
}

GF_Err gf_isom_oinf_read_entry(void *entry, GF_BitStream *bs)
{
	GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry;
	u32 i, j, count;
	if (!ptr) return GF_BAD_PARAM;
	ptr->scalability_mask = gf_bs_read_u16(bs);
	gf_bs_read_int(bs, 2);//reserved
	count = gf_bs_read_int(bs, 6);
	for (i = 0; i < count; i++) {
		LHEVC_ProfileTierLevel *ptl;
		GF_SAFEALLOC(ptl, LHEVC_ProfileTierLevel);
		if (!ptl) return GF_OUT_OF_MEM;
		ptl->general_profile_space = gf_bs_read_int(bs, 2);
		ptl->general_tier_flag = gf_bs_read_int(bs, 1);
		ptl->general_profile_idc = gf_bs_read_int(bs, 5);
		ptl->general_profile_compatibility_flags = gf_bs_read_u32(bs);
		ptl->general_constraint_indicator_flags = gf_bs_read_long_int(bs, 48);
		ptl->general_level_idc = gf_bs_read_u8(bs);
		gf_list_add(ptr->profile_tier_levels, ptl);
	}
	count = gf_bs_read_u16(bs);
	for (i = 0; i < count; i++) {
		LHEVC_OperatingPoint *op;
		GF_SAFEALLOC(op, LHEVC_OperatingPoint);
		if (!op) return GF_OUT_OF_MEM;
		op->output_layer_set_idx = gf_bs_read_u16(bs);
		op->max_temporal_id = gf_bs_read_u8(bs);
		op->layer_count = gf_bs_read_u8(bs);
		if (op->layer_count > GF_ARRAY_LENGTH(op->layers_info)) {
			//the entry was not added to the list yet, free it before bailing out
			gf_free(op);
			return GF_NON_COMPLIANT_BITSTREAM;
		}
		for (j = 0; j < op->layer_count; j++) {
			op->layers_info[j].ptl_idx = gf_bs_read_u8(bs);
			op->layers_info[j].layer_id = gf_bs_read_int(bs, 6);
			op->layers_info[j].is_outputlayer = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE;
			op->layers_info[j].is_alternate_outputlayer = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE;
		}
		op->minPicWidth = gf_bs_read_u16(bs);
		op->minPicHeight = gf_bs_read_u16(bs);
		op->maxPicWidth = gf_bs_read_u16(bs);
		op->maxPicHeight = gf_bs_read_u16(bs);
		op->maxChromaFormat = gf_bs_read_int(bs, 2);
		op->maxBitDepth = gf_bs_read_int(bs, 3) + 8;
		gf_bs_read_int(bs, 1);//reserved
		op->frame_rate_info_flag = gf_bs_read_int(bs, 1) ? GF_TRUE : GF_FALSE;
		op->bit_rate_info_flag = gf_bs_read_int(bs, 1) ?
GF_TRUE : GF_FALSE; if (op->frame_rate_info_flag) { op->avgFrameRate = gf_bs_read_u16(bs); gf_bs_read_int(bs, 6); //reserved op->constantFrameRate = gf_bs_read_int(bs, 2); } if (op->bit_rate_info_flag) { op->maxBitRate = gf_bs_read_u32(bs); op->avgBitRate = gf_bs_read_u32(bs); } gf_list_add(ptr->operating_points, op); } count = gf_bs_read_u8(bs); for (i = 0; i < count; i++) { LHEVC_DependentLayer *dep; GF_SAFEALLOC(dep, LHEVC_DependentLayer); if (!dep) return GF_OUT_OF_MEM; dep->dependent_layerID = gf_bs_read_u8(bs); dep->num_layers_dependent_on = gf_bs_read_u8(bs); if (dep->num_layers_dependent_on > GF_ARRAY_LENGTH(dep->dependent_on_layerID)) { gf_free(dep); return GF_NON_COMPLIANT_BITSTREAM; } for (j = 0; j < dep->num_layers_dependent_on; j++) dep->dependent_on_layerID[j] = gf_bs_read_u8(bs); for (j = 0; j < 16; j++) { if (ptr->scalability_mask & (1 << j)) dep->dimension_identifier[j] = gf_bs_read_u8(bs); } gf_list_add(ptr->dependency_layers, dep); } return GF_OK; } GF_Err gf_isom_oinf_write_entry(void *entry, GF_BitStream *bs) { GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry; u32 i, j, count; if (!ptr) return GF_OK; gf_bs_write_u16(bs, ptr->scalability_mask); gf_bs_write_int(bs, 0xFF, 2);//reserved count=gf_list_count(ptr->profile_tier_levels); gf_bs_write_int(bs, count, 6); for (i = 0; i < count; i++) { LHEVC_ProfileTierLevel *ptl = (LHEVC_ProfileTierLevel *)gf_list_get(ptr->profile_tier_levels, i); gf_bs_write_int(bs, ptl->general_profile_space, 2); gf_bs_write_int(bs, ptl->general_tier_flag, 1); gf_bs_write_int(bs, ptl->general_profile_idc, 5); gf_bs_write_u32(bs, ptl->general_profile_compatibility_flags); gf_bs_write_long_int(bs, ptl->general_constraint_indicator_flags, 48); gf_bs_write_u8(bs, ptl->general_level_idc); } count=gf_list_count(ptr->operating_points); gf_bs_write_u16(bs, count); for (i = 0; i < count; i++) { LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, i); gf_bs_write_u16(bs, op->output_layer_set_idx); gf_bs_write_u8(bs, op->max_temporal_id); gf_bs_write_u8(bs, op->layer_count); for (j = 0; j < op->layer_count; j++) { gf_bs_write_u8(bs, op->layers_info[j].ptl_idx); gf_bs_write_int(bs, op->layers_info[j].layer_id, 6); op->layers_info[j].is_outputlayer ? gf_bs_write_int(bs, 0x1, 1) : gf_bs_write_int(bs, 0x0, 1); op->layers_info[j].is_alternate_outputlayer ? gf_bs_write_int(bs, 0x1, 1) : gf_bs_write_int(bs, 0x0, 1); } gf_bs_write_u16(bs, op->minPicWidth); gf_bs_write_u16(bs, op->minPicHeight); gf_bs_write_u16(bs, op->maxPicWidth); gf_bs_write_u16(bs, op->maxPicHeight); gf_bs_write_int(bs, op->maxChromaFormat, 2); gf_bs_write_int(bs, op->maxBitDepth - 8, 3); gf_bs_write_int(bs, 0x1, 1);//resereved op->frame_rate_info_flag ? gf_bs_write_int(bs, 0x1, 1) : gf_bs_write_int(bs, 0x0, 1); op->bit_rate_info_flag ? 
gf_bs_write_int(bs, 0x1, 1) : gf_bs_write_int(bs, 0x0, 1); if (op->frame_rate_info_flag) { gf_bs_write_u16(bs, op->avgFrameRate); gf_bs_write_int(bs, 0xFF, 6); //reserved gf_bs_write_int(bs, op->constantFrameRate, 2); } if (op->bit_rate_info_flag) { gf_bs_write_u32(bs, op->maxBitRate); gf_bs_write_u32(bs, op->avgBitRate); } } count=gf_list_count(ptr->dependency_layers); gf_bs_write_u8(bs, count); for (i = 0; i < count; i++) { LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, i); gf_bs_write_u8(bs, dep->dependent_layerID); gf_bs_write_u8(bs, dep->num_layers_dependent_on); for (j = 0; j < dep->num_layers_dependent_on; j++) gf_bs_write_u8(bs, dep->dependent_on_layerID[j]); for (j = 0; j < 16; j++) { if (ptr->scalability_mask & (1 << j)) gf_bs_write_u8(bs, dep->dimension_identifier[j]); } } return GF_OK; } u32 gf_isom_oinf_size_entry(void *entry) { GF_OperatingPointsInformation* ptr = (GF_OperatingPointsInformation *)entry; u32 size = 0, i ,j, count; if (!ptr) return 0; size += 3; //scalability_mask + reserved + num_profile_tier_level count=gf_list_count(ptr->profile_tier_levels); size += count * 12; //general_profile_space + general_tier_flag + general_profile_idc + general_profile_compatibility_flags + general_constraint_indicator_flags + general_level_idc size += 2;//num_operating_points count=gf_list_count(ptr->operating_points); for (i = 0; i < count; i++) { LHEVC_OperatingPoint *op = (LHEVC_OperatingPoint *)gf_list_get(ptr->operating_points, i); size += 2/*output_layer_set_idx*/ + 1/*max_temporal_id*/ + 1/*layer_count*/; size += op->layer_count * 2; size += 9; if (op->frame_rate_info_flag) { size += 3; } if (op->bit_rate_info_flag) { size += 8; } } size += 1;//max_layer_count count=gf_list_count(ptr->dependency_layers); for (i = 0; i < count; i++) { LHEVC_DependentLayer *dep = (LHEVC_DependentLayer *)gf_list_get(ptr->dependency_layers, i); size += 1/*dependent_layerID*/ + 1/*num_layers_dependent_on*/; size += dep->num_layers_dependent_on * 1;//dependent_on_layerID for (j = 0; j < 16; j++) { if (ptr->scalability_mask & (1 << j)) size += 1;//dimension_identifier } } return size; } GF_LHVCLayerInformation *gf_isom_linf_new_entry() { GF_LHVCLayerInformation* ptr; GF_SAFEALLOC(ptr, GF_LHVCLayerInformation); if (ptr) ptr->num_layers_in_track = gf_list_new(); return ptr; } void gf_isom_linf_del_entry(void *entry) { GF_LHVCLayerInformation* ptr = (GF_LHVCLayerInformation *)entry; if (!ptr) return; while (gf_list_count(ptr->num_layers_in_track)) { LHVCLayerInfoItem *li = (LHVCLayerInfoItem *)gf_list_get(ptr->num_layers_in_track, 0); gf_free(li); gf_list_rem(ptr->num_layers_in_track, 0); } gf_list_del(ptr->num_layers_in_track); gf_free(ptr); return; } GF_Err gf_isom_linf_read_entry(void *entry, GF_BitStream *bs) { GF_LHVCLayerInformation* ptr = (GF_LHVCLayerInformation *)entry; u32 i, count; if (!ptr) return GF_BAD_PARAM; gf_bs_read_int(bs, 2); count = gf_bs_read_int(bs, 6); for (i = 0; i < count; i++) { LHVCLayerInfoItem *li; GF_SAFEALLOC(li, LHVCLayerInfoItem); if (!li) return GF_OUT_OF_MEM; gf_bs_read_int(bs, 4); li->layer_id = gf_bs_read_int(bs, 6); li->min_TemporalId = gf_bs_read_int(bs, 3); li->max_TemporalId = gf_bs_read_int(bs, 3); gf_bs_read_int(bs, 1); li->sub_layer_presence_flags = gf_bs_read_int(bs, 7); gf_list_add(ptr->num_layers_in_track, li); } return GF_OK; } GF_Err gf_isom_linf_write_entry(void *entry, GF_BitStream *bs) { GF_LHVCLayerInformation* ptr = (GF_LHVCLayerInformation *)entry; u32 i, count; if (!ptr) return GF_OK; gf_bs_write_int(bs, 
0, 2); count=gf_list_count(ptr->num_layers_in_track); gf_bs_write_int(bs, count, 6); for (i = 0; i < count; i++) { LHVCLayerInfoItem *li = (LHVCLayerInfoItem *)gf_list_get(ptr->num_layers_in_track, i); gf_bs_write_int(bs, 0, 4); gf_bs_write_int(bs, li->layer_id, 6); gf_bs_write_int(bs, li->min_TemporalId, 3); gf_bs_write_int(bs, li->max_TemporalId, 3); gf_bs_write_int(bs, 0, 1); gf_bs_write_int(bs, li->sub_layer_presence_flags, 7); } return GF_OK; } u32 gf_isom_linf_size_entry(void *entry) { GF_LHVCLayerInformation* ptr = (GF_LHVCLayerInformation *)entry; u32 size = 0, count; if (!ptr) return 0; size += 1; count=gf_list_count(ptr->num_layers_in_track); size += count * 3; return size; } #endif /*GPAC_DISABLE_ISOM*/
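The oinf helpers above form a small lifecycle API (new / read / size / write / del). The following round-trip is a minimal illustrative sketch, not part of the original file: it assumes GPAC's public bitstream API (gf_bs_new, gf_bs_del, gf_bs_get_position), and the function name is hypothetical.

/* Hypothetical round-trip for the oinf entry helpers defined above:
 * parse a raw 'oinf' sample-group payload, then re-serialize it and
 * check that the size helper predicted the written length. */
static GF_Err oinf_entry_roundtrip_sketch(const u8 *payload, u32 payload_size)
{
	GF_Err e;
	GF_BitStream *bs;
	GF_OperatingPointsInformation *oinf = gf_isom_oinf_new_entry();
	if (!oinf) return GF_OUT_OF_MEM;

	//parse the payload from a read-mode bitstream
	bs = gf_bs_new(payload, payload_size, GF_BITSTREAM_READ);
	e = gf_isom_oinf_read_entry(oinf, bs);
	gf_bs_del(bs);

	if (!e) {
		//re-serialize into a dynamic write-mode bitstream
		u32 expected = gf_isom_oinf_size_entry(oinf);
		bs = gf_bs_new(NULL, 0, GF_BITSTREAM_WRITE);
		e = gf_isom_oinf_write_entry(oinf, bs);
		if (!e && (gf_bs_get_position(bs) != expected))
			e = GF_ISOM_INVALID_FILE;
		gf_bs_del(bs);
	}
	gf_isom_oinf_del_entry(oinf);
	return e;
}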
func_before:

void AV1_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *av1, GF_MediaBox *mdia)
{
	GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)av1, GF_FALSE);

	if (av1->emul_esd) gf_odf_desc_del((GF_Descriptor *)av1->emul_esd);
	av1->emul_esd = gf_odf_desc_esd_new(2);
	av1->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	av1->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_AV1;

	if (btrt) {
		av1->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB;
		av1->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate;
		av1->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate;
	}
	if (av1->av1_config) {
		GF_AV1Config *av1_cfg = AV1_DuplicateConfig(av1->av1_config->config);
		if (av1_cfg) {
			gf_odf_av1_cfg_write(av1_cfg, &av1->emul_esd->decoderConfig->decoderSpecificInfo->data, &av1->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
			gf_odf_av1_cfg_del(av1_cfg);
		}
	}
}
func_after:

void AV1_RewriteESDescriptorEx(GF_MPEGVisualSampleEntryBox *av1, GF_MediaBox *mdia)
{
	GF_BitRateBox *btrt = gf_isom_sample_entry_get_bitrate((GF_SampleEntryBox *)av1, GF_FALSE);

	if (av1->emul_esd) gf_odf_desc_del((GF_Descriptor *)av1->emul_esd);
	av1->emul_esd = gf_odf_desc_esd_new(2);
	av1->emul_esd->decoderConfig->streamType = GF_STREAM_VISUAL;
	av1->emul_esd->decoderConfig->objectTypeIndication = GF_CODECID_AV1;

	if (btrt) {
		av1->emul_esd->decoderConfig->bufferSizeDB = btrt->bufferSizeDB;
		av1->emul_esd->decoderConfig->avgBitrate = btrt->avgBitrate;
		av1->emul_esd->decoderConfig->maxBitrate = btrt->maxBitrate;
	}
	if (av1->av1_config && av1->av1_config->config) {
		GF_AV1Config *av1_cfg = AV1_DuplicateConfig(av1->av1_config->config);
		if (av1_cfg) {
			gf_odf_av1_cfg_write(av1_cfg, &av1->emul_esd->decoderConfig->decoderSpecificInfo->data, &av1->emul_esd->decoderConfig->decoderSpecificInfo->dataLength);
			gf_odf_av1_cfg_del(av1_cfg);
		}
	}
}
diff:
{'added': [(1341, 'static GF_AV1Config* AV1_DuplicateConfig(GF_AV1Config const * const cfg)'), (1342, '{'), (1386, '\tif (av1->av1_config && av1->av1_config->config) {'), (2410, '\tif (!entry || !entry->av1_config|| !entry->av1_config->config) return NULL;')], 'deleted': [(1341, 'static GF_AV1Config* AV1_DuplicateConfig(GF_AV1Config const * const cfg) {'), (1385, '\tif (av1->av1_config) {'), (2409, '\tif (!entry || !entry->av1_config) return NULL;')]}
num_lines_added: 4
num_lines_deleted: 3
num_lines_in_file: 3,034
num_tokens_in_file: 23,825
num_lines_in_method: 20
num_tokens_in_method: 174
method_complexity: 5
repo: https://github.com/gpac/gpac
cve_id: CVE-2021-31262
cwe_id: CWE-476
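The record above captures a CWE-476 (NULL pointer dereference) fix: per the diff, the patched code checks not only that entry->av1_config exists but also that its nested ->config pointer is non-NULL before dereferencing it. Below is a minimal standalone sketch of that guard pattern; the struct and function names are hypothetical stand-ins, not GPAC types.

#include <stddef.h>

/* Hypothetical shapes mirroring the patched code: a sample entry holds an
 * optional config box, which in turn holds an optional parsed config. */
typedef struct { int seq_profile; } Av1Cfg;
typedef struct { Av1Cfg *config; } Av1CfgBox;
typedef struct { Av1CfgBox *av1_config; } SampleEntry;

/* The pre-patch code checked only entry->av1_config, so a box allocated
 * while parsing a truncated or malformed file, whose config was never
 * populated, crashed the caller. Guarding every level of the pointer
 * chain returns NULL instead. */
static Av1Cfg *get_av1_cfg(const SampleEntry *entry)
{
	if (!entry || !entry->av1_config || !entry->av1_config->config)
		return NULL;
	return entry->av1_config->config;
}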